metadata | text
---|---
{
"source": "jhladky/hmint",
"score": 2
} |
#### File: hmint/app/core.py
```python
import pkgutil
import importlib
from os import environ
from ast import literal_eval
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
SQLALCHEMY_DATABASE_URI = environ['DATABASE_URL']
SECRET_KEY = environ['SECRET_KEY']
# environment variables are strings, and bool('False') is True; literal_eval parses the value correctly
DEBUG = literal_eval(environ.get('DEBUG', 'False'))
db = SQLAlchemy()
migrate = Migrate()
login_manager = LoginManager()
def register_blueprints(app, package_name, package_path):
for _, name, _ in pkgutil.iter_modules(package_path):
m = importlib.import_module('.' + name, package_name)
if hasattr(m, 'blueprint'):
if DEBUG:
print(' * Registering blueprint ' +
str(m.blueprint.import_name))
app.register_blueprint(getattr(m, 'blueprint'))
```
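A minimal sketch of how these pieces are typically wired together in an application factory. This assumes it lives alongside (or imports from) the module above; the `app.frontend` package and `create_app` function are hypothetical names, not taken from the repository:
```python
from flask import Flask
import app.frontend  # hypothetical package whose modules expose `blueprint` attributes

def create_app():
    flask_app = Flask(__name__)
    flask_app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
    flask_app.config['SECRET_KEY'] = SECRET_KEY

    # bind the shared extension objects to this app instance
    db.init_app(flask_app)
    migrate.init_app(flask_app, db)
    login_manager.init_app(flask_app)

    # register every module in app.frontend that defines a `blueprint`
    register_blueprints(flask_app, app.frontend.__name__, app.frontend.__path__)
    return flask_app
```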
#### File: app/models/account.py
```python
from app.core import db
from app.models.user import User
from app.models.institution import Institution
from app.models.account_category import AccountCategory
from app.exceptions import NotFoundError
from sqlalchemy.exc import InvalidRequestError
class Account(db.Model):
__tablename__ = 'account'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
user = db.relationship('User')
category_id = db.Column(db.Integer, db.ForeignKey(AccountCategory.id),
nullable=False)
category = db.relationship('AccountCategory')
institution_id = db.Column(db.Integer, db.ForeignKey(Institution.id),
nullable=False)
institution = db.relationship('Institution')
name = db.Column(db.String(50), nullable=False)
maintenance_fee = db.Column(db.Numeric(precision=10, scale=4))
minimum_balance = db.Column(db.Numeric(precision=10, scale=4))
interest_rate = db.Column(db.Numeric(precision=10, scale=4))
transactions = db.relationship('Transaction', backref='Account')
def __init__(self, json):
account_category = AccountCategory.query.get(json['category_id'])
institution = Institution.query.get(json['institution_id'])
if account_category is None:
raise NotFoundError(AccountCategory)
if institution is None:
raise NotFoundError(Institution)
        self.category = account_category
self.institution = institution
self.name = json['name']
@property
def serialize(self):
# This is going to need to be fixed in the future,
# based on what type of account it is
return {
'id': self.id,
'name': self.name,
'category': self.category.serialize,
'institution': self.institution.serialize,
}
``` |
{
"source": "jhlau/acceptability-prediction-in-context",
"score": 3
} |
#### File: acceptability-prediction-in-context/code/compute_model_score.py
```python
import sys
import argparse
import torch
import math
import pickle
import numpy as np
from tqdm import tqdm
from calc_corr import get_sentence_data
from pytorch_transformers import GPT2Tokenizer, GPT2LMHeadModel
from transformers import BertTokenizer, BertForMaskedLM, XLNetTokenizer, XLNetLMHeadModel
from scipy.stats.mstats import pearsonr as corr
from scipy.special import softmax
#global
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich <NAME>, narrates the
remainder of the story. 1883 Western Siberia,
a young <NAME> is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> """
###########
#functions#
###########
def model_score(tokenize_input, tokenize_context, model, tokenizer, device, args):
if args.model_name.startswith("gpt"):
if not args.use_context:
#prepend the sentence with <|endoftext|> token, so that the loss is computed correctly
tensor_input = torch.tensor([[50256] + tokenizer.convert_tokens_to_ids(tokenize_input)], device=device)
labels = torch.tensor([[50256] + tokenizer.convert_tokens_to_ids(tokenize_input)], device=device)
labels[:,:1] = -1
            loss = model(tensor_input, labels=labels)
else:
tensor_input = torch.tensor([tokenizer.convert_tokens_to_ids(tokenize_context+tokenize_input)], device=device)
labels = torch.tensor([tokenizer.convert_tokens_to_ids(tokenize_context+tokenize_input)], device=device)
#-1 label for context (loss not computed over these tokens)
labels[:,:len(tokenize_context)] = -1
loss = model(tensor_input, labels=labels)
return float(loss[0]) * -1.0 * len(tokenize_input)
elif args.model_name.startswith("bert"):
batched_indexed_tokens = []
batched_segment_ids = []
if not args.use_context:
tokenize_combined = ["[CLS]"] + tokenize_input + ["[SEP]"]
else:
tokenize_combined = ["[CLS]"] + tokenize_context + tokenize_input + ["[SEP]"]
for i in range(len(tokenize_input)):
# Mask a token that we will try to predict back with `BertForMaskedLM`
masked_index = i + 1 + (len(tokenize_context) if args.use_context else 0)
tokenize_masked = tokenize_combined.copy()
tokenize_masked[masked_index] = '[MASK]'
#unidir bert
#for j in range(masked_index, len(tokenize_combined)-1):
# tokenize_masked[j] = '[MASK]'
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenize_masked)
# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
segment_ids = [0]*len(tokenize_masked)
batched_indexed_tokens.append(indexed_tokens)
batched_segment_ids.append(segment_ids)
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor(batched_indexed_tokens, device=device)
segment_tensor = torch.tensor(batched_segment_ids, device=device)
# Predict all tokens
with torch.no_grad():
outputs = model(tokens_tensor, token_type_ids=segment_tensor)
predictions = outputs[0]
# go through each word and sum their logprobs
lp = 0.0
for i in range(len(tokenize_input)):
masked_index = i + 1 + (len(tokenize_context) if args.use_context else 0)
predicted_score = predictions[i, masked_index]
predicted_prob = softmax(predicted_score.cpu().numpy())
lp += np.log(predicted_prob[tokenizer.convert_tokens_to_ids([tokenize_combined[masked_index]])[0]])
return lp
elif args.model_name.startswith("xlnet"):
tokenize_ptext = tokenizer.tokenize(PADDING_TEXT.lower())
if not args.use_context:
tokenize_input2 = tokenize_ptext + tokenize_input
else:
tokenize_input2 = tokenize_ptext + tokenize_context + tokenize_input
# go through each word and sum their logprobs
lp = 0.0
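        # For each sentence token, perm_mask hides the target (and, in the unidirectional
        # case, everything to its right); target_mapping requests the model's distribution
        # at that single position, from which the target word's log probability is taken.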
for max_word_id in range((len(tokenize_input2)-len(tokenize_input)), (len(tokenize_input2))):
sent = tokenize_input2[:]
input_ids = torch.tensor([tokenizer.convert_tokens_to_ids(sent)], device=device)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
#if not bidir, mask target word + right/future words
if not args.xlnet_bidir:
perm_mask[:, :, max_word_id:] = 1.0
#if bidir, mask only the target word
else:
perm_mask[:, :, max_word_id] = 1.0
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, max_word_id] = 1.0
with torch.no_grad():
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
next_token_logits = outputs[0]
word_id = tokenizer.convert_tokens_to_ids([tokenize_input2[max_word_id]])[0]
predicted_prob = softmax((next_token_logits[0][-1]).cpu().numpy())
lp += np.log(predicted_prob[word_id])
return lp
######
#main#
######
def main(args):
#sentence and human ratings
sentencexdata = get_sentence_data(args.input_csv)
human_ratings = pickle.load(open(args.human_rating_pickle, "rb"))
#unigram frequencies
unigram_freq = pickle.load(open(args.unigram_pickle, "rb"))
unigram_total = sum(unigram_freq.values())
#system scores
lps = []
mean_lps = []
pen_lps = []
div_lps = []
sub_lps = []
slors = []
pen_slors = []
sent_ids = []
#Load pre-trained model and tokenizer
if args.model_name.startswith("gpt"):
model = GPT2LMHeadModel.from_pretrained(args.model_name)
tokenizer = GPT2Tokenizer.from_pretrained(args.model_name)
elif args.model_name.startswith("bert"):
model = BertForMaskedLM.from_pretrained(args.model_name)
tokenizer = BertTokenizer.from_pretrained(args.model_name,
do_lower_case=(True if "uncased" in args.model_name else False))
elif args.model_name.startswith("xlnet"):
tokenizer = XLNetTokenizer.from_pretrained(args.model_name)
model = XLNetLMHeadModel.from_pretrained(args.model_name)
else:
print("Supported models: gpt, bert and xlnet only.")
raise SystemExit
#put model to device (GPU/CPU)
device = torch.device(args.device)
model.to(device)
#eval mode; no dropout
model.eval()
#loop through each sentence and compute system scores
y = [] #human mean rating
sent_total = 0
for sent_id, ratings in tqdm(sorted(human_ratings.items())):
y.append(np.mean(ratings))
text = sentencexdata[sent_id]["SENTENCE"]
#uppercase first character
#text = text[0].upper() + text[1:]
tokenize_input = tokenizer.tokenize(text)
text_len = len(tokenize_input)
if args.use_context:
context = sentencexdata[sent_id]["CONTEXT"].replace("\t", " ")
tokenize_context = tokenizer.tokenize(context)
else:
tokenize_context = None
#unigram logprob
uni_lp = 0.0
for w in tokenize_input:
uni_lp += math.log(float(unigram_freq[w])/unigram_total)
#compute sentence logprob
lp = model_score(tokenize_input, tokenize_context, model, tokenizer, device, args)
#acceptability measures
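        # Summary of the measures (as computed below and echoed in the verbose output):
        #   LP     = lp                          MeanLP = lp / sentence length
        #   PenLP  = lp / length penalty         NormLP = -lp / unigram log prob
        #   SLOR   = (lp - unigram log prob) / sentence length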
penalty = ((5+text_len)**0.8 / (5+1)**0.8)
lps.append(lp)
mean_lps.append(lp/text_len)
pen_lps.append( lp / penalty )
div_lps.append(-lp / uni_lp)
sub_lps.append(lp - uni_lp)
slors.append((lp - uni_lp) / text_len)
pen_slors.append((lp - uni_lp) / penalty)
sent_ids.append(sent_id)
sent_total += 1
results = [corr(lps, y)[0],
corr(mean_lps, y)[0],
corr(pen_lps, y)[0],
corr(div_lps, y)[0],
corr(sub_lps, y)[0],
corr(slors, y)[0],
corr(pen_slors, y)[0]]
if args.verbose:
#print("Correlations:")
print("LP = %.2f" % results[0])
print("MeanLP = %.2f" % results[1])
print("PenLP = %.2f" % results[2])
print("NormLP = %.2f" % results[3])
#print("Norm LogProb (Sub) =", results[4])
print("SLOR = %.2f" % results[5])
#print("SLOR with Length Penalty =", results[6])
if __name__ == "__main__":
#parser arguments
desc = "Computes correlation using pytorch transformer models"
parser = argparse.ArgumentParser(description=desc)
#arguments
parser.add_argument("-r", "--human-rating-pickle", required=True, help="Pickle file containing human ratings")
parser.add_argument("-i", "--input-csv", required=True, help="Mturk input csv file containing sentence data")
parser.add_argument("-m", "--model-name", required=True,
help="Pretrained model name (gpt2/gpt2-medium/bert-base-[un]cased/bert-large-[un]cased)")
parser.add_argument("-u", "--unigram-pickle", required=True, help="Pickle file containing unigram frequencies (used for SLOR and NormLP)")
parser.add_argument("-c", "--use-context", action="store_true", help="use context at test time for the model")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose print")
parser.add_argument("-d", "--device", default="cpu",
help="specify the device to use (cpu or cuda:X for gpu); default=cpu")
parser.add_argument("--xlnet-bidir", action="store_true", help="bidir for xlnet (sees left and right context)")
args = parser.parse_args()
main(args)
``` |
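For quick experimentation outside the command line, `model_score` can also be called directly. A minimal sketch, assuming the function above is in scope and the GPT-2 weights can be downloaded; the sentence and the `SimpleNamespace` stand-in for the parsed CLI arguments are illustrative:
```python
import torch
from types import SimpleNamespace
from pytorch_transformers import GPT2Tokenizer, GPT2LMHeadModel

# stand-in for argparse results; only the fields model_score() reads are set
args = SimpleNamespace(model_name="gpt2", use_context=False)
device = torch.device("cpu")

tokenizer = GPT2Tokenizer.from_pretrained(args.model_name)
model = GPT2LMHeadModel.from_pretrained(args.model_name).to(device).eval()

tokens = tokenizer.tokenize("The cat sat on the mat.")
lp = model_score(tokens, None, model, tokenizer, device, args)  # sentence log probability
print(lp)
```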
{
"source": "jh-lau/solid_ai_waddle",
"score": 3
} |
#### File: posture_classification/ops/data_processor.py
```python
import os
from typing import Tuple
import pandas as pd
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import shuffle
import numpy as np
def data_generator_flow(_train_dir: str,
_valid_dir: str,
_test_dir: str,
batch_size: int = 32,
target_size: Tuple = (256, 256),
multi_output_mode: bool = False) -> Tuple:
"""
数据生成器函数
@param _train_dir: 训练数据文件路径
@param _valid_dir: 验证数据文件路径
@param _test_dir: 测试数据文件路径
@param batch_size: 批量参数
@param target_size: 目标转换形状
@param multi_output_mode: 多输出模式
@return: 生成器元组
"""
train_df = pd.read_csv(os.path.join(_train_dir, 'train.csv'))
valid_df = pd.read_csv(os.path.join(_valid_dir, 'valid.csv'))
test_df = pd.read_csv(os.path.join(_test_dir, 'test.csv'))
if not multi_output_mode:
train_df.label = train_df.label.astype('str')
valid_df.label = valid_df.label.astype('str')
test_df.label = test_df.label.astype('str')
train_data_gen = ImageDataGenerator(
preprocessing_function=preprocess_input,
width_shift_range=.2,
height_shift_range=.2,
shear_range=.2,
zoom_range=.2,
channel_shift_range=np.random.choice(100),
horizontal_flip=True,
)
train_data_flow = train_data_gen.flow_from_dataframe(
dataframe=train_df,
target_size=target_size,
directory=_train_dir,
batch_size=batch_size,
class_mode='multi_output' if multi_output_mode else 'binary',
x_col='filename',
y_col=['label', 'score'] if multi_output_mode else 'label',
)
    # Do not apply data augmentation to the validation set
valid_data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
valid_data_flow = valid_data_gen.flow_from_dataframe(
dataframe=valid_df,
target_size=target_size,
directory=_valid_dir,
batch_size=batch_size,
class_mode='multi_output' if multi_output_mode else 'binary',
x_col='filename',
y_col=['label', 'score'] if multi_output_mode else 'label',
)
test_data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_data_flow = test_data_gen.flow_from_dataframe(
dataframe=test_df,
target_size=target_size,
directory=_test_dir,
batch_size=batch_size,
class_mode='multi_output' if multi_output_mode else 'binary',
x_col='filename',
y_col=['label', 'score'] if multi_output_mode else 'label',
)
return train_data_flow, valid_data_flow, test_data_flow
```
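A brief usage sketch, assuming the function above is importable; the directory paths are hypothetical, and each directory is expected to contain the corresponding CSV plus the image files referenced in its `filename` column:
```python
train_flow, valid_flow, test_flow = data_generator_flow(
    _train_dir='data/train',   # hypothetical paths
    _valid_dir='data/valid',
    _test_dir='data/test',
    batch_size=32,
    target_size=(256, 256),
    multi_output_mode=False,
)
x_batch, y_batch = next(train_flow)   # one batch of preprocessed images and binary labels
```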
#### File: knowledge_graph_embedding/project_distmult_rotate_transe/service.py
```python
import codecs
import json
import os
import time
from typing import Dict
import torch
from dotmap import DotMap
from .core.predict import get_entity_relation_with_id
from .layer.model import KGEModel
kge_model, entity2id, id2entity, relation2id, all_true_triples, args = None, None, None, None, None, None
def load_model(model_path: str) -> None:
"""
模型加载
@param model_path: 模型文件夹路径
@return:
"""
global kge_model, entity2id, id2entity, relation2id, all_true_triples, args
args = DotMap(json.load(codecs.open(os.path.join(model_path, 'config.json'), 'r', encoding='utf-8')))
entity2id, id2entity, relation2id, id2relation, all_true_triples = get_entity_relation_with_id(args.data_path)
kge_model = KGEModel(
model_name=args.model,
nentity=args.nentity,
nrelation=args.nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding
)
if args.cuda:
kge_model = kge_model.cuda()
checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
kge_model.load_state_dict(checkpoint['model_state_dict'])
def inference(target_triple: str) -> Dict:
"""
推理函数
@param target_triple: 目标需预测三元组:'头实体 关系 尾实体'
@return: 头尾实体的10个预测结果
"""
if kge_model is None:
return {'预测结果': '提醒:模型未加载'}
try:
target_triple = target_triple.split()
head = entity2id[target_triple[0]]
tail = entity2id[target_triple[2]]
relation = relation2id[target_triple[1]]
target_triple = [(head, relation, tail)]
except KeyError as e:
return {'预测结果': f'实体或者关系 <{e}> 不存在,请确保输入的实体或者关系已存在。'}
prediction = kge_model.test_step(kge_model, target_triple, all_true_triples, args, True)
head_entity_prediction = [id2entity[str(idx)] for idx in prediction['head_predict']]
tail_entity_prediction = [id2entity[str(idx)] for idx in prediction['tail_predict']]
result = {'头实体预测结果': head_entity_prediction, '尾实体预测结果': tail_entity_prediction}
return result
if __name__ == '__main__':
t1 = time.time()
load_model('data_path/model/DistMult_cn_military_300k_10')
test_cases = [
'摩耶号/Maya巡洋舰 建造时间 1928年',
'1949年2月28日 星座 双鱼座'
]
t2 = time.time()
res = inference(test_cases[0])
print(f'模型加载耗时: {t2 - t1: .3}s')
print(f'推理耗时: {time.time() - t2: .3}s')
print(res)
```
#### File: triples_extraction/project_casrel/utils.py
```python
import keras.backend as K
from keras_bert import Tokenizer
import numpy as np
import codecs
from tqdm import tqdm
import json
import unicodedata
BERT_MAX_LEN = 512
class HBTokenizer(Tokenizer):
def _tokenize(self, text):
if not self._cased:
text = unicodedata.normalize('NFD', text)
text = ''.join([ch for ch in text if unicodedata.category(ch) != 'Mn'])
text = text.lower()
spaced = ''
for ch in text:
if ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):
continue
else:
spaced += ch
tokens = []
for word in spaced.strip().split():
tokens += self._word_piece_tokenize(word)
tokens.append('[unused1]')
return tokens
def get_tokenizer(vocab_path):
token_dict = {}
with codecs.open(vocab_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
return HBTokenizer(token_dict, cased=True)
def seq_gather(x):
seq, idxs = x
idxs = K.cast(idxs, 'int32')
batch_idxs = K.arange(0, K.shape(seq)[0])
batch_idxs = K.expand_dims(batch_idxs, 1)
idxs = K.concatenate([batch_idxs, idxs], 1)
return K.tf.gather_nd(seq, idxs)
def extract_items(subject_model, object_model, tokenizer, text_in, id2rel, h_bar=0.5, t_bar=0.5):
tokens = tokenizer.tokenize(text_in)
token_ids, segment_ids = tokenizer.encode(first=text_in)
token_ids, segment_ids = np.array([token_ids]), np.array([segment_ids])
if len(token_ids[0]) > BERT_MAX_LEN:
token_ids = token_ids[:,:BERT_MAX_LEN]
segment_ids = segment_ids[:,:BERT_MAX_LEN]
sub_heads_logits, sub_tails_logits = subject_model.predict([token_ids, segment_ids])
sub_heads, sub_tails = np.where(sub_heads_logits[0] > h_bar)[0], np.where(sub_tails_logits[0] > t_bar)[0]
subjects = []
for sub_head in sub_heads:
sub_tail = sub_tails[sub_tails >= sub_head]
if len(sub_tail) > 0:
sub_tail = sub_tail[0]
subject = tokens[sub_head: sub_tail]
subjects.append((subject, sub_head, sub_tail))
if subjects:
triple_list = []
token_ids = np.repeat(token_ids, len(subjects), 0)
segment_ids = np.repeat(segment_ids, len(subjects), 0)
sub_heads, sub_tails = np.array([sub[1:] for sub in subjects]).T.reshape((2, -1, 1))
obj_heads_logits, obj_tails_logits = object_model.predict([token_ids, segment_ids, sub_heads, sub_tails])
for i, subject in enumerate(subjects):
sub = subject[0]
sub = ''.join([i.lstrip("##") for i in sub])
sub = ' '.join(sub.split('[unused1]'))
obj_heads, obj_tails = np.where(obj_heads_logits[i] > h_bar), np.where(obj_tails_logits[i] > t_bar)
for obj_head, rel_head in zip(*obj_heads):
for obj_tail, rel_tail in zip(*obj_tails):
if obj_head <= obj_tail and rel_head == rel_tail:
rel = id2rel[rel_head]
obj = tokens[obj_head: obj_tail]
obj = ''.join([i.lstrip("##") for i in obj])
obj = ' '.join(obj.split('[unused1]'))
triple_list.append((sub, rel, obj))
break
triple_set = set()
for s, r, o in triple_list:
triple_set.add((s, r, o))
return list(triple_set)
else:
return []
def partial_match(pred_set, gold_set):
pred = {(i[0].split(' ')[0] if len(i[0].split(' ')) > 0 else i[0], i[1], i[2].split(' ')[0] if len(i[2].split(' ')) > 0 else i[2]) for i in pred_set}
gold = {(i[0].split(' ')[0] if len(i[0].split(' ')) > 0 else i[0], i[1], i[2].split(' ')[0] if len(i[2].split(' ')) > 0 else i[2]) for i in gold_set}
return pred, gold
def metric(subject_model, object_model, eval_data, id2rel, tokenizer, exact_match=False, output_path=None):
if output_path:
F = open(output_path, 'w')
orders = ['subject', 'relation', 'object']
correct_num, predict_num, gold_num = 1e-10, 1e-10, 1e-10
for line in tqdm(iter(eval_data)):
Pred_triples = set(extract_items(subject_model, object_model, tokenizer, line['text'], id2rel))
Gold_triples = set(line['triple_list'])
Pred_triples_eval, Gold_triples_eval = partial_match(Pred_triples, Gold_triples) if not exact_match else (Pred_triples, Gold_triples)
correct_num += len(Pred_triples_eval & Gold_triples_eval)
predict_num += len(Pred_triples_eval)
gold_num += len(Gold_triples_eval)
if output_path:
result = json.dumps({
'text': line['text'],
'triple_list_gold': [
dict(zip(orders, triple)) for triple in Gold_triples
],
'triple_list_pred': [
dict(zip(orders, triple)) for triple in Pred_triples
],
'new': [
dict(zip(orders, triple)) for triple in Pred_triples - Gold_triples
],
'lack': [
dict(zip(orders, triple)) for triple in Gold_triples - Pred_triples
]
}, ensure_ascii=False, indent=4)
F.write(result + '\n')
if output_path:
F.close()
precision = correct_num / predict_num
recall = correct_num / gold_num
f1_score = 2 * precision * recall / (precision + recall)
print(f'correct_num:{correct_num}\npredict_num:{predict_num}\ngold_num:{gold_num}')
return precision, recall, f1_score
``` |
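For intuition, `seq_gather` picks one time step per example out of a batch of sequence features; the same indexing in plain NumPy (shapes are made up for illustration):
```python
import numpy as np

seq = np.random.rand(4, 10, 8)             # (batch, seq_len, hidden)
idxs = np.array([[2], [0], [7], [5]])      # one position per example
gathered = seq[np.arange(seq.shape[0]), idxs[:, 0]]   # shape (4, 8), as seq_gather returns
print(gathered.shape)
```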
{
"source": "jhlau/twitter-deepgeo",
"score": 2
} |
#### File: jhlau/twitter-deepgeo/util.py
```python
import calendar
import codecs
import ujson
import sys
import operator
import numpy as np
from collections import defaultdict
#globals
day_map = { v: i for i,v in enumerate(list(calendar.day_abbr)) } #{"Mon": 0, ...}
utc_max = 14.0
utc_min = -12.0
unk_token = "<unk>"
pad_token = "<pad>"
id_city_key = { "hashed_tweet_id": ("hashed_tweet_id", "city"), "tweet_id": ("tweet_id", "tweet_city"), \
"id_str": ("id_str", "tweet_city") }
def datetime_to_float(datetime):
times = datetime.split()[3].split(":")
t = ((float(times[0])*60 + float(times[1]))%1440) / float(1440)
return t
def get_id_city_key(d):
id_key, city_key = None, None
for k in id_city_key.keys():
if k in d:
id_key = id_city_key[k][0]
city_key = id_city_key[k][1]
    if id_key is None or city_key is None:
        print("Unable to find tweet ID and city key; json =", d)
        raise SystemExit
return id_key, city_key
def load_label(filename, cf):
label = {}
id_key, city_key = None, None
for line_id, line in enumerate(codecs.open(filename, "r", "utf-8")):
d = ujson.loads(line)
if id_key == None:
id_key, city_key = get_id_city_key(d)
label[d[id_key]] = d[city_key]
if line_id % 10000 == 0 and cf.verbose:
sys.stdout.write(str(line_id) + " lines processed\r")
return label
def load_data(filename, label, check_min_len, cf):
data = []
id_key = None
for line_id, line in enumerate(codecs.open(filename, "r", "utf-8")):
d = ujson.loads(line)
if id_key == None:
id_key, _ = get_id_city_key(d)
if (d[id_key] in label) and ((not check_min_len) or (len(d["text"]) >= cf.seq_minlen)):
tweet_dic = build_tweet_dic(d, id_key)
data.append(tweet_dic)
if line_id % 10000 == 0 and cf.verbose:
sys.stdout.write(str(line_id) + " lines processed\r")
return data
def build_tweet_dic(data, id_key):
d = {}
d["text"] = data["text"]
d["id_str"] = data[id_key]
#time is converted to number of minutes since 00:00 and then normalised to 0-1
d["time"] = datetime_to_float(data["created_at"])
#mon = 0, tue = 1, etc
d["day"] = day_map[data["created_at"].split()[0]]
#utc offset (user-level metadata)
offset = 100.0 #default to this value if no offset data (it should zero out the output value)
try:
offset = float(data["user"]["utc_offset"]) / 3600
offset = (offset - utc_min) / (utc_max - utc_min)
except:
pass
d["offset"] = offset
#time zone (user-level metadata)
d["timezone"] = data["user"]["time_zone"]
if d["timezone"] == None:
d["timezone"] = unk_token
#location (user-level metadata)
d["location"] = data["user"]["location"]
#description (user-level metadata)
d["description"] = data["user"]["description"]
#name (user-level metadata)
d["name"] = data["user"]["name"]
#user created time (user-level metadata)
d["usertime"] = datetime_to_float(data["user"]["created_at"])
return d
def get_vocab(data, attr_name, granularity, minfreq):
wordfreq = defaultdict(int)
dic = {unk_token: 0, pad_token:1}
dic_rev = [unk_token, pad_token]
for d in data:
if granularity == "char":
for c in d[attr_name]:
wordfreq[c] += 1
elif granularity == "word":
wordfreq[d[attr_name]] += 1
for w, f in sorted(wordfreq.items(), key=operator.itemgetter(1), reverse=True):
if f >= minfreq:
if (w != unk_token) and (w != pad_token):
dic[w] = len(dic_rev)
dic_rev.append(w)
else:
break
return dic, dic_rev, wordfreq
def get_classes(data, label):
classes = {unk_token:0} #for missing classes in test
for d in data:
c = label[d["id_str"]]
if c not in classes:
classes[c] = len(classes)
return classes
def convert_text_to_id(text, vocab_dic):
x = []
if text != None:
for c in text:
if c in vocab_dic:
x.append(vocab_dic[c])
else:
x.append(vocab_dic[unk_token])
return x
def clean_data(data, label, vocabxid, tzxid, locxid, descxid, namexid, classes, cf):
len_x, miss_y, len_loc, len_desc, len_name = [], 0, [], [], []
text_maxlen = max(cf.bucket_sizes)
for di, d in enumerate(data):
#label
c = label[d["id_str"]]
if c in classes:
d["y"] = classes[c]
else:
d["y"] = classes[unk_token]
miss_y += 1
#tweet text
x = convert_text_to_id(d["text"], vocabxid)
len_x.append(len(x))
#user time zone
if d["timezone"] in tzxid:
d["timezone"] = tzxid[d["timezone"]]
else:
d["timezone"] = tzxid[unk_token]
d["x"] = x[:text_maxlen]
#location
loc_text = convert_text_to_id(d["location"], locxid)
d["location"] = loc_text[:cf.loc_maxlen]
len_loc.append(len(loc_text))
#description
desc_text = convert_text_to_id(d["description"], descxid)
d["description"] = desc_text[:cf.desc_maxlen]
len_desc.append(len(desc_text))
#name
name_text = convert_text_to_id(d["name"], namexid)
d["name"] = name_text[:cf.name_maxlen]
len_name.append(len(name_text))
if di % 10000 == 0 and cf.verbose:
sys.stdout.write(str(di) + " instances processed\r")
return len_x, miss_y, len_loc, len_desc, len_name
def get_batch(data, i, blen, is_training, cf):
x, y, time, day, offset, timezone, loc, desc, name, usertime = [], [], [], [], [], [], [], [], [], []
noise_size = cf.text_filter_number + cf.time_size + cf.day_size + cf.offset_size + \
cf.timezone_size + cf.loc_filter_number + cf.desc_filter_number + \
cf.name_filter_number + cf.usertime_size
    #if blen is an int, use it directly as the batch length
if type(blen) is int:
bucket_len = blen
bucket_id = -1
    #else blen is a dictionary keyed by length: look up bucket id and length from the batch's max sequence length
else:
l = max([len(d["x"]) for d in data[(i*cf.batch_size):((i+1)*cf.batch_size)]])
bucket_id, bucket_len = blen[l]
for d in data[(i*cf.batch_size):((i+1)*cf.batch_size)]:
x.append(pad(d["x"][:bucket_len], bucket_len))
y.append(d["y"])
time.append(d["time"])
day.append(d["day"])
offset.append(d["offset"])
timezone.append(d["timezone"])
loc.append(pad(d["location"], cf.loc_maxlen))
desc.append(pad(d["description"], cf.desc_maxlen))
name.append(pad(d["name"], cf.name_maxlen))
usertime.append(d["usertime"])
#number of examples
num_examples = len(x)
#pad the batch if num_examples < batch_size
for _ in range(cf.batch_size-len(x)):
x.append(pad([], bucket_len))
y.append(0)
time.append(0.0)
day.append(0)
offset.append(0.0)
timezone.append(0)
loc.append(pad([], cf.loc_maxlen))
desc.append(pad([], cf.desc_maxlen))
name.append(pad([], cf.name_maxlen))
usertime.append(0.0)
    #generate Gaussian noise
if is_training and cf.corruption_level > 0:
noise = np.random.normal(0.0, cf.corruption_level, (cf.batch_size, noise_size))
else:
noise = np.zeros((cf.batch_size, noise_size))
return x, y, time, day, offset, timezone, loc, desc, name, usertime, noise, num_examples, bucket_id
def pad(lst, max_len):
#<pad> ID is always 1
return lst + [1] * (max_len - len(lst))
``` |
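A small sketch of the character-vocabulary helpers above, assuming the functions are in scope; the toy records are made up:
```python
data = [{"text": "hello"}, {"text": "help"}]
dic, dic_rev, freq = get_vocab(data, "text", "char", 1)
ids = convert_text_to_id("hello world", dic)   # unseen characters map to the <unk> id (0)
print(ids)
print(pad(ids, 15))                            # right-pads with the <pad> id (1)
```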
{
"source": "jhlch/sk-score-ex",
"score": 3
} |
#### File: jhlch/sk-score-ex/classifier_scorer.py
```python
from argparse import ArgumentParser
from pyspark import SparkContext
import joblib
import sys
import os
# Parse commandline arguments
arg_parser = ArgumentParser()
arg_parser.add_argument("--model_path",
action="store",
help="Path to pickled model.")
arg_parser.add_argument("--data_path",
action="store",
help="HDFS path to data to apply model to.")
arg_parser.add_argument("--output_path",
action="store",
help="HDFS path to write model score file to.")
args = arg_parser.parse_args()
# Get a spark context
sc = SparkContext()
# Only break out of loop if we can load the model.
while True:
# Check if path to pickled model exists
if not os.path.exists(args.model_path):
print "ERROR: Model path does not exist."
try:
# Try to load the saved model
best_classifier = joblib.load(args.model_path)
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
except:
e = sys.exc_info()[0]
print "Unexpected error. Could not load saved model. Error: %s" % e
else:
break
# Applies classifier to data
def apply_classifier(input_string, clf):
    # assumption: each input row is "id,<comma-separated numeric features>"
    id, features = input_string.split(",", 1)
    features = [[float(x) for x in features.split(",")]]
    score = clf.decision_function(features)[0]
    label = clf.predict(features)[0]
    # If the decision function is positive, the example is labeled 1.
    # label = 1 if score > 0 else 0
    return ",".join([id, str(score), str(label)])
# If best_classifier is defined, try applying it to all of the data.
if best_classifier:
# Load in input data as an rdd
# assume the input is in the form id,input_string
input_data = sc.textFile(args.data_path)
    id_prediction = input_data.map(lambda row: apply_classifier(row, best_classifier))
id_prediction.saveAsTextFile(args.output_path)
sc.stop()
``` |
{
"source": "jhlee2000/AppDevelopmentProject",
"score": 4
} |
#### File: jhlee2000/AppDevelopmentProject/StorageManager.py
```python
import shelve
"""
EXPLANATION
* Everything will be stored in "storage.db" shelve
* anything can be stored in database and shelve works like dictionary so we need
* 1. items to be stored at
* 2. the key used to store those items
"""
class StorageManager():
def __init__(self):
# error checking only
try:
self.__db = shelve.open('storage.db', 'r')
self.__db.close()
except Exception:
print("Storage not found")
        # This works like session storage: things can be stored at 'TEMP' but will be deleted on restart
# self.delete_storage('TEMP')
    # This function should only be used inside this class
def is_key_found(self, name):
keys = self.__db.keys()
if name in keys:
return True
else:
return False
    # Resets storage: deletes everything inside the db
    # !!!! use with extreme caution
def reset(self):
self.__db = shelve.open('storage.db', 'c')
keys = list(self.__db.keys())
for p in keys:
del self.__db[p]
self.__db.close()
    # create a new storage
def create_new_storage(self, name, items=None, dict=True):
self.__db = shelve.open('storage.db', 'c')
# items must be a dictionary or list
if(self.is_key_found(name) == False):
# If no item is specified
if items == None:
# Default will create a dictionary inside the db with the name parameter as key
# Unless dict parameter is false which will create list instead
# eg. db[name] = empty dictionary / list
if dict == True:
self.__db[name] = {}
print("Created dictionary")
elif dict == False:
self.__db[name] = []
print("Created list")
# If items are specified
else:
self.__db[name] = items
print("Created storage")
        # if a storage with the given name already exists, report that the name is in use
else:
print("existing name of storage found")
self.__db.close()
# delete whole storage with the key as the name
def delete_storage(self, name):
self.__db = shelve.open('storage.db', 'c')
if(self.is_key_found(name) == True):
del self.__db[name]
print("Deleted storage")
else:
print("no keys found with the given name")
self.__db.close()
# Set the whole storage as item with the key as the name
def set_storage(self, name, item):
self.__db = shelve.open('storage.db', 'c')
if(self.is_key_found(name) == True):
self.__db[name] = item
print("modified storage")
else:
print("Unable to set item due to storage name not found")
self.__db.close()
# add a single item after going INTO the storage using the name
def add_item(self, storage_name, key_to_use, item):
self.__db = shelve.open('storage.db', 'c')
if(self.is_key_found(storage_name) == True):
print("storage name found")
print(self.__db[storage_name])
            # if the key already exists inside the storage
            if key_to_use in self.__db[storage_name].keys():
                print("Key is in use")
                print("ALL USERS: ")
                print(self.__db[storage_name].keys())
            # add the item only if the key is not already in use
            else:
                temp = self.__db[storage_name]
                print("Key is not in use")
                temp[key_to_use] = item
                self.__db[storage_name] = temp
                print("ALL USERS: ")
                print(self.__db[storage_name].keys())
        # if the storage does not exist
else:
print("Unable to set item due to storage name not found")
self.__db.close()
# get the whole storage back using the name
def get_storage(self, name, create=False, dict=False):
self.__db = shelve.open('storage.db', 'c')
if (self.is_key_found(name) == True):
temp = self.__db[name]
self.__db.close()
print("Storage found")
return temp
else:
print("storage name not found")
if create == True:
print("proceeds to create a new one")
if dict == True:
self.__db[name] = {}
print("Created dictionary")
else:
self.__db[name] = []
print("Created List")
self.__db.close()
# check if storage exist returns true or false
def check_exist(self, name):
self.__db = shelve.open('storage.db', 'c')
if (self.is_key_found(name) == True):
self.__db.close()
return True
else:
self.__db.close()
return False
# TEST USE ONLY
def return_keys(self, name = None):
self.__db = shelve.open('storage.db', 'c')
if(name == None):
temp = list(self.__db.keys())
self.__db.close()
return temp
elif(name in list(self.__db.keys())):
temp = list(self.__db[name].keys())
self.__db.close()
return temp
else:
return None
    # TESTS WRITTEN BY JH
def return_object(self, name):
self.__db = shelve.open('storage.db', 'c')
temp = self.__db[name]
self.__db.close()
return temp
def update_cart(self, storage_name, key_to_use, item):
self.__db = shelve.open('storage.db', 'c')
if self.is_key_found(storage_name) == True:
temp = self.__db[storage_name]
print("Key is not in used")
temp[key_to_use] = item
self.__db[storage_name] = temp
else:
print("Unable to set item due to storage name not found")
self.__db.close()
``` |
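A short usage sketch; the storage name and item are arbitrary examples, and running it creates a `storage.db` file in the working directory:
```python
sm = StorageManager()
sm.create_new_storage('users')               # creates an empty dict under the key 'users'
sm.add_item('users', 'alice', {'age': 30})
print(sm.get_storage('users'))               # {'alice': {'age': 30}}
print(sm.check_exist('orders'))              # False
```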
{
"source": "jhlee7/flask-clova",
"score": 2
} |
#### File: flask-clova/flask_clova/core.py
```python
import os
import yaml
import inspect
from functools import partial
from werkzeug.local import LocalProxy
from jinja2 import BaseLoader, ChoiceLoader, TemplateNotFound
from flask import make_response, current_app, json, request as flask_request, _app_ctx_stack
from . import verifier, logger
import collections
def find_clova():
"""
Find our instance of Clova, navigating Local's and possible blueprints.
Note: This only supports returning a reference to the first instance
of Clova found.
"""
if hasattr(current_app, 'clova'):
return getattr(current_app, 'clova')
else:
if hasattr(current_app, 'blueprints'):
blueprints = getattr(current_app, 'blueprints')
for blueprint_name in blueprints:
if hasattr(blueprints[blueprint_name], 'clova'):
return getattr(blueprints[blueprint_name], 'clova')
def dbgdump(obj, default=None, cls=None):
if current_app.config.get('CLOVA_PRETTY_DEBUG_LOGS', False):
indent = 2
else:
indent = None
msg = json.dumps(obj, indent=indent, default=default, cls=cls)
logger.debug(msg)
#Define global variables
request = LocalProxy(lambda: find_clova().request)
session = LocalProxy(lambda: find_clova().session)
version = LocalProxy(lambda: find_clova().version)
context = LocalProxy(lambda: find_clova().context)
convert_errors = LocalProxy(lambda: find_clova().convert_errors)
from . import models
class Clova(object):
"""The Clova object provides the central interface for interacting with the Clova Extension Service.
Clova object maps CEK Requests to flask view functions and handles CEK sessions.
The constructor is passed a Flask App instance, and URL endpoint.
The Flask instance allows the convienient API of endpoints and their view functions,
so that CEK requests may be mapped with syntax similar to a typical Flask server.
Route provides the entry point for the skill, and must be provided if an app is given.
Keyword Arguments:
app {Flask object} -- App instance - created with Flask(__name__) (default: {None})
route {str} -- entry point to which initial CEK Requests are forwarded (default: {None})
blueprint {Flask blueprint} -- Flask Blueprint instance to use instead of Flask App (default: {None})
stream_cache {Werkzeug BasicCache} -- BasicCache-like object for storing Audio stream data (default: {SimpleCache})
path {str} -- path to templates yaml file for VUI dialog (default: {'templates.yaml'})
"""
def __init__(self, app=None, route=None, blueprint=None, path='templates.yaml'):
self.app = app
self._route = route
self._intent_view_funcs = {}
self._intent_converts = {}
self._intent_defaults = {}
self._intent_mappings = {}
self._launch_view_func = None
self._session_ended_view_func = None
self._on_session_started_callback = None
self._default_intent_view_func = None
if app is not None:
self.init_app(app, path)
elif blueprint is not None:
self.init_blueprint(blueprint, path)
def init_app(self, app, path='templates.yaml'):
"""Initializes Clova app by setting configuration variables, loading templates, and maps Clova route to a flask view.
The Clova instance is given the following configuration variables by calling on Flask's configuration:
`CLOVA_APPLICATION_ID`:
Turn on application ID verification by setting this variable to an application ID or a
list of allowed application IDs. By default, application ID verification is disabled and a
warning is logged. This variable should be set in production to ensure
requests are being sent by the applications you specify.
Default: None
`CLOVA_VERIFY_REQUESTS`:
Enables or disables CEK request verification, which ensures requests sent to your skill
are from Naver's CEK service. This setting should not be disabled in production.
It is useful for mocking JSON requests in automated tests.
Default: True
`CLOVA_PRETTY_DEBUG_LOGS`:
Add tabs and linebreaks to the CEK request and response printed to the debug log.
This improves readability when printing to the console, but breaks formatting when logging to CloudWatch.
Default: False
"""
if self._route is None:
raise TypeError("route is a required argument when app is not None")
app.clova = self
app.add_url_rule(self._route, view_func=self._flask_view_func, methods=['POST'])
app.jinja_loader = ChoiceLoader([app.jinja_loader, YamlLoader(app, path)])
def init_blueprint(self, blueprint, path='templates.yaml'):
"""Initialize a Flask Blueprint, similar to init_app, but without the access
to the application config.
Keyword Arguments:
blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize (Default: {None})
path {str} -- path to templates yaml file, relative to Blueprint (Default: {'templates.yaml'})
"""
if self._route is not None:
raise TypeError("route cannot be set when using blueprints!")
# we need to tuck our reference to this Clova instance into the blueprint object and find it later!
blueprint.clova = self
# BlueprintSetupState.add_url_rule gets called underneath the covers and
# concats the rule string, so we should set to an empty string to allow
# Blueprint('blueprint_api', __name__, url_prefix="/clova") to result in
# exposing the rule at "/clova" and not "/clova/".
blueprint.add_url_rule("", view_func=self._flask_view_func, methods=['POST'])
blueprint.jinja_loader = ChoiceLoader([YamlLoader(blueprint, path)])
@property
def clova_application_id(self):
return current_app.config.get('CLOVA_APPLICATION_ID', None)
@property
def clova_verify_requests(self):
return current_app.config.get('CLOVA_VERIFY_REQUESTS', True)
def on_session_started(self, f):
"""Decorator to call wrapped function upon starting a session.
@clova.on_session_started
def new_session():
log.info('new session started')
Because both launch and intent requests may begin a session, this decorator is used call
a function regardless of how the session began.
Arguments:
f {function} -- function to be called when session is started.
"""
self._on_session_started_callback = f
return f
def launch(self, f):
"""Decorator maps a view function as the endpoint for an CEK LaunchRequest and starts the skill.
@clova.launch
def launched():
return question('Welcome to Foo')
The wrapped function is registered as the launch view function and renders the response
for requests to the Launch URL.
A request to the launch URL is verified with the CEK server before the payload is
passed to the view function.
Arguments:
f {function} -- Launch view function
"""
self._launch_view_func = f
return f
def session_ended(self, f):
"""Decorator routes CEK SessionEndedRequest to the wrapped view function to end the skill.
@clova.session_ended
def session_ended():
return "{}", 200
The wrapped function is registered as the session_ended view function
and renders the response for requests to the end of the session.
Arguments:
f {function} -- session_ended view function
"""
self._session_ended_view_func = f
return f
def intent(self, intent_name, mapping=None, convert=None, default=None):
"""Decorator routes an CEK IntentRequest and provides the slot parameters to the wrapped function.
Functions decorated as an intent are registered as the view function for the Intent's URL,
and provide the backend responses to give your Skill its functionality.
@clova.intent('WeatherIntent', mapping={'city': 'City'})
def weather(city):
return statement('I predict great weather for {}'.format(city))
Arguments:
intent_name {str} -- Name of the intent request to be mapped to the decorated function
Keyword Arguments:
mapping {dict} -- Maps parameters to intent slots of a different name
default: {}
convert {dict} -- Converts slot values to data types before assignment to parameters
default: {}
            default {dict} -- Provides default values for Intent slots if the CEK request
returns no corresponding slot, or a slot with an empty value
default: {}
"""
if mapping is None:
mapping = dict()
if convert is None:
convert = dict()
if default is None:
default = dict()
def decorator(f):
self._intent_view_funcs[intent_name] = f
self._intent_mappings[intent_name] = mapping
self._intent_converts[intent_name] = convert
self._intent_defaults[intent_name] = default
return f
return decorator
def default_intent(self, f):
"""Decorator routes any CEK IntentRequest that is not matched by any existing @clova.intent routing."""
self._default_intent_view_func = f
return f
@property
def request(self):
return getattr(_app_ctx_stack.top, '_clova_request', None)
@request.setter
def request(self, value):
_app_ctx_stack.top._clova_request = value
@property
def session(self):
return getattr(_app_ctx_stack.top, '_clova_session', models._Field())
@session.setter
def session(self, value):
_app_ctx_stack.top._clova_session = value
@property
def version(self):
return getattr(_app_ctx_stack.top, '_clova_version', None)
@version.setter
def version(self, value):
_app_ctx_stack.top._clova_version = value
@property
def context(self):
return getattr(_app_ctx_stack.top, '_clova_context', None)
@context.setter
def context(self, value):
_app_ctx_stack.top._clova_context = value
@property
def convert_errors(self):
return getattr(_app_ctx_stack.top, '_clova_convert_errors', None)
@convert_errors.setter
def convert_errors(self, value):
_app_ctx_stack.top._clova_convert_errors = value
def _get_user(self):
if self.context:
return self.context.get('System', {}).get('user', {}).get('userId')
return None
def _cek_request(self, verify=True):
raw_body = flask_request.data
cek_request_payload = json.loads(raw_body)
if verify:
# verify application id
if self.clova_application_id is not None:
application_id = cek_request_payload['context']['System']['application']['applicationId']
verifier.verify_application_id(application_id, self.clova_application_id)
try:
cek_request_payload['session']
except KeyError:
logger.debug("Session field is missing.\n"
"This message should not be appeared in produciton.")
cek_request_payload['session'] = {}
return cek_request_payload
def _flask_view_func(self, *args, **kwargs):
clova_payload = self._cek_request(verify=self.clova_verify_requests)
dbgdump(clova_payload)
request_body = models._Field(clova_payload)
self.request = request_body.request
self.version = request_body.version
self.context = getattr(request_body, 'context', models._Field())
self.session = getattr(request_body, 'session', models._Field())
if not self.session.sessionAttributes:
self.session.sessionAttributes = models._Field()
try:
if self.session.new and self._on_session_started_callback is not None:
self._on_session_started_callback()
except AttributeError:
pass
result = None
request_type = self.request.type
if request_type == 'LaunchRequest' and self._launch_view_func:
result = self._launch_view_func()
elif request_type == 'SessionEndedRequest':
if self._session_ended_view_func:
result = self._session_ended_view_func()
else:
logger.info("SessionEndedRequest Handler is not defined.")
result = "{}", 200
elif request_type == 'IntentRequest' and (self._intent_view_funcs or self._default_intent_view_func):
result = self._map_intent_to_view_func(self.request.intent)()
if result is not None:
if isinstance(result, models._Response):
result = result.render_response()
response = make_response(result)
response.mimetype = 'application/json;charset=utf-8'
return response
logger.warning(request_type + " handler is not defined.")
return "", 400
def _map_intent_to_view_func(self, intent):
"""Provides appropiate parameters to the intent functions."""
if intent.intentName in self._intent_view_funcs:
view_func = self._intent_view_funcs[intent.intentName]
elif self._default_intent_view_func is not None:
view_func = self._default_intent_view_func
else:
raise NotImplementedError('Intent "{}" not found and no default intent specified.'.format(intent.intentName))
argspec = inspect.getfullargspec(view_func)
arg_names = argspec.args
arg_values = self._map_params_to_view_args(intent.intentName, arg_names)
return partial(view_func, *arg_values)
def _map_params_to_view_args(self, view_name, arg_names):
"""
find and invoke appropriate function
"""
arg_values = []
convert = self._intent_converts.get(view_name)
default = self._intent_defaults.get(view_name)
mapping = self._intent_mappings.get(view_name)
convert_errors = {}
request_data = {}
intent = getattr(self.request, 'intent', None)
if intent is not None:
if intent.slots is not None:
for slot_key in intent.slots.keys():
slot_object = getattr(intent.slots, slot_key)
request_data[slot_object.name] = getattr(slot_object, 'value', None)
else:
for param_name in self.request:
request_data[param_name] = getattr(self.request, param_name, None)
for arg_name in arg_names:
param_or_slot = mapping.get(arg_name, arg_name)
arg_value = request_data.get(param_or_slot)
if arg_value is None or arg_value == "":
if arg_name in default:
default_value = default[arg_name]
                    if callable(default_value):
default_value = default_value()
arg_value = default_value
elif arg_name in convert:
convert_func = convert[arg_name]
try:
arg_value = convert_func(arg_value)
except Exception as e:
convert_errors[arg_name] = e
arg_values.append(arg_value)
self.convert_errors = convert_errors
return arg_values
class YamlLoader(BaseLoader):
def __init__(self, app, path):
self.path = app.root_path + os.path.sep + path
self.mapping = {}
self._reload_mapping()
def _reload_mapping(self):
if os.path.isfile(self.path):
self.last_mtime = os.path.getmtime(self.path)
with open(self.path) as f:
self.mapping = yaml.safe_load(f.read())
def get_source(self, environment, template):
if not os.path.isfile(self.path):
return None, None, None
if self.last_mtime != os.path.getmtime(self.path):
self._reload_mapping()
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
``` |
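Putting the decorators together, a skill ends up looking roughly like the sketch below. It is based on the snippets in the docstrings above; the exact import path of the `statement`/`question` response helpers is an assumption:
```python
from flask import Flask
from flask_clova import Clova, question, statement  # helper import path assumed

app = Flask(__name__)
clova = Clova(app, route='/clova')   # CEK requests are POSTed to this route

@clova.launch
def launched():
    return question('Welcome to Foo')

@clova.intent('WeatherIntent', mapping={'city': 'City'})
def weather(city):
    return statement('I predict great weather for {}'.format(city))

@clova.session_ended
def session_ended():
    return "{}", 200

if __name__ == '__main__':
    app.run(debug=True)
```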
{
"source": "jhlee900958/PhySG",
"score": 2
} |
#### File: code/envmaps/fit_envmap_with_sg.py
```python
import imageio
imageio.plugins.freeimage.download()
import torch
import torch.nn as nn
import numpy as np
import imageio
import cv2
import os
TINY_NUMBER = 1e-8
def parse_raw_sg(sg):
SGLobes = sg[..., :3] / (torch.norm(sg[..., :3], dim=-1, keepdim=True) + TINY_NUMBER) # [..., M, 3]
SGLambdas = torch.abs(sg[..., 3:4])
SGMus = torch.abs(sg[..., -3:])
return SGLobes, SGLambdas, SGMus
#######################################################################################################
# compute envmap from SG
#######################################################################################################
def SG2Envmap(lgtSGs, H, W, upper_hemi=False):
    # exactly same convention as Mitsuba, check envmap_convention.png
if upper_hemi:
phi, theta = torch.meshgrid([torch.linspace(0., np.pi/2., H), torch.linspace(-0.5*np.pi, 1.5*np.pi, W)])
else:
phi, theta = torch.meshgrid([torch.linspace(0., np.pi, H), torch.linspace(-0.5*np.pi, 1.5*np.pi, W)])
viewdirs = torch.stack([torch.cos(theta) * torch.sin(phi), torch.cos(phi), torch.sin(theta) * torch.sin(phi)],
dim=-1) # [H, W, 3]
# print(viewdirs[0, 0, :], viewdirs[0, W//2, :], viewdirs[0, -1, :])
# print(viewdirs[H//2, 0, :], viewdirs[H//2, W//2, :], viewdirs[H//2, -1, :])
# print(viewdirs[-1, 0, :], viewdirs[-1, W//2, :], viewdirs[-1, -1, :])
# lgtSGs = lgtSGs.clone().detach()
viewdirs = viewdirs.to(lgtSGs.device)
viewdirs = viewdirs.unsqueeze(-2) # [..., 1, 3]
# [M, 7] ---> [..., M, 7]
dots_sh = list(viewdirs.shape[:-2])
M = lgtSGs.shape[0]
lgtSGs = lgtSGs.view([1,]*len(dots_sh)+[M, 7]).expand(dots_sh+[M, 7])
# sanity
# [..., M, 3]
lgtSGLobes = lgtSGs[..., :3] / (torch.norm(lgtSGs[..., :3], dim=-1, keepdim=True) + TINY_NUMBER)
lgtSGLambdas = torch.abs(lgtSGs[..., 3:4])
lgtSGMus = torch.abs(lgtSGs[..., -3:]) # positive values
# [..., M, 3]
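    # Each spherical Gaussian contributes mu * exp(lambda * (dot(viewdir, lobe) - 1)):
    # it peaks along its lobe direction and falls off with sharpness lambda; summing
    # over all M lobes below gives the radiance for every pixel direction of the envmap.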
rgb = lgtSGMus * torch.exp(lgtSGLambdas * (torch.sum(viewdirs * lgtSGLobes, dim=-1, keepdim=True) - 1.))
rgb = torch.sum(rgb, dim=-2) # [..., 3]
envmap = rgb.reshape((H, W, 3))
return envmap
# def SG2Envmap(lgtSGs, H, W):
# numLgtSGs = lgtSGs.shape[0]
# phi, theta = torch.meshgrid([torch.linspace(0., np.pi, H), torch.linspace(0.0, 2 * np.pi, W)])
# viewdirs = torch.stack((torch.cos(theta) * torch.sin(phi), torch.cos(phi), torch.sin(theta) * torch.sin(phi)),
# dim=2).cuda()
# viewdirs = viewdirs.unsqueeze(-2) # [..., 1, 3]
# # [n_envsg, 7]
# sum_sg2 = torch.cat(parse_raw_sg(lgtSGs), dim=-1)
# # [..., n_envsg, 7]
# sh = list(viewdirs.shape[:-2])
# sum_sg2 = sum_sg2.view([1, ] * len(sh) + [numLgtSGs, 7]).expand(sh + [-1, -1])
# # [..., n_envsg, 3]
# rgb = sum_sg2[..., -3:] * torch.exp(sum_sg2[..., 3:4] *
# (torch.sum(viewdirs * sum_sg2[..., :3], dim=-1, keepdim=True) - 1.))
# rgb = torch.sum(rgb, dim=-2) # [..., 3]
# env_map = rgb.reshape((H, W, 3))
# return env_map
# load ground-truth envmap
filename = '/home/kz298/envmap_museum_clamp997.exr'
filename = os.path.abspath(filename)
gt_envmap = imageio.imread(filename)[:,:,:3]
gt_envmap = cv2.resize(gt_envmap, (512, 256), interpolation=cv2.INTER_AREA)
gt_envmap = torch.from_numpy(gt_envmap).cuda()
H, W = gt_envmap.shape[:2]
print(H, W)
out_dir = filename[:-4]
print(out_dir)
os.makedirs(out_dir, exist_ok=True)
assert (os.path.isdir(out_dir))
numLgtSGs = 128
lgtSGs = nn.Parameter(torch.randn(numLgtSGs, 7).cuda()) # lobe + lambda + mu
lgtSGs.data[..., 3:4] *= 100.
lgtSGs.requires_grad = True
optimizer = torch.optim.Adam([lgtSGs,], lr=1e-2)
N_iter = 100000
pretrained_file = os.path.join(out_dir, 'sg_{}.npy'.format(numLgtSGs))
if os.path.isfile(pretrained_file):
print('Loading: ', pretrained_file)
lgtSGs.data.copy_(torch.from_numpy(np.load(pretrained_file)).cuda())
for step in range(N_iter):
optimizer.zero_grad()
env_map = SG2Envmap(lgtSGs, H, W)
loss = torch.mean((env_map - gt_envmap) * (env_map - gt_envmap))
loss.backward()
optimizer.step()
if step % 30 == 0:
print('step: {}, loss: {}'.format(step, loss.item()))
if step % 100 == 0:
envmap_check = env_map.clone().detach().cpu().numpy()
gt_envmap_check = gt_envmap.clone().detach().cpu().numpy()
im = np.concatenate((gt_envmap_check, envmap_check), axis=0)
im = np.power(im, 1./2.2)
im = np.clip(im, 0., 1.)
# im = (im - im.min()) / (im.max() - im.min() + TINY_NUMBER)
im = np.uint8(im * 255.)
imageio.imwrite(os.path.join(out_dir, 'log_im_{}.png'.format(numLgtSGs)), im)
np.save(os.path.join(out_dir, 'sg_{}.npy'.format(numLgtSGs)), lgtSGs.clone().detach().cpu().numpy())
```
#### File: code/evaluation/eval.py
```python
import sys
sys.path.append('../code')
import argparse
import GPUtil
import os
from pyhocon import ConfigFactory
import torch
import numpy as np
import cvxpy as cp
from PIL import Image
import math
import utils.general as utils
import utils.plots as plt
from utils import rend_util
from utils import vis_util
from model.sg_render import compute_envmap
import imageio
# import pyexr
def evaluate(**kwargs):
torch.set_default_dtype(torch.float32)
conf = ConfigFactory.parse_file(kwargs['conf'])
exps_folder_name = kwargs['exps_folder_name']
evals_folder_name = kwargs['evals_folder_name']
expname = conf.get_string('train.expname') + '-' + kwargs['expname']
if kwargs['timestamp'] == 'latest':
if os.path.exists(os.path.join('../', kwargs['exps_folder_name'], expname)):
timestamps = os.listdir(os.path.join('../', kwargs['exps_folder_name'], expname))
if (len(timestamps)) == 0:
print('WRONG EXP FOLDER')
exit()
else:
timestamp = sorted(timestamps)[-1]
else:
print('WRONG EXP FOLDER')
exit()
else:
timestamp = kwargs['timestamp']
utils.mkdir_ifnotexists(os.path.join('../', evals_folder_name))
expdir = os.path.join('../', exps_folder_name, expname)
evaldir = os.path.join('../', evals_folder_name, expname, os.path.basename(kwargs['data_split_dir']))
model = utils.get_class(conf.get_string('train.model_class'))(conf=conf.get_config('model'))
if torch.cuda.is_available():
model.cuda()
eval_dataset = utils.get_class(conf.get_string('train.dataset_class'))(kwargs['gamma'],
kwargs['data_split_dir'],
train_cameras=False)
eval_dataloader = torch.utils.data.DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
collate_fn=eval_dataset.collate_fn
)
total_pixels = eval_dataset.total_pixels
img_res = eval_dataset.img_res
old_checkpnts_dir = os.path.join(expdir, timestamp, 'checkpoints')
ckpt_path = os.path.join(old_checkpnts_dir, 'ModelParameters', str(kwargs['checkpoint']) + ".pth")
saved_model_state = torch.load(ckpt_path)
model.load_state_dict(saved_model_state["model_state_dict"])
epoch = saved_model_state['epoch']
print('Loaded checkpoint: ', ckpt_path)
if kwargs['geometry'].endswith('.pth'):
print('Reloading geometry from: ', kwargs['geometry'])
geometry = torch.load(kwargs['geometry'])['model_state_dict']
geometry = {k: v for k, v in geometry.items() if 'implicit_network' in k}
print(geometry.keys())
model_dict = model.state_dict()
model_dict.update(geometry)
model.load_state_dict(model_dict)
#####################################################################################################
# reset lighting
#####################################################################################################
relight = False
if kwargs['light_sg'].endswith('.npy'):
print('Loading light from: ', kwargs['light_sg'])
model.envmap_material_network.load_light(kwargs['light_sg'])
evaldir = evaldir + '_relight'
relight = True
edit_diffuse = False
if len(kwargs['diffuse_albedo']) > 0:
print('Setting diffuse albedo to: ', kwargs['diffuse_albedo'])
evaldir = evaldir + '_editdiffuse'
edit_diffuse = True
utils.mkdir_ifnotexists(evaldir)
print('Output directory is: ', evaldir)
with open(os.path.join(evaldir, 'ckpt_path.txt'), 'w') as fp:
fp.write(ckpt_path + '\n')
####################################################################################################################
print("evaluating...")
model.eval()
# extract mesh
if (not edit_diffuse) and (not relight) and eval_dataset.has_groundtruth:
with torch.no_grad():
mesh = plt.get_surface_high_res_mesh(
sdf=lambda x: model.implicit_network(x)[:, 0],
resolution=kwargs['resolution']
)
# Taking the biggest connected component
components = mesh.split(only_watertight=False)
        areas = np.array([c.area for c in components], dtype=float)
mesh_clean = components[areas.argmax()]
mesh_clean.export('{0}/mesh.obj'.format(evaldir), 'obj')
# generate images
images_dir = evaldir
all_frames = []
psnrs = []
for data_index, (indices, model_input, ground_truth) in enumerate(eval_dataloader):
if eval_dataset.has_groundtruth:
out_img_name = os.path.basename(eval_dataset.image_paths[indices[0]])[:-4]
else:
out_img_name = '{}'.format(indices[0])
if len(kwargs['view_name']) > 0 and out_img_name != kwargs['view_name']:
print('Skipping: ', out_img_name)
continue
print('Evaluating data_index: ', data_index, len(eval_dataloader))
model_input["intrinsics"] = model_input["intrinsics"].cuda()
model_input["uv"] = model_input["uv"].cuda()
model_input["object_mask"] = model_input["object_mask"].cuda()
model_input['pose'] = model_input['pose'].cuda()
split = utils.split_input(model_input, total_pixels)
res = []
for s in split:
out = model(s)
res.append({
'points': out['points'].detach(),
'idr_rgb_values': out['idr_rgb_values'].detach(),
'sg_rgb_values': out['sg_rgb_values'].detach(),
'normal_values': out['normal_values'].detach(),
'network_object_mask': out['network_object_mask'].detach(),
'object_mask': out['object_mask'].detach(),
'sg_diffuse_albedo_values': out['sg_diffuse_albedo_values'].detach(),
'sg_diffuse_rgb_values': out['sg_diffuse_rgb_values'].detach(),
'sg_specular_rgb_values': out['sg_specular_rgb_values'].detach(),
})
batch_size = ground_truth['rgb'].shape[0]
model_outputs = utils.merge_output(res, total_pixels, batch_size)
### re-render with updated diffuse albedo
if edit_diffuse:
diffuse_albedo = imageio.imread(kwargs['diffuse_albedo']).astype(np.float32)[:, :, :3]
if not kwargs['diffuse_albedo'].endswith('.exr'):
diffuse_albedo /= 255.
diffuse_albedo = torch.from_numpy(diffuse_albedo).cuda().reshape((-1, 3))
ray_dirs, _ = rend_util.get_camera_params(model_input["uv"],
model_input['pose'],
model_input["intrinsics"])
sg_ret = model.render_sg_rgb(mask=model_outputs['network_object_mask'],
normals=model_outputs['normal_values'],
view_dirs=-ray_dirs.reshape((-1, 3)),
diffuse_albedo=diffuse_albedo)
for x in sorted(sg_ret.keys()):
assert (x in model_outputs)
model_outputs[x] = sg_ret[x]
tonemap_img = lambda x: np.power(x, 1./eval_dataset.gamma)
clip_img = lambda x: np.clip(x, 0., 1.)
assert (batch_size == 1)
if kwargs['write_idr']:
rgb_eval = model_outputs['idr_rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
rgb_eval = clip_img(tonemap_img(rgb_eval))
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/idr_rgb_{1}.png'.format(images_dir, out_img_name))
rgb_eval = model_outputs['sg_rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/sg_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# pyexr.write('{0}/sg_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# np.save('{0}/sg_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)
else:
rgb_eval = clip_img(tonemap_img(rgb_eval))
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/sg_rgb_{1}.png'.format(images_dir, out_img_name))
all_frames.append(np.array(img))
# network_object_mask = model_outputs['network_object_mask']
# network_object_mask = network_object_mask.reshape(batch_size, total_pixels, 3)
# network_object_mask = plt.lin2img(network_object_mask, img_res).detach().cpu().numpy()[0]
# network_object_mask = network_object_mask.transpose(1, 2, 0)
# img = Image.fromarray((network_object_mask * 255).astype(np.uint8))
# img.save('{0}/object_mask_{1}.png'.format(images_dir, out_img_name))
normal = model_outputs['normal_values']
normal = normal.reshape(batch_size, total_pixels, 3)
normal = (normal + 1.) / 2.
normal = plt.lin2img(normal, img_res).detach().cpu().numpy()[0]
normal = normal.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/normal_{1}.exr'.format(images_dir, out_img_name), normal)
# pyexr.write('{0}/normal_{1}.exr'.format(images_dir, out_img_name), normal)
# np.save('{0}/normal_{1}.npy'.format(images_dir, out_img_name), normal)
else:
img = Image.fromarray((normal * 255).astype(np.uint8))
img.save('{0}/normal_{1}.png'.format(images_dir, out_img_name))
if (not relight) and eval_dataset.has_groundtruth:
depth = torch.ones(batch_size * total_pixels).cuda().float()
network_object_mask = model_outputs['network_object_mask'] & model_outputs['object_mask']
depth_valid = rend_util.get_depth(model_outputs['points'].reshape(batch_size, total_pixels, 3),
model_input['pose']).reshape(-1)[network_object_mask]
depth[network_object_mask] = depth_valid
depth[~network_object_mask] = 0.98 * depth_valid.min()
assert (batch_size == 1)
network_object_mask = network_object_mask.float().reshape(img_res[0], img_res[1]).cpu()
depth = depth.reshape(img_res[0], img_res[1]).cpu()
if kwargs['save_exr']:
depth = depth * network_object_mask
depth = depth.numpy()
imageio.imwrite('{0}/depth_{1}.exr'.format(images_dir, out_img_name), depth)
# pyexr.write('{0}/depth_{1}.exr'.format(images_dir, out_img_name), depth)
# np.save('{0}/depth_{1}.npy'.format(images_dir, out_img_name), depth)
else:
depth = vis_util.colorize(depth, cmap_name='jet')
depth = depth * network_object_mask.unsqueeze(-1) + (1. - network_object_mask.unsqueeze(-1))
depth = depth.numpy()
img = Image.fromarray((depth * 255).astype(np.uint8))
img.save('{0}/depth_{1}.png'.format(images_dir, out_img_name))
# write lighting and materials
envmap = compute_envmap(lgtSGs=model.envmap_material_network.get_light(), H=256, W=512, upper_hemi=model.envmap_material_network.upper_hemi)
envmap = envmap.cpu().numpy()
imageio.imwrite(os.path.join(images_dir, 'envmap.exr'), envmap)
roughness, specular_reflectance = model.envmap_material_network.get_base_materials()
with open(os.path.join(images_dir, 'relight_material.txt'), 'w') as fp:
for i in range(roughness.shape[0]):
fp.write('Material {}:\n'.format(i))
fp.write('\troughness: {}\n'.format(roughness[i, 0].item()))
fp.write('\tspecular_reflectance: ')
for j in range(3):
fp.write('{}, '.format(specular_reflectance[i, j].item()))
fp.write('\n\n')
rgb_gt = ground_truth['rgb']
rgb_gt = plt.lin2img(rgb_gt, img_res).numpy()[0].transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/gt_{1}.exr'.format(images_dir, out_img_name), rgb_gt)
# pyexr.write('{0}/gt_{1}.exr'.format(images_dir, out_img_name), rgb_gt)
# np.save('{0}/gt_{1}.npy'.format(images_dir, out_img_name), rgb_gt)
else:
rgb_gt = clip_img(tonemap_img(rgb_gt))
img = Image.fromarray((rgb_gt * 255).astype(np.uint8))
img.save('{0}/gt_{1}.png'.format(images_dir, out_img_name))
mask = model_input['object_mask']
mask = plt.lin2img(mask.unsqueeze(-1), img_res).cpu().numpy()[0]
mask = mask.transpose(1, 2, 0)
rgb_eval_masked = rgb_eval * mask
rgb_gt_masked = rgb_gt * mask
psnr = calculate_psnr(rgb_eval_masked, rgb_gt_masked, mask)
psnrs.append(psnr)
# verbose mode
rgb_eval = model_outputs['sg_diffuse_albedo_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/sg_diffuse_albedo_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# pyexr.write('{0}/sg_diffuse_albedo_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# np.save('{0}/sg_diffuse_albedo_{1}.npy'.format(images_dir, out_img_name), rgb_eval)
else:
rgb_eval = clip_img(rgb_eval)
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/sg_diffuse_albedo_{1}.png'.format(images_dir, out_img_name))
rgb_eval = model_outputs['sg_diffuse_rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/sg_diffuse_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# pyexr.write('{0}/sg_diffuse_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# np.save('{0}/sg_diffuse_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)
else:
rgb_eval = clip_img(rgb_eval)
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/sg_diffuse_rgb_{1}.png'.format(images_dir, out_img_name))
rgb_eval = model_outputs['sg_specular_rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/sg_specular_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# pyexr.write('{0}/sg_specular_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# np.save('{0}/sg_specular_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)
else:
rgb_eval = clip_img(rgb_eval)
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/sg_specular_rgb_{1}.png'.format(images_dir, out_img_name))
if not kwargs['save_exr']:
imageio.mimwrite(os.path.join(images_dir, 'video_rgb.mp4'), all_frames, fps=15, quality=9)
print('Done rendering', images_dir)
if len(psnrs) > 0:
psnrs = np.array(psnrs).astype(np.float64)
# print("RENDERING EVALUATION {2}: psnr mean = {0} ; psnr std = {1}".format("%.2f" % psnrs.mean(), "%.2f" % psnrs.std(), scan_id))
print("RENDERING EVALUATION: psnr mean = {0} ; psnr std = {1}".format("%.2f" % psnrs.mean(), "%.2f" % psnrs.std()))
def get_cameras_accuracy(pred_Rs, gt_Rs, pred_ts, gt_ts,):
''' Align predicted pose to gt pose and print cameras accuracy'''
# find rotation
d = pred_Rs.shape[-1]
n = pred_Rs.shape[0]
Q = torch.addbmm(torch.zeros(d, d, dtype=torch.double), gt_Rs, pred_Rs.transpose(1, 2))
Uq, _, Vq = torch.svd(Q)
sv = torch.ones(d, dtype=torch.double)
sv[-1] = torch.det(Uq @ Vq.transpose(0, 1))
R_opt = Uq @ torch.diag(sv) @ Vq.transpose(0, 1)
R_fixed = torch.bmm(R_opt.repeat(n, 1, 1), pred_Rs)
# find translation
pred_ts = pred_ts @ R_opt.transpose(0, 1)
c_opt = cp.Variable()
t_opt = cp.Variable((1, d))
constraints = []
obj = cp.Minimize(cp.sum(
cp.norm(gt_ts.numpy() - (c_opt * pred_ts.numpy() + np.ones((n, 1), dtype=np.double) @ t_opt), axis=1)))
prob = cp.Problem(obj, constraints)
prob.solve()
t_fixed = c_opt.value * pred_ts.numpy() + np.ones((n, 1), dtype=np.double) * t_opt.value
    # Calculate translation error
t_error = np.linalg.norm(t_fixed - gt_ts.numpy(), axis=-1)
t_error = t_error
t_error_mean = np.mean(t_error)
t_error_medi = np.median(t_error)
# Calculate rotation error
R_error = compare_rotations(R_fixed, gt_Rs)
R_error = R_error.numpy()
R_error_mean = np.mean(R_error)
R_error_medi = np.median(R_error)
print('CAMERAS EVALUATION: R error mean = {0} ; t error mean = {1} ; R error median = {2} ; t error median = {3}'
.format("%.2f" % R_error_mean, "%.2f" % t_error_mean, "%.2f" % R_error_medi, "%.2f" % t_error_medi))
# return alignment and aligned pose
return R_opt.numpy(), t_opt.value, c_opt.value, R_fixed.numpy(), t_fixed
def compare_rotations(R1, R2):
cos_err = (torch.bmm(R1, R2.transpose(1, 2))[:, torch.arange(3), torch.arange(3)].sum(dim=-1) - 1) / 2
cos_err[cos_err > 1] = 1
cos_err[cos_err < -1] = -1
return cos_err.acos() * 180 / np.pi
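

# Illustrative sketch added for clarity (not part of the original script):
# compare_rotations() expects stacked 3x3 rotation matrices of shape (n, 3, 3)
# and returns the per-pair geodesic angle in degrees. The rotations below are
# hypothetical test values.
def _example_compare_rotations():
    identity = torch.eye(3, dtype=torch.double).unsqueeze(0)
    # 90-degree rotation around the z axis
    rot_z90 = torch.tensor([[[0., -1., 0.],
                             [1., 0., 0.],
                             [0., 0., 1.]]], dtype=torch.double)
    print(compare_rotations(identity, identity))  # ~0 degrees
    print(compare_rotations(identity, rot_z90))   # ~90 degrees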
def calculate_psnr(img1, img2, mask):
# img1 and img2 have range [0, 1]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
mse = np.mean((img1 - img2)**2) * (img2.shape[0] * img2.shape[1]) / mask.sum()
if mse == 0:
return float('inf')
return 20 * math.log10(1.0 / math.sqrt(mse))
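

# Illustrative sketch added for clarity (not part of the original script):
# calculate_psnr() expects images in [0, 1]; the mask only rescales the MSE to
# the number of valid pixels. The arrays below are hypothetical test values.
def _example_calculate_psnr():
    gt = np.random.rand(8, 8, 3)
    mask = np.ones((8, 8, 1))
    print(calculate_psnr(np.clip(gt + 0.01, 0., 1.), gt, mask))  # finite PSNR
    print(calculate_psnr(gt, gt, mask))                          # inf (mse == 0)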
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='./confs/default.conf')
parser.add_argument('--data_split_dir', type=str, default='')
parser.add_argument('--gamma', type=float, default=1., help='gamma correction coefficient')
parser.add_argument('--save_exr', default=False, action="store_true", help='')
parser.add_argument('--light_sg', type=str, default='', help='')
parser.add_argument('--geometry', type=str, default='', help='')
parser.add_argument('--diffuse_albedo', type=str, default='', help='')
parser.add_argument('--view_name', type=str, default='', help='')
parser.add_argument('--expname', type=str, default='', help='The experiment name to be evaluated.')
parser.add_argument('--exps_folder', type=str, default='exps', help='The experiments folder name.')
    parser.add_argument('--timestamp', default='latest', type=str, help='The experiment timestamp to test.')
parser.add_argument('--checkpoint', default='latest',type=str,help='The trained model checkpoint to test')
parser.add_argument('--write_idr', default=False, action="store_true", help='')
parser.add_argument('--resolution', default=512, type=int, help='Grid resolution for marching cube')
parser.add_argument('--is_uniform_grid', default=False, action="store_true", help='If set, evaluate marching cube with uniform grid.')
parser.add_argument('--gpu', type=str, default='auto', help='GPU to use [default: GPU auto]')
opt = parser.parse_args()
if opt.gpu == "auto":
deviceIDs = GPUtil.getAvailable(order='memory', limit=1, maxLoad=0.5, maxMemory=0.5, includeNan=False, excludeID=[], excludeUUID=[])
gpu = deviceIDs[0]
else:
gpu = opt.gpu
if (not gpu == 'ignore'):
os.environ["CUDA_VISIBLE_DEVICES"] = '{0}'.format(gpu)
evaluate(conf=opt.conf,
write_idr=opt.write_idr,
gamma=opt.gamma,
data_split_dir=opt.data_split_dir,
expname=opt.expname,
exps_folder_name=opt.exps_folder,
evals_folder_name='evals',
timestamp=opt.timestamp,
checkpoint=opt.checkpoint,
resolution=opt.resolution,
save_exr=opt.save_exr,
light_sg=opt.light_sg,
geometry=opt.geometry,
view_name=opt.view_name,
diffuse_albedo=opt.diffuse_albedo,
)
``` |
{
"source": "jhlee9010/espnet",
"score": 3
} |
#### File: enh/layers/dpmulcat.py
```python
import torch
import torch.nn as nn
class MulCatBlock(nn.Module):
"""The MulCat block.
Args:
input_size: int, dimension of the input feature.
The input should have shape (batch, seq_len, input_size).
hidden_size: int, dimension of the hidden state.
dropout: float, the dropout rate in the LSTM layer. (Default: 0.0)
bidirectional: bool, whether the RNN layers are bidirectional. (Default: True)
"""
def __init__(
self,
input_size: int,
hidden_size: int,
dropout: float = 0.0,
bidirectional: bool = True,
):
super().__init__()
num_direction = int(bidirectional) + 1
self.rnn = nn.LSTM(
input_size,
hidden_size,
1,
dropout=dropout,
batch_first=True,
bidirectional=bidirectional,
)
self.rnn_proj = nn.Linear(hidden_size * num_direction, input_size)
self.gate_rnn = nn.LSTM(
input_size,
hidden_size,
num_layers=1,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
self.gate_rnn_proj = nn.Linear(hidden_size * num_direction, input_size)
self.block_projection = nn.Linear(input_size * 2, input_size)
def forward(self, input):
"""Compute output after MulCatBlock.
Args:
input (torch.Tensor): The input feature.
Tensor of shape (batch, time, feature_dim)
Returns:
(torch.Tensor): The output feature after MulCatBlock.
Tensor of shape (batch, time, feature_dim)
"""
orig_shape = input.shape
# run rnn module
rnn_output, _ = self.rnn(input)
rnn_output = (
self.rnn_proj(rnn_output.contiguous().view(-1, rnn_output.shape[2]))
.view(orig_shape)
.contiguous()
)
# run gate rnn module
gate_rnn_output, _ = self.gate_rnn(input)
gate_rnn_output = (
self.gate_rnn_proj(
gate_rnn_output.contiguous().view(-1, gate_rnn_output.shape[2])
)
.view(orig_shape)
.contiguous()
)
# apply gated rnn
gated_output = torch.mul(rnn_output, gate_rnn_output)
# concatenate the input with rnn output
gated_output = torch.cat([gated_output, input], 2)
# linear projection to make the output shape the same as input
gated_output = self.block_projection(
gated_output.contiguous().view(-1, gated_output.shape[2])
).view(orig_shape)
return gated_output
class DPMulCat(nn.Module):
"""Dual-path RNN module with MulCat blocks.
Args:
input_size: int, dimension of the input feature.
The input should have shape (batch, seq_len, input_size).
hidden_size: int, dimension of the hidden state.
output_size: int, dimension of the output size.
num_spk: int, the number of speakers in the output.
dropout: float, the dropout rate in the LSTM layer. (Default: 0.0)
bidirectional: bool, whether the RNN layers are bidirectional. (Default: True)
num_layers: int, number of stacked MulCat blocks. (Default: 4)
input_normalize: bool, whether to apply GroupNorm on the input Tensor.
(Default: False)
"""
def __init__(
self,
input_size: int,
hidden_size: int,
output_size: int,
num_spk: int,
dropout: float = 0.0,
num_layers: int = 4,
bidirectional: bool = True,
input_normalize: bool = False,
):
super().__init__()
self.rows_grnn = nn.ModuleList([])
self.cols_grnn = nn.ModuleList([])
self.rows_normalization = nn.ModuleList([])
self.cols_normalization = nn.ModuleList([])
# create the dual path pipeline
for i in range(num_layers):
self.rows_grnn.append(
MulCatBlock(
input_size, hidden_size, dropout, bidirectional=bidirectional
)
)
self.cols_grnn.append(
MulCatBlock(
input_size, hidden_size, dropout, bidirectional=bidirectional
)
)
if input_normalize:
self.rows_normalization.append(nn.GroupNorm(1, input_size, eps=1e-8))
self.cols_normalization.append(nn.GroupNorm(1, input_size, eps=1e-8))
else:
# used to disable normalization
self.rows_normalization.append(nn.Identity())
self.cols_normalization.append(nn.Identity())
self.output = nn.Sequential(
nn.PReLU(), nn.Conv2d(input_size, output_size * num_spk, 1)
)
def forward(self, input):
"""Compute output after DPMulCat module.
Args:
input (torch.Tensor): The input feature.
Tensor of shape (batch, N, dim1, dim2)
Apply RNN on dim1 first and then dim2
Returns:
(list(torch.Tensor) or list(list(torch.Tensor))
In training mode, the module returns output of each DPMulCat block.
In eval mode, the module only returns output in the last block.
"""
batch_size, _, d1, d2 = input.shape
output = input
output_all = []
for i in range(len(self.rows_grnn)):
row_input = (
output.permute(0, 3, 2, 1).contiguous().view(batch_size * d2, d1, -1)
)
row_output = self.rows_grnn[i](row_input)
row_output = (
row_output.view(batch_size, d2, d1, -1).permute(0, 3, 2, 1).contiguous()
)
row_output = self.rows_normalization[i](row_output)
# apply a skip connection
output = output + row_output
col_input = (
output.permute(0, 2, 3, 1).contiguous().view(batch_size * d1, d2, -1)
)
col_output = self.cols_grnn[i](col_input)
col_output = (
col_output.view(batch_size, d1, d2, -1).permute(0, 3, 1, 2).contiguous()
)
col_output = self.cols_normalization[i](col_output).contiguous()
# apply a skip connection
output = output + col_output
# if training mode, it returns the output Tensor from all layers.
# Otherwise, it only returns the one from the last layer.
if self.training or i == (len(self.rows_grnn) - 1):
output_i = self.output(output)
output_all.append(output_i)
return output_all
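

# Illustrative usage sketch added for clarity (not part of the original module).
# The sizes below are hypothetical. DPMulCat consumes a (batch, N, dim1, dim2)
# feature map and, in training mode, returns one output tensor per block.
def _example_dpmulcat_usage():
    batch, feat, dim1, dim2, num_spk = 2, 16, 10, 12, 2
    model = DPMulCat(input_size=feat, hidden_size=32, output_size=feat,
                     num_spk=num_spk, num_layers=2, input_normalize=True)
    dummy = torch.randn(batch, feat, dim1, dim2)
    outputs = model(dummy)
    # each element has shape (batch, output_size * num_spk, dim1, dim2)
    print(len(outputs), outputs[-1].shape)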
``` |
{
"source": "jhlee93/pytorch-ssd",
"score": 3
} |
#### File: vision/datasets/read_piap.py
```python
import numpy
import pathlib
import cv2
import os
class PIAPDataset:
def __init__(self, root, transform=None, target_transform=None, is_test=False, keep_difficult=False, label_file=None):
self.root = pathlib.Path(root)
self.transform = transform
self.target_transform = target_transform
is_test = False
if is_test:
image_sets_file = self.root / "piap/test.txt"
else:
image_sets_file = self.root / "piap/train.txt"
self.ids = PIAPDataset._read_image_ids(image_sets_file)
self.keep_difficult = keep_difficult
# if the labels file exists, read in the class names
label_file_path = self.root / "piap/char_obj.names"
classes = []
with open(label_file_path, "r") as f:
lines = f.readlines()
for i in lines:
if "\n" in i:
classes.append(i.split("\n")[0])
                else:
classes.append(i)
self.class_names = tuple(classes)
self.class_dict = {class_name: i for i, class_name in enumerate(self.class_names)}
def __getitem__(self, index):
image_id = self.ids[index]
boxes, labels, is_difficult = self._get_annotation(image_id)
``` |
{
"source": "jhlee93/WNet-cGAN-Keras",
"score": 2
} |
#### File: jhlee93/WNet-cGAN-Keras/preprocess.py
```python
import rasterio as rio
import numpy as np
def preprocess(dsm_path, pan_path, label_path):
dsm = rio.open(dsm_path)
pan = rio.open(pan_path)
label = rio.open(label_path)
# Resize to DSM
dsmW, dsmH = dsm.width, dsm.height
# Meta info.
Dmeta = dsm.meta.copy()
Pmeta = pan.meta.copy()
Lmeta = label.meta.copy()
# Update PAN, LABEL meta info to DSM meta
Dmeta.update({'dtype':'float32'})
Pmeta.update({'width': dsmW, 'height': dsmH, 'count': 1, 'dtype':'float32'})
Lmeta.update({'width': dsmW, 'height': dsmH, 'count': 1, 'dtype':'float32'})
# Normalization
ary_dsm = dsm.read()
ary_pan = pan.read()
ary_label = label.read()
norm_dsm = ary_dsm / np.max(ary_dsm)
norm_pan = ary_pan / np.max(ary_pan)
norm_label = ary_label / np.max(ary_label)
# Save result
dsm_savename = dsm_path.replace('.tif', '_inp.tif')
pan_savename = pan_path.replace('.tif', '_inp.tif')
    label_savename = label_path.replace('.tif', '_inp.tif')
with rio.open(dsm_savename, 'w', **Dmeta) as dsm_data:
dsm_data.write(norm_dsm.astype(np.float32))
with rio.open(pan_savename, 'w', **Pmeta) as pan_data:
pan_data.write(norm_pan.astype(np.float32))
with rio.open(label_savename, 'w', **Lmeta) as label_data:
label_data.write(norm_label.astype(np.float32))
if __name__ == '__main__':
d = './inference/sample/testset/DSM_sub_2.tif'
p = './inference/sample/testset/PAN_sub_2.tif'
l = './inference/sample/testset/LABEL_sub_2.tif'
preprocess(d, p, l)
```
#### File: WNet-cGAN-Keras/src/DataGeneration.py
```python
import keras
import numpy as np
import rasterio
from sklearn.utils import shuffle
class DataGenerator(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, DSM_IDs,
PAN_IDs,
LABEL_IDs,
batch_size=32,
shuffle=True,
pred_fn=None):
'Initialization'
self.DSM_IDs = DSM_IDs
self.PAN_IDs = PAN_IDs
self.LABEL_IDs = LABEL_IDs
self.phase = 'gen'
self.pred_fn = pred_fn
if len(self.PAN_IDs) != len(self.DSM_IDs) or len(self.DSM_IDs) != len(self.LABEL_IDs):
raise ValueError('DSM, PAN or LABEL do not match')
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.DSM_IDs) / self.batch_size))
def getitem(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
DSM_IDs_temp = [self.DSM_IDs[k] for k in indexes]
PAN_IDs_temp = [self.PAN_IDs[k] for k in indexes]
LABEL_IDs_temp = [self.LABEL_IDs[k] for k in indexes]
# Generate data
DSM, PAN, label = self.__data_generation(DSM_IDs_temp, PAN_IDs_temp, LABEL_IDs_temp)
if self.phase == 'gen':
y1 = np.ones([label.shape[0], 1])
return [DSM, PAN, label], [label, y1]
elif self.phase == 'discr':
pred = self.pred_fn([DSM,PAN])
discr_X_1 = np.concatenate((DSM,DSM), axis=0)
discr_X_2 = np.concatenate((label,pred), axis=0)
y1 = np.ones(shape=(len(label),1))
y0 = np.zeros(shape=(len(pred),1))
prob = np.concatenate([y1,y0],axis=0)
#shuffle
            discr_X_1, discr_X_2, prob = shuffle(discr_X_1, discr_X_2, prob, random_state=42)
discr_X = [discr_X_1, discr_X_2]
return discr_X, prob
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
DSM_IDs_temp = [self.DSM_IDs[k] for k in indexes]
PAN_IDs_temp = [self.PAN_IDs[k] for k in indexes]
LABEL_IDs_temp = [self.LABEL_IDs[k] for k in indexes]
# Generate data
DSM, PAN, label = self.__data_generation(DSM_IDs_temp, PAN_IDs_temp, LABEL_IDs_temp)
if self.phase == 'gen':
y1 = np.ones([label.shape[0], 1])
return [DSM, PAN, label], [label, y1]
elif self.phase == 'discr':
pred = self.pred_fn([DSM,PAN])
discr_X_1 = np.concatenate((DSM,DSM), axis=0)
discr_X_2 = np.concatenate((label,pred), axis=0)
y1 = np.ones(shape=(len(label),1))
y0 = np.zeros(shape=(len(pred),1))
prob = np.concatenate([y1,y0],axis=0)
#shuffle
            discr_X_1, discr_X_2, prob = shuffle(discr_X_1, discr_X_2, prob, random_state=42)
discr_X = [discr_X_1, discr_X_2]
return discr_X, prob
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.DSM_IDs))
if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, DSM_IDs_temp, PAN_IDs_temp, LABEL_IDs_temp):
'Generates data containing batch_size samples'
# X_out : (n_samples, *dim, n_channels)
# Y_out : (n_samples, *dim, n_classes)
# Initialization
DSM_out = []
PAN_out = []
LABEL_out = []
for i in range(len(DSM_IDs_temp)):
DSM_out.append(np.moveaxis(rasterio.open(DSM_IDs_temp[i]).read(),0,2))
PAN_out.append(np.moveaxis(rasterio.open(PAN_IDs_temp[i]).read(),0,2))
LABEL_out.append(np.moveaxis(rasterio.open(LABEL_IDs_temp[i]).read(),0,2))
return np.asarray(DSM_out), np.asarray(PAN_out), np.asarray(LABEL_out)
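

# Illustrative sketch added for clarity (not part of the original module):
# how a discriminator batch is assembled in the 'discr' phase above, using
# hypothetical dummy arrays in place of real tiles and generator predictions.
def _example_discr_batch():
    dsm = np.random.rand(4, 64, 64, 1)
    label = np.random.rand(4, 64, 64, 1)   # "real" labels
    pred = np.random.rand(4, 64, 64, 1)    # "fake" generator output
    discr_x_1 = np.concatenate((dsm, dsm), axis=0)
    discr_x_2 = np.concatenate((label, pred), axis=0)
    prob = np.concatenate([np.ones((len(label), 1)),
                           np.zeros((len(pred), 1))], axis=0)
    discr_x_1, discr_x_2, prob = shuffle(discr_x_1, discr_x_2, prob,
                                         random_state=42)
    return [discr_x_1, discr_x_2], prob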
```
#### File: WNet-cGAN-Keras/src/Data.py
```python
import glob
import numpy as np
import rasterio
class Data:
def __init__(self, path, random=False):
"""
input:
path: path to the folder with subfolders: DSM, PAN, LABEL
max_num: int, num of samples
random: bool, to load samples randomly or from 0 to num_max
"""
self.DSM = sorted(glob.glob(path+"/DSM/*.tif"))
self.PAN = sorted(glob.glob(path+"/PAN/*.tif"))
self.LABEL = sorted(glob.glob(path+"/LABEL/*.tif"))
if len(self.DSM) != len(self.PAN) or len(self.LABEL) != len(self.PAN):
raise ValueError('DSM, PAN or LABEL do not match')
def get_data(self, start=0, num=10, as_arr=True, random=False):
"""
function: load max_num of XY into lists
output: list of numpy arrays, X (images) and Y (labels)
"""
DSM_out = []
PAN_out = []
LABEL_out = []
if random:
            idx = np.random.choice(list(range(len(self.DSM))), num, replace=False)
print('randomly loading {0} tiles from {1} tiles'.format(num, len(self.DSM)))
else:
idx = list(range(start, start+num))
print('loading {0} - {1} image tiles'.format(start, start+num-1))
for i in idx:
DSM_out.append(np.moveaxis(rasterio.open(self.DSM[i]).read(),0,2))
PAN_out.append(np.moveaxis(rasterio.open(self.PAN[i]).read(),0,2))
LABEL_out.append(np.moveaxis(rasterio.open(self.LABEL[i]).read(),0,2))
DSM_remove = [self.DSM[i] for i in idx]
PAN_remove = [self.PAN[i] for i in idx]
LABEL_remove = [self.LABEL[i] for i in idx]
for i in range(len(DSM_remove)):
self.DSM.remove(DSM_remove[i])
self.PAN.remove(PAN_remove[i])
self.LABEL.remove(LABEL_remove[i])
if as_arr:
return np.asarray(DSM_out), np.asarray(PAN_out), np.asarray(LABEL_out)
else:
return DSM_out, PAN_out, LABEL_out
def split_trn_vld_tst(self, vld_rate=0.2, tst_rate=0.0, random=True, seed=10):
np.random.seed(seed)
num = len(self.DSM)
vld_num = int(num*vld_rate)
tst_num = int(num*tst_rate)
print('split into {0} train, {1} validation, {2} test samples'.format(num-vld_num-tst_num, vld_num, tst_num))
idx = np.arange(num)
if random:
np.random.shuffle(idx)
DSM_tst, PAN_tst, LABEL_tst = [self.DSM[k] for k in idx[:tst_num]], [self.PAN[k] for k in idx[:tst_num]], [self.LABEL[k] for k in idx[:tst_num]]
DSM_vld, PAN_vld, LABEL_vld = [self.DSM[k] for k in idx[tst_num:tst_num+vld_num]], [self.PAN[k] for k in idx[tst_num:tst_num+vld_num]], [self.LABEL[k] for k in idx[tst_num:tst_num+vld_num]]
DSM_trn, PAN_trn, LABEL_trn = [self.DSM[k] for k in idx[tst_num+vld_num:]], [self.PAN[k] for k in idx[tst_num+vld_num:]], [self.LABEL[k] for k in idx[tst_num+vld_num:]]
return DSM_trn, PAN_trn, LABEL_trn, DSM_vld, PAN_vld, LABEL_vld, DSM_tst, PAN_tst, LABEL_tst
```
#### File: WNet-cGAN-Keras/test/test.py
```python
import rasterio as rio
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import matplotlib.pyplot as plt
def min_max_scale(input_array):
scaler = MinMaxScaler(feature_range=(0,1))
ascolumns = input_array.reshape(-1, 1)
t = scaler.fit_transform(ascolumns)
result = t.reshape(input_array.shape)
return result
def standardization(input_array):
return (input_array - np.mean(input_array)) / np.std(input_array)
pan = rio.open('./origin_pan_sub_1.tif')
# arr_pan = pan.read(1)
arr_pan = pan.read(1, out_shape=(1,1500, 500))
# MinMaxScaling
mm_scaled = min_max_scale(arr_pan)
# mm_scaled = mm_scaled*255.
plt.figure()
plt.hist(mm_scaled.ravel(), 1000, [0, 1])
plt.savefig('minmax_hist.png')
# MaxScaling
max_scaled = arr_pan / np.max(arr_pan)
# max_scaled = max_scaled*255.
plt.figure()
plt.hist(max_scaled.ravel(), 1000, [0, 1])
plt.savefig('max_hist.png')
# meanstd scaling
std_scaled = standardization(arr_pan)
plt.figure()
plt.hist(std_scaled.ravel(), 1000, [np.min(std_scaled), np.max(std_scaled)])
plt.savefig('std_hist.png')
# plt.figure()
# plt.hist
'''
gray = pan.read(1)
plt.figure()
plt.hist(gray.ravel(), 100, [0, 1])
plt.show()
'''
``` |
{
"source": "JH-lee95/Korean-Voice-Cloning",
"score": 3
} |
#### File: Korean-Voice-Cloning/synthesizer/synthesizer_dataset.py
```python
import torch
from torch.utils.data import Dataset
import numpy as np
from pathlib import Path
from synthesizer.utils.text import text_to_sequence
import nltk
nltk.download('punkt')
##
class SynthesizerDataset(Dataset):
def __init__(self, metadata_fpath: Path, mel_dir: Path, embed_dir: Path, hparams):
print("Using inputs from:\n\t%s\n\t%s\n\t%s" % (metadata_fpath, mel_dir, embed_dir))
with metadata_fpath.open("r",encoding="cp949") as metadata_file:
metadata = [line.split("|") for line in metadata_file]
mel_fnames = [x[1] for x in metadata if int(x[4])]
mel_fpaths = [mel_dir.joinpath(fname) for fname in mel_fnames]
embed_fnames = [x[2] for x in metadata if int(x[4])]
embed_fpaths = [embed_dir.joinpath(fname) for fname in embed_fnames]
self.samples_fpaths = list(zip(mel_fpaths, embed_fpaths))
self.samples_texts = [x[5].strip() for x in metadata if int(x[4])]
self.metadata = metadata
self.hparams = hparams
print("Found %d samples" % len(self.samples_fpaths))
def __getitem__(self, index):
# Sometimes index may be a list of 2 (not sure why this happens)
# If that is the case, return a single item corresponding to first element in index
        if isinstance(index, list):
index = index[0]
mel_path, embed_path = self.samples_fpaths[index]
mel = np.load(mel_path).T.astype(np.float32)
# Load the embed
embed = np.load(embed_path)
print(self.samples_texts[index])
# Get the text and clean it
text = text_to_sequence(self.samples_texts[index], self.hparams.tts_cleaner_names)
# Convert the list returned by text_to_sequence to a numpy array
text = np.asarray(text).astype(np.int32)
return text, mel.astype(np.float32), embed.astype(np.float32), index
def __len__(self):
return len(self.samples_fpaths)
def collate_synthesizer(batch, r, hparams):
# Text
x_lens = [len(x[0]) for x in batch]
max_x_len = max(x_lens)
chars = [pad1d(x[0], max_x_len) for x in batch]
chars = np.stack(chars)
# Mel spectrogram
spec_lens = [x[1].shape[-1] for x in batch]
max_spec_len = max(spec_lens) + 1
if max_spec_len % r != 0:
max_spec_len += r - max_spec_len % r
# WaveRNN mel spectrograms are normalized to [0, 1] so zero padding adds silence
# By default, SV2TTS uses symmetric mels, where -1*max_abs_value is silence.
if hparams.symmetric_mels:
mel_pad_value = -1 * hparams.max_abs_value
else:
mel_pad_value = 0
mel = [pad2d(x[1], max_spec_len, pad_value=mel_pad_value) for x in batch]
mel = np.stack(mel)
# Speaker embedding (SV2TTS)
embeds = [x[2] for x in batch]
# Index (for vocoder preprocessing)
indices = [x[3] for x in batch]
# Convert all to tensor
chars = torch.tensor(chars).long()
mel = torch.tensor(mel)
embeds = torch.tensor(embeds)
return chars, mel, embeds, indices
def pad1d(x, max_len, pad_value=0):
return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value)
def pad2d(x, max_len, pad_value=0):
return np.pad(x, ((0, 0), (0, max_len - x.shape[-1])), mode="constant", constant_values=pad_value)
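

# Illustrative sketch added for clarity (not part of the original module):
# pad1d/pad2d right-pad text sequences and mel spectrograms to a common length
# before stacking a batch. The values below are hypothetical.
def _example_padding():
    texts = [np.array([1, 2, 3]), np.array([4, 5])]
    max_len = max(len(t) for t in texts)
    print(np.stack([pad1d(t, max_len) for t in texts]))  # [[1 2 3], [4 5 0]]
    mel = np.random.rand(80, 37).astype(np.float32)
    print(pad2d(mel, 40, pad_value=-4.0).shape)          # (80, 40)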
``` |
{
"source": "jhleee/kakao-chatbot-heroku",
"score": 3
} |
#### File: jhleee/kakao-chatbot-heroku/app.py
```python
import os
import sys
import logging
from flask import Flask, request
from flask_json import FlaskJSON, as_json_p
from simple_kakao import *
app = Flask(__name__)
json = FlaskJSON(app)
@app.route('/keyboard/button', methods=["GET"])
@as_json_p
def keyboard_btn():
btns = Buttons().add("A")\
.add("B")\
.add("C")
return Keyboard(BUTTONS, btns)
@app.route('/keyboard', methods=["GET"])
@as_json_p
def keyboard_txt():
return Keyboard(TEXT)
@app.route('/message', methods=["POST"])
@as_json_p
def msg():
img_url = "https://dummyimage.com/600x400/000/ffffff.gif&text=Hello+World+!"
photo = Photo(img_url, 600, 400)
msg_btn = MessageButton("button", "https://example.com")
return Response(
Message("Blah...", photo, msg_btn),
Keyboard(TEXT)
)
#
if 'DYNO' in os.environ:
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.ERROR)
if __name__ == '__main__':
app.run(host="0.0.0.0",debug=True, use_reloader=True)
``` |
{
"source": "jhleekr/kakao.py",
"score": 3
} |
#### File: kakao.py/examples/quickstart.py
```python
import kakao
class Myclass(kakao.Client):
async def on_ready(self):
print("Logged on")
async def on_message(self, chat):
if chat.message == "ping":
await chat.reply("pong!")
kakao.check_reg("LoginId", "LoginPw")
client = Myclass("LoginId", "LoginPw")
client.run()
``` |
{
"source": "jhlegarreta/dwi_ml",
"score": 2
} |
#### File: data/creation/hdf5_creator.py
```python
import json
import logging
import pathlib
import re
from typing import Dict, IO, List, Union
import nibabel as nib
import numpy as np
from dipy.core.gradients import (gradient_table)
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import load_tractogram
from dipy.tracking.utils import length
# Should be added in scilpy soon when all our PRs are accepted:
from scilpy.reconst.fodf import (
compute_fodf, compute_sh_coefficients)
from scilpy.reconst.frf import compute_ssst_frf
from scilpy.tracking.tools import (
filter_streamlines_by_length, resample_streamlines_step_size)
from scilpy.io.streamlines import compress_sft
from scilpy.? import resample_dwi
from scilpy.? import subsample_sft_francois
from dwi_ml.data.creation.subjects_validation import (
validate_subject_list, list_equals) # Ugly but this script will be modified and maybe we won't need it anymore.
from dwi_ml.experiment.timer import Timer
class HDF5BundleConfig(object):
"""Bundle configuration parameters."""
def __init__(self, name: str, clustering_threshold_mm: float = None,
removal_distance_mm: float = None):
"""
Parameters
----------
name : str
The name of the bundle.
clustering_threshold_mm : float
The clustering threshold applied before removing similar streamlines.
removal_distance_mm : float
The removal threshold used to remove similar streamlines.
"""
self.name = name
self.clustering_threshold_mm = clustering_threshold_mm
self.removal_distance_mm = removal_distance_mm
class HDF5CreatorAbstract(object):
"""Base class for a dataset processor."""
def __init__(self, final_subjects: List[str] = None, bval: int = None,
minimum_length_mm: float = None, step_size_mm: float = None,
bundles: Dict = None):
"""
Parameters
----------
final_subjects:
bval : int
Filter the dMRI image to keep only this b-value (and b0s).
minimum_length_mm : float
Remove streamlines shorter than this length.
step_size : float
Step size to resample streamlines (in mm).
bundles : dict
Bundle-wise parameters; these should include the name and
            subsampling parameters. If empty, datasets will be treated as
wholebrain tractograms.
"""
self.bval = bval
self.minimum_length_mm = minimum_length_mm
self.step_size = step_size_mm
self.final_subjs = final_subjects
if bundles:
# Bundle-specific options
self.bundles = []
for name, config_dict in bundles.items():
try:
bundle_config = HDF5BundleConfig(
name,
config_dict["clustering_threshold_mm"],
config_dict["removal_distance_mm"]
)
self.bundles.append(bundle_config)
except KeyError as e:
raise ValueError("Bundle {} is missing configuration "
"parameters: {}".format(name, e))
else:
# Datasets will be treated as wholebrain tractograms in
# load_and_process_streamlines
self.bundles = None
@classmethod
def from_json(cls, json_file: Union[str, IO], raw_path: str,
*args, **kwargs):
""" Create a HDF5CreatorGeneric object from a json file.
Parameters
----------
raw_path: str
Directory of folders.
json_file : str or IO
            The input configuration file, either as a string or an input stream.
args: (...)
#ToDo
kwargs: (...)
#ToDo
Returns
-------
dataset_creator : HDF5CreatorAbstract
A valid dataset configuration.
"""
# If json_file is a string, create the IO json_file
if isinstance(json_file, str):
json_file = open(json_file, 'r')
# Load the json_file data
raw_config = json.load(json_file)
# Compare subject lists 1) defined by user 2) from json 3) whose files
# are present in directory
final_subjects = cls.verify_subject_lists(raw_path,
kwargs['subject_ids'],
raw_config['subject_ids'])
# Create the creator
dataset_creator = cls(final_subjects, *args, **kwargs, **raw_config)
return dataset_creator
@staticmethod
def verify_subject_lists(raw_path: str, chosen_subjs, json_subjs):
# Find list of existing subjects from folders
all_subjs = [str(f.name) for f in raw_path.iterdir()]
if len(all_subjs) == 0:
raise ValueError('No subject folders found!')
if json_subjs is None and chosen_subjs is None:
            raise ValueError('You must provide a subject list, either when '
                             'calling the script or in the json file!')
# Checking json_subjs
if json_subjs is not None:
non_existing, good_json_subjs, ignored = \
validate_subject_list(all_subjs, json_subjs)
if len(non_existing) > 0:
raise ValueError('Following subjects are in your json file '
'but their folders were not found: {}'
.format(non_existing))
if len(ignored) > 0:
logging.info("Careful! NOT processing subjects {} "
"because they were not included in your json "
"file!".format(ignored))
if chosen_subjs is None:
return good_json_subjs
# Checking chosen_subjs
if chosen_subjs is not None:
non_existing, good_chosen_subjs, ignored = \
                validate_subject_list(all_subjs, chosen_subjs)
if len(non_existing) > 0:
raise ValueError('Following subjects were chosen in option '
'--subject_ids but their folders were not '
'found: {}'.format(non_existing))
if len(ignored) > 0:
logging.info("Careful! NOT processing subjects {} "
"because they were not included in in option "
"--subject_ids!".format(ignored))
if json_subjs is None:
return good_chosen_subjs
# Both json_subjs and chosen_subjs are not None.
# Comparing both lists
if not list_equals(good_chosen_subjs, good_json_subjs):
raise ValueError('TRIED TO DEAL WITH OPTION --subject_ids AS'
'WAS ADDED BY (ANTOINE?). WHAT TO DO IN THE '
'CASE WHERE JSON INFO AND OPTION INFOS ARE NOT'
' THE SAME?')
return json_subjs
def get_state_dict(self):
""" Get a dictionary representation to store in the HDF file."""
return {'bval':
self.bval if self.bval else "",
'minimum_length_mm':
self.minimum_length_mm if self.minimum_length_mm else "",
'step_size_mm':
self.step_size if self.step_size else "",
'subject_ids':
self.final_subjs if self.final_subjs else "",
'bundles':
[b.name for b in self.bundles] if self.bundles else ""}
def load_and_process_volume(self, dwi_image: nib.Nifti1Image,
bvals, bvecs, frf,
wm_mask_image: nib.Nifti1Image,
output_path: pathlib.Path):
""" Abstract method for processing a DWI volume for a specific
dataset.
Parameters
----------
dwi_image : nib.Nifti1Image
Diffusion-weighted images (4D)
bvals:
bvecs:
frf:
wm_mask_image : nib.Nifti1Image
Binary white matter mask.
output_path : str
Path to the output folder.
Returns
-------
output : np.ndarray
The processed output volume.
"""
raise NotImplementedError
def load_process_and_merge_bundles(self,
bundles_path: pathlib.Path,
dwi_ref: nib.Nifti1Image):
"""Load and process a group of bundles and merge all streamlines
together.
Parameters
----------
bundles_path : pathlib.Path
Path to bundles folder.
        dwi_ref : nib.Nifti1Image
Reference used to load and send the streamlines in voxel space.
Returns
-------
output_tractogram : StatefulTractogram
All streamlines in voxel space.
output_lengths : List[float]
The euclidean length of each streamline
"""
with Timer("Processing streamlines", newline=True):
# Initialize
output_tractogram = None
output_lengths = []
n_original_streamlines = 0
if not self.bundles:
# If no bundles described in the json file, we will treat the files
# found in bundles as wholebrain tractograms
chosen_bundles_config = [HDF5BundleConfig(p.stem) for p in
bundles_path.glob('*')]
if len(chosen_bundles_config) == 0:
raise ValueError("No bundles found in the boundles folder!")
else:
chosen_bundles_config = self.bundles
available_bundles = list(bundles_path.iterdir())
for bundle_config in chosen_bundles_config:
bundle, bundle_original_count = self._load_and_process_one_bundle(
bundle_config, available_bundles, bundles_path, dwi_ref)
if bundle is None:
continue
# Keep track of original count
n_original_streamlines += bundle_original_count
# Compute euclidean lengths
output_lengths.extend(length(bundle.streamlines))
# Add processed bundle to output tractogram
if output_tractogram is None:
output_tractogram = bundle
else:
# Validate that tractograms are in the same space
                # Function doesn't exist anymore but should not be necessary
# if we use SFT.
assert are_tractograms_in_same_space(output_tractogram,
bundle),\
"Inconsistent tractogram space: {}".format(bundle)
output_tractogram.streamlines.extend(bundle.streamlines)
# Transfer the streamlines to the reference space before bringing them
# to VOX space. NOTE: This is done in case the streamlines were tracked
# in a different space than the provided dataset reference
if output_tractogram is None:
output_streamlines_rasmm = []
else:
output_streamlines_rasmm = output_tractogram.streamlines
output_tractogram = StatefulTractogram(output_streamlines_rasmm,
dwi_ref,
space=Space.RASMM)
# Internal validation check
output_tractogram.remove_invalid_streamlines()
logging.debug("Ran internal tractogram validation; "
"Remaining: {}".format(len(output_tractogram)))
# Final nb of streamlines
logging.info("Final number of streamlines : "
"{} / {}".format(len(output_tractogram),
n_original_streamlines))
# Send to VOX space and make sure the origin is at the CENTER of the
# voxel. NOTE: This is really important, otherwise interpolation will
# be off by half a voxel.
output_tractogram.to_vox()
output_tractogram.to_center()
return output_tractogram, output_lengths
def _load_and_process_one_bundle(self, bundle_config: HDF5BundleConfig,
available_bundles,
bundles_path: pathlib.Path,
dwi_ref: nib.Nifti1Image):
# Find the bundle
regex = re.compile(".*_{}.t([rc])k".format(bundle_config.name))
matches = [b for b in available_bundles if re.match(regex, str(b))]
if len(matches) == 0:
logging.warning("Bundle {} was not found in "
"path: {}".format(bundle_config.name,
str(bundles_path)))
return None
if len(matches) > 1:
raise ValueError("Bundle {} has matched "
"multiple files: {}"
.format(bundle_config.name, matches))
bundle_file = matches[0]
# Load the bundle
logging.info("Processing bundle: {}".format(bundle_file))
bundle = load_tractogram(str(bundle_file), reference=dwi_ref,
to_space=Space.RASMM,
trk_header_check=False,
bbox_valid_check=False)
if len(bundle) == 0:
logging.warning("Bundle {} contains 0 streamlines, "
"skipping...".format(str(bundle_file)))
return None
# Keep count of the original number of streamlines
bundle_original_count = len(bundle)
logging.debug("Bundle contains {} streamlines"
.format(bundle_original_count))
# Remove streamlines that are too short
bundle = filter_streamlines_by_length(bundle, # toDo. Bundle has to be a sft.
self.minimum_length_mm)
logging.debug("Removed streamlines under "
"{}mm; Remaining: {}".format(self.minimum_length_mm,
len(bundle)))
# Subsample bundles to keep only the closest to centroid (only if we
# have bundle information, i.e. not wholebrain)
if (bundle_config.clustering_threshold_mm is not None
and bundle_config.removal_distance_mm is not None):
bundle = subsample_sft_francois(bundle,
bundle_config.clustering_threshold_mm,
bundle_config.removal_distance_mm)
logging.debug("Subsampled bundle using clustering "
"threshold of {}mm and a removal distance of "
"{}mm; Remaining: {}"
.format(bundle_config.clustering_threshold_mm,
bundle_config.removal_distance_mm,
len(bundle)))
# Resample streamlines to have all the same step size
if self.step_size:
bundle = resample_streamlines_step_size(bundle, self.step_size)
logging.debug("Resampled streamlines' step size to {}mm"
.format(self.step_size))
else: # If no step size is defined, compress the streamlines
bundle = compress_sft(bundle)
return bundle, bundle_original_count
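

# Illustrative sketch added for clarity (not part of the original module):
# the voxel-space / voxel-center convention enforced at the end of
# load_process_and_merge_bundles(), shown on a hypothetical tiny tractogram.
def _example_vox_center_convention():
    reference = nib.Nifti1Image(np.zeros((10, 10, 10), dtype=np.float32),
                                np.eye(4))
    streamlines_rasmm = [np.array([[2., 2., 2.], [5., 5., 5.]],
                                  dtype=np.float32)]
    sft = StatefulTractogram(streamlines_rasmm, reference, space=Space.RASMM)
    sft.to_vox()
    sft.to_center()  # origin at the CENTER of voxels, as needed for interpolation
    return sft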
class HDF5CreatorDWI(HDF5CreatorAbstract):
"""Class containing all configuration options for creating a new DWI
dataset."""
def __init__(self, resample: bool, *args, **kwargs):
"""
Parameters
----------
resample : int
Optional; resample the signal to this number of directions on the
sphere.
"""
super().__init__(*args, **kwargs)
self.resample = resample
def get_state_dict(self):
"""Get a dictionary representation to store in the HDF file."""
state_dict = super().get_state_dict()
state_dict['resample'] = self.resample
return state_dict
def load_and_process_volume(self, dwi_image: nib.Nifti1Image,
bvals, bvecs, frf,
wm_mask_image: nib.Nifti1Image,
output_path: pathlib.Path):
"""
Process a volume for raw DWI dataset, optionally resampling the
gradient directions.
"""
if self.resample:
# Load and resample:
# Brings to SH and then back to directions.
gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())
output = resample_dwi(dwi_image, gtab, sh_order=6)
else:
# Load:
output = dwi_image.get_fdata(dtype=np.float32)
return output
class HDF5CreatorDwiSH(HDF5CreatorAbstract):
"""Class containing all configuration options for creating a new DWI-SH
dataset."""
def __init__(self, sh_order: int = None, *args, **kwargs):
"""
Parameters
----------
sh_order : int
The SH order used to fit the signal
"""
super().__init__(*args, **kwargs)
self.sh_order = sh_order
if self.sh_order is None:
raise ValueError("SH order must be provided")
if self.sh_order not in [2, 4, 6, 8]:
raise ValueError("SH order must be one of [2,4,6,8]")
def get_state_dict(self):
"""Get a dictionary representation to store in the HDF file."""
state_dict = super().get_state_dict()
state_dict['sh_order'] = self.sh_order
return state_dict
def load_and_process_volume(self, dwi_image: nib.Nifti1Image,
bvals, bvecs, frf,
wm_mask_image: nib.Nifti1Image,
output_path: pathlib.Path):
"""
Process a volume for a DWI-SH dataset. Fits spherical harmonics to
the diffusion signal.
"""
gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())
output = compute_sh_coefficients(dwi_image, gtab, # toDo Antoine: get_pams. J'ai pas checké ce que ça fait.
sh_order=self.sh_order)
return output
class HDF5CreatorFodfSH(HDF5CreatorAbstract):
"""Class containing all configuration options for creating a new fODF-SH
dataset."""
def __init__(self, sh_order: int = None, *args, **kwargs):
"""
Parameters
----------
sh_order : int
The SH order used to fit the signal
"""
super().__init__(*args, **kwargs)
self.sh_order = sh_order
if self.sh_order is None:
raise ValueError("SH order must be provided")
if self.sh_order not in [2, 4, 6, 8]:
raise ValueError("SH order must be one of [2,4,6,8]")
def get_state_dict(self):
"""Get a dictionary representation to store in the HDF file."""
state_dict = super().get_state_dict()
state_dict['sh_order'] = self.sh_order
return state_dict
def load_and_process_volume(self, dwi_image: nib.Nifti1Image,
bvals, bvecs, frf,
wm_mask_image: nib.Nifti1Image,
output_path: pathlib.Path):
"""
Process a volume for a fODF-SH dataset. Compute a response function,
fit fODFs and return the corresponding SH coeffs.
"""
# Don't provide a wm mask, instead rely on FA threshold
        frf = compute_ssst_frf(dwi_image, bvals, bvecs)
# Save frf to file
np.savetxt(str(output_path.joinpath("frf.txt")), frf)
n_peaks = 1 # Cannot use 0 peaks, so we use only 1
return_sh = True
# Computing fODF only inside WM mask
peaks = compute_fodf(dwi_image.get_data(), bvals, bvecs, frf,
sh_order=self.sh_order,
nbr_processes=None,
mask=wm_mask_image, sh_basis='tournier07',
return_sh=return_sh,
n_peaks=n_peaks)
output = peaks.shm_coeff.astype(np.float32)
return output
class HDF5CreatorFODFPeaks(HDF5CreatorAbstract):
"""Class containing all configuration options for creating a new fODF-peaks
dataset."""
def __init__(self, sh_order: int = None, n_peaks: int = None, *args,
**kwargs):
"""
Parameters
----------
sh_order : int
The SH order used to fit the signal
n_peaks : int
The number of peaks to use as input to the model
"""
super().__init__(*args, **kwargs)
self.sh_order = sh_order
self.n_peaks = n_peaks
if self.sh_order is None:
raise ValueError("SH order must be provided")
if self.sh_order not in [2, 4, 6, 8]:
raise ValueError("SH order must be one of [2,4,6,8]")
if self.n_peaks is None:
raise ValueError("n_peaks must be provided")
if self.n_peaks not in [1, 2, 3]:
raise ValueError("n_peaks must be one of [1,2,3]")
def get_state_dict(self):
"""Get a dictionary representation to store in the HDF file."""
state_dict = super().get_state_dict()
state_dict['sh_order'] = self.sh_order
state_dict['n_peaks'] = self.n_peaks
return state_dict
def load_and_process_volume(self, dwi_image: nib.Nifti1Image,
bvals, bvecs, frf,
wm_mask_image: nib.Nifti1Image,
output_path: pathlib.Path):
"""
Process a volume for a fODF-peaks dataset.
Compute a response function, fit fODFs,
extract the main peaks and return a 4D volume, where the last axis
is each peak (3D vector) with its value (scalar), all flattened into
a single dimension.
"""
# Don't provide a wm mask, instead rely on FA threshold
frf = compute_ssst_frf(dwi_image, bvals, bvecs)
# Save frf to file
np.savetxt(str(output_path.joinpath("frf.txt")), frf)
return_sh = False
# Computing fODF only inside WM mask
pam = compute_fodf(dwi_image.get_data(), bvals, bvecs, frf,
sh_order=self.sh_order,
nbr_processes=None, mask=wm_mask_image,
sh_basis='tournier07', return_sh=return_sh,
n_peaks=self.n_peaks)
# Peaks directions are scaled by the normalized peaks values
fodf_peaks_dirs = pam.peak_dirs.astype(np.float32)
new_shape = wm_mask_image.shape + (-1,)
output = fodf_peaks_dirs.reshape(new_shape)
return output
```
#### File: processing/dwi/dwi.py
```python
from dipy.core.gradients import GradientTable
from dipy.core.sphere import Sphere
from dipy.data import get_sphere
from dipy.reconst.shm import sph_harm_lookup
import nibabel as nib
import numpy as np
from scilpy.io.utils import validate_sh_basis_choice
from scilpy.reconst.raw_signal import compute_sh_coefficients
def standardize_data(data: np.ndarray, mask: np.ndarray = None,
independent: bool = False):
"""Apply classic data standardization (centralized, normalized = zero-
centering and variance to 1).
Parameters
----------
data : np.ndarray with shape (X, Y, Z, #modalities)
Volume to normalize along each modality.
mask : binary np.ndarray with shape (X, Y, Z)
3D mask defining which voxels should be used for normalization. If None,
all non-zero voxels will be used. Voxels outside mask will be set to
nan.
independent: bool
If true, will normalize each modality independently (last axis). Else,
        will normalize with the mean and variance of all data. This choice
        deserves some thought: the typical machine learning approach is to
        normalize each input feature (each modality) separately, but our
        modalities are not independent. For example, with peaks, the peak in
        one direction and the peak in another should probably belong to the
        same distribution for the values to remain meaningful.
        We recommend using independent = False for your dwi data.
Returns
-------
standardized_data : np.ndarray with shape (X, Y, Z, #modalities)
Standardized data volume, with zero-mean and unit variance along each
axis of the last dimension.
"""
if mask is None:
# If no mask is given, use non-zero data voxels
mask = np.all(data != 0, axis=-1)
else:
# Mask resolution must fit DWI resolution
assert mask.shape == data.shape[:3], "Normalization mask resolution " \
"does not fit data..."
    # Computing mean and std.
    # Also dealing with the extreme case where std == 0 (which shouldn't
    # happen: it would mean this modality is constant and thus meaningless for
    # your model). In that case we don't divide the data; we only shift its
    # mean, so the value in all voxels becomes 0.
if independent:
mean = np.mean(data[mask], axis=0)
std = np.std(data[mask], axis=0)
std[std == 0] = 1
else:
mean = np.mean(data[mask])
std = np.std(data[mask])
if std == 0:
std = 1
standardized_data = (data - mean) / std
standardized_data[~mask] = np.nan
return standardized_data
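

# Illustrative sketch added for clarity (not part of the original module):
# standardizing a hypothetical 4D volume. Voxels outside the mask become NaN.
def _example_standardize_data():
    data = np.random.rand(5, 5, 5, 3).astype(np.float32) * 10. + 2.
    mask = np.zeros((5, 5, 5), dtype=bool)
    mask[1:4, 1:4, 1:4] = True
    standardized = standardize_data(data, mask, independent=False)
    print(np.nanmean(standardized), np.nanstd(standardized))  # ~0 and ~1
    print(np.isnan(standardized[0, 0, 0]).all())              # True (outside mask)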
def resample_raw_dwi_from_sh(dwi_image: nib.Nifti1Image,
gradient_table: GradientTable,
sh_basis: str = 'descoteaux07',
sphere: Sphere = None, sh_order: int = 8,
smooth: float = 0.006):
"""Resample a diffusion signal according to a set of directions using
spherical harmonics.
Parameters
----------
dwi_image : nib.Nifti1Image object
Diffusion signal as weighted images (4D).
gradient_table : GradientTable
Dipy object that contains all bvals and bvecs.
sh_basis: str
Either 'tournier07' or 'descoteaux07'. Default: descoteaux07.
sphere : dipy.core.sphere.Sphere, optional
Directions the diffusion signal will be resampled to. Directions are
assumed to be on the whole sphere, not the hemisphere like bvecs.
If omitted, 100 directions evenly distributed on the sphere will be
used (Dipy's "repulsion100").
sh_order : int, optional
SH order to fit, by default 8.
smooth : float, optional
Lambda-regularization coefficient in the SH fit, by default 0.006.
Returns
-------
resampled_dwi : np.ndarray (4D)
Resampled "raw" diffusion signal.
"""
validate_sh_basis_choice(sh_basis)
# Get the "real" SH fit
# sphere = None, so it computes the sh coefficients based on the bvecs.
data_sh = compute_sh_coefficients(dwi_image, gradient_table, sh_order,
basis_type=sh_basis, smooth=smooth)
# Get new directions
if sphere is None:
sphere = get_sphere("repulsion100")
sh_basis = sph_harm_lookup.get(sh_basis)
# Resample data
# B.T contains the new sampling scheme and B*data_sh projects to the sphere.
# B : 2-D array; real harmonics sampled at (\theta, \phi)
# m : array; degree of the sampled harmonics.
# l : array; order of the sampled harmonics.
B, m, l = sh_basis(sh_order, sphere.theta, sphere.phi)
data_resampled = np.dot(data_sh, B.T)
return data_resampled
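

# Illustrative sketch added for clarity (not part of the original module):
# the core of the resampling above is a projection of SH coefficients onto a
# new set of directions through B.T. The SH coefficients here are random,
# purely to show the shapes involved.
def _example_sh_projection(sh_order: int = 8):
    sphere = get_sphere("repulsion100")
    basis_fn = sph_harm_lookup["descoteaux07"]
    B, m, l = basis_fn(sh_order, sphere.theta, sphere.phi)
    n_coeffs = B.shape[1]  # 45 coefficients for sh_order 8
    data_sh = np.random.rand(4, 4, 4, n_coeffs)
    data_resampled = np.dot(data_sh, B.T)
    print(data_resampled.shape)  # (4, 4, 4, 100)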
```
#### File: dwi_ml/experiment/learning_utils.py
```python
def compute_gradient_norm(parameters):
"""Compute the gradient norm of the provided iterable parameters.
(Machine learning gradient descent, not dwi gradients!)
Parameters
----------
parameters : list of torch.Tensor
Model parameters after loss.backwards() has been called. All parameters
p must have a p.grad attribute.
Returns
-------
total_norm : float
The total gradient norm of the parameters
"""
total_norm = 0.
for p in parameters:
        param_norm = p.grad.detach().norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1. / 2)
return total_norm
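

# Illustrative usage sketch added for clarity (not part of the original module).
# The model, input and target below are hypothetical.
def _example_gradient_norm():
    import torch
    model = torch.nn.Linear(4, 1)
    loss = torch.nn.functional.mse_loss(model(torch.randn(8, 4)),
                                        torch.randn(8, 1))
    loss.backward()
    print(compute_gradient_norm(model.parameters()))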
```
#### File: dwi_ml/experiment/scripts_utils.py
```python
from os import path
def add_dwi_ml_positional_args(p):
p.add_argument('train_database_path', type=str,
help="Path to the model_and_training set (.hdf5).")
p.add_argument('valid_database_path', type=str,
help="Path to the validation set (.hdf5).")
return p
def add_dwi_ml_optional_args(p):
p.add_argument('--add-streamline-noise', action="store_true",
help="Add random gaussian noise to streamline coordinates "
"on-the-fly. Noise variance is 0.1 * step-size, "
"or 0.1mm if no step size is used.")
p.add_argument('--streamlines-cut-ratio', type=float,
help="Cut a percentage of streamline at a random point in "
"each batch. [None]")
p.add_argument('--step-size', type=float,
help="Resample all streamlines to this step size. If None, "
"train on streamlines as they are (e.g. compressed). "
"[None]")
p.add_argument('--neighborhood-dist-mm', type=float,
help="Distance (in mm) at which to get neighborhood "
"information to concatenate to the input vector. If "
"None, no neighborhood is added. [None]")
p.add_argument('--nb-neighborhood-axes', type=int,
help="Nb of axes at which to get neighborhood distance. "
"Default = 6 (up, down, left, right, front, back).")
p.add_argument('--add-previous-dir', action="store_true",
help="Concatenate previous streamline direction to the "
"input vector.")
p.add_argument('--lazy', action="store_true",
help="Do not load all the model_and_training dataset in "
"memory at once. Load only what is needed for a batch.")
p.add_argument('--batch-size', type=int, default=20000,
help="Number of streamline points per batch. [20000]")
p.add_argument('--volumes-per-batch', type=int,
help="Limits the number of volumes used in a batch. Also "
"determines the cache size if --cache-manager is "
"used. If None, use true random sampling. [None]")
p.add_argument('--cycles-per-volume-batch', type=int,
help="Relevant only if --volumes-per-batch is used. Number "
"of update cycles before chaging to new volumes. [1]")
p.add_argument('--n-epoch', type=int, default=100,
help="Maximum number of epochs. [100]")
p.add_argument('--seed', type=int, default=1234,
help="Random experiment seed. [1234]")
p.add_argument('--patience', type=int, default=20,
help="Use early stopping. Defines the number of epochs "
"after which the model should stop model_and_training "
"if the loss hasn't improved. [20].")
p.add_argument('--use-gpu', action="store_true",
help="Train using the GPU.")
p.add_argument('--num-workers', type=int, default=0,
help="Number of parallel CPU workers. [0]")
p.add_argument('--worker-interpolation', action='store_true',
help="If using --num-workers > 0, interpolation will be "
"done on CPU by the workers instead of on the main "
"thread using the chosen device. [False]")
p.add_argument('--cache-manager', action="store_true",
help="Relevant only if --lazy is used. Cache volumes and "
"streamlines in-memory instead of fetching from the "
"disk everytime. Cache size is determined by "
"--volumes-per-batch.")
p.add_argument('--taskman-managed', action="store_true",
help="Instead of printing progression, print taskman-"
"relevant data.")
p.add_argument('--logging', type=str,
choices=['error', 'warning', 'info', 'debug'],
default='warning', help="Activate debug mode")
return p
def check_train_valid_args_path(args):
if not path.exists(args.train_database_path):
raise ValueError(
'The training set path seems to be wrong!')
if not path.exists(args.valid_database_path):
raise ValueError('The validation set path seems to be wrong!')
```
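A sketch of how these helpers might be combined in a user script (the parser description and example command line are illustrative, assuming the module is importable under the path shown in the file header above):
```python
import argparse

from dwi_ml.experiment.scripts_utils import (add_dwi_ml_positional_args,
                                              add_dwi_ml_optional_args,
                                              check_train_valid_args_path)

p = argparse.ArgumentParser(description="Train a dwi_ml model.")
p = add_dwi_ml_positional_args(p)
p = add_dwi_ml_optional_args(p)

# e.g.: my_script.py train.hdf5 valid.hdf5 --batch-size 10000 --use-gpu
args = p.parse_args()

# Raises ValueError if either .hdf5 path does not exist on disk.
check_train_valid_args_path(args)
```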
#### File: dwi_ml/tracking/tracker_abstract.py
```python
import os
from math import ceil
from typing import Any, Dict, List, Union
import h5py
import nibabel as nib
import numpy as np
import torch
import tqdm
from dipy.io.stateful_tractogram import Space
from dipy.io.streamline import load_tractogram
from dipy.tracking.streamline import length as slength
from nibabel.affines import apply_affine
from nibabel.streamlines import ArraySequence, Tractogram
from dwi_ml.data.dataset.single_subject_containers import (
MRIDataVolume, SubjectData)
from dwi_ml.data.processing.space.world_to_vox import convert_world_to_vox
from dwi_ml.experiment.timer import Timer
from dwi_ml.tracking.step_tracker import (StepTracker,
PreInitializedStepTracker)
from dwi_ml.tracking.utils import StoppingFlags, count_flags
class TrackerAbstract(object):
"""Use an existing model to track on a new subject."""
def __init__(self, model: torch.nn.Module,
dataset_file: str, subject_id: str,
seeding_file: str, tracking_file: str = None,
rng_seed: int = 1234, n_seeds_per_voxel: int = 1,
seeding_ref: str = None, use_gpu: bool = True,
add_neighborhood: float = None,
add_previous_dir: bool = False):
"""
Parameters
----------
model: torch.nn.Module
Trained model that will generate the tracking directions.
MUST HAVE A sample_tracking_directions FUNCTION AND AN eval FUNCTION.
dataset_file : str
Path to dataset file (.hdf5).
subject_id : str
Subject id to fetch from the dataset file.
seeding_file : str
Path to seeding mask (.nii.gz) or seeding streamlines (.tck|.trk).
tracking_file : str (optional)
Path to binary tracking mask (.nii.gz).
rng_seed : int
Random seed.
n_seeds_per_voxel : int
Number of random seeds to be initialized in each voxel of the
seeding mask.
seeding_ref : str
Path to reference file, necessary if `seeding_file` is a tractogram.
use_gpu : bool
If False, do not use the GPU for tracking.
add_neighborhood : float (optional)
If given, add neighboring information to the input signal at the
given distance in each axis (in mm).
add_previous_dir : bool (optional)
If given, add the streamline previous direction to the input signal.
"""
self.rng = np.random.RandomState(seed=rng_seed)
self.n_seeds_per_voxel = n_seeds_per_voxel
self.use_gpu = use_gpu
# Load subject
with h5py.File(dataset_file, 'r') as hdf_file:
assert subject_id in list(hdf_file.keys()), \
"Subject {} not found in file: {}".format(subject_id,
dataset_file)
self.tracto_data = SubjectData.create_from_hdf(hdf_file[subject_id])
self.tracto_data.input_dv.subject_id = subject_id
ext = os.path.splitext(seeding_file)[1]
if ext in ['.nii', '.gz']:
# Load seeding mask (should be a binary image)
seeding_image = nib.load(seeding_file)
self.seeding = seeding_image.get_fdata()
self.affine_seedsvox2rasmm = seeding_image.affine
elif ext in ['.tck', '.trk']:
# Load seeding streamlines
if seeding_ref is None:
raise ValueError("A reference is necessary to load a "
"tractogram; please use --seeding-ref")
seeding_ref_img = nib.load(seeding_ref)
seeding_tractogram = load_tractogram(seeding_file, seeding_ref_img,
to_space=Space.VOX)
seeding_tractogram.to_center()
self.seeding = seeding_tractogram.streamlines
self.affine_seedsvox2rasmm = seeding_ref_img.affine
# Load tracking mask if given
self.tracking_dv = None
if tracking_file:
tracking_image = nib.load(tracking_file)
self.tracking_dv = MRIDataVolume(
data=tracking_image.get_fdata(dtype=np.float32),
affine_vox2rasmm=tracking_image.affine)
# Compute affine to bring seeds into DWI voxel space
# affine_seedsvox2dwivox : seeds voxel space => rasmm space => dwi voxel space
affine_rasmm2dwivox = np.linalg.inv(
self.tracto_data.input_dv.affine_vox2rasmm)
self.affine_seedsvox2dwivox = np.dot(
affine_rasmm2dwivox, self.affine_seedsvox2rasmm)
# Other parameters
self.add_neighborhood = add_neighborhood
self.add_previous_dir = add_previous_dir
self.model = model
self.model.eval()
@staticmethod
def _load_model(model_path: str, hyperparameters: Dict[str, Any]):
raise NotImplementedError
@staticmethod
def _run_tracker(tracker: StepTracker, seeds: Union[np.ndarray, List]) \
-> Tractogram:
"""Runs a tracker, starting from the provided seeds, and returns the
final tractogram.
Parameters
----------
tracker : StepTracker
Tracker that will grow streamlines
seeds : np.ndarray with shape (n_streamlines, 3) or (n_streamlines,
n_points, 3), or list of np.ndarray with shape (n_points, 3)
Initial starting points or initial streamlines.
Returns
-------
tractogram : nib.Tractogram
Tractogram containing all streamlines and stopping information.
"""
tractogram = None
tracker.initialize(seeds)
length_stopping_criterion = \
tracker.stopping_criteria[StoppingFlags.STOPPING_LENGTH]
with torch.no_grad(), \
tqdm.tqdm(range(length_stopping_criterion.keywords['max_nb_steps'])
) as pbar:
for _ in pbar:
tracker.grow_step()
if tractogram is None:
tractogram = tracker.harvest()
else:
tractogram += tracker.harvest()
if tracker.is_finished_tracking():
pbar.close()
break
return tractogram
@staticmethod
def _get_tracking_seeds_from_mask(mask: np.ndarray,
affine_seedsvox2dwivox: np.ndarray,
n_seeds_per_voxel: int,
rng: np.random.RandomState) -> np.ndarray:
"""Given a binary seeding mask, get seeds in DWI voxel space using the
provided affine.
Parameters
----------
mask : np.ndarray with shape (X,Y,Z)
Binary seeding mask.
affine_seedsvox2dwivox : np.ndarray
Affine to bring the seeds from their voxel space to the input voxel
space.
n_seeds_per_voxel : int
Number of seeds to generate in each voxel
rng : np.random.RandomState
Random number generator
Returns
-------
seeds : np.ndarray with shape (N_seeds, 3)
Position of each initial tracking seeds
"""
seeds = []
indices = np.array(np.where(mask)).T
for idx in indices:
seeds_in_seeding_voxel = idx + rng.uniform(-0.5, 0.5,
size=(n_seeds_per_voxel, 3))
seeds_in_dwi_voxel = nib.affines.apply_affine(affine_seedsvox2dwivox,
seeds_in_seeding_voxel)
seeds.extend(seeds_in_dwi_voxel)
seeds = np.array(seeds, dtype=np.float32)
return seeds
def track(self, max_length: float, batch_size: int = None,
step_size: float = None, max_angle: float = None,
min_length: float = None) -> Tractogram:
"""Track a whole tractogram from the seeds. First run forward,
then backwards using the streamlines that were tracked.
Parameters
----------
max_length : float
Maximum streamline length in mm.
batch_size : int (optional)
Number of streamlines that should be tracked at the same time.
If None, try with a full batch and divide by 1.25 until it fits into
memory.
step_size : float (optional)
Step size in mm. If None, use the model outputs without scaling.
max_angle : float
Maximum angle in degrees that two consecutive segments can have
between each other (corresponds to the maximum half-cone angle).
min_length : float
Minimum streamline length in mm.
(If given, streamlines shorter than this length will be discarded).
Returns
-------
tractogram : nib.Tractogram
Tractogram with all the tracked streamlines.
"""
if isinstance(self.seeding, np.ndarray):
# Get random seeds from seeding mask
seeds = self._get_tracking_seeds_from_mask(
self.seeding, self.affine_seedsvox2dwivox,
self.n_seeds_per_voxel, self.rng)
else:
# Use streamlines as seeds
seeds = self.seeding
# Compute minimum length voxel-wise
if min_length:
min_length_vox = convert_world_to_vox(
min_length, self.tracto_data.input_dv.affine_vox2rasmm)
# Initialize trackers
if isinstance(seeds, (list, ArraySequence)):
forward_tracker_cls = PreInitializedStepTracker
else:
forward_tracker_cls = StepTracker
forward_step_tracker = forward_tracker_cls(model=self.model,
input_dv=self.tracto_data.input_dv,
mask_dv=self.tracking_dv,
step_size=step_size,
add_neighborhood=self.add_neighborhood,
add_previous_dir=self.add_previous_dir,
max_length=max_length,
max_angle=max_angle,
use_gpu=self.use_gpu)
backwards_step_tracker = PreInitializedStepTracker(model=self.model,
input_dv=self.tracto_data.input_dv,
mask_dv=self.tracking_dv,
step_size=step_size,
add_neighborhood=self.add_neighborhood,
add_previous_dir=self.add_previous_dir,
max_length=max_length,
max_angle=max_angle,
use_gpu=self.use_gpu)
if step_size:
print("Tracking using a step size of {:.3f} mm "
"({:.3f} voxels)".format(step_size, forward_step_tracker.step_size_vox))
else:
print("Tracking using the model output without scaling")
print("Tracking from {} seeds".format(len(seeds)))
if batch_size is None:
batch_size = len(seeds)
# Try batch sizes until it fits into memory (divide by 1.25 if it
# doesn't and try again)
while True:
print("Trying a batch size of {} streamlines".format(batch_size))
n_iter = int(ceil(len(seeds) / batch_size))
try:
tractogram = None
for i, start in enumerate(range(0, len(seeds), batch_size)):
end = start + batch_size
print("Iteration {} of {}".format(i + 1, n_iter))
# Forward tracking
with Timer("Forward pass", newline=True, color='green'):
batch_tractogram = self._run_tracker(forward_step_tracker,
seeds[start:end])
stopping_flags = batch_tractogram.data_per_streamline['stopping_flags'].astype(np.uint8)
print("Forward pass stopped because of - mask: {:,}\t "
"curvature: {:,}\t length: {:,}".format(
count_flags(stopping_flags, StoppingFlags.STOPPING_MASK),
count_flags(stopping_flags, StoppingFlags.STOPPING_CURVATURE),
count_flags(stopping_flags, StoppingFlags.STOPPING_LENGTH)))
# Backwards tracking
# Flip streamlines to initialize backwards tracker
streamlines_init = [s[::-1] for s in batch_tractogram.streamlines]
with Timer("Backwards pass", newline=True, color='green'):
batch_tractogram = self._run_tracker(backwards_step_tracker,
streamlines_init)
stopping_flags = batch_tractogram.data_per_streamline['stopping_flags'].astype(np.uint8)
print("Backwards pass stopped because of - mask: {:,}\t "
"curvature: {:,}\t length: {:,}".format(
count_flags(stopping_flags, StoppingFlags.STOPPING_MASK),
count_flags(stopping_flags, StoppingFlags.STOPPING_CURVATURE),
count_flags(stopping_flags, StoppingFlags.STOPPING_LENGTH)))
# Filter short streamlines
if min_length:
lengths_vox = slength(batch_tractogram.streamlines)
to_keep = np.where(lengths_vox > min_length_vox)
print("Removing {} streamlines that were under {} mm".format(
len(batch_tractogram) - len(to_keep[0]), min_length))
# Make a copy because indexing an ArraySequence creates
# a "view" with the same _data property, which causes problems
# when extending tractograms
batch_tractogram = batch_tractogram[to_keep].copy()
if tractogram is None:
tractogram = batch_tractogram
else:
tractogram += batch_tractogram
return tractogram
except MemoryError:
print("Not enough memory for a batch size of {} streamlines".format(batch_size))
batch_size = int(batch_size / 1.25)
if batch_size <= 0:
raise MemoryError("Not enough memory! You might need a "
"bigger graphics card!")
except RuntimeError as e:
if "out of memory" in e.args[0] or "CuDNN error" in e.args[0]:
print("Not enough memory for a batch size of {} streamlines"
.format(batch_size))
batch_size = int(batch_size / 1.25)
if batch_size <= 0:
raise MemoryError("Not enough memory! You might need a "
"bigger graphics card!")
else:
raise e
```
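The seeding logic in `_get_tracking_seeds_from_mask` can be exercised on its own: for every nonzero voxel of a binary mask, jitter `n_seeds_per_voxel` points uniformly within the voxel and map them through the seeds-to-DWI affine. This standalone sketch (mask size, affine and seed count are arbitrary) mirrors that behaviour:
```python
import numpy as np
import nibabel as nib

rng = np.random.RandomState(1234)
mask = np.zeros((4, 4, 4), dtype=bool)
mask[1, 2, 3] = True
mask[2, 0, 1] = True

# Identity affine: seeds voxel space == DWI voxel space in this toy case.
affine_seedsvox2dwivox = np.eye(4)
n_seeds_per_voxel = 3

seeds = []
for idx in np.array(np.where(mask)).T:
    jitter = rng.uniform(-0.5, 0.5, size=(n_seeds_per_voxel, 3))
    seeds.extend(nib.affines.apply_affine(affine_seedsvox2dwivox, idx + jitter))
seeds = np.array(seeds, dtype=np.float32)
print(seeds.shape)  # 2 voxels * 3 seeds -> (6, 3)
```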
#### File: dwi_ml/training/trainer_abstract.py
```python
import datetime
import json
import os
import time
import numpy as np
import torch
from dwi_ml.data.dataset.data_list import (DataListForTorch,
LazyDataListForTorch)
from dwi_ml.experiment.timer import Timer
from dwi_ml.experiment.monitoring import ValueHistoryMonitor
class DWIMLAbstractLocal:
""" Meant for projects working on learning local information in the
voxel. Information will be X = a voxel. """
def __init__(self):
raise NotImplementedError
def build_model(self):
raise NotImplementedError
def train(self, **kwargs):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load_model(self, filepath, **kwargs):
raise NotImplementedError
class DWIMLAbstractSequences:
""" Meant for projects working on learning tractography. Information will
be X = sequences."""
def __init__(self,
train_database_path,
valid_database_path,
name: str = None,
# Concerning the choice of inputs:
nb_degree_angles: int = 128,
add_streamline_noise: bool = False,
streamlines_cut_ratio: float = None, step_size: float = None,
neighborhood_dist_mm: float = None,
nb_neighborhood_axes: int = 6,
add_previous_dir: bool = False,
lazy: bool = False,
# Concerning the memory usage:
batch_size: int = 20000, volumes_per_batch: int = None,
cycles_per_volume_batch: int = 1,
n_epoch: int = 100, seed: int = 1234, patience: int = 20,
use_gpu: bool = True, num_workers: int = 0,
worker_interpolation: bool = False,
cache_manager: bool = False, taskman_managed: bool = False):
"""
Mandatory parameters:
---------------------
train_database_path : str
Path to training database (hdf5 file)
valid_database_path : str
Path to validation database (hdf5 file)
Optional parameters:
--------------------
====> General
name : str
Optional name of the experiment. If given, it is prepended to the
auto-generated name. [None]
====> Concerning the choice of inputs:
nb_degree_angles: int
Precision for angles: number of directions on the sphere. If the
previous direction is added to the input, we need to know how many
directions there are. But the output is managed with output_model, not
with this option. [128]
add_streamline_noise : bool
If set, add random gaussian noise to streamline coordinates
on-the-fly. Noise variance is 0.1 * step-size, or 0.1mm if no step
size is used. [False]
streamlines_cut_ratio : float
Percentage of streamlines to randomly cut in each batch. If None, do
not split streamlines. [None]
NOTE: Preprocessed .hdf5 file should contain resampled
streamlines; otherwise, cutting streamlines will be biased
towards long segments (fewer points)
step_size : float
Constant step size that every streamline should have between points
(in mm). If None, train on streamlines as they are (ex, compressed).
[None]
neighborhood_dist_mm : float
If given, add neighboring information to the input signal at the
given distance in each axis (in mm). [None]
neighborhood_axes : int
Nb of axes at which to get neighborhood distance. Default = 6 (up,
down, left, right, front, back).
add_previous_dir : bool
If set, add the previous streamline direction to the input signal.
[False]
lazy : bool
If True, use a lazy dataset. [False]
====> Concerning the memory usage:
batch_size : int
Number of time steps to use in a batch (the length of sequences varies
a lot, so we define the number of time steps to keep memory usage more
consistent) [20,000]
volumes_per_batch : int
Limit the number of sampled volumes inside a single batch.
If None, use true random sampling. [None]
cycles_per_volume_batch : int
Number of batches where the same volumes will be reused before
sampling new volumes. [1]
n_epoch : int
Maximum number of epochs [100]
seed : int
Seed for random numbers [1234]
patience : int
Use early stopping. Defines the number of epochs after which
the model should stop training if the loss hasn't improved. [20]
use_gpu : bool
Use the GPU; if False, use CPU. [True]
num_workers : int
Number of processes that should process the data between training
updates. [0]
worker_interpolation : bool
If True and num_workers > 0, interpolation will be done on CPU by
the workers. Otherwise, interpolation is done on the main thread
using the chosen device. [False]
cache_manager : bool
If True, use a cache manager to keep volumes and streamlines in
memory. [False]
taskman_managed : bool
If True, taskman manages the experiment. Do not output progress
bars and instead output special messages for taskman. [False]
"""
# Init mandatory properties
self.train_database_path = train_database_path
self.valid_database_path = valid_database_path
# Init optional properties
self.name = name
# Init "globals" from user's project
self.nb_degree_angles = nb_degree_angles
# Init args concerning choice of inputs
self.add_streamline_noise = add_streamline_noise
self.streamlines_cut_ratio = streamlines_cut_ratio
self.step_size = step_size
self.neighborhood_dist_mm = neighborhood_dist_mm
self.nb_neighborhood_axes = nb_neighborhood_axes  # toDo. To be decided! I may have to change int to str='method'
# We would have a "6axes" method and a "mimicGrid" method for my CNN,
# where I would take 27 axes, not all of the same length! Possibly
# twice as many axes to get the equivalent of 2 voxels around my point
# in every direction. It could be [str|int]
self.add_previous_dir = add_previous_dir
self.lazy = lazy
# Init args concerning memory usage
self.batch_size = int(batch_size)
self.volumes_per_batch = volumes_per_batch
self.n_epoch = int(n_epoch)
self.seed = seed
self.patience = patience
self.use_gpu = use_gpu
self.num_workers = num_workers
self.worker_interpolation = worker_interpolation
self.cycles_per_volume_batch = cycles_per_volume_batch
self.cache_manager = cache_manager
self.taskman_managed = taskman_managed
self.taskman_report = {
'loss_train': None,
'loss_valid': None,
'epoch': None,
'best_epoch': None,
'best_score': None,
'update': None,
'update_loss': None
}
# Time limited run
self.hangup_time = None
htime = os.environ.get('HANGUP_TIME', None)
if htime is not None:
self.hangup_time = int(htime)
print('Will hang up at ' + htime)
# Set device
self.device = None
if self.use_gpu and torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# Set random numbers
self.rng = np.random.RandomState(self.seed)
torch.manual_seed(self.seed) # Set torch seed
if self.use_gpu:
torch.cuda.manual_seed(self.seed)  # toDo. Why does this say error?
# If using worker_interpolation, data is processed on CPU
self.dataset_device = torch.device(
'cpu') if self.worker_interpolation else self.device
# Init datasets
# NOTE. WE HOPE THAT MULTISUBJECT CAN REALLY BE COMMON TO ALL OF US.
# So, I've put the dataset creation here in the abstract class. Otherwise we
# can move it back to each user's script.
other_kw_args = {}
if self.lazy:
dataset_cls = LazyMultiSubjectDataset
if self.cache_manager:
other_kw_args['cache_size'] = self.volumes_per_batch
else:
dataset_cls = MultiSubjectDataset
self.train_dataset = dataset_cls(
self.train_database_path, self.rng,
add_streamline_noise=self.add_streamline_noise,
step_size=self.step_size,
neighborhood_dist_mm=self.neighborhood_dist_mm,
streamlines_cut_ratio=self.streamlines_cut_ratio,
add_previous_dir=self.add_previous_dir,
do_interpolation=self.worker_interpolation,
device=self.dataset_device,
taskman_managed=self.taskman_managed,
**other_kw_args)
self.valid_dataset = dataset_cls(
self.valid_database_path, self.rng,
add_streamline_noise=False,
step_size=self.step_size,
neighborhood_dist_mm=self.neighborhood_dist_mm,
streamlines_cut_ratio=None,
add_previous_dir=self.add_previous_dir,
do_interpolation=self.worker_interpolation,
device=self.dataset_device,
taskman_managed=self.taskman_managed,
**other_kw_args)
# Other variables
self.sh_order = None # Will be set once the dataset is loaded
self.input_size = None # Will be set once the dataset is loaded
self.current_epoch = 0
self.experiment_dir = (self.name if self.name
else datetime.datetime.now().strftime(
"%Y_%m_%d_%H%M%S")) + '_' + type(self).__name__
self.optimizer = None # Will be defined later with ADAM
self.model = None # Will be defined by the main user
# Setup monitors
self.train_loss_monitor = ValueHistoryMonitor("Training loss")
self.valid_loss_monitor = ValueHistoryMonitor("Validation loss")
self.grad_norm_monitor = ValueHistoryMonitor("Grad Norm")  # ToDo: Does everyone use grad norm??
def train(self, **kwargs):
raise NotImplementedError
# ToDo: "train" depends on each user, but can we define
# sub-functions here that could encapsulate some sub-tasks that
# everybody uses? One day we could compare our codes.
def save(self):
raise NotImplementedError
def load_model(self, filepath, **kwargs):
raise NotImplementedError
def load_dataset(self):
"""
This method loads the data (streamlines and data volume).
"""
with Timer("Loading training dataset", newline=True, color='blue'):
self.train_dataset.load()
input_size = self._compute_input_size()
self.input_size = input_size
self.sh_order = self.train_dataset.sh_order
with Timer("Loading validation dataset", newline=True, color='blue'):
self.valid_dataset.load()
def _compute_input_size(self):
# Basic input size
expected_input_size = self.train_dataset.multisubject_manager.feature_size
# + neighbors
if self.neighborhood_dist_mm:
expected_input_size += \
self.nb_neighborhood_axes * \
self.train_dataset.multisubject_manager.feature_size
# + previous direction
if self.add_previous_dir:
expected_input_size += self.nb_degree_angles
return expected_input_size
def _should_quit(self, iter_timer):
# If a hangup signal was received and the time remaining is less than
# one epoch + 30 seconds, exit training.
return (self.hangup_time is not None and
time.time() + iter_timer.mean * 2.0 + 30 > self.hangup_time)
def _update_taskman_report(self, updates):
self.taskman_report.update(updates)
self.taskman_report['time'] = time.time()
print('!taskman' + json.dumps(self.taskman_report), flush=True)
```
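Since `DWIMLAbstractSequences` leaves `train`, `save` and `load_model` unimplemented, a user project is expected to subclass it. The sketch below only illustrates the shape of such a subclass; the class name, the GRU model and the file names are placeholders, not code from dwi_ml:
```python
import os

import torch


class MyProjectSequences(DWIMLAbstractSequences):
    """Hypothetical concrete experiment; a real project plugs in its own
    model and training loop here."""

    def build_model(self):
        # e.g. an RNN taking self.input_size features per time step
        self.model = torch.nn.GRU(self.input_size, 128).to(self.device)

    def train(self, **kwargs):
        self.optimizer = torch.optim.Adam(self.model.parameters())
        for epoch in range(self.n_epoch):
            self.current_epoch = epoch
            # ... iterate over self.train_dataset, call loss.backward(),
            # step the optimizer and record self.train_loss_monitor ...

    def save(self):
        os.makedirs(self.experiment_dir, exist_ok=True)
        torch.save(self.model.state_dict(),
                   os.path.join(self.experiment_dir, 'model.pt'))

    def load_model(self, filepath, **kwargs):
        self.model.load_state_dict(torch.load(filepath))
```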
#### File: dwi_ml/please_copy_and_adapt/train_model.py
```python
import argparse
import logging
from os import path
import yaml
from dwi_ml.training.checks_for_experiment_parameters import (
check_all_experiment_parameters, check_logging_level)
from dwi_ml.training.trainer_abstract import DWIMLAbstractSequences
def parse_args():
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument('parameters_filename',
help='Experiment configuration YAML filename. See '
'please_copy_and_adapt/training_parameters.yaml for an '
'example.')
arguments = p.parse_args()
return arguments
def main():
args = parse_args()
# Load parameters from yaml file
if not path.exists(args.parameters_filename):
raise FileNotFoundError("Yaml file not found: "
"{}".format(args.parameters_filename))
with open(args.parameters_filename) as f:
conf = yaml.safe_load(f.read())
# Initialize logger
logging_level = check_logging_level(conf['logging']['level'],
required=False)
logging.basicConfig(level=logging_level)
logging.info(conf)
# Perform checks
organized_args = check_all_experiment_parameters(conf)
# Instantiate your class
# (Change StreamlinesBasedModelAbstract for your class.)
# Then load dataset, build model, train and save
#experiment = DWIMLAbstractSequences(organized_args)
#experiment.load_dataset()
#experiment.build_model()
#experiment.train()
#experiment.save()
if __name__ == '__main__':
main()
``` |
{
"source": "jhlegarreta/nilearn",
"score": 2
} |
#### File: datasets/tests/test_atlas.py
```python
import os
import shutil
import itertools
import numpy as np
import nibabel
import pytest
from numpy.testing import assert_array_equal
from . import test_utils as tst
from nilearn._utils.compat import _basestring, _urllib
from nilearn.datasets import utils, atlas
from nilearn.image import get_data
@pytest.fixture()
def request_mocker():
""" Mocks URL calls for atlas fetchers during testing.
Tests the fetcher code without actually downloading the files.
"""
tst.setup_mock(utils, atlas)
yield
tst.teardown_mock(utils, atlas)
def test_get_dataset_dir(tmp_path):
# testing folder creation under different environments, enforcing
# a custom clean install
os.environ.pop('NILEARN_DATA', None)
os.environ.pop('NILEARN_SHARED_DATA', None)
expected_base_dir = os.path.expanduser('~/nilearn_data')
data_dir = utils._get_dataset_dir('test', verbose=0)
assert data_dir == os.path.join(expected_base_dir, 'test')
assert os.path.exists(data_dir)
shutil.rmtree(data_dir)
expected_base_dir = str(tmp_path / 'test_nilearn_data')
os.environ['NILEARN_DATA'] = expected_base_dir
data_dir = utils._get_dataset_dir('test', verbose=0)
assert data_dir == os.path.join(expected_base_dir, 'test')
assert os.path.exists(data_dir)
shutil.rmtree(data_dir)
expected_base_dir = str(tmp_path / 'nilearn_shared_data')
os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
data_dir = utils._get_dataset_dir('test', verbose=0)
assert data_dir == os.path.join(expected_base_dir, 'test')
assert os.path.exists(data_dir)
shutil.rmtree(data_dir)
expected_base_dir = str(tmp_path / 'env_data')
expected_dataset_dir = os.path.join(expected_base_dir, 'test')
data_dir = utils._get_dataset_dir(
'test', default_paths=[expected_dataset_dir], verbose=0)
assert data_dir == os.path.join(expected_base_dir, 'test')
assert os.path.exists(data_dir)
shutil.rmtree(data_dir)
no_write = str(tmp_path / 'no_write')
os.makedirs(no_write)
os.chmod(no_write, 0o400)
expected_base_dir = str(tmp_path / 'nilearn_shared_data')
os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
data_dir = utils._get_dataset_dir('test',
default_paths=[no_write],
verbose=0)
# Non writeable dir is returned because dataset may be in there.
assert data_dir == no_write
assert os.path.exists(data_dir)
# Set back write permissions in order to be able to remove the file
os.chmod(no_write, 0o600)
shutil.rmtree(data_dir)
# Verify exception for a path which exists and is a file
test_file = str(tmp_path / 'some_file')
with open(test_file, 'w') as out:
out.write('abcfeg')
with pytest.raises(OSError, match=('Nilearn tried to store the dataset '
'in the following directories, but')
):
utils._get_dataset_dir('test', test_file, verbose=0)
def test_downloader(tmp_path):
# Sandboxing test
# ===============
# When nilearn downloads a file, everything is first downloaded in a
# temporary directory (sandbox) and moved to the "real" data directory if
# all files are present. In case of error, the sandbox is deleted.
# To test this feature, we proceed as follows:
# - create the data dir with a file that has a specific content
# - try to download the dataset but make it fail on purpose (by requesting a
# file that is not in the archive)
# - check that the previously created file is untouched:
# - if sandboxing is faulty, the file would be replaced by the file from the
# archive
# - if sandboxing works, the file must be untouched.
local_url = "file:" + _urllib.request.pathname2url(
os.path.join(tst.datadir, "craddock_2011_parcellations.tar.gz"))
datasetdir = str(tmp_path / 'craddock_2012')
os.makedirs(datasetdir)
# Create a dummy file. If sandboxing is successful, it won't be overwritten
dummy = open(os.path.join(datasetdir, 'random_all.nii.gz'), 'w')
dummy.write('stuff')
dummy.close()
opts = {'uncompress': True}
files = [
('random_all.nii.gz', local_url, opts),
# The following file does not exist. It will cause the fetching
# procedure to abort.
('bald.nii.gz', local_url, opts)
]
pytest.raises(IOError, utils._fetch_files,
str(tmp_path / 'craddock_2012'), files,
verbose=0)
dummy = open(os.path.join(datasetdir, 'random_all.nii.gz'), 'r')
stuff = dummy.read(5)
dummy.close()
assert stuff == 'stuff'
# Downloading test
# ================
# Now, we use the regular downloading feature. This will overwrite the dummy
# file created before.
atlas.fetch_atlas_craddock_2012(data_dir=str(tmp_path), url=local_url)
dummy = open(os.path.join(datasetdir, 'random_all.nii.gz'), 'r')
stuff = dummy.read()
dummy.close()
assert stuff == ''
def test_fail_fetch_atlas_harvard_oxford(tmp_path):
# specify non-existing atlas item
with pytest.raises(ValueError, match='Invalid atlas name'):
atlas.fetch_atlas_harvard_oxford('not_inside')
# specify existing atlas item
target_atlas = 'cort-maxprob-thr0-1mm'
target_atlas_fname = 'HarvardOxford-' + target_atlas + '.nii.gz'
ho_dir = str(tmp_path / 'fsl' / 'data' / 'atlases')
os.makedirs(ho_dir)
nifti_dir = os.path.join(ho_dir, 'HarvardOxford')
os.makedirs(nifti_dir)
target_atlas_nii = os.path.join(nifti_dir, target_atlas_fname)
# Create false atlas
atlas_data = np.zeros((10, 10, 10), dtype=int)
# Create an interhemispheric map
atlas_data[:, :2, :] = 1
# Create a left map
atlas_data[:5, 3:5, :] = 2
# Create a right map, with one voxel on the left side
atlas_data[5:, 7:9, :] = 3
atlas_data[4, 7, 0] = 3
nibabel.Nifti1Image(atlas_data, np.eye(4) * 3).to_filename(
target_atlas_nii)
dummy = open(os.path.join(ho_dir, 'HarvardOxford-Cortical.xml'), 'w')
dummy.write("<?xml version='1.0' encoding='us-ascii'?>\n"
"<data>\n"
'<label index="0" x="48" y="94" z="35">R1</label>\n'
'<label index="1" x="25" y="70" z="32">R2</label>\n'
'<label index="2" x="33" y="73" z="63">R3</label>\n'
"</data>")
dummy.close()
# When symmetric_split=False (the default), the atlas fetcher should
# return maps as a string and n_labels=4 including background, since we rely
# on the xml file to retrieve labels.
ho_wo_symm = atlas.fetch_atlas_harvard_oxford(target_atlas,
data_dir=str(tmp_path))
assert isinstance(ho_wo_symm.maps, _basestring)
assert isinstance(ho_wo_symm.labels, list)
assert ho_wo_symm.labels[0] == "Background"
assert ho_wo_symm.labels[1] == "R1"
assert ho_wo_symm.labels[2] == "R2"
assert ho_wo_symm.labels[3] == "R3"
# This section tests the lateralized version, i.e. symmetric_split=True.
# Dummy xml file for lateralized control of cortical atlas images
# shipped with FSL 5.0. In this version, atlases are already lateralized
# for cortical-type atlases denoted with maxprob, but not for full prob
# nor for subcortical atlases.
# So, we test the fetcher with symmetric_split=True by creating new
# dummy local files, fetching them and testing the output variables
# accordingly.
dummy2 = open(os.path.join(ho_dir, 'HarvardOxford-Cortical-Lateralized.xml'), 'w')
dummy2.write("<?xml version='1.0' encoding='us-ascii'?>\n"
"<data>\n"
'<label index="0" x="63" y="86" z="49">Left R1</label>\n'
'<label index="1" x="21" y="86" z="33">Right R1</label>\n'
'<label index="2" x="64" y="69" z="32">Left R2</label>\n'
'<label index="3" x="26" y="70" z="32">Right R2</label>\n'
'<label index="4" x="47" y="75" z="66">Left R3</label>\n'
'<label index="5" x="43" y="80" z="61">Right R3</label>\n'
"</data>")
dummy2.close()
# Here, with symmetric_split=True, atlas maps are returned as a nibabel Nifti
# image rather than a string. With symmetric split, the number of labels
# should be larger than without split, and labels should contain Left and
# Right tags.
# Create dummy image files too with cortl specified for symmetric split.
split_atlas_fname = 'HarvardOxford-' + 'cortl-maxprob-thr0-1mm' + '.nii.gz'
nifti_target_split = os.path.join(nifti_dir, split_atlas_fname)
nibabel.Nifti1Image(atlas_data, np.eye(4) * 3).to_filename(
nifti_target_split)
ho = atlas.fetch_atlas_harvard_oxford(target_atlas,
data_dir=str(tmp_path),
symmetric_split=True)
assert isinstance(ho.maps, nibabel.Nifti1Image)
assert isinstance(ho.labels, list)
assert len(ho.labels) == 7
assert ho.labels[0] == "Background"
assert ho.labels[1] == "Left R1"
assert ho.labels[2] == "Right R1"
assert ho.labels[3] == "Left R2"
assert ho.labels[4] == "Right R2"
assert ho.labels[5] == "Left R3"
assert ho.labels[6] == "Right R3"
def test_fetch_atlas_craddock_2012(tmp_path, request_mocker):
bunch = atlas.fetch_atlas_craddock_2012(data_dir=str(tmp_path),
verbose=0)
keys = ("scorr_mean", "tcorr_mean",
"scorr_2level", "tcorr_2level",
"random")
filenames = [
"scorr05_mean_all.nii.gz",
"tcorr05_mean_all.nii.gz",
"scorr05_2level_all.nii.gz",
"tcorr05_2level_all.nii.gz",
"random_all.nii.gz",
]
assert len(tst.mock_url_request.urls) == 1
for key, fn in zip(keys, filenames):
assert bunch[key] == str(tmp_path / 'craddock_2012' / fn)
assert bunch.description != ''
def test_fetch_atlas_smith_2009(tmp_path, request_mocker):
bunch = atlas.fetch_atlas_smith_2009(data_dir=str(tmp_path), verbose=0)
keys = ("rsn20", "rsn10", "rsn70",
"bm20", "bm10", "bm70")
filenames = [
"rsn20.nii.gz",
"PNAS_Smith09_rsn10.nii.gz",
"rsn70.nii.gz",
"bm20.nii.gz",
"PNAS_Smith09_bm10.nii.gz",
"bm70.nii.gz",
]
assert len(tst.mock_url_request.urls) == 6
for key, fn in zip(keys, filenames):
assert bunch[key] == str(tmp_path / 'smith_2009' / fn)
assert bunch.description != ''
def test_fetch_coords_power_2011():
bunch = atlas.fetch_coords_power_2011()
assert len(bunch.rois) == 264
assert bunch.description != ''
def test_fetch_coords_seitzman_2018():
bunch = atlas.fetch_coords_seitzman_2018()
assert len(bunch.rois) == 300
assert len(bunch.radius) == 300
assert len(bunch.networks) == 300
assert len(bunch.regions) == 300
assert len(np.unique(bunch.networks)) == 14
assert len(np.unique(bunch.regions)) == 8
np.testing.assert_array_equal(bunch.networks, np.sort(bunch.networks))
assert bunch.description != ''
assert bunch.regions[0] == "cortexL"
bunch = atlas.fetch_coords_seitzman_2018(ordered_regions=False)
assert np.any(bunch.networks != np.sort(bunch.networks))
def test_fetch_atlas_destrieux_2009(tmp_path, request_mocker):
datadir = str(tmp_path / 'destrieux_2009')
os.mkdir(datadir)
dummy = open(os.path.join(
datadir, 'destrieux2009_rois_labels_lateralized.csv'), 'w')
dummy.write("name,index")
dummy.close()
bunch = atlas.fetch_atlas_destrieux_2009(data_dir=str(tmp_path),
verbose=0)
assert len(tst.mock_url_request.urls) == 1
assert bunch['maps'] == str(tmp_path / 'destrieux_2009'
/ 'destrieux2009_rois_lateralized.nii.gz')
dummy = open(os.path.join(
datadir, 'destrieux2009_rois_labels.csv'), 'w')
dummy.write("name,index")
dummy.close()
bunch = atlas.fetch_atlas_destrieux_2009(
lateralized=False, data_dir=str(tmp_path), verbose=0)
assert len(tst.mock_url_request.urls) == 1
assert bunch['maps'] == os.path.join(
datadir, 'destrieux2009_rois.nii.gz')
def test_fetch_atlas_msdl(tmp_path, request_mocker):
datadir = str(tmp_path / 'msdl_atlas')
os.mkdir(datadir)
os.mkdir(os.path.join(datadir, 'MSDL_rois'))
data_dir = os.path.join(datadir, 'MSDL_rois', 'msdl_rois_labels.csv')
csv = np.rec.array([(1.5, 1.5, 1.5, 'Aud', 'Aud'),
(1.2, 1.3, 1.4, 'DMN', 'DMN')],
dtype=[('x', '<f8'), ('y', '<f8'),
('z', '<f8'), ('name', 'S12'),
('net_name', 'S19')])
with open(data_dir, 'wb') as csv_file:
header = '{0}\n'.format(','.join(csv.dtype.names))
csv_file.write(header.encode())
np.savetxt(csv_file, csv, delimiter=',', fmt='%s')
dataset = atlas.fetch_atlas_msdl(data_dir=str(tmp_path), verbose=0)
assert isinstance(dataset.labels, list)
assert isinstance(dataset.region_coords, list)
assert isinstance(dataset.networks, list)
assert isinstance(dataset.maps, _basestring)
assert len(tst.mock_url_request.urls) == 1
assert dataset.description != ''
def test_fetch_atlas_yeo_2011(tmp_path, request_mocker):
dataset = atlas.fetch_atlas_yeo_2011(data_dir=str(tmp_path), verbose=0)
assert isinstance(dataset.anat, _basestring)
assert isinstance(dataset.colors_17, _basestring)
assert isinstance(dataset.colors_7, _basestring)
assert isinstance(dataset.thick_17, _basestring)
assert isinstance(dataset.thick_7, _basestring)
assert isinstance(dataset.thin_17, _basestring)
assert isinstance(dataset.thin_7, _basestring)
assert len(tst.mock_url_request.urls) == 1
assert dataset.description != ''
def test_fetch_atlas_aal(tmp_path, request_mocker):
ho_dir = str(tmp_path / 'aal_SPM12' / 'aal' / 'atlas')
os.makedirs(ho_dir)
with open(os.path.join(ho_dir, 'AAL.xml'), 'w') as xml_file:
xml_file.write("<?xml version='1.0' encoding='us-ascii'?> "
"<metadata>"
"</metadata>")
dataset = atlas.fetch_atlas_aal(data_dir=str(tmp_path), verbose=0)
assert isinstance(dataset.maps, _basestring)
assert isinstance(dataset.labels, list)
assert isinstance(dataset.indices, list)
assert len(tst.mock_url_request.urls) == 1
with pytest.raises(ValueError,
match='The version of AAL requested "FLS33"'
):
atlas.fetch_atlas_aal(version="FLS33",
data_dir=str(tmp_path),
verbose=0)
assert dataset.description != ''
def test_fetch_atlas_basc_multiscale_2015(tmp_path, request_mocker):
# default version='sym',
data_sym = atlas.fetch_atlas_basc_multiscale_2015(data_dir=str(tmp_path),
verbose=0)
# version='asym'
data_asym = atlas.fetch_atlas_basc_multiscale_2015(version='asym',
verbose=0,
data_dir=str(tmp_path))
keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064',
'scale122', 'scale197', 'scale325', 'scale444']
dataset_name = 'basc_multiscale_2015'
name_sym = 'template_cambridge_basc_multiscale_nii_sym'
basenames_sym = ['template_cambridge_basc_multiscale_sym_' +
key + '.nii.gz' for key in keys]
for key, basename_sym in zip(keys, basenames_sym):
assert data_sym[key] == str(tmp_path / dataset_name / name_sym
/ basename_sym)
name_asym = 'template_cambridge_basc_multiscale_nii_asym'
basenames_asym = ['template_cambridge_basc_multiscale_asym_' +
key + '.nii.gz' for key in keys]
for key, basename_asym in zip(keys, basenames_asym):
assert data_asym[key] == str(tmp_path / dataset_name / name_asym
/ basename_asym)
assert len(data_sym) == 10
with pytest.raises(
ValueError,
match='The version of Brain parcellations requested "aym"'):
atlas.fetch_atlas_basc_multiscale_2015(version="aym",
data_dir=str(tmp_path),
verbose=0)
assert len(tst.mock_url_request.urls) == 2
assert data_sym.description != ''
assert data_asym.description != ''
def test_fetch_coords_dosenbach_2010():
bunch = atlas.fetch_coords_dosenbach_2010()
assert len(bunch.rois) == 160
assert len(bunch.labels) == 160
assert len(np.unique(bunch.networks)) == 6
assert bunch.description != ''
np.testing.assert_array_equal(bunch.networks, np.sort(bunch.networks))
bunch = atlas.fetch_coords_dosenbach_2010(ordered_regions=False)
assert np.any(bunch.networks != np.sort(bunch.networks))
def test_fetch_atlas_allen_2011(tmp_path, request_mocker):
bunch = atlas.fetch_atlas_allen_2011(data_dir=str(tmp_path), verbose=0)
keys = ("maps",
"rsn28",
"comps")
filenames = ["ALL_HC_unthresholded_tmaps.nii.gz",
"RSN_HC_unthresholded_tmaps.nii.gz",
"rest_hcp_agg__component_ica_.nii.gz"]
assert len(tst.mock_url_request.urls) == 1
for key, fn in zip(keys, filenames):
assert bunch[key] == str(tmp_path / 'allen_rsn_2011'
/ 'allen_rsn_2011' / fn)
assert bunch.description != ''
def test_fetch_atlas_surf_destrieux(tmp_path, request_mocker, verbose=0):
data_dir = str(tmp_path / 'destrieux_surface')
os.mkdir(data_dir)
# Create mock annots
for hemi in ('left', 'right'):
nibabel.freesurfer.write_annot(
os.path.join(data_dir,
'%s.aparc.a2009s.annot' % hemi),
np.arange(4), np.zeros((4, 5)), 5 * ['a'],
)
bunch = atlas.fetch_atlas_surf_destrieux(data_dir=str(tmp_path), verbose=0)
# Our mock annots have 4 labels
assert len(bunch.labels) == 4
assert bunch.map_left.shape == (4, )
assert bunch.map_right.shape == (4, )
assert bunch.description != ''
def _get_small_fake_talairach():
labels = ['*', 'b', 'a']
all_labels = itertools.product(*(labels,) * 5)
labels_txt = '\n'.join(map('.'.join, all_labels))
extensions = nibabel.nifti1.Nifti1Extensions([
nibabel.nifti1.Nifti1Extension(
'afni', labels_txt.encode('utf-8'))
])
img = nibabel.Nifti1Image(
np.arange(243).reshape((3, 9, 9)),
np.eye(4), nibabel.Nifti1Header(extensions=extensions))
return img, all_labels
def _mock_talairach_fetch_files(data_dir, *args, **kwargs):
img, all_labels = _get_small_fake_talairach()
file_name = os.path.join(data_dir, 'talairach.nii')
img.to_filename(file_name)
return [file_name]
def test_fetch_atlas_talairach(tmp_path, request_mocker):
atlas._fetch_files = _mock_talairach_fetch_files
level_values = np.ones((81, 3)) * [0, 1, 2]
talairach = atlas.fetch_atlas_talairach('hemisphere',
data_dir=str(tmp_path))
assert_array_equal(get_data(talairach.maps).ravel(),
level_values.T.ravel())
assert_array_equal(talairach.labels, ['Background', 'b', 'a'])
talairach = atlas.fetch_atlas_talairach('ba', data_dir=str(tmp_path))
assert_array_equal(get_data(talairach.maps).ravel(),
level_values.ravel())
pytest.raises(ValueError, atlas.fetch_atlas_talairach, 'bad_level')
def test_fetch_atlas_pauli_2017(tmp_path):
data_dir = str(tmp_path / 'pauli_2017')
data = atlas.fetch_atlas_pauli_2017('det', data_dir)
assert len(data.labels) == 16
values = get_data(nibabel.load(data.maps))
assert len(np.unique(values)) == 17
data = atlas.fetch_atlas_pauli_2017('prob', data_dir)
assert nibabel.load(data.maps).shape[-1] == 16
with pytest.raises(NotImplementedError):
atlas.fetch_atlas_pauli_2017('junk for testing', data_dir)
def test_fetch_atlas_schaefer_2018(tmp_path):
valid_n_rois = list(range(100, 1100, 100))
valid_yeo_networks = [7, 17]
valid_resolution_mm = [1, 2]
pytest.raises(ValueError, atlas.fetch_atlas_schaefer_2018, n_rois=44)
pytest.raises(ValueError, atlas.fetch_atlas_schaefer_2018, yeo_networks=10)
pytest.raises(ValueError, atlas.fetch_atlas_schaefer_2018, resolution_mm=3)
for n_rois, yeo_networks, resolution_mm in \
itertools.product(valid_n_rois, valid_yeo_networks,
valid_resolution_mm):
data = atlas.fetch_atlas_schaefer_2018(n_rois=n_rois,
yeo_networks=yeo_networks,
resolution_mm=resolution_mm,
data_dir=str(tmp_path),
verbose=0)
assert data.description != ''
assert isinstance(data.maps, _basestring)
assert isinstance(data.labels, np.ndarray)
assert len(data.labels) == n_rois
assert data.labels[0].astype(str).startswith("{}Networks".
format(yeo_networks))
img = nibabel.load(data.maps)
assert img.header.get_zooms()[0] == resolution_mm
assert np.array_equal(np.unique(img.dataobj),
np.arange(n_rois+1))
``` |
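Several of these tests share the same recipe: write a tiny fake atlas image (and, where needed, a matching labels file) into a temporary directory, then point the fetcher's `data_dir` at it. The stripped-down pattern, with arbitrary file names and shapes and no specific fetcher, looks like:
```python
import os
import tempfile

import nibabel
import numpy as np

tmp = tempfile.mkdtemp()
atlas_data = np.zeros((10, 10, 10), dtype=int)
atlas_data[:5] = 1   # fake "left" region
atlas_data[5:] = 2   # fake "right" region
fake_path = os.path.join(tmp, 'fake_atlas.nii.gz')
nibabel.Nifti1Image(atlas_data, np.eye(4)).to_filename(fake_path)

img = nibabel.load(fake_path)
print(sorted(np.unique(np.asanyarray(img.dataobj))))  # [0, 1, 2]
```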
{
"source": "jhlegarreta/pybids",
"score": 2
} |
#### File: analysis/tests/test_analysis.py
```python
from os.path import join
from bids.analysis import Analysis
from bids.analysis.analysis import ContrastInfo, DesignMatrixInfo
from bids.layout import BIDSLayout
from bids.tests import get_test_data_path
import numpy as np
import pytest
@pytest.fixture
def analysis():
layout_path = join(get_test_data_path(), 'ds005')
layout = BIDSLayout(layout_path)
json_file = join(layout_path, 'models', 'ds-005_type-test_model.json')
analysis = Analysis(layout, json_file)
analysis.setup(scan_length=480, subject=['01', '02'])
return analysis
def test_design_matrix_info(analysis):
result = analysis['run'].get_design_matrix(subject=['01', '02', '03'])
for dmi in result:
assert isinstance(dmi, DesignMatrixInfo)
assert dmi._fields == ('sparse', 'dense', 'entities')
assert hasattr(dmi.sparse, 'shape')
assert dmi.dense is None
assert isinstance(dmi.entities, dict)
def test_get_design_matrix_arguments(analysis):
kwargs = dict(run=1, subject='01', sparse=True)
result = analysis['run'].get_design_matrix(**kwargs)
result = result[0]
assert result.sparse.shape == (172, 9)
assert result.dense is None
kwargs = dict(run=1, subject='01', mode='dense', force=False)
result = analysis['run'].get_design_matrix(**kwargs)[0]
assert result.sparse is None
assert result.dense is None
kwargs = dict(run=1, subject='01', mode='dense', force=True,
sampling_rate='highest')
result = analysis['run'].get_design_matrix(**kwargs)[0]
assert result.sparse is None
assert result.dense.shape == (4800, 10)
kwargs = dict(run=1, subject='01', mode='dense', force=True,
sampling_rate='TR')
result = analysis['run'].get_design_matrix(**kwargs)[0]
assert result.sparse is None
assert result.dense.shape == (240, 10)
kwargs = dict(run=1, subject='01', mode='dense', force=True,
sampling_rate=0.5)
result = analysis['run'].get_design_matrix(**kwargs)[0]
assert result.sparse is None
assert result.dense.shape == (240, 10)
# format='long' should be ignored for dense output
kwargs = dict(run=1, subject='01', mode='dense', force=True,
format='long', entities=False)
result = analysis['run'].get_design_matrix(**kwargs)[0]
assert result.sparse is None
assert result.dense.shape == (240, 1)
kwargs = dict(run=1, subject='01', mode='sparse', format='wide',
entities=False)
result = analysis['run'].get_design_matrix(**kwargs)[0]
assert result.dense is None
assert result.sparse.shape == (86, 4)
def test_first_level_sparse_design_matrix(analysis):
result = analysis['run'].get_design_matrix(subject=['01'])
assert len(result) == 3
df = result[0].sparse
assert df.shape == (172, 9)
assert df['condition'].nunique() == 2
assert set(result[0][0].columns) == {'amplitude', 'onset', 'duration',
'condition', 'subject', 'run',
'task', 'datatype', 'suffix'}
def test_post_first_level_sparse_design_matrix(analysis):
result = analysis['session'].get_design_matrix(entities=False)
assert len(result) == 2
assert len(result[0]) == 3
assert result[0].sparse.shape == (9, 2)
assert result[0].entities == {
'subject': '01',
'task': 'mixedgamblestask',
'datatype': 'func',
'suffix': 'bold'}
# Participant level and also check integer-based indexing
result = analysis['participant'].get_design_matrix()
assert len(result) == 2
assert analysis[2].name == 'participant'
# Dataset level
result = analysis['group'].get_design_matrix()
assert len(result) == 1
data = result[0].sparse
assert len(data) == 10
assert data['subject'].nunique() == 2
# Make sure columns from different levels exist
varset = {'sex', 'age', 'RT-trial_type'}
assert not (varset - set(data['condition'].unique()))
# Calling an invalid level name should raise an exception
with pytest.raises(KeyError):
result = analysis['nonexistent_name'].get_design_matrix()
def test_contrast_info(analysis):
contrast_lists = analysis['run'].get_contrasts(subject='01')
assert len(contrast_lists) == 3
for cl in contrast_lists:
assert len(cl) == 3
cl = [c for c in cl if c.type == 't']
assert set([c.name for c in cl]) == {'RT', 'RT-trial_type'}
assert set([c.type for c in cl]) == {'t'}
assert cl[0].weights.columns.tolist() == ['RT', 'trial_type']
assert cl[1].weights.columns.tolist() == ['RT']
assert np.array_equal(cl[0].weights.values, np.array([[1, -1]]))
assert np.array_equal(cl[1].weights.values, np.array([[1]]))
assert isinstance(cl[0], ContrastInfo)
assert cl[0]._fields == ('name', 'weights', 'type', 'entities')
def test_contrast_info_with_specified_variables(analysis):
varlist = ['RT', 'dummy']
contrast_lists = analysis['run'].get_contrasts(subject='01',
variables=varlist)
assert len(contrast_lists) == 3
for cl in contrast_lists:
assert len(cl) == 3
cl = [c for c in cl if c.type == 't']
assert set([c.name for c in cl]) == {'RT', 'RT-trial_type'}
assert set([c.type for c in cl]) == {'t'}
for c in cl:
assert c.weights.columns.tolist() == ['RT', 'dummy']
assert np.array_equal(c.weights.values, np.array([[1, 0]]))
assert isinstance(cl[0], ContrastInfo)
assert cl[0]._fields == ('name', 'weights', 'type', 'entities')
def test_contrast_info_F_contrast(analysis):
contrast_lists = analysis['run'].get_contrasts(subject='01',
names=["crummy-F"])
assert len(contrast_lists) == 3
for cl in contrast_lists:
assert len(cl) == 1
c = cl[0]
assert c.name == "crummy-F"
assert c.type == 'F'
assert c.weights.columns.tolist() == ['RT', 'trial_type']
assert np.array_equal(c.weights.values, np.array([[1, 0], [0, 1]]))
assert isinstance(c, ContrastInfo)
assert c._fields == ('name', 'weights', 'type', 'entities')
```
#### File: layout/tests/test_models.py
```python
import sys
import os
import pytest
import bids
import copy
import json
from pathlib import Path
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import numpy as np
from bids.layout.models import (BIDSFile, Entity, Tag, Base, Config,
FileAssociation, BIDSImageFile)
from bids.layout import BIDSLayout
from bids.tests import get_test_data_path
from bids.external import six
def create_session():
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
return Session()
@pytest.fixture
def sample_bidsfile(tmpdir):
testfile = 'sub-03_ses-2_task-rest_acq-fullbrain_run-2_bold.nii.gz'
fn = tmpdir.mkdir("tmp").join(testfile)
fn.write('###')
return BIDSFile(os.path.join(str(fn)))
@pytest.fixture(scope='module')
def subject_entity():
return Entity('subject', r"[/\\\\]sub-([a-zA-Z0-9]+)", mandatory=False,
directory="{subject}", dtype='str')
def test_entity_initialization():
e = Entity('avaricious', r'aardvark-(\d+)')
assert e.name == 'avaricious'
assert e.pattern == r'aardvark-(\d+)'
assert not e.mandatory
assert e.directory is None
assert e.files == {}
def test_entity_init_all_args(subject_entity):
ent = subject_entity
assert ent.name == 'subject'
assert ent.pattern == r"[/\\\\]sub-([a-zA-Z0-9]+)"
assert ent.mandatory == False
assert ent.directory == "{subject}"
def test_entity_init_with_bad_dtype():
with pytest.raises(ValueError) as exc:
ent = Entity('test', dtype='superfloat')
msg = exc.value.message
assert msg.startswith("Invalid dtype")
def test_entity_matches(tmpdir):
filename = "aardvark-4-reporting-for-duty.txt"
tmpdir.mkdir("tmp").join(filename).write("###")
f = BIDSFile(os.path.join(str(tmpdir), filename))
e = Entity('avaricious', r'aardvark-(\d+)')
result = e.match_file(f)
assert result == '4'
def test_entity_deepcopy(subject_entity):
e = subject_entity
clone = copy.deepcopy(subject_entity)
for attr in ['name', 'pattern', 'mandatory', 'directory', 'regex']:
assert getattr(e, attr) == getattr(clone, attr)
assert e != clone
def test_file_associations():
session = create_session()
img = BIDSFile('sub-03/func/sub-03_task-rest_run-2_bold.nii.gz')
md1 = BIDSFile('sub-03/func/sub-03_task-rest_run-2_bold.json')
md2 = BIDSFile('task-rest_run-2_bold.json')
assocs = [
FileAssociation(src=md1.path, dst=img.path, kind="MetadataFor"),
FileAssociation(src=img.path, dst=md1.path, kind="MetadataIn"),
FileAssociation(src=md1.path, dst=md2.path, kind="Child"),
FileAssociation(src=md2.path, dst=md1.path, kind="Parent"),
FileAssociation(src=md2.path, dst=img.path, kind="Informs")
]
session.add_all([img, md1, md2] + assocs)
session.commit()
assert img._associations == [md1, md2] == img.get_associations()
assert md2._associations == [md1]
assert img.get_associations(kind='MetadataFor') == []
assert img.get_associations(kind='MetadataIn') == [md1]
results = img.get_associations(kind='MetadataIn', include_parents=True)
assert set(results) == {md1, md2}
def test_tag_init(sample_bidsfile, subject_entity):
f, e = sample_bidsfile, subject_entity
tag = Tag(f, e, 'zzz')
rep = str(tag)
assert rep.startswith("<Tag file:") and f.path in rep and 'zzz' in rep
def test_tag_dtype(sample_bidsfile, subject_entity):
f, e = sample_bidsfile, subject_entity
# Various ways of initializing--should all give same result
tags = [
Tag(f, e, 4, int),
Tag(f, e, '4', 'int'),
Tag(f, e, '4', int),
Tag(f, e, 4),
Tag(file=f, entity=e, dtype=int, value='4')
]
assert all([t.dtype == int for t in tags])
def test_entity_add_file(sample_bidsfile):
session = create_session()
bf = sample_bidsfile
e = Entity('prop', r'-(\d+)')
t = Tag(file=bf, entity=e, value=4)
session.add_all([t, e, bf])
session.commit()
assert e.files[bf.path] == 4
def test_config_init_with_args():
session = create_session()
ents = [
{
"name": "task",
"pattern": "[_/\\\\]task-([a-zA-Z0-9]+)"
},
{
"name": "acquisition",
"pattern": "[_/\\\\]acq-([a-zA-Z0-9]+)"
}
]
patterns = ['this_will_never_match_anything', 'and_neither_will_this']
config = Config('custom', entities=ents, default_path_patterns=patterns)
assert config.name == 'custom'
target = {'task', 'acquisition'}
assert set(ent.name for ent in config.entities.values()) == target
assert config.default_path_patterns == patterns
def test_load_existing_config():
session = create_session()
first = Config('dummy')
session.add(first)
session.commit()
second = Config.load({"name": "dummy"}, session=session)
assert first == second
session.add(second)
session.commit()
from sqlalchemy.orm.exc import FlushError
with pytest.raises(FlushError):
second = Config.load({"name": "dummy"})
session.add(second)
session.commit()
def test_bidsfile_get_df_from_tsv_gz(layout_synthetic):
bf = layout_synthetic.get(suffix='physio', extension='tsv.gz')[0]
# With onsets
df1 = bf.get_df()
df2 = bf.get_df(include_timing=True)
assert df1.equals(df2)
assert df1.shape == (1599, 3)
assert set(df1.columns) == {'onset', 'respiratory', 'cardiac'}
assert df1.iloc[0, 0] == 0.
assert df1.iloc[1, 0] - df1.iloc[0, 0] == 0.1
# With onsets and time shifted
df3 = bf.get_df(adjust_onset=True)
assert df1.iloc[:, 1:].equals(df3.iloc[:, 1:])
assert np.allclose(df3.iloc[:,0], df1.iloc[:, 0] + 22.8)
def test_bidsdatafile_enforces_dtype(layout_synthetic):
bf = layout_synthetic.get(suffix='participants', extension='tsv')[0]
df = bf.get_df(enforce_dtypes=False)
assert df.loc[:, 'subject_id'].dtype == int
assert df.loc[:, 'subject_id'][0] == 1
df = bf.get_df(enforce_dtypes=True)
assert df.loc[:, 'subject_id'].dtype == 'O'
assert df.loc[:, 'subject_id'][0] == '001'
assert df.loc[:, 'subject_id'][1] == '2'
def test_bidsimagefile_get_image():
path = "synthetic/sub-01/ses-01/func/sub-01_ses-01_task-nback_run-01_bold.nii.gz"
path = path.split('/')
path = os.path.join(get_test_data_path(), *path)
bf = BIDSImageFile(path)
assert bf.get_image() is not None
assert bf.get_image().shape == (64, 64, 64, 64)
def test_bidsjsonfile(layout_synthetic):
jf = layout_synthetic.get(suffix='bold', extension='json')[0]
d = jf.get_dict()
assert isinstance(d, dict)
assert d['RepetitionTime'] == 2.5
j = jf.get_json()
assert isinstance(j, six.string_types)
assert 'RepetitionTime' in j
assert json.loads(j) == d
def test_bidsfile_get_metadata(layout_synthetic):
bf = layout_synthetic.get(suffix='physio', extension='tsv.gz')[0]
md = bf.get_metadata()
assert set(md.keys()) == {'Columns', 'SamplingFrequency', 'StartTime'}
def test_bidsfile_get_entities(layout_synthetic):
md_ents = {'Columns', 'SamplingFrequency', 'StartTime'}
file_ents = {'datatype', 'extension', 'run', 'session', 'subject',
'suffix', 'task'}
bf = layout_synthetic.get(suffix='physio', extension='tsv.gz')[10]
# metadata=True and values='tags'; this is equivalent to get_metadata()
md = bf.get_entities(metadata=True)
assert md == bf.get_metadata()
assert set(md.keys()) == md_ents
assert md['StartTime'] == 22.8
# metadata=True and values='objects'
md = bf.get_entities(metadata=True, values='obj')
assert set(md.keys()) == md_ents
assert all([isinstance(v, Entity) for v in md.values()])
# metadata=False and values='tags'
md = bf.get_entities(metadata=False, values='tags')
assert set(md.keys()) == file_ents
assert md['session'] == '02'
assert md['task'] == 'nback'
# metadata=False and values='obj'
md = bf.get_entities(metadata=False, values='objects')
assert set(md.keys()) == file_ents
assert all([isinstance(v, Entity) for v in md.values()])
# No metadata constraint
md = bf.get_entities(metadata='all')
md2 = bf.get_entities(metadata=None)
assert md == md2
assert set(md.keys()) == md_ents | file_ents
@pytest.mark.xfail(sys.version_info < (3, 6), reason="os.PathLike introduced in Python 3.6")
def test_bidsfile_fspath(sample_bidsfile):
bf = sample_bidsfile
bf_path = Path(bf)
assert bf_path == Path(bf.path)
assert bf_path.read_text() == '###'
```
#### File: layout/tests/test_writing.py
```python
import pytest
import os
import shutil
from os.path import join, exists, islink, dirname
from bids.layout.writing import build_path
from bids.tests import get_test_data_path
from bids import BIDSLayout
from bids.layout.models import BIDSFile, Entity, Tag, Base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
@pytest.fixture
def writable_file(tmpdir):
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
testfile = 'sub-03_ses-2_task-rest_acq-fullbrain_run-2_bold.nii.gz'
fn = tmpdir.mkdir("tmp").join(testfile)
fn.write('###')
bf = BIDSFile(os.path.join(str(fn)))
tag_dict = {
'task': 'rest',
'run': 2,
'subject': '3'
}
ents = {name: Entity(name) for name in tag_dict.keys()}
tags = [Tag(bf, ents[k], value=v)
for k, v in tag_dict.items()]
session.add_all(list(ents.values()) + tags + [bf])
session.commit()
return bf
@pytest.fixture(scope='module')
def tmp_bids(tmpdir_factory):
tmp_bids = tmpdir_factory.mktemp("tmp_bids")
yield tmp_bids
shutil.rmtree(str(tmp_bids))
# Ugly hack
try:
shutil.rmtree(join(get_test_data_path(), '7t_trt', 'sub-Bob'))
except:
pass
@pytest.fixture(scope='module')
def layout(tmp_bids):
orig_dir = join(get_test_data_path(), '7t_trt')
# return BIDSLayout(data_dir, absolute_paths=False)
new_dir = join(str(tmp_bids), 'bids')
os.symlink(orig_dir, new_dir)
return BIDSLayout(new_dir)
class TestWritableFile:
def test_build_path(self, writable_file):
# Single simple pattern
with pytest.raises(TypeError):
build_path(writable_file.entities)
pat = join(writable_file.dirname,
'{task}/sub-{subject}/run-{run}.nii.gz')
target = join(writable_file.dirname, 'rest/sub-3/run-2.nii.gz')
assert build_path(writable_file.entities, pat) == target
# Multiple simple patterns
pats = ['{session}/{task}/r-{run}.nii.gz',
't-{task}/{subject}-{run}.nii.gz',
'{subject}/{task}.nii.gz']
pats = [join(writable_file.dirname, p) for p in pats]
target = join(writable_file.dirname, 't-rest/3-2.nii.gz')
assert build_path(writable_file.entities, pats) == target
# Pattern with optional entity
pats = ['[{session}/]{task}/r-{run}.nii.gz',
't-{task}/{subject}-{run}.nii.gz']
pats = [join(writable_file.dirname, p) for p in pats]
target = join(writable_file.dirname, 'rest/r-2.nii.gz')
assert build_path(writable_file.entities, pats) == target
# Pattern with conditional values
pats = ['{task<func|acq>}/r-{run}.nii.gz',
't-{task}/{subject}-{run}.nii.gz']
pats = [join(writable_file.dirname, p) for p in pats]
target = join(writable_file.dirname, 't-rest/3-2.nii.gz')
assert build_path(writable_file.entities, pats) == target
# Pattern with valid conditional values
pats = ['{task<func|rest>}/r-{run}.nii.gz',
't-{task}/{subject}-{run}.nii.gz']
pats = [join(writable_file.dirname, p) for p in pats]
target = join(writable_file.dirname, 'rest/r-2.nii.gz')
assert build_path(writable_file.entities, pats) == target
# Pattern with optional entity with conditional values
pats = ['[{task<func|acq>}/]r-{run}.nii.gz',
't-{task}/{subject}-{run}.nii.gz']
pats = [join(writable_file.dirname, p) for p in pats]
target = join(writable_file.dirname, 'r-2.nii.gz')
assert build_path(writable_file.entities, pats) == target
# Pattern with default value
pats = ['ses-{session|A}/r-{run}.nii.gz']
assert build_path({'run': 3}, pats) == 'ses-A/r-3.nii.gz'
# Pattern with both valid and default values
pats = ['ses-{session<A|B|C>|D}/r-{run}.nii.gz']
assert build_path({'session': 1, 'run': 3}, pats) == 'ses-D/r-3.nii.gz'
pats = ['ses-{session<A|B|C>|D}/r-{run}.nii.gz']
assert build_path({'session': 'B', 'run': 3}, pats) == 'ses-B/r-3.nii.gz'
def test_strict_build_path(self):
# Test with strict matching--should fail
pats = ['[{session}/]{task}/r-{run}.nii.gz',
't-{task}/{subject}-{run}.nii.gz']
entities = {'subject': 1, 'task': "A", 'run': 2}
assert build_path(entities, pats, True)
entities = {'subject': 1, 'task': "A", 'age': 22}
assert not build_path(entities, pats, True)
def test_build_file(self, writable_file, tmp_bids, caplog):
# Simple write out
new_dir = join(writable_file.dirname, 'rest')
pat = join(writable_file.dirname,
'{task}/sub-{subject}/run-{run}.nii.gz')
target = join(writable_file.dirname, 'rest/sub-3/run-2.nii.gz')
writable_file.copy(pat)
assert exists(target)
# Conflict handling
with pytest.raises(ValueError):
writable_file.copy(pat)
with pytest.raises(ValueError):
writable_file.copy(pat, conflicts='fail')
with pytest.warns(UserWarning) as record:
writable_file.copy(pat, conflicts='skip')
log_message = record[0].message.args[0]
assert log_message == 'A file at path {} already exists, ' \
'skipping writing file.'.format(target)
writable_file.copy(pat, conflicts='append')
append_target = join(writable_file.dirname,
'rest/sub-3/run-2_1.nii.gz')
assert exists(append_target)
writable_file.copy(pat, conflicts='overwrite')
assert exists(target)
shutil.rmtree(new_dir)
# Symbolic linking
writable_file.copy(pat, symbolic_link=True)
assert islink(target)
shutil.rmtree(new_dir)
# Using different root
root = str(tmp_bids.mkdir('tmp2'))
pat = join(root, '{task}/sub-{subject}/run-{run}.nii.gz')
target = join(root, 'rest/sub-3/run-2.nii.gz')
writable_file.copy(pat, root=root)
assert exists(target)
# Copy into directory functionality
pat = join(writable_file.dirname, '{task}/')
writable_file.copy(pat)
target = join(writable_file.dirname, 'rest', writable_file.filename)
assert exists(target)
shutil.rmtree(new_dir)
class TestWritableLayout:
def test_write_files(self, tmp_bids, layout):
tmpdir = str(tmp_bids)
pat = join(str(tmpdir), 'sub-{subject<02>}'
'/ses-{session}'
'/r-{run}'
'/suffix-{suffix}'
'/acq-{acquisition}'
'/task-{task}.nii.gz')
layout.copy_files(path_patterns=pat)
example_file = join(str(tmpdir), 'sub-02'
'/ses-2'
'/r-1'
'/suffix-bold'
'/acq-fullbrain'
'/task-rest.nii.gz')
example_file2 = join(str(tmpdir), 'sub-01'
'/ses-2'
'/r-1'
'/suffix-bold'
'/acq-fullbrain'
'/task-rest.nii.gz')
assert exists(example_file)
assert not exists(example_file2)
pat = join(str(tmpdir), 'sub-{subject<01>}'
'/ses-{session}'
'/r-{run}'
'/suffix-{suffix}'
'/task-{task}.nii.gz')
example_file = join(str(tmpdir), 'sub-01'
'/ses-2'
'/r-1'
'/suffix-bold'
'/task-rest.nii.gz')
# Should fail without the 'overwrite' because there are multiple
# files that produce the same path.
with pytest.raises(ValueError):
layout.copy_files(path_patterns=pat)
try:
os.remove(example_file)
except OSError:
pass
layout.copy_files(path_patterns=pat, conflicts='overwrite')
assert exists(example_file)
def test_write_contents_to_file(self, tmp_bids, layout):
contents = 'test'
entities = {'subject': 'Bob', 'session': '01'}
pat = join('sub-{subject}/ses-{session}/desc.txt')
layout.write_contents_to_file(entities, path_patterns=pat,
contents=contents, validate=False)
target = join(str(tmp_bids), 'bids', 'sub-Bob/ses-01/desc.txt')
assert exists(target)
with open(target) as f:
written = f.read()
assert written == contents
assert target not in layout.files
def test_write_contents_to_file_defaults(self, tmp_bids, layout):
contents = 'test'
entities = {'subject': 'Bob', 'session': '01', 'run': '1',
'suffix': 'bold', 'task': 'test', 'acquisition': 'test',
'bval': 0}
layout.write_contents_to_file(entities, contents=contents)
target = join(str(tmp_bids), 'bids', 'sub-Bob', 'ses-01',
'func', 'sub-Bob_ses-01_task-test_acq-test_run-1_bold.nii.gz')
assert exists(target)
with open(target) as f:
written = f.read()
assert written == contents
def test_build_file_from_layout(self, tmpdir, layout):
entities = {'subject': 'Bob', 'session': '01', 'run': '1'}
pat = join(str(tmpdir), 'sub-{subject}'
'/ses-{session}'
'/r-{run}.nii.gz')
path = layout.build_path(entities, path_patterns=pat, validate=False)
assert path == join(str(tmpdir), 'sub-Bob/ses-01/r-1.nii.gz')
data_dir = join(dirname(__file__), 'data', '7t_trt')
filename = 'sub-04_ses-1_task-rest_acq-fullbrain_run-1_physio.tsv.gz'
file = join('sub-04', 'ses-1', 'func', filename)
path = layout.build_path(file, path_patterns=pat, validate=False)
assert path.endswith('sub-04/ses-1/r-1.nii.gz')
``` |
{
"source": "jhlegarreta/VTKExamples",
"score": 2
} |
#### File: Python/VisualizationAlgorithms/CutWithCutFunction.py
```python
import vtk
def main():
colors = vtk.vtkNamedColors()
fileName, numberOfCuts = get_program_parameters()
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(fileName)
reader.Update()
bounds = reader.GetOutput().GetBounds()
print('Bounds:', ', '.join(['{:.3f}'.format(f) for f in bounds]))
plane = vtk.vtkPlane()
plane.SetOrigin((bounds[1] + bounds[0]) / 2.0, (bounds[3] + bounds[2]) / 2.0, bounds[4])
plane.SetNormal(0, 0, 1)
# Create cutter
high = plane.EvaluateFunction((bounds[1] + bounds[0]) / 2.0, (bounds[3] + bounds[2]) / 2.0, bounds[5])
cutter = vtk.vtkCutter()
cutter.SetInputConnection(reader.GetOutputPort())
cutter.SetCutFunction(plane)
cutter.GenerateValues(numberOfCuts, 0.99, 0.99 * high)
cutterMapper = vtk.vtkPolyDataMapper()
cutterMapper.SetInputConnection(cutter.GetOutputPort())
cutterMapper.ScalarVisibilityOff()
# Create cut actor
cutterActor = vtk.vtkActor()
cutterActor.GetProperty().SetColor(colors.GetColor3d("Banana"))
cutterActor.GetProperty().SetLineWidth(2)
cutterActor.SetMapper(cutterMapper)
# Create model actor
modelMapper = vtk.vtkPolyDataMapper()
modelMapper.SetInputConnection(reader.GetOutputPort())
modelMapper.ScalarVisibilityOff()
modelActor = vtk.vtkActor()
modelActor.GetProperty().SetColor(colors.GetColor3d("Flesh"))
modelActor.SetMapper(modelMapper)
# Create renderers and add actors of plane and model
renderer = vtk.vtkRenderer()
renderer.AddActor(cutterActor)
renderer.AddActor(modelActor)
# Add renderer to renderwindow and render
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetSize(600, 600)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
renderer.SetBackground(colors.GetColor3d("Burlywood"))
renderer.GetActiveCamera().SetPosition(0, -1, 0)
renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
renderer.GetActiveCamera().SetViewUp(0, 0, 1)
renderer.GetActiveCamera().Azimuth(30)
renderer.GetActiveCamera().Elevation(30)
renderer.ResetCamera()
renderWindow.Render()
renderWindow.SetWindowName('CutWithCutFunction')
interactor.Start()
def get_program_parameters():
import argparse
description = 'Cut with a cut function.'
epilogue = '''
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='Torso.vtp')
parser.add_argument('numberOfCuts', default=10, type=int, nargs='?', help='The number of cuts e.g 10.')
args = parser.parse_args()
return args.filename, args.numberOfCuts
if __name__ == "__main__":
main()
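# Example invocation (data file and cut count as described in the argparse help above):
#   python CutWithCutFunction.py Torso.vtp 20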
``` |
{
"source": "JHL-HUST/PWWS",
"score": 3
} |
#### File: JHL-HUST/PWWS/char_level_process.py
```python
import numpy as np
from config import config
def onehot_dic_build():
# use one-hot encoding
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}"
embedding_dic = {}
embedding_w = []
    # For characters that are not in the alphabet, and for empty/padding positions, map them to the all-zero vector.
embedding_dic["UNK"] = 0
embedding_w.append(np.zeros(len(alphabet), dtype='float32'))
for i, alpha in enumerate(alphabet):
onehot = np.zeros(len(alphabet), dtype='float32')
embedding_dic[alpha] = i + 1
onehot[i] = 1
embedding_w.append(onehot)
embedding_w = np.array(embedding_w, dtype='float32')
return embedding_w, embedding_dic
def get_embedding_dict():
return {'UNK': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10,
'k': 11, 'l': 12,
'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17, 'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22,
'w': 23, 'x': 24,
'y': 25, 'z': 26, '0': 27, '1': 28, '2': 29, '3': 30, '4': 31, '5': 32, '6': 33, '7': 34,
'8': 35, '9': 36,
'-': 60, ',': 38, ';': 39, '.': 40, '!': 41, '?': 42, ':': 43, "'": 44, '"': 45, '/': 46,
'\\': 47, '|': 48,
'_': 49, '@': 50, '#': 51, '$': 52, '%': 53, '^': 54, '&': 55, '*': 56, '~': 57, '`': 58,
'+': 59, '=': 61,
'<': 62, '>': 63, '(': 64, ')': 65, '[': 66, ']': 67, '{': 68, '}': 69}
def doc_process(doc, embedding_dic, dataset):
max_len = config.char_max_len[dataset]
min_len = min(max_len, len(doc))
doc_vec = np.zeros(max_len, dtype='int64')
for j in range(min_len):
if doc[j] in embedding_dic:
doc_vec[j] = embedding_dic[doc[j]]
else:
doc_vec[j] = embedding_dic['UNK']
return doc_vec
def doc_process_for_all(doc, embedding_dic, dataset):
max_len = config.char_max_len[dataset]
x = []
for d in doc:
x.append(doc_process(d, embedding_dic, dataset))
x = np.asarray(x, dtype='int64')
return x
def char_process(train_texts, train_labels, test_texts, test_labels, dataset):
embedding_w, embedding_dic = onehot_dic_build()
x_train = []
for i in range(len(train_texts)):
doc_vec = doc_process(train_texts[i].lower(), embedding_dic, dataset)
x_train.append(doc_vec)
x_train = np.asarray(x_train, dtype='int64')
y_train = np.array(train_labels, dtype='float32')
x_test = []
for i in range(len(test_texts)):
doc_vec = doc_process(test_texts[i].lower(), embedding_dic, dataset)
x_test.append(doc_vec)
x_test = np.asarray(x_test, dtype='int64')
y_test = np.array(test_labels, dtype='float32')
del embedding_w, embedding_dic
return x_train, y_train, x_test, y_test
```
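A minimal usage sketch for the character-level encoder above (not part of the repository); `'imdb'` is assumed here to be one of the dataset keys configured in `config.char_max_len`:
```python
from char_level_process import onehot_dic_build, doc_process

embedding_w, embedding_dic = onehot_dic_build()
# one row per alphabet character plus the leading all-zero UNK row
print(embedding_w.shape)

# encode a short lower-cased string into a fixed-length vector of character indices
vec = doc_process("a movie!", embedding_dic, dataset='imdb')
print(vec[:10])
```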
#### File: JHL-HUST/PWWS/evaluate_word_saliency.py
```python
from config import config
import copy
import spacy
from word_level_process import text_to_vector
from char_level_process import doc_process, get_embedding_dict
nlp = spacy.load('en_core_web_sm')
def evaluate_word_saliency(doc, grad_guide, tokenizer, input_y, dataset, level):
word_saliency_list = []
# zero the code of the current word and calculate the amount of change in the classification probability
if level == 'word':
max_len = config.word_max_len[dataset]
text = [doc[position].text for position in range(len(doc))]
text = ' '.join(text)
origin_vector = text_to_vector(text, tokenizer, dataset)
origin_prob = grad_guide.predict_prob(input_vector=origin_vector)
for position in range(len(doc)):
if position >= max_len:
break
# get x_i^(\hat)
without_word_vector = copy.deepcopy(origin_vector)
without_word_vector[0][position] = 0
prob_without_word = grad_guide.predict_prob(input_vector=without_word_vector)
# calculate S(x,w_i) defined in Eq.(6)
word_saliency = origin_prob[input_y] - prob_without_word[input_y]
word_saliency_list.append((position, doc[position], word_saliency, doc[position].tag_))
elif level == 'char':
max_len = config.char_max_len[dataset]
embedding_dic = get_embedding_dict()
origin_vector = doc_process(doc.text.lower(), embedding_dic, dataset).reshape(1, max_len)
origin_prob = grad_guide.predict_prob(input_vector=origin_vector)
find_a_word = False
word_position = 0
without_word_vector = copy.deepcopy(origin_vector)
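        # Scan the text character by character: zero out the characters of the current
        # word, and when a space is reached measure how much the class probability drops
        # with that word blanked out (the char-level analogue of Eq.(6)).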
for i, c in enumerate(doc.text):
if i >= max_len:
break
            if c != ' ':
without_word_vector[0][i] = 0
else:
find_a_word = True
prob_without_word = grad_guide.predict_prob(without_word_vector)
word_saliency = origin_prob[input_y] - prob_without_word[input_y]
word_saliency_list.append((word_position, doc[word_position], word_saliency, doc[word_position].tag_))
word_position += 1
if find_a_word:
without_word_vector = copy.deepcopy(origin_vector)
find_a_word = False
position_word_list = []
for word in word_saliency_list:
position_word_list.append((word[0], word[1]))
return position_word_list, word_saliency_list
``` |
{
"source": "jhljx/MachineLearningInAction",
"score": 3
} |
#### File: MachineLearningInAction/Ch03_Decision_Tree/trees.py
```python
from math import log
import operator
def createDataSet():
dataSet = [[1, 1, 'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
labels = ['no surfacing', 'flippers']
return dataSet, labels
# Calculate the Shannon entropy of the given data set
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
    # create a dictionary of counts for every possible class label
for featVec in dataSet:
currentLabel = featVec[-1] #
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
# print labelCounts
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key]) / numEntries
        # take the logarithm base 2
shannonEnt -= prob * log(prob, 2)
return shannonEnt
# Split the data set on a given feature
# Three parameters: the data set to split, the feature (axis) to split on, and the feature value to keep
def splitDataSet(dataSet, axis, value):
    # create a new list object
retDataSet = []
    # extract the matching rows, dropping the split feature
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis]
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
# Choose the best feature to split the data set on
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1
#print(numFeatures)
    # compute the original Shannon entropy of the whole data set; keep this baseline disorder measure to compare against the entropy after each candidate split
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0
bestFeature = -1
for i in range(numFeatures):
featList = [example[i] for example in dataSet]
uniqueVals = set(featList)
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet) / float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy
if(infoGain > bestInfoGain):
bestInfoGain = infoGain
bestFeature = i
return bestFeature
# Return the class label that occurs most often (majority vote)
def majorityCnt(classList):
classCount = {}
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
# Build the decision tree recursively
# Takes two arguments: the data set and the list of feature labels
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]  # contains all class labels in the data set
    # stop splitting once every instance belongs to the same class
if classList.count(classList[0]) == len(classList):
return classList[0]
    # when all features have been used up, return the majority class (all features consumed but the data set still cannot be split into single-class groups)
if len(dataSet[0]) == 1:
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
del(labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
        subLabels = labels[:]  # copy the class labels: Python passes lists by reference and the recursion must not modify the caller's list
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value), subLabels)
return myTree
# Classification function that walks the decision tree
def classify(inputTree, featLabels, testVec):
firstStr = inputTree.keys()[0]
secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)  # convert the label string into a feature index
for key in secondDict.keys():
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else:
classLabel = secondDict[key]
return classLabel
# Persist the decision tree to disk with the pickle module
def storeTree(inputTree, filename):
import pickle
fw = open(filename, 'w')
pickle.dump(inputTree, fw)
# Load a pickled decision tree from disk
def grabTree(filename):
import pickle
fr = open(filename)
return pickle.load(fr)
```
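An illustrative session with the functions above (not part of the original file); the tree printed in the comment is what the toy data set from `createDataSet()` produces:
```python
import trees

myDat, labels = trees.createDataSet()
myTree = trees.createTree(myDat, labels[:])  # pass a copy; createTree mutates the label list
print myTree  # {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
print trees.classify(myTree, ['no surfacing', 'flippers'], [1, 0])  # 'no'
```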
#### File: MachineLearningInAction/Ch08_Linear_Regression/regression.py
```python
from numpy import *
import matplotlib.pyplot as plt
def loadDataSet(fileName):
numFeat = len(open(fileName).readline().split('\t')) - 1
dataMat = []
labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = []
curLine = line.strip().split('\t')
for i in range(numFeat):
lineArr.append(float(curLine[i]))
dataMat.append(lineArr)
labelMat.append(float(curLine[-1]))
return dataMat, labelMat
def standRegres(xArr, yArr):
xMat = mat(xArr)
yMat = mat(yArr).T
xTx = xMat.T * xMat
if linalg.det(xTx) == 0.0:
print "This matrix is singular, cannot do inverse"
return
ws = xTx.I * (xMat.T * yMat)
return ws
def lwlr(testPoint, xArr, yArr, k=1.0):
xMat = mat(xArr)
yMat = mat(yArr).T
m = shape(xMat)[0]
weights = mat(eye((m)))
for j in range(m):
diffMat = testPoint - xMat[j,:]
weights[j,j] = exp(diffMat*diffMat.T/(-2.0*k**2))
xTx = xMat.T * (weights * xMat)
if linalg.det(xTx) == 0.0:
print "This matrix is singular, cannot do inverse"
return
ws = xTx.I * (xMat.T * (weights * yMat))
return testPoint * ws
def lwlrTest(testArr, xArr, yArr, k=1.0):
m = shape(testArr)[0]
yHat = zeros(m)
for i in range(m):
yHat[i] = lwlr(testArr[i], xArr, yArr, k)
return yHat
def drawFigure(xArr, yArr, yHat):
xMat = mat(xArr)
srtInd = xMat[:,1].argsort(0)
xSort = xMat[srtInd][:,0,:]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xSort[:,1], yHat[srtInd])
ax.scatter(xMat[:,1].flatten().A[0], mat(yArr).T.flatten().A[0], s=2, c='red')
plt.show()
def rssError(yArr, yHatArr):
return ((yArr - yHatArr) ** 2).sum()
def ridgeRegres(xMat, yMat, lam=0.2):
xTx = xMat.T * xMat
denom = xTx + eye(shape(xMat)[1]) * lam
if linalg.det(denom) == 0.0:
print "This matrix is singular, cannot do inverse"
return
ws = denom.I * (xMat.T * yMat)
return ws
def ridgeTest(xArr, yArr):
xMat = mat(xArr)
yMat = mat(yArr).T
yMean = mean(yMat, 0)
yMat = yMat - yMean
xMeans = mean(xMat, 0)
xVar = var(xMat, 0)
xMat = (xMat - xMeans) / xVar
numTestPts = 30
wMat = zeros((numTestPts, shape(xMat)[1]))
for i in range(numTestPts):
ws = ridgeRegres(xMat, yMat, exp(i-10))
wMat[i,:] = ws.T
return wMat
def regularize(xMat):#regularize by columns
inMat = xMat.copy()
inMeans = mean(inMat,0) #calc mean then subtract it off
inVar = var(inMat,0) #calc variance of Xi then divide by it
inMat = (inMat - inMeans)/inVar
return inMat
#eps: step size applied at each iteration
#numIt: number of iterations
def stageWise(xArr, yArr, eps=0.01, numIt=100):
xMat = mat(xArr)
yMat = mat(yArr).T
yMean = mean(yMat, 0)
yMat = yMat - yMean #can also regularize ys but will get smaller coef
print yMat
print "--------------"
xMat = regularize(xMat)
m, n = shape(xMat)
returnMat = zeros((numIt, n))
    ws = zeros((n, 1)) #ws holds the final weight vector
    wsTest = ws.copy() #wsTest is the working copy of ws used inside each iteration i
    wsMax = ws.copy() #wsMax records the best candidate found in iteration i
flag = 0
for i in range(numIt):
if(i < 10 or (numIt > 100 and i > numIt - 10)):
print ws.T
elif flag == 0:
print "..."
flag = 1
lowestError = inf
for j in range(n):
for sign in [-1, 1]:
wsTest = ws.copy()
wsTest[j] += eps * sign
yTest = xMat * wsTest
rssE = rssError(yMat.A, yTest.A)
if rssE < lowestError:
lowestError = rssE
wsMax = wsTest
ws = wsMax.copy()
returnMat[i, :] = ws.T
return returnMat
from time import sleep
import json
import urllib2
def searchForSet(retX, retY, setNum, yr, numPce, origPrc):
    sleep(10) #avoid issuing too many API calls in a short period
myAPIstr = '<KEY>'
searchURL = 'https://www.googleapis.com/shopping/search/v1/public/products?\
key=%s&country=US&q=lego+%d&alt=json' % (myAPIstr, setNum)
pg = urllib2.urlopen(searchURL)
retDict = json.loads(pg.read())
for i in range(len(retDict['item'])):
try:
currItem = retDict['item'][i]
if currItem['product']['condition'] == 'new':
newFlag = 1
else:
newFlag = 0
listOfInv = currItem['product']['inventories']
for item in listOfInv:
sellingPrice = item['price']
                if sellingPrice > origPrc * 0.5: #filter out incomplete sets
print "%d\t%d\t%d\t%f\t%f" % (yr, numPce, newFlag, origPrc, sellingPrice)
retX.append([yr, numPce, newFlag, origPrc])
retY.append(sellingPrice)
except:
print "problem with item %d" % i
def setDataCollect(retX, retY):
searchForSet(retX, retY, 8288, 2006, 800, 49.99)
searchForSet(retX, retY, 10030, 2002, 3096, 269.99)
searchForSet(retX, retY, 10179, 2007, 5195, 499.99)
searchForSet(retX, retY, 10181, 2007, 3428, 199.99)
searchForSet(retX, retY, 10189, 2008, 5922, 299.99)
searchForSet(retX, retY, 10196, 2009, 3263, 249.99)
#defaults to 10-fold cross validation, so each lambda value gets 10 error estimates
def crossValidation(xArr, yArr, numVal=10):
m = len(yArr)
indexList = range(m)
errorMat = zeros((numVal, 30))
for i in range(numVal):
trainX = []; trainY = []
testX = []; testY = []
random.shuffle(indexList)
        for j in range(m): #split into training and test sets
if j < m * 0.9:
trainX.append(xArr[indexList[j]])
trainY.append(yArr[indexList[j]])
else:
testX.append(xArr[indexList[j]])
testY.append(yArr[indexList[j]])
wMat = ridgeTest(trainX, trainY)
for k in range(30):
matTestX = mat(testX)
matTrainX = mat(trainX)
meanTrain = mean(matTrainX, 0)
varTrain = var(matTrainX, 0)
            #standardize the test data with the parameters estimated on the training data
matTestX = (matTestX - meanTrain) / varTrain
yEst = matTestX * mat(wMat[k, :]).T + mean(trainY)
errorMat[i, k] = rssError(yEst.T.A, array(testY))
meanErrors = mean(errorMat, 0)
minMean = float(min(meanErrors))
bestWeights = wMat[nonzero(meanErrors==minMean)]
xMat = mat(xArr)
yMat = mat(yArr).T
meanX = mean(xMat, 0)
varX = var(xMat, 0)
unReg = bestWeights / varX
    print "the best model from Ridge Regression is:\n", unReg
print "with constant term: ", -1*sum(multiply(meanX, unReg)) + mean(yMat)
```
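An illustrative session for the regression routines above (not part of the original file); `ex0.txt` is the tab-separated data file used with this chapter and is assumed to be on disk:
```python
import regression

xArr, yArr = regression.loadDataSet('ex0.txt')
ws = regression.standRegres(xArr, yArr)               # ordinary least-squares weights
yHat = regression.lwlrTest(xArr, xArr, yArr, k=0.01)  # locally weighted linear regression fit
regression.drawFigure(xArr, yArr, yHat)               # scatter plot plus the fitted curve
```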
#### File: MachineLearningInAction/Ch09_Regression_Tree/treeExplore.py
```python
from numpy import *
from Tkinter import *
import regTrees
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
def reDraw(tolS, tolN):
reDraw.f.clf()
reDraw.a = reDraw.f.add_subplot(111)
    #check whether the Model Tree checkbox is selected
if chkBtnVar.get():
if tolN < 2:
tolN = 2
myTree = regTrees.createTree(reDraw.rawDat, regTrees.modelLeaf, regTrees.modelErr, (tolS, tolN))
yHat = regTrees.createForeCast(myTree, reDraw.testDat, regTrees.modelTreeEval)
else:
myTree = regTrees.createTree(reDraw.rawDat, ops=(tolS, tolN))
yHat = regTrees.createForeCast(myTree, reDraw.testDat)
#print shape(reDraw.rawDat[:,0])
reDraw.a.scatter(reDraw.rawDat[:,0].flatten().A[0], reDraw.rawDat[:,1].flatten().A[0], s=5)
reDraw.a.plot(reDraw.testDat, yHat, linewidth=2.0)
reDraw.canvas.show()
def getInputs():
try:
tolN = int(tolNentry.get())
except:
        #clear the invalid input and fall back to the default value
tolN = 10
print 'entry Integer for tolN'
tolNentry.delete(0, END)
tolNentry.insert(0, '10')
try:
tolS = float(tolSentry.get())
except Exception as e:
#raise e
tolS = 1.0
print 'enter Float for tolS'
tolSentry.delete(0, END)
tolSentry.insert(0, '1.0')
return tolN, tolS
#read the entry-box values via getInputs(), then call reDraw() with them to produce the plot
def drawNewTree():
tolN, tolS = getInputs()
reDraw(tolS, tolN)
root = Tk()
#Label(root, text='Plot Place Holder').grid(row=0, columnspan=3)
reDraw.f = Figure(figsize=(5,4), dpi=100)
reDraw.canvas = FigureCanvasTkAgg(reDraw.f, master=root)
reDraw.canvas.show()
reDraw.canvas.get_tk_widget().grid(row=0, columnspan=3)
Label(root, text='tolN').grid(row=1, column=0)
tolNentry = Entry(root) #text entry box
tolNentry.grid(row=1, column=1)
tolNentry.insert(0, '10')
Label(root, text='tolS').grid(row=2, column=0)
tolSentry = Entry(root)
tolSentry.grid(row=2, column=1)
tolSentry.insert(0, '1.0')
Button(root, text='ReDraw', command=drawNewTree).grid(row=1, column=2, rowspan=3)
#Button(root, text='Quit', fg='black', command=root.quit).grid(row=1, column=2)
chkBtnVar = IntVar() #integer variable backing the check button
chkBtn = Checkbutton(root, text='Model Tree', variable=chkBtnVar) #check button
chkBtn.grid(row=3, column=0, columnspan=2)
#initialize the global data attached to reDraw
reDraw.rawDat = mat(regTrees.loadDataSet('sine.txt'))
reDraw.testDat = arange(min(reDraw.rawDat[:,0]), max(reDraw.rawDat[:,0]), 0.01)
reDraw(1.0, 10)
root.mainloop()
```
#### File: MachineLearningInAction/Ch14 SVD/svdRec.py
```python
import numpy as np
def loadExData():
return [[1, 1, 1, 0, 0],
[2, 2, 2, 0, 0],
[1, 1, 1, 0, 0],
[5, 5, 5, 0, 0],
[1, 1, 0, 2, 2],
[0, 0, 0, 3, 3],
[0, 0, 0, 1, 1]]
def loadExData2():
return[[0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 5],
[0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 3],
[0, 0, 0, 0, 4, 0, 0, 1, 0, 4, 0],
[3, 3, 4, 0, 0, 0, 0, 2, 2, 0, 0],
[5, 4, 5, 0, 0, 0, 0, 5, 5, 0, 0],
[0, 0, 0, 0, 5, 0, 1, 0, 0, 5, 0],
[4, 3, 4, 0, 0, 0, 0, 5, 5, 0, 1],
[0, 0, 0, 4, 0, 4, 0, 0, 0, 0, 4],
[0, 0, 0, 2, 0, 2, 5, 0, 0, 1, 2],
[0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0],
[1, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0]]
#the following three functions are alternative similarity (distance) measures
def ecludSim(inA, inB):
return 1.0 / (1.0 + np.linalg.norm(inA - inB))
def pearsSim(inA, inB):
if len(inA) < 3:
return 1.0
return 0.5 + 0.5 * np.corrcoef(inA, inB, rowvar=0)[0][1]
def cosSim(inA, inB):
num = float(inA.T * inB)
denom = np.linalg.norm(inA) * np.linalg.norm(inB)
return 0.5 + 0.5 * (num / denom)
#estimate the rating a user would give an item, under a given similarity measure
#inputs: the data matrix, a user index, a similarity measure, and an item index
#rows of the data matrix correspond to users and columns to items
def standEst(dataMat, user, simMeas, item):
n = np.shape(dataMat)[1]
simTotal = 0.0; ratSimTotal = 0.0;
for j in range(n):
userRating = dataMat[user, j]
if userRating == 0:
continue
        #find the users that have rated both items
overLap = np.nonzero(np.logical_and(dataMat[:, item].A > 0, dataMat[:, j].A > 0))[0]
if len(overLap) == 0:
similarity = 0
else:
similarity = simMeas(dataMat[overLap, item], dataMat[overLap, j])
#print 'the %d and %d similarity is: %f' % (item, j, similarity)
simTotal += similarity
ratSimTotal += similarity * userRating
if simTotal == 0:
return 0
else:
return ratSimTotal / simTotal
#recommendation routine; uses standEst as the default estimation method
#returns the N highest-scoring unrated items
def recommend(dataMat, user, N=3, simMeas=cosSim, estMethod=standEst):
unratedItems = np.nonzero(dataMat[user, :].A == 0)[1]
if len(unratedItems) == 0:
return 'you rated everything'
itemScores = []
for item in unratedItems:
estimatedScore = estMethod(dataMat, user, simMeas, item)
itemScores.append((item, estimatedScore))
return sorted(itemScores, key=lambda jj: jj[1], reverse=True)[:N]
#SVD-based rating estimation
def svdEst(dataMat, user, simMeas, item):
n = np.shape(dataMat)[1]
simTotal = 0.0; ratSimTotal = 0.0;
U, Sigma, VT = np.linalg.svd(dataMat)
    Sig4 = np.mat(np.eye(4) * Sigma[:4]) #build a diagonal matrix from the top-4 singular values
    xformedItems = dataMat.T * U[:, :4] * Sig4.I #map the items into the 4-dimensional latent space
for j in range(n):
userRating = dataMat[user, j]
if userRating == 0 or j == item:
continue
similarity = simMeas(xformedItems[item, :].T, xformedItems[j, :].T)
print 'the %d and %d similarity is : %f' % (item, j, similarity)
simTotal += similarity
ratSimTotal += similarity * userRating
if simTotal == 0:
return 0
else:
return ratSimTotal / simTotal
def printMat(inMat, thresh=0.8):
for i in range(32):
for k in range(32):
            if float(inMat[i, k]) > thresh:
print 1,
else:
print 0,
print ' '
def imgCompress(numSV=3, thresh=0.8):
myl = []
for line in open('0_5.txt', 'r').readlines():
newRow = []
for i in range(32):
newRow.append(int(line[i]))
myl.append(newRow)
myMat = np.mat(myl)
print '****origin matrix*****'
printMat(myMat, thresh)
U, Sigma, VT = np.linalg.svd(myMat)
SigRecon = np.mat(np.zeros((numSV, numSV)))
for k in range(numSV):
SigRecon[k, k] = Sigma[k]
reconMat = U[:, :numSV] * SigRecon * VT[:numSV, :]
print '****reconstructed matrix using %d singular values******' % numSV
printMat(reconMat, thresh)
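# Illustrative usage (sketch, not part of the original file):
#   myMat = np.mat(loadExData2())
#   print recommend(myMat, 1, estMethod=svdEst)  # top-3 unrated items for user 1
#   imgCompress(numSV=2)                         # reconstruct the 32x32 digit from 2 singular values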
``` |
{
"source": "jh-lor/QC",
"score": 3
} |
#### File: QC/bs13/BaconShor.py
```python
from PauliSim import PauliSim
import numpy as np
class BaconShor13():
"""Implements a Bacon-Shor-13 simulation using the PauliSim class
"""
def __init__(self, p = 0):
"""Initializes BaconShor object with error mode, physical error and initial state
Args:
mode (str): error mode for simuslation
p (float, optional): physical error rate. Defaults to 0.
state (numpy arr (int), optional): initial state of system. Defaults to None.
"""
self.errorRate = p
self.measurements = {}
self.appliedchannels = []
self.sim = PauliSim(13)
def execute(self):
self.state = self.sim.execute()
self.appliedchannels += self.sim.getOperations()
self.measurements = self.sim.measurements
self.sim = PauliSim(13, self.sim.state)
def initialize_FT(self):
"""Intializes the Bacon-Shor-13 circuit with two qubit errors
Args:
Xerr (list, optional): list of index of qubits to append X gates to before stabilizers. For debugging purposes. Defaults to [].
Zerr (list, optional): list of index of qubits to append Z gates to before stabilizers. For debugging purposes. Defaults to [].
Returns:
dict: dictionary of measurements. Keys are the stabilizer names.
"""
self.sim.addTag("Fault Tolerant Initialization")
self.sim.addCNOT(0,3)
self.sim.addCNOT(0,6)
for i in [0,3,6]:
self.sim.addH(i)
self.sim.addCNOT(0,1)
self.sim.addDepolarizingNoise([0,1], self.errorRate, 2)
self.sim.addCNOT(0,2)
self.sim.addDepolarizingNoise([0,2], self.errorRate, 2)
self.sim.addCNOT(3,4)
self.sim.addDepolarizingNoise([3,4], self.errorRate, 2)
self.sim.addCNOT(3,5)
self.sim.addDepolarizingNoise([3,5], self.errorRate, 2)
self.sim.addCNOT(6,7)
self.sim.addDepolarizingNoise([6,7], self.errorRate, 2)
self.sim.addCNOT(6,8)
self.sim.addDepolarizingNoise([6,8], self.errorRate, 2)
for i in range(0,9):
self.sim.addH(i)
def single_qubit_errors(self):
"""Adds single qubit errors to each of nine data qubits
"""
self.sim.addTag("Single Qubit Errors")
for i in range(9):
self.sim.addDepolarizingNoise(i, self.errorRate, 1)
def add_errors(self, Xerr, Zerr):
"""[Debugging] Adds specific X or Z errors to the specified qubit
Args:
Xerr ([int]): list of qubits to apply X error to
Zerr ([int]): list of qubits to apply Z error to
"""
for qubit in Xerr:
self.sim.addX(qubit)
for qubit in Zerr:
self.sim.addZ(qubit)
def correctError(self):
"""Measures stabilizers, decode error string and appends the appropriate gate. Measurement may have errors
Args:
error (bool, optional): whether measurement errors can occur. Defaults to False.
Returns:
dict: dictionary of measurements. Keys are the stabilizer names.
"""
lookup_table = {
'0000': 'IIIIIIIII',
'0100': 'IIZIIIIII',
'1000': 'ZIIIIIIII',
'1100': 'IZIIIIIII',
'0010': 'XIIIIIIII',
'0001': 'IIIIIIXII',
'0011': 'IIIXIIIII',
'1010': 'YIIIIIIII',
'1011': 'IIIYIIIII',
'1001': 'IIIIIIYII',
'1110': 'IYIIIIIII',
'1111': 'IIIIYIIII',
'1101': 'IIIIIIIYI',
'0110': 'IIYIIIIII',
'0111': 'IIIIIYIII',
'0101': 'IIIIIIIIY'
}
error_string = ""
error_string += str(self.measurements["X1X2X4X5X7X8"])
error_string += str(self.measurements["X2X3X5X6X8X9"])
error_string += str(self.measurements["Z1Z4Z2Z5Z3Z6"])
error_string += str(self.measurements["Z4Z7Z5Z8Z6Z9"])
decode_string = lookup_table.get(error_string)
self.sim.addTag("Error Correction")
for i in range(len(decode_string)):
if decode_string[i] != 'I':
if decode_string[i] =='X':
self.sim.addX(i)
if decode_string[i] =='Y':
self.sim.addY(i)
if decode_string[i] =='Z':
self.sim.addZ(i)
def measure_syndrome(self, error = False):
"""Appends stabilizers and measure the stabilizers. Resets ancilla qubits and clears the operations list after measurement
Args:
error (bool, optional): whether there is measurement error. Defaults to False.
Returns:
dict: dictionary of measurements. Keys are the stabilizer names.
"""
self.sim.addTag("Stabilizer Measurement")
if error:
self.sim.addZStabilizer([0,3,1,4,2,5], 9, self.errorRate)
self.sim.addZStabilizer([3,6,4,7,5,8], 10, self.errorRate)
self.sim.addXStabilizer([0,1,3,4,6,7], 11, self.errorRate)
self.sim.addXStabilizer([1,2,4,5,7,8], 12, self.errorRate)
else:
self.sim.addZStabilizer([0,3,1,4,2,5], 9)
self.sim.addZStabilizer([3,6,4,7,5,8], 10)
self.sim.addXStabilizer([0,1,3,4,6,7], 11)
self.sim.addXStabilizer([1,2,4,5,7,8], 12)
self.sim.addMeasurement(11, "X1X2X4X5X7X8")
self.sim.addMeasurement(12, "X2X3X5X6X8X9")
self.sim.addMeasurement(9, "Z1Z4Z2Z5Z3Z6")
self.sim.addMeasurement(10, "Z4Z7Z5Z8Z6Z9")
# reset ancilla
for i in [9, 10, 11 ,12]:
self.sim.addReset(i)
self.execute()
return self.measurements
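# Illustrative round of error correction (sketch, not part of the original file):
#   bs = BaconShor13(p=0.01)
#   bs.initialize_FT(); bs.single_qubit_errors(); bs.execute()
#   bs.measure_syndrome(error=True)   # returns the four stabilizer outcomes
#   bs.correctError(); bs.execute()   # decode the syndrome and apply the correction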
``` |
{
"source": "jhlund/package-wrapper",
"score": 3
} |
#### File: package_wrapper/manifest/manifest.py
```python
from pathlib import Path
import json
from package_wrapper.manifest.filehash import (
file_hash_create,
file_hash_check,
FileHashE,
)
from package_wrapper.manifest.filehash import file_hash_create_hash_file
class ManifestE(BaseException):
"""
Basic exception for manifest related tasks
"""
def __init__(self, msg):
super(ManifestE, self).__init__()
self.msg = msg
class ManifestFile:
"""
Handle a manifest file that contains a list of artifacts (path to file, optional hash sum)
"""
def __init__(self, hash_method="sha256"):
self.database = dict()
self.hash_method = hash_method
def add_meta_data(self, keyword: str, content):
self.database[keyword] = content
def add_meta_data_file(self, meta_data_path: Path):
data = json.loads(meta_data_path.read_bytes())
for key, value in data.items():
self.add_meta_data(keyword=key, content=value)
def add_folder(self, path_to_directory: Path):
"""
Adds all files found in a certain folder to the manifest
database
:param path_to_directory:
:return: True if succeeded, otherwise False
"""
if not path_to_directory.is_dir():
raise ManifestE("No such directory")
files = [
_file
for _file in path_to_directory.glob("**/*")
if path_to_directory.joinpath(_file).is_file()
]
for file in files:
_abs_path = file.absolute()
self.add_artifact_to_db(_abs_path, path_to_directory)
return True
def add_artifact_to_db(self, path_to_file: Path, base_path: Path):
if "files" not in self.database.keys():
self.database["files"] = dict()
_hash = None
if path_to_file in self.database.keys():
            raise ManifestE(f"duplicated file: {path_to_file}")
try:
_hash = (
self.hash_method
+ ":"
+ file_hash_create(file_name=path_to_file, hash_method=self.hash_method)
)
except FileHashE as exception:
raise ManifestE(exception.msg)
rel_file_path = Path.relative_to(path_to_file, base_path)
self.database["files"][str(rel_file_path)] = _hash
return True
def retrive_contents(self):
return self.database
def check_hashes_in_db(self) -> bool:
_match = True
for _file in self.database["files"]:
            (_hash_method, _hash_string) = self.database["files"][_file].split(":")
try:
_match = file_hash_check(
file_name=Path(_file),
hash_string=_hash_string,
hash_method=_hash_method,
)
except FileHashE:
raise ManifestE(f"error when checking hash for {_file}")
if not _match:
break
return _match
```
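A brief usage sketch for `ManifestFile` (not part of the repository); the `./artifacts` directory is an assumed example path:
```python
from pathlib import Path
from package_wrapper.manifest.manifest import ManifestFile

manifest = ManifestFile(hash_method="sha256")
manifest.add_meta_data("package", "example-package")  # arbitrary metadata entry
manifest.add_folder(Path("./artifacts"))              # hash every file under ./artifacts
print(manifest.retrive_contents())                    # nested dict with a "files" section
```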
#### File: package-wrapper/tests/test_filehash.py
```python
import json
from pathlib import Path
import pytest
from package_wrapper.manifest.filehash import (
file_hash_check,
file_hash_create_hash_file,
)
@pytest.fixture()
def hash_file(tmpdir):
_file_name = Path("hashed_file.json")
_json_data = {"my_hash": "is always matching"}
_path = Path(tmpdir).joinpath(_file_name)
with open(_path, "w") as file_pointer:
json.dump(_json_data, file_pointer, indent=3)
yield _path
@pytest.mark.parametrize(
"expected_hash, hash_method",
[
pytest.param(
"sha1:c922c69a3ad8c693b7dcc1e4bf75dbffc6074782", "sha1", id="sha1"
),
pytest.param(
"sha224:749c15eaca51f74b47824be96cc5e7aceca8339cb73e258885d11f2c",
"sha224",
id="sha224",
),
pytest.param(
"sha256:4c12882f1ce34f1f0aa5d2f6a902170d35cef128b8eecee5867d5750f5ab5e63",
"sha256",
id="sha256",
),
pytest.param(
"sha384:26acf5c876e3c7dd19d276b88431d5cdb86f5f757e8a27e420e2f3f43b8f5026bdd3298f1258bf8d5b4c98af243bbddc",
"sha384",
id="sha384",
),
pytest.param(
"sha512:fe493c17f9521b641eaac36edfbab48657938f2337da75bcf5b4e32ec9d3cb4ecaf524031553aff7b17a0ca3da58d3f92e8e404a93eb24fcc93698fa40f631de",
"sha512",
id="sha512",
),
pytest.param("md5:545112744d394dd2b7e52b4f4dc717ed", "md5", id="md5"),
],
)
class TestHashes:
def test_file_hash_check_matches(self, hash_file, expected_hash, hash_method):
"""
Check dummy file with a known hash with generated checksum
:return:
"""
assert file_hash_check(
file_name=hash_file, hash_string=expected_hash, hash_method=hash_method
)
def test_file_hash_check_doesnt_match(self, hash_file, expected_hash, hash_method):
"""
Check dummy file with a known hash with generated checksum against the scrambled hash.
:return:
"""
_invalid = str(expected_hash).replace("f", "c")
assert not file_hash_check(
file_name=hash_file, hash_string=_invalid, hash_method=hash_method
)
def test_hash_file_contents(self, tmpdir, hash_file, expected_hash, hash_method):
file_path = Path(tmpdir).joinpath(Path("hash_file.json"))
file_hash_create_hash_file(
file_path=file_path, file_list=[hash_file], hash_method=hash_method
)
file_contents = file_path.read_text()
assert file_contents == f"{expected_hash} hashed_file.json"
``` |
{
"source": "jhmaddox/django-authy-admin",
"score": 2
} |
#### File: django-authy-admin/authy_admin/models.py
```python
from authy.api import AuthyApiClient
from django.db import models
from django.conf import settings
from django.contrib.admin.models import User
class AuthyAdminUser(models.Model):
"""
AuthyAdminUser instances enable two-factor authentication
for the given user when authenticating with an AuthyAdminSite
"""
user = models.OneToOneField(User, primary_key=True)
authy_id = models.IntegerField(blank=True)
country_code = models.IntegerField(default=1)
phone_number = models.CharField(max_length=128)
def save(self, *args, **kwargs):
"""
override save to register user with authy.com and to save
the third party user id for subsequent validation API calls
"""
if not self.authy_id:
authy_api = AuthyApiClient(settings.AUTHY_API_KEY)
authy_user = authy_api.users.create(self.user.email,
self.phone_number,
self.country_code)
if authy_user.ok():
self.authy_id = authy_user.id
super(AuthyAdminUser, self).save(*args, **kwargs)
def check_token(self, token):
"""
returns True iff token is a valid authy.com two-factor token
for the given user
"""
authy_api = AuthyApiClient(settings.AUTHY_API_KEY)
verification = authy_api.tokens.verify(self.authy_id, token)
return verification.ok()
``` |
{
"source": "jhmarcus/spatial-random-walk",
"score": 3
} |
#### File: spatial-random-walk/code/genotype_simulator.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
import msprime
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
import pickle as pkl
import os
class GenotypeSimulator(object):
"""Class for simulating genotypes under the coalescent
given a habitat, a directed graph which individuals migrate
over
Arguments
---------
hab : Habitat
habitat object
sim_path: str
path to simulation pkl file
chrom_length: float
length of chrom to simulate
mu: float
mutation rate
n_samp: int
n haploid samples per deme
n_rep: int
number of indepdent regions to simulate from
eps: float
min derived allele frequency for filtering out rare variants
Attributes
----------
hab : Habitat
habitat object
chrom_length: float
length of chrom to simulate
mu: float
mutation rate
n_samp: int
n haploid samples per deme
n_rep: int
number of indepdent regions to simulate from
eps: float
min derived allele frequency for filtering out rare variants
y : array
n x p genotype matrix
tree_sequences :
geneologies object
n : int
number of individuals
p : int
number of snps
"""
def __init__(self, hab, sim_path, chrom_length=1, mu=1e-3, n_e=1,
n_samp=10, n_rep=1e4, eps=.05):
# habitat object
self.hab = hab
# choromosome length
self.chrom_length = chrom_length
# mutation rate
self.mu = mu
# effective sizes
self.n_e = n_e
# number of haploids per deme
self.n_samp = n_samp
        # number of independent chunks to simulate
self.n_rep = n_rep
# min derived allele frequency to filter out
self.eps = eps
# if the simulation was already performed extract genotypes
if os.path.exists(sim_path):
with open(sim_path, 'rb') as geno:
self.y = pkl.load(geno)
# otherwise run the simulation
else:
# simulate geneologies from the defined model
self._simulate_trees()
self._simulate_genotypes()
with open(sim_path, 'wb') as geno:
pkl.dump(self.y, geno)
# number of snps
self.n, self.p = self.y.shape
# node ids for each individual
self.v = np.repeat(self.hab.v, int(self.n / self.hab.d)).T
# spatial positions for each individual
self.s = np.vstack([np.repeat(self.hab.s[:,0], int(self.n / self.hab.d)),
np.repeat(self.hab.s[:,1], int(self.n / self.hab.d))]).T
def _simulate_trees(self):
"""Simulate trees under the coalescent migration model
defined in the habitat with constant population
sizes
"""
# simulate trees
population_configurations = [msprime.PopulationConfiguration(sample_size=self.n_samp) for _ in range(self.hab.d)]
self.tree_sequences = msprime.simulate(population_configurations=population_configurations,
migration_matrix=self.hab.m.tolist(),
length=self.chrom_length,
mutation_rate=self.mu,
num_replicates=self.n_rep,
Ne=self.n_e)
def _simulate_genotypes(self):
"""Extract trees and simulate mutations in each
independent region to obtain a genotype matrix
"""
# extract mutations
genotypes = []
# loop through each region
for i,tree_sequence in enumerate(self.tree_sequences):
if i % 250 == 0:
print('extracting tree {}'.format(i))
shape = tree_sequence.get_num_mutations(), tree_sequence.get_sample_size()
g = np.empty(shape, dtype="u1")
# loop through each tree
for variant in tree_sequence.variants():
g[variant.index] = variant.genotypes
genotypes.append(g.T)
# (n*d) x p genotype matrix
self.y = np.hstack(genotypes)
print("n={},p={}".format(self.y.shape[0], self.y.shape[1]))
def filter_rare_var(self):
"""Filter out rare variants
"""
daf = np.sum(self.y, axis=0) / self.n
idx = np.where((daf >= self.eps) & (daf <= (1. - self.eps)))[0]
self.y = self.y[:,idx]
self.n, self.p = self.y.shape
def geno_dist(self):
"""Compute the observed genetic distance between
individuals
Returns
-------
d_gen : array
n x n array of observed genetic distances for each
pair
"""
# mean frequencies for each snp
mu = np.mean(self.y, axis=0, keepdims=True)
d_gen = squareform(pdist((self.y - mu), metric='seuclidean')) / self.p
return(d_gen)
def pca(self):
"""Run principal components analysis
on the genotype matrix
"""
mu = np.mean(self.y, axis=0)
std = np.std(self.y, axis=0)
z = (self.y - mu) / std
pca = PCA(n_components=50)
pca.fit(z.T)
self.pcs = pca.components_.T
self.pves = pca.explained_variance_ratio_
def plot_sfs(self):
"""Plot the observed site frequency spectrum and neutral expectation
"""
dac = np.sum(self.y, axis=0)
x = np.arange(1, self.n) / self.n
sfs = np.histogram(dac, bins=np.arange(1, self.n + 1))[0]
plt.semilogy(x, sfs / sfs[0], '.')
plt.semilogy(x, 1 / (x * self.n), '--')
plt.xlabel('Derived Allele Frequency')
plt.ylabel('log(Count)')
def plot_pca(self, pcs, pves, figsize=(12, 6)):
"""Plot PC1 vs PC2 and scree plot
Arguments:
pcs : array
pcs output from pca
pves : array
proportion of variance explained for each pc
"""
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False, figsize=figsize)
# figure 1
ax1.scatter(pcs[:,0], pcs[:,1], c=self.s[:,0]**2 + (np.sqrt(self.hab.d) / 2) * self.s[:,1], cmap=cm.viridis)
ax1.set_xlabel('PC1 ({})'.format(np.round(pves[0], 4)))
ax1.set_ylabel('PC2 ({})'.format(np.round(pves[1], 4)))
# figure 2
ax2.scatter(np.arange(pves.shape[0]), pves)
ax2.set_xlabel('PC')
ax2.set_ylabel('PVE')
def plot_dist(self, d_x_tril, d_y_tril, lab_x, lab_y):
"""
"""
fit = np.polyfit(d_x_tril, d_y_tril, 1)
plt.scatter(d_x_tril, d_y_tril, marker='.', alpha=.5)
plt.plot(d_x_tril, fit[0] * d_x_tril + fit[1], c='orange')
plt.xlabel(lab_x)
plt.ylabel(lab_y)
def node_to_obs_mat(self, x, n, v):
"""Converts node level array to data level array
Arguments:
x : array
array at the level of nodes
n : int
number of observations
v : array
array carraying the node ids for each
observation
Returns:
y : array
array at the level of observations repeated
from the node level array
"""
y = np.zeros((n, n))
for i in range(n):
for j in range(n):
y[i,j] = x[v[i], v[j]]
return(y)
```
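A minimal sketch of how these pieces fit together (not part of the repository): a `SquareLattice` habitat (defined in `habitat.py` below) is given a uniform migration surface, and the resulting object is what `GenotypeSimulator` expects as its `hab` argument. The grid size, migration rate, and file name are illustrative assumptions:
```python
import numpy as np
from habitat import SquareLattice
from genotype_simulator import GenotypeSimulator

class UniformLattice(SquareLattice):
    def migration_surface(self, m_rate=0.1):
        # assign the same migration rate to every directed edge of the lattice
        self.m = np.zeros((self.d, self.d))
        for i, j in self.g.edges():
            self.g[i][j]['m'] = m_rate
            self.m[i, j] = m_rate

hab = UniformLattice(r=4, c=4)
hab.migration_surface()
hab.get_graph_lapl()
d_geo = hab.geo_dist()    # pairwise geographic distances between demes
d_coal = hab.coal_dist()  # expected coalescent distances between demes

geno = GenotypeSimulator(hab, sim_path='sim.pkl', n_rep=100)  # simulates (or reloads) genotypes
geno.filter_rare_var()
d_gen = geno.geno_dist()  # observed genetic distances between individuals
```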
#### File: spatial-random-walk/code/habitat.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as nx
import numpy as np
from scipy.linalg import pinvh
from scipy.sparse.linalg import cg
from scipy.sparse import csr_matrix
from scipy.spatial.distance import pdist, squareform
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
class Habitat(object):
"""Class for defining, visualzing and computing on a
habitat which is a directed graph with a set of specified
edge weights
Attributes
----------
g : nx directed graph
directed graph object storing the Habitat
d : int
number of nodes in the graph
m : array
d x d matrix storing the migration
rates
"""
def __init__(self):
# graph object
self.g = None
# number of nodes in the graph
self.d = None
# migration matrix storing non-negative edge weights
self.m = None
# d x 2 matrix of spatial positions
self.s = None
def migration_surface(self):
"""User defined method to define edge weights in the graph
as this will vary often between different simulations
"""
raise NotImplementedError("migration_surface is not implemented")
def get_graph_lapl(self):
"""Computes the graph laplacian which is
a d x d matrix where L = I - M as M is markov
matrix its rows sum to 1
"""
# adding diagonal to migration matrix
m = np.zeros((self.d, self.d))
diag = 1. - np.sum(self.m, axis=1)
diag_idx = np.diag_indices(self.d)
m = np.array(self.m.tolist())
m[diag_idx] = diag
self.l = np.eye(self.d) - m
def rw_dist(self, q):
"""Computes a random walk distance between nodes
on the graph defined by the habitat. To compute the
random walk distance the adjaceny matrix must be symmetric
Arguments:
q : array
d x d graph laplacian matrix L or LL'
Returns:
r : array
d x d array of random walk distances between
each node
"""
# invert the graph lapl ... pinvh assumes q is symmetric and psd
q_inv = pinvh(q)
# compute the random walk dist
r = self._cov_to_dist(q_inv)
return(r)
def geo_dist(self):
"""Computes geographic distance between nodes
on the graph defined by the habitat.
Arguments:
s : array
d x 2 array of spatial positions
Returns:
r : array
d x d of geographic distances between each
node
"""
r = squareform(pdist(self.s, metric="seuclidean")) / 2
return(r)
def coal_dist(self, tol=1e-8):
"""Computes expected genetic distance between nodes
on the graph defined by the habitat under a coalescent
stepping stone model for migration with constant population
sizes
Arguments:
tol : float
                tolerance for solving the linear system using conjugate gradient
Returns:
t : array
d x d of expected genetic distances between each
node
"""
        # upper triangular indices, including the diagonal
triu_idx = np.triu_indices(self.d, 0)
# number of within deme equations and between deme equations
n_wb = triu_idx[0].shape[0]
# d x d matrix storing indicies of each pair
h = np.zeros((self.d, self.d), dtype=np.int64)
k = 0
for i in range(self.d):
for j in range(i, self.d):
h[i, j] = k
h[j, i] = k
k += 1
# coefficents of coal time equation
A = np.zeros((n_wb, n_wb))
# solution to coal time equation
b = np.ones(n_wb)
# loop of all unique pairs of demes
for i in range(n_wb):
# deme pair for each row
alpha, beta = (triu_idx[0][i], triu_idx[1][i])
if alpha == beta:
c = h[alpha, beta]
A[i, c] += 1. # add coalescent rate
# loop over neighbors of deme alpha
for gamma in range(self.d):
c = h[beta, gamma]
A[i, c] += self.l[alpha, gamma]
# loop over the neighbors of deme beta
for gamma in range(self.d):
c = h[alpha, gamma]
A[i, c] += self.l[beta, gamma]
#t = np.empty((self.d, self.d))
#t[triu_idx] = np.linalg.solve(A, b)
#t = t + t.T - np.diag(np.diag(t))
A_ = csr_matrix(A)
t_ = cg(A_, b, tol=tol)
t = np.empty((self.d, self.d))
t[triu_idx] = t_[0]
t = t + t.T - np.diag(np.diag(t))
return(t)
def _cov_to_dist(self, sigma):
"""Converts covariance matrix to distance matrix
Arguments:
sigma : np.array
covariance matrix
Returns:
d : np.array
distance matrix
"""
n = sigma.shape[0]
ones = np.ones(n).reshape(n, 1)
sigma_diag = np.diag(sigma).reshape(n, 1)
d = ones.dot(sigma_diag.T) + sigma_diag.dot(ones.T) - (2. * sigma)
return(d)
def plot_habitat(self, node_size, edge_width_mult, arrows=False):
"""Plot the habitat as weighted directed graph
Arguments:
node_size: float
size of nodes in plot
edge_width_mult: float
multiplier of edge weights in plot
"""
# extract edge weights
weights = [self.g[i][j]['m'] for i,j in self.g.edges() if self.g[i][j]['m'] != 0.0]
# extract non-zero edges
edges = [(i,j) for i,j in self.g.edges() if self.g[i][j]['m'] != 0.0]
# draw the habitat
nx.draw(self.g, pos=self.pos_dict, node_size=node_size,
node_color=(self.s[:,0]**2 + (np.sqrt(self.d) / 2) * self.s[:,1]),
cmap=cm.viridis, arrows=arrows, edgelist=edges,
width=edge_width_mult*np.array(weights), edge_color='gray')
def plot_migration_matrix(self):
"""Plot the migration matrix as a heatmap
"""
plt.imshow(self.m, cmap=cm.viridis)
plt.colorbar()
def plot_precision_matrix(self, q):
"""Plots the precision matrix as a heatmap
Arguments:
q : array
n x n graph laplacian L or LL'
"""
plt.imshow(q, cmap='seismic', norm=mpl.colors.Normalize(vmin=-np.max(q),
vmax=np.max(q)))
plt.colorbar()
class TriangularLattice(Habitat):
"""Class for a habitat that is a triangular latttice
Arguments
---------
r: int
number of rows in the latttice
c: int
number of columns in the lattice
Attributes
----------
g : nx directed graph
directed graph object storing the Habitat
d : int
number of nodes in the graph
m : array
d x d matrix storing the migration
rates
r : int
number of rows in the latttice
c : int
number of columns in the lattice
pos_dict : dict
dictionary of spatial positions
v : array
array of node ids
s : array
d x 2 array of spatial positions
"""
def __init__(self, r, c):
# inherits from Habitat
super().__init__()
# number of rows
self.r = r
# number of cols
self.c = c
# number of nodes
self.d = self.r * self.c
# create the graph
self.g = nx.generators.lattice.triangular_lattice_graph(r - 1, 2 * c - 2, with_positions=True)
# make node ids ints
self.g = nx.convert_node_labels_to_integers(self.g)
# convert to directed graph
self.g = self.g.to_directed()
# dictionary of positions
self.pos_dict = nx.get_node_attributes(self.g, "pos")
# array of node ids
self.v = np.array(list(self.g.nodes()))
# array of spatial positions
self.s = np.array(list(self.pos_dict.values()))
class SquareLattice(Habitat):
"""Class for a habitat that is a square latttice
Arguments
---------
r: int
number of rows in the latttice
c: int
number of columns in the lattice
Attributes
----------
g : nx directed graph
directed graph object storing the Habitat
d : int
number of nodes in the graph
m : array
d x d matrix storing the migration
rates
r : int
number of rows in the latttice
c : int
number of columns in the lattice
pos_dict : dict
dictionary of spatial positions
v : array
array of node ids
s : array
d x 2 array of spatial positions
"""
def __init__(self, r, c):
# inherits from Habitat
super().__init__()
# number of rows
self.r = r
# number of cols
self.c = c
# number of nodes
self.d = self.r * self.c
# create the graph
self.g = nx.grid_2d_graph(self.r, self.c)
# dictionary of positions
self.pos_dict = {}
for i,node in enumerate(self.g.nodes):
self.g.nodes[node]["pos"] = node
self.pos_dict[i] = node
#nx.set_node_attributes(self.g, "pos", self.pos_dict)
# make node ids ints
self.g = nx.convert_node_labels_to_integers(self.g)
# convert to directed graph
self.g = self.g.to_directed()
# array of node ids
self.v = np.array(list(self.g.nodes()))
# array of spatial positions
self.s = np.array(list(self.pos_dict.values()))
class Line(Habitat):
"""Class for a habitat that is a square latttice
Arguments
---------
d: int
number of nodes in the lattice
Attributes
----------
g : nx directed graph
directed graph object storing the Habitat
d : int
number of nodes in the graph
m : array
d x d matrix storing the migration
rates
pos_dict : dict
dictionary of spatial positions
v : array
array of node ids
s : array
d x 2 array of spatial positions
"""
def __init__(self, d):
# inherits from Habitat
super().__init__()
# number of nodes
self.d = d
# create the graph
self.g = nx.grid_graph([self.d])
# dictionary of positions
self.pos_dict = {}
for i,node in enumerate(self.g.nodes):
self.g.nodes[node]["pos"] = (node, 0.)
self.pos_dict[i] = (node, 0.)
#nx.set_node_attributes(self.g, "pos", self.pos_dict)
# make node ids ints
self.g = nx.convert_node_labels_to_integers(self.g)
# convert to directed graph
self.g = self.g.to_directed()
# array of node ids
self.v = np.array(list(self.g.nodes()))
# array of spatial positions
self.s = np.array(list(self.pos_dict.values()))
class Circle(Habitat):
"""Class for a habitat that is a cirlce
Arguments
---------
d: int
number of node (demes)
Attributes
----------
g : nx directed graph
directed graph object storing the Habitat
d : int
number of nodes in the graph
m : array
d x d matrix storing the migration
rates
pos_dict : dict
dictionary of spatial positions
v : array
array of node ids
s : array
d x 2 array of spatial positions
"""
def __init__(self, d):
super().__init__()
# number of nodes
self.d = d
# create the graph
self.g = nx.cycle_graph(d)
# make node ids ints
self.g = nx.convert_node_labels_to_integers(self.g)
# convert to directed graph
self.g = self.g.to_directed()
# dictionary of positions
self.pos_dict = nx.circular_layout(self.g)
# array of node ids
self.v = np.array(list(self.g.nodes()))
# array of spatial positions
self.s = np.array(list(self.pos_dict.values()))
``` |
{
"source": "jhmartin/taskwarrior-effort-tracker",
"score": 2
} |
#### File: jhmartin/taskwarrior-effort-tracker/onmodify.timetrack.py
```python
import calendar
import json
import os
import re
import sys
from datetime import datetime
from datetime import timedelta
LEDGERFILE = '/Users/jhmartin/.task/timetrack.ledger'
if 'TIMELOG' in os.environ:
LEDGERFILE = os.environ['TIMELOG']
def adjust_date(d, adjust_by):
if not isinstance(d, datetime):
d = tw_to_dt(d)
d -= timedelta(minutes=int(adjust_by))
return d
def tw_to_dt(s):
""" Taskwarrior JSON date ---> datetime object. """
return datetime.strptime(s, "%Y%m%dT%H%M%SZ")
def dt_to_tw(d):
""" datetime object ---> Taskwarrior JSON date. """
return d.strftime("%Y%m%dT%H%M%SZ")
old = json.loads(sys.stdin.readline())
new = json.loads(sys.stdin.readline())
annotation_added = ('annotations' in new and not 'annotations' in old) \
or \
('annotations' in new and 'annotations' in old and \
len(new['annotations']) > len(old['annotations']))
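# Convention used below: if the newest annotation on the task is a bare integer N, it is
# interpreted as "this start/stop actually happened N minutes ago"; the timestamp is shifted
# back by N minutes and that annotation is stripped from the task.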
# task started
if ('start' in new and not 'start' in old) and annotation_added:
new['annotations'].sort(key=lambda anno: anno['entry'])
m = re.match('^[0-9]+$', new['annotations'][-1]['description'])
if m:
new['start'] = dt_to_tw(adjust_date(new['start'], int(m.group(0))))
new['annotations'] = new['annotations'][:-1]
if not new['annotations']:
del new['annotations']
print("Timelog: Started task %s minutes ago." % m.group(0))
if tw_to_dt(new['start']) < tw_to_dt(new['entry']):
new['entry'] = new['start']
# task stopped
if 'start' in old and not 'start' in new:
started_utc = tw_to_dt(old['start'])
started_ts = calendar.timegm(started_utc.timetuple())
started = datetime.fromtimestamp(started_ts)
stopped = datetime.now()
delta = (stopped - started).total_seconds()
if annotation_added:
new['annotations'].sort(key=lambda anno: anno['entry'])
m = re.match('^[0-9]+$', new['annotations'][-1]['description'])
if m:
new['annotations'] = new['annotations'][:-1]
if not new['annotations']:
del new['annotations']
stopped = adjust_date(stopped, m.group(0))
if stopped < started:
print("ERROR: Stop date -%s minutes would be before the start date!" % m.group(0))
sys.exit(1)
print("Timelog: Stopped task %s minutes ago." % m.group(0))
newentry = started.strftime("%Y/%m/%d") + ","
newentry += new['uuid'] + ","
newentry += str(int(delta)) + ","
projectlabel= new['project'].replace('.', ':') if 'project' in new else "no project"
newentry += projectlabel
newentry += ","
newentry += new['description'] + "\n"
print(json.dumps(new))
``` |
{
"source": "jhmclean/fork_DLF",
"score": 2
} |
#### File: fork_DLF/python/gamma_model.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os
import random
import time
from util import *
from sklearn.metrics import *
import sys
class Model:
def __init__(self, lr_1, lr_2, l2_loss_weight, batch_size, dimension, theta0, util_train, util_test, campaign):
self.lr_1 = lr_1
self.lr_2 = lr_2
self.util_train = util_train
self.util_test = util_test
self.train_data_amt = util_train.get_data_amt()
self.test_data_amt = util_test.get_data_amt()
self.batch_size = batch_size
self.batch_num = int(self.train_data_amt / self.batch_size)
self.l2_loss_weight = l2_loss_weight
self.campaign = campaign
# output directory
model_name = "{0}_{1}_{2}_{3}".format(self.lr_1, self.lr_2, self.l2_loss_weight, self.batch_size)
self.output_dir = 'output/gamma/{}/{}/'.format(campaign, model_name)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# reset graph
tf.reset_default_graph()
# placeholders
self.X = tf.sparse_placeholder(tf.float64)
self.z = tf.placeholder(tf.float64)
self.b = tf.placeholder(tf.float64)
self.y = tf.placeholder(tf.float64)
# trainable variables
self.theta = tf.Variable([theta0], name = 'theta', dtype=tf.float64)
# tf.reshape(self.theta, [1, 1])
all_train_data = self.util_train.get_all_data_origin()
self.init_ks_value = all_train_data[3] * all_train_data[2] / theta0 + (1 - all_train_data[3]) * all_train_data[1] / theta0
self.ks = tf.Variable(self.init_ks_value, name='ks', dtype=tf.float64)
self.w = tf.Variable(initial_value=tf.truncated_normal(shape=[dimension, 1], dtype=tf.float64), name='w')
# computation graph phase1
self.ps = tf.pow(self.z, (self.ks - 1.)) * tf.exp(-self.z / self.theta) \
/ tf.exp(tf.lgamma(self.ks)) / tf.pow(self.theta, self.ks)
self.cs = tf.igamma(self.ks, self.b / self.theta) / tf.exp(tf.lgamma(self.ks))
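# ps is the Gamma(k_i, theta) density evaluated at the observed market price z, and
# cs is the regularized lower incomplete gamma function, i.e. the Gamma CDF at bid b,
# interpreted here as the probability that bid b wins (market price below b).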
self.loss_win = tf.log(self.ps)
self.loss_lose = tf.log(1 - self.cs)
self.loss_phase1 = -tf.reduce_mean(self.y * self.loss_win + (1 - self.y) * self.loss_lose)
self.optimizer1 = tf.train.GradientDescentOptimizer(self.lr_1)
self.train_step1 = self.optimizer1.minimize(self.loss_phase1)
# phase 2
self.label_phase2 = tf.placeholder(tf.float64)
self.log_label_phase2 = tf.log(self.label_phase2)
self.loss_phase2 = tf.reduce_mean(tf.square(tf.sparse_tensor_dense_matmul(self.X, self.w) - self.log_label_phase2)) \
+ self.l2_loss_weight * tf.nn.l2_loss(self.w)
self.optimizer2 = tf.train.MomentumOptimizer(self.lr_2, 0.9)
self.train_step2 = self.optimizer2.minimize(self.loss_phase2)
# session initialization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
tf.global_variables_initializer().run(session=self.sess)
def train_phase1(self, train_round = 50):
# get all batches data
x, b, z, y = self.util_train.get_all_data_origin()
feed_dict = {}
feed_dict[self.X] = tf.SparseTensorValue(x, [1] * len(x), [b.shape[0], dimension])
feed_dict[self.b] = b
feed_dict[self.z] = z
feed_dict[self.y] = y
print("begin training phase 1")
for i in range(train_round):
self.sess.run(self.train_step1, feed_dict)
loss = self.sess.run(self.loss_phase1, feed_dict)
print("train loss of phase-1, iteration-{0} is {1}".format(i, loss))
def train_phase2(self):
self.ks_const = self.ks.eval(session=self.sess) #np array
self.theta_const = self.theta.eval(session=self.sess) #np array
step = 0
epoch = 0
loss_list = []
batch_loss = []
print("begin training phase 2")
while True:
x_batch, b_batch, z_batch, y_batch, ks_batch = self.util_train.get_batch_data_origin_with_ks(step, self.ks_const)
feed_dict = {}
feed_dict[self.X] = tf.SparseTensorValue(x_batch, [1] * len(x_batch), [self.batch_size, dimension])
feed_dict[self.b] = b_batch
feed_dict[self.z] = z_batch
feed_dict[self.y] = y_batch
feed_dict[self.label_phase2] = self.theta_const * ks_batch
self.sess.run(self.train_step2, feed_dict)
batch_loss.append(self.sess.run(self.loss_phase2, feed_dict))
step += 1
if step * self.batch_size - epoch * int(0.02 * self.train_data_amt) >= int(0.02 * self.train_data_amt):
loss = np.mean(batch_loss[step - int(int(0.02 * self.train_data_amt) / self.batch_size) - 1:])
loss_list.append(loss)
print("train loss of phase2 epoch-{0} is {1}".format(epoch, loss))
epoch += 1
# stop condition
if epoch * 0.02 * self.train_data_amt <= 5 * self.train_data_amt:
continue
if (loss_list[-1] - loss_list[-2] > 0 and loss_list[-2] - loss_list[-3] > 0):
break
if epoch * 0.02 * self.train_data_amt >= 20 * self.train_data_amt:
break
# draw SGD training process
x = [i for i in range(len(loss_list))]
plt.plot(x, loss_list)
plt.savefig(self.output_dir + 'train_phase2.png')
plt.gcf().clear()
def test(self):
print('Test begin')
self.pred_mp = tf.exp(tf.sparse_tensor_dense_matmul(self.X, self.w))
self.MSE = tf.reduce_mean(tf.square(self.z - self.pred_mp))
x, b, z, y = self.util_test.get_all_data_origin()
feed_dict = {}
feed_dict[self.X] = tf.SparseTensorValue(x, [1] * len(x), [self.test_data_amt, dimension])
feed_dict[self.z] = z
feed_dict[self.y] = y
feed_dict[self.b] = b
# calculate MSE
mse = self.sess.run(self.MSE, feed_dict)
print("MSE: {}".format(mse))
ks = self.pred_mp / self.theta
ps = tf.pow(self.z, (ks - 1.)) * tf.exp(-self.z / self.theta) / tf.pow(self.theta, ks) / tf.exp(tf.lgamma(ks))
cs = tf.igamma(ks, self.b / self.theta) / tf.exp(tf.lgamma(ks))
# calculate AUC and LogLoss
win_rate = self.sess.run(cs, feed_dict)
auc = roc_auc_score(y, win_rate)
print("AUC: {}".format(auc))
logloss = log_loss(y, win_rate)
print("Log Loss: {}".format(logloss))
# calculate ANLP
logp = -tf.log(ps)
logp_arr = self.sess.run(logp, feed_dict)
logp_arr[np.isnan(logp_arr)] = 1e-20 #for overflow values, minor
logp_arr[logp_arr == 0] = 1e-20
anlp = np.mean(logp_arr)
print("ANLP: {}".format(anlp))
# save result and params
fin = open(self.output_dir + 'result.txt', 'w')
fin.writelines(["MSE: {0} AUC: {1} Log Loss: {2} ANLP: {3}\n".format(mse, auc, logloss, anlp)])
fin.close()
np.save(self.output_dir + 'w', self.sess.run(self.w))
np.save(self.output_dir + 'k', self.sess.run(ks, feed_dict))
np.save(self.output_dir + 'theta', self.sess.run(self.theta))
if __name__ == '__main__':
if len(sys.argv) == 2:
os.environ["CUDA_VISIBLE_DEVICES"] = sys.argv[1]
campaign_list = ['2259']#['3386', '3427', '3476', '1458']#['2997', '2259', '2261', '2821']
for campaign in campaign_list:
train_file = '../data/' + campaign + '/train.yzbx.txt'
test_file = '../data/' + campaign + '/test.yzbx.txt'
feat_index = '../data/' + campaign + '/featindex.txt'
# hyper parameters
lr_1s = [1e-3]
lr_2s = [1e-3]
l2_loss_weights = [0.0001]
batch_sizes = [128]
dimension = int(open(feat_index).readlines()[-1].split('\t')[1][:-1]) + 1
params = []
for lr_1 in lr_1s:
for lr_2 in lr_2s:
for l2_loss_weight in l2_loss_weights:
for batch_size in batch_sizes:
util_train = Util(train_file, feat_index, batch_size, 'train')
util_test = Util(test_file, feat_index, batch_size, 'test')
params.append([lr_1, lr_2, l2_loss_weight, batch_size, util_train, util_test])
# search hyper parameters
random.shuffle(params)
for para in params:
model = Model(lr_1=para[0], lr_2=para[1], l2_loss_weight=para[2], batch_size=para[3],
dimension=dimension, theta0=para[4].get_max_z(), util_train=para[4], util_test=para[5], campaign=campaign)
model.train_phase1()
model.train_phase2()
try:
model.test()
except:
continue
``` |
{
"source": "j-hmd/daily-python",
"score": 4
} |
#### File: daily-python/Object-Oriented-Python/class_methods.py
```python
class Book:
# Class variable
BOOK_TYPES = ("NOVEL", "BIOGRAPHY")
def __init__(self, title, bookType):
self.title = title
if not bookType in Book.BOOK_TYPES: # Have to specify here that we're accessing the class' variable!
raise ValueError(f"{bookType} is not a valid book type") # Seems like we use 'f' to be able to access the parameter bookType...
else:
self.bookType = bookType
# Example of istance method, which is applied to an instance of
# that class
def GetTitle(self):
return self.title
# We could create an class method, that is applicable
# to the whole class, usually denoted in CAPS
@classmethod
def GetBookTypes(cls):
return cls.BOOK_TYPES
# Private variable to the class
__book_list = None # Need more info on what this means in python
# Static methods function like global functions, and could be used
# to implement a singleton pattern for example.
# It's a good way to namespace the otherwise global method.
@staticmethod
def GetBookList():
if Book.__book_list == None:
Book.__book_list = []
return Book.__book_list
# Class methods are accessible via the class itself and not an instance of the class
print("Books: ", Book.GetBookTypes())
b1 = Book("War and Peace", "NOVEL")
b2 = Book("<NAME>", "BIOGRAPHY")
print(b1.GetTitle())
sBookList = Book.GetBookList()
sBookList.append(b1)
sBookList.append(b2)
print(sBookList[0].title, "and", sBookList[1].title)
```
#### File: daily-python/Object-Oriented-Python/dataclasses_default.py
```python
from dataclasses import dataclass, field
from random import randrange
def random_price():
return randrange(2, 7)
@dataclass
class Beverage:
# default values. If one of the values doesn't have a default it should come first
name: str = 'unknown'
# if we need more flexibility with the value, we can use the field method
temp: str = field(default = 'boiling hot')
# and in the field method, the default_factory is a callable object to provide even more freedom
price: float = field(default_factory=random_price)
# the post init method allows you to add attributes that rely on other attributes and have more flexibility in the dataclass
def __post_init__(self):
self.description = f'{self.name} is {self.temp}'
b1 = Beverage()
print(b1.name)
b2 = Beverage('coffee')
print(b2.name)
print(b2.price)
```
#### File: daily-python/Object-Oriented-Python/definition_start.py
```python
class Empty(): # We don't necessarily need to use the (), only if we're looking at inheritance
pass
e1 = Empty()
print (e1)
# Book Class
class Book:
# I think here we're "overriding the __init__ method, that is a special method in python."
def __init__(self, title): # Initializer function. Not quite the "constructor", because the object has already been constructed at this point.
self.title = title # Create an attribute named title, and pass it "title"
# Instantiating the book class
b1 = Book("War and peace") # We're only passing one argument, because on calls to objects, the first argument will always be the object itself.
# Looking at the book class:
print (b1)
print (b1.title)
```
#### File: daily-python/Object-Oriented-Python/immutable_intro.py
```python
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferencePoint:
x: int = 100
y: int = 200
def SetPoint(self, new_x, new_y):
self.x = new_x
self.y = new_y
p1 = ReferencePoint()
print(p1.x)
# If we try modifying the contents of that class we get an error:
# File "<string>", line 4, in __setattr__
# dataclasses.FrozenInstanceError: cannot assign to field 'x'
# p1.x = 10
# And even the class itself can't change its contents when
# the attribute frozen is set to true in the dataclass decorator
# p1.SetPoint(10, 20)
```
#### File: daily-python/Object-Oriented-Python/interfaces_minheritance.py
```python
from abc import ABC, abstractmethod
class JSONify(ABC): # don't forget to add this inheritance from ABS
@abstractmethod
def ToJson(self):
pass
class GraphicShape(ABC):
def __init__(self):
super().__init__()
@abstractmethod
def GetArea(self):
pass
# We can add to the square class the functionality of JSONify
# through the abstractmethod implementation.
class Square(GraphicShape, JSONify):
def __init__(self, side):
super().__init__()
self.side = side
def GetArea(self):
return self.side ** 2
def ToJson(self):
return f"{{ \"square\" : {str(self.GetArea())} }}" # This is crazy useful way of making strings?
s1 = Square(3)
print(s1.GetArea())
print(s1.ToJson())
```
#### File: daily-python/Object-Oriented-Python/magic_call.py
```python
class Book:
def __init__(self, title, author, price):
super().__init__()
self.title = title
self.author = author
self.price = price
def __str__(self):
return f"{self.title} by {self.author}, costs {self.price}"
def __call__(self, title, author, price):
self.title = title
self.author = author
self.price = price
b1 = Book("A mao e a luva", "Machado de Assis", 29.99)
print(b1)
# Here we can call the object, as if it were a function, to modify its attributes
b1("<NAME>", "Machado de Assis", 25.50)
print(b1)
```
#### File: daily-python/Object-Oriented-Python/magic_comparison_method.py
```python
class Book:
def __init__(self, name, author, price):
self.name = name
self.author = author
self.price = price
def __eq__(self, lhs):
if not isinstance(lhs, Book):
raise ValueError("Can't perform comparison on non book objects.")
return (self.name == lhs.name and \
self.author == lhs.author and \
self.price == lhs.price)
def __ge__(self, lhs):
return self.price >= lhs.price
def __lt__(self, lhs):
return self.price < lhs.price
b1 = Book("Catcher in the rye", "<NAME>", 32.99)
b2 = Book("Pride and prejudice", "<NAME>", 24.50)
b3 = Book("Catcher in the rye", "<NAME>", 32.99)
b4 = Book("War and Peace", "<NAME>", 35.99)
# Before the overriding of the equality operator, this would evaluate to false,
# because python compares with the instance in memory, and not the attributes
print(b1==b3)
print(b2>b3)
print(b1<b4)
# Because our objects can now be compared to each other, we can sort them!
# Sorting them from smallest to biggest
books = [b1, b2, b3, b4]
books.sort()
print([book.name for book in books])
```
#### File: daily-python/Object-Oriented-Python/magic_getattr.py
```python
class Book:
def __init__(self, title, author, price):
super().__init__()
self.title = title
self.author = author
self.price = price
self._discount = 0.1
def __str__(self):
return f"{self.title} by {self.author} costs {self.price}"
# We have to call this using the super(), otherwise we'll be stuck in a loop
def __getattribute__(self, name):
if name == "price":
p = super().__getattribute__("price")
d = super().__getattribute__("_discount")
return p - (p * d)
return super().__getattribute__(name)
def __setattr__(self, attrname, value):
if attrname == "price":
if type(value) is not float:
raise ValueError("Error: Price must be float.")
return super().__setattr__(attrname, value) # careful with logic, if there was an else, the value would never be returned
# The getattr method is only called if the __getattribute__ fails, or if it's not defined.
def __getattr__(self, name):
return name + " is not present!"
b1 = Book("Pride and Prejudice", "<NAME>", 34.99)
print(b1)
# Get a value error, must be a float
#b1.price = 10
print(b1.nothing)
```
#### File: daily-python/SOLID-Principles/blackjack_notSOLID.py
```python
import random
"""
This class has some problems with it,
it both implements a deck of cards, and the cards themselves
It also implements the point counting, which is particular to BlackJack, so
we wouldn't be able to reuse this class if we wanted to.
"""
class Card():
def __init__(self):
self.cards = [(rank, suit) # A list of tuples, which represents a single card
for rank in range(1, 14) # range doesn't include the last number!
for suit in 'HSCD'] # Hearts, Spades, Clubs, Diamonds
random.shuffle(self.cards)
def deal(self):
return self.cards.pop()
def points(self, card):
rank, suit = card
if rank == 1:
return (1,11)
elif 2 <= rank < 11:
return (rank,rank)
else:
return (10,10)
class Shoe(Card):
def __init__(self, n):
super().__init__()
self.shoe = []
for _ in range(n):
self.shoe.extend(self.cards)
random.shuffle(self.shoe)
def shuffle_burn(self, n=100):
random.shuffle(self.shoe)
self.shoe = self.shoe[n:]
def deal(self):
return self.shoe.pop()
"""
Articulating the problems:
- Mixed responsibilities: card, deck, points
- Missing responsibilities: total points to the black jack game?
- Limit reuse: can't use for cribbage for example
- Not substitutable: can't use a shoe in the place of a card
- Haphazard interface: (?) should be iterable.
Goals for our solid design: prevent problems, and differentiate changes that would be relevant.
"""
```
#### File: daily-python/SOLID-Principles/ISP_cardClass.py
```python
import random
# Here we're separating the classes into Card class, and a Black Jack Card class
# The interface segregation principle says that the client of an interface
# should not have to deal with methods it doesn't need
# if we kept the classes together, clients of Card, might not use the Hard()
# and Soft() methods.
class Card:
def __init__(self, rank, suit):
self.rank = rank
self.suit = suit
def __str__(self):
return f"{self.rank} of {self.suit}"
class BlackJackCard(Card):
def __init__(self, *args):
super().__init__(*args)
self._hard = self.rank if self.rank <= 10 else 10
self._soft = 11 if self.rank == 1 else self.rank if self.rank <= 10 else 10
def Hard(self):
return self._hard
def Soft(self):
return self._soft
class Deck:
def __init__(self, CardType):
self.deck = [CardType(rank, suit)
for rank in range(1, 14)
for suit in ["Hearts", "Spades", "Clubs", "Diamonds"]]
random.shuffle(self.deck)
def Deal(self):
return self.deck.pop()
d1 = Deck(BlackJackCard)
#card = d1.Deal()
#print(card)
#print(card.Hard())
#print(card.Soft())
print(len(d1.deck))
for card in d1.deck:
print(card, "Soft Value=", card.Soft(), "Hard Value=", card.Hard())
```
#### File: daily-python/SOLID-Principles/ISP_DeckClass.py
```python
import random
# The DeckList class inherits from the Python build in class list
class DeckList(list):
# Here we already have the methods: pop, insert
# which seem to be all we need to implement this class
pass
# And if we want to build a cribbage deck based on this class we do so:
class CribbageDeck(DeckList):
def Cut(self, depth):
reveal = self.pop(depth)
self.insert(0, reveal)
# This is easy to separate the making of different cards for different games
def card_factory(rank, suit):
return BlackjackCard(rank, suit)
# More on ISP: Lott says that the ISP suggests separating the building,
# modifying, retrieving, etc, of a particular object to another class
# so the individual parts don't influence the others by their change.
# Moving on, we analyze the deck and shoe classes, they have a lot in common,
# but Lott suggests that because the shoe has different functionality of
# burning, this would suggest another class with shared functionality
def deck_builder(card_factory, n=1):
return [card_factory(rank, suit)
for rank in range(1,14)
for suit in ("Hearts", "Spades", "Clubs", "Diamonds")
for _ in range(n)]
class Card:
def __init__(self, rank, suit):
self.rank = rank
self.suit = suit
class BlackjackCard(Card):
def __init__(self, *args):
super().__init__(*args)
self._hard = self.rank if self.rank <=10 else 10
self._soft = 11 if self.rank == 1 else self.rank if self.rank <= 10 else 10
def hard(self):
return self._hard
def soft(self):
return self._soft
from collections.abc import MutableSequence # whose simplest example would be a list
# Then we get to the question of if we should wrap or extend a class
# we could've written the Deck class as wrapping the list build-int class
# we're not inheriting from the List class anymore, we're just exposing a limited
# interface from the list of cards.
class DeckWrapper:
def __init__(self, cards: MutableSequence):
self.cards = cards
def __iter__(self):
return iter(self.cards)
def __getitem__(self, slice_or_index):
return self.cards[slice_or_index]
def __setitem__(self, slice_or_index, value):
self.cards[slice_or_index] = value
def __len__(self):
return len(self.cards)
# Lott is saying that in python wrapping is not the best because we don't have static type checking?
# Then to implement the deck class, we're adding the burn method, but we didn't implement the __delitem__
# in the class DeckWrapper, which exposes one limitation of wrapping: we don't know how many methods in advance we will
# need to expose for a particular purpose
class Shoe1(DeckWrapper):
def ShuffleBurn(self):
random.shuffle(self)
del self[-100:]
class Shoe2(DeckWrapper):
def ShuffleBurn(self):
random.shuffle(self.cards)
del self.cards[-100:]
deck = DeckWrapper(deck_builder(card_factory))
# AttributeError: __delitem__
# shoe1 = Shoe1(deck_builder(card_factory, 3))
# shoe1.ShuffleBurn()
# print(len(shoe1))
shoe2 = Shoe2(deck_builder(card_factory, 3))
shoe2.ShuffleBurn()
print(len(shoe2))
``` |
{
"source": "JHMeusener/detectron2-ResNeSt",
"score": 2
} |
#### File: JHMeusener/detectron2-ResNeSt/params.py
```python
import configargparse
import argparse
from os.path import join as pjoin
def get_params(path='.'):
p = configargparse.ArgParser(
config_file_parser_class=configargparse.YAMLConfigFileParser,
default_config_files=[pjoin(path, 'default.yaml')])
p.add('-v', help='verbose', action='store_true')
p.add('--aug-n-angles', type=int)
p.add('--epochs', type=int)
p.add('--epochs-pre', type=int)
p.add('--epochs-div-lr', type=int)
p.add('--cp-period', type=int)
p.add('--n-ims-test', type=int)
p.add('--batch-size', type=int)
p.add('--lr', type=float)
p.add('--decay', type=float)
p.add('--momentum', type=float)
p.add('--cuda', default=False, action='store_true')
return p
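# Illustrative usage only (the flag values below are made up); configargparse ignores
# default config files that do not exist, so this also runs without a default.yaml.
if __name__ == '__main__':
    p = get_params('.')
    cfg = p.parse_args(['--epochs', '10', '--lr', '0.001', '--cuda'])
    print(cfg.epochs, cfg.lr, cfg.cuda)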
```
#### File: detectron2-ResNeSt/utils/pascal_ctxt.py
```python
import os
from os.path import join as pjoin
import collections
import json
import numpy as np
from skimage.io import imsave, imread
import scipy.io as io
import matplotlib.pyplot as plt
import glob
class pascalVOCContextLoader:
"""Data loader for the Pascal VOC semantic segmentation dataset.
Annotations from both the original VOC data (which consist of RGB images
in which colours map to specific classes) and the SBD (Berkely) dataset
(where annotations are stored as .mat files) are converted into a common
`label_mask` format. Under this format, each mask is an (M,N) array of
integer values from 0 to 21, where 0 represents the background class.
The label masks are stored in a new folder, called `pre_encoded`, which
is added as a subdirectory of the `SegmentationClass` folder in the
original Pascal VOC data layout.
A total of five data splits are provided for working with the VOC data:
train: The original VOC 2012 training data - 1464 images
val: The original VOC 2012 validation data - 1449 images
trainval: The combination of `train` and `val` - 2913 images
train_aug: The unique images present in both the train split and
training images from SBD: - 8829 images (the unique members
of the result of combining lists of length 1464 and 8498)
train_aug_val: The original VOC 2012 validation data minus the images
present in `train_aug` (This is done with the same logic as
the validation set used in FCN PAMI paper, but with VOC 2012
rather than VOC 2011) - 904 images
"""
def __init__(self, root_imgs, root_segs, split='train'):
self.root_imgs = root_imgs
self.root_segs = root_segs
self.splits = ['train', 'val', 'test']
self.split = split
self.all_base_names_ctxt = [
os.path.splitext(os.path.basename(f))[0]
for f in glob.glob(pjoin(self.root_segs, '*.mat'))
]
# read pascal train and validation sets
with open(pjoin(root_imgs, 'ImageSets', 'Main', 'train.txt')) as f:
self.pascal_train = f.readlines()
self.pascal_train = [x.strip() for x in self.pascal_train]
with open(pjoin(root_imgs, 'ImageSets', 'Main', 'val.txt')) as f:
self.pascal_val = f.readlines()
self.pascal_val = [x.strip() for x in self.pascal_val]
self.base_names = dict()
self.base_names['train'] = [
f for f in self.all_base_names_ctxt if f in self.pascal_train
]
self.base_names['valtest'] = [
f for f in self.all_base_names_ctxt if f in self.pascal_val
]
self.base_names['val'] = self.base_names[
'valtest'][:len(self.base_names['valtest']) // 2]
self.base_names['test'] = self.base_names['valtest'][
len(self.base_names['valtest']) // 2:]
def __len__(self):
return len(self.base_names[self.split])
def __getitem__(self, index):
base_name = self.base_names[self.split][index]
im_path = pjoin(self.root_imgs, 'JPEGImages', base_name + '.jpg')
lbl_path = pjoin(self.root_segs, base_name + '.mat')
im = imread(im_path)
data = io.loadmat(lbl_path)
lbl = data['LabelMap']
return {'image': im, 'labels': lbl, 'base_name': base_name}
class customDatasetContextLoader:
def __init__(self, root_imgs, root_segs, split='train'):
self.root_imgs = root_imgs
self.root_segs = root_segs
self.splits = ['train', 'val', 'test']
self.split = split
self.all_base_names_ctxt = [
os.path.splitext(os.path.basename(f))[0].replace("_depth","")
for f in glob.glob(pjoin(self.root_imgs, '*_depth.png'))
]
self.base_names = dict()
self.base_names['train'] = [
f for f in self.all_base_names_ctxt
]
self.base_names['valtest'] = [
f for f in self.all_base_names_ctxt
]
self.base_names['val'] = self.base_names[
'valtest'][:len(self.base_names['valtest']) // 2]
self.base_names['test'] = self.base_names['valtest'][
len(self.base_names['valtest']) // 2:]
def __len__(self):
return len(self.base_names[self.split])
def __getitem__(self, index):
base_name = self.base_names[self.split][index]
im_path = pjoin(self.root_imgs, base_name + '_L.png')
lbl_path = pjoin(self.root_segs, base_name + '_mask_L.png')
im = imread(im_path)
lbl = imread(lbl_path)
return {'image': im, 'labels': lbl, 'base_name': base_name}
if __name__ == "__main__":
root_path = '/home/ubelix/lejeune/data'
dl = pascalVOCContextLoader(root_imgs=pjoin(root_path, 'pascal-voc',
'VOC2012'),
root_segs=pjoin(root_path, 'trainval'))
c = [0, 0]
r = 1
npts = 1000
theta = np.linspace(0, 2 * np.pi, npts)
x = c[0] + np.cos(theta)
y = c[1] + np.sin(theta)
x_interp, y_interp = contours_to_pts(x, y, n_pts=30)
angles = segments_to_angles(x_interp, y_interp)
bins = bin_angles(angles)
plt.plot(x, y)
for i in range(x_interp.shape[0] - 1):
plt.plot((x_interp[i], x_interp[i + 1]),
(y_interp[i], y_interp[i + 1]),
linewidth=4,
color=plt.cm.RdYlBu(bins[i] / bins.max()))
plt.plot(x_interp, y_interp, 'ro')
plt.grid()
plt.show()
sample = dl[0]  # __getitem__ returns a dict, not an (image, label) tuple
plt.subplot(121)
plt.imshow(sample['image'])
plt.subplot(122)
plt.imshow(sample['labels'])
plt.show()
``` |
{
"source": "jhmfreitas/AI-Project",
"score": 3
} |
#### File: AI-Project/Project2/RL.py
```python
import numpy as np
import random
from tempfile import TemporaryFile
outfile = TemporaryFile()
class finiteMDP:
def __init__(self, nS, nA, gamma, P=[], R=[], absorv=[]):
self.nS = nS #number of states
self.nA = nA #number of actions
self.gamma = gamma #gamma - discount factor
self.Q = np.zeros((self.nS,self.nA)) #Q values for all trajectories
self.P = P
self.R = R
self.absorv = absorv
# complete if necessary
def runPolicy(self, n, x0, poltype = 'greedy', polpar=[]):
#do not change
traj = np.zeros((n,4)) #vector of trajectory values
x = x0
J = 0
for ii in range(0,n):
a = self.policy(x,poltype,polpar) #action
r = self.R[x,a] #reward
y = np.nonzero(np.random.multinomial( 1, self.P[x,a,:]))[0][0] #arrival state
traj[ii,:] = np.array([x, a, y, r]) #record the trajectory for each SARS' tuple
J = J + r * self.gamma**ii #objective to maximize (function J)
if self.absorv[x]:
y = x0
x = y #move on to the arrival state x->y
return J,traj
def VI(self):
#do not change
nQ = np.zeros((self.nS,self.nA))
while True:
self.V = np.max(self.Q,axis=1)
for a in range(0,self.nA):
nQ[:,a] = self.R[:,a] + self.gamma * np.dot(self.P[:,a,:],self.V)
err = np.linalg.norm(self.Q-nQ)
self.Q = np.copy(nQ)
if err<1e-7:
break
#update policy
self.V = np.max(self.Q,axis=1)
#correct for 2 equal actions
self.Pol = np.argmax(self.Q, axis=1)
return self.Q, self.Q2pol(self.Q)
def traces2Q(self, trace): #computes Q values from all trajectories
# implement this function
self.Q = np.zeros((self.nS,self.nA))
temporaryQ = np.zeros((self.nS, self.nA))
alpha=0.1
while True:
for elem in trace:
s=int(elem[0])
a=int(elem[1])
r=elem[3]
s_next=int(elem[2])
temporaryQ[s,a] += alpha*(r+self.gamma*max(temporaryQ[s_next,:]) - temporaryQ[s,a])
dif = np.linalg.norm(self.Q - temporaryQ)
self.Q = np.copy(temporaryQ)
if dif < 1e-2:
break
return self.Q
def policy(self, x, poltype = 'exploration', par = []):
# implement this function
if poltype == 'exploitation':
#use polpar and look up the best action for the current state
a=np.argmax(par[x]) #returns the index of the highest-valued action for that state
elif poltype == 'exploration':
#randint(self.nA - 1)
a=np.random.randint(0,self.nA) #explore randomly to cover the environment better
return a
def Q2pol(self, Q, eta=5):
# implement this function
return np.exp(eta*Q)/np.dot(np.exp(eta*Q),np.array([[1,1],[1,1]]))
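# Minimal usage sketch with a made-up 2-state / 2-action MDP (not part of the assignment):
if __name__ == "__main__":
    nS, nA = 2, 2
    P = np.zeros((nS, nA, nS))
    P[:, 0, 0] = 1.0                         # action 0 always leads to state 0
    P[:, 1, 1] = 1.0                         # action 1 always leads to state 1
    R = np.array([[0.0, 1.0], [0.0, 1.0]])   # reward 1 for picking action 1
    absorv = np.array([False, False])
    mdp = finiteMDP(nS, nA, 0.9, P=P, R=R, absorv=absorv)
    Qopt, pol = mdp.VI()
    J, traj = mdp.runPolicy(20, 0, poltype='exploitation', polpar=Qopt)
    print(Qopt)
    print(J)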
``` |
{
"source": "jhmj-io/bitacademy-challenge-wk49",
"score": 3
} |
#### File: jhmj-io/bitacademy-challenge-wk49/challenge-joep.py
```python
from csv import reader
from functools import reduce
from datetime import datetime
import plotext as plt
print("\nChallenge Junior Data Engineer week 49 - <NAME>")
path = "challenge-ratings.csv"
limit = 9999
# --- generic functions --- #
# --- convert a dictionary to a list --- #
def dict2list (d,l ): #=[]): # hmm?! with a mutable default argument, the l from the previous call would still exist and keep being extended
for r in d: l.append( {r: d[r]})
return l
# --- sum the counts and sum the weighted counts --- #
def asignmentsratingsstat(e):
e["arcw"] = 0
e["arc"] = 0
for r in e[ list(e.keys())[0] ]:
e["arcw"] += int(r) * e[ list(e.keys())[0] ] [r]
e["arc"] += e[ list(e.keys())[0] ] [r]
return e
# --- read csv into list of dictionaries --- #
def map_csv_row2dict (d,r) :
o = dict()
for i, a in enumerate(d):
o[a] = r[i]
return o
def map_csv_rows2list (path):
l = list()
with open(path) as f:
rr = reader( f, delimiter=',', quotechar='|' )
d = next(rr)
d.insert(0,"id")
for i, r in enumerate( rr ):
r.insert(0,i)
l.append( map_csv_row2dict(d,r) )
if i>=limit: break
return l
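# Illustrative shape of one parsed row (the exact header columns come from the CSV; the
# names below are only the ones this script relies on):
#   {'id': 0, 'exercise': 'Flex met boxen', 'rating': '4', 'date': '2021-05-03', ...}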
ratings = map_csv_rows2list(path)
# --- count per assignment, per rating --- #
def sumexerciserating ( a, y):
if y["exercise"] not in a.keys() :
a[y["exercise"]] = {}
if y["rating"] in a[y["exercise"]].keys():
a[y["exercise"]] [y["rating"]] += 1
else:
a[y["exercise"]] [y["rating"]] = 1
return a
ratingsdict = reduce( sumexerciserating, ratings, {} )
ratingslist = dict2list(ratingsdict, [])
# --- sum the counts over all assignments and all ratings --- #
def sumgrand ( a, y):
for r in y [ list(y.keys())[0] ]:
a += y [ list(y.keys())[0] ][r]
return a
ratingscount = reduce( sumgrand, ratingslist, 0 )
# Total number of ratings - 1 star
print("\n1 Ratings aantal")
print(" »","Totaal:", ratingscount)
# Show per assignment how many ratings it has, and give the names of the assignments with an average rating below 3 - 2 stars
# --- compute statistics per assignment: sum of rating counts and average --- #
assignmentsratings = list( map( asignmentsratingsstat, ratingslist))
print("\n2 Ratings per opdracht")
for a in assignmentsratings:
print ( " »",list(a.keys())[0], "-","aantal:" + str(a["arc"]) , "LAGER DAN 3 GEMIDDELD" if a["arcw"]//a["arc"] < 3 else "" )
# --- filter the assignment "Flex met boxen" --- #
ratingsfiltered = list( filter( (lambda x : x['exercise'] == "Flex met boxen") , ratings ) )
# --- count per month, per rating --- #
def sumexerciseratingmonth ( a, y):
d = datetime.strptime(y['date'], '%Y-%m-%d')
yearmonth = str(d.year)+("0"+str(d.month))[-2:]
if yearmonth not in a.keys() :
a[ yearmonth ] = {}
if y["rating"] in a[ yearmonth ].keys():
a[yearmonth] [y["rating"]] += 1
else:
a[yearmonth] [y["rating"]] = 1
return a
ratingsdictfiltered = reduce( sumexerciseratingmonth, ratingsfiltered, {} )
ratingslistfiltered = dict2list( ratingsdictfiltered, [] )
# --- compute statistics per month: sum of rating counts and average --- #
assignmentsratingsfiltered = list( map( asignmentsratingsstat, ratingslistfiltered ))
# --- sort by YYYYMM --- #
assignmentsratingsfiltered.sort(key = lambda r: list(r.keys())[0] )
# On 1 May 2021 the assignment "Flex met boxen" was revised; did that lead to better ratings - 3 stars #
print("\n3 Ratings opdracht 'Flex met boxen' per maand")
for a in assignmentsratingsfiltered:
print ( " »", list(a.keys())[0], "-","aantal:", str(a["arc"])+"," , "gemiddeld:", a["arcw"]//a["arc"] )
print("\n")
# --- prepare lists for plot --- #
months = list( map( lambda m: list(m.keys())[0], assignmentsratingsfiltered ))
monthsaverage = list( map( lambda m: m["arcw"]//m["arc"] , assignmentsratingsfiltered ))
# Data track: make a chart that shows the impact of the revision. - Bonus #
print("\n4 Bonus opdracht 'Flex met boxen' per maand - grafiek\n")
plt.bar(months, monthsaverage)
plt.plot_size(width=90, height=30)
plt.title("Bonus - Ratings opdracht 'Flex met boxen' per maand")
plt.show()
print("\n--- fin ---")
``` |
{
"source": "jhmj-io/bitacademy-challenge-wk51",
"score": 3
} |
#### File: jhmj-io/bitacademy-challenge-wk51/morse.py
```python
import json
import unicodedata
import re
#list( filter( (lambda f: False if f[:2]=="__" or f in ["re","json","unicodedata"] else True ), list(dir(morse) )))
def morse_file(morsefile):
# just to be sure, get the simple (non-extended) morse code
morsefile = "morse.json"
jsonFile = open(morsefile, "r")
jsonContent = jsonFile.read()
jsonFile.close()
return json.loads(jsonContent)
def morse_codes(morse, direction):
codes = {}
if direction=="encode":
for character in morse:
codes[ character["unicode"] ] = character["morse"]
else:
for character in morse:
codes[character["morse"]] = character["latin"]
return codes
def morse_extend( morse, cu ):
morseunknown = {
"latin": " ",
"morse": "-.-.-.-",
"unicode": cu
}
morse.append( morseunknown )
outFile = open("morse_extended.json", 'w')
outFile.write( json.dumps(morse, indent=4, sort_keys=True ) )
outFile.close()
return morse
def morse_encode(message):
morse = morse_file("morse.json")
codes = morse_codes(morse, "encode")
coded = ""
words = re.split(' ', message )
for i, word in enumerate(words):
for j, character in enumerate(word) :
#print(unicodedata.name(character))
# character unicode
cu = unicodedata.name(character)
cus = re.split(' ', unicodedata.name(character) )
if cus[0]=="LATIN" and cus[2] + " " + cus[3] in list(codes.keys()):
#print(cu[0], cu[3], codes[cu[3]])
coded += codes[ cus[2] + " " + cus[3] ]
elif unicodedata.name(character) in list(codes.keys()):
coded += codes[ cu ]
else:
# 'GREEK CAPITAL LETTER OMEGA' - unicodedata.name("Ω")
# 'EXCLAMATION MARK'
morse = morse_extend( morse, cu )
codes = morse_codes(morse, "encode")
coded += codes[ cu ]
if j+1 < len(word):
coded += " "
elif i+1 < len(words):
coded += "/"
return coded
def morse_decode(message):
morse = morse_file("morse.json")
codes = morse_codes(morse, "decode")
coded = ""
words = re.split('/', message )
for i, word in enumerate(words):
for l in re.split(' ', word ):
if l in list(codes.keys()):
coded += codes[l]
else:
print("NOT IN MORSE?!", l )
if i+1 < len(words):
coded += " "
return coded
def messageencodedecode(messagename):
f = open( f"{messagename}.txt", "r")
message = f.read()
#message = "HULP NODIG 50 MAN ZIT VAST OP HET EILAND RED ONS"
#message = "SOS Aan wie ons horen kan. We weten niet wat er is gebeurt, maar we zitten zonder telefoonlijnen en internet. Daarom vallen we terug op morsecode. We zitten met 50 man vast op het eiland. Met de kerstdagen nabij willen we weer terug naar onze families en dus willen we opgehaald worden. Daarnaast moet Zoë haar hondje weer te eten geven en is de vrouw van Rubìn morgen uitgerekend. Stuur snel hulp!"
#message = "Yoo 012345 6Ω789 bla!" # ,:;.'()" + '"'
print(f"\nmessage { messagename} input")
print("-"*50)
print(message)
# 2 print encoded message in terminal
morseencoded = morse_encode(message)
print(f"\nmessage {messagename} encoded" )
print("-"*50)
print(morseencoded + "<EOF>" )
f = open( f"{messagename}.txt.mc", "w")
f.write(morseencoded)
# 3 print decoded message in terminal
morsedecoded = morse_decode( morseencoded )
print(f"\nmessage {messagename} decoded")
print("-"*50)
print(morsedecoded + "<EOF>")
f = open( f"{messagename}.txt.dc", "w")
f.write(morsedecoded)
f.close()
return True
#messageoktxt = lambda m : "OK" if (m) else "NOT OK"
if __name__ == "__main__":
messageok = messageencodedecode("noodoproep")
print("\nmessage is", (lambda m : "OK" if (m) else "NOT OK") (messageok) )
print("")
messageok = messageencodedecode("noodoproep-extended")
print("\nmessage is", (lambda m : "OK" if (m) else "NOT OK") (messageok) )
print("\nzie morse-extended.json voor morsecode van buitengewone letters en leestekens!")
print("")
``` |
{
"source": "j-h-m/ManTraNet",
"score": 3
} |
#### File: j-h-m/ManTraNet/lib.py
```python
import cv2
import numpy as np
from datetime import datetime
import image_slicer
from PIL import Image

def read_rgb_image( image_file ) :
rgb = cv2.imread( image_file, 1 )[...,::-1]
return rgb
def decode_an_image_array( rgb, manTraNet ) :
x = np.expand_dims( rgb.astype('float32')/255.*2-1, axis=0 )
t0 = datetime.now()
y = manTraNet.predict(x)[0,...,0]
t1 = datetime.now()
return y, t1-t0
def decode_an_image_file( image_file, manTraNet ) :
rgb = read_rgb_image( image_file )
mask, ptime = decode_an_image_array( rgb, manTraNet )
return rgb, mask, ptime.total_seconds()
def slice_and_decode(filename, manTraNet):
tiles = image_slicer.slice(filename, number_tiles=6)
mask_tiles = []
total_ptime = 0
for tile in tiles:
rgb, mask, ptime = decode_an_image_file(tile.filename, manTraNet)
mask_image = Image.fromarray(np.uint8(mask * 255))
mask_tile = image_slicer.Tile(image=mask_image, number=tile.number, position=tile.position, coords=tile.coords)
mask_tiles.append(mask_tile)
total_ptime += ptime
mask_tiles = tuple(mask_tiles)
res = image_slicer.join(mask_tiles)
return res, total_ptime
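# Illustrative usage (the model-loading call is an assumption based on the ManTraNet repo's
# modelCore module, not something defined in this file):
#   from modelCore import load_pretrain_model_by_index
#   manTraNet = load_pretrain_model_by_index(4, './pretrained_weights')
#   rgb, mask, seconds = decode_an_image_file('sample.jpg', manTraNet)
#   joined_mask, total_seconds = slice_and_decode('large_sample.jpg', manTraNet)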
``` |
{
"source": "j-h-m/Media-Journaling-Tool",
"score": 3
} |
#### File: hp_tool/hp/CameraForm.py
```python
import csv
import tkSimpleDialog
import webbrowser
from Tkinter import *
import ttk
import pandas as pd
import os
import collections
import subprocess
import tkFileDialog, tkMessageBox
import json
import requests
from camera_handler import API_Camera_Handler
from hp_data import exts
import data_files
"""
Contains classes for managing camera browser adding/editing
"""
class HP_Device_Form(Toplevel):
"""This class creates the window to add a new device. Must have browser login."""
def __init__(self, master, validIDs=None, pathvar=None, token=None, browser=None, gan=False):
Toplevel.__init__(self, master)
self.geometry("%dx%d%+d%+d" % (600, 600, 250, 125))
self.master = master
self.pathvar = pathvar # use this to set a tk variable to the path of the output txt file
self.validIDs = validIDs if validIDs is not None else []
self.set_list_options()
self.camera_added = False
self.is_gan = gan
self.renamed = {}
self.trello_token = StringVar()
self.trello_token.set(token) if token is not None else ''
self.browser_token = StringVar()
self.browser_token.set(browser) if browser is not None else ''
self.trello_key = data_files._TRELLO['app_key']
self.create_widgets()
def set_list_options(self):
"""
Sets combobox options for manufacturer, lens mounts, and device types
:return: None
"""
df = pd.read_csv(os.path.join(data_files._DB))
self.manufacturers = [str(x).strip() for x in df['Manufacturer'] if str(x).strip() != 'nan']
self.lens_mounts = [str(y).strip() for y in df['LensMount'] if str(y).strip() != 'nan']
self.device_types = [str(z).strip() for z in df['DeviceType'] if str(z).strip() != 'nan']
def create_widgets(self):
"""
Creates form widgets
:return: None
"""
self.f = VerticalScrolledFrame(self)
self.f.pack(fill=BOTH, expand=TRUE)
Label(self.f.interior, text='Add a new HP Device', font=('bold underline', 25)).pack()
Label(self.f.interior, text='Once complete, the new camera will be added automatically, and a notification card will be posted to trello.', wraplength=400).pack()
if not self.is_gan:
Label(self.f.interior, text='Sample File', font=('bold', 18)).pack()
Label(self.f.interior, text='This is required. Select an image/video/audio file. Once metadata is loaded from it, you may continue to complete the form.'
' Some devices can have multiple make/model configurations for images vs. video, or for apps. In these instances, submit this '
'form as normal, and then go to File->Update a Device on the main GUI.', wraplength=400).pack()
self.imageButton = Button(self.f.interior, text='Select File', command=self.populate_from_image)
self.imageButton.pack()
# all questions defined here. end name with a * to make mandatory
head = [('Media Type*', {'description': 'Select the type of media contained in the sample file (Image, Video, Audio)',
'type': 'readonlylist',
'values': ['image', 'video', 'audio']}),
('App', {'description': 'If the sample image was taken with a certain app, specify it here. Otherwise, leave blank.',
'type': 'text',
'values': None}),
('Exif Camera Make',{'description': 'Device make, pulled from device Exif.',
'type': 'list',
'values': self.manufacturers}),
('Exif Camera Model',{'description': 'Device model, pulled from device Exif.',
'type': 'text',
'values': None}),
('Device Serial Number', {'description': 'Device serial number, pulled from device Exif.',
'type': 'text',
'values': None}),
('Local ID*', {'description': 'This can be one of a few forms. The most preferable is the cage number. If it is a personal device, you can use INITIALS-MODEL, such as'
' ES-iPhone4. Please check that the local ID is not already in use.',
'type': 'text',
'values': None}),
('Device Affiliation*', {'description': 'If it is a personal device, please define the affiliation as Other, and write in your organization and your initials, e.g. RIT-TK',
'type': 'radiobutton',
'values': ['RIT', 'PAR', 'Other (please specify):']}),
('HP Model*',{'description': 'Please write the make/model such as it would be easily identifiable, such as Samsung Galaxy S6.',
'type': 'text',
'values': None}),
('Edition',{'description': 'Specific edition of the device, if applicable and not already in the device\'s name.',
'type': 'text',
'values': None}),
('Device Type*',{'description': 'Select camera type. If none are applicable, select "other".',
'type': 'readonlylist',
'values':self.device_types}),
('Sensor Information',{'description': 'Sensor size/dimensions/other sensor info.',
'type': 'text',
'values': None}),
('Lens Mount*',{'description': 'Choose \"builtin\" if the device does not have interchangeable lenses.',
'type': 'list',
'values':self.lens_mounts}),
('Firmware/OS',{'description': 'Firmware/OS',
'type': 'text',
'values': None}),
('Firmware/OS Version',{'description': 'Firmware/OS Version',
'type': 'text',
'values': None}),
('General Description',{'description': 'Other specifications',
'type': 'text',
'values': None}),
]
self.headers = collections.OrderedDict(head)
self.questions = {}
for h in self.headers:
d = SectionFrame(self.f.interior, title=h, descr=self.headers[h]['description'], type=self.headers[h]['type'], items=self.headers[h]['values'], bd=5)
d.pack(pady=4)
self.questions[h] = d
Label(self.f.interior, text='Trello Login Token', font=(20)).pack()
Label(self.f.interior, text='This is required to send a notification of the new device to Trello.').pack()
trello_link = 'https://trello.com/1/authorize?key=' + self.trello_key + '&scope=read%2Cwrite&name=HP_GUI&expiration=never&response_type=token'
trelloTokenButton = Button(self.f.interior, text='Get Trello Token', command=lambda: self.open_link(trello_link))
trelloTokenButton.pack()
tokenEntry = Entry(self.f.interior, textvar=self.trello_token)
tokenEntry.pack()
Label(self.f.interior, text='Browser Login Token*', font=(20)).pack()
Label(self.f.interior, text='This allows for the creation of the new device.').pack()
browserTokenButton = Button(self.f.interior, text='Get Browser Token', command=lambda: tkMessageBox.showinfo("Get Browser Token", "Refer to the HP Tool guide to retrieve your browser token."))
browserTokenButton.pack()
browserEntry = Entry(self.f.interior, textvar=self.browser_token)
browserEntry.pack()
buttonFrame = Frame(self)
buttonFrame.pack()
self.okbutton = Button(buttonFrame, text='Complete', command=self.export_results)
self.okbutton.pack()
self.cancelbutton = Button(buttonFrame, text='Cancel', command=self.destroy)
self.cancelbutton.pack()
if self.is_gan:
self.questions['Exif Camera Make'].edit_items([])
self.questions['Device Type*'].edit_items(['Computational'])
self.questions['Device Type*'].set('Computational')
self.add_required('Edition')
self.questions['Sensor Information'].pack_forget()
self.questions['Device Serial Number'].pack_forget()
self.add_required('Exif Camera Model')
self.questions['HP Model*'].pack_forget()
self.questions["Lens Mount*"].pack_forget()
self.questions['Lens Mount*'].set("NA")
self.remove_required("Lens Mount*")
self.add_required('Firmware/OS')
self.add_required('Firmware/OS Version')
self.rename("Exif Camera Make", "GAN Name*", "Name of the GAN used")
else:
self.okbutton.configure(state='disabled')
for q, a in self.questions.iteritems():
a.disable()
def remove_required(self, data):
if not data.endswith("*"):
return
try:
self.headers[data[:-1]] = self.headers.pop(data)
self.questions[data].remove_required()
self.renamed[data[:-1]] = data
except KeyError:
return
def add_required(self, data):
if data.endswith("*"):
return
try:
self.headers[data + "*"] = self.headers.pop(data)
self.questions[data].add_required()
self.renamed[data + "*"] = data
except KeyError:
return
def rename(self, item, title, desc):
try:
self.headers[title] = self.headers.pop(item)
self.renamed[title] = item
self.questions[item].rename(title, desc)
except KeyError:
return
def populate_from_image(self):
"""
Fill mandatory exif-fillable fields
:return: None
"""
self.imfile = tkFileDialog.askopenfilename(title='Select Image File', parent=self)
if not self.imfile:
return
self.imageButton.config(text=os.path.basename(self.imfile))
args = ['exiftool', '-f', '-j', '-Model', '-Make', '-SerialNumber', self.imfile]
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
exifData = json.loads(p)[0]
except:
self.master.statusBox.println('An error occurred attempting to pull exif data from image.')
return
for q, a in self.questions.iteritems():
a.enable()
if exifData['Make'] != '-':
self.questions['Exif Camera Make'].set(exifData['Make'])
self.questions['Exif Camera Make'].disable()
if exifData['Model'] != '-':
self.questions['Exif Camera Model'].set(exifData['Model'])
self.questions['Exif Camera Model'].disable()
if exifData['SerialNumber'] != '-':
self.questions['Device Serial Number'].set(exifData['SerialNumber'])
self.questions['Device Serial Number'].disable()
self.okbutton.config(state='normal')
def export_results(self):
"""
Triggers when ok/complete button is clicked. Validates and exports the new camera data
:return: None
"""
if self.is_gan:
self.questions["HP Model*"].set(self.questions['Exif Camera Model'].get())
msg = None
for h in self.headers:
if h in self.renamed.keys():
contents = self.questions[self.renamed[h]].get()
else:
contents = self.questions[h].get()
if h.endswith('*') and contents == '':
msg = 'Field ' + h[:-1] + ' is a required field.'
break
if self.browser_token.get() == '':
msg = 'Browser Token is a required field.'
check = self.local_id_used()
msg = msg if check is None else check
if msg:
tkMessageBox.showerror(title='Error', message=msg, parent=self)
return
# post and check browser response
browser_resp = self.post_to_browser()
if browser_resp.status_code in (requests.codes.ok, requests.codes.created):
cont = tkMessageBox.askyesno(title='Complete', message='Successfully posted new camera information! Post notification to Trello?', parent=self)
self.camera_added = True
else:
tkMessageBox.showerror(title='Error', message='An error occurred posting the new camera information to the MediBrowser. (' + str(browser_resp.status_code)+ ')', parent=self)
return
if cont:
code = self.post_to_trello()
if code is not None:
tkMessageBox.showerror('Trello Error', message='An error occurred connecting to trello (' + str(
code) + ').\nIf you\'re not sure what is causing this error, email <EMAIL>.', parent=self)
else:
tkMessageBox.showinfo(title='Information', message='Complete!', parent=self)
self.destroy()
def post_to_browser(self):
"""
Handles the browser interaction
:return: requests.post() response
"""
url = self.master.settings.get_key("apiurl") + '/cameras/'
headers = {
'Content-Type': 'application/json',
'Authorization': 'Token ' + self.browser_token.get(),
}
data = { 'hp_device_local_id': self.questions['Local ID*'].get(),
'affiliation': self.questions['Device Affiliation*'].get(),
'hp_camera_model': self.questions['HP Model*'].get(),
'exif':[{'exif_camera_make': self.questions['Exif Camera Make'].get(),
'exif_camera_model': self.questions['Exif Camera Model'].get(),
'exif_device_serial_number': self.questions['Device Serial Number'].get(),
'hp_app': self.questions['App'].get(),
'media_type': self.questions['Media Type*'].get()}],
'camera_edition': self.questions['Edition'].get(),
'camera_type': self.questions['Device Type*'].get(),
'camera_sensor': self.questions['Sensor Information'].get(),
'camera_description': self.questions['General Description'].get(),
'camera_lens_mount': self.questions['Lens Mount*'].get(),
'camera_firmware': self.questions['Firmware/OS'].get(),
'camera_version': self.questions['Firmware/OS Version'].get()
}
data = self.json_string(data)
return requests.post(url, headers=headers, data=data)
def json_string(self, data):
"""
Convert a dictionary of camera data to a string. Also changes empty strings in the dict to None
:param data: dictionary containing camera data
:return: string version of data
"""
for key, val in data.iteritems():
if val == '':
data[key] = None
for configuration in data['exif']:
for key, val in configuration.iteritems():
if val == '':
configuration[key] = None
return json.dumps(data)
def local_id_used(self):
"""
Check if a user-entered local ID is already in use
:return: (string) Error message (if error), otherwise None
"""
print 'Verifying local ID is not already in use...'
c = API_Camera_Handler(self, token=self.browser_token.get(), url=self.master.settings.get_key("apiurl"), given_id=self.questions["Local ID*"].get())
local_id_reference = c.get_local_ids()
if self.questions['Local ID*'].get().lower() in [i.lower() for i in local_id_reference]:
return 'Local ID ' + self.questions['Local ID*'].get() + ' already in use.'
def open_link(self, link):
"""
Open a web browser link
:param link: string containing website to open
:return: None
"""
webbrowser.open(link)
def post_to_trello(self):
"""create a new card in trello and attach a file to it"""
token = self.trello_token.get()
# list ID for "New Devices" list
list_id = data_files._TRELLO['camera_update_list']
# post the new card
new = self.questions['Local ID*'].get()
resp = requests.post("https://trello.com/1/cards", params=dict(key=self.trello_key, token=token),
data=dict(name='NEW DEVICE: ' + new, idList=list_id))
# attach the file and user, if the card was successfully posted
if resp.status_code == requests.codes.ok:
j = json.loads(resp.content)
me = requests.get("https://trello.com/1/members/me", params=dict(key=self.trello_key, token=token))
member_id = json.loads(me.content)['id']
new_card_id = j['id']
resp2 = requests.post("https://trello.com/1/cards/%s/idMembers" % (new_card_id),
params=dict(key=self.trello_key, token=token),
data=dict(value=member_id))
return None
else:
return resp.status_code
class SectionFrame(Frame):
"""
Question template for new camera form
"""
def __init__(self, master, title, descr, type, items=None, **kwargs):
Frame.__init__(self, master, **kwargs)
self.title = title
self.descr = descr
self.type = type
self.items = items # list items, if combobox type
self.val = StringVar()
self.row = 0
self.create_form_item()
def create_form_item(self):
self._title = Label(self, text=self.title, font=(20))
self._title.grid(row=self.row)
self.row+=1
self._descr = Label(self, text=self.descr, wraplength=350)
self._descr.grid(row=self.row)
self.row+=1
if 'list' in self.type:
self._form = ttk.Combobox(self, textvariable=self.val, values=self.items)
self._form.bind('<MouseWheel>', self.remove_bind)
elif 'radiobutton' in self.type:
for item in self.items:
if item.lower().startswith('other'):
Label(self, text='Other - Please specify: ').grid(row=self.row)
self._form = Entry(self, textvar=self.val)
else:
Radiobutton(self, text=item, variable=self.val, value=item).grid(row=self.row)
self.row+=1
else:
self._form = Entry(self, textvar=self.val)
if 'readonly' in self.type and hasattr(self, '_form'):
self._form.config(state='readonly')
if hasattr(self, '_form'):
self._form.grid(row=self.row)
def remove_bind(self, event):
return 'break'
def get(self):
return str(self.val.get())
def set(self, val):
self.val.set(str(val))
def disable(self):
if hasattr(self, '_form'):
self._form.config(state='disabled')
def enable(self):
if hasattr(self, '_form'):
self._form.config(state='normal')
def edit_items(self, new_vals):
self._form['values'] = new_vals
def remove_required(self):
self._title['text'] = self._title['text'][:-1] if self._title['text'].endswith("*") else self._title['text']
def add_required(self):
self._title['text'] = self._title['text'] + "*" if not self._title['text'].endswith("*") else self._title['text']
def rename(self, title, desc):
self._title['text'] = title if title else self._title['text']
self._descr['text'] = desc if desc else self._descr['text']
class Update_Form(Toplevel):
"""
Functions for updating a device. Accessed via File -> Update a Device in main menu. User must have valid browser
login set in settings.
"""
def __init__(self, master, device_data, trello=None, browser=None):
Toplevel.__init__(self, master)
self.master = master
self.device_data = device_data
self.trello = trello
self.browser = browser
self.configurations = {'exif_device_serial_number':[],'exif_camera_make':[], 'exif_camera_model':[], 'hp_app':[],
'media_type':[], 'username':[], 'created':[]}
self.row = 0
self.config_count = 0
self.updated = False
self.create_widgets()
def create_widgets(self):
self.f = VerticalScrolledFrame(self)
self.f.pack(fill=BOTH, expand=TRUE)
self.buttonsFrame = Frame(self)
self.buttonsFrame.pack(fill=BOTH, expand=True)
Label(self.f.interior, text='Updating Device:\n' + self.device_data['hp_device_local_id'], font=('bold', 20)).grid(columnspan=8)
self.row+=1
Label(self.f.interior, text='Shown below are the current exif configurations for this camera.').grid(row=self.row, columnspan=8)
self.row+=1
Button(self.f.interior, text='Show instructions for this form', command=self.show_help).grid(row=self.row, columnspan=8)
self.row+=1
col = 1
for header in ['Serial', 'Make', 'Model', 'Software/App', 'Media Type', 'Username', 'Created']:
Label(self.f.interior, text=header).grid(row=self.row, column=col)
col+=1
self.row += 1
self.add_button = self.create_add_button()
self.add_button.grid(row=self.row, columnspan=8)
for configuration in self.device_data['exif']:
self.add_config(configuration=configuration)
ok = Button(self.buttonsFrame, text='Ok', command=self.go, width=20, bg='green')
ok.pack()
cancel = Button(self.buttonsFrame, text='Cancel', command=self.cancel, width=20)
cancel.pack()
def add_config(self, configuration):
"""
Controls the mechanism for adding a new exif configuration
:param configuration: dictionary containing exif fields shown in ordered dict
:return: None
"""
if hasattr(self, 'add_button'):
self.add_button.grid_forget()
col = 0
self.row += 1
stringvars = collections.OrderedDict([('exif_device_serial_number', StringVar()), ('exif_camera_make', StringVar()),
('exif_camera_model', StringVar()), ('hp_app', StringVar()),
('media_type', StringVar()), ('username', StringVar()), ('created', StringVar())])
Label(self.f.interior, text='Config: ' + str(self.config_count + 1)).grid(row=self.row, column=col)
col += 1
for k, v in stringvars.iteritems():
if configuration[k] is None:
v.set('')
else:
v.set(configuration[k])
if k == 'media_type':
e = ttk.Combobox(self.f.interior, values=['image', 'video', 'audio'], state='readonly', textvariable=v)
else:
e = Entry(self.f.interior, textvar=v)
if k != 'hp_app':
e.config(state=DISABLED)
e.grid(row=self.row, column=col)
self.configurations[k].append(v)
col += 1
self.config_count+=1
self.row+=1
self.add_button = self.create_add_button()
self.add_button.grid(row=self.row, columnspan=8)
def create_add_button(self):
return Button(self.f.interior, text='Add a new configuration', command=self.get_data)
def go(self):
"""
Posts the camera update, and notifies trello if option is selected.
:return: None. Should pop up box with status when complete.
"""
url = self.master.settings.get_key('apiurl') + '/cameras/' + str(self.device_data['id']) + '/'
headers = {
'Content-Type': 'application/json',
'Authorization': 'Token ' + self.browser,
}
data = self.prepare_data()
if data is None:
return
r = requests.put(url, headers=headers, data=data)
if r.status_code in (requests.codes.ok, requests.codes.created):
if tkMessageBox.askyesno(title='Done!', message='Camera updated. Post notification to Trello?', parent=self):
self.camupdate_notify_trello(url)
with open(data_files._LOCALDEVICES, 'r+') as j:
local = json.load(j)
for item in local:
if item == self.device_data['hp_device_local_id']:
local[item] = data
break
new = json.dumps(local, indent=4)
j.seek(0)  # rewind before rewriting, otherwise the new JSON is appended after the old content
j.write(new)
j.truncate()
self.updated = True
self.destroy()
else:
tkMessageBox.showerror(title='Error', message='An error occurred updating this device. (' + str(r.status_code) + ')', parent=self)
def prepare_data(self):
"""
Parse out exif data for posting on browser. Ensures no duplicates are created and assigns username/createdate.
:return: string-formatted json data with the new exif data
"""
data = {'exif':[]}
for i in range(len(self.configurations['exif_camera_make'])):
data['exif'].append({'exif_camera_make':self.configurations['exif_camera_make'][i].get(),
'exif_camera_model':self.configurations['exif_camera_model'][i].get(),
'hp_app': self.configurations['hp_app'][i].get(),
'media_type': self.configurations['media_type'][i].get(),
'exif_device_serial_number': self.configurations['exif_device_serial_number'][i].get()})
if self.configurations['created'][i].get() != 'ToBeSetOnUpdate' and self.configurations['username'][i].get() != 'ToBeSetOnUpdate':
data['exif'][i]['created'] = self.configurations['created'][i].get()
data['exif'][i]['username'] = self.configurations['username'][i].get()
for configuration in data['exif']:
for key, val in configuration.iteritems():
if val == '':
configuration[key] = None
if key == 'media_type' and val == '':
configuration[key] = 'image'
# remove duplicate entries
data = [dict(t) for t in set([tuple(d.items()) for d in data['exif']])]
return json.dumps({'exif':data})
def camupdate_notify_trello(self, link):
"""
Trello notifier. Posts a new card on update with user and details.
:param link: Link to device
:return: None. Message pop-up.
"""
# list ID for "New Devices" list
trello_key = data_files._TRELLO['app_key']
list_id = data_files._TRELLO['camera_update_list']
link = self.master.settings.get_key("apiurl")[:-4] + '/camera/' + str(self.device_data['id'])
# post the new card
title = 'Camera updated: ' + self.device_data['hp_device_local_id']
resp = requests.post("https://trello.com/1/cards", params=dict(key=trello_key, token=self.trello),
data=dict(name=title, idList=list_id, desc=link))
# attach the user, if successfully posted.
if resp.status_code == requests.codes.ok:
j = json.loads(resp.content)
me = requests.get("https://trello.com/1/members/me", params=dict(key=trello_key, token=self.trello))
member_id = json.loads(me.content)['id']
new_card_id = j['id']
resp2 = requests.post("https://trello.com/1/cards/%s/idMembers" % (new_card_id),
params=dict(key=trello_key, token=self.trello),
data=dict(value=member_id))
tkMessageBox.showinfo(title='Information', message='Complete!', parent=self)
else:
tkMessageBox.showerror(title='Error', message='An error occurred connecting to trello (' + str(resp.status_code) + '). The device was still updated.')
def show_help(self):
tkMessageBox.showinfo(title='Instructions',
parent=self,
message='Occasionally, cameras can have different metadata for make and model for image vs. video, or for different apps. '
'This usually results in errors in HP data processing, as the tool checks the data on record.\n\n'
'If the device you\'re using has different metadata than what is on the browser for that device, add a new configuration by clicking the "Add a new configuration" button. '
'You will be prompted to choose a file from that camera with the new metadata.\n\n'
'Be sure to enter the media type, and if there was a particular App that was used with this media file, enter that as well in the respective field.'
'Press Ok to push the changes to the browser, or Cancel to cancel the process.')
def cancel(self):
self.destroy()
def get_data(self):
"""
Parse out EXIF metadata from a sample media file.
:return: None. Calls add_config()
"""
self.imfile = tkFileDialog.askopenfilename(title='Select Media File', parent=self)
if self.imfile in ('', None):
return
args = ['exiftool', '-f', '-j', '-Model', '-Make', '-SerialNumber', self.imfile]
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
exifData = json.loads(p)[0]
except:
            self.master.statusBox.println('An error occurred attempting to pull exif data from image. Check exiftool install.')
return
exifData['Make'] = exifData['Make'] if exifData['Make'] != '-' else ''
exifData['Model'] = exifData['Model'] if exifData['Model'] != '-' else ''
exifData['SerialNumber'] = exifData['SerialNumber'] if exifData['SerialNumber'] != '-' else ''
global exts
if os.path.splitext(self.imfile)[1].lower() in exts['VIDEO']:
type = 'video'
elif os.path.splitext(self.imfile)[1].lower() in exts['AUDIO']:
type = 'audio'
else:
type = 'image'
new_config = {'exif_device_serial_number': exifData['SerialNumber'], 'exif_camera_model': exifData['Model'],
'exif_camera_make': exifData['Make'], 'hp_app': None, 'media_type': type,
'username': 'ToBeSetOnUpdate', 'created': 'ToBeSetOnUpdate'}
self.add_config(new_config)
class VerticalScrolledFrame(Frame):
"""A pure Tkinter scrollable frame that actually works!
http://stackoverflow.com/questions/16188420/python-tkinter-scrollbar-for-frame
* Use the 'interior' attribute to place widgets inside the scrollable frame
* Construct and pack/place/grid normally
* This frame only allows vertical scrolling
"""
def __init__(self, parent, *args, **kw):
Frame.__init__(self, parent, *args, **kw)
# create a canvas object and a vertical scrollbar for scrolling it
vscrollbar = Scrollbar(self, orient=VERTICAL)
vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)
self.canvas = Canvas(self, bd=0, highlightthickness=0,
yscrollcommand=vscrollbar.set)
self.canvas.pack(side=LEFT, fill=BOTH, expand=TRUE)
vscrollbar.config(command=self.canvas.yview)
self.canvas.bind("<MouseWheel>", self.on_mousewheel)
# reset the view
self.canvas.xview_moveto(0)
self.canvas.yview_moveto(0)
# create a frame inside the canvas which will be scrolled with it
self.interior = interior = Frame(self.canvas)
interior_id = self.canvas.create_window(0, 0, window=interior,
anchor=NW)
# track changes to the canvas and frame width and sync them,
# also updating the scrollbar
def _configure_interior(event):
# update the scrollbars to match the size of the inner frame
size = (interior.winfo_reqwidth(), interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
if interior.winfo_reqwidth() != self.canvas.winfo_width():
# update the canvas's width to fit the inner frame
self.canvas.config(width=interior.winfo_reqwidth())
interior.bind('<Configure>', _configure_interior)
def _configure_canvas(event):
if interior.winfo_reqwidth() != self.canvas.winfo_width():
# update the inner frame's width to fill the canvas
self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())
self.canvas.bind('<Configure>', _configure_canvas)
def on_mousewheel(self, event):
if sys.platform.startswith('win'):
self.canvas.yview_scroll(-1*(event.delta/120), "units")
else:
self.canvas.yview_scroll(-1*(event.delta), "units")
```
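A minimal usage sketch of `VerticalScrolledFrame` (illustrative only; it assumes the same Python 2 / Tkinter environment used throughout this file, and that the class is importable from `CameraForm` as the other modules here do). Per the class docstring, child widgets are placed on the `interior` attribute rather than on the frame itself:

```python
from Tkinter import Tk, Label, BOTH, TRUE
from CameraForm import VerticalScrolledFrame  # assumes the hp package directory is on the path

root = Tk()
scrolled = VerticalScrolledFrame(root)
scrolled.pack(fill=BOTH, expand=TRUE)
# Widgets go on the 'interior' frame, not on the VerticalScrolledFrame itself.
for i in range(50):
    Label(scrolled.interior, text='Row %d' % i).grid(row=i, column=0, sticky='w')
root.mainloop()
```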
#### File: hp_tool/hp/hpgui.py
```python
import argparse
import tarfile
import tempfile
import threading
from Tkinter import *
import collections
import boto3
import rawpy
from boto3.s3.transfer import S3Transfer
import matplotlib
import requests
from maskgen.maskgen_loader import MaskGenLoader
from maskgen.tool_set import openImage
from maskgen.image_wrap import openImageFile
matplotlib.use("TkAgg")
import ttk
import tkFileDialog
import tkMessageBox
import tkSimpleDialog
from PIL import Image
from hp_data import *
from HPSpreadsheet import HPSpreadsheet, TrelloSignInPrompt, ProgressPercentage
from KeywordsSheet import KeywordsSheet
from ErrorWindow import ErrorWindow
from prefs import SettingsWindow
from CameraForm import HP_Device_Form, Update_Form
from camera_handler import API_Camera_Handler
from data_files import *
import sys
class HP_Starter(Frame):
def __init__(self, settings, checker, master=None):
Frame.__init__(self, master)
self.master = master
self.settings = settings
self.checker = checker
self.oldImageNames = []
self.newImageNames = []
self.collections = load_json_dictionary(data_files._COLLECTIONS)
self.createWidgets()
self.load_defaults()
self.bindings()
def bindings(self):
self.bind('<Return>', self.go)
def update_defaults(self):
self.settings.save('inputdir', self.inputdir.get())
self.settings.save('outputdir', self.outputdir.get())
def load_defaults(self):
if self.settings.get_key('inputdir') is not None:
self.inputdir.insert(END, self.settings.get_key('inputdir'))
if self.settings.get_key('outputdir') is not None:
self.outputdir.insert(END, self.settings.get_key('outputdir'))
def load_input(self):
initial = self.inputdir.get() if self.inputdir.get() else os.getcwd()
d = tkFileDialog.askdirectory(initialdir=initial)
if d:
self.inputdir.delete(0, 'end')
self.inputdir.insert(0, d)
def load_output(self):
initial = self.inputdir.get() if self.inputdir.get() else os.getcwd()
d = tkFileDialog.askdirectory(initialdir=initial)
if d:
self.outputdir.delete(0, 'end')
self.outputdir.insert(0, d)
def preview_filename(self):
testNameStr = 'Please update settings with username and organization.'
if self.settings.get_key('seq') is not None:
testNameStr = datetime.datetime.now().strftime('%Y%m%d')[2:] + '-' + \
self.settings.get_key('hp-organization') + self.settings.get_key('username') + '-' + \
self.settings.get_key('seq')
if self.additionalinfo.get():
testNameStr += '-' + self.additionalinfo.get()
tkMessageBox.showinfo('Filename Preview', testNameStr)
def go(self, event=None):
if not self.settings.get_key('username') or not self.settings.get_key('hp-organization'):
tkMessageBox.showerror(title='Error', message='Please enter username and organization in settings before running.')
return
if self.inputdir.get() == '':
tkMessageBox.showerror(title='Error',
message='Please specify an input directory. This should contain data from only one camera.')
return
elif self.outputdir.get() == '':
self.outputdir.insert(0, os.path.join(self.inputdir.get(), 'hp-output'))
self.update_model()
try:
if not self.checker.check_calibrations(self.localID.get()):
tkMessageBox.showerror('Error', 'PRNU has not yet been uploaded for this device. PRNU must be collected '
'and uploaded for a device prior to HP uploads.')
return
except KeyError:
if self.localID.get() != "":
tkMessageBox.showerror('Error', 'PRNU has not yet been uploaded for this device. PRNU must be collected '
'and uploaded for a device prior to HP uploads.')
return
if not self.master.cameras:
input_dir_files = [os.path.join(self.inputdir.get(), x) for x in os.listdir(self.inputdir.get())]
models = all(os.path.isdir(x) for x in input_dir_files)
def needed_cammodel():
yes = tkMessageBox.askyesno(title='Error',
message='Invalid Device Local ID. Would you like to add a new device?')
if yes:
self.master.open_form()
self.update_model()
return
if models and not self.recBool.get():
errors = []
for model_dir in input_dir_files:
if len(os.listdir(model_dir)) == 1 and (is_model(model_dir) or
os.path.splitext(os.listdir(model_dir)[0])[1] in exts[
'nonstandard']):
errors.append("No Thumbnail images found in {0}.".format(os.path.basename(model_dir)))
if not any([is_model(fname) for fname in os.listdir(model_dir)]):
needed_cammodel()
return
if len(errors) > 0:
tkMessageBox.showerror("Error", "\n".join(errors))
return
pass
else:
needed_cammodel()
return
globalFields = ['HP-Collection', 'HP-DeviceLocalID', 'HP-CameraModel', 'HP-LensLocalID']
kwargs = {'settings': self.settings,
'imgdir': self.inputdir.get(),
'outputdir': self.outputdir.get(),
'recursive': self.recBool.get(),
'additionalInfo': self.additionalinfo.get(),
}
for fieldNum in xrange(len(globalFields)):
val = self.attributes[self.descriptionFields[fieldNum]].get()
if val == 'None':
val = ''
kwargs[globalFields[fieldNum]] = val
self.update_defaults()
(self.oldImageNames, self.newImageNames) = process(self, self.master.cameras, **kwargs)
if self.oldImageNames == None:
return
aSheet = HPSpreadsheet(self.settings, dir=self.outputdir.get(), master=self.master, devices=self.master.cameras)
aSheet.open_spreadsheet()
self.keywordsbutton.config(state=NORMAL)
keySheet = self.open_keywords_sheet()
keySheet.close()
def open_keywords_sheet(self):
keywords = KeywordsSheet(self.settings, dir=self.outputdir.get(), master=self.master,
newImageNames=self.newImageNames, oldImageNames=self.oldImageNames)
keywords.open_spreadsheet()
return keywords
def open_settings(self):
SettingsWindow(self.settings, master=self.master)
def createWidgets(self):
r = 0
Label(self, text='***ONLY PROCESS DATA FROM ONE DEVICE PER RUN***', font=('bold', 16)).grid(row=r, columnspan=8,
pady=2)
r += 1
Label(self, text='Specify a different output directory for each different device.').grid(row=r, columnspan=8,
pady=2)
r += 1
self.recBool = BooleanVar()
self.recBool.set(False)
self.inputSelector = Button(self, text='Input directory: ', command=self.load_input, width=20)
self.inputSelector.grid(row=r, column=0, ipadx=5, ipady=5, padx=5, pady=5, columnspan=1)
self.recbox = Checkbutton(self, text='Include subdirectories', variable=self.recBool, command=self.warnRecbox)
self.recbox.grid(row=r, column=3, ipadx=5, ipady=5, padx=5, pady=5)
self.inputdir = Entry(self)
self.inputdir.grid(row=r, column=1, ipadx=5, ipady=5, padx=0, pady=5, columnspan=2)
self.outputSelector = Button(self, text='Output directory: ', command=self.load_output, width=20)
self.outputSelector.grid(row=r, column=4, ipadx=5, ipady=5, padx=5, pady=5, columnspan=2)
self.outputdir = Entry(self, width=20)
self.outputdir.grid(row=r, column=6, ipadx=5, ipady=5, padx=5, pady=5, columnspan=2)
r += 1
self.additionallabel = Label(self, text='Additional Text to add at end of new filenames: ')
self.additionallabel.grid(row=r, column=0, ipadx=5, ipady=5, padx=5, pady=5, columnspan=3)
self.additionalinfo = Entry(self, width=10)
self.additionalinfo.grid(row=r, column=3, ipadx=5, ipady=5, padx=5, pady=5, sticky='W')
self.previewbutton = Button(self, text='Preview filename', command=self.preview_filename, bg='cyan')
self.previewbutton.grid(row=r, column=4)
self.changeprefsbutton = Button(self, text='Edit Settings', command=self.open_settings)
self.changeprefsbutton.grid(row=r, column=6)
r += 1
self.sep1 = ttk.Separator(self, orient=HORIZONTAL).grid(row=r, columnspan=8, sticky='EW')
self.descriptionFields = ['HP-Collection', 'Local Camera ID', 'Camera Model', 'Local Lens ID']
r += 1
Label(self,
text='Enter collection information. Local Camera ID is REQUIRED. If you enter a valid ID (case sensitive), the corresponding '
'model will appear in the camera model box.\nIf you enter an invalid ID and Run, it is assumed '
'that this is a new device, and you will be prompted to enter the new device\'s information.').grid(
row=r,
columnspan=8)
r += 1
self.localID = StringVar()
self.camModel = StringVar()
col = 0
self.attributes = {}
for field in self.descriptionFields:
attrlabel = Label(self, text=field)
attrlabel.grid(row=r, column=col, ipadx=5, ipady=5, padx=5, pady=5)
if field == 'HP-Collection':
self.attributes[field] = ttk.Combobox(self, width=20, values=self.collections.keys(), state='readonly')
self.attributes[field].set('None')
else:
self.attributes[field] = Entry(self, width=10)
self.attributes[field].grid(row=r, column=col + 1, ipadx=0, ipady=5, padx=5, pady=5)
if field == 'Local Camera ID':
self.attributes[field].config(textvar=self.localID)
elif field == 'Camera Model':
self.attributes[field].config(textvar=self.camModel, state=DISABLED)
col += 2
if col == 8:
r += 1
col = 0
lastLoc = self.attributes['Local Lens ID'].grid_info()
lastRow = int(lastLoc['row'])
self.sep2 = ttk.Separator(self, orient=HORIZONTAL).grid(row=lastRow + 1, columnspan=8, sticky='EW')
self.okbutton = Button(self, text='Run ', command=self.go, width=20, bg='green')
self.okbutton.grid(row=lastRow + 2, column=0, ipadx=5, ipady=5, sticky='E')
self.cancelbutton = Button(self, text='Cancel', command=self.quit, width=20, bg='red')
self.cancelbutton.grid(row=lastRow + 2, column=6, ipadx=5, ipady=5, padx=5, sticky='W')
self.keywordsbutton = Button(self, text='Enter Keywords', command=self.open_keywords_sheet, state=DISABLED,
width=20)
self.keywordsbutton.grid(row=lastRow + 2, column=2, ipadx=5, ipady=5, padx=5, sticky='E')
def update_model(self, *args):
self.master.load_ids(self.localID.get())
self.checker.camera_list = self.master.cameras
if self.localID.get() in self.master.cameras:
self.attributes['Camera Model'].config(state=NORMAL)
self.camModel.set(self.master.cameras[self.localID.get()]['hp_camera_model'])
self.attributes['Camera Model'].config(state=DISABLED)
else:
self.attributes['Camera Model'].config(state=NORMAL)
self.camModel.set('')
self.attributes['Camera Model'].config(state=DISABLED)
def warnRecbox(self):
if self.recBool.get():
tkMessageBox.showwarning("Warning", '3D Models will not be scanned loaded if the "Include subdirectories"'
' box is checked.')
class PRNU_Uploader(Frame):
"""
Handles the checking and uploading of PRNU data
"""
def __init__(self, settings, checker, master=None):
Frame.__init__(self, master)
self.master = master
self.checker = checker
self.settings = settings
self.root_dir = StringVar()
self.localID = StringVar()
self.s3path = StringVar()
self.newCam = BooleanVar()
self.newCam.set(0)
self.parse_vocab(data_files._PRNUVOCAB)
self.create_prnu_widgets()
self.s3path.set(self.settings.get_key('aws-prnu'))
def create_prnu_widgets(self):
r = 0
Label(self,
text='Enter the absolute path of the main PRNU directory here. You can click the button to open a file select dialog.').grid(
row=r, column=0, ipadx=5, ipady=5, padx=5, pady=5, columnspan=8)
r += 1
dirbutton = Button(self, text='Root PRNU Directory:', command=self.open_dir, width=20)
dirbutton.grid(row=r, column=0, ipadx=5, ipady=5, padx=5, pady=5, columnspan=1)
self.rootEntry = Entry(self, width=100, textvar=self.root_dir)
self.rootEntry.grid(row=r, column=1, ipadx=5, ipady=5, padx=0, pady=5, columnspan=4)
r += 1
sep1 = ttk.Separator(self, orient=HORIZONTAL).grid(row=r, columnspan=6, sticky='EW', pady=5)
r += 1
sep2 = ttk.Separator(self, orient=VERTICAL).grid(row=r, column=2, sticky='NS', padx=5, rowspan=3)
Label(self,
text='You must successfully verify the directory structure by clicking below before you can upload.\n'
'If any errors are found, they must be corrected.').grid(row=r, column=0, ipadx=5, ipady=5,
padx=5, pady=5, columnspan=2)
Label(self, text='After successful verification, specify the upload location and click Start Upload.\n'
'Make sure you have specified your Trello token in Settings as well.').grid(
row=r, column=3, ipadx=5, ipady=5, padx=5, pady=5, columnspan=2)
r += 1
verifyButton = Button(self, text='Verify Directory Structure', command=self.examine_dir, width=20)
verifyButton.grid(row=r, column=0, ipadx=5, ipady=5, padx=5, pady=5, columnspan=2)
self.s3Label = Label(self, text='S3 bucket/path: ').grid(row=r, column=3, ipadx=5, ipady=5, padx=5, pady=5,
columnspan=1)
self.s3Entry = Entry(self, width=40, textvar=self.s3path)
self.s3Entry.grid(row=r, column=4, ipadx=5, ipady=5, padx=5, pady=5, columnspan=2, sticky=W)
r += 1
self.changeprefsbutton = Button(self, text='Edit Settings', command=self.open_settings)
self.changeprefsbutton.grid(row=r, column=0, columnspan=2)
self.uploadButton = Button(self, text='Start Upload', command=self.upload, width=20, state=DISABLED,
bg='green')
self.uploadButton.grid(row=r, column=3, ipadx=5, ipady=5, padx=5, pady=5, columnspan=1, sticky=W)
self.cancelButton = Button(self, text='Cancel', command=self.cancel_upload, width=20, bg='red')
self.cancelButton.grid(row=r, column=4, ipadx=5, ipady=5, padx=5, pady=5, columnspan=1, sticky=E)
def open_settings(self):
SettingsWindow(self.settings, master=self.master)
self.s3path.set(self.settings.get_key('aws-prnu'))
def open_new_insert_id(self):
d = HP_Device_Form(self, validIDs=self.master.cameras.keys(), token=self.settings.get_key('trello'),
browser=self.settings.get_key('apitoken'))
self.master.reload_ids()
def parse_vocab(self, path):
"""
Create valid vocabulary list for folder names. Adds valid numbering format to smaller list in file. (see prnu_vocab.csv)
:param path: string, path to PRNU vocab CSV
:return: None
"""
self.vocab = []
with open(path) as v:
rdr = csv.reader(v)
for row in rdr:
self.vocab.append(row[0])
for x in range(0, 101):
self.vocab.append(row[0] + '_' + str(x))
def open_dir(self):
d = tkFileDialog.askdirectory()
if d is not None:
self.root_dir.set(d)
def examine_dir(self):
"""
Bulk of the PRNU tool processing. Checks the specified directory for proper contents. See PRNU doc for more
information on rules. Will also remove hidden files and thumbs.db - be careful.
:return: None
"""
print('Verifying PRNU directory')
self.localID.set(os.path.basename(os.path.normpath(self.root_dir.get())))
msgs = []
luminance_folders = []
for path, dirs, files in os.walk(self.root_dir.get()):
p, last = os.path.split(path)
# check root directory. should only have images and video folders.
if last == self.localID.get():
if not self.has_same_contents(dirs, ['images', 'video']):
msgs.append('Root PRNU directory must have \"Images\" and \"Video\" folders.')
if files:
for f in files:
if f.startswith('.') or f.lower() == 'thumbs.db':
try:
os.remove(os.path.join(path, f))
except OSError:
pass
else:
msgs.append(
'There should be no files in the root directory. Only \"Images\" and \"Video\" folders.')
break
# check first level content. should contain primary and secondary folders only.
elif last.lower() in ['images', 'video']:
if not self.has_same_contents(dirs, ['primary', 'secondary']):
msgs.append('Images and Video folders must each contain Primary and Secondary folders.')
if files:
for f in files:
if f.startswith('.') or f.lower() == 'thumbs.db':
try:
os.remove(os.path.join(path, f))
except OSError:
pass
else:
msgs.append(
'There should be no additional files in the ' + last + ' directory. Only \"Primary\" and \"Secondary\".')
break
# check second level directory, should have folders named with valid vocab
elif last.lower() == 'primary' or last.lower() == 'secondary':
for sub in dirs:
if sub.lower() not in self.vocab:
msgs.append('Invalid reference type: ' + sub)
elif sub.lower().startswith('rgb_no_lens') or sub.lower().startswith('roof_tile') or sub.lower().startswith('lens_cap'):
luminance_folders.append(os.path.join(path, sub))
if files:
for f in files:
if f.startswith('.') or f.lower() == 'thumbs.db':
try:
os.remove(os.path.join(path, f))
except OSError:
pass
else:
msgs.append(
'There should be no additional files in the ' + last + ' directory. Only PRNU reference type folders (White_Screen, Blue_Sky, etc).')
break
# check bottom level directory, should only have files
elif last.lower() in self.vocab:
if files:
for f in files:
if f.startswith('.') or f.lower() == 'thumbs.db':
try:
os.remove(os.path.join(path, f))
except OSError:
pass
if not files and not dirs:
msgs.append('There are no images or videos in: ' + path + '. If this is intentional, delete the folder.')
# software_whitelist = csv.reader()
for folder in luminance_folders:
res = self.checker.check_luminance(folder)
if res is not None:
msgs.append(res)
organization_errors = self.organize_prnu_dir(folder)
if organization_errors:
msgs.extend(organization_errors)
return
if not self.newCam.get() and not self.local_id_used():
msgs = 'Invalid local ID: ' + self.localID.get() + '. This field is case sensitive, and must also match the name of the directory. Would you like to add a new device?'
if tkMessageBox.askyesno(title='Unrecognized Local ID', message=msgs):
self.open_new_insert_id()
msgs = 'hide'
if msgs == 'hide':
pass
elif msgs:
enable = True
for msg in msgs:
if not msg.lower().startswith('warning'):
enable = False
break
ErrorWindow(self, errors=msgs)
if enable:
self.uploadButton.config(state=NORMAL)
self.rootEntry.config(state=DISABLED)
tkMessageBox.showwarning(title='Complete',
message='Since only warnings were generated, upload will be enabled. Make sure'
' that your data is correct.')
self.master.statusBox.println('PRNU directory successfully validated: ' + self.root_dir.get())
else:
tkMessageBox.showerror(title='Complete', message='Correct the errors and re-verify to enable upload.')
self.master.statusBox.println('PRNU directory validation failed for ' + self.root_dir.get())
else:
tkMessageBox.showinfo(title='Complete',
message='Everything looks good. Click \"Start Upload\" to begin upload.')
self.uploadButton.config(state=NORMAL)
self.rootEntry.config(state=DISABLED)
self.master.statusBox.println('PRNU directory successfully validated: ' + self.root_dir.get())
def organize_prnu_dir(self, luminance_dir):
subfolders = [os.path.normpath(os.path.join(luminance_dir, x)) for x in os.listdir(luminance_dir) if os.path.isdir(os.path.join(luminance_dir, x))]
files_in_dir = any(os.path.isfile(os.path.join(luminance_dir, x)) for x in os.listdir(luminance_dir))
warning_res = []
def copy_to_res(image_data, root_dir):
correct_res_dir = None
            if 'ImageWidth' in image_data and 'ImageHeight' in image_data:
correct_res_dir = os.path.join(root_dir, "{0}x{1}".format(image_data['ImageWidth'], image_data['ImageHeight']))
else:
mg_size = openImage(image_data['SourceFile']).size
if mg_size:
correct_res_dir = os.path.join(root_dir, "{0}x{1}".format(str(mg_size[0]), str(mg_size[1])))
else:
warning_res.append("Unable to verify the resolution of {0}".format(image_data['SourceFile']))
if correct_res_dir:
if not os.path.exists(correct_res_dir):
os.mkdir(correct_res_dir)
filename = os.path.split(image_data['SourceFile'])[1]
shutil.move(image_data['SourceFile'], os.path.join(correct_res_dir, filename))
for subdir in subfolders:
try:
(width, height) = os.path.split(subdir)[1].lower().split("x")
except ValueError:
if not os.listdir(subdir):
os.rmdir(subdir)
return
else:
error = "{0} is not a resolution directory. Please check this folder and run the verification " \
"again. If these contain PRNU images, put them in: {1}.".format(subdir,
os.path.split(subdir)[0])
return error
for f in os.listdir(subdir):
if f.startswith(".") or f.lower() == "thumbs.db":
try:
os.remove(os.path.join(subdir, f))
except OSError:
pass
if all(os.path.isfile(os.path.join(subdir, x)) for x in os.listdir(subdir)):
width_height = json.loads(subprocess.Popen(['exiftool', '-ImageWidth', '-ImageHeight', '-Software', '-j', subdir], stdout=subprocess.PIPE).communicate()[0])
else:
error = "There should be no subdirectories in:\n{0}\n\nPlease check this directory and try again".format(subdir)
return error
for i in xrange(0, len(width_height)):
if ('ImageWidth' in width_height[i] and 'ImageHeight' in width_height[i]) and \
(width_height[i]['ImageWidth'] != int(width) or width_height[i]['ImageHeight'] != int(height)):
copy_to_res(width_height[i], luminance_dir)
if files_in_dir:
for useless in [x for x in os.listdir(luminance_dir) if
os.path.splitext(x)[1] in [".ini"] or x.startswith(".")]:
os.remove(os.path.join(luminance_dir, useless))
exif_r = subprocess.Popen(['exiftool', '-ImageWidth', '-ImageHeight', '-j', luminance_dir],stdout=subprocess.PIPE).communicate()[0] # ['-Software',]
width_height = json.loads(exif_r)
for i in range(0, len(width_height)):
# if width_height[i]['Software'] not in software_list:
# error = "{0} is not in the approved software list for this camera.".format(
# width_height[i]['Software'])
# return error
copy_to_res(width_height[i], luminance_dir)
return warning_res if warning_res else None
def has_same_contents(self, list1, list2):
# set both lists to lowercase strings and checks if they have the same items, in any order
llist1 = [x.lower() for x in list1]
llist2 = [y.lower() for y in list2]
return collections.Counter(llist1) == collections.Counter(llist2)
def upload(self):
"""
Upload files to S3 individually (no archiving)
:return: None
"""
self.capitalize_dirs()
val = self.s3path.get()
if (val is not None and len(val) > 0):
self.settings.save('aws-prnu', val)
# parse path
s3 = S3Transfer(boto3.client('s3', 'us-east-1'))
if val.startswith('s3://'):
val = val[5:]
BUCKET = val.split('/')[0].strip()
DIR = val[val.find('/') + 1:].strip()
DIR = DIR if DIR.endswith('/') else DIR + '/'
print('Archiving data...')
archive = self.archive_prnu()
if not archive:
tkMessageBox.showerror("Error", "File encryption failed. Please check your recipient setting and try again.")
return
print('Uploading...')
try:
s3.upload_file(archive, BUCKET, DIR + os.path.basename(archive), callback=ProgressPercentage(archive))
except Exception as e:
tkMessageBox.showerror(title='Error', message='Could not complete upload. (' + str(e) + ')')
return
if tkMessageBox.askyesno(title='Complete',
message='Successfully uploaded PRNU data to S3://' + val + '. Would you like to notify via Trello?'):
err = self.notify_trello_prnu('s3://' + os.path.join(BUCKET, DIR, os.path.basename(archive)), archive)
if err:
tkMessageBox.showerror(title='Error', message='Failed to notify Trello (' + str(err) + ')')
else:
tkMessageBox.showinfo(title='Status', message='Complete!')
# reset state of buttons and boxes
self.cancel_upload()
def notify_trello_prnu(self, path, archive_path):
"""
Trello notifier. Posts location on s3 and timestamp, as well as errors.
:param path: S3 bucket/path (used for card description only)
:return: Status code, if bad. Otherwise None.
"""
if self.settings.get_key('trello') is None:
t = TrelloSignInPrompt(self)
token = t.token.get()
self.settings.save('trello', token)
# post the new card
list_id = data_files._TRELLO['prnu_list']
new = os.path.splitext(os.path.basename(archive_path))[0]
desc = path
resp = requests.post("https://trello.com/1/cards", params=dict(key=self.master.trello_key, token=self.settings.get_key('trello')),
data=dict(name=new, idList=list_id, desc=desc))
if resp.status_code == requests.codes.ok:
me = requests.get("https://trello.com/1/members/me", params=dict(key=self.master.trello_key, token=self.settings.get_key('trello')))
member_id = json.loads(me.content)['id']
new_card_id = json.loads(resp.content)['id']
resp2 = requests.post("https://trello.com/1/cards/%s/idMembers" % (new_card_id),
params=dict(key=self.master.trello_key, token=self.settings.get_key('trello')),
data=dict(value=member_id))
return None
else:
return resp.status_code
def cancel_upload(self):
self.uploadButton.config(state=DISABLED)
self.rootEntry.config(state=NORMAL)
def archive_prnu(self):
fd, tname = tempfile.mkstemp(suffix='.tar')
# ftar = os.path.join(os.path.split(self.root_dir.get())[0], self.localID.get() + '.tar')
archive = tarfile.open(tname, "w", errorlevel=2)
archive.add(self.root_dir.get(), arcname=os.path.split(self.root_dir.get())[1])
archive.close()
os.close(fd)
        tar_path = os.path.join(self.root_dir.get(), self.localID.get() + '.tar')
shutil.move(tname, tar_path)
recipient = self.settings.get_key("archive_recipient") if self.settings.get_key("archive_recipient") else None
if recipient:
subprocess.Popen(['gpg', '--recipient', recipient, '--trust-model', 'always', '--encrypt', tar_path]).communicate()
final_name = tar_path + ".gpg"
return final_name
return None
def write_md5(self, path):
# write md5 of archive to file
md5filename = os.path.join(os.path.split(self.root_dir.get())[0], self.localID.get() + '.md5')
with open(md5filename, 'w') as m:
with open(path, 'rb') as f:
m.write(hashlib.md5(f.read()).hexdigest())
return md5filename
def capitalize_dirs(self):
# http://stackoverflow.com/questions/3075443/python-recursively-remove-capitalisation-from-directory-structure
# applied title capitalization to all subforders of a root dir, not including root itself
def rename_all(root, items):
for name in items:
try:
shutil.move(os.path.join(root, name), os.path.join(root, name.title()))
except OSError:
pass # just skip if can't be renamed
# starts from the bottom so paths further up remain valid after renaming
for root, dirs, files in os.walk(self.root_dir.get(), topdown=False):
rename_all(root, dirs)
def export_local(self):
# for testing purposes
self.capitalize_dirs()
archive = self.archive_prnu()
shutil.copy(archive, os.getcwd())
os.remove(archive)
self.master.statusBox.println('done')
def local_id_used(self):
self.master.load_ids(local_id=self.localID.get())
if self.localID.get().lower() in [i.lower() for i in self.master.cameras.keys()]:
return True
else:
return False
class HPGUI(Frame):
"""
The main HP GUI Window. Contains the initial UI setup, the camera list updating, and the file menu options.
"""
def __init__(self, checker, master=None, **kwargs):
Frame.__init__(self, master, **kwargs)
self.checker = checker
self.master = master
self.trello_key = data_files._TRELLO['app_key']
self.settings = MaskGenLoader()
self.cam_local_id = ""
self.create_widgets()
self.statusBox.println('See terminal/command prompt window for progress while processing.')
try:
with open(data_files._LOCALDEVICES, "r") as j:
self.cameras = json.load(j)
except (ValueError, IOError):
if self.settings.get_key("apitoken") != "":
self.load_ids("download_locally")
print("Failed to load local device list from file.")
def create_widgets(self):
self.menubar = Menu(self)
self.fileMenu = Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label='File', menu=self.fileMenu)
self.fileMenu.add_command(label='Open HP Data Spreadsheet for Editing', command=self.open_old_rit_csv,
accelerator='ctrl-o')
self.fileMenu.add_command(label='Open Keywords Spreadsheet for Editing', command=self.open_old_keywords_csv)
self.fileMenu.add_command(label='Settings...', command=self.open_settings)
self.fileMenu.add_command(label='Add a New Device', command=self.open_form)
self.fileMenu.add_command(label='Add a New GAN', command=self.add_gan)
self.fileMenu.add_command(label='Update a Device', command=self.edit_device)
self.fileMenu.add_command(label='System Check', command=self.system_check)
self.fileMenu.add_command(label='Download HP Device List for Offline Use',
command=lambda: API_Camera_Handler(self, self.settings.get_key('apiurl'),
self.settings.get_key('apitoken'),
given_id="download_locally"))
self.master.config(menu=self.menubar)
self.statusFrame = Frame(self)
self.statusFrame.pack(side=BOTTOM, fill=BOTH, expand=1)
Label(self.statusFrame, text='Notifications').pack()
self.statusBox = ReadOnlyText(self.statusFrame, height=10)
self.statusBox.pack(fill=BOTH, expand=1)
self.nb = ttk.Notebook(self)
self.nb.pack(fill=BOTH, expand=1)
f1 = HP_Starter(self.settings, self.checker, master=self)
f2 = PRNU_Uploader(self.settings, self.checker, master=self)
self.nb.add(f1, text='Process HP Data')
self.nb.add(f2, text='Export PRNU Data')
def open_form(self):
"""
Open the form for uploading a new HP device. Requires browser login.
:return: None
"""
if self.settings.get_key('apitoken') in (None, ''):
tkMessageBox.showerror(title='Error', message='Browser login is required to use this feature. Enter this in settings.')
return
new_device = StringVar()
h = HP_Device_Form(self, validIDs=self.cameras.keys(), pathvar=new_device, token=self.settings.get_key('trello'), browser=self.settings.get_key('apitoken'), gan=False)
h.wait_window()
if h.camera_added:
self.reload_ids()
def add_gan(self):
"""
Open the form for uploading a new HP GAN. Requires browser login.
:return: None
"""
if self.settings.get_key('apitoken') in (None, ''):
tkMessageBox.showerror(title='Error', message='Browser login is required to use this feature. Enter this in settings.')
return
new_device = StringVar()
h = HP_Device_Form(self, validIDs=self.cameras.keys(), pathvar=new_device, token=self.settings.get_key('trello'), browser=self.settings.get_key('apitoken'), gan=True)
h.wait_window()
if h.camera_added:
self.reload_ids()
def edit_device(self):
"""
Opens the form for updating an existing HP device with alternate exif metadata.
:return: None
"""
token = self.settings.get_key('apitoken')
if token is None:
tkMessageBox.showerror(title='Error',
message='You must be logged into browser to use this feature. Please enter your browser token in settings.')
return
device_id = tkSimpleDialog.askstring(title='Device ID', prompt='Please enter device local ID:')
if device_id in ('', None):
return
self.cam_local_id = device_id
# before opening the camera update form, make sure the most up-to-date camera list is available
source = self.reload_ids(local_id=device_id)
if source == 'local':
tkMessageBox.showerror(title='Error', message='Could not get camera from browser.')
return
else:
try:
d = Update_Form(self, device_data=self.cameras[device_id], browser=token, trello=self.settings.get_key('trello'))
self.wait_window(d)
if d.updated:
self.reload_ids(local_id=device_id)
except KeyError:
tkMessageBox.showerror(title='Error', message='Invalid Device ID (case-sensitive).')
return
def open_old_rit_csv(self):
"""
Open an existing set of HP data for spreadsheet editing. user selects root output directory.
:return: None
"""
open_data = tkMessageBox.askokcancel(title='Data Selection',
message='Select data to open. Select the root OUTPUT directory - the one with csv, image, etc. folders.')
if open_data:
d = tkFileDialog.askdirectory(title='Select Root Data Folder')
if d is None:
return
else:
try:
csv = None
for f in os.listdir(os.path.join(d, 'csv')):
if f.endswith('rit.csv'):
csv = os.path.join(d, 'csv', f)
break
# csv directory and at least one of: image, video, audio folders must exist
if csv is None or True not in (os.path.exists(os.path.join(d, 'image')),
os.path.exists(os.path.join(d, 'video')),
os.path.exists(os.path.join(d, 'audio')),
os.path.exists(os.path.join(d, 'model'))):
raise OSError()
except OSError as e:
tkMessageBox.showerror(title='Error',
message='Directory must contain csv directory and at least one of image, video, or audio directories. The csv folder must contain the data file (*rit.csv).')
return
check_outdated(csv, d)
h = HPSpreadsheet(self.settings, dir=d, ritCSV=csv, master=self, devices=self.cameras)
h.open_spreadsheet()
else:
return
def open_old_keywords_csv(self):
"""
Open existing keyword data for spreadsheet editing. User selects root output directory.
:return: None
"""
open_data = tkMessageBox.askokcancel(title='Data Selection',
message='Select data to edit keywords. Select the root OUTPUT directory - the one with csv, image, etc. folders.')
if open_data:
d = tkFileDialog.askdirectory(title='Select Root Data Folder')
if d is None:
return
else:
try:
csv = None
for f in os.listdir(os.path.join(d, 'csv')):
if f.endswith('keywords.csv'):
csv = os.path.join(d, 'csv', f)
break
# csv directory and at least one of: image, video, audio folders must exist
if csv is None or True not in (os.path.exists(os.path.join(d, 'image')),
os.path.exists(os.path.join(d, 'video')),
os.path.exists(os.path.join(d, 'audio')),
os.path.exists(os.path.join(d, 'model'))):
raise OSError()
except OSError as e:
tkMessageBox.showerror(title='Error',
message='Directory must contain csv directory and at least one of image, video, or audio directories. The csv folder must contain the data file (*keywords.csv).')
return
k = KeywordsSheet(self.settings, dir=d, keyCSV=csv, master=self)
k.open_spreadsheet()
else:
return
def open_settings(self):
SettingsWindow(master=self.master, settings=self.settings)
def load_ids(self, local_id):
"""
Call to the camera handler class to get most updated version of camera list. Will load from local list if no
connection available.
:return: string containing source of camera data ('local' or 'remote')
"""
self.cam_local_id = local_id
self.cams = API_Camera_Handler(self, self.settings.get_key('apiurl'), self.settings.get_key('apitoken'), given_id=self.cam_local_id)
self.cameras = self.cams.get_all()
if self.cams.get_source() == 'remote':
self.statusBox.println('Camera data successfully loaded from API.')
else:
self.statusBox.println(
'Camera data loaded from local device list.\nIf you have never connected before, this'
' list is empty and you will not be able to process your data!')
self.statusBox.println(
'It is recommended to enter your browser credentials in settings and restart to get the most updated information.')
return self.cams.source
def reload_ids(self, local_id=""):
"""Wipe and reload camera data"""
if local_id != "":
self.cam_local_id = local_id
self.cameras = None
return self.load_ids(local_id)
def system_check(self):
errors = []
warnings = []
try:
subprocess.Popen(['exiftool'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except WindowsError:
errors.append("Exiftool is not installed.")
try:
keys = subprocess.Popen(['gpg', '--list-keys'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if not keys[0]:
errors.append('There are no GnuPG recipient keys installed. The HP Tool cannot encrypt to a recipient'
' without a key, leading to archives not being uploaded.')
except WindowsError:
errors.append("GnuPG is not installed. The HP Tool cannot encrypt archives without GnuPG, leading to "
"archives not being uploaded.")
if self.load_ids('AS-ONE') != 'remote':
errors.append('Cannot connect to {0} with API token {1}.'.format(self.settings.get_key('apiurl'),
self.settings.get_key('apitoken')))
if self.settings.get_key('archive_recipient') in (None, ""):
warnings.append("No archive recipient found. The HP Tool cannot upload archives without a recipient.")
if errors:
tkMessageBox.showerror("Error", "\n\n".join(errors))
if warnings:
tkMessageBox.showwarning("Warning", "\n\n".join(warnings))
elif not (warnings or errors):
tkMessageBox.showinfo("Success", "No errors have been found in this installation.")
class ReadOnlyText(Text):
"""
The Notifications box on main HP GUI
"""
def __init__(self, master, **kwargs):
Text.__init__(self, master, **kwargs)
self.master = master
self.config(state='disabled')
def println(self, text):
self.config(state='normal')
self.insert(END, text + '\n')
self.see('end')
self.config(state='disabled')
class Checker:
def __init__(self):
self.camera_list = {}
def update_camera_list(self, camera_list):
self.camera_list = camera_list
return
def check_calibrations(self, local_id):
try:
ret = True if self.camera_list[local_id]['calibrations'] else False
except KeyError:
ret = False
return ret
def check_luminance(self, foldername):
"""
Verifies luminance of PRNU data folder
:param foldername: Full absolute path of folder to check. Last
:return: list of error messages
"""
reds = []
greens = []
blues = []
def calc_mean(filepath):
image_data = openImageFile(filepath).image_array
if image_data is None:
return
red = image_data[:, :, 0]
green = image_data[:, :, 1]
blue = image_data[:, :, 2]
reds.append((np.mean(red) / 255) * 100)
greens.append((np.mean(green) / 255) * 100)
blues.append((np.mean(blue) / 255) * 100)
return
try:
target = int(foldername.split("_")[-1])
except ValueError:
return 'Warning: Luminance of ' + foldername + ' could not be verified.' if not \
os.path.split(foldername)[1].lower() == "lens_cap" else None
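        # accept mean channel values within 10 percentage points of the numeric target parsed from the folder name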
min_value = target - 10
max_value = target + 10
for res in os.listdir(foldername):
if os.path.isdir(os.path.join(foldername, res)):
for f in os.listdir(os.path.join(foldername, res)):
calc_mean(os.path.join(foldername, res, f))
else:
calc_mean(os.path.join(foldername, res))
if reds and greens and blues:
red_per = int(np.mean(reds))
green_per = int(np.mean(greens))
blue_per = int(np.mean(blues))
if not all(rgb in range(min_value, max_value) for rgb in (red_per, green_per, blue_per)):
results = "Warning: {0} has incorrect luminance values of R:{1}, G:{2}, B:{3} where it appears " \
"the target was {4}".format(foldername, red_per, green_per, blue_per, target)
return results
class LenientChecker:
def __init__(self, *args):
pass
def update_camera_list(self, *args):
pass
def check_calibrations(self, *args):
return True
def check_luminance(self, *args):
return None
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(description='')
parser.add_argument('--lenient', help=argparse.SUPPRESS, required=False, action="store_true")
args = parser.parse_args(argv[1:])
checker = Checker() if not args.lenient else LenientChecker()
root = Tk()
root.resizable(width=False, height=False)
root.wm_title('HP GUI')
HPGUI(checker, master=root).pack(side=TOP, fill=BOTH, expand=TRUE)
root.mainloop()
if __name__ == '__main__':
sys.exit(main())
```
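For reference, a sketch of launching the GUI programmatically with the lenient checker, equivalent to running this module with `--lenient` (it mirrors `main()` above and assumes a working Tkinter install, a configured maskgen settings file, and that the `hp` package directory is on the path):

```python
from Tkinter import Tk, TOP, BOTH, TRUE
from hpgui import HPGUI, LenientChecker

root = Tk()
root.resizable(width=False, height=False)
root.wm_title('HP GUI')
# LenientChecker skips the PRNU calibration and luminance checks enforced by Checker.
HPGUI(LenientChecker(), master=root).pack(side=TOP, fill=BOTH, expand=TRUE)
root.mainloop()
```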
#### File: Media-Journaling-Tool/hp_tool/test_hp_tool.py
```python
import os
import shutil
import unittest
from hp.camera_handler import API_Camera_Handler
from hp.hp_data import process
from maskgen.maskgen_loader import MaskGenLoader
from mock import Mock
from hp import data_files
class TestHPTool(unittest.TestCase):
def test_process_data(self):
def get_key(key, *args, **kwargs):
return 0 if key == "seq" else key
self.settings = Mock()
self.settings.get_key = get_key
# Get a Camera (May be AS-ONE, May be sample, irrelevant for the test)
cam = API_Camera_Handler(self, None, None, "sample", localfile=data_files._DEVICES)
        # Attempt to Process Data with its Information
current_dir = os.path.dirname(__file__)
indir = os.path.join(current_dir, "test")
odir = os.path.join(current_dir, "output")
process(self, cam.get_all(), indir, odir)
self.assertTrue(os.path.isdir(os.path.join(odir, "csv")) and os.listdir(os.path.join(odir, "csv")) != [])
self.assertTrue(os.path.isdir(os.path.join(odir, "image")) and os.listdir(os.path.join(odir, "image")) != [])
self.assertTrue(os.path.isdir(os.path.join(odir, "video")) and os.listdir(os.path.join(odir, "video")) != [])
self.assertTrue(os.path.isdir(os.path.join(odir, "audio")) and os.listdir(os.path.join(odir, "audio")) != [])
self.assertFalse(os.path.isdir(os.path.join(odir, "model")))
shutil.rmtree(odir)
# Attempt to Process 3D Models
indir = os.path.join(current_dir, "test_model")
process(self, {}, indir, odir)
self.assertTrue(os.path.isdir(os.path.join(odir, "csv")) and os.listdir(os.path.join(odir, "csv")) != [])
self.assertFalse(os.path.isdir(os.path.join(odir, "image")))
self.assertFalse(os.path.isdir(os.path.join(odir, "video")))
self.assertFalse(os.path.isdir(os.path.join(odir, "audio")))
self.assertTrue(os.path.isdir(os.path.join(odir, "model")) and os.listdir(os.path.join(odir, "model")) != [])
shutil.rmtree(odir)
def tearDown(self):
# If any of the tests fail, the output directory may have still been created
if os.path.isdir(os.path.join(os.path.dirname(__file__), "output")):
shutil.rmtree(os.path.join(os.path.dirname(__file__), "output"))
```
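The test case above can be run directly with the standard `unittest` runner; a sketch (it assumes the `test` and `test_model` fixture directories referenced in `test_process_data` exist alongside the module):

```python
import unittest
from test_hp_tool import TestHPTool

suite = unittest.TestLoader().loadTestsFromTestCase(TestHPTool)
unittest.TextTestRunner(verbosity=2).run(suite)
```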
#### File: maskgen/batch/bulk_validate.py
```python
from __future__ import print_function
import argparse
from maskgen import scenario_model
import csv
import os
from maskgen.batch import pick_projects
from maskgen.preferences_initializer import initialize
from maskgen import graph_rules
from maskgen import maskGenPreferences
def validate_export(error_writer,project, sm):
"""
Save error report, project properties, composites, and donors
:param sm: scenario model
"""
errorList = sm.validate()
name = os.path.basename(project)
graph_rules.processProjectProperties(sm)
sm.save()
for err in errorList:
error_writer.writerow((name, err[0].name, err[1],err[2],err[3]))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', help="optional username", required=False)
parser.add_argument('--projects', help='Directory of projects')
args = parser.parse_args()
initialize(maskGenPreferences, username=args.username)
project_list = pick_projects(args.projects)
with open(os.path.join(args.projects,'ErrorReport_' + str(os.getpid()) + '.csv'), 'wb') as csvfile:
error_writer = csv.writer(csvfile, delimiter = ' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for project in project_list:
try:
validate_export(error_writer, project, scenario_model.loadProject(project))
except Exception as e:
print (project)
print (str(e))
if __name__ == '__main__':
main()
```
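A sketch of validating a single project without the CLI wrapper, using the same `validate_export` helper as `main()` above (the project path is illustrative, and preferences are initialized the same way `main()` does):

```python
import csv
from maskgen import scenario_model, maskGenPreferences
from maskgen.preferences_initializer import initialize
from maskgen.batch.bulk_validate import validate_export

initialize(maskGenPreferences, username=None)  # same setup main() performs, minus the optional username
project = '/data/journals/sample_journal/sample_journal.json'  # illustrative path
with open('ErrorReport_single.csv', 'wb') as csvfile:
    error_writer = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    validate_export(error_writer, project, scenario_model.loadProject(project))
```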
#### File: maskgen/batch/__init__.py
```python
import os
import csv
import sys
import logging
from threading import RLock, Thread
def pick_projects(directory):
"""
Finds all subdirectories in directory containing a .json file
:param directory: string containing directory of subdirectories to search
:return: list projects found under the given directory
"""
if os.path.isfile(directory):
with open(directory,'r') as fp:
return [x.strip() for x in fp.readlines()]
ext = '.json'
subs = [x[0] for x in os.walk(directory,followlinks=True)]
projects = []
for sub in subs:
files = []
for f in os.listdir(sub):
if f.endswith(ext):
files.append(f)
if len(files) > 0:
sizes = [os.stat(os.path.join(sub, pick)).st_size for pick in files]
max_size = max(sizes)
index = sizes.index(max_size)
projects.append(os.path.join(sub, files[index]))
return projects
def pick_zipped_projects(directory):
"""
Finds all subdirectories in directory containing a .json file
:param directory: string containing directory of subdirectories to search
:return: list projects found under the given directory
"""
ext = '.tgz'
subs = [x[0] for x in os.walk(directory)]
projects = []
for sub in subs:
for f in os.listdir(sub):
if f.endswith(ext):
projects.append(os.path.join(sub,f))
return projects
class BatchProcessor:
def __init__(self, completeFile, itemsToProcess, threads=1):
self.completefile = completeFile if completeFile is not None else str(os.getpid()) + '.txt'
self.itemsToProcess = itemsToProcess
self.threads=threads
self.count = 0
self.lock = RLock()
def _thread_worker(self, total, func_to_run,done_file,error_writer):
while not self.q.empty():
try:
item_to_process = self.q.get_nowait()
if item_to_process is None:
break
item_id = item_to_process[0] if isinstance(item_to_process,tuple) else item_to_process
logging.getLogger('maskgen').info('Project updating: ' + str(item_id))
errors = func_to_run(item_to_process)
if errors is not None:
for error in errors:
if type(error) == tuple:
error_writer.writerow((str(item_id),) + error)
elif hasattr(error, 'astuple') and callable(getattr(error, 'astuple')):
error_writer.writerow((str(item_id),) + error.astuple())
else:
error_writer.writerow((str(item_id), str(error)))
with self.lock:
self.count += 1
logging.getLogger('maskgen').info(
'Project updated [' + str(self.count) + '/' + str(total) + '] ' + str(item_id))
done_file.write(item_id + '\n')
done_file.flush()
except Exception as e:
logging.getLogger('maskgen').error(str(e))
logging.getLogger('maskgen').error('Project skipped: ' + str(item_id))
def process(self, func):
from Queue import Queue
from functools import partial
skips = []
if os.path.exists(self.completefile):
with open(self.completefile, 'r') as skip:
skips = skip.readlines()
skips = [x.strip() for x in skips]
count = 0
total = len(self.itemsToProcess)
logging.getLogger('maskgen').info('Processing {} projects'.format(total))
name=0
threads=[]
self.q = Queue()
for item_to_process in self.itemsToProcess:
if item_to_process not in skips:
self.q.put(item_to_process)
with open(self.completefile, 'a') as done_file:
with open(os.path.join('ErrorReport_' + str(os.getpid()) + '.csv'), 'w') as csvfile:
error_writer = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
thread_func = partial(self._thread_worker, total, func, done_file, error_writer)
for i in range(int(self.threads)):
name += 1
t = Thread(target=thread_func, name=str(name))
threads.append(t)
t.start()
for thread in threads:
thread.join()
return self.count
```
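A minimal sketch of driving `BatchProcessor` with a per-project worker function (the directory and the worker are illustrative; the worker returns a list of errors, or None on success, exactly as `_thread_worker` expects):

```python
from maskgen.batch import pick_projects, BatchProcessor

def rebuild_project(project_path):
    # per-project work goes here; return a list of error tuples/strings, or None on success
    return None

projects = pick_projects('/data/journals')  # illustrative directory
processor = BatchProcessor('completed_projects.txt', projects, threads=4)
processed = processor.process(rebuild_project)
print('processed {} projects'.format(processed))
```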
#### File: Media-Journaling-Tool/maskgen/config.py
```python
global_config = {}
def getAndSet(name, item):
global global_config
if name in global_config:
return global_config[name]
global_config[name] = item
return item
```
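An illustrative use of the process-wide cache in `getAndSet`: the first call for a key stores the supplied value, and later calls return the cached value unchanged:

```python
from maskgen.config import getAndSet

first = getAndSet('cache.size', 128)    # key absent: stores and returns 128
second = getAndSet('cache.size', 256)   # key present: returns the cached 128, ignores 256
assert first == second == 128
```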
#### File: Media-Journaling-Tool/maskgen/cv2api.py
```python
import cv2
import ffmpeg_api
"""
Wrapper class around CV2 to support different API versions (opencv 2 and 3)
"""
class CAPReader:
def __init__(self, cap):
"""
:param cap:
@type cap: cv2.VideoCapture
"""
self.cap = cap
def grab(self):
ret = self.cap.grab()
if not ret:
ret = self.cap.grab()
return ret
return ret
def release(self):
self.cap.release()
def read(self):
ret, frame = self.cap.read()
if not ret:
ret, frame = self.cap.read()
return ret, frame
return ret, frame
def retrieve(self, channel = None):
return self.cap.retrieve() if channel is None else self.cap.retrieve(channel)
def get(self, prop):
return self.cap.get(prop)
def set(self, prop, value):
return self.cap.set(prop, value)
def isOpened(self):
return self.cap.isOpened()
class CAPReaderWithFFMPEG(CAPReader):
def __init__(self, frames, cap):
"""
:param cap:
@type cap: cv2.VideoCapture
"""
CAPReader.__init__(self, cap)
self.frames = frames
self.pos = 0
self.last_frame = None
self.use_last_frame = False
def retrieve(self, channel = None):
if self.use_last_frame:
return self.last_frame
self.last_frame = CAPReader.retrieve(self)
return self.last_frame
def grab(self):
result = CAPReader.grab(self)
if not result and self.pos < len(self.frames):
self.use_last_frame = True
result = True
if result:
self.pos += 1
return result
def read(self):
ret, last_frame = CAPReader.read(self)
if not ret and self.pos < len(self.frames):
self.use_last_frame = True
return self.last_frame
if ret:
self.pos += 1
self.last_frame = last_frame
return ret, last_frame
return ret, last_frame
def get(self, prop):
if prop == cv2api_delegate.prop_pos_msec:
try:
return float(self.frames[self.pos]['pkt_pts_time']) * 1000
except:
try:
return float(self.frames[self.pos]['pkt_dts_time']) * 1000
except:
pass
return CAPReader.get(self, prop)
class CV2Api:
def __init__(self):
pass
def findContours(self, image):
pass
def videoWriter(self, out_file,fourcc, fps, dimensions, isColor=1):
pass
def videoCapture(self, filename, preference=None, useFFMPEGForTime=True):
meta, frames = ffmpeg_api.get_meta_from_video(filename, show_streams=True, media_types=['video'])
index = ffmpeg_api.get_stream_indices_of_type(meta, 'video')[0]
cap = cv2.VideoCapture(filename, preference) if preference is not None else cv2.VideoCapture(filename)
# is FIXED RATE (with some confidence)
if not ffmpeg_api.is_vfr(meta[index]) or not useFFMPEGForTime:
return CAPReader(cap)
meta, frames = ffmpeg_api.get_meta_from_video(filename, show_streams=True, with_frames=True, media_types=['video'])
return CAPReaderWithFFMPEG(frames[index], cap)
def computeSIFT(self, img):
        return None, None
def get_fourcc(self, codec):
return 0
def calcOpticalFlowFarneback(self, past, future, scale, levels, windowsize, iterations, poly_n, poly_sigma,flags=0):
return None
class CV2ApiV2(CV2Api):
def __init__(self):
CV2Api.__init__(self)
self.prop_pos_msec = cv2.cv.CV_CAP_PROP_POS_MSEC
# self.prop_buffer_size = cv2.cv.CV_CAP_PROP_BUFFERSIZE
self.prop_frame_height = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
self.prop_frame_width = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
self.prop_fps = cv2.cv.CV_CAP_PROP_FPS
self.prop_frame_count = cv2.cv.CV_CAP_PROP_FRAME_COUNT
self.tm_sqdiff_normed = cv2.cv.CV_TM_SQDIFF_NORMED
self.tm_ccorr_normed = cv2.cv.CV_TM_CCORR_NORMED
self.fourcc_prop = cv2.cv.CV_CAP_PROP_FOURCC
self.inter_linear = cv2.cv.CV_INTER_LINEAR
self.inter_cubic = cv2.cv.CV_INTER_CUBIC
self.inter_nn = cv2.INTER_NEAREST
self.inter_area = cv2.INTER_AREA
self.inter_lanczos = cv2.INTER_LANCZOS4
def findContours(self, image, mode, method):
contours, hierarchy = cv2.findContours(image, mode, method)
return contours, hierarchy
def computeSURF(self, img):
detector = cv2.FeatureDetector_create("SURF")
extractor = cv2.DescriptorExtractor_create("SURF")
kp = detector.detect(img)
return extractor.compute(img, kp)
def computeSIFT(self, img):
detector = cv2.FeatureDetector_create("SIFT")
extractor = cv2.DescriptorExtractor_create("SIFT")
kp = detector.detect(img)
return extractor.compute(img, kp)
def get_fourcc(self, codec):
if codec == '0' or codec == 0:
return 0
return cv2.cv.CV_FOURCC(*codec)
def videoWriter(self, out_file,fourcc, fps, dimensions, isColor=1):
return cv2.VideoWriter(out_file, fourcc, fps,dimensions, isColor=isColor)
def calcOpticalFlowFarneback(self, past, future, scale, levels, windowsize, iterations, poly_n, poly_sigma,flags=0):
return cv2.calcOpticalFlowFarneback(past, future,
scale, levels, windowsize, iterations, poly_n, poly_sigma, flags)
class CV2ApiV3(CV2Api):
def __init__(self):
CV2Api.__init__(self)
self.prop_pos_msec = cv2.CAP_PROP_POS_MSEC
# self.prop_buffer_size = cv2.CAP_PROP_BUFFERSIZE
self.prop_frame_height = cv2.CAP_PROP_FRAME_HEIGHT
self.prop_frame_width = cv2.CAP_PROP_FRAME_WIDTH
self.prop_fps = cv2.CAP_PROP_FPS
self.prop_frame_count = cv2.CAP_PROP_FRAME_COUNT
self.tm_sqdiff_normed = cv2.TM_SQDIFF_NORMED
self.tm_ccorr_normed = cv2.TM_CCORR_NORMED
self.fourcc_prop = cv2.CAP_PROP_FOURCC
self.inter_linear = cv2.INTER_LINEAR
self.inter_cubic = cv2.INTER_CUBIC
self.inter_nn = cv2.INTER_NEAREST
self.inter_area = cv2.INTER_AREA
self.inter_lanczos = cv2.INTER_LANCZOS4
def findContours(self, image, mode, method):
img2, contours, hierarchy = cv2.findContours(image, mode, method)
return contours, hierarchy
def computeSIFT(self, img):
detector = cv2.xfeatures2d.SIFT_create()
return detector.detectAndCompute(img, None)
def computeSURF(self, img):
detector = cv2.xfeatures2d.SURF_create(upright=True,extended=True)
return detector.detectAndCompute(img, None)
def get_fourcc(self, codec):
if codec == '0' or codec == 0:
return 0
return cv2.VideoWriter_fourcc(*codec)
def videoWriter(self, out_file,fourcc, fps, dimensions, isColor=1):
return cv2.VideoWriter(out_file, cv2.CAP_FFMPEG, fourcc, fps,dimensions, isColor=isColor)
def calcOpticalFlowFarneback(self, past, future, scale, levels, windowsize, iterations, poly_n, poly_sigma,flags=0):
return cv2.calcOpticalFlowFarneback(past, future, None,
scale, levels, windowsize, iterations, poly_n, poly_sigma, flags)
global cv2api_delegate
cv2api_delegate = CV2ApiV2() if cv2.__version__.startswith('2') else CV2ApiV3()
def findContours(image, mode, method):
global cv2api_delegate
return cv2api_delegate.findContours(image, mode, method)
```
#### File: maskgen/masks/donor_rules.py
```python
from maskgen.image_wrap import ImageWrapper
from maskgen.support import getValue
from maskgen.tool_set import interpolateMask, zipFileType
from maskgen.video_tools import get_rate_from_segment, get_start_time_from_segment, \
get_end_time_from_segment, update_segment, get_type_of_segment
#===================================================
#
# Defines the Donor factory function associated with the donor_processor of an Operation.
# The factory creates a Donor object that responds with the arguments (arguments method)
# required for the donor operation
# and the mask-generating component of the donor (create method).
#
# The donor edge is defined between nodes donor_start and donor_end.
# The donor_end is also the target of the manipulation that used the donor (e.g. Paste).
# The parent (source) of the manipulation is the parent_of_end.
# The mask and manipulation data from the associated manipulation edge can be used in donor computation.
#
#===================================================
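# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): how a donor
# factory defined below is typically exercised.  The node ids and file names
# are hypothetical; `graph` is whatever ImageGraph the caller holds.
def _example_donor_usage(graph, start_im, dest_im):
    # Build the donor for the edge donor_node -> paste_node, whose associated
    # manipulation edge is paste_parent -> paste_node.
    donor = image_interpolate(graph, 'donor_node', 'paste_node', 'paste_parent',
                              (start_im, 'donor.png'), (dest_im, 'result.png'))
    ui_args = donor.arguments()   # argument schema presented to the UI
    mask = donor.create(arguments={'homography': 'RANSAC-4'})
    return ui_args, mask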
def _pre_select_mask(graph, start, startIm):
from PIL import Image
predecessors = graph.predecessors(start)
for pred in predecessors:
edge = graph.get_edge(pred, start)
if edge['op'] == 'SelectRegion':
mask = graph.get_edge_image(pred, start, 'maskname').invert()
if mask.size != startIm.size:
mask = mask.resize(startIm.size, Image.ANTIALIAS)
return mask
def donothing_processor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return DoNothingDonor()
def donothing_stream_processor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return DoNothingStreamDonor()
class DoNothingStreamDonor:
def arguments(self):
return {}
def create(self,
arguments={},
invert=False):
return []
class DoNothingDonor:
def arguments(self):
return {}
def create(self,
arguments={},
invert=False):
return None
def alpha_stream_processor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return AlphaDonor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
class AlphaDonor:
"""
USE SIFT/RANSAC TO FIND DONOR MASK
IF ALPHA CHANNEL IS AVAILABLE, THEN USE THAT INSTEAD
"""
def __init__(self,graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
self.graph = graph
self.donor_start = donor_start
self.donor_end = donor_end
self.parent_of_end = parent_of_end
self.startIm = startImTuple[0]
self.destIm = destImTuple[0]
self.startFileName = startImTuple[1]
self.destFileName = destImTuple[1]
def arguments(self):
return {}
def create(self,
arguments={},
invert=False):
mask = _pre_select_mask(self.graph, self.donor_start, self.startIm)
mask = self.startIm.to_mask().invert() if mask is None else mask
return mask.invert() if invert else mask
def image_interpolate(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return InterpolateDonor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
class InterpolateDonor:
"""
USE SIFT/RANSAC TO FIND DONOR MASK
IF ALPHA CHANNEL IS AVAILABLE, THEN USE THAT INSTEAD
"""
def __init__(self,graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
self.graph = graph
self.donor_start = donor_start
self.donor_end = donor_end
self.parent_of_end = parent_of_end
self.startIm = startImTuple[0]
self.destIm = destImTuple[0]
self.startFileName = startImTuple[1]
self.destFileName = destImTuple[1]
def arguments(self):
if self.startIm.has_alpha():
default = 'None'
else:
default = 'RANSAC-4'
predecessors = self.graph.predecessors(self.donor_start)
for pred in predecessors:
edge = self.graph.get_edge(pred, self.donor_start)
if edge['op'].startswith('Select'):
default = 'None'
return {
"homography": {
"type": "list",
"source": "image",
"defaultvalue": default,
"values": [
"None",
"Map",
"All",
"LMEDS",
"RANSAC-3",
"RANSAC-4",
"RANSAC-5"
],
"trigger mask": True,
"description": "Tune transform creation for composite mask generation"
},
"homography max matches": {
"type": "int[20:10000]",
"defaultvalue":2000,
"description": "Maximum number of matched feature points used to compute the homography.",
"trigger mask": True
}
}
def create(self,
arguments={},
invert=False):
import numpy as np
if getValue(arguments,'homography','None') == 'None':
if self.startIm.has_alpha():
img_array = np.asarray(self.startIm)
mask = np.copy(img_array[:,:,3])
#accept the alpha channel as what is kept
mask[mask>0] = 255
#invert since 0 in the donor mask indicates the donor pixels
return ImageWrapper(mask).invert()
# use the pre select mask (inverted) as the selection...invert what was removed to be what is kept
return _pre_select_mask(self.graph, self.donor_start, self.startIm)
mask = self.graph.get_edge_image(self.parent_of_end, self.donor_end, 'arguments.pastemask')
if mask is None:
mask = self.graph.get_edge_image(self.parent_of_end, self.donor_end, 'maskname')
mask, analysis = interpolateMask(
mask,
self.startIm,
self.destIm,
arguments=arguments,
invert=invert)
if mask is not None and mask.shape != (0, 0):
mask = ImageWrapper(mask)
else:
mask = None
return mask
def video_interpolate(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return VideoInterpolateDonor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
class VideoInterpolateDonor:
def arguments(self):
return {
"homography": {
"type": "list",
"source": "image",
"values": [
"None",
"Map",
"All",
"LMEDS",
"RANSAC-3",
"RANSAC-4",
"RANSAC-5"
],
"trigger mask": True,
"description": "Tune transform creation for composite mask generation"
}
}
def __init__(self, graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
self.graph = graph
self.donor_start = donor_start
self.donor_end = donor_end
self.parent_of_end = parent_of_end
self.startIm = startImTuple[0]
self.destIm = destImTuple[0]
self.startFileName = startImTuple[1]
self.destFileName = destImTuple[1]
def create(self,
arguments={},
invert=False):
from maskgen.video_tools import interpolateMask
import os
from maskgen.tool_set import shortenName
"""
Used for Donor video or images, the mask recording a 'donation' is the inversion of the difference
of the Donor image and its parent, it exists.
Otherwise, the donor image mask is the donor image (minus alpha channels):
"""
edge = self.graph.get_edge(self.parent_of_end, self.donor_end)
return interpolateMask(
os.path.join(self.graph.dir, shortenName(self.donor_start + '_' + self.donor_end, '_mask')),
self.graph.dir,
edge['videomasks'],
self.startFileName,
self.destFileName,
arguments=arguments)
def audio_zip_donor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return AudioZipDonor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
def video_donor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return VideoDonor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
def video_without_audio_donor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return VideoDonorWithoutAudio(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
class VideoDonor:
def __init__(self, graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
self.graph = graph
self.donor_start = donor_start
self.donor_end = donor_end
self.parent_of_end = parent_of_end
self.startIm = startImTuple[0]
self.destIm = destImTuple[0]
self.startFileName = startImTuple[1]
self.destFileName = destImTuple[1]
def _base_arguments(self):
return {
"include audio": {
"type": "yesno",
"defaultvalue": "no",
"trigger mask": True,
"description": "Is Audio Donated."
},
"Start Time": {
"type": "frame_or_time",
"defaultvalue": 1,
"trigger mask": True,
"description": "Start frame number"
},
"End Time": {
"type": "frame_or_time",
"defaultvalue": 0,
"trigger mask" : True,
"description": "End frame number. Leave 0 if ALL"
}
}
def arguments(self):
args = self._base_arguments()
predecessors = self.graph.predecessors(self.donor_start)
for pred in predecessors:
edge = self.graph.get_edge(pred, self.donor_start)
if edge['op'].startswith('Select'):
args['Start Time']['defaultvalue'] = getValue(edge,'arguments.Start Time',"1")
end_def = getValue(edge, 'arguments.End Time', None)
if end_def is not None:
args['End Time']['defaultvalue'] = end_def
return args
def create(self,
arguments={},
invert=False):
from maskgen.tool_set import getMilliSecondsAndFrameCount
media_types = ['video', 'audio'] if getValue(arguments, 'include audio', 'no') == 'yes' else ['video']
from maskgen.video_tools import FileMetaDataLocator
end_time_tuple = getMilliSecondsAndFrameCount(getValue(arguments, 'End Time', "00:00:00"))
start_time_tuple = getMilliSecondsAndFrameCount(getValue(arguments, 'Start Time', '00:00:00'))
video_set= FileMetaDataLocator(self.startFileName).getMaskSetForEntireVideoForTuples(
start_time_tuple=start_time_tuple,
end_time_tuple=end_time_tuple if end_time_tuple[1] > start_time_tuple[1] else None,
media_types=media_types)
audio_segments = [x for x in video_set if get_type_of_segment(x) == 'audio']
video_segments = [x for x in video_set if get_type_of_segment(x) == 'video']
if getValue(arguments, 'include audio', 'no') == 'yes':
for audio_segment in audio_segments:
video_segment = video_segments[0] if len(video_segments) > 0 else audio_segment
update_segment(audio_segment,
type='audio',
starttime=get_start_time_from_segment(video_segment),
endtime=get_end_time_from_segment(video_segment),
startframe=int(get_start_time_from_segment(video_segment) * get_rate_from_segment(audio_segment)/1000.0),
endframe=int(get_end_time_from_segment(video_segment)* get_rate_from_segment(audio_segment)/1000.0)+1)
return video_set
class VideoDonorWithoutAudio(VideoDonor):
def __init__(self, graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
VideoDonor.__init__(self,graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
def _base_arguments(self):
return {
"Start Time": {
"type": "frame_or_time",
"defaultvalue": 1,
"description": "Start frame number"
},
"End Time": {
"type": "frame_or_time",
"defaultvalue": 0,
"description": "End frame number. Leave 0 if ALL"
}
}
class GeneralStreamDonor:
def __init__(self, graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
self.graph = graph
self.donor_start = donor_start
self.donor_end = donor_end
self.parent_of_end = parent_of_end
self.startIm = startImTuple[0]
self.destIm = destImTuple[0]
self.startFileName = startImTuple[1]
self.destFileName = destImTuple[1]
def media_types(self):
return ['audio','video']
def arguments(self):
args = self._base_arguments()
edge = self.graph.get_edge(self.donor_start,self.donor_end)
args['Start Time']['defaultvalue'] = getValue(edge,'arguments.Start Time',"1")
end_def = getValue(edge, 'arguments.End Time', None)
if end_def is not None:
args['End Time']['defaultvalue'] = end_def
return args
def _base_arguments(self):
return {
"Start Time": {
"type": "time",
"defaultvalue": "00:00:00.000000",
"description": "Start time"
},
"End Time": {
"type": "time",
"defaultvalue": "00:00:00.000000",
"description": "End time. Leave 00:00:00.000000 if ALL"
}
}
def create(self,
arguments={},
invert=False):
from maskgen.video_tools import FileMetaDataLocator
from maskgen.tool_set import getMilliSecondsAndFrameCount
end_time_tuple = getMilliSecondsAndFrameCount(getValue(arguments, 'End Time', "00:00:00"))
start_time_tuple = getMilliSecondsAndFrameCount(getValue(arguments, 'Start Time', '00:00:00'))
return FileMetaDataLocator(self.startFileName).getMaskSetForEntireVideoForTuples(
start_time_tuple=start_time_tuple,
end_time_tuple=end_time_tuple if end_time_tuple[0] > 0 else None,
media_types=self.media_types())
def audio_donor_processor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return AudioDonor(graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
def audio_sample_donor_processor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
    return SampleAudioDonor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
class AudioDonor(GeneralStreamDonor):
def __init__(self, graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
GeneralStreamDonor.__init__(self,graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
def media_types(self):
return ['audio']
class SampleAudioDonor(AudioDonor):
def __init__(self, graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
AudioDonor.__init__(self,graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
def all_audio_processor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return AllAudioStreamDonor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
class AllAudioStreamDonor(AudioDonor):
def __init__(self, graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
AudioDonor.__init__(self,graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
def arguments(self):
return {}
def all_stream_processor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
return AllStreamDonor(graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
class AllStreamDonor(GeneralStreamDonor):
def __init__(self, graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
GeneralStreamDonor.__init__(self,graph,donor_start, donor_end, parent_of_end, startImTuple, destImTuple)
def arguments(self):
return {}
class AudioZipDonor(VideoDonor):
def __init__(self, graph, donor_start, donor_end, parent_of_end, startImTuple, destImTuple):
"""
:param graph:
:param donor_start:
:param donor_end:
:param parent_of_end:
:param startImTuple:
:param destImTuple:
@type graph: ImageGraph
"""
self.graph = graph
self.donor_start = donor_start
self.donor_end = donor_end
self.parent_of_end = parent_of_end
self.startIm = startImTuple[0]
self.destIm = destImTuple[0]
self.startFileName = startImTuple[1]
self.destFileName = destImTuple[1]
def _base_arguments(self):
return {
"Start Time": {
"type": "frame_or_time",
"defaultvalue": "00:00:00.000000",
"trigger mask": True,
"description": "Start frame number"
},
"sample rate": {
"type": "float:[0:600000]",
"defaultvalue": 44100,
"description": "Samples Per Second"
},
"End Time": {
"type": "frame_or_time",
"trigger mask" : True,
"description": "End frame number. Leave 0 if ALL"
}
}
def arguments(self):
args = self._base_arguments()
edge = self.graph.get_edge(self.donor_start,self.donor_end)
args['Start Time']['defaultvalue'] = getValue(edge,'arguments.Start Time',"1")
end_def = getValue(edge, 'arguments.End Time', None)
if end_def is not None:
args['End Time']['defaultvalue'] = end_def
return args
def create(self,
arguments={},
invert=False):
from maskgen.zip_tools import AudioPositions
from maskgen.tool_set import getMilliSecondsAndFrameCount
from maskgen.video_tools import FileMetaDataLocator, \
create_segment
fps = float(getValue(arguments, 'sample rate',0))
# use AudioPostions to determine duration and rate
positions = AudioPositions(FileMetaDataLocator(self.startFileName).get_filename(),
fps=fps)
duration = positions.get_total_duration()
rate = positions.fps
end_time_tuple = getMilliSecondsAndFrameCount(getValue(arguments, 'End Time', "00:00:00"))
start_time_tuple = getMilliSecondsAndFrameCount(getValue(arguments, 'Start Time', '00:00:00'))
if end_time_tuple[0] <= start_time_tuple[0]:
end_time_tuple= (duration,0)
return [create_segment(starttime=float(start_time_tuple[0]),
startframe=int(start_time_tuple[0]*rate/1000.0)+1,
endtime=float(end_time_tuple[0]),
endframe=int(end_time_tuple[0]*rate/1000.0)+1,
type='audio',
rate=rate)]
```
#### File: Media-Journaling-Tool/maskgen/plugins.py
```python
import json
import logging
import os
import subprocess
import sys
import tarfile
import traceback
import copy
import config
from maskgen.ioc.registry import IoCComponent, Method, broker
"""
Manage and invoke all JT plugins that support operations on node media (images, video and audio)
"""
MainModule = "__init__"
def installPlugin(zippedFile):
def extract_archive(fname, dir):
        archive = None
        try:
archive = tarfile.open(fname, "r:gz", errorlevel=2)
except Exception as e:
try:
archive = tarfile.open(fname, "r", errorlevel=2)
except Exception as e:
if archive is not None:
archive.close()
logging.getLogger('maskgen').critical(
"Cannot open archive {}; it may be corrupted ".format(fname))
logging.getLogger('maskgen').error(str(e))
return []
pluginnames = set([name.split('/')[0] for name in archive.getnames()])
archive.extractall(dir)
archive.close()
return list(pluginnames)
loaded = config.global_config.get('plugins', PluginManager({}))
pluginFolders = [os.path.join('.', "plugins"), os.getenv('MASKGEN_PLUGINS', 'plugins')]
pluginFolders.extend([os.path.join(x, 'plugins') for x in sys.path if 'maskgen' in x or not x.endswith('egg') and \
os.path.exists(os.path.join(x, 'plugins'))])
for folder in pluginFolders:
if os.path.exists(folder):
for name in extract_archive(zippedFile, folder):
location = os.path.join(folder, name)
info = _findPluginModule(location)
if info is not None:
_loadPluginModule(info,name,loaded)
break
def _loadPluginModule(info,name,loaded):
logging.getLogger('maskgen').info("Loading plugin " + name)
try:
plugin = __import__(info)
op = plugin.operation()
loaded[name] = {}
loaded[name]['function'] = plugin.transform
loaded[name]['operation'] = op
loaded[name]['suffix'] = plugin.suffix() if hasattr(plugin, 'suffix') else None
except Exception as e:
logging.getLogger('maskgen').error("Failed loading plugin " + name + ": " + str(e))
#finally:
# info[0].close()
def _findPluginModule(location):
if not os.path.isdir(location) or not MainModule + ".py" in os.listdir(location):
return None
return os.path.basename(location) #imp.find_module(MainModule, [location])
def getPlugins(reload=False,customFolders=[]):
plugins = {}
pluginFolders = [os.path.join('.', "plugins"), os.getenv('MASKGEN_PLUGINS', 'plugins')]
pluginFolders.extend([os.path.join(x, 'plugins') for x in sys.path if 'maskgen' in x or not x.endswith('egg') and \
os.path.exists(os.path.join(x, 'plugins'))])
pluginFolders.extend(customFolders)
pluginFolders = set([os.path.abspath(f) for f in pluginFolders])
for folder in pluginFolders:
if os.path.exists(folder):
if folder not in sys.path:
sys.path.append(folder)
possibleplugins = os.listdir(folder)
customfolder = os.path.join(folder, 'Custom')
customplugins = os.listdir(customfolder) if os.path.exists(customfolder) else []
for i in possibleplugins:
if i in plugins:
continue
if i == 'Custom':
continue
location = os.path.join(folder, i)
mod = _findPluginModule(location)
if mod is not None:
plugins[i] = {"info": mod}
for j in customplugins:
location = os.path.join(folder, 'Custom', j)
plugins[os.path.splitext(j)[0]] = {"custom": location}
return plugins
class EchoInterceptor:
def __init__(self, provided_broker):
provided_broker.register('PluginManager', self)
def _callPlugin(self, definition, im, source, target, **kwargs):
return None,None
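# Hedged note (an assumption about the IoC registry semantics): constructing an
# EchoInterceptor with the shared broker appears to replace the default
# PluginCaller, turning plugin invocations into no-ops, e.g. for dry runs:
#
#   EchoInterceptor(broker)   # subsequent PluginManager.callPlugin calls yield (None, None)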
class PluginCaller:
def __init__(self,provided_broker):
provided_broker.register('PluginManager', self)
def _callPlugin(self, definition, im, source, target, **kwargs):
if definition['function'] == 'custom':
return _runCustomPlugin(definition, im, source, target, **kwargs)
else:
return definition['function'](im, source, target, **kwargs)
class PluginManager:
caller = IoCComponent('PluginManager', Method('_callPlugin'))
def __init__(self,plugins={}):
self.plugins=plugins
#default
PluginCaller(broker)
def getBroker(self):
return broker
def getPreferredSuffix(self,name,filetype=None):
name=name.split('::')[0]
loaded = self.plugins
if 'suffix' in loaded[name]:
suffix = loaded[name]['suffix']
if suffix is not None:
if type(suffix) == dict and filetype is not None:
return suffix[filetype]
return suffix
return None
def getOperations(self,fileType=None):
ops = {}
loaded = self.plugins
for l in loaded.keys():
if 'operation' not in loaded[l]:
logging.getLogger('maskgen').error('Invalid plugin {}'.format(l))
continue
transitions = loaded[l]['operation']['transitions'] if 'transitions' in loaded[l]['operation'] else []
transitions = [t.split('.')[0] for t in transitions]
if len(transitions) == 0:
continue
if fileType is None or fileType in transitions:
ops[l] = loaded[l]
return ops
def getOperation(self,name):
loaded = self.plugins
if name not in loaded:
logging.getLogger('maskgen').warning('Requested plugin not found: ' + str(name))
return None
return loaded[name]['operation']
def callPlugin(self,name,im,source,target,**kwargs):
loaded = self.plugins
if name not in loaded:
            raise ValueError('Requested plugin not found: ' + str(name))
try:
return self._callPlugin(loaded[name],im, source, target, **kwargs)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
trace = traceback.format_exception(exc_type, exc_value, exc_traceback)
logging.getLogger('maskgen').error(
'Plugin {} failed with {} for arguments {}'.format(name, str(e), str(kwargs)))
logging.getLogger('maskgen').error(' '.join(trace))
raise e
def _callPlugin(self, definition, im, source, target, **kwargs):
return self.caller(definition, im, source, target, **kwargs)
def loadCustom(self,name, path):
"""
loads a custom plugin
"""
logging.getLogger('maskgen').info("Loading plugin " + name)
try:
with open(path) as jfile:
data = json.load(jfile)
self.plugins[name] = {}
self.plugins[name]['function'] = 'custom'
self.plugins[name]['operation'] = data['operation']
self.plugins[name]['command'] = data['command']
self.plugins[name]['group'] = None
self.plugins[name]['mapping'] = data['mapping'] if 'mapping' in data else None
self.plugins[name]['suffix'] = data['suffix'] if 'suffix' in data else None
except Exception as e:
logging.getLogger('maskgen').error("Failed to load plugin {}: {} ".format(name, str(e)))
def pluginSummary():
import csv
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
    loaded = loadPlugins().plugins  # loadPlugins() returns the PluginManager; iterate its plugin definitions
with open('plugin.csv','w') as fp:
csv_fp = csv.writer(fp)
for plugin_name,plugin_def in loaded.iteritems():
args = plugin_def['operation']['arguments'] if 'arguments' in plugin_def['operation'] else {}
args = {} if args is None else args
csv_fp.writerow([plugin_name,plugin_def['operation']['name'],
plugin_def['operation']['category'],
plugin_def['operation']['software'],
plugin_def['operation']['description'],
'yes' if 'inputmaskname' in args else 'no'])
def loadPlugins(reload=False, customFolders=[]):
"""
:param reload:
:param customFolders:
:return:
@rtype: PluginManager
"""
if 'plugins' in config.global_config and not reload:
return config.global_config['plugins']
loaded = {}
config.global_config['plugins'] = PluginManager(loaded)
ps = getPlugins(customFolders=customFolders)
for i in ps.keys():
if 'custom' in ps[i]:
path = ps[i]['custom']
config.global_config['plugins'].loadCustom(i, path)
else:
_loadPluginModule(ps[i]['info'], i, loaded)
return config.global_config['plugins']
def getOperations(fileType=None):
return config.global_config['plugins'].getOperations(fileType=fileType)
def getPreferredSuffix(name,filetype= None):
return config.global_config['plugins'].getPreferredSuffix(name,filetype=filetype)
def getOperation(name):
parts = name.split('::')
plugin_name = parts[0]
op = config.global_config['plugins'].getOperation(plugin_name)
if op is not None and len(parts) > 1:
op = copy.copy(op)
op['name'] = op['name'] + '::' + parts[1]
return op
def callPlugin(name,im,source,target,**kwargs):
return config.global_config['plugins'].callPlugin(name.split('::')[0],im,source,target,**kwargs)
def _runCustomPlugin(command, im, source, target, **kwargs):
import copy
commands = copy.deepcopy(command['command'])
mapping = copy.deepcopy(command['mapping'])
executeOk = False
for k, command in commands.items():
if sys.platform.startswith(k):
executeWith(command, im, source, target, mapping, **kwargs)
executeOk = True
break
if not executeOk:
executeWith(commands['default'], im, source, target, mapping, **kwargs)
return None, None
def executeWith(executionCommand, im, source, target, mapping, **kwargs):
shell=False
if executionCommand[0].startswith('s/'):
executionCommand[0] = executionCommand[0][2:]
shell = True
kwargs = mapCmdArgs(kwargs, mapping)
kwargs['inputimage'] = source
kwargs['outputimage'] = target
for i in range(len(executionCommand)):
try:
executionCommand[i] = executionCommand[i].format(**kwargs)
except KeyError as e:
logging.getLogger('maskgen').warn('Argument {} not provided for {}'.format(e.message,executionCommand[0]))
ret = subprocess.call(executionCommand,shell=shell)
if ret != 0:
raise RuntimeError('Plugin {} failed with code {}'.format(executionCommand[0],ret))
def mapCmdArgs(args, mapping):
import copy
newargs = copy.copy(args)
if mapping is not None:
for key, val in args.iteritems():
if key in mapping:
if val not in mapping[key] or mapping[key][val] is None:
raise ValueError('Option \"' + str(val) + '\" is not permitted for this plugin.')
newargs[key] = mapping[key][val]
return newargs
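# Example: with mapping {'quality': {'high': '95', 'low': '50'}}, the user-facing
# argument {'quality': 'high'} is rewritten to {'quality': '95'} before command
# formatting; a value missing from the mapping raises ValueError.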
def findPlugin(pluginName):
import errno
pluginFolders = [os.path.join('.', "plugins"), os.getenv('MASKGEN_PLUGINS', 'plugins')]
pluginFolders.extend([os.path.join(x, 'plugins') for x in sys.path if 'maskgen' in x or not x.endswith('egg') and \
os.path.exists(os.path.join(x, 'plugins'))])
for parent in pluginFolders:
if not os.path.exists(parent):
continue
for f in os.listdir(parent):
if f == pluginName:
return os.path.join(parent, f)
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), pluginName)
```
#### File: Media-Journaling-Tool/maskgen/preferences_initializer.py
```python
from maskgen.userinfo import setPwdX,CustomPwdX
from maskgen.validation.core import setValidators
from maskgen.validation.browser_api import ValidationBrowserAPI
from maskgen.validation.code_name_s3_api import ValidationCodeNameS3
from maskgen.plugins import loadPlugins
import logging
def initial_user(preferences,username=None):
"""
Set the global user name for Maskgen services. Defaults to the op-sys username if preferences
or override kwargs is not provided.
:param preferences: default username if provided in preferences
:param username: optional override
:return:
"""
if username is not None:
setPwdX(CustomPwdX(username))
elif preferences.get_key('username') is not None:
setPwdX(CustomPwdX(preferences.get_key('username')))
else:
logging.getLogger('maskgen').warn("Name not configured in preferences; using operating system user name")
def initialize_validators(preferences,validators=[]):
"""
:param preferences:
:param validators: list of class(ValidationAPI)
:return:
@type validators: list of class(ValidationAPI)
"""
setValidators(preferences,validators)
def initialize(preferences, username=None, validators=None):
"""
:param username: optional overriding username.
:param validators: class extending ValidationAPI
:return:
@type validators: list of class(ValidationAPI)
"""
initial_user(preferences,username=username)
initialize_validators(preferences,
validators=validators if validators is not None else [ValidationBrowserAPI,ValidationCodeNameS3])
loadPlugins()
```
#### File: Media-Journaling-Tool/maskgen/scenario_model.py
```python
import collections
import copy
import shutil
import tempfile
import traceback
from threading import Lock
import ffmpeg_api
import graph_rules
import mask_rules
import notifiers
import plugins
import video_tools
from graph_auto_updates import updateJournal
from group_filter import buildFilterOperation, GroupFilter, GroupOperationsLoader
from image_graph import createGraph
from image_wrap import ImageWrapper
from maskgen.image_graph import ImageGraph
from maskgen.video_tools import DummyMemory
from support import MaskgenThreadPool, StatusTracker, getPathValuesFunc, getPathValues
from software_loader import Software, getProjectProperties, getRule, getOperation
from tool_set import *
from validation.core import Validator, ValidationMessage,Severity,removeErrorMessages
def formatStat(val):
if type(val) == float:
return "{:5.3f}".format(val)
return str(val)
prefLoader = MaskGenLoader()
def defaultNotify(edge, message, **kwargs):
return True
def loadProject(projectFileName, notify=None, username=None, tool=None):
"""
    Given a JSON file name, open the appropriate type of project
@rtype: ImageProjectModel
"""
graph = createGraph(projectFileName, tool=tool)
return ImageProjectModel(projectFileName, graph=graph, notify=notify, username=username, tool=tool)
def consolidate(dict1, dict2):
"""
:param dict1:
:param dict2:
:return:
@rtype dict
"""
d = dict(dict1)
d.update(dict2)
return d
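# Example: consolidate({'a': 1}, {'a': 2, 'b': 3}) -> {'a': 2, 'b': 3}; dict2 wins on overlap.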
EdgeTuple = collections.namedtuple('EdgeTuple', ['start', 'end', 'edge'])
def createProject(path,
notify=None,
base=None,
name=None,
suffixes=[],
tool=None,
username=None,
organization=None,
preferences={}):
"""
This utility function creates a ProjectModel given a directory.
If the directory contains a JSON file, then that file is used as the project file.
Otherwise, the directory is inspected for images.
All images found in the directory are imported into the project.
If the 'base' parameter is provided, the project is named based on that image name.
If the 'base' parameter is not provided, the project name is set based on finding the
first image in the list of found images, sorted in lexicographic order, starting with JPG, then PNG and then TIFF.
:param path: directory name or JSON file
:param notify: function pointer receiving the image (node) id and the event type
:param base: image name
:param suffixes:
:param projectModelFactory:
:param organization:
:return: a tuple=> a project if found or created, returns True if created. Returns None if a project cannot be found or created.
@type path: str
@type notify: (str, str) -> None
@rtype: (ImageProjectModel, bool)
"""
if path is None:
path = '.'
selectionSet = [filename for filename in os.listdir(path) if filename.endswith(".json") and \
filename not in ['operations.json','project_properties.json']]
if len(selectionSet) == 0:
return ImageProjectModel(os.path.join('.', 'Untitled.json'), notify=notify, username=username, tool=tool), True
else:
if (path.endswith(".json")) and os.path.exists(path):
return ImageProjectModel(os.path.abspath(path), notify=notify, username=username, tool=tool), False
# just a directory
selectionSet = [filename for filename in os.listdir(path) if filename.endswith(".json")]
if len(selectionSet) != 0 and base is not None:
logging.getLogger('maskgen').warning('Cannot add base image/video to an existing project')
return None
# JSON file not found and base image not provided
if len(selectionSet) == 0 and base is None:
logging.getLogger('maskgen').info(
'No project found and base image/video not provided; Searching for a base image/video')
suffixPos = 0
# look for a viable media file to create the project
while len(selectionSet) == 0 and suffixPos < len(suffixes):
suffix = suffixes[suffixPos]
selectionSet = [filename for filename in os.listdir(path) if filename.lower().endswith(suffix)]
selectionSet.sort()
suffixPos += 1
if len(selectionSet) == 0:
logging.getLogger('maskgen').warning('Could not find a base image/video')
return None
projectFile = selectionSet[0]
# add base is not None
elif len(selectionSet) == 0:
projectFile = os.path.split(base)[1]
else:
projectFile = selectionSet[0]
projectFile = os.path.abspath(os.path.join(path, projectFile))
if not os.path.exists(projectFile):
logging.getLogger('maskgen').warning('Base project file ' + projectFile + ' not found')
return None
image = None
existingProject = projectFile.endswith(".json")
if not existingProject:
image = projectFile
if name is None:
projectFile = os.path.splitext(projectFile)[0] + ".json"
else:
projectFile = os.path.abspath(os.path.join(path, name + ".json"))
model = ImageProjectModel(projectFile, notify=notify, baseImageFileName=image, username=username, tool=tool)
if organization is not None:
model.setProjectData('organization', organization)
if image is not None:
model.addImagesFromDir(path, baseImageFileName=os.path.split(image)[1], suffixes=suffixes, \
sortalg=lambda f: os.stat(os.path.join(path, f)).st_mtime, preferences=preferences)
return model, not existingProject
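# Illustrative usage of createProject above (the path and organization are hypothetical):
#
#   result = createProject('/data/journals/example',
#                          suffixes=['.jpg', '.png', '.tiff'],
#                          organization='ExampleOrg')
#   if result is not None:
#       model, created = result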
class MetaDiff:
diffData = None
def __init__(self, diffData):
self.diffData = diffData
def getMetaType(self):
return 'EXIF'
def getSections(self):
return None
def getColumnNames(self, section):
return ['Operation', 'Old', 'New']
def toColumns(self, section):
d = {}
for k, v in self.diffData.iteritems():
old = v[1] if v[0].lower() == 'change' or v[0].lower() == 'delete' else ''
new = v[2] if v[0].lower() == 'change' else (v[1] if v[0].lower() == 'add' else '')
old = old.encode('ascii', 'xmlcharrefreplace')
new = new.encode('ascii', 'xmlcharrefreplace')
d[k] = {'Operation': v[0], 'Old': old, 'New': new}
return d
class VideoMetaDiff:
"""
Video Meta-data changes are represented by section.
A special section called Global represents meta-data for the entire video.
Other sections are in the individual streams (e.g. video and audio) of frames.
A table of columns is produced per section. The columns are Id, Operation, Old and New.
Operations are add, delete and change.
For streams, each row is identified by a time and meta-data name.
When frames are added, the New column contains the number of frames added followed by the end time in seconds: 30:=434.4343434
When frames are deleted, the Old column contains the number of frames removed followed by the end time in seconds: 30:=434.4343434
"""
diffData = None
def __init__(self, diffData):
self.diffData = diffData
def getMetaType(self):
return 'FRAME'
def getSections(self):
return self.diffData.keys()
def getColumnNames(self, section):
return ['Operation', 'Old', 'New']
def toColumns(self, section):
d = {}
if len(self.diffData) == 0:
return d
if section is None:
section = self.diffData.keys()[0]
self._sectionChanges(d, self.diffData[section])
return d
def _sectionChanges(self, d, sectionData, prefix=''):
for k, v in sectionData.iteritems():
dictKey = str(k)
old = v[1] if v[0].lower() == 'change' or v[0].lower() == 'delete' else ''
new = v[2] if v[0].lower() == 'change' else (v[1] if v[0].lower() == 'add' else '')
if type(old) is not str:
old = str(old)
if type(new) is not str:
new = str(new)
old = old.encode('ascii', 'xmlcharrefreplace')
new = new.encode('ascii', 'xmlcharrefreplace')
d[dictKey] = {'Operation': v[0], 'Old': old, 'New': new}
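# Illustrative sketch of the diffData consumed by VideoMetaDiff above (the key and
# value shapes are assumptions inferred from _sectionChanges): a dictionary keyed by
# stream/section name, mapping each row key to ('add', new), ('delete', old) or
# ('change', old, new).
#
#   diff = VideoMetaDiff({'video': {('00:00:01.000', 'nb_frames'): ('change', '300', '330')}})
#   diff.getSections()       # -> ['video']
#   diff.toColumns('video')  # -> {"('00:00:01.000', 'nb_frames')":
#                            #     {'Operation': 'change', 'Old': '300', 'New': '330'}}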
class Modification:
"""
Represents a single manipulation to a source node, resulting in the target node
"""
operationName = None
additionalInfo = ''
# for backward compatibility and ease of access, input mask name is both arguments and
# an instance variable
inputMaskName = None
# set of masks used for videos
maskSet = None
# Record the link in the composite. Uses 'no' and 'yes' to mirror JSON read-ability
recordMaskInComposite = 'no'
# arguments used by the operation
arguments = dict()
# instance of Software
software = None
# automated
automated = 'no'
# errors
errors = list()
# generate mask
generateMask = "all"
username = ''
ctime = ''
start = ''
end = ''
semanticGroups = None
def __init__(self, operationName, additionalInfo,
start='',
end='',
arguments={},
recordMaskInComposite=None,
changeMaskName=None,
inputMaskName=None,
software=None,
maskSet=None,
automated=None,
username=None,
ctime=None,
errors=list(),
semanticGroups=None,
category=None,
generateMask="all"):
self.start = start
self.end = end
self.additionalInfo = additionalInfo
self.maskSet = maskSet
self.automated = automated if automated else 'no'
self.errors = errors if errors else list()
self.operationName = operationName
self.setArguments(arguments)
self.semanticGroups = semanticGroups
if inputMaskName is not None:
self.setInputMaskName(inputMaskName)
self.changeMaskName = changeMaskName
self.username = username if username is not None else ''
self.ctime = ctime if ctime is not None else datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
self.software = software
if recordMaskInComposite is not None:
self.recordMaskInComposite = recordMaskInComposite
self.category = category
self.generateMask = generateMask
def getSemanticGroups(self):
return [] if self.semanticGroups is None else self.semanticGroups
def setSemanticGroups(self, groups):
self.semanticGroups = groups
def setErrors(self, val):
self.errors = val if val else list()
def setAutomated(self, val):
self.automated = 'yes' if val == 'yes' else 'no'
def setMaskSet(self, maskset):
self.maskSet = maskset
def getSoftwareName(self):
return self.software.name if self.software is not None and self.software.name is not None else ''
def getSoftwareVersion(self):
return self.software.version if self.software is not None and self.software.version is not None else ''
def setSoftware(self, software):
self.software = software
def setArguments(self, args):
self.arguments = dict()
for k, v in args.iteritems():
self.arguments[k] = v
if k == 'inputmaskname':
self.setInputMaskName(v)
def setInputMaskName(self, inputMaskName):
self.inputMaskName = inputMaskName
if 'inputmaskname' not in self.arguments or self.arguments['inputmaskname'] != inputMaskName:
self.arguments['inputmaskname'] = inputMaskName
def setAdditionalInfo(self, info):
self.additionalInfo = info
def setRecordMaskInComposite(self, recordMaskInComposite):
self.recordMaskInComposite = recordMaskInComposite
def setOperationName(self, name):
self.operationName = name
def setFromOperation(self,op,filetype='image'):
"""
:param op:
:return:
@type op: Operation
"""
self.category = op.category
self.generateMask = op.generateMask
self.recordMaskInComposite = op.recordMaskInComposite(filetype)
class LinkTool:
"""
    LinkTools are used to handle the comparison and analysis of each edge in the graph.
    The link tools are organized by transitions of
media type: video->image, video->video, audio->video, etc.
"""
def __init__(self):
return
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.donothing_processor"
def processDonors(self, scModel, start, destination, startIm, startFileName, destIm, destFileName, arguments, invert=False):
"""
:param scModel:
:param destination:
:param startIm:
:param startFileName:
:param destIm:
:param arguments:
:param invert:
:return:
@type scModel: ImageProjectModel
"""
result = scModel.getCreatingOperation(destination)
if result is not None:
return result[1].getDonorProcessor(default_processor=self.getDefaultDonorProcessor())(
scModel.getGraph(),
start,
destination,
result[0],
(startIm, startFileName),
(destIm, destFileName)).create(arguments=arguments,
invert=invert)
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
return None, {}, []
def _addAnalysis(self, startIm, destIm, op, analysis, mask, linktype=None,
arguments={}, start=None, end=None, scModel=None):
"""
Add analysis to dictionary
:param startIm:
:param destIm:
:param op:
:param analysis: fill this dictionary
:param mask:
:param linktype:
:param arguments:
:param start:
:param end:
:param scModel:
:return:
@type scModel: ImageProjectModel
"""
import importlib
directory = scModel.get_dir()
opData = scModel.gopLoader.getOperationWithGroups(op)
if opData is None:
return
arguments = dict(arguments)
arguments['start_node'] = start
arguments['end_node'] = end
arguments['sc_model'] = scModel
for analysisOp in opData.analysisOperations:
mod_name, func_name = analysisOp.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
func(analysis, startIm, destIm, mask=invertMask(mask), linktype=linktype,
arguments=arguments,
directory=directory)
except Exception as e:
logging.getLogger('maskgen').error('Failed to run analysis {}: {} '.format(analysisOp, str(e)))
def addSubstituteMasks(self, start, destination, scModel, op, arguments={}, filename=''):
return None
class ImageImageLinkTool(LinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
LinkTool.__init__(self)
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask and the analysis results (a dictionary)
"""
im1 = scModel.getImage(start)
im2 = scModel.getImage(end)
edge = scModel.G.get_edge(start, end)
compareFunction = None
if edge is not None:
operation = scModel.gopLoader.getOperationWithGroups(edge['op'] if edge is not None else 'NA', fake=True)
compareFunction = operation.getCompareFunction()
mask, analysis, error = createMask(im1, im2, invert=False, arguments=arguments,
alternativeFunction=compareFunction)
if error is not None:
logging.getLogger('maskgen').warn('Failed mask generation for operation {} between {} and {}'.format(
edge['op'] if edge is not None else 'NA',
start,
end
))
return im1, im2, mask, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
@type scModel: ImageProjectModel
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
errors = list()
operation = scModel.gopLoader.getOperationWithGroups(op)
if op == 'Donor':
predecessors = scModel.G.predecessors(destination)
mask = None
expect_donor_mask = False
analysis = {}
if not skipDonorAnalysis:
errors = list()
mask = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
if mask is None:
mask = convertToMask(startIm).invert()
if expect_donor_mask:
errors = ["Donor image has insufficient features for SIFT and does not have a predecessor node."]
analysis = {}
else:
mask = startIm.apply_alpha_to_mask(mask)
else:
logging.getLogger('maskgen').debug('Create Mask')
mask, analysis, error = createMask(startIm,
destIm,
invert=invert,
arguments=arguments,
alternativeFunction=operation.getCompareFunction(),
convertFunction=operation.getConvertFunction())
if error is not None:
errors.append(error)
logging.getLogger('maskgen').warn('Failed mask generation for operation {} between {} and {}'.format(
op,
start,
destination
))
logging.getLogger('maskgen').debug('EXIF Compare')
exifDiff = exif.compareexif(startFileName, destFileName)
analysis = analysis if analysis is not None else {}
analysis['exifdiff'] = exifDiff
logging.getLogger('maskgen').debug('Analysis')
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='image.image',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class VideoImageLinkTool(ImageImageLinkTool):
"""
Supports mask construction and meta-data comparison when linking video to image.
"""
def __init__(self):
ImageImageLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.video_without_audio_donor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask and the analysis results (a dictionary)
"""
im1, startFileName = scModel.getImageAndName(start, arguments=arguments)
im2, destFileName = scModel.getImageAndName(end)
edge = scModel.G.get_edge(start, end)
operation = scModel.gopLoader.getOperationWithGroups(edge['op'])
mask, analysis,error = createMask(im1, im2, invert=False, arguments=arguments,
alternativeFunction=operation.getCompareFunction())
if error is not None:
logging.getLogger('maskgen').warn('Failed mask generation for operation {} between {} and {}'.format(
edge['op'] if edge is not None else 'NA',
start,
end
))
return im1, im2, mask, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
args = dict(arguments)
args['skipSnapshot'] = True
startIm, startFileName = scModel.getImageAndName(start, arguments=args)
destIm, destFileName = scModel.getImageAndName(destination)
errors = list()
operation = scModel.gopLoader.getOperationWithGroups(op)
mask, analysis = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8')), {}
if op == 'Donor':
errors = [
"An video cannot directly donate to an image. First select a frame using an appropriate operation."]
analysis = {}
else:
mask, analysis,error = createMask(startIm, destIm, invert=invert, arguments=arguments,
alternativeFunction=operation.getCompareFunction())
if error is not None:
errors.append(error)
logging.getLogger('maskgen').warn('Failed mask generation for operation {} between {} and {}'.format(
op,
start,
destination
))
exifDiff = exif.compareexif(startFileName, destFileName)
analysis = analysis if analysis is not None else {}
analysis['exifdiff'] = exifDiff
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='video.image',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class ZipImageLinkTool(VideoImageLinkTool):
"""
Supports mask construction and meta-data comparison when linking zip to image.
"""
def __init__(self):
VideoImageLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.donothing_stream_processor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(end)
mask, analysis, errors = self.compareImages(start, end, scModel, 'noOp', skipDonorAnalysis=True,
arguments=arguments, analysis_params={})
if 'videomasks' in analysis:
analysis['videomasks'] = VideoMaskSetInfo(analysis['videomasks'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return startIm, destIm, mask, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
@type start: str
@type destination: str
@type scModel: ImageProjectModel
@type op: str
@type invert: bool
@type arguments: dict
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask, analysis = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8')), {}
operation = scModel.gopLoader.getOperationWithGroups(op, fake=True)
maskSet = video_tools.formMaskDiffForImage(startFileName, destIm,
os.path.join(scModel.G.dir, start + '_' + destination),
op,
startSegment=getMilliSecondsAndFrameCount(arguments[
'Start Time']) if 'Start Time' in arguments else None,
endSegment=getMilliSecondsAndFrameCount(arguments[
'End Time']) if 'End Time' in arguments else None,
analysis=analysis,
alternateFunction=operation.getVideoCompareFunction(),
#alternateFrameFunction=operation.getCompareFunction(),
arguments=consolidate(arguments, analysis_params))
# for now, just save the first mask
if len(maskSet) > 0 and video_tools.get_mask_from_segment( maskSet[0] ) is not None:
mask = ImageWrapper(video_tools.get_mask_from_segment( maskSet[0] ))
for item in maskSet:
video_tools.drop_mask_from_segment(item)
analysis['masks count'] = len(maskSet)
analysis['videomasks'] = maskSet
metaDataDiff = None
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = metaDataDiff
analysis['shape change'] = sizeDiff(startIm, destIm)
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='zip.image',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, []
class CollectionImageLinkTool(VideoImageLinkTool):
"""
    Supports mask construction and meta-data comparison when linking an image collection to an image.
"""
def __init__(self):
VideoImageLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.donothing_stream_processor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(end)
mask = np.ones((destIm.size[0],destIm.size[1]),dtype=np.uint8)*255
return startIm, destIm, ImageWrapper(mask), {}
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
@type start: str
@type destination: str
@type scModel: ImageProjectModel
@type op: str
@type invert: bool
@type arguments: dict
"""
startIm, destIm, mask, analysis = self.compare(start, destination, scModel)
return mask, analysis, []
class VideoVideoLinkTool(LinkTool):
"""
Supports mask construction and meta-data comparison when linking video to video.
"""
def __init__(self):
LinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.video_without_audio_donor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(end)
mask, analysis, errors = self.compareImages(start, end, scModel, 'noOp',
arguments=arguments, analysis_params={})
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'videomasks' in analysis:
analysis['videomasks'] = VideoMaskSetInfo(analysis['videomasks'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return startIm, destIm, mask, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
@type start: str
@type destination: str
@type scModel: ImageProjectModel
@type op: str
@type invert: bool
@type arguments: dict
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask, analysis = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8')), {}
operation = scModel.gopLoader.getOperationWithGroups(op, fake=True)
if op != 'Donor' and operation.generateMask not in ['audio', 'all']:
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo()
if maskSet is None:
maskSet = list()
errors = list()
elif op == 'Donor' and not skipDonorAnalysis:
errors = list()
maskSet = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
else:
arguments['generate_frames'] = 0
previewer = analysis_params.pop('controller') if 'controller' in analysis_params else None
maskSet, errors = video_tools.formMaskDiff(startFileName, destFileName,
os.path.join(scModel.G.dir, start + '_' + destination),
op,
startSegment=getMilliSecondsAndFrameCount(
arguments['Start Time']) if 'Start Time' in arguments else None,
endSegment=getMilliSecondsAndFrameCount(
arguments['End Time']) if 'End Time' in arguments else None,
analysis=analysis,
alternateFunction=operation.getVideoCompareFunction(),
#alternateFrameFunction=operation.getCompareFunction(),
arguments=consolidate(arguments, analysis_params),
controller=previewer)
mask = None
for item in maskSet:
if video_tools.get_mask_from_segment(item) is not None:
mask = ImageWrapper(video_tools.get_mask_from_segment(item))
video_tools.drop_mask_from_segment(item)
if mask is None:
mask = ImageWrapper(np.ones(startIm.image_array.shape[0:2], dtype='uint8')*255)
analysis['masks count'] = len(maskSet)
analysis['videomasks'] = maskSet
metaDataDiff = video_tools.form_meta_data_diff(startFileName, destFileName)
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = metaDataDiff
analysis['shape change'] = sizeDiff(startIm, destIm)
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='video.video',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
def addSubstituteMasks(self,start, destination, scModel, op, arguments={}, filename=''):
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
startSegment = getMilliSecondsAndFrameCount(arguments[
'Start Time']) if 'Start Time' in arguments else None
endSegment = getMilliSecondsAndFrameCount(arguments[
'End Time']) if 'End Time' in arguments else None
subs = video_tools.formMaskForSource(startFileName,
filename,
start + '_' + destination + '_substitute',
startTimeandFrame=startSegment,
stopTimeandFrame=endSegment
)
#if subs is not None:
# analysis = {}
# startIm, startFileName = scModel.getImageAndName(start)
# destIm, destFileName = scModel.getImageAndName(destination)
# maskSet, errors = video_tools.formMaskDiff(startFileName, destFileName,
# os.path.join(scModel.G.dir, start + '_' + destination + '_cmp'),
# op,
# startSegment=startSegment,
# endSegment=endSegment,
# analysis=analysis,
# alternateFunction=video_tools.maskCompare,
# arguments=arguments)
# how best to compare
return subs
class AudioVideoLinkTool(VideoVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking audio to video.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.all_audio_processor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
analysis = dict()
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return None, None, None, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
"""
:param start:
:param destination:
:param scModel:
:param op:
:param invert:
:param arguments:
:param skipDonorAnalysis:
:param analysis_params:
:return:
        @type scModel: ImageProjectModel
"""
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = dict()
analysis['masks count'] = 0
analysis['videomasks'] = list()
metaDataDiff = video_tools.form_meta_data_diff(startFileName, destFileName, frames=False, media_types=['audio'])
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = metaDataDiff
operation = scModel.gopLoader.getOperationWithGroups(op, fake=True)
errors = []
if op == 'Donor':
errors = list()
maskSet = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
elif op != 'Donor' and operation.generateMask in ['audio','all']:
maskSet, errors = video_tools.formMaskDiff(startFileName, destFileName,
os.path.join(scModel.G.dir, start + '_' + destination),
op,
startSegment=getMilliSecondsAndFrameCount(arguments[
'Start Time']) if 'Start Time' in arguments else None,
endSegment=getMilliSecondsAndFrameCount(arguments[
'End Time']) if 'End Time' in arguments else None,
analysis=analysis,
alternateFunction=operation.getVideoCompareFunction(),
arguments=consolidate(arguments, analysis_params))
else:
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo( media_types=['audio'])
if maskSet is None:
maskSet = list()
errors = list()
analysis['masks count'] = len(maskSet)
analysis['videomasks'] = maskSet
self._addAnalysis(startIm, destIm, op, analysis, None, linktype='audio.audio',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class AudioAudioLinkTool(AudioVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking audio to audio.
"""
def __init__(self):
AudioVideoLinkTool.__init__(self)
class VideoAudioLinkTool(LinkTool):
"""
Supports mask construction and meta-data comparison when linking video to audio.
"""
def __init__(self):
LinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.all_audio_processor"
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
analysis = dict()
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return None, None, None, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False,
analysis_params={}):
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = dict()
analysis['masks count'] = 0
analysis['videomasks'] = list()
metaDataDiff = video_tools.form_meta_data_diff(startFileName, destFileName, frames=False, media_types=['audio'])
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = metaDataDiff
self._addAnalysis(startIm, destIm, op, analysis, None, linktype='video.audio',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, list()
class ImageVideoLinkTool(VideoVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
return "maskgen.masks.donor_rules.alpha_stream_processor"
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
if op == 'Donor':
mask = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
if mask is None:
mask = startIm.to_mask().invert()
return mask, {}, list()
class ZipZipLinkTool(VideoVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
# not correct...TODO
return "maskgen.masks.donor_rules.all_stream_processors"
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
from support import setPathValue
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = {}
errors = list()
operation = scModel.gopLoader.getOperationWithGroups(op, fake=True)
rate = float(getValue(arguments, 'Frame Rate', 30))
if operation.generateMask in ['audio', 'meta']:
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo()
if maskSet is None:
maskSet = list()
elif op == 'Donor' and not skipDonorAnalysis:
maskSet = self.processDonors(scModel, start, destination, startIm, startFileName, destIm, destFileName,
consolidate(arguments, analysis_params), invert=invert)
else:
maskSet, errors = video_tools.formMaskDiff(startFileName, destFileName,
os.path.join(scModel.G.dir, start + '_' + destination),
op,
startSegment=getMilliSecondsAndFrameCount(getValue(arguments,'Start Time',None)),
endSegment=getMilliSecondsAndFrameCount(getValue(arguments,'End Time',None)),
analysis=analysis,
alternateFunction=operation.getVideoCompareFunction(),
arguments=consolidate(arguments, analysis_params))
for item in maskSet:
if video_tools.get_mask_from_segment(item) is not None:
mask = ImageWrapper(video_tools.get_mask_from_segment(item))
video_tools.drop_mask_from_segment(item)
analysis['masks count'] = len(maskSet)
analysis['videomasks'] = maskSet
analysis['shape change'] = sizeDiff(startIm, destIm)
startZip = ZipCapture(startFileName,fps=rate)
endZip = ZipCapture(destFileName,fps=rate)
if startZip.get_size() != endZip.get_size():
setPathValue(analysis['metadatadiff'], 'video.nb_frames', ('change', startZip.get_size(), endZip.get_size()))
setPathValue(analysis['metadatadiff'], 'video.duration',
('change', startZip.get_size()/rate, endZip.get_size()/rate))
self._addAnalysis(startIm, destIm, op, analysis, mask, linktype='zip.zip',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class ZipVideoLinkTool(VideoVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def getDefaultDonorProcessor(self):
# not correct...TODO
return "maskgen.masks.donor_rules.all_stream_processors"
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
from support import setPathValue
from video_tools import get_rate_from_segment, get_frames_from_segment
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = {}
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo(start_time=getValue(arguments,'Start Time',0),
end_time=getValue(arguments,'End Time'))
endMaskSet = video_tools.FileMetaDataLocator(destFileName).getMaskSetForEntireVideo()
analysis['masks count'] = len(maskSet)
for videomask in maskSet:
if 'mask' in videomask:
videomask.pop('mask')
analysis['videomasks'] = maskSet
rate = get_rate_from_segment(maskSet[0])
length = get_frames_from_segment(maskSet[0])
if length != get_frames_from_segment(endMaskSet[0]) or rate != get_rate_from_segment(endMaskSet[0]):
setPathValue(analysis['metadatadiff'], 'video.nb_frames',
('change', length, get_frames_from_segment(endMaskSet[0])))
setPathValue(analysis['metadatadiff'], 'video.duration',
('change', length/float(rate), get_frames_from_segment(endMaskSet[0]) / float(get_rate_from_segment(endMaskSet[0]))))
setPathValue(analysis['metadatadiff'], 'video.avg_frame_rate',
('change', rate,get_rate_from_segment(endMaskSet[0])))
return mask, analysis, []
class VideoZipLinkTool(ZipVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
ZipVideoLinkTool.__init__(self)
class ZipAudioLinkTool(VideoAudioLinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
VideoAudioLinkTool.__init__(self)
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
analysis = dict()
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return None, None, None, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False,
analysis_params={}):
from zip_tools import AudioPositions
from support import setPathValue
from video_tools import create_segment, get_start_time_from_segment, get_end_time_from_segment,\
update_segment,get_rate_from_segment
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = dict()
maskSet = video_tools.FileMetaDataLocator(destFileName).getMaskSetForEntireVideo(
start_time=getValue(arguments, 'Start Time', 0),
end_time=getValue(arguments, 'End Time'),
media_types=['audio'])
if not len(maskSet):
raise ValueError("Cannot find audio data target file {}".format(destFileName))
node = scModel.getGraph().get_node(start)
meta_data = getValue(node,'zip content meta')
if meta_data is None:
meta_data = getValue(ZipAddTool().getAdditionalMetaData(startFileName),'zip content meta')
def audio_metadata_extractor(filename):
return meta_data[os.path.basename(filename)]
fps = float(getValue(arguments,'sample rate',maskSet[-1]['rate']))
positions = AudioPositions(startFileName,
position_file_name=getValue(arguments,'Audio Sequence File'),
fps=int(fps),
audio_metadata_extractor=audio_metadata_extractor if meta_data is not None else None)
segments = [create_segment(starttime=seg[0],
startframe=seg[1],
endtime=seg[2],
endframe=seg[3],
type='audio',
frames=seg[3]-seg[1]+1,
rate=fps)
for seg in positions.get_segments(0)]
analysis['masks count'] = 0
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = {}
analysis['videomasks'] = segments
cap_end_time = get_end_time_from_segment(maskSet[0])
if abs(get_end_time_from_segment(segments[-1]) - cap_end_time) > 0.001:
setPathValue(analysis['metadatadiff'],
'audio.duration',
('change',get_end_time_from_segment(segments[-1]),cap_end_time))
analysis['videomasks'] = [seg for seg in analysis['videomasks'] if get_start_time_from_segment(seg) < cap_end_time]
lastseg = analysis['videomasks'][-1]
update_segment(lastseg,
endtime = cap_end_time,
endframe = int(cap_end_time*get_rate_from_segment(lastseg)/1000)+ 1)
self._addAnalysis(startIm, destIm, op, analysis, None, linktype='video.audio',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, list()
class AudioZipLinkTool(VideoAudioLinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
VideoAudioLinkTool.__init__(self)
def compare(self, start, end, scModel, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask set and the meta-data diff results
"""
analysis = dict()
if 'metadatadiff' in analysis:
analysis['metadatadiff'] = VideoMetaDiff(analysis['metadatadiff'])
if 'errors' in analysis:
analysis['errors'] = VideoMaskSetInfo(analysis['errors'])
return None, None, None, analysis
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False,
analysis_params={}):
from zip_tools import AudioPositions
from support import setPathValue
from video_tools import create_segment, get_start_time_from_segment, get_end_time_from_segment,\
update_segment, get_end_frame_from_segment
startIm, startFileName = scModel.getImageAndName(start)
destIm, destFileName = scModel.getImageAndName(destination)
mask = ImageWrapper(np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8'))
analysis = dict()
# CAN HAVE A START TIME LATER
maskSet = video_tools.FileMetaDataLocator(startFileName).getMaskSetForEntireVideo(
start_time=getValue(arguments,'Start Time',0),
end_time=getValue(arguments, 'End Time'),
media_types=['audio'])
if not len(maskSet):
raise ValueError("Cannot find audio data target file {}".format(destFileName))
node = scModel.getGraph().get_node(start)
meta_data = getValue(node,'zip content meta')
if meta_data is None:
meta_data = getValue(ZipAddTool().getAdditionalMetaData(destFileName),'zip content meta')
def audio_metadata_extractor(filename):
return meta_data[os.path.basename(filename)]
fps = float(getValue(arguments,'sample rate',maskSet[-1]['rate']))
positions = AudioPositions(destFileName,
position_file_name=getValue(arguments,'Audio Sequence File'),
fps=int(fps),
audio_metadata_extractor=audio_metadata_extractor if meta_data is not None else None)
segments = [create_segment(starttime=seg[0],
startframe=seg[1],
endtime=seg[2],
endframe=seg[3],
type='audio',
frames=seg[3]-seg[1]+1,
rate=fps)
for seg in positions.get_segments(0)]
analysis['masks count'] = 0
analysis = analysis if analysis is not None else {}
analysis['metadatadiff'] = {}
analysis['videomasks'] = maskSet
cap_end_time = get_end_time_from_segment(maskSet[0])
diff = cap_end_time - get_end_time_from_segment(segments[-1])
errors = []
# IF NOT ALL THE AUDIO IS USED, THEN CUT THE END OF THE MASK SET
if diff > 0.001:
setPathValue(analysis['metadatadiff'],
'audio.duration',
('change',cap_end_time, get_end_time_from_segment(segments[-1])))
analysis['videomasks'] = [seg for seg in analysis['videomasks'] if get_start_time_from_segment(seg) < cap_end_time]
lastseg = analysis['videomasks'][-1]
update_segment(lastseg,
endtime = get_end_time_from_segment(segments[-1]),
endframe = get_end_frame_from_segment(segments[-1]))
elif diff < 0:
# THIS WOULD BE AN ODD OCCURRENCE. This would only occur if a sequence file is provided
# that created 'spaces'
if getValue(arguments,'Audio Sequence File') is None:
errors = ['Duration of target zip file is longer than the source given the provided time constraints']
# thought about checking the mask set without an end time, perhaps
# the sequence file is out of alignment with the provided end time.
setPathValue(analysis['metadatadiff'],
'audio.duration',
('change', cap_end_time, get_end_time_from_segment(segments[-1])))
self._addAnalysis(startIm, destIm, op, analysis, None, linktype='video.audio',
arguments=consolidate(arguments, analysis_params),
start=start, end=destination, scModel=scModel)
return mask, analysis, errors
class ImageZipVideoLinkTool(VideoVideoLinkTool):
"""
Supports mask construction and meta-data comparison when linking images to images.
"""
def __init__(self):
VideoVideoLinkTool.__init__(self)
def compareImages(self, start, destination, scModel, op, invert=False, arguments={},
skipDonorAnalysis=False, analysis_params={}):
startIm, startFileName = scModel.getImageAndName(start)
#destIm, destFileName = scModel.getImageAndName(destination)
mask, analysis = ImageWrapper(
np.zeros((startIm.image_array.shape[0], startIm.image_array.shape[1])).astype('uint8')), {}
return mask, analysis, []
class AddTool:
def getAdditionalMetaData(self, media):
return {}
class VideoAddTool(AddTool):
def getAdditionalMetaData(self, media):
parent = {}
meta, frames = ffmpeg_api.get_meta_from_video(media, show_streams=True, with_frames=True, frame_limit=30, frame_meta=['pkt_duration_time'],media_types=['video'])
indices = ffmpeg_api.get_stream_indices_of_type(meta, 'video')
if indices:
if_vfr = ffmpeg_api.is_vfr(meta[indices[0]], frames=frames[indices[0]])
else:
if_vfr = False
meta, _ = ffmpeg_api.get_meta_from_video(media, show_streams=True,
frame_meta=['pkt_duration_time'])
parent['media'] = meta
width = 0
height = 0
rotation = 0
for item in meta:
if 'width' in item:
width = int(item['width'])
if 'height' in item:
height = int(item['height'])
if 'rotation' in item:
rotation = int(item['rotation'])
parent['shape'] = (width, height)
parent['rotation'] = rotation
if indices:
meta[indices[0]]['is_vfr'] = if_vfr
# redundant but requested by NIST
parent['is_vfr'] = if_vfr
return parent
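# Illustrative usage sketch (not executed here; 'clip.mp4' and handle_vfr are hypothetical):
#
#   info = VideoAddTool().getAdditionalMetaData('clip.mp4')
#   width, height = info['shape']      # frame dimensions gathered from the stream metadata
#   if info['is_vfr']:                 # variable-frame-rate flag, also stored on the video stream entry
#       handle_vfr(info['media'])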
class ZipAddTool(AddTool):
def getAdditionalMetaData(self, media):
import copy
from zipfile import ZipFile
file_type = zipFileType(media)
final_meta = {}
final_meta['media'] = []
if file_type in ['audio','video']:
tool = VideoAddTool()
duration = 0.0
frames = 0
capture = ZipCapture(media,filetypes=audiofiletypes + videofiletypes)
details = {}
while capture.isOpened():
if not capture.grab():
break
fn = capture.retrieve_file()
meta = tool.getAdditionalMetaData(fn)
new_meta = {}
for item in meta['media']:
if file_type == getValue(item, 'codec_type', 'text'):
if file_type == 'audio':
last_sample = getValue(meta, 'sample_rate', 48000)
last_duration = float(getValue(item, 'duration', getValue(meta, 'duration_ts', 1) / getValue(meta, 'sample_rate', 48000)))
last_frames = int(last_duration * 48000)
new_meta['audio'] = copy.copy(item)
details[os.path.basename(fn)] = item
else:
last_frames = int(getValue(item, 'nb_frames', 1))
last_duration = video_tools.estimate_duration(item, last_frames)
last_duration = getValue(item, 'duration', last_duration)
last_sample = last_frames/float(last_duration)
new_meta['video'] = copy.copy(item)
details[os.path.basename(fn)] = item
duration += float(last_duration)
frames += last_frames
if 'video' in new_meta:
new_meta['video']['duration'] = duration
new_meta['video']['nb_frames'] = frames
final_meta['media'] = [new_meta['video']]
if 'audio' in new_meta:
new_meta['audio']['duration'] = duration
new_meta['audio']['duration_ts'] = duration * last_sample
final_meta['media'] = meta['media'] + [new_meta['audio']]
final_meta['total duration'] = duration
final_meta['zip content meta'] = details
return final_meta
meta = {}
with ZipFile(media, 'r') as myzip:
names = myzip.namelist()
meta['length'] = len(names)
return meta
class OtherAddTool(AddTool):
def getAdditionalMetaData(self, media):
return {}
addTools = {
'video': VideoAddTool(),
'zip':ZipAddTool(),
'collection': OtherAddTool(),
'audio': OtherAddTool(),
'image': OtherAddTool()
}
linkTools = {'image.image': ImageImageLinkTool(), 'video.video': VideoVideoLinkTool(),
'image.video': ImageVideoLinkTool(), 'video.image': VideoImageLinkTool(),
'video.audio': VideoAudioLinkTool(), 'audio.video': AudioVideoLinkTool(),
'audio.audio': AudioAudioLinkTool(), 'zip.video': ZipVideoLinkTool(),
'collection.image': CollectionImageLinkTool(),
'zip.zip': ZipZipLinkTool(), 'video.zip': VideoZipLinkTool(),
'zip.image': ZipImageLinkTool(),
'zip.audio': ZipAudioLinkTool(),
'audio.zip': AudioZipLinkTool()}
def true_notify(object, message, **kwargs):
return True
class ImageProjectModel:
"""
A ProjectModel manages a project. A project is made up of a directed graph of Image nodes and links.
Each link is associated with a manipulation between the source image to the target image.
A link contains a mask(black and white) image file describing the changes.
A mask's X&Y dimensions match the source image.
A link contains a description of the manipulation operation, software used to perform the manipulation,
analytic results comparing source to target images, and an input mask path name. The input mask path name
describes a mask used by the manipulation software as a parameter describing the manipulation.
Links may be 'read-only' indicating that they are created through an automated plugin.
A ProjectModel can be reused to open new projects. It is designed to represent a view model (MVC).
A ProjectModel has two state parameters, 'start' and 'end', containing the names of image nodes in the graph.
When both are set, a link is selected. When 'start' is set and 'end' is None, only a single image node is selected.
Several methods on the ProjectModel depend on the state of these parameters. For example, adding a new link
to an image node chooses the source node referenced by 'end' if set; otherwise it chooses the node referenced by 'start'.
"""
G = None
start = None
end = None
notify = None
"""
@type G: ImageGraph
@type start: String
@type end: String
"""
lock = Lock()
def __init__(self, projectFileName, graph=None, notify=None,
baseImageFileName=None, username=None,tool=None):
self.probeMaskMemory = DummyMemory(None)
if notify is not None:
self.notify = notifiers.NotifyDelegate(
[notify, notifiers.QaNotifier(self), notifiers.ValidationNotifier(total_errors=None)])
else:
self.notify = notifiers.NotifyDelegate([true_notify])
if graph is not None:
graph.arg_checker_callback = self.__scan_args_callback
# Group Operations are tied to models since
# group operations are created by a local instance and stored in the graph model
# when used.
self.gopLoader = GroupOperationsLoader()
self.username = username if username is not None else get_username()
self._setup(projectFileName, graph=graph, baseImageFileName=baseImageFileName,tool=tool)
def set_notifier(self, notifier):
self.notify = notifiers.NotifyDelegate(
[notifier, notifiers.QaNotifier(self), notifiers.ValidationNotifier(total_errors=None)])
def get_dir(self):
return self.G.dir
def getGroupOperationLoader(self):
return self.gopLoader
def addImagesFromDir(self, dir, baseImageFileName=None, xpos=100, ypos=30, suffixes=list(),
sortalg=lambda s: s.lower(),preferences={}):
"""
Bulk add all images from a given directory into the project.
Position the images in a grid, separated by 50 vertically with a maximum height of 520.
Images are imported in lexicographic order, first importing JPG, then PNG and finally TIFF.
If baseImageFileName, the name of an image node, is provided, then that node is selected
upon completion of the operation. Otherwise, the last node imported is selected.
"""
initialYpos = ypos
totalSet = []
suffixes = set(suffixes)
for suffix in suffixes:
suffix_lower = suffix.lower()
totalSet.extend([filename for filename in os.listdir(dir) if
filename.lower().endswith(suffix_lower ) and \
not filename.endswith('_mask' + suffix) and \
not filename.endswith('_proxy' + suffix)])
totalSet = sorted(totalSet, key=sortalg)
added = []
for filename in totalSet:
try:
pathname = os.path.abspath(os.path.join(dir, filename))
additional = self.getAddTool(pathname).getAdditionalMetaData(pathname)
additional.update(preferences)
nname = self.G.add_node(pathname, xpos=xpos, ypos=ypos, nodetype='base', **additional)
added.append(nname)
ypos += 50
if ypos == 450:
ypos = initialYpos
xpos += 50
if filename == baseImageFileName:
self.start = nname
self.end = None
except Exception as ex:
logging.getLogger('maskgen').warn('Failed to add media file {}'.format(filename))
self.notify(added, 'add')
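# Illustrative usage sketch (the directory and suffixes are hypothetical):
#
#   model.addImagesFromDir('/data/images', suffixes=['.jpg', '.png'],
#                          sortalg=lambda s: s.lower())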
def addImage(self, pathname, cgi=False, prnu=False, **kwargs):
maxx = 50
max_node = None
for node_id in self.G.get_nodes():
node = self.G.get_node(node_id)
if 'xpos' in node and int(node['xpos']) > maxx:
maxx = int(node['xpos'])
max_node = node
maxy = max_node['ypos'] + 50 if max_node is not None else 50
additional = self.getAddTool(pathname).getAdditionalMetaData(pathname)
additional.update(kwargs)
nname = self.G.add_node(pathname, nodetype='base',
cgi='yes' if cgi else 'no',
xpos=maxx,
ypos=maxy,
prnu='yes' if prnu else 'no',
**additional)
self.start = nname
self.end = None
self.notify([self.start], 'add')
return nname
def getEdgesBySemanticGroup(self):
"""
:return: association of semantics groups to edge id tuples (start,end)
@rtype: dict of list of tuple
"""
result = {}
for edgeid in self.getGraph().get_edges():
for grp in self.getSemanticGroups(edgeid[0], edgeid[1]):
if grp not in result:
result[grp] = [edgeid]
else:
result[grp].append(edgeid)
return result
def add_to_edge(self, **items):
self.G.update_edge(self.start, self.end, **items)
self.notify((self.start, self.end), 'update_edge')
def update_node(self, node_properties):
deleteImage(self.getStartImageFile())
self.G.update_node(self.start, **node_properties)
def update_edge(self, mod):
"""
:param mod:
:return:
@type mod: Modification
"""
op = self.gopLoader.getOperationWithGroups(mod.operationName,fake=True)
mod_old = self.getModificationForEdge(self.start, self.end)
trigger_update = False
for k,v in mod.arguments.iteritems():
if (k not in mod_old.arguments or mod_old.arguments[k] != v) and \
k in op.getTriggerUpdateArguments():
trigger_update = True
for k in mod_old.arguments:
if k not in mod.arguments and \
k in op.getTriggerUpdateArguments():
trigger_update = True
self.G.update_edge(self.start, self.end,
op=mod.operationName,
description=mod.additionalInfo,
arguments={k: v for k, v in mod.arguments.iteritems() if k != 'inputmaskname'},
recordMaskInComposite=mod.recordMaskInComposite,
semanticGroups=mod.semanticGroups,
editable='no' if (
mod.software is not None and mod.software.internal) or mod.operationName == 'Donor' else 'yes',
softwareName=('' if mod.software is None else mod.software.name),
softwareVersion=('' if mod.software is None else mod.software.version),
inputmaskname=mod.inputMaskName)
self._save_group(mod.operationName)
if trigger_update:
self.reproduceMask(force=False)
else:
self.notify((self.start, self.end), 'update_edge')
def compare(self, destination, arguments={}):
""" Compare the 'start' image node to the image node with the name in the 'destination' parameter.
Return both images, the mask and the analysis results (a dictionary)
"""
return self.getLinkTool(self.start, destination).compare(self.start, destination, self, arguments=arguments)
def getMetaDiff(self):
""" Return the EXIF differences between nodes referenced by 'start' and 'end'
Return the Frame meta-data differences between nodes referenced by 'start' and 'end'
"""
e = self.G.get_edge(self.start, self.end)
if e is None:
return None
videodiff = VideoMetaDiff(e['metadatadiff']) if getValue(e,'metadatadiff',None) is not None else None
imagediff = MetaDiff(e['exifdiff']) if 'exifdiff' in e and len(e['exifdiff']) > 0 else None
return imagediff if imagediff is not None else videodiff
def getDonorAndBaseNodeTuples(self):
"""
Return a tuple (edge, base node, list of nodes that for the path from edge to base)
for each valid donor path through the graph
"""
donorEdges = []
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if graph_rules.eligible_for_donor(edge):
donorEdges.append(edge_id)
results = []
for edge in donorEdges:
baseSet = self._findBaseNodesAndPaths(edge[0], excludeDonor=True)
for base in baseSet:
if (edge, base) not in results:
results.append((edge, base[0], base[1]))
if len(baseSet) == 0:
results.append((edge, None, list()))
for result in results:
result[2].reverse()
return results
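# Illustrative sketch of consuming the result (process_donor is hypothetical):
#
#   for donor_edge, base_node, path in model.getDonorAndBaseNodeTuples():
#       # donor_edge is a (start, end) tuple; path runs from base_node toward the donor edge
#       process_donor(donor_edge, base_node, path)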
def getTerminalAndBaseNodeTuples(self):
"""
Return a tuple (lead node, base node) for each valid (non-donor) path through the graph
"""
terminalNodes = [node for node in self.G.get_nodes() if
len(self.G.successors(node)) == 0 and len(self.G.predecessors(node)) > 0]
return [(node, self._findBaseNodes(node)) for node in terminalNodes]
def getEdges(self, endNode,excludeDonor=True):
"""
:param endNode: (identifier)
:return: tuple (start, end, edge map) for all edges ending in endNode
"""
return self._findEdgesWithCycleDetection(endNode, excludeDonor=excludeDonor, visitSet=list())
def getNodeNames(self):
return self.G.get_nodes()
def getCurrentNode(self):
return self.G.get_node(self.start)
def isEditableEdge(self, start, end):
e = self.G.get_edge(start, end)
return 'editable' not in e or e['editable'] == 'yes'
def findChild(self, parent, child):
for suc in self.G.successors(parent):
if suc == child or self.findChild(suc, child):
return True
return False
def compress(self, all=False,force=False):
if all:
return [self._compress(node) for node in self.G.get_nodes()]
else:
return self._compress(self.start, force=force)
def _compress(self, start, force=False):
defaults = {'compressor.video': 'maskgen.video_tools.x264',
'compressor.audio': None,
'compressor.image': None}
node = self.G.get_node(start)
ftype = self.getNodeFileType(start)
# cannot finish the action since the edge analysis was skipped
for skipped_edge in self.G.getDataItem('skipped_edges', []):
if skipped_edge['start'] == start and not force:
return
if (len(self.G.successors(start)) == 0 or len(self.G.predecessors(start)) == 0) and not force:
return
props = {'remove_video': False,'force': False}
#for pred in self.G.predecessors(start):
# edge = self.G.get_edge(pred, start)
# op = getOperationWithGroups(edge['op'], fake=True)
# if op.category == 'Audio':
# props['remove_video'] = True
compressor = prefLoader.get_key('compressor.' + ftype,
default_value=defaults['compressor.' + ftype])
if 'compressed' in node:
return
func = getRule(compressor)
newfile = None
if func is not None:
newfilename = func(os.path.join(self.get_dir(), node['file']), **props)
if newfilename is not None:
newfile = os.path.split(newfilename)[1]
self.G.update_node(start,file=newfile,compressed=compressor)
return newfile
def connect(self, destination, mod=Modification('Donor', '',category='Donor'), invert=False, sendNotifications=True,
skipDonorAnalysis=False):
""" Given a image node name, connect the new node to the end of the currently selected node.
Create the mask, inverting the mask if requested.
Send a notification to the register caller if requested.
Return an error message on failure, otherwise return None
"""
if self.start is None:
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Node node selected",
Module=''), False
elif not self.G.has_node(destination):
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Canvas out of state from model. Node Missing.",
Module=''), False
elif self.findChild(destination, self.start):
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Cannot connect to ancestor node",
Module=''), False
else:
for successor in self.G.successors(self.start):
if successor == destination:
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Cannot connect to the same node twice",
Module=''), False
return self._connectNextImage(destination,
mod,
invert=invert,
sendNotifications=sendNotifications,
skipDonorAnalysis=skipDonorAnalysis)
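# Illustrative usage sketch ('blurred' and the operation name are hypothetical):
#
#   msgs, ok = model.connect('blurred', mod=Modification('Blur', 'gaussian blur', category='Filter'))
#   if not ok:
#       pass  # inspect the returned ValidationMessage(s)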
def getPredecessorNode(self):
if self.end is None:
for pred in self.G.predecessors(self.start):
edge = self.G.get_edge(pred, self.start)
if edge['op'] != 'Donor':
return pred
return self.start
def getBaseNode(self, node):
for pred in self.G.predecessors(node):
edge = self.G.get_edge(pred, node)
if edge['op'] != 'Donor':
return self.getBaseNode(pred)
return node
def getCreatingOperation(self, destination):
"""
:return: operation for the manipulation that created this destination and the start node
@rtype: (str,Operation)
"""
predecessors = self.G.predecessors(destination)
for pred in predecessors:
pred_edge = self.G.get_edge(pred, destination)
edge_op = self.gopLoader.getOperationWithGroups(pred_edge['op'])
if edge_op is not None and pred_edge['op'] != 'Donor':
return pred, edge_op
def getDonorAndBaseImage(self):
"""
Get the donor image and associated baseImage for the selected node.
"""
nodeName = self.start if self.end is None else self.end
# verify the node is a leaf node
endPointTuples = self.getDonorAndBaseNodeTuples()
for x in endPointTuples:
if nodeName == x[0][1]:
baseImage, _ = self.G.get_image(x[1])
donors = self.constructDonors()
for donortuple in donors:
if donortuple.base == x[1]:
if donortuple.media_type == 'video':
return video_tools.getSingleFrameFromMask(donortuple.mask_wrapper), baseImage
elif donortuple.media_type == 'audio':
return None, None
else:
return donortuple.mask_wrapper, baseImage
return None, None
def getTransformedMask(self):
"""
:return: list of CompositeImage
"""
composite_generator = mask_rules.prepareComposite((self.start, self.end),self.G, self.gopLoader, self.probeMaskMemory)
return composite_generator.constructComposites(checkEmptyMask=False)
def executeFinalNodeRules(self):
terminalNodes = [node for node in self.G.get_nodes() if
len(self.G.successors(node)) == 0 and len(self.G.predecessors(node)) > 0]
for node in terminalNodes:
graph_rules.setFinalNodeProperties(self, node)
def constructDonors(self):
"""
Construct donor images
Find all valid base node, leaf node tuples
:return computed donors in the form of tuples
(image node id donated to, base image node, ImageWrapper mask, filename)
@rtype list of DonorImage
"""
self._executeSkippedComparisons()
for edge_id in self.G.get_edges():
if self.start is not None and self.start != edge_id[1]:
continue
composite_generator = mask_rules.prepareComposite(edge_id, self.G, self.gopLoader, self.probeMaskMemory)
return composite_generator.constructDonors(saveImage=False)
return []
def invertInputMask(self):
"""
Temporary: invert the input mask associated with the selected edge
:return:
"""
if self.start is not None and self.end is not None:
start_im = self.startImage()
edge = self.G.get_edge(self.start, self.end)
if edge is not None:
maskname= getValue(edge,'inputmaskname')
if maskname is not None:
mask = openImageMaskFile(self.get_dir(),maskname)
if mask is not None:
expected_shape = start_im.image_array.shape[0:2]
if expected_shape != mask.shape:
mask = cv2.resize(mask,tuple(reversed(expected_shape)))
mask = ImageWrapper(mask)
mask = mask.invert()
mask.save(os.path.join(self.get_dir(),maskname))
def fixInputMasks(self):
"""
Temporary: Add missing input masks
:return:
"""
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if graph_rules.missing_donor_inputmask(edge, self.G.dir):
startimage, name = self.G.get_image(edge_id[0])
finalimage, fname = self.G.get_image(edge_id[1])
mask = self.G.get_edge_image(edge_id[0], edge_id[1], 'maskname')
inputmaskname = os.path.splitext(name)[0]+ '_inputmask.png'
ImageWrapper(composeCloneMask(mask, startimage, finalimage)).save(inputmaskname)
# if 'arguments' not in edge:
# edge['arguments'] = {}
edge['inputmaskname'] = os.path.split(inputmaskname)[1]
# edge['arguments']['inputmaskname'] = os.path.split(inputmaskname)[1]
self.G.setDataItem('autopastecloneinputmask', 'yes')
def renametobase(self):
"""
Rename the project to match the name of the base image
:return:
"""
for nodeid in self.G.get_nodes():
node = self.G.get_node(nodeid)
if 'nodetype' in node and node['nodetype'] == 'base':
pos = node['file'].find('.')
self.getGraph().set_name(node['file'][:pos] if pos > 0 else node['file'])
break
def addNextImage(self, pathname, invert=False, mod=Modification('', ''), sendNotifications=True, position=(50, 50),
skipRules=False, edge_parameters={}, node_parameters={}):
""" Given a image file name and PIL Image, add the image to the project, copying into the project directory if necessary.
Connect the new image node to the end of the currently selected edge. A node is selected, not an edge, then connect
to the currently selected node. Create the mask, inverting the mask if requested.
Send a notification to the register caller if requested.
Return a list of validation messages on failure, otherwise return None
"""
if (self.end is not None):
self.start = self.end
params = dict(node_parameters)
params['xpos'] = position[0]
params['ypos'] = position[1]
params['nodetype'] = 'base'
for k, v in self.getAddTool(pathname).getAdditionalMetaData(pathname).iteritems():
params[k] = v
destination = self.G.add_node(pathname, seriesname=self.getSeriesName(), **params)
self.notify([destination],'add')
analysis_params = dict({ k:v for k,v in edge_parameters.iteritems() if v is not None})
msgs, status = self._connectNextImage(destination, mod, invert=invert, sendNotifications=sendNotifications,
skipRules=skipRules, analysis_params=analysis_params)
return msgs, status
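# Illustrative usage sketch (the file name and operation are hypothetical):
#
#   mod = Modification('TransformResize', 'downscaled copy')
#   msgs, ok = model.addNextImage('/tmp/resized.png', mod=mod, position=(120, 80))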
def getLinkType(self, start, end):
return self.getNodeFileType(start) + '.' + self.getNodeFileType(end)
def getLinkTool(self, start, end):
"""
:param start:
:param end:
:return:
@rtype: LinkTool
"""
return linkTools[self.getLinkType(start, end)]
def mergeProject(self, project):
"""
Merge projects. Does not support updating edges or nodes.
Instead, it only adds new edges and nodes.
Should be used with caution.
:param project:
:return:
@type project: ImageProjectModel
"""
# link from their node id to my node id
merge_point = dict()
myfiles = dict()
matches = dict()
for nodeid in self.getGraph().get_nodes():
mynode = self.getGraph().get_node(nodeid)
md5 = md5_of_file(os.path.join(self.G.dir, mynode['file']),
raiseError=False)
matches[md5] = nodeid
self.G.update_node(nodeid, file=md5)
for nodeid in project.getGraph().get_nodes():
theirnode = project.getGraph().get_node(nodeid)
theirfilemd5 = md5_of_file(os.path.join(project.get_dir(), theirnode['file']),
raiseError=False)
if theirnode['file'] in myfiles:
if myfiles[theirnode['file']] != theirfilemd5:
logging.getLogger('maskgen').warn(
'file {} is in both projects but MD5 is different'.format(theirnode['file']))
if theirfilemd5 in matches:
merge_point[nodeid] = matches[theirfilemd5]
if len(merge_point) == 0:
return 'No merge points found'
for nodeid in project.getGraph().get_nodes():
theirnode = project.getGraph().get_node(nodeid)
if nodeid not in merge_point:
merge_point[nodeid] = self.getGraph().add_node(os.path.join(project.get_dir(), theirnode['file']),
**theirnode)
for start, end in project.getGraph().get_edges():
mystart = merge_point[start]
myend = merge_point[end]
edge = self.getGraph().get_edge(mystart, myend)
if edge is None:
self.getGraph().copy_edge(mystart,
myend,
dir=project.get_dir(),
edge=project.getGraph().get_edge(start, end))
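# Illustrative usage sketch (the other project's path is hypothetical):
#
#   other = ImageProjectModel('/path/to/other_project.json')
#   msg = model.mergeProject(other)
#   if msg is not None:
#       logging.getLogger('maskgen').warn(msg)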
def getAddTool(self, media):
""""
:param media:
:return:
@rtype : AddTool
"""
ft = fileType(media)
if ft.startswith('zip'):
ft = 'zip'
return addTools[ft]
def hasSkippedEdges(self):
return len(self.G.getDataItem('skipped_edges', [])) > 0
def _executeQueue(self,q,results,tracker):
"""
:param q: queue of edge data to process
:param results: queue receiving ((start, end), success, error list) tuples
:return:
@type q: Queue
@type results: Queue
"""
from Queue import Empty
while not q.empty():
try:
edge_data = q.get_nowait()
if edge_data is None:
break
logging.getLogger('maskgen').info('Recomputing mask for edge {} to {} using operation {}'.format(
edge_data['start'],
edge_data['end'],
edge_data['opName']
))
tracker.next('{}->{}'.format(edge_data['start'], edge_data['end']))
if self.getGraph().has_node(edge_data['start']) and self.getGraph().has_node(edge_data['end']) and \
self.getGraph().has_edge(edge_data['start'],edge_data['end']):
mask, analysis, errors = self.getLinkTool(edge_data['start'], edge_data['end']).compareImages(
edge_data['start'],
edge_data['end'],
self,
edge_data['opName'],
arguments=edge_data['arguments'],
skipDonorAnalysis=edge_data['skipDonorAnalysis'],
invert=edge_data['invert'],
analysis_params=edge_data['analysis_params'])
maskname = shortenName(edge_data['start'] + '_' + edge_data['end'], '_mask.png', identifier=self.G.nextId())
self.G.update_mask(edge_data['start'], edge_data['end'], mask=mask, maskname=maskname, errors=errors,
**consolidate(analysis, edge_data['analysis_params']))
else:
errors = []
results.put(((edge_data['start'], edge_data['end']), True, errors))
#with self.G.lock:
# results.put(((edge_data['start'], edge_data['end']), True, errors))
# self.G.setDataItem('skipped_edges', [skip_data for skip_data in self.G.getDataItem('skipped_edges', []) if
# (skip_data['start'], skip_data['end']) != (edge_data['start'], edge_data['end'])])
except Empty:
break
except Exception as e:
if edge_data is not None:
logging.getLogger('maskgen').error('Failure to generate mask for edge {} to {} using operation {}: {}'.format(
edge_data['start'],
edge_data['end'],
edge_data['opName'],
str(e)
))
results.put(((edge_data['start'], edge_data['end']),False, [str(e)]))
return
def _executeSkippedComparisons(self,status_cb=None):
from Queue import Queue
from threading import Thread
allErrors = []
completed = []
q = Queue()
status = Queue()
results = Queue()
skipped_edges = self.G.getDataItem('skipped_edges', [])
if len(skipped_edges) == 0:
return
# status messages are queued only when multi-threaded (skipped_threads is bound below, before the lambda runs)
tracker_cb = lambda x: status.put(x) if status_cb is not None and int(skipped_threads) >= 2 else None
tracker = StatusTracker(module_name='Mask Generator',
amount=len(skipped_edges),
status_cb=tracker_cb)
for edge_data in skipped_edges:
q.put(edge_data)
skipped_threads = prefLoader.get_key('skipped_threads', 2)
logging.getLogger('maskgen').info('Recomputing {} masks with {} threads'.format(q.qsize(), skipped_threads))
threads = list()
try:
if int(skipped_threads) < 2:
self._executeQueue(q, results, tracker)
else:
for i in range(int(skipped_threads)):
t = Thread(target=self._executeQueue, name='skipped_edges' + str(i), args=(q,results,tracker))
threads.append(t)
t.start()
if status_cb is not None:
while not q.empty():
try:
message = status.get(timeout=5)
if message is not None:
status_cb(message)
except:
continue
for thread in threads:
thread.join()
while not results.empty():
result = results.get_nowait()
allErrors.extend(result[2])
if result[1]:
completed.append(result[0])
finally:
tracker.complete()
self.G.setDataItem('skipped_edges',[edge_data for edge_data in skipped_edges if (edge_data['start'], edge_data['end']) not in completed])
msg = os.linesep.join(allErrors).strip()
return msg if len(msg) > 0 else None
def _compareImages(self, start, destination, opName, invert=False, arguments={}, skipDonorAnalysis=False,
analysis_params=dict(),
force=False):
if prefLoader.get_key('skip_compare') and not force:
self.G.setDataItem('skipped_edges', self.G.getDataItem('skipped_edges', list()) + [{"start": start,
"end": destination,
"analysis_params": analysis_params,
"arguments": arguments,
"opName": opName,
"skipDonorAnalysis": skipDonorAnalysis,
"invert": invert
}])
return None, {}, []
try:
for k, v in self.gopLoader.getOperationWithGroups(opName).compareparameters.iteritems():
arguments[k] = v
except:
pass
return self.getLinkTool(start, destination).compareImages(start, destination, self, opName,
arguments=arguments,
skipDonorAnalysis=skipDonorAnalysis,
invert=invert,
analysis_params=analysis_params)
def reproduceMask(self, skipDonorAnalysis=False,edge_id=None, analysis_params=dict(), argument_params=dict(),
force=True):
"""
:param skipDonorAnalysis:
:param edge_id:
:param analysis_params:
:param argument_params:
:param force: If True, then force mask creation do not skip.
:return:
"""
errors = []
mask_edge_id = (self.start, self.end) if edge_id is None else edge_id
edge = self.G.get_edge(mask_edge_id[0],mask_edge_id[1])
arguments = dict(edge['arguments']) if 'arguments' in edge else dict()
if len(argument_params) > 0:
arguments = argument_params
if 'inputmaskname' in edge and edge['inputmaskname'] is not None:
arguments['inputmaskname'] = edge['inputmaskname']
try:
mask, analysis, errors = self._compareImages(mask_edge_id[0], mask_edge_id[1], edge['op'],
arguments=arguments,
skipDonorAnalysis=skipDonorAnalysis,
analysis_params=analysis_params,
force=force)
analysis_params['arguments'] = arguments
maskname = shortenName(mask_edge_id[0] + '_' + mask_edge_id[1], '_mask.png', identifier=self.G.nextId())
self.G.update_mask(mask_edge_id[0], mask_edge_id[1], mask=mask, maskname=maskname, errors=errors, **consolidate(analysis, analysis_params))
if len(errors) == 0:
self.G.setDataItem('skipped_edges', [skip_data for skip_data in self.G.getDataItem('skipped_edges', []) if
(skip_data['start'], skip_data['end']) != mask_edge_id])
self.notify(mask_edge_id, 'update_edge')
except video_tools.MaskGenerationError as e:
if e.message != '':
logging.getLogger('maskgen').info(e.message)
return errors
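# Illustrative usage sketch (node ids are hypothetical): force regeneration of one link mask.
#
#   errors = model.reproduceMask(edge_id=('node_a', 'node_b'), force=True)
#   if errors:
#       logging.getLogger('maskgen').warn('; '.join(errors))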
def _connectNextImage(self, destination, mod, invert=False, sendNotifications=True, skipRules=False,
skipDonorAnalysis=False,
analysis_params={}):
"""
:param destination:
:param mod:
:param invert:
:param sendNotifications:
:param skipRules:
:param skipDonorAnalysis:
:param analysis_params:
:return: Error message and success or failure
@rtype: (str, bool)
"""
try:
maskname = shortenName(self.start + '_' + destination, '_mask.png', identifier=self.G.nextId())
if mod.inputMaskName is not None:
mod.arguments['inputmaskname'] = mod.inputMaskName
mask, analysis, errors = self._compareImages(self.start, destination, mod.operationName,
invert=invert, arguments=mod.arguments,
skipDonorAnalysis=skipDonorAnalysis,
analysis_params=analysis_params)
self.end = destination
if errors:
mod.errors = errors
for k, v in analysis_params.iteritems():
if k not in analysis:
analysis[k] = v
if 'recordMaskInComposite' in mod.arguments:
mod.recordMaskInComposite = mod.arguments.pop('recordMaskInComposite')
mod.recordMaskInComposite = (getValue(analysis, 'global', 'yes') == 'no') | mod.recordMaskInComposite
self.__addEdge(self.start, self.end, mask, maskname, mod, analysis)
if sendNotifications:
self.notify((self.start, destination), 'connect')
logging.getLogger('maskgen').debug('Validation')
edgeErrors = [] if skipRules else self.validator.run_edge_rules(self.G, self.start, destination, isolated=True)
edgeErrors = edgeErrors if len(edgeErrors) > 0 else None
self.labelNodes(self.start)
self.labelNodes(destination)
return edgeErrors, True
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.getLogger('maskgen').error(' '.join(traceback.format_exception(exc_type,exc_value,exc_traceback)))
return [ValidationMessage(Severity.ERROR,
self.start,
destination,
'Exception (' + str(e) + ')',
'Change Mask',
None)], False
def __scan_args_callback(self, opName, arguments):
"""
Call back function for image graph's arg_checker_callback.
Add any discovered arguments that are associated with
file paths so that the image graph can managed the file
existence and archiving
:param opName:
:param arguments:
:return:
"""
if len(arguments) > 0 and opName != 'node':
self.__addEdgeFilePaths(self.gopLoader.getOperationWithGroups(opName, fake=True))
def __addEdgeFilePaths(self, op):
for k, v in op.mandatoryparameters.iteritems():
if k == 'inputmaskname':
continue
if v['type'].startswith('fileset:') or v['type'].startswith('file:'):
self.G.addEdgeFilePath('arguments.' + k, '')
for k, v in op.optionalparameters.iteritems():
if k == 'inputmaskname':
continue
if v['type'].startswith('fileset:') or v['type'].startswith('file:'):
self.G.addEdgeFilePath('arguments.' + k, '')
def __addEdge(self, start, end, mask, maskname, mod, additionalParameters):
if len(mod.arguments) > 0:
additionalParameters['arguments'] = {k: v for k, v in mod.arguments.iteritems() if k != 'inputmaskname'}
self.G.add_edge(start, end,
mask=mask,
maskname=maskname,
op=mod.operationName,
description=mod.additionalInfo,
recordMaskInComposite=mod.recordMaskInComposite,
editable='no' if (
mod.software is not None and mod.software.internal) or mod.operationName == 'Donor' else 'yes',
softwareName=('' if mod.software is None else mod.software.name),
softwareVersion=('' if mod.software is None else mod.software.version),
inputmaskname=mod.inputMaskName,
automated=mod.automated,
semanticGroups=mod.semanticGroups,
errors=mod.errors,
**additionalParameters)
self._save_group(mod.operationName)
def _save_group(self, operation_name):
op = self.gopLoader.getOperationWithGroups(operation_name, fake=True)
if op.groupedOperations is not None and len(op.groupedOperations) > 0:
groups = self.G.getDataItem('groups')
if groups is None:
groups = dict()
groups[operation_name] = op.groupedOperations
self.G.setDataItem('groups', groups, excludeUpdate=True)
def getSeriesName(self):
""" A Series is the prefix of the first image node """
if self.start is None:
return None
startNode = self.G.get_node(self.start)
prefix = None
if (startNode.has_key('seriesname')):
prefix = startNode['seriesname']
if (self.end is not None):
endNode = self.G.get_node(self.end)
if (endNode.has_key('seriesname')):
prefix = endNode['seriesname']
return prefix
def nodesToCSV(self, filename, additionalpaths=list(), nodeFilter=None):
"""
Create a CSV containing all the nodes of the graph.
By default, the columns are project name, node id, node type, node file type, and node file name.
:param filename:
:param additionalpaths: paths that describe nested keys within the node dictionary identifying
those keys' values to be placed as columns in the CSV
:param nodeFilter: a function that accepts the node dictionary and returns True if
the node is to be included in the CSV file. If the nodeFilter is None or not provided,
all nodes are included in the CSV file
:return: None
@type filename: str
@type nodeFilter: func
"""
import csv
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
with open(filename, "ab") as fp:
fp_writer = csv.writer(fp)
for node_id in self.G.get_nodes():
node = self.G.get_node(node_id)
if nodeFilter is not None and not nodeFilter(node):
continue
row = [self.G.get_name(), node_id, node['nodetype'], self.G.getNodeFileType(node_id), self.G.get_filename(node_id)]
for path in additionalpaths:
if isinstance(path, str):
values = getPathValues(node, path)
else:
values = path(node)
if len(values) > 0:
row.append(values[0])
else:
row.append('')
fp_writer.writerow(row)
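# Illustrative usage sketch ('nodes.csv' is hypothetical; 'proxyfile' is a node property used elsewhere in this module):
#
#   model.nodesToCSV('nodes.csv',
#                    additionalpaths=['proxyfile'],
#                    nodeFilter=lambda node: node['nodetype'] == 'base')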
def toCSV(self, filename, additionalpaths=list(), edgeFilter=None):
"""
Create a CSV containing all the edges of the graph.
By default, the first columns are project name, edge start node id,
edge end node id, and edge operation.
:param filename:
:param additionalpaths: paths that describe nested keys within the edge dictionary identifying
those keys' value to be placed as columns in the CSV
:param edgeFilter: a function that accepts the edge dictionary and returns True if
the edge is to be included in the CSV file. If the edgeFilter is None or not provided,
all edges are included in the CSV file
:return: None
@type filename: str
@type edgeFilter: func
"""
import csv
import inspect
from functools import partial
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
with open(filename, "ab") as fp:
fp_writer = csv.writer(fp)
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if edgeFilter is not None and not edgeFilter(edge):
continue
row = [self.G.get_name(), edge_id[0], edge_id[1], edge['op']]
baseNodes = self._findBaseNodes(edge_id[0])
for path in additionalpaths:
if path == 'basenode':
row.append(baseNodes[0])
continue
elif isinstance(path, str):
values = getPathValues(edge, path)
elif (inspect.isfunction(path) and 'graph' in inspect.getargspec(path).args) or \
(isinstance(path, partial) and 'graph' in inspect.getargspec(path.func).args):
values = path(edge, edge_id=edge_id, op=self.gopLoader.getOperationWithGroups(edge['op']),
graph=self.getGraph())
else:
values = path(edge, edge_id=edge_id, op=self.gopLoader.getOperationWithGroups(edge['op']))
if len(values) > 0:
row.append(values[0])
else:
row.append('')
fp_writer.writerow(row)
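# Illustrative usage sketch ('edges.csv' and the argument key are hypothetical); a callable
# path receives the edge plus edge_id and op keyword arguments, as shown in the loop above.
#
#   model.toCSV('edges.csv',
#               additionalpaths=['basenode', 'arguments.purpose'],
#               edgeFilter=lambda edge: edge['op'] != 'Donor')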
def getName(self):
return self.G.get_name()
def operationImageName(self):
return self.end if self.end is not None else self.start
def getFileName(self, nodeid):
return self.G.get_node(nodeid)['file']
def startImageName(self):
return self.G.get_node(self.start)['file'] if self.start is not None else ""
def nextImageName(self):
return self.G.get_node(self.end)['file'] if self.end is not None else ""
def nextId(self):
return self.end
def undo(self):
""" Undo the last graph edit """
s = self.start
e = self.end
self.start = None
self.end = None
self.G.undo()
self.notify((s, e), 'undo')
def select(self, edge):
if self.getGraph().get_node(edge[0]) == None:
return False
self.start = edge[0]
self.end = edge[1]
return True
def _openProject(self, projectFileName, projecttype, username=None,tool=None):
return createGraph(projectFileName,
projecttype=projecttype,
arg_checker_callback=self.__scan_args_callback,
edgeFilePaths={'inputmaskname': 'inputmaskownership',
'selectmasks.mask': '',
'videomasks.videosegment': '',
'substitute subsitute': '',
'substitute videomasks.videosegment': ''},
nodeFilePaths={'donors.*': ''},
username=username if username is not None else self.username,
tool=tool)
def _autocorrect(self):
if not updateJournal(self):
logging.getLogger('maskgen').error('Cannot auto update journal')
def _setup(self, projectFileName, graph=None, baseImageFileName=None,tool=None):
projecttype = None if baseImageFileName is None else fileType(baseImageFileName)
self.G = self._openProject(projectFileName, projecttype, username=self.username,tool=tool) if graph is None else graph
self._autocorrect()
self.start = None
self.end = None
n = self.G.get_nodes()
if len(n) > 0:
self.start = n[0]
s = self.G.successors(n[0])
if len(s) > 0:
self.end = s[0]
else:
p = self.G.predecessors(n[0])
if len(p) > 0:
self.start = p[0]
self.end = n[0]
# inject loaded groups into the group operations manager
for group, ops in self.G.getDataItem('groups', default_value={}).iteritems():
self.gopLoader.injectGroup(group, ops)
self.validator = Validator(prefLoader, self.gopLoader)
def getStartType(self):
return self.G.getNodeFileType(self.start) if self.start is not None else 'image'
def getEndType(self):
return self.G.getNodeFileType(self.end) if self.end is not None else 'image'
def getNodeFileType(self, nodeid):
return self.G.getNodeFileType(nodeid)
def saveas(self, pathname):
with self.lock:
self.clear_validation_properties()
self.assignColors()
self.G.saveas(pathname)
def save(self):
with self.lock:
self.clear_validation_properties()
self.assignColors()
self.setProjectSummary()
self.G.save()
def getEdgeItem(self, name, default=None):
edge = self.G.get_edge(self.start, self.end)
return edge[name] if name in edge else default
def getDescriptionForPredecessor(self, node):
for pred in self.G.predecessors(node):
edge = self.G.get_edge(pred, node)
if edge['op'] != 'Donor':
return self.getModificationForEdge(pred, node)
return None
def getCurrentEdgeModification(self):
if self.start is None or self.end is None:
return None
return self.getModificationForEdge(self.start, self.end)
def findPaths(self,node, condition):
"""
Return a list of tuples. The first item of each tuple is the full path in reverse order
from the final node back to the given node. The second item is a boolean indicating whether the path
meets the condition.
:param node:
:param condition:
:return:
@rtype: list of (list,bool)
"""
successors = self.G.successors(node)
if len(successors) == 0:
return [([node],False)]
else:
paths=[]
for successor in successors:
for path in self.findPaths(successor, condition):
paths.append(path)
paths = [(path[0]+[node], condition(node, path[0][-1]) | path[1]) for path in paths]
return paths
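# Illustrative usage sketch ('node_x' is hypothetical); the condition is called with
# (node, successor) pairs as in the comprehension above.
#
#   donor_cond = lambda frm, to: model.getGraph().get_edge(frm, to)['op'] == 'Donor'
#   hits = [path for path, matched in model.findPaths('node_x', donor_cond) if matched]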
def findEdgePaths(self,node):
"""
Return a list of edge paths. Each path is a list of (node, successor) edge tuples in
reverse order from the final node back to the given node.
:param node:
:return:
@rtype: list of list of tuple
"""
successors = self.G.successors(node)
if len(successors) == 0:
return [[]]
else:
paths = []
for successor in successors:
# pair each path returned for this successor with the edge used to reach it
paths.extend([path + [(node, successor)] for path in self.findEdgePaths(successor)])
return paths
def getImage(self, name):
if name is None or name == '':
return ImageWrapper(np.zeros((250, 250, 4)).astype('uint8'))
return self.G.get_image(name)[0]
def getImageAndName(self, name, arguments=dict()):
"""
:param name:
:param arguments:
:return:
@rtype (ImageWrapper,str)
"""
if name is None or name == '':
return ImageWrapper(np.zeros((250, 250, 4)).astype('uint8')),''
return self.G.get_image(name, metadata=arguments)
def getStartImageFile(self):
return os.path.join(self.G.dir, self.G.get_node(self.start)['file'])
def getProxy(self):
return getValue(self.G.get_node(self.start),'proxyfile')
def setProxy(self, filename):
if filename is None:
if self.getProxy() is not None:
self.G.get_node(self.start).pop('proxyfile')
return
self.G.update_node(self.start,proxyfile=os.path.basename(filename))
def getNextImageFile(self):
return os.path.join(self.G.dir, self.G.get_node(self.end)['file'])
def startImage(self):
return self.getImage(self.start)
def nextImage(self):
if self.end is None:
dim = (250, 250) if self.start is None else self.getImage(self.start).size
return ImageWrapper(np.zeros((dim[1], dim[0])).astype('uint8'))
return self.getImage(self.end)
def updateSelectMask(self, selectMasks):
if self.end is None:
return
sms = []
for k, v in selectMasks.iteritems():
if v is not None:
sms.append({'mask': v[0], 'node': k})
self.G.update_edge(self.start, self.end, selectmasks=sms)
def getSelectMasks(self):
"""
        A selectMask is a mask that is used in composite mask production, overriding the default link mask
"""
if self.end is None:
return {}
edge = self.G.get_edge(self.start, self.end)
terminals = self._findTerminalNodes(self.end, excludeDonor=True,
includeOps=['Recapture', 'TransformWarp', 'TransformContentAwareScale',
'TransformDistort', 'TransformSkew', 'TransformSeamCarving'])
images = edge['selectmasks'] if 'selectmasks' in edge else []
sms = {}
for image in images:
if image['node'] in terminals:
sms[image['node']] = (
image['mask'], openImageFile(os.path.join(self.get_dir(), image['mask']), isMask=False))
for terminal in terminals:
if terminal not in sms:
sms[terminal] = None
return sms
def maskImageName(self):
if self.end is None:
return ''
edge = self.G.get_edge(self.start, self.end)
return edge['maskname'] if 'maskname' in edge else ''
def maskImageFileTime(self):
if self.end is None:
return 0
return self.G.get_edge_image_file_time(self.start, self.end, 'maskname')
def maskImage(self, inputmask=False):
mask = self.G.get_edge_image(self.start, self.end, 'maskname')
if self.end is None or mask is None:
dim = (250, 250) if self.start is None else self.getImage(self.start).size
return ImageWrapper(np.zeros((dim[1], dim[0])).astype('uint8'))
return mask
def maskStats(self):
if self.end is None:
return ''
edge = self.G.get_edge(self.start, self.end)
if edge is None:
return ''
stat_names = ['ssim', 'psnr', 'local psnr', 'local ssim', 'shape change', 'masks count', 'change size category',
'change size ratio']
return ' '.join([key + ': ' + formatStat(value) for key, value in edge.items() if key in stat_names])
def currentImage(self):
if self.end is not None:
return self.getImageAndName(self.end)
elif self.start is not None:
return self.getImageAndName(self.start)
return None, None
def selectNode(self, name):
if self.G.has_node(name):
self.start = name
self.end = None
def selectEdge(self, start, end):
if self.G.has_node(start):
self.start = start
if self.G.has_node(end):
self.end = end
def remove(self, children=False):
import copy
s = self.start
e = self.end
list_to_process= []
if children:
list_to_process = copy.copy(self.G.successors(self.end if self.end is not None else self.start))
def remove_children(children):
for child in children:
remove_children(self.G.successors(child))
self.G.remove(child)
                logging.getLogger('maskgen').debug('Removed node {}'.format(child))
self.notify((child, None), 'remove')
remove_children(list_to_process)
""" Remove the selected node or edge """
if (self.start is not None and self.end is not None):
if children:
self.G.remove(self.end, None)
self.labelNodes(self.start)
else:
self.G.remove_edge(self.start, self.end)
self.labelNodes(self.start)
self.labelNodes(self.end)
self.end = None
else:
name = self.start if self.end is None else self.end
p = self.G.predecessors(self.start) if self.end is None else [self.start]
self.G.remove(name, None)
self.start = p[0] if len(p) > 0 else None
self.end = None
for node in p:
self.labelNodes(node)
self.notify((s, e), 'remove')
def getProjectData(self, item, default_value=None):
return self.G.getDataItem(item, default_value=default_value)
def setProjectData(self, item, value, excludeUpdate=False):
"""
:param item:
:param value:
:param excludeUpdate: True if the update does not change the update time stamp on the journal
:return:
"""
self.notify((item,value),'meta')
self.G.setDataItem(item, value, excludeUpdate=excludeUpdate)
def getVersion(self):
""" Return the graph/software versio n"""
return self.G.getVersion()
def isFrozen(self):
return self.G.isFrozen()
def getGraph(self):
"""
:return: underlying graph
@rtype: ImageGraph
"""
return self.G
def validate(self, external=False, status_cb=None):
""" Return the list of errors from all validation rules on the graph.
@rtype: list of ValidationMessage
"""
notifier = self.notify.get_notifier_by_type(notifiers.ValidationNotifier)
if notifier is not None and not notifier.total_errors == None:
return notifier.total_errors
self._executeSkippedComparisons(status_cb=status_cb)
logging.getLogger('maskgen').info('Begin validation for {}'.format(self.getName()))
total_errors = self.validator.run_graph_suite(self.getGraph(), external=external, status_cb=status_cb)
for prop in getProjectProperties():
if prop.mandatory:
item = self.G.getDataItem(prop.name)
if item is None or len(item.strip()) < 3:
total_errors.append(
ValidationMessage(Severity.ERROR,
'',
'',
'Project property ' + prop.description + ' is empty or invalid',
'Mandatory Property',
None))
if notifier is not None:
self.notify.replace(notifiers.ValidationNotifier(total_errors))
return total_errors
def assignColors(self):
level = 1
edgeMap = dict()
foundColors = 0
colors = []
edges = 0
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if edge['op'] == 'Donor':
continue
edges += 1
if 'linkcolor' in edge:
foundColors += 1
colors.append(edge['linkcolor'])
if edges == foundColors and len(set(colors)) ==foundColors:
return
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if edge['op'] == 'Donor':
continue
edgeMap[edge_id] = (level, None)
level = level + 1
redistribute_intensity(edgeMap)
for k, v in edgeMap.iteritems():
self.G.get_edge(k[0], k[1])['linkcolor'] = str(list(v[1])).replace('[', '').replace(']', '').replace(
',', '')
return edgeMap
def __assignLabel(self, node, label):
prior = self.G.get_node(node)['nodetype'] if 'nodetype' in self.G.get_node(node) else None
if prior != label:
self.G.update_node(node, nodetype=label)
self.notify(node, 'label')
def renameFileImages(self):
"""
:return: list of node ids renamed
"""
renamed = []
for node in self.getNodeNames():
self.labelNodes(node)
nodeData = self.G.get_node(node)
if nodeData['nodetype'] in ['final']:
logging.getLogger('maskgen').info('Inspecting {} for rename'.format(nodeData['file']))
suffix = os.path.splitext(nodeData['file'])[1].lower()
file_path_name = os.path.join(self.G.dir, nodeData['file'])
try:
new_file_name = md5_of_file(os.path.join(self.G.dir, nodeData['file'])) + suffix
fullname = os.path.join(self.G.dir, new_file_name)
except:
logging.getLogger('maskgen').error(
'Missing file or invalid permission: {} '.format(nodeData['file']))
continue
if not os.path.exists(fullname):
try:
os.rename(file_path_name, fullname)
renamed.append(node)
logging.getLogger('maskgen').info('Renamed {} to {} '.format(nodeData['file'], new_file_name))
self.G.update_node(node, file=new_file_name)
except Exception as e:
try:
logging.getLogger('maskgen').error(
('Failure to rename file {} : {}. Trying copy').format(file_path_name, str(e)))
shutil.copy2(file_path_name, fullname)
logging.getLogger('maskgen').info(
'Renamed {} to {} '.format(nodeData['file'], new_file_name))
self.G.update_node(node, file=new_file_name)
except:
continue
else:
logging.getLogger('maskgen').warning('New name ' + new_file_name + ' already exists')
self.G.update_node(node, file=new_file_name)
self.save()
return renamed
def labelNodes(self, destination):
baseNodes = []
donorNodes = []
terminalNodes = []
candidateBaseDonorNodes = self._findBaseNodes(destination, excludeDonor=False)
for baseCandidate in candidateBaseDonorNodes:
foundTerminalNodes = self._findTerminalNodes(baseCandidate, excludeDonor=True)
terminalNodes.extend(foundTerminalNodes)
if len(foundTerminalNodes) > 0:
baseNodes.append(baseCandidate)
else:
donorNodes.append(baseCandidate)
for node in donorNodes:
self.__assignLabel(node, 'donor')
for node in baseNodes:
self.__assignLabel(node, 'base')
if len(self.G.successors(destination)) == 0:
if len(self.G.predecessors(destination)) == 0:
self.__assignLabel(destination, 'base')
else:
self.__assignLabel(destination, 'final')
elif len(self.G.predecessors(destination)) > 0:
self.__assignLabel(destination, 'interim')
elif 'nodetype' not in self.G.get_node(destination):
self.__assignLabel(destination, 'base')
def finalNodes(self):
final = []
for name in self.getNodeNames():
node = self.G.get_node(name)
if node['nodetype'] == 'final':
final.append(name)
return final
def baseNodes(self):
bases = []
for name in self.getNodeNames():
node = self.G.get_node(name)
if node['nodetype'] == 'base':
bases.append(name)
return bases
def _findTerminalNodes(self, node, excludeDonor=False, includeOps=None):
terminalsWithOps = self._findTerminalNodesWithCycleDetection(node, visitSet=list(), excludeDonor=excludeDonor)
return [terminalWithOps[0] for terminalWithOps in terminalsWithOps if
includeOps is None or len(set(includeOps).intersection(terminalWithOps[1])) > 0]
def _findTerminalNodesWithCycleDetection(self, node, visitSet=list(), excludeDonor=False):
succs = self.G.successors(node)
if len(succs) == 0:
return [(node, [])]
res = list()
for succ in succs:
if succ in visitSet:
continue
op = self.G.get_edge(node, succ)['op']
if op == 'Donor' and excludeDonor:
continue
visitSet.append(succ)
terminals = self._findTerminalNodesWithCycleDetection(succ,
visitSet=visitSet,
excludeDonor=excludeDonor)
for term in terminals:
term[1].append(op)
res.extend(terminals)
return res
def _findEdgesWithCycleDetection(self, node, excludeDonor=True, visitSet=list()):
preds = self.G.predecessors(node)
res = list()
for pred in preds:
if pred in visitSet:
continue
edge = self.G.get_edge(pred, node)
isNotDonor = (edge['op'] != 'Donor' or not excludeDonor)
if isNotDonor:
visitSet.append(pred)
res.append(EdgeTuple(start=pred, end=node, edge=edge))
res.extend(self._findEdgesWithCycleDetection(pred, excludeDonor=excludeDonor,
visitSet=visitSet) if isNotDonor else list())
return res
def _findBaseNodes(self, node, excludeDonor=True):
return [item[0] for item in mask_rules.findBaseNodesWithCycleDetection(self.G, node, excludeDonor=excludeDonor)]
def _findBaseNodesAndPaths(self, node, excludeDonor=True):
return [(item[0], item[2]) for item in mask_rules.findBaseNodesWithCycleDetection(self.G,node, excludeDonor=excludeDonor)]
def isDonorEdge(self, start, end):
edge = self.G.get_edge(start, end)
if edge is not None:
return edge['op'] == 'Donor'
return False
def getTerminalToBasePairs(self, suffix='.jpg'):
"""
find all pairs of leaf nodes to matching base nodes
:return list of tuples (leaf, base)
@rtype: list of (str,str)
"""
endPointTuples = self.getTerminalAndBaseNodeTuples()
pairs = list()
for endPointTuple in endPointTuples:
matchBaseNodes = [baseNode for baseNode in endPointTuple[1] if
suffix is None or self.G.get_pathname(baseNode).lower().endswith(suffix)]
if len(matchBaseNodes) > 0:
# if more than one base node, use the one that matches the name of the project
projectNodeIndex = matchBaseNodes.index(self.G.get_name()) if self.G.get_name() in matchBaseNodes else 0
baseNode = matchBaseNodes[projectNodeIndex]
startNode = endPointTuple[0]
# perfect match
# if baseNode == self.G.get_name():
# return [(startNode,baseNode)]
pairs.append((startNode, baseNode))
return pairs
def imageFromGroup(self, grp, software=None, **kwargs):
"""
:param grp:
:param software:
:param kwargs:
:return:
@type grp GroupFilter
@type software Software
"""
import copy
pairs_composite = []
resultmsgs = []
kwargs_copy = copy.copy(kwargs)
for filter in grp.filters:
msg, pairs = self.mediaFromPlugin(filter, software=software,
**kwargs_copy)
if msg is not None:
resultmsgs.extend(msg)
if len(pairs) == 0:
break
mod = self.getModificationForEdge(self.start,self.end)
for key,value in mod.arguments.iteritems():
if key not in kwargs_copy or not self.getGraph().isEdgeFilePath('arguments.' + key):
kwargs_copy[key] = value
pairs_composite.extend(pairs)
return resultmsgs, pairs_composite
def canPreviewMask(self):
allowed = self.getStartType() == 'video' or self.getEndType() == 'video'
modification = self.getCurrentEdgeModification()
edge = self.G.get_edge(self.start, self.end)
allowed &= getValue(edge, 'videomasks', None) is not None
op = getOperation(modification.operationName)
compare_func = op.getVideoCompareFunction()
allowed &= video_tools.Previewable(compare_func, modification.arguments)
return 'disabled' if not allowed else 'normal'
def substitutesAllowed(self):
allowed = False
modification = self.getCurrentEdgeModification()
if modification is not None:
allowed = getValue(modification.arguments, 'videoinputmaskname', '')
return 'disabled' if not allowed else 'normal'
def hasSubstituteMasks(self):
edge = self.getGraph().get_edge(self.start, self.end)
subs = getValue(edge, 'substitute videomasks', [])
return len(subs) > 0
def removeSubstituteMasks(self):
if self.hasSubstituteMasks():
edge = self.getGraph().get_edge(self.start, self.end)
edge.pop('substitute videomasks')
def addSubstituteMasks(self, filename):
edge = self.getGraph().get_edge(self.start, self.end)
subs = self.getLinkTool(self.start, self.end).addSubstituteMasks(self.start,
self.end,
self,
edge['op'],
arguments=getValue(edge,'arguments',{}),
filename=filename)
if subs is not None:
for sub in subs:
sub.pop('mask')
edge['substitute videomasks'] = subs
self.getGraph().addEdgeFilePath('substitute videomasks.videosegment','')
self.notify((self.start, self.end), 'update_edge')
return subs is not None
def mediaFromPlugin(self, filter, software=None, passthru=False, description=None, **kwargs):
"""
Use a plugin to create a new media item and link.
This method is given the plugin name, Image, the full pathname of the image and any additional parameters
required by the plugin (name/value pairs).
The name of the resulting image contains the prefix of the input image file name plus an additional numeric index.
If requested by the plugin (return True), the Exif is copied from the input image to the resulting image.
The method resolves the donor parameter's name to the donor's image file name.
If a donor is used, the method creates a Donor link from the donor image to the resulting image node.
If an input mask file is used, the input mask file is moved into the project directory.
Prior to calling the plugin, the output file is created and populated with the contents of the input file for convenience.
The filter plugin must update or overwrite the contents.
        The method returns a tuple with an error message and a list of pairs (links) added. The error message may be None if no error occurred.
@type filter: str
@type im: ImageWrapper
@type filename: str
@rtype: list of (ValidationMessage, list of (str,str))
"""
im, filename = self.currentImage()
filetype= fileType(filename)
op = plugins.getOperation(filter)
suffix = os.path.splitext(filename)[1].lower()
preferred = plugins.getPreferredSuffix(filter,filetype=filetype)
if type(preferred) == dict:
preferred = preferred[filetype]
fullOp = buildFilterOperation(op)
resolved, donors, graph_args, suffix_override, donorargs = self._resolvePluginValues(kwargs, fullOp)
if suffix_override is not None:
suffix = suffix_override
elif preferred is not None:
if preferred in donors:
suffix = os.path.splitext(resolved[preferred])[1].lower()
else:
suffix = preferred
target = os.path.join(tempfile.gettempdir(), self.G.new_name(self.start, suffix=suffix))
shutil.copy2(filename, target)
try:
msg = None
self.__addEdgeFilePaths(fullOp)
try:
if getValue(kwargs,'$$-pass-thru') or passthru:
extra_args, warning_message = None,None
else:
extra_args, warning_message = plugins.callPlugin(filter, im, filename, target, **resolved)
except Exception as e:
msg = str(e)
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_tb(exc_traceback, limit=10, file=sys.stderr)
logging.getLogger('maskgen').error(
'Plugin {} failed with {} given node {} for arguments {}'.format(filter, str(e),self.start, str(resolved)))
extra_args = None
if msg is not None:
return self._pluginError(filter, msg), []
if extra_args is not None and 'rename_target' in extra_args:
filename = extra_args.pop('rename_target')
newtarget = os.path.join(os.path.split(target)[0], os.path.split(filename)[1])
shutil.copy2(target, newtarget)
target = newtarget
if extra_args is not None and 'override_target' in extra_args:
filename = extra_args.pop('override_target')
target = os.path.join(os.path.split(target)[0], os.path.split(filename)[1])
if extra_args is not None and 'output_files' in extra_args:
file_params = extra_args.pop('output_files')
for name, value in file_params.iteritems():
extra_args[name] = value
self.G.addEdgeFilePath('arguments.' + name, '')
opInfo = self.gopLoader.getOperationWithGroups(op['name'], fake=True)
description = Modification(op['name'], filter + ':' + op['description'] if description is None else description,
category=opInfo.category,
generateMask=opInfo.generateMask,
semanticGroups=graph_args['semanticGroups'] if 'semanticGroups' in graph_args else [],
recordMaskInComposite=opInfo.recordMaskInComposite(filetype) if
'recordMaskInComposite' not in kwargs else kwargs['recordMaskInComposite'])
sendNotifications = kwargs['sendNotifications'] if 'sendNotifications' in kwargs else True
skipRules = kwargs['skipRules'] if 'skipRules' in kwargs else False
if software is None:
software = Software(op['software'], op['version'], internal=True)
if 'recordInCompositeMask' in kwargs:
description.setRecordMaskInComposite(kwargs['recordInCompositeMask'])
experiment_id = kwargs['experiment_id'] if 'experiment_id' in kwargs else None
description.setArguments(
{k: v for k, v in graph_args.iteritems() if k not in ['semanticGroups','sendNotifications', 'skipRules', 'experiment_id']})
if extra_args is not None and type(extra_args) == type({}):
for k, v in extra_args.iteritems():
if k not in kwargs or v is not None:
description.arguments[k] = v
description.setSoftware(software)
description.setAutomated('yes')
edge_parameters = {'plugin_name': filter,'experiment_id': experiment_id}
if 'global operation' in kwargs:
edge_parameters['global operation'] = kwargs['global operation']
results2, status = self.addNextImage(target,
mod=description,
sendNotifications=sendNotifications,
skipRules=skipRules,
position=self._getCurrentPosition((75 if len(donors) > 0 else 0, 75)),
edge_parameters=edge_parameters,
node_parameters={
'experiment_id': experiment_id} if experiment_id is not None else {})
pairs = list()
errors = []
if warning_message is not None:
errors.append(ValidationMessage(Severity.WARNING,
self.start,
self.start,
warning_message,
'Plugin {}'.format(filter),
None))
if results2 is not None:
errors.extend(results2)
finally:
os.remove(target)
if status:
pairs.append((self.start, self.end))
if sendNotifications:
self.notify((self.start, self.end), 'connect')
for donor in donors:
_end = self.end
_start = self.start
self.selectNode(kwargs[donor])
mod = Modification('Donor', '',category='Donor',automated='yes',arguments=donorargs)
self.connect(_end,mod=mod)
pairs.append((kwargs[donor], _end))
self.select((_start, _end))
# donor error message is removed. This annoys me (rwgdrummer).
# really need to classify rules and skip certain categories
errors = removeErrorMessages(errors,lambda msg: 'donor' in msg)
return errors, pairs
def _resolvePluginValues(self, args, operation):
parameters = {}
stripped_args = {}
donors = []
arguments = copy.copy(operation.mandatoryparameters)
arguments.update(operation.optionalparameters)
for k, v in args.iteritems():
if k in arguments or k in {'sendNotifications',
'override_suffix',
'skipRules',
'semanticGroups',
'experiment_id',
'recordInCompositeMask',
'donorargs',
'index'}:
parameters[k] = v
# if arguments[k]['type'] != 'donor':
stripped_args[k] = v
for k, v in args.iteritems():
if k in arguments and \
arguments[k]['type'] == 'donor':
parameters[k] = self.getImageAndName(v)[1]
if parameters[k] is None:
if os.path.exists(v):
parameters[k] = v
else:
logging.getLogger('maskgen').error('Donor {} not found'.format(v))
donors.append(k)
for arg, info in arguments.iteritems():
if arg not in parameters and 'defaultvalue' in info and \
info['defaultvalue'] is not None:
parameters[arg] = info['defaultvalue']
return parameters, donors, stripped_args, \
args['override_suffix'] if 'override_suffix' in args else None, \
getValue(args,'donorargs',{})
def _pluginError(self, filter, msg):
if msg is not None and len(msg) > 0:
return [ValidationMessage(Severity.ERROR,
self.start,
self.start,
'Plugin ' + filter + ': ' + msg,
'Plugin {}'.format(filter),
None)]
return None
def scanNextImageUnConnectedImage(self):
"""Scan for an image node with the same prefix as the currently select image node.
Scan in lexicographic order.
Exlude images that have neighbors.
Return None if a image nodee is not found.
"""
selectionSet = [node for node in self.G.get_nodes() if not self.G.has_neighbors(node) and node != self.start]
selectionSet.sort()
if (len(selectionSet) > 0):
matchNameSet = [name for name in selectionSet if name.startswith(self.start)]
selectionSet = matchNameSet if len(matchNameSet) > 0 else selectionSet
return selectionSet[0] if len(selectionSet) > 0 else None
def scanNextImage(self):
"""
        Scan for a file with the same prefix as the currently selected image node.
        Scan in lexicographic order.
        Exclude image files with names ending in _mask or image files that are already imported.
Return None if a file is not found.
"""
if self.start is None:
return None, None
suffix = self.start
seriesName = self.getSeriesName()
if seriesName is not None:
prefix = seriesName
prefix = prefix[0:32] if len(prefix) > 32 else prefix
files = [self.G.get_node(node)['file'] for node in self.G.get_nodes()]
def filterFunction(file):
return os.path.split(file)[1] not in files and \
not (file.rfind('_mask') > 0) and \
not (file.rfind('_proxy') > 0)
def findFiles(dir, preFix, filterFunction):
            matches = [os.path.abspath(os.path.join(dir, filename)) for filename in os.listdir(dir) if
                       (filename.startswith(preFix)) and filterFunction(os.path.abspath(os.path.join(dir, filename)))]
            matches = sorted(matches, key=lambda f: -os.stat(f).st_mtime)
            return matches
nfile = None
for file in findFiles(self.G.dir, prefix, filterFunction):
nfile = file
break
return self.G.openImage(nfile) if nfile is not None else None, nfile
def getDescriptions(self):
"""
:return: descriptions for all edges
@rtype list of Modification
"""
return [self.getModificationForEdge(edge[0], edge[1]) for edge in
self.G.get_edges()]
def openImage(self, nfile):
"""
:param nfile:
:return:
@rtype: (str, ImageWrapper)
"""
im = None
if nfile is not None and nfile != '':
im = self.G.openImage(nfile)
return nfile, im
def findEdgesByOperationName(self, opName):
return [edge for edge in [self.G.get_edge(edge[0], edge[1]) for edge in self.G.get_edges()]
if edge['op'] == opName]
def getPathExtender(self):
        """
        :return: Extend the composite or donor through current operation
        """
        from services.probes import CompositeExtender
#nodes = self._findTerminalNodes(self.start, excludeDonor=True)
#if len(nodes) > 0:
return CompositeExtender(self)
#else:
# return DonorExtender(self)
def export(self, location, include=[], redacted=[],notifier=None):
with self.lock:
self.clear_validation_properties()
self.compress(all=True)
path, errors = self.G.create_archive(location, include=include, redacted=redacted, notifier=notifier)
return path, [ValidationMessage(Severity.ERROR,error[0],error[1],error[2],'Export',None) for error in errors]
def export_path(self, location, redacted=[]):
"""
:param location:
:param redacted: a list of registered file paths to exclude @see ImageGraph.addEdgeFilePath
:return:
"""
if self.end is None and self.start is not None:
self.G.create_path_archive(location, self.start, redacted=redacted)
elif self.end is not None:
self.G.create_path_archive(location, self.end, redacted=redacted)
def _getCurrentPosition(self, augment):
if self.start is None:
return (50, 50)
startNode = self.G.get_node(self.start)
return ((startNode['xpos'] if startNode.has_key('xpos') else 50) + augment[0],
(startNode['ypos'] if startNode.has_key('ypos') else 50) + augment[1])
def getModificationForEdge(self, start, end):
"""
:param start:
:param end:
:param edge:
:return: Modification
@type start: str
@type end: str
@rtype: Modification
"""
end_node = self.G.get_node(end)
edge = self.G.get_edge(start, end)
if edge is None:
return None
default_ctime = end_node['ctime'] if 'ctime' in end_node else None
op = self.gopLoader.getOperationWithGroups(edge['op'], warning=True,fake=True)
return Modification(edge['op'],
edge['description'],
start=start,
end=end,
arguments=edge['arguments'] if 'arguments' in edge else {},
inputMaskName=edge['inputmaskname'] if 'inputmaskname' in edge and edge[
'inputmaskname'] and len(edge['inputmaskname']) > 0 else None,
changeMaskName=edge['maskname'] if 'maskname' in edge else None,
software=Software(edge['softwareName'] if 'softwareName' in edge else None,
edge['softwareVersion'] if 'softwareVersion' in edge else None,
'editable' in edge and edge['editable'] == 'no'),
recordMaskInComposite=edge[
'recordMaskInComposite'] if 'recordMaskInComposite' in edge else 'no',
semanticGroups=edge['semanticGroups'] if 'semanticGroups' in edge else None,
automated=edge['automated'] if 'automated' in edge else 'no',
username=edge['username'] if 'username' in edge else '',
ctime=edge['ctime'] if 'ctime' in edge else default_ctime,
errors=edge['errors'] if 'errors' in edge else list(),
maskSet=(VideoMaskSetInfo(edge['videomasks']) if (
'videomasks' in edge and len(edge['videomasks']) > 0) else None),
category=op.category,
generateMask=op.generateMask)
def getSemanticGroups(self, start, end):
edge = self.getGraph().get_edge(start, end)
if edge is not None:
return edge['semanticGroups'] if 'semanticGroups' in edge and edge['semanticGroups'] is not None else []
return []
def setSemanticGroups(self, start, end, grps):
"""
@type start: str
@type end: str
@type grps: list(str)
"""
edge = self.getGraph().get_edge(start, end)
if edge is not None:
self.getGraph().update_edge(start, end, semanticGroups=grps)
self.notify((self.start, self.end), 'update_edge')
def setProjectSummary(self):
groups = []
for edgeTuple in self.getGraph().get_edges():
edge = self.getGraph().get_edge(edgeTuple[0], edgeTuple[1])
semantic_groups = getValue(edge,'semanticGroups',[])
for group in semantic_groups:
if group not in groups:
groups.append(group)
self.setProjectData('semanticgroups', groups)
def set_validation_properties(self, qaState, qaPerson, qaComment, qaData):
import qa_logic
qa_logic.ValidationData(self,qaState,qaPerson,None,qaComment,qaData)
def clear_validation_properties(self):
import qa_logic
logic = qa_logic.ValidationData(self)
logic.clearProperties()
def set_probe_mask_memory(self, memory):
self.probeMaskMemory = memory
"""Not sure if this will ever see any use"""
def get_probe_mask_memory(self):
return self.probeMaskMemory
class VideoMaskSetInfo:
"""
Set of change masks video clips
"""
columnNames = ['Start', 'End', 'Frames', 'File']
func = [float,float,int,str]
columnKeys = ['starttime', 'endtime', 'frames', 'File']
columnValues = {}
def __init__(self, maskset):
self.maskset = maskset
self.columnValues = {'{:=02d}'.format(i):self._convert(maskset[i]) for i in range(len(maskset))}
def _convert(self, item):
return {'Start': self.tofloat(video_tools.get_start_time_from_segment(item)),
'End': self.tofloat(video_tools.get_end_time_from_segment(item)),
'Frames': video_tools.get_frames_from_segment(item),
'File': video_tools.get_file_from_segment(item,default_value='')}
def update(self, item_number, column, value):
video_tools.update_segment(self.maskset[item_number],
**{self.columnKeys[column] : self.func[column](value)})
video_tools.update_segment(self.maskset[item_number],rate= \
(video_tools.get_end_time_from_segment(self.maskset[item_number]) -
video_tools.get_start_time_from_segment(self.maskset[item_number])/
video_tools.get_frames_from_segment(self.maskset[item_number])))
def tofloat(self, o):
return o if o is None else float(o)
```
#### File: Media-Journaling-Tool/maskgen/software_loader.py
```python
import json
import logging
import os
from json import JSONEncoder
from maskgen.config import global_config
from maskgen_loader import MaskGenLoader
from maskgen.support import getValue
class OperationEncoder(JSONEncoder):
def default(self, o):
return o.__dict__
def strip_version(version):
return '.'.join(version.split('.')[:2]) if version is not None else ''
def getFileName(fileName, path=None):
import sys
if (os.path.exists(fileName)):
logging.getLogger('maskgen').debug( 'Loading ' + fileName)
return fileName
places = [os.getenv('MASKGEN_RESOURCES', 'resources')]
places.extend([os.path.join(x, 'resources') for x in sys.path if 'maskgen' in x or not x.endswith('egg') and \
os.path.exists(os.path.join(x, 'resources'))])
for place in places:
        newName = os.path.abspath(os.path.join(place, fileName))
        if os.path.exists(newName):
            logging.getLogger('maskgen').debug('Loading ' + newName)
            return newName
def extract_default_values(operation_arguments):
"""
    given argument definitions, return a dictionary mapping argument name to default value for each argument that declares a 'defaultvalue'
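    e.g. (hypothetical argument names) {'size': {'type': 'int', 'defaultvalue': 5}, 'mode': {'type': 'str'}} -> {'size': 5}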
:param operation_arguments:
:return:
@type dict
"""
return {k:v['defaultvalue'] for k,v in operation_arguments.iteritems() if 'defaultvalue' in v}
class ProjectProperty:
description = None
name = None
type = None
operations = None
parameter = None
rule = None
values = None
value = None
information = None
semanticgroup = False
node = False
readonly = False
mandatory= False
nodetype = None
defaultvalue = None
"""
@type operations: list of str
@type nodetype: str
"""
def __init__(self, name='', type='', operations=None, parameter=None, description=None,
information=None, value=None, values=None, rule=None, node=False, readonly=False,mandatory=True,
nodetype=None,semanticgroup=False,defaultvalue = None,includedonors=False):
self.name = name
self.type = type
self.operations = operations
self.parameter = parameter
self.description = description
self.rule = rule
self.values = values
self.value = value
self.information = information
self.node = node
self.readonly = readonly
self.mandatory = mandatory
self.nodetype = nodetype
self.semanticgroup = semanticgroup
self.defaultvalue = defaultvalue
self.includedonors = includedonors
class Operation:
name = None
category = None
includeInMask = {'default':False}
description = None
optionalparameters = {}
mandatoryparameters = {}
rules = []
analysisOperations = []
transitions = []
compareparameters = {}
generateMask = "all"
groupedOperations = None
groupedCategories = None
maskTransformFunction = None
compareOperations = None
parameter_dependencies = None
donor_processor = None
"""
    parameter_dependencies is a dictionary: { 'parameter name' : { 'parameter value' : 'dependent parameter name'}}
    If the parameter identified by 'parameter name' has the value 'parameter value', then the parameter identified by
    'dependent parameter name' is required.
    compareparameters are used to pick arguments and algorithms for link comparison and analysis functions.
Examples:
"function" :"maskgen.tool_set.cropCompare",
"video_function": "maskgen.video_tools.cropCompare"
"tolerance" : 0.0001
    maskTransformFunction is a dictionary of functions keyed by type of media, determining the
    transformation function applied to a mask as it is re-aligned to the final or base image for composite or
    donor mask construction, respectively. Examples:
"image": "maskgen.mask_rules.crop_transform",
"video":"maskgen.mask_rules.video_crop_transform"
    rules is a list of functions to apply to each link during validation. The signature of each function
is (op, graph, frm, to)
op = Operation
graph = maskgen.image_graph.ImageGraph
frm = str source node id
        to = str target node id
    transitions is a list of strings of the format source type '.' target type.
    The types identify media types (e.g. audio, video, zip and image). The transition identifies
allowed transitions supported by the specific operation. For example, 'video.image' states that the
associated operation can convert a video to an image.
generateMask states whether an operation analysis requires mask generation for 'all', 'frames', 'meta' or None.
    For the moment, 'all' and 'frames' are treated the same: frame and meta data are collected for each link comparing source
    and target media. generateMask currently only applies to video and audio.
analysisOperations is a list of function names that are used to populate the analysis dictionary collected at link
comparison time. Analysis can find transform matrices, shape changes, location identification, etc.
The results of analysis are often used by maskTransformFunction functions to construct composite and donor masks,
acting as the transform parameters.
groupedOperations and groupedCategories are lists of operations and categories represented by an agglomerative/composite
operation.
@type category: str
    @type generateMask: str
@type name: str
@type rules: list
@type transitions : list
@type description: str
@type analysisOperations: list
@type mandatoryparameters: dict
@type optionalparameters: dict
@type compareparameters: dict
@type parameter_dependencies: dict
@type maskTransformFunction:dict
@type donor_processor: str
"""
def __init__(self, name='', category='', includeInMask={"default": False}, rules=list(), optionalparameters=dict(),
mandatoryparameters=dict(), description=None, analysisOperations=list(), transitions=list(),
compareparameters=dict(),generateMask = "all",groupedOperations=None, groupedCategories = None,
maskTransformFunction=None,parameter_dependencies = None, qaList=None,donor_processor=None,
deprecated=False):
self.name = name
self.category = category
self.includeInMask = includeInMask
self.rules = rules
self.mandatoryparameters = mandatoryparameters if mandatoryparameters is not None else {}
self.optionalparameters = optionalparameters if optionalparameters is not None else {}
self.description = description
self.analysisOperations = analysisOperations
self.transitions = transitions
self.compareparameters = compareparameters
self.generateMask = generateMask
self.groupedOperations = groupedOperations
self.groupedCategories = groupedCategories
self.maskTransformFunction = maskTransformFunction
self.parameter_dependencies = parameter_dependencies
self.qaList = qaList
self.donor_processor = donor_processor
self.deprecated = deprecated
self.trigger_arguments = self._getTriggerUpdateArguments()
def _getTriggerUpdateArguments(self):
names = set()
for k,v in self.mandatoryparameters.iteritems():
if getValue(v,'trigger mask',False):
names.add(k)
for k,v in self.optionalparameters.iteritems():
if getValue(v,'trigger mask',False):
names.add(k)
return names
def getTriggerUpdateArguments(self):
return self.trigger_arguments
def recordMaskInComposite(self,filetype):
if filetype in self.includeInMask :
return 'yes' if self.includeInMask [filetype] else 'no'
if 'default' in self.includeInMask :
return 'yes' if self.includeInMask ['default'] else 'no'
return 'no'
def getParameterValuesForType(self, param_name, ftype, default_value=[]):
param = getValue(self.mandatoryparameters, param_name, getValue(self.optionalparameters, param_name,
{}))
return getValue(param, ftype + ':values', getValue(param, 'values', default_value), default_value)
def getDonorProcessor(self, default_processor = None):
if self.donor_processor is not None:
return getRule(self.donor_processor)
return getRule(default_processor)
def getConvertFunction(self):
if 'convert_function' in self.compareparameters:
funcName = self.compareparameters['convert_function']
return getRule(funcName)
return None
def getCompareFunction(self):
if 'function' in self.compareparameters:
funcName = self.compareparameters['function']
return getRule(funcName)
return None
def getVideoCompareFunction(self):
if 'video_function' in self.compareparameters:
funcName = self.compareparameters['video_function']
return getRule(funcName)
return None
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def getOperation(name, fake = False, warning=True):
"""
:param name: name of the operation
:param fake: Set to True to allow fake operations
:return: Operation
"""
if name == 'Donor':
return Operation(name='Donor', category='Donor',maskTransformFunction=
{'image':'maskgen.mask_rules.donor',
'video':'maskgen.mask_rules.video_donor',
'audio': 'maskgen.mask_rules.audio_donor',
})
if name not in getMetDataLoader().operations:
root_name = name.split('::')[0]
if root_name == name:
if warning:
logging.getLogger('maskgen').warning( 'Requested missing operation ' + str(name))
else:
return getOperation(root_name,fake=fake,warning=warning)
return getMetDataLoader().operations[name] if name in getMetDataLoader().operations else (Operation(name='name', category='Bad') if fake else None)
def getOperations():
return getMetDataLoader().operations
def getOperationsByCategory(sourcetype, targettype, excludeDeprecated=True):
result = {}
transition = sourcetype + '.' + targettype
for name, op in getMetDataLoader().operations.iteritems():
if op.deprecated and excludeDeprecated:
continue
if transition in op.transitions:
if op.category not in result:
result[op.category] = []
result[op.category].append(op.name)
return result
def getPropertiesBySourceType(source):
return getMetDataLoader().node_properties[source]
def getSoftwareSet():
return getMetDataLoader().software_set
def saveJSON(filename):
opnamelist = list(getMetDataLoader().operations.keys())
opnamelist.sort()
oplist = [getMetDataLoader().operations[op] for op in opnamelist]
with open(filename, 'w') as f:
json.dump({'operations': oplist}, f, indent=2, cls=OperationEncoder)
def loadProjectPropertyJSON(fileName):
"""
:param fileName:
:return:
@rtype: list of ProjectProperty
"""
res = list()
fileName = getFileName(fileName)
with open(fileName, 'r') as f:
props = json.load(f)
for prop in props['properties']:
res.append( ProjectProperty(name=prop['name'], type=prop['type'], description=prop['description'],
parameter=prop['parameter'] if 'parameter' in prop else None,
rule=prop['rule'] if 'rule' in prop else None,
values=prop['values'] if 'values' in prop else None,
value=prop['value'] if 'value' in prop else None,
node=prop['node'] if 'node' in prop else False,
information=prop['information'] if 'information' in prop else None,
operations=[prop['operation']] if 'operation' in prop else
(prop['operations'] if 'operations' in prop else []),
readonly=prop['readonly'] if 'readonly' in prop else None,
mandatory=prop['mandatory'] if 'mandatory' in prop else False,
semanticgroup=prop['semanticgroup'] if 'semanticgroup' in prop else False,
nodetype=prop['nodetype'] if 'nodetype' in prop else None,
defaultvalue=prop['defaultvalue'] if 'defaultvalue' in prop else None,
includedonors=prop['includedonors'] if 'includedonors' in prop else False))
return res
def loadOperationJSON(fileName):
"""
:param fileName:
    :return: tuple of (operations keyed by name, filter groups, version, node properties)
    @rtype: (dict of str:Operation, dict, str, dict)
"""
from collections import OrderedDict
from copy import deepcopy
def create_op(op):
return Operation(name=op['name'], category=op['category'], includeInMask=op['includeInMask'],
rules=op['rules'], optionalparameters=op['optionalparameters'] if 'optionalparameters' in op else {},
mandatoryparameters=op['mandatoryparameters'],
description=op['description'] if 'description' in op else None,
generateMask=op['generateMask'] if 'generateMask' in op else "all",
analysisOperations=op[
'analysisOperations'] if 'analysisOperations' in op else [],
transitions=op['transitions'] if 'transitions' in op else [],
compareparameters=op[
'compareparameters'] if 'compareparameters' in op else dict(),
maskTransformFunction=op['maskTransformFunction'] if 'maskTransformFunction' in op else None,
parameter_dependencies=op['parameter_dependencies'] if 'parameter_dependencies' in op else None,
qaList=op['qaList'] if 'qaList' in op else None,
donor_processor=op['donor_processor'] if 'donor_processor' in op else None,
deprecated=op['deprecated'] if 'deprecated' in op else False)
operations = OrderedDict()
fileName = getFileName(fileName)
dependencies = []
ops_by_name = {}
with open(fileName, 'r') as f:
ops = json.load(f)
for op in ops['operations']:
if '::' in op['name']:
dependencies.append(op)
else:
ops_by_name[op['name']] = op
operations[op['name']] = create_op(op)
for op in dependencies:
opname = op['name'].split('::')[0]
parent_op = deepcopy(ops_by_name[opname])
for k in op:
if k in parent_op and type(parent_op[k]) in [dict, OrderedDict]:
parent_op[k].update(op[k])
else:
parent_op[k] = op[k]
operations[op['name']] = create_op(parent_op)
return operations, ops['filtergroups'] if 'filtergroups' in ops else {}, ops['version'] if 'version' in ops else '0.4.0308.db2133eadc', \
ops['node_properties'] if 'node_properties' in ops else {}
customRuleFunc = {}
def loadCustomRules():
global customRuleFunc
import pkg_resources
for p in pkg_resources.iter_entry_points("maskgen_rules"):
logging.getLogger('maskgen').info( 'load rule ' + p.name)
customRuleFunc[p.name] = p.load()
def insertCustomRule(name,func):
global customRuleFunc
customRuleFunc[name] = func
def returnNoneFunction(*arg,**kwargs):
return None
def getRule(name, globals={}, noopRule=returnNoneFunction, default_module=None):
if name is None:
return noopRule
import importlib
global customRuleFunc
if name in customRuleFunc:
return customRuleFunc[name]
else:
if '.' not in name:
mod_name = default_module
func_name = name
func = globals.get(name)
if func is None:
if default_module is None:
logging.getLogger('maskgen').error('Rule Function {} not found'.format(name))
return noopRule
else:
return func
else:
mod_name, func_name = name.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
customRuleFunc[name] = func
return func#globals.get(name)
except Exception as e:
logging.getLogger('maskgen').error('Unable to load rule {}: {}'.format(name,str(e)))
return noopRule
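# Illustrative use of getRule (the dotted name is the example already cited in the docstrings above):
#   func = getRule('maskgen.mask_rules.crop_transform')
# imports the module 'maskgen.mask_rules', returns its 'crop_transform' attribute and caches it in
# customRuleFunc; a name that cannot be resolved logs an error and falls back to the no-op rule.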
def getProjectProperties():
"""
:return:
@rtype: list of ProjectProperty
"""
return getMetDataLoader().projectProperties
def getSemanticGroups():
return [prop.description for prop in getProjectProperties() if prop.semanticgroup]
def getFilters(filtertype):
if filtertype == 'filtergroups':
return getMetDataLoader().filters
else:
return {}
def _load_software_from_resource(fileName):
fileName = getFileName(fileName)
software_set = {'image': {}, 'video': {}, 'audio': {},'zip': {}, 'collection':{}}
category_set = {'gan': [], 'other': []}
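    # Each non-empty row of software.csv is expected to look like (hypothetical example):
    #   image,other,Adobe Photoshop,13.0,14.0,15.0
    # i.e. media type, category ('gan' or 'other'), software name, then supported versions.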
with open(fileName) as f:
line_no = 0
for l in f.readlines():
line_no += 1
l = l.strip()
if len(l) == 0:
continue
columns = l.split(',')
            if len(columns) < 3:
                logging.getLogger('maskgen').error(
                    'Invalid software description on line ' + str(line_no) + ': ' + l)
                # skip malformed rows rather than indexing past the available columns
                continue
software_type = columns[0].strip()
software_name = columns[2].strip()
software_category = columns[1].strip().lower()
versions = [strip_version(x.strip()) for x in columns[3:] if len(x) > 0]
if software_type not in ['both', 'image', 'video', 'audio', 'all', 'collection']:
logging.getLogger('maskgen').error('Invalid software type on line ' + str(line_no) + ': ' + l)
elif len(software_name) > 0:
types = ['image', 'video', 'zip'] if software_type == 'both' else [software_type]
types = ['image', 'video', 'audio', 'zip'] if software_type == 'all' else types
types = ['video', 'audio'] if software_type == 'audio' else types
types = ['zip'] if software_type == 'zip' else types
types = ['collection'] if software_type == 'collection' else types
for stype in types:
software_set[stype][software_name] = versions
category_set[software_category].append(software_name)
return {'software_set': software_set, 'category_set': category_set}
class MetaDataLoader:
version = ''
software_set = {}
software_category_set = {}
operations = {}
filters = {}
operationsByCategory = {}
node_properties = {}
def __init__(self):
self.reload()
def reload(self):
self.operations, self.filters, self.operationsByCategory, self.node_properties, self.operation_version = self._load_operations('operations.json')
self.software_set, self.software_category_set = self._load_software('software.csv')
self.projectProperties = self._load_project_properties('project_properties.json')
self.manipulator_names = self._load_manipulators('ManipulatorCodeNames.txt')
def _load_software(self, fileName):
sets = _load_software_from_resource(fileName)
softwareset = sets['software_set']
categoryset = sets['category_set']
return softwareset, categoryset
def merge(self,fileName):
softwareset = _load_software_from_resource(fileName)['software_set']
bytesOne = {}
bytesTwo = {}
namesOne = {}
namesTwo = {}
for atype,names in self.software_set.iteritems():
for name in names:
bytesOne[name] = atype
for name,versions in names.iteritems():
namesOne[name] = versions
for atype,names in softwareset.iteritems():
for name in names:
bytesTwo[name] = atype
for name,versions in names.iteritems():
namesTwo[name] = versions
for name,versions in namesTwo.iteritems():
if name not in namesOne:
logging.getLogger('maskgen').warn( 'missing ' + name)
else:
for version in versions:
if version not in namesOne[name]:
logging.getLogger('maskgen').warn( 'missing ' + str(version) + ' in ' + name)
for name, atype in bytesTwo.iteritems():
if name in bytesOne and atype != bytesOne[name]:
logging.getLogger('maskgen').warn( 'missing ' + str(atype) + ' in ' + name)
def _load_manipulators(self, filename):
file = getFileName(filename)
if file is not None:
if os.path.exists(file):
with open(file, 'r') as fp:
return [name.strip() for name in fp.readlines() if len(name) > 1]
def _load_project_properties(self, fileName):
"""
:param fileName:
:return:
@rtype: list ProjectProperty
"""
loadCustomRules()
projectProperties = loadProjectPropertyJSON(fileName)
return projectProperties
def _load_operations(self, fileName):
operations, filters, version, node_properties = loadOperationJSON(fileName)
logging.getLogger('maskgen').info('Loaded operation version ' + version)
operationsByCategory = {}
for op, data in operations.iteritems():
category = data.category
if category not in operationsByCategory:
operationsByCategory[category] = []
operationsByCategory[category].append(op)
return operations, filters, operationsByCategory, node_properties, version
def propertiesToCSV(self, filename):
import csv
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
with open(filename, 'w') as fp:
fp_writer = csv.writer(fp)
fp_writer.writerow(['JSON Name', 'Full Name', 'level', 'description', 'type', 'operations'])
for property in self.projectProperties:
opdata = [
property.name,
property.description,
'semantic group' if property.semanticgroup else 'node' if property.node else 'project',
property.information,
property.type,
' '.join(property.operations) if property.operations is not None else ''
]
try:
fp_writer.writerow(opdata)
except:
print ' '.join(opdata)
def operationsToCSV(self,filename):
import csv
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
with open(filename,'w') as fp:
fp_writer = csv.writer(fp)
fp_writer.writerow(['category','operation','description','transitions','argument1','argument1 description'])
for cat, ops in self.operationsByCategory.iteritems():
for opname in ops:
op = self.operations[opname]
opdata = [
cat,
op.name,
op.description,
' '.join(op.transitions),
]
for name, val in op.mandatoryparameters.iteritems():
opdata.extend([name, val['description']])
for name, val in op.optionalparameters.iteritems():
opdata.extend([name, val['description']])
try:
fp_writer.writerow(opdata)
except:
print ' '.join(opdata)
def getProperty(self, propertyname):
for prop in self.projectProperties:
if propertyname == prop.name:
return prop
def getProjectProperty(name, prop_type):
"""
:param name: name of property
:param prop_type: one of 'semanticgroup' or 'node' or 'project'
:return: ProjectProperty
@type name: str
@type prop_type: str
@rtype: list of ProjectProperty
"""
for prop in getProjectProperties():
if (prop.description == name or prop.name == name) and \
((prop.semanticgroup and prop_type == 'semanticgroup') or
(prop.node and prop_type == 'node') or (prop_type == 'project'
and not (prop.node or prop.semanticgroup))):
return prop
return None
def toSoftware(columns):
return [x.strip() for x in columns[1:] if len(x) > 0]
def getMetDataLoader():
"""
:return:
@rtype: MetaDataLoader
"""
if 'metadataLoader' not in global_config:
global_config['metadataLoader'] = MetaDataLoader()
return global_config['metadataLoader']
def operationVersion():
return getMetDataLoader().version
def validateSoftware(softwareName, softwareVersion):
for software_type, typed_software_set in getMetDataLoader().software_set.iteritems():
if softwareName in typed_software_set and softwareVersion in typed_software_set[softwareName]:
return True
return False
class Software:
name = None
version = None
internal = False
def __init__(self, name, version, internal=False):
self.name = name
self.version = version
self.internal = internal
class SoftwareLoader:
software = {}
preference = None
loader = MaskGenLoader()
def __init__(self):
self.load()
def load(self):
res = {}
self.preference = self.loader.get_key('software_pref')
newset = self.loader.get_key('software')
if newset is not None:
if type(newset) == list:
for item in newset:
if validateSoftware(item[0], item[1]):
res[item[0]] = item[1]
else:
for name, version in newset.iteritems():
if validateSoftware(name, version):
res[name] = version
self.software = res
def get_preferred_version(self, name=None):
if self.preference is not None and (name is None or name == self.preference[0]):
return self.preference[1]
if len(self.software) > 0:
if name in self.software:
return self.software[name]
elif name is None:
return self.software[self.software.keys()[0]]
return None
def get_preferred_name(self):
if self.preference is not None:
return self.preference[0]
if len(self.software) > 0:
return self.software.keys()[0]
return None
def get_names(self, software_type):
if software_type is None:
return []
return list(getMetDataLoader().software_set[software_type].keys())
def get_versions(self, name, software_type=None, version=None):
types_to_check = getMetDataLoader().software_set.keys() if software_type is None else [software_type]
for type_to_check in types_to_check:
versions = getMetDataLoader().software_set[type_to_check][name] if name in getMetDataLoader().software_set[type_to_check] else None
if versions is None:
continue
if version is not None and strip_version(version) not in versions:
versions = list(versions)
versions.append(version)
logging.getLogger('maskgen').warning( version + ' not in approved set for software ' + name)
return versions
return []
def add(self, software):
isChanged = False
if validateSoftware(software.name, software.version):
if not software.name in self.software or self.software[software.name] != software.version:
self.software[software.name] = software.version
isChanged = True
pref = self.preference
if pref is None or pref[0] != software.name or pref[1] != software.version:
self.preference = [software.name, software.version]
isChanged = True
return isChanged
def save(self):
self.loader.saveall([("software", self.software), ("software_pref", self.preference)])
```
#### File: Media-Journaling-Tool/maskgen/SystemCheckTools.py
```python
import json
import logging
from maskgen import ffmpeg_api
from maskgen.software_loader import getFileName
import re
from subprocess import Popen, PIPE
import sys
logger = logging.getLogger("maskgen")
class VersionChecker:
def __init__(self):
self.platform = "Windows" if sys.platform.startswith("win") else "Mac" if sys.platform == "darwin" else "Linux"
version_file = getFileName("dependency_versions.json")
if version_file is None:
raise ValueError("dependency_versions.json was not found.")
with open(version_file, "r") as f:
self.versions = json.load(f)
def check_tool(self, tool, found_version):
if self.platform.lower() in self.versions and tool.lower() in self.versions[self.platform.lower()]:
if found_version not in self.versions[self.platform.lower()][tool.lower()] and \
self.versions[self.platform.lower()][tool.lower()] != "*.*":
return "{0} is not a supported version of {1} on {2}".format(found_version, tool, self.platform)
return None
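    # Illustrative shape of dependency_versions.json (hypothetical values):
    #   {"linux": {"ffmpeg": ["2.8.15"], "opencv": ["2.4.13"], "graphviz": "*.*"}}
    # With that file, check_tool('FFmpeg', '2.8.15') returns None (supported), while an
    # unlisted version returns a warning string instead.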
def check_ffmpeg(self):
return self.check_tool("FFmpeg", ffmpeg_api.get_ffmpeg_version())
def check_opencv(self):
import cv2
return self.check_tool("OpenCV", cv2.__version__)
def check_dot(self):
p = Popen(["dot", "-V"], stdout=PIPE, stderr=PIPE)
data = p.communicate()[1]
if p.returncode == 0:
v = re.findall("\d\.\d+\.\d", data)[0]
return self.check_tool("Graphviz", v)
else:
return "Unable to check Graphviz version"
```
#### File: Media-Journaling-Tool/maskgen/tool_set.py
```python
import imghdr
import math
import platform
import re
import sys
import threading
import warnings
from datetime import datetime
from subprocess import Popen, PIPE
from scipy import ndimage
from skimage.measure import compare_ssim
import cv2api
import loghandling
import maskgen.exif
from ffmpeg_api import get_ffprobe_tool, ffmpeg_overlay
from image_wrap import *
from maskgen.support import removeValue, getValue
from maskgen.userinfo import get_username
from maskgen_loader import MaskGenLoader
imagefiletypes = [("jpeg files", "*.jpg"), ("png files", "*.png"), ("tiff files", "*.tiff"), ("tiff files", "*.tif"),
("Raw NEF", "*.nef"), ("ARW Sony", "*.arw"), ("CRW Canon", "*.crw"), ("raw panasonic", "*.raw"),
("Raw 2 Panasonic", "*.rw2"), ("ORF Olympus", "*.orf"), ("MDC Minolta", "*.mdc"),
("PTX Pentax", "*.ptx"),
("PEF Pentax", "*.pef"), ("MRW Minolta", "*.nrw"), ("Adobe", "*.dng"),
("bmp files", "*.bmp"), ("pdf files", "*.pdf"), ('cr2', '*.cr2'), ('raf Fuji', '*.raf'),
("NITF files","*.ntf"),("NITF files","*.nitf"),('JP2','*.jp2'), ('Lytro Raw','*.lfr'),
("High Efficiency Image File Format", "*.heic"), ("High Efficiency Image File Format", "*.heif")]
videofiletypes = [("mpeg files", "*.mp4"), ("mov files", "*.mov"), ('wmv', '*.wmv'), ('m4p', '*.m4p'), ('m4v', '*.m4v'),
('f4v', '*.flv'), ("avi files", "*.avi"), ('asf', '*.asf'), ('mts', '*.mts'), ('3gp', '*.3gp'),
('mxf', '*.mxf')]
audiofiletypes = [("mpeg audio files", "*.m4a"), ("mpeg audio files", "*.m4p"), ("mpeg audio files", "*.mp3"),
("raw audio files", "*.raw"), ("Audio Interchange File", "*.aif"),
("Audio Interchange File", "*.aiff"),
("Standard PC audio files", "*.wav"), ("Windows Media audio files", "*.wma")]
zipfiletypes = [('zip of images','*.zip'),('zip of images','*.gz'),('zip of images','*.tgz')]
textfiletypes = [("CSV file", "*.csv"), ("json file", "*.json"), ("text file", "*.txt"), ("log file","*.log")]
suffixes = [".nef", ".jpg", ".png", ".tiff", ".bmp", ".avi", ".mp4", ".mov", ".wmv", ".ppm", ".pbm", ".mdc",".gif",
".raf", ".ptx", ".pef", ".mrw",".dng", ".zip",".gz", ".cr2",".jp2",
".wav", ".wma", ".m4p", ".mp3", ".m4a", ".raw", ".asf", ".mts",".tif",".arw",".orf",".raw",".rw2",".crw"]
maskfiletypes = [("png files", "*.png"), ("zipped masks", "*.tgz")]
modelfiletypes = [('3D Studio', '*.3ds'), ('Blender', '*.blen'), ('Collada', '*.dae'), ('AutoCAD', '*.dxf'),
('Autodesk Exchange', '*.fbx'), ('geoTIFF', '*.tif'), ('gITF', '*.gITF'), ('Lightwave', '*.lwo'),
('OBJ Files', '*.obj'), ('OFF File', '*.off'), ('PLY Files', '*.ply'), ('PTS Files', '*.pts'),
('PTX Files', '*.ptx'), ('Sculptris', '*.sc1'), ('Pro/ENGINEER', '*.scl'),
('Google Sketchup', '*.skp'), ('STL File', '*.stl'), ('TRI Files', '*.tri'), ('V3D Files', '*.v3d'),
('VRML (WRL Files)', '*.wrl'), ('X3D Files', '*.x3d'), ('X3DV Files', '*.x3dv'),
('SoftImage', '*.xsi'), ('ZBrush', '*.ztl'), ('XYZ Files', '*.xyz')]
class S3ProgessComposite(object):
def __init__(self,progress_monitors = []):
self.progress_monitors = progress_monitors
def __call__(self, bytes_amount):
for pm in self.progress_monitors:
pm(bytes_amount)
class S3ProgressPercentage(object):
def __init__(self, filename, log = None):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._percentage_so_far = 0
self._lock = threading.Lock()
self.log = log if log is not None else logging.getLogger('maskgen').info
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
if (percentage - self._percentage_so_far) > 5:
self.log(
"%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
self._percentage_so_far = percentage
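# Illustrative use (bucket and key are hypothetical): given a boto3 S3 client 's3', pass an instance
# as the transfer callback,
#   s3.upload_file(path, 'my-bucket', 'journals/' + os.path.basename(path), Callback=S3ProgressPercentage(path))
# so progress is logged roughly every 5% of the upload.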
def exportlogsto3(location, last_uploaded):
import boto3
loghandling.flush_logging()
logging_file = get_logging_file()
if logging_file is not None and last_uploaded != logging_file:
logging_file_name = os.path.split(logging_file)[1]
s3 = boto3.client('s3', 'us-east-1')
bucket = location.split('/')[0].strip()
directory = location[location.find('/') + 1:].strip()
directory = directory[:-1] if directory.endswith('/') else directory
directory = directory[:directory.rfind('/') + 1:].strip() + "logs/"
try:
s3.upload_file(logging_file, bucket, directory + get_username() + '_' + logging_file_name)
except:
logging.getLogger('maskgen').error("Could not upload prior log file to " + directory)
return logging_file
def fetchbyS3URL(url):
import boto3
location = url[5:] if url.startswith('s3://') else url
parts = location.split('/')
BUCKET = parts[0].strip()
location = location[location.find('/') + 1:].strip()
file = parts[-1]
s3 = boto3.resource('s3')
destination = os.path.join('.', file)
my_bucket = s3.Bucket(BUCKET)
my_bucket.download_file(location, destination)
return destination
def get_icon(name):
places = [] # ['./icons']
places.extend([os.path.join(x, 'icons/' + name) for x in sys.path if ('maskgen' in x or not x.endswith('egg')) and \
os.path.exists(os.path.join(x, 'icons'))])
for place in places:
if os.path.exists(place):
return place
return None
def get_logging_file():
"""
:return: The last roll over log file
"""
newest = None
newest_time = None
filename = 'maskgen.log.'
for item in os.listdir('.'):
if item.startswith(filename):
t = os.stat(item).st_ctime
if newest_time is None or newest_time < t:
newest = item
newest_time = t
return newest
def getImageFileTypes():
prefLoader = MaskGenLoader()
filetypes = prefLoader.get_key('filetypes')
filetypes = [] if filetypes is None else filetypes
types = [tuple(x) for x in filetypes]
tset = set([x[1] for x in types])
for suffix in getFileTypes():
if suffix[1] not in tset:
types.append(suffix)
return types
def getMaskFileTypes():
return maskfiletypes
def getFileTypes():
return imagefiletypes + videofiletypes + audiofiletypes + zipfiletypes
def fileTypeChanged(file_one, file_two):
"""
Return: True if the file types of the two provided files do not match
"""
try:
one_type = fileType(file_one)
two_type = fileType(file_two)
return one_type != two_type
except:
return os.path.splitext(file_one)[1].lower() != os.path.splitext(file_two)[1].lower()
def runCommand(command,outputCollector=None):
p = Popen(command, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
errors = []
if p.returncode == 0:
if outputCollector is not None:
for line in stdout.splitlines():
outputCollector.append(line)
if p.returncode != 0:
try:
if stderr is not None:
for line in stderr.splitlines():
if len(line) > 2:
errors.append(line)
except OSError as e:
errors.append(str(e))
return errors
def isVideo(filename):
ffmpegcommand = [get_ffprobe_tool(), filename]
try:
p = Popen(ffmpegcommand, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return stderr.find('Invalid data') < 0
except:
return False
def getMimeType(filename):
import subprocess
import shlex
cmd = shlex.split('file --mime-type "{0}"'.format(filename))
try:
result = subprocess.check_output(cmd)
return (result.split(':')[1]).split('/')[0].strip()
except Exception as e:
logging.getLogger('maskgen').error('Cannot determine file type for "{}": {}'.format(
filename,
str(e)
))
raise ValueError('Cannot determine file type for "{}"'.format(
filename
))
def zipFileType(fileName):
parts = fileName.lower().split('.')
if parts[-1] not in ['zip','gz','tgz']:
return None
return fileType('.'.join(parts[0:-1]))
def fileType(fileName):
if os.path.isdir(fileName):
return 'dir'
lowerName = fileName.lower()
suffixes = lowerName.split('.')
suffix = '*.' + suffixes[-1] if len(suffixes) > 0 else ''
file_type = None
if suffix in ['*.zip', '*.tgz', '*.gz']:
file_type = 'zip'
if len(suffixes) > 2:
content_type = '*.' + suffixes[-2]
if content_type not in [x[1] for x in imagefiletypes]:
file_type = 'collection'
elif suffix in [x[1] for x in imagefiletypes] or (os.path.exists(fileName) and imghdr.what(fileName) is not None):
file_type = 'image'
elif suffix in [x[1] for x in audiofiletypes]:
file_type = 'audio'
elif suffix in [x[1] for x in textfiletypes]:
file_type = 'text'
elif suffix in [x[1] for x in videofiletypes] or isVideo(fileName):
file_type = 'video'
return getMimeType(fileName) if file_type is None else file_type
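# Added illustrative sketch (not part of the original module); shows how fileType resolves a few
# hypothetical, non-existent file names purely from the suffix tables above.
def _example_fileType_usage():
    assert fileType('sample.mp4') == 'video'           # video suffix match
    assert fileType('sample.jpg.zip') == 'zip'         # zip whose inner suffix is an image type
    assert fileType('sample.csv.tgz') == 'collection'  # zip whose inner suffix is not an image type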
def getOS():
return platform.system() + ' ' + platform.release() + ' ' + platform.version()
def openFile(fileName):
"""
Open a file using a native OS associated program
"""
import sys
if fileName.endswith('.hdf5'):
fileName = convertToVideo(fileName, preferences=MaskGenLoader())
if sys.platform.startswith('linux'):
os.system('xdg-open "' + fileName + '"')
elif sys.platform.startswith('win'):
os.startfile(fileName)
else:
os.system('open "' + fileName + '"')
class IntObject:
value = 0
def __init__(self):
pass
def set(self, value):
self.value = value
def increment(self):
self.value += 1
return self.value
def imageResize(img, dim):
"""
:param img:
:param dim:
:return:
@rtype: ImageWrapper
"""
return img.resize(dim, Image.ANTIALIAS).convert('RGBA')
def imageResizeRelative(img, dim, otherImDim):
"""
    Preserves the dimension ratios.
:param dim:
:param otherImDim: dimensions of other image
:return: Resized relative to width given the maximum constraints
@rtype: ImageWrapper
"""
if otherImDim is None and img is not None:
otherImDim = img.size
if img is None:
img = ImageWrapper(np.zeros((otherImDim[1], otherImDim[0]), dtype=np.uint8))
wmax = max(img.size[0], otherImDim[0])
hmax = max(img.size[1], otherImDim[1])
wpercent = float(dim[0]) / float(wmax)
hpercent = float(dim[1]) / float(hmax)
perc = min(wpercent, hpercent)
wsize = int((float(img.size[0]) * float(perc)))
hsize = int((float(img.size[1]) * float(perc)))
return img.resize((wsize, hsize), Image.ANTIALIAS)
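# Worked example (added; the sizes are hypothetical): resizing a 2000x1000 image relative to a
# 4000x3000 image with dim=(500, 500) gives wmax=4000, hmax=3000, wpercent=0.125, hpercent~=0.167,
# so perc=0.125 and the result is 250x125, preserving the aspect ratio of the input image.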
def validateCoordinates(v):
"""
Coordinates are [x,y] or (x,y) or x,y where x and y are integers.
Return False if the coordinates are invalid.
"""
try:
return len([int(re.sub('[()]', '', x)) for x in v.split(',')]) == 2
except ValueError:
return False
def sumMask(mask):
return int(np.sum(mask))
class VidTimeManager:
"""
frameCountWhenStarted: record the frame at start
frameCountWhenStopped: record the frame at finish
"""
def __init__(self, startTimeandFrame=None, stopTimeandFrame=None):
self.startTimeandFrame = startTimeandFrame
self.stopTimeandFrame = stopTimeandFrame
#if startTimeandFrame is not None and startTimeandFrame[1] > 0 and startTimeandFrame[0] > 0:
# self.startTimeandFrame = (startTimeandFrame[0],startTimeandFrame[1]+1)
#if stopTimeandFrame is not None and stopTimeandFrame[1] > 0 and stopTimeandFrame[0] > 0:
# self.stopTimeandFrame = (stopTimeandFrame[0],stopTimeandFrame[1]+1)
self.pastEndTime = False
self.beforeStartTime = True if startTimeandFrame else False
self.reachedEnd = False
self.milliNow = 0
self.frameCountWhenStopped = 0
self.frameCountWhenStarted = 0
self.frameSinceBeginning = 0
self.frameCountSinceStart = 0
self.frameCountSinceStop = 0
def isAtBeginning(self):
return self.startTimeandFrame is None or (self.startTimeandFrame[0] < 0 and self.startTimeandFrame[1] < 2)
def spansToEnd(self):
return self.stopTimeandFrame is None or (self.stopTimeandFrame[0] is None and self.stopTimeandFrame[1] is None)
def getExpectedStartFrameGiveRate(self, rate, defaultValue=None):
if not self.startTimeandFrame:
return defaultValue
return self.startTimeandFrame[1] + (self.startTimeandFrame[0] / 1000.0) * float(rate)
def getExpectedEndFrameGiveRate(self, rate, defaultValue=None):
if not self.stopTimeandFrame:
return defaultValue
val = int(self.stopTimeandFrame[1] + (self.stopTimeandFrame[0] / 1000.0) * float(rate))
if val == 0:
return defaultValue
return self.stopTimeandFrame[1] + (self.stopTimeandFrame[0] / 1000.0) * float(rate)
def getStartFrame(self):
return self.frameCountWhenStarted if self.startTimeandFrame else 1
def getEndFrame(self):
return self.frameCountWhenStopped if self.stopTimeandFrame and self.frameCountWhenStopped else self.frameSinceBeginning
def updateToNow(self, milliNow, frames=1):
"""
:param milliNow: time after the frame is to be displayed or sound emitted
:param frames:
:return:
"""
self.milliNow = milliNow
self.frameSinceBeginning += frames
if self.stopTimeandFrame:
if self.milliNow > self.stopTimeandFrame[0]:
self.frameCountSinceStop += frames
if self.frameCountSinceStop >= self.stopTimeandFrame[1]:
self.frameCountWhenStopped = self.frameSinceBeginning
self.reachedEnd = True
if not self.pastEndTime and self.frameCountSinceStop > self.stopTimeandFrame[1]:
self.pastEndTime = True
self.frameCountWhenStopped = self.frameSinceBeginning - 1
if self.startTimeandFrame:
if self.milliNow > self.startTimeandFrame[0]:
self.frameCountSinceStart += frames
if self.frameCountSinceStart >= self.startTimeandFrame[1]:
if self.beforeStartTime:
self.frameCountWhenStarted = self.frameSinceBeginning
self.beforeStartTime = False
def setStopFrame(self, frame):
if self.stopTimeandFrame is not None and self.stopTimeandFrame[0] > 0:
self.frameCountSinceStop = self.frameSinceBeginning
self.stopTimeandFrame = (0,frame)
def isOpenEnded(self):
return self.stopTimeandFrame is None
def isEnd(self):
return self.reachedEnd
def isPastTime(self):
return self.pastEndTime
def isPastStartTime(self):
return self.startTimeandFrame and self.milliNow > self.startTimeandFrame[0] and \
self.frameCountSinceStart > self.startTimeandFrame[1]
def isBeforeTime(self):
return self.beforeStartTime
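# Illustrative usage sketch (added; not part of the original class, the timings are hypothetical).
# A frame-decoding loop at ~30 fps that only processes frames between 1s and 2s might look like:
#   tm = VidTimeManager(startTimeandFrame=(1000, 0), stopTimeandFrame=(2000, 0))
#   for frame_no in range(1, 91):
#       tm.updateToNow(frame_no * 1000.0 / 30.0)
#       if tm.isBeforeTime():
#           continue              # still before the requested start
#       if tm.isPastTime():
#           break                 # past the requested stop
#       # ... process the frame ...
#   first, last = tm.getStartFrame(), tm.getEndFrame()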
def getFrameDurationString(st, et):
"""
    Calculate the duration between the two time strings; returns None if the end precedes the start.
"""
try:
stdt = datetime.strptime(st, '%H:%M:%S.%f')
except ValueError:
stdt = datetime.strptime(st, '%H:%M:%S')
try:
etdt = datetime.strptime(et, '%H:%M:%S.%f')
except ValueError:
etdt = datetime.strptime(et, '%H:%M:%S')
delta = etdt - stdt
if delta.days < 0:
return None
sec = delta.seconds
sec += (1 if delta.microseconds > 0 else 0)
hr = sec / 3600
mi = sec / 60 - (hr * 60)
ss = sec - (hr * 3600) - mi * 60
return '{:=02d}:{:=02d}:{:=02d}'.format(hr, mi, ss)
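# Worked example (added): getFrameDurationString('00:01:02.500', '00:02:03.000') yields '00:01:01';
# the 0.5 second remainder is rounded up to a whole second.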
def getSecondDurationStringFromMilliseconds(millis):
sec = int(millis / 1000)
ms = int(millis - (sec * 1000))
return '{:=02d}.{:=03d}'.format(sec, ms)
def getDurationStringFromMilliseconds(millis):
sec = int(millis / 1000)
ms = int((millis - (sec * 1000)) * 1000.0)
hr = sec / 3600
mi = sec / 60 - (hr * 60)
ss = sec - (hr * 3600) - mi * 60
return '{:=02d}:{:=02d}:{:=02d}.{:=06d}'.format(hr, mi, ss, ms)
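# Worked examples (added): 3725250 ms is 1h 2m 5.25s, so
# getSecondDurationStringFromMilliseconds(3725250) -> '3725.250' and
# getDurationStringFromMilliseconds(3725250) -> '01:02:05.250000'.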
def addTwo(num_string):
return int(num_string) + 2
def sutractOne(num_string):
return int(num_string) - 1
def addOneFrame(time_string):
time_val = getMilliSecondsAndFrameCount(time_string, defaultValue=(0,0))
return str(time_val[1] + 1)
def subtractOneFrame(time_string):
time_val = getMilliSecondsAndFrameCount(time_string, defaultValue=(0,1))
return str(time_val[1] - 1) if time_val[1] > 1 else '0'
def addFrame(millisAndFrame, frames):
return millisAndFrame[0], millisAndFrame[1] + frames
def differenceBetweenFrame(mandf1, mandf2, rate):
timediff = mandf1[0] - mandf2[0]
frames = int(timediff*rate/1000.0)
return frames + (mandf1[1] - mandf2[1])
def differenceBetweeMillisecondsAndFrame(mandf1, mandf2, rate):
return mandf1[0] - mandf2[0] + (rate * (mandf1[1] - mandf2[1]))
def differenceInFramesBetweenMillisecondsAndFrame(mandf1, mandf2, rate):
return (mandf1[0] - mandf2[0]) / 1000.0 / rate + mandf1[1] - mandf2[1]
def getMilliSeconds(v):
if v is None:
return None, 0
if type(v) in [int,float]:
return v
dt = None
coloncount = v.count(':')
if coloncount == 0:
return int(float(v) * 1000.0)
try:
if '.' in v and len(v) > 15:
v = v[:15]
dt = datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
dt = datetime.strptime(v, '%H:%M:%S')
except ValueError:
return None
    millis = dt.hour * 3600000 + dt.minute * 60000 + dt.second * 1000 + dt.microsecond / 1000
return millis
def getMilliSecondsAndFrameCount(v, rate=None, defaultValue=None):
if v is None:
return defaultValue
if type(v) == int:
return (float(v) / rate * 1000, 0) if rate is not None else (0, 1 if v == 0 else v)
frame_count = 0
coloncount = v.count(':')
if coloncount > 2:
try:
frame_count = int(v[v.rfind(':') + 1:])
v = v[0:v.rfind(':')]
except:
return defaultValue
elif coloncount == 0:
return (float(v) / rate * 1000.0, 0) if rate is not None else (0, 1 if v == 0 else int(v))
try:
if '.' in v and len(v) > 15:
v = v[:15]
dt = datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
dt = datetime.strptime(v, '%H:%M:%S')
except ValueError:
return defaultValue
    millis = dt.hour * 3600000 + dt.minute * 60000 + dt.second * 1000 + dt.microsecond / 1000
if rate is not None:
millis += float(frame_count) / rate * 1000.0
frame_count = 0
return (millis, frame_count) if (millis, frame_count) != (0, 0) else (0, 1)
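# Added illustrative sketch (not part of the original module); shows a few hypothetical parses of
# getMilliSecondsAndFrameCount when no rate is supplied.
def _example_getMilliSecondsAndFrameCount():
    assert getMilliSecondsAndFrameCount('00:00:02.500000') == (2500, 0)    # milliseconds, no frame offset
    assert getMilliSecondsAndFrameCount('00:00:02.500000:3') == (2500, 3)  # trailing ':3' is a frame offset
    assert getMilliSecondsAndFrameCount('10') == (0, 10)                   # a bare number is a frame count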
def validateTimeString(v):
if type(v) == int:
return True
if v.count(':') > 2:
return False
if v.count(':') == 0:
try:
int(v)
except:
return False
return True
try:
datetime.strptime(v, '%H:%M:%S.%f')
except ValueError:
try:
datetime.strptime(v, '%H:%M:%S')
except ValueError:
return False
return True
def validateAndConvertTypedValue(argName, argValue, operationDef, skipFileValidation=True):
"""
Validate a typed operation argument
return the type converted argument if necessary
raise a ValueError if invalid
"""
if not argValue or len(str(argValue)) == 0:
raise ValueError(argName + ' cannot be an empty string')
argDef = operationDef.optionalparameters[argName] if argName in operationDef.optionalparameters else None
argDef = operationDef.mandatoryparameters[
argName] if not argDef and argName in operationDef.mandatoryparameters else argDef
if argDef:
if argDef['type'] == 'imagefile':
if not os.path.exists(argValue) and not skipFileValidation:
raise ValueError(argName + ' is an invalid file')
elif argDef['type'].startswith('float'):
typeDef = argDef['type']
vals = [float(x) for x in typeDef[typeDef.rfind('[') + 1:-1].split(':')]
if float(argValue) < vals[0] or float(argValue) > vals[1]:
raise ValueError(argName + ' is not within the defined range')
return float(argValue)
elif argDef['type'].startswith('int'):
typeDef = argDef['type']
_match = re.search(r"\[(.*?)\]", typeDef).group(1)
vals = [int(x) for x in _match.split(':')]
if int(argValue) < vals[0] or int(argValue) > vals[1]:
raise ValueError(argName + ' is not within the defined range')
return int(argValue)
elif argDef['type'] == 'list':
if argValue not in argDef['values']:
raise ValueError(argValue + ' is not one of the allowed values')
elif argDef['type'] in ('frame_or_time', 'time'):
if not validateTimeString(argValue):
raise ValueError(argValue + ' is not a valid time (e.g. HH:MM:SS.micro)')
elif argDef['type'] == 'yesno':
if argValue.lower() not in ['yes', 'no']:
raise ValueError(argName + ' is not yes or no')
elif argDef['type'] == 'coorindates':
if not validateCoordinates(argValue):
                raise ValueError(argName + ' is not a valid coordinate (e.g. (6,4))')
return argValue
def _processFileMeta(stream):
streams = []
if stream is None:
return streams
for line in stream.splitlines():
if line is None or len(line) == 0:
break
if 'Stream' in line:
if 'Audio' in line:
streams.append('audio')
if 'Video' in line:
streams.append('video')
return streams
def getFileMeta(filename):
ffmpegcommand = os.getenv('MASKGEN_FFPROBETOOL', 'ffprobe')
try:
stdout, stderr = Popen([ffmpegcommand, filename], stdout=PIPE, stderr=PIPE).communicate()
if stderr is not None:
meta = _processFileMeta(stderr)
if stdout is not None:
meta.extend(_processFileMeta(stdout))
return meta
except Exception as e:
logging.getLogger('maskgen').error('FFMPEG error (is it installed?): ' + str(e))
return {}
def millisec2time(milliseconds):
''' Convert milliseconds to 'HH:MM:SS.FFF' '''
s, ms = divmod(milliseconds, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if ms > 0:
pattern = r'%02d:%02d:%02d.%03d'
return pattern % (h, m, s, ms)
else:
pattern = r'%02d:%02d:%02d'
return pattern % (h, m, s)
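# Worked example (added): millisec2time(3723250) -> '01:02:03.250'; a whole-second value such as
# millisec2time(3723000) drops the fractional part and yields '01:02:03'.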
def outputVideoFrame(filename, outputName=None, videoFrameTime=None, isMask=False):
import os
ffcommand = os.getenv('MASKGEN_FFMPEG', 'ffmpeg')
if outputName is not None:
outfilename = outputName
else:
outfilename = os.path.splitext(filename)[0] + '.png'
command = [ffcommand, '-i', filename]
if videoFrameTime is not None:
st = videoFrameTime[0] + 30 * videoFrameTime[1]
command.extend(['-ss', millisec2time(st)])
command.extend(['-vframes', '1', outfilename])
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
p.communicate()
p.wait()
except OSError as e:
logging.getLogger('maskgen').error("FFmpeg not installed")
logging.getLogger('maskgen').error(str(e))
raise e
return openImage(outfilename, isMask=isMask)
class ZipWriter:
def __init__(self, filename, fps=30):
from zipfile import ZipFile
postfix = filename[filename.rfind('.'):]
self.filename = filename + ('.zip' if postfix not in ['.tgz','.zip'] else '')
self.myzip = ZipFile(self.filename, 'w')
self.count = 0
self.fps = fps
self.prefix = os.path.basename(os.path.splitext(self.filename)[0])
#self.names = []
def isOpened(self):
#TODO: check names, what else
return True
def get(self,prop):
if prop == cv2api.cv2api_delegate.prop_fps:
return self.fps
if prop == cv2api.cv2api_delegate.prop_frame_count:
return self.count
if prop == cv2api.cv2api_delegate.prop_pos_msec:
return self.count * self.fps
def write(self, frame):
fname = "{}_{}.png".format(self.prefix, self.count)
ImageWrapper(frame,filename=fname).save(fname)
self.myzip.write(fname,fname)
self.count+=1
os.remove(fname)
def release(self):
fn = 'meta.csv'
with open(fn,'w') as fp:
            fp.write('frame_rate,{}\n'.format(self.fps))
self.myzip.write(fn, fn)
os.remove('meta.csv')
self.myzip.close()
class ZipCapture:
def __init__(self, filename, fps=30, filetypes=imagefiletypes):
from zipfile import ZipFile
import uuid
self.filename = filename
self.myzip = ZipFile(filename, 'r')
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
self.fps = fps
self.count = 0
self.dir = os.path.join(os.path.dirname(os.path.abspath(self.filename)) , uuid.uuid4().__str__())
os.mkdir(self.dir)
if 'meta.csv' in self.myzip.namelist():
self.loadMeta()
self.names = [name for name in self.myzip.namelist() if len(file_type_matcher.findall(name.lower())) > 0 and \
os.path.basename(name) == name]
self.exif = None
def loadMeta(self):
self.meta = {}
if 'meta.csv' in self.myzip.namelist():
fn = self._extract_name('meta.csv')
with open(fn,mode='r') as fp:
for line in fp.readlines():
parts = line.split(',')
self.meta[parts[0].lower().strip()] = ','.join(parts[1:])
self.fps = self.fps if 'frame_rate' not in self.meta else float(self.meta['frame_rate'])
def get_size(self):
return len(self.names)
def isOpened(self):
#TODO: check names, what else
return True
def _extract_name(self,name):
extracted_file = os.path.join(self.dir, name)
if not os.path.exists(extracted_file):
extracted_file = self.myzip.extract(name, self.dir)
return extracted_file
def get(self,prop):
if prop == cv2api.cv2api_delegate.prop_fps:
return self.fps
if prop == cv2api.cv2api_delegate.prop_frame_count:
return self.get_size()
if prop == cv2api.cv2api_delegate.prop_pos_msec:
return self.count* 1000.0/self.fps
exif = self.get_exif()
if prop == cv2api.cv2api_delegate.prop_frame_height:
return getExifDimensionsFromData(exif)[0][0]
if prop == cv2api.cv2api_delegate.prop_frame_width:
return getExifDimensionsFromData(exif)[0][1]
def grab(self):
self.count+=1
return self.count <= len(self.names)
def get_exif(self):
if self.exif is None:
name = self.names[min(len(self.names)-1,max(0, self.count - 1))]
extracted_file = self._extract_name (name)
self.exif = exif.getexif(extracted_file)
return self.exif
def retrieve(self):
if self.count > len(self.names):
return False, None
name = self.names[self.count-1]
extracted_file = self._extract_name (name)
return True, openImage(extracted_file, isMask=False).to_array()
def set_to_end(self):
self.count = len(self.names)
def retrieve_file(self):
if self.count > len(self.names):
return None
name = self.names[self.count-1]
extracted_file = self._extract_name (name)
return extracted_file
def read(self):
self.grab()
return self.retrieve()
def release(self):
import shutil
if self.dir is not None:
shutil.rmtree(self.dir)
self.myzip.close()
self.dir = None
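# Illustrative round trip (added sketch; the file name and frame contents are hypothetical and the
# calls perform real file I/O if executed):
#   writer = ZipWriter('frames.zip', fps=30)
#   writer.write(np.zeros((16, 16, 3), dtype='uint8'))   # stored as frames_0.png inside the archive
#   writer.release()                                      # also records meta.csv with the frame rate
#   cap = ZipCapture('frames.zip', fps=30)
#   ok, frame = cap.read()                                # extracts and opens the first frame
#   cap.release()                                         # removes the temporary extraction directory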
def readFromZip(filename, filetypes=imagefiletypes, videoFrameTime=None, isMask=False, snapshotFileName=None, fps=30):
from zipfile import ZipFile
import re
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
with ZipFile(filename, 'r') as myzip:
names = myzip.namelist()
names.sort()
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
i = 0
for name in names:
i += 1
elapsed_time = i * fps
if len(file_type_matcher.findall(name.lower())) == 0:
continue
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime() or videoFrameTime is None:
break
extracted_file = myzip.extract(name, os.path.dirname(os.path.abspath(filename)))
img = openImage(extracted_file, isMask=isMask)
if extracted_file != snapshotFileName and snapshotFileName is not None:
img.save(snapshotFileName)
return img
def readFromArchive(filename, filetypes=imagefiletypes, videoFrameTime=None, isMask=False, snapshotFileName=None, fps=30):
import tarfile
import re
file_type_matcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')')
    archive = tarfile.open(filename, "r:gz")
try:
names = archive.getnames()
names.sort()
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
i = 0
for name in names:
i += 1
elapsed_time = i * fps
if len(file_type_matcher.findall(name.lower())) == 0:
continue
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime() or videoFrameTime is None:
break
if names:
extracted_file = archive.extract(name, os.path.dirname(os.path.abspath(filename)))
img = openImage(extracted_file, isMask=isMask)
else:
extracted_file =''
img = openImage('')
if extracted_file != snapshotFileName and snapshotFileName is not None:
img.save(snapshotFileName)
return img
finally:
archive.close()
def readImageFromVideo(filename, videoFrameTime=None, isMask=False, snapshotFileName=None):
cap = cv2api.cv2api_delegate.videoCapture(filename, useFFMPEGForTime=False)
bestSoFar = None
bestVariance = -1
maxTry = 20
time_manager = VidTimeManager(stopTimeandFrame=videoFrameTime)
try:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame = frame[..., ::-1]
elapsed_time = cap.get(cv2api.cv2api_delegate.prop_pos_msec)
time_manager.updateToNow(elapsed_time)
if time_manager.isPastTime():
bestSoFar = frame
break
varianceOfImage = math.sqrt(ndimage.measurements.variance(frame))
if frame is not None and bestVariance < varianceOfImage:
bestSoFar = frame
bestVariance = varianceOfImage
maxTry -= 1
if not videoFrameTime and maxTry <= 0:
break
finally:
cap.release()
if bestSoFar is None:
logging.getLogger('maskgen').error(
"{} cannot be read by OpenCV/ffmpeg. Mask generation will not function properly.".format(filename))
return outputVideoFrame(filename, outputName=snapshotFileName, videoFrameTime=videoFrameTime, isMask=isMask)
else:
img = ImageWrapper(bestSoFar, to_mask=isMask)
if snapshotFileName is not None and snapshotFileName != filename:
img.save(snapshotFileName)
return img
def md5_of_file(filename, raiseError=True, load_size=500000000):
import hashlib
import os
try:
size = os.stat(filename).st_size
with open(filename, 'rb') as rp:
if size < load_size:
return hashlib.md5(rp.read()).hexdigest()
else:
m = hashlib.md5()
while True:
b = rp.read(load_size)
if b is not None and len(b) > 0:
m.update(b)
else:
break
return m.hexdigest()
except Exception as e:
if raiseError:
raise e
return ''
def uniqueId():
import time
return str(time.time()).replace('.', '')
def shortenName(name, postfix, identifier=None):
import hashlib
middle = ''.join([(x[0] + x[-1] if len(x) > 1 else x) for x in name.split('_')])
if identifier is not None:
middle = middle + '_' + str(identifier)
return hashlib.md5(name + postfix).hexdigest() + '_' + middle + '_' + postfix
class ImageOpener:
def __init__(self):
pass
def openImage(self, filename, isMask=False, args=None):
try:
img = openImageFile(filename, isMask=isMask, args=args)
return img if img is not None else openImage(get_icon('RedX.png'))
except Exception as e:
logging.getLogger('maskgen').warning('Failed to load ' + filename + ': ' + str(e))
return openImage(get_icon('RedX.png'))
class AudioOpener(ImageOpener):
def __init__(self):
ImageOpener.__init__(self)
def openImage(self, filename, isMask=False, args=None):
return ImageOpener.openImage(self, get_icon('audio.png'))
class VideoOpener(ImageOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
self.videoFrameTime = videoFrameTime
self.preserveSnapshot = preserveSnapshot
ImageOpener.__init__(self)
def openSnapshot(self, filename, snapshotFileName):
return os.path.exists(snapshotFileName) and \
os.stat(snapshotFileName).st_mtime >= os.stat(filename).st_mtime
def openImage(self, filename, isMask=False, args=None):
if not ('video' in getFileMeta(filename)):
return ImageOpener.openImage(self, get_icon('audio.png'))
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readImageFromVideo(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
class ZipOpener(VideoOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
VideoOpener.__init__(self, videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
def openImage(self, filename, isMask=False, args=None):
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readFromZip(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
class CollectionOpener(ImageOpener):
def __init__(self):
ImageOpener.__init__(self)
def openImage(self, filename, isMask=False, args=None):
return ImageOpener.openImage(self, get_icon('zip.jpg'))
class TgzOpener(VideoOpener):
def __init__(self, videoFrameTime=None, preserveSnapshot=True):
VideoOpener.__init__(self, videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
def openImage(self, filename, isMask=False, args=None):
snapshotFileName = os.path.splitext(filename)[0] + '.png'
if self.openSnapshot(filename, snapshotFileName):
return ImageOpener.openImage(self, snapshotFileName)
videoFrameImg = readFromArchive(filename, videoFrameTime=self.videoFrameTime, isMask=isMask,
snapshotFileName=snapshotFileName if self.preserveSnapshot else None)
if videoFrameImg is None:
logging.getLogger('maskgen').warning('invalid or corrupted file ' + filename)
return ImageOpener.openImage(self, get_icon('RedX.png'))
return videoFrameImg
def getContentsOfZip(filename):
from zipfile import ZipFile
with ZipFile(filename, 'r') as inzip:
names = inzip.namelist()
names.sort()
return names
def condenseZip(filename, outputfile=None, filetypes=None, keep=2):
from zipfile import ZipFile
import re
filetypematcher = re.compile('.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in filetypes]) + ')') \
if filetypes is not None else re.compile('.*')
fn = os.path.splitext(filename)[0] + '_c' + os.path.splitext(filename)[1] if outputfile is None else outputfile
cleanup = []
try:
with ZipFile(fn, 'w') as outzip:
with ZipFile(filename, 'r') as inzip:
names = inzip.namelist()
names.sort()
extensions = {}
for i in range(len(names)):
name = names[i]
extension = os.path.splitext(name)[1]
if len(filetypematcher.findall(name)) == 0:
continue
if extension not in extensions:
extensions[extension] = 1
else:
extensions[extension] += 1
dir = os.path.dirname(os.path.abspath(filename))
extracted_file = os.path.join(dir, name)
cleanup.append(extracted_file)
if extensions[extension] <= keep:
extracted_file = inzip.extract(name, dir)
outzip.write(extracted_file, name)
else:
with open(extracted_file, 'wb') as fp:
fp.flush()
outzip.write(extracted_file, name)
finally:
for filename in cleanup:
if os.path.exists(filename):
os.remove(filename)
def openImage(filename, videoFrameTime=None, isMask=False, preserveSnapshot=False, args=None):
"""
Open and return an image from the file. If the file is a video, find the first non-uniform frame.
    If videoFrameTime, an integer time in milliseconds, is provided, then find the frame after that point in time.
preserveSnapshot, False by default, informs the function to save the frame image after extraction for videos
"""
import os
if not os.path.exists(filename):
logging.getLogger('maskgen').warning(filename + ' is missing.')
if not filename.endswith('icons/RedX.png'):
return openImage(get_icon('RedX.png'))
return None
prefix = os.path.splitext(filename)[1][1:].lower()
opener = ImageOpener()
if prefix in ['avi', 'mp4', 'mov', 'flv', 'qt', 'wmv', 'm4p', 'mpeg', 'mpv',
'm4v', 'mts', 'mpg'] or fileType(filename) == 'video':
opener = VideoOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif prefix in ['zip', 'gz']:
if fileType(filename) == 'collection':
opener = CollectionOpener()
else:
opener = ZipOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif prefix in [ 'tgz']:
if fileType(filename) == 'collection':
opener = CollectionOpener()
else:
opener = TgzOpener(videoFrameTime=videoFrameTime, preserveSnapshot=preserveSnapshot)
elif fileType(filename) == 'audio':
opener = AudioOpener()
return opener.openImage(filename, isMask=isMask, args=args)
def interpolateMask(mask, startIm, destIm, invert=False, arguments=dict()):
"""
:param mask:
    :param startIm:
    :param destIm:
:param invert:
:param arguments:
:return:
@type mask: ImageWrapper
    @type destIm: ImageWrapper
    @type startIm: ImageWrapper
"""
maskInverted = mask if invert else mask.invert()
mask = np.asarray(mask)
mask = mask.astype('uint8')
logger = logging.getLogger('maskgen')
try:
mask1 = convertToMask(startIm).to_array() if startIm.has_alpha() else None
logger.debug('SIFT')
TM, matchCount = __sift(startIm, destIm, mask1=mask1, mask2=maskInverted, arguments=arguments)
except:
TM = None
if TM is not None:
logger.debug('WARP')
newMask = cv2.warpPerspective(mask, TM, (startIm.size[0], startIm.size[1]), flags=cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_CONSTANT, borderValue=255)
analysis = {}
analysis['transform matrix'] = serializeMatrix(TM)
return newMask, analysis
elif getValue(arguments,'homography','None') != 'None':
logger.debug('SIFT Failed. Find Countours')
try:
contours, hier = cv2api.findContours(255 - mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
minpoint = None
maxpoint = None
for contour in contours:
for point in contour:
if type(point[0]) is np.ndarray:
point = point[0]
if minpoint is None:
minpoint = point
else:
minpoint = (min(minpoint[0], point[0]), min(minpoint[1], point[1]))
if maxpoint is None:
maxpoint = point
else:
maxpoint = (max(maxpoint[0], point[0]), max(maxpoint[1], point[1]))
w = maxpoint[0] - minpoint[0] + 1
h = maxpoint[1] - minpoint[1] + 1
x = minpoint[0]
y = minpoint[1]
if (startIm.size[0] - w) == 0 and (startIm.size[1] - h) == 0:
return mask[y:y + h, x:x + w], {}
except:
return None, None
return None, None
def serializeMatrix(m):
if m is None:
return None
data = {'r': m.shape[0],'c':m.shape[1]}
for r in range(m.shape[0]):
data['r' + str(r)] = list(m[r, :])
return data
def deserializeMatrix(data):
if data is None:
return None
m = np.zeros((int(data['r']), int(data['c'])))
for r in range(m.shape[0]):
m[r, :] = data['r' + str(r)]
return m
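# Added illustrative sketch (not part of the original module); a serialize/deserialize round trip
# for a hypothetical 3x3 homography.
def _example_serializeMatrix_roundtrip():
    m = np.array([[1., 0., 5.], [0., 1., -3.], [0., 0., 1.]])
    data = serializeMatrix(m)   # {'r': 3, 'c': 3, 'r0': [1.0, 0.0, 5.0], 'r1': ..., 'r2': ...}
    assert np.array_equal(deserializeMatrix(data), m)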
def redistribute_intensity(edge_map):
"""
    Produce an intensity_map that redistributes the intensity values found in the edge_map evenly over 1 to 255
:param edge_map contains a map between an edge identifier (s,e) and an intensity value from 1 to 255 and possibly a color
:return map of intensity value from edge map to a replacement intensity value
@type edge_map {(str,str): (int,[])}
"""
levels = [x[0] for x in edge_map.values()]
colors = [str(x[1]) for x in edge_map.values() if x[1] is not None]
unique_colors = sorted(np.unique(colors))
intensities = sorted(np.unique(levels))
intensity_map = [0]
if len(unique_colors) == len(intensities):
for x in edge_map.values():
intensity_map[x[0]] = x[1]
return intensity_map
increment = int(16777216 / (len(intensities) + 1))
for pos in range(len(intensities)):
v = (pos + 1) * increment
intensity_map.append([(v % 65536) / 256, v / 65536, (v % 65536) % 256])
for k, v in edge_map.iteritems():
edge_map[k] = (v[0], intensity_map[v[0]])
#im = np.zeros((500,500,3)).astype('uint8')
#pos = 0
#for i in intensity_map:
# im[pos,:] = i
# pos+=1
#ImageWrapper(im).save('foo.png')
return intensity_map
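# Worked example (added): with three distinct intensity levels, increment = int(16777216 / 4) = 4194304,
# so the appended replacement colours are [0, 64, 0], [0, 128, 0] and [0, 192, 0]
# (computed as [(v % 65536) / 256, v / 65536, (v % 65536) % 256] for v = 4194304, 8388608, 12582912).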
def maskToColorArray(img, color=[0, 0, 0]):
"""
    Create a new color image from a mask, setting the black (masked) pixels to the given color and everything else to white.
:param img:
:param color:
:return:
@type img: ImageWrapper
@rtype ImageWrapper
"""
imarray = np.asarray(img)
rgb = np.ones((imarray.shape[0], imarray.shape[1], 3)).astype('uint8') * 255
rgb[imarray == 0, :] = color
return rgb
def toColor(img, intensity_map={}):
"""
Produce an image that changes gray scale to color.
First, set the intensity values of each pixel using the intensity value from the intensity map
Then use a color map to build a color image
Then repopulate the edge_map with the assigned color for each edge
:param img gray scale image
:param intensity_map intensity value mapped to its replacement
:return the new color image
"""
result = cv2.applyColorMap(img.astype('uint8'), cv2.COLORMAP_HSV)
for old, new in intensity_map.iteritems():
result[img == old] = new
result[img == 0] = [255, 255, 255]
return result
def toComposite(img):
"""
Convert to a mask with white indicating change
:param img gray scale image
:return image
"""
result = np.zeros(img.shape).astype('uint8')
result[img > 0] = 255
return result
def toIntTuple(tupleString):
import re
if tupleString is not None and tupleString.find(',') > 0:
return tuple([int(re.sub('[()L]', '', x)) for x in tupleString.split(',')])
return 0, 0
def sizeOfChange(mask):
if len(mask.shape) == 2:
return mask.size - sumMask(mask == 255)
else:
mask_size = mask.shape[0] * mask.shape[1]
return mask_size - sumMask(np.all(mask == [255, 255, 255], axis=2))
def maskChangeAnalysis(mask, globalAnalysis=False):
mask = np.asarray(mask)
totalPossible = reduce(lambda a, x: a * x, mask.shape)
totalChange = sumMask(mask.astype('float32')) / 255.0
ratio = float(totalChange) / float(totalPossible)
globalchange = True
if globalAnalysis:
globalchange = ratio > 0.75
        (x, y), (xend, yend) = boundingRegion(mask)
        area = float((xend - x) * (yend - y))
        # restrict the dispersion test to the bounding region of the changed pixels
        region = mask[y:yend, x:xend]
        xhist = np.histogram(np.where(region > 0)[1], bins=min(256, region.shape[1]))[0]
        yhist = np.histogram(np.where(region > 0)[0], bins=min(256, region.shape[0]))[0]
        dispersion = xhist[0] > 0 and xhist[-1] > 0 and yhist[0] > 0 and yhist[-1] > 0
globalchange |= (area/totalPossible > 0.75) and dispersion
return globalchange, 'small' if totalChange < 2500 else ('medium' if totalChange < 10000 else 'large'), ratio
def SSIMAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
globalchange = img1.size != img2.size
img1, img2 = __alignChannels(img1, img2)
    analysis['ssim'] = compare_ssim(np.asarray(img1), np.asarray(img2), multichannel=False)
if mask is not None:
mask = np.copy(np.asarray(mask))
mask[mask > 0] = 1
analysis['local ssim'] = ssim(img1 * mask, img2 * mask, mask, R=65536)
return globalchange
def globalTransformAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
"""
Determine if operation is global. Capture 'change size ratio' and 'change size category'.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
globalchange = img1.size != img2.size
totalChange = ''
ratio = 1.0
if mask is not None:
globalchange, totalChange, ratio = maskChangeAnalysis(mask, not globalchange)
analysis['global'] = arguments['global operation'] if 'global operation' in arguments else \
('yes' if globalchange else 'no')
analysis['change size ratio'] = ratio
analysis['change size category'] = totalChange
return globalchange
def localTransformAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments={}, directory='.'):
"""
Non-global operations, capturing 'change size ratio' and 'change size category'.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
globalchange = globalTransformAnalysis(analysis, img1, img2,
mask=mask,
linktype=linktype,
arguments=arguments,
directory=directory)
analysis['global'] = 'no'
return globalchange
def forcedSiftWithInputAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Perform SIFT regardless of the global change status, using an input mask from the parameters
to select the source region.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments: parameters
:return:
"""
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
if 'inputmaskname' in arguments:
inputmask = openImageFile(os.path.join(directory, arguments['inputmaskname'])).to_mask().to_array()
        # A bit arbitrary: if the input mask differs from the change mask over 75% or more of the
        # change mask's area, isolate the regions highlighted by the input mask;
        # otherwise just use the change mask for the transform. The change mask should be the full set of the
        # pixels changed and the input mask a subset of those pixels.
if sumMask(abs((mask.image_array - inputmask) / 255)) / float(sumMask(mask.image_array / 255)) >= 0.75:
# want mask2 to be the region moved to
mask2 = mask - inputmask
# mask1 to be the region moved from
mask = inputmask
else:
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
else:
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def forcedSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Perform SIFT regardless of the global change status.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:return:
"""
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def seamAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
    Perform SIFT regardless of the global change status. If a neighbor mask is constructed, indicating the seams
    can be calculated, then mark as not global.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
forcedSiftAnalysis(analysis, img1, img2, mask=mask, linktype=linktype, arguments=arguments, directory=directory)
if 'neighbor mask' in arguments:
analysis['global'] = 'no'
def rotateSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
If the image is rotated by values other than factors of 90 degrees, use SIFT to build a homography.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
import copy
rot = float(getValue(arguments,'rotation',-1))
is_local = getValue(arguments,'local',True)
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if abs(rot % 90)<0.001 and not is_local:
return
if is_local:
return siftAnalysis(analysis, img1, img2, mask=mask, linktype=linktype, arguments=arguments, directory=directory)
# global case and not a factor of 90
# skip video
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
serializedMatrix = getValue(arguments,'transform matrix')
if serializedMatrix is None:
args = copy.copy(arguments)
(x,y),(w,h) = boundingRegion(mask.invert().image_array)
if (w-x + h-y) > 0.5*(mask.size[0] + mask.size[1]):
args['Matcher.TREES'] = 6
args['Matcher.CHECKS'] = 20
matrix,matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=args)
if matrix is not None and isHomographyOk(matrix,img1.size[1],img1.size[0]):
analysis['transform matrix'] = serializeMatrix(matrix)
else:
analysis['transform matrix'] = serializedMatrix
def siftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
Use SIFT to build a homography for transform type changes that manipulated prior masks for probes.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
if globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments):
return
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
analysis['transform matrix'] = serializeMatrix(matrix)
def boundingRegion(mask):
x, y, w, h = widthandheight(mask)
return (x, y), (x + w, y + h)
def boundingRectange(mask):
allpoints = []
contours, hierarchy = cv2api.findContours(np.copy(mask), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
cnt = contours[i]
allpoints.extend(cnt)
hull = cv2.convexHull(np.asarray(allpoints))
return cv2.minAreaRect(hull)
def _affineTransformDonorImage(initialImage, donorImage, mask, donorMask):
dims = initialImage.shape[2]
IM = (255 - mask)
IDM = (255 - donorMask)
mcenter, mdims, mrotation = boundingRectange(IM)
dcenter, ddims, drotation = boundingRectange(IDM)
ratiox = float(donorImage.shape[0]) / float(initialImage.shape[0])
ratioy = float(donorImage.shape[1]) / float(initialImage.shape[1])
scale = min(float(mdims[0]) * ratiox / ddims[0], float(mdims[1]) * ratioy / ddims[1])
M = cv2.getRotationMatrix2D(mcenter, drotation - mrotation, scale)
IDM3 = np.zeros((donorImage.shape[0], donorImage.shape[1], dims))
IM3 = np.zeros((initialImage.shape[0], initialImage.shape[1], dims))
for i in range(dims):
IDM3[:, :, i] = IDM
IM3[:, :, i] = IM
donorImageSelection = donorImage[:, :, 0:dims] * IDM3
return cv2.warpAffine(donorImageSelection, M, (initialImage.shape[1], initialImage.shape[0]))
def generateOpacityImage(initialImage, donorImage, outputImg, mask, donorMask, tm):
"""
Assume opacity is o such that
outputImg = initialImage*(mask/255) + initialImage*((255-mask)/255)*(1-o) + donorImage*o*((255-donormask)/255)
IM = inverted mask
IDM = inverted donor mask
outputImg - initialImage*(mask/255) = initialImage*IM - initialImage*IM*o + donorImage*o*((255-donormask)/255)
outputImg - initialImage*(mask/255) - initialImage*IM = donorImage*IDM*o - initialImage*IM*o
outputImg - initialImage = donorImage*IDM*o - initialImage*IM*o
outputImg - initialImage = o * (donorImage*IDM - initialImage*IM)
o = (outputImg - initialImage)/(donorImage*IDM - initialImage*IM)
    Challenging since the donor mask is not lined up with the image exactly.
:param img1:
:param img2:
:param outputImg:
:param mask:
:return:
"""
dims = initialImage.shape[2]
IDM = (255 - donorMask) / 255
IM = (255 - mask) / 255
IDM3 = np.zeros((donorImage.shape[0], donorImage.shape[1], dims))
IM3 = np.zeros((initialImage.shape[0], initialImage.shape[1], dims))
for i in range(dims):
IDM3[:, :, i] = IDM
IM3[:, :, i] = IM
donorImageSelection = (donorImage[:, :, 0:dims] * IDM3)
if tm is not None:
transformedImageAligned = cv2.warpPerspective(donorImageSelection, tm,
(initialImage.shape[1], initialImage.shape[0]),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
else:
transformedImageAligned = _affineTransformDonorImage(initialImage, donorImage, mask, donorMask).astype('uint8')
# r = i(1-o) + t*o
# r = i - o*i + t*o
# r-i = o*t - o*i
# r-i= o(t-i)
# o = (r-i)/(t-i)
diffDonorImage = abs(transformedImageAligned * IM3 - initialImage * IM3).astype('float32')
diffOutputImage = abs(outputImg[:, :, 0:dims] * IM3 - initialImage * IM3).astype('float32')
result = np.zeros(diffOutputImage.shape)
result[diffDonorImage > 0.0] = diffOutputImage[diffDonorImage > 0] / diffDonorImage[diffDonorImage > 0.0]
result[np.isinf(result)] = 0.0
result[result > 1] = 1.0
if dims > 3:
result[:, :, 3] = 1
return result
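# Worked example (added): for a single pixel outside the change mask with initial value i=100, aligned
# donor value t=180 and output value r=140, the recovered opacity is o = (r - i) / (t - i) = 40 / 80 = 0.5.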
def generateOpacityColorMask(initialImage, donorImage, outputImg, mask, donorMask):
result = generateOpacityImage(initialImage, donorImage, outputImg, mask, donorMask)
min = np.min(result)
max = np.max(result)
return (result - min) / (max - min) * 255.0
def optionalSiftAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
"""
    If 'location change' is not in parameters or 'location change' is no, skip this step.
Otherwise, use SIFT to find a homography.
:param analysis:
:param img1:
:param img2:
:param mask:
:param linktype:
:param arguments:
:param directory:
:return:
"""
if 'location change' not in arguments or arguments['location change'] == 'no':
return
globalTransformAnalysis(analysis, img1, img2, mask=mask, arguments=arguments)
if linktype != 'image.image':
return
mask2 = mask.resize(img2.size, Image.ANTIALIAS) if mask is not None and img1.size != img2.size else mask
matrix, matchCount = __sift(img1, img2, mask1=mask, mask2=mask2, arguments=arguments)
if matrix is not None:
analysis['transform matrix'] = serializeMatrix(matrix)
def createMask(img1, img2, invert=False, arguments={}, alternativeFunction=None, convertFunction=None):
mask, analysis, error = __composeMask(img1,
img2,
invert,
arguments=arguments,
alternativeFunction=alternativeFunction,
convertFunction=convertFunction)
analysis['shape change'] = sizeDiff(img1, img2)
if 'location' not in analysis:
analysis['location'] = '(0,0)'
analysis['empty mask'] = 'yes' if np.all(mask == 255) else 'no'
return ImageWrapper(mask), analysis, error
def __indexOf(source, dest):
positions = []
for spos in range(len(source)):
for dpos in range(len(dest)):
if (source[spos] == dest[dpos]).all():
positions.append(spos)
break
return positions
def __flannMatcher(d1, d2, args=None):
FLANN_INDEX_KDTREE = 0
TREES = 16
CHECKS = 50
if 'Matcher.CHECKS' in args:
CHECKS = int(args['Matcher.CHECKS'])
if 'Matcher.TREES' in args:
TREES = int(args['Matcher.TREES'])
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=TREES)
search_params = dict(checks=CHECKS)
flann = cv2.FlannBasedMatcher(index_params, search_params)
return flann.knnMatch(d1, d2, k=2) if d1 is not None and d2 is not None else []
def getMatchedSIFeatures(img1, img2, mask1=None, mask2=None, arguments=dict(), matcher=__flannMatcher):
img1 = img1.to_rgb(data_type=np.uint8).apply_mask(mask1).to_array()
img2 = img2.to_rgb(data_type=np.uint8).apply_mask(mask2).to_array()
threshold = arguments['sift_match_threshold'] if 'sift_match_threshold' in arguments else 10
maxmatches = int(arguments['homography max matches']) if 'homography max matches' in arguments else 10000
def getRange(size, segment_size=2048):
"""
Divided up the size into segment_size ranges
:param size:
:param segment_size:
        :return: list of ranges represented as tuples (start, end, last-range indicator)
"""
ranges = [(x * segment_size, min((x + 1) * segment_size, size), False) for x in range(size / segment_size + 1)]
if ranges[-1][1] - ranges[-1][0] < segment_size and len(ranges) > 1:
ranges = ranges[:-2] + [(ranges[-2][0],ranges[-1][1], True)]
else:
ranges[-1] = (ranges[-1][0], ranges[-1][1], True)
return ranges
def updateKP(kp,pos):
kp.pt = (kp.pt[0]+pos[0], kp.pt[1]+pos[1])
return kp
def filterKP(pt, xstart, xend, ystart, yend):
"""
Filter out points outside the 'window' surrounded by the buffer
:param pt:
:param xstart:
:param xend:
:param ystart:
:param yend:
:return:
"""
return \
(pt[0] >= xstart and pt[0] <= xend) and \
(pt[1] >= ystart and pt[1] <= yend)
def computeSIFTOverRanges(img1,buffer_size=16, segment_size=2048):
total_kp = []
total_d = None
for xrange in getRange(img1.shape[0]):
for yrange in getRange(img1.shape[1]):
(kp, ds) = cv2api.cv2api_delegate.computeSIFT(
img1[max(0,xrange[0]-buffer_size):min(xrange[1]+buffer_size,img1.shape[0]),
max(0,yrange[0]-buffer_size):min(yrange[1]+buffer_size,img1.shape[1])])
xstart = buffer_size - 1 if xrange[0] > 0 else 0
xend = segment_size*2 if xrange[2] else (segment_size + \
(0 if xrange[0] == 0 else buffer_size))
ystart = buffer_size - 1 if yrange[0] > 0 else 0
yend = segment_size*2 if yrange[2] else (segment_size + \
(0 if yrange[0] == 0 else buffer_size))
kept = [kpi for kpi in range(len(kp)) if filterKP(kp[kpi].pt,
xstart,xend,
ystart,yend)]
total_kp.extend([updateKP(kp[kpi],(xrange[0],yrange[0])) for kpi in kept])
if ds is not None:
ds = ds[kept,:]
if total_d is None:
total_d = ds
else:
total_d = np.concatenate((total_d,ds))
return total_kp,total_d
(kp2, d2) = computeSIFTOverRanges(img2)
if kp2 is None or len(kp2) == 0:
return None
(kp1, d1) = computeSIFTOverRanges(img1)
if kp1 is None or len(kp1) == 0:
return None
d1 /= (d1.sum(axis=1, keepdims=True) + 1e-7)
d1 = np.sqrt(d1)
d2 /= (d2.sum(axis=1, keepdims=True) + 1e-7)
d2 = np.sqrt(d2)
matches = matcher(d1,d2, args=arguments)
# store all the good matches as per Lowe's ratio test.
good = [m for m, n in matches if m.distance < 0.75 * n.distance]
good = sorted(good, lambda g1, g2: -int(max(g1.distance, g2.distance) * 1000))
good = good[0:min(maxmatches, len(good))]
if len(good) >= threshold:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
return (src_pts, dst_pts) if src_pts is not None else None
return None
def _remap(img, mask, src_pts, dst_pts):
from scipy.interpolate import griddata
long = mask.reshape(mask.shape[0] * mask.shape[1])
grid_x, grid_y = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
grid_z = griddata(np.array(dst_pts),
np.array(src_pts), (grid_x, grid_y), method='cubic', rescale=True)
map_x = np.append([], [ar[:, 0] for ar in grid_z])
map_y = np.append([], [ar[:, 1] for ar in grid_z])
default_x = np.append([], [ar for ar in grid_x])
default_y = np.append([], [ar for ar in grid_y])
# remove remaps outside the mask
map_x[long == 0] = default_x[long == 0]
map_y[long == 0] = default_y[long == 0]
# fix nan's with no mapping
jj = np.where(np.isnan(map_x))
map_x[jj] = default_x[jj]
jj = np.where(np.isnan(map_y))
map_y[jj] = default_y[jj]
map_x_32 = map_x.astype('float32').reshape(mask.shape)
map_y_32 = map_y.astype('float32').reshape(mask.shape)
return cv2.remap(img, map_y_32, map_x_32, cv2.INTER_NEAREST)
def __grid(img1, img2, compositeMask, edgeMask=None, arguments=None):
"""
    Compute a sparse remapping of the composite mask from matched points between img1 and img2
    :param img1:
    :param img2:
    :param compositeMask:
    :param edgeMask:
    @type img1: ImageWrapper
    @type img2: ImageWrapper
    :return: the remapped composite mask, or the original compositeMask if no matched points are found
"""
src_dts_pts = getMatchedSIFeatures(img1, img2, mask1=edgeMask, mask2=None, arguments=arguments)
if src_dts_pts is None:
return compositeMask
newMask = _remap(compositeMask, edgeMask,
[[x[0][1], x[0][0]] for x in src_dts_pts[0].astype('int')],
[[x[0][1], x[0][0]] for x in src_dts_pts[1].astype('int')])
# r = np.zeros(r.shape).astype('uint8')
# for x in range(len(src_dts_pts[1])):
# cv2.line(r,tuple(src_dts_pts[0][x][0]),tuple(src_dts_pts[1][x][0]),255)
# r[int(x[0][1]),int(x[0][0])] = 255
return newMask
def __sift(img1, img2, mask1=None, mask2=None, arguments=None):
"""
    Compute a homography to transform img1 to img2
    Apply the mask to each in order to only compare relevant regions of the images
:param img1:
:param img2:
:param mask1:
:param mask2:
@type img1: ImageWrapper
@type img2: ImageWrapper
:return: None if a matrix cannot be constructed, otherwise a 3x3 transform matrix
"""
arguments = dict(arguments)
homography = arguments['homography'] if arguments is not None and 'homography' in arguments else 'RANSAC-4'
if homography in ['None', 'Map']:
return None, None
elif homography in ['All'] and 'homography max matches' in arguments:
# need as many as possible
arguments.pop('homography max matches')
src_dts_pts = getMatchedSIFeatures(img1, img2, mask1=mask1, mask2=np.asarray(mask2), arguments=arguments)
if src_dts_pts is not None:
new_src_pts = src_dts_pts[0]
new_dst_pts = src_dts_pts[1]
matches = None
if homography == 'LMEDS':
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts, cv2.LMEDS)
elif homography == 'All':
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts)
elif homography.find('-') > 0:
try:
                RANSAC_THRESHOLD = float(homography[homography.find('-') + 1:])
except:
RANSAC_THRESHOLD = 10.0
if matches is None:
M1, matches = cv2.findHomography(new_src_pts, new_dst_pts, cv2.RANSAC, RANSAC_THRESHOLD)
matchCount = np.sum(matches)
        if float(matchCount) / len(new_src_pts) < 0.15 and matchCount < 30:
return None, None
return M1, matchCount
return None, None
def applyResizeComposite(compositeMask, shape, interpolation=2):
"""
Resize the composite mask
:param compositeMask:
    :param shape:
    :param interpolation:
:return:
"""
newMask = np.zeros(shape).astype('uint8')
for level in list(np.unique(compositeMask)):
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 1024
newLevelMask = cv2.resize(levelMask, (shape[1], shape[0]),interpolation=interpolation)
newMask[newLevelMask > 300] = level
return newMask
class Flipper:
def __init__(self, mask, flip):
self.mask = mask
self.flipdirection = flip
self.region = boundingRegion(mask)
def _lcs(self, alist, blist):
"""
:param alist
:param blist:
:return:
"""
m = len(alist)
n = len(blist)
counter = [[0] * (n + 1) for x in range(m + 1)]
longest = 0
lcs_set = (0, 0)
for i in range(m):
for j in range(n):
if alist[i] == blist[j]:
c = counter[i][j] + 1
counter[i + 1][j + 1] = c
if c > longest:
lcs_set = (i, j)
longest = c
return lcs_set, longest
def flip(self, compositeMask):
flipped = compositeMask[self.region[0][1]:self.region[1][1], self.region[0][0]:self.region[1][0]]
flipped = cv2.flip(flipped,
1 if self.flipdirection == 'horizontal' else (-1 if self.flipdirection == 'both' else 0))
flipCompositeMask = np.zeros(self.mask.shape).astype('uint8')
flipCompositeMask[self.region[0][1]:self.region[1][1], self.region[0][0]:self.region[1][0]] = flipped
return flipCompositeMask
def applyFlipComposite(compositeMask, mask, flip):
"""
    Since SIFT cannot flip, flip the selected area directly.
:param compositeMask:
:param mask:
:param flip:
:return:
"""
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
flipper = Flipper(maskInverted, flip)
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
def work(levelMask):
flipCompositeMask = flipper.flip(levelMask)
return (flipCompositeMask + levelMask * maskAltered).astype('uint8')
return applyToComposite(compositeMask,work)
def applyToComposite(compositeMask, func, shape=None):
"""
    Loop through each level and apply the function.
    Need to convert levels to 0 and unmapped levels to 255
    :param compositeMask:
    :param func: function applied to each single-level mask
    :param shape:
    :return:
"""
newMask = np.zeros(shape if shape is not None else compositeMask.shape).astype('uint8')
for level in list(np.unique(compositeMask)):
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint8')
levelMask[compositeMask == level] = 255
newLevelMask = func(levelMask)
if newLevelMask is not None:
newMask[newLevelMask > 100] = level
return newMask
def applyGridTransformCompositeImage(compositeMask, startIm, destIm, edgeMask=None, arguments={}):
newMask = np.zeros((destIm.image_array.shape[0], destIm.image_array.shape[1]), dtype=np.uint8)
arguments = dict(arguments)
if 'homography max matches' in arguments:
arguments.pop('homography max matches')
levels = list(np.unique(compositeMask))
for level in levels:
if level == 0:
continue
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 255
newlevelmask = __grid(startIm, destIm, levelMask, edgeMask=255 - edgeMask, arguments=arguments)
if newlevelmask is not None:
newMask[newlevelmask > 100] = level
return newMask
def applyInterpolateToCompositeImage(compositeMask, startIm, destIm, edgeMask, inverse=False, arguments={},
defaultTransform=None,
withMask = False):
"""
Loop through each level add apply SIFT to transform the mask
:param compositeMask:
:param mask:
:param transform_matrix:
:return:
@type destIm: ImageWrapper
@type startIm: ImageWrapper
"""
newMask = np.zeros((destIm.image_array.shape[0], destIm.image_array.shape[1]), dtype=np.uint8)
if 'homography' in arguments and arguments['homography'] == 'Map':
return applyGridTransformCompositeImage(compositeMask,
startIm,
destIm,
edgeMask=edgeMask,
arguments=arguments)
if 'homography' in arguments and arguments['homography'] == 'None':
return compositeMask
levels = list(np.unique(compositeMask))
flags = cv2.WARP_INVERSE_MAP if inverse else cv2.INTER_LINEAR
borderValue = 0
for level in levels:
if level == 0:
continue
if defaultTransform is None or (
'composite homography' in arguments and arguments['composite homography'] == 'Multiple'):
levelMask = np.zeros(compositeMask.shape).astype('uint8')
levelMask[compositeMask == level] = 200
TM, matchCountResult = __sift(startIm, destIm, mask1=levelMask, mask2=invertMask(ImageWrapper(edgeMask)), arguments=arguments)
else:
TM = defaultTransform
levelMask = np.zeros(compositeMask.shape).astype('uint16')
levelMask[compositeMask == level] = 8000
if TM is None:
newLevelMask = cv2.resize(levelMask, (destIm.size[0], destIm.size[1]))
elif withMask:
newLevelMask = applyTransform(levelMask,
mask=edgeMask,
transform_matrix=TM,
invert=inverse,
shape=(destIm.size[1], destIm.size[0]))
else:
newLevelMask = cv2.warpPerspective(levelMask, TM, (destIm.size[0], destIm.size[1]),
flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=borderValue)
if newLevelMask is not None:
newMask[newLevelMask > 100] = level
return newMask
def applyRotateToCompositeImage(img, angle, pivot):
"""
    Loop through each level and apply the rotation.
Need to convert levels to 0 and unmapped levels to 255
:param img:
:param angle:
:param pivot:
:return:
"""
from functools import partial
func = partial(rotateImage, angle, pivot)
return applyToComposite(img, func, shape=img.shape)
def applyTransformToComposite(compositeMask, mask, transform_matrix, shape=None, returnRaw=False):
"""
    Loop through each level and apply the transform.
Need to convert levels to 0 and unmapped levels to 255
:param compositeMask:
:param mask:
:param transform_matrix:
:return:
"""
from functools import partial
func = partial(applyTransform, mask=mask, transform_matrix=transform_matrix, shape=shape, returnRaw=returnRaw)
return applyToComposite(compositeMask, func, shape=shape)
def applyPerspectiveToComposite(compositeMask, transform_matrix, shape):
def perspectiveChange(composite_mask, M=None, shape=None):
return cv2.warpPerspective(composite_mask, M, (shape[1], shape[0]))
from functools import partial
func = partial(perspectiveChange, M=transform_matrix, shape=shape)
return applyToComposite(compositeMask, func, shape=shape)
def applyAffineToComposite(compositeMask, transform_matrix, shape):
def perspectiveChange(composite_mask, M=None, shape=None):
return cv2.warpAffine(composite_mask, M, (shape[1], shape[0]))
from functools import partial
func = partial(perspectiveChange, M=transform_matrix, shape=shape)
return applyToComposite(compositeMask, func, shape=shape)
def applyRotateToComposite(rotation, compositeMask, edgeMask, expectedDims, local=False):
"""
    Loop through each level and apply the rotation.
Need to convert levels to 0 and unmapped levels to 255
:param rotation:
:param compositeMask:
:param edgeMask:
:param expectedDims:
:param local
:return:
"""
from functools import partial
if local:
func = partial(__localrotateImage, rotation, edgeMask, expectedDims=expectedDims, cval=255)
else:
func = partial(__rotateImage, rotation, expectedDims=expectedDims, cval=255)
return applyToComposite(compositeMask, func, shape=expectedDims)
def isHomographyOk(transform_matrix, h, w):
    # convert corners to homogeneous coordinates
ll = np.array([0, 0, 1])
ul = np.array([0, w, 1])
lr = np.array([h, 0, 1])
ur = np.array([h, w, 1])
if transform_matrix.shape == (2,3):
transform_matrix = np.vstack([transform_matrix,[0,0,1.0]])
a_ll = np.matmul(transform_matrix, ll)
a_ul = np.matmul(transform_matrix, ul)
a_ur = np.matmul(transform_matrix, ur)
a_lr = np.matmul(transform_matrix, lr)
# convert points to lines
a = np.cross(a_ll, a_ul)
b = np.cross(a_lr, a_ur)
# find point of intersection
intersection_point_projective = np.cross(a, b)
if intersection_point_projective[2] == 0:
return False
y_vertical = intersection_point_projective[0] / intersection_point_projective[2]
x_vertical = intersection_point_projective[1] / intersection_point_projective[2]
a = np.cross(a_ul, a_ur)
b = np.cross(a_ll, a_lr)
# find point of intersection
intersection_point_projective = np.cross(a, b)
if intersection_point_projective[2] == 0:
return False
y_horizontal = intersection_point_projective[0] / intersection_point_projective[2]
x_horizontal = intersection_point_projective[1] / intersection_point_projective[2]
# if the resulting lines intersect inside the box, fail
return not (0 <= x_vertical <= w and 0 <= y_vertical <= h) and not (
0 <= x_horizontal <= w and 0 <= y_horizontal <= h)
# Or is more appropriate to look at the hull of the shape.
# point = Point(x,y)
# points = [(d[0] / d[2], d[1] / d[2]) for d in [a_ll,a_ul,a_ur,a_lr]]
##polygon = Polygon(points).convex_hull
# return not polygon.contains(point)
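# Minimal usage sketch (hypothetical matrix): isHomographyOk can be used to sanity-check an
# estimated homography before warping masks with it; it rejects transforms whose opposite
# warped edges intersect inside the image frame, as well as degenerate cases.
def _example_is_homography_ok(h=480, w=640):
    candidate = np.array([[1.0, 0.02, 5.0],
                          [0.01, 1.0, -3.0],
                          [0.0001, 0.0, 1.0]])
    return isHomographyOk(candidate, h, w)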
def applyTransform(compositeMask, mask=None, transform_matrix=None, invert=False, returnRaw=False, shape=None):
"""
    Create a new mask by applying the transform to only those parts of the
    compositeMask that overlap with the provided mask.
:param compositeMask:
:param mask: 255 for unmanipulated pixels
:param transform_matrix:
:param invert:
:param returnRaw: do merge back in the composite
:return:
"""
flags = cv2.WARP_INVERSE_MAP if invert else cv2.INTER_LINEAR # +cv2.CV_WARP_FILL_OUTLIERS
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
maskInverted[maskInverted > 0] = 1
compositeMaskFlipped = compositeMask
# resize only occurs by user error.
if compositeMaskFlipped.shape != maskInverted.shape:
compositeMaskFlipped = cv2.resize(compositeMaskFlipped, (maskInverted.shape[1], maskInverted.shape[0]))
compositeMask = cv2.resize(compositeMask, (maskInverted.shape[1], maskInverted.shape[0]))
if shape is None:
shape = mask.shape
# zeros out areas outside the mask
compositeMaskAltered = compositeMaskFlipped * maskInverted
maxvalue = compositeMaskAltered.max()
compositeMaskAltered[compositeMaskAltered > 0] = maxvalue-20
if transform_matrix.shape[0] == 2:
newMask = cv2.warpAffine(compositeMaskAltered, transform_matrix, (shape[1], shape[0]), flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
else:
newMask = cv2.warpPerspective(compositeMaskAltered, transform_matrix, (shape[1], shape[0]), flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
newMask[newMask > 99] = maxvalue
newMask[newMask < 100] = 0
# put the areas outside the mask back into the composite
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
if returnRaw:
return newMask
newMask = newMask | compositeMask * maskAltered
return newMask
def cropResize(img,location, wh):
img_crop = img[location[0]:wh[0],location[1]:wh[1],:]
return cv2.resize(img_crop, (img.shape[1],img.shape[0]))
def cropResizeCompare(img1, img2, arguments=dict()):
width_and_height = (int(arguments['crop width']), int(arguments['crop height']))
pre_resize_img = cv2.resize(img2, width_and_height)
return composeCropImageMask(img1, pre_resize_img, location=None)
def cropCompare(img1, img2, arguments=dict()):
from maskgen.image_wrap import ImageWrapper
if (sum(img1.shape) > sum(img2.shape)):
img1_m, img2_m = __alignChannels(ImageWrapper(img1), ImageWrapper(img2))
analysis = {'shape change': sizeDiff(ImageWrapper(img1_m), ImageWrapper(img2_m))}
location = getValue(arguments,'location',None)
if type(location) == str:
location = toIntTuple(location)
        mask, analysis_d = composeCropImageMask(img1_m, img2_m,location=location)
        # merge the shape-change analysis with the crop mask analysis
        analysis.update(analysis_d)
        return mask, analysis
return None, {}
def _composeLCS(img1, img2):
from scipy import sparse
m = img1.shape[0] * img1.shape[1]
n = img2.shape[0] * img2.shape[1]
LCS = sparse.lil_matrix((m + 1, n + 1), dtype=np.int8)
    # LCS[i][j] contains the length of the LCS of X[0..i-1] and Y[0..j-1]
for i in xrange(1, m + 1, 1):
for j in xrange(1, n + 1, 1):
x1 = (i - 1) % img1.shape[0]
y1 = (i - 1) / img1.shape[0]
x2 = (j - 1) % img2.shape[0]
y2 = (j - 1) / img2.shape[0]
if img1[x1, y1] == img2[x2, y2]:
LCS[i, j] = LCS[i - 1, j - 1] + 1
else:
                # use a local name so the sequence length 'm' is not clobbered
                v = max(LCS[i - 1, j], LCS[i, j - 1])
                if v > 0:
                    LCS[i, j] = v
# Start from the right-most-bottom-most corner and
# one by one store characters in lcs[]
i = m - 1
j = n - 1
mask = np.zeros(img1.shape, dtype=np.uint8)
while i >= 0 and j >= 0:
x1 = i % img1.shape[0]
y1 = i / img1.shape[0]
x2 = j % img2.shape[0]
y2 = j / img2.shape[0]
if img1[x1, y1] == img2[x2, y2]:
mask[x1, y1] = 255
i -= 1
j -= 1
# If not same, then find the larger of two and
# go in the direction of larger value
elif LCS[i - 1, j] > LCS[i, j - 1]:
i -= 1
else:
            j -= 1
    return mask
def __search1(pixel, img2, tally, endx, endy, x, y):
from collections import deque
def __addToQueue(x, y, endx, endy, queue):
if x > endx:
queue.append((x - 1, y))
if y > endy:
queue.append((x, y - 1))
if x > endx:
queue.append((x - 1, y - 1))
pixel2 = img2[x, y]
if pixel == pixel2:
return (x, y)
queue = deque()
__addToQueue(x, y, endx, endy, queue)
while len(queue) > 0:
x, y = queue.popleft()
pixel2 = img2[x, y]
if pixel == pixel2:
return x, y
if tally[x, y] == 0:
__addToQueue(x, y, endx, endy, queue)
return None
def __search(pixel, img2, tally, position, depth):
startx = min(max(0, position[0] - depth[0]), img2.shape[0])
starty = min(max(0, position[1] - depth[1]), img2.shape[1])
endx = min(position[0] + depth[0], img2.shape[0]) + 1
endy = min(position[1] + depth[1], img2.shape[1]) + 1
imgbox = img2[startx:endx, starty:endy]
image_positions = zip(*np.where(imgbox == pixel))
if len(image_positions) > 0:
tallybox = tally[startx:endx, starty:endy]
tallypostions = zip(*np.where(tallybox > 0))
if len(tallypostions) > 0:
maxtally = max(tallypostions)
image_positions = [p for p in image_positions if p > maxtally]
else:
return None
if len(image_positions) > 0:
best = min(image_positions)
return startx + best[0], starty + best[1]
return None
def _tallySeam(img1, img2, minDepth=50):
tally1 = np.zeros(img1.shape)
tally2 = np.zeros(img2.shape)
depth_x = max(img2.shape[0] - img1.shape[0], minDepth)
depth_y = max(img2.shape[1] - img1.shape[1], minDepth)
for x1 in range(img1.shape[0]):
for y1 in range(img1.shape[1]):
pos = __search(img1[x1, y1], img2, tally2, (x1, y1), (depth_x, depth_y))
if pos is not None:
tally1[x1, y1] = 1
tally2[pos[0], pos[1]] = 1
return tally1.astype('uint8') * 255
def rotateCompare(img1, img2, arguments=dict()):
rotation = float(arguments['rotation']) if 'rotation' in arguments else 0.0
local = (arguments['local'] == 'yes') if 'local' in arguments else False
if img1.shape == img2.shape:
mask1, analysis1 = __diffMask(img1, img2, False, args=arguments)
if abs(rotation) < 0.0001:
return mask1, analysis1
mask2, analysis2 = __compareRotatedImage(rotation, img1, img2, arguments)
diff = sumMask(mask1) - sumMask(mask2)
return (mask1, analysis1) if diff < 0 or local else (mask2, analysis2)
else:
return __compareRotatedImage(rotation, img1, img2, arguments)
def resizeImage(img1, shape, interpolation):
name_map = {
'bicubic': cv2api.cv2api_delegate.inter_cubic,
'nearest': cv2api.cv2api_delegate.inter_nn,
'bilinear': cv2api.cv2api_delegate.inter_linear,
'cubic': cv2api.cv2api_delegate.inter_cubic,
'mesh': cv2api.cv2api_delegate.inter_area,
'lanczos': cv2api.cv2api_delegate.inter_lanczos
}
inter_val = name_map[interpolation] if interpolation in name_map else cv2api.cv2api_delegate.inter_nn
return cv2.resize(img1, (shape[1], shape[0]), interpolation=inter_val)
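# Minimal usage sketch (hypothetical sizes): resizeImage takes a target (rows, cols) shape
# and an interpolation name from the map above, falling back to nearest-neighbor.
def _example_resize_image():
    demo = np.zeros((10, 20, 3), dtype='uint8')
    return resizeImage(demo, (5, 10), 'bilinear').shape  # (5, 10, 3)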
def resizeCompare(img1, img2, arguments=dict()):
new_img2 = resizeImage(img2,
img1.shape,
arguments['interpolation'] if 'interpolation' in arguments else 'nearest')
return __diffMask(img1, new_img2, False, args=arguments)
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
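# Minimal sketch: moving_average smooths a 1-D array with an n-sample window; the result
# is shorter than the input by n - 1 entries (used below to smooth histograms).
def _example_moving_average():
    return moving_average(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), n=3)  # array([2., 3., 4.])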
def morphologyCompare(img_one, img_two, arguments= {}):
kernel_size = int(getValue(arguments, 'kernel', 3))
kernel = np.ones((kernel_size, kernel_size), np.uint8)
diff = (np.abs(img_one - img_two)).astype('uint16')
mask = np.sum(diff, 2)
difference = float(arguments['tolerance']) if arguments is not None and 'tolerance' in arguments else 0.00390625
difference = difference * 256
mask[np.where(mask < difference)] = 0
if getValue(arguments, 'distribute_difference', False):
mask = 255*mask.astype(np.double)/(np.max(mask)-difference)
mask = mask.astype('uint8')
else:
# set to black if less than threshold
mask[np.where(mask > 0)] = 255
mask = mask.astype('uint8')
mask = cv2.morphologyEx(mask,cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)# filter out noise in the mask
return mask, {}
def mediatedCompare(img_one, img_two, arguments={}):
morphologyOps = {'open':cv2.MORPH_OPEN, 'close':cv2.MORPH_CLOSE}
morphology_order = getValue(arguments, 'morphology order', 'open:close').split(':')
gain = int(getValue(arguments, 'gain', 0))
kernel_size=int(getValue(arguments, 'kernel',3))
weight = int(getValue(arguments, 'weight', 1.0))
smoothing = int(getValue(arguments, 'smoothing', 3))
algorithm = getValue(arguments, 'filling', 'morphology')
aggregate = getValue(arguments, 'aggregate', 'max')
kernel = np.ones((kernel_size, kernel_size), np.uint8)
max_threshold = int(getValue(arguments, 'maximum threshold', 255))
from scipy import signal
# compute diff in 3 colors
if aggregate == 'luminance':
min_threshold = int(getValue(arguments, 'minimum threshold', 3))
img_one = cv2.cvtColor(img_one.astype('uint8'), cv2.COLOR_BGR2YCR_CB)
img_two = cv2.cvtColor(img_two.astype('uint8'), cv2.COLOR_BGR2YCR_CB)
diff = (np.abs(img_one.astype('int16') - img_two.astype('int16')))
mask = diff[:, :, 0] + (diff[:, :, 2] + diff[:, :, 1])/weight
bins = 256 + 512/weight
else:
min_threshold = int(getValue(arguments, 'minimum threshold', 0))
diff = (np.abs(img_one.astype('int16') - img_two.astype('int16'))).astype('uint16')
if aggregate == 'max':
mask = np.max(diff, 2) # use the biggest difference of the 3 colors
bins=256
elif aggregate == 'sum':
mask = np.sum(diff, 2)
bins=768
else:
mask = np.mean(diff, 2)
bins = 256
hist, bin_edges = np.histogram(mask, bins=bins, density=False)
if smoothing > 0:
hist = moving_average(hist,n=smoothing) # smooth out the histogram
minima = signal.argrelmin(hist, order=1) # find local minima
size = minima[0].size
minima = minima[0][0] if size > 0 else 0
else:
size = 0
minima = min_threshold
if size == 0 or minima > bins/2: # if there was no minima, hardcode
threshold = min_threshold
else:
threshold = max(min_threshold,min(minima, max_threshold)) # Use first minima
threshold += gain
mask[np.where(mask <= threshold)] = 0 # set to black if less than threshold
mask[np.where(mask > 0)] = 255
mask = mask.astype('uint8')
if algorithm == 'morphology':
mask = cv2.morphologyEx(mask, morphologyOps[morphology_order[0]], kernel)
mask = cv2.morphologyEx(mask, morphologyOps[morphology_order[1]], kernel)
elif algorithm == 'median':
mask = cv2.medianBlur(mask, kernel_size) # filter out noise in the mask
return mask, {'threshold': threshold, 'hist': hist, 'diff':diff}
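# Minimal usage sketch (hypothetical images): mediatedCompare thresholds the per-pixel
# color difference at a histogram minimum (falling back to the configured minimum) and
# then cleans the resulting mask with the selected morphology or median filter.
def _example_mediated_compare():
    before = np.zeros((16, 16, 3), dtype='uint8')
    after = np.copy(before)
    after[4:8, 4:8, :] = 200  # hypothetical manipulated block
    mask, info = mediatedCompare(before, after)
    return mask, info['threshold']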
def getExifDimensionsFromData(exif_meta, crop=False):
heights = ['Cropped Image Height', 'AF Image Height', 'Image Height', 'Exif Image Height', ] if crop else [
'Image Height', 'Exif Image Height']
widths = ['Cropped Image Width', 'AF Image Width', 'Image Width', 'Exif Image Width', ] if crop else ['Image Width',
'Exif Image Width']
height_selections = [(exif_meta[h] if h in exif_meta else None) for h in heights]
width_selections = [(exif_meta[w] if w in exif_meta else None) for w in widths]
if 'png:IHDR.width,height' in exif_meta:
try:
w, h = [int(x.strip()) for x in exif_meta['png:IHDR.width,height'].split(',')]
height_selections.append(h)
width_selections.append(w)
except:
pass
return [(int(height_selections[p]), int(width_selections[p]))
for p in range(len(width_selections)) if
height_selections[p] is not None and width_selections[p] is not None]
def getExifDimensions(filename, crop=False):
from maskgen import exif
    return getExifDimensionsFromData(exif.getexif(filename), crop=crop)
def convertCompare(img1, img2, arguments=dict()):
analysis = {}
if 'Image Rotated' in arguments and arguments['Image Rotated'] == 'yes':
if 'source filename' in arguments:
            orientation = exif.getOrientationFromExif(arguments['source filename'])
            analysis.update(exif.rotateAnalysis(orientation))
            img1 = exif.rotateAccordingToExif(img1, orientation, counter=True)
else:
# assumes crop, but this approach should be improved to use HOG comparisons
# since some of these conversions occur with Raw images
rotation, mask = __findRotation(img1, img2, [0, 90, 180, 270])
analysis.update({'rotation': rotation})
return 255 - mask, analysis
if 'source filename' in arguments and img1.shape != img2.shape:
# see if there is crop information in exif
dims_crop = getExifDimensions(arguments['source filename'], crop=True)
dims = getExifDimensions(arguments['source filename'], crop=False)
if len(dims_crop) > 0 and len(dims) > 0 and dims_crop[0] != dims[0]:
analysis['Crop'] = 'yes'
if img1.shape != img2.shape:
diff_shape = (int(img1.shape[0] - img2.shape[0]) / 2, int(img1.shape[1] - img2.shape[1]) / 2)
#keep in mind that alterMask, used for composite generation, assumes 'crop' occurs first, followed
# by final adjustments for size
if 'location' not in arguments:
diff_shape= (max(1,diff_shape[0]),max(1,diff_shape[1]))
else:
diff_shape = toIntTuple(arguments['location'])
if getValue(arguments, 'Crop','yes') == 'no':
new_img1 = img1
else:
new_img1 = img1[diff_shape[0]:-diff_shape[0], diff_shape[1]:-diff_shape[1]]
new_img2 = cv2.resize(img2, (new_img1.shape[1], new_img1.shape[0]))
if getValue(arguments, 'Crop', 'yes') == 'yes':
analysis['location'] = str(diff_shape)
mask, a = __diffMask(new_img1, new_img2, False, args=arguments)
else:
mask, a = __diffMask(img1, img2, False, args=arguments)
analysis.update(a)
return mask, analysis
def __composeMask(img1_wrapper, img2_wrapper, invert, arguments=dict(), alternativeFunction=None, convertFunction=None):
"""
:param img1:
:param img2:
:param invert:
:param arguments:
:param alternativeFunction:
:param convertFunction:
:return:
@type img1_wrapper: ImageWrapper
@type img2_wrapper: ImageWrapper
@type arguments: dict
@rtype numpy.ndarray,dict
"""
img1, img2 = __alignChannels(img1_wrapper,
img2_wrapper,
convertFunction=convertFunction)
args = {}
args.update(arguments)
args['source filename'] = img1_wrapper.filename
args['target filename'] = img2_wrapper.filename
if alternativeFunction is not None:
try:
mask, analysis = alternativeFunction(img1, img2, arguments=args)
removeValue(analysis, 'arguments.source filename')
removeValue(analysis, 'arguments.target filename')
if mask is not None:
return mask if not invert else 255 - mask, analysis, None
except ValueError as e:
logging.getLogger('maskgen').error('Mask generation failure ' + str(e))
logging.getLogger('maskgen').info('Arguments ' + str(arguments))
mask = np.zeros(img1.shape, dtype=np.uint8)
analysis = {}
return abs(255 - mask).astype('uint8') if invert else mask, analysis, str(e)
# rotate image two if possible to compare back to image one.
# The mask is not perfect.
mask = None
error = None
rotation = float(arguments['rotation']) if 'rotation' in arguments else 0.0
analysis = {}
if abs(rotation) > 0.0001:
mask, analysis = __compareRotatedImage(rotation, img1, img2, arguments)
if sum(img1.shape) > sum(img2.shape):
mask, analysis = composeCropImageMask(img1, img2)
if sum(img1.shape) < sum(img2.shape):
mask, analysis = __composeExpandImageMask(img1, img2)
if mask is None:
try:
if img1.shape != img2.shape and \
img1.shape[1] == img2.shape[0] and \
img1.shape[0] == img2.shape[1]:
arguments['Image Rotated'] = 'yes'
mask, analysis = convertCompare(img1, img2, arguments)
else:
mask, analysis = __diffMask(img1, img2, False, args=arguments)
except Exception as e:
logging.getLogger('maskgen').error('Mask generation failure ' + str(e))
logging.getLogger('maskgen').info('Arguments ' + str(arguments))
mask = np.zeros(img1.shape, dtype=np.uint8)
analysis = {}
return abs(255 - mask).astype('uint8') if invert else mask, analysis, error
def __alignShape(im, shape):
x = min(shape[0], im.shape[0])
y = min(shape[1], im.shape[1])
z = np.zeros(shape)
for d in range(min(shape[2], im.shape[2])):
z[0:x, 0:y, d] = im[0:x, 0:y, d]
return z
def __resize(img, dimensions):
if img.shape[0] != dimensions[0]:
diff = abs(img.shape[0] - dimensions[0])
img = np.concatenate((np.zeros((diff / 2, img.shape[1])), img), axis=0)
img = np.concatenate((img, np.zeros((diff - (diff / 2), img.shape[1]))), axis=0)
if img.shape[1] != dimensions[1]:
diff = abs(img.shape[1] - dimensions[1])
img = np.concatenate((np.zeros((img.shape[0], diff / 2)), img), axis=1)
img = np.concatenate((img, np.zeros((img.shape[0], diff - (diff / 2)))), axis=1)
return img
def rotateImage(angle, pivot, img):
padX = [img.shape[1] - pivot[1], pivot[1]]
padY = [img.shape[0] - pivot[0], pivot[0]]
imgP = np.pad(img, [padY, padX], 'constant')
if abs(angle) % 90 == 0:
imgR = np.rot90(imgP, int(angle / 90)).astype('uint8')
else:
try:
imgR = np.asarray(Image.fromarray(imgP).rotate(angle))
except:
imgR = ndimage.rotate(imgP, angle, cval=0, reshape=False, mode='constant').astype('uint8')
return imgR[padY[0]: -padY[1], padX[0]: -padX[1]]
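# Minimal sketch (hypothetical values): rotateImage pads the image around the pivot,
# rotates, and crops back, so the result keeps the original shape.
def _example_rotate_image():
    demo = np.zeros((6, 6), dtype='uint8')
    demo[1:3, 1:3] = 255
    return rotateImage(90, (3, 3), demo).shape  # (6, 6)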
def __localrotateImage(rotation, mask, img, expectedDims=None, cval=0):
maskInverted = ImageWrapper(np.asarray(mask)).invert().to_array()
maskInverted[maskInverted > 0] = 1
targetDims = img.shape
if expectedDims is not None:
targetDims = expectedDims
x0,y0,w,h = widthandheight(maskInverted)
if w == 0 or h == 0:
return img
h = min(h+1, targetDims[0])
w = min(w+1, targetDims[1])
subImg = img[y0:(y0+h),x0:(x0+w)]
center = (h /2, w / 2)
M = cv2.getRotationMatrix2D(center, rotation, 1.0)
rotatedSubMask = cv2.warpAffine(subImg*maskInverted[y0:(y0+h),x0:(x0+w)], M, (w,h),flags=cv2api.cv2api_delegate.inter_linear)
rotatedMask = np.zeros(mask.shape)
rotatedMask[y0:y0+h,x0:x0+w] = rotatedSubMask
maskAltered = np.copy(mask)
maskAltered[maskAltered > 0] = 1
return (rotatedMask + img * maskAltered).astype('uint8')
def __rotateImage(rotation, img, expectedDims=None, cval=0):
expectedDims = expectedDims if expectedDims is not None else (img.shape[0], img.shape[1])
rotNorm = int(rotation / 90) if (rotation % 90) == 0 else None
rotNorm = rotNorm if rotNorm is None or rotNorm >= 0 else (4 + rotNorm)
npRotation = rotNorm is not None and img.shape == (expectedDims[1], expectedDims[0])
if npRotation:
res = np.rot90(img, rotNorm)
else:
res = ndimage.interpolation.rotate(img, rotation, cval=cval, reshape=(img.shape != expectedDims), order=0)
if (res.shape[0],res.shape[1]) != expectedDims:
res = cv2.resize(res,(expectedDims[1],expectedDims[0]))
return res
def __compareRotatedImage(rotation, img1, img2, arguments):
if rotation != 0:
res = __rotateImage(rotation, img1, expectedDims=img2.shape, cval=img2[0, 0])
else:
res = img1
mask, analysis = __composeExpandImageMask(res, img2) if res.shape != img2.shape else __diffMask(res,
img2,
False,
args=arguments)
if rotation != 0:
res = __rotateImage(-rotation, mask, expectedDims=img1.shape, cval=255)
else:
res = mask
return res, analysis
def __findRotation(img1, img2, range):
best = 0
r = None
best_mask = None
for rotation in range:
res, analysis = __compareRotatedImage(rotation, img1, img2, {})
c = np.sum(res)
if c > best or best_mask is None:
best = c
best_mask = res
r = rotation
return r, best_mask
# res = __resize(mask,(max(img2.shape[0],img1.shape[0]), max(img2.shape[1],img1.shape[1])))
# res[res<0.00001] = 0
# res[res>0] = 255
# # now crop out the rotation difference, to make sure the original image is not modified
# if img1.shape != res.shape:
# diff = (res.shape[0]-img1.shape[0], res.shape[1]-img1.shape[1])
# diff = (diff[0] if diff[0] > 0 else 0, diff[1] if diff[1] > 0 else 0)
# res = res[diff[0]/2:res.shape[0]-((diff[0]/2) -diff[0]),diff[1]/2:res.shape[1]-((diff[1]/2) - diff[1])]
def extractAlpha(rawimg1, rawimg2):
"""
    If rawimg2 has an alpha channel, then pixels with a high alpha value are the pixels that did not change.
:param rawimg1:
:param rawimg2:
:return:
"""
img2_array = rawimg2.to_array()
img1_array = rawimg1.to_array()
ii16 = np.iinfo(np.uint16)
if len(img2_array.shape) == 3 and img2_array.shape[2] == 4:
img2_array = img2_array[:, :, 3]
if len(img2_array.shape) == 2:
all = np.zeros((img2_array.shape[0], img2_array.shape[1])).astype('uint16')
all[img2_array == 0] = ii16.max
return np.zeros((img1_array.shape[0], img1_array.shape[1])).astype('uint16'), all
return rawimg1.to_16BitGray().to_array(), rawimg2.to_16BitGray().to_array()
def convert16bitcolor(rawimg1, rawimg2):
return rawimg1.to_array().astype('int16'), rawimg2.to_array().astype('int16')
def __alignChannels(rawimg1, rawimg2, convertFunction=None):
"""
:param rawimg1:
:param rawimg2:
:param equalize_colors:
:return:
@type rawimg1: ImageWrapper
@type rawimg2: ImageWrapper
"""
if convertFunction is not None:
return convertFunction(rawimg1, rawimg2)
return rawimg1.to_16BitGray().to_array(), rawimg2.to_16BitGray().to_array()
def __findBestMatch(big, small):
""" Return a tuple describing the bounding box (xl,xh,yl,yh) with the most
likely match to the small image.
"""
if len(small.shape) == 3 and len(big.shape) == 3 and \
small.shape[2] == 4 and big.shape[2] == 3:
newsmall = np.zeros((small.shape[0], small.shape[1], 3))
newsmall[:, :, :] = small[:, :, 0:3]
small = newsmall
if np.any(np.asarray([(x[1] - x[0]) for x in zip(small.shape, big.shape)]) < 0):
return None
result = cv2.matchTemplate(big.astype('float32'), small.astype('float32'), cv2api.cv2api_delegate.tm_sqdiff_normed)
mn, _, mnLoc, _ = cv2.minMaxLoc(result)
result_tuple = (mnLoc[1], mnLoc[0], mnLoc[1] + small.shape[0], mnLoc[0] + small.shape[1])
if result_tuple[2] > big.shape[0] or result_tuple[3] > big.shape[1]:
return None
return result_tuple
def bm(X, patch):
from sklearn.metrics import mean_absolute_error
bv = 999999.0
bp = (0, 0)
for i in range(X.shape[0] - patch.shape[0]):
for j in range(X.shape[1] - patch.shape[1]):
v = mean_absolute_error(X[i:i + patch.shape[0], j:j + patch.shape[1]], patch)
if v < bv:
bv = v
bp = (i, j)
return bp, bv
def composeCropImageMask(img1, img2, location=None):
""" Return a masking where img1 is bigger than img2 and
img2 is likely a crop of img1.
images are 16 bit unnsigned or floating point.
@return change mask aligned to in img1 dimensions, dictionary of analysis keys
@type img1: np.array
@type img2: np.array
"""
analysis = {}
analysis['location'] = '(0,0)'
if location is not None:
matched_tuple = (location[0],location[1],img2.shape[0]+location[0],img2.shape[1]+location[1])
else:
matched_tuple = __findBestMatch(img1, img2)
if matched_tuple is not None:
diffIm = np.zeros(img1.shape).astype(img1.dtype)
diffIm[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]] = img2
analysis['location'] = str((int(matched_tuple[0]), int(matched_tuple[1])))
dst = np.abs(img1 - diffIm)
gray_image = np.zeros(img1.shape).astype('uint8')
gray_image[dst > 0.0001] = 255
mask = gray_image
for k, v in img_analytics(img1, diffIm, mask=mask).iteritems():
analysis[k] = v
else:
mask = np.ones(img1.shape) * 255
return abs(255 - mask).astype('uint8'), analysis
def composeCloneMask(changemask, startimage, finalimage):
"""
:param changemask:
:param startimage:
:param finalimage:
:return:
@type changemask: ImageWrapper
@type startimage: ImageWrapper
@type finalimage: ImageWrapper
"""
mask = np.asarray(changemask.invert())
start_image_array = np.array(startimage)
final_image_array = np.array(finalimage)
newmask = np.zeros(start_image_array.shape).astype('uint8')
try:
contours, hierarchy = cv2api.findContours(np.copy(mask), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
try:
cnt = contours[i]
x, y, w, h = cv2.boundingRect(cnt)
if w <= 2 or h <= 2:
continue
final_image_subarray = final_image_array[y:y + h, x:x + w]
for i in range(final_image_subarray.shape[2]):
final_image_subarray[:, :, i] = final_image_subarray[:, :, i] * (mask[y:y + h, x:x + w] / 255)
matched_tuple = __findBestMatch(start_image_array, final_image_subarray)
if matched_tuple is not None:
newmask[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]] = 255
except Exception as e:
logging.getLogger('maskgen').warning('Failed to compose clone mask: ' + str(e))
continue
except Exception as e:
return changemask.to_array()
return newmask
def __composeExpandImageMask(img1, img2):
""" Return a masking where img1 is smaller than img2 and
img2 contains img1.
"""
matched_tuple = __findBestMatch(img2, img1)
analysis = {}
if matched_tuple is not None:
diffIm = img2[matched_tuple[0]:matched_tuple[2], matched_tuple[1]:matched_tuple[3]]
dst = np.abs(img1 - diffIm)
analysis['location'] = str((int(matched_tuple[0]), int(matched_tuple[1])))
gray_image = np.zeros(img1.shape).astype('uint8')
gray_image[dst > 0.0001] = 255
mask = gray_image
for k, v in img_analytics(img1, diffIm, mask=mask).iteritems():
analysis[k] = v
else:
mask = np.ones(img1.shape) * 255
return abs(255 - mask).astype('uint8'), analysis
def __colorPSNR(z1, z2, size=None):
if size == 0:
return 0.0
d = (z1 - z2) ** 2
sse = np.sum(d)
size = float(reduce(lambda x, y: x * y, d.shape)) if size is None else float(size)
mse = float(sse) / size
return 0.0 if mse == 0.0 else 20.0 * math.log10(255.0 / math.sqrt(mse))
def sizeDiff(z1, z2):
"""
z1 and z2 are expected to be PIL images
"""
    # size order is inverted: PIL Image size is (width, height), the opposite of a numpy array shape
return str((int(z2.size[1] - z1.size[1]), int(z2.size[0] - z1.size[0])))
def invertMask(mask):
return mask.invert()
def convertToMask(im):
"""
    Takes an image and produces a mask where all black areas are white
"""
return im.to_mask()
def __checkInterpolation(val):
    validVals = ['nearest', 'lanczos', 'bilinear', 'bicubic', 'cubic']
return val if val in validVals else 'nearest'
def applyMask(image, mask, value=0):
if mask.shape != image.shape:
mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
image = np.copy(image)
image[mask == 0] = value
return image
def carveMask(image, mask, expectedSize):
"""
Trim a mask after seam carving
:param image:
:param mask:
:param expectedSize:
:return:
"""
newimage = np.zeros(expectedSize).astype('uint8')
if expectedSize[0] == mask.shape[0]:
for x in range(expectedSize[0]):
topaste = image[x, mask[x, :] == 255]
if (len(topaste)) <= newimage.shape[1]:
newimage[x, 0:len(topaste)] = topaste
else:
newimage[x, :] = topaste[0:len(topaste)]
elif expectedSize[1] == mask.shape[1]:
for y in range(expectedSize[1]):
topaste = image[mask[:, y] == 255, y]
if (len(topaste)) <= newimage.shape[0]:
newimage[0:len(topaste), y] = topaste
else:
newimage[:, y] = topaste[0:len(topaste)]
else:
return applyMask(image, mask)
return newimage
def alterMask(compositeMask,
edgeMask,
rotation=0.0,
targetShape=(0, 0),
interpolation='nearest',
location=(0, 0),
transformMatrix=None,
flip=None,
crop=False,
cut=False):
res = compositeMask
# rotation may change the shape
# transforms typical are created for local operations (not entire image)
if location != (0, 0) or crop:
if targetShape != res.shape:
# inverse crop
newRes = np.zeros(targetShape).astype('uint8')
upperBound = (min(res.shape[0] + location[0], newRes.shape[0]),
                          min(res.shape[1] + location[1], newRes.shape[1]))
newRes[location[0]:upperBound[0], location[1]:upperBound[1]] = res[0:(upperBound[0] - location[0]),
0:(upperBound[1] - location[1])]
res = newRes
else:
upperBound = (min(res.shape[0], targetShape[0] + location[0]),
min(res.shape[1], targetShape[1] + location[1]))
res = res[location[0]:upperBound[0], location[1]:upperBound[1]]
if transformMatrix is not None and not cut and flip is None:
res = applyTransformToComposite(compositeMask, edgeMask, transformMatrix)
elif abs(rotation) > 0.001:
if targetShape != res.shape or abs(rotation) % 90 < 0.001:
res = __rotateImage(rotation, compositeMask,
expectedDims=targetShape,
cval=0)
else:
res = applyRotateToComposite(rotation, res,
edgeMask,
targetShape)
# if transform matrix provided and alternate path is taken above
if flip is not None:
res = applyFlipComposite(res, edgeMask, flip)
if cut:
res = applyMask(res, edgeMask)
if targetShape != res.shape:
res = applyResizeComposite(res, targetShape)
return res
def alterReverseMask(donorMask, edgeMask, rotation=0.0, location=(0, 0),
transformMatrix=None, flip=None, crop=False, cut=False, targetShape=None):
res = donorMask
    # if we are cutting, then we do not want to use the edge mask as the mask for transformation.
# see the cut section below, where the transform occurs directly on the mask
# this occurs in donor cases
if ((location != (0, 0) or crop) and not cut):
if targetShape != donorMask.shape:
# inverse crop
upperBound = (min(res.shape[0], targetShape[0] + location[0]),
min(res.shape[1], targetShape[1] + location[1]))
res = res[location[0]:upperBound[0], location[1]:upperBound[1]]
else:
newRes = np.zeros(targetShape).astype('uint8')
upperBound = (res.shape[0] + location[0], res.shape[1] + location[1])
newRes[location[0]:upperBound[0], location[1]:upperBound[1]] = res[0:(upperBound[0] - location[0]),
0:(upperBound[1] - location[1])]
res = newRes
if transformMatrix is not None and not cut and flip is None:
res = applyTransform(res, mask=edgeMask, transform_matrix=transformMatrix, invert=True,
returnRaw=False)
elif abs(rotation) > 0.001:
res = __rotateImage(-rotation, res, expectedDims=targetShape, cval=0)
elif flip is not None:
res = applyFlipComposite(res, edgeMask, flip)
if cut:
# res is the donor mask
# edgeMask may be the overriding mask from a PasteSplice, thus in the same shape
        # The transform will convert to the target mask size of the donor path.
res = applyMask(res, edgeMask)
if transformMatrix is not None:
res = cv2.warpPerspective(res, transformMatrix, (targetShape[1], targetShape[0]),
flags=cv2.WARP_INVERSE_MAP,
borderMode=cv2.BORDER_CONSTANT, borderValue=0).astype('uint8')
    # need to use the target size since the expected size does not align with the donor paths.
if targetShape != res.shape:
res = cv2.resize(res, (targetShape[1], targetShape[0]))
return res
def __toMask(im):
"""
Performs same functionality as convertToMask, but takes and returns np array
"""
if len(im.shape) < 3:
return im
imGray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
gray_image = np.ones(imGray.shape).astype('uint8')
gray_image[imGray < 255] = 0
gray_image *= 255
if im.shape[2] == 4:
gray_image[im[:, :, 3] == 0] = 255
return gray_image
def mergeColorMask(compositeMaskArray, newMaskArray):
matches = np.any(newMaskArray != [255, 255, 255], axis=2)
compositeMaskArray[matches] = newMaskArray[matches]
return compositeMaskArray
def mergeMask(compositeMask, newMask, level=0):
if compositeMask.shape != newMask.shape:
compositeMask = cv2.resize(compositeMask, (newMask.shape[1], newMask.shape[0]))
newMask = ImageWrapper(newMask).to_mask().to_array()
else:
compositeMask = np.copy(compositeMask)
compositeMask[newMask == 0] = level
return compositeMask
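# Minimal sketch (hypothetical level): mergeMask stamps a new manipulation into the
# composite by writing the given level wherever the new mask is black (0 = changed).
def _example_merge_mask():
    composite = np.zeros((4, 4), dtype='uint8')
    new_mask = np.ones((4, 4), dtype='uint8') * 255
    new_mask[1:3, 1:3] = 0
    return mergeMask(composite, new_mask, level=2)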
def ssim(X, Y, MASK, **kwargs):
from scipy.ndimage import gaussian_filter
K1 = kwargs.pop('K1', 0.01)
R = kwargs.pop('R', 255)
K2 = kwargs.pop('K2', 0.03)
sigma = kwargs.pop('sigma', 1.5)
X = X.astype(np.float64)
Y = Y.astype(np.float64)
win_size = 1
cov_norm = 1.0 # population covariance to match Wang et. al. 2004
filter_func = gaussian_filter
filter_args = {'sigma': sigma}
# compute (weighted) means
ux = filter_func(X, **filter_args)
uy = filter_func(Y, **filter_args)
# compute (weighted) variances and covariances
uxx = filter_func(X * X, **filter_args)
uyy = filter_func(Y * Y, **filter_args)
uxy = filter_func(X * Y, **filter_args)
vx = cov_norm * (uxx - ux * ux)
vy = cov_norm * (uyy - uy * uy)
vxy = cov_norm * (uxy - ux * uy)
C1 = (K1 * R) ** 2
C2 = (K2 * R) ** 2
A1, A2, B1, B2 = ((2 * ux * uy + C1,
2 * vxy + C2,
ux ** 2 + uy ** 2 + C1,
vx + vy + C2))
D = B1 * B2
S = ((A1 * A2) / D) * MASK
# compute (weighted) mean of ssim
return S.mean()
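# Minimal usage sketch (hypothetical inputs): ssim above expects two single-channel arrays
# and a weighting MASK of the same shape; identical inputs with a mask of ones score 1.0.
def _example_ssim():
    img = np.random.randint(0, 255, (32, 32)).astype('float64')
    return ssim(img, img, np.ones((32, 32)))  # 1.0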
def img_analytics(z1, z2, mask=None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = {'psnr': __colorPSNR(z1, z2)}
if mask is not None:
mask = np.copy(mask)
mask[mask > 0] = 1
result.update({'local psnr': __colorPSNR(z1 * mask, z2 * mask, size=sumMask(mask))})
return result
def __diffMask(img1, img2, invert, args=None):
itype = np.iinfo(img1.dtype)
dst = np.abs(np.subtract(img1.astype('int32'), img2.astype('int32')))
gray_image = np.zeros(img1.shape).astype('uint8')
difference = float(args['tolerance']) if args is not None and 'tolerance' in args else 0.0001
difference = difference * (itype.max - itype.min)
gray_image[dst > difference] = 255
analysis = img_analytics(img1, img2, mask=gray_image)
return (gray_image if invert else (255 - gray_image)), analysis
def coordsFromString(value):
import re
value = re.sub('[\(\)\,]', ' ', value)
vals = [int(float(v)) for v in value.split(' ') if v != ' ' and v != '']
return tuple(vals)
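# Minimal sketch: coordsFromString tolerates parentheses and commas in location strings
# such as those stored in the analysis dictionaries above.
def _example_coords_from_string():
    return coordsFromString('(12, 34)')  # (12, 34)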
def fixTransparency(img):
return img.apply_transparency()
def dictDeepUpdate(aDictionary, aPartialDictionary):
for k, v in aPartialDictionary.iteritems():
if k in aDictionary and type(v) == dict:
dictDeepUpdate(aDictionary[k], v)
else:
aDictionary[k] = v
def grayToRGB(frame):
"""
project gray into Green
"""
result = np.zeros((frame.shape[0], frame.shape[1], 3))
if len(frame.shape) == 2:
result[:, :, 1] = frame
else:
summary = np.zeros((frame.shape[0], frame.shape[1]))
for d in range(frame.shape[2]):
summary[:, :] += frame[:, :, d]
summary[summary > 0] = 255
result[:, :, 1] = summary
return result.astype('uint8')
def composeVideoMaskName(maskprefix, starttime, suffix):
"""
:param maskprefix:
:param starttime:
:param suffix:
:return: A mask file name using the provided components
"""
if maskprefix.endswith('_mask_' + str(starttime)):
return maskprefix + '.' + suffix
return maskprefix + '_mask_' + str(starttime) + '.' + suffix
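# Minimal sketch (hypothetical prefix): composeVideoMaskName builds the per-segment mask
# file name used by the HDF5 and video mask writers below.
def _example_compose_video_mask_name():
    return composeVideoMaskName('scene1', 0, 'hdf5')  # 'scene1_mask_0.hdf5'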
def convertToVideo(filename, preferences=None, start_frame=None, start_time=0):
suffix = '.' + preferredSuffix(preferences=preferences)
fn = os.path.splitext(filename)[0] + (str(start_frame) if start_frame is not None else '') + suffix
if os.path.exists(fn):
if os.stat(filename).st_mtime < os.stat(fn).st_mtime:
return fn
else:
os.remove(fn)
reader = GrayBlockReader(filename,
convert=True,
preferences=preferences,
start_frame=start_frame,
start_time=start_time)
while True:
mask = reader.read()
if mask is None:
break
fn = reader.writer.filename
return fn
executions = {}
def cancel_execute(worker_func):
if worker_func in executions:
executions[worker_func].cancel()
def execute_every(interval, worker_func, start=True, **kwargs):
executions[worker_func] = threading.Timer(
interval,
execute_every, [interval, worker_func, False], kwargs)
executions[worker_func].start()
if not start:
worker_func(**kwargs)
class GrayBlockFrameFirstLayout():
name = 'framefirst'
@staticmethod
def is_end(reader):
return reader.pos >= reader.dset.shape[0]
@staticmethod
def count(reader):
return reader.dset.shape[0]
@staticmethod
def get_frame(reader):
return reader.dset[reader.pos]
@staticmethod
def initial_shape(shape, size = None):
return (size,) + shape
@staticmethod
def resize(shape, writer):
if writer.dset.shape[0] < (writer.pos + 1):
writer.dset.resize((writer.pos + 1,) + writer.dset.shape[1:])
@staticmethod
def set(writer,mask):
writer.dset[ writer.pos] = mask
class GrayBlockFrameLastLayout():
name = 'framelast'
@staticmethod
def is_end(reader):
return reader.pos >= reader.dset.shape[-1]
@staticmethod
def count(reader):
return reader.dset.shape[-1]
@staticmethod
def get_frame(reader):
return reader.dset[:, :, reader.pos]
@staticmethod
def initial_shape(shape, size=None):
return (shape)[:-1] + (size,)
@staticmethod
def resize(shape, writer):
if writer.dset.shape[-1] < (writer.pos + 1):
writer.dset.resize((shape)[:-1] + (writer.pos + 1,))
@staticmethod
def set(writer,mask):
if len(writer.dset.shape) == 2:
writer.dset[:, :, writer.pos] = mask
else:
writer.dset[:, :, :, writer.pos] = mask
class GrayBlockReader:
def __init__(self, filename,
convert=False,
preferences=None,
start_time=0,
start_frame=None,
end_frame=None):
import h5py
self.writer = None
self.start_frame = start_frame
self.start_time = start_time
self.preferences = preferences
self.filename = filename
self.h_file = h5py.File(filename, 'r')
grp_names = self.h_file.keys()
if 'masks' in grp_names:
self.grps = ['masks']
self.setter = OldFormatGroupSetter()
else:
self.setter = NewFormatGroupSetter()
self.grps = [str(x) for x in sorted([int(x) for x in grp_names])]
# group selection
self.grp_pos = 0
# frame selection in group (relative to start of group)
self.pos = 0
# the smart numpy array
self.dset = None
# where to stop
self.end_frame = end_frame
self.fps = self.h_file.attrs['fps']
self.mask_format = MASKFORMATS[
self.h_file.attrs['mask_format'] if 'mask_format' in self.h_file.attrs else GrayBlockFrameFirstLayout.name]
self.setter.set_group(self, start_time=start_time, start_frame=start_frame, end_frame=end_frame)
self.convert = convert
self.writer = GrayFrameWriter(os.path.splitext(filename)[0],
self.fps,
preferences=preferences) if self.convert else DummyWriter()
def create_writer(self):
"""
:return:
@rtype: GrayBlockWriter
"""
import time
dir = os.path.dirname(self.filename)
prefix = os.path.join(dir,os.path.basename(self.h_file.attrs['prefix'])) if 'prefix' in self.h_file.attrs else os.path.splitext(self.filename)[0][:48]
return GrayBlockWriter(prefix + str(time.clock()), self.fps)
def set_group(self, start_frame=None, start_time=1, end_frame=None):
self.setter.set_group(self, start_frame=start_frame,start_time=start_time, end_frame=end_frame)
def current_frame_time(self):
return self.start_time + (self.pos * (1000 / self.fps))
def current_frame(self):
return self.start_frame + self.pos
def length(self):
return self.mask_format.count(self)
def read(self):
if self.dset is None:
return None
if self.end_frame is not None and self.current_frame() == self.end_frame + 1:
return None
if self.mask_format.is_end(self):
self.grp_pos+=1
if self.grp_pos < len(self.grps):
self.setter.select_group(self, self.grp_pos)
else:
self.dset = None
return None
mask = self.mask_format.get_frame(self)
mask = mask.astype('uint8')
self.writer.write(mask, self.start_frame + self.pos, self.current_frame_time())
self.pos += 1
return mask
def release(self):
pass
def close(self):
self.h_file.close()
if self.writer is not None:
self.writer.close()
MASKFORMATS = {GrayBlockFrameFirstLayout.name:GrayBlockFrameFirstLayout(),
GrayBlockFrameLastLayout.name:GrayBlockFrameLastLayout()}
class GrayBlockReaderManager:
def __init__(self, reader_type= GrayBlockReader):
self.reader_type = reader_type
self.reader = None
self.filename = None
def create_reader(self, filename,
start_frame=None,
start_time=0,
end_frame=None):
"""
:param filename:
:param start_frame:
:param start_time:
:param end_frame: optional stopping point
:return:
@type filename: str
@rtype: GrayBlockReader
"""
if filename == self.filename:
self.reader.set_group(start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
else:
if self.reader is not None:
self.reader.close()
self.filename = filename
self.reader = self.reader_type(filename,
start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
return self.reader
def close(self):
if self.reader is not None:
self.reader.close()
self.reader = None
class GrayBlockWriterManager:
def __init__(self):
self.writer = None
def create_writer(self, reader):
"""
:param reader:
:return:
@type reader: GrayBlockReader
@rtype: GrayBlockWriter
"""
if self.writer is not None:
return self.writer
self.writer= reader.create_writer()
return self.writer
def close(self):
if self.writer is not None:
self.writer.close()
self.writer = None
class NewFormatGroupSetter:
"""
Multiple Mask Segment per HDF5 File, one in each group.
"""
@staticmethod
def set_group(reader, start_frame=None, start_time=1,end_frame=None):
"""
:param start_frame:
:param start_time:
:return:
@type reader: GrayBlockReader
"""
grp_pos = 0
if start_frame is not None:
pos = len([x for x in reader.grps if int(x) <= start_frame]) - 1
grp_pos = pos if pos > 0 else grp_pos
NewFormatGroupSetter.select_group(reader,
grp_pos,
start_frame=start_frame,
start_time=start_time,
end_frame=end_frame)
@staticmethod
def select_group(reader,
grp_pos,
start_frame=None,
start_time=0,
end_frame=None):
"""
:param reader:
:param grp_no:
:param start_frame:
:param start_time:
:param end_frame: determine end frame
:return:
"""
reader.grp_pos = grp_pos
reader.current_group = reader.h_file.get(reader.grps[grp_pos])
reader.dset = reader.current_group.get('masks')
reader.start_time = reader.current_group.attrs[
'start_time'] if 'start_time' in reader.current_group.attrs else start_time
reader.start_frame = reader.current_group.attrs[
'start_frame'] if 'start_frame' in reader.current_group.attrs else start_frame
end_frame = reader.current_group.attrs[
'end_frame'] if 'end_frame' in reader.current_group.attrs and end_frame is None else end_frame
reader.end_frame = end_frame if end_frame is not None else None
reader.pos = 0 if start_frame is None else reader.start_frame - start_frame
class OldFormatGroupSetter:
"""
One Mask Segment per HDF5 File.
"""
@staticmethod
def set_group(reader, start_frame=None, start_time=0, end_frame=None):
"""
:param start_frame:
:param start_time:
:return:
@type reader: GrayBlockReader
"""
reader.current_group = reader.h_file.get('masks')
reader.dset = reader.current_group.get('masks')
reader.start_time = reader.h_file.attrs[
'start_time'] if 'start_time' in reader.h_file.attrs else start_time
reader.start_frame = reader.h_file.attrs[
'start_frame'] if 'start_frame' in reader.h_file.attrs else start_frame
reader.pos = 0 if start_frame is None else reader.start_frame - start_frame
@staticmethod
def select_group(reader, grp_pos, start_frame=None, start_time=0,end_frame=None):
OldFormatGroupSetter.set_group(reader,start_frame=start_frame,start_time=start_time)
def compose_overlay_name(target_file="", link = tuple()):
path_tuple = os.path.split(target_file)
return os.path.join(path_tuple[0], path_tuple[1] + str(hash(link))[:5] + '_overlay.' + preferredSuffix())
class GrayBlockOverlayGenerator:
def __init__(self, locator, segments = [], target_file = None, output_file = ""):
from video_tools import get_frames_from_segment
self.target_file = target_file
self.output_file = output_file
        segments = [segment for segment in segments if segment.media_type == 'video' and segment.filename is not None]
self.segments = sorted(segments, key=lambda segment: segment.startframe)
self.segment_index = 0
self.segment = segments[self.segment_index]
self.readerManager = GrayBlockReaderManager()
self.reader = self.readerManager.create_reader(
filename=self.segment.filename,
start_time=self.segment.starttime,
start_frame=self.segment.startframe,
end_frame=self.segment.endframe)
self.overlay_mask_name = os.path.join(os.path.split(self.segment.filename)[0], '_overlay')
self.writer = GrayFrameOverlayWriter(
mask_prefix=self.overlay_mask_name,
fps=self.reader.fps)
self.last_frame = get_frames_from_segment(locator.getMaskSetForEntireVideo()[0])
def updateSegment(self):
self.segment_index += 1
self.segment = self.segments[self.segment_index]
self.reader = self.readerManager.create_reader(
filename=self.segment.filename,
start_time=self.segment.starttime,
start_frame=self.segment.startframe,
end_frame=self.segment.endframe)
def generate(self):
while self.writer.lastPos < self.last_frame:
frame_time = self.reader.current_frame_time()
frame_count = self.reader.current_frame()
mask = self.reader.read()
if mask is None:
if self.segment_index + 1 < len(self.segments):
self.updateSegment()
else:
frame_count = self.last_frame #write blanks for the rest
self.writer.write(mask, frame_count, frame_time)
self.writer.close()
self.readerManager.close()
ffmpeg_overlay(self.target_file, self.writer.filename, self.output_file)
try:
os.remove(self.writer.filename) #clean up the mask file, leave the finished overlay
except OSError:
pass
class DummyWriter:
def write(self, mask, mask_number, mask_time):
pass
def close(self):
pass
class GrayBlockWriter:
"""
Write Gray scale (Mask) images to a compressed block file
"""
def __init__(self, mask_prefix, fps, layout=GrayBlockFrameFirstLayout()):
self.fps = fps
self.dset = None
self.pos = 0
self.h_file = None
self.suffix = 'hdf5'
self.filename = None
self.mask_prefix = mask_prefix
self.mask_format = layout
self.last_frame = 1
self.last_time = 0
self.current_group = None
def write(self, mask, mask_time, frame_number):
import h5py
if self.current_group is not None and frame_number - self.last_frame > 1:
grp = self.current_group
grp.attrs['end_time'] = self.last_time
grp.attrs['end_frame'] = self.last_frame
self.current_group = None
if self.h_file is None:
self.filename = composeVideoMaskName(self.mask_prefix, mask_time, self.suffix)
logging.getLogger('maskgen').info('Writing to ' + self.filename)
if os.path.exists(self.filename):
os.remove(self.filename)
self.h_file = h5py.File(self.filename, 'w')
self.h_file.attrs['fps'] = self.fps
self.h_file.attrs['prefix'] = os.path.basename(self.mask_prefix)
self.h_file.attrs['mask_format'] = self.mask_format.name
self.current_group = None
if self.current_group is None:
self.current_group = self.h_file.create_group(str(frame_number))
grp = self.current_group
grp.attrs['start_time'] = mask_time
grp.attrs['start_frame'] = frame_number
self.dset = grp.create_dataset("masks",
self.mask_format.initial_shape(mask.shape, size=10),
compression="gzip",
chunks=True,
maxshape=self.mask_format.initial_shape(mask.shape))
self.pos = 0
self.mask_format.resize(mask.shape, self)
self.last_frame = frame_number
self.last_time = mask_time
self.mask_format.set(self, mask)
self.pos += 1
def get_file_name(self):
return self.filename
def close(self):
self.release()
def release(self):
if self.current_group is not None:
self.current_group.attrs['end_time'] = self.last_time
self.current_group.attrs['end_frame'] = self.last_frame
self.current_group = None
self.dset = None
if self.h_file is not None:
self.h_file.close()
self.h_file = None
def preferredSuffix(preferences=None):
import sys
default_suffix = 'm4v'
if sys.platform.startswith('win'):
default_suffix = 'avi'
if sys.platform.startswith('linux'):
default_suffix = 'avi'
if preferences is not None:
t_suffix = getValue(preferences,'vid_suffix')
default_suffix = t_suffix if t_suffix is not None else default_suffix
return default_suffix
class GrayBlockFactory:
"""
Either build the Writer or the Validator
"""
def __init__(self, writer =None):
self.writer = writer
def __call__(self, name, fps):
return GrayBlockWriter(mask_prefix=name, fps=fps) if self.writer is None else self.writer
class GrayBlockValidator():
"""
Compare frames of two video masks to see if one is valid.
"""
def __init__(self, jt_mask_file, validation_function):
self.filename = jt_mask_file
self.failed_frames = []
self.manager = GrayBlockReaderManager()
self.validation_function = validation_function
self.manager.create_reader(jt_mask_file)
def write(self, mask, mask_time, frame_number):
while(self.manager.reader.current_frame() < frame_number):
self.manager.reader.read() #ffwd to where we want to be
if self.manager.reader.current_frame() == frame_number:
jt_mask = self.manager.reader.read()
if jt_mask is not None:
if not self.validation_function(jt_mask,mask):
self.failed_frames.append(frame_number)
def get_file_name(self):
return self.filename
class GrayFrameWriter:
"""
Write Gray scale (Mask) video images
"""
capOut = None
codec = 'AVC1'
suffix = 'm4v'
fourcc = None
filename = None
fps = 0
mask_prefix = None
def __init__(self, mask_prefix, fps, preferences=None):
import sys
self.fps = fps
self.mask_prefix = mask_prefix
self.suffix = preferredSuffix(preferences=preferences)
t_codec = None
if preferences is not None and 'vid_codec' in preferences:
t_codec = preferences['vid_codec']
if t_codec is None and sys.platform.startswith('win'):
self.codec = 'XVID'
elif t_codec is None and sys.platform.startswith('linux'):
self.codec = 'XVID'
elif t_codec is not None:
self.codec = str(t_codec)
        self.fourcc = cv2api.cv2api_delegate.get_fourcc(self.codec) if self.codec != 'raw' else 0
def write(self, mask, mask_number, mask_time):
if self.capOut is None:
self.filename = composeVideoMaskName(self.mask_prefix, mask_time, self.suffix)
logging.getLogger('maskgen').info('writing using fourcc ' + str(self.fourcc))
if os.path.exists(unicode(os.path.abspath(self.filename))):
os.remove(unicode(os.path.abspath(self.filename)))
self.capOut = cv2.VideoWriter(unicode(os.path.abspath(self.filename)),
self.fourcc,
self.fps,
(mask.shape[1], mask.shape[0]),
len(mask.shape) > 2 and mask.shape[2] > 1)
if cv2.__version__.startswith('2.4.11'):
mask = grayToRGB(mask)
self.capOut.write(mask)
def close(self):
if self.capOut is not None:
self.capOut.release()
self.capOut = None
def release(self):
self.close()
class GrayFrameOverlayWriter(GrayFrameWriter):
def __init__(self, mask_prefix = '', fps = 30/1, preferences = None):
GrayFrameWriter.__init__(self, mask_prefix=mask_prefix, fps=fps, preferences = preferences)
self.lastPos = 0
self.blankMask = None
def write(self, mask, mask_number, mask_time):
if self.blankMask is None:
self.blankMask = np.ones((mask.shape[0], mask.shape[1]), dtype=np.uint8) * 255
frames_to_write = mask_number - self.lastPos #write all the frames up to and including the mask frame
for i in range(1,frames_to_write+1):
frame_num = self.lastPos + i
mask_time = frame_num * 1000.0 / self.fps #refigure time for the frame we actually write
GrayFrameWriter.write(self,
mask=mask if frame_num == mask_number and mask is not None else self.blankMask,
mask_number=frame_num,
mask_time=mask_time)
self.lastPos = mask_number
def widthandheight(img):
a = np.where(img != 0)
if len(a[0]) == 0:
return 0, 0, 0, 0
bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])
h, w = bbox[1] - bbox[0], bbox[3] - bbox[2]
return bbox[2], bbox[0], w, h
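# Minimal sketch (hypothetical mask): widthandheight returns (x, y, w, h) for the bounding
# box of the non-zero region; place_in_image below uses it to position the patch.
def _example_width_and_height():
    demo_mask = np.zeros((10, 10), dtype='uint8')
    demo_mask[2:5, 3:8] = 255
    return widthandheight(demo_mask)  # (3, 2, 4, 2)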
def place_in_image(mask, image_to_place, image_to_cover, placement_center, rect=None):
x, y, w, h = widthandheight(mask)
if rect:
if w > rect[2]:
x = x + (w - rect[2]) / 2
w = rect[2]
if h > rect[3]:
y = y + (h - rect[3]) / 2
h = rect[3]
w += w % 2
h += h % 2
x_offset = int(placement_center[0]) - int(math.floor(w / 2))
y_offset = int(placement_center[1]) - int(math.floor(h / 2))
if y_offset < 0:
return None
if x_offset < 0:
return None
image_to_cover = np.copy(image_to_cover)
flipped_mask = 255 - mask
for c in range(0, 3):
image_to_cover[y_offset:y_offset + h, x_offset:x_offset + w, c] = \
image_to_cover[y_offset:y_offset + h, x_offset:x_offset + w, c] * \
(flipped_mask[y:y + h, x:x + w] / 255) + \
image_to_place[y:y + h, x:x + w, c] * \
(mask[y:y + h, x:x + w] / 255)
return image_to_cover
def selfVideoTest():
logging.getLogger('maskgen').info('Checking opencv and ffmpeg, this may take a minute.')
writer = GrayBlockWriter('test_ts_gw', 29.97002997)
mask_set = list()
for i in range(255):
mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
mask_set.append(mask)
        writer.write(mask, (i + 1) * 33.3666666667, i + 1)
writer.close()
fn = writer.get_file_name()
vidfn = convertToVideo(fn)
if not os.path.exists(vidfn):
return 'Video Writing Failed'
try:
size = openImage(vidfn, getMilliSecondsAndFrameCount('00:00:01')).size
if size != (1920, 1090):
return 'Video Writing Failed: Frame Size inconsistent'
except:
return 'Video Writing Failed'
return None
def dateTimeStampCompare(v1, v2):
def get_defaults(source):
exifdata = maskgen.exif.getexif(source)
rd = {}
for e in exifdata:
if "date" in str(e).lower() or "time" in str(e).lower():
rd[e] = exifdata[e]
return rd
#date_time_stamp = exifdata['Create Date'] if 'Create Date' in exifdata else exifdata['File Creation Date/Time']
stamp1 = get_defaults(v1)
rgexdict = {}
for e in stamp1:
st = stamp1[e]
rgexf = "\\A"
for x in st:
if x.isdigit():
rgexf += '[0-9]'
elif x.isalpha():
                rgexf += '[a-zA-Z]*'
else:
rgexf += x
rgexf+= "\\Z"
rgexdict[e] = rgexf
stamp2 = get_defaults(v2)
nonmatches = []
for e in stamp2:
if e in rgexdict:
mo = re.match(rgexdict[e],stamp2[e])
if mo is None:
nonmatches.append(e)
else:
pass
#nonmatches.append(e)
return nonmatches
```
#### File: maskgen/ui/plugin_builder.py
```python
import collections
import json
import os
import tkSimpleDialog
import maskgen.plugins
import maskgen.software_loader
from maskgen.group_filter import GroupOperationsLoader
from maskgen.ui.autocomplete_it import *
class PluginBuilder(tkSimpleDialog.Dialog):
def __init__(self, master, gopLoader):
"""
:param master:
:param gopLoader:
@type gopLoader: GroupOperationsLoader
"""
self.gopLoader = gopLoader
self.softwareLoader = maskgen.software_loader.SoftwareLoader()
self.sourcefiletype = 'image'
self.targetfiletype = 'image'
self.master = master
self.arguments = []
tkSimpleDialog.Dialog.__init__(self, master)
def body(self, master):
nameLabel = Label(master, text='Plugin Name: ')
nameLabel.grid(row=0, column=0)
self.nameEntry = Entry(master, width=40)
self.nameEntry.grid(row=0, column=1, sticky='EW')
descriptionLabel = Label(master, text='Description: ')
descriptionLabel.grid(row=1, column=0)
self.descriptionEntry = Text(master, width=40, height=3)
self.descriptionEntry.grid(row=1, column=1, sticky='EW')
cats = self.organizeOperationsByCategory()
catlist = list(cats.keys())
catlist.sort()
oplist = cats[catlist[0]] if len(cats) > 0 else []
self.opCatEntry = AutocompleteEntryInText(master, values=catlist, takefocus=False, width=40, state='readonly')
self.opNameEntry = AutocompleteEntryInText(master, values=oplist, takefocus=False, width=40, state='readonly')
self.softwareNameEntry = AutocompleteEntryInText(master, values=sorted(self.softwareLoader.get_names(self.sourcefiletype), key=str.lower), takefocus=False,
width=40,state='readonly')
self.softwareVersionEntry = AutocompleteEntryInText(master, values=self.softwareLoader.get_versions(self.softwareNameEntry.get(),software_type=self.sourcefiletype),
initialValue=self.softwareLoader.get_preferred_version(name=self.softwareNameEntry.get()), takefocus=False, width=40)
self.opCatEntry.bind("<Return>", self.newcategory)
self.opCatEntry.bind("<<ComboboxSelected>>", self.newcategory)
self.opNameEntry.bind("<Return>", self.newcommand)
self.opNameEntry.bind("<<ComboboxSelected>>", self.newcommand)
self.softwareNameEntry.bind("<Return>", self.newsoftware)
self.softwareNameEntry.bind("<<ComboboxSelected>>", self.newsoftware)
opCatLabel = Label(master, text='Operation Category: ')
opCatLabel.grid(row=2, column=0)
self.opCatEntry.grid(row=2, column=1, sticky='EW')
opNameLabel = Label(master, text='Operation Name: ')
opNameLabel.grid(row=3, column=0)
self.opNameEntry.grid(row=3, column=1, sticky='EW')
softwareNameLabel = Label(master, text='Software Name: ')
softwareNameLabel.grid(row=4, column=0)
self.softwareNameEntry.grid(row=4, column=1, sticky='EW')
softwareVersionLabel = Label(master, text='Software Version: ')
softwareVersionLabel.grid(row=5, column=0)
self.softwareVersionEntry.grid(row=5, column=1, sticky='EW')
# suffixLabel = Label(master, text='Suffix: ')
# suffixLabel.grid(row=6, column=0)
# self.suffixEntry = Entry(master, width=40)
# self.suffixEntry.grid(row=6, column=1, sticky='EW')
commandLabel1 = Label(master, text='Command (exactly as it would be typed in command line):')
commandLabel1.grid(row=7, column=0, columnspan=8)
self.commandEntry = Entry(master, width=40)
self.commandEntry.grid(row=8, column=0, columnspan=8, sticky='EW')
commandLabel2 = Label(master, text='Use \"{inputimage}\" and \"{outputimage}\" in place of input and output images, respectively.\n'
'If omitted, \"{inputimage}\" and \"{outputimage}\" will be appended to end of command.')
commandLabel2.grid(row=9, column=0, columnspan=8)
Label(master, text='Additional Arguments (optional):').grid(row=10)
self.argFrame = Frame(master)
self.argFrame.grid(row=11, column=0, columnspan=8)
self.add_argument_row(row=0, col=0, initialize=True)
def add_argument_row(self, row, col, initialize=False, event=None):
if initialize == False:
self.addArgButton.grid_forget()
Label(self.argFrame, text='Arg Name: ').grid(row=row, column=col)
argNameEntry = Entry(self.argFrame)
argNameEntry.grid(row=row, column=col+1, sticky='EW')
col+=2
Label(self.argFrame, text='Arg Type: ').grid(row=row, column=col)
typeBox = ttk.Combobox(self.argFrame, values=['String', 'ImageFile', 'XMPFile', 'Donor', 'Float', 'Int', 'List', 'YesNo', 'Time', 'Coordinates'])
typeBox.set('String')
typeBox.grid(row=row, column=col+1, sticky='EW')
col+=2
Label(self.argFrame, text='Default Value: ').grid(row=row, column=col)
defaultValueBox = Entry(self.argFrame)
defaultValueBox.grid(row=row, column=col+1, sticky='EW')
row+=1
col=0
Label(self.argFrame, text='Description: ').grid(row=row, column=col)
descriptionBox = Entry(self.argFrame)
descriptionBox.grid(row=row, column=col+1, sticky='EW')
col+=2
Label(self.argFrame, text='List Values: ').grid(row=row, column=col)
valuesBox = Entry(self.argFrame, state='disabled')
valuesBox.grid(row=row, column=col+1, sticky='EW')
typeBox.correspondingValues = valuesBox
typeBox.bind("<<ComboboxSelected>>", self.set_valuesbox_state)
col+=2
insertButton = Button(self.argFrame, text='Insert', command=lambda:self.insert_arg(argNameEntry))
insertButton.grid(row=row, column=col, columnspan=2, sticky='EW')
row+=1
col=0
ttk.Separator(self.argFrame, orient=HORIZONTAL).grid(row=row, column=col, columnspan=8, sticky='EW')
row+=1
col=0
self.addArgButton = Button(self.argFrame, text='Add another argument', command=lambda: self.add_argument_row(row=row, col=col))
self.addArgButton.grid(row=row, column=col, columnspan=2)
Fields = collections.namedtuple('Fields', 'argname, type, defaultvalue, description, values')
f = Fields(argname=argNameEntry, type=typeBox, defaultvalue=defaultValueBox, description=descriptionBox, values=valuesBox)
self.arguments.append(f)
def insert_arg(self, entry):
idx = self.commandEntry.index(INSERT)
currentCommand = self.commandEntry.get()
try:
if currentCommand[idx-1] != ' ':
self.commandEntry.insert(idx, ' ')
idx+=1
except IndexError:
pass
        if len(entry.get()) > 0:
            self.commandEntry.insert(idx, '{' + entry.get().replace(' ', '') + '}')
idx = self.commandEntry.index(INSERT)
currentCommand = self.commandEntry.get()
try:
if currentCommand[idx+1] != ' ':
self.commandEntry.insert(idx, ' ')
except IndexError:
pass
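    # Illustration (added): typing "angle" in an argument-name entry and pressing
    # Insert splices " {angle} " into the command line at the cursor position.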
def set_valuesbox_state(self, event=None):
if event is not None:
val = event.widget.get()
value_entry = event.widget.correspondingValues
if val == 'List':
value_entry.config(state='normal')
else:
value_entry.config(state='disabled')
def apply(self):
self.pluginName = self.nameEntry.get().replace(' ', '')
opName = self.opNameEntry.get()
opCat = self.opCatEntry.get()
description = self.descriptionEntry.get("1.0",END).strip()
softwareName = self.softwareNameEntry.get()
softwareVersion = self.softwareVersionEntry.get()
#suffix = self.suffixEntry.get()
command = self.commandEntry.get().split(' ')
if '{inputimage}' not in command:
command.append('{inputimage}')
if '{outputimage}' not in command:
command.append('{outputimage}')
platform = sys.platform
self.data = {"name": self.pluginName,
"operation": {
"name": opName,
"category": opCat,
"description": description,
"software": softwareName,
"version": softwareVersion,
"arguments": {},
"transitions": ['image.image']
},
#"suffix": suffix
"command": {
"default": command,
platform: command
}
}
self.export_arguments()
self.path = os.path.join('plugins', 'Custom', self.pluginName) + '.json'
# need to step up a directory to save the json
with open(os.path.join('.', self.path), 'w') as newJSON:
json.dump(self.data, newJSON, indent=4)
maskgen.plugins.loadPlugins().loadCustom(self.pluginName, self.path)
def cancel(self, event=None):
self.destroy()
def export_arguments(self):
for argument in self.arguments:
self.data['operation']['arguments'][argument.argname.get().replace(' ', '')] = {
'type':argument.type.get().lower(),
'defaultvalue':argument.defaultvalue.get(),
'description':argument.description.get(),
}
if argument.type.get() == 'List':
vals = argument.values.get().replace(', ', ',').split(',')
self.data['operation']['arguments'][argument.argname.get().replace(' ', '')]['values'] = vals
"""
the below functions are taken from the DescriptionCaptureDialog class in description_dialog.py
(much of the code in this class has been borrowed from here)
"""
def newcategory(self, event):
opByCat = self.organizeOperationsByCategory()
if self.opCatEntry.get() in opByCat:
oplist = opByCat[self.opCatEntry.get()]
self.opNameEntry.set_completion_list(oplist)
self.newcommand(event)
else:
self.opNameEntry.set_completion_list([])
def newcommand(self, event):
op = self.gopLoader.getOperationWithGroups(self.opNameEntry.get())
def organizeOperationsByCategory(self):
return self.gopLoader.getOperationsByCategoryWithGroups(self.sourcefiletype, self.targetfiletype)
def newsoftware(self, event):
sname = self.softwareNameEntry.get()
self.softwareVersionEntry.set_completion_list(self.softwareLoader.get_versions(sname,software_type=self.sourcefiletype),
initialValue=self.softwareLoader.get_preferred_version(name=sname))
def main():
maskgen.plugins.loadPlugins()
root = Tk()
root.withdraw()
d = PluginBuilder(root)
d.mainloop()
if __name__ == '__main__':
main()
```
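For reference, the plugin definition that `apply()` above writes to `plugins/Custom/<name>.json` has roughly the shape below; the operation, software, and command values are hypothetical placeholders, not real registry entries:

```python
# Sketch of a generated plugin definition (hypothetical values), expressed as
# the Python dict that apply() serializes with json.dump.
example_plugin = {
    "name": "MyBlurPlugin",
    "operation": {
        "name": "Blur",
        "category": "Filter",
        "description": "Gaussian blur applied via an external tool",
        "software": "SomeTool",
        "version": "1.0",
        "arguments": {
            "kernelsize": {"type": "int", "defaultvalue": "5", "description": "Blur kernel size"}
        },
        "transitions": ["image.image"]
    },
    "command": {
        "default": ["sometool", "--blur", "{kernelsize}", "{inputimage}", "{outputimage}"],
        "win32": ["sometool", "--blur", "{kernelsize}", "{inputimage}", "{outputimage}"]
    }
}
```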
#### File: maskgen/ui/QAExtreme.py
```python
import matplotlib
from maskgen.maskgen_loader import MaskGenLoader
from maskgen.ui.semantic_frame import SemanticFrame
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
matplotlib.use("TkAgg")
import logging
from matplotlib.figure import Figure
from Tkinter import *
import matplotlib.patches as mpatches
import ttk
import tkMessageBox
from PIL import ImageTk
from maskgen.support import getValue
from maskgen.tool_set import imageResizeRelative, openImage,get_username, GrayBlockOverlayGenerator, compose_overlay_name
import os
import numpy as np
import maskgen.qa_logic
from maskgen.video_tools import get_end_time_from_segment
import maskgen.tool_set
import random
import maskgen.scenario_model
from maskgen.services.probes import ProbeGenerator, DetermineTaskDesignation, fetch_qaData_designation, cleanup_temporary_files
import maskgen.validation
from maskgen.tool_set import openFile
import webbrowser
from maskgen.graph_meta_tools import MetaDataExtractor
class Chkbox:
def __init__(self, parent, dialog, label=None, command=None, value=False):
self.value = BooleanVar(value=value)
self.box = Checkbutton(parent, variable=self.value, command=dialog.check_ok if command is None else command)
self.label = label
def __nonzero__(self):
return self.value.get()
def set_value(self, value):
self.value.set(value=value)
def grid_info(self):
return self.box.grid_info()
def grid(self):
self.label.grid()
self.box.grid()
def grid_remove(self):
self.box.grid_remove()
self.label.grid_remove()
class CheckboxGroup:
"""
boxes: list of wrapped Checkboxes
condition: either 'all'- all checkboxes in the group must be true or 'any'- any true value will return true.
"""
    def __init__(self, boxes=None, condition='all'):
        self.boxes = boxes if boxes is not None else []
        self.condition = condition
def __nonzero__(self):
if len(self.boxes) == 0:
return True
if self.condition == 'any':
return any(bool(value) for value in self.boxes)
else:
return all(bool(value) for value in self.boxes)
def hide_group(self):
for ck in self.boxes:
ck.grid_remove()
def show_group(self):
for ck in self.boxes:
ck.grid()
def grid_info(self, index = -1):
"""
Get the grid_info of the checkbox at the index. default is last index
:return:
"""
return self.boxes[index].grid_info() if len(self.boxes) > 0 else {}
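    # Illustration (added): with condition='any' the group is truthy when at
    # least one wrapped Chkbox is checked, e.g.
    #   group = CheckboxGroup(boxes=[temporal_ck, spatial_ck], condition='any')
    #   bool(group)  # True once either review box is ticked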
class MannyPage(Frame):
"""
Displays mascot with instructions and status information on probe and QA page generation.
"""
checkboxes = CheckboxGroup()
manny_colors = [[155, 0, 0], [0, 155, 0], [0, 0, 155], [153, 76, 0], [96, 96, 96], [204, 204, 0], [160, 160, 160]]
def __init__(self, master):
Frame.__init__(self, master)
self.statusLabelText = StringVar()
self.statusLabelText.set('Probes Generating')
self.heading = Label(self, text="Welcome to the QA Wizard. Press Next to begin the QA Process or Quit to stop. This is "
"Manny; He is here to help you analyze the journal. The tool is currently generating the probes. "
"This could take a while. When the next button is enabled you may begin.",
wraplength=400)
self.heading.grid(column=0, row=0, rowspan=2, columnspan=2)
manny_color = maskgen.tool_set.get_icon('Manny_icon_color.jpg')
manny_mask = maskgen.tool_set.get_icon('Manny_icon_mask.jpg')
self.mannyFrame = Frame(self)
self.mannyFrame.grid(column=0, row=2, columnspan=2)
self.canvas = Canvas(self.mannyFrame, width=510, height=510)
self.canvas.pack()
manny_img = openImage(manny_color)
manny_img_mask = openImage(manny_mask).to_mask()
manny_img_mask = imageResizeRelative(manny_img_mask, (500, 500), manny_img_mask.size)
self.manny = ImageTk.PhotoImage(
imageResizeRelative(manny_img, (500, 500), manny_img.size).overlay(manny_img_mask,self.manny_colors[
random.randint(0, len(self.manny_colors) - 1)]).toPIL())
self.image_on_canvas = self.canvas.create_image(510 / 2, 510 / 2, image=self.manny, anchor=CENTER, tag='things')
self.statusLabelObject = Label(self, textvariable=self.statusLabelText)
self.statusLabelObject.grid(column=0, row=3, columnspan=2, sticky=E + W)
self.canvas.bind("<Double-Button-1>", master.help)
self.wquit = Button(self, text='Quit', command=master.exitProgram, width=20)
self.wquit.grid(column=0, row=4, sticky=W, padx=5, pady=5)
self.wnext = Button(self, text='Next', command=master.nex, state=DISABLED, width=20)
self.wnext.grid(column=1, row=4, sticky=E, padx=5, pady=5)
class FinalPage(Frame):
"""
Final QA page, handles comments, final approval.
"""
def __init__(self, master):
Frame.__init__(self, master)
row = 0
col = 0
self.infolabel = Label(self, justify=LEFT, text='QA Checklist:').grid(row=row, column=col)
row += 1
qa_list = [
'Base and terminal node images should be the same format. -If the base was a JPEG, the Create JPEG/TIFF option should be used as the last step.',
'All relevant semantic groups are identified.']
self.checkboxes = CheckboxGroup(boxes=[])
for q in qa_list:
box_label = Label(self, text=q, wraplength=600, justify=LEFT)
ck = Chkbox(parent=self, dialog=master, label=box_label, value=master.qaData.get_state())
ck.box.grid(row=row, column=col)
ck.label.grid(row=row, column=col + 1, sticky='W')
self.checkboxes.boxes.append(ck)
row += 1
master.checkboxes[master.current_qa_page] = self.checkboxes
        if len(self.master.errors) > 0:
Label(self, text='Probes were generated with errors. They can be reviewed, but QA cannot be accepted. Check the log for errors.').grid(row=row, column=col+1)
row += 1
Label(self, text='QA Signoff: ').grid(row=row, column=col)
col += 1
self.reporterStr = StringVar()
self.reporterStr.set(get_username())
self.reporterEntry = Entry(self, textvar=self.reporterStr)
self.reporterEntry.grid(row=row, column=col, columnspan=3, sticky='W')
row += 2
col -= 1
self.acceptButton = Button(self, text='Accept', command=lambda: master.qa_done('yes'), width=15,
state=DISABLED)
self.acceptButton.grid(row=row, column=col + 2, columnspan=2, sticky='W')
self.rejectButton = Button(self, text='Reject', command=lambda: master.qa_done('no'), width=15)
self.rejectButton.grid(row=row, column=col + 1, columnspan=1, sticky='E')
self.previButton = Button(self, text='Previous', command=master.pre, width=15)
self.previButton.grid(row=row, column=col, columnspan=2, sticky='W')
row += 1
self.commentsLabel = Label(self, text='Comments: ')
self.commentsLabel.grid(row=row, column=col, columnspan=3)
row += 1
textscroll = Scrollbar(self)
textscroll.grid(row=row, column=col + 4, sticky=NS)
self.commentsBox = Text(self, height=5, width=100, yscrollcommand=textscroll.set, relief=SUNKEN)
self.commentsBox.grid(row=row, column=col, padx=5, pady=5, columnspan=3, sticky=NSEW)
textscroll.config(command=self.commentsBox.yview)
currentComment = master.parent.scModel.getProjectData('qacomment')
        if currentComment is not None:
            self.commentsBox.insert(END, currentComment)
class QAPage(Frame):
"""
A standard QA Page, allows review and user validation of probe spatial, temporal aspects
"""
#TODO: Refactor to put page data with the page.
"""
subplots = []
pltdata = []
successIcon = None
displays = []
pathboxes = []
"""
def __init__(self, master, link):
Frame.__init__(self, master=master)
self.master = master
self.link = link
self.checkboxes = CheckboxGroup(boxes=[])
#Find this probe- could probably do this elsewhere and pass it in.
self.edgeTuple = tuple(link.split("<-"))
if len(self.edgeTuple) < 2:
self.finalNodeName = link.split("->")[1]
self.edgeTuple = tuple(link.split("->"))
else:
self.finalNodeName = None
if (len(link.split('->'))>1):
probe = [probe for probe in master.probes if
probe.edgeId[1] in master.lookup[self.edgeTuple[0]] and probe.finalNodeId in master.lookup[self.edgeTuple[1]]][0]
else:
probe = \
[probe for probe in master.probes if
probe.edgeId[1] in master.lookup[self.edgeTuple[0]] and probe.donorBaseNodeId in
master.lookup[
self.edgeTuple[1]]][0]
self.probe = probe
iFrame = Frame(self)
c = Canvas(iFrame, width=35, height=35)
c.pack()
#Success Icon
img = openImage(maskgen.tool_set.get_icon('RedX.png') if probe.failure else maskgen.tool_set.get_icon('check.png'))
self.successIcon = ImageTk.PhotoImage(imageResizeRelative(img, (30, 30), img.size).toPIL())
c.create_image(15, 15, image=self.successIcon, anchor=CENTER, tag='things')
#Layout
row = 0
col = 0
self.optionsLabel = Label(self, text=self.link, font=(None, 10))
self.optionsLabel.grid(row=row, columnspan=3, sticky='EW', padx=(40, 0), pady=10)
iFrame.grid(column=0, row=0, columnspan=1, sticky=W)
row += 1
self.operationVar = StringVar(value="Operation [ Semantic Groups ]:")
self.operationLabel = Label(self, textvariable=self.operationVar, justify=LEFT)
self.semanticFrame = SemanticFrame(self)
self.semanticFrame.grid(row=row + 1, column=0, columnspan=2, sticky=N + W, rowspan=1, pady=10)
row += 2
#cImageFrame is used for plot, image and overlay
self.cImgFrame = ttk.Notebook(self)
self.cImgFrame.bind('<<NotebookTabChanged>>', lambda a: self.frameMove())
self.cImgFrame.grid(row=row, rowspan=8)
self.descriptionVar = StringVar()
self.descriptionLabel = Label(self, textvariable=self.operationVar, justify=LEFT)
row += 8
self.operationLabel.grid(row=row, columnspan=3, sticky='W', padx=10)
row += 1
textscroll = Scrollbar(self)
textscroll.grid(row=row, column=col + 1, sticky=NS)
self.commentBox = Text(self, height=5, width=80, yscrollcommand=textscroll.set, relief=SUNKEN)
self.master.commentsBoxes[self.link] = self.commentBox
self.commentBox.grid(row=row, column=col, padx=5, pady=5, columnspan=1, rowspan=2, sticky=NSEW)
textscroll.config(command=self.commentBox.yview)
col = 3
row = 0
scroll = Scrollbar(self)
scroll.grid(row=row, column=col + 2, rowspan=5, columnspan=1, sticky=NS)
self.pathList = Listbox(self, width=30, yscrollcommand=scroll.set, selectmode=EXTENDED, exportselection=0)
self.pathList.grid(row=row, column=col - 1, rowspan=5, columnspan=3, padx=(30, 10), pady=(20, 20))
self.master.pathboxes[self] = self.semanticFrame.getListbox()
scroll.config(command=self.pathList.yview)
self.transitionVar = StringVar()
edge = master.scModel.getGraph().get_edge(probe.edgeId[0], probe.edgeId[1])
self.operationVar.set(self.operationVar.get() + master._compose_label(edge))
master.edges[self] = [edge, self.semanticFrame.getListbox()]
for sg in edge['semanticGroups'] if 'semanticGroups' in edge else []:
self.semanticFrame.insertListbox(ANCHOR, sg)
operation = master.scModel.getGroupOperationLoader().getOperationWithGroups(edge['op'])
#QA checkboxes
if operation.qaList is not None:
args = getValue(edge, 'arguments', {})
self.curOpList = [x for x in operation.qaList]
for item_pos in range(len(self.curOpList)):
item = self.curOpList[item_pos]
try:
self.curOpList[item_pos] = item.format(**args)
except:
pass
else:
self.curOpList = []
row += 5
if self.curOpList is None:
master.qaData.set_qalink_status(self.link, 'yes')
for q in self.curOpList:
box_label = Label(self, text=q, wraplength=250, justify=LEFT)
ck = Chkbox(parent=self, dialog=master, label=box_label, value=master.qaData.get_qalink_status(link=link))
ck.box.grid(row=row, column=col - 1)
ck.label.grid(row=row, column=col, columnspan=4, sticky='W')
self.checkboxes.boxes.append(ck)
row += 1
master.checkboxes[self] = self.checkboxes
# Main Features- load the overlay for images, load plot graph & overlay page for videos
if ('<-' in self.link and probe.donorVideoSegments is None) or probe.targetVideoSegments is None:
self.load_overlay(initialize=True)
else:
self.transitionString(None)
self.setUpFrames()
#Comment section
currentComment = master.qaData.get_qalink_caption(self.link)
self.commentBox.delete(1.0, END)
self.commentBox.insert(END, currentComment if currentComment is not None else '')
#Navigation Buttons
self.acceptButton = Button(self, text='Next', command=master.nex, width=15)
self.acceptButton.grid(row=12, column=col + 2, columnspan=2, sticky='E', padx=(20, 20))
self.prevButton = Button(self, text='Previous', command=master.pre, width=15)
self.prevButton.grid(row=12, column=col - 1, columnspan=2, sticky='W', padx=(20, 20))
self.acceptnButton = Button(self, text='Next Unchecked', command=master.nexCheck, width=15)
self.acceptnButton.grid(row=13, column=col + 2, columnspan=2, sticky='E', padx=(20, 20))
self.prevnButton = Button(self, text='Previous Unchecked', command=master.preCheck, width=15)
self.prevnButton.grid(row=13, column=col - 1, columnspan=2, sticky='W', padx=(20, 20))
row = 14
#Progress Bar
pb = ttk.Progressbar(self, orient='horizontal', mode='determinate', maximum=100.0001)
pb.grid(row=row, column=0, sticky=EW, columnspan=8)
pb.step(master.progress * 100)
master.progressBars.append(pb)
def setUpFrames(self):
"""
Lays out inner display for video temporal and spatial review
:return:
"""
displays = [TemporalReviewDisplay(self)]
if any(segment.filename != None for segment in self.probe.targetVideoSegments):
displays.append(SpatialReviewDisplay(self))
self.checkboxes.boxes.append(CheckboxGroup(boxes=[d.checkbox for d in displays], condition='any'))
self.master.pageDisplays[self] = [0, displays]
def _add_to_listBox(self, box, string):
if len(string) < 20:
box.insert(END, string)
return 1
box.insert(END, string[0:15]+"...")
box.insert(END, " " + string[max(15-int(len(string)),-10):])
return 2
def transitionString(self, probeList):
tab = " "
current = 0
c = 0
if self.finalNodeName == None:
self._add_to_listBox(self.pathList, self.edgeTuple[1])
self.pathList.insert(END, 2*tab + "|")
self.pathList.insert(END, tab + "Donor")
self.pathList.insert(END, 2*tab + "|")
self.pathList.insert(END, 2*tab + "V")
self._add_to_listBox(self.pathList, self.edgeTuple[0])
self.pathList.select_set(6)
return self.edgeTuple[0] + "\n|Donor|\nV\n" + self.edgeTuple[1]
self._add_to_listBox(self.pathList,self.master.backs[self.finalNodeName][0].start)
for p in self.master.backs[self.finalNodeName]:
edge = self.master.scModel.getGraph().get_edge(p.start, p.end)
self.pathList.insert(END, 2 * tab + "|")
c += self._add_to_listBox(self.pathList, edge['op'])
self.pathList.insert(END, 2 * tab + "|")
self.pathList.insert(END, 2 * tab + "V")
c += 3
c += self._add_to_listBox(self.pathList, self.master.getFileNameForNode(p.end))
if self.master.getFileNameForNode(p.end) == self.edgeTuple[0]:
current = c
self.pathList.selection_set(current)
self.pathList.see(max(0,current-5))
return ""
def load_overlay(self, initialize):
"""
Lays out display for spatial overlay for image probes
:param initialize:
:return:
"""
edgeTuple = self.edgeTuple
message = 'final image'
if (len(self.link.split('->')) > 1):
probe = [probe for probe in self.master.probes if
probe.edgeId[1] in self.master.lookup[self.edgeTuple[0]] and probe.finalNodeId in self.master.lookup[
self.edgeTuple[1]]][0]
n = self.master.scModel.G.get_node(probe.finalNodeId)
finalFile = os.path.join(self.master.scModel.G.dir,
self.master.scModel.G.get_node(probe.finalNodeId)['file'])
final = openImage(finalFile)
finalResized = imageResizeRelative(final, (500, 500), final.size)
imResized = imageResizeRelative(probe.targetMaskImage, (500, 500),
probe.targetMaskImage.size if probe.targetMaskImage is not None else finalResized.size)
else:
message = 'donor'
probe = \
[probe for probe in self.master.probes if probe.edgeId[1] in self.master.lookup[edgeTuple[0]] and probe.donorBaseNodeId in self.master.lookup[edgeTuple[1]]][0]
final, final_file = self.master.scModel.G.get_image(probe.donorBaseNodeId)
finalResized = imageResizeRelative(final, (500, 500), final.size)
imResized = imageResizeRelative(probe.donorMaskImage, (500, 500),
probe.donorMaskImage.size if probe.donorMaskImage is not None else finalResized.size)
edge = self.master.scModel.getGraph().get_edge(probe.edgeId[0],probe.edgeId[1])
if initialize is True:
self.c = Canvas(self.cImgFrame, width=510, height=510)
self.c.pack()
self.transitionString(None)
try:
finalResized = finalResized.overlay(imResized)
except IndexError:
tex = self.c.create_text(250,250,width=400,font=("Courier", 20))
self.c.itemconfig(tex, text="The mask of link {} did not match the size of the {}.".format(self.link, message))
return
self.master.photos[self.link] = ImageTk.PhotoImage(finalResized.toPIL())
self.image_on_canvas = self.c.create_image(255, 255, image=self.master.photos[self.link], anchor=CENTER, tag='imgc')
def frameMove(self):
"""
change pages on inner display for videos
:return:
"""
if self in self.master.pageDisplays:
displays = self.master.pageDisplays[self][1]
d_index = self.cImgFrame.index('current')
displays[d_index].checkbox.grid()
for display in displays:
if display != displays[d_index]:
display.checkbox.grid_remove()
def scrollplt(self, *args):
"""
Handle scrolling function on temporal review graph.
:param args:
:return:
"""
if (args[0] == 'moveto'):
na = self.master.pltdata[self]
end = na[-1]
total = end[3]-end[2] + 20000
curframe = self.master.subplots[self].get_children()[1].xaxis.get_view_interval()
space = curframe[1]-curframe[0]
total *= float(args[1])
self.master.subplots[self].get_children()[1].xaxis.set_view_interval(total, total + space, ignore=True)
self.master.subplots[self].canvas.draw()
elif (args[0] == 'scroll'):
self.master.subplots[self].get_children()[1].xaxis.pan(int(args[1]))
self.master.subplots[self].canvas.draw()
def cache_designation(self):
"""
Cache the QA validation of probe designation.
:return:
"""
self.master.check_ok()
displays = self.master.pageDisplays[self][1] if self in self.master.pageDisplays else []
if len(displays) > 0:
validation = {'temporal': bool(displays[0].checkbox), 'spatial': bool(displays[1].checkbox) if len(displays) > 1 else False}
            eligibility = [key for key in validation.keys() if validation[key] == True]
            designation = '-'.join(eligibility) if len(eligibility) else 'detect'
else:
designation = self.probe.taskDesignation
self.master.qaData.set_qalink_designation(self.link, designation)
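        # Illustration (added): with both review boxes ticked the designation
        # joins the satisfied aspects (e.g. 'temporal-spatial'); with neither
        # ticked it falls back to 'detect'.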
class DummyPage(Frame):
def __init__(self, master, labeltext = ''):
Frame.__init__(self, master=master)
self.mainlabel = Label(self, text= labeltext)
self.mainlabel.pack()
self.nextButton = Button(self, text='NEXT', command=master.nex)
self.nextButton.pack()
class SpatialReviewDisplay(Frame):
"""
The spatial review display for video
"""
def __init__(self, page):
Frame.__init__(self, master=page.cImgFrame, height=500,width=50)
page.cImgFrame.add(self, text='Spatial')
self.dialog = self.winfo_toplevel()
#Add Checkbox for spatial review
checkbox_info = page.checkboxes.boxes[-1].grid_info() if len(page.checkboxes.boxes) > 0 else {}
chkboxes_row = int(checkbox_info['row']) + 1 if len(checkbox_info) > 0 else 5
chkboxes_col = int(checkbox_info['column']) + 1 if len(checkbox_info) > 0 else 4
spatial_box_label = Label(master=page, text='Spatial Overlay Correct?', wraplength=250, justify=LEFT)
self.checkbox = Chkbox(parent=page, dialog=page.master, label=spatial_box_label, command=page.cache_designation,
value=page.master.qaData.get_qalink_designation(page.link) is not None)
self.checkbox.box.grid(row=chkboxes_row, column=chkboxes_col -1)
self.checkbox.label.grid(row=chkboxes_row, column=chkboxes_col, columnspan=4, sticky='W')
self.checkbox.grid_remove() #hide for now, Will be gridded by the frameMove function
if (len(page.link.split('->')) > 1):
probe = [probe for probe in page.master.probes if
probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]] and probe.finalNodeId in
page.master.lookup[page.edgeTuple[1]]][0]
else:
probe = \
[probe for probe in page.master.probes if
probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]] and probe.donorBaseNodeId in
page.master.lookup[
page.edgeTuple[1]]][0]
if probe.targetVideoSegments is not None:
to = os.path.join(self.dialog.scModel.get_dir(),probe.finalImageFileName)
overlay_file = compose_overlay_name(target_file=to, link=page.link)
total_range = (probe.targetVideoSegments[0].starttime/1000, probe.targetVideoSegments[-1].endtime/1000)
self.buttonText = StringVar()
self.buttonText.set(value=('PLAY: ' if os.path.exists(overlay_file) else 'GENERATE: ') + os.path.split(overlay_file)[1])
self.playbutton = Button(master=self, textvariable=self.buttonText,
command=lambda: self.openOverlay(probe=probe,
target_file=to,
overlay_path=overlay_file))
self.playbutton.grid(row=0, column=0, columnspan=2, sticky='W')
self.range_label = Label(master=self, text='Range: ' + '{:.2f}'.format(total_range[0]) + 's - ' + '{:.2f}'.format(total_range[1]) + 's')
self.range_label.grid(row=0, column= 3, columnspan = 1, sticky='W')
def openOverlay(self, probe=None, target_file = '', overlay_path=''):
if not os.path.exists(overlay_path):
GrayBlockOverlayGenerator(locator=self.dialog.meta_extractor.getMetaDataLocator(probe.edgeId[0]),
segments=probe.targetVideoSegments,
target_file=target_file, output_file=overlay_path).generate()
self.buttonText.set('PLAY: ' + os.path.split(overlay_path)[1])
openFile(overlay_path)
class TemporalReviewDisplay(Frame):
"""
The temporal review display for video
"""
def __init__(self, page):
Frame.__init__(self, master=page.cImgFrame)
page.cImgFrame.add(self, text='Temporal')
# Add Checkbox for spatial review
checkbox_info = page.checkboxes.boxes[-1].grid_info() if len(page.checkboxes.boxes) > 0 else {}
chkboxes_row = int(checkbox_info['row']) + 1 if len(checkbox_info) > 0 else 5
chkboxes_col = int(checkbox_info['column']) + 1 if len(checkbox_info) > 0 else 4
temporal_box_label = Label(master=page, text='Temporal data correct?', wraplength=250, justify=LEFT)
self.checkbox = Chkbox(parent=page, dialog=page.master, label=temporal_box_label, command=page.cache_designation,
value=page.master.qaData.get_qalink_designation(page.link) is not None)
self.checkbox.box.grid(row=chkboxes_row, column=chkboxes_col - 1)
self.checkbox.label.grid(row=chkboxes_row, column=chkboxes_col, columnspan=4, sticky='W')
self.checkbox.grid_remove() #hide for now, Will be gridded by the frameMove function
ps = [mpatches.Patch(color="red", label="Target Video"),
mpatches.Patch(color="blue", label="Current Manipulations"),
mpatches.Patch(color="green", label="Other Manipulations")]
data = []
f = Figure(figsize=(6, 4), dpi=100)
subplot = f.add_subplot(111)
subplot.legend(handles=ps, loc=8)
prolist = []
maxtsec = 0
for probe in page.master.probes:
maxtsec = max(maxtsec, probe.max_time())
if (page.finalNodeName == None):
if probe.donorBaseNodeId is not None and page.master.getFileNameForNode(probe.donorBaseNodeId) == \
page.edgeTuple[1]:
prolist.append(probe)
else:
if (page.master.getFileNameForNode(probe.finalNodeId) == page.edgeTuple[1]):
prolist.append(probe)
try:
tsec = get_end_time_from_segment(
page.master.meta_extractor.getMetaDataLocator(page.master.lookup[page.edgeTuple[1]][0]).getMaskSetForEntireVideo(
media_types=probe.media_types())[0]) / 1000.0
except Exception as ex:
logging.getLogger("maskgen").error(ex.message)
logging.getLogger("maskgen").error(
"{} Duration could not be found the length displayed in the graph is incorrect".format(
page.edgeTuple[1]))
tsec = maxtsec
ytics = []
ytic_lbl = []
count = 0
high = 0
low = tsec * 1000 + 20000
for probe in prolist:
count += 1
col = 2
cur = False
if (probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]]):
col = 1
cur = True
if page.finalNodeName == None:
for mvs in probe.donorVideoSegments if probe.donorVideoSegments is not None else []:
data.append([count, col, mvs.starttime, mvs.endtime])
if cur:
high = max(high, mvs.endtime)
low = min(low, mvs.starttime)
subplot.text(mvs.starttime - 100, count - 0.5, "F:" + str(int(mvs.startframe)),
{'size': 10})
subplot.text(mvs.endtime + 100, count - 0.5, "F:" + str(int(mvs.endframe)), {'size': 10})
subplot.text(mvs.starttime - 100, count - 0.20, "T:" + str(int(mvs.starttime)),
{'size': 10})
subplot.text(mvs.endtime + 100, count - 0.20, "T:" + str(int(mvs.endtime)), {'size': 10})
else:
for mvs in probe.targetVideoSegments if probe.targetVideoSegments is not None else []:
data.append([count, col, mvs.starttime, mvs.endtime])
if cur:
high = max(high, mvs.endtime)
low = min(low, mvs.starttime)
subplot.text(mvs.starttime, count - 0.5, "F:" + str(int(mvs.startframe)), {'size': 10})
subplot.text(mvs.endtime, count - 0.5, "F:" + str(int(mvs.endframe)), {'size': 10})
subplot.text(mvs.starttime, count - 0.20, "T:" + str(int(mvs.starttime)), {'size': 10})
subplot.text(mvs.endtime, count - 0.20, "T:" + str(int(mvs.endtime)), {'size': 10})
ytics.append(count)
ytic_lbl.append(str(page.master.abreive(probe.edgeId[0])))
color_mapper = np.vectorize(lambda x: {0: 'red', 1: 'blue', 2: 'green'}.get(x))
data.append([count + 1, 0, 0.0, tsec * 1000.0])
ytics.append(count + 1)
ytic_lbl.append(page.master.abreive(page.edgeTuple[1]))
numpy_array = np.array(data)
subplot.hlines(numpy_array[:, 0], numpy_array[:, 2], numpy_array[:, 3], color_mapper(numpy_array[:, 1]),
linewidth=10)
subplot.set_yticks(ytics)
subplot.set_yticklabels(ytic_lbl)
subplot.set_xlabel('Time in Milliseconds')
subplot.grid()
i = subplot.yaxis.get_view_interval()
if (i[1] - i[0] < 10):
i[0] = i[1] - 8
subplot.yaxis.set_view_interval(i[0], i[1])
i = subplot.xaxis.get_view_interval()
if (i[1] - i[0] > 2000):
i[0] = low - 1000
i[1] = high + 1000
subplot.xaxis.set_view_interval(i[0], i[1])
page.master.pltdata[page] = numpy_array
canvas = Canvas(self, height=50, width=50)
imscroll = Scrollbar(self, orient=HORIZONTAL)
imscroll.grid(row=1, column=0, sticky=EW)
imscroll.config(command=page.scrollplt)
fcanvas = FigureCanvasTkAgg(f, master=canvas)
fcanvas.draw()
fcanvas.get_tk_widget().grid(row=0, column=0)
fcanvas._tkcanvas.grid(row=0, column=0)
canvas.grid(row=0, column=0)
canvas.config(height=50, width=50)
page.master.subplots[page] = f
class QAProjectDialog(Toplevel):
"""
Host window for QA pages
"""
manny_colors = [[155, 0, 0], [0, 155, 0], [0, 0, 155], [153, 76, 0], [96, 96, 96], [204, 204, 0], [160, 160, 160]]
def __init__(self, parent):
self.parent = parent
self.scModel = parent.scModel
self.meta_extractor = MetaDataExtractor(parent.scModel.getGraph())
self.probes = None
Toplevel.__init__(self, parent)
self.type = self.parent.scModel.getEndType()
self.pages = []
self.current_qa_page = None
self.checkboxes = {} #Checkboxes, keyed by page
self.backs = {}
self.lookup = {}
self.subplots ={}
self.pltdata = {}
self.backsProbes={}
self.photos = {}
self.commentsBoxes = {}
self.edges = {}
self.qaList = []
self.pathboxes = {}
self.qaData = maskgen.qa_logic.ValidationData(self.scModel)
self.resizable(width=False, height=False)
self.progressBars = []
self.narnia = {}
self.pageDisplays = {} #Frames that go inside pages, keyed by page.
self.valid = False
self.mannypage = MannyPage(self)
self.switch_frame(self.mannypage)
self.lastpage = None #Assigned in generate Pages
self.pages.append(self.mannypage)
self.getProbes()
if self.probes is None:
self.mannypage.statusLabelText.set('Probe Generation failed. Please consult logs for more details.')
self.parent.update()
else:
self.errors = [p for p in self.probes if p.failure]
if len(self.errors) > 0:
self.mannypage.statusLabelText.set('Probes Complete with errors. Generating Preview Pages.')
else:
self.mannypage.statusLabelText.set('Probes Complete. Generating Preview Pages.')
self.generate_pages()
def getProbes(self):
try:
generator = ProbeGenerator(
scModel=self.scModel,
processors=[
DetermineTaskDesignation(
scModel=self.scModel,
inputFunction=fetch_qaData_designation)])
self.probes = generator(saveTargets=False, keepFailures=True)
except Exception as e:
logging.getLogger('maskgen').error(str(e))
self.probes = None
def getFileNameForNode(self, nodeid):
try:
fn = self.scModel.getFileName(nodeid)
if fn not in self.lookup:
self.lookup[fn] = []
if nodeid not in self.lookup[fn]:
self.lookup[fn].append(nodeid)
except TypeError:
fn = None
logging.getLogger('maskgen').warn("Unable to locate File for node with Id {}".format(nodeid))
return fn
def pre(self):
self.move(-1,False)
def nex(self):
self.move(1, False)
def exitProgram(self):
self.destroy()
cleanup_temporary_files(probes=self.probes, scModel=self.scModel)
def help(self,event):
URL = MaskGenLoader.get_key("apiurl")[:-3] + "journal"
webbrowser.open_new(URL)
def generate_pages(self):
self.crit_links = ['->'.join([self.getFileNameForNode(p.edgeId[1]), self.getFileNameForNode(p.finalNodeId)]) for
p in self.probes] if self.probes else []
self.crit_links = list(set(self.crit_links))
self.finNodes = []
for x in range(0, len(self.crit_links)):
for y in range(x, len(self.crit_links)):
link1 = self.crit_links[x]
link2 = self.crit_links[y]
fin1 = link1.split("->")[1]
fin2 = link2.split("->")[1]
self.finNodes.append(fin2)
if (fin1 > fin2):
self.crit_links[x] = self.crit_links[y]
self.crit_links[y] = link1
self.finNodes = list(set(self.finNodes))
for end in self.finNodes:
for node in self.lookup[end]:
if node in self.scModel.finalNodes():
break
self.backs[end] = []
next = self.getPredNode(node)
while next != None:
node = next.start
self.backs[end].append(next)
next = self.getPredNode(node)
self.backs[end].reverse()
donors = ['<-'.join([self.getFileNameForNode(p.edgeId[1]), self.getFileNameForNode(p.donorBaseNodeId)]) for p in
self.probes if
p.donorMaskImage is not None or p.donorVideoSegments is not None] if self.probes else []
donors = set(sorted(donors))
self.crit_links.extend([x for x in donors])
count = 0.0
for k in self.qaData.keys():
count += 1 if self.qaData.get_qalink_status(k) == 'yes' else 0
self.progress = count / len(self.crit_links) if len(self.crit_links) != 0 else 0.99999
count = 1
for link in self.crit_links:
self.pages.append(QAPage(master=self, link=link))
count += 1
self.lastpage = FinalPage(self)
self.pages.append(self.lastpage)
self.mannypage.statusLabelText.set('Preview Pages Complete. Press Next to Continue.')
self.mannypage.wnext.config(state=NORMAL)
def validategoodtimes(self):
v = self.scModel.validate()
if maskgen.validation.core.hasErrorMessages(v, lambda x: True):
self.valid = False
tkMessageBox.showerror("Validation Errors!","It seems this journal has unresolved validation errors. "
"Please address these and try again. Your QA progress will be saved.")
else:
self.valid = True
self.check_ok()
def abreive(self,str):
if (len(str)>10):
return(str[:5]+ "...\n" + str[-6:])
else:
return str
def _add_to_listBox(self, box, string):
if len(string) < 20:
box.insert(END, string)
return 1
box.insert(END, string[0:15]+"...")
box.insert(END, " " + string[max(15-int(len(string)),-10):])
return 2
def _compose_label(self,edge):
op = edge['op']
if 'semanticGroups' in edge and edge['semanticGroups'] is not None:
groups = edge['semanticGroups']
op += ' [' + ', '.join(groups) + ']'
self.descriptionVar = edge['description']
return op
def nexCheck(self):
self.findNextUnchecked()
def preCheck(self):
self.findNextUnchecked()
def switch_frame(self, frame):
if self.current_qa_page != None:
self.current_qa_page.grid_forget()
self.current_qa_page = frame
self.current_qa_page.grid()
def findNextUnchecked(self):
try:
unchecked = next(page for page in self.pages if not bool(page.checkboxes))
except StopIteration:
return None
if unchecked != self.current_qa_page:
self.switch_frame(unchecked)
def move(self, dir, checked):
if self.current_qa_page in self.edges.keys():
self.edges[self.current_qa_page][0]['semanticGroups'] = self.edges[self.current_qa_page][1].get(0, END)
finish = True
if self.current_qa_page in self.checkboxes.keys():
for box in self.checkboxes[self.current_qa_page].boxes:
if bool(box) is False:
finish = False
break
#caching in qaData
ind = self.pages.index(self.current_qa_page)
step = 0
if 0<=ind-1<len(self.crit_links):
if finish and self.crit_links[ind-1] in self.qaData.keys():
if self.qaData.get_qalink_status(self.crit_links[ind-1]) == 'no':
step += 1.0/len(self.crit_links)*100
self.qaData.set_qalink_status(self.crit_links[ind-1],'yes')
self.qaData.set_qalink_caption(self.crit_links[ind-1],self.commentsBoxes[self.crit_links[ind-1]].get(1.0, END).strip())
self.current_qa_page.cache_designation()
if not finish:
if self.qaData.get_qalink_status(self.crit_links[ind-1]) == 'yes':
step += -1.0/len(self.crit_links)*100
self.qaData.set_qalink_status(self.crit_links[ind - 1], 'no')
self.qaData.set_qalink_caption(self.crit_links[ind - 1], self.commentsBoxes[self.crit_links[ind - 1]].get(1.0, END).strip())
for p in self.progressBars:
p.step(step)
i = self.pages.index(self.current_qa_page) + dir
if not 0<=i<len(self.pages):
return
nex = self.current_qa_page
while checked:
nex = self.pages[i]
finish = True
if nex in self.checkboxes.keys():
for t in self.checkboxes[nex]:
if t.get() is False:
finish = False
break
if i == len(self.pages)-1 or i == 0:
break
if not finish:
break
i += dir
self.switch_frame(self.pages[i])
def qa_done(self, qaState):
self.qaData.update_All(qaState, self.lastpage.reporterStr.get(), self.lastpage.commentsBox.get(1.0, END), None)
self.parent.scModel.save()
self.destroy()
cleanup_temporary_files(probes=self.probes, scModel=self.scModel)
def getPredNode(self, node):
for pred in self.scModel.G.predecessors(node):
edge = self.scModel.G.get_edge(pred, node)
if edge['op'] != 'Donor':
return self.scModel.getModificationForEdge(pred, node)
return None
def check_ok(self, event=None):
if self.lastpage != None:
if len(self.errors) == 0 and all(bool(page.checkboxes) for page in self.pages):
self.lastpage.acceptButton.config(state=NORMAL)
else:
self.lastpage.acceptButton.config(state=DISABLED)
```
#### File: maskgen/ui/ui_tools.py
```python
import time
import ttk
from Tkinter import *
import tkSimpleDialog
import tkMessageBox
import logging
from maskgen.support import ModuleStatus
from copy import deepcopy
class ProgressBar(Frame):
def __init__(self, master, **kwargs):
Frame.__init__(self, master, **kwargs)
self.body(self)
self.last_time = time.time()
def body(self,master):
self.system_label_var = StringVar()
self.system_label_var.set(' ')
self.module_label_var = StringVar()
self.module_label_var.set(' ')
self.function_label_var = StringVar()
self.function_label_var.set(' ')
Label(master,textvariable=self.system_label_var,anchor=W, justify=LEFT,width=20).grid(row=0,column=0)
ttk.Separator().grid(row=0,column=1)
Label(master,textvariable=self.module_label_var,anchor=W, justify=LEFT,width=20).grid(row=0, column=2)
ttk.Separator().grid(row=0,column=3)
Label(master,textvariable=self.function_label_var,anchor=W, justify=LEFT,width=40).grid(row=0, column=4)
ttk.Separator().grid(row=0,column=5)
self.pb_status = DoubleVar()
self.pb_status.set(0)
self.pb = ttk.Progressbar(master,
variable=self.pb_status,
orient='horizontal',
mode='determinate',
maximum=100.001)
self.pb.grid(row=0,column=6,sticky=E)
def postChange(self,module_status):
"""
:param module_status:
:return:
@type module_status: ModuleStatus
"""
current_time = time.time()
        # update if the system changed, the last update occurred more than 1.2 seconds
        # ago, or the percentage is effectively complete.
if module_status.system_name != self.system_label_var.get() or \
current_time - self.last_time > 1.2 or \
module_status.percentage >= 99.9999:
logging.getLogger('maskgen').info('%s %s %s %2.3f' % (module_status.system_name,
module_status.module_name,
module_status.component,
module_status.percentage))
self.system_label_var.set(module_status.system_name)
self.module_label_var.set(module_status.module_name)
self.function_label_var.set(module_status.component)
delta = module_status.percentage - self.pb_status.get()
self.pb.step(delta)
self.pb_status.set(module_status.percentage)
if module_status.percentage >= 99.9999:
self.pb_status.set(0)
self.last_time = current_time
self.pb.update_idletasks()
class SelectDialog(tkSimpleDialog.Dialog):
cancelled = True
def __init__(self, parent, name, description, values, initial_value=None, information=None, callback=None):
self.description = description
self.values = values
self.parent = parent
self.initial_value = initial_value
self.name = name
self.information = information
self.callback = callback
tkSimpleDialog.Dialog.__init__(self, parent, name)
def body(self, master):
self.desc_lines = '\n'.join(self.description.split('.'))
self.desc_label = Label(master, text=self.desc_lines, wraplength=400)
self.desc_label.grid(row=0, sticky=N, columnspan=(3 if self.information else 1))
self.var1 = StringVar()
self.var1.set(self.values[0] if self.initial_value is None or self.initial_value not in self.values else self.initial_value)
self.e1 = OptionMenu(master, self.var1, *self.values)
self.e1.grid(row=1, column=0, sticky=N, columnspan=3 if self.information else 1)
if self.information:
from maskgen.ui.help_tools import HelpFrame
fr = HelpFrame(master, self.information, self.var1)
fr.grid(row=2, column=0, columnspan=3)
def cancel(self):
if self.cancelled:
self.choice = None
tkSimpleDialog.Dialog.cancel(self)
def apply(self):
self.cancelled = False
self.choice = self.var1.get()
if self.callback is not None:
self.callback(self.var1.get())
class AddRemove(SelectDialog):
def __init__(self, parent, name, description, values, initial_value=None, information=None):
SelectDialog.__init__(self, parent, name, description, values, initial_value, information)
def buttonbox(self):
box = Frame(self)
self.add_button = Button(box, text="Add", width=10, command=self.add, default=ACTIVE)
self.add_button.pack(side=LEFT, padx=5, pady=5)
self.remove_button = Button(box, text="Remove", width=10, command=self.remove)
self.remove_button.pack(side=LEFT, padx=5, pady=5)
self.cancel_button = Button(box, text="Cancel", width=10, command=self.cancel)
self.cancel_button.pack(side=LEFT, padx=5, pady=5)
self.bind("<Escape>", self.cancel)
box.pack()
def add(self):
self.cancelled = False
self.ok()
self.choice = (self.var1.get(), "add")
def remove(self):
self.cancelled = False
self.ok()
self.choice = (self.var1.get(), "remove")
class TimeWidget(Frame):
def __init__(self, master, textvariable):
self.time_text_variable = textvariable
Frame.__init__(self, master)
self.master = master
self.entries = {}
self.create_widgets()
self.bind_all("<Control-v>", lambda e: self.paste())
def create_widgets(self):
initialvalues = self.time_text_variable.get().split(':')
if len(initialvalues) > 2:
micro = 0 if '.' not in initialvalues[-1] else initialvalues[-1].split('.')[1]
second = int(initialvalues[-1]) if '.' not in initialvalues[-1] else initialvalues[-1].split('.')[0]
minute = initialvalues[1]
hour = initialvalues[0]
else:
micro = "micros"
second = "SS"
minute = "MM"
hour = "HH"
font = ("TkDefaultFont", 10) # Increase font size
# Setup fields
self.entries['hour'] = w = Entry(self, width=3, font=font)
w.insert(0, hour)
w.bind('<KeyRelease>', lambda e: self.track('hour', 'minute', 2, 23))
w.bind('<FocusIn>', lambda e: self.get_focus('hour'))
w.bind('<FocusOut>', lambda e: self.lose_focus('hour', 2))
w.grid(row=0, column=0)
w = Label(self, text=":", font=font, bg='white')
w.grid(row=0, column=1)
self.entries['minute'] = w = Entry(self, width=3, font=font)
w.insert(0, minute)
w.bind('<KeyRelease>', lambda e: self.track('minute', 'second', 2, 59))
w.bind('<FocusIn>', lambda e: self.get_focus('minute'))
w.bind('<FocusOut>', lambda e: self.lose_focus('minute', 2))
w.grid(row=0, column=2)
w = Label(self, text=":", font=font, bg='white')
w.grid(row=0, column=3)
self.entries['second'] = w = Entry(self, width=3, font=font)
w.insert(0, second)
w.bind('<KeyRelease>', lambda e: self.track('second', 'microsecond', 2, 59))
w.bind('<FocusIn>', lambda e: self.get_focus('second'))
w.bind('<FocusOut>', lambda e: self.lose_focus('second', 2))
w.grid(row=0, column=4)
w = Label(self, text=".", font=font, bg='white')
w.grid(row=0, column=5)
self.entries['microsecond'] = w = Entry(self, width=10, font=font)
w.insert(0, micro)
w.bind('<KeyRelease>', lambda e: self.track('microsecond', None, 6, 999999))
w.bind('<FocusIn>', lambda e: self.get_focus('microsecond'))
w.bind('<FocusOut>', lambda e: self.lose_focus('microsecond', 6, prepend=False))
w.grid(row=0, column=6)
def get_focus(self, field):
"""
Binding to clear field on first entry. Allows for guidance on what units go where when tool launches
:param field: Field name that gained focus
:return:
"""
# Clear default text, if any, and unbind this function
if any([l.isalpha() for l in self.entries[field].get()]):
self.entries[field].delete(0, END)
self.entries[field].unbind('<FocusIn>')
def lose_focus(self, field, max_length, prepend=True):
"""
Binding to verify that all items in the field are properly padded before saving can occur.
:param field: Field name that lost focus
:param max_length: Maximum length of the item in that field
:param prepend: Add to the beginning when true (9->09), add to the end when false (9->900000)
:return: None
"""
curr = self.entries[field].get()
if len(curr) != 0 and len(curr) < max_length:
if prepend:
self.entries[field].insert(0, "0" * (max_length - len(curr)))
else:
self.entries[field].insert(END, "0" * (max_length - len(curr)))
# Verify there are no letters
if any([l.isalpha() for l in curr]):
tkMessageBox.showerror("Error", "The {0}s field cannot contain letters. Re-enter the {0}s.".format(field))
self.entries[field].delete(0, END)
self.entries[field].insert(0, "0" * max_length)
else:
self.update_variable()
def track(self, field, next_field, max_length, max_digit):
"""
Binding to verify that the value within each entry is valid in terms of length, and the maximum value that can
exist in the field.
:param field: Current field name
:param next_field: Field to jump to once current field is entered
:param max_length: Maximum length of the value in the field (character count)
:param max_digit: Maximum value that the field can hold
:return:
"""
curr = self.entries[field].get()
pos = self.entries[field].index(INSERT)
# Check that there is a value in the entry
if curr == "":
return
# Verify it is a number
if not curr[pos-1].isdigit():
first = pos-1
last = pos
for i in range(len(curr)):
if not curr[i].isdigit():
first = i
break
            for i in range(len(curr) - 1, -1, -1):
                if not curr[i].isdigit():
                    last = i + 1
                    break
self.entries[field].delete(first, last)
return
# enforce length restriction
if len(curr) > max_length:
self.entries[field].delete(0, END)
self.entries[field].insert(0, curr[:max_length])
self.update_variable()
return
# limit the value entered to the maximum
if int(curr[:max_length]) > max_digit:
self.entries[field].delete(0, END)
self.entries[field].insert(0, max_digit)
self.update_variable()
# If we are at the end, go to the next cell
if pos >= max_length and next_field:
self.entries[next_field].focus()
self.entries[next_field].icursor(0)
self.update_variable()
return
self.entries[field].icursor(pos)
def paste(self):
"""
Handle pasting data into time boxes
:return:
"""
time = self.clipboard_get()
try:
hr, mins, sfm = time.split(":")
s, fm = sfm.split(".")
except ValueError:
return
# Run through focus gain so text boxes wont self delete
self.get_focus("hour")
self.get_focus("minute")
self.get_focus("second")
self.get_focus("microsecond")
# Insert data and verify that it is valid
self.entries['hour'].delete(0, END)
self.entries['hour'].insert(0, hr)
self.lose_focus("hour", 2)
self.track("hour", None, 2, 23)
self.entries['minute'].delete(0, END)
self.entries['minute'].insert(0, mins)
self.lose_focus("minute", 2)
self.track("minute", None, 2, 59)
self.entries['second'].delete(0, END)
self.entries['second'].insert(0, s)
self.lose_focus("second", 2)
self.track("second", None, 2, 59)
self.entries['microsecond'].delete(0, END)
self.entries['microsecond'].insert(0, fm)
self.lose_focus("microsecond", 6, prepend=FALSE)
self.track("microsecond", None, 6, 999999)
def update_variable(self):
self.time_text_variable.set(self.__str__())
def __str__(self):
if all(self.isblank(value.get()) for value in self.entries.values()):
return ''
return "{0}:{1}:{2}.{3}".format(self.entries['hour'].get(), self.entries['minute'].get(),
self.entries['second'].get(), self.entries['microsecond'].get())
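        # Illustration (added): fields holding 01, 02, 03 and 000450 yield the
        # string "01:02:03.000450".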
def isblank(self, v):
initial = ['HH', 'MM', 'SS', 'micros']
if v in initial or v == '':
return True
else:
return False
def get(self):
try:
if all(self.isblank(value.get()) for value in self.entries.values()):
return ''
else:
                for k, v in self.entries.items():
entryString = v.get()
if not entryString.isdigit():
raise ValueError("Not digit")
self.update_variable()
except ValueError:
tkMessageBox.showerror("Data Error", "Hours, minutes, seconds, and microseconds must all be integers.")
return ""
return self
class EntryDialog(tkSimpleDialog.Dialog):
cancelled = True
def __init__(self, parent, name, description, validateFunc, initialvalue=None):
self.description = description
self.validateFunc = validateFunc
self.parent = parent
self.name = name
self.initialvalue = initialvalue
tkSimpleDialog.Dialog.__init__(self, parent, name)
def body(self, master):
Label(master, text=self.description).grid(row=0, sticky=W)
self.e1 = Entry(master, takefocus=True)
if self.initialvalue:
self.e1.insert(0, self.initialvalue)
self.e1.grid(row=1, column=0)
self.lift()
def cancel(self):
if self.cancelled:
self.choice = None
tkSimpleDialog.Dialog.cancel(self)
def apply(self):
self.cancelled = False
self.choice = self.e1.get()
def validate(self):
v = self.e1.get()
if self.validateFunc and not self.validateFunc(v):
tkMessageBox.showwarning(
"Bad input",
"Illegal values, please try again"
)
return 0
return 1
class ScrollableListbox(Frame):
def __init__(self, master, height, width):
Frame.__init__(self, master)
self.master = master
self.height = height
self.width = width
self.create_widgets()
def create_widgets(self):
self.lb = Listbox(self, height=self.height, width=self.width)
self.lb.grid(row=0, column=0, sticky=N + S)
sb = Scrollbar(self, orient=VERTICAL)
sb.grid(row=0, column=1, sticky=N + S)
self.lb.config(yscrollcommand=sb.set)
sb.config(command=self.lb.yview)
def get_listbox(self):
return self.lb
```
#### File: Media-Journaling-Tool/maskgen/updater.py
```python
import requests
import json
import logging
import maskgen
from maskgen import maskGenPreferences
"""
Git API used to compare the installed version of the tool with the latest version on the remote master branch
"""
class GitLabAPI:
def __init__(self, branch='master', version_file='VERSION', repo='',
url='https://gitlab.mediforprogram.com'):
self.file = '{url}/api/v4/projects/{repo}/repository/files/{version_file}/raw'.format(
url=url, repo=repo, version_file=version_file
)
self.commits = '{url}/api/v4/projects/{repo}/repository/commits'.format(
url=url, repo=repo, version_file=version_file
)
self.token = maskGenPreferences.get_key('git.token')
self.branch = branch
def get_version_file(self):
header = {'PRIVATE-TOKEN': self.token}
resp = requests.get(self.file, params={"ref": self.branch}, timeout=2, headers=header)
if resp.status_code == requests.codes.ok:
return resp.content.strip()
return "NA"
def getCommitMessage(self):
import json
header = {'PRIVATE-TOKEN': self.token}
resp = requests.get(self.commits, params={"ref": self.branch}, timeout=2, headers=header)
if resp.status_code == requests.codes.ok:
data = json.loads(resp.content)
return data[0]['message']
return "NA"
class GitHub:
# TODO!
def __init__(self, branch='master', version_file='VERSION', repo='rwgdrummer/maskgen',
url='https://api.github.com'):
self.file = '{url}/repos/{repo}/repository/files/{version_file}/raw'.format(
url=url, repo=repo, version_file=version_file
)
self.commits = '{url}/repos/{repo}/repository/files/{version_file}/raw'.format(
url=url, repo=repo, version_file=version_file
)
self.token = maskGenPreferences.get_key('git.token')
self.branch = branch
def get_version_file(self):
header = {'PRIVATE-TOKEN': self.token}
resp = requests.get(self.file, params={"ref": self.branch}, timeout=2, headers=header)
if resp.status_code == requests.codes.ok:
return resp.content.strip()
return "NA"
def getCommitMessage(self):
url = self.url + '/' + self.repos + '/commits/'
resp = requests.get(url, timeout=2)
if resp.status_code == requests.codes.ok:
content = json.loads(resp.content)
return content['commit']['message']
return None
class UpdaterGitAPI:
def __init__(self, branch='master', version_file='VERSION'):
url = maskGenPreferences.get_key('git.api.url',
'https://gitlab.mediforprogram.com')
repo = maskGenPreferences.get_key('repo', '503')
if 'gitlab' in url:
self.api = GitLabAPI(branch=branch, version_file=version_file, url=url, repo=repo)
else:
self.api = GitHub(branch=branch, version_file=version_file, url=url, repo=repo)
def _get_version_file(self):
return self.api.get_version_file()
def _getCommitMessage(self):
return self.api.getCommitMessage()
def _hasNotPassed(self, merge_sha):
if merge_sha is None:
return True
currentversion = maskgen.__version__
sha = currentversion[currentversion.rfind('.') + 1:]
return not merge_sha.startswith(sha)
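        # Illustration (added): if maskgen.__version__ ends in '.abc123' and the
        # remote version file content starts with 'abc123', the tool is considered current.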
def isOutdated(self):
try:
merge_sha = self._get_version_file()
if self._hasNotPassed(merge_sha):
return merge_sha, self._getCommitMessage()
return None, None
except Exception as ex:
logging.getLogger('maskgen').error('Error validating JT version: {}'.format(ex.message))
raise EnvironmentError(ex.message)
class OperationsUpdaterGitAPI(UpdaterGitAPI):
def __init__(self, branch='master'):
import urllib
UpdaterGitAPI.__init__(self, branch=branch, version_file=urllib.quote_plus('resources/operations.json'))
def _get_version_file(self):
resp = UpdaterGitAPI._get_version_file(self)
if resp is not None:
import json
return json.loads(resp)['version']
def _hasNotPassed(self, merge_sha):
from maskgen.software_loader import getMetDataLoader
if merge_sha is None:
return True
currentversion = getMetDataLoader().operation_version
return merge_sha != currentversion
```
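A minimal usage sketch for the updater classes above, assuming maskgen is importable and that the `git.api.url`, `repo`, and `git.token` preferences have been configured; the module path and the surrounding `try/except` simply mirror how a caller would surface the `EnvironmentError` raised by `isOutdated`.
```python
# Hypothetical usage of UpdaterGitAPI / OperationsUpdaterGitAPI (not part of updater.py).
from maskgen.updater import UpdaterGitAPI, OperationsUpdaterGitAPI

def check_for_updates(branch='master'):
    try:
        # Compare the local maskgen version SHA against the VERSION file on the remote branch.
        sha, message = UpdaterGitAPI(branch=branch).isOutdated()
        if sha is not None:
            print('Tool is outdated; remote version %s (%s)' % (sha, message))
        # Compare the local operations.json version against the remote copy.
        op_sha, op_message = OperationsUpdaterGitAPI(branch=branch).isOutdated()
        if op_sha is not None:
            print('operations.json is outdated; remote version %s' % op_sha)
    except EnvironmentError as ex:
        # isOutdated re-raises connectivity/permission problems as EnvironmentError
        print('Could not check for updates: %s' % ex)

if __name__ == '__main__':
    check_for_updates()
```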
#### File: other_plugins/CocoMaskSelector/maskgen_coco.py
```python
from pycocotools.coco import COCO
import numpy as np
import logging
import os
import sys
import cv2
import shutil
import json
"""
COCO SUPPORT
"""
def _mapSubject(annotation,mapping):
"""
map annotation category_id to a subject
:param mapping:
:param annotation:
:return:
@type mapping: dict
"""
return mapping[annotation['category_id']] if annotation['category_id'] in mapping else 'man-made object'
def createMaskImage(image_array, imagefilename, coco, lookup, subjectMapping={}, areaConstraint=(0,sys.maxint)):
"""
Given an image and its Coco data, pick a mask from the segmented image.
:param image
:param imageData:
:return:
@type imageData: dict
@type image_array: numpy.array
@type coco: COCO
@type lookup: dict
"""
    def defaultMask(image):
        # fall back to a centered rectangle covering the middle half of the image array
        w, h = image.shape[0], image.shape[1]
        real_mask = np.zeros((w, h), dtype=np.uint8)
        real_mask[w / 4:3 * w / 4, h / 4:3 * h / 4] = 255
        return 'other', real_mask
imgId = lookup[os.path.split(imagefilename)[1]]
imageData = coco.loadImgs(ids=[imgId])[0]
annIds = coco.getAnnIds(imgIds=[imgId])
annotations = coco.loadAnns(annIds)
logging.getLogger('maskgen').info('Processing image name: {}'.format(imagefilename))
image_width,image_height = image_array.shape[0],image_array.shape[1]
factor = float(imageData['width']) / image_width
valid_annotations = [annotation for annotation in annotations
if annotation['area'] * factor >= areaConstraint[0] and annotation['area'] * factor <= areaConstraint[1]]
if len(valid_annotations) > 0:
position = np.random.randint(0, len(valid_annotations))
        annotation = valid_annotations[position]
real_mask = coco.annToMask(annotation)
real_mask = real_mask.astype(np.uint8)
real_mask[real_mask>0] = 255
if real_mask.shape != (image_width, image_height):
real_mask = cv2.resize(real_mask,(image_height,image_width))
subject = _mapSubject(annotation,subjectMapping)
return subject,real_mask
    return defaultMask(image_array)
# mask[real_mask > 0] = [color/65536,color%65536/256,color%256]
def loadCoco(annotationsFile):
return COCO(annotationsFile)
def createFileNameToIDLookup(coco,imgIds=[], catIds=[]):
"""
Create an index of file name to coco image ID
:param coco:
:return:
@type coco: COCO
"""
return { image_data['file_name']:image_data['id'] for image_data in coco.loadImgs(coco.getImgIds(imgIds=imgIds,catIds=catIds))}
def createMaskImageWithParams(image_array, imagefilename, params, areaConstraint=(0,sys.maxint)):
"""
Using parameters for the coco and coco.index as they would appear in the global state,
create mask using one of the select annotations.
@see createBatchProjectGlobalState.
:param image_array:
:param imagefilename:
:param params:
:param areaConstraint:
:return:
@type image_array: numpy.ndarray
@type params: dict
"""
if 'coco.annotations' in params:
annotationPath = params['coco.annotations']
        if not os.path.exists(annotationPath):
logging.getLogger('maskgen').error(
'Cannot load COCO annotations. Annotation path set to coco.annotations is invalid.')
return None,None
coco = COCO(annotationPath)
else:
if 'coco' not in params:
logging.getLogger('maskgen').error('Cannot build mask. Missing parameter coco.')
return None,None
coco = params['coco']
index = params['coco.index'] if 'coco.index' in params else createFileNameToIDLookup(coco)
return createMaskImage(image_array, imagefilename, coco,index,areaConstraint=areaConstraint)
def createBatchProjectGlobalState(global_state):
"""
Check the global state for a batch project. Initialize coco and return additions to the global state if missing
:param global_state:
:return:
@type global_state: dict
"""
if 'coco.annotations' not in global_state:
logging.getLogger('maskgen').error('Cannot load COCO annotations. Missing parameter coco.annotations.')
return {}
annotationPath = global_state['coco.annotations']
if not os.path.exists(annotationPath):
logging.getLogger('maskgen').error('Cannot load COCO annotations. Annotation path set to coco.annotations is invalid.')
return {}
coco = loadCoco(annotationPath)
return {'coco' : coco, 'coco.index' : createFileNameToIDLookup(coco), 'coco.subject': {}}
def moveValidImages(image_dir,target_dir,annotationPath,areaConstraint=(0,sys.maxint),maxCount=None, newAnnotationPath=None):
"""
Move the images from the source folder to the target folder if they represent a valid
image that contains images that meet the area constraints.
Download the image from flickr if missing.
If image_dir and target_dir are the same, images that do not meet the criteria are removed.
:param image_dir:
:param target_dir:
:param annotationPath:
:param areaConstraint:
:param maxCount: maximum number of images to move/download
:param newAnnotationPath: if provided, save the annotations for the select images
:return:
"""
coco = COCO(annotationPath)
keep = []
annotations_to_keep = []
for imgId in coco.getImgIds():
if maxCount is not None and len(keep) >= maxCount:
break
if imgId not in coco.anns:
continue
#this download is broken...downloading invalid 500x500 images!
coco.download(tarDir=image_dir, imgIds=[imgId])
imageData = coco.loadImgs(ids=[imgId])[0]
target_file = os.path.join(target_dir,imageData['file_name'])
source_file = os.path.join(image_dir, imageData['file_name'])
if not os.path.exists(target_file):
logging.getLogger('maskgen').warn('File Not Found: {}'.format(imageData['file_name']))
else:
            annIds = coco.getAnnIds(imgIds=[imgId])
            annotations = coco.loadAnns(ids=annIds)
valid_annotations = [annotation for annotation in annotations
if annotation['area'] >= areaConstraint[0] and annotation['area'] <= areaConstraint[1]]
if len(valid_annotations) > 0:
if source_file != target_file:
shutil.move(source_file,target_file)
keep.append(imgId)
annotations_to_keep.extend(valid_annotations)
elif source_file == target_file:
os.remove(source_file)
if newAnnotationPath is not None:
dataset = {'info': coco.dataset['info'],
'images': coco.loadImgs(ids=keep),
'categories': coco.dataset['categories'],
'annotations': annotations_to_keep}
with open(newAnnotationPath, 'w') as f:
json.dump(dataset, f, indent=2, encoding='utf-8')
def createSubset(annotationPath,filename, areaConstraint=(0,sys.maxint),maxCount=None):
"""
Move the images from the source folder to the target folder if they represent a valid
image that contains images that meet the area constraints.
Download the image from flickr if missing.
If image_dir and target_dir are the same, images that do not meet the criteria are removed.
:param image_dir:
:param target_dir:
:param annotationPath:
:param areaConstraint:
:param maxCount: maximum number of images to move/download
:return:
"""
coco = COCO(annotationPath)
keep = []
annotations_to_keep = []
for imgId in coco.getImgIds():
if maxCount is not None and len(keep) >= maxCount:
break
if imgId not in coco.anns:
continue
annIds = coco.getAnnIds(imgIds=[imgId])
annotations = coco.loadAnns(ids=annIds)
valid_annotations = [annotation for annotation in annotations
if annotation['area'] >= areaConstraint[0] and annotation['area'] <= areaConstraint[1]]
if len(valid_annotations) > 0:
keep.append(imgId)
annotations_to_keep.extend(valid_annotations)
dataset = {'info':coco.dataset['info'],
'images':coco.loadImgs(ids=keep),
'categories':coco.dataset['categories'],
'annotations':annotations_to_keep}
with open(filename, 'w') as f:
json.dump(dataset, f, indent=2, encoding='utf-8')
def main(argv=None):
createSubset('/Users/ericrobertson/Downloads/annotations/instances_train2014.json',
'tests/other_plugins/CocoMaskSelector/annotations.json',
maxCount=30)
if __name__ == "__main__":
import sys
sys.exit(main())
```
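A short sketch of how the batch-project helpers above fit together. It assumes the module is importable as `maskgen_coco` and that the annotation file and image name exist; both are placeholders, and only functions defined in the file are used.
```python
# Hypothetical driver for the COCO mask-selection helpers (paths and file names are placeholders).
import numpy as np
from maskgen_coco import createBatchProjectGlobalState, createMaskImageWithParams

def demo():
    # Build the shared state once (COCO object plus file-name -> image-id index).
    global_state = createBatchProjectGlobalState({'coco.annotations': 'annotations.json'})
    if not global_state:
        return  # annotations missing or invalid
    # Pretend we loaded an image of the right size; a real caller would read the file.
    image_array = np.zeros((480, 640, 3), dtype=np.uint8)
    subject, mask = createMaskImageWithParams(
        image_array, 'COCO_train2014_000000000009.jpg', global_state,
        areaConstraint=(1000, 10 ** 6))
    if mask is not None:
        print('selected subject: %s, mask pixels: %d' % (subject, int((mask > 0).sum())))

if __name__ == '__main__':
    demo()
```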
#### File: plugins/ApplyLensDistortion/__init__.py
```python
from PIL import Image
import numpy as np
import cv2
import math
from maskgen import tool_set
from maskgen.cv2api import cv2api_delegate
"""
References
--------------
[1] http://en.wikipedia.org/wiki/Distortion_(optics), August 2012.
[2] <NAME>, "Automatic Correction of Lens Distortion by Using
Digital Image Processing," July 10, 1999.
[3] G.Vassy and T.Perlaki, "Applying and removing lens distortion in post
production," year???
[4] http://www.mathworks.com/products/demos/image/...
create_gallery/tform.html#34594, August 2012.
Adapted from a Matlab implementation by <NAME>, 8/31/2012
"""
def cart2pol(x, y):
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x)
return (rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return (x, y)
def _lens_distort(input_array, k, bordertype='fit', interpolation='linear', padmethod='symmetric', model=None):
output = np.zeros(input_array.shape)
interp_map = {'linear':cv2api_delegate.inter_linear,
'cubic':cv2api_delegate.inter_cubic,
'nearest':cv2api_delegate.inter_nn}
border_map={'symmetric':cv2.BORDER_REFLECT,
'fill': cv2.BORDER_CONSTANT,
'replicate': cv2.BORDER_REPLICATE,
'bound': cv2.BORDER_CONSTANT,
'circular': cv2.BORDER_WRAP
}
    def _correct_distortion(I, k, bordertype='fit', model=None):
M = I.shape[0] # (y rows)
N = I.shape[1] # (x)
center = int(round(float(N) / 2.0)), int(round(float(M) / 2.0))
xi, yi = np.meshgrid(range(N), range(M))
#xi = xi + 1 Matlab is from 1 to N, Python is from 0 to N-1
#yi = yi + 1
xt = xi.reshape(xi.shape[0] * xi.shape[1], order='F') - center[0]
yt = yi.reshape(yi.shape[0] * yi.shape[1], order='F') - center[1]
r, theta = cart2pol(xt, yt)
# calculate the maximum vector( image center to image corner) to be used
# for normalization
R = math.sqrt(center[0] ** 2 + center[1] ** 2)
# Normalize the polar coordinate r to range between 0 and 1
r = r / R
# Apply the r-based transformation
s = model(r, k)
# Denormalize s
s2 = s * R
# Find a scaling parameter based on selected border type
brcor = _correct_border(r, s, k, center, R, bordertype=bordertype)
s2 = s2 * brcor
# Convert back to cartesians
ut, vt = pol2cart(s2, theta)
u = ut.reshape(xi.shape, order='F') + center[0]
v = vt.reshape(yi.shape, order='F') + center[1]
#tmap_B = np.concatenate((u[..., np.newaxis], v[..., np.newaxis]), axis=-1)
return cv2.remap(I, u.astype(np.float32), v.astype(np.float32),
interp_map[interpolation],
borderMode=border_map[padmethod], borderValue=255)
def _correct_border(r, s, k, center, R, bordertype='fit'):
mincenter = min(center) / R
if k < 0:
if bordertype == 'fit':
return r[0] / s[0]
else:
return 1 / (1 + k * (mincenter * mincenter))
elif k > 0:
if bordertype == 'crop':
return r[0] / s[0]
else:
return 1 / (1 + k * (mincenter * mincenter))
if len(input_array.shape) == 3:
for i in range(input_array.shape[2]):
output[:, :, i] = _correct_distortion(input_array[:, :, i], k,bordertype=bordertype,model=model)
else:
        output = _correct_distortion(input_array, k, bordertype=bordertype, model=model)
return output
def applytoimage(input_file, output_file, model, k, bordertype='fit', interpolation='linear', padmethod='symmetric',
ftype=2):
with open(input_file, "rb") as f:
img = Image.open(f)
img.load()
Image.fromarray(
_lens_distort(np.asarray(img), k, bordertype=bordertype, interpolation=interpolation, padmethod=padmethod,
model=model).astype(np.uint8)).save(output_file)
def applytovideo(input_file, output_file, model, k, bordertype='fit', interpolation='linear', padmethod='symmetric',
ftype=2):
cap = cv2api_delegate.videoCapture(input_file)
fourcc = int(cap.get(cv2api_delegate.fourcc_prop))
fps = cap.get(cv2api_delegate.prop_fps)
height = int(np.rint(cap.get(cv2api_delegate.prop_frame_height)))
width = int(np.rint(cap.get(cv2api_delegate.prop_frame_width)))
out_video = cv2.VideoWriter(output_file, fourcc, fps, (width, height))
if not out_video.isOpened():
        err = output_file + " fourcc: " + str(fourcc) + " FPS: " + str(fps) + \
" H: " + str(height) + " W: " + str(width)
raise ValueError('Unable to create video ' + err)
try:
count = 0
while (cap.grab()):
ret, frame = cap.retrieve()
count += 1
out_video.write(_lens_distort(frame,k, bordertype=bordertype, interpolation=interpolation, padmethod=padmethod,
model=model).astype(np.uint8))
finally:
cap.release()
out_video.release()
def applylensdistortion(input_file, output_file, k, bordertype='fit', interpolation='linear', padmethod='symmetric',
ftype=2):
model = lambda r, k: r * (1 / (1 + k * r))
if int(ftype) == 2:
model = lambda r, k: r * (1 / (1 + k * r * r))
elif int(ftype) == 3:
model = lambda r, k: r * (1 + k * r)
elif int(ftype) == 4:
model = lambda r, k: r * (1 + k * r * r)
if tool_set.fileType(input_file) == 'image':
applytoimage(input_file, output_file,model,k,bordertype=bordertype,interpolation=interpolation,padmethod=padmethod,
ftype=ftype)
else:
applytovideo(input_file, output_file, model, k, bordertype=bordertype, interpolation=interpolation,
padmethod=padmethod,
ftype=ftype)
def transform(im, source, target, **kwargs):
k = float(kwargs['threshold'])
applylensdistortion(source, target,k,
ftype=kwargs['model'] if 'model' in kwargs else 4,
interpolation=kwargs['interpolation'] if 'interpolation' in kwargs else 'linear',
bordertype=kwargs['bordertype'] if 'bordertype' in kwargs else 'fit',
padmethod=kwargs['padmethod'] if 'padmethod' in kwargs else 'fill')
dt = "Pincushion" if k < 0 else ("Barrel" if k > 0 else "Mustache")
return {"Distortion Type" : dt}, None
def operation():
return {
'name': 'LensDistortion',
'category': 'AntiForensic',
'description': 'Apply lens Distortion.',
'software': 'PAR Lens Distort',
'version': '0.1',
'arguments': {
'threshold': {
'type': 'float[-1.0:1.0]',
'defaultValue': '0.25',
'description': 'Distortion level (-1:1). Use x < 0 for pincushion, x > 0 for barrel (be sure to set distortion type param appropriately).'
},
'interpolation': {
'type': 'list',
'values': ['linear','nearest','cubic'],
'defaultValue': 'linear',
'description': 'Translates to CV2 interpolation (see resize())'
},
'padmethod': {
'type': 'list',
'values': ['bound', 'symmetric','fill','circular','replicate'],
'defaultValue': 'fill',
'description': 'controls how the resamples interpolates or assigns values to elements that map close to or outside the edge of the image'
},
'bordertype': {
'type': 'list',
'values': ['fit', 'crop'],
'defaultValue': 'fit',
'description': 'How to treat edge of image'
},
'model': {
'type': 'list',
'values': [1,2,3,4],
'defaultValue': 4,
'description': 'Models 1 and 2 are sigmoid. Models 3 and 4 are polynomial degree 1 and 2'
}
},
'transitions': [
'image.image'
]
}
def suffix():
return '.png'
```
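To make the four `ftype` models above easier to compare, here is a standalone sketch (plain NumPy, no GIMP or maskgen dependencies) that evaluates each radial model on the normalized radius used in `_correct_distortion`; the plugin labels k > 0 as barrel distortion and k < 0 as pincushion.
```python
# Standalone comparison of the radial distortion models used by the plugin.
import numpy as np

# Same model definitions as applylensdistortion(), keyed by ftype.
MODELS = {
    1: lambda r, k: r * (1.0 / (1.0 + k * r)),
    2: lambda r, k: r * (1.0 / (1.0 + k * r * r)),
    3: lambda r, k: r * (1.0 + k * r),
    4: lambda r, k: r * (1.0 + k * r * r),
}

def table(k=0.25):
    # r is the radius normalized by the image half-diagonal, as in _correct_distortion.
    r = np.linspace(0.0, 1.0, 5)
    print('r: ' + ' '.join('%.2f' % v for v in r))
    for ftype, model in sorted(MODELS.items()):
        s = model(r, k)
        print('ftype %d (k=%.2f): %s' % (ftype, k, ' '.join('%.3f' % v for v in s)))

if __name__ == '__main__':
    table(0.25)   # the plugin labels k > 0 as barrel distortion
    table(-0.25)  # and k < 0 as pincushion
```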
#### File: plugins/DirMaskSelect/__init__.py
```python
import os
import glob
import maskgen
def transform(img, source, target, **kwargs):
dirname = kwargs['directory']
filename = os.path.basename(os.path.splitext(source)[0])
mask = glob.glob(os.path.join(dirname, filename) + '*')[0]
return {'override_target': mask}, None
def operation():
return {
'category': 'Select',
'name': 'SelectRegion',
'description': 'Mask Selector: ',
'software': 'Maskgen',
'version': maskgen.__version__,
'arguments': {'directory': {'type': "text", 'description': 'Directory of Masks'}},
'transitions': [
'image.image'
]
}
def suffix():
return '.png'
```
#### File: plugins/DonorlessOperation/__init__.py
```python
import logging
from maskgen import video_tools
import random
import maskgen.video_tools
import os
import maskgen
import json
plugin = "DonorPicker"
def transform(img, source, target, **kwargs):
valid = []
possible = []
data = {}
logging.getLogger('maskgen').info(str(kwargs))
for f in os.listdir(kwargs['Directory']):
if os.path.splitext(f)[1] == '.json':
data = json.load(open(os.path.join(kwargs['Directory'],f)))
elif video_tools.get_shape_of_video(os.path.join(kwargs['Directory'], f)) == video_tools.get_shape_of_video(source):
possible.append(os.path.join(kwargs['Directory'],f))
for d in possible:
if os.path.split(d)[1] in data:
valid.append(d)
if len(valid) == 0:
raise ValueError('No donors of correct size available')
donor = valid[0]
if kwargs['Pick Preference'] == 'Random':
donor = valid[random.randint(0,len(valid)-1)]
elif kwargs['Pick Preference'] == 'By Name':
for v in valid:
if os.path.splitext(source)[0] in (os.path.split(v)[1]):
donor = v
elif kwargs['Pick Preference'] =='Specific':
donor = kwargs['Donator']
data = data[os.path.split(donor)[1]]
data['Donator'] = donor
logging.getLogger('maskgen').info("Donor Selected: {}".format(donor))
#shutil.copy((os.path.join(kwargs['Directory'],f)),os.path.join(scenario_model.get, f))
#result, err = callPlugin(kwargs['Plugin'],img,source,target,**kwargs)
#final = {k: v for d in [result, data] for k, v in d.items()} if result is not None else data
logging.getLogger('maskgen').info(str(data))
#os.remove(os.path.join(".", f))
return data,None
def operation():
return {'name': 'SelectRegion',
'category': 'Select',
'type': 'Selector',
'description': 'Pick a donor and other data from a directory',
'software': 'Maskgen',
'version': maskgen.__version__,
'arguments': {
'Directory': {
'type': 'file',
'defaultvalue': '.',
'description': 'Directory full of possible PRNU choices'
},
'Pick Preference': {
'type': 'list',
'values': ['Random', 'By Name', 'Specific'],
'defaultvalue': 'Random',
'description': 'Select the deciding factor for which video will be selected from the directory'
}
},
'transitions': [
            'video.video',
            'image.image'
]
}
```
#### File: plugins/ExifGPSChange/__init__.py
```python
import os
from maskgen.exif import *
def get_new_position(start, range=0, degrees_of_change=3):
import random
if start is None:
startdegrees = random.randint(1, range - 1)
else:
parts = start.split()
startdegrees = int(float(parts[0]))
return '{} {:} {:.2f}'.format((random.randint(-degrees_of_change, degrees_of_change) + startdegrees) % (range - 1),
random.randint(1, 59),
random.random() * 60)
def get_same_positon(start, range=0):
if start is None:
return get_new_position(start, range=range)
return start
def modify_value(source, target, tag, name, modifier):
result = getexif(source, args=[tag])
newvalue = modifier(result[name] if name in result else None)
if newvalue is None:
newvalue = get_new_position(None)
return runexif(['-overwrite_original', '-P', '-q', '-m',
tag + '=' + newvalue, target], fix=False)
def relocate(source, target, degrees_of_change):
import functools
latfunc = functools.partial(get_new_position, range=90, degrees_of_change=degrees_of_change)
lonfunc = functools.partial(get_new_position, range=180, degrees_of_change=degrees_of_change)
ok = False
ok |= modify_value(source, target, '-xmp:gpslatitude', 'GPS Latitude', latfunc)
ok |= modify_value(source, target, '-exif:gpslatitude', 'GPS Latitude', latfunc)
ok |= modify_value(source, target, '-exif:gpslongitude', 'GPS Longitude', lonfunc)
ok |= modify_value(source, target, '-xmp:gpslongitude', 'GPS Longitude', lonfunc)
return ok
def relocate_to(donor, target):
import functools
ok = False
latfunc = functools.partial(get_same_positon, range=90, )
lonfunc = functools.partial(get_same_positon, range=180)
ok |= modify_value(donor, target, '-xmp:gpslatitude', 'GPS Latitude', latfunc)
ok |= modify_value(donor, target, '-exif:gpslatitude', 'GPS Latitude', latfunc)
ok |= modify_value(donor, target, '-exif:gpslongitude', 'GPS Longitude', lonfunc)
ok |= modify_value(donor, target, '-xmp:gpslongitude', 'GPS Longitude', lonfunc)
return ok
def transform(img, source, target, **kwargs):
degrees_of_change = int(kwargs['degrees of change']) if 'degrees of change' in kwargs else 3
if 'donor' in kwargs and kwargs['donor'] is not None and os.path.exists(kwargs['donor']):
donor = kwargs['donor']
ok = relocate_to(donor, target)
else:
ok = relocate(source, target, degrees_of_change)
if ok:
runexif(['-overwrite_original', '-P', '-q', '-m', '-XMPToolkit=', target])
return None, 'Failed' if not ok else None
def suffix():
return None
def operation():
return {'name': 'AntiForensicEditExif::GPSChange',
'category': 'AntiForensic',
'description': 'Set GPS Location',
'software': 'exiftool',
'version': get_version(),
'arguments': {
'donor': {
'type': 'donor',
'defaultValue': None,
'description': 'Image/video with donor metadata.'
},
'degrees of change': {
'type': 'int',
'defaultValue': 3,
'description': 'Positive/negative range of change.'
}
},
'transitions': [
'image.image',
'video.video'
]
}
```
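A self-contained sketch of the "degrees minutes seconds.ss" strings that `get_new_position` produces, re-implemented with plain `random` so it can be run without exiftool; the jitter bounds mirror the degrees-of-change logic above, but this is an illustration, not the plugin itself.
```python
# Illustration of the "DD MM SS.ss" jitter produced by get_new_position (no exiftool needed).
import random

def jitter_position(start=None, wrap=90, degrees_of_change=3):
    # start is an existing "DD MM SS.ss" string, or None to pick random degrees
    if start is None:
        degrees = random.randint(1, wrap - 1)
    else:
        degrees = int(float(start.split()[0]))
    new_degrees = (random.randint(-degrees_of_change, degrees_of_change) + degrees) % (wrap - 1)
    return '{} {:} {:.2f}'.format(new_degrees, random.randint(1, 59), random.random() * 60)

if __name__ == '__main__':
    print(jitter_position('40 26 46.30', wrap=90))    # latitude-style wrap
    print(jitter_position(None, wrap=180))            # longitude with no prior value
```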
#### File: plugins/LiquidRescale/__init__.py
```python
from __future__ import division
from maskgen.image_wrap import ImageWrapper,openImageFile
import subprocess
import platform
import os
import logging
"""
Convert donor to png and resize to fit source dimensions
using Liquid Rescale plugin of GIMP.
"""
gimpfile = os.getenv('MASKGEN_GIMP')
if gimpfile is None:
if "Darwin" in platform.platform():
gimpfile = "DYLD_LIBRARY_PATH=/Applications/GIMP.app/Contents/Resources/lib:$DYLD_LIBRARY_PATH /Applications/GIMP.app/Contents/MacOS/GIMP"
else:
gimpfile = "gimp-2.8"
lqr_command = "batch-gimp-lqr"
def resizeUsingLQR(fpn, sizeNew):
# Compose command line string that calls GIMP plugin Liquid Rescale
lqrCommandLine = [gimpfile,
"-i",
"-f",
"-b",
"\"({} \\\"{}\\\" {} {})\"".format(lqr_command, fpn.replace("\\","\\\\"), str(sizeNew[0]),str(sizeNew[1])),
"-b",
"\"(gimp-quit -0)\""]
pcommand= subprocess.Popen(" ".join(lqrCommandLine), shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout, stderr = pcommand.communicate()
if stderr is not None and 'Error' in stderr:
logging.getLogger('maskgen').error(stderr)
raise IOError('Failed to execute plugin')
def valtestExcludeSameSize(sizSource=(0, 0), sizDonor=(0, 0)):
if sizSource == sizDonor:
raise ValueError('LQR images are the same size')
def valtestExcludeSizeNotWithinPercent(sizSource=(0, 0), sizDonor=(0, 0), nPercent=20):
if abs((sizSource[0] - sizDonor[0]) / sizSource[0]) > nPercent / 100.0:
raise ValueError('LQR image sizes are too different')
if abs((sizSource[1] - sizDonor[1]) / sizSource[1]) > nPercent / 100.0:
raise ValueError('LQR image sizes are too different')
def validateImageSizes(sizSource=(0, 0), sizDonor=(0, 0), nPercent=20):
valtestExcludeSameSize(sizSource, sizDonor)
    valtestExcludeSizeNotWithinPercent(sizSource, sizDonor, nPercent)
def transform(img, source, target, **kwargs):
donor = kwargs['donor']
sizSource = img.size
sizDonor = openImageFile(donor).size
validateImageSizes(sizSource, sizDonor)
# Use Liquid Rescale to resize donor image
# to size of source image.
resizeUsingLQR(target, sizDonor)
return None, None
def operation():
return {'name': 'TransformSeamCarving',
'category': 'Transform',
'description': 'Resize donor to size of Input using LQR. Requires GIMP. Set environment variable MASKGEN_GIMP to the gimp binary',
'software': 'GIMP',
'version': '2.8.20',
'arguments': {
'donor': {
'type': 'donor',
'defaultvalue': None,
'description': 'png that contributes size info'
},
'percentage bounds': {
'type': 'int',
'defaultvalue': 20,
'description': 'Proximity '
}
},
'transitions': [
'image.image'
]
}
def suffix():
return '.png'
```
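The batch command that `resizeUsingLQR` hands to GIMP is easiest to understand when printed. The sketch below only assembles the string for a hypothetical file and size, using the same quoting as the plugin, and does not launch GIMP.
```python
# Preview of the GIMP batch command built by resizeUsingLQR (nothing is executed).
gimpfile = "gimp-2.8"          # or the MASKGEN_GIMP override
lqr_command = "batch-gimp-lqr"

def lqr_command_line(fpn, size_new):
    return " ".join([
        gimpfile,
        "-i", "-f",
        "-b",
        "\"({} \\\"{}\\\" {} {})\"".format(lqr_command, fpn.replace("\\", "\\\\"),
                                           str(size_new[0]), str(size_new[1])),
        "-b",
        "\"(gimp-quit -0)\"",
    ])

if __name__ == '__main__':
    # Hypothetical target image and donor-derived size.
    print(lqr_command_line('/tmp/target.png', (1024, 768)))
```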
#### File: plugins/MedianStack/__init__.py
```python
import logging
import os
from maskgen import exif
import maskgen
import numpy as np
from maskgen.algorithms.opencv_registration import OpenCVECCRegistration
from maskgen.image_wrap import ImageWrapper
from maskgen.tool_set import ZipCapture
from maskgen.support import getValue
def transform(img, source, target, **kwargs):
# source = zip of images
if 'Registration Type' in kwargs:
reg_type = kwargs['Registration Type']
else:
reg_type = 'ECC'
zipf = ZipCapture(source)
imgs = []
logger = logging.getLogger("maskgen")
retrieved, zip_image = zipf.read()
if not retrieved:
raise ValueError("Zip File {0} is empty".format(os.path.basename(source)))
registrar = {'ECC': OpenCVECCRegistration(os.path.join(zipf.dir, zipf.names[0]))}
reg_tool = registrar[reg_type]
if 'Image Rotated' in kwargs and kwargs['Image Rotated'] == 'yes':
try:
orientation = getValue(zipf.get_exif(), 'Orientation', None)
except KeyError:
orientation = None
else:
orientation = None
logger.debug("Beginning image alignment for " + os.path.basename(source))
while retrieved:
aligned = reg_tool.align(zip_image)
imgs.append(aligned)
retrieved, zip_image = zipf.read()
logger.debug(os.path.basename(source) + " alignment complete")
if not imgs:
return None, False
stacks = np.stack(np.asarray(imgs))
    median_img = np.median(stacks, 0).astype(np.uint8)
analysis = {'Merge Operation': 'Median Pixel'}
if orientation is not None:
analysis.update(exif.rotateAnalysis(orientation))
median_img = exif.rotateAccordingToExif(median_img, orientation, counter=True)
ImageWrapper(median_img).save(target, format='PNG')
analysis['Image Rotated'] = 'yes' if 'rotation' in analysis else 'no'
return analysis, None
def operation():
return {'name': 'MediaStacking',
'category': 'Output',
'description': 'Save an image with median pixel values taken from a zip of images.',
'software': 'maskgen',
'version': maskgen.__version__[:3],
'arguments': {
'Image Rotated': {
'type': 'yesno',
'defaultvalue': 'no',
'description': 'Rotate image according to EXIF'
},
'Registration Type': {
'type': 'text',
'value': 'ECC',
'description': 'Type of registration used to align images.',
'visible': False
}
},
'transitions': [
'zip.image'
]
}
def suffix():
return '.png'
```
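The core of the plugin, stacking aligned frames and taking a per-pixel median, can be demonstrated with plain NumPy. This sketch skips the ECC registration and EXIF handling and just shows how the median suppresses transient content that appears in only a few frames.
```python
# Per-pixel median stacking on synthetic frames (registration/EXIF handling omitted).
import numpy as np

def median_stack(frames):
    # frames: list of HxW (or HxWxC) uint8 arrays that are already aligned
    stack = np.stack(np.asarray(frames))
    return np.median(stack, axis=0).astype(np.uint8)

if __name__ == '__main__':
    base = np.full((4, 4), 100, dtype=np.uint8)
    frames = [base.copy() for _ in range(5)]
    frames[2][1, 1] = 255           # a transient bright pixel in a single frame
    result = median_stack(frames)
    print(result[1, 1])             # 100: the outlier is removed by the median
```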
#### File: plugins/OutputRawVideo/__init__.py
```python
from maskgen.video_tools import outputRaw
import maskgen
"""
Save the video as AVI using the rawvideo codec.
"""
def transform(img,source,target, **kwargs):
outputRaw(source,target)
return None,None
def operation():
return {'name':'OutputAVI',
'category':'Output',
            'description':'Save a video as .avi using codec rawvideo',
'software':'ffmpeg',
'version':maskgen.video_tools.get_ffmpeg_version(),
'arguments':{
},
'transitions': [
'video.video'
]
}
def suffix():
return '.avi'
```
#### File: plugins/OutputX264/__init__.py
```python
from maskgen.video_tools import x264
from maskgen import exif
import maskgen
"""
Save the video as MP4 using X264 lossless encoding.
"""
def transform(img,source,target, **kwargs):
crf = int(kwargs['crf']) if 'crf' in kwargs else 0
x264(source,outputname=target,crf=crf)
orientation_source = exif.getOrientationFromExif(source)
orientation_target = exif.getOrientationFromExif(target)
analysis = {"Image Rotated": "no"}
if orientation_source is not None and orientation_source != orientation_target:
analysis = {"Image Rotated": "yes"}
return analysis,None
def operation():
return {'name':'OutputAVI',
'category':'Output',
            'description':'Save a video as .mp4 using codec X264',
'software':'ffmpeg',
'version':maskgen.video_tools.get_ffmpeg_version(),
'arguments':{
'crf':{
'type':'int[0:100]',
'defaultvalue':'0',
'description':'Constraint Rate Factor. 0 is lossless'
}
},
'transitions': [
'video.video'
]
}
def suffix():
return '.avi'
```
#### File: plugins/OverwriteAudioStream/__init__.py
```python
import maskgen.video_tools
import maskgen.tool_set
import maskgen.ffmpeg_api
"""
Concatenate or replace donor audio in a video.
"""
def transform(img, source, target, **kwargs):
whichones = kwargs['Stream'] if 'Stream' in kwargs else 'both'
donor = kwargs['donor']
donor_data = maskgen.ffmpeg_api.get_meta_from_video(donor, show_streams=True)[0]
has_audio = len(maskgen.ffmpeg_api.get_stream_indices_of_type(maskgen.ffmpeg_api.get_meta_from_video(source), stream_type='audio')) > 0
milli, frame = maskgen.tool_set.getMilliSecondsAndFrameCount(kwargs['Start Time'])
channelspecifier = ':1' if whichones == 'right' else (':0' if whichones == 'left' else '')
streamno = 0
if len(donor_data) > 0:
        streamno = [idx for idx, val in enumerate(donor_data) if val['codec_type'] == 'audio'][0]
command = ['-y', '-i', source, '-i', donor]
if not has_audio:
command.extend(['-c:v', 'copy', '-c:a', 'aac', target])
elif milli is not None and milli > 0:
command.extend(['-filter_complex',
'[0:a]atrim=start=0:end=' + \
maskgen.tool_set.getSecondDurationStringFromMilliseconds(milli) + \
'[aout];[1:a]atrim=start=0[bout];[aout][bout]concat=n=2:v=0:a=1[allout]',
'-map','0:v','-map','[allout]','-c:v','copy', target])
elif frame > 0:
command.extend(['-filter_complex',
'[0:a]atrim=start_sample=0:end_sample={}'.format(frame) + \
'[aout];[1:a]atrim=start=0[bout];[aout][bout]concat=n=2:v=0:a=1[allout]',
'-map','0:v','-map','[allout]','-c:v','copy',target])
else:
command.extend(['-map', '0:v', '-map', '1:' + str(streamno) + channelspecifier, '-c', 'copy', target])
maskgen.ffmpeg_api.run_ffmpeg(command, noOutput=False)
return {'add type': 'replace', 'synchronization': 'none'}, None
def operation():
return {'name': 'AddAudioSample',
'category': 'Audio',
'description': 'Add Audio Stream to Video. The Start time is insertion point over the original video',
'software': 'ffmpeg',
'version': maskgen.video_tools.get_ffmpeg_version(),
'arguments': {
'donor': {
'type': 'donor'
},
'add type': {
'type': 'str',
'defaultvalue': 'replace',
'description': 'Replacing or overlaying.'
},
'synchronization': {
'type': 'str',
'defaultvalue': 'none',
'description': 'If additional track alignment changes occurred with the addition.'
},
'Start Time': {
'type': 'int',
'defaultvalue': 0,
'description': 'Start time of the audio clip (HH:MI:SS.micro or frame number)'
}
},
'transitions': [
'video.video',
'audio.video'
]
}
def suffix():
return None
```
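Because the interesting part of the plugin is the ffmpeg argument list it builds, the sketch below reproduces the millisecond-based concat branch for hypothetical file names and prints the command instead of running it; the filter string is copied from the transform above, and the seconds formatting stands in for `getSecondDurationStringFromMilliseconds`.
```python
# Preview of the ffmpeg arguments built for the "insert at time offset" case (not executed).
def build_concat_command(source, donor, target, milliseconds):
    seconds = '%.6f' % (milliseconds / 1000.0)  # stand-in for getSecondDurationStringFromMilliseconds
    return ['-y', '-i', source, '-i', donor,
            '-filter_complex',
            '[0:a]atrim=start=0:end=' + seconds +
            '[aout];[1:a]atrim=start=0[bout];[aout][bout]concat=n=2:v=0:a=1[allout]',
            '-map', '0:v', '-map', '[allout]', '-c:v', 'copy', target]

if __name__ == '__main__':
    # Hypothetical inputs; a real caller would pass these to maskgen.ffmpeg_api.run_ffmpeg.
    print(' '.join(build_concat_command('video.mp4', 'narration.wav', 'out.mp4', 2500)))
```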
#### File: plugins/PasteSpliceDonor/__init__.py
```python
import shutil
import os
import maskgen
"""
Convenience plugin to combine donation and splice connecting in one operation.
"""
def transform(img, source, target, **kwargs):
    if 'Final Image' in kwargs:
shutil.copy(kwargs['Final Image'], target)
return {'rename_target': os.path.split(kwargs['Final Image'])[1]}, None
else:
return None, None
def operation():
return {'name': 'PasteSplice',
'category': 'Paste',
'software': 'maskgen',
'version': maskgen.__version__[0:3],
'arguments': {
'donor': {
"type": "donor",
"description": "Image to paste."
},
'Final Image': {
"type": "file:image",
"description": "Final Result of the manipulation."
}
},
'description': 'Paste Splice Convenience Filter to combine paste splice and donation connections in one step.',
'transitions': [
'image.image'
]
}
def suffix():
return None
```
#### File: plugins/PreSegmentedSelectRegion/__init__.py
```python
from maskgen.segmentation.segmanage import select_region, convert_color, find_segmentation_classifier, \
segmentation_classification
import cv2
"""
Selected a region using a presegmented image descriptor.
A directory contains PNG images, each with the same name (different suffix) as the source image (md5 name).
Each PNG contains pixels whose colors are associated with their assigned classification, as determined by another algorithm.
A classifications.csv file in the same directory contains the mapping of color to classification.
Example contents:
"[200,100,200]",house
Pick one color of all colors in the image, create a mask with the pixels associated with the chosen color set to white.
Save the mask as the target image. The result of the transform includes a variable 'subject' set to the classification of the chosen color.
"""
def transform(img, source, target, **kwargs):
segmentation_directory = kwargs['segmentation_directory']
segmentation_color = kwargs['color'] if 'color' in kwargs else None
source = kwargs['alternate_source'] if 'alternate_source' in kwargs else source
segmentation_color = convert_color(segmentation_color)
segment_mask = find_segmentation_classifier(source, segmentation_directory)
if segment_mask is None:
return None, 'Cannot find segmentation mask'
newimg, segmentation_color = select_region(img, segment_mask, segmentation_color)
newimg.save(target)
return {'subject': segmentation_classification(segmentation_directory, segmentation_color)}, None
# the actual link name to be used.
# the category to be shown
def operation():
return {'name': 'SelectRegion',
'category': 'Select',
'description': 'Use a set of presegmented images to pick a select region and purpose. ',
'software': 'OpenCV',
'version': cv2.__version__,
'arguments':
{
'segmentation_directory': {
'type': 'imagefile',
'defaultvalue': None,
'description': 'Directory containing the image segments'
},
'color': {
'type': 'string',
'defaultvalue': None,
'description': 'The color to be used for classification (e.g. [100,200,130])'
}
},
'output':
{
'subject': {
'type': 'string',
'description': 'the subject name of the chosen segment of an image'
}
},
'transitions': [
'image.image'
]
}
def suffix():
return None
```
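Based on the classifications.csv example in the docstring, a reader of the segmentation directory might look like the hedged sketch below. The exact format handled by `segmanage` may differ, so treat this purely as an illustration of the color-to-subject mapping.
```python
# Hypothetical reader for a classifications.csv of the form: "[200,100,200]",house
import ast
import csv

def load_classifications(lines):
    mapping = {}
    for color_text, subject in csv.reader(lines):
        # "[200,100,200]" -> (200, 100, 200)
        mapping[tuple(ast.literal_eval(color_text))] = subject
    return mapping

if __name__ == '__main__':
    # Sample rows matching the docstring example; a real caller would read the CSV file.
    sample = ['"[200,100,200]",house', '"[10,10,10]",road']
    classes = load_classifications(sample)
    print(classes.get((200, 100, 200)))  # -> house
```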
#### File: plugins/SmartMaskSelector/__init__.py
```python
from maskgen import image_wrap
import numpy as np
from random import randint
from skimage import segmentation
import skimage
import math
"""
Select a region from a segmented image to produce a selection mask. Can be used with paste splice and paste clone.
In the latter case, paste_x and paste_y values are returned, indicating a suitable upper-left corner paste position in the source image.
"""
def build_mask_box(pixelWidth, pixelHeight, shape):
if pixelWidth> shape[1]/2:
pixelWidth=shape[1]/2-1
if pixelHeight> shape[0]/2:
pixelHeight=shape[0]/2-1
r_x = randint(1, abs(shape[1] - pixelWidth)-1)
r_y = randint(1, abs(shape[0] - pixelHeight)-1)
mask = np.zeros((shape[0], shape[1]))
mask[r_y:r_y + pixelHeight, r_x:r_x + pixelWidth] = 255
new_position_x = randint(1, abs(shape[1] - pixelWidth))
new_position_y = randint(1, abs(shape[0] - pixelHeight))
return new_position_x,new_position_y, mask
def build(img, segment_labels, unique_labels, label_counts, size_constraint):
shape = img.shape
    # pick the segment whose pixel count is closest to the requested size
    diffs = abs(label_counts - size_constraint)
    best = np.where(diffs == min(diffs))
    segInd = best[0][0]
    segVal = unique_labels[segInd]
mask = np.zeros((shape[0], shape[1]))
mask[segment_labels == segVal] = 255
indices = np.where(mask == 255)
pixelWidth = abs(max(indices[1]) - min(indices[1]))
pixelHeight = abs(max(indices[0]) - min(indices[0]))
if pixelWidth> shape[1]/2:
pixelWidth=shape[1]/2-1
if pixelHeight> shape[0]/2:
pixelHeight=shape[0]/2-1
new_position_x = randint(1, abs(shape[1] - pixelWidth))
new_position_y = randint(1, abs(shape[0] - pixelHeight))
return new_position_x, new_position_y, mask
def build_mask_slic(img, size,W,H):
shape = img.shape
imgsize=img.shape[0]*img.shape[1]
numsegments = imgsize / size
numsegments = max(numsegments,1)
segment_labels = segmentation.slic(img, compactness=5, n_segments=numsegments)
unique_labels, label_counts = np.unique(segment_labels,return_counts=True)
if len(unique_labels) < 10:
new_position_x,new_position_y ,mask = build_mask_box(W,H,shape)
else:
new_position_x, new_position_y, mask = build(img, segment_labels, unique_labels,label_counts,size)
return new_position_x, new_position_y, mask
def transform(img,source,target,**kwargs):
smallw = int(kwargs['smallw']) if 'smallw' in kwargs else 32
smallh = int(kwargs['smallh']) if 'smallh' in kwargs else 32
mediumw = int(kwargs['mediumw']) if 'mediumw' in kwargs else 64
mediumh = int(kwargs['mediumh']) if 'mediumh' in kwargs else 64
largew = int(kwargs['largew']) if 'largew' in kwargs else 128
largeh = int(kwargs['largeh']) if 'largeh' in kwargs else 128
size = int(kwargs['size']) if 'size' in kwargs else 1
# to support the test, used the abbreviate version
pasteregionsize = kwargs['region'] if 'region' in kwargs else 1.0
pasteregionsize = kwargs['region size'] if 'region size' in kwargs else pasteregionsize
    color = map(int,kwargs['savecolor'].split(',')) if 'savecolor' in kwargs and kwargs['savecolor'] != 'none' else None
op = kwargs['op'] if 'op' in kwargs else 'box'
if size ==1:
W=smallw
H=smallh
elif size ==2:
W=mediumw
H=mediumh
else:
W=largew
H=largeh
cv_image = img.to_array()
if pasteregionsize < 1.0:
dims = (int(img.size[1] * pasteregionsize), int(img.size[0] * pasteregionsize))
else:
dims = (img.size[1], img.size[0])
x = (img.size[1]-dims[0])/2
y = (img.size[0]-dims[1])/2
if len(cv_image.shape) > 2:
cv_image = cv_image[x:dims[0]+x,y:dims[1]+y,:]
else:
cv_image = cv_image[x:dims[0]+x, y:dims[1]+y]
imgsize = cv_image.shape[0] * cv_image.shape[1]
area = W * H
if area < (imgsize/2):
W=smallw
H=smallh
if op == 'box':
new_position_x,new_position_y,mask= build_mask_box(W,H,cv_image.shape)
else:
new_position_x,new_position_y,mask= build_mask_slic(cv_image,area,W,H)
if pasteregionsize < 1.0:
mask2 =np.zeros((img.to_array().shape[0],img.to_array().shape[1]),dtype=np.uint8)
if len(mask2.shape) > 2:
mask2[x:dims[0]+x, y:dims[1]+y, :] = mask
else:
mask2[x:dims[0]+x, y:dims[1]+y] = mask
mask = mask2
new_position_x+=x
new_position_y+=y
if 'alpha' in kwargs and kwargs['alpha'] == 'yes':
rgba = np.asarray(img.convert('RGBA'))
rgba = np.copy(rgba)
rgba[mask != 255] = 0
image_wrap.ImageWrapper(rgba).save(target)
elif color is not None:
rgb = np.zeros((mask.shape[0],mask.shape[1],3),dtype=np.uint8)
for channel in range(3):
rgb[:,:,channel] = (mask/255)*color[channel]
image_wrap.ImageWrapper(rgb).save(target)
else:
image_wrap.ImageWrapper(mask.astype('uint8')).save(target)
return {'paste_x': new_position_x, 'paste_y': new_position_y},None
def operation():
return {
'category': 'Select',
'name': 'SelectRegion',
        'description':'Select a region from a segmented image to produce a selection mask. Can be used with paste splice and paste clone. In the latter case, paste_x and paste_y variables are returned, indicating a suitable upper-left corner paste position in the source image.',
'software':'skimage',
'version':skimage.__version__,
'arguments':{'smallw': {'type': "int[32:64]", 'defaultValue': 32, 'description':'small mask width size'},
'smallh': {'type': "int[32:64]", 'defaultValue': 32,'description':'small mask height size'},
'mediumw': {'type': "int[64:128]", 'defaultValue': 64, 'description':'medium mask width size'},
'mediumh': {'type': "int[64:128]", 'defaultValue': 64, 'description':'medium mask width size'},
'largew': {'type': "int[128:1000]",'defaultValue': 128, 'description':'large mask width size'},
'largeh': {'type': "int[128:1000]", 'defaultValue': 128,'description':'large mask width size'},
'size': {'type': "int[1:4]",'defaultValue': 1, 'description':'mask size 1=small, 2=med, 3=large'},
'op': {'type': 'list', 'values' : ['slic', 'box'], 'description':'selection algorithm to use'},
'alpha': {'type' : "yesno",
"defaultvalue": "no",
'description': "If yes, save the image with an alpha channel instead of the mask."},
"savecolor": {'type' : "text",
"defaultvalue": "none",
'description': "color value in rgb 100,100,100 for color mask generation."}
},
'transitions': [
'image.image'
]
}
def suffix():
return '.png'
```
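To show the SLIC-based selection idea in isolation, here is a small standalone sketch (NumPy plus scikit-image only) that segments an image and turns the superpixel whose size is closest to a target area into a binary mask, mirroring the logic of `build` above without the box fallback or the paste-position bookkeeping.
```python
# Standalone SLIC segment-to-mask selection (fallbacks and paste bookkeeping omitted).
import numpy as np
from skimage import segmentation

def pick_segment_mask(img, target_area):
    # Roughly one superpixel per target_area pixels, as in build_mask_slic.
    n_segments = max(img.shape[0] * img.shape[1] // target_area, 1)
    labels = segmentation.slic(img, compactness=5, n_segments=n_segments)
    unique_labels, counts = np.unique(labels, return_counts=True)
    # Choose the segment whose pixel count is closest to the requested area.
    best = unique_labels[np.argmin(np.abs(counts - target_area))]
    mask = np.zeros(labels.shape, dtype=np.uint8)
    mask[labels == best] = 255
    return mask

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    demo_img = rng.randint(0, 255, (128, 128, 3)).astype(np.uint8)
    mask = pick_segment_mask(demo_img, target_area=64 * 64)
    print(mask.shape, int((mask > 0).sum()))
```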
#### File: plugins/VideoPropertySelector/__init__.py
```python
import maskgen
from maskgen.ffmpeg_api import get_meta_from_video
"""
Select FFPROBE properties
"""
def __get_channel_data(source_data, codec_type):
for data in source_data:
if data['codec_type'] == codec_type:
return data
def transform(img, source, target, **kwargs):
meta = get_meta_from_video(source, show_streams=True)
video_data = __get_channel_data(meta[0],'video')
return video_data,None
def operation():
return {'name': 'SelectRegion',
'category': 'Select',
'software': 'maskgen',
'type': 'selector',
'version': maskgen.__version__[0:6],
'description': 'Gather Meta Information',
'transitions': [
'video.video'
]
}
def suffix():
return '.png'
```
#### File: plugins/YoutubeUpDown/__init__.py
```python
from __future__ import unicode_literals
import httplib
import httplib2
import json
import os
import random
import sys
import time
import argparse
import subprocess as sp
import contextlib
from maskgen import software_loader
from maskgen import video_tools
import maskgen
import logging
from collections import OrderedDict
try:
import youtube_dl
except ImportError:
logging.getLogger('maskgen').error('Missing python library youtube_dl, run: pip install youtube-dl')
raise ValueError('Missing python dependencies')
try:
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow
except ImportError:
logging.getLogger('maskgen').error('Missing python libraries for google API or oauth.\n'
'run: pip install --upgrade google-api-python-client')
raise ValueError('Missing python dependencies')
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Developers Console at
# https://console.developers.google.com/.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = software_loader.getFileName('client_secrets.json')
OAUTH_FILE = software_loader.getFileName('youtube-oauth2.json')
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """Please configure OAuth 2.0
To run this plugin, you will need to place a populated client_secrets.json file in maskgen/resources.
If there is one there already, it is invalid.
Download the Json using the developers console at:
https://console.developers.google.com/
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
"""
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(scope, oauth):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=scope, message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage(oauth if oauth is not None else
os.path.join(os.path.dirname(CLIENT_SECRETS_FILE), 'youtube-oauth2.json'))
credentials = storage.get()
if credentials is None or credentials.invalid:
try:
credentials = run_flow(flow, storage)
except SystemExit:
raise ValueError('Credentials file could not be obtained!')
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, credentials=credentials)
def initialize_upload(youtube, options):
tags = None
if options.keywords:
tags = options.keywords.split(",")
body = dict(
snippet=dict(
title=options.title,
description=options.description,
tags=tags,
categoryId=options.category
),
status=dict(
privacyStatus=options.privacyStatus
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting "chunksize" equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
)
return resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
status, response = insert_request.next_chunk()
if 'id' not in response:
logging.getLogger('maskgen').error("The upload failed with an unexpected response: %s" % response)
raise ValueError('Unexpected Response')
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
logging.getLogger('maskgen').error(error)
retry += 1
if retry > MAX_RETRIES:
logging.getLogger('maskgen').error("No longer attempting to retry.")
raise ValueError('Retrying ultimately failed.')
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
logging.getLogger('maskgen').info("Sleeping %f seconds and then retrying..." % sleep_seconds)
time.sleep(sleep_seconds)
return str(response['id']).strip()
def upload_file(youtube, fname):
# get the file extension for later use in saving downloaded video
#ext = os.path.splitext(fname)[0]
# replacing the args namespace variable above to repurpose it.
# since these are pretty standard here, we can simplify the command
# line requirements by using a Namespace object to pass the necessary
# data into the auth process
args = argparse.Namespace()
args.file = fname
args.title = os.path.split(fname)[1]
args.description = fname
args.keywords = ''
args.category = 22
args.privacyStatus = 'unlisted'
args.logging_level = 'ERROR'
args.noauth_local_webserver = 'true'
# does the file we are trying to upload exist?
if not os.path.exists(args.file):
logging.getLogger('maskgen').info("Video file, " + args.file + " cannot be found or is invalid.")
# start the upload process
logging.getLogger('maskgen').info('uploading ' + fname + '...')
# the upload happens here and we are returned the YouTube ID
try:
youtubeID = initialize_upload(youtube, args)
except HttpError, e:
rc = json.loads(e.content)
logging.getLogger('maskgen').error('An HTTP error %d occurred and the process could not continue:\n%s' % (
e.resp.status, rc['code'] + ': ' + rc['message']))
raise ValueError('HTTP Error')
# get the uploaded video id
return youtubeID
def get_upload_status(youtube, youtubeID):
status = videos_list_by_id(youtube, part='status', id=youtubeID)
return str(status['items'][0]['status']['uploadStatus'])
def download_files(youtubeID, quality, resultDir):
# Set YoutubeDL parms
ydl_opts = {
'format': quality,
'outtmpl': resultDir
}
# create the video object and retrieve the video using the loop quality/format
# level and youtubeID from the list.
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
url = 'https://www.youtube.com/watch?v=' + youtubeID
# if we got this far the assumption can be made that an error is either unavailable format or a
# connection issue neither of which can be controlled from here and processing should continue.
# after the except.
ydl.download([url])
except youtube_dl.DownloadError:
pass
# Remove keyword arguments that are not set
def remove_empty_kwargs(**kwargs):
good_kwargs = {}
if kwargs is not None:
for key, value in kwargs.iteritems():
if value:
good_kwargs[key] = value
return good_kwargs
def videos_list_by_id(client, **kwargs):
# See full sample for function
kwargs = remove_empty_kwargs(**kwargs)
response = client.videos().list(**kwargs).execute()
return response
def delete_video(client, **kwargs):
kwargs = remove_empty_kwargs(**kwargs)
response = client.videos().delete(**kwargs).execute()
return response
def get_valid_resolution(source, target_resolution):
resolutions = ['2160p', '1440p', '1080p', '720p', '480p', '360p', '240p', '144p']
width, height = video_tools.get_shape_of_video(source)
orientation = 'landscape' if width > height else 'portrait'
src_resolution = height if orientation == 'landscape' else width
if int(target_resolution[:-1]) > src_resolution:
index = len(resolutions)-1
while index > 0:
if src_resolution <= int(resolutions[index][:-1]):
return resolutions[index]
index -= 1
return target_resolution
def waitForProcessing(youtube, youtubeID, source, quality):
#scale the wait time by duration and wait longer if we target better quality
waitTime = int((video_tools.FileMetaDataLocator(source).get_duration()/1000))
target_res = int(quality[:-1])
#Just guessing on most of these, might need to wait even longer for 1440 and 4k
if target_res >= 2160:
waitTime *= 12
elif target_res >= 1440:
waitTime *= 11
elif target_res >= 1080:
waitTime *= 8
elif target_res >= 720:
waitTime *= 5
elif target_res >= 480:
waitTime *= 3
waitTime = 30 if waitTime < 30 else waitTime
logging.getLogger('maskgen').info('Waiting %s seconds for processing- higher resolutions wait longer.' % waitTime)
time.sleep(15) # wait 15 to find out if we got rejected
status = get_upload_status(youtube, youtubeID)
if status == 'rejected':
return
time.sleep(waitTime-15)
#make sure processing is finished, but don't wait all day.
while waitTime <= 600:
status = get_upload_status(youtube, youtubeID)
if status == 'processed':
return
else:
waitTime += 10
time.sleep(10)
def compare_dimensions(pathOne='',pathTwo=''):
path = ''
try:
if os.path.exists(pathOne) and os.path.exists(pathTwo):
#return best resolution video of the two
heightOne = video_tools.get_shape_of_video(pathOne)[1]
heightTwo = video_tools.get_shape_of_video(pathTwo)[1]
path = pathOne if heightOne > heightTwo else pathTwo
elif not os.path.exists(pathOne) and not os.path.exists(pathTwo):
raise ValueError('Requested quality file not available- try again with a different max_resolution')
else:
path = pathOne if os.path.exists(pathOne) else pathTwo
return path
finally:
#Clean up the unused tempfile
if path == pathOne and os.path.exists(pathTwo):
os.remove(pathTwo)
elif path == pathTwo and os.path.exists(pathOne):
os.remove(pathOne)
def transform(img,source,target,**kwargs):
quality = get_valid_resolution(source, kwargs['max_resolution']) #There will never be a larger res video from youtube.
if CLIENT_SECRETS_FILE == None:
logging.getLogger('maskgen').error(MISSING_CLIENT_SECRETS_MESSAGE)
raise ValueError('Invalid or missing client_secrets.json- see console/log for more information.')
# authenticate with the youtube account
youtube = get_authenticated_service(YOUTUBE_UPLOAD_SCOPE, OAUTH_FILE)
# instantiate a list for the IDs
logging.getLogger('maskgen').info('Attempting upload of ' + str(source))
youtubeID = upload_file(youtube, source)
try:
waitForProcessing(youtube, youtubeID, source, quality)
logging.getLogger('maskgen').info('Retrieving best resolution video smaller or equal to: ' + quality)
#Fetch the versions of the video with separate streams and the pre-merged, we'll pick the better of the two.
tmpfile = os.path.splitext(target)[0]
tmpfile_merge = tmpfile + '_downloaded_merged' + os.path.splitext(target)[1]
tmpfile_best = tmpfile + '_downloaded_best' + os.path.splitext(target)[1]
download_files(youtubeID, "bestvideo[height <=? " + quality[:-1] + "]+bestaudio", tmpfile_merge)
download_files(youtubeID, "best[height <=? " + quality[:-1] + "]", tmpfile_best)
# select better of the two, or raise error if neither available.
tmpfile = compare_dimensions(tmpfile_merge,tmpfile_best)
os.remove(target) #remove the copy of the source node
os.rename(tmpfile, target)#replace the old target file
return None, None
finally:
delete_video(youtube, id=youtubeID) #Always cleanup file uploaded to youtube
def suffix():
return None
def operation():
return {'name': 'SocialMedia',
'category': 'Laundering',
'description': 'Upload source to Youtube, download a processed version back as result.',
'software': 'maskgen',
'version': maskgen.__version__[0:3],
'arguments': OrderedDict([
('max_resolution', {
'type': 'list',
'values': ['2160p', '1440p', '1080p', '720p', '480p', '360p', '240p', '144p'],
'defaultvalue': '2160p',
'description': 'Download the best available resolution at or below the target'
}),
('type', {
'type':'list',
'values' : ['Facebook', 'Instagram', 'Youtube'],
'description':'Service',
'defaultvalue': 'Youtube'
}),
('upload', {
'type': 'list',
'values': ['Mobile Device', 'Desktop'],
'description': 'How was the image uploaded?',
'defaultvalue': 'Desktop'
}),
('download', {
'type': 'list',
'values': ['Mobile Device', 'Desktop'],
'description': 'How was the image downloaded?',
'defaultvalue': 'Desktop'
})
]),
'transitions': [
'video.video'
]
}
```
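The retry logic in `resumable_upload` above is a standard exponential backoff with random jitter. The sketch below isolates that pattern with a fake flaky operation so it can be run without any Google API credentials.
```python
# Exponential backoff with jitter, isolated from resumable_upload (no YouTube API needed).
import random
import time

MAX_RETRIES = 10

def with_backoff(operation):
    retry = 0
    while True:
        try:
            return operation()
        except IOError as error:          # stand-in for the retriable exception set
            retry += 1
            if retry > MAX_RETRIES:
                raise ValueError('Retrying ultimately failed: %s' % error)
            sleep_seconds = random.random() * (2 ** retry)
            print('Sleeping %.2f seconds and then retrying...' % sleep_seconds)
            time.sleep(sleep_seconds)

if __name__ == '__main__':
    attempts = {'n': 0}
    def flaky():
        attempts['n'] += 1
        if attempts['n'] < 3:
            raise IOError('transient failure')
        return 'ok after %d attempts' % attempts['n']
    print(with_backoff(flaky))
```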
#### File: scripts/python/jtprefs.py
```python
import os
import tempfile
import tkFileDialog
from Tkinter import *
import tkMessageBox
import json
import requests
import subprocess
import maskgen.maskgen_loader
from maskgen.software_loader import getFileName
import ttk
from hp.hp_data import orgs
key = os.path.join(os.path.expanduser("~"), "medifor_ingest.gpg")
hp_settings = os.path.join(os.path.expanduser("~"), ".hpsettings")
class Window(Frame):
def __init__(self, parent, errors):
Frame.__init__(self, parent)
self.parent = parent
self.parent.title("Settings")
self.loader = maskgen.maskgen_loader.MaskGenLoader()
self.setup_window()
maskgen.maskgen_loader.imageLoaded = False
if errors:
tkMessageBox.showerror("Error", "\n".join(errors))
self.info = {"username": ["Username Field", "Enter your project codename."],
"organization": ["Organization Field", "Enter the organization you are affiliated with."],
"apiurl": ["API URL Field", "Enter the API URL for the browser."],
"busername": ["Browser Username Field", "Enter your browser username."],
"bpassword": ["Browser Password Field", "Enter your browser password."],
"hporganization": ["HP Organization Field", "Enter your organization abbreviation for the HP Tool."],
"uploadfolder": ["Folder Field", "Enter the location you would like to upload the tar files to."
"\n\"s3://\" is not necessary."],
"s3-endpoint": ["AWS endpoint URL field", "Enter your endpoint url if you have one."],
"s3-profile": ["AWS profile name field", "Enter your aws profile name if you have multiple config profiles."],
"s3-region": ["AWS region field", "Enter your aws region if you have one."],
"help": ["Help", "For additional help contact <EMAIL>."]}
def setup_window(self):
r = 0
# Info heading
info_text = Label(text="Enter all of the following\ninformation in order to guarantee\nproper setup of"
" the Journaling Tool\nand High Provenance Tool.\nFields marked with an * are"
" mandatory")
info_text.grid(row=r, columnspan=2, pady=5)
r += 1
ufile = getFileName("ManipulatorCodeNames.txt")
if ufile:
with open(ufile, "r") as names:
self.valid_usernames = sorted(names.read().splitlines())
else:
self.valid_usernames = []
self.maskgen_button = Button(text="Select Maskgen Folder", command=self.get_maskgen)
self.maskgen_button.grid(row=r, column=0, columnspan=2)
r += 1
self.master.withdraw()
tkMessageBox.showerror("No Username File", "A username list file could not be found.")
self.master.deiconify()
# General Header
general_label = Label(text="General Setup")
general_label.grid(row=r, columnspan=2)
r += 1
# API URL
self.apiurl_label = Button(text="API URL*", command=lambda: self.get_info("apiurl"))
self.apiurl_label.grid(row=r, column=0, padx=10)
self.apiurl_field = Entry(self.parent)
self.apiurl_field.insert(0, self.loader.get_key('apiurl', ''))
self.apiurl_field.grid(row=r, column=1, padx=10)
r += 1
# Browser Username
self.busername_label = Button(text="Browser Username*", command=lambda: self.get_info("busername"))
self.busername_label.grid(row=r, column=0, padx=10)
self.busername_field = Entry(self.parent)
self.busername_field.grid(row=r, column=1, padx=10)
r += 1
# Browser Password
self.bpassword_label = Button(text="Browser Password*", command=lambda: self.get_info("bpassword"))
self.bpassword_label.grid(row=r, column=0, padx=10)
self.bpassword_field = Entry(self.parent, show="*")
self.bpassword_field.grid(row=r, column=1, padx=10)
r += 1
# Username
self.username_label = Button(text="Username*", command=lambda: self.get_info("username"))
self.username_label.grid(row=r, column=0, padx=10)
self.username_field = ttk.Combobox(values=self.valid_usernames)
self.username_field.insert(0, self.loader.get_key('username', ''))
self.username_field.grid(row=r, column=1, padx=10)
r += 1
# JT Setup
jt_setup = Label(text="Journaling Tool Setup")
jt_setup.grid(row=r, columnspan=2, pady=5)
r += 1
# Organization
self.organization_label = Button(text="Organization*", command=lambda: self.get_info("organization"))
self.organization_label.grid(row=r, column=0, padx=10)
self.organization_field = Entry(self.parent)
self.organization_field.insert(0, self.loader.get_key('organization', ''))
self.organization_field.grid(row=r, column=1, padx=10)
r += 1
# Journal Upload Folder
self.jt_uploadfolder_label = Button(text="Journal Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.jt_uploadfolder_label.grid(row=r, column=0, padx=10)
self.jt_uploadfolder_field = Entry(self.parent)
self.jt_uploadfolder_field.insert(0, self.loader.get_key('s3info', ''))
self.jt_uploadfolder_field.grid(row=r, column=1, padx=10)
r += 1
# HP Tool Setup
jt_setup = Label(text="High Provenance Tool Setup")
jt_setup.grid(row=r, columnspan=2, pady=5)
r += 1
# HP Organization
hporg_button = Button(text="HP Organization*", command=lambda: self.get_info("hporganization"))
hporg_button.grid(row=r, column=0, padx=10)
self.hporganization = StringVar()
self.hporganization.set(self.loader.get_key('hporganization', ''))
hporg_optionmenu = OptionMenu(None, self.hporganization, *orgs.keys())
hporg_optionmenu.grid(row=r, column=1, padx=10)
r += 1
# High Provenance Upload Folder
self.hpupload_button = Button(text="HP Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.hpupload_button.grid(row=r, column=0, padx=10)
self.hpupload_field = Entry(self.parent)
self.hpupload_field.insert(0, self.loader.get_key('aws-hp', ''))
self.hpupload_field.grid(row=r, column=1, padx=10)
r += 1
# PRNU Upload Folder
self.prnuupload_button = Button(text="PRNU Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.prnuupload_button.grid(row=r, column=0, padx=10)
self.prnuupload_field = Entry(self.parent)
self.prnuupload_field.insert(0, self.loader.get_key('aws-prnu', ''))
self.prnuupload_field.grid(row=r, column=1, padx=10)
r += 1
# AWS Profile
self.profile_button = Button(text="AWS Profile Name", command=lambda: self.get_info("s3-profile"))
self.profile_button.grid(row=r, column=0, padx=10)
self.profile_field = Entry(self.parent)
self.profile_field.insert(0, self.loader.get_key('s3-profile', 'default'))
self.profile_field.grid(row=r, column=1, padx=10)
r+=1
# AWS Endpoint
self.endpoint_button = Button(text="AWS Endpoint URL", command=lambda: self.get_info("s3-endpoint"))
self.endpoint_button.grid(row=r, column=0, padx=10)
self.endpoint_field = Entry(self.parent)
self.endpoint_field.insert(0, self.loader.get_key('s3-endpoint', ''))
self.endpoint_field.grid(row=r, column=1, padx=10)
r += 1
# AWS Region
self.region_button = Button(text="AWS Region", command=lambda: self.get_info("s3-region"))
self.region_button.grid(row=r, column=0, padx=10)
self.region_field = Entry(self.parent)
self.region_field.insert(0, self.loader.get_key('s3-region', 'us-east-1'))
self.region_field.grid(row=r, column=1, padx=10)
r += 1
# Submit Button
submit = Button(text="Submit", command=lambda: self.submit_data())
submit.grid(row=r, column=0, padx=10, pady=5)
# Help Button
help = Button(text="Help", command=lambda: self.get_info("help"))
help.grid(row=r, column=1, padx=10, pady=5)
def get_info(self, item):
tkMessageBox.showinfo(*self.info[item])
def submit_data(self):
self.username = self.username_field.get()
self.organization = self.organization_field.get()
self.apiurl = self.apiurl_field.get()
self.busername = self.busername_field.get()
self.bpassword = self.bpassword_field.get()
self.jt_uploadfolder = self.jt_uploadfolder_field.get()
self.hpupload_folder = self.hpupload_field.get()
self.prnuupload_folder = self.prnuupload_field.get()
self.s3_profile = self.profile_field.get()
self.s3_endpoint = self.endpoint_field.get()
self.s3_region = self.region_field.get()
self.eemail = self.get_recipient()
self.full_org = self.hporganization.get() + " (" + orgs[self.hporganization.get()] + ")"
if not all([self.username, self.organization, self.apiurl, self.busername, self.bpassword,
self.hporganization.get()]):
tkMessageBox.showerror("Missing Fields", "One or more fields are missing required information.")
return
if self.username not in self.valid_usernames:
tkMessageBox.showerror("Invalid Username", "Username not in list of valid usernames.")
self.apitoken = self.get_token()
if self.apitoken:
self.create_json()
tkMessageBox.showinfo("Success!", "Configuration file for {0} has been successfully created!".format(
self.username))
if os.path.isfile(key):
os.remove(key)
self.parent.destroy()
def get_recipient(self):
if not os.path.isfile(key):
return None
try:
gpg_result = subprocess.Popen(["gpg", "--with-colons", key], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
except Exception:
tkMessageBox.showerror("Error", "There has been an error retrieving the encryption key.")
return
for line in gpg_result[0].split("\n"):
if line.startswith("uid"):
email = line.split("<")[1].split(">")[0]
return email
return None
def get_token(self):
try:
url = self.apiurl[:-1] if self.apiurl.endswith('/') else self.apiurl
headers = {'Content-Type': 'application/json'}
url = url + '/login/'
data = '{"username": "' + self.busername + '","password":"' + self.bpassword + '"}'
response = requests.post(url, data=data, headers=headers)
if response.status_code != requests.codes.ok:
tkMessageBox.showerror("Invalid API Token", "Error calling external service {0} : {1}".format(
url, str(response.content)))
return None
else:
r = json.loads(response.content)
return r['key']
except Exception as e:
return "Error calling external service: {0} : {1}".format(url, str(e.message))
def create_json(self):
data = {"username": self.username, "apitoken": self.apitoken, "organization": self.organization,
"s3info": self.jt_uploadfolder, "apiurl": self.apiurl, "archive_recipient": self.eemail, "aws-hp":
self.hpupload_folder, "aws-prnu": self.prnuupload_folder, "autosave": "600", "fullorgname":
self.full_org, "hp-organization": orgs[self.hporganization.get()], "git.branch": branch,
"s3-endpoint": self.s3_endpoint, "s3-profile": self.s3_profile, "s3-region": self.s3_region}
self.loader.saveall(data.items())
def get_maskgen(self):
maskgen_dir = tkFileDialog.askdirectory()
if maskgen_dir:
namefile = os.path.join(maskgen_dir, "resources", "ManipulatorCodeNames.txt")
if not os.path.isfile(namefile):
tkMessageBox.showerror("Usernames Not Found", "Could not find username text file at {0}.".format(
namefile))
return
with open(namefile) as f:
self.valid_usernames = sorted(f.read().splitlines())
self.username_field['values'] = self.valid_usernames
def update_user_name():
import json
from maskgen.software_loader import getFileName
property_file = getFileName('project_properties.json')
if property_file is None:
return
with open(property_file, 'r') as f:
props = json.load(f)
for prop in props['properties']:
if prop['name'] == 'username':
prop['type'] = 'listfromfile:ManipulatorCodeNames.txt'
with open(property_file, 'w') as f:
json.dump(props, f, indent=2, encoding='utf-8')
def setup():
errors = []
if os.path.isfile(key):
try:
key_installed = subprocess.Popen(["gpg", "--list-keys", key], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
key_installed.communicate()
if key_installed.returncode == 2:
subprocess.Popen(["gpg", "--import", key])
else:
os.remove(key)
except WindowsError as e:
errors.append("Error encountered while installing encryption key: " + str(e))
# Set autosave to 600s by default
maskgen.maskgen_loader.imageLoaded = False
settings = maskgen.maskgen_loader.MaskGenLoader()
autosave = settings.get_key("autosave")
if autosave is None:
settings.save("autosave", "600")
if sys.platform.startswith("win"):
# Will only run if .maskgen2 doesn't exist, so delete the old commands
if os.path.isfile(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd")):
os.remove(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd"))
with open(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd"), "a+") as startjt:
startjt.writelines(["title Journaling Tool\n", "cd {0}\n".format(os.path.expanduser("~")), "cls\n",
"jtui"])
if os.path.isfile(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd")):
os.remove(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd"))
with open(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd"), "a+") as starthp:
starthp.writelines(["title HP Tool\n", "cd {0}\n".format(os.path.expanduser("~")), "cls\n", "hpgui"])
update_user_name()
return errors
def combine_settings():
maskgen.maskgen_loader.imageLoaded = False
hp_loader = maskgen.maskgen_loader.MaskGenLoader(hp_settings)
hp_keys = {}
for hp_key in hp_loader.__iter__():
hp_keys[hp_key] = hp_loader.get_key(hp_key)
conversions = {"aws": "aws-hp", "aws-prnu": "aws-prnu", "archive_recipient":
"archive_recipient", "inputdir": "inputdir", "outputdir": "outputdir", "organization":
"hp-organization", "seq": "seq"}
maskgen.maskgen_loader.imageLoaded = False
jt_loader = maskgen.maskgen_loader.MaskGenLoader()
jt_keys = {}
for jt_key in jt_loader.__iter__():
jt_keys[jt_key] = jt_loader.get_key(jt_key)
for k, v in hp_keys.items():
if k in conversions.keys():
jt_keys[conversions[k]] = hp_keys[k]
if k == "metadata":
for mk, mv in v.items():
jt_keys[mk] = mv
jt_loader.saveall(jt_keys.items())
os.remove(hp_settings)
def main():
root = Tk()
if os.path.isfile(hp_settings):
combine_settings()
if os.path.isfile(os.path.join(os.path.expanduser("~"), ".maskgen2")):
# Get a maskgen loader to check if fields are defined
maskgen.maskgen_loader.imageLoaded = False
loader = maskgen.maskgen_loader.MaskGenLoader()
if "apitoken" in loader:
exit(0)
if "git.branch" in loader:
global branch
branch = loader.get_key("git.branch")
maskgen.maskgen_loader.imageLoaded = False
errs = setup()
Window(root, errs)
root.wm_resizable(width=FALSE, height=FALSE)
root.mainloop()
if __name__ == "__main__":
branch = "master"
main()
```
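The `combine_settings` function above migrates a user's old HP Tool settings file into the combined Journaling Tool settings, renaming keys through the `conversions` map and flattening any nested `metadata` dictionary. The sketch below restates that migration with plain dictionaries and only a subset of the conversion keys, purely for illustration; it is not the MaskGenLoader-based implementation.

```python
# Standalone sketch of the key migration performed by combine_settings above.
# Plain dicts stand in for the MaskGenLoader settings objects; the conversions
# shown are a subset of the real map.
conversions = {"aws": "aws-hp", "aws-prnu": "aws-prnu", "organization": "hp-organization"}

def merge_hp_into_jt(hp_keys, jt_keys):
    for key, value in hp_keys.items():
        if key in conversions:
            jt_keys[conversions[key]] = value   # rename HP keys into JT key names
        if key == "metadata":
            jt_keys.update(value)               # nested metadata entries are flattened
    return jt_keys

print(merge_hp_into_jt({"aws": "bucket/hp", "metadata": {"seq": "00001"}},
                       {"username": "alice"}))
```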
#### File: scripts/python/WindowsInstallScript.py
```python
import os
import subprocess
from datetime import datetime
import json
from sys import argv
import shutil
import tempfile
commands = {"remove": {"conda": ["pillow"]},
"install": {
"conda": ["-c conda-forge tifffile",
"scikit-image"],
"pip": ["Image",
os.path.join(tempfile.gettempdir(), "pygraphviz-1.3.1-cp27-none-win_amd64.whl"),
os.path.join(tempfile.gettempdir(), "Shapely-1.6.4.post1-cp27-cp27m-win_amd64.whl"),
"GitPython",
"opencv-contrib-python==3.4.5.20"]
}
}
def write_log(msg):
with open(os.path.expanduser("~\\install_log.log"), "a+") as logf:
logf.write(msg + "\n")
def check_installations():
p = subprocess.Popen(['python', '-m', 'pip', 'install', '--upgrade', 'pip'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
d = p.communicate()
if p.returncode != 0:
m = "Failed to upgrade pip:\n" + d[1]
else:
m = "Successfully upgraded pip"
write_log(m)
conda_process = subprocess.Popen(['conda', 'list', '--json'], shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
conda_data = conda_process.communicate()
pip_process = subprocess.Popen(['pip', 'list', '--format=freeze'], shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
pip_data = pip_process.communicate()
if conda_process.returncode != 0:
write_log("Failed to verify conda version:\n" + conda_data[1])
if pip_process.returncode != 0:
write_log("Failed to verify pip version:\n" + pip_data[1])
if conda_process.returncode != 0 or pip_process.returncode != 0:
return False
conda_list = json.loads(conda_data[0])
needed_packages = {"conda": [x.split(" ")[-1].lower() for x in commands["install"]["conda"]],
"pip": [y.lower() for y in commands["install"]["pip"]]}
packages_found = {"conda": {}, "pip": {}}
for pack in conda_list:
if pack["name"].lower() in needed_packages["conda"]:
packages_found["conda"][pack["name"].lower()] = pack
for pack in pip_data[0].splitlines():
name, version = pack.split("==")
if name.lower() in needed_packages["pip"]:
packages_found["pip"][name.lower()] = version
# Verify all packages needed have been found
if sorted(needed_packages["conda"]) != sorted(packages_found["conda"].keys()) or sorted(needed_packages["pip"]) != \
sorted(packages_found["pip"].keys()):
return False
return True
def install_package(module, install_type, package):
if force:
if module == "conda":
cmd = ["python", "-m", module, install_type, "--force"] + package.split(" ")
else:
cmd = ["python", "-m", module, install_type, "--force-reinstall"] + package.split(" ")
else:
cmd = ["python", "-m", module, install_type] + package.split(" ")
if os.sep in package:
pname = os.path.split(package)[1].split("-")[0]
if not os.path.isfile(package):
return "Failed to locate {0} at {1}.".format(pname, package)
else:
pname = cmd[-1]
cmd = ["echo", "y", "|"] + cmd
print ("Attempting to {0:.<50s}".format(install_type + " " + pname)), # Stay under 80 characters
pro = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = pro.communicate()
if pro.returncode == 0:
log_data = "Successfully {0} {1}".format(install_type + ("ed" if install_type != "remove" else "d"), pname)
print("success")
elif out[1].startswith("\r\nPackagesNotFoundError: The following packages are missing from the target environment"):
log_data = "Skipped removing {0} - Package does not exist.".format(pname)
print("skipped")
else:
log_data = "Failed to {0} {1}\n{2}".format(install_type, pname, out[1])
print("failed")
return log_data
def main():
write_log("<---{0:^50s}--->".format("Maskgen Dependency Installation Process"))
start_time = datetime.strftime(datetime.now(), "%b %d, %Y %I:%M:%S%p")
if not force:
already_exists = check_installations()
if already_exists:
print("Packages already installed.")
write_log("Package installation process skipped, all packages to be installed have been found.")
return
successful = []
skipped = []
failed = []
for inst_type in sorted(commands.keys(), reverse=True): # uninstall, then install
for module in sorted(commands[inst_type].keys()): # conda, then pip
for package in commands[inst_type][module]: # package (+ channel if needed)
log_data = install_package(module, inst_type, package)
if log_data.startswith("Success"):
successful.append(package)
elif log_data.startswith("Skip"):
skipped.append(package)
else:
failed.append(package)
write_log(log_data)
write_log("+" * 64)
write_log("+ {0:<60s} +".format("Maskgen Package Installation Info"))
write_log("+ {0:<60s} +".format("Skipped: " + ", ".join(skipped)))
write_log("+ {0:<60s} +".format("Failed: " + ", ".join(failed)))
write_log("+ {0:<60s} +".format("Start Time: " + start_time))
write_log("+ {0:<60s} +".format("End Time: " + datetime.strftime(datetime.now(), "%b %d, %Y %I:%M:%S%p")))
write_log("+" * 64 + "\n" * 5)
if __name__ == '__main__':
try:
force = True if argv[1] in ["-f", "--force"] else False
except IndexError:
force = False
print("Running Python package installation commands. This may take several minutes.")
main()
```
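The installer above builds each conda/pip invocation from the `commands` table, prepending a force flag when requested and auto-confirming prompts. The standalone sketch below mirrors just that command-assembly step; the package names passed to it are examples, not part of the repository's dependency list.

```python
# Illustrative sketch of the command assembly used by install_package above.
def build_command(module, install_type, package, force=False):
    cmd = ["python", "-m", module, install_type]
    if force:
        # conda uses --force, pip uses --force-reinstall
        cmd.insert(4, "--force" if module == "conda" else "--force-reinstall")
    cmd += package.split(" ")            # channel flags such as "-c conda-forge" stay separate tokens
    return ["echo", "y", "|"] + cmd      # auto-confirm any prompts, as the script does

if __name__ == "__main__":
    print(build_command("conda", "install", "-c conda-forge tifffile"))
    print(build_command("pip", "install", "GitPython", force=True))
```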
#### File: standalone/copy_metadata/create_qt_from_images.py
```python
from maskgen.plugins import loadPlugins,callPlugin
from maskgen.image_wrap import openImageFile
from maskgen.jpeg.utils import get_subsampling, parse_tables, sort_tables, check_rotate
from maskgen.exif import getexif
import os
def dumpTable(filename, tableData):
with open(filename,'w') as tableDataFp:
count = 0
for table in tableData:
for item in table:
tableDataFp.write('{}'.format(item))
count += 1
if count % 8 == 0:
tableDataFp.write('\n')
else:
tableDataFp.write('\t')
avoidkeys = set(['-System:FileModifyDate',
'-Composite:SubSecCreateDate',
'-System:FileInodeChangeDate',
'-System:Directory',
'-System:FilePermissions',
'-System:FileAccessDate',
'-Composite:ImageSize',
'-System:FileName',
'-ExifIFD:CreateDate',
'-Sony:SonyDateTime']
)
computekeys = ['Date','Time','Width','Height']
def writeExif(filename,exifdata):
with open(filename,'w') as fp:
for k, v in exifdata.iteritems():
if k not in avoidkeys:
iscompute=len([computekey for computekey in computekeys if k.find(computekey) >= 0 ]) > 0
linetype = 'compute' if iscompute else 'database'
fp.write('{},{},{}\n'.format(linetype,k,v))
def main():
files_to_qt = {
"iPhone6s.jpg":"iPhone6s-[{}x{}]-{}.txt",
"Galaxy_S4.jpg": "Samsung-Galaxy-S4-[{}x{}]-{}.txt",
"NEX-5TL.jpg": "Sony-NEX-5TL-[{}x{}]-{}.txt",
"droid_maxx.jpg": "Motorola-Droid-Maxx-[{}x{}]-{}.txt",
"canon_eos_sl1.jpg": "Canon-EOS-SL1-[{}x{}]-{}.txt",
"Kodak_M1063_0_9367.JPG": "Kodak-EasyShare-M1063-[{}x{}]-{}.txt",
"Samsung_L74wide_1_44105.JPG": "Samsung-Digimax-L74_Wide-[{}x{}]-{}.txt",
"Praktica_DCZ5.9_3_35003.JPG": "Praktica-DCZ-59-[{}x{}]-{}.txt",
"Olympus_mju_1050SW_0_23680.JPG": "Olympus-Stylus-1050SW-[{}x{}]-{}.txt",
"Panasonic_DMC-FZ50_0_26019.JPG": "Panasonic-DMC-FZ50-[{}x{}]-{}.txt"
}
imagedir = ''
savedir = 'maskgen/plugins/JpgFromCamera/QuantizationTables'
for file_name_prefix in files_to_qt:
filename = os.path.join(imagedir,file_name_prefix)
thumbTable = None
prevTable = None
finalTable = None
tables_zigzag = parse_tables(filename)
tables_sorted = sort_tables(tables_zigzag)
if len(tables_sorted) == 6:
thumbTable = tables_sorted[0:2]
prevTable = tables_sorted[2:4]
finalTable = tables_sorted[4:6]
elif len(tables_sorted) > 2 and len(tables_sorted) < 6:
thumbTable = tables_sorted[0:2]
finalTable = tables_sorted[-2:]
else:
finalTable = tables_sorted
im = openImageFile(filename)
outfilenametemplate = files_to_qt[file_name_prefix]
dims = im.size
if thumbTable is not None:
dumpTable(os.path.join(savedir,outfilenametemplate.format(dims[0],dims[1],'thumbnail')),
thumbTable)
if prevTable is not None:
dumpTable(os.path.join(savedir, outfilenametemplate.format(dims[0], dims[1], 'preview')),
prevTable)
if finalTable is not None:
dumpTable(os.path.join(savedir, outfilenametemplate.format(dims[0], dims[1], 'QT')),
finalTable)
writeExif(os.path.join(savedir, outfilenametemplate.format(dims[0], dims[1], 'metadata')),
getexif(filename,args=['-args', '-G1','-n'],separator='='))
if __name__ == '__main__':
main()
```
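The quantization-table dump above decides which tables belong to the thumbnail, preview, and final image purely by how many tables `sort_tables` returned. A minimal restatement of that split, with placeholder table contents used only for illustration:

```python
# Sketch of the table-count split used above: 6 tables -> thumbnail/preview/final,
# 3-5 tables -> thumbnail/final, otherwise only the final-image tables.
def split_tables(tables_sorted):
    thumb, prev, final = None, None, None
    if len(tables_sorted) == 6:
        thumb, prev, final = tables_sorted[0:2], tables_sorted[2:4], tables_sorted[4:6]
    elif 2 < len(tables_sorted) < 6:
        thumb, final = tables_sorted[0:2], tables_sorted[-2:]
    else:
        final = tables_sorted
    return thumb, prev, final

# Example with placeholder entries (real tables are 64-value lists from parse_tables/sort_tables).
print(split_tables([['t1'], ['t2'], ['p1'], ['p2'], ['f1'], ['f2']]))
```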
#### File: tests/algorithms/test_retinex.py
```python
import unittest
from maskgen.algorithms.retinex import *
from tests.test_support import TestSupport
import os
from maskgen.image_wrap import ImageWrapper, openImageFile
import numpy as np
#import colorcorrect.algorithm
from maskgen.image_wrap import ImageWrapper
class TestRetinex(TestSupport):
def test_retinex(self):
img = openImageFile(self.locateFile('tests/images/test_project4.jpg'))
for f in [MultiScaleResinex([15,80,125],
G=30,
b=-6,
alpha=125.0,
beta=1.0,
colorBalance=(0.01,0.99)),
MultiScaleResinexLab([15,80,125],
G=30,
b=-6,
alpha=125.0,
beta=1.0,
colorBalance=(0.01, 0.99)),
MultiScaleResinexChromaPerservation([15,80,125],
G=30,
b=-6,
alpha=125.0,
beta=1.0,
colorBalance=(0.01, 0.99))
]:
res = f(img.to_array())
self.assertTrue(np.mean(res) > np.mean(img.to_array()))
#ImageWrapper(res).save('cr_{}_ret.png'.format(f.__class__.__name__))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/algorithms/test_seam_carving.py
```python
import unittest
from maskgen.algorithms.seam_carving import SeamCarver, HogEnergyFunc, SobelFunc,saveEnergy, ScharrEnergyFunc,\
createHorizontalSeamMask,createVerticalSeamMask, foward_base_energy_function
from tests.test_support import TestSupport
import os
from maskgen.image_wrap import ImageWrapper, openImageFile,deleteImage
import numpy as np
import random
class TestToolSet(TestSupport):
def xtest_Sobel(self):
filename = self.locateFile('tests/algorithms/arch_sunset.resized.jpg')
map = SobelFunc()(np.asarray(openImageFile(filename)))
saveEnergy(map,os.path.join(os.path.dirname(filename), 'arch_e.png'))
def xtest_Scharr(self):
filename = self.locateFile('tests/algorithms/arch_sunset.resized.jpg')
map = ScharrEnergyFunc()(np.asarray(openImageFile(filename)))
saveEnergy(map,os.path.join(os.path.dirname(filename), 'arch_e.png'))
def xtest_Hog(self):
filename = self.locateFile('tests/algorithms/arch_sunset.resized.jpg')
map = HogEnergyFunc()(np.asarray(openImageFile(filename)))
saveEnergy(map,os.path.join(os.path.dirname(filename), 'arch_e.png'))
def test_mask_withsame_size(self):
filename = self.locateFile('tests/algorithms/cat.png')
img = openImageFile(filename)
somemask = np.random.randint(0,255,(img.to_array().shape))
sc = SeamCarver(filename,
shape=somemask.shape,
mask_filename=self.locateFile('tests/algorithms/cat_mask.png'))
image, mask = sc.remove_seams()
somemask = sc.mask_tracker.move_pixels(somemask.astype('uint8'))
self.assertTrue(image.shape == somemask.shape)
self.assertTrue((image.shape[0],image.shape[1]) == sc.mask_tracker.neighbors_mask.shape)
#ImageWrapper(image).save(os.path.join(os.path.dirname(filename), 'cat_f.png'))
#ImageWrapper(mask).save(os.path.join(os.path.dirname(filename), 'cat_m.png'))
#ImageWrapper(somemask).save(os.path.join(os.path.dirname(filename), 'twins_sm.png'))
#sc.mask_tracker.save_neighbors_mask(os.path.join(os.path.dirname(filename), 'twins_rm.png'))
def test_shrink(self):
filename = self.locateFile('tests/algorithms/twins.jpg')
img = openImageFile(filename)
#img = openImageFile(filename, False, None)
somemask = img.to_array()
somemaskcopy = somemask
sc = SeamCarver(filename, shape=(
350, 450),energy_function=SobelFunc()) # mask_filename=self.locateFile('tests/algorithms/cat_mask.png'))
image, mask = sc.remove_seams()
#ImageWrapper(image).save(os.path.join(os.path.dirname(filename), 'twins_f.png'))
#ImageWrapper(mask).save(os.path.join(os.path.dirname(filename), 'twins_m.png'))
radj, cadj = sc.mask_tracker.save_adjusters('adjusters.png')
sc.mask_tracker.read_adjusters( radj, cadj )
sc.mask_tracker.save_neighbors_mask('twins_m.png')
os.remove(radj)
os.remove(cadj)
os.remove('twins_m.png')
somemask = sc.mask_tracker.move_pixels(somemask)
#ImageWrapper(somemask).save(os.path.join(os.path.dirname(filename), 'twins_sm.png'))
self.assertTrue(image.shape == somemask.shape)
self.assertTrue(np.all(image == somemask))
self.assertTrue((image.shape[0], image.shape[1]) == sc.mask_tracker.neighbors_mask.shape)
originalmask = sc.mask_tracker.invert_move_pixels(somemask)
self.assertTrue(somemaskcopy.shape == originalmask.shape)
#ImageWrapper(somemaskcopy).save(os.path.join(os.path.dirname(filename), 'twins_om.png'))
#ImageWrapper(originalmask).save(os.path.join(os.path.dirname(filename), 'twins_om2.png'))
self.assertTrue(np.all(somemaskcopy[mask==0] == originalmask[mask==0]))
def test_shrink_forward_energy(self):
filename = self.locateFile('tests/algorithms/twins.jpg')
img = openImageFile(filename)
somemask = img.to_array()
somemaskcopy = somemask
sc = SeamCarver(filename, shape=(
350, 450),energy_function=SobelFunc(),
seam_function=foward_base_energy_function)
image, mask = sc.remove_seams()
#ImageWrapper(image).save(os.path.join(os.path.dirname(filename), 'twins_f.png'))
#ImageWrapper(mask).save(os.path.join(os.path.dirname(filename), 'twins_m.png'))
radj, cadj = sc.mask_tracker.save_adjusters('adjusters.png')
deleteImage(radj)
deleteImage(cadj)
foo = np.copy(sc.mask_tracker.dropped_adjuster)
sc.mask_tracker.read_adjusters( radj, cadj )
self.assertTrue(np.all(foo == sc.mask_tracker.dropped_adjuster))
sc.mask_tracker.save_neighbors_mask('twins_m.png')
#os.remove(radj)
#os.remove(cadj)
#os.remove('twins_m.png')
somemask = sc.mask_tracker.move_pixels(somemask)
#ImageWrapper(somemask).save(os.path.join(os.path.dirname(filename), 'twins_sm.png'))
self.assertTrue(image.shape == somemask.shape)
self.assertTrue(np.all(image == somemask))
self.assertTrue((image.shape[0], image.shape[1]) == sc.mask_tracker.neighbors_mask.shape)
originalmask = sc.mask_tracker.invert_move_pixels(somemask)
self.assertTrue(somemaskcopy.shape == originalmask.shape)
#ImageWrapper(somemaskcopy).save(os.path.join(os.path.dirname(filename), 'twins_om.png'))
#ImageWrapper(originalmask).save(os.path.join(os.path.dirname(filename), 'twins_om2.png'))
self.assertTrue(np.all(somemaskcopy[mask==0] == originalmask[mask==0]))
def test_shrink_forward_energy_arch(self):
#filename = self.locateFile('tests/algorithms/arch_sunset.jpg')
#newshape = (470, 250)
filename = self.locateFile('tests/algorithms/pexels-photo-746683.jpg')
newshape = (1450, 1950)
img = openImageFile(filename)
imgcopy = img.to_array()
sc = SeamCarver(filename, shape=newshape,energy_function=SobelFunc(),
seam_function=foward_base_energy_function,keep_size=True)
image, mask = sc.remove_seams()
#ImageWrapper(image).save(os.path.join(os.path.dirname(filename), 'as_f.png'))
#ImageWrapper(mask).save(os.path.join(os.path.dirname(filename), 'as_m.png'))
#radj, cadj = sc.mask_tracker.save_adjusters('adjusters.png')
#sc.mask_tracker.read_adjusters( radj, cadj )
sc.mask_tracker.save_neighbors_mask('as_m.png')
imgcopymoved = sc.mask_tracker.move_pixels(imgcopy)
#ImageWrapper(somemask).save(os.path.join(os.path.dirname(filename), 'as_sm.png'))
self.assertTrue(image.shape == imgcopymoved.shape)
self.assertTrue(np.all(image == imgcopymoved))
self.assertTrue((image.shape[0], image.shape[1]) == sc.mask_tracker.neighbors_mask.shape)
originalmask = sc.mask_tracker.invert_move_pixels(imgcopymoved)
self.assertTrue(imgcopy.shape == originalmask.shape)
#ImageWrapper(imgcopymoved).save(os.path.join(os.path.dirname(filename), 'as_om.png'))
#ImageWrapper(originalmask).save(os.path.join(os.path.dirname(filename), 'as_om2.png'))
self.assertTrue(np.all(imgcopy[mask==0] == originalmask[mask==0]))
def extendRemoveSet(self, removeset,dim):
newset = []
for x in removeset:
while True:
newx = min(max(0, x + random.randint(-1,1)), dim-1)
if newx not in newset:
newset.append(newx)
break
self.assertEqual(len(removeset),len(newset))
return sorted(newset)
def createHorizontal(self, basis, dimx, dimy):
import random
m = np.zeros((dimx, dimy))
for y in range(dimy):
unuseditems = [x for x in range(255) if x not in basis[:, y].tolist()]
for x in range(dimx):
m[x, y] = random.choice(unuseditems)
return m
def createVertical(self, basis, dimx, dimy):
import random
m = np.zeros((dimx, dimy))
for x in range(dimx):
unuseditems = [y for y in range(255) if y not in basis[x, :].tolist()]
for y in range(dimy):
m[x, y] = random.choice(unuseditems)
return m
# NOTE for the horizontal and vertical seam-mask tests below: the random
# generator can produce repeated pixel values, which makes the recovered seam
# ambiguous and a strict mask comparison fail. That is a legitimate outcome
# rather than a defect, so the final assertions tolerate a small number of
# mismatched pixels instead of requiring an exact match.
def test_createHorizontalSeamMask(self):
dim = 25
#random.seed = 10
old = [] #np.random.randint(180, 255, size=(dim, dim+1))
f = open(self.locateFile("tests/algorithms/inputImages.txt"), "r")
for line in f:
for num in line.split(" "):
old.append(int(num))
f.close()
old = np.array(old)
old.resize((dim, dim+1))
#old = np.random.randint(255, size=(dim, dim+1)) # can change this to make it so that this is static values
new = []
n = open(self.locateFile("tests/algorithms/newHorizontal.txt"), "r")
for line in n:
for num in line.split(" "):
num = num.strip('.')
num = num.strip('\n')
num = num.strip('.\n')
new.append(int(num))
n.close()
new = np.array(new)
new.resize((dim-3, dim+1))
#new = self.createHorizontal(old, dim-3, dim+1) # creates a new image from the old one with 3 rows cut out
mask = np.zeros((dim, dim+1)).astype('uint8')
random.seed(10)
removeset = sorted([x for x in random.sample(range(0, dim), 3)])
for y in range(dim+1):
for x in range(dim):
mask[x, y] = (x in removeset)
removeset = self.extendRemoveSet(removeset,dim)
newx = [0 for y in range(dim+1)]
for y in range(dim+1):
for x in range(dim):
if mask[x, y] == 0:
new[newx[y], y] = old[x, y]
newx[y] = newx[y]+1
print old
print new
newmask_tracker = createHorizontalSeamMask(old,new)
newmask = newmask_tracker.dropped_mask*255
print mask * 255
print newmask
if not np.all(newmask==mask*255):
self.assertTrue(sum(sum(newmask != mask * 255)) < 4)
#new_rebuilt = carveMask(old, 255-(mask * 255), new.shape)
#self.assertTrue(np.all(new==new_rebuilt))
def test_createVerticalSeamMask(self):
dim = 25
old = []
f = open(self.locateFile("tests/algorithms/inputImages.txt"), "r")
for line in f:
for num in line.split(" "):
old.append(int(num))
f.close()
old = np.array(old)
old.resize((dim, dim), refcheck=False)
#old = np.random.randint(255, size=(dim, dim))
new = []
n = open(self.locateFile("tests/algorithms/newImage.txt"), "r")
for line in n:
for num in line.split(" "):
num = num.strip('.')
num = num.strip('\n')
num = num.strip('.\n')
new.append(int(num))
n.close()
new = np.array(new)
new.resize((dim, dim-3))
# new = self.createVertical(old, dim, dim-3)
mask = np.zeros((dim, dim)).astype('uint8')
random.seed(10)
removeset = sorted([x for x in random.sample(range(0, dim-1), 3)])
for x in range(dim):
for y in range(dim):
mask[x, y] = (y in removeset)
removeset = self.extendRemoveSet(removeset, dim-1)
newy = [0 for y in range(dim)]
for y in range(dim):
for x in range (dim):
if mask[x,y] == 0:
new[x, newy[x]] = old[x, y]
newy[x] = newy[x]+1
print old
print new
newmask_tracker = createVerticalSeamMask(old,new)
newmask = newmask_tracker.dropped_mask*255
print mask * 255
print newmask
if not np.all(newmask == mask*255):
self.assertTrue(sum(sum(newmask != mask * 255)) < 4)
somemask = newmask_tracker.move_pixels(old)
self.assertTrue(old.dtype == somemask.dtype)
self.assertTrue(np.all(new == somemask))
# ImageWrapper(somemask).save(os.path.join(os.path.dirname(filename), 'twins_sm.png'))
originalmask = newmask_tracker.invert_move_pixels(somemask)
self.assertTrue(old.shape == originalmask.shape)
# ImageWrapper(somemaskcopy).save(os.path.join(os.path.dirname(filename), 'twins_om.png'))
# ImageWrapper(originalmask).save(os.path.join(os.path.dirname(filename), 'twins_om2.png'))
self.assertTrue(np.all(old[mask == 0] == originalmask[mask == 0]))
if __name__ == '__main__':
unittest.main()
```
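As the note inside `TestToolSet` explains, randomly generated inputs can make the recovered seam mask differ from the expected mask in a few pixels without the result being wrong. The sketch below isolates that tolerant comparison, assuming NumPy and the same mismatch budget of 4 pixels used in the tests:

```python
import numpy as np

# Sketch of the tolerant comparison used in the seam-mask tests above:
# masks must either match exactly or disagree in fewer than max_mismatch pixels.
def masks_close_enough(expected_mask, recovered_mask, max_mismatch=4):
    expected = expected_mask.astype('uint8') * 255
    if np.all(recovered_mask == expected):
        return True
    return int(np.sum(recovered_mask != expected)) < max_mismatch

# Example with made-up 5x5 masks differing in a single pixel.
a = np.zeros((5, 5), dtype='uint8'); a[2, :] = 1
b = a.copy() * 255; b[2, 4] = 0
print(masks_close_enough(a, b))  # True
```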
#### File: plugins/DummyPlugin/__init__.py
```python
import maskgen
def transform(img,source,target,**kwargs):
return None,None
# 'name' is the operation (link) name registered for this plugin;
# 'category' is the grouping it is listed under.
def operation():
return {'name':'Blur',
'category':'Filter',
'description':'Gaussian Blur',
'software':'maskgen',
'version':maskgen.__version__,
'transitions': [
'image.image'
]
}
def suffix():
return None
```
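A plugin module such as the dummy above is expected to expose `transform`, `operation`, and `suffix`; the dictionary returned by `operation()` supplies the link name, category, and allowed transitions that get registered. The sketch below uses a stand-in module object, not the real loader (`maskgen.plugins.loadPlugins`), to show how that metadata might be read:

```python
# Stand-in object mirroring the DummyPlugin interface above (illustration only);
# real plugin discovery is done by maskgen.plugins.loadPlugins, not shown here.
class _FakePluginModule(object):
    @staticmethod
    def operation():
        return {'name': 'Blur', 'category': 'Filter', 'description': 'Gaussian Blur',
                'software': 'maskgen', 'version': '0.0', 'transitions': ['image.image']}

    @staticmethod
    def transform(img, source, target, **kwargs):
        return None, None

def describe_plugin(plugin_module):
    meta = plugin_module.operation()
    return '{name} ({category}): {description}'.format(**meta)

print(describe_plugin(_FakePluginModule))
print(_FakePluginModule.transform(None, 'source.png', 'target.png'))  # (None, None)
```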
#### File: tests/batch/test_batch_process.py
```python
import tempfile
import unittest
from threading import Lock
from maskgen import plugins
from maskgen.batch import batch_project
from maskgen.batch.batch_process import processSpecification
from maskgen.batch.permutations import *
from maskgen.support import getValue
from maskgen.tool_set import openImageFile
from networkx.readwrite import json_graph
from tests.test_support import TestSupport
def saveAsPng(source, target):
openImageFile(source, args={'Bits per Channel': 16}).save(target, format='PNG')
class TestBatchProcess(TestSupport):
def setUp(self):
plugins.loadPlugins(customFolders= [self.locateFile('tests/batch/plugins')])
def createExecutor(self, prefix, skipValidation=False, loglevel=50, setup=False, global_variables={}):
d = tempfile.mkdtemp(prefix=prefix, dir='.')
os.mkdir(os.path.join(d, 'test_projects'))
if setup:
self.general_setup(d)
be = batch_project.BatchExecutor(os.path.join(d, 'test_projects'),
workdir=d,
loglevel=loglevel,
skipValidation=skipValidation,
global_variables=global_variables)
self.addFileToRemove(d)
return be
def test_int_picker(self):
manager = PermuteGroupManager()
global_state = {'iteratorslock': Lock(),
'permutegroupsmanager': manager}
local_state = {}
spec = {"type": "int[5:11:2]", 'permutegroup': 'yes'}
manager.next()
self.assertEqual(5, batch_project.executeParamSpec('test_int_spec', spec,
global_state, local_state, 'test_node', []))
manager.next()
self.assertEqual(7, batch_project.executeParamSpec('test_int_spec', spec,
global_state, local_state, 'test_node', []))
manager.next()
self.assertEqual(9, batch_project.executeParamSpec('test_int_spec', spec,
global_state, local_state, 'test_node', []))
manager.next()
self.assertEqual(11, batch_project.executeParamSpec('test_int_spec', spec,
global_state, local_state, 'test_node', []))
def test_float_picker(self):
manager = PermuteGroupManager()
global_state = {'iteratorslock': Lock(),
'image_dir': self.locateFile('test/images'),
'permutegroupsmanager': manager}
local_state = {}
spec = {"type": "float[5.1:7:0.5]", 'permutegroup': 'yes'}
manager.next()
self.assertEqual(5.1, batch_project.executeParamSpec('test_float_spec', spec,
global_state, local_state, 'test_node', []))
manager.next()
self.assertEqual(5.6, batch_project.executeParamSpec('test_float_spec', spec,
global_state, local_state, 'test_node', []))
manager.next()
self.assertEqual(6.1, batch_project.executeParamSpec('test_float_spec', spec,
global_state, local_state, 'test_node', []))
manager.next()
self.assertEqual(6.6, batch_project.executeParamSpec('test_float_spec', spec,
global_state, local_state, 'test_node', []))
def test_value_shortcut(self):
self.assertEqual('foo', batch_project.executeParamSpec('test_value_spec', 'foo',
{}, {}, 'test_node', []))
self.assertEqual(1, batch_project.executeParamSpec('test_value_spec',
{'type': 'value', 'value': '{foo}', 'function': 'numpy.int'},
{'foo': 1}, {}, 'test_node', []))
self.assertEqual(1, batch_project.executeParamSpec('test_value_spec',
{'type': 'value', 'value': '{foo@nodex}',
'function': 'numpy.int'},
{'foo': 2}, {'nodex': {'foo': 1}}, 'test_node', []))
self.assertEqual('2,331.23', batch_project.executeParamSpec('test_value_spec',
{'type': 'value', 'value': '{foo@nodex:,}'},
{'foo': 2}, {'nodex': {'foo': 2331.23}},
'test_node', []))
def test_list_picker(self):
manager = PermuteGroupManager()
global_state = {'iteratorslock': Lock(),
'image_dir': self.locateFile('test/images'),
'permutegroupsmanager': manager}
local_state = {}
spec = {"type": "list", 'values': ['1', '2', '3']}
self.assertTrue(batch_project.executeParamSpec('test_list_spec', spec,
global_state, local_state, 'test_node', []) in ['1', '2', '3'])
self.assertTrue(batch_project.executeParamSpec('test_list_spec', spec,
global_state, local_state, 'test_node', []) in ['1', '2', '3'])
self.assertTrue(batch_project.executeParamSpec('test_list_spec', spec,
global_state, local_state, 'test_node', []) in ['1', '2', '3'])
self.assertTrue(batch_project.executeParamSpec('test_list_spec', spec,
global_state, local_state, 'test_node', []) in ['1', '2', '3'])
def general_setup(self, dir):
f = os.path.join(dir, 'imageset.txt')
self.addFileToRemove(f, preemptive=True)
with open(f, 'w') as fp:
fp.writelines([filename + os.linesep for filename in os.listdir(self.locateFile('tests/images')) if
not filename.startswith('test_project')])
f = os.path.join(dir, 'donorset.txt')
self.addFileToRemove(f, preemptive=True)
with open(f, 'w') as fp:
fp.writelines([filename + os.linesep for filename in os.listdir(self.locateFile('tests/images')) if
not filename.startswith('test_project')])
batch_project.loadCustomFunctions()
def test_validation(self):
be = self.createExecutor('validation',
setup=True,loglevel=10,
global_variables={'image_dir': self.locateFile('tests/images')})
batchProject = batch_project.loadJSONGraph(
self.locateFile('tests/specifications/batch_validation_process.json'))
dir,name= be.runProjectLocally(batchProject)
be.finish(remove_logs=True)
self.assertTrue(dir is None)
def test_extend(self):
batch_project.loadCustomFunctions()
import shutil
self.addFileToRemove('testimages_extend', preemptive=True)
shutil.copytree(os.path.dirname(self.locateFile('./images/sample.json')), 'testimages_extend')
self.assertTrue(processSpecification(self.locateFile('tests/specifications/batch_extension_process.json'), '',
'testimages_extend', skipValidation=True) == 1)
def test_run(self):
batch_project.loadCustomFunctions()
batchProject = batch_project.loadJSONGraph(
self.locateFile('tests/specifications/batch_process.json'))
be = self.createExecutor('main_batch_run', skipValidation=True, loglevel=10, setup=True,
global_variables={'image_dir': self.locateFile('tests/images'),
'donorImages': self.locateFile('tests/images')})
be.runProjectLocally(batchProject)
be.runProjectLocally(batchProject)
global_state = be.initialState
try:
# self.assertFalse(global_state['permutegroupsmanager'].hasNext())
global_state['permutegroupsmanager'].next()
self.assertTrue(global_state['permutegroupsmanager'].hasNext())
global_state['permutegroupsmanager'].next()
self.fail('Should have seen an end of resource exception')
except EndOfResource:
pass
be.finish(remove_logs=True)
def test_external_image_selection(self):
d = tempfile.mkdtemp(prefix='external_image', dir='.')
self.general_setup(d)
os.mkdir(os.path.join(d, 'test_projects'))
os.mkdir(os.path.join(d, 'images'))
hdf5dir = os.path.join(d, 'hdf5')
def mysetup():
os.mkdir(hdf5dir)
with open(os.path.join(hdf5dir, 'test_project1.hdf5'), 'w') as fp:
fp.write('foo')
mysetup()
self.addFileToRemove(d, preemptive=False)
be = batch_project.BatchExecutor(os.path.join(d, 'test_projects'),
workdir=d,
global_variables={'image_dir': os.path.join(d, 'images'),
'hdf5dir': hdf5dir,
'results': os.path.join(d, 'images')})
batch_project.loadCustomFunctions()
batchProject = batch_project.loadJSONGraph(
self.locateFile('tests/specifications/external_image_batch_process.json'))
saveAsPng(self.locateFile('tests/images/test_project1.jpg'),
os.path.join(d, 'images', 'test_project1.png'.format(d)))
with open(os.path.join(d, 'images', 'arguments.csv'), 'w') as fp:
fp.write('test_project1.png,no,16')
dir, name = be.runProjectLocally(batchProject)
be.finish(remove_logs=True)
self.assertTrue(dir is not None)
self.assertTrue(os.path.exists(os.path.join(hdf5dir, 'test_project1.hdf5')))
def test_image_selection(self):
batch_project.loadCustomFunctions()
batchProject = batch_project.loadJSONGraph(
self.locateFile('tests/specifications/simple_image_selector_plugin.json'))
be = self.createExecutor('image_selection', skipValidation=True, setup=True, loglevel=10,
global_variables={'image_dir': self.locateFile('tests/images')})
dir, name = be.runProjectLocally(batchProject)
be.finish(remove_logs=True)
self.assertTrue(dir is not None)
def test_runinheritance(self):
batch_project.loadCustomFunctions()
batchProject = batch_project.loadJSONGraph(
self.locateFile('tests/specifications/inheritance_test.json'))
be = self.createExecutor('image_selection', skipValidation=True, loglevel=10, setup=True,
global_variables={
'image_dir': self.locateFile('tests/images'),
})
dir, name = be.runProjectLocally(batchProject)
be.finish(remove_logs=True)
self.assertTrue(dir is not None)
def test_runwithpermutation(self):
batch_project.loadCustomFunctions()
d = tempfile.mkdtemp(prefix='external_image', dir='.')
self.general_setup(d)
os.mkdir(os.path.join(d, 'test_projects'))
batch_project.loadCustomFunctions()
batchProject = batch_project.loadJSONGraph(
self.locateFile('tests/specifications/permutation_batch_process.json'))
global_state = {
'projects': os.path.join(d, 'test_projects'),
'project': batchProject,
'workdir': d,
'picklists_files': {},
'image_dir': self.locateFile('tests/images'),
'count': batch_project.IntObject(20),
'permutegroupsmanager': PermuteGroupManager(d)
}
batchProject.loadPermuteGroups(global_state)
for i in range(10):
batchProject.executeOnce(global_state)
self.assertTrue(global_state['permutegroupsmanager'].hasNext())
def test_remap(self):
network = {
"directed": True,
"graph": {
"username": "test",
},
"nodes": [
{
"id": "A"
},
{
"id": "B"
}
],
"links": [
{
"source": "A",
"target": "B"
}
],
"multigraph": False
}
remapped = batch_project.remap_links(network)
G = json_graph.node_link_graph(remapped, multigraph=False, directed=True)
self.assertTrue(G.edge['A']['B'] is not None)
def test_remap2(self):
network = {
"directed": True,
"graph": {
"username": "test",
},
"nodes": [
{
"id": "A",
"fooA": "barA"
},
{
"id": "B",
"fooB": "barB"
},
{
"id": "C",
"fooC": "barC"
},
{
"id": "D",
"fooD": "barD",
'source': 'A'
},
{
"id": "E",
"fooE": "barE"
}
],
"links": [
{
"source": "A",
"target": "B"
},
{
"source": "A",
"target": "D"
},
{
"source": "B",
"target": "C"
},
{
"source": "C",
"target": "D",
"split": True
},
{
"source": "D",
"target": "E",
"foo": "bar"
}
],
"multigraph": False
}
remapped = batch_project.remap_links(network)
G = json_graph.node_link_graph(remapped, multigraph=False, directed=True)
G = batch_project.separate_paths(G)
for node_id in G.nodes():
preds = [pred for pred in G.predecessors(node_id) if not getValue(G[pred][node_id], 'donor', False)]
self.assertTrue(len(preds) < 2)
self.assertEqual(7, len(G.nodes()))
if __name__ == '__main__':
unittest.main()
```
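`test_int_picker` and `test_float_picker` above exercise stepped range specifications such as `int[5:11:2]`, where each call to the permute-group manager advances to the next value. The sketch below is a simplified, standalone parser for the integer form only, written to make the stepping explicit; it is not the `batch_project.executeParamSpec` implementation.

```python
# Illustrative parser for the "int[start:stop:step]" spec form used in the tests above.
import re

def iterate_int_spec(spec):
    match = re.match(r'int\[(-?\d+):(-?\d+):(-?\d+)\]', spec)
    if match is None:
        raise ValueError('not an int range spec: ' + spec)
    start, stop, step = (int(g) for g in match.groups())
    value = start
    while value <= stop:
        yield value
        value += step

print(list(iterate_int_spec('int[5:11:2]')))  # [5, 7, 9, 11], as asserted in test_int_picker
```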
#### File: tests/masks/test_mask_gen.py
```python
import unittest
from tests.test_support import TestSupport
from maskgen.cv2api import cv2api_delegate
import numpy as np
from subprocess import Popen, PIPE
from maskgen import ffmpeg_api
import os
from maskgen.video_tools import pasteCompare
from maskgen.tool_set import VidTimeManager,convertToVideo
from maskgen.image_wrap import ImageWrapper
from collections import OrderedDict
from skimage.draw import random_shapes
def make_image():
i = np.random.randint(0,255,(200,256,4),dtype='uint8')
i[:,:,3] = 0
shape = random_shapes((200, 256), max_shapes=4,allow_overlap=True,
intensity_range=((100, 255)),num_channels=1)
shape = shape[0].reshape((200,256))
i[:,:,3][shape[:]<255] =255
return i
codecs = OrderedDict([('raw',['-vcodec', 'rawvideo']),
('mp4v', ['-c:v', 'mpeg4', '-crf','0'])])
suffices = {'raw':'avi',
'mp4v':'m4v'}
fourccs = {
'raw':0,
'mp4v':cv2api_delegate.get_fourcc('mp4v')}
def make_video(input_filename, codec):
ffmpegcommand = ffmpeg_api.get_ffmpeg_tool()
command = [ffmpegcommand, '-y', '-i', input_filename]
command.extend(codecs[codec])
video_prefix = input_filename[:input_filename.rfind('.') + 1]
outFileName = video_prefix + suffices[codec]
outFileName = os.path.split(outFileName)[1]
command.append(outFileName)
p = Popen(command, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
try:
return outFileName if p.returncode == 0 else None
except OSError as e:
print (e)
return video_prefix
def save_result(result, codec):
import json
killList = []
for seg in result:
seg.pop('mask')
maskvid_filename = convertToVideo(seg['videosegment'])
killList.append(maskvid_filename)
json_filename = codec + '.json'
with open(json_filename, 'w') as fp:
json.dump(result, fp)
killList.append(json_filename)
return killList
class TestMaskGeneration(TestSupport):
filesToKill = []
def setUp(self):
self.mv = self.locateFile('tests/videos/sample1.mov')
self.test_image = make_image()
ImageWrapper(self.test_image).save('test_paste.png')
def paste_in(self,frame,object):
diff_x = frame.shape[0] - object.shape[0]
x = int(diff_x/2.0 - (np.random.randint(-20,20,(1))[0]))
diff_y = frame.shape[1] - object.shape[1]
y = int(diff_y / 2.0 - np.random.randint(-20,20,(1))[0])
part = frame[x:x+object.shape[0],y:y+object.shape[1],:]
part[object[:,:,3]>0] = object[:,:,0:3][object[:,:,3]>0]
frame[x:x + object.shape[0], y:y + object.shape[1]] = part
return frame
def paste(self, video_file, object, codec):
vfi = cv2api_delegate.videoCapture(video_file)
width = int(vfi.get(cv2api_delegate.prop_frame_width))
height = int(vfi.get(cv2api_delegate.prop_frame_height))
fourcc = fourccs[codec]
video_prefix = video_file[:video_file.rfind('.')]
video_file_output = video_prefix + '_paste.' + suffices[codec]
video_file_output = os.path.split(video_file_output)[1]
vfo = cv2api_delegate.videoWriter(video_file_output, fourcc, (vfi.get(cv2api_delegate.prop_fps)), (width, height))
if not vfo.isOpened():
raise ValueError('VideoWriter failed to open.')
try:
while vfi.isOpened() and vfo.isOpened():
r,f = vfi.read()
if not r:
break
i = ImageWrapper(self.paste_in(f,object),mode='BGR')
vfo.write(i.image_array)
finally:
vfi.release()
vfo.release()
self.addFileToRemove(video_file_output)
return video_file_output
def run_test(self, codec):
pre_of = make_video(self.mv, codec)
self.addFileToRemove(pre_of)
post_of = self.paste(pre_of, self.test_image, codec)
self.addFileToRemove(post_of)
result, errors = pasteCompare(pre_of, post_of, post_of, VidTimeManager(), arguments={'add type': 'replace'})
_filesToKill = save_result(result, codec)
for _file in _filesToKill:
self.addFileToRemove(_file)
def test_raw(self):
self.run_test('raw')
def test_mpeg4(self):
self.run_test('mp4v')
if __name__ == '__main__':
unittest.main()
```
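`paste_in` above composites a small RGBA object onto each video frame, using the alpha channel as the paste mask and a random jitter around the frame centre. The sketch below repeats that compositing step with fixed offsets and tiny made-up arrays so the masking is easy to follow:

```python
import numpy as np

# Sketch of the alpha-masked paste used by paste_in above, with fixed (not random) offsets.
def paste_object(frame, rgba_object, x, y):
    h, w = rgba_object.shape[:2]
    region = frame[x:x + h, y:y + w, :]
    alpha = rgba_object[:, :, 3] > 0                      # opaque pixels form the paste mask
    region[alpha] = rgba_object[:, :, 0:3][alpha]
    frame[x:x + h, y:y + w] = region
    return frame

frame = np.zeros((8, 8, 3), dtype='uint8')
obj = np.zeros((2, 2, 4), dtype='uint8')
obj[:, :, 0] = 200          # a small red square
obj[:, :, 3] = 255          # fully opaque
print(paste_object(frame, obj, 3, 3)[3:5, 3:5, 0])
```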
#### File: Media-Journaling-Tool/tests/notifiers_test.py
```python
from test_support import TestSupport
import unittest
from maskgen.scenario_model import ImageProjectModel
from maskgen.notifiers import NotifyDelegate
from maskgen.services.probes import ProbeSetBuilder, ProbeGenerator, EmptyCompositeBuilder
import logging
class TestNotifiers(TestSupport):
def test_memory(self):
model = ImageProjectModel(self.locateFile('images/sample.json'),
notify=NotifyDelegate([]))
ProbeGenerator(scModel=model, processors=[ProbeSetBuilder(scModel=model, compositeBuilders=[EmptyCompositeBuilder])])()
key1 = ('composite', ('orig_input', 'input_mod_1'), ('input_mod_1', 'input_mod_2'))
key2 = ('composite', ('orig_input', 'input_mod_1'), ('input_mod_2', 'input_mod_2_3'))
key3 = ('composite', ('orig_input', 'input_mod_1'), ('input_mod_2_3', 'input_mod_2_47'))
key4 = ('donor', ('hat_splice_crop', 'input_mod_1'), ('hat', 'hat_splice'))
memory = model.get_probe_mask_memory()
self.assertTrue (memory[key1] is not None)
self.assertTrue (memory[key2] is not None)
self.assertTrue (memory[key3] is not None)
self.assertTrue (memory[key4] is not None)
model.select(('input_mod_2','input_mod_2_3'))
model.update_edge(model.getCurrentEdgeModification())
self.assertTrue (memory[key1] is not None)
self.assertTrue (memory[key2] is None)
self.assertTrue (memory[key3] is None)
self.assertTrue (memory[key4] is not None)
model.select(('hat_splice', 'hat_splice_rot_1'))
model.update_edge(model.getCurrentEdgeModification())
self.assertTrue (memory[key1] is not None)
self.assertTrue (memory[key2] is None)
self.assertTrue (memory[key3] is None)
self.assertTrue (memory[key4] is None)
```
#### File: tests/plugins/ExifDateTimeTest.py
```python
import unittest
from plugins import ExifDateTime
from datetime import datetime
class ExifDateTimeTests(unittest.TestCase):
def test_random_date(self):
date = ExifDateTime.random_date("01-01-2010", "12-31-2020")
assert(date is not False)
assert(datetime.strptime("01-01-2010", "%m-%d-%Y").date() <=
datetime.strptime(date, "%Y:%m:%d").date() <=
datetime.strptime("12-31-2020", "%m-%d-%Y").date())
date = ExifDateTime.random_date("12-31-2020", "01-01-2010")
assert(date is False)
date = ExifDateTime.random_date("12-31-2020", "12-31-2020")
assert(date is not False)
assert(datetime.strptime(date, "%Y:%m:%d").date() == datetime.strptime("12-31-2020", "%m-%d-%Y").date())
def test_random_time(self):
time = ExifDateTime.random_time("23:00:00", "03:00:00")
assert(time is not False)
time = ExifDateTime.random_time("08:00:00", "12:00:00")
assert(time is not False)
assert(datetime.strptime("08:00:00", "%H:%M:%S").time() <=
datetime.strptime(time, "%H:%M:%S").time() <=
datetime.strptime("12:00:00", "%H:%M:%S").time())
time = ExifDateTime.random_time("15:00:00", "15:00:00")
assert(time is not False)
assert(datetime.strptime("15:00:00", "%H:%M:%S").time() ==
datetime.strptime(time, "%H:%M:%S").time())
```
#### File: tests/plugins/FlowDrivenFrameDropTest.py
```python
import unittest
import os
from maskgen import plugins, ffmpeg_api
from maskgen.support import getValue
from tests.test_support import TestSupport
def get_channel_data(source_data, codec_type):
pos = 0
for data in source_data:
if data['codec_type'] == codec_type:
return data,pos
pos += 1
class CropSelectorTestCase(TestSupport):
filesToKill = []
def setUp(self):
plugins.loadPlugins()
def test_drop_then_add(self):
filename= self.locateFile('tests/videos/sample1.mov')
filename_output1 = os.path.join(os.path.dirname(os.path.abspath(filename)),'sample_out1a.avi')
kwargs = {'Start Time':100,
'seconds to drop': 2,
'codec':'XVID',
'save histograms':'yes'}
args,error = plugins.callPlugin('FlowDrivenVideoFrameDrop',
None,
filename,
filename_output1,
**kwargs)
self.filesToKill.append(filename_output1)
self.assertTrue(error is None)
frames1 = int(get_channel_data(ffmpeg_api.get_meta_from_video(filename, show_streams=True)[0], 'video')[0]['nb_frames'])
frames2 = int(
get_channel_data(ffmpeg_api.get_meta_from_video(filename_output1, show_streams=True)[0], 'video')[0]['nb_frames'])
diff = frames1-frames2
self.assertTrue(diff>0)
diff_time = int(args['End Time']) - int(args['Start Time'])+1
self.assertEqual(diff, diff_time)
filename_output2 = os.path.join(os.path.dirname(os.path.abspath(filename)), 'sample_out2a.avi')
args['codec'] = 'XVID'
if getValue(args,'Frames to Add',0) < 1:
args['Frames to Add'] = 1
print str(args)
args, error = plugins.callPlugin('FlowDrivenVideoTimeWarp',
None,
filename_output1,
filename_output2,
**args)
self.filesToKill.append(filename_output2)
self.assertTrue(error is None)
frames1 = int(get_channel_data(ffmpeg_api.get_meta_from_video(filename_output1, show_streams=True)[0], 'video')[0]['nb_frames'])
frames2 = int(
get_channel_data(ffmpeg_api.get_meta_from_video(filename_output2, show_streams=True)[0], 'video')[0]['nb_frames'])
diff = frames2 - frames1
self.assertTrue(diff > 0)
diff_time = int(args['End Time']) - int(args['Start Time']) + 1
print str(args)
self.assertEqual(diff, diff_time)
def tearDown(self):
for f in self.filesToKill:
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
unittest.main()
```
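`get_channel_data` above returns the first probed stream of a requested codec type together with its position in the stream list, and the test then compares `nb_frames` before and after the drop. A standalone restatement of that lookup, using a fabricated stream list for illustration:

```python
# Sketch of the stream lookup used above: pick the first stream whose codec_type matches.
def first_stream(streams, codec_type):
    for position, stream in enumerate(streams):
        if stream.get('codec_type') == codec_type:
            return stream, position
    return None, -1

streams = [{'codec_type': 'audio', 'nb_frames': '2618367'},
           {'codec_type': 'video', 'nb_frames': '803'}]
print(first_stream(streams, 'video'))  # ({'codec_type': 'video', 'nb_frames': '803'}, 1)
```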
#### File: tests/plugins/MagickTests.py
```python
import unittest
import os
from maskgen import plugins, image_wrap
import numpy
import tempfile
from tests import test_support
class MagickTestCase(test_support.TestSupport):
filesToKill = []
def setUp(self):
plugins.loadPlugins()
def test_gamma(self):
img_wrapper = image_wrap.openImageFile(self.locateFile('tests/images/test_project1.jpg'))
filename_output = tempfile.mktemp(prefix='cstcr', suffix='.png', dir='.')
self.filesToKill.extend([filename_output])
img_wrapper.save(filename_output)
args, error = plugins.callPlugin('ManualGammaCorrection',
img_wrapper,
self.locateFile('tests/images/test_project1.jpg'),
filename_output,
gamma=2.0)
wrapper = image_wrap.openImageFile(filename_output)
output = wrapper.to_array()
self.assertTrue(output.shape == img_wrapper.to_array().shape)
# self.assertFalse(numpy.all(output==img_wrapper.to_array(),axis=0))
def test_modulate(self):
img_wrapper = image_wrap.openImageFile(self.locateFile('tests/images/test_project1.jpg'))
filename_output = tempfile.mktemp(prefix='cstcr', suffix='.png', dir='.')
self.filesToKill.extend([filename_output])
img_wrapper.save(filename_output)
args, error = plugins.callPlugin('MagickModulate',
img_wrapper,
self.locateFile('tests/images/test_project1.jpg'),
filename_output,
saturation=130,
brightness=130)
wrapper = image_wrap.openImageFile(filename_output)
output = wrapper.to_array()
self.assertTrue(output.shape == img_wrapper.to_array().shape)
# self.assertFalse(numpy.all(output == img_wrapper.to_array(), axis=0))
def test_noise(self):
img_wrapper = image_wrap.openImageFile(self.locateFile('tests/images/test_project1.jpg'))
filename_output = tempfile.mktemp(prefix='cstcr', suffix='.png', dir='.')
self.filesToKill.extend([filename_output])
img_wrapper.save(filename_output)
args, error = plugins.callPlugin('MagickAddNoise',
img_wrapper,
self.locateFile('tests/images/test_project1.jpg'),
filename_output,
**{"Noise Type":"salt-pepper"})
wrapper = image_wrap.openImageFile(filename_output)
output = wrapper.to_array()
self.assertTrue(output.shape == img_wrapper.to_array().shape)
# self.assertFalse(numpy.all(output == img_wrapper.to_array(), axis=0))
def test_contrast(self):
img_wrapper = image_wrap.openImageFile(self.locateFile('tests/images/test_project1.jpg'))
filename_output = tempfile.mktemp(prefix='cstcr', suffix='_c.png', dir='.')
self.filesToKill.extend([filename_output])
img_wrapper.save(filename_output)
args, error = plugins.callPlugin('Constrast',
img_wrapper,
self.locateFile('tests/images/test_project1.jpg'),
filename_output,
direction="increase")
wrapper = image_wrap.openImageFile(filename_output)
output = wrapper.to_array()
self.assertTrue(output.shape == img_wrapper.to_array().shape)
# self.assertFalse(numpy.all(output == img_wrapper.to_array(), axis=0))
def test_levels(self):
img_wrapper = image_wrap.openImageFile(self.locateFile('tests/images/test_project1.jpg'))
filename_output = tempfile.mktemp(prefix='cstcr', suffix='_l.png', dir='.')
#self.filesToKill.extend([filename_output])
img_wrapper.save(filename_output)
args, error = plugins.callPlugin('LevelCorrectionNoMask',
img_wrapper,
self.locateFile('tests/images/test_project1.jpg'),
filename_output,
blackpoint=25,
whitepoint=75,
gamma=1.5)
wrapper = image_wrap.openImageFile(filename_output)
output = wrapper.to_array()
self.assertTrue(output.shape == img_wrapper.to_array().shape)
def tearDown(self):
for f in self.filesToKill:
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/plugins/MedianBlurTest.py
```python
import unittest
import os
from maskgen import plugins, image_wrap
import numpy
import tempfile
class MedianBlurTestCase(unittest.TestCase):
filesToKill = []
def setUp(self):
plugins.loadPlugins()
def test_something(self):
img = numpy.random.randint(0, 255, (500, 500, 3), dtype='uint8')
wrapper = image_wrap.ImageWrapper(img)
filename = tempfile.mktemp(prefix='mstc',suffix='.png',dir='.')
filename_output = tempfile.mktemp(prefix='mstcr', suffix='.png', dir='.')
self.filesToKill.append(filename)
wrapper.save(filename)
self.filesToKill.append(filename_output)
image_wrap.ImageWrapper(img).save(filename_output)
args,error = plugins.callPlugin('MedianBlur',
wrapper,
filename,
filename_output,
kernelSize=25,
percentageChange = 0.5)
wrapper = image_wrap.openImageFile(filename_output)
output = wrapper.to_array()
self.assertEqual(output.shape, img.shape)
diff = abs(output - img)
finaldiff = numpy.zeros((500,500))
for i in range(3):
finaldiff = finaldiff + diff[:,:,i]
finaldiff[finaldiff > 0] = 1
self.assertTrue(abs(sum(sum(finaldiff))-62500) < 100)
def tearDown(self):
for f in self.filesToKill:
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
unittest.main()
```
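The blur test above counts how many pixels changed by collapsing the per-channel differences into a single changed/unchanged map. The same count can be expressed more directly with NumPy, as in this small sketch (illustrative only, not a change to the test):

```python
import numpy as np

# Sketch of the changed-pixel count used in the test above: a pixel counts as
# changed if any of its channels differs between the two images.
def count_changed_pixels(before, after):
    return int(np.sum(np.any(before != after, axis=2)))

before = np.zeros((4, 4, 3), dtype='uint8')
after = before.copy()
after[0, 0, 1] = 10          # one pixel changed in a single channel
print(count_changed_pixels(before, after))  # 1
```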
#### File: tests/plugins/RetinexTest.py
```python
import unittest
from maskgen import plugins, image_wrap
import numpy
import tempfile
from tests import test_support
class RetinexTestCase(test_support.TestSupport):
def setUp(self):
plugins.loadPlugins()
def test_retinex(self):
inputfile = self.locateFile('tests/images/test_project5.jpg')
img_wrapper = image_wrap.openImageFile(self.locateFile('tests/images/test_project5.jpg'))
img = img_wrapper.to_array()
filename_output = tempfile.mktemp(prefix='mstcr', suffix='.jpg', dir='.')
self.addFileToRemove(filename_output)
args, error = plugins.callPlugin('Retinex',
img_wrapper,
inputfile,
filename_output)
wrapper = image_wrap.openImageFile(filename_output)
output = wrapper.to_array()
self.assertTrue(output.shape == (322,483,3))
self.assertTrue(numpy.all(output != input))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/segmentation/segmanagetest.py
```python
import unittest
from maskgen import image_wrap
import numpy
from maskgen.segmentation.segmanage import select_region,segmentation_classification,convert_color
from tests.test_support import TestSupport
class SegManageTestCase(TestSupport):
def test_select_region(self):
img = numpy.zeros((500,500,3),dtype='uint8')
img_wrapper = image_wrap.ImageWrapper(img)
selector = numpy.zeros((500, 500, 3), dtype='uint8')
selector[30:40,30:40,:] = [200,200,100]
selector[130:140, 130:140, :] = [100, 200, 100]
selector_wrapper = image_wrap.ImageWrapper(selector)
result,rcolor = select_region(img_wrapper,selector_wrapper,convert_color('[200,200,100]'))
result = result.to_array()
self.assertTrue(numpy.all(result[30:40,30:40,3] == 255))
self.assertTrue(numpy.all(result[130:140, 130:140, 3] == 0))
self.assertEquals(rcolor,[200,200,100])
def test_select_region_anycolor(self):
img = numpy.zeros((500, 500, 3), dtype='uint8')
img_wrapper = image_wrap.ImageWrapper(img)
selector = numpy.zeros((500, 500, 3), dtype='uint8')
selector[30:40, 30:40, :] = [200, 200, 100]
selector[130:140, 130:140, :] = [100, 200, 100]
selector_wrapper = image_wrap.ImageWrapper(selector)
result,color = select_region(img_wrapper, selector_wrapper)
result = result.to_array()
self.assertTrue(numpy.all(result[30:40, 30:40, 3] != result[130:140, 130:140, 3]))
def test_segmentation_classification(self):
import os
filelocation = self.locateFile('./tests/data/classifications.csv')
self.assertEquals(segmentation_classification(os.path.dirname(filelocation),[100,100,200]),'other')
self.assertEquals(segmentation_classification(os.path.dirname(filelocation), [200,100,200]), 'house')
if __name__ == '__main__':
unittest.main()
```
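Judging from the assertions above, `select_region` takes an image wrapper and a selector wrapper (optionally a target colour produced by `convert_color`) and returns a new wrapper whose alpha channel is 255 inside the matched region, along with the colour that was matched. A minimal sketch under that reading:
```python
import numpy
from maskgen import image_wrap
from maskgen.segmentation.segmanage import select_region, convert_color

# a blank image plus a selector mask containing one coloured region
img_wrapper = image_wrap.ImageWrapper(numpy.zeros((500, 500, 3), dtype='uint8'))
selector = numpy.zeros((500, 500, 3), dtype='uint8')
selector[30:40, 30:40, :] = [200, 200, 100]
selector_wrapper = image_wrap.ImageWrapper(selector)

# keep only the region painted with the requested colour
result, color = select_region(img_wrapper, selector_wrapper, convert_color('[200,200,100]'))
alpha = result.to_array()[:, :, 3]    # 255 inside the selection, 0 elsewhere
print(color)                          # the colour that was matched
```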
#### File: Media-Journaling-Tool/tests/test_graph_meta_tools.py
```python
import unittest
from test_support import TestSupport
from maskgen import video_tools
from maskgen.graph_meta_tools import MetaDataExtractor, GraphProxy, get_meta_data_change_from_edge
import os
class TestGraphMetaTools(TestSupport):
def __init__(self, stuff):
TestSupport.__init__(self,stuff)
self.filesToKill = []
def setUp(self):
source = self.locateFile('tests/videos/sample1.mov')
target = 'sample1_ffr_ex.mov'
os.system('ffmpeg -y -i "{}" -r 10/1 "{}"'.format(source, target))
self.addFileToRemove(target)
source = self.locateFile('tests/videos/sample1.mov')
target = 'sample1_ffr_2_ex.mov'
os.system('ffmpeg -y -i "{}" -r 8/1 "{}"'.format(source, target))
self.addFileToRemove(target)
def tearDown(self):
for f in self.filesToKill:
if os.path.exists(f):
os.remove(f)
def _add_mask_files_to_kill(self, segments):
for segment in segments:
if 'videosegment' in segment:
self.filesToKill.append(segment['videosegment'])
def testCache(self):
from maskgen.scenario_model import VideoAddTool
tool = VideoAddTool()
source = self.locateFile('tests/videos/sample1.mov')
target = 'sample1_ffr_ex.mov'
extractor = MetaDataExtractor(GraphProxy(source, target, source_node=tool.getAdditionalMetaData(source),
target_node={'media':[{'codec_type':'video','height':1000}]}))
meta = extractor.getVideoMeta(source, show_streams=True)
self.assertEqual('803', meta[0][0]['nb_frames'])
meta = extractor.getVideoMeta(target, show_streams=True)
self.assertEqual(1000, meta[0][0]['height'])
def test_Audio_to_Video(self):
source = self.locateFile('tests/videos/sample1.mov')
extractor = MetaDataExtractor(GraphProxy(source, 'b'))
masks = [video_tools.create_segment(endframe= 2618367,
rate= 44100,
starttime=0.0,
frames= 2618367,
startframe=1,
endtime=59373.424,
type='audio')]
newMasks = extractor.create_video_for_audio(source, masks=masks)
self.assertTrue(len(newMasks) > len(masks))
self.assertTrue(video_tools.get_start_frame_from_segment(newMasks[1]) == 1)
self.assertEquals(803,video_tools.get_end_frame_from_segment(newMasks[1]))
        self.assertEqual(1352, int(video_tools.get_rate_from_segment(newMasks[1]) * 100))
        self.assertEqual(5934833, int(video_tools.get_end_time_from_segment(newMasks[1]) * 100))
source = self.locateFile('tests/videos/Sample1_slow.mov')
masks = [video_tools.create_segment(endframe= 441000,
rate= 44100,
starttime=1000.0,
frames= 396901,
startframe=44100,
endtime=10000.0,
type='audio')]
newMasks = extractor.create_video_for_audio(source, masks=masks)
self.assertTrue(len(newMasks) > len(masks))
self.assertTrue(video_tools.get_rate_from_segment(newMasks[1]) == 10.0)
self.assertTrue(video_tools.get_start_frame_from_segment(newMasks[1]) == 11)
self.assertTrue(video_tools.get_end_frame_from_segment(newMasks[1]) == 100)
def test_get_meta_data_change_from_edge(self):
result = get_meta_data_change_from_edge({'metadatadiff': {'video': {
'nb_frames': ('change',9,10),
'r_frame_rate': ('change',29,30),
'duration': ('change',10,11)
}}})
self.assertEqual(9,result[0])
self.assertEqual(10000, result[1])
self.assertEqual(10, result[2])
self.assertEqual(11000, result[3])
self.assertEqual(30, result[4])
def test_warp(self):
source = self.locateFile('tests/videos/sample1.mov')
target = 'sample1_ffr_ex.mov'
source_set = video_tools.FileMetaDataLocator(source).getMaskSetForEntireVideo(
start_time='29', end_time='55')
target_set = video_tools.FileMetaDataLocator(target).getMaskSetForEntireVideoForTuples(
start_time_tuple=(video_tools.get_start_time_from_segment(source_set[0]), 0),
end_time_tuple=(video_tools.get_end_time_from_segment(source_set[0]), 0))
extractor = MetaDataExtractor(GraphProxy(source,target))
new_mask_set = extractor.warpMask(source_set, source, source)
self.assertTrue(video_tools.get_frames_from_segment(new_mask_set[0]) == video_tools.get_frames_from_segment(source_set[0]))
self.assertTrue(video_tools.get_end_time_from_segment(new_mask_set[0]) == video_tools.get_end_time_from_segment(source_set[0]))
self.assertTrue(video_tools.get_rate_from_segment(new_mask_set[0]) == video_tools.get_rate_from_segment(source_set[0]))
self.assertTrue(video_tools.get_start_frame_from_segment(new_mask_set[0]) == video_tools.get_start_frame_from_segment(source_set[0]))
self.assertTrue(video_tools.get_start_time_from_segment(new_mask_set[0]) == video_tools.get_start_time_from_segment(source_set[0]))
self._add_mask_files_to_kill(source_set)
new_mask_set = extractor.warpMask(source_set, source, target)
self.assertTrue(video_tools.get_frames_from_segment(new_mask_set[0]) == video_tools.get_frames_from_segment(target_set[0]))
self.assertTrue(video_tools.get_end_time_from_segment(new_mask_set[0]) == video_tools.get_end_time_from_segment(target_set[0]))
self.assertTrue(video_tools.get_rate_from_segment(new_mask_set[0]) == video_tools.get_rate_from_segment(target_set[0]))
self.assertTrue(video_tools.get_start_frame_from_segment(new_mask_set[0]) == video_tools.get_start_frame_from_segment(target_set[0]))
self.assertTrue(video_tools.get_start_time_from_segment(new_mask_set[0]) == video_tools.get_start_time_from_segment(target_set[0]))
source_mask_set = extractor.warpMask(new_mask_set, source, target, inverse=True)
self.assertTrue(abs(video_tools.get_frames_from_segment(source_mask_set[0]) - video_tools.get_frames_from_segment(source_set[0])) < 2)
self.assertTrue(abs(video_tools.get_end_time_from_segment(source_mask_set[0]) - video_tools.get_end_time_from_segment(source_set[0])) < video_tools.get_error_from_segment(source_mask_set[0]) * 2)
self.assertTrue(abs(video_tools.get_rate_from_segment(source_mask_set[0]) - video_tools.get_rate_from_segment(source_set[0])) < 0.1)
self.assertTrue(abs(video_tools.get_start_frame_from_segment(source_mask_set[0]) - video_tools.get_start_frame_from_segment(source_set[0])) < 2)
self.assertTrue(
abs(video_tools.get_start_time_from_segment(source_mask_set[0]) - video_tools.get_start_time_from_segment(source_set[0])) < video_tools.get_error_from_segment(source_mask_set[0]) * 2)
new_mask_set = extractor.warpMask(source_set, source, target, useFFMPEG=True)
self.assertTrue(video_tools.get_frames_from_segment(new_mask_set[0]) == video_tools.get_frames_from_segment(target_set[0]))
self.assertTrue(video_tools.get_end_time_from_segment(new_mask_set[0]) == video_tools.get_end_time_from_segment(target_set[0]))
self.assertTrue(video_tools.get_rate_from_segment(new_mask_set[0]) == video_tools.get_rate_from_segment(target_set[0]))
self.assertTrue(video_tools.get_start_frame_from_segment(new_mask_set[0]) == video_tools.get_start_frame_from_segment(target_set[0]))
self.assertTrue(video_tools.get_start_time_from_segment(new_mask_set[0]) == video_tools.get_start_time_from_segment(target_set[0]))
source_mask_set = extractor.warpMask(new_mask_set, source, target, inverse=True, useFFMPEG=True)
self.assertTrue(abs(video_tools.get_frames_from_segment(source_mask_set[0]) - video_tools.get_frames_from_segment(source_set[0])) < 2)
self.assertTrue(abs(video_tools.get_end_time_from_segment(source_mask_set[0]) - video_tools.get_end_time_from_segment(source_set[0])) < video_tools.get_error_from_segment(source_mask_set[0]) * 2)
self.assertTrue(abs(video_tools.get_rate_from_segment(source_mask_set[0]) - video_tools.get_rate_from_segment(source_set[0])) < 0.1)
self.assertTrue(abs(video_tools.get_start_frame_from_segment(source_mask_set[0]) - video_tools.get_start_frame_from_segment(source_set[0])) < 2)
self.assertTrue(
abs(video_tools.get_start_time_from_segment(source_mask_set[0]) - video_tools.get_start_time_from_segment(source_set[0])) < video_tools.get_error_from_segment(source_mask_set[0]) * 2)
source_set = target_set
source = target
target = 'sample1_ffr_2_ex.mov'
target_set = video_tools.FileMetaDataLocator(target).getMaskSetForEntireVideoForTuples(
start_time_tuple=(video_tools.get_start_time_from_segment(source_set[0]), 0),
end_time_tuple=(video_tools.get_end_time_from_segment(source_set[0]), 0))
new_mask_set = extractor.warpMask(source_set, source, target)
self.assertTrue(video_tools.get_frames_from_segment(new_mask_set[0]) == video_tools.get_frames_from_segment(target_set[0]))
self.assertTrue(video_tools.get_end_time_from_segment(new_mask_set[0]) == video_tools.get_end_time_from_segment(target_set[0]))
self.assertTrue(video_tools.get_rate_from_segment(new_mask_set[0]) == video_tools.get_rate_from_segment(target_set[0]))
self.assertTrue(video_tools.get_start_frame_from_segment(new_mask_set[0]) == video_tools.get_start_frame_from_segment(target_set[0]))
self.assertTrue(video_tools.get_start_time_from_segment(new_mask_set[0]) == video_tools.get_start_time_from_segment(target_set[0]))
def create_masks(self,mask_set):
from maskgen import tool_set
import numpy as np
for segment in mask_set:
writer = tool_set.GrayBlockWriter('test_warp',video_tools.get_rate_from_segment(segment))
for i in range(video_tools.get_frames_from_segment(segment)):
f = i + video_tools.get_start_frame_from_segment(segment)
                writer.write(np.random.randint(0, 2, (1000, 1000), 'uint8') * 255,
(f-1)*video_tools.get_rate_from_segment(segment),
frame_number=f)
writer.close()
video_tools.update_segment(segment, videosegment=writer.filename)
self.addFileToRemove(writer.filename)
def read_masks(self, mask_set):
from maskgen import tool_set
r = []
for segment in mask_set:
reader = tool_set.GrayBlockReader(video_tools.get_file_from_segment(segment))
r.append({'start_time':reader.current_frame_time(),
'start_frame':reader.current_frame(),
'frames': reader.length()})
return r
    def test_warp_with_mask_files(self):
def run_warp(source, target,start_time, end_time):
source_set = video_tools.FileMetaDataLocator(source).getMaskSetForEntireVideo(
start_time=start_time, end_time=end_time)
self.create_masks(source_set)
extractor = MetaDataExtractor(GraphProxy(source, target))
target_set = video_tools.FileMetaDataLocator(target).getMaskSetForEntireVideoForTuples(
start_time_tuple=(video_tools.get_start_time_from_segment(source_set[0]), 0),
end_time_tuple=(video_tools.get_end_time_from_segment(source_set[0]), 0))
new_mask_set = extractor.warpMask(source_set, source, target)
self.assertTrue(
video_tools.get_frames_from_segment(new_mask_set[0]) == video_tools.get_frames_from_segment(
target_set[0]))
self.assertTrue(
video_tools.get_end_time_from_segment(new_mask_set[0]) == video_tools.get_end_time_from_segment(
target_set[0]))
self.assertTrue(
video_tools.get_rate_from_segment(new_mask_set[0]) == video_tools.get_rate_from_segment(target_set[0]))
self.assertTrue(
video_tools.get_start_frame_from_segment(new_mask_set[0]) == video_tools.get_start_frame_from_segment(
target_set[0]))
self.assertTrue(
video_tools.get_start_time_from_segment(new_mask_set[0]) == video_tools.get_start_time_from_segment(
target_set[0]))
file_data = self.read_masks(new_mask_set)
self.assertEqual(video_tools.get_frames_from_segment(new_mask_set[0]), file_data[0]['frames'])
run_warp('sample1_ffr_2_ex.mov', 'sample1_ffr_ex.mov', '10', '24')
run_warp('sample1_ffr_ex.mov', 'sample1_ffr_2_ex.mov', '10', '24')
run_warp( self.locateFile('tests/videos/sample1.mov'), 'sample1_ffr_2_ex.mov','29','55')
run_warp('sample1_ffr_2_ex.mov', self.locateFile('tests/videos/sample1.mov'), '29', '55')
if __name__ == '__main__':
unittest.main()
```
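The mask segments these tests pass around are plain dictionaries built by `video_tools.create_segment` and read back through the `get_*_from_segment` accessors rather than raw key lookups. A small sketch of that round trip, limited to fields that already appear in the tests above:
```python
from maskgen import video_tools

# a one-second, 44.1 kHz audio segment, described with the same keyword
# arguments the tests pass to create_segment
segment = video_tools.create_segment(starttime=0.0,
                                     endtime=1000.0,
                                     startframe=1,
                                     endframe=44100,
                                     frames=44100,
                                     rate=44100,
                                     type='audio')

# field access goes through the accessors used throughout the suite
print(video_tools.get_start_frame_from_segment(segment))
print(video_tools.get_end_time_from_segment(segment))
print(video_tools.get_rate_from_segment(segment))
```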
#### File: Media-Journaling-Tool/tests/test_graph_rules.py
```python
from maskgen import graph_rules
import unittest
from maskgen.scenario_model import loadProject
from test_support import TestSupport
from mock import MagicMock, Mock, patch
from maskgen.validation.core import Severity
from maskgen import video_tools,graph_meta_tools
class TestToolSet(TestSupport):
def test_aproject(self):
model = loadProject(self.locateFile('images/sample.json'))
leafBaseTuple = model.getTerminalAndBaseNodeTuples()[0]
result = graph_rules.setFinalNodeProperties(model, leafBaseTuple[0])
self.assertEqual('yes', result['manmade'])
self.assertEqual('no', result['face'])
self.assertEqual('no', result['postprocesscropframes'])
self.assertEqual('no', result['spatialother'])
self.assertEqual('no', result['otherenhancements'])
self.assertEqual('yes', result['color'])
self.assertEqual('no', result['blurlocal'])
self.assertEqual('large', result['compositepixelsize'])
self.assertEqual('yes', result['imagecompression'])
def test_checkForVideoRetainment(self):
graph = Mock()
mapping = {'a':self.locateFile('videos/sample1.mov'), 'b':self.locateFile('videos/sample1.wav')}
graph.get_image = lambda x: (None, mapping[x])
result = graph_rules.checkForVideoRetainment('op', graph, 'a', 'a')
self.assertIsNone(result)
result = graph_rules.checkForVideoRetainment('op', graph, 'a', 'b')
self.assertIsNotNone(result)
    # Covers checkLengthSameOrBigger and checkLengthSmaller
def test_checkLength(self):
graph = Mock()
graph.get_edge = Mock(return_value={'arguments': {'Start Time': '1', 'End Time': '2', 'add type': 'insert'},
'metadatadiff': {}})
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.dir = '.'
result = graph_rules.checkLengthSameOrBigger('op', graph, 'a', 'b') # bigger
self.assertIsNotNone(result)
graph.get_edge.return_value['metadatadiff'] ={'video': {'nb_frames': ('change', '2', '1')}}
result = graph_rules.checkLengthSameOrBigger('op', graph, 'a', 'b')
self.assertIsNotNone(result)
graph.get_edge.return_value['metadatadiff'] = {'video': {'nb_frames': ('change', '1', '2')}}
result = graph_rules.checkLengthSameOrBigger('op', graph, 'a', 'b')
self.assertIsNone(result)
graph.get_edge.return_value['arguments']['add type'] = 'replace'
result = graph_rules.checkLengthSameOrBigger('op', graph, 'a', 'b') # same
self.assertIsNotNone(result)
graph.get_edge.return_value['metadatadiff'] = {}
result = graph_rules.checkLengthSameOrBigger('op', graph, 'a', 'b')
self.assertIsNone(result)
result = graph_rules.checkLengthSmaller('op', graph, 'a', 'b') # smaller
self.assertIsNotNone(result)
graph.get_edge.return_value['metadatadiff'] = {'video': {'nb_frames': ('change', '2', '1')}}
result = graph_rules.checkLengthSmaller('op', graph, 'a', 'b')
self.assertIsNone(result)
graph.get_edge.return_value['metadatadiff'] = {'video': {'nb_frames': ('change', '1', '2')}}
result = graph_rules.checkLengthSmaller('op', graph, 'a', 'b')
self.assertIsNotNone(result)
def test_checkAudioLengthBigger(self):
graph = Mock()
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'video':{'duration':('change',1,2)}}})
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.get_node = Mock(return_value={'file': self.locateFile('videos/sample1.mov')})
graph.dir = '.'
result = graph_rules.checkAudioLengthBigger('op', graph, 'a', 'b')
self.assertIsNotNone(result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'duration': ('change', 1, 1)}}})
result = graph_rules.checkAudioLengthBigger('op', graph, 'a', 'b')
self.assertIsNotNone(result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'duration': ('change', 1, 2)}}})
result = graph_rules.checkAudioLengthBigger('op', graph, 'a', 'b')
self.assertIsNone(result)
def test_checkAudioLengthSmaller(self):
graph = Mock()
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'video':{'duration':('change',1,2)}}})
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.get_node = Mock(return_value={'file': self.locateFile('videos/sample1.mov')})
graph.dir = '.'
result = graph_rules.checkAudioLengthSmaller('op', graph, 'a', 'b')
self.assertIsNotNone(result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'duration': ('change', 1, 1)}}})
result = graph_rules.checkAudioLengthSmaller('op', graph, 'a', 'b')
self.assertIsNotNone(result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'duration': ('change', 2, 1)}}})
result = graph_rules.checkAudioLengthSmaller('op', graph, 'a', 'b')
self.assertIsNone(result)
def test_checkAudioLengthDonor(self):
graph = Mock()
def which_edge(x,y):
if x in ['ai','ar']:
return {'arguments': {'Start Time': 1, 'End Time': 2,
'add type':'insert' if x == 'ai' else 'replace'},
'op':'AddAudioOfSomeType',
'metadatadiff': {'video': {'duration': ('change', 1, 2)}}}
else:
return {'arguments': {'Start Time': 4, 'End Time': 5, 'add type': 'insert'},
'op':'Donor',
'videomasks': [{'startframe': 20,'endframe':30,'rate':10,'type':'audio','frames':11,
'starttime':1900,'endtime':2900}]}
graph.get_edge = which_edge
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.get_node = Mock(return_value={'file': self.locateFile('videos/sample1.mov')})
graph.dir = '.'
graph.predecessors = Mock(return_value=['ai','c'])
class MyExtractor:
def __init__(self,x):
self.x = x
def get_duration(self,audio=False):
return {'ai': 59348.345, 'b': 60348.345}[self.x]
with patch.object(graph_meta_tools.MetaDataExtractor, 'getMetaDataLocator',side_effect=MyExtractor) as cm:
result = graph_rules.checkAudioLengthDonor('op', graph, 'ai', 'b')
self.assertIsNone(result)
with patch.object(graph_meta_tools.MetaDataExtractor, 'getMetaDataLocator',side_effect=MyExtractor) as cm:
result = graph_rules.checkAudioLengthDonor('op', graph, 'ar', 'b')
self.assertIsNotNone(result)
class MyExtractor2:
def __init__(self,x):
self.x = x
def get_duration(self,audio=False):
return {'ai': 59348.345, 'b': 1000.0}[self.x]
with patch.object(graph_meta_tools.MetaDataExtractor, 'getMetaDataLocator',side_effect=MyExtractor2) as cm:
result = graph_rules.checkAudioLengthDonor('op', graph, 'ar', 'b')
self.assertIsNone(result)
def test_checkAudioLength(self):
graph = Mock()
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'video':{'duration':('change',1,2)}}})
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.get_node = Mock(return_value={'file': self.locateFile('videos/sample1.mov')})
graph.dir = '.'
result = graph_rules._checkAudioLength('op', graph, 'a', 'b')
self.assertEqual(0,result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'x': ('change', 1, 1)}}})
result = graph_rules._checkAudioLength('op', graph, 'a', 'b')
self.assertEqual(0, result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'duration': ('change', 2, 1)}}})
result = graph_rules._checkAudioLength('op', graph, 'a', 'b')
self.assertEqual(1, result)
def test_checkFrameRate(self):
from maskgen.graph_meta_tools import GraphProxy
source = self.locateFile('tests/videos/sample1.mov')
target = self.locateFile('tests/videos/sample1_slow.mov')
graph = GraphProxy(source, 'b')
op = Mock()
op.category = 'Paste'
result = graph_rules._checkFrameRateChange(op, graph, source, target)
self.assertTrue(result)
op.category = 'Audio'
target = self.locateFile('tests/videos/sample2_ffr.mxf')
result = graph_rules._checkFrameRateChange(op, graph, source, target)
self.assertTrue(result)
target = source
op.category = 'Paste'
result = graph_rules._checkFrameRateChange(op, graph, source, target)
self.assertFalse(result)
op.category = 'Audio'
result = graph_rules._checkFrameRateChange(op, graph, source, target)
self.assertFalse(result)
def test_checkDuration(self):
graph = Mock()
graph.get_edge = Mock(return_value={'metadatadiff': {'video': {'nb_frames': ('change', 1, 2)}}})
result = graph_rules.checkDuration('op', graph, 'a', 'b')
self.assertIsNotNone(result)
result = graph_rules.checkAudioOnly('op', graph, 'a', 'b')
self.assertIsNotNone(result)
graph.get_edge.return_value = {'metadatadiff': {'video': {}}}
result = graph_rules.checkDuration('op', graph, 'a', 'b')
self.assertIsNone(result)
def test_checkSampleRate(self):
graph = Mock()
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'sample_rate': ('change', 1, 2)}}})
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.get_node = Mock(return_value={'file': self.locateFile('videos/sample1.mov')})
graph.dir = '.'
result = graph_rules.checkSampleRate('op', graph, 'a', 'b')
self.assertIsNotNone(result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'avr_rate': ('change', 1, 2)}}})
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.get_node = Mock(return_value={'file': self.locateFile('videos/sample1.mov')})
graph.dir = '.'
result = graph_rules.checkSampleRate('op', graph, 'a', 'b')
self.assertIsNone(result)
def test_checkAudioOnly(self):
graph = Mock()
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {}})
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.get_node = Mock(return_value={'file': self.locateFile('videos/sample1.mov')})
graph.dir = '.'
result = graph_rules.checkAudioOnly('op', graph, 'a', 'b')
self.assertIsNone(result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'video': {'nb_frames': ('change', 1, 2)}}})
result = graph_rules.checkAudioOnly('op', graph, 'a', 'b')
self.assertIsNotNone(result)
graph.get_edge = Mock(return_value={'arguments': {'Start Time': 1, 'End Time': 2},
'metadatadiff': {'audio': {'duration': ('change', 2, 1)}}})
result = graph_rules.checkAudioOnly('op', graph, 'a', 'b')
self.assertIsNone(result)
def test_checkCropLength(self):
graph = Mock()
graph.get_edge = Mock(return_value={'arguments': {'Start Time':1,'End Time':2},
'metadatadiff':{}})
graph.get_image_path = Mock(return_value=self.locateFile('videos/sample1.mov'))
graph.get_node = Mock(return_value= {'file':self.locateFile('videos/sample1.mov')})
graph.dir = '.'
result = graph_rules.checkCropLength('op', graph, 'a', 'b')
self.assertIsNotNone(result)
self.assertTrue('803' in result[1])
def test_fileTypeChanged(self):
graph = Mock()
values= {'a': self.locateFile('images/hat.jpg'),
'b': self.locateFile('images/sample.jpg'),
'c': self.locateFile('tests/videos/sample1.mov')}
def side_effect(x):
return 0,values[x]
graph.get_image = Mock(side_effect=side_effect)
self.assertIsNone(graph_rules.checkFileTypeChange('op',graph,'a','b'))
graph.get_image.assert_called_with('b')
self.assertIsNotNone(
graph_rules.checkFileTypeChange('op',graph,'a','c'))
graph.get_image.assert_called_with('c')
def test_checkCutFrames(self):
def edge(a,b):
return {}
def get_node(a):
return {'file':a}
def get_image_path(a):
return a
mock = Mock()
mock.get_edge = Mock(spec=edge,return_value={
'videomasks': [{'startframe': 20,'endframe':30,'rate':10,'type':'audio','frames':11,
'starttime':1900,'endtime':2900},
{'startframe': 20, 'endframe': 30, 'rate': 10, 'type': 'video', 'frames': 11,
'starttime': 1900, 'endtime': 2900}
]
})
mock.get_node =get_node
mock.get_image_path = get_image_path
mock.dir = '.'
video_tools.meta_cache[video_tools.meta_key_builder(video_tools.FileMetaDataLocator('a'), start_time_tuple=(0,1), end_time_tuple=None, media_types=['video'],
channel=0)] = [{'startframe': 1,'endframe':300,'rate':10,'type':'audio','frames':300,
'starttime':0,'endtime':29900}]
video_tools.meta_cache[video_tools.meta_key_builder(video_tools.FileMetaDataLocator('b'), start_time_tuple=(0,1), end_time_tuple=None, media_types=['video'],
channel=0)] = [{'startframe': 1,'endframe':289,'rate':10,'type':'audio','frames':289,
'starttime':0,'endtime':28800}]
video_tools.meta_cache[
video_tools.meta_key_builder(video_tools.FileMetaDataLocator('a'), start_time_tuple=(0, 1), end_time_tuple=None, media_types=['audio'],
channel=0)] = [
{'startframe': 1, 'endframe': 300, 'rate': 10, 'type': 'video', 'frames': 300,
'starttime': 0, 'endtime': 29900}]
video_tools.meta_cache[
video_tools.meta_key_builder(video_tools.FileMetaDataLocator('b'), start_time_tuple=(0, 1), end_time_tuple=None, media_types=['audio'],
channel=0)] = [
{'startframe': 1, 'endframe': 270, 'rate': 10, 'type': 'video', 'frames': 270,
'starttime': 0, 'endtime': 26900}]
r = graph_rules.checkCutFrames('op',mock,'a','b')
self.assertEqual(2, len(r))
self.assertTrue('3000' in r[1])
def test_checkForSelectFrames(self):
def preds(a):
pass
mock = Mock()
mock.predecessors = Mock(spec=preds,return_value=['a','d'])
mock.findOp = Mock(return_value=False)
r = graph_rules.checkForSelectFrames('op',mock,'a','b')
self.assertEqual(2, len(r))
self.assertEqual(Severity.WARNING,r[0])
mock.predecessors.assert_called_with('b')
mock.findOp.assert_called_once_with('d', 'SelectRegionFromFrames')
mock = Mock()
mock.predecessors = Mock(spec=preds, return_value=['a', 'd'])
mock.findOp = Mock(return_value=True)
r = graph_rules.checkForSelectFrames('op', mock, 'a', 'b')
self.assertIsNone(r)
mock.predecessors.assert_called_with('b')
mock.findOp.assert_called_once_with('d', 'SelectRegionFromFrames')
def test_checkAudioOutputType(self):
op_mock = Mock()
op_mock.name = 'OutputAudioPCM'
graph_mock = Mock()
graph_mock.get_node = Mock(return_value={'file':'foo.png'})
graph_mock.dir = '.'
r = graph_rules.checkAudioOutputType(op_mock, graph_mock, 'a', 'b')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
graph_mock.get_node.assert_called_with('b')
graph_mock.get_node = Mock(return_value={'file':'foo.wav'})
r = graph_rules.checkAudioOutputType(op_mock, graph_mock, 'a', 'b')
self.assertIsNone(r)
def test_checkFileTypeUnchanged(self):
op_mock = Mock()
op_mock.name = 'OutputCopy'
graph_mock = Mock()
graph_mock.get_node = lambda x: {'file':'x.png'} if x == 'a' else {'file':'y.pdf'}
graph_mock.dir = '.'
r = graph_rules.checkFileTypeUnchanged(op_mock, graph_mock, 'a', 'b')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
graph_mock.get_node = Mock(return_value={'file':'foo.wav'})
r = graph_rules.checkFileTypeUnchanged(op_mock, graph_mock, 'a', 'b')
self.assertIsNone(r)
def test_checkOutputType(self):
op_mock = Mock()
op_mock.name='OutputPDF'
graph_mock = Mock()
graph_mock.get_image_path = Mock(return_value='foo.png')
r = graph_rules.checkOutputType(op_mock, graph_mock, 'a', 'b')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
graph_mock.get_image_path.assert_called_with('b')
graph_mock.get_image_path = Mock(return_value='foo.pdf')
r = graph_rules.checkOutputType(op_mock, graph_mock, 'a', 'b')
self.assertIsNone(r)
def test_checkJpgOutputType(self):
op_mock = Mock()
op_mock.name='OutputMp4'
graph_mock = Mock()
graph_mock.get_image_path = Mock(return_value='foo.png')
r = graph_rules.checkJpgOutputType(op_mock, graph_mock, 'a', 'b')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
graph_mock.get_image_path.assert_called_with('b')
graph_mock.get_image_path = Mock(return_value='foo.jpg')
r = graph_rules.checkJpgOutputType(op_mock, graph_mock, 'a', 'b')
self.assertIsNone(r)
def test_checkMp4OutputType(self):
op_mock = Mock()
op_mock.name='OutputMp4'
graph_mock = Mock()
graph_mock.get_image_path = Mock(return_value='foo.png')
r = graph_rules.checkMp4OutputType(op_mock, graph_mock, 'a', 'b')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
graph_mock.get_image_path.assert_called_with('b')
graph_mock.get_image_path = Mock(return_value='foo.mpeg')
r = graph_rules.checkMp4OutputType(op_mock, graph_mock, 'a', 'b')
self.assertIsNone(r)
def test_checkSize(self):
mock = Mock()
mock.get_edge = Mock(return_value={'shape change': '(20,20)'})
r = graph_rules.checkSize('Op', mock, 'a', 'b')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
mock.get_edge.return_value = {}
r = graph_rules.checkSize('Op', mock, 'a', 'b')
self.assertIsNone(r)
def test_checkSizeAndExif(self):
def get_MockImage(name, metadata=dict()):
if name == 'a':
return mockImage_frm, name
else:
return mockImage_to, name
mockGraph = Mock(get_edge = Mock(return_value={'shape change': '(1664,-1664)',
'exifdiff':{'Orientation': ['add', 'Rotate 270 CW']}}),
get_image=get_MockImage)
mockImage_frm = Mock(size=(3264, 4928),isRaw=False)
mockImage_to = Mock(size=(4928, 3264), isRaw=False)
r = graph_rules.checkSizeAndExif('Op', mockGraph, 'a', 'b')
self.assertIsNone(r)
mockImage_to.size = (3264, 4928)
r = graph_rules.checkSizeAndExif('Op', mockGraph, 'a', 'b')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
mockGraph.get_edge.return_value = {'shape change': '(1664,-1664)','metadatadiff': {'video':{'_rotate': ('change',90,270)}}}
r = graph_rules.checkSizeAndExif('Op', mockGraph, 'a', 'b')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
def test_checkSizeAndExifPNG(self):
def get_MockImage(name, metadata=dict()):
if 'arw' in name:
if name[0] == 'a':
return mockImage_frm_raw, name
else:
return mockImage_to_raw, name
if name[0] == 'a':
return mockImage_frm, name
else:
return mockImage_to, name
mockGraph = Mock(get_edge = Mock(return_value={'shape change': '(1664,-1664)',
'arguments' : {'Image Rotated':'yes'},
'exifdiff':{'Orientation': ['add', 'Rotate 270 CW']}}),
get_image=get_MockImage)
mockImage_frm = Mock(size=(3264, 4928), isRaw=False)
mockImage_to = Mock(size=(4928, 3264), isRaw=False)
mockImage_frm_raw = Mock(size=(3264, 4928), isRaw=True)
mockImage_to_raw = Mock(size=(4928, 3264), isRaw=True)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.jpg', 'b.jpg')
self.assertIsNone(r)
mockImage_to.size = (3264, 4928)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.jpg', 'b.jpg')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
mockGraph = Mock(get_edge=Mock(return_value={'shape change': '(-50,-50)',
'arguments': {'Image Rotated': 'no'}}),
get_image=get_MockImage)
mockImage_to_raw.size = (3214, 4878)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.arw', 'b.arw')
self.assertIsNone(r)
mockImage_to_raw.size = (3000, 4800)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.arw', 'b.arw')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
mockGraph = Mock(get_edge=Mock(return_value={'shape change': '(-50,-50)',
'arguments': {'Image Rotated': 'no',
'Lens Distortion Applied': 'yes'}}),
get_image=get_MockImage)
mockImage_to_raw.size = (3000, 4800)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.arw', 'b.arw')
self.assertIsNone(r)
mockImage_to.size = (3214, 4878)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.jpg', 'b.jpg')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
mockGraph.get_edge.return_value = {'shape change': '(-30,-30)'}
mockImage_to.size = (3234, 4898)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.jpg', 'b.jpg')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
mockGraph.get_edge.return_value = {'shape change': '(-30,-30)','arguments': {'Lens Distortion Applied':'yes'}}
mockImage_to.size = (3234, 4898)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.jpg', 'b.jpg')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0] == Severity.ERROR)
mockGraph.get_edge.return_value = {'shape change': '(-100,-100)'}
mockImage_to.size = (3164, 4828)
r = graph_rules.checkSizeAndExifPNG('Op', mockGraph, 'a.jpg', 'b.jpg')
self.assertTrue(len(r)> 0)
self.assertTrue(r[0] == Severity.ERROR)
def test_checkMoveMask(self):
import numpy as np
from maskgen.tool_set import GrayFrameWriter, GrayBlockWriter
from maskgen.image_wrap import ImageWrapper
def edge(a,b):
return {}
mask = np.ones((1092, 720), dtype='uint8') * 255
mask[100:200, 100:200] = 0
ImageWrapper(mask).save(filename='test_jt_mask.png')
self.addFileToRemove(filename='test_jt_mask.png')
input_mask = np.zeros((1092, 720, 3), dtype='uint8')
input_mask[100:200, 100:200, :] = 255
ImageWrapper(input_mask).save(filename='test_input_mask.png')
self.addFileToRemove(filename='test_input_mask.png')
mockGraph = Mock(get_edge=Mock(return_value={'inputmaskname': self.locateFile('test_input_mask.png'),
'maskname': self.locateFile('test_jt_mask.png')}))
mockGraph.dir = '.'
r = graph_rules.checkMoveMask('Op', mockGraph, 'a', 'b')
self.assertIsNone(r)
input_mask = np.zeros((1092, 720, 3), dtype='uint8')
input_mask[150:250, 150:250, :] = 255
ImageWrapper(input_mask).save(filename='test_input_mask.png')
self.addFileToRemove(filename='test_input_mask.png')
r = graph_rules.checkMoveMask('Op', mockGraph, 'a', 'b')
self.assertIsNone(r)
input_mask = np.zeros((1092, 720, 3), dtype='uint8')
input_mask[150:300, 150:250, :] = 255
ImageWrapper(input_mask).save(filename='test_input_mask.png')
self.addFileToRemove(filename='test_input_mask.png')
r = graph_rules.checkMoveMask('Op', mockGraph, 'a', 'b')
self.assertIsNotNone(r)
w = GrayBlockWriter('test_jt', 10)
m = np.ones((1092, 720), dtype='uint8') * 255
m[100:200, 100:200] = 0
for i in range(60):
w.write(m, i / 10.0, i+1)
w.close()
self.addFileToRemove(w.filename)
checkname = w.filename
w = GrayFrameWriter('test_input', 10, preferences={'vid_codec':'raw'})
m = np.zeros((1092, 720, 3), dtype='uint8')
m[100:200, 100:200, :] = 255
for i in range(60):
w.write(m, i / 10.0, i+1)
w.close()
self.addFileToRemove(w.filename)
mockGraph.get_edge = Mock(spec=edge, return_value={ 'inputmaskname': self.locateFile(w.filename),
'videomasks': [{'startframe': 1, 'endframe': 60, 'rate': 10, 'type': 'video', 'frames': 60,
'videosegment':checkname, 'starttime': 0, 'endtime': 600},
]
})
r = graph_rules.checkMoveMask('Op', mockGraph, 'a', 'b')
self.assertIsNone(r)
w = GrayFrameWriter('test_input', 10,preferences={'vid_codec':'raw'})
m = np.zeros((1092, 720, 3), dtype='uint8')
m[150:250, 150:250, :] = 255
for i in range(60):
w.write(m, i / 10.0, i+1)
w.close()
self.addFileToRemove(w.filename)
mockGraph.get_edge = Mock(spec=edge, return_value={'inputmaskname': self.locateFile(w.filename),
'videomasks': [{'startframe': 1, 'endframe': 60, 'rate': 10,
'type': 'video', 'frames': 60,
'videosegment': checkname, 'starttime': 0,
'endtime': 600},
]
})
r = graph_rules.checkMoveMask('Op', mockGraph, 'a', 'b')
self.assertIsNone(r)
w = GrayFrameWriter('test_input', 10, preferences={'vid_codec':'raw'})
m = np.zeros((1092, 720, 3), dtype='uint8')
m[150:300, 150:300, :] = 255
for i in range(60):
w.write(m, i / 10.0, i+1)
w.close()
self.addFileToRemove(w.filename)
mockGraph.get_edge = Mock(spec=edge, return_value={'inputmaskname': self.locateFile(w.filename),
'videomasks': [{'startframe': 1, 'endframe': 60, 'rate': 10,
'type': 'video', 'frames': 60,
'videosegment': checkname, 'starttime': 0,
'endtime': 600},
]
})
r = graph_rules.checkMoveMask('Op', mockGraph, 'a', 'b')
self.assertIsNotNone(r)
if __name__ == '__main__':
unittest.main()
```
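A pattern worth noting in the rule tests above: every `graph_rules.check*` function is invoked as `check(op, graph, from_node, to_node)` and, judging by the assertions, returns `None` when the edge passes and a `(Severity, message)` pair when it does not. The sketch below exercises one such rule against a mocked graph, mirroring `test_checkSize`:
```python
from mock import Mock
from maskgen import graph_rules
from maskgen.validation.core import Severity

# a graph whose edge records a size change, which checkSize should flag
graph = Mock()
graph.get_edge = Mock(return_value={'shape change': '(20,20)'})

result = graph_rules.checkSize('Op', graph, 'a', 'b')
assert result is not None and result[0] == Severity.ERROR

# with no recorded shape change the same rule passes and returns None
graph.get_edge.return_value = {}
assert graph_rules.checkSize('Op', graph, 'a', 'b') is None
```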
#### File: Media-Journaling-Tool/tests/test_operations_help.py
```python
import unittest
from test_support import TestSupport
import requests
import logging
from pptx import Presentation
from maskgen.ui.help_tools import *
from maskgen.support import *
class TestOperationsHelp(TestSupport):
def pull_operations_powerpoint(self):
"""
Will pull latest powerpoint from s3, write to the file in resources.
:return:
"""
downloadLink = "https://s3.amazonaws.com/medifor/browser/journal/JournalingToolOperationsDictionary.pptx"
powerpointPlace = self.locateFile("resources/operationSlides.pptx")
r = requests.get(downloadLink)
with open(powerpointPlace, 'wb+') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
fd.close()
return powerpointPlace
def test_operations_help(self):
powerpointPlace = self.pull_operations_powerpoint() #get latest powerpoint
operations_file = self.locateFile("resources/operations.json")
prs = Presentation(powerpointPlace)
prs.save('Operations.pptx')
self.addFileToRemove('Operations.pptx')
ppt_total_slides = len(prs.slides)
        print 'Number of slides in the downloaded deck: ' + str(ppt_total_slides)
help_loader = HelpLoader()
self.assertEqual(len(help_loader.missing), 0) #any files missing that were referenced in the loader
with open(operations_file) as f2:
operations = json.load(f2)
#Do all ops have help sections in the linker, and do all help sections point to valid ops?
opNames_jt = [getValue(op, 'name') for op in getValue(operations, 'operations')]
opNames_help = getValue(help_loader.linker, 'operation').keys()
missing_help = [name for name in opNames_jt if name not in opNames_help]
missing_ops = [name for name in opNames_help if name not in opNames_jt]
if len(missing_help) > 0:
logging.getLogger('maskgen').warning('the following operations are not accounted for in the image_linker: ')
logging.getLogger('maskgen').warning(missing_help)
raise ValueError('operations missing help.')
if len(missing_ops) > 0:
logging.getLogger('maskgen').warning('the following operations are found in the image_linker '
'but are not found in the operations dictionary: ')
logging.getLogger('maskgen').warning(missing_ops)
raise ValueError('invalid/extra operations in help.')
self.remove_files()
def test_semantic_group_slides(self):
downloadLink = "https://s3.amazonaws.com/medifor/browser/journal/SemanticGroups.pptx"
powerpointPlace = self.locateFile("resources/semanticGroups.pptx")
r = requests.get(downloadLink)
with open(powerpointPlace, 'wb+') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
fd.close()
imgLinker = self.locateFile("resources/help/image_linker.json")
groups = self.locateFile("resources/project_properties.json")
prs = Presentation(powerpointPlace)
prs.save('Operations.pptx')
self.addFileToRemove('Operations.pptx')
prs = Presentation(powerpointPlace)
prs.save('Operations2.pptx')
self.addFileToRemove('Operations2.pptx')
slide = len(prs.slides)
        print 'Number of slides in the downloaded deck: ' + str(slide)
jtLocation = os.path.join(os.path.split(imgLinker)[0], 'semanticSlides')
path, dirs, slides = next(os.walk(jtLocation))
print "JT Semantic Slides: " + str(len(slides))
with open(imgLinker) as f:
data = json.load(f)
with open(groups) as f2:
semGroups = json.load(f2)
semanticGroups = []
for d in semGroups["properties"]:
try:
if d["semanticgroup"]:
semanticGroups.append(d)
except:
pass
images = set()
missing = []
for d in semanticGroups:
g = d["description"]
if g not in data["semanticgroup"] or len(data["semanticgroup"][g]["images"]) ==0 or data["semanticgroup"][g]["images"][0] =="":
missing.append(g)
else:
for i in data["semanticgroup"][g]["images"]:
self.assertTrue(os.path.exists(os.path.join(jtLocation, os.path.split(i)[1])),
os.path.join(jtLocation, i))
images.add(i)
data["semanticgroup"].pop(g)
self.assertTrue(missing == [], "Missing is not empty " + str(missing))
self.assertTrue(len(data["semanticgroup"]) == 0, "There are extra operation(s) in the help section")
self.remove_files()
if __name__ == '__main__':
unittest.main()
```
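Both help tests fetch their reference decks the same way: stream the .pptx from S3 in small chunks, save it locally, and count the slides with python-pptx. A condensed sketch of that download-and-inspect step; the URL is the one used in `test_operations_help`, and the local destination name is only illustrative:
```python
import requests
from pptx import Presentation

url = "https://s3.amazonaws.com/medifor/browser/journal/JournalingToolOperationsDictionary.pptx"
destination = 'operations_reference.pptx'  # illustrative local path

# stream the deck to disk in 128-byte chunks, as the tests do
r = requests.get(url)
with open(destination, 'wb+') as fd:
    for chunk in r.iter_content(chunk_size=128):
        fd.write(chunk)

# python-pptx exposes the slides as a sequence, so len() gives the slide count
print(len(Presentation(destination).slides))
```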
#### File: Media-Journaling-Tool/tests/test_software_loader.py
```python
from maskgen import software_loader
import unittest
class TestSoftwareLoader(unittest.TestCase):
def test_load(self):
ops = software_loader.getOperationsByCategory('image', 'image')
self.assertTrue('Mosaic' in ops['AdditionalEffect'])
self.assertTrue('AntiForensicExifQuantizationTable' in ops['AntiForensic'])
self.assertFalse('FilterColorLUT' in ops['Filter'])
self.assertFalse('PasteImageSpliceToFrames' in ops['Paste'])
ops = software_loader.getOperationsByCategory('video', 'video')
self.assertTrue('PasteImageSpliceToFrames' in ops['Paste'])
def test_project_properties(self):
name = "Custom"
values = ["Custom 0", "Custom 1", "Custom 10", "Custom 11"]
prop = software_loader.ProjectProperty(name=name, values=software_loader.Operation(name=name)
.getParameterValuesForType(name, 'image', values))
self.assertTrue(prop.values == values)
if __name__ == '__main__':
unittest.main()
```
#### File: Media-Journaling-Tool/tests/test_tool_set.py
```python
from maskgen import tool_set
import unittest
import numpy as np
from maskgen import image_wrap
from test_support import TestSupport
import sys
class TestToolSet(TestSupport):
def test_diff(self):
args = {'smoothing': 3, 'mode':'bgr', 'aggregate':'max','filling':'morphology'}
a = np.random.randint(0,255,(255,255,3)).astype('int16')
b = np.random.randint(0, 255, (255, 255, 3)).astype('int16')
m = tool_set.mediatedCompare(a,b, arguments= args)
def test_filetype(self):
self.assertEquals(tool_set.fileType(self.locateFile('images/hat.jpg')), 'image')
self.assertEquals(tool_set.fileType(self.locateFile('images/sample.json')), 'text')
f = open('test.log', 'w+')
f.close()
self.addFileToRemove('test.log')
self.assertEquals(tool_set.fileType(self.locateFile('test.log')), 'text')
self.assertEquals(tool_set.fileType(self.locateFile('tests/videos/sample1.mov')), 'video')
self.assertEquals(tool_set.fileType('foo.dng.zip'), 'zip')
self.assertEquals(tool_set.fileType('foo.jpg.zip'), 'zip')
self.assertEquals(tool_set.fileType('foo.png.zip'), 'zip')
self.assertEquals(tool_set.fileType('foo.oh.zip'), 'collection')
self.assertEquals(tool_set.fileType('foo.newgate.zip'), 'collection')
def test_md5(self):
all_md5 = tool_set.md5_of_file(self.locateFile('tests/videos/sample1.mov'))
parts_md5 = tool_set.md5_of_file(self.locateFile('tests/videos/sample1.mov'),load_size=1000)
self.assertEqual(all_md5,parts_md5)
def test_filetypes(self):
self.assertTrue(("mov files", "*.mov") in tool_set.getFileTypes())
self.assertTrue(("zipped masks", "*.tgz") in tool_set.getMaskFileTypes())
def test_zip(self):
import os
filename = self.locateFile('tests/zips/raw.zip')
self.addFileToRemove(os.path.join(os.path.dirname(filename), 'raw.png'))
img = tool_set.openImage(filename,tool_set.getMilliSecondsAndFrameCount('2'),preserveSnapshot=True)
self.assertEqual((5796, 3870),img.size)
tool_set.condenseZip(filename,keep=1)
self.addFileToRemove(os.path.join(os.path.dirname(filename),'raw_c.zip'))
contents = tool_set.getContentsOfZip(os.path.join(os.path.dirname(filename),'raw_c.zip'))
self.assertTrue('59487443539401a4d83512edaab3c1b2.cr2' in contents)
self.assertTrue('7d1800a38ca7a22021bd94e71b6e0f42.cr2' in contents)
self.assertTrue(len(contents) == 2)
def test_rotate(self):
import cv2
from maskgen import cv2api
img1 = np.zeros((100,100),dtype=np.uint8)
img1[20:50,40:50] = 1
mask = np.ones((100,100),dtype=np.uint8)*255
img1[20:50,40] = 2
img = tool_set.applyRotateToCompositeImage(img1, 90, (50,50))
self.assertTrue(sum(sum(img1-img))>40)
img = tool_set.applyRotateToCompositeImage(img,-90,(50,50))
self.assertTrue(sum(sum(img1-img)) <2)
img = tool_set.applyRotateToComposite(-90, img1, np.zeros((100,100),dtype=np.uint8), img1.shape, local=True)
self.assertTrue(sum(img[40,:]) == sum(img1[:,40]))
self.assertTrue(sum(img[40, :]) == 60)
M = cv2.getRotationMatrix2D((35,45), -90, 1.0)
img = cv2.warpAffine(img1, M, (img.shape[1], img.shape[0]),
flags=cv2api.cv2api_delegate.inter_linear)
mask[abs(img - img1) > 0] = 0
img[10:15,10:15]=3
img3 = tool_set.applyRotateToComposite(90, img, mask, img1.shape, local=True)
self.assertTrue(np.all(img3[10:15,10:15]==3))
img3[10:15, 10:15] = 0
def testCropCompare(self):
import cv2
pre = tool_set.openImageFile(self.locateFile('tests/images/prefill.png')).to_array()
post = pre[10:-10,10:-10]
resized_post = cv2.resize(post, (pre.shape[1],pre.shape[0]))
mask, analysis = tool_set.cropResizeCompare(pre,resized_post, arguments={'crop width':pre.shape[1]-20,'crop height':pre.shape[0]-20})
self.assertEquals((10,10), tool_set.toIntTuple(analysis['location']))
def test_fileMask(self):
pre = tool_set.openImageFile(self.locateFile('tests/images/prefill.png'))
post = tool_set.openImageFile(self.locateFile('tests/images/postfill.png'))
mask,analysis,error = tool_set.createMask(pre,post,invert=False,arguments={'tolerance' : 2500})
withtolerance = sum(sum(mask.image_array))
mask.save(self.locateFile('tests/images/maskfill.png'))
mask, analysis,error = tool_set.createMask(pre, post, invert=False)
withouttolerance = sum(sum(mask.image_array))
mask, analysis ,error= tool_set.createMask(pre, post, invert=False, arguments={'tolerance': 2500,'equalize_colors':True})
mask.save(self.locateFile('tests/images/maskfillt.png'))
withtoleranceandqu = sum(sum(mask.image_array))
self.assertTrue(withouttolerance < withtolerance)
self.assertTrue(withtolerance <= withtoleranceandqu)
def test_map(self):
img1 = np.random.randint(0,255,size=(100,120)).astype('uint8')
mask = np.ones((100,120))
src_pts = [(x, y) for x in xrange(20, 30, 1) for y in xrange(50, 60, 1)]
dst_pts = [(x, y) for x in xrange(55, 65, 1) for y in xrange(15, 25, 1)]
result =tool_set._remap(img1,mask,src_pts,dst_pts)
self.assertTrue(np.all(result[55:65,15:25] == img1[20:30,50:60]))
def test_time_format(self):
t = tool_set.getDurationStringFromMilliseconds(100001.111)
self.assertEqual('00:01:40.001111',t)
def test_timeparse(self):
t, f = tool_set.getMilliSecondsAndFrameCount('00:00:00')
self.assertEqual(1, f)
self.assertEqual(0, t)
t, f = tool_set.getMilliSecondsAndFrameCount('1')
self.assertEqual(1, f)
self.assertEqual(0, t)
self.assertTrue(tool_set.validateTimeString('03:10:10.434'))
t,f = tool_set.getMilliSecondsAndFrameCount('03:10:10.434')
self.assertEqual(0, f)
self.assertEqual(1690434, t)
t, f = tool_set.getMilliSecondsAndFrameCount('03:10:10:23')
self.assertFalse(tool_set.validateTimeString('03:10:10:23'))
self.assertEqual(23,f)
self.assertEqual(1690000, t)
t, f = tool_set.getMilliSecondsAndFrameCount('03:10:10:A', defaultValue=(0,0))
self.assertFalse(tool_set.validateTimeString('03:10:10:A'))
self.assertEqual((0,0), (t,f))
time_manager = tool_set.VidTimeManager(startTimeandFrame=(1000,2),stopTimeandFrame=(1003,4))
time_manager.updateToNow(999)
self.assertTrue(time_manager.isBeforeTime())
time_manager.updateToNow(1000)
self.assertTrue(time_manager.isBeforeTime())
time_manager.updateToNow(1001)
self.assertTrue(time_manager.isBeforeTime())
time_manager.updateToNow(1002)
self.assertFalse(time_manager.isBeforeTime())
self.assertFalse(time_manager.isPastTime())
time_manager.updateToNow(1003)
self.assertFalse(time_manager.isPastTime())
time_manager.updateToNow(1004)
self.assertFalse(time_manager.isPastTime())
time_manager.updateToNow(1005)
self.assertFalse(time_manager.isPastTime())
time_manager.updateToNow(1006)
self.assertFalse(time_manager.isPastTime())
time_manager.updateToNow(1007)
self.assertFalse(time_manager.isPastTime())
time_manager.updateToNow(1008)
self.assertTrue(time_manager.isPastTime())
self.assertEqual(9,time_manager.getEndFrame() )
self.assertEqual(4, time_manager.getStartFrame())
time_manager = tool_set.VidTimeManager(startTimeandFrame=(999, 2), stopTimeandFrame=None)
time_manager.updateToNow(999)
self.assertTrue(time_manager.isBeforeTime())
time_manager.updateToNow(1000)
self.assertTrue(time_manager.isBeforeTime())
time_manager.updateToNow(1001)
self.assertFalse(time_manager.isBeforeTime())
self.assertEqual(3, time_manager.getEndFrame())
self.assertEqual(3, time_manager.getStartFrame())
def test_opacity_analysis(self):
# need to redo with generated data.
initialImage = image_wrap.openImageFile(self.locateFile('tests/images/pre_blend.png'))
finalImage = image_wrap.openImageFile(self.locateFile('tests/images/post_blend.png'))
mask = image_wrap.openImageFile(self.locateFile('tests/images/blend_mask.png'))
donorMask = image_wrap.openImageFile(self.locateFile('tests/images/donor_to_blend_mask.png'))
donorImage = image_wrap.openImageFile(self.locateFile('tests/images/donor_to_blend.png'))
result = tool_set.generateOpacityImage(initialImage.to_array(), donorImage.to_array(), finalImage.to_array(), mask.to_array(),
donorMask.to_array(),None)
min = np.min(result)
max = np.max(result)
result = (result - min)/(max-min) * 255.0
def test_gray_writing(self):
import os
import sys
import time
s = time.clock()
writer = tool_set.GrayBlockWriter('test_ts_gw', 29.97002997)
mask_set = list()
for i in range(255):
mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
mask_set.append(mask)
writer.write(mask, 33.3666666667*i,i+1)
for i in range(300,350):
mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
mask_set.append(mask)
writer.write(mask, 33.3666666667*i, i + 1)
writer.close()
fn = writer.get_file_name()
reader = tool_set.GrayBlockReader(fn, end_frame=305)
pos = 0
while True:
mask = reader.read()
if mask is None:
break
compare = mask == mask_set[pos]
self.assertEqual(mask.size,sum(sum(compare)))
if pos == 255:
self.assertEqual(301,reader.current_frame()-1)
pos += 1
reader.close()
self.assertEqual(305, pos)
print time.clock()- s
suffix = 'm4v'
if sys.platform.startswith('win'):
suffix = 'avi'
filename = tool_set.convertToVideo(fn)
self.assertEquals('test_ts_gw_mask_0.0.' + suffix, filename)
self.assertTrue(os.path.exists(filename))
size = tool_set.openImage(filename, tool_set.getMilliSecondsAndFrameCount('00:00:01')).size
self.assertTrue(size == (1920,1090))
os.remove(filename)
os.remove(fn)
def test_global_transform_analysis(self):
from maskgen.image_wrap import ImageWrapper
analysis = {}
mask = np.random.randint(0,2,(1000, 1000), dtype=np.uint8)
mask[mask>0] = 255
tool_set.globalTransformAnalysis(analysis, ImageWrapper(mask), ImageWrapper(mask), mask=mask,
linktype='image.image',
arguments={}, directory='.')
self.assertEquals('yes', analysis['global'])
mask = np.zeros((1000,1000),dtype=np.uint8)
mask[0:30,0:30] = 255
tool_set.globalTransformAnalysis(analysis, ImageWrapper(mask), ImageWrapper(mask), mask=mask, linktype='image.image', arguments={}, directory='.')
self.assertEquals('no',analysis['global'])
self.assertEquals('small', analysis['change size category'])
mask = np.zeros((1000, 1000), dtype=np.uint8)
mask[0:75, 0:75] = 255
tool_set.globalTransformAnalysis(analysis, ImageWrapper(mask), ImageWrapper(mask), mask=mask, linktype='image.image',
arguments={}, directory='.')
self.assertEquals('no', analysis['global'])
self.assertEquals('medium', analysis['change size category'])
mask[0:100, 0:100] = 255
tool_set.globalTransformAnalysis(analysis, ImageWrapper(mask), ImageWrapper(mask), mask=mask, linktype='image.image',
arguments={}, directory='.')
self.assertEquals('no', analysis['global'])
self.assertEquals('large', analysis['change size category'])
tool_set.globalTransformAnalysis(analysis, ImageWrapper(mask), ImageWrapper(mask), mask=mask, linktype='image.image',
arguments={}, directory='.')
def test_SIFT(self):
from maskgen.image_wrap import ImageWrapper
img1 = ImageWrapper(np.random.randint(0,255,(4000,5000,3),dtype='uint8'))
img2 = ImageWrapper(np.random.randint(0,255,(8000,8000,3),dtype='uint8'))
img2.image_array[1000:2000,1000:2000,:] = img1.image_array[2000:3000,2000:3000,:]
mask1 = ImageWrapper(np.zeros((4000,5000),dtype='uint8'))
mask1.image_array[2000:3000,2000:3000] = 255
mask2 = ImageWrapper(np.zeros((8000, 8000), dtype='uint8'))
mask2.image_array[1000:2000,1000:2000] = 255
features = tool_set.getMatchedSIFeatures(img1, img2, mask1=mask1, mask2=mask2, arguments={'homography max matches': '2000', 'homography': 'RANSAC-4'})
img1 = ImageWrapper(np.random.randint(0, 65535, (4000, 5000, 3), dtype='uint16'))
img2 = ImageWrapper(np.random.randint(0, 65535, (8000, 8000, 3), dtype='uint16'))
img2.image_array[1000:2000, 1000:2000, :] = img1.image_array[2000:3000, 2000:3000, :]
mask1 = ImageWrapper(np.zeros((4000, 5000), dtype='uint8'))
mask1.image_array[2000:3000, 2000:3000] = 255
mask2 = ImageWrapper(np.zeros((8000, 8000), dtype='uint8'))
mask2.image_array[1000:2000, 1000:2000] = 255
features = tool_set.getMatchedSIFeatures(img1, img2, mask1=mask1, mask2=mask2, arguments={'homography max matches': '2000', 'homography': 'RANSAC-4'})
def testSIFCheck(self):
good_transform = {
'c': 3,
'r': 3,
'r0': [0.00081380729604268976, -1.0000367374350523, 449.94975699899271],
'r1': [1.0031702728345473, 0.0016183966076946312, -0.30844081957395447],
'r2': [3.1676664384933143e-06, 9.8915322781393527e-06, 1.0]
}
bad_transform = {
"c": 3,
"r": 3,
"r0": [-3.0764931522976067, 3.2522108810844577, 6167.618028229406],
"r1": [-1.0467579456165736, 1.1073481736839244, 2098.303251843684],
"r2": [-0.0004988685498607748, 0.0005275910530971817, 1.0]
}
self.assertTrue(tool_set.isHomographyOk(tool_set.deserializeMatrix(good_transform),450,450))
self.assertFalse(tool_set.isHomographyOk( tool_set.deserializeMatrix(bad_transform),8000,5320))
def test_time_stamp(self):
v1 = self.locateFile('tests/images/test.png')
v2 = self.locateFile('tests/images/donor_to_blend.png')
v3 = self.locateFile('tests/images/test_time_change.png')
self.assertTrue(len(tool_set.dateTimeStampCompare(v1, v1))==0)
self.assertFalse(len(tool_set.dateTimeStampCompare(v1, v2))==0)
self.assertTrue(len(tool_set.dateTimeStampCompare(v1, v3))==0)
def test_compare(self):
from maskgen import tool_set
wrapper1 = image_wrap.openImageFile(self.locateFile('tests/images/pre_blend.png'))
arr2 = np.copy(wrapper1.image_array)
for x in np.random.randint(1,arr2.shape[0]-1,100):
for y in np.random.randint(1, arr2.shape[1] - 1, 100):
arr2[x,y,1] = arr2[x,y,1] + np.random.randint(-20,20)
arr2[100:200,100:200,2] = arr2[100:200,100:200,2] - 25
wrapper2 = image_wrap.ImageWrapper(arr2)
args = [{'aggregate': 'luminance', 'minimum threshold': 3, "weight": 4},
{'aggregate': 'luminance', 'minimum threshold': 3, "weight": 1},
{'aggregate': 'max'}]
for arg in args:
result = tool_set.mediatedCompare(wrapper1.to_array().astype('int16'),
wrapper2.to_array().astype('int16'),
arguments=arg)
self.assertTrue(np.all(result[0][100:200,100:200] == 255))
result[0][100:200, 100:200] = 0
self.assertTrue(np.all(result[0] == 0))
#image_wrap.ImageWrapper(result[0]).save('/Users/ericrobertson/Downloads/foo_max.png')
if __name__ == '__main__':
unittest.main()
```
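The gray-block writer/reader pair exercised in `test_gray_writing` is the mechanism the suite uses to persist per-frame masks together with their timestamps. A minimal round trip under the same signatures the test relies on, assuming `read()` returns `None` once the stored masks are exhausted (the file the writer creates is left in the working directory):
```python
import numpy as np
from maskgen import tool_set

# write ten random masks at ~30 fps; write() takes (mask, time_in_milliseconds, frame_number)
writer = tool_set.GrayBlockWriter('demo_masks', 29.97002997)
for i in range(10):
    mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
    writer.write(mask, 33.3666666667 * i, i + 1)
writer.close()

# read the masks back until read() reports the end of the block
reader = tool_set.GrayBlockReader(writer.get_file_name())
count = 0
while True:
    mask = reader.read()
    if mask is None:
        break
    count += 1
reader.close()
assert count == 10
```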
#### File: Media-Journaling-Tool/tests/test_video_tools.py
```python
import os
import unittest
import numpy as np
from maskgen import video_tools, tool_set
from maskgen.ffmpeg_api import get_meta_from_video
from maskgen.ffmpeg_api import run_ffmpeg
from mock import patch, Mock
from test_support import TestSupport
def cropForTest(frame, no):
return frame[100:-100, 100:-100, :]
def rotateForTest(frame, no):
    # numpy has no rotate(); rot90 with k=-1 turns the frame 90 degrees clockwise
    return np.rot90(frame, -1)
def noiseForTest(frame, no):
if no < 20 or no > 80:
return frame
result = np.round(np.random.normal(0, 2, frame.shape))
return frame + result.astype('uint8')
def sameForTest(frame, no):
return frame
def changeForTest(frame, no):
if no >= 20 and no < 40:
return np.random.randint(255, size=(1090, 1920, 3)).astype('uint8')
return frame
def addForTest(frame, no):
if no != 20:
return frame
return [frame if i == 0 else np.random.randint(255, size=(1090, 1920, 3)).astype('uint8') for i in range(20)]
def addNoise(frames, no):
import random
import struct
import ctypes
b = ctypes.create_string_buffer(len(frames))
buffer_position = 0
for i in range(0, len(frames), 2):
value = struct.unpack('h', frames[i:i + 2])[0]
position = no + buffer_position
if (position >= 24 and position <= 64) or (position >= 192 and position <= 240):
value = random.randint(-32767, 32767)
struct.pack_into('h', b, buffer_position, value)
buffer_position += 2
return b
def sampleFrames(frames, no):
import struct
import ctypes
if no < 1024:
b = ctypes.create_string_buffer(192 - 24)
else:
return None
buffer_position = 0
read_position = 0
for i in range(0, len(frames), 2):
value = struct.unpack('h', frames[i:i + 2])[0]
position = no + read_position
read_position += 2
if position < 24 or position >= 192:
continue
struct.pack_into('h', b, buffer_position, value)
buffer_position += 2
return b
def singleChannelSample(filename, outfilname, skip=0):
import wave
fone = wave.open(filename, 'rb')
countone = fone.getnframes()
ftwo = wave.open(outfilname, 'wb')
ftwo.setparams((1, 2, 44100, 0, 'NONE', 'not compressed'))
toRead = min([1024, countone])
framesone = fone.readframes(toRead)
int_frames = np.fromstring(framesone, 'Int16')[6+skip:48+skip:2]
ftwo.writeframesraw(int_frames.tobytes())
fone.close()
ftwo.close()
def augmentAudio(filename, outfilname, augmentFunc):
import wave
fone = wave.open(filename, 'rb')
countone = fone.getnframes()
onechannels = fone.getnchannels()
onewidth = fone.getsampwidth()
ftwo = wave.open(outfilname, 'wb')
ftwo.setparams(fone.getparams())
position = 0
while True:
toRead = min([1024, countone])
countone -= toRead
framesone = fone.readframes(toRead)
result = augmentFunc(framesone, position)
if result is None:
break
ftwo.writeframes(result)
position += toRead
if countone <= 0:
break
fone.close()
ftwo.close()
def deleteAudio(filename, outfilname, pos, length):
import wave
import struct
import random
fone = wave.open(filename, 'rb')
countone = fone.getnframes()
onechannels = fone.getnchannels()
onewidth = fone.getsampwidth()
ftwo = wave.open(outfilname, 'wb')
ftwo.setparams(fone.getparams())
pos = pos * onechannels * onewidth
length = length * onechannels * onewidth
framesone = fone.readframes(pos)
ftwo.writeframes(framesone)
fone.readframes(length)
countone -= (pos + length)
while countone > 0:
toRead = min([1024, countone])
countone -= toRead
framesone = fone.readframes(toRead)
ftwo.writeframes(framesone)
fone.close()
ftwo.close()
def insertAudio(filename, outfilname, pos, length):
import wave
import struct
import random
fone = wave.open(filename, 'rb')
countone = fone.getnframes()
onechannels = fone.getnchannels()
onewidth = fone.getsampwidth()
ftwo = wave.open(outfilname, 'wb')
ftwo.setparams(fone.getparams())
pos = pos * onechannels * onewidth
length = length * onechannels * onewidth
position = 0
while countone > 0:
toRead = min([1024, countone])
countone -= toRead
framesone = fone.readframes(toRead)
position += toRead
if length > 0 and position > pos:
ftwo.writeframes(framesone[0:pos])
while (length > 0):
value = random.randint(-32767, 32767)
packed_value = struct.pack('h', value)
ftwo.writeframesraw(packed_value)
length -= 1
ftwo.writeframes(framesone[pos:])
else:
ftwo.writeframes(framesone)
fone.close()
ftwo.close()
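# Tests for maskgen.video_tools: mask-set construction, frame/time binding, and
# segment arithmetic (drop, insert, reverse, resize, crop, rotate, cut compare).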
class TestVideoTools(TestSupport):
filesToKill = []
def setUp(self):
source = self.locateFile('tests/videos/sample1.mov')
target = 'sample1_ffr.mov'
os.system('ffmpeg -y -i "{}" -r 10/1 "{}"'.format(source, target))
self.addFileToRemove(target)
source = self.locateFile('tests/videos/sample1.mov')
target = 'sample1_ffr_2.mov'
os.system('ffmpeg -y -i "{}" -r 8/1 "{}"'.format(source, target))
self.addFileToRemove(target)
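    # Helpers that write sequences of random masks to zip / GrayBlock (HDF5) files and
    # return the filename, so tests can attach them to segment descriptors.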
def _init_write_zip_file(self, name, amount, fps):
writer = tool_set.ZipWriter(name, fps)
amount = int(amount)
for i in range(amount):
mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
writer.write(mask)
writer.release()
self.filesToKill.append(writer.filename)
return writer.filename
def _init_write_file(self, name, start_time, start_position, amount, fps, mask_set=None, maskonly=False):
writer = tool_set.GrayBlockWriter(name, fps)
amount = int(amount)
increment = 1000 / float(fps)
count = start_position
for i in range(amount):
if maskonly:
mask = np.random.randint(2, size=(1090, 1920)).astype('uint8') * 255
else:
mask = np.random.randint(255, size=(1090, 1920)).astype('uint8')
if mask_set is not None:
mask_set.append(mask)
writer.write(mask, start_time, count)
start_time += increment
count += 1
writer.close()
self.filesToKill.append(writer.filename)
return writer.filename
def _add_mask_files_to_kill(self, segments):
for segment in segments:
if video_tools.get_file_from_segment(segment) is not None:
self.filesToKill.append(video_tools.get_file_from_segment(segment))
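    # Composite writers: build one "main" random zip/video plus one altered copy per
    # function in alter_funcs, so tests can diff the main file against each variant.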
def _init_write_zip_files(self, name, alter_funcs):
from maskgen.tool_set import ZipWriter
try:
files = []
writer_main = ZipWriter(name,30)
rate = 1 / 30.0
main_count = 0
counters_for_all = [0 for i in alter_funcs]
writers = [ZipWriter(name + str(func).split()[1], 30.0) for func in alter_funcs]
for i in range(100):
mask = np.random.randint(255, size=(1090, 1920, 3)).astype('uint8')
writer_main.write(mask)
nextcounts = []
for writer, func, counter in zip(writers, alter_funcs, counters_for_all):
result = func(mask, i + 1)
if type(result) == list:
for item in result:
writer.write(item)
counter += 1
nextcounts.append(counter)
else:
writer.write(result)
nextcounts.append(counter + 1)
counters_for_all = nextcounts
main_count += 1
except Exception as ex:
            print(ex)
finally:
writer_main.release()
for writer in writers:
files.append(writer.filename)
writer.release()
self.filesToKill.append(writer_main.filename)
self.filesToKill.extend(files)
return writer_main.filename, files
def _init_write_video_file(self, name, alter_funcs):
try:
files = []
writer_main = tool_set.GrayFrameWriter(name, 30 / 1.0,
preferences={'vid_suffix': 'avi', 'vid_codec': 'raw'})
rate = 1 / 30.0
main_count = 0
counters_for_all = [0 for i in alter_funcs]
writers = [tool_set.GrayFrameWriter(name + str(func).split()[1], 30 / 1.0,
preferences={'vid_suffix': 'avi', 'vid_codec': 'raw'}) for func in
alter_funcs]
for i in range(100):
mask = np.random.randint(255, size=(1090, 1920, 3)).astype('uint8')
writer_main.write(mask, main_count, main_count * rate)
nextcounts = []
for writer, func, counter in zip(writers, alter_funcs, counters_for_all):
result = func(mask, i + 1)
if type(result) == list:
for item in result:
writer.write(item, counter, counter * rate)
counter += 1
nextcounts.append(counter)
else:
writer.write(result, counter, counter * rate)
nextcounts.append(counter + 1)
counters_for_all = nextcounts
main_count += 1
except Exception as ex:
            print(ex)
finally:
writer_main.close()
for writer in writers:
files.append(writer.filename)
writer.close()
self.filesToKill.append(writer_main.filename)
self.filesToKill.extend(files)
return writer_main.filename, files
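    # Metadata tests: duration, frame rate and frame counts reported by FileMetaDataLocator.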
def test_duration(self):
expected = 59350
locator = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov'))
duration = locator.get_duration()
self.assertTrue(abs(duration - expected) < 1)
duration = locator.get_duration(audio=True)
self.assertTrue(abs(duration - expected) < 2)
def test_frame_rate(self):
locator = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov'))
rate = locator.get_frame_rate()
self.assertTrue(abs(rate - 13.529) < 0.1)
rate = locator.get_frame_rate(audio=True)
self.assertTrue(abs(rate - 44100) < 1)
def test_frame_count(self):
#result = video_tools.get_frame_count(self.locateFile('tests/videos/fb1afd9b551cde13b6e011a201e42ae7.mts'), (2010, 0), None)
#self.assertEqual(2356, video_tools.get_frames_from_segment(result))
#self.assertEqual(2385, video_tools.get_end_frame_from_segment(result))
#self.assertEqual(80579, int(video_tools.get_end_time_from_segment(result)))
#self.assertEqual(2001.0, video_tools.get_start_time_from_segment(result))
#self.assertEqual(30, video_tools.get_start_frame_from_segment(result))
#result = video_tools.get_frame_count(self.locateFile('tests/videos/fb1afd9b551cde13b6e011a201e42ae7.mts'), (0, 21), (0, 593))
##self.assertEqual(573, video_tools.get_frames_from_segment(result))
#self.assertEqual(593, video_tools.get_end_frame_from_segment(result))
#self.assertEqual(20786, int(video_tools.get_end_time_from_segment(result)))
#self.assertEqual(1700, int(video_tools.get_start_time_from_segment(result)))
#self.assertEqual(21, video_tools.get_start_frame_from_segment(result))
result = video_tools.FileMetaDataLocator('sample1_ffr.mov').get_frame_count((0, 21), (0, 593))
self.assertEqual(59200.0, round(video_tools.get_end_time_from_segment(result)))
self.assertEqual(573, video_tools.get_frames_from_segment(result))
self.assertEqual(593, video_tools.get_end_frame_from_segment(result))
self.assertEqual(2000.0, video_tools.get_start_time_from_segment(result))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result))
result = video_tools.FileMetaDataLocator('sample1_ffr.mov').get_frame_count((2010, 0), (59200, 0))
self.assertEqual(59200.0, round(video_tools.get_end_time_from_segment(result)))
self.assertEqual(573, video_tools.get_frames_from_segment(result))
self.assertEqual(593, video_tools.get_end_frame_from_segment(result))
self.assertEqual(2000.0, video_tools.get_start_time_from_segment(result))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result))
result = video_tools.FileMetaDataLocator('sample1_ffr.mov').get_frame_count((0, 21), None)
self.assertEqual(575, video_tools.get_frames_from_segment(result))
self.assertEqual(595, video_tools.get_end_frame_from_segment(result))
self.assertEqual(59400.0, video_tools.get_end_time_from_segment(result))
self.assertEqual(2000.0, video_tools.get_start_time_from_segment(result))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result))
result = video_tools.FileMetaDataLocator('sample1_ffr.mov').get_frame_count((2010, 0), None)
self.assertEqual(575, video_tools.get_frames_from_segment(result))
self.assertEqual(595, video_tools.get_end_frame_from_segment(result))
self.assertEqual(59400.0, video_tools.get_end_time_from_segment(result))
self.assertEqual(2000.0, video_tools.get_start_time_from_segment(result))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result))
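    # Frame-binding tests: time <-> frame conversion in getMaskSetForEntireVideo for
    # fixed-frame-rate sources (the copy built in setUp and an mxf sample).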
def test_frame_binding_ffr(self):
result = video_tools.FileMetaDataLocator('sample1_ffr.mov').getMaskSetForEntireVideo(
start_time='00:00:02.01',
end_time='00:00:59.29')
self._add_mask_files_to_kill(result)
self.assertEqual(59200.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(573, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(593, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(2000.0, video_tools.get_start_time_from_segment(result[0]))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result[0]))
result = video_tools.FileMetaDataLocator('sample1_ffr.mov').getMaskSetForEntireVideo(
start_time='00:00:02.01:02')
self._add_mask_files_to_kill(result)
self.assertEqual(2200.0, round(video_tools.get_start_time_from_segment(result[0])))
        self.assertEqual(video_tools.get_end_frame_from_segment(result[0]) * 100 - 100, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(23, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(573, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator('sample1_ffr.mov').getMaskSetForEntireVideo(
start_time='00:00:02.01')
self._add_mask_files_to_kill(result)
self.assertEqual(575, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(595, video_tools.get_end_frame_from_segment(result[0]))
        self.assertEqual(video_tools.get_end_frame_from_segment(result[0]) * 100 - 100, video_tools.get_end_time_from_segment(result[0]))
self.assertEqual(2000.0, video_tools.get_start_time_from_segment(result[0]))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result[0]))
result = video_tools.FileMetaDataLocator('sample1_ffr.mov').getMaskSetForEntireVideo(
start_time='23',
end_time='29')
self._add_mask_files_to_kill(result)
self.assertEqual(2200.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(23, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(2800.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(29, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(29 - 23 + 1, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator('videos/Sample2_ffr.mxf').getMaskSetForEntireVideo(
start_time=21, end_time=None) # ffr vid with 'N/A' nbframes.
self._add_mask_files_to_kill(result)
self.assertEqual(567.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(45000.0, round(video_tools.get_end_time_from_segment(result[0])/100)*100)
self.assertEqual(1350, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(1350 - 21 + 1, video_tools.get_frames_from_segment(result[0]))
def test_video_to_mask(self):
from maskgen.tool_set import GrayFrameWriter, GrayBlockReader
w = GrayFrameWriter('test_source',10, preferences={'vid_codec':'raw', 'vid_suffix':'avi'})
        m = np.zeros((1092, 720, 3), dtype='uint8')
m[100:200,100:200,0] = 255
for i in range(180):
w.write(m, i/10.0, i)
w.close()
self.addFileToRemove(w.filename)
sf = w.filename
w = GrayFrameWriter('test',10, preferences={'vid_codec':'raw', 'vid_suffix':'avi'})
        m = np.zeros((1092, 720, 3), dtype='uint8')
m[100:200,100:200,0] = 255
for i in range(60):
w.write(m, i/10.0, i)
w.close()
self.addFileToRemove(w.filename)
masks = video_tools.videoMasksFromVid(w.filename,'test_mask')
self._add_mask_files_to_kill(masks)
hdf5filename = masks[0]['videosegment']
r = GrayBlockReader(hdf5filename)
m = r.read()
c = 0
self.assertEqual(255,m[0,0])
self.assertEqual(0, m[101,101])
self.assertEqual('video',video_tools.get_type_of_segment(masks[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(masks[0]))
self.assertEqual(60, video_tools.get_end_frame_from_segment(masks[0]))
self.assertEqual(10, video_tools.get_rate_from_segment(masks[0]))
self.assertEqual(5900, video_tools.get_end_time_from_segment(masks[0]))
while m is not None:
self.assertEquals(c+2, r.current_frame())
c += 1
m = r.read()
if m is not None:
self.assertEqual(255,m[0,0])
self.assertEqual(0, m[101,101])
self.assertEqual(60, c)
r.close()
masks = video_tools.formMaskForSource(sf,w.filename,'test_mask',
startTimeandFrame=(0,60),
stopTimeandFrame=(0,119))
hdf5filename = masks[0]['videosegment']
r = GrayBlockReader(hdf5filename)
m = r.read()
c = 0
self.assertEqual(255, m[0, 0])
self.assertEqual(0, m[101, 101])
self.assertEqual('video', video_tools.get_type_of_segment(masks[0]))
self.assertEqual(60, video_tools.get_start_frame_from_segment(masks[0]))
self.assertEqual(119, video_tools.get_end_frame_from_segment(masks[0]))
self.assertEqual(10, video_tools.get_rate_from_segment(masks[0]))
self.assertEqual(11800, int(video_tools.get_end_time_from_segment(masks[0])))
while m is not None:
self.assertEquals(c + 61, r.current_frame())
c += 1
m = r.read()
if m is not None:
self.assertEqual(255, m[0, 0])
self.assertEqual(0, m[101, 101])
self.assertEqual(60, c)
def test_frame_binding_vfr(self):
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
) # Variable FR
self._add_mask_files_to_kill(result)
self.assertEqual(0.0, video_tools.get_start_time_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(803, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(803.0, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(59348.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual('video', video_tools.get_type_of_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1_slow.mov')).getMaskSetForEntireVideo(
) # Constant FR
self._add_mask_files_to_kill(result)
self.assertEqual(0.0, video_tools.get_start_time_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(596, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(596, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(59500.0, video_tools.get_end_time_from_segment(result[0]))
self.assertEqual('video', video_tools.get_type_of_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1_slow.mov')).getMaskSetForEntireVideo(
start_time='00:00:02.01') # Constant FR
self._add_mask_files_to_kill(result)
self.assertEqual(2000.0, video_tools.get_start_time_from_segment(result[0]))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(576, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(596, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(59500.0, video_tools.get_end_time_from_segment(result[0]))
self.assertEqual('video', video_tools.get_type_of_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1_swap.mov')).getMaskSetForEntireVideo(
start_time='00:00:02.01', end_time='00:00:59.29') # Variable FR, swapped streams, fails to grab all frames
self._add_mask_files_to_kill(result)
self.assertEqual(59221.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(779, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(801, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(23, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(1982.0, round(video_tools.get_start_time_from_segment(result[0])))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1_swap.mov')).getMaskSetForEntireVideo(
start_time='00:00:02.01') # Variable FR, swapped streams.
self._add_mask_files_to_kill(result)
self.assertEqual(1982.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(23, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(803 - 23 + 1, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
start_time='00:00:02.01')
self._add_mask_files_to_kill(result)
self.assertEqual(1982.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(23, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(803 - 23 + 1, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
start_time='00:00:02.01:02')
self._add_mask_files_to_kill(result)
self.assertEqual(2123.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(24, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(780, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
start_time='00:00:02.01',
end_time='00:00:04.01')
self._add_mask_files_to_kill(result)
self.assertEqual(1982.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(23, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(3965.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(47, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(47 - 23 + 1, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
start_time='23',
end_time='29')
self._add_mask_files_to_kill(result)
self.assertEqual(1982.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(23, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(2548.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(29, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(29 - 23 + 1, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
start_time='29',
end_time='55')
self._add_mask_files_to_kill(result)
self.assertEqual(2548.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(29, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(4532.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(55, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(55 - 29 + 1, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
start_time='29')
self._add_mask_files_to_kill(result)
self.assertEqual(2548.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(29, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(59348.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(803, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(803 - 29 + 1, video_tools.get_frames_from_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
media_types=['audio'])
self._add_mask_files_to_kill(result)
self.assertEqual(0.0, video_tools.get_start_time_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(2617262, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(2617262, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(59348.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual('audio', video_tools.get_type_of_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
start_time='1',
media_types=['video'])
self._add_mask_files_to_kill(result)
self.assertEqual(0.0, video_tools.get_start_time_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(803, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(803, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(59348.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual('video', video_tools.get_type_of_segment(result[0]))
result = video_tools.FileMetaDataLocator(self.locateFile('tests/videos/sample1.mov')).getMaskSetForEntireVideo(
start_time='00:00:02.01',
end_time='00:00:04',
media_types=['audio'])
self._add_mask_files_to_kill(result)
self.assertEqual(2010.0, round(video_tools.get_start_time_from_segment(result[0])))
self.assertEqual(88641, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(4000.0, round(video_tools.get_end_time_from_segment(result[0])))
self.assertEqual(176400, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(176400 - 88641 + 1, video_tools.get_frames_from_segment(result[0]))
def test_extract_mask(self):
amount = 30
fileOne = self._init_write_file('test_ts_em1', 2500, 75, 30, 30, maskonly=True)
fileTwo = self._init_write_file('test_ts_em2', 4100, 123, 27, 30, maskonly=True)
sets = []
change = video_tools.create_segment(
starttime=2500,
startframe=75,
endtime=3500,
endframe=75 + amount - 1,
frames=amount,
rate=30,
videosegment=fileOne,
type='video')
sets.append(change)
change = video_tools.create_segment(
starttime=4100,
startframe=123,
endtime=5000,
endframe=149,
frames=int(27),
rate=30,
videosegment=fileTwo,
type='video')
sets.append(change)
reader = tool_set.GrayBlockReader(fileTwo)
c = 0
while c < 3:
expect_mask = reader.read()
c += 1
reader.close()
mask = video_tools.extractMask(sets, 125)
self.assertTrue(np.all(mask == expect_mask))
def test_formMaskDiffForImage(self):
from maskgen.image_wrap import ImageWrapper
fileOne = self._init_write_zip_file('test_ts_fmdfi.png.zip', 20, 30)
test_image = np.random.randint(255, size=(1090, 1920)).astype('uint8')
masks = video_tools.formMaskDiffForImage(fileOne, ImageWrapper(test_image), 'test_ts_fmdfi', 'op')
self.assertEqual(1, len(masks))
mask = masks[0]
self.assertEquals(20, video_tools.get_frames_from_segment(mask))
self.assertEquals(1, video_tools.get_start_frame_from_segment(mask))
self.assertEquals(20, video_tools.get_end_frame_from_segment(mask))
self.assertEquals(0, video_tools.get_start_time_from_segment(mask))
self.assertEquals(666, int(video_tools.get_end_time_from_segment(mask)))
reader = tool_set.GrayBlockReader(video_tools.get_file_from_segment(mask))
count = 0
while True:
diff_mask = reader.read()
if diff_mask is None:
break
self.assertTrue(np.sum(255-diff_mask) > 0)
count += 1
self.assertEqual(20, count)
def test_inverse_intersection_for_mask(self):
amount = 30
fileOne = self._init_write_file('test_ts_em1', 2500, 75, 30, 30, maskonly=True)
sets = []
change = video_tools.create_segment(
starttime=2500,
startframe=75,
endtime=3500,
endframe=75 + amount - 1,
frames=amount,
rate=30,
videosegment=fileOne,
type='video')
sets.append(change)
test_mask = np.random.randint(2, size=(1090, 1920)).astype('uint8') * 255
new_sets = video_tools.inverse_intersection_for_mask(test_mask, sets)
reader = tool_set.GrayBlockReader(video_tools.get_file_from_segment(new_sets[0]))
while True:
expect_mask = reader.read()
if expect_mask is None:
break
self.assertTrue(np.all((test_mask.astype('int') - 255-expect_mask.astype('int') <= 0)))
def test_remove_intersection(self):
setOne = []
maskitem = video_tools.create_segment(
starttime=900,
startframe=10,
endtime=2900,
endframe=30,
frames=21,
rate=10,
type='video')
setOne.append(maskitem)
maskitem = video_tools.create_segment(
starttime=4900,
startframe=50,
endtime=6900,
endframe=70,
frames=21,
rate=10,
type='video')
setOne.append(maskitem)
setTwo = []
maskitem = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=500,
endframe=6,
frames=6,
rate=10,
type='video')
setTwo.append(maskitem)
maskitem = video_tools.create_segment(
starttime=800,
startframe=9,
endtime=1400,
endframe=15,
frames=7,
rate=10,
type='video')
setTwo.append(maskitem)
maskitem = video_tools.create_segment(
starttime=2400,
startframe=25,
endtime=3400,
endframe=35,
frames=11,
rate=10,
type='video')
setTwo.append(maskitem)
maskitem = video_tools.create_segment(
starttime=3200,
startframe=44,
endtime=4600,
endframe=47,
frames=4,
rate=10,
type='video')
setTwo.append(maskitem)
maskitem = video_tools.create_segment(
starttime=8900,
startframe=90,
endtime=9400,
endframe=95,
frames=6,
rate=10,
type='video')
setTwo.append(maskitem)
finalsets = video_tools.removeIntersectionOfMaskSets(setOne, setTwo)
self.assertEquals(6, len(finalsets))
self.assertEqual([
{'endframe': 6, 'rate': 10, 'starttime': 0, 'frames': 6, 'startframe': 1, 'endtime': 500,
'type': 'video', 'error':0},
{'endframe': 9, 'rate': 10, 'starttime': 800, 'frames': 1, 'startframe': 9, 'endtime': 800.0,
'type': 'video', 'error':0},
{'endframe': 30, 'rate': 10, 'starttime': 900, 'frames': 21, 'startframe': 10, 'endtime': 2900,
'type': 'video', 'error':0},
{'endframe': 47, 'rate': 10, 'starttime': 3200, 'frames': 4, 'startframe': 44, 'endtime': 4600,
'type': 'video', 'error':0},
{'endframe': 70, 'rate': 10, 'starttime': 4900, 'frames': 21, 'startframe': 50, 'endtime': 6900,
'type': 'video', 'error':0},
{'endframe': 95, 'rate': 10, 'starttime': 8900, 'frames': 6, 'startframe': 90, 'endtime': 9400,
'type': 'video', 'error':0}], finalsets)
def test_before_dropping(self):
amount = 30
fileOne = self._init_write_file('test_ts_bd1', 2500, 75, 30, 30)
fileTwo = self._init_write_file('test_ts_bd2', 4100, 123, 27, 30)
sets = []
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=1000,
endframe=30,
frames=30,
rate=30,
error=1.1,
type='video')
sets.append(change)
change = video_tools.create_segment(
starttime=2500,
startframe=75,
endtime=3500,
endframe=75 + amount - 1,
frames=amount,
rate=30,
error=1.2,
videosegment=fileOne,
type='video')
sets.append(change)
change = video_tools.create_segment(
starttime=4100,
startframe=123,
endtime=5000,
endframe=149,
frames=int(27),
rate=30,
error=1.3,
videosegment=fileTwo,
type='video')
sets.append(change)
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 90,
'starttime': 3000,
'endframe': 117,
'endtime': 4000
})], sets)
self._add_mask_files_to_kill(result)
self.assertEqual(3, len(result))
self.assertEqual(15, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(89, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(96, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(122, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 63,
'starttime': 2100,
'endframe': 90,
'endtime': 3000
})], sets)
self._add_mask_files_to_kill(result)
self.assertEqual(3, len(result))
self.assertEqual(15, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(63, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(77, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(96, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(122, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 87,
'starttime': 2900,
'endframe': 93,
'endtime': 3100
})], sets)
self._add_mask_files_to_kill(result)
self.assertEqual(4, len(result))
self.assertEqual(12, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(86, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(12, video_tools.get_frames_from_segment(result[2]))
self.assertEqual(87, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(98, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(117, video_tools.get_start_frame_from_segment(result[3]))
self.assertEqual(143, video_tools.get_end_frame_from_segment(result[3]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[3]))
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 87,
'starttime': 2900,
'endframe': 93,
'endtime': 3100
})], sets, keepTime=True)
self._add_mask_files_to_kill(result)
self.assertEqual(4, len(result))
self.assertEqual(12, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(12, video_tools.get_frames_from_segment(result[2]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(86, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(93, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(104, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(123, video_tools.get_start_frame_from_segment(result[3]))
self.assertEqual(149, video_tools.get_end_frame_from_segment(result[3]))
self.assertEqual(4, len(result))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[3]))
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 1,
'starttime': 0,
'endframe': 93,
'endtime': 3100
})], sets)
self._add_mask_files_to_kill(result)
self.assertEqual(2, len(result))
self.assertEqual(12, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(12, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(31, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(57, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[1]))
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 93,
'starttime': 3100
})], sets)
self._add_mask_files_to_kill(result)
self.assertEqual(2, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(30, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(92, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 1,
'starttime': 0,
'endframe': 93,
'endtime': 3100
})], sets, keepTime=True)
self._add_mask_files_to_kill(result)
self.assertEqual(2, len(result))
self.assertEqual(12, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(93, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(104, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(123, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(149, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[1]))
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 93,
'starttime': 3100,
})], sets, keepTime=True)
self._add_mask_files_to_kill(result)
self.assertEqual(2, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(30, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(18, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(92, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
def test_before_dropping_nomask(self):
amount = 30
sets = []
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=1000,
endframe=30,
frames=30,
rate=30,
type='video',
error=1.1)
sets.append(change)
change = video_tools.create_segment(
starttime=2500,
startframe=75,
endtime=3500,
endframe=75 + amount - 1,
frames=amount,
rate=30,
type='video',
error=1.2)
sets.append(change)
change = video_tools.create_segment(
starttime=4100,
startframe=123,
endtime=5000,
endframe=149,
frames=int(27),
rate=30,
type='video',
error=1.3)
sets.append(change)
result = video_tools.dropFramesWithoutMask([video_tools.create_segment(**{
'startframe': 87,
'starttime': 2900,
'endframe': 92,
'endtime': 3100
})], sets)
self.assertEqual(4, len(result))
self.assertEqual(12, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(86, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(12, video_tools.get_frames_from_segment(result[2]))
self.assertEqual(87, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(98, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(117, video_tools.get_start_frame_from_segment(result[3]))
self.assertEqual(143, video_tools.get_end_frame_from_segment(result[3]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[3]))
result = video_tools.dropFramesWithoutMask([video_tools.create_segment(**{
'startframe': 63,
'starttime': 2100,
'endframe': 90,
'endtime': 3000
})], sets)
self.assertEqual(3, len(result))
self.assertEqual(14, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(63, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(76, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(95, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(121, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[2]))
result = video_tools.dropFramesWithoutMask([video_tools.create_segment(**{
'startframe': 87,
'starttime': 2900,
'endframe': 93,
'endtime': 3100
})], sets, keepTime=True)
self.assertEqual(4, len(result))
self.assertEqual(12, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(11, video_tools.get_frames_from_segment(result[2]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(86, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(94, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(104, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(123, video_tools.get_start_frame_from_segment(result[3]))
self.assertEqual(149, video_tools.get_end_frame_from_segment(result[3]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.3, video_tools.get_error_from_segment(result[3]))
result = video_tools.dropFramesWithoutMask([video_tools.create_segment(**{
'startframe': 1,
'starttime': 0,
'endframe': 93,
'endtime': 3100
})], sets)
self.assertEqual(2, len(result))
self.assertEqual(11, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(11, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(30, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(56, video_tools.get_end_frame_from_segment(result[1]))
result = video_tools.dropFramesWithoutMask([video_tools.create_segment(**{
'startframe': 93,
'starttime': 3100
})], sets)
self.assertEqual(2, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(30, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(18, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(92, video_tools.get_end_frame_from_segment(result[1]))
result = video_tools.dropFramesWithoutMask([video_tools.create_segment(**{
'startframe': 1,
'starttime': 0,
'endframe': 93,
'endtime': 3100
})], sets, keepTime=True)
self.assertEqual(2, len(result))
self.assertEqual(11, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(94, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(104, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(123, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(149, video_tools.get_end_frame_from_segment(result[1]))
result = video_tools.dropFramesWithoutMask([video_tools.create_segment(**{
'startframe': 93,
'starttime': 3100
})], sets, keepTime=True)
self.assertEqual(2, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(30, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(18, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(92, video_tools.get_end_frame_from_segment(result[1]))
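    # Shared assertion helper: applies an insert-style segment operation (func) to the
    # prepared mask sets for several change windows and checks the shifted boundaries and errors.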
def after_general_all(self, sets, func):
result = func(
[video_tools.create_segment(**{
'startframe': 180,
'starttime': 6000,
'endframe': 210,
'endtime': 7000
})], sets)
self.assertEqual(2, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self._add_mask_files_to_kill(result)
result = func([video_tools.create_segment(**{
'startframe': 63,
'starttime': 2100,
'endframe': 90,
'endtime': 3000
})], sets)
self.assertEqual(2, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(103, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self._add_mask_files_to_kill(result)
result = func([video_tools.create_segment(**{
'startframe': 81,
'starttime': 2700,
'endframe': 111,
'endtime': 3700
})], sets)
self.assertEqual(3, len(result))
self.assertEqual(6, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(24, video_tools.get_frames_from_segment(result[2]))
self.assertEqual(112, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(135, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[2]))
self._add_mask_files_to_kill(result)
result = func([video_tools.create_segment(**{
'startframe': 1,
'starttime': 0,
'endframe': 63,
'endtime': 2100
})], sets)
self.assertEqual(2, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(30, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(64, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(93, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(138, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(167, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
self._add_mask_files_to_kill(result)
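    # Builds a spliced copy of sample1_ffr.mov with ffmpeg (roughly two seconds removed
    # between the two parts) and verifies the video/audio segments reported by cutCompare.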
def test_cutCompare(self):
source = 'sample1_ffr.mov'
run_ffmpeg(['-y', '-i', source, '-ss', '00:00:00.00', '-t', '10', 'part1.mov'])
run_ffmpeg(['-y', '-i', source, '-ss', '00:00:12.00', 'part2.mov'])
run_ffmpeg(['-y', '-i', 'part1.mov', '-i', 'part2.mov', '-filter_complex',
'[0:v][0:a][1:v][1:a] concat=n=2:v=1:a=1 [outv] [outa]',
'-map', '[outv]', '-map', '[outa]', 'sample1_cut_full.mov'])
self.filesToKill.append('part1.mov')
self.filesToKill.append('part2.mov')
self.filesToKill.append('sample1_cut_full.mov')
orig_vid = video_tools.FileMetaDataLocator(source).getMaskSetForEntireVideo()
orig_audio = video_tools.FileMetaDataLocator(source).getMaskSetForEntireVideo(media_types=['audio'])
cut_vid = video_tools.FileMetaDataLocator('sample1_cut_full.mov').getMaskSetForEntireVideo()
diff_in_frames = video_tools.get_frames_from_segment(orig_vid[0]) - video_tools.get_frames_from_segment(
cut_vid[0])
maskSet, errors = video_tools.cutCompare(source, 'sample1_cut_full.mov', 'sample1',
tool_set.VidTimeManager(startTimeandFrame=(10000, 0),
stopTimeandFrame=(12000, 0)))
videoSet = [mask for mask in maskSet if video_tools.get_type_of_segment(mask) == 'video']
self.assertEquals(diff_in_frames, video_tools.get_frames_from_segment(videoSet[0]))
audioSet = [mask for mask in maskSet if video_tools.get_type_of_segment(mask) == 'audio']
print(maskSet[0])
print(audioSet[0])
self.assertEqual(1, len(audioSet))
self.assertEqual(865, int(round(video_tools.get_frames_from_segment(audioSet[0]))/100))
self.assertEqual(44, int(video_tools.get_start_frame_from_segment(audioSet[0])/10000))
self.assertEquals(video_tools.get_start_time_from_segment(audioSet[0]),
video_tools.get_start_time_from_segment(maskSet[0]))
self.assertTrue(0.2 > abs(
video_tools.get_end_time_from_segment(audioSet[0]) / 1000.0 - video_tools.get_end_time_from_segment(
maskSet[0]) / 1000.0))
self.assertEqual(44100.0, video_tools.get_rate_from_segment(audioSet[0]))
videoSet = [mask for mask in maskSet if video_tools.get_type_of_segment(mask) == 'video']
self.assertEqual(20, video_tools.get_frames_from_segment(videoSet[0]))
self.assertEqual(101, video_tools.get_start_frame_from_segment(videoSet[0]))
self.assertEqual(120, video_tools.get_end_frame_from_segment(videoSet[0]))
cut_vid = video_tools.FileMetaDataLocator('part1.mov').getMaskSetForEntireVideo()
cut_audio = video_tools.FileMetaDataLocator('part1.mov').getMaskSetForEntireVideo(media_types=['audio'])
diff_in_frames = video_tools.get_frames_from_segment(orig_vid[0]) - video_tools.get_frames_from_segment(
cut_vid[0])
maskSet, errors = video_tools.cutCompare(source, 'part1.mov', 'sample1',
tool_set.VidTimeManager(startTimeandFrame=(12000, 0)))
videoSet = [mask for mask in maskSet if video_tools.get_type_of_segment(mask) == 'video']
self.assertEquals(diff_in_frames, video_tools.get_frames_from_segment(videoSet[0]))
audioSet = [mask for mask in maskSet if video_tools.get_type_of_segment(mask) == 'audio']
diff_in_frames = video_tools.get_frames_from_segment(orig_audio[0]) - video_tools.get_frames_from_segment(
cut_audio[0])
self.assertEqual(1, len(audioSet))
self.assertEquals(diff_in_frames, int(video_tools.get_frames_from_segment(audioSet[0])))
self.assertEqual(int(round(video_tools.get_start_time_from_segment(videoSet[0]))),
int(round(video_tools.get_start_time_from_segment(audioSet[0]))))
self.assertEquals(int(video_tools.get_end_time_from_segment(audioSet[0])),
int((video_tools.get_end_frame_from_segment(audioSet[0])-1)/video_tools.get_rate_from_segment(audioSet[0])*1000.0))
def test_align_streams_meta(self):
meta_and_frames = ([{'codec_type': 'video'}, {'codec_type': 'audio', 'channel_layout': 'mono'}], # normal
[[{'key_frame': 1}], [{'channels': 1}]])
meta, frames = video_tools._align_streams_meta(meta_and_frames, excludeAudio=False)
self.assertTrue(len(meta) == 2 and meta['video']['codec_type'] == 'video')
self.assertTrue(len(frames) == 2 and frames['video'][0]['key_frame'] == 1)
meta, frames = video_tools._align_streams_meta(meta_and_frames, excludeAudio=True) # excludeAudio
self.assertTrue(len(meta) == 1 and len(frames) == 1)
meta_and_frames = ([{'codec_type': 'video'}, {'codec_type': 'audio', 'channel_layout': 'mono'},
# multiple streams of similar type
{'codec_type': 'audio', 'channel_layout': 'mono'}], [])
meta, frames = video_tools._align_streams_meta(meta_and_frames, excludeAudio=False)
        self.assertTrue(len(meta) == 3 and 'mono1' in meta)
"""
VFR NOT WORKING
source = self.locateFile('tests/videos/sample1.mov')
orig_vid = video_tools.getMaskSetForEntireVideo(source)
video_tools.runffmpeg(
['-y', '-i', source, '-ss', '00:00:00.00', '-t', '10', '-r', str(video_tools.get_rate_from_segment(orig_vid[0])), 'part1.mov'])
video_tools.runffmpeg(
['-y', '-i', source, '-ss', '00:00:12.00', '-r', str(video_tools.get_rate_from_segment(orig_vid[0])), 'part2.mov'])
video_tools.runffmpeg(['-y','-i', 'part1.mov', '-i','part2.mov','-filter_complex',
'[0:v][0:a][1:v][1:a] concat=n=2:v=1:a=1 [outv] [outa]',
'-map','[outv]','-map','[outa]','-r', str(video_tools.get_rate_from_segment(orig_vid[0])),'sample2_cut_full.mov'])
self.filesToKill.append('part1.mov')
self.filesToKill.append('part2.mov')
self.filesToKill.append('sample2_cut_full.mov')
cut_vid = video_tools.getMaskSetForEntireVideo('sample2_cut_full.mov')
diff_in_frames = video_tools.get_frames_from_segment(orig_vid[0]) - video_tools.get_frames_from_segment(cut_vid[0])
maskSet, errors = video_tools.cutCompare(source,'sample2_cut_full.mov','sample1',tool_set.VidTimeManager(startTimeandFrame=(10000,0),
stopTimeandFrame=(11900,0)))
audioSet = [mask for mask in maskSet if type=='audio']
videoSet = [mask for mask in maskSet if type== 'video']
self.assertEquals(diff_in_frames, video_tools.get_frames_from_segment(videoSet[0]))
print(maskSet[0])
print(audioSet[0])
self.assertEqual(1, len(audioSet))
self.assertEqual(85526, video_tools.get_frames_from_segment(audioSet[0]))
self.assertEqual(440339, video_tools.get_start_frame_from_segment(audioSet[0]))
self.assertEqual(440339+85526-1, video_tools.get_end_frame_from_segment(audioSet[0]))
self.assertEquals(video_tools.get_start_time_from_segment(audioSet[0]),video_tools.get_start_time_from_segment(maskSet[0]))
self.assertTrue(0.2 > abs(video_tools.get_end_time_from_segment(audioSet[0])/1000.0-video_tools.get_end_time_from_segment(maskSet[0])/1000.0))
self.assertEqual(44100.0, video_tools.get_rate_from_segment(audioSet[0]))
"""
def test_cut(self):
sets = []
change = video_tools.create_segment(
starttime=3078.1,
startframe=94,
endtime=3111.4,
endframe=95,
frames=2,
rate=30,
type='video')
sets.append(change)
change = video_tools.create_segment(
starttime=3078.1,
startframe=94,
endtime=3263.4,
endframe=99,
frames=5,
rate=30,
type='video')
result = video_tools.insertFrames([change], sets)
self.assertEqual(100, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(101, video_tools.get_end_frame_from_segment(result[0]))
self.assertAlmostEqual(3296.73, video_tools.get_start_time_from_segment(result[0]), places=2)
self.assertAlmostEqual(3330.03, video_tools.get_end_time_from_segment(result[0]), places=2)
sets = []
change = video_tools.create_segment(
starttime=3078.1,
startframe=94,
endtime=3111.4,
endframe=95,
frames=2,
rate=30,
type='video')
sets.append(change)
change = video_tools.create_segment(
starttime=3296.7,
startframe=96,
endtime=3296.7,
endframe=96,
frames=2,
rate=30,
type='video')
sets.append(change)
change = video_tools.create_segment(
starttime=3111.4,
startframe=95,
endtime=3111.4,
endframe=95,
frames=1,
rate=30,
type='video')
result = video_tools.insertFrames([change], sets)
self.assertEqual(94, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(94, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(96, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(96, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(97, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(97, video_tools.get_end_frame_from_segment(result[2]))
self.assertAlmostEqual(3144.73, video_tools.get_start_time_from_segment(result[1]), places=2)
self.assertAlmostEqual(3144.73, video_tools.get_end_time_from_segment(result[1]), places=2)
self.assertAlmostEqual(3330.03, video_tools.get_start_time_from_segment(result[2]), places=2)
self.assertAlmostEqual(3330.03, video_tools.get_end_time_from_segment(result[2]), places=2)
sets = []
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=3111.4,
endframe=95,
frames=2,
rate=30,
type='video')
sets.append(change)
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=-33.333,
endframe=0,
frames=0,
rate=30,
type='video')
result = video_tools.insertFrames([change], sets)
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(95, video_tools.get_end_frame_from_segment(result[0]))
self.assertAlmostEqual(3111.40, video_tools.get_end_time_from_segment(result[0]), places=2)
self.assertAlmostEqual(0, video_tools.get_start_time_from_segment(result[0]), places=2)
def test_after_dropping(self):
amount = 30
fileOne = self._init_write_file('test_ts_bd1', 0, 1, 30, 30)
fileTwo = self._init_write_file('test_ts_bd2', 2500, 75, 30, 30)
sets = []
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=1000,
endframe=amount,
frames=amount,
rate=30,
type='video',
error=1.1,
videosegment=fileOne)
sets.append(change)
change = video_tools.create_segment(
starttime=2500,
startframe=75,
endtime=3500,
endframe=75 + amount - 1,
frames=amount,
rate=30,
error=1.2,
type='video',
videosegment=fileTwo)
sets.append(change)
self.after_general_all(sets, video_tools.insertFrames)
def test_resize(self):
fileOne = self._init_write_file('test_td_rs', 0, 1, 30, 30)
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=1000,
endframe=30,
frames=30,
rate=29,
type='video',
error=1.1,
videosegment=fileOne)
result = video_tools.resizeMask([change], (1000, 1720))
self.assertEqual(1, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
def test_crop(self):
fileOne = self._init_write_file('test_td_rs', 0, 1, 30, 30)
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=1000,
endframe=30,
frames=30,
rate=29,
type='video',
error=1.1,
videosegment=fileOne)
result = video_tools.cropMask([change], (100, 100, 900, 1120))
self.assertEqual(1, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self._add_mask_files_to_kill(result)
result = video_tools.insertMask([change], (100, 100, 900, 1120), (1090, 1920))
self.assertEqual(1, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self._add_mask_files_to_kill(result)
def test_rotate(self):
fileOne = self._init_write_file('test_td_rs', 0, 1, 30, 30)
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=1000,
endframe=30,
frames=30,
rate=29,
error=1.1,
type='video',
videosegment=fileOne)
result = video_tools.rotateMask(-90, [change], expectedDims=(1920, 1090))
self.assertEqual(1, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self._add_mask_files_to_kill(result)
def test_reverse(self):
result = video_tools.reverseMasks([video_tools.create_segment(**{
'startframe': 1,
'starttime': 0,
'endframe': 130,
'error': 1.1,
'endtime': 4333,
'type': 'video'
})], [video_tools.create_segment(**{'starttime': 0,
'startframe': 0,
'endframe': 130,
'error': 1.1,
'endtime': 4333,
'type': 'video'})])
self.assertEqual(1, len(result))
self.assertEqual(4333, video_tools.get_end_time_from_segment(result[0]))
self.assertEqual(130, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
amount = 30
fileOne = self._init_write_file('test_tr1', 2500, 75, 30, 30)
fileTwo = self._init_write_file('test_tr2', 4100, 123, 30, 27)
sets = []
change = video_tools.create_segment(
starttime=2500,
startframe=75,
endtime=3500,
endframe=75 + amount - 1,
frames=amount,
rate=30,
videosegment=fileOne,
type='video',
error=1.1)
sets.append(change)
change = video_tools.create_segment(
starttime=4100,
startframe=123,
endtime=5000,
endframe=149,
frames=int(27),
rate=30,
videosegment=fileTwo,
type='video',
error=1.2)
sets.append(change)
result = video_tools.reverseMasks([video_tools.create_segment(**{
'startframe': 90,
'starttime': 3000,
'endframe': 130,
'endtime': 4333,
'type': 'video'
})], sets)
self.assertEqual(4, len(result))
self.assertEqual(15, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(89, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(15, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(130, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(116, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(8, video_tools.get_frames_from_segment(result[2]))
self.assertEqual(97, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(90, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(19, video_tools.get_frames_from_segment(result[3]))
self.assertEqual(149, video_tools.get_end_frame_from_segment(result[3]))
self.assertEqual(131, video_tools.get_start_frame_from_segment(result[3]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[3]))
self._add_mask_files_to_kill(result)
reader_orig = tool_set.GrayBlockReader(video_tools.get_file_from_segment(sets[0]))
reader_new = tool_set.GrayBlockReader(video_tools.get_file_from_segment(result[0]))
        c = 0
        is_equal = True
        while c < 16:
            orig_mask = reader_orig.read()
            if orig_mask is None:
                break
            new_mask = reader_new.read()
            if new_mask is None:
                break
            # accumulate across frames so every mask before the reversed region is compared,
            # not only the last one read before the loop exits
            is_equal = is_equal and bool(np.all(orig_mask == new_mask))
            c += 1
        self.assertTrue(is_equal)
reader_new.close()
reader_new = tool_set.GrayBlockReader(video_tools.get_file_from_segment(result[1]))
reader_new.close()
reader_new = tool_set.GrayBlockReader(video_tools.get_file_from_segment(result[2]))
reader_new.close()
reader_new = tool_set.GrayBlockReader(video_tools.get_file_from_segment(result[3]))
reader_new.close()
reader_orig.close()
for item in sets:
item.pop('videosegment')
result = video_tools.reverseMasks([video_tools.create_segment(**{
'startframe': 90,
'starttime': 3000,
'endframe': 130,
'endtime': 4333,
'type': 'video'
})], sets)
self.assertEqual(4, len(result))
self.assertEqual(15, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(89, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(15, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(130, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(116, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(8, video_tools.get_frames_from_segment(result[2]))
self.assertEqual(97, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(90, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(19, video_tools.get_frames_from_segment(result[3]))
self.assertEqual(149, video_tools.get_end_frame_from_segment(result[3]))
self.assertEqual(131, video_tools.get_start_frame_from_segment(result[3]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[1]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[3]))
self._add_mask_files_to_kill(result)
def test_invertVideoMasks(self):
start_set = []
fileOne = self._init_write_file('test_iv_rs', 0, 1, 30, 30, mask_set=start_set)
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=1000,
endframe=30,
frames=30,
rate=29,
error=1.1,
type='video',
videosegment=fileOne)
result = video_tools.invertVideoMasks([change], 'x', 'y')
self.assertEqual(1, len(result))
self.assertEqual(30, video_tools.get_frames_from_segment(result[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
reader = tool_set.GrayBlockReader(video_tools.get_file_from_segment(result[0]))
self._add_mask_files_to_kill(result)
mask = reader.read()
self.assertEqual(2, reader.current_frame())
self.assertEqual(33, reader.current_frame_time())
self.assertTrue(np.all(255 - mask == start_set[0]))
reader.close()
def _compose_zip_files(self, mod_functions):
return self._init_write_zip_files('test_td_zip_rs', mod_functions)
def _compose_video_files(self, mod_functions):
return self._init_write_video_file('test_td_rs', mod_functions)
def test_zip_mods(self):
self._test_all_mods(self._compose_zip_files, 3300)
def test_vid_mods(self):
self._test_all_mods(self._compose_video_files, None)
def _test_all_mods(self, composer, endTime):
from maskgen.video_tools import FileMetaDataLocator, get_end_time_from_segment
mod_functions = [sameForTest, cropForTest, noiseForTest, addForTest, changeForTest]
fileOne, modFiles = composer(mod_functions)
maskSet = FileMetaDataLocator(fileOne).getMaskSetForEntireVideoForTuples()
if endTime is not None:
self.assertEquals(3300.00, get_end_time_from_segment(maskSet[0]))
analysis = {}
result_same, errors = video_tools.formMaskDiff(fileOne,
modFiles[0],
modFiles[0],
'AddNoise',
startSegment=None,
endSegment=None,
analysis=analysis,
alternateFunction=video_tools.detectCompare,
arguments={})
self.assertEqual(0, len(result_same))
analysis = {}
result_add, errors = video_tools.formMaskDiff(fileOne,
modFiles[4],
modFiles[4],
'PasteFrames',
startSegment=None,
endSegment=None,
analysis=analysis,
alternateFunction=video_tools.pasteCompare,
arguments={'add type': 'replace'})
self.assertEqual(1, len(result_add))
self.assertEqual(20, video_tools.get_start_frame_from_segment(result_add[0]))
self.assertEqual(39, video_tools.get_end_frame_from_segment(result_add[0]))
self.assertEqual(1266, int(video_tools.get_end_time_from_segment(result_add[0])))
result_add, errors = video_tools.formMaskDiff(fileOne,
modFiles[4],
modFiles[4],
'PasteFrames',
startSegment=None,
endSegment="39",
analysis=analysis,
alternateFunction=video_tools.pasteCompare,
arguments={'add type': 'replace'})
self.assertEqual(1, len(result_add))
self.assertEqual(20, video_tools.get_start_frame_from_segment(result_add[0]))
self.assertEqual(39, video_tools.get_end_frame_from_segment(result_add[0]))
self.assertEqual(1266, int(video_tools.get_end_time_from_segment(result_add[0])))
result_add, errors = video_tools.formMaskDiff(fileOne,
modFiles[3],
modFiles[3],
'PasteFrames',
startSegment=None,
endSegment=None,
analysis=analysis,
alternateFunction=video_tools.pasteCompare,
arguments={'add type': 'insert'})
self.assertEqual(1, len(result_add))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result_add[0]))
self.assertEqual(39, video_tools.get_end_frame_from_segment(result_add[0]))
self.assertEqual(19, video_tools.get_frames_from_segment(result_add[0]))
result_add, errors = video_tools.formMaskDiff(fileOne,
modFiles[3],
modFiles[3],
'PasteFrames',
startSegment=None,
endSegment="39",
analysis=analysis,
alternateFunction=video_tools.pasteCompare,
arguments={'add type': 'insert'})
self.assertEqual(1, len(result_add))
self.assertEqual(21, video_tools.get_start_frame_from_segment(result_add[0]))
self.assertEqual(39, video_tools.get_end_frame_from_segment(result_add[0]))
self.assertEqual(19, video_tools.get_frames_from_segment(result_add[0]))
result_crop, errors = video_tools.formMaskDiff(fileOne,
modFiles[1],
modFiles[1],
'TransformCrop',
startSegment=None,
endSegment=None,
analysis=analysis,
alternateFunction=video_tools.cropCompare,
arguments={})
self.assertEqual(1, len(result_crop))
self.assertEqual(100, video_tools.get_frames_from_segment(result_crop[0]))
self.assertEqual(1, video_tools.get_start_frame_from_segment(result_crop[0]))
self.assertTrue(analysis['location'].find('100, 100') > 0)
result_noise1, errors = video_tools.formMaskDiff(fileOne,
modFiles[2],
modFiles[2],
'AddNoise',
startSegment=None,
endSegment=None,
analysis=analysis,
arguments={'aggregate':'sum',
'minimum threshold':1,
'smoothing': 0})
self.assertTrue(len(result_noise1) >= 1)
self.assertEqual(
video_tools.get_end_frame_from_segment(result_noise1[0]) - video_tools.get_start_frame_from_segment(
result_noise1[0]) + 1, video_tools.get_frames_from_segment(result_noise1[0]))
self.assertEqual(20, video_tools.get_start_frame_from_segment(result_noise1[0]))
self.assertEqual(80, video_tools.get_end_frame_from_segment(result_noise1[0]))
result_noise2, errors = video_tools.formMaskDiff(fileOne,
modFiles[2],
modFiles[2],
'AddNoise',
startSegment=None,
endSegment=None,
analysis=analysis,
alternateFunction=video_tools.detectCompare,
arguments={'aggregate': 'sum',
'minimum threshold': 1,
'smoothing': 0})
self.assertTrue(len(result_noise2) >= 1)
self.assertEqual(
video_tools.get_end_frame_from_segment(result_noise2[0]) - video_tools.get_start_frame_from_segment(
result_noise2[0]) + 1, video_tools.get_frames_from_segment(result_noise2[0]))
self.assertEqual(20, video_tools.get_start_frame_from_segment(result_noise1[0]))
self.assertEqual(80, video_tools.get_end_frame_from_segment(result_noise1[0]))
def testMaskSet(self):
source = self.locateFile('tests/videos/sample1.mov')
source_set1 = video_tools.FileMetaDataLocator(source).getMaskSetForEntireVideo(
start_time='29', end_time='55')
source_set2 = video_tools.FileMetaDataLocator(source).getMaskSetForEntireVideo(
start_time='29', end_time='55')
self.assertEqual(len(source_set1), len(source_set2))
for i in range(len(source_set1)):
if 'mask' in source_set1[i]:
source_set1[i].pop('mask')
if 'mask' in source_set2[i]:
source_set2[i].pop('mask')
self.assertEquals(source_set1[i], source_set2[i])
def test_lossy(self):
self.assertFalse(video_tools.is_raw_or_lossy_compressed(self.locateFile('tests/videos/sample1.mov')))
video_tools.x264(self.locateFile('tests/videos/sample1.mov'), 'sample1_ffr_3.mp4')
self.files_to_remove.append('sample1_ffr_3.mp4')
        self.assertTrue(video_tools.is_raw_or_lossy_compressed('sample1_ffr_3.mp4'))
def test_rgb_compress(self):
self.assertFalse(video_tools.is_raw_or_lossy_compressed(self.locateFile('tests/videos/sample1.mov')))
video_tools.x264rgb(self.locateFile('tests/videos/sample1.mov'), 'sample1_ffr_rgb.mp4')
self.files_to_remove.append('sample1_ffr_rgb.mp4')
        self.assertTrue(video_tools.is_raw_or_lossy_compressed('sample1_ffr_rgb.mp4'))
r,e = video_tools.detectCompare(self.locateFile('tests/videos/sample1.mov'),
'sample1_ffr_rgb.mp4',
'sample1_ffr_rgb',
tool_set.VidTimeManager())
self.assertEquals(1,len(r))
self.assertEquals(803,video_tools.get_frames_from_segment(r[0]))
def test_lag(self):
o = video_tools.x264rgb(self.locateFile('tests/videos/LagarithExample.avi'),
'sample1_ffr_rgb.mp4',
force=False)
self.assertEquals(os.path.basename(o),'LagarithExample.avi')
r, e = video_tools.detectCompare(self.locateFile('tests/videos/LagarithExample.avi'),
self.locateFile('tests/videos/original.mp4'),
'lag_compare',
tool_set.VidTimeManager())
self.assertEquals(1, len(r))
self.assertEquals(59, video_tools.get_frames_from_segment(r[0]))
def testMetaDiff(self):
from maskgen.support import getValue
meta_diff = video_tools.form_meta_data_diff(self.locateFile('tests/videos/sample1.mov'),
self.locateFile('tests/videos/sample1_slow_swap.mov'),
media_types=['video'])
self.assertTrue('nb_frames' in getValue({'metadatadiff': meta_diff}, 'metadatadiff.video', {}))
self.assertTrue(meta_diff['video']['duration_ts'] == ('change', '35610', '610304'))
meta_diff = video_tools.form_meta_data_diff(self.locateFile('tests/videos/sample1.mov'),
self.locateFile('tests/videos/sample1_slow_swap.mov'),
media_types=['video', 'audio'])
self.assertTrue(meta_diff['stereo']['bit_rate'] == ('change', '126228', '128273'))
self.assertEquals(meta_diff['video']['bit_rate'] ,('change', '2245468', '1333223'))
meta_diff = video_tools.form_meta_data_diff(self.locateFile('tests/videos/sample1.mov'),
self.locateFile('tests/videos/sample1_slow_swap.mov'),
media_types=['audio'])
self.assertTrue(meta_diff['stereo']['nb_frames'] == ('change', '2563', '2558'))
def test_buf_to_int(self):
stream = np.random.randint(-1000,1000,128,dtype=np.int16)
self.assertTrue(np.all(stream == video_tools.buf_to_int(stream.tostring(),2)))
def test_audio_reader(self):
video_tools.audioWrite('test_tat.0.0.wav', 8192*1024)
self.filesToKill.append('test_tat.0.0.wav')
c1 = video_tools.AudioReader('test_tat.0.0.wav','all',block=8192)
block = c1.getBlock(10000,128)
c1.close()
c1 = video_tools.AudioReader('test_tat.0.0.wav','all', block=8192)
position = c1.findBlock(block, 0)
self.assertIsNotNone(position)
self.assertEqual(10000,position[0])
c1.close()
block = block[1::2]
c1 = video_tools.AudioReader('test_tat.0.0.wav', 'right', block=8192)
position = c1.findBlock(block, 0)
self.assertIsNotNone(position)
self.assertEqual(10000, position[0])
c1.close()
video_tools.audioWrite('test_tat1.0.0.wav', 8192 * 10,channels=1)
self.filesToKill.append('test_tat1.0.0.wav')
c1 = video_tools.AudioReader('test_tat1.0.0.wav', 'all', block=8192)
block = c1.getBlock(10000, 128)
c1.close()
c1 = video_tools.AudioReader('test_tat1.0.0.wav', 'all', block=8192)
position = c1.findBlock(block, 0)
self.assertIsNotNone(position)
self.assertEqual(10000, position[0])
c1.close()
import wave
wf = wave.open('test_tat2.0.0.wav', 'wb')
wf.setparams((1, 2, 44100, 0, 'NONE', 'not compressed'))
value = np.random.randint(-32767, 32767, 1024*1024, dtype=np.int16)
packed_value = value.tobytes()
wf.writeframesraw(packed_value)
wf.close()
self.filesToKill.append('test_tat2.0.0.wav')
wf = wave.open('test_tat3.0.0.wav', 'wb')
wf.setparams((2, 2, 44100, 0, 'NONE', 'not compressed'))
value1 = np.random.randint(-32767, 32767, 2*1024 * 1024, dtype=np.int16)
value1[0:40000:2] = value[0:20000]
value1[46000::2] = value[23000:]
packed_value = value1.tobytes()
wf.writeframesraw(packed_value)
wf.close()
self.filesToKill.append('test_tat3.0.0.wav')
c1 = video_tools.AudioReader('test_tat2.0.0.wav', 'all', block=8192)
c2 = video_tools.AudioReader('test_tat3.0.0.wav', 'left', block=8192)
self.assertIsNone(c1.compareToOtherReader(c2, min_threshold=0))
c1.nextBlock()
c2.nextBlock()
c1.nextBlock()
c2.nextBlock()
self.assertEquals((20000,23000-1), c1.compareToOtherReader(c2, min_threshold=0))
c1.close()
c2.close()
wf = wave.open('test_tat4.0.0.wav', 'wb')
wf.setparams((2, 2, 44100, 0, 'NONE', 'not compressed'))
value2 = np.random.randint(-32767, 32767, 2 * 1024 * 1024, dtype=np.int16)
value2[0:40000] = value1[0:40000]
value2[46000:] = value1[46000:]
packed_value = value2.tobytes()
wf.writeframesraw(packed_value)
wf.close()
c1 = video_tools.AudioReader('test_tat3.0.0.wav', 'all', block=8192)
c2 = video_tools.AudioReader('test_tat4.0.0.wav', 'all', block=8192)
self.assertIsNone(c1.compareToOtherReader(c2, min_threshold=0))
c1.nextBlock()
c2.nextBlock()
c1.nextBlock()
c2.nextBlock()
self.assertEquals((20000, 23000 - 1), c1.compareToOtherReader(c2, min_threshold=0))
c1.close()
c2.close()
c1 = video_tools.AudioReader('test_tat3.0.0.wav', 'right', block=8192)
c2 = video_tools.AudioReader('test_tat4.0.0.wav', 'right', block=8192)
self.assertIsNone(c1.compareToOtherReader(c2, min_threshold=0))
c1.nextBlock()
c2.nextBlock()
c1.nextBlock()
c2.nextBlock()
self.assertEquals((20000, 23000 - 1), c1.compareToOtherReader(c2, min_threshold=0))
c1.close()
c2.close()
wf = wave.open('test_tat5.0.0.wav', 'wb')
wf.setparams((2, 2, 44100, 0, 'NONE', 'not compressed'))
value3 = np.random.randint(-32767, 32767, 2 * 1024 * 1024, dtype=np.int16)
value3[0:40000:2] = value2[1:40000:2]
value3[46000::2] = value2[46001::2]
packed_value = value3.tobytes()
wf.writeframesraw(packed_value)
wf.close()
c1 = video_tools.AudioReader('test_tat4.0.0.wav', 'right', block=8192)
c2 = video_tools.AudioReader('test_tat5.0.0.wav', 'left', block=8192)
self.assertIsNone(c1.compareToOtherReader(c2, min_threshold=0))
c1.nextBlock()
c2.nextBlock()
c1.nextBlock()
c2.nextBlock()
self.assertEquals((20000, 23000 - 1), c1.compareToOtherReader(c2, min_threshold=0))
c1.close()
c2.close()
def test_intersection(self):
amount = 30
sets = []
change = video_tools.create_segment(
starttime=0,
startframe=1,
endtime=1000,
endframe=amount,
frames=amount,
rate=30,
error=1.1,
type='video')
sets.append(change)
change = video_tools.create_segment(
starttime=2500,
startframe=75,
endtime=3500,
endframe=75 + amount - 1,
frames=amount,
rate=30,
error=1.2,
type='video')
sets.append(change)
result = video_tools.dropFramesFromMask([video_tools.create_segment(**{
'startframe': 90,
'starttime': 3000,
'endframe': 100,
'endtime': 4000
})], sets)
self._add_mask_files_to_kill(result)
self.assertEqual(3, len(result))
self.assertEqual(15, video_tools.get_frames_from_segment(result[1]))
self.assertEqual(75, video_tools.get_start_frame_from_segment(result[1]))
self.assertEqual(89, video_tools.get_end_frame_from_segment(result[1]))
self.assertEqual(90, video_tools.get_start_frame_from_segment(result[2]))
self.assertEqual(93, video_tools.get_end_frame_from_segment(result[2]))
self.assertEqual(1.1, video_tools.get_error_from_segment(result[0]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[2]))
self.assertEqual(1.2, video_tools.get_error_from_segment(result[1]))
def testAudio(self):
from maskgen.tool_set import VidTimeManager
video_tools.audioWrite('test_ta.0.0.wav', 512)
self.filesToKill.append('test_ta2.0.0.wav')
self.filesToKill.append('test_ta.0.0.wav')
self.filesToKill.append('test_ta3.0.0.wav')
self.filesToKill.append('test_ta4.0.0.wav')
self.filesToKill.append('test_ta5.0.0.wav')
self.filesToKill.append('test_ta6.0.0.wav')
augmentAudio('test_ta.0.0.wav', 'test_ta2.0.0.wav', addNoise)
augmentAudio('test_ta.0.0.wav', 'test_ta3.0.0.wav', sampleFrames)
singleChannelSample('test_ta.0.0.wav', 'test_ta4.0.0.wav')
singleChannelSample('test_ta.0.0.wav', 'test_ta5.0.0.wav', skip=1)
insertAudio('test_ta.0.0.wav', 'test_ta6.0.0.wav', pos=28, length=6)
deleteAudio('test_ta.0.0.wav', 'test_ta7.0.0.wav', pos=28, length=6)
result, errors = video_tools.audioDeleteCompare('test_ta.0.0.wav', 'test_ta7.0.0.wav', 'test_ta_del', VidTimeManager())
self.assertEqual(1, len(result))
self.assertEqual(113, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(136, video_tools.get_end_frame_from_segment(result[0]))
result, errors = video_tools.audioInsert('test_ta.0.0.wav', 'test_ta6.0.0.wav', 'test_ta_c', VidTimeManager())
self.assertEqual(1, len(result))
self.assertEqual(29, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(40, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(video_tools.get_end_frame_from_segment(result[0]),
video_tools.get_start_frame_from_segment(result[0]) + video_tools.get_frames_from_segment(
result[0]) - 1)
result, errors = video_tools.audioCompare('test_ta.0.0.wav', 'test_ta2.0.0.wav', 'test_ta_c', VidTimeManager())
self.assertEqual(1, len(result))
self.assertEqual(7, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(255, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(video_tools.get_end_frame_from_segment(result[0]),
video_tools.get_start_frame_from_segment(result[0]) + video_tools.get_frames_from_segment(
result[0]) - 1)
result, errors = video_tools.audioSample('test_ta.0.0.wav', 'test_ta3.0.0.wav', 'test_ta_s1', VidTimeManager(startTimeandFrame=(0,7)))
self.assertEqual(1, len(result))
self.assertEqual(7, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(48, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(video_tools.get_end_frame_from_segment(result[0]),
video_tools.get_start_frame_from_segment(result[0]) + video_tools.get_frames_from_segment(
result[0]) - 1)
result, errors = video_tools.audioSample('test_ta.0.0.wav', 'test_ta3.0.0.wav', 'test_ta_s1',
VidTimeManager(startTimeandFrame=(0, 0)))
self.assertEqual(1, len(result))
self.assertEqual(7, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(48, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(video_tools.get_end_frame_from_segment(result[0]),
video_tools.get_start_frame_from_segment(result[0]) + video_tools.get_frames_from_segment(
result[0]) - 1)
result, errors = video_tools.audioSample('test_ta.0.0.wav', 'test_ta4.0.0.wav', 'test_ta_s2', VidTimeManager(startTimeandFrame=(0,3)))
self.assertEqual(1, len(result))
self.assertEqual(4, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(24, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(video_tools.get_end_frame_from_segment(result[0]),
video_tools.get_start_frame_from_segment(result[0]) + video_tools.get_frames_from_segment(
result[0]) - 1)
result, errors = video_tools.audioSample('test_ta.0.0.wav', 'test_ta4.0.0.wav', 'test_ta_s2',
VidTimeManager(startTimeandFrame=(0, 0)))
self.assertEqual(1, len(result))
self.assertEqual(4, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(24, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(video_tools.get_end_frame_from_segment(result[0]),
video_tools.get_start_frame_from_segment(result[0]) + video_tools.get_frames_from_segment(
result[0]) - 1)
result, errors = video_tools.audioSample('test_ta.0.0.wav', 'test_ta5.0.0.wav', 'test_ta_s3', VidTimeManager(),
arguments={'Copy Stream': 'right'})
self.assertEqual(1, len(result))
self.assertEqual(4, video_tools.get_start_frame_from_segment(result[0]))
self.assertEqual(24, video_tools.get_end_frame_from_segment(result[0]))
self.assertEqual(video_tools.get_end_frame_from_segment(result[0]),
video_tools.get_start_frame_from_segment(result[0]) + video_tools.get_frames_from_segment(
result[0]) - 1)
def tearDown(self):
for f in set(self.filesToKill):
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/validation/test_validation_api.py
```python
import unittest
from maskgen.validation.core import *
from tests.test_support import TestSupport
from maskgen.scenario_model import ImageProjectModel
from maskgen.maskgen_loader import MaskGenLoader
from mock import Mock
from maskgen.software_loader import insertCustomRule
class MockImageGraph:
def __init__(self,name,nodes={}):
self.name = name
self.nodes= nodes
self.api_validated_node = None
def get_name(self):
return self.name
def get_node(self,name):
return self.nodes[name]
def setDataItem(self,item,checked_nodes,excludeUpdate=True):
if item == 'api_validated_node':
self.api_validated_node = checked_nodes
def getDataItem(self,item,default_value):
if item != 'api_validated_node':
raise ValueError('Unexpected item {}'.format(item))
return default_value
class MockValidationAPI(ValidationAPI):
def __init__(self,preferences):
self.testid = preferences['test.id']
self.external = preferences['test.external']
self.configured = preferences['test.configured']
def isConfigured(self):
"""
        :return: return True if the validator is configured and usable
@rtype: bool
"""
return self.configured
def isExternal(self):
return self.external
def check_edge(self, op, graph, frm, to):
"""
:param op: Operation structure
:param graph: image graph
:param frm: edge source
:param to: edge target
:return:
@type op: Operation
@type graph: ImageGraph
@type frm: str
@type to: str
"""
return [self.testid]
def check_node(self, node, graph):
"""
:param node: node id
:param graph: image graph
:return:
@type node: str
@type graph: ImageGraph
"""
return [self.testid]
def test(self):
"""
:return: Error message if system is not configured properly otherwise None
@rtype: str
"""
return self.testid
def get_journal_exporttime(self, journalname):
"""
:param journalname: name of the journal
:return: export time of journal
@type journalname: str
@rtype: str
"""
return self.testid
def test_rule_donor(op,graph,frm,to):
return (Severity.ERROR, 'donor')
def test_rule_not_donor(op, graph, frm, to):
return (Severity.ERROR,'not donor')
class TestValidationAPI(TestSupport):
loader = MaskGenLoader()
def setUp(self):
self.loader.load()
def test_configure(self):
preferences = {}
setValidators(preferences,[MockValidationAPI])
c = ValidationAPIComposite({'test.configured': True,
'test.external': False,
'test.id': 'configured',
})
self.assertTrue(c.isConfigured())
c = ValidationAPIComposite({'test.configured': False,
'test.external': False,
'test.id': 'configured',
})
self.assertFalse(c.isConfigured())
def test_external(self):
preferences = {}
setValidators(preferences, [MockValidationAPI])
c = ValidationAPIComposite({'test.configured': True,
'test.external': True,
'test.id': 'external',
},external=True)
self.assertTrue(c.isExternal())
c = ValidationAPIComposite({'test.configured': True,
'test.external': False,
'test.id': 'external',
})
self.assertFalse(c.isExternal())
def test_functions(self):
preferences = {}
setValidators(preferences, [MockValidationAPI])
c = ValidationAPIComposite({'test.configured': True,
'test.external': False,
'test.id': 'functions',
})
self.assertEquals(['functions'], c.check_node(None,None))
self.assertEquals(['functions'], c.check_edge(None,None,None,None))
self.assertEquals('functions', c.get_journal_exporttime(None))
self.assertEquals('functions', c.test())
def test_journal(self):
model = ImageProjectModel(self.locateFile('images/sample.json'))
results = model.validate(external=False)
def test_designation(self):
from maskgen.software_loader import Operation
opManager = Mock()
insertCustomRule('test_rule_donor',test_rule_donor)
insertCustomRule('test_rule_not_donor', test_rule_not_donor)
operation = Operation('test', category='Test', includeInMask=False,
rules={'donor:test_rule_donor','test_rule_not_donor'},
optionalparameters={},
mandatoryparameters={},
description='test',
generateMask='all',
analysisOperations=[],
transitions=[],
compareparameters={})
opManager.getAllOperations = Mock(return_value={'test':operation})
opManager.getOperationWithGroups = Mock(return_value=operation)
graph = Mock()
graph.get_edge = Mock(return_value={'op':'test',
'username':'test',
'arguments': {},
'metadatadiff': {}})
graph.get_image = Mock(return_value=(0,self.locateFile('videos/sample1.mov')))
validator = Validator(self.loader,opManager)
results = validator.run_edge_rules(graph,'a','b', isolated=True)
self.assertEqual(0, len([r for r in results if r.Module == 'test_rule_donor']))
self.assertEqual(1, len([r for r in results if r.Module == 'test_rule_not_donor']))
results = validator.run_edge_rules(graph, 'a', 'b', isolated=False)
self.assertEqual(1, len([r for r in results if r.Module == 'test_rule_donor']))
self.assertEqual(1, len([r for r in results if r.Module == 'test_rule_not_donor']))
def test_browser_api(self):
from datetime import datetime
from maskgen.validation.browser_api import ValidationBrowserAPI
setValidators(self.loader,[ValidationBrowserAPI])
c = ValidationAPIComposite(preferences=self.loader,external=True)
self.assertEquals(None,c.test())
timeresult = c.get_journal_exporttime('023aeac56841a5961648798dfd491b16')
datetime.strptime(timeresult, '%Y-%m-%d %H:%M:%S')
graph = MockImageGraph('foo',
nodes={'023aeac56841a5961648798dfd491b16': {
'file':'023aeac56841a5961648798dfd491b16.jpg','nodetype':'base'},
'fdf9dfdsif': {
'file': 'fdf9dfdsif.jpg', 'nodetype': 'base'},
'06555b4024bf35fcda3705c34726f560': {
'file': '06555b4024bf35fcda3705c34726f560.jpg', 'nodetype': 'final'}
})
result = c.check_node('023aeac56841a5961648798dfd491b16',graph)
self.assertTrue(result is None or len(result) == 0)
result = c.check_node('fdf9dfdsif', graph)
self.assertTrue('Cannot find base media file fdf9dfdsif.jpg in the remote system' in result[0][3])
result = c.check_node('06555b4024bf35fcda3705c34726f560',graph)
self.assertTrue('Final media node 06555b4024bf35fcda3705c34726f560.jpg used in journal 0e0a5952531104c7c21a53760403f051'
in result[0][3])
def test_support_functions(self):
messages = removeErrorMessages([
ValidationMessage(Severity.ERROR,'','', 'big','mod1'),
ValidationMessage(Severity.ERROR, '', '', 'bad','mod1'),
ValidationMessage(Severity.ERROR, '', '', 'wolf','mod1')
],lambda x : x == 'big')
self.assertTrue(hasErrorMessages(messages,lambda x: x == 'bad'))
self.assertTrue(hasErrorMessages(messages, lambda x: x == 'wolf'))
messages = removeErrorMessages([
ValidationMessage(Severity.WARNING, '', '', 'big', 'mod1'),
ValidationMessage(Severity.WARNING, '', '', 'bad', 'mod1'),
ValidationMessage(Severity.WARNING, '', '', 'wolf', 'mod1')
], lambda x: x == 'big')
self.assertFalse(hasErrorMessages(messages, lambda x: x == 'bad'))
self.assertFalse(hasErrorMessages(messages, lambda x: x == 'wolf'))
if __name__ == '__main__':
unittest.main()
```
#### File: nitf_wrapper/tests/test_wrapper.py
```python
import unittest
from nitf_wrapper import opener
class TestToolSet(unittest.TestCase):
def __init__(self,methodName='runTest'):
unittest.TestCase.__init__(self,methodName=methodName)
def test_opener(self):
opener.openNTFFile('2aa5cdc0272a4b299f0c1318b04867d3.ntf')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhm-/nhlscrappo",
"score": 2
} |
#### File: nhlscrappo/nhlscrappo/fetcher.py
```python
import random
from bs4 import BeautifulSoup
from urllib.request import urlopen, Request
import nhlscrappo.constants as C
from nhlscrappo import GameType, ReportType
class ReportFetcher(object):
"""Responsible for fetching and validating the report fields"""
__docroot = "http://www.nhl.com/"
def __init__(self, season, game_num, game_type, report_type):
self.season = season
self.game_num = game_num
self.game_type = game_type
self.report_type = report_type
self.soup = None
def __random_user_agent(self):
user_agent_list = [ \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, " \
"like Gecko) Chrome/22.0.1207.1 Safari/537.1", \
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 " \
"(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "\
"(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like " \
"Gecko) Chrome/20.0.1090.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, " \
"like Gecko) Chrome/19.77.34.5 Safari/537.1", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like " \
"Gecko) Chrome/19.0.1084.9 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like " \
"Gecko) Chrome/19.0.1084.36 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, " \
"like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1063.0 Safari/536.3",\
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3" \
" (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, " \
"like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, " \
"like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like " \
"Gecko) Chrome/19.0.1061.0 Safari/536.3", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like " \
"Gecko) Chrome/19.0.1055.1 Safari/535.24", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, " \
"like Gecko) Chrome/19.0.1055.1 Safari/535.24"]
return random.choice(user_agent_list)
def __load_html(self, url):
if "http://" in url:
req = Request(url, headers = {
"User-Agent": self.__random_user_agent(), \
"Accept": "text/html,application/xhtml+xml,application/" \
"xml;q=0.9,*/*;q=0.8", \
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.3", \
"Accept-Encoding": "none", \
"Accept-Language": "en-US,en;q=0.8", \
"Connection": "keep-alive"})
with urlopen(req) as handle:
html = handle.read()
handle.close()
            return BeautifulSoup(html.decode("utf-8"), "lxml")
else:
with open(url, "r") as handle:
html = handle.read()
handle.close()
return BeautifulSoup(html, features="lxml")
def make_soup(self, local = None):
if local:
self.soup = self.__load_html(local)
else:
url = self.__docroot + "scores/htmlreports/" + str(self.season) + \
str(self.season + 1) + "/" + self.report_type.value + "0" + \
str(self.game_type.value) + ("%04i" % self.game_num) + ".HTM"
self.soup = self.__load_html(url)
return self.soup
@property
def season(self):
return self._season
@season.setter
def season(self, value):
if not isinstance(value, int):
raise TypeError("season must be of type int")
if value < C.MIN_SEASON or value > C.MAX_SEASON:
raise ValueError("Only seasons starting from " + \
str(C.MIN_SEASON) + " until " + str(C.MAX_SEASON) + \
" are supported")
self._season = int(value)
@property
def game_num(self):
return self._game_num
@game_num.setter
def game_num(self, value):
if not isinstance(value, int):
raise TypeError("game_num must be of type int")
self._game_num = value
@property
def game_type(self):
return self._game_type
@game_type.setter
def game_type(self, value):
if value in GameType:
self._game_type = value
else:
raise TypeError("game_type must be of type GameType")
@property
def report_type(self):
return self._report_type
@report_type.setter
def report_type(self, value):
if value in ReportType:
self._report_type = value
else:
raise TypeError("report_type must be of type ReportType")
@property
def soup(self):
return self._soup
@soup.setter
def soup(self, value):
if value is not None and not isinstance(value, BeautifulSoup):
raise TypeError("soup must be of type BeautifulSoup")
self._soup = value
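
# --- usage sketch (not part of the original module) ---
# Illustrative only: the season, game number and report type below are
# assumptions, and fetching a live report requires network access to NHL.com.
# A locally saved report can be parsed instead via make_soup(local="RO020001.HTM").
if __name__ == "__main__":
    fetcher = ReportFetcher(season=2015, game_num=1,
                            game_type=GameType.Regular,
                            report_type=ReportType.Roster)
    soup = fetcher.make_soup()
    print("fetched report" if soup is not None else "no report fetched")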
```
#### File: jhm-/nhlscrappo/setup.py
```python
from nhlscrappo import __version__
from distutils.core import setup
from setuptools import find_packages
def _read(file):
return open(file, 'rb').read()
setup(name="nhlscrappo",
version=__version__,
description="Web scraping API for NHL.com Real Time Shot System (RTSS) reports",
long_description=_read('README.md').decode('utf-8'),
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
url="https://github.com/jhm-/nhlscrappo",
zip_safe=False,
include_package_data=True,
packages=find_packages(),
install_requires=['lxml', 'beautifulsoup4']
)
``` |
{
"source": "jhm-/nhl-workbook",
"score": 2
} |
#### File: jhm-/nhl-workbook/populate_psql.py
```python
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from nhlscrappo import GameType, ReportType
from nhlscrappo.parsers import ShotParser, RosterParser, HomeTOIParser, \
EventParser, PlayParser
import nhlscrappo.constants as C
def connect_sql(**params):
sql_conn = psycopg2.connect(**params)
sql_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
return sql_conn, sql_conn.cursor()
def disconnect_sql(sql_cur, sql_conn):
sql_cur.close()
sql_conn.close()
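
# Hypothetical helper (not in the original script): the same roster INSERT as in
# main(), but written as a parameterized query so psycopg2 handles value quoting.
# The "%04i_roster" table name follows the convention of the tables created in main().
def insert_roster_row(sql_cur, game_num, name, team, number, position):
    sql_command = ("INSERT INTO \"" + ("%04i" % game_num) + "_roster\" "
                   "(\"Name\", \"Team\", \"Number\", \"Position\") "
                   "VALUES (%s, %s, %s, %s)")
    sql_cur.execute(sql_command, (name, team, int(number), position))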
def main():
sql_params = {
"dbname": "postgres",
"user": "postgres",
"password": "<PASSWORD>",
"host": "nhlscrappo-db.cpmwmqjbkju3.us-east-1.rds.amazonaws.com",
"port": 5432
}
s_start = 2018
s_end = 2018
# XXX: check these values fall within an acceptable range
sql_conn, sql_cur = connect_sql(**sql_params)
for s in range(s_start, s_end + 1):
# create regular season database
nhl_dicto = {"season":s}
try:
sql_params["dbname"] = str(nhl_dicto["season"]) + "r"
sql_command = "CREATE DATABASE \"" + sql_params["dbname"] \
+ "\" OWNER " + sql_params["user"]
sql_cur.execute(sql_command)
except psycopg2.errors.DuplicateDatabase:
pass
# reconnect to the this database
disconnect_sql(sql_cur, sql_conn)
sql_params["dbname"] = str(nhl_dicto["season"]) + "r"
sql_conn, sql_cur = connect_sql(**sql_params)
# create a domain to check unsigned integers
try:
sql_command = "CREATE DOMAIN uint2 AS int4 CHECK (VALUE >= 0 " \
"AND VALUE < 65536)"
sql_cur.execute(sql_command)
except psycopg2.errors.DuplicateObject:
pass
# iterate through regular season games
for g in range(1, C.GAME_CT_DICT[s] + 1):
nhl_dicto = {"season":s, "game_num":g, "game_type":GameType.Regular}
# create roster tables
try:
sql_command = "CREATE TABLE \"" + ("%04i" % g) \
+ "_roster\" (\"Name\" text, \"Team\" text, " \
+ "\"Number\" uint2, \"Position\" char)"
sql_cur.execute(sql_command)
except psycopg2.errors.DuplicateTable:
pass
# fill roster tables
roster = RosterParser(**nhl_dicto)
roster.make_soup(local = \
"/home/j/workspace/nhl-workbook/2018-2019/roster/RO02" \
+ ("%04i" %g) + ".HTM")
roster.load_teams()
roster.load_players()
# XXX: check for duplicates before INSERT
for player in roster.rosters["home"]:
sql_command = "INSERT INTO \"" + ("%04i" % g) + "_roster\" " \
+ "(\"Name\", \"Team\", \"Number\", \"Position\") " \
+ "VALUES (\'" + player + "\', \'" + roster.teams["home"] \
+ "\', " + roster.rosters["home"][player]["num"] + ", " \
+ "\'" + roster.rosters["home"][player]["pos"] + "\')"
sql_cur.execute(sql_command)
for player in roster.rosters["away"]:
sql_command = "INSERT INTO \"" + ("%04i" % g) + "_roster\" " \
+ "(\"Name\", \"Team\", \"Number\", \"Position\") " \
+ "VALUES (\'" + player + "\', \'" + roster.teams["away"] \
+ "\', " + roster.rosters["away"][player]["num"] + ", " \
+ "\'" + roster.rosters["away"][player]["pos"] + "\')"
sql_cur.execute(sql_command)
disconnect_sql(sql_cur, sql_conn)
if __name__ == "__main__":
main()
``` |
{
"source": "jhmuller/py-backup",
"score": 2
} |
#### File: jhmuller/py-backup/gitback.py
```python
import os
import sys
import datetime
import time
import inspect
import warnings
import traceback
import hashlib
import zlib
import zipfile
import pickle
import shutil
import re
import logging
import subprocess
from collections import OrderedDict
from collections import namedtuple
from pathlib import PurePath
import pandas as pd
from subprocess import Popen, PIPE
print("python exe: {0}".format(sys.executable))
# import win32api
__version__ = "0.1.1"
# Force logger.warning() to omit the source code line in the message
# formatwarning_orig = warnings.formatwarning
# warnings.formatwarning = lambda message, category, filename, lineno, line=None: \
# formatwarning_orig(message, category, filename, lineno, line='')
class Utilities(object):
def __init__(self):
pass
colors_txt = OrderedDict()
colors_txt['black'] = "\033[90m"
colors_txt['red'] = "\033[91m"
colors_txt["green"] = "\033[92m"
colors_txt["yellow"] = "\033[93m"
colors_txt["blue"] = "\033[94m"
colors_txt["gray"] = "\033[97m"
colors_bg = OrderedDict()
colors_bg['black'] = "\033[100m"
colors_bg["red"] = "\033[101m"
colors_bg["green"] = "\033[102m"
colors_bg["yellow"] = "\033[103m"
colors_bg["blue"] = "\033[104m"
colors_bg["gray"] = "\033[107m"
colors_bg["none"] = "\033[107m"
txt_effects = OrderedDict()
txt_effects["end"] = "\033[0m"
txt_effects["bold"] = "\033[1m"
txt_effects["underline"] = "\033[4m"
txt_effects["blackback"] = "\033[7m"
@staticmethod
    def username():
        # the original returned an undefined name; fall back to the OS login name
        return os.environ.get("USERNAME") or os.environ.get("USER") or os.getlogin()
@staticmethod
def os_whoami():
proc = subprocess.Popen(['whoami'], stdout=subprocess.PIPE)
out, errs = proc.communicate()
return (out)
@staticmethod
def now():
return datetime.datetime.now()
@staticmethod
def nowshortstr(fmt="%Y%m%d_%H%M%S"):
now = datetime.datetime.now()
res = now.strftime(fmt) + "_" + str(now.microsecond % 10000)
return res
@staticmethod
def nowstr(fmt="%Y-%m-%d__%H_%M_%S"):
return datetime.datetime.now().strftime(fmt)
@staticmethod
def color_str(s, txt_color='black', bg_color=None,
bold=False, underline=False,
verbosity=0):
'''
        embed ANSI escape codes for color or effects
        Parameters
        ----------
        s: string to be enhanced
txt_color: color for text. e.g. black, red, green, blue
bg_color: background color
bold: boolean
underline: boolean
verbosity: level of diagnostics
Returns
-------
string with original and enhancements at the start
'''
if verbosity > 0:
print("{0} <{1}>".format(Utilities.whoami(), Utilities.now()))
if not isinstance(s, str):
msg0 = "input s must be string, got {0}".format(type(s))
msg0 += "trying to convert to string"
msg = Utilities.color_str(msg0, txt_color="red")
print(msg)
try:
s = str(s)
except Exception as e:
msg2 = Utilities.color_str(str(e), txt_color="red", bg_color="red")
print(msg2)
raise RuntimeError(msg2)
result = ''
if txt_color:
txt_color = txt_color.lower()
if txt_color not in Utilities.colors_txt.keys():
warnings.warn("txt_color '{0}' not a valid color".format(txt_color))
txt_color = 'black'
else:
txt_color = 'black'
result += Utilities.colors_txt[txt_color]
if bg_color:
bg_color = bg_color.lower()
if bg_color not in Utilities.colors_bg.keys():
warnings.warn("bg_color '{0}' not a valid color".format(txt_color))
bg_color = 'none'
else:
bg_color = 'none'
result += Utilities.colors_bg[bg_color]
if bold:
result += Utilities.txt_effects['bold']
if underline:
result += Utilities.txt_effects['underline']
result += s + Utilities.txt_effects['end']
return result
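
    # Illustrative call (assumes a terminal that honors ANSI escape codes):
    #   print(Utilities.color_str("warning", txt_color="yellow", bold=True))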
@staticmethod
def last_exception_parts():
(extype, exval, tb) = sys.exc_info()
return extype, exval, tb
@staticmethod
def last_exception_info(verbose=0):
'''
returns a string with info about the last exception
:param verbose:
:return: string with info about the last exception
'''
if verbose > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
msg = "Exception {0}".format(datetime.datetime.now())
(extype, exval, tb) = sys.exc_info()
msg += "\n {0} type: {1}".format(str(exval), extype)
tblist = traceback.extract_tb(tb, limit=None)
lines = traceback.format_list(tblist)
for i, line in enumerate(lines):
msg += "\n[{0}] {1}".format(i, line)
result = Utilities.color_str(msg, txt_color="red")
return result
@staticmethod
def drives(verbosity=0):
raise RuntimeError("No longer supported")
fields = ["drive", "dname", "message"]
DriveTup = namedtuple("DriveTup", fields)
dlist = []
drive_strings = None # win32api.GetLogicalDriveStrings()
drives = drive_strings.split('\000')[:-1]
for drive in drives:
dname = None
msg = ''
try:
dname = None # win32api.GetVolumeInformation(drive)[0]
except Exception as e:
msg = str(e)
dt = DriveTup(drive, dname, msg)
dlist.append(dt)
df = pd.DataFrame(dlist)
df.columns = fields
return df
@staticmethod
def module_versions(verbosity=0):
if verbosity > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
mlist = list(filter(lambda x: inspect.ismodule(x[1]), globals().items()))
if verbosity > 0:
print(mlist)
fields = ["filename", "asname", "ver"]
ModTup = namedtuple("ModTup", fields)
tlist = []
for asname, mod in mlist:
fname = asname
ver = None
if asname.startswith("__"):
continue
if hasattr(mod, "__version__"):
fname = asname
if hasattr(mod, "__path__"):
fname = os.path.split(mod.__path__[0])[1]
ver = mod.__version__
mt = ModTup(fname, asname, ver)
tlist.append(mt)
df = pd.DataFrame(tlist)
df.columns = fields
return df
@staticmethod
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return ' %s:%s: %s:%s' % (filename, lineno, category.__name__, message)
@staticmethod
def whoami():
return sys._getframe(1).f_code.co_name
@staticmethod
def is_file_binary(filepath,
nlines=1000,
verbosity=0):
try:
with open(filepath, "r") as f:
for _ in range(nlines):
line = f.readline()
line.lower()
except UnicodeDecodeError:
return True
return False
@staticmethod
def sha_256(fpath,
fmode='rb',# default is text mode
encoding=None,
size=4096):
logger = logging.getLogger(__file__)
m = hashlib.sha256()
try:
lastchunk = None
fsize = os.path.getsize(fpath)
with open(fpath, mode=fmode, encoding=encoding) as fp:
try:
chunk = None
while True:
lastchunk = chunk
chunk = fp.read(size)
if chunk is None or chunk == b'':
break
m.update(chunk)
except Exception as ex:
errmsg = "fpath: {0}".format(fpath)
errmsg += Utilities.last_exception_info()
logger.warning(errmsg)
(extype, exval, tb) = sys.exc_info()
raise extype(exval)
return m.hexdigest()
except PermissionError as pe:
errmsg = "fpath: {0}".format(fpath)
errmsg += Utilities.last_exception_info()
logger.warning(errmsg)
# if tried text, then try binary
if fmode == 'r':
return Utilities.sha_256(fpath, fmode='rb', encoding=None)
else:
raise PermissionError(pe)
except TypeError as te:
errmsg = "fpath: {0}".format(fpath)
errmsg += Utilities.last_exception_info()
logger.warning(errmsg)
if fmode == 'r':
# try binary
return Utilities.sha_256(fpath, fmode='rb', encoding=None)
raise TypeError(te)
except OSError as oe:
errmsg = "fpath: {0}".format(fpath)
errmsg += Utilities.last_exception_info()
logger.warning(errmsg)
OSError(oe)
except Exception as e:
errmsg = "fpath: {0}".format(fpath)
errmsg += Utilities.last_exception_info()
logger.warning(errmsg)
(extype, exval, tb) = sys.exc_info()
raise extype(exval)
@staticmethod
def handle_exc(e, rethrow=False):
msg = Utilities.last_exception_info()
print(msg)
if rethrow:
raise RuntimeError(e)
@staticmethod
def create_new_zip(infilepath, zipfilepath,
compression=zipfile.ZIP_DEFLATED,
compresslevel=zlib.Z_DEFAULT_COMPRESSION,
verbosity=0):
if verbosity > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
if verbosity > 1:
print("creating zipfile {0} from {1} <{2}>".format(infilepath, zipfilepath,
datetime.datetime.now()))
zf = zipfile.ZipFile(zipfilepath, mode='w', compression=compression,
compresslevel=compresslevel)
try:
if verbosity > 1:
print("adding {0}".format(infilepath))
zf.write(infilepath)
except Exception as e:
zf.close()
msg = "infilepath= {0}".format(infilepath)
msg += Utilities.last_exception_info()
print(msg)
raise RuntimeError(msg)
finally:
if verbosity > 1:
print('Done, closing <{0}>'.format(datetime.datetime.now()))
zf.close()
return zf
@staticmethod
def path2string(fpath, sep="_", verbosity=0):
if verbosity > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
pathstring = ""
pathleft = fpath
while True:
pathleft, tail = os.path.split(pathleft)
if len(tail) == 0:
break
pathstring = tail + sep + pathstring
if verbosity > 0:
print("pathstring= {0}".format(pathstring))
return pathstring
@staticmethod
def check_outdir(outdir, create=True, verbosity=0):
if verbosity > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
if os.path.isdir(outdir):
return outdir
warnings.warn("{0} not a dir".format(outdir))
if not create:
return None
if verbosity > 0:
print("trying to create {0}".format(outdir))
os.makedirs(outdir)
if not os.path.isdir(outdir):
raise RuntimeError("Cannot make dir= '{0}'".format(outdir))
return outdir
@staticmethod
def make_metafilepath(outdir, basename="generic",
sep="_", ext="",
verbosity=0):
        # Figure out the filename this code should use based on
# what files already exist.
if verbosity > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
        while True:
            outfilename = basename + sep + Utilities.nowshortstr() + ext
            outfilepath = os.path.join(outdir, outfilename)
            if not os.path.exists(outfilepath):
                break
        if verbosity > 0:
            print("Creating '{0}'".format(outfilename))
        return outfilepath
@staticmethod
def make_tempfilepath(folder, base, sep="_", ext="",
max_attempts=3,
exist_ok=True,
verbosity=0):
if verbosity > 1:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
print("folder len {0}, folner name: {1}".format(len(folder), folder))
filepath = None
if not os.path.isdir(folder):
if verbosity > 0:
print("trying to make folder {0}".format(folder))
try:
os.makedirs(folder, exist_ok=exist_ok)
except FileNotFoundError as fe:
msg = Utilities.last_exception_info()
warnings.warn(msg)
raise FileNotFoundError(fe)
except Exception as e:
msg = Utilities.last_exception_info()
warnings.warn(msg)
raise RuntimeError(e)
attempt = 0
while attempt < max_attempts:
#filename = base + sep + Utilities.nowshortstr() + ext
filename = Utilities.nowshortstr() + ext
filepath = os.path.join(folder, filename)
if len(filepath) > 260:
logger = logging.getLogger(__file__)
msg = "filepath len= {0}".format(len(filepath))
msg += "\n base= {0}".format(base)
base = re.sub(" ","",base)
msg += "newbase= {0}".format(base)
logger.warning(msg)
continue
if not os.path.exists(filepath):
break
attempt += 1
return filepath
@staticmethod
def import_backup_metafile(folder, filename, verbosity=0):
if verbosity > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
filepath = os.path.join(folder, filename)
if not os.path.isfile(filepath):
raise ValueError("Cannot find file {0} in folder {1}".format(filename, folder))
data = []
with open(filepath, "rb") as fp:
while True:
try:
x = pickle.load(fp)
data.append(x)
except EOFError:
# this is expected
break
except Exception as e:
Utilities.handle_exc(e)
return data
@staticmethod
def check_folder_filename(folder, filename, verbosity=0):
if verbosity > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
filepath = os.path.join(folder, filename)
if not os.path.isfile(filepath):
raise ValueError("Cannot find file {0} in folder {1}".format(filename, folder))
meta = Utilities.import_backup_metafile(folder=folder, filename=filename)
if len(meta) == 0:
warnings.warn("Empty metafile {0} in {1}".format(filename, folder))
return False
return True
@staticmethod
def get_meta(folder, filename, verbosity=0):
if verbosity > 0:
print("{0} {1}".format(Utilities.whoami(), Utilities.now()))
if not Utilities.check_folder_filename(folder, filename):
return False
meta = Utilities.import_backup_metafile(folder=folder, filename=filename)
if len(meta) == 0:
warnings.warn("Empty metafile {0} in {1}".format(filename, folder))
return None
if not meta[0]['rec_type'] == "meta_info":
msg = "file= {0}, folder= {1}\n first elem is not meta {2}".format(filename, folder, meta[0])
warnings.warn(msg)
return None
return meta
@staticmethod
def get_meta_fields(folder, filename):
if not Utilities.check_folder_filename(folder, filename):
return False
meta = Utilities.get_meta(folder, filename)
if not meta:
return None
res = {"meta_info": list(meta[0].keys())}
if len(meta) > 1:
res["file_info"] = list(meta[1].keys())
return res
@staticmethod
def get_meta_info(folder, filename, meta_fields=None,
file_info_fields=None, verbosity=0):
if not Utilities.check_folder_filename(folder, filename):
return False
meta = Utilities.get_meta(folder, filename)
if not meta:
return None
result = ""
act_fields = Utilities.get_meta_fields(folder, filename)
fields = []
if meta_fields:
for f in meta_fields:
if f in act_fields['meta_info']:
fields.append(f)
else:
warnings.warn(" requested meta_field {0} not in meta_fields".format(f))
else:
fields = act_fields['meta_info']
msglst = ["{0}: {1}".format(f, meta[0][f]) for f in fields]
result += ", ".join(msglst)
result += "\n"
nfiles = sum([int(e['rec_type']=='file_info') for e in meta])
result += "{0} files".format(nfiles)
result += "\n"
fields = []
if file_info_fields:
for f in file_info_fields:
if f in act_fields['file_info']:
fields.append(f)
else:
warnings.warn(" requested file_info_field {0} not in file_info_fields".format(f))
else:
fields = act_fields['file_info']
for i, elem in enumerate(meta[1:]):
msglst = ["[{0}]: {1}: {2}".format(i, f, elem[f]) for f in fields]
result += ", ".join(msglst)
result += "\n"
return result
@staticmethod
def check_make_path(thepath, verbosity=0):
if os.path.isdir(thepath):
return thepath
warnings.warn("{0} not a dir".format(thepath))
if verbosity > 0:
print("trying to create {0}".format(thepath))
os.makedirs(thepath)
if not os.path.isdir(thepath):
raise RuntimeError("Cannot make dir= '{0}'".format(thepath))
return thepath
@staticmethod
def is_iterable(obj):
try:
obj = iter(obj)
return True
except:
return False
@staticmethod
def check_folders(folders):
if isinstance(folders, str):
folders = [folders]
elif not Utilities.is_iterable(folders):
msg = "folders is type {0}, not iterable".format(type(folders))
raise ValueError(msg)
errmsg = ''
for folder in folders:
if not os.path.isdir(folder):
errmsg += "'{0}' is not a dir".format(folder)
if len(errmsg) > 0:
raise ValueError(errmsg)
return True
@staticmethod
def unzip_to_temp(zipfilepath,
tempfolder=None,
tempname="temp",
verbosity=0):
if verbosity > 0:
ldict = locals()
msg = "{0} <{1}>".format(Utilities.whoami(), Utilities.now())
for key in ldict.keys():
print("{0}: {1}".format(key, ldict[key]))
if tempfolder is None:
tempfolder = os.path.split(zipfilepath)[0]
zfile = zipfile.ZipFile(zipfilepath, mode='r')
zpath = os.path.split(zipfilepath)[0]
while True:
tempname = tempname + Utilities.nowshortstr()
temppath = os.path.join(zpath, tempname)
if not os.path.isfile(temppath):
break
            else:
                msg = "Found temp file {0} in {1}\n try another".format(tempname, zpath)
                warnings.warn(msg)
zinfolist = zfile.infolist()
if len(zinfolist) != 1:
zlen = len(zinfolist)
msg = "file = {0}, zinfolist len= {1}, should be 1".format(zipfilepath, zlen)
raise ValueError(msg)
zinfo = zinfolist[0]
zipname = zinfo.filename
try:
if verbosity > 0:
print("zipname= {0} ".format(zipname))
zfile.extract(member=zipname,
path=temppath, pwd=None)
except Exception as e:
Utilities.last_exception_info()
raise Exception(e)
finally:
zfile.close()
return temppath
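
# Illustrative usage of the helpers above (file and folder names are hypothetical):
#   digest = Utilities.sha_256("somefile.dat", fmode='rb')
#   temppath = Utilities.make_tempfilepath("zztemp", base="demo", ext=".zip")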
class GitBack(object):
def __init__(self,
logfilepath=None,
loglevel=logging.DEBUG,
dt_fmt="%Y%m%d_%H%M%S",
verbosity=0):
self.verbosity = verbosity
self.dt_fmt = dt_fmt
if logfilepath is None:
logfilepath = __name__ + "_" + Utilities.nowstr(fmt=self.dt_fmt) + ".log"
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logging.basicConfig(filename=logfilepath, level=loglevel)
def backup_folders(self, folders=None,
dest_drive=None,
dest_folder=None,
temp_folder=None,
exclude_folders=None,
include_exts=None,
exclude_exts=None,
verbosity=0):
"""
try to backup folders to a destination
:param folders: list of folders to backup
:param dest_drive: destination drive
:param dest_folder: destination root directory
:param temp_folder: for temp files
:param exclude_folders: folder names to exclude
:param include_exts: extensions to include
:param exclude_exts: extensions to exclude
:param verbosity: level of diagnostics
:return: 0 on success
"""
ldict = locals()
verbosity = max(self.verbosity, verbosity)
logger = logging.getLogger(__file__)
Utilities.check_folders(folders)
errmsg = ''
req_param_types = {"folders": list,
"dest_drive": str,
"dest_folder": str}
for pname in req_param_types.keys():
ptype = req_param_types[pname]
val = ldict[pname]
if val is None:
errmsg += "No {0} specified".format(pname)
elif not isinstance(val, ptype):
errmsg += "{0} should be {1}, got {2}".format(pname, ptype, type(val))
if len(errmsg) > 0:
raise RuntimeError(errmsg)
msg = "Backup starting {0}".format(datetime.datetime.now())
logger.info(msg)
for folder in folders:
if not os.path.isdir(folder):
msg = "'{0}' not a folder".format(folder)
raise ValueError(msg)
pp = PurePath(folder)
less_drive = os.sep.join(pp.parts[1:])
# descr_root = os.path.join(dest_drive, dest_root)
destroot = os.path.join(dest_drive, dest_folder)
msg = "Source folder= {0}".format(folder)
msg += "\nSource less drive: {0}".format(less_drive)
msg += "\nDest root: {0}".format(destroot)
logger.info(msg)
try:
if verbosity > 0:
items = os.listdir(folder)
n = min(len(items), 7)
logger.info("Found {0}".format(items[:n]))
self.backup_folder(sourceroot=folder,
destroot=destroot,
exclude_exts=exclude_exts,
exclude_folders=exclude_folders,
tempfolder=temp_folder,
testing=False,
verbosity=verbosity)
except PermissionError as e:
msg = Utilities.last_exception_info(verbose=verbosity)
warnings.warn(msg)
raise PermissionError(msg)
except Exception as e:
msg = Utilities.last_exception_info(verbose=verbosity)
logger.error(msg)
warnings.warn(e)
#raise RuntimeError(msg)
else:
msg = "Seems ok {0}".format(datetime.datetime.now())
logger.info(msg)
return 0
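
    # Illustrative call (all paths are hypothetical):
    #   gb = GitBack(verbosity=1)
    #   gb.backup_folders(folders=["C:\\projects"], dest_drive="D:\\",
    #                     dest_folder="backups", temp_folder="D:\\zztemp",
    #                     exclude_folders=[".git"], exclude_exts=[".tmp"])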
def backup_folder(self, sourceroot,
destroot,
tempfolder=None,
exclude_folders=None,
include_exts=None,
exclude_exts=None,
dt_fmt='%Y-%m-%dT%H:%M:%S',
comp_thresh=0.9,
compression=zipfile.ZIP_DEFLATED,
compresslevel=zlib.Z_DEFAULT_COMPRESSION,
testing=False,
verbosity=0):
"""
try to make a backup of a single folder
:param sourceroot: original (source) path
:param destroot: destination path
:param tempfolder: folder for temp files
:param include_exts: file extensions to include
:param exclude_exts: file extensions to exclude
:param dt_fmt: format for dates
:param comp_thresh: threshold for compression
            if the compression ratio is greater than this, keep the original file (don't compress)
:param compression: method for compression
:param compresslevel: level of compression
:param testing: switch for just testing process
:param verbosity: level of diagnostics
:return: 0 on success
"""
argdict = locals().copy()
verbosity = max(verbosity, self.verbosity)
logger = logging.getLogger(__file__)
if verbosity > 0:
msg = "{0} <{1}>".format(Utilities.whoami(), Utilities.now())
for key in argdict.keys():
msg += "\n {0}: {1}".format(key, argdict[key])
logger.info(msg)
try:
if tempfolder is None:
tempfoldername = "zztemp"
tempfolder = os.path.join(os.path.splitext(__file__)[0], tempfoldername)
if exclude_folders is None:
exclude_folders = []
exclude_folders.append(tempfoldername)
if os.path.isdir(tempfolder):
tempfiles = os.listdir(tempfolder)
for tfile in tempfiles:
os.remove(os.path.join(tempfolder, tfile))
if not os.path.isdir(tempfolder):
os.mkdir(tempfolder)
if verbosity > 0:
logger.info(" tempfolder= {0}".format(tempfolder))
except Exception as e:
msg = Utilities.last_exception_info()
logger.warning(msg)
RuntimeError(e)
# process include_exts and exclude_exts
        for xname in ('include_exts', 'exclude_exts'):
            x = argdict[xname]
            if isinstance(x, str):
                x = [x]
            if isinstance(x, list):
                if len(x) == 0:
                    x = None
            elif x is not None:
                raise ValueError("{0} should be None or string or list of strings".format(xname))
            if verbosity > 1:
                logger.info("{0}: {1}".format(xname, x))
            # rebind explicitly; assigning through locals() does not update local variables
            if xname == 'include_exts':
                include_exts = x
            else:
                exclude_exts = x
pp_sourceroot = PurePath(sourceroot)
if not pp_sourceroot.is_absolute():
logger.warning("sourceroot must be absolute, {0}".format(sourceroot))
pp_destroot = PurePath(destroot)
if not pp_destroot.is_absolute():
logger.warning("destroot must be absolute, {0}".format(destroot))
if (sourceroot == destroot) or (pp_sourceroot == pp_destroot):
msg = "sourceroot cannot be same as destfolder"
msg += "Please choose a different destfolder so files will not be overwritten"
raise RuntimeError(msg)
Utilities.check_make_path(destroot, verbosity=verbosity)
destfolder = os.sep.join(pp_destroot.parts[1:])
# Walk the entire folder tree and compress the files in each folder.
for dirpath, _, filenames in os.walk(sourceroot, topdown=True):
pp_dirpath = PurePath(dirpath)
# dirdrive = pp_dirpath.drive
dirfolder = os.sep.join(pp_dirpath.parts[1:])
skip_folder = False
for ef in exclude_folders:
if ef in pp_dirpath.parts:
skip_folder = True
break
if skip_folder:
msg = "skipping {0} due to exclude_folder: {1}".format(dirpath,
exclude_folders)
logger.info(msg)
continue
if verbosity > 0:
logger.info(" Adding files from '{0}' to '{1}'".format(dirpath, destfolder))
for filename in filenames:
try:
if verbosity > 1:
msg = "filename: {0}, dirpath: {1}".format(filename, dirpath)
logger.info(msg)
file_base, file_ext = os.path.splitext(filename)
if include_exts is not None:
if file_ext not in include_exts:
if verbosity > 1:
logger.info(" Skipping {0}, {1} not in include_exts".format(filename, file_ext))
continue
if exclude_exts is not None:
if file_ext in exclude_exts:
if verbosity > 1:
logger.info(" Skipping {0}, {1} in include_exts".format(filename, file_ext))
continue
# get the sha256 for the source file
sourcepath = os.path.join(dirpath, filename)
source_sha256 = Utilities.sha_256(sourcepath, fmode='rb', encoding=None,
size=4096)
# Note: source path becomes a dest folder,
# copies of source files are stored under there
temp_dest_folder = os.path.join(destroot, dirfolder)
# the backup file will go into the folder/dir this_outpath
this_dest_folder = os.path.join(temp_dest_folder, filename)
if len(this_dest_folder) > 240:
logger.warning(" Potential problem, path length = {0}".format(len(this_dest_folder)))
# try removing spaces from the filename
                        shortfilename = re.sub(" ", "", filename)
                        this_dest_folder = os.path.join(temp_dest_folder, shortfilename)
if len(this_dest_folder) > 256:
msg = "dest path len= {0} too long".format(len(this_dest_folder))
msg += "\n {0}".format(this_dest_folder)
logger.error(msg)
continue
# now check and see if the dest folder exists
found_sha_match = False
if os.path.isdir(this_dest_folder):
# if there is a folder there
                        # check all the files in the folder
                        # to see if one of the files' sha_256 hashes matches the source's
                        # if so, the contents are the same and there is no need to back up
                        # NOTE: should I just check the latest file?
dest_files = os.listdir(this_dest_folder)
for dfile in dest_files:
dpath = os.path.join(this_dest_folder, dfile)
if not os.path.isfile(dpath):
continue
dext = os.path.splitext(dfile)[1]
                            if dext == ".zip":  # splitext keeps the leading dot
# have to unzip to check
temppath = Utilities.unzip_to_temp(dpath,
tempfolder=tempfolder)
dest_sha256 = Utilities.sha_256(temppath, size=4096)
else:
dest_sha256 = Utilities.sha_256(dpath, size=4096)
if source_sha256 == dest_sha256:
found_sha_match = True
break
except OSError as oe:
msg = Utilities.last_exception_info()
logger.warning(msg)
                    raise OSError(oe)
except Exception as e:
msg = Utilities.last_exception_info()
logger.warning(msg)
                    raise RuntimeError(e)
try:
if found_sha_match:
# then the same contents are already there
if verbosity > 1:
msg = "no need to backup {0}, {1} there with same contents".format(filename, dfile)
logger.info(msg)
continue
# at this point we need to backup
if verbosity > 0:
logger.info(" backing up {0} from {1} to {2}".format(filename, dirpath,
this_dest_folder))
if testing:
# if testing nothing more to do
continue
# try to zip the file
def zipit(sourcepath,
tempfolder,
verbosity=0):
zipfilepath = Utilities.make_tempfilepath(tempfolder,
base="temp",
ext=".zip",
verbosity=verbosity)
tries = 0
ok = False
while tries < 10 and not ok:
try:
tries += 1
time.sleep(0.01)
zf = Utilities.create_new_zip(sourcepath, zipfilepath)
#zfile = zipfile.ZipFile(zipfilepath, mode='r')
except OSError as oe:
msg = "\nsourcepath: {0}\nzipfilepath: {1}".format(sourcepath,
zipfilepath)
msg += Utilities.last_exception_info()
print(msg)
except Exception as e:
msg = "\nsourcepath: {0}\nzipfilepath: {1}".format(sourcepath,
zipfilepath)
msg += Utilities.last_exception_info()
print(msg)
else:
ok = True
if not ok:
msg = "can't create zfile {0} ".format(zipfilepath)
raise RuntimeError(msg)
return zipfilepath, zf.filelist[0].filename
zipfilepath, zfilename = zipit(sourcepath, tempfolder, verbosity=verbosity)
orig_size = os.path.getsize(sourcepath)
comp_size = os.path.getsize(zipfilepath)
comp_ratio = 1
if orig_size == 0:
logger.warning("{0} in {1} size is {2}".format(filename, dirpath, orig_size))
else:
comp_ratio = float(comp_size)/orig_size
# if compression ratio not less then thresh
# just use original file
compressed = True
if comp_ratio > comp_thresh:
compressed = False
infilepath = sourcepath
else:
infilepath = zipfilepath
# this_outfilebase = os.path.splitext(ddict['filename'])[0]
# this_outfilename = filename
this_ext = file_ext
if compressed:
this_ext = ".zip"
# construct the dest file path
dest_file_path = Utilities.make_tempfilepath(this_dest_folder,
base=file_base,
ext=this_ext,
verbosity=verbosity)
except OSError as oe:
msg = Utilities.last_exception_info()
logger.info(msg)
                    raise OSError(oe)
except Exception as e:
msg = Utilities.last_exception_info()
logger.info(msg)
                    raise RuntimeError(e)
# copy source to destination
try:
dfolder = os.path.split(dest_file_path)[0]
if not os.path.isdir(dfolder):
msg = " destination folder missing: {0}".format(dfolder)
logger.error(msg)
raise RuntimeError(msg)
                    tsize = None
if not os.path.isfile(infilepath):
msg = " source file missing: {0}".format(infilepath)
logger.error(msg)
raise RuntimeError(msg)
else:
tsize = os.path.getsize(infilepath)
shutil.copy(infilepath, dest_file_path)
except OSError as oe:
errmsg = "\ninfilepath: {0}\n dest_file_path: {1}".format(infilepath,
dest_file_path)
errmsg += Utilities.last_exception_info()
logger.error(errmsg)
raise OSError(oe)
except Exception as exc:
errmsg = "infilepath: {0}\n dest_file_path: {1}".format(infilepath,
dest_file_path)
errmsg += Utilities.last_exception_info()
logger.info(errmsg)
raise RuntimeError(exc)
try:
# create a dictionary with some file backup info
meta_dict = OrderedDict()
meta_dict['filename'] = filename
meta_dict['folder'] = dirpath
meta_dict['filepath'] = sourcepath
meta_dict['orig_size'] = orig_size
meta_dict['comp_size'] = comp_size
meta_dict['sha256'] = source_sha256
meta_dict['ctime'] = datetime.datetime.fromtimestamp(os.path.getctime(sourcepath)).\
strftime(dt_fmt)
meta_dict['mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(sourcepath)).\
strftime(dt_fmt)
meta_dict['comp_ratio'] = comp_ratio
meta_dict['compressed'] = compressed
# construct a path for this meta data
meta_folder = os.path.join(this_dest_folder, "meta_files")
meta_file_path = Utilities.make_tempfilepath(meta_folder,
base="meta",
ext=".txt",
verbosity=verbosity)
# write the meta_dict to a file in dest folder
if len(meta_file_path) > 250:
logger.info(" problem path len= {0}".format(len(meta_file_path)))
with open(meta_file_path, mode="w") as fp:
for key in meta_dict.keys():
fp.write("{0}: {1}\n".format(key, meta_dict[key]))
except FileNotFoundError as fnfe:
errmsg = Utilities.last_exception_info()
logger.info(errmsg)
# ignore it for now
raise FileNotFoundError(fnfe)
except OSError as oe:
errmsg = Utilities.last_exception_info()
logger.info(errmsg)
raise OSError(oe)
except Exception as e:
err_msg = Utilities.last_exception_info()
logger.warning(err_msg)
raise RuntimeError(e)
try:
if verbosity > 0:
msg = "filename: {0}, filepath: {1}".format(filename, sourcepath)
msg += ", osize= {0}, csize= {1}".format(orig_size, comp_size)
msg += ", compressed= {0}".format(compressed)
msg += "\n infilepath: {0} dest folder: {1}".format(infilepath, this_dest_folder)
# logger.info("sha_256= {0}".format(ddict['sha256']))
logger.info(msg)
# remove the temporary zipfile
if os.path.isfile(zipfilepath):
try:
# wait until file fully copied
source_size = os.path.getsize(infilepath)
dest_size = os.path.getsize(dest_file_path)
tries = 0
while dest_size < source_size:
dest_size = os.path.getsize(dest_file_path)
tries += 1
if tries > 20:
break
time.sleep(0.01)
                                if dest_size < source_size:
msg = " {0} tries checking on file, dest not written".format(tries)
raise RuntimeError(msg)
os.remove(zipfilepath)
except Exception as e:
msg = "\n Problem removing zipfile: {0}".format(zipfilepath)
                            msg += "\n    zfilename: {0}".format(zfilename)
msg += Utilities.last_exception_info()
logger.warning(msg)
raise RuntimeError(e)
else:
msg = "can't find zipfile {0}".format(zipfilepath)
raise RuntimeError(msg)
except OSError as oe:
errmsg = Utilities.last_exception_info()
logger.info(errmsg)
raise OSError(oe)
except Exception as e:
err_msg = Utilities.last_exception_info()
logger.warning(err_msg)
raise RuntimeError(e)
try:
tempfiles = os.listdir(tempfolder)
if len(tempfiles) > 0:
msg = "{0} files in {1}".format(len(tempfiles),
tempfolder)
warnings.warn(msg)
except Exception as e:
err_msg = Utilities.last_exception_info()
logger.warning(err_msg)
raise RuntimeError(e)
if verbosity > 0:
logger.info("Done")
# meta_fp.close()
return 0
def find_files_in_backup(self,
backuproot,
filenames,
origfolder=None,
verbosity=0):
argdict = locals().copy()
verbosity = max(verbosity, self.verbosity)
logger = logging.getLogger(__file__)
if verbosity > 0:
msg = "{0} <{1}>".format(Utilities.whoami(), Utilities.now())
for key in argdict.keys():
msg += "\n {0}: {1}".format(key, argdict[key])
logger.info(msg)
if filenames is None:
warnings.warn("filename is None")
return -1
if backuproot is None:
warnings.warn("backuproot is None")
return -1
if not os.path.isdir(backuproot):
warnings.warn("backuproot <{0}> not a dir".format(backuproot))
return -1
found_map = OrderedDict()
for filename in filenames:
found_list = self.find_file_in_backup(backuproot=backuproot,
target_filename=filename,
verbosity=0)
found_map[filename] = found_list
return found_map
def find_file_in_backup(self,
backuproot,
target_filename,
origfolder = None,
verbosity=0):
argdict = locals().copy()
verbosity = max(verbosity, self.verbosity)
logger = logging.getLogger(__file__)
if verbosity > 0:
msg = "{0} <{1}>".format(Utilities.whoami(), Utilities.now())
for key in argdict.keys():
msg += "\n {0}: {1}".format(key, argdict[key])
logger.info(msg)
if target_filename is None:
warnings.warn("target_filename is None")
return -1
if backuproot is None:
warnings.warn("backuproot is None")
return -1
if not os.path.isdir(backuproot):
warnings.warn("backuproot <{0}> not a dir".format(backuproot))
return -1
pp_backuproot = PurePath(backuproot)
if not pp_backuproot.is_absolute():
logger.warning("backuproot must be absolute, {0}".format(backuproot))
found_list = []
for dirpath, dirnames, filenames in os.walk(backuproot, topdown=True):
#pp_dirpath = PurePath(dirpath)
# dirdrive = pp_dirpath.drive
#dirfolder = os.sep.join(pp_dirpath.parts[1:])
for dirname in dirnames:
dirfolder = os.path.join(dirpath, dirname)
if dirname == target_filename:
files = [f for f in os.listdir(dirfolder) if os.path.isfile(os.path.join(dirfolder,f))]
tup = (dirname, files)
found_list.append(tup)
return found_list
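        # Illustrative return shape (hypothetical names): searching a backup root for
        # "notes.txt" yields one (dirname, files) tuple per backup folder named after the file,
        # e.g. [("notes.txt", ["notes.zip"])]; an empty list means nothing matched.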
@staticmethod
def recover(folder,
filelist,
outdir,
verbosity=0):
logger = logging.getLogger(__file__)
if not os.path.isdir(folder):
logger.warning("{0} is not a folder".format(folder))
return None
meta = Utilities.get_meta(folder, meta_filename)
if not meta:
return None
if len(meta) == 1:
logger.warning("No file_info records")
return None
# res = Utilities.check_outdir(outdir, create=create_outdir, verbosity=0)
filemap = {}
for i, e in enumerate(meta[1:]):
filemap[e['filename']] = i+1
for filename in filelist:
if filename in filemap.keys():
ei = filemap[filename]
msg = "Found {0} as entry {1}".format(filename, ei)
logger.info(msg)
file_info = meta[ei]
logger.info(file_info)
if file_info['compressed']:
outfilepath = Utilities.make_tempfilepath(outdir, base="temp", ext=".zip",
verbosity=verbosity)
else:
outfilepath = os.path.join(outdir, file_info['filename'])
logger.info("outfilepath= {0}".format(outfilepath))
outfilepath = os.path.abspath(outfilepath) # make sure folder is absolute
logger.info("outfilepath= {0}".format(outfilepath))
infilename = file_info['sha256']
infilepath = os.path.join(folder, infilename)
if not os.path.isfile(infilepath):
                    logger.warning("Cannot find backup file {0} in {1}".format(infilename, folder))
continue
try:
if verbosity > 0:
logger.info("copying {0} to {1}".format(infilepath, outfilepath))
shutil.copy(infilepath, outfilepath)
except Exception as e:
(extype, exval, tb) = sys.exc_info()
logger.warning("extype= {0}, exval= {1}\n {2}".format(extype, exval, tb))
if file_info['compressed']:
zipfilepath = outfilepath
outfilepath = os.path.join(outdir, file_info['filename'])
logger.info("outfilepath {0}".format(outfilepath))
if verbosity > 0:
logger.info("Unzipping {0} to {1}".format(zipfilepath, outfilepath))
zfile = zipfile.ZipFile(zipfilepath, mode='r')
for zm in zfile.infolist():
logger.info(zm)
try:
zipname = file_info['zipname']
logger.info("zipname= {0} outfilepath= {1}".format(zipname, outfilepath))
zfile.extract(member=zipname,
path=outfilepath, pwd=None)
except Exception as e:
(extype, exval, tb) = sys.exc_info()
logger.warning("extype= {0}, exval= {1}\n {2}".format(extype, exval, tb))
raise Exception(e)
zfile.close()
os.remove(zipfilepath)
else:
msg = "No entry for {0}".format(filename)
logger.warning(msg)
return None
def _get_args_dict(fn, args, kwargs):
args_names = fn.__code__.co_varnames[:fn.__code__.co_argcount]
return {**dict(zip(args_names, args)), **kwargs}
def test_func_name():
frame = inspect.getframeinfo(inspect.currentframe())
fname = frame.function
return fname
def get_fname(i=1):
frame = sys._getframe(i)
code = frame.f_code
fname = code.co_name
return fname
if __name__ == "__main__":
Utils = Utilities()
warnings.formatwarning = Utils.warning_on_one_line
logger = logging.getLogger(__file__)
# initialize parameters
# for new lenovo
computername = str(os.getenv("COMPUTERNAME"))
if computername is None:
logger.error("no computer name")
exit(-1)
username = str(os.getenv("USERNAME"))
if username is None:
logger.error("no user name")
exit(-1)
if computername.upper() == "LENOVO-LEGION":
bfolders = [
os.path.join("C:\\", "Users", username, "OneDrive", "Desktop"),
os.path.join("C:\\", "Users", username, "OneDrive", "Documents"),
os.path.join("C:\\", "Users", username, "OneDrive", "Pictures"),
os.path.join("C:\\", "Users", username, "Downloads"),
os.path.join("C:\\", "Users", username, "Videos"),
os.path.join("C:\\", "Users", username, "Music"),
os.path.join("C:\\", "Users", username, "Documents"),
]
    elif re.search("hp_small", computername):
bfolders = [
os.path.join("C:\\", "dev"),
os.path.join("C:\\", "jmuller"),
os.path.join("C:\\", "Users", username, "Documents"),
os.path.join("C:\\", "Users", username, "Downloads"),
os.path.join("C:\\", "Users", username, "dev"),
os.path.join("C:\\", "Users", username, "enter2"),
os.path.join("C:\\", "Users", username, "Pictures"),
os.path.join("C:\\", "Users", username, "Videos"),
os.path.join("C:\\", "Users", username, "Music"),
]
bfolders = [
# os.path.join("C:\\", "dev"),
# os.path.join("C:\\", "Users", username, "OneDrive", "dev"),
# os.path.join("C:\\", "Users", username, "enter1"),
# os.path.join("C:\\", "Users", username, "enter2"),
# os.path.join("C:\\", "Users", username, "OneDrive", "Desktop"),
# os.path.join("C:\\", "Users", username, "OneDrive", "Documents"),
# os.path.join("C:\\", "Users", username, "OneDrive", "Pictures"),
os.path.join("C:\\", "Users", username, "Documents"),
os.path.join("C:\\", "Users", username, "Downloads"),
os.path.join("C:\\", "Users", username, "Videos"),
os.path.join("C:\\", "Users", username, "Music"),
]
dest_drive = "G:\\"
dest_folder = os.path.join(dest_drive, computername)
logfilename = "backup_log" + "_" + Utilities.nowshortstr() + ".txt"
logfilepath = logfilename
print("dest folder= {0}".format(dest_folder))
# create instance of class
GB = GitBack(verbosity=1)
if True:
res = GB.backup_folders(folders=bfolders,
dest_drive=dest_drive,
dest_folder=dest_folder,
exclude_folders=["zztemp"],
exclude_exts=['.exe'],
temp_folder="./zztemp",
verbosity=1)
backuproot = os.path.join(dest_drive, dest_folder)
res = GB.find_files_in_backup(backuproot=backuproot,
filenames=['addenv.bat'])
print("Done")
``` |
{
"source": "jhmvin/SimpleChatClient",
"score": 3
} |
#### File: src/main/Client.py
```python
from Tkinter import *
import tkMessageBox
from socket import*
import threading
import json
from time import sleep
import unicodedata
from pip.cmdoptions import editable
try:
import tkinter.ttk as ttk
except ImportError:
import Tkinter as tk
import ttk
import atexit
import sys
'''
CLASS DECLARATIONS
--------------------------------------
'''
'''
TIME ZONE MANAGER
--------------------------------------
'''
from datetime import datetime,tzinfo,timedelta
class Zone(tzinfo):
def __init__(self,offset,isdst,name):
self.offset = offset
self.isdst = isdst
self.name = name
def utcoffset(self, dt):
return timedelta(hours=self.offset) + self.dst(dt)
def dst(self, dt):
return timedelta(hours=1) if self.isdst else timedelta(0)
def tzname(self,dt):
return self.name
pass #end of zone
#EST = Zone(-5,False,'EST')
#print datetime.utcnow().strftime('%m/%d/%Y %H:%M:%S %Z')
'''
GMT = Zone(8,False,'GMT')
print datetime.now(GMT).strftime('%m/%d/%Y %I:%M:%S %p')
'''
#print datetime.now(EST).strftime('%m/%d/%Y %H:%M:%S %Z')
#t = datetime.strptime('2011-01-21 02:37:21','%Y-%m-%d %H:%M:%S')
#t = t.replace(tzinfo=GMT)
#print t
#print t.astimezone(EST)
'''
MONOCLIENT SERVER CONNECTIVITY
--------------------------------------
'''
class MonoClient():
def __init__(self):
#self.mime_result = {'type':'null','result':'null'}
self.HOST = '127.0.0.1' #'192.168.15.4'
self.PORT = 2224
try:
self.mono_socket = socket(AF_INET, SOCK_STREAM)
self.mono_socket.connect((self.HOST,self.PORT))
except:
self.showInfoMsg("Server Information","Server is unreachable. Please try again.")
print "Server is unreachable"
pass
#self.graph = GUI()
pass #end of construct
def send_request(self,request):
try:
data_json = json.dumps(request, ensure_ascii=False).encode('utf-8')
self.mono_socket.send(data_json)
return self.mono_socket.recv(131072)
except Exception as e:
print e
return "-143" # cannot reach the server
pass # end request
pass # end of class
#****************************
#* MAIN *
#****************************
''' CLASS GUI
---------------------------------------------------------------
'''
class GUI():
def __init__(self):
self.client = MonoClient()
self.broadcast_reciever = threading.Thread(target=self.fetch_broadcast)
self.client_lister = threading.Thread(target=self.fetch_clients)
self.graphics_render = threading.Thread(target=self.showLoginForm)
self.messenger = threading.Thread(target=self.fetch_messages)
self.private_records = []
print "BROADCAST SET"
self.auth_user = "USER"
self.GMT = Zone(8,False,'GMT') # +8 GMT ASIA TAIPEI
print datetime.now(self.GMT).strftime('%m/%d/%Y %I:%M:%S %p')
self.fetch_message_block = 1
# infinite loop all codes below will not be called
#self.showLoginForm()
self.graphics_render.start()
#self.rcv_brod = 0
#self.broadcast_reciever.start()
pass # end init
def showErrorMsg(self,title,message):
window = Tk()
window.wm_withdraw()
window.geometry("3x2+200+200")
tkMessageBox.showerror(title=title,message=message,parent=window)
def showInfoMsg(self,title,msg):
window = Tk()
window.wm_withdraw()
window.geometry("3x2+"+str(window.winfo_screenwidth()/2)+"+"+str(window.winfo_screenheight()/2))
tkMessageBox.showinfo(title=title, message=msg)
def authenticate(self):
request = {}
request['type'] = 'LOGIN'
global text_font
text_font = ('Calibri', '12')
request['username'] = self.txt_user.get()
request['password'] = self.txt_password.get()
data = self.client.send_request(request)
response = json.loads(data)
res = response['result']
if(res == "0"):
self.showErrorMsg("Account Error","Account not found.")
print "Account Not Existing"
elif(res == "-1"):
self.showErrorMsg("Account Error","Incorrect password.")
print "Wrong Password"
elif(res == "2"):
self.showInfoMsg("Account Information","Account is already online. Please use another account.")
print "Account is already online"
elif(res == "3"):
self.showErrorMsg("Account Error","Maximum client reached. Try again later.")
print "MAX CLIENT REACHED"
elif(res == "1"):
self.showInfoMsg("Account Information","Successfully Logged in!")
print "Login Success"
self.auth_user = self.txt_user.get()
self.frm_login.destroy()
self.rcv_brod = 1
self.showMainForm()
else:
self.showErrorMsg("Unknown Error","An error occured. Try again.")
            print "An Error Occurred"
pass
def register(self):
request = {}
request['type'] = 'REGISTER'
request['username'] = self.reg_username.get()
request['password'] = self.reg_password.get()
data = self.client.send_request(request)
response = json.loads(data)
res = response['result']
if(res == "-1"):
self.showInfoMsg("Account Information","Account already exists.")
print "Account Already Exists"
elif(res == "1"):
self.showInfoMsg("Account Information","Account created.")
print "Account Created"
self.frm_register.destroy()
self.showLoginForm()
else:
self.showErrorMsg("Unknown Error","An error occured. Try again.")
            print "An Error Occurred"
self.frm_register.destroy()
pass
def verifyPass(self):
username = self.reg_username.get()
passwrd = self.reg_password.get()
reenter = self.reg_confirm.get()
if(username==""):
self.showInfoMsg("Account Information","Please enter your username.")
elif(passwrd==""):
self.showInfoMsg("Account Information","Please enter your password.")
elif(reenter==""):
self.showInfoMsg("Account Information","Please re-enter your password.")
elif(passwrd==reenter):
self.fromRegToLogin()
else:
self.showErrorMsg("Account Error","Password not matched.")
pass
def broadcast(self,event):
msg = self.msgBox.get("1.0",END)
print "msg here: ", msg
bad_words = ['fuck', 'bitch', 'shit', 'damn', 'piss', 'asshole', 'slut', 'tangina', 'puta', 'gago', 'hudas', 'lintik', 'ulol', 'tarantado', 'buwisit',
'burat', 'kupal', 'leche', 'ungas', 'punyeta', 'hinayupak', 'pucha', 'pesteng yawa', 'pakshet', 'tanga']
index=0
ctr=0
while 1:
if(index==len(bad_words)):
break
if(bad_words[index] in msg.lower()):
ctr=1
break
index+=1
if ctr==1:
self.showErrorMsg("Content Error","Please avoid bad or foul words.")
else:
msg_nrm = unicodedata.normalize('NFKD', msg).encode('ascii','ignore').strip()
request = {}
request['type'] = 'BROADCAST'
request['sender'] = self.auth_user
request['content'] = msg_nrm
request['send_date'] = datetime.now(self.GMT).strftime('%m/%d/%Y %I:%M:%S %p')
while(1==1):
try:
data = self.client.send_request(request)
response = json.loads(data)
except:
self.showInfoMsg("Message Information","Retrying to send message.")
print "Retrying to send"
sleep(0.5)
continue
pass
break
try:
if(response['type'] == "BROADCAST"):
print response
self.msgBox.delete("0.0",END)
except Exception as e:
print e
#self.fetch_broadcast()
pass # end of broadcast
def fetch_messages(self):
request = {}
request['type'] = 'FETCH_PRIVATE'
while self.fetch_message_block==1:
# ok
while 1==1:
# ok
try:
data = self.client.send_request(request)
response = json.loads(data)
except Exception as e:
print "Retrieving Messages: ",e
sleep(0.5)
continue
pass
break
pass # end loop
if(response['type'] == "FETCH_PRIVATE"):
#print
try:
self.private_records = []
msg_counter = 0
while(msg_counter<(len(response)-1)):
line = response[str(msg_counter)]
arrange_me = json.loads(line)
msg_counter+=1
self.private_records.append(arrange_me)
pass
#print 'CHATBOX REFRESHED'
except:
self.showErrorMsg("Message Error","Cannot retrieve private messages.")
                    print "CANNOT RETRIEVE PRIVATE MESSAGES"
break
pass
pass # end of if
sleep(2)
pass # end of infinite loop
pass # end fetch
# this function refreshes the message box
def fetch_broadcast(self):
request = {}
request['type'] = 'FETCH_BROADCAST'
while 1==1:
# ok
sleep(1)
while 1==1:
# ok
try:
data = self.client.send_request(request)
response = json.loads(data)
except Exception as e:
print "Retrieving Messages: ",e
sleep(0.5)
continue
pass
break
pass # end loop
if(response['type'] == "FETCH_BROADCAST"):
#print
msg_counter = 0
public_message_string = ""
#message loop
while(msg_counter<(len(response)-1)):
line = response[str(msg_counter)]
arrange_me = json.loads(line)
msg_counter+=1
public_message_string += (arrange_me['send_date'] +" >>> [ "+arrange_me['sender'] + " ] : " +arrange_me['content'] + "\n")
pass # end of message loop
try:
self.publicList.configure(state='normal')
self.publicList.delete('1.0', END)
self.publicList.insert(END, public_message_string)
self.publicList.see(END)
self.publicList.configure(state='disabled')
#print 'CHATBOX REFRESHED'
except:
self.showErrorMsg("Message Error","Cannot retrieve messages.")
                    print "CANNOT RETRIEVE MESSAGES"
break
pass
pass # end of if
pass # end of infinite loop
pass # end fetch
def listClick(self,evt):
try:
selected_index = self.clientList.curselection()
select_string = self.clientList.get(selected_index)
st,name = select_string.split("-")
self.showPrivateMsgForm(name.strip())
except:
print "BAD INDEX at 255"
pass
pass
def fetch_clients(self):
request = {}
request['type'] = 'FETCH_CLIENTS'
while 1==1:
# ok
sleep(2)
while 1==1:
# ok
try:
data = self.client.send_request(request)
response = json.loads(data)
except Exception as e:
print "Retrieving CLIENTS: ",e
sleep(0.5)
continue
pass
break
pass # end loop
if(response['type'] == "FETCH_CLIENTS"):
#print
try:
self.publicList.configure(state='disabled')
#-0---------------Populate online client
user_count = (len(response) - 1)
self.clientList.delete(0,END)
x = 0
while(x < user_count):
user_item = response[str(x)]
user_state = "[ " + user_item['state'] + " ] - " + user_item['username']
self.clientList.insert(END,user_state)
x+=1
pass
#---------------------------------------
pass
#print 'CHATBOX REFRESHED'
except:
self.showErrorMsg("Account Error","Cannot retrieve client list.")
                    print "CANNOT RETRIEVE CLIENT LIST"
break
pass
pass # end of if
pass # end of infinite loop
pass # end fetch
def change_pass(self,old_pass,new_pass):
request = {}
if(old_pass.get()==""):
self.showInfoMsg("Acount Information", "Please enter your old password.")
return 0
elif(new_pass.get() == ""):
self.showInfoMsg("Account Information", "Please enter a valid new password.")
return 0
elif(new_pass.get() != self.change_confirm_pass.get()):
self.showErrorMsg("Account Error", "New password not matched.")
return 0
request['type'] = 'CHANGE_PASS'
request['user'] = self.auth_user
        request['old_pass'] = old_pass.get()
        request['new_pass'] = new_pass.get()
data = self.client.send_request(request)
response = json.loads(data)
res = response['result']
if(res=="-1"):
self.showErrorMsg("Acount Error","Old password not matched.")
elif(res=="1"):
            self.showInfoMsg("Account Information", "Password successfully changed.")
print res
pass
def logout(self):
self.frm_public.destroy()
exit()
pass
def change_profile(self):
self.showInfoMsg("Application Information","No available process.")
def change_font(self, event):
global combo_box
print combo_box.get()
font = combo_box.get()
if(font=="Arial Black"):
self.publicList.configure(height=22)
text_font = (font,'9')
elif(font=="Cambria"):
self.publicList.configure(width=70)
self.publicList.configure(height=25)
text_font = (font,'10')
elif(font=="Arial"):
self.publicList.configure(width=70)
self.publicList.configure(height=25)
text_font = (font,'9')
else:
self.publicList.configure(height=20)
text_font = (font,'12')
print text_font
self.publicList.configure(font=text_font)
self.clientList.configure(font=text_font)
self.msgBox.configure(font=text_font)
def btn_pm(self):
self.showInfoMsg("Application Information","Double click the user you want to send private message.")
#****************************************************
# THEMES
#****************************************************
def theme1(self):
self.frm_public.configure(background='dodgerblue2')
print "theme1"
pass
def theme2(self):
self.frm_public.configure(background='springgreen2')
print "theme2"
pass
def theme3(self):
self.frm_public.configure(background='midnight blue')
print "theme3"
pass
def theme4(self):
self.frm_public.configure(background='dark slate gray')
print "theme4"
pass
def theme5(self):
self.frm_public.configure(background='Coral')
print "theme5"
pass
def default(self):
self.frm_public.configure(background='white smoke')
print "default"
pass
'''
------------------------------------------------------------------------------------------------------
UI MODULE
------------------------------------------------------------------------------------------------------
'''
def showLoginForm(self):
#createWindow("Login", "350x400+100+200")
self.frm_login = Tk()
self.frm_login.geometry("430x430+"+str((430/2)+(430/2))+"+"+str(430/2-70))
self.frm_login.title("Login")
self.frm_login.resizable(width="false", height="false")
#self.frm_login.geometry("430x430+100+200")
lbl1 = Label(self.frm_login, text="Login", width=10, height=3, fg="#1A4AA0", font="Calibri 19")
lbl1.pack(side=TOP)
usernameFrame = Frame(self.frm_login)
usernameFrame.pack()
lbl2 = Label(usernameFrame, text="Username:", width=10, fg="#1A4AA0", font="Calibri 14")
lbl2.pack(side=LEFT)
self.txt_user = Entry(usernameFrame, fg="#1A4AA0", font="Calibri 14")
self.txt_user.pack(side=LEFT)
passFrame = Frame(self.frm_login)
passFrame.pack()
lbl3 = Label(passFrame, text="Password:", width=10, height=3, fg="#1A4AA0", font="Calibri 14")
lbl3.pack(side=LEFT)
self.txt_password = Entry(passFrame, show="*", fg="#1A4AA0", font="Calibri 14")
self.txt_password.pack(side=LEFT)
buttonFrame = Frame(self.frm_login)
buttonFrame.pack(side=RIGHT, padx=25)
btnLogin = Button(buttonFrame, text="Login", height=1, width=12,
command=self.authenticate, fg="#F0F0F0", bg="#2A3540", font="Calibri 14")
btnLogin.pack(pady=5)
btnRegister = Button(buttonFrame, text="Register",
height=1, width=12, command=self.showRegisterForm,
fg="#F0F0F0", bg="#2A3540", font="Calibri 14")
btnRegister.pack(pady=5)
btnSettings = Button(buttonFrame, text="Connection Settings",
height=2, width=17, command=self.showConnectionForm,
fg="#F0F0F0", bg="#2A3540", font="Calibri 10")
#btnSettings.pack(pady=5)
self.frm_login.mainloop()
def showConnectionForm(self):
con_set = Tk()
con_set.title("Connection Settings")
con_set.resizable(width="false", height="false")
con_set.geometry("430x430+"+str((430/2)+(430/2))+"+"+str(380/2))
#con_set.geometry("430x430+100+200")
frm1 = Frame(con_set)
frm1.pack(pady=15, padx=10)
lbl2 = Label(frm1, text="Server IP:", width=8, fg="#1A4AA0", font="Calibri 14")
lbl2.pack(side=LEFT)
entry1 = Entry(frm1, fg="#1A4AA0", font="Calibri 14")
entry1.pack(side=LEFT)
frm2 = Frame(con_set)
frm2.pack()
lbl3 = Label(frm2, text="Port:", width=8, height=3, fg="#1A4AA0", font="Calibri 14")
lbl3.pack(side=LEFT)
entry2 = Entry(frm2, fg="#1A4AA0", font="Calibri 14")
entry2.pack(side=LEFT)
buttonFrame = Frame(con_set)
buttonFrame.pack(fill=BOTH)
btnSubmit = Button(buttonFrame, text="Submit", height=1, width=12,
command=self.authenticate, fg="#F0F0F0", bg="#2A3540", font="Calibri 14")
btnSubmit.pack(pady=5)
frm3 = Frame(con_set)
frm3.pack(fill=BOTH)
lbl3 = Label(frm3, text="Result:", width=8, height=1, fg="#1A4AA0", font="Calibri 14")
lbl3.pack(side=LEFT, padx=48)
frm4 = Frame(con_set)
frm4.pack(fill=BOTH)
con_list = Listbox(frm4, relief=SUNKEN, width=37, height=10, font="Calibri 12")
con_list.pack(pady=2, padx=5)
'''
REGISTRATION
'''
def showRegisterForm(self):
self.frm_login.destroy() #destroy the login
self.frm_register = Tk()
self.frm_register.title("Register")
self.frm_register.resizable(width="false", height="false")
self.frm_register.geometry("430x430+"+str((430/2)+(430/2))+"+"+str(380/2))
#self.frm_register.geometry("430x380+100+200")
frmLbl = Frame(self.frm_register)
frmLbl.pack(fill=BOTH, pady=5)
lbl1 = Label(frmLbl, text="Registration", width=10, height=3, fg="#1A4AA0", font="Calibri 19")
lbl1.pack(side=RIGHT, padx=25)
usernameFrame = Frame(self.frm_register)
usernameFrame.pack()
lbl2 = Label(usernameFrame, text="Username:", width=15, height=2, fg="#1A4AA0", font="Calibri 14")
lbl2.pack(side=LEFT)
self.reg_username = Entry(usernameFrame, fg="#1A4AA0", font="Calibri 14")
self.reg_username.pack(side=LEFT)
passFrame = Frame(self.frm_register)
passFrame.pack()
lbl3 = Label(passFrame, text="Password:", width=15, height=2, fg="#1A4AA0", font="Calibri 14")
lbl3.pack(side=LEFT)
self.reg_password = Entry(passFrame, show="*", fg="#1A4AA0", font="Calibri 14")
self.reg_password.pack(side=LEFT)
reTypePassFrame = Frame(self.frm_register)
reTypePassFrame.pack()
lbl3 = Label(reTypePassFrame, text="Retype-Password:", width=15, height=2, fg="#1A4AA0", font="Calibri 14")
lbl3.pack(side=LEFT)
self.reg_confirm = Entry(reTypePassFrame, show="*", fg="#1A4AA0", font="Calibri 14")
self.reg_confirm.pack(side=LEFT)
buttonFrame = Frame(self.frm_register)
buttonFrame.pack(side=RIGHT, padx=30)
btnRegister = Button(buttonFrame, text="Register", width=12, command=self.verifyPass,
fg="#F0F0F0", bg="#2A3540", font="Calibri 14")
btnRegister.pack(pady=2)
btnBack = Button(buttonFrame, text="Back", width=12, command=self.btnBack,
fg="#F0F0F0", bg="#2A3540", font="Calibri 14")
btnBack.pack(pady=2)
pass
def btnBack(self):
self.frm_register.destroy()
self.showLoginForm()
pass
def fromRegToLogin(self):
self.register()
#self.frm_register.destroy()
#self.showLoginForm()
pass
'''
END REGISTRATION
'''
global text_font
def showMainForm(self):
#createWindow("Main", "1000x565+100+200")
self.frm_public = Tk()
self.frm_public.title("Main")
self.frm_public.resizable(width="false", height="false")
self.frm_public.geometry("900x565+"+str((900/2)-200)+"+"+str(565/2-200))
#self.frm_public.geometry("900x565+100+200")
#MENU
menu = Menu(self.frm_public)
self.frm_public.config(menu=menu)
subMenuFile = Menu(menu)
menu.add_cascade(label="File", menu=subMenuFile)
subMenuFile.add_command(label="Logout",
command=self.logout)
subMenuEdit = Menu(menu)
menu.add_cascade(label="Edit", menu=subMenuEdit)
subMenuEdit.add_command(label="Change profile",
command=self.showChangePassForm)
subMenuView = Menu(menu)
menu.add_cascade(label="View", menu=subMenuView)
subMenuView.add_command(label="Default | White Smoke",
command=self.default)
subMenuView.add_command(label="Theme 1 | Dodger Blue",
command=self.theme1)
subMenuView.add_command(label="Theme 2 | Spring Green",
command=self.theme2)
subMenuView.add_command(label="Theme 3 | Midnight Blue",
command=self.theme3)
subMenuView.add_command(label="Theme 4 | Dark Slate Gray",
command=self.theme4)
subMenuView.add_command(label="Theme 5 | Coral",
command=self.theme5)
#Public Chat Frame
publicChatLogs = Frame(self.frm_public)
publicChatLogs.pack(side=LEFT, padx=10, pady=5)
frm1 = Frame(publicChatLogs)
frm1.pack(fill=BOTH)
lbl1 = Label(frm1, text="Public Chat", fg="#1A4AA0", font="Calibri 12")
lbl1.pack(side=LEFT)
#publicList = Listbox(publicChatLogs, relief=SUNKEN, width=50, height=18, font=text_font)
self.publicList = Text (publicChatLogs, fg="#232C35", font=text_font, relief=GROOVE, height=20, width=50)
#
# create a Scrollbar and associate it with txt
#
self.publicList.pack(fill=BOTH, pady=5)
frm2 = Frame(publicChatLogs)
frm2.pack(fill=BOTH)
global combo_box
combo_box = ttk.Combobox(frm2, font=text_font) # apply font to combobox
combo_box.bind("<<ComboboxSelected>>", self.change_font)
combo_box.pack(side=LEFT)
combo_box['values'] = ('Arial', "Arial Black", 'Calibri', 'Cambria')
self.msgBox = Text (publicChatLogs, fg="#232C35", font=text_font, relief=GROOVE, height=3)
self.msgBox.pack(expand=1, fill=BOTH, pady=5)
btnSend = Button(publicChatLogs, text="Send", height=2, width=12,
command=lambda: self.broadcast('<Return>'), fg="#F0F0F0", bg="#2A3540")
btnSend.pack(side=RIGHT, pady=5)
############################################
self.frm_public.bind_all('<Return>', self.broadcast)
############################################
#Private Message Frame
privateMsgFrame = Frame(self.frm_public)
privateMsgFrame.pack(side=TOP, pady=8, padx=10)
frm2 = Frame(privateMsgFrame)
frm2.pack(fill=BOTH)
lbl2 = Label(frm2, text="Connected Clients:", fg="#1A4AA0", font="Calibri 12")
lbl2.pack(side=LEFT)
#----------------------------------------------------------------------------
self.clientList = Listbox(privateMsgFrame, relief=SUNKEN, width=45,
height=22, font=text_font)
self.clientList.pack(pady=5)
self.clientList.bind('<Double-Button-1>',self.listClick)
btnPrivateMsg = Button(privateMsgFrame, text="Private Message",
height=2, width=50,# remove command
fg="#F0F0F0", bg="#2A3540", command=self.btn_pm)
btnPrivateMsg.pack(pady=5)
#RUN DAEMON
self.broadcast_reciever.start()
self.client_lister.start()
def showPrivateMsgForm(self,reciever):
pm = Tk()
pm.title(reciever)
pm.resizable(width="false", height="false")
#self.pm.geometry("600x450+"+str((600/2)+(600/2+200))+"+"+str(450/2+20))
pm.geometry("600x450+400+150")
text_font = ('Calibri', '12')
frm1 = Frame(pm)
frm1.pack(fill=BOTH)
lbl1 = Label(frm1, text="Private Chat", fg="#1A4AA0", font="Calibri 12")
lbl1.pack(side=LEFT, padx=10, pady=5)
privateList = Text (pm, fg="#232C35", font=text_font, relief=GROOVE, height=11)
privateList.pack(fill=BOTH, padx=10, pady=5)
frm = Frame(pm)
frm.pack(fill=BOTH)
combo_box = ttk.Combobox(frm, font=text_font) # apply font to combobox
combo_box.pack(side=LEFT, padx=10)
combo_box['values'] = ('Arial', "Arial Black", 'Calibri', 'Cambria')
msgBox = Text(pm, fg="#232C35", font=text_font, relief=GROOVE, height=3)
msgBox.pack(fill=BOTH, pady=5, padx=10)
# --------------------------------------------------- ERROR
def load_messages(rec):
x= 1
while(x==1):
sleep(1)
try:
privateList.delete("1.0", END)
#print "INSIDE LE:", len(self.private_records)
#print "THREAD STATE:", self.messenger.is_alive()
if(len(self.private_records) > 0):
temp = self.private_records
                        print "received"
for mymsg in temp:
if((mymsg['to'] == rec and mymsg['from'] == self.auth_user) or (mymsg['to'] == self.auth_user and mymsg['from'] == rec)):
privateList.insert(END,mymsg['send_date'] + " >> " + mymsg['from'] + ": " + mymsg['message'] + "\n")
#privateList.insert(END,"Hiiii")
else:
continue
pass
#privateList.see(0)
#sleep(1)
except Exception as e:
if(str(e) == "invalid command name \".66008448\""):
print "super error"
pass
elif(str(e) == "out of stack space (infinite loop?)"):
print "stupid error"
pass
elif(str(e) == "invalid command name \".70163552\""):
print "stupid error"
pass
elif(str(e) == "invalid command name \".70238896\""):
print "stupid error"
pass
else:
print "PRIVATE CLOSE" + str(e)
x=0
break
pass
'''
for each_record in self.private_records:
print "FROM: " + each_record['from'] + " TO: " + each_record['to'] + " MESSAGE " + each_record['message']
pass
'''
pass
message_displayer = threading.Thread(target = load_messages,args=(reciever,))
message_displayer.start()
#---------------------------------- ERROR
def send_message(evt,reciever):
#get message
msg = msgBox.get("1.0",END)
print "msg here: ", msg
bad_words = ['fuck', 'bitch', 'shit', 'damn', 'piss', 'asshole', 'slut', 'tangina', 'puta', 'gago', 'hudas', 'lintik', 'ulol', 'tarantado', 'buwisit',
'burat', 'kupal', 'leche', 'ungas', 'punyeta', 'hinayupak', 'pucha', 'pesteng yawa', 'pakshet', 'tanga']
index=0
ctr=0
while 1:
if(index==len(bad_words)):
break
if(bad_words[index] in msg.lower()):
ctr=1
break
index+=1
if ctr==1:
self.showErrorMsg("Content Error","Please avoid bad or foul words.")
else:
msg_nrm = unicodedata.normalize('NFKD', msg).encode('ascii','ignore').strip()
request = {}
request['type'] = "PRIVATE"
request['from'] = self.auth_user
request['to'] = reciever
request['message'] = msg_nrm
request['send_date'] = datetime.now(self.GMT).strftime('%m/%d/%Y %I:%M:%S %p')
print request
                # retry if no response was received
x=1
while(x==1):
try:
data = self.client.send_request(request)
response = json.loads(data)
except:
self.showInfoMsg("Message Information","Retrying to send message.")
print "Retrying to send"
sleep(0.5)
continue
pass
x=0
break
if(response['type'] == "PRIVATE"):
print response
pass
msgBox.delete("1.0", END)
pass
btnSend = Button(pm, text="Send", height=2, width=12,
command=lambda: send_message('<Return>',reciever), fg="#F0F0F0", bg="#2A3540")
btnSend.pack(side=RIGHT, pady=5, padx=10)
#use lambda event to bind function with parameters
############################################
pm.bind_all('<Return>',lambda event: send_message('<Return>',reciever))
############################################
if(self.messenger.is_alive()):
print "ALREADY LIVE"
pass
else:
self.fetch_message_block = 1
self.messenger.start()
print "THREAD STARTED"
pass
pass
def showChangePassForm(self):
cp = Tk()
cp.title("Change Profile")
cp.geometry("500x320+400+150")
text_font = ('Calibri', '12')
frme1 = Frame(cp)
frme1.pack(fill=BOTH)
lbel1 = Label(frme1, text="Change Password", font="Calibri 22", fg="#1A4AA0")
lbel1.pack(side=LEFT, padx=10, pady=5)
usernameFrame = Frame(cp)
usernameFrame.pack()
lbl2 = Label(usernameFrame, text="Old Password:", width=15, height=2, fg="#1A4AA0", font="Calibri 14")
lbl2.pack(side=LEFT)
self.change_old_pass = Entry(usernameFrame, show="*", fg="#1A4AA0", font="Calibri 14")
self.change_old_pass.pack(side=LEFT)
passFrame = Frame(cp)
passFrame.pack()
lbl3 = Label(passFrame, text="New Password:", width=15, height=2, fg="#1A4AA0", font="Calibri 14")
lbl3.pack(side=LEFT)
self.change_new_pass = Entry(passFrame, show="*", fg="#1A4AA0", font="Calibri 14")
self.change_new_pass.pack(side=LEFT)
reTypePassFrame = Frame(cp)
reTypePassFrame.pack()
lbl3 = Label(reTypePassFrame, text="Retype-New Password:", width=20, height=2, fg="#1A4AA0", font="Calibri 14")
lbl3.pack(side=LEFT)
self.change_confirm_pass = Entry(reTypePassFrame, show="*", fg="#1A4AA0", font="Calibri 14")
self.change_confirm_pass.pack(side=LEFT)
buttonFrame = Frame(cp)
buttonFrame.pack(side=RIGHT, padx=30)
btnSave = Button(buttonFrame, text="Save changes", width=12,
fg="#F0F0F0", bg="#2A3540", font="Calibri 14",command=lambda: self.change_pass(self.change_old_pass, self.change_new_pass))
btnSave.pack(pady=2)
btnBack = Button(buttonFrame, text="Back", width=12,
fg="#F0F0F0", bg="#2A3540", font="Calibri 14",)
#btnBack.pack(pady=2)
pass # end of class GUI
GUI()
``` |
{
"source": "jhmz333/ai-masters-degree-stuff",
"score": 3
} |
#### File: python_fundations/activity_2/exercise_1.py
```python
def tempeture_converter(tempeture_list):
return list(map(lambda t: round(t + 273.15, 2), tempeture_list))
temperatures = [-23, 0, 34, 22, -30, 23]
print(f"temperatures = {temperatures}")
print(f"temperatures = {tempeture_converter(temperatures)}")
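
# Running the script should print the Celsius list followed by its Kelvin conversion:
# temperatures = [-23, 0, 34, 22, -30, 23]
# temperatures = [250.15, 273.15, 307.15, 295.15, 243.15, 296.15]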
``` |
{
"source": "jhn--/bubble_sorts",
"score": 4
} |
#### File: jhn--/bubble_sorts/mit_bubble.py
```python
def mit_bubble(L):
"""Bubble sort
Parameters:
L (list): Unsorted (eventually sorted) list.
    Note: `swap` is a local flag, not a parameter; it records whether a pass made no swaps, which signals that sorting is done.
Returns:
L (list): Sorted list.
"""
swap = False
while not swap:
swap = True
for j in range(1, len(L)):
if L[j-1] > L[j]:
swap = False
(L[j-1], L[j]) = (L[j], L[j-1])
return L
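

# Minimal usage sketch (not part of the original module): sort a small list in place.
if __name__ == "__main__":
    print(mit_bubble([5, 3, 8, 1]))  # -> [1, 3, 5, 8]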
``` |
{
"source": "jhn--/dell-emc-isilon-related-scripts",
"score": 3
} |
#### File: jhn--/dell-emc-isilon-related-scripts/isi_release_quarantined_files.py
```python
import subprocess, shlex, json
def get_threat_list():
isi_threat_list_raw = "isi antivirus reports threats list --format json -a -z"
isi_threat_list_split = shlex.split(isi_threat_list_raw)
isi_threat_list_cmd = subprocess.Popen(isi_threat_list_split, stdout = subprocess.PIPE)
isi_threat_list_strings = isi_threat_list_cmd.communicate()[0]
isi_threat_list_results = json.loads(isi_threat_list_strings)
return isi_threat_list_results
def release_threats():
isi_threat_list_results = get_threat_list()
for i in isi_threat_list_results:
isi_release_quarantine_raw = "isi antivirus release --verbose"
isi_release_quarantine_split = shlex.split(isi_release_quarantine_raw)
isi_release_quarantine_split.append(i["file"])
isi_release_quarantine_cmd = subprocess.Popen(isi_release_quarantine_split, stdout = subprocess.PIPE)
isi_release_quarantine_results = isi_release_quarantine_cmd.communicate()[0]
print(isi_release_quarantine_results)
def main():
release_threats()
if __name__ == '__main__':
main()
``` |
{
"source": "Jhneric/dhis2-docs-builder",
"score": 2
} |
#### File: dhis2_plugins/dhis2_docs/transifex.py
```python
import requests
import json
import argparse
import os
import glob
import sys
import tempfile
import frontmatter
class tx:
def __init__(self,project_slug):
# Transifex
# project_slug='meta-who-packages'
self.tx_token = os.getenv('DHIS2_DOCS_TX_TOKEN')
self.project_slug=project_slug
# tx_i18n_type='KEYVALUEJSON'
self.tx_mode='default'
self.tx_langs_api='https://www.transifex.com/api/2/project/{s}/resource/{r}/?details'
self.tx_stats_api='https://www.transifex.com/api/2/project/{s}/resource/{r}/stats/{l}'
self.tx_translations_api='https://www.transifex.com/api/2/project/{s}/resource/{r}/translation/{l}/?mode={m}&file'
self.tx_resources_api='https://www.transifex.com/api/2/project/{s}/resources/'
self.tx_resource_api='https://www.transifex.com/api/2/project/{s}/resource/{r}'
self.tx_content_api='https://www.transifex.com/api/2/project/{s}/resource/{r}/content'
self.tx_translations_update_api='https://www.transifex.com/api/2/project/{s}/resource/{r}/translation/{l}'
self.tx_edit_root='https://www.transifex.com/hisp-uio/'
# We need to map language codes that DHIS2 doesn't support natively
# fa_AF --> prs
# uz@Cyrl --> uz
# uz@Latn --> uz_UZ
self.langmap={'fa_AF': 'prs', 'uz@Cyrl':'uz','uz@Latn':'uz_UZ'}
self.TX_AUTH=('api',self.tx_token)
# get a list of resources for the project
self.tx_resources = []
urlr = self.tx_resources_api.format(s=project_slug)
response = requests.get(urlr, auth=self.TX_AUTH)
if response.status_code == requests.codes['OK']:
res = (x['slug'] for x in response.json())
for resource_s in res:
self.tx_resources.append(resource_s)
def push(self,path_to_file,resource_slug,categories,tx_i18n_type):
print("Pushing",path_to_file,"to transifex...")
cats = resource_slug.split('__')
if len(cats) > 1:
ca = []
ca.append(cats[0])
c = ca + categories
else:
c = categories
# check if our resource exists
if resource_slug in self.tx_resources:
# If it does - update it
url = self.tx_content_api.format(s=self.project_slug, r=resource_slug)
files = {'upload_file': open(path_to_file, "rb")}
r = requests.put(url, files=files, auth=self.TX_AUTH)
# print(r.status_code,": PUT ",url)
# print(r.headers,": PUT ",url)
else:
# if it doesn't - create it
print("Resource does not exist. Creating...")
url = self.tx_resources_api.format(s=self.project_slug)
data = {
'name': resource_slug,
'slug': resource_slug,
'i18n_type': tx_i18n_type,
'categories': c
}
files = {'upload_file': open(path_to_file, "rb")}
r = requests.post(url, files=files, data=data, auth=self.TX_AUTH)
# print(r.status_code,": POST ",url)
if r.status_code != requests.codes['OK']:
print(r.text)
# if c:
# url = self.tx_resource_api.format(s=self.project_slug, r=resource_slug)
# data = {
# 'categories': c
# }
# r = requests.put(url, data=json.dumps(data), auth=self.TX_AUTH, headers={'content-type': 'application/json'})
# if r.status_code != requests.codes['OK']:
# print(r.text)
def pull(self,path_to_file,resource_slug,language_code):
print("Pulling",path_to_file,"from transifex...")
# We need to map language codes that DHIS2 doesn't support natively
# uz@Cyrl --> uz
# uz@Latn --> uz_UZ
# mapped_language_code = language_code.replace("@Latn","_UZ").replace("@Cyrl","")
mapped_language_code = language_code
if language_code in self.langmap.keys():
mapped_language_code = self.langmap[language_code]
url = self.tx_translations_api.format(s=self.project_slug, r=resource_slug, l=language_code, m=self.tx_mode)
response = requests.get(url, auth=self.TX_AUTH)
if response.status_code == requests.codes['OK']:
os.makedirs(os.path.dirname(path_to_file), exist_ok=True)
with open(path_to_file, 'wb') as f:
for line in response.iter_content():
f.write(line)
# set the appropriate edit url to transifex resource
fm = frontmatter.load(path_to_file)
fm['edit_url'] = self.tx_edit_root+self.project_slug+'/translate/#'+language_code+'/'+resource_slug
with open(path_to_file, 'w') as emd:
print(frontmatter.dumps(fm), file=emd)
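
    # Illustrative usage (requires DHIS2_DOCS_TX_TOKEN in the environment; the slug,
    # paths and i18n type below are hypothetical):
    #   t = tx("dhis2-user-guide")
    #   t.push("docs/en/intro.md", "intro__md", ["user"], "GITHUBMARKDOWN")
    #   t.pull("docs/fr/intro.md", "intro__md", "fr")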
```
#### File: markdown-extensions/d_card/d_card.py
```python
from markdown.extensions import Extension
from markdown.blockprocessors import BlockProcessor
from markdown.util import etree
# import xml.etree.ElementTree as etree
import re
class D_cardExtension(Extension):
""" D_card extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Add D_card to Markdown instance. """
md.registerExtension(self)
md.parser.blockprocessors.register(D_cardProcessor(md.parser), 'd_card', 104)
class D_cardProcessor(BlockProcessor):
CLASSNAME = 'd_card'
CLASSNAME_TITLE = 'd_card-title'
RE = re.compile(r'(?:^|\n)!!! ?(.*)? *(?:\n|$)')
RE_SPACES = re.compile(' +')
def test(self, parent, block):
sibling = self.lastChild(parent)
return self.RE.search(block) or \
(block.startswith(' ' * self.tab_length) and sibling is not None and
sibling.get('class', '').find(self.CLASSNAME) != -1)
def run(self, parent, blocks):
sibling = self.lastChild(parent)
block = blocks.pop(0)
m = self.RE.search(block)
if m:
block = block[m.end():] # removes the first line
block, theRest = self.detab(block)
if m:
title = m.group(1)
div = etree.SubElement(parent, 'div')
div.set('class', self.CLASSNAME)
if title:
p = etree.SubElement(div, 'p')
p.text = title
p.set('class', self.CLASSNAME_TITLE)
else:
div = sibling
self.parser.parseChunk(div, block)
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
class D_cardBlockProcessor(BlockProcessor):
RE_FENCE_START = r'^ *!{3,} *\n' # start line, e.g., ` !!!! `
RE_FENCE_END = r'\n *!{3,}\s*$' # last non-blank line, e.g, '!!!\n \n\n'
def test(self, parent, block):
return re.match(self.RE_FENCE_START, block)
def run(self, parent, blocks):
original_block = blocks[0]
blocks[0] = re.sub(self.RE_FENCE_START, '', blocks[0])
# Find block with ending fence
for block_num, block in enumerate(blocks):
if re.search(self.RE_FENCE_END, block):
# remove fence
blocks[block_num] = re.sub(self.RE_FENCE_END, '', block)
# render fenced area inside a new div
e = etree.SubElement(parent, 'div')
e.set('style', 'display: inline-block; border: 1px solid red;')
self.parser.parseBlocks(e, blocks[0:block_num + 1])
# remove used blocks
for i in range(0, block_num + 1):
blocks.pop(0)
return True # or could have had no return statement
# No closing marker! Restore and do nothing
blocks[0] = original_block
return False # equivalent to our test() routine returning False
def makeExtension(**kwargs): # pragma: no cover
return D_cardExtension(**kwargs)
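
# Minimal usage sketch (assumed invocation; not part of the original module):
#   import markdown
#   html = markdown.markdown("!!! Note\n    Body text", extensions=[D_cardExtension()])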
``` |
{
"source": "jhn--/kindalikexkcdpasswordgenerator",
"score": 4
} |
#### File: jhn--/kindalikexkcdpasswordgenerator/kindalikexkcdpasswordgenerator.py
```python
from random import randrange
import argparse
import requests
_ALPHABET = {}
_PASSWORD = []
_WORD_LIST_URL = "https://raw.githubusercontent.com/dwyl/english-words/\
master/words_alpha.txt"
def get_words(word_set: set, num_of_letters: int) -> dict:
"""Accepts a set() of alphabets and populates the _global_ dictionary,
`_ALPHABET` with words from `_WORD_LIST_URL` that start with the alphabets
with letter count not more than the maximum number of letters defined by
`num_of_letters`.
Args:
word_set (set): A set() of alphabets split from word.
num_of_letters (int): Number of letters new random words can have up
to.
"""
try:
r = requests.get(_WORD_LIST_URL)
except Exception as e:
raise e
else:
for letter in word_set:
_ALPHABET[letter] = [word for word in r.text.split(
'\r\n') if word[0] == letter and len(word) <= num_of_letters]
def check_num_of_letters(num_of_letters: int) -> int:
"""Accepts `num_of_letters` to check if it is less than 3.
If `num_of_letters` is greater than or equals to 3, return `num_of_letters`
as-is. Otherwise, return `num_of_letters` with the value of 6.
Args:
num_of_letters (int)
Returns:
num_of_letters (int)
"""
if num_of_letters < 3:
print("Number of letters defined by user is less than 3. \
Using default value of 6.")
return 6
else:
return num_of_letters
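
# For example: check_num_of_letters(2) returns the default of 6,
# while check_num_of_letters(8) returns 8 unchanged.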
def main(word: str, count: int) -> list:
"""Main function.
Converts string `word` to a set() to remove duplicate alphabets in `word`.
Forwards `count` to `check_num_of_letters()` to check if value less than 3.
Passes `word_set` and `num_of_letters` to get_words() to list of words in
dictionary `_ALPHABET`. Randomly select words that starts with the
alphabets in set() and populate into global `_PASSWORD`.
Args:
word (str): Word to split up into alphabets, after which alphabets are
used as starting letter for a new random word. count (int): Number of
alphabets new random word should have at most.
"""
word_set = set(word)
num_of_letters = check_num_of_letters(count)
get_words(word_set, num_of_letters)
for i in word:
_PASSWORD.append(_ALPHABET[i][randrange(len(_ALPHABET[i]))])
print(f'Password generated: {(" ").join(_PASSWORD)}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"word", type=str, help="Word to break up and convert into xkcd \
style password.")
parser.add_argument(
"-c", "--count", type=int, help="Maximum number of alphabets you want each component to be. \
Default set at 6. Program will set value at 6 if any user input \
value is lesser than 3.", default=6)
args = parser.parse_args()
main(args.word, args.count)
``` |
{
"source": "JhnLee/kucalendar-chatbot",
"score": 3
} |
#### File: JhnLee/kucalendar-chatbot/utils.py
```python
import requests
import json
ERROR_MESSAGE = '네트워크 접속에 문제가 발생했습니다. 다시 시도해주세요.'
def get_menu(answer):
menu = []
index = answer.find(' 1. ')
if index < 0:
return answer, menu
menu_string = answer[index + 1:]
answer = answer[:index]
number = 1
while 1:
number += 1
search_string = ' %d. ' % number
index = menu_string.find(search_string)
if index < 0:
menu.append(menu_string[3:].strip())
break;
menu.append(menu_string[3:index].strip())
menu_string = menu_string[index + 1:]
return answer, menu
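
# For example (illustrative strings): get_menu("Today's menu 1. Kimchi stew 2. Bibimbap")
# returns ("Today's menu", ["Kimchi stew", "Bibimbap"]); an answer without " 1. "
# comes back unchanged with an empty menu list.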
def get_menu_button(menu):
if len(menu) == 0:
return None
menu_button = {
'type': 'buttons',
'buttons': menu
}
return menu_button
def get_answer(text, user_key):
data_send = {
'lang': 'ko',
'query': text,
'sessionId': user_key,
'timezone': 'Asia/Seoul'
}
data_header = {
'Content-Type': 'application/json; charset=utf-8',
        'Authorization': 'Bearer TOKEN'  # replace TOKEN with your personal Client Access token
}
dialogflow_url = 'https://api.dialogflow.com/v1/query?v=20150910'
res = requests.post(dialogflow_url,
data=json.dumps(data_send),
headers=data_header)
if res.status_code != requests.codes.ok:
return ERROR_MESSAGE
data_receive = res.json()
answer = data_receive['result']['fulfillment']['speech']
return answer
``` |
{
"source": "jhnnsrs/arbeider",
"score": 2
} |
#### File: arbeider/larvik/consumers.py
```python
from typing import Callable, Awaitable, Any, Dict, List, Tuple
import dask
from asgiref.sync import async_to_sync
from channels.consumer import AsyncConsumer, SyncConsumer
from channels.layers import get_channel_layer
from django.db import models
from rest_framework import serializers
from larvik.discover import NodeType
from larvik.helpers import LarvikManager
from larvik.logging import get_module_logger
from larvik.models import LarvikJob
from larvik.structures import StatusCode, LarvikStatus, larvikError, larvikProgress
from django.conf import settings
channel_layer = get_channel_layer()
from dask.distributed import LocalCluster
cluster = LocalCluster()
DEBUG = settings.ARNHEIM_DEBUG
class LarvikError(Exception):
def __init__(self, message):
self.message = message
class AsyncLarvikConsumer(AsyncConsumer,NodeType):
def __init__(self, scope):
super().__init__(scope)
self.publishers = None
self.job = None
self.data = None
self.request = None
self.requestSerializer = None
self.logger = get_module_logger(type(self).__name__)
def register(self,data):
self.publishers = dict(data["publishers"])
self.job = data["job"]
self.data = data
async def updateModel(self,model: models.Model, method: str):
await self.modelCreated(model, self.getSerializers()[type(model).__name__], method)
async def modelCreated(self, model: models.Model, serializerclass: serializers.ModelSerializer.__class__, method: str):
'''Make sure to call this if you created a new Model on the Database so that the actionpublishers can do their work'''
serialized = serializerclass(model)
stream = str(type(model).__name__).lower()
if stream in self.publishers.keys():
#self.logger.info("Found stream {0} in Publishers. Tyring to publish".format(str(stream)))
await self.publish(serialized, method,self.publishers[stream],stream)
async def publish(self,serializer, method, publishers,stream):
if publishers is not None:
for el in publishers:
path = ""
for modelfield in el:
try:
value = serializer.data[modelfield]
path += "{0}_{1}_".format(str(modelfield), str(value))
except:
self.logger.info("Modelfield {0} does not exist on {1}".format(str(modelfield), str(stream)))
path += "{0}_".format((str(modelfield)))
path = path[:-1] # Trim the last underscore
#self.logger.info("Publishing to Channel {0}".format(path))
await channel_layer.group_send(
path,
{
"type": "stream",
"stream": stream,
"room": path,
"method": method,
"data": serializer.data
}
)
def getRequestFunction(self) -> Callable[[Dict], Awaitable[LarvikJob]]:
'''Should return a Function that returns the Model and not the Serialized instance'''
raise NotImplementedError
def updateRequestFunction(self) -> Callable[[models.Model,str], Awaitable[models.Model]]:
'''Should update the Status (provided as string) on the Model and return the Model'''
raise NotImplementedError
def getSerializers(self) -> Dict[str,type(serializers.Serializer)]:
''' Should return a dict with key: modelname, value: serializerClass (no instance)'''
raise NotImplementedError
def getDefaultSettings(self, request: models.Model) -> Dict:
        ''' Should return the default settings as a dict'''
raise NotImplementedError
async def progress(self,message=None):
self.logger.info(f"Progress {message}")
await self.updateRequest(larvikProgress(message))
async def updateRequest(self, status: LarvikStatus):
# Classic Update Circle
if self.requestSerializer is None: self.requestSerializer = self.getSerializers()[type(self.request).__name__]
self.request = await self.updateRequestFunction()(self.request, status)
await self.modelCreated(self.request, self.requestSerializer, "update")
async def start(self,request: LarvikJob, settings: Dict):
raise NotImplementedError
async def startJob(self, data):
self.register(data)
self.logger.info("Received Data")
# Working on models is easier, so the cost of a database call is bearable
self.request: LarvikJob = await self.getRequestFunction()(data["data"])
await self.updateRequest(LarvikStatus(StatusCode.STARTED, "Started"))
        # TODO: Implement with a request Parentclass
self.settings: dict = self._getsettings(self.request.settings, self.getDefaultSettings(self.request))
try:
await self.start(self.request, self.settings)
await self.updateRequest(LarvikStatus(StatusCode.DONE, "Done"))
except LarvikError as e:
self.logger.error(e)
await self.updateRequest(LarvikStatus(StatusCode.ERROR, e.message))
except Exception as e:
self.logger.error(e)
if DEBUG:
await self.updateRequest(LarvikStatus(StatusCode.ERROR, e))
raise e
else:
await self.updateRequest(LarvikStatus(StatusCode.ERROR, "Uncaught Error on Server, check log there"))
def _getsettings(self, settings: str, defaultsettings: Dict):
"""Updateds the Settings with the Defaultsettings"""
import json
try:
settings = json.loads(settings)
try:
defaultsettings = defaultsettings
except:
defaultsettings = {}
except:
defaultsettings = {}
settings = {}
defaultsettings.update(settings)
return defaultsettings
class ModelFuncAsyncLarvikConsumer(AsyncLarvikConsumer):
def getRequestFunction(self) -> Callable[[Dict], Awaitable[LarvikJob]]:
raise NotImplementedError
def updateRequestFunction(self) -> Callable[[models.Model, str], Awaitable[models.Model]]:
raise NotImplementedError
def getSerializers(self) -> Dict[str, type(serializers.Serializer)]:
raise NotImplementedError
def getDefaultSettings(self, request: models.Model) -> Dict:
raise NotImplementedError
def getModelFuncDict(self):
raise NotImplementedError
async def parse(self, request: LarvikJob, settings: dict) -> Dict[str, Any]:
raise NotImplementedError
async def start(self, request: LarvikJob, settings: Dict):
try:
returndict = await self.parse(self.request, self.settings)
for modelname, modelparams in returndict.items():
models = await self.getModelFuncDict()[modelname](modelparams, self.request, self.settings)
for (model, method) in models:
await self.updateModel(model, method)
await self.updateRequest(LarvikStatus(StatusCode.DONE, "Done"))
except LarvikError as e:
self.logger.error(e)
await self.updateRequest(LarvikStatus(StatusCode.ERROR, e.message))
except Exception as e:
self.logger.error(e)
if DEBUG:
await self.updateRequest(LarvikStatus(StatusCode.ERROR, e))
else:
await self.updateRequest(LarvikStatus(StatusCode.ERROR, "Uncaught Error on Server, check log there"))
class SyncLarvikConsumer(SyncConsumer, NodeType, LarvikManager):
requestClass = None
requestClassSerializer = None
def __init__(self, scope):
super().__init__(scope)
self.publishers = None
self.job = None
self.data = None
self.request = None
self.requestSerializer = None
self.logger = get_module_logger(type(self).__name__)
def register(self,data):
self.publishers = dict(data["publishers"])
self.job = data["job"]
self.data = data
def updateModel(self,model: models.Model, method: str):
return self.modelCreated(model, self.getSerializers()[type(model).__name__], method)
def modelCreated(self, model: models.Model, serializerclass: serializers.ModelSerializer.__class__, method: str):
'''Make sure to call this if you created a new Model on the Database so that the actionpublishers can do their work'''
serialized = serializerclass(model)
stream = str(type(model).__name__).lower()
if stream in self.publishers.keys():
#self.logger.info("Found stream {0} in Publishers. Tyring to publish".format(str(stream)))
self.publish(serialized, method,self.publishers[stream],stream)
def progress(self,message=None):
self.logger.info(f"Progress {message}")
self.updateStatus(larvikProgress(message=message))
def publish(self,serializer, method, publishers,stream):
if publishers is not None:
for el in publishers:
path = ""
for modelfield in el:
try:
value = serializer.data[modelfield]
path += "{0}_{1}_".format(str(modelfield), str(value))
except:
#self.logger.info("Modelfield {0} does not exist on {1}".format(str(modelfield), str(stream)))
path += "{0}_".format((str(modelfield)))
path = path[:-1] # Trim the last underscore
self.logger.info("Publishing to Channel {0}".format(path))
async_to_sync(channel_layer.group_send)(
path,
{
"type": "stream",
"stream": stream,
"room": path,
"method": method,
"data": serializer.data
}
)
def getRequest(self,data) -> LarvikJob:
        '''Should return the request Model instance (not its serialized representation)'''
        if self.requestClass is None:
            raise NotImplementedError("Please specify 'requestClass' or override getRequest")
else:
return self.requestClass.objects.get(pk = data["id"])
def getSerializers(self):
        ''' Should return a dict with key: modelname, value: serializerClass (no instance)'''
raise NotImplementedError
def updateStatus(self, status: LarvikStatus):
# Classic Update Circle
if self.requestSerializer is None: self.requestSerializer = self.getSerializers()[type(self.request).__name__]
self.request.statuscode = status.statuscode
self.request.statusmessage = status.message
self.request.save()
self.modelCreated(self.request, self.requestSerializer, "update")
def getDefaultSettings(self, request: models.Model) -> Dict:
        ''' Should return the default settings as a dict'''
raise NotImplementedError
def start(self,request: LarvikJob, settings: dict):
raise NotImplementedError
def startJob(self, data):
self.register(data)
self.logger.info("Received Data")
# Working on models is easier, so the cost of a database call is bearable
self.request: LarvikJob = self.getRequest(data["data"])
        # TODO: Implement with a request Parentclass
self.settings: dict = self._getsettings(self.request.settings, self.getDefaultSettings(self.request))
self.progress("Settings Loaded")
try:
self.start(self.request,self.settings)
self.updateStatus(LarvikStatus(StatusCode.DONE, "Done"))
except LarvikError as e:
self.logger.error(e)
self.updateStatus(larvikError(repr(e)))
except Exception as e:
self.logger.error(e)
if DEBUG:
self.updateStatus(larvikError(repr(e)))
raise e
else:
self.updateStatus(LarvikStatus(StatusCode.ERROR, "Uncaught Error on Server, check log there"))
def _getsettings(self, settings: str, defaultsettings: Dict):
"""Updateds the Settings with the Defaultsettings"""
import json
try:
settings = json.loads(settings)
try:
defaultsettings = defaultsettings
except:
defaultsettings = {}
except:
defaultsettings = {}
settings = {}
defaultsettings.update(settings)
return defaultsettings
class DaskSyncLarvikConsumer(SyncLarvikConsumer):
def __init__(self, scope):
super().__init__(scope)
self.iscluster = False
def compute(self, graph):
if self.iscluster:
return self.c.compute(graph)
else:
with dask.config.set(scheduler='threads'):
result = graph.compute()
return result
def persist(self, graph):
return graph.persist()
def getSerializers(self):
raise NotImplementedError
def getDefaultSettings(self, request: models.Model) -> Dict:
raise NotImplementedError
def parse(self, request: LarvikJob, settings: dict) -> List[Tuple[models.Model,str]]:
raise NotImplementedError
def start(self, request: LarvikJob, settings: dict):
try:
returndict = self.parse(self.request, self.settings)
for item in returndict:
model, method = item
self.modelCreated(model, self.getSerializers()[type(model).__name__], method)
self.logger.info(str(method).capitalize() + " Model " + type(model).__name__)
except FileNotFoundError as e:
self.logger.info("RETRYING")
            self.start(request, settings)  # recursive retry
from typing import TypeVar, Generic
T = TypeVar('T')
class TypedSyncLarvikConsumer(Generic[T], SyncLarvikConsumer):
def start(self, request: T, settings: dict):
pass
```
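The consumer base classes above leave `getSerializers`, `getDefaultSettings` and `parse` abstract. A minimal sketch of a concrete `DaskSyncLarvikConsumer`, assuming hypothetical `Filtering` (request) and `Representation` (result) models with matching serializers from an application built on larvik:
```python
# Hedged sketch; Filtering, Representation and their serializers are assumed,
# hypothetical models/serializers, not part of larvik itself.
from typing import Dict, List, Tuple
from django.db import models
from larvik.consumers import DaskSyncLarvikConsumer

class FilterConsumer(DaskSyncLarvikConsumer):
    requestClass = Filtering  # hypothetical LarvikJob subclass
    def getSerializers(self) -> Dict[str, type]:
        return {"Filtering": FilteringSerializer,
                "Representation": RepresentationSerializer}
    def getDefaultSettings(self, request: models.Model) -> Dict:
        return {"sigma": 1.0}
    def parse(self, request, settings: dict) -> List[Tuple[models.Model, str]]:
        self.progress("Computing filtered representation")
        output = ...  # build a Representation from request and settings here
        return [(output, "create")]
```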
#### File: arbeider/larvik/helpers.py
```python
import xarray as xr
class MetaMethods(object):
def prepend(self, el, string= "Prewitt of"):
items = el.channels.data.compute()
for merge in items:
merge["Name"] = f"{string} {merge['Name']}"
return items
def merge(self,channels: list, el):
items = el.sel(c=channels).channels.data.compute()
if len(items) == 1:
return items[0]
name = ",".join([item["Name"] for item in items])
merge = items[0]
merge["Index"] = -1
merge["Name"] = f"Merged Channel ({name})"
return merge
class HelperMethods(object):
def addChannel(self,tosize=2):
pass
class LarvikManager(object):
def __init__(self):
self._helpers = None
self._meta = None
self.iteration = None
self.name = self.__class__.__name__
@property
    def meta(self):
if self._meta is None: self._meta = MetaMethods()
return self._meta
@property
def helpers(self):
if self._helpers is None: self._helpers = HelperMethods()
return self._helpers
@staticmethod
def fromIteration(iteration, name):
manager = LarvikManager()
manager.iteration = iteration
manager.name = name
return manager
def progress(self, progress):
print(f"Iter {self.iteration} at {self.name}: {progress}")
def persist(self, graph):
graph.persist()
def compute(self, graph):
graph.compute()
class LarvikParser(object):
@staticmethod
def filter(array: xr.DataArray, settings: dict, manager: LarvikManager) -> xr.DataArray:
raise NotImplementedError
```
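`LarvikManager` and `LarvikParser` together describe the interface that analysis code is expected to implement: a static `filter` that receives the array, the settings and a manager for progress reporting and graph computation. A hedged sketch of a trivial parser using that interface (the parser name is illustrative):
```python
# Hedged sketch; IdentityParser is an illustrative example, not part of larvik.
import xarray as xr
from larvik.helpers import LarvikManager, LarvikParser

class IdentityParser(LarvikParser):
    @staticmethod
    def filter(array: xr.DataArray, settings: dict, manager: LarvikManager) -> xr.DataArray:
        manager.progress("Applying identity filter")
        return array

manager = LarvikManager.fromIteration(0, "identity")
# filtered = IdentityParser.filter(some_dataarray, {}, manager)
```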
#### File: arbeider/larvik/models.py
```python
import uuid
from json import JSONEncoder
import dask
import xarray
import zarr as zr
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.contrib.postgres.fields.array import ArrayField
from django.contrib.postgres.fields.jsonb import JSONField
from django.db import models
# Create your models here.
from larvik.fields import DimsField, ShapeField, StoreFileField
from larvik.logging import get_module_logger
from larvik.managers import LarvikArrayManager
from larvik.storage.default import get_default_storagemode
from larvik.storage.local import LocalStorage, ZarrStorage
from larvik.storage.s3 import S3Storage
logger = get_module_logger(__name__)
get_user_model()
class LarvikJob(models.Model):
statuscode = models.IntegerField( null=True, blank=True)
statusmessage = models.CharField(max_length=500, null=True, blank=True)
settings = models.CharField(max_length=1000) # jsondecoded
creator = models.ForeignKey(User, on_delete=models.CASCADE)
nodeid = models.CharField(max_length=400, null=True, blank=True)
class Meta:
abstract = True
def _repr_html_(self):
return f'''<h5>Request by {self.creator.username} </h5>
<ul>
<li> Last Status: {self.statusmessage}</li>
<li> Node Status: {self.nodeid}</li>
<li> Settings: {self.settings}</li>
</ul>'''
class LarvikConsumer(models.Model):
name = models.CharField(max_length=100)
channel = models.CharField(max_length=100, unique=True, default="Not active")
settings = models.CharField(max_length=1000) # json decoded standardsettings
class Meta:
abstract = True
class LarvikArrayBase(models.Model):
fileserializer = None
store = StoreFileField(verbose_name="store",storage=get_default_storagemode().zarr(), upload_to="zarr", blank=True, null= True, help_text="The location of the Array on the Storage System (S3 or Media-URL)")
shape = ShapeField(models.IntegerField(),help_text="The arrays shape")
dims = DimsField(models.CharField(max_length=100),help_text="The arrays dimension")
name = models.CharField(max_length=1000, blank=True, null= True,help_text="Cleartext name")
signature = models.CharField(max_length=300,null=True, blank=True,help_text="The arrays unique signature")
unique = models.UUIDField(default=uuid.uuid4, editable=False)
objects = LarvikArrayManager()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Meta:
abstract = True
@property
def info(self):
return self.array.info()
@property
def viewer(self):
import larvik.extenders
return self.array.viewer
@property
def biometa(self):
import larvik.extenders
return self.array.biometa
@property
def array(self):
"""Accessor for the xr.DataArray class attached to the Model
Raises:
NotImplementedError: If Array does not contain a Store
Returns:
[xr.DataArray] -- The xr.DataArray class
"""
if self.store:
array = self.store.loadDataArray()
return array
else:
raise NotImplementedError("This array does not have a store")
@property
def dataset(self):
"""Accessor for the xr.DataSet class attached to the Model
Raises:
NotImplementedError: If Array does not contain a Store
Returns:
[xr.Dataset] -- The Dataset
"""
if self.store:
array = self.store.loadDataset()
return array
else:
raise NotImplementedError("This array does not have a store")
def _repr_html_(self):
return "<h1>" + f'Array at {str(self.group)} in {self.store}' + "</h1>"
class LarvikArray(LarvikArrayBase):
channels = JSONField(null=True)
class Meta:
abstract = True
```
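The `array` and `dataset` properties above lazily open the zarr store referenced by a model instance as xarray objects. A hedged sketch of a concrete model and a typical access pattern (the `Representation` model and its `sample` field are hypothetical):
```python
# Hedged sketch; Representation and its extra field are illustrative only.
from django.db import models
from larvik.models import LarvikArray

class Representation(LarvikArray):
    sample = models.CharField(max_length=100, blank=True, null=True)

# rep = Representation.objects.get(pk=1)
# xarr = rep.array        # xr.DataArray backed by the zarr store
# subset = xarr.sel(c=0)  # ordinary lazy xarray selection
```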
#### File: arbeider/larvik/views.py
```python
import json
import xarray as xr
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.http import FileResponse, HttpResponse, StreamingHttpResponse
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import APIException
from rest_framework.schemas.openapi import AutoSchema
from rest_framework.serializers import Serializer
from zarr.storage import (array_meta_key, attrs_key, default_compressor,
group_meta_key)
from larvik.logging import get_module_logger
from larvik.models import LarvikArray
from larvik.utils import UUIDEncoder
channel_layer = get_channel_layer()
# Zarr Specific Settings
zarr_metadata_key = '.zmetadata'
api_array = "array"
class LarvikJobWrapper(object):
def __init__(self, data=None, actiontype=None, actionpublishers=None, job=None, channel=None):
self.actiontype = actiontype
self.data = data
self.job = job if job else data
self.actionpublishers = actionpublishers
self.channel = channel
class LarvikViewSet(viewsets.ModelViewSet):
# TODO: The stringpublishing is yet not working
publishers = None
viewset_delegates = None
stringpublish = True
def __init__(self, **kwargs):
self.logger = get_module_logger(type(self).__name__)
super().__init__(**kwargs)
def publish(self, serializer, method):
serializedData = serializer.data
        serializedData = json.loads(json.dumps(serializedData, cls=UUIDEncoder))  # workaround: serialize UUIDs as strings
if self.publishers is not None:
self.logger.info(f"Publishers {self.publishers}")
for el in self.publishers:
self.logger.info(f"What up dog {el}")
modelfield = "empty"
try:
path = ""
for modelfield in el:
try:
value = serializedData[modelfield]
path += "{0}_{1}_".format(str(modelfield), str(value))
except KeyError as e:
self.logger.info("Modelfield {0} does not exist on {1}".format(str(el), str(self.serializer_class.__name__)))
self.logger.info("Publishing to String {0}".format(modelfield))
path += "{0}_".format(str(modelfield))
path = path[:-1]
self.logger.info("Publishing to Models {0}".format(path))
stream = str(serializer.Meta.model.__name__)
async_to_sync(channel_layer.group_send)(path, {"type": "stream", "stream": stream, "room": path,
"method": method, "data": serializedData})
except KeyError as e:
self.logger.info("Error Babe !!!".format(str(el), str(self.serializer_class.__name__)))
def perform_create(self, serializer):
super().perform_create(serializer)
self.logger.info("CALLED create")
self.publish(serializer, "create")
def perform_update(self, serializer):
super().perform_update(serializer)
self.publish(serializer, "update")
def perform_destroy(self, instance):
serialized = self.serializer_class(instance)
self.publish(serialized, "delete")
super().perform_destroy(instance)
class LarvikArrayViewSet(LarvikViewSet):
lookup_value_regex = '[^/]+'
def arraySelect(self,request):
larvik: LarvikArray = self.get_object()
query_params = request.query_params
array = larvik.array
# We are trying to pass on selection params
array = self.queryselect(array, query_params)
return array
def datasetSelect(self,request):
larvik: LarvikArray = self.get_object()
query_params = request.query_params
dataset = larvik.dataset
return dataset
def queryselect(self, array: xr.DataArray, query_params: dict) -> xr.DataArray:
"""Selects the Array Acording to some query parameters
Arguments:
array {xr.DataArray} -- "The xr.DataArray to select from"
query_params {dict} -- "The params according to Django QueryDicts"
Raises:
            APIException: An APIException
Returns:
xr.DataArray -- The selected xr.DataArray
"""
import larvik.extenders
try:
array = array.sel(c=query_params["c"]) if "c" in query_params else array
array = array.sel(t=query_params["t"]) if "t" in query_params else array
if "channel_name" in query_params:
s = f'Name == "{query_params["channel_name"]}"'
print(s)
c = array.biometa.channels.compute().query(s).index
array = array.sel(c= c)
except Exception as e:
raise APIException(e)
return array
@action(methods=['get'], detail=True,
url_path='shape', url_name='shape')
def shape(self, request, pk):
# We are trying to pass on selection params
array = self.arraySelect(request)
answer = json.dumps(array.shape)
response = HttpResponse(answer, content_type="application/json")
return response
@action(methods=['get'], detail=True,
url_path='dims', url_name='dims')
def dims(self, request, pk):
# We are trying to pass on selection params
array = self.arraySelect(request)
answer = json.dumps(array.dims)
response = HttpResponse(answer, content_type="application/json")
return response
@action(methods=['get'], detail=True,
url_path='channels', url_name='channels')
def channels(self, request, pk):
# We are trying to pass on selection params
array = self.arraySelect(request)
answer = array.biometa.channels.compute().to_json(orient="records")
response = HttpResponse(answer, content_type="application/json")
return response
@action(methods=['get'], detail=True,
url_path='info', url_name='info')
def info(self, request, pk):
# We are trying to pass on selection params
array: xr.DataArray = self.arraySelect(request)
with xr.set_options(display_style='html'):
answer = array._repr_html_()
response = HttpResponse(answer, content_type="text/html")
return response
def returnFile(self, key: str, subkey: str) -> FileResponse:
"""Returns the FIle in the Store as a File Response
Arguments:
key {string} -- key of the xr.Array Variable
subkey {string} -- subkey of the chunk
Returns:
            [FileResponse] -- The streaming HTTP FileResponse
"""
larvik: LarvikArray = self.get_object()
test = larvik.store.storage.open(f"{larvik.store.name}/{key}/{subkey}","rb")
return FileResponse(test)
@action(methods=['get'], detail=True,
url_path=f'{api_array}/{zarr_metadata_key}', url_name=f'{api_array}/{zarr_metadata_key}')
def get_zmetadata(self, request, pk):
larvik: LarvikArray = self.get_object()
test = larvik.store.storage.open(f"{larvik.store.name}/{zarr_metadata_key}","r")
file_content = test.read()
test.close()
return HttpResponse(content=file_content, content_type="application/json")
@action(methods=['get'], detail=True,
url_path=f'{api_array}/{group_meta_key}', url_name=f'{api_array}/{group_meta_key}')
def get_zgroupdata(self, request, pk):
larvik: LarvikArray = self.get_object()
test = larvik.store.storage.open(f"{larvik.store.name}/{group_meta_key}","r")
file_content = test.read()
test.close()
return HttpResponse(content=file_content, content_type="application/json")
@action(methods=['get'], detail=True,
url_path=f'{api_array}/{attrs_key}', url_name=f'{api_array}/{attrs_key}')
def get_zattrs(self, request, pk):
larvik: LarvikArray = self.get_object()
test = larvik.store.storage.open(f"{larvik.store.name}/{attrs_key}","r")
file_content = test.read()
test.close()
return HttpResponse(content=file_content, content_type="application/json")
@action(methods=['get'], detail=True,
url_path=f'{api_array}/(?P<c_key>[^/.]+)/(?P<c_value>[^/]+)', url_name=f'{api_array}/arrayaccessor')
def get_data_key(self, request, c_key, c_value, pk):
return self.returnFile(c_key,c_value)
class LarvikArrayMixIn():
lookup_value_regex = '[^/]+'
def arraySelect(self,request):
larvik: LarvikArray = self.get_object()
query_params = request.query_params
array = larvik.array
# We are trying to pass on selection params
array = self.queryselect(array, query_params)
return array
def datasetSelect(self,request):
larvik: LarvikArray = self.get_object()
query_params = request.query_params
dataset = larvik.dataset
return dataset
def queryselect(self, array: xr.DataArray, query_params: dict) -> xr.DataArray:
"""Selects the Array Acording to some query parameters
Arguments:
array {xr.DataArray} -- "The xr.DataArray to select from"
query_params {dict} -- "The params according to Django QueryDicts"
Raises:
            APIException: An APIException
Returns:
xr.DataArray -- The selected xr.DataArray
"""
import larvik.extenders
try:
array = array.sel(c=query_params["c"]) if "c" in query_params else array
array = array.sel(t=query_params["t"]) if "t" in query_params else array
if "channel_name" in query_params:
s = f'Name == "{query_params["channel_name"]}"'
print(s)
c = array.biometa.channels.compute().query(s).index
array = array.sel(c= c)
except Exception as e:
raise APIException(e)
return array
@action(methods=['get'], detail=True,
url_path='shape', url_name='shape')
def shape(self, request, pk):
# We are trying to pass on selection params
array = self.arraySelect(request)
answer = json.dumps(array.shape)
response = HttpResponse(answer, content_type="application/json")
return response
@action(methods=['get'], detail=True,
url_path='dims', url_name='dims')
def dims(self, request, pk):
# We are trying to pass on selection params
array = self.arraySelect(request)
answer = json.dumps(array.dims)
response = HttpResponse(answer, content_type="application/json")
return response
@action(methods=['get'], detail=True,
url_path='channels', url_name='channels')
def channels(self, request, pk):
# We are trying to pass on selection params
array = self.arraySelect(request)
answer = array.biometa.channels.compute().to_json(orient="records")
response = HttpResponse(answer, content_type="application/json")
return response
@action(methods=['get'], detail=True,
url_path='info', url_name='info')
def info(self, request, pk):
# We are trying to pass on selection params
array: xr.DataArray = self.arraySelect(request)
with xr.set_options(display_style='html'):
answer = array._repr_html_()
response = HttpResponse(answer, content_type="text/html")
return response
def returnFile(self, key: str, subkey: str) -> FileResponse:
"""Returns the FIle in the Store as a File Response
Arguments:
key {string} -- key of the xr.Array Variable
subkey {string} -- subkey of the chunk
Returns:
            [FileResponse] -- The streaming HTTP FileResponse
"""
larvik: LarvikArray = self.get_object()
test = larvik.store.storage.open(f"{larvik.store.name}/{key}/{subkey}","rb")
return FileResponse(test)
@action(methods=['get'], detail=True,
url_path=f'{api_array}/{zarr_metadata_key}', url_name=f'{api_array}/{zarr_metadata_key}')
def get_zmetadata(self, request, pk):
larvik: LarvikArray = self.get_object()
test = larvik.store.storage.open(f"{larvik.store.name}/{zarr_metadata_key}","r")
file_content = test.read()
test.close()
return HttpResponse(content=file_content, content_type="application/json")
@action(methods=['get'], detail=True,
url_path=f'{api_array}/{group_meta_key}', url_name=f'{api_array}/{group_meta_key}')
def get_zgroupdata(self, request, pk):
larvik: LarvikArray = self.get_object()
test = larvik.store.storage.open(f"{larvik.store.name}/{group_meta_key}","r")
file_content = test.read()
test.close()
return HttpResponse(content=file_content, content_type="application/json")
@action(methods=['get'], detail=True,
url_path=f'{api_array}/{attrs_key}', url_name=f'{api_array}/{attrs_key}')
def get_zattrs(self, request, pk):
larvik: LarvikArray = self.get_object()
test = larvik.store.storage.open(f"{larvik.store.name}/{attrs_key}","r")
file_content = test.read()
test.close()
return HttpResponse(content=file_content, content_type="application/json")
@action(methods=['get'], detail=True,
url_path=f'{api_array}/(?P<c_key>[^/.]+)/(?P<c_value>[^/]+)', url_name=f'{api_array}/arrayaccessor')
def get_data_key(self, request, c_key, c_value, pk):
return self.returnFile(c_key,c_value)
class LarvikJobViewSet(LarvikViewSet):
actionpublishers = None # this publishers will be send to the Action Handles and then they can send to the according
channel = None
actiontype = "startJob"
def __init__(self, **kwargs):
super().__init__(**kwargs)
def preprocess_jobs(self, serializer: Serializer):
""" If you need to alter any data like creating an Model on the fly
or create various jobs from one request, here is the place
should return Array of Jobs that need executing"""
return [self.create_job(serializer.data)]
def create_job(self, data, actiontype=None, actionpublishers=None, job=None, channel=None) -> LarvikJobWrapper:
actiontype = actiontype if actiontype else self.actiontype
actionpublishers = actionpublishers if actionpublishers else self.actionpublishers
job = job if job else data
channel = channel if channel else self.channel
return LarvikJobWrapper(data, actiontype, actionpublishers, job, channel)
def perform_create(self, serializer):
""" Right now only the creation of a new Job is possible, no way of stopping a job on its way"""
serializer.save()
jobs = self.preprocess_jobs(serializer)
self.publish_jobs(jobs)
self.publish(serializer, "create")
def publish_jobs(self, jobs: [LarvikJobWrapper]):
for nana in jobs:
async_to_sync(channel_layer.send)(nana.channel, {"type": nana.actiontype, "data": nana.data,
"publishers": nana.actionpublishers, "job": nana.job})
```
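Because `LarvikArrayViewSet` serves `.zmetadata`, `.zgroup`, `.zattrs` and the raw chunk keys under `<detail-url>/array/`, a client can in principle read the store directly over HTTP. A hedged sketch; the endpoint prefix depends on how the router registers the viewset and is an assumption here:
```python
# Hedged client-side sketch; the base URL below is a hypothetical route.
import fsspec
import zarr

base_url = "http://localhost:8000/api/representations/1/array"
store = fsspec.get_mapper(base_url)
group = zarr.open_consolidated(store)  # reads the exposed .zmetadata
print(group.tree())
```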
#### File: arbeider/mandal/urls.py
```python
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.shortcuts import render
from django.urls import include, path, re_path
from django.views.generic import TemplateView
from graphene_django.views import GraphQLView
from rest_framework import routers
from rest_framework.schemas import get_schema_view
from django.contrib.admin.views.decorators import staff_member_required
import answers.routes
import bioconverter.routes
import drawing.routes
import elements.routes
import evaluators.routes
import filters.routes
import flow.routes
import importer.routes
import metamorphers.routes
import mutaters.routes
import revamper.routes
import social.routes
import strainers.routes
import transformers.routes
import visualizers.routes
class MyRouter(routers.DefaultRouter):
include_format_suffixes = False
# Rest Framework Routers
router = MyRouter()
router.registry.extend(social.routes.router.registry)
router.registry.extend(drawing.routes.router.registry)
router.registry.extend(elements.routes.router.registry)
router.registry.extend(filters.routes.router.registry)
router.registry.extend(bioconverter.routes.router.registry)
router.registry.extend(metamorphers.routes.router.registry)
router.registry.extend(visualizers.routes.router.registry)
router.registry.extend(strainers.routes.router.registry)
router.registry.extend(importer.routes.router.registry)
router.registry.extend(answers.routes.router.registry)
router.registry.extend(transformers.routes.router.registry)
router.registry.extend(evaluators.routes.router.registry)
router.registry.extend(mutaters.routes.router.registry)
router.registry.extend(flow.routes.router.registry)
router.registry.extend(revamper.routes.router.registry)
# Bootstrap Backend
@login_required
def index(request):
# Render that in the index template
return render(request, "index-oslo.html")
@login_required
@staff_member_required
def config(request):
from .modeselektor import ArnheimDefaults
import json
    defaults = ArnheimDefaults(initial=False)
    dictionary = defaults.__dict__
dictionary.pop("zarr_dtype")
# Render that in the index template
return render(request, "config.html", context={"config": dictionary})
urlpatterns = [
path('', index, name='index'),
url(r'^config$', config, name="config"),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^graphql$', GraphQLView.as_view(graphiql=True)),
path('admin/', admin.site.urls),
url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
url(r'^api/', include((router.urls, 'api'))),
path('openapi', get_schema_view(
title="Arnheim",
description="API for accessing the underlying Architecture",
version="1.0.0"
), name='openapi-schema'),
path('redoc/', TemplateView.as_view(
template_name='redoc.html',
extra_context={'schema_url':'openapi-schema'}
), name='redoc'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
```
#### File: arbeider/trontheim/middleware.py
```python
from django.test import RequestFactory
from rest_framework.settings import api_settings
from larvik.logging import get_module_logger
logger = get_module_logger(__name__)
class QueryAuthMiddleware:
"""
    Custom middleware that authenticates the scope from a "token" query string parameter.
"""
def __init__(self, inner):
# Store the ASGI application we were passed
self.inner = inner
def __call__(self, scope):
# Look up user from query string (you should also do things like
# check it's a valid user ID, or if scope["user"] is already populated)
line = str(scope["query_string"])
if "user" in scope:
if scope["user"] is not None:
return self.inner(dict(scope))
else:
pass
auth_token = None
try:
auth_token = line.split('token=')[-1][0:-1]
except AttributeError:
logger.error("No token provided")
pass
user = None
if auth_token:
# compatibility with rest framework
rf = RequestFactory()
get_request = rf.get('/api/comments/')
get_request._request = {}
get_request.method = "GET"
get_request.META["HTTP_AUTHORIZATION"] = "Bearer {}".format(auth_token)
authenticators = [auth() for auth in api_settings.DEFAULT_AUTHENTICATION_CLASSES]
for authenticator in authenticators:
user_auth_tuple = None
user_auth_tuple = authenticator.authenticate(get_request)
if user_auth_tuple is not None:
user, auth = user_auth_tuple
break
# Return the inner application directly and let it run everything else
return self.inner(dict(scope, user=user))
```
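A hedged sketch of how this middleware might be wired into a channels routing configuration; the websocket pattern and consumer are illustrative only:
```python
# Hedged wiring sketch; SomeConsumer and the URL pattern are hypothetical.
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path
from trontheim.middleware import QueryAuthMiddleware

application = ProtocolTypeRouter({
    "websocket": QueryAuthMiddleware(
        URLRouter([
            re_path(r"^ws/$", SomeConsumer),
        ])
    ),
})
```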
#### File: arbeider/trontheim/views.py
```python
from larvik.logging import get_module_logger
from larvik.utils import UUIDEncoder
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from rest_framework.serializers import Serializer
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import APIException
import json
channel_layer = get_channel_layer()
class LarvikJobWrapper(object):
def __init__(self, data=None, actiontype=None, actionpublishers=None, job=None, channel=None):
self.actiontype = actiontype
self.data = data
self.job = job if job else data
self.actionpublishers = actionpublishers
self.channel = channel
class PublishingModelViewSet(viewsets.ModelViewSet):
# TODO: The stringpublishing is yet not working
publishers = None
viewset_delegates = None
stringpublish = True
def __init__(self, **kwargs):
self.logger = get_module_logger(type(self).__name__)
super().__init__(**kwargs)
def publish(self, serializer, method):
serializedData = serializer.data
        serializedData = json.loads(json.dumps(serializedData, cls=UUIDEncoder))  # workaround: serialize UUIDs as strings
if self.publishers is not None:
self.logger.info(f"Publishers {self.publishers}")
for el in self.publishers:
self.logger.info(f"What up dog {el}")
modelfield = "empty"
try:
path = ""
for modelfield in el:
try:
value = serializedData[modelfield]
path += "{0}_{1}_".format(str(modelfield), str(value))
except KeyError as e:
self.logger.info("Modelfield {0} does not exist on {1}".format(str(el), str(self.serializer_class.__name__)))
self.logger.info("Publishing to String {0}".format(modelfield))
path += "{0}_".format(str(modelfield))
path = path[:-1]
self.logger.info("Publishing to Models {0}".format(path))
stream = str(serializer.Meta.model.__name__)
async_to_sync(channel_layer.group_send)(path, {"type": "stream", "stream": stream, "room": path,
"method": method, "data": serializedData})
except KeyError as e:
self.logger.info("Error Babe !!!".format(str(el), str(self.serializer_class.__name__)))
def perform_create(self, serializer):
super().perform_create(serializer)
self.logger.info("CALLED create")
self.publish(serializer, "create")
def perform_update(self, serializer):
super().perform_update(serializer)
self.publish(serializer, "update")
def perform_destroy(self, instance):
serialized = self.serializer_class(instance)
self.publish(serialized, "delete")
super().perform_destroy(instance)
class TaskPublishingViewSet(PublishingModelViewSet):
actionpublishers = None # this publishers will be send to the Action Handles and then they can send to the according
channel = None
actiontype = "startJob"
def __init__(self, **kwargs):
super().__init__(**kwargs)
def preprocess_jobs(self, serializer: Serializer):
""" If you need to alter any data like creating an Model on the fly
or create various jobs from one request, here is the place
should return Array of Jobs that need executing"""
return [self.create_job(serializer.data)]
def create_job(self, data, actiontype=None, actionpublishers=None, job=None, channel=None) -> LarvikJobWrapper:
actiontype = actiontype if actiontype else self.actiontype
actionpublishers = actionpublishers if actionpublishers else self.actionpublishers
job = job if job else data
channel = channel if channel else self.channel
return LarvikJobWrapper(data, actiontype, actionpublishers, job, channel)
def perform_create(self, serializer):
""" Right now only the creation of a new Job is possible, no way of stopping a job on its way"""
serializer.save()
jobs = self.preprocess_jobs(serializer)
self.publish_jobs(jobs)
self.publish(serializer, "create")
def publish_jobs(self, jobs: [LarvikJobWrapper]):
for nana in jobs:
async_to_sync(channel_layer.send)(nana.channel, {"type": nana.actiontype, "data": nana.data,
"publishers": nana.actionpublishers, "job": nana.job})
``` |
{
"source": "jhnnsrs/spatial-omics-hackathon-2021",
"score": 3
} |
#### File: spatial-omics-hackathon-2021/zarr_anndata/anndata_utils.py
```python
import os
from typing import Dict, Tuple, Union
import anndata
import pandas as pd
DENSE_COLUMNS = [
'zc',
'yc',
'xc'
]
def setup_anndata(
fpath: os.PathLike,
out_dir: os.PathLike
) -> Tuple[Tuple[()], Dict[str, Union[anndata.AnnData, os.PathLike]]]:
"""Create the anndata object from the example csv
Parameters
----------
    fpath : os.PathLike
        The filepath to the csv file.
    out_dir : os.PathLike
        The directory in which the output zarr will be written.
    Returns
    -------
    benchmark_args : Tuple[Tuple[()], Dict[str, Union[anndata.AnnData, os.PathLike]]]
        The (args, kwargs) pair for ann_to_zarr. 'ann_obj' is the
        AnnData object to write, 'out_path' is the file path
        to write the zarr to.
"""
df = pd.read_csv(fpath)
# get the dense array
dense_array = df[DENSE_COLUMNS].to_numpy()
# drop the dense array from the table
obs = df.drop(DENSE_COLUMNS, axis='columns')
# create the AnnData object
ann_obj = anndata.AnnData(X=dense_array, obs=obs)
# make the filepath
out_path = os.path.join(out_dir, 'test.zarr')
return (), {'ann_obj': ann_obj, 'out_path': out_path}
def ann_to_zarr(ann_obj: anndata.AnnData, out_path: os.PathLike):
ann_obj.write_zarr(out_path)
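# Hedged usage sketch (not part of the original module): chain the two helpers
# to convert the example csv into a zarr store. The csv path and output
# directory below are illustrative placeholders.
if __name__ == "__main__":
    args, kwargs = setup_anndata("example.csv", "/tmp/anndata_out")
    ann_to_zarr(*args, **kwargs)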
``` |
{
"source": "jhnnsrs/xarray-multiscale",
"score": 2
} |
#### File: src/xarray_multiscale/storage.py
```python
import dask
import dask.array as da
from dask.utils import SerializableLock
import xarray
from typing import Sequence
def blocked_store(
sources: Sequence[xarray.DataArray], targets, chunks=None
) -> Sequence[dask.delayed]:
stores = []
for slices, source in sources:
if chunks is not None:
rechunked_sources = [
s.data.rechunk(chunks) for s, z in zip(source, targets)
]
elif hasattr(targets[0], "chunks"):
rechunked_sources = [
s.data.rechunk(z.chunks) for s, z in zip(source, targets)
]
else:
rechunked_sources = [s.data for s in source]
stores.append(
da.store(
rechunked_sources,
targets,
lock=SerializableLock(),
regions=slices,
compute=False,
)
)
return stores
```
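`blocked_store` expects a sequence of `(region, arrays)` pairs, one per block of the output, together with matching storage targets. A hedged end-to-end sketch writing a small pyramid into in-memory zarr arrays; the single whole-array region below is illustrative:
```python
# Hedged usage sketch; real pipelines would pass one (slices, arrays) pair per
# block of the output instead of one region covering everything.
import dask
import numpy as np
import xarray as xr
import zarr
from xarray_multiscale import multiscale
from xarray_multiscale.reducers import windowed_mean
from xarray_multiscale.storage import blocked_store

data = xr.DataArray(np.arange(64 * 64, dtype="float32").reshape(64, 64), dims=("x", "y"))
pyramid = [level.chunk(32) for level in multiscale(data, windowed_mean, (2, 2))[:3]]
targets = [zarr.zeros(level.shape, chunks=(32, 32), dtype=level.dtype) for level in pyramid]
region = (slice(None), slice(None))  # one region covering each full array
stores = blocked_store([(region, pyramid)], targets, chunks=(32, 32))
dask.compute(stores)
```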
#### File: xarray-multiscale/tests/test_doc.py
```python
from xarray_multiscale import multiscale
from xarray_multiscale.reducers import windowed_mean
import numpy as np
import xarray as xr
def test_xarray_example():
data = xr.DataArray(np.zeros((1024, 1024)), dims=("x", "y"))
scaled_data = multiscale(data, windowed_mean, (2, 2))
assert len(scaled_data) == 11, "Incorrect number of arrays returned"
```
#### File: xarray-multiscale/tests/test_reducers.py
```python
from xarray_multiscale.reducers import windowed_mean, windowed_mode
import numpy as np
def test_windowed_mode():
data = np.arange(16) % 3 + np.arange(16) % 2
answer = np.array([2, 0, 1, 2])
results = windowed_mode(data, (4,))
assert np.array_equal(results, answer)
data = np.arange(16).reshape(4,4) % 3
answer = np.array([[1,0],[0,2]])
results = windowed_mode(data, (2,2))
assert np.array_equal(results, answer)
def test_windowed_mean():
data = np.arange(16).reshape(4,4) % 2
answer = np.array([[0.5, 0.5],[0.5, 0.5]])
results = windowed_mean(data, (2,2))
assert np.array_equal(results, answer)
``` |
{
"source": "jhnphm/boar",
"score": 2
} |
#### File: boar/blobrepo/repository.py
```python
from __future__ import with_statement
"""
The Repository together with SessionWriter and SessionReader are the
only classes that directly accesses the repository.
"""
import os
import re
import shutil
import sessions
import sys
import tempfile
import random, time
from common import *
from boar_common import *
import blobreader
from jsonrpc import FileDataSource
from boar_exceptions import *
from blobreader import create_blob_reader
import deduplication
LATEST_REPO_FORMAT = 5
REPOID_FILE = "repoid.txt"
VERSION_FILE = "version.txt"
RECOVERYTEXT_FILE = "recovery.txt"
QUEUE_DIR = "queue"
BLOB_DIR = "blobs"
SESSIONS_DIR = "sessions"
RECIPES_DIR = "recipes"
TMP_DIR = "tmp"
DERIVED_DIR = "derived"
DERIVED_SHA256_DIR = os.path.join(DERIVED_DIR, "sha256")
DERIVED_BLOCKS_DIR = os.path.join(DERIVED_DIR, "blocks")
DERIVED_BLOCKS_DB = os.path.join(DERIVED_BLOCKS_DIR, "blocks.db")
DELETE_MARKER = "deleted.json"
REPO_DIRS_V0 = (QUEUE_DIR, BLOB_DIR, SESSIONS_DIR, TMP_DIR)
REPO_DIRS_V1 = (QUEUE_DIR, BLOB_DIR, SESSIONS_DIR, TMP_DIR,\
DERIVED_DIR, DERIVED_SHA256_DIR)
REPO_DIRS_V2 = (QUEUE_DIR, BLOB_DIR, SESSIONS_DIR, TMP_DIR,\
DERIVED_DIR)
REPO_DIRS_V3 = (QUEUE_DIR, BLOB_DIR, SESSIONS_DIR, TMP_DIR,\
DERIVED_DIR)
REPO_DIRS_V4 = (QUEUE_DIR, BLOB_DIR, SESSIONS_DIR, TMP_DIR,\
DERIVED_DIR)
REPO_DIRS_V5 = (QUEUE_DIR, BLOB_DIR, SESSIONS_DIR, TMP_DIR,\
DERIVED_DIR, DERIVED_BLOCKS_DIR, RECIPES_DIR)
DEDUP_BLOCK_SIZE = 2**16
recoverytext = """Repository format v%s
This is a versioned repository of files. It is designed to be easy to
recover in case the original software is unavailable.
This document describes the layout of the repository, so that a
programmer can construct a simple program that recovers the data. To
extract the data, only basic programming skills are necessary. The
extraction can also be performed manually for individual files.
Note that it is always easier and safer to use the original software,
if possible. At the time of this writing, the boar software can be
downloaded at http://code.google.com/p/boar/
== The non-vital files ==
The repository contains files that are not useful for extracting
data. These are the "tmp", "derived", and "queue" folders. They can be
ignored for this purpose.
== The blobs ==
All files are stored verbatim in the "blobs" directory, named after
their md5 checksum, and sorted in sub directories based on the start
of their names. For instance, if a file "testimage.jpg" has the
checksum bc7b0fb8c2e096693acacbd6cb070f16, it will be stored in
blobs/bc/bc7b0fb8c2e096693acacbd6cb070f16 since the checksum starts
with the letters "bc". The filename "testimage.jpg" is discarded. Such
an anonymized file is called a "blob" in this document.
== The sessions ==
All data files are in the JSON file format. It is quite simple, but if
you need details, see RFC4627.
The information necessary to reconstruct a file tree is stored under
the "sessions" directory. Each session consists of a series of
snapshots of a specific file tree. All snapshots have a revision id
corresponding to the name of the directory under "sessions". Each
snapshot represents a file tree at a point in time.
The details of a snapshot will be given in the files "session.json"
and "bloblist.json" (the bloblist). The bloblist contains the mapping
between filenames and blobs. To restore a snapshot, iterate over the
bloblist and copy the blob with the corresponding id to a file
with the name specified in the bloblist.
However, to completely restore a file tree, you must consider the
"base_session" value in session.json. If there is such a value, the
snapshot with that revision id must be extracted before the current
snapshot is extracted on top of it. This may repeat recursively until
a snapshot is found that does not have the base_session value set. In
other words, extraction must begin from the bottom of this
"base_session" chain.
Every snapshot with a "base_session" value describes the changes that
needs to be applied to transform the base snapshot into the current
snapshot. Therefore, there are also special entries in the bloblist
that indicate that files should be removed from the base
snapshot. These are the entries containing a keyword "action" with the
value "remove". If you simply want to extract as much data as
possible, these special entries can be ignored.
""" % LATEST_REPO_FORMAT
verify_assert()
def misuse_assert(test, errormsg = None):
if not test:
raise MisuseError(errormsg)
def integrity_assert(test, errormsg = None):
if not test:
raise CorruptionError(errormsg)
def create_repository(repopath, enable_deduplication = False):
if enable_deduplication and not deduplication.dedup_available:
# Ok, we COULD create a deduplicated repo without the module,
# but the user is likely to be confused when he cannot use it.
raise UserError("Cannot create deduplicated repository: deduplication module is not installed")
os.mkdir(repopath)
create_file(os.path.join(repopath, VERSION_FILE), str(LATEST_REPO_FORMAT))
for d in QUEUE_DIR, BLOB_DIR, SESSIONS_DIR, TMP_DIR, DERIVED_DIR, DERIVED_BLOCKS_DIR, RECIPES_DIR:
os.mkdir(os.path.join(repopath, d))
create_file(os.path.join(repopath, "recovery.txt"), recoverytext)
create_file(os.path.join(repopath, REPOID_FILE), generate_random_repoid())
if enable_deduplication:
with open(os.path.join(repopath, "ENABLE_DEDUPLICATION"), "wb"):
pass
def looks_like_repo(repo_path):
"""Superficial check to see if the given path contains anything
resembling a repo of any version."""
assert LATEST_REPO_FORMAT == 5 # Look through this function when updating format
for dirname in (QUEUE_DIR, BLOB_DIR, SESSIONS_DIR, TMP_DIR):
dirpath = os.path.join(repo_path, dirname)
if not (os.path.exists(dirpath) and os.path.isdir(dirpath)):
return False
return True
def has_pending_operations(repo_path):
dirpath = os.path.join(repo_path, QUEUE_DIR)
return len(os.listdir(dirpath)) != 0
def get_recipe_md5(recipe_filename):
md5 = recipe_filename.split(".")[0]
assert is_md5sum(md5)
return md5
BlocksDB = deduplication.FakeBlocksDB
if deduplication.dedup_available:
BlocksDB = deduplication.BlocksDB
class Repo:
def __init__(self, repopath):
# The path must be absolute to avoid problems with clients
# that changes the cwd. For instance, fuse.
assert isinstance(repopath, unicode)
assert(os.path.isabs(repopath)), "The repo path must be absolute. "\
+"Was: " + repopath
if not looks_like_repo(repopath):
raise UserError("The path %s does not exist or does not contain a valid repository" % repopath)
self.repopath = repopath
if self.__get_repo_version() > LATEST_REPO_FORMAT:
raise UserError("Repo is from a future boar version. Upgrade your boar.")
self.session_readers = {}
self.scanners = ()
self.repo_mutex = FileMutex(os.path.join(repopath, TMP_DIR), "__REPOLOCK__")
misuse_assert(os.path.exists(self.repopath), "No such directory: %s" % (self.repopath))
self.readonly = os.path.exists(os.path.join(repopath, "READONLY"))
if not isWritable(os.path.join(repopath, TMP_DIR)):
if self.get_queued_session_id() != None:
raise UserError("Repo is write protected with pending changes. Cannot continue.")
if self.__get_repo_version() not in (0, 1, 2, 3, 4, 5):
                # Intentional explicit counting so that we'll remember to check compatibility with future versions
raise UserError("Repo is write protected and from an unsupported older version of boar. Cannot continue.")
notice("Repo is write protected - only read operations can be performed")
self.readonly = True
if self.deduplication_enabled() and not deduplication.dedup_available:
self.readonly = True
notice("This repository requires the native deduplication module for writing - only read operations can be performed.")
if not self.readonly:
self.repo_mutex.lock_with_timeout(60)
try:
self.__upgrade_repo()
self.__quick_check()
self.blocksdb = BlocksDB(os.path.join(self.repopath, DERIVED_BLOCKS_DB), DEDUP_BLOCK_SIZE)
self.process_queue()
finally:
self.repo_mutex.release()
def __enter__(self):
self.repo_mutex.lock()
assert self.repo_mutex.is_locked()
return self
def __exit__(self, type, value, traceback):
self.repo_mutex.release()
def close(self):
pass
def __quick_check(self):
"""This method must be called after any repository upgrade
procedure. It will assert that the repository is upgraded to
the latest format and looks somewhat ok. It will raise an
exception if an error is found."""
repo_version = self.__get_repo_version()
assert repo_version, "Repo format obsolete. Upgrade failed?"
if repo_version != LATEST_REPO_FORMAT:
raise UserError("Repo version %s can not be handled by this version of boar" % repo_version)
assert_msg = "Repository at %s is missing vital files. (Is it really a repository?)" % self.repopath
assert LATEST_REPO_FORMAT == 5 # Check below must be updated when repo format changes
for directory in REPO_DIRS_V5:
integrity_assert(dir_exists(os.path.join(self.repopath, directory)), assert_msg)
integrity_assert(os.path.exists(os.path.join(self.repopath, REPOID_FILE)),
"Repository at %s does not have an identity file." % self.repopath)
def allows_permanent_erase(self):
return os.path.exists(os.path.join(self.repopath, "ENABLE_PERMANENT_ERASE"))
def deduplication_enabled(self):
return os.path.exists(os.path.join(self.repopath, "ENABLE_DEDUPLICATION"))
def __upgrade_repo(self):
assert not self.readonly, "Repo is read only, cannot upgrade"
assert self.repo_mutex.is_locked()
version = self.__get_repo_version()
if version > LATEST_REPO_FORMAT:
raise UserError("Repo version %s can not be handled by this version of boar" % version)
if version == LATEST_REPO_FORMAT:
return
notice("Old repo format detected. Upgrading...")
if self.__get_repo_version() == 0:
self.__upgrade_repo_v0()
assert self.__get_repo_version() == 1
if self.__get_repo_version() == 1:
self.__upgrade_repo_v1()
assert self.__get_repo_version() == 2
if self.__get_repo_version() == 2:
self.__upgrade_repo_v2()
assert self.__get_repo_version() == 3
if self.__get_repo_version() == 3:
self.__upgrade_repo_v3()
assert self.__get_repo_version() == 4
if self.__get_repo_version() == 4:
self.__upgrade_repo_v4()
assert self.__get_repo_version() == 5
try:
self.__quick_check()
except:
warn("Post-upgrade quickcheck of repository failed!")
raise
def __upgrade_repo_v0(self):
""" This upgrade will upgrade a repository from before strict
version numbering (v0), to a v1 format repository. It does this by
performing the following actions:
* Create directory "derived"
* Create directory "derived/sha256"
* Update "recovery.txt"
* Create "version.txt" with value 1
"""
assert not self.readonly, "Repo is read only, cannot upgrade"
version = self.__get_repo_version()
assert version == 0
if not isWritable(self.repopath):
raise UserError("Cannot upgrade repository - write protected")
try:
recipes_dir = os.path.join(self.repopath, RECIPES_DIR)
if os.path.exists(recipes_dir):
# recipes_dir is an experimental feature and should not contain
# any data in a v0 repo (if it exists at all)
try:
os.rmdir(recipes_dir)
except:
raise UserError("Problem removing obsolete 'recipes' dir. Make sure it is empty and try again.")
if not dir_exists(self.repopath + "/" + DERIVED_DIR):
os.mkdir(self.repopath + "/" + DERIVED_DIR)
if not dir_exists(os.path.join(self.repopath, DERIVED_SHA256_DIR)):
os.mkdir(os.path.join(self.repopath, DERIVED_SHA256_DIR))
replace_file(os.path.join(self.repopath, RECOVERYTEXT_FILE), recoverytext)
version_file = os.path.join(self.repopath, VERSION_FILE)
if os.path.exists(version_file):
warn("Version marker should not exist for repo format v0")
safe_delete_file(version_file)
create_file(version_file, "1")
except OSError, e:
raise UserError("Upgrade could not complete. Make sure that the repository "+
"root is writable and try again. The error was: '%s'" % e)
def __upgrade_repo_v1(self):
""" This upgrade will perform the following actions:
* If it exists, delete file "derived/sha256/sha256cache"
* Rmdir directory "derived/sha256"
* Update "version.txt" to 2
"""
assert not self.readonly, "Repo is read only, cannot upgrade"
version = self.__get_repo_version()
assert version == 1
if not isWritable(self.repopath):
raise UserError("Cannot upgrade repository - write protected")
try:
for directory in REPO_DIRS_V1:
integrity_assert(dir_exists(os.path.join(self.repopath, directory)), \
"Repository says it is v1 format but is missing %s" % directory)
dbfile = os.path.join(self.repopath, DERIVED_SHA256_DIR, "sha256cache")
sha256_dir = os.path.join(self.repopath, DERIVED_SHA256_DIR)
if os.path.exists(dbfile):
                # The sha256 cache is derived data from the obsolete v1 layout
                # and can safely be deleted
safe_delete_file(dbfile)
if os.path.exists(sha256_dir):
os.rmdir(sha256_dir)
replace_file(os.path.join(self.repopath, VERSION_FILE), "2")
except OSError, e:
raise UserError("Upgrade could not complete. Make sure that the repository "+
"root is writable and try again. The error was: '%s'" % e)
def __upgrade_repo_v2(self):
""" This upgrade will perform the following actions:
* Update "version.txt" to 3
* Restore any legally missing snapshots with a deleted snapshot definition.
"""
assert not self.readonly, "Repo is read only, cannot upgrade"
version = self.__get_repo_version()
assert version == 2
if not isWritable(self.repopath):
raise UserError("Cannot upgrade repository - write protected")
for directory in REPO_DIRS_V2:
integrity_assert(dir_exists(os.path.join(self.repopath, directory)), \
"Repository says it is v2 format but is missing %s" % directory)
for rev in range(1, self.get_highest_used_revision() + 1):
if os.path.exists(self.get_session_path(rev)):
continue
tmpdir = tempfile.mkdtemp(prefix = "tmp_", dir = os.path.join(self.repopath, TMP_DIR))
writer = sessions._NaiveSessionWriter(session_name = u"__deleted", base_session = None, path = tmpdir)
writer.set_fingerprint("<KEY>")
writer.commit()
del writer
os.rename(tmpdir, self.get_session_path(rev))
try:
replace_file(os.path.join(self.repopath, RECOVERYTEXT_FILE), recoverytext)
replace_file(os.path.join(self.repopath, VERSION_FILE), "3")
except OSError, e:
raise UserError("Upgrade could not complete. Make sure that the repository "+
"root is writable and try again. The error was: '%s'" % e)
def __upgrade_repo_v3(self):
""" This upgrade will perform the following actions:
* Update "version.txt" to 4
* Write a random identity string to "repoid.txt"
"""
assert not self.readonly, "Repo is read only, cannot upgrade"
version = self.__get_repo_version()
assert version == 3
if not isWritable(self.repopath):
raise UserError("Cannot upgrade repository - write protected")
for directory in REPO_DIRS_V3:
integrity_assert(dir_exists(os.path.join(self.repopath, directory)), \
"Repository says it is v3 format but is missing %s" % directory)
try:
replace_file(os.path.join(self.repopath, RECOVERYTEXT_FILE), recoverytext)
repoid = generate_random_repoid()
if not os.path.exists(os.path.join(self.repopath, REPOID_FILE)):
create_file(os.path.join(self.repopath, REPOID_FILE), repoid)
replace_file(os.path.join(self.repopath, VERSION_FILE), "4")
except OSError, e:
raise UserError("Upgrade could not complete. Make sure that the repository "+
"root is writable and try again. The error was: '%s'" % e)
def __upgrade_repo_v4(self):
""" This upgrade will perform the following actions:
* Update "version.txt" to 5
"""
assert not self.readonly, "Repo is read only, cannot upgrade"
version = self.__get_repo_version()
assert version == 4
if not isWritable(self.repopath):
raise UserError("Cannot upgrade repository - write protected")
for directory in REPO_DIRS_V4:
integrity_assert(dir_exists(os.path.join(self.repopath, directory)), \
"Repository says it is v4 format but is missing %s" % directory)
try:
if not dir_exists(os.path.join(self.repopath, DERIVED_BLOCKS_DIR)):
os.mkdir(os.path.join(self.repopath, DERIVED_BLOCKS_DIR))
if not dir_exists(os.path.join(self.repopath, RECIPES_DIR)):
os.mkdir(os.path.join(self.repopath, RECIPES_DIR))
replace_file(os.path.join(self.repopath, RECOVERYTEXT_FILE), recoverytext)
replace_file(os.path.join(self.repopath, VERSION_FILE), "5")
except OSError, e:
raise UserError("Upgrade could not complete. Make sure that the repository "+
"root is writable and try again. The error was: '%s'" % e)
def get_path(self, subdir, *parts):
return os.path.join(self.repopath, subdir, *parts)
def get_tmpdir(self):
return self.get_path(TMP_DIR)
def get_repo_identifier(self):
"""Returns the identifier for the repo, or None if the
repository has no identifier. The latter scenarion will
happen when accessing write-protected old (v3 or earlier)
repositories."""
idfile = os.path.join(self.repopath, REPOID_FILE)
if not os.path.exists(idfile):
return None
lines = read_file(idfile).splitlines()
assert len(lines) == 1, "%s must contain exactly one line" % REPOID_FILE
identifier = lines[0].strip()
assert re.match("^[a-zA-Z0-9_-]+$", identifier), "illegal characters in repo identifier '%s'" % identifier
return identifier
def __get_repo_version(self):
version_file = os.path.join(self.repopath, VERSION_FILE)
if os.path.exists(version_file):
with safe_open(version_file, "rb") as f:
return int(f.read())
# Repo is from before repo format numbering started.
# Make sure it is a valid one and return v0
for directory in REPO_DIRS_V0:
integrity_assert(dir_exists(os.path.join(self.repopath, directory)),
"The repo at %s does not look like a repository (missing %s)" % (self.repopath, directory))
return 0
def __str__(self):
return "repo:"+self.repopath
def get_repo_path(self):
return self.repopath
def get_queue_path(self, session_id):
assert isinstance(session_id, int)
return os.path.join(self.repopath, QUEUE_DIR, str(session_id))
def get_blob_path(self, sum):
assert is_md5sum(sum), "Was: %s" % (sum)
return os.path.join(self.repopath, BLOB_DIR, sum[0:2], sum)
def has_block(self, sha):
return self.blocksdb.has_block(sha)
def get_block_location(self, sha):
blob, offset = self.blocksdb.get_blob_location(sha)
assert self.has_raw_blob(blob)
return blob, offset
def get_recipe_path(self, recipe):
if is_recipe_filename(recipe):
recipe = get_recipe_md5(recipe)
assert is_md5sum(recipe)
return os.path.join(self.repopath, RECIPES_DIR, recipe[0:2], recipe + ".recipe")
def has_raw_blob(self, sum):
"""Returns true if there is an actual (non-recipe based)
blob with the given checksum"""
blobpath = self.get_blob_path(sum)
return os.path.exists(blobpath)
def has_recipe_blob(self, sum):
return os.path.exists(self.get_recipe_path(sum))
def has_blob(self, sum):
"""Returns true if there is a blob with the given
checksum. The blob may be raw or recipe-based."""
blobpath = self.get_blob_path(sum)
recpath = self.get_recipe_path(sum)
return os.path.exists(blobpath) or os.path.exists(recpath)
def get_recipe(self, sum):
recpath = self.get_recipe_path(sum)
if not os.path.exists(recpath):
return None
try:
recipe = read_json(recpath)
except ValueError:
raise CorruptionError("Recipe is malformed: %s" % recpath)
if "md5sum" not in recipe:
raise CorruptionError("Recipe is missing properties: %s" % recpath)
if recipe['md5sum'] != sum:
raise CorruptionError("Recipe name does not match recipe contents: %s" % recpath)
return recipe
def get_blob_size(self, sum):
blobpath = self.get_blob_path(sum)
if os.path.exists(blobpath):
# Windows always returns a Long. Let's be consistent.
return long(os.path.getsize(blobpath))
recipe = self.get_recipe(sum)
if not recipe:
raise ValueError("No such blob or recipe exists: "+sum)
return long(recipe['size'])
def get_blob_reader(self, sum, offset = 0, size = None):
""" Returns a blob reader object that can be used to stream
the requested data. """
assert offset >= 0, offset
assert size == None or size >= 0, size
if self.has_raw_blob(sum):
blobsize = self.get_blob_size(sum)
if size == None:
size = blobsize
assert offset + size <= blobsize
path = self.get_blob_path(sum)
fo = safe_open(path, "rb")
fo.seek(offset)
return FileDataSource(fo, size)
recipe = self.get_recipe(sum)
if recipe:
reader = blobreader.RecipeReader(recipe, self, offset=offset, size=size)
return reader
raise ValueError("No such blob or recipe exists: "+sum)
def get_session_path(self, session_id):
assert isinstance(session_id, int)
return os.path.join(self.repopath, SESSIONS_DIR, str(session_id))
def get_all_sessions(self):
return get_all_ids_in_directory(self.get_path(SESSIONS_DIR))
def is_deleted(self, rev):
return self.get_session(rev).is_deleted()
def get_deleted_snapshots(self):
result = []
for sid in self.get_all_sessions():
if self.get_session(sid).is_deleted():
result.append(sid)
return result
def get_highest_used_revision(self):
""" Returns the highest used revision id in the
repository. Deleted revisions are counted as well. Note that
this method returns 0 in the case that there are no
revisions. """
existing_sessions = get_all_ids_in_directory(self.get_path(SESSIONS_DIR))
return max([0] + existing_sessions)
def has_snapshot(self, id):
assert isinstance(id, int)
path = os.path.join(self.repopath, SESSIONS_DIR, str(id))
return os.path.exists(path)
def verify_snapshot(self, id):
if self.__get_repo_version() < 3: # To make it possible to access old read-only repos
warn("todo: implement verify_snapshot for early versions")
return True
session_exists = self.has_snapshot(id)
if not session_exists:
raise CorruptionError("Snapshot %s is missing" % id)
snapshot = self.get_session(id)
# No exception - all is well
def get_session(self, id):
assert id, "Id was: "+ str(id)
assert isinstance(id, int)
misuse_assert(self.has_snapshot(id), "There is no snapshot with id %s" % id)
if id not in self.session_readers:
self.session_readers[id] = sessions.SessionReader(self, self.get_session_path(id))
return self.session_readers[id]
def create_snapshot(self, session_name, base_session = None, session_id = None, force_base_snapshot = False):
misuse_assert(not self.readonly, "Repository is read-only")
assert isinstance(session_name, unicode)
assert base_session == None or isinstance(base_session, int)
assert session_id == None or isinstance(session_id, int)
assert isinstance(force_base_snapshot, bool)
return sessions.SessionWriter(self, session_name = session_name,
base_session = base_session,
session_id = session_id,
force_base_snapshot = force_base_snapshot)
def find_last_revision(self, session_name):
""" Returns the id of the latest snapshot in the specified
session. Returns None if there is no such session. """
assert isinstance(session_name, unicode)
all_sids = self.get_all_sessions()
all_sids.sort()
all_sids.reverse()
for sid in all_sids:
session = self.get_session(sid)
name = session.get_client_value("name")
if name == session_name:
return sid
return None
def find_next_session_id(self):
return self.get_highest_used_revision() + 1
def get_raw_blob_names(self):
blobpattern = re.compile("/([0-9a-f]{32})$")
assert blobpattern.search("b5/b5fb453aeaaef8343353cc1b641644f9")
tree = get_tree(os.path.join(self.repopath, BLOB_DIR))
matches = set()
for f in tree:
m = blobpattern.search(f)
if m:
matches.add(m.group(1))
return list(matches)
def get_recipe_names(self):
recipepattern = re.compile("([0-9a-f]{32})([.]recipe)$")
assert recipepattern.search("b5fb453aeaaef8343353cc1b641644f9.recipe")
tree = get_tree(os.path.join(self.repopath, RECIPES_DIR))
matches = set()
for f in tree:
m = recipepattern.search(f)
if m:
matches.add(m.group(1))
return list(matches)
def get_stats(self):
result = []
result.append(('number_of_snapshots', len(self.get_all_sessions())))
result.append(('number_of_user_files', len(self.get_all_level_1_blobs())))
result.append(('number_of_raw_blobs', len(self.get_raw_blob_names())))
result.append(('number_of_recipes', len(self.get_recipe_names())))
virtual_size = sum([self.get_blob_size(md5) for md5 in self.get_all_level_1_blobs()])
actual_size = sum([self.get_blob_size(md5) for md5 in self.get_raw_blob_names()])
result.append(('virtual_size', virtual_size))
result.append(('actual_size', actual_size))
try:
result.append(('dedup_removed_percentage',
round((1.0 - 1.0 * actual_size / virtual_size) * 100, 2)))
except ZeroDivisionError:
result.append(('dedup_removed_percentage', None))
try:
result.append(('dedup_blocksdb_size', os.path.getsize(os.path.join(self.repopath, DERIVED_BLOCKS_DB))))
except:
result.append(('dedup_blocksdb_size', None))
return result
def get_all_level_1_blobs(self):
"""Return a set of all blobs and recipes that are directly
referred to by any snapshot. (This excludes blobs only used in
recipes) This method takes into account any pending new
snapshot as well."""
used_blobs = set()
for sid in self.get_all_sessions():
snapshot = self.get_session(sid)
for blobinfo in snapshot.get_raw_bloblist():
if 'md5sum' in blobinfo:
used_blobs.add(blobinfo['md5sum'])
if self.get_queued_session_id():
# Must ensure that any queued new snapshot is considered as well
queue_dir = self.get_path(QUEUE_DIR, str(self.get_queued_session_id()))
queued_session = sessions.SessionReader(None, queue_dir)
for blobinfo in queued_session.get_raw_bloblist():
if 'md5sum' in blobinfo and blobinfo.get("action", None) == None:
used_blobs.add(blobinfo['md5sum'])
return used_blobs
def get_orphan_blobs(self):
"""Returns a list of all blobs (recipes and raw blobs) that
exists in the repo but aren't referred to by any
snapshot. This method asserts that it must not be called
during the processing of a commit containing new blobs or
recipes."""
queue_dir = self.get_path(QUEUE_DIR, str(self.get_queued_session_id()))
if self.get_queued_session_id():
            # We simply have no need for this case right now. Just make
# sure it is clear that we don't support it.
for item in os.listdir(queue_dir):
assert not (is_recipe_filename(item) or is_md5sum(item)), \
"get_orphan_blobs() must not be called while a non-truncate commit is in progress"
used_blobs = self.get_all_level_1_blobs()
for blob in set(used_blobs):
recipe = self.get_recipe(blob)
if recipe:
for piece in recipe['pieces']:
used_blobs.add(piece['source'])
orphans = set()
orphans.update(self.get_raw_blob_names())
orphans.update(self.get_recipe_names())
orphans -= used_blobs
return orphans
def verify_blob(self, sum):
recipe = self.get_recipe(sum)
md5_summer = hashlib.md5()
if recipe:
reader = create_blob_reader(recipe, self)
while reader.bytes_left():
md5_summer.update(reader.read(4096))
return sum == md5_summer.hexdigest()
if not self.has_raw_blob(sum):
raise ValueError("No such blob or recipe: " + sum)
path = self.get_blob_path(sum)
with safe_open(path, "rb") as f:
for block in file_reader(f):
md5_summer.update(block)
md5 = md5_summer.hexdigest()
verified_ok = (sum == md5)
return verified_ok
def find_redundant_raw_blobs(self):
all_blobs = self.get_raw_blob_names()
for blob in all_blobs:
if self.has_recipe_blob(blob):
yield blob
def get_queued_session_id(self):
path = os.path.join(self.repopath, QUEUE_DIR)
files = os.listdir(path)
assert len(files) <= 1, "Corrupted queue directory - more than one item in queue"
if len(files) == 0:
return None
result = int(files[0])
assert result > 0, "Corrupted queue directory - illegal session id"
return result
def consolidate_snapshot(self, session_path, forced_session_id = None, progress_callback = lambda f: None):
assert isinstance(session_path, unicode)
assert forced_session_id == None or isinstance(forced_session_id, int)
assert not self.readonly, "Cannot consolidate because repo is read-only"
self.repo_mutex.lock_with_timeout(60)
try:
return self.__consolidate_snapshot(session_path, forced_session_id, progress_callback)
finally:
self.repo_mutex.release()
def __consolidate_snapshot(self, session_path, forced_session_id, progress_callback):
assert isinstance(session_path, unicode)
assert self.repo_mutex.is_locked()
assert not self.get_queued_session_id()
assert not self.readonly, "Cannot consolidate because repo is read-only"
if forced_session_id:
session_id = forced_session_id
else:
session_id = self.find_next_session_id()
assert session_id > 0
assert session_id not in self.get_all_sessions()
queue_dir = self.get_queue_path(session_id)
assert not os.path.exists(queue_dir), "Queue entry collision: %s" % queue_dir
shutil.move(session_path, queue_dir)
self.process_queue(progress_callback = progress_callback)
return session_id
def get_referring_snapshots(self, rev):
""" Returns a (possibly empty) list of all the snapshots that
has the given rev as base snapshot. """
assert isinstance(rev, int)
result = []
for sid in self.get_all_sessions():
snapshot = self.get_session(sid)
if snapshot.get_base_id() == rev:
result.append(rev)
return result
def _erase_snapshots(self, snapshot_ids):
assert self.repo_mutex.is_locked()
if not snapshot_ids:
# Avoid check for erase permissions if not erasing anything
return
if not self.allows_permanent_erase():
raise MisuseError("Not allowed for this repo")
misuse_assert(not self.readonly, "Cannot erase snapshots from a write protected repo")
snapshot_ids = map(int, snapshot_ids) # Make sure there are only ints here
snapshot_ids.sort()
snapshot_ids.reverse()
trashdir = tempfile.mkdtemp(prefix = "TRASH_erased_snapshots_", dir = self.get_path(TMP_DIR))
for rev in snapshot_ids:
try:
self.__erase_snapshot(rev, trashdir)
except OSError, e:
if e.errno == 13:
raise UserError("Snapshot %s is write protected, cannot erase. Change your repository file permissions and try again." % rev)
raise
def __erase_snapshot(self, rev, trashdir):
        # Careful here... We must allow for a resumed operation
if not self.allows_permanent_erase():
raise MisuseError("Not allowed for this repo")
misuse_assert(not self.readonly, "Cannot erase snapshots from a write protected repo")
if self.get_referring_snapshots(rev):
raise MisuseError("Erasing rev %s would create orphan snapshots" % rev)
if rev in self.session_readers:
del self.session_readers[rev]
session_path = self.get_session_path(rev)
delete_copy = os.path.join(session_path, "deleted")
if not os.path.exists(delete_copy):
tmpcopy = tempfile.mktemp(prefix ="deleted_", dir = self.get_path(TMP_DIR))
shutil.copytree(session_path, tmpcopy)
os.rename(tmpcopy, delete_copy)
session_data = read_json(os.path.join(delete_copy, "session.json"))
for filename in "session.json", "bloblist.json", "session.md5", session_data['fingerprint'] + ".fingerprint":
full_path = os.path.join(session_path, filename)
if os.path.exists(full_path):
unsafe_delete(full_path)
_snapshot_delete_test_hook(rev)
writer = sessions._NaiveSessionWriter(session_name = u"__deleted", base_session = None, path = session_path)
writer.delete(deleted_session_name = session_data['client_data']['name'], deleted_fingerprint = session_data['fingerprint'])
writer.set_fingerprint("d41d8cd98f00b204e9800998ecf8427e")
writer.commit()
os.rename(delete_copy, os.path.join(trashdir, str(rev) + ".deleted"))
def erase_orphan_blobs(self):
assert self.repo_mutex.is_locked()
if not self.allows_permanent_erase():
raise MisuseError("Not allowed for this repo")
misuse_assert(not self.readonly, "Cannot erase blobs from a write protected repo")
orphan_blobs = self.get_orphan_blobs()
trashdir = tempfile.mkdtemp(prefix = "TRASH_erased_blobs_", dir = self.get_path(TMP_DIR))
self.blocksdb.begin()
self.blocksdb.delete_blocks([blob for blob in orphan_blobs if self.has_raw_blob(blob)])
self.blocksdb.commit()
for blob in orphan_blobs:
if self.has_recipe_blob(blob):
recipe_path = self.get_recipe_path(blob)
os.rename(recipe_path, os.path.join(trashdir, blob + ".recipe"))
elif self.has_raw_blob(blob):
os.rename(self.get_blob_path(blob), os.path.join(trashdir, blob))
else:
warn("Tried to erase a non-existing blob: %s" % blob)
return len(orphan_blobs)
def process_queue(self, progress_callback = lambda x: None):
progress_callback(.0)
sw = StopWatch(enabled=False, name="process_queue")
assert self.repo_mutex.is_locked()
assert not self.readonly, "Repo is read only, cannot process queue"
session_id = self.get_queued_session_id()
if session_id == None:
return
queued_item = self.get_queue_path(session_id)
progress_callback(.01)
sw.mark("Lock mutex and init")
transaction = Transaction(self, queued_item)
transaction.verify_meta()
progress_callback(.02)
sw.mark("Meta check 1")
transaction.trim()
sw.mark("Trim")
progress_callback(.10)
number_of_blobs = len(transaction.get_raw_blobs())
number_of_recipes = len(transaction.get_recipes())
# We have 0.8 progress to split between blobs and recipes
blob_ratio = 0.8 * calculate_progress(number_of_blobs + number_of_recipes, number_of_blobs)
recipe_ratio = 0.8 * calculate_progress(number_of_blobs + number_of_recipes, number_of_recipes)
ph = ProgressHelper(start_f = .10, progress_callback = progress_callback)
transaction.verify_blobs(ph.partial_progress(blob_ratio))
sw.mark("Verify blobs")
transaction.verify_recipes(ph.partial_progress(recipe_ratio))
sw.mark("Verify recipes")
transaction.verify_meta()
sw.mark("Meta check 2")
# Everything seems OK, move the blobs and consolidate the session
transaction.integrate_files()
sw.mark("Files integrated")
transaction.integrate_deletions()
sw.mark("Deletions integrated")
progress_callback(.95)
transaction.integrate_blocks()
progress_callback(.99)
sw.mark("Block specifications inserted")
session_path = os.path.join(self.repopath, SESSIONS_DIR, str(session_id))
assert not os.path.exists(session_path), "Session path already exists: %s" % session_path
self._before_transaction_completion()
shutil.move(queued_item, session_path)
assert not self.get_queued_session_id(), "Commit completed, but queue should be empty after processing"
progress_callback(1.0)
sw.mark("done")
def _before_transaction_completion(self):
"""This method is called before the final transaction
completion stage. It is intended to be overridden for whitebox
testing."""
pass
class Transaction:
def __init__(self, repo, transaction_dir):
self.repo = repo
self.path = transaction_dir
self.session_reader = sessions.SessionReader(repo, self.path)
def integrate_deletions(self):
snapshots_to_delete = []
snapshots_to_delete_file = os.path.join(self.path, "delete.json")
if os.path.exists(snapshots_to_delete_file):
# Intentionally redundant check for erase enable flag
assert os.path.exists(os.path.join(self.repo.repopath, "ENABLE_PERMANENT_ERASE"))
snapshots_to_delete = read_json(snapshots_to_delete_file)
self.repo._erase_snapshots(snapshots_to_delete)
if os.path.exists(os.path.join(self.path, "delete.json")):
self.repo.erase_orphan_blobs()
safe_delete_file(os.path.join(self.path, "delete.json"))
def integrate_files(self):
for filename in os.listdir(self.path):
if is_md5sum(filename):
# Any redundant blobs should have been trimmed above
assert not self.repo.has_blob(filename)
blob_to_move = os.path.join(self.path, filename)
destination_path = self.repo.get_blob_path(filename)
move_file(blob_to_move, destination_path, mkdirs = True)
elif is_recipe_filename(filename):
# Any redundant recipes should have been trimmed above
assert not self.repo.has_blob(get_recipe_md5(filename))
recipe_to_move = os.path.join(self.path, filename)
destination_path = self.repo.get_recipe_path(filename)
move_file(recipe_to_move, destination_path, mkdirs = True)
else:
pass # The rest becomes a snapshot definition directory
def integrate_blocks(self):
blocksdb = self.repo.blocksdb
blocks_fname = os.path.join(self.path, "blocks.json")
if not os.path.exists(blocks_fname):
return
blocks = read_json(blocks_fname)
blocksdb.begin()
for block_spec in blocks:
blob_md5, offset, rolling, sha256 = block_spec
# Possibly another commit sneaked in a recipe while we
# were looking the other way. Let's be lenient for now.
# assert self.has_raw_blob(blob_md5), "Tried to register a
# block for non-existing blob %s" % blob_md5
if self.repo.has_raw_blob(blob_md5):
blocksdb.add_block(blob_md5, offset, sha256)
blocksdb.add_rolling(rolling)
blocksdb.commit()
safe_delete_file(blocks_fname)
def get_raw_blobs(self):
"""Returns a list of all raw blobs that are present in the transaction directory"""
return [fn for fn in os.listdir(self.path) if is_md5sum(fn)]
def get_recipes(self):
"""Returns a list of all blob recipes that are present in the
transaction directory."""
return [get_recipe_md5(fn) for fn in os.listdir(self.path) if is_recipe_filename(fn)]
def get_path(self, filename):
"""Returns the full path to a filename in this transaction directory."""
assert os.path.dirname(filename) == ""
return os.path.join(self.path, filename)
def get_recipe_path(self, md5):
"""Returns the full path to a recipe in this transaction directory."""
assert is_md5sum(md5)
return self.get_path(md5 + ".recipe")
def verify_blobs(self, progress_callback = lambda x: None):
"""Read and checksum all raw blobs in the transaction. An
assertion error is raised if any errors are found."""
raw_blobs = self.get_raw_blobs()
for n, blob in enumerate(raw_blobs):
full_path = self.get_path(blob)
pp = PartialProgress(float(n) / float(len(raw_blobs)), float(n+1) / float(len(raw_blobs)), progress_callback)
size = os.path.getsize(full_path)
assert blob == md5sum_file(full_path,
end=size,
progress_callback=pp), "Invalid blob found in queue dir:" + full_path
assert os.path.getsize(full_path) == size
#progress_callback((1.0*n+1)/len(raw_blobs))
def verify_recipes(self, progress_callback = lambda x: None):
"""Read and checksum all recipes in the transaction. An
assertion error is raised if any errors are found."""
for recipe_blob in self.get_recipes():
full_path = self.get_recipe_path(recipe_blob)
md5summer = hashlib.md5()
recipe = read_json(full_path)
reader = blobreader.RecipeReader(recipe, self.repo, local_path=self.path)
while reader.bytes_left():
# DEDUP_BLOCK_SIZE should fit the data nicely, as
                # a recipe will be chunked on that block size.
md5summer.update(reader.read(DEDUP_BLOCK_SIZE))
assert recipe_blob == md5summer.hexdigest(), "Invalid recipe found in queue dir:" + full_path
def verify_meta(self):
"""Check the existence of all required files in the
transaction and make sure everything is consistent. This
method does not verify the integrity of blobs or recipes."""
# TODO: check the contents for validity
meta_info = read_json(self.get_path("session.json"))
contents = os.listdir(self.path)
has_recipes = False
has_blobs = False
has_delete = False
# Check that there are no unexpected files in the snapshot,
# and perform a simple test for json well-formedness
for filename in contents:
if is_md5sum(filename):
has_blobs = True
continue # Blob
if filename == meta_info['fingerprint']+".fingerprint":
continue # Fingerprint file
if filename in ["session.json", "bloblist.json"]:
read_json(self.get_path(filename)) # Check if malformed
continue
if filename in ["session.md5"]:
continue
if is_recipe_filename(filename):
read_json(self.get_path(filename)) # Check if malformed
has_recipes = True
continue
if filename == "delete.json":
read_json(self.get_path("delete.json"))
has_delete = True
continue
if filename == "blocks.json":
read_json(self.get_path("blocks.json"))
continue
assert False, "Unexpected file in new session:" + filename
if has_delete:
assert not (has_blobs or has_recipes), "Truncation commits must not contain any blobs or recipes"
# Check that all necessary files are present in the snapshot
assert set(contents) >= \
set([meta_info['fingerprint']+".fingerprint",\
"session.json", "bloblist.json", "session.md5"]), \
"Missing files in queue dir: "+str(contents)
def trim(self):
"""Some items in the transaction may have become redundant due
        to commits that have occurred since we started this
commit. Trim them away."""
used_blobs = set() # All the blobs that this commit must have
for blobinfo in self.session_reader.get_raw_bloblist():
if 'action' not in blobinfo:
used_blobs.add(blobinfo['md5sum'])
for recipe_blob in self.get_recipes():
assert recipe_blob in used_blobs
if self.repo.has_recipe_blob(recipe_blob) or self.repo.has_raw_blob(recipe_blob):
safe_delete_recipe(self.get_recipe_path(recipe_blob))
else:
used_blobs.update(get_recipe_blobs(self.get_recipe_path(recipe_blob)))
for blob in self.get_raw_blobs():
if self.repo.has_blob(blob) or blob not in used_blobs:
safe_delete_blob(self.get_path(blob))
def get_all_ids_in_directory(path):
result = []
for dir in os.listdir(path):
if re.match("^[0-9]+$", dir) != None:
assert int(dir) > 0, "No session 0 allowed in repo"
result.append(int(dir))
assert len(result) == len(set(result))
result.sort()
return result
def get_recipe_blobs(recipe_filename):
result = set()
recipe = read_json(recipe_filename)
for piece in recipe['pieces']:
result.add(piece['source'])
return result
def _snapshot_delete_test_hook(rev):
""" This method is intended to be replaced during testing to
simulate an interrupted operation."""
pass
def generate_random_repoid():
# Nothing fancy here, just a reasonably unique string.
return "repo_" + md5sum(str(random.random()) + "!" + str(time.time()))
```
#### File: boar/tests/test_common.py
```python
from __future__ import with_statement
import sys, os, unittest, tempfile, shutil
TMPDIR=tempfile.gettempdir()
if __name__ == '__main__':
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import common
class TestStrictFileWriterBasics(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='testcommon_', dir=TMPDIR)
self.filename = os.path.join(self.tmpdir, "test.txt")
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors = True)
def testEmptyFile(self):
sfw = common.StrictFileWriter(self.filename, "d41d8cd98f00b204e9800998ecf8427e", 0)
sfw.close()
self.assertEquals("", open(self.filename).read())
def testWithHappy(self):
with common.StrictFileWriter(self.filename, "6118fda28fbc20966ba8daafdf836683", len("avocado")) as sfw:
sfw.write("avocado")
def testWithTooShort(self):
def dotest():
with common.StrictFileWriter(self.filename, "6118fda28fbc20966ba8daafdf836683", len("avocado")) as sfw:
sfw.write("avocad")
self.assertRaises(common.ConstraintViolation, dotest)
def testWithTooShort2(self):
def dotest():
with common.StrictFileWriter(self.filename, "6118fda28fbc20966ba8daafdf836683", len("avocado")) as sfw:
pass
self.assertRaises(common.ConstraintViolation, dotest)
class TestStrictFileWriterEnforcement(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='testcommon_', dir=TMPDIR)
self.filename = os.path.join(self.tmpdir, "avocado.txt")
self.sfw = common.StrictFileWriter(self.filename, "6118fda28fbc20966ba8daafdf836683", len("avocado"))
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors = True)
def testExisting(self):
self.sfw.write("avocado")
self.sfw.close()
self.assertRaises(common.ConstraintViolation, common.StrictFileWriter, self.filename, \
"fe01d67a002dfa0f3ac084298142eccd", len("orange"))
self.assertEquals("avocado", open(self.filename).read())
def testExistingOverwrite(self):
self.sfw.write("avocado")
self.sfw.close()
with common.StrictFileWriter(self.filename, "fe01d67a002dfa0f3ac084298142eccd", \
len("orange"), overwrite = True) as sfw2:
sfw2.write("orange")
self.assertEquals("orange", open(self.filename).read())
def testOverrun(self):
self.assertRaises(common.ConstraintViolation, self.sfw.write, "avocadoo")
def testOverrun2(self):
self.sfw.write("avo")
self.sfw.write("cad")
self.sfw.write("o")
self.assertRaises(common.ConstraintViolation, self.sfw.write, "o")
def testUnderrun(self):
self.sfw.write("avocad")
self.assertRaises(common.ConstraintViolation, self.sfw.close)
def testHappyPath(self):
self.sfw.write("avocado")
self.sfw.close()
self.assertEquals("avocado", open(self.filename).read())
def testHappyPath2(self):
self.sfw.write("avo")
self.sfw.write("cad")
self.sfw.write("")
self.sfw.write("o")
self.sfw.close()
self.assertEquals("avocado", open(self.filename).read())
def testWrongChecksum(self):
self.assertRaises(common.ConstraintViolation, self.sfw.write, "avocato")
def testWithHappyPath(self):
with self.sfw:
self.sfw.write("avocado")
self.assertTrue(self.sfw.is_closed())
self.assertEquals("avocado", open(self.filename).read())
def testWithContentViolation(self):
try:
with self.sfw:
self.sfw.write("AVOCADO")
assert False, "Should throw an exception"
except Exception, e:
# Must be a content violation
self.assertEquals(type(e), common.ContentViolation)
self.assertTrue(self.sfw.is_closed())
def testWithUnderrunViolation(self):
try:
with self.sfw:
self.sfw.write("AVO")
assert False, "Should throw an exception"
except Exception, e:
# Must be a size violation
self.assertEquals(type(e), common.SizeViolation)
self.assertTrue(self.sfw.is_closed())
def testWithOverrunViolation(self):
try:
with self.sfw:
self.sfw.write("avocados")
assert False, "Should throw an exception"
except Exception, e:
# Must be a size violation
self.assertEquals(type(e), common.SizeViolation)
self.assertTrue(self.sfw.is_closed())
class TestStripPathOffset(unittest.TestCase):
def testSimple(self):
self.assertEquals("b", common.strip_path_offset("/a", "/a/b"))
self.assertEquals("", common.strip_path_offset("/a", "/a"))
def testArgumentChecks(self):
# Offset must not end with slash.
self.assertRaises(AssertionError, common.strip_path_offset, "/a/", "/a/b/")
# The child path must start with the offset
self.assertRaises(AssertionError, common.strip_path_offset, "/b", "/a")
class TestMisc(unittest.TestCase):
def test_common_tail(self):
def test(s1, s2, expected):
result = common.common_tail(s1, s2)
self.assertEquals(expected, result)
test("abc", "abc", "abc")
test("c", "abc", "c")
test("abc", "c", "c")
test("bc", "abc", "bc")
test("abc", "bc", "bc")
test("abc", "a", "")
test("a", "abc", "")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhNsXO/UnifiedFirmware",
"score": 2
} |
#### File: PlatformIO/scripts/stm32_bootloader.py
```python
import os,sys,marlin
Import("env")
from SCons.Script import DefaultEnvironment
board = DefaultEnvironment().BoardConfig()
board_keys = board.get("build").keys()
#
# For build.offset define LD_FLASH_OFFSET, used by ldscript.ld
#
if 'offset' in board_keys:
LD_FLASH_OFFSET = board.get("build.offset")
marlin.relocate_vtab(LD_FLASH_OFFSET)
# Flash size
maximum_flash_size = int(board.get("upload.maximum_size") / 1024)
marlin.replace_define('STM32_FLASH_SIZE', maximum_flash_size)
# Get upload.maximum_ram_size (defined by /buildroot/share/PlatformIO/boards/VARIOUS.json)
maximum_ram_size = board.get("upload.maximum_ram_size")
for i, flag in enumerate(env["LINKFLAGS"]):
if "-Wl,--defsym=LD_FLASH_OFFSET" in flag:
env["LINKFLAGS"][i] = "-Wl,--defsym=LD_FLASH_OFFSET=" + LD_FLASH_OFFSET
if "-Wl,--defsym=LD_MAX_DATA_SIZE" in flag:
env["LINKFLAGS"][i] = "-Wl,--defsym=LD_MAX_DATA_SIZE=" + str(maximum_ram_size - 40)
#
# For build.rename simply rename the firmware file.
#
if 'rename' in board_keys:
def rename_target(source, target, env):
firmware = os.path.join(target[0].dir.path, board.get("build.rename"))
import shutil
shutil.copy(target[0].path, firmware)
marlin.add_post_action(rename_target)
``` |
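For reference, here is a standalone sketch (not part of the Marlin build system) of what the `LINKFLAGS` rewrite above ends up doing; the offset and RAM size values below are made up for illustration only:

```python
# Illustrative values only; in the real build they come from the board manifest.
LD_FLASH_OFFSET = "0x8000"
maximum_ram_size = 131072

linkflags = [
    "-Wl,--defsym=LD_FLASH_OFFSET=0x0",
    "-Wl,--defsym=LD_MAX_DATA_SIZE=0",
    "-Wl,--gc-sections",
]

for i, flag in enumerate(linkflags):
    if "-Wl,--defsym=LD_FLASH_OFFSET" in flag:
        linkflags[i] = "-Wl,--defsym=LD_FLASH_OFFSET=" + LD_FLASH_OFFSET
    if "-Wl,--defsym=LD_MAX_DATA_SIZE" in flag:
        linkflags[i] = "-Wl,--defsym=LD_MAX_DATA_SIZE=" + str(maximum_ram_size - 40)

print(linkflags[0])  # -Wl,--defsym=LD_FLASH_OFFSET=0x8000
print(linkflags[1])  # -Wl,--defsym=LD_MAX_DATA_SIZE=131032
```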
{
"source": "jhoagland18/util-scripts",
"score": 4
} |
#### File: jhoagland18/util-scripts/lncp.py
```python
welcome_message = "Welcome to Line Complete. This program will complete repetitive typing tasks for you.\n\n" \
"To begin, type your text pattern as follows using \"[]\" to indicate a variable location:\n" \
"[Your text 1 here][][Your text 2 here][]\n" \
"Once you have entered your pattern, enter the comma separated variables you wish to fill the pattern with.\n\n" \
"For example:\n" \
"My favorite color is [] and my favorite number is [].\n" \
"Green, Blue\n" \
"9,100\n\n" \
"Will output:\n" \
"My favorite color is Green and my favorite number is 9\n" \
"My favorite color is Blue and my favorite number is 100\n"
enter_pattern_message = "Please enter your text pattern:\n"
#enter_filedir_message = "Please enter the file you want to save the text to. Leave blank to output on terminal.\n"
enter_delimiter_message = "Please enter a string or character delimiter for your input data.\n"
enter_variables_message = "Please enter the variables, separated by your chosen delimiter.\n"
empty_values_warning_message = "WARNING: Not all variable lists are the same length. Empty values have been inserted at the missing locations.\n"
delimiter = ','
def main():
trailing_variable = False
print(welcome_message)
while(True):
rawTextPattern = input(enter_pattern_message)
if(rawTextPattern.endswith("[]")):
trailing_variable=True
textPattern = rawTextPattern.split("[]")
numVariables = len(textPattern)-1
variablesList = list()
delimiter = input(enter_delimiter_message)
for i in range(0,numVariables):
variablesList.append(input(enter_variables_message).split(delimiter))
numRows = longest(variablesList)
print("\nOutput:")
emptyValuesWarning = False
for i in range(0,numRows):
output = ""
for j in range(0,len(variablesList)):
output += textPattern[j]
if(i>=len(variablesList[j])):
emptyValuesWarning=True
output+=''
else:
output += variablesList[j][i]
output+=textPattern[j+1]
print(output)
print("\n")
if(emptyValuesWarning):
print(empty_values_warning_message)
def longest(l): #https://stackoverflow.com/questions/30902558/finding-the-longest-list-in-a-list-of-lists-in-python
if(not isinstance(l, list)): return(0)
return(max([len(l),] + [len(subl) for subl in l if isinstance(subl, list)] +
[longest(subl) for subl in l]))
if __name__ == '__main__':
main()
``` |
{
"source": "jhoballah/Melanoma_JSY",
"score": 3
} |
#### File: jhoballah/Melanoma_JSY/test_send.py
```python
import numpy as np
import json
import tensorflow as tf
import base64
import io
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
from flask import Flask, request, jsonify
from get_prediction import get_prediction
app = Flask(__name__)
@app.route('/test_send', methods=['POST'])
def image_send():
j_dict = request.json
""" try:
j_dict = json.dumps(j_dict)
j_dict = json.loads(j_dict)
# load is for file, loads is for string
except ValueError:
return send_error("Input is not JSON dictionary", 600)
"""
    # decode the base64 string into bytes
    image_b64_data = j_dict['im64']
    image_bytes = base64.b64decode(image_b64_data)
    # save the decoded bytes to temp.jpg on disk
    filename = "temp.jpg"
    with open(filename, "wb") as image_out:
        image_out.write(image_bytes)
    # read temp.jpg back with matplotlib.image's imread to get the
    # numpy.ndarray expected by get_prediction
    image = mpimg.imread(filename)
(label, prediction) = get_prediction(image)
label_dict = {"diagnosis": label.tolist()}
probability_dict = {"likelihood": prediction.tolist()}
results_content = jsonify([label_dict], [probability_dict])
print(results_content)
return results_content
``` |
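A minimal client-side sketch for exercising the endpoint above, assuming the Flask app is running locally on its default port and that the `requests` library is installed; the image file name is hypothetical, while the `im64` field name comes from the handler:

```python
# Hypothetical client for the /test_send endpoint defined above.
import base64
import requests

with open("lesion.jpg", "rb") as f:  # any local JPEG; the name is an assumption
    encoded = base64.b64encode(f.read()).decode("utf-8")

r = requests.post("http://localhost:5000/test_send", json={"im64": encoded})
# The handler jsonify()s the diagnosis and likelihood dicts, so the response
# body should be a nested JSON list containing both.
print(r.json())
```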
{
"source": "jhoballah/PatientWebService",
"score": 2
} |
#### File: site-packages/pymodm/common.py
```python
import re
from collections import Mapping
from importlib import import_module
import pymongo
from pymodm.errors import ModelDoesNotExist
from pymodm.compat import string_types
# Mapping of class names to class objects.
# Used for fields that nest or reference other Model classes.
_DOCUMENT_REGISTRY = {}
# Mapping of fully-qualified names to their imported objects.
_IMPORT_CACHE = {}
CTS1 = re.compile('(.)([A-Z][a-z]+)')
CTS2 = re.compile('([a-z0-9])([A-Z])')
def snake_case(camel_case):
snake = re.sub(CTS1, r'\1_\2', camel_case)
snake = re.sub(CTS2, r'\1_\2', snake)
return snake.lower()
def _import(full_name):
"""Avoid circular imports without re-importing each time."""
if full_name in _IMPORT_CACHE:
return _IMPORT_CACHE[full_name]
module_name, class_name = full_name.rsplit('.', 1)
module = import_module(module_name)
_IMPORT_CACHE[full_name] = getattr(module, class_name)
return _IMPORT_CACHE[full_name]
def register_document(document):
key = '%s.%s' % (document.__module__, document.__name__)
_DOCUMENT_REGISTRY[key] = document
def get_document(name):
"""Retrieve the definition for a class by name."""
if name in _DOCUMENT_REGISTRY:
return _DOCUMENT_REGISTRY[name]
possible_matches = []
for key in _DOCUMENT_REGISTRY:
parts = key.split('.')
if name == parts[-1]:
possible_matches.append(key)
if len(possible_matches) == 1:
return _DOCUMENT_REGISTRY[possible_matches[0]]
raise ModelDoesNotExist('No document type by the name %r.' % (name,))
#
# Type validation.
#
def validate_string(option, value):
if not isinstance(value, string_types):
raise TypeError('%s must be a string type, not a %s'
% (option, value.__class__.__name__))
return value
def validate_string_or_none(option, value):
if value is None:
return value
return validate_string(option, value)
def validate_mongo_field_name(option, value):
"""Validates the MongoDB field name format described in:
https://docs.mongodb.com/manual/core/document/#field-names
"""
validate_string(option, value)
if value == '':
return value
if value[0] == '$':
raise ValueError('%s cannot start with the dollar sign ($) '
'character, %r.' % (option, value))
if '.' in value:
raise ValueError('%s cannot contain the dot (.) character, %r.'
% (option, value))
if '\x00' in value:
raise ValueError('%s cannot contain the null character, %r.'
% (option, value))
return value
def validate_mongo_keys(option, dct):
"""Recursively validate that all dictionary keys are valid in MongoDB."""
for key in dct:
validate_mongo_field_name(option, key)
value = dct[key]
if isinstance(value, dict):
validate_mongo_keys(option, value)
elif isinstance(value, (list, tuple)):
validate_mongo_keys_in_list(option, value)
def validate_mongo_keys_in_list(option, lst):
for elem in lst:
if isinstance(elem, dict):
validate_mongo_keys(option, elem)
elif isinstance(elem, (list, tuple)):
validate_mongo_keys_in_list(option, elem)
def validate_mongo_field_name_or_none(option, value):
if value is None:
return value
return validate_mongo_field_name(option, value)
def validate_boolean(option, value):
if not isinstance(value, bool):
raise TypeError('%s must be a boolean, not a %s'
% (option, value.__class__.__name__))
return value
def validate_boolean_or_none(option, value):
if value is None:
return value
return validate_boolean(option, value)
def validate_list_or_tuple(option, value):
if not isinstance(value, (list, tuple)):
raise TypeError('%s must be a list or a tuple, not a %s'
% (option, value.__class__.__name__))
return value
def validate_list_tuple_or_none(option, value):
if value is None:
return value
return validate_list_or_tuple(option, value)
def validate_mapping(option, value):
if not isinstance(value, Mapping):
raise TypeError('%s must be a Mapping, not a %s'
% (option, value.__class__.__name__))
return value
def validate_ordering(option, ordering):
ordering = validate_list_or_tuple(option, ordering)
for order in ordering:
order = validate_list_or_tuple(option + "'s elements", order)
if len(order) != 2:
raise ValueError("%s's elements must be (field_name, "
"direction) not %s" % (option, order))
validate_string("field_name", order[0])
if order[1] not in (pymongo.ASCENDING, pymongo.DESCENDING):
raise ValueError("sort direction must be pymongo.ASCENDING or '"
"pymongo.DECENDING not %s" % (order[1]))
return ordering
```
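A small usage sketch (not part of the library) for two of the helpers above, assuming `pymodm` and `pymongo` are importable:

```python
import pymongo
from pymodm.common import snake_case, validate_ordering

print(snake_case("PatientHeartRate"))  # -> patient_heart_rate

# validate_ordering returns the ordering unchanged when it is well formed...
ordering = validate_ordering("ordering", [("timestamp", pymongo.DESCENDING)])

# ...and raises ValueError for an unknown sort direction.
try:
    validate_ordering("ordering", [("timestamp", 5)])
except ValueError as exc:
    print(exc)
```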
#### File: site-packages/pymodm/validators.py
```python
from pymodm.errors import ValidationError
def together(*funcs):
"""Run several validators successively on the same value."""
def validator(value):
for func in funcs:
func(value)
return validator
def validator_for_func(func):
"""Return a validator that re-raises any errors from the given function."""
def validator(value):
try:
func(value)
except Exception as exc:
raise ValidationError(exc)
return validator
def validator_for_type(types, value_name=None):
"""Return a validator that ensures its value is among the given `types`."""
def validator(value):
if not isinstance(value, types):
if isinstance(types, tuple): # multiple types
type_names = tuple(t.__name__ for t in types)
err = 'must be one of %r' % (type_names,)
else:
err = 'must be a %s' % types.__name__
raise ValidationError(
'%s %s, not %r'
% (value_name or 'Value', err, value))
return validator
def validator_for_geojson_type(geojson_type):
"""Return a validator that validates its value as having the given GeoJSON
``type``.
"""
def validator(value):
if value.get('type') != geojson_type:
raise ValidationError(
'GeoJSON type must be %r, not %r'
% (geojson_type, value.get('type')))
return validator
def validator_for_min_max(min, max):
"""Return a validator that validates its value against a minimum/maximum."""
def validator(value):
if min is not None and value < min:
raise ValidationError(
'%s is less than minimum value of %s.' % (value, min))
if max is not None and value > max:
raise ValidationError(
'%s is greater than maximum value of %s.' % (value, max))
return validator
def validator_for_length(min, max):
"""Return a validator that validates a given value's length."""
def validator(value):
len_value = len(value)
if min is not None and len_value < min:
raise ValidationError(
'%s is under the minimum length of %d.' % (value, min))
if max is not None and len_value > max:
raise ValidationError(
'value exceeds the maximum length of %d.' % (max,))
return validator
``` |
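A sketch of composing the validators above into a single field validator; the heart-rate bounds are made up for illustration:

```python
from pymodm.errors import ValidationError
from pymodm.validators import (together, validator_for_min_max,
                               validator_for_type)

# Hypothetical heart-rate validator: must be an int between 30 and 250.
validate_heart_rate = together(
    validator_for_type(int, value_name="heart_rate"),
    validator_for_min_max(30, 250),
)

validate_heart_rate(72)  # passes silently
try:
    validate_heart_rate(600)
except ValidationError as exc:
    print(exc)  # 600 is greater than maximum value of 250.
```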
{
"source": "jhobbs/morsegen",
"score": 3
} |
#### File: morsegen/remorse/wavegen.py
```python
import itertools
import math
from pyaudio import PyAudio
BITRATE = 16000
FADE_LENGTH = 0.003
FADE_FRAMES = int(BITRATE * FADE_LENGTH)
MAX_AMPLITUDE = 127
def sine(frequency, length):
"""Generate a sine wave in 8-bit unsigned PCM format.
Uses linear fading at the beginning and end to avoid click noise.
Good reference on how simple digital sound generation works:
http://www.cs.nmsu.edu/~rth/cs/computermusic/Simple%20sound%20generation.html
We use s_n = (a * sin(2*pi*f*n/sr)) + 128 where:
- n is the sample number.
- s_n is sample n.
- f is frequency in hertz.
- sr is the sample rate in samples per second.
- a is the amplitude in the range of 0 to 127.
Adding 128 serves to center the samples at 128, which is silence in 8-bit
unsigned PCM format.
"""
wave_data = ''
number_of_frames = int(BITRATE * length)
factor = (float(frequency) * (math.pi * 2)) / BITRATE
for n in xrange(number_of_frames):
if n < FADE_FRAMES:
amplitude_factor = float(n) / FADE_FRAMES
elif number_of_frames - n < FADE_FRAMES:
amplitude_factor = float(number_of_frames - n) / FADE_FRAMES
else:
amplitude_factor = 1
amplitude = MAX_AMPLITUDE * amplitude_factor
zero_centered = int(math.sin(n * factor) * amplitude)
wave_data += chr(zero_centered + 128)
return wave_data
def silence(length):
wave_data = ''
number_of_frames = int(BITRATE * length)
for x in xrange(number_of_frames):
wave_data += chr(128)
return wave_data
def play(wave_data):
chunk_size = BITRATE/10
p = PyAudio()
stream = p.open(format = p.get_format_from_width(1),
channels = 1,
rate = BITRATE,
output = True)
    # Write the samples to the stream one chunk at a time so the whole
    # waveform is played, not just the first chunk_size frames.
    for start in xrange(0, len(wave_data), chunk_size):
        stream.write(wave_data[start:start + chunk_size])
stream.stop_stream()
stream.close()
p.terminate()
``` |
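A sketch of driving the generator above, assuming the `remorse` package is importable; the tone frequency and element length are illustrative, not Morse-standard constants from the project:

```python
from remorse.wavegen import sine, silence, play

DIT = 0.08  # seconds; a "dah" would conventionally be three times this
dit_with_gap = sine(700, DIT) + silence(DIT)
play(dit_with_gap)
```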
{
"source": "jhoblitt/ltd-keeper",
"score": 3
} |
#### File: app/api_v1/editions.py
```python
from flask import jsonify, request, current_app
from . import api
from .. import db
from ..auth import token_auth, permission_required
from ..models import Product, Edition, Permission
from ..dasher import build_dashboard_safely
@api.route('/products/<slug>/editions/', methods=['POST'])
@token_auth.login_required
@permission_required(Permission.ADMIN_EDITION)
def new_edition(slug):
"""Create a new Edition for a Product.
**Authorization**
User must be authenticated and have ``admin_edition`` permissions.
**Example request**
.. code-block:: http
POST /products/lsst_apps/editions/ HTTP/1.1
Accept: application/json
Accept-Encoding: gzip, deflate
Authorization: Basic ZXlKcFlYUWlPakUwTlRZM056SXpORGdzSW1WNGNDSTZNVFEx...
Connection: keep-alive
Content-Length: 150
Content-Type: application/json
Host: localhost:5000
User-Agent: HTTPie/0.9.3
{
"build_url": "http://localhost:5000/builds/1",
"slug": "latest",
"title": "Latest",
"tracked_refs": [
"master"
]
}
**Example response**
.. code-block:: http
HTTP/1.0 201 CREATED
Content-Length: 2
Content-Type: application/json
Date: Tue, 01 Mar 2016 17:21:29 GMT
Location: http://localhost:5000/editions/1
Server: Werkzeug/0.11.3 Python/3.5.0
{}
:reqheader Authorization: Include the token in a username field with a
blank password; ``<token>:``.
:param slug: Product slug.
:<json string build_url: URL of the build entity this Edition uses.
:<json string slug: URL-safe name for edition.
:<json string title: Human-readable name for edition.
:<json array tracked_refs: Git ref(s) that describe the version of the
        Product that this Edition is intended to point to. For
multi-package documentation builds this is a list of Git refs that
are checked out, in order of priority, for each component repository.
:resheader Location: URL of the created Edition resource.
:statuscode 201: No errors.
:statuscode 404: Product not found.
"""
product = Product.query.filter_by(slug=slug).first_or_404()
edition = Edition(product=product)
try:
edition.import_data(request.json)
db.session.add(edition)
db.session.commit()
except Exception:
db.session.rollback()
raise
build_dashboard_safely(current_app, request, product)
return jsonify({}), 201, {'Location': edition.get_url()}
@api.route('/editions/<int:id>', methods=['DELETE'])
@token_auth.login_required
@permission_required(Permission.ADMIN_EDITION)
def deprecate_edition(id):
"""Deprecate an Edition of a Product.
When an Edition is deprecated, the current time is added to the
Edition's ``date_ended`` field. Any Edition record with a non-``null``
``date_ended`` field will be garbage-collected by LTD Keeper (the
deletion does not occur immediately upon API request).
**Authorization**
User must be authenticated and have ``admin_edition`` permissions.
**Example request**
.. code-block:: http
DELETE /editions/1 HTTP/1.1
Authorization: Basic ZXlKcFlYUWlPakUwTlRZM056SXpORGdzSW1WNGNDSTZNVFEx...
Connection: keep-alive
Content-Length: 0
Host: localhost:5000
User-Agent: HTTPie/0.9.3
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 2
Content-Type: application/json
Date: Tue, 01 Mar 2016 17:21:30 GMT
Server: Werkzeug/0.11.3 Python/3.5.0
{}
:reqheader Authorization: Include the token in a username field with a
blank password; ``<token>:``.
:param id: Edition id.
:statuscode 200: No errors.
:statuscode 404: Edition not found.
"""
edition = Edition.query.get_or_404(id)
edition.deprecate()
db.session.commit()
build_dashboard_safely(current_app, request, edition.product)
return jsonify({}), 200
@api.route('/products/<slug>/editions/', methods=['GET'])
def get_product_editions(slug):
"""List all editions published for a Product.
**Example request**
.. code-block:: http
GET /products/lsst_apps/editions/ HTTP/1.1
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 62
Content-Type: application/json
Date: Tue, 01 Mar 2016 18:50:19 GMT
Server: Werkzeug/0.11.3 Python/3.5.0
{
"editions": [
"http://localhost:5000/editions/1"
]
}
:param slug: Slug of the Product.
:>json array editions: List of URLs of Edition entities for this Product.
:statuscode 200: No errors.
:statuscode 404: Product not found.
"""
edition_urls = [edition.get_url() for edition in
Edition.query.join(Product,
Product.id == Edition.product_id)
.filter(Product.slug == slug)
.filter(Edition.date_ended == None).all()] # NOQA
return jsonify({'editions': edition_urls})
@api.route('/editions/<int:id>', methods=['GET'])
def get_edition(id):
"""Show metadata for an Edition.
**Example request**
.. code-block:: http
GET /editions/1 HTTP/1.1
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 413
Content-Type: application/json
Date: Tue, 01 Mar 2016 18:50:18 GMT
Server: Werkzeug/0.11.3 Python/3.5.0
{
"build_url": "http://localhost:5000/builds/1",
"date_created": "2016-03-01T11:50:18.196724Z",
"date_ended": null,
"date_rebuilt": "2016-03-01T11:50:18.196706Z",
"product_url": "http://localhost:5000/products/lsst_apps",
"published_url": "pipelines.lsst.io",
"self_url": "http://localhost:5000/editions/1",
"slug": "latest",
"surrogate_key": "2a5f38f27e3c46258fd9b0e69afe54fd",
"title": "Development master",
"tracked_refs": [
"master"
]
}
:param id: ID of the Edition.
:>json string build_url: URL of the build entity this Edition uses.
:>json string date_created: UTC date time when the edition was created.
:>json string date_ended: UTC date time when the edition was deprecated;
will be ``null`` for editions that are *not deprecated*.
:>json string date_rebuilt: UTC date time when the edition last re-pointed
to a different build.
:>json string product_url: URL of parent product entity.
:>json string published_url: Full URL where this edition is published.
:>json string self_url: URL of this Edition entity.
:>json string slug: URL-safe name for edition.
:>json string surrogate_key: Surrogate key that should be used in the
``x-amz-meta-surrogate-control`` header of any the edition's S3
objects to control Fastly caching.
:>json string title: Human-readable name for edition.
    :>json array tracked_refs: Git ref(s) that this Edition points to. For
        multi-repository builds this is a list of refs to use, in order of
        priority.
:statuscode 200: No errors.
:statuscode 404: Edition not found.
"""
return jsonify(Edition.query.get_or_404(id).export_data())
@api.route('/editions/<int:id>', methods=['PATCH'])
@token_auth.login_required
@permission_required(Permission.ADMIN_EDITION)
def edit_edition(id):
"""Edit an Edition.
This PATCH method allows you to specify a subset of JSON fields to replace
existing fields in the Edition resource. Not all fields in an Edition are
editable via the API. See the allowed JSON fields below.
Use :http:delete:`/editions/(int:id)` to deprecate an edition.
The full resource record is returned.
**Authorization**
User must be authenticated and have ``admin_edition`` permissions.
**Example request**
.. code-block:: http
PATCH /editions/1 HTTP/1.1
Accept: application/json
Accept-Encoding: gzip, deflate
Authorization: Basic ZXlKcFlYUWlPakUwTlRZM056SXpORGdzSW1WNGNDSTZNVFEx...
Connection: keep-alive
Content-Length: 31
Content-Type: application/json
Host: localhost:5000
User-Agent: HTTPie/0.9.3
{
"title": "Development master"
}
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 413
Content-Type: application/json
Date: Tue, 01 Mar 2016 17:21:29 GMT
Server: Werkzeug/0.11.3 Python/3.5.0
{
"build_url": "http://localhost:5000/builds/2",
"date_created": "2016-03-01T10:21:29.017615Z",
"date_ended": null,
"date_rebuilt": "2016-03-01T10:21:29.590839Z",
"product_url": "http://localhost:5000/products/lsst_apps",
"published_url": "pipelines.lsst.io",
"self_url": "http://localhost:5000/editions/1",
"slug": "latest",
"surrogate_key": "2a5f38f27e3c46258fd9b0e69afe54fd",
"title": "Development master",
"tracked_refs": [
"master"
]
}
:reqheader Authorization: Include the token in a username field with a
blank password; ``<token>:``.
:param id: ID of the Edition.
:<json string build_url: URL of the build entity this Edition uses
(optional). Effectively this 'rebuilds' the edition.
:<json string title: Human-readable name for edition (optional).
    :<json string slug: URL-safe name for edition (optional). Changing the slug
dynamically updates the ``published_url``.
:<json array tracked_refs: Git ref(s) that this Edition points to.
For multi-package documentation builds this is a list of Git refs that
are checked out, in order of priority, for each component repository
(optional).
:>json string build_url: URL of the build entity this Edition uses.
:>json string date_created: UTC date time when the edition was created.
:>json string date_ended: UTC date time when the edition was deprecated;
will be ``null`` for editions that are *not deprecated*.
:>json string date_rebuilt: UTC date time when the edition last re-pointed
to a different build.
:>json string product_url: URL of parent product entity.
:>json string published_url: Full URL where this edition is published.
:>json string self_url: URL of this Edition entity.
:>json string slug: URL-safe name for edition.
:>json string surrogate_key: Surrogate key that should be used in the
``x-amz-meta-surrogate-control`` header of any the edition's S3
objects to control Fastly caching.
:>json string title: Human-readable name for edition.
:>json string tracked_refs: Git ref that this Edition points to. For multi-
repository builds, this can be a comma-separated list of refs to use,
in order of priority.
:statuscode 200: No errors.
:statuscode 404: Edition resource not found.
"""
edition = Edition.query.get_or_404(id)
try:
edition.patch_data(request.json)
db.session.add(edition)
db.session.commit()
except Exception:
db.session.rollback()
raise
build_dashboard_safely(current_app, request, edition.product)
return jsonify(edition.export_data())
```
#### File: app/api_v1/products.py
```python
from flask import jsonify, request, current_app
from . import api
from .. import db
from ..auth import token_auth, permission_required
from ..models import Product, Permission, Edition
from ..dasher import build_dashboard_safely, build_dashboards
@api.route('/products/', methods=['GET'])
def get_products():
"""List all documentation products (anonymous access allowed).
**Example request**
.. code-block:: http
GET /products/ HTTP/1.1
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 122
Content-Type: application/json
Date: Tue, 01 Mar 2016 17:21:27 GMT
Server: Werkzeug/0.11.3 Python/3.5.0
{
"products": [
"http://localhost:5000/products/lsst_apps",
"http://localhost:5000/products/qserv_distrib"
]
}
:>json array products: List of product URLs.
:statuscode 200: No error.
"""
return jsonify({'products': [product.get_url() for product in
Product.query.all()]})
@api.route('/products/<slug>', methods=['GET'])
def get_product(slug):
"""Get the record of a single documentation product (anonymous access
allowed).
**Example request**
.. code-block:: http
GET /products/pipelines HTTP/1.1
Accept: */*
Accept-Encoding: gzip, deflate
Authorization: Basic ZXlKaGJHY2lPaUpJVXpJMU5pSXNJbVY0Y0NJNk1UUTJNVEV3...
Connection: keep-alive
Host: localhost:5000
User-Agent: HTTPie/0.9.3
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 385
Content-Type: application/json
Date: Tue, 19 Apr 2016 21:17:52 GMT
Server: Werkzeug/0.11.3 Python/3.5.0
{
"bucket_name": "an-s3-bucket",
"doc_repo": "https://github.com/lsst/pipelines_docs.git",
"domain": "pipelines.lsst.io",
"fastly_domain": "pipelines.lsst.io.global.ssl.fastly.net",
"root_domain": "lsst.io",
"root_fastly_domain": "global.ssl.fastly.net",
"self_url": "http://localhost:5000/products/pipelines",
"slug": "pipelines",
"surrogate_key": "2a5f38f27e3c46258fd9b0e69afe54fd",
"title": "LSST Science Pipelines"
}
:param slug: Identifier for this product.
:>json string bucket_name: Name of the S3 bucket hosting builds.
:>json string doc_repo: URL of the Git documentation repo (i.e., on
GitHub).
    :>json string domain: Full domain where this product's documentation
        is served (e.g., ``pipelines.lsst.io``).
:>json string fastly_domain: Full domain where Fastly serves content
for this product. Note that ``domain`` is CNAME'd to ``fastly_domain``.
:>json string root_domain: Root domain name where documentation for
this LSST the Docs installation is served from. (e.g., ``lsst.io``).
:>json string root_fastly_domain: Root domain name for Fastly CDN used
by this LSST the Docs installation.
:>json string published_url: Full URL where this product is published to
the reader.
:>json string self_url: URL of this Product resource.
:>json string slug: URL/path-safe identifier for this product.
:>json string surrogate_key: Surrogate key that should be used in the
``x-amz-meta-surrogate-control`` header of any product-level
dashboards to control Fastly caching.
:>json string title: Human-readable product title.
:statuscode 200: No error.
:statuscode 404: Product not found.
"""
product = Product.query.filter_by(slug=slug).first_or_404()
return jsonify(product.export_data())
@api.route('/products/', methods=['POST'])
@token_auth.login_required
@permission_required(Permission.ADMIN_PRODUCT)
def new_product():
"""Create a new documentation product.
Every new product also includes a default edition (slug is 'main'). This
main edition tracks the master branch by default. Fastly is configured to
show this main edition at the product's root URL rather than in the /v/
directory.
**Authorization**
User must be authenticated and have ``admin_product`` permissions.
**Example request**
.. code-block:: http
POST /products/ HTTP/1.1
Accept: application/json
Accept-Encoding: gzip, deflate
Authorization: Basic ZXlKaGJHY2lPaUpJVXpJMU5pSXNJbVY0Y0NJNk1UUTJNVEV3...
Connection: keep-alive
Content-Length: 218
Content-Type: application/json
Host: localhost:5000
User-Agent: HTTPie/0.9.3
{
"bucket_name": "an-s3-bucket",
"doc_repo": "https://github.com/lsst/pipelines_docs.git",
"root_domain": "lsst.io",
"root_fastly_domain": "global.ssl.fastly.net",
"slug": "pipelines",
"surrogate_key": "2a5f38f27e3c46258fd9b0e69afe54fd",
"title": "LSST Science Pipelines"
}
**Example response**
.. code-block:: http
HTTP/1.0 201 CREATED
Content-Length: 2
Content-Type: application/json
Date: Tue, 19 Apr 2016 21:17:52 GMT
Location: http://localhost:5000/products/pipelines
Server: Werkzeug/0.11.3 Python/3.5.0
{}
:reqheader Authorization: Include the token in a username field with a
blank password; ``<token>:``.
:<json string bucket_name: Name of the S3 bucket hosting builds.
:<json string doc_repo: URL of the Git documentation repo (i.e., on
GitHub).
:<json string root_domain: Root domain name where documentation for
this LSST the Docs installation is served from. (e.g., ``lsst.io``).
:<json string root_fastly_domain: Root domain name for Fastly CDN used
by this LSST the Docs installation.
:<json string self_url: URL of this Product resource.
:<json string slug: URL/path-safe identifier for this product. The slug
is validated against the regular expression ``^[a-z]([-]*[a-z0-9])*$``.
:<json string title: Human-readable product title.
:resheader Location: URL of the created product.
:statuscode 201: No error.
"""
product = Product()
try:
product.import_data(request.json)
db.session.add(product)
# Create a default edition for the product
edition = Edition(product=product)
edition.import_data({'tracked_refs': ['master'],
'slug': 'main',
'title': 'Latest'})
db.session.add(edition)
db.session.commit()
except Exception:
db.session.rollback()
raise
build_dashboard_safely(current_app, request, product)
return jsonify({}), 201, {'Location': product.get_url()}
@api.route('/products/<slug>', methods=['PATCH'])
@token_auth.login_required
@permission_required(Permission.ADMIN_PRODUCT)
def edit_product(slug):
"""Update a product.
Note that not all fields can be updated with this method (currently).
See below for updateable fields. Contact the operator to update the slug,
bucket name, or Fastly domain.
**Authorization**
User must be authenticated and have ``admin_product`` permissions.
**Example request**
.. code-block:: http
PATCH /products/qserv HTTP/1.1
Accept: application/json
Accept-Encoding: gzip, deflate
Authorization: Basic ZXlKaGJHY2lPaUpJVXpJMU5pSXNJbVY0Y0NJNk1UUTJNVEV3...
Connection: keep-alive
Content-Length: 30
Content-Type: application/json
Host: localhost:5000
User-Agent: HTTPie/0.9.3
{
"title": "Qserv Data Access"
}
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 2
Content-Type: application/json
Date: Tue, 19 Apr 2016 21:17:53 GMT
Location: http://localhost:5000/products/qserv
Server: Werkzeug/0.11.3 Python/3.5.0
{}
:reqheader Authorization: Include the token in a username field with a
blank password; ``<token>:``.
:param slug: Product slug.
:<json string doc_repo: URL of the Git documentation repo (i.e., on
GitHub) (optional).
:<json string title: Human-readable product title (optional).
:resheader Location: URL of the created product.
:statuscode 200: No error.
:statuscode 404: Product not found.
"""
product = Product.query.filter_by(slug=slug).first_or_404()
try:
product.patch_data(request.json)
db.session.add(product)
db.session.commit()
except Exception:
db.session.rollback()
raise
build_dashboard_safely(current_app, request, product)
return jsonify({}), 200, {'Location': product.get_url()}
@api.route('/products/<slug>/dashboard', methods=['POST'])
@token_auth.login_required
@permission_required(Permission.ADMIN_PRODUCT)
def rebuild_product_dashboard(slug):
"""Rebuild the LTD Dasher dashboard manually for a single product.
Note that the dashboard is built asynchronously.
**Authorization**
User must be authenticated and have ``admin_product`` permissions.
:statuscode 202: Dashboard rebuild trigger sent.
**See also**
- :http:post:`/dashboards`
"""
product = Product.query.filter_by(slug=slug).first_or_404()
build_dashboards([product.get_url()],
current_app.config['LTD_DASHER_URL'],
current_app.logger)
return jsonify({}), 202, {}
```
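The docstrings above document the product endpoints over HTTP. A minimal client sketch (not part of the repository) is shown below; the host, slug and token values are placeholders, and the token is assumed to come from the `GET /token` endpoint exercised elsewhere in the tests.

```python
# Sketch of a client call against the documented endpoints; host, slug and
# token are hypothetical values.
import requests

KEEPER_HOST = "http://localhost:5000"  # assumed local instance
TOKEN = "<token>"                      # obtained beforehand, e.g. via GET /token

# PATCH a product title, authenticating with the token as the username and a
# blank password, as the ":reqheader Authorization:" note describes.
r = requests.patch(
    f"{KEEPER_HOST}/products/qserv",
    json={"title": "Qserv Data Access"},
    auth=(TOKEN, ""),
)
print(r.status_code, r.headers.get("Location"))
```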
#### File: ltd-keeper/app/dbcopy.py
```python
import logging
import sqlalchemy as sa
logger = logging.getLogger(__name__)
class Connection():
def __init__(self, url):
self.engine = sa.create_engine(url)
self.conn = self.engine.connect()
self.meta = sa.MetaData()
self.meta.reflect(self.engine)
tables = sa.schema.sort_tables(self.meta.tables.values())
self.tables = [i.name for i in tables]
class Crossover():
def __init__(self, source, target, bulk):
self.source = Connection(source)
self.target = Connection(target)
self.bulk = bulk
self.insert_data = self.insert_data_simple
def copy_data_in_transaction(self):
with self.target.conn.begin():
self.copy_data()
def copy_data(self):
if set(self.source.tables) != set(self.target.tables):
logger.warning("Source and target database table lists are not "
"identical!")
for table in self.source.tables:
if table in self.target.tables:
self.copy_table(table)
def copy_table(self, table):
offset = 0
source_table = self.target.meta.tables[table]
while True:
data = list(self.source.conn.execute(
sa.select([source_table]).offset(offset).limit(self.bulk)
))
if not data:
break
self.insert_data(table, data)
offset += self.bulk
def insert_data_simple(self, table, data):
self.target.conn.execute(self.target.meta.tables[table].insert(), data)
```
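A hedged usage sketch for the `Crossover` helper above; the two database URLs and the batch size are placeholders, not values from the repository.

```python
# Hypothetical driver: copy every table that exists in both databases, in
# dependency order, inside a single transaction on the target.
if __name__ == "__main__":
    crossover = Crossover(
        source="sqlite:///keeper_old.db",                    # placeholder source URL
        target="postgresql://user:secret@localhost/keeper",  # placeholder target URL
        bulk=1000,                                           # rows copied per batch
    )
    crossover.copy_data_in_transaction()
```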
#### File: ltd-keeper/app/fastly.py
```python
import logging
import requests
from .exceptions import FastlyError
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class FastlyService(object):
"""API client for a Fastly service.
Parameters
----------
service_id : str
The Fastly service ID.
api_key : str
The Fastly API key. We only support key-based authentication.
"""
def __init__(self, service_id, api_key):
super(FastlyService, self).__init__()
self.service_id = service_id
self.api_key = api_key
self._api_root = 'https://api.fastly.com'
def _url(self, path):
return self._api_root + path
def purge_key(self, surrogate_key):
"""Instant purge URLs with a given `surrogate_key`.
See
https://docs.fastly.com/api/purge#purge_077dfb4aa07f49792b13c87647415537
for more information.
"""
path = '/service/{service}/purge/{surrogate_key}'.format(
service=self.service_id, surrogate_key=surrogate_key)
log.info('Fastly purge {0}'.format(path))
r = requests.post(self._url(path),
headers={'Fastly-Key': self.api_key,
'Accept': 'application/json'})
if r.status_code != 200:
raise FastlyError(r.json)
```
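A minimal purge sketch for `FastlyService`; the service ID, API key and surrogate key below are placeholders. A non-200 response raises `FastlyError`, as the method above shows.

```python
# Illustrative purge call with hypothetical credentials and surrogate key.
service = FastlyService(service_id="example-service-id", api_key="example-api-key")
try:
    service.purge_key("pipelines/main")
except FastlyError as exc:
    log.error("Fastly purge failed: %s", exc)
```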
#### File: ltd-keeper/app/testutils.py
```python
from base64 import b64encode
from collections import namedtuple
import json
from urllib.parse import urlsplit, urlunsplit
response = namedtuple('response', 'status headers json')
class TestClient():
"""TestClient wraps Flask's/Werkzeug's built-in testing client.
The `get`, `post`, `put`, `delete` methods mirror HTTP
commands and return a `response` `namedtuple` with fields:
- `status`: the integer HTTP response code
- `header`: the HTTP response headers
- `json`: the return data, parse as JSON into a Python `dict` object.
"""
def __init__(self, app, username, password=''):
self.app = app
self.auth = 'Basic ' + b64encode((username + ':' + password)
.encode('utf-8')).decode('utf-8')
def send(self, url, method='GET', data=None, headers={}):
# for testing, URLs just need to have the path and query string
url_parsed = urlsplit(url)
url = urlunsplit(('', '', url_parsed.path, url_parsed.query,
url_parsed.fragment))
# append the authentication headers to all requests
headers = headers.copy()
headers['Authorization'] = self.auth
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
# convert JSON data to a string
if data:
data = json.dumps(data)
# send request to the test client and return the response
with self.app.test_request_context(url, method=method, data=data,
headers=headers):
rv = self.app.preprocess_request()
if rv is None:
rv = self.app.dispatch_request()
rv = self.app.make_response(rv)
rv = self.app.process_response(rv)
return response(rv.status_code, rv.headers,
json.loads(rv.data.decode('utf-8')))
def get(self, url, headers={}):
return self.send(url, 'GET', headers=headers)
def post(self, url, data, headers={}):
return self.send(url, 'POST', data, headers=headers)
def put(self, url, data, headers={}):
return self.send(url, 'PUT', data, headers=headers)
def patch(self, url, data, headers={}):
return self.send(url, 'PATCH', data, headers=headers)
def delete(self, url, headers={}):
return self.send(url, 'DELETE', headers=headers)
```
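A short sketch of how the wrapper is typically used in a test; `app` is assumed to be a Flask application and the payload is illustrative.

```python
# Illustrative usage of the TestClient wrapper and its response namedtuple.
client = TestClient(app, "hipster", "a-password")
r = client.post("/products/", {"slug": "demo", "title": "Demo product"})
assert r.status == 201           # integer HTTP status code
assert "Location" in r.headers   # response headers from Werkzeug
print(r.json)                    # body already parsed into a dict
```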
#### File: ltd-keeper/tests/conftest.py
```python
import pytest
from app import create_app, db
from app.models import User, Permission
from app.testutils import TestClient
DEFAULT_USERNAME = 'hipster'
DEFAULT_PASSWORD = '<PASSWORD>'
# additional usernames with specific roles
PRODUCT_ADMIN_USERNAME = 'product_admin'
EDITION_ADMIN_USERNAME = 'edition_admin'
BUILD_UPLOADER_USERNAME = 'build_uploader'
BUILD_DEPRECATOR_USERNAME = 'build_deprecator'
@pytest.fixture
def empty_app(request):
"""An application with only a single user, but otherwise empty"""
app = create_app('testing')
ctx = app.app_context()
ctx.push()
db.drop_all()
db.create_all()
# Creates users with each of the permissions
u = User(username=DEFAULT_USERNAME,
permissions=Permission.full_permissions())
u.set_password(<PASSWORD>)
db.session.add(u)
u = User(username=PRODUCT_ADMIN_USERNAME,
permissions=Permission.ADMIN_PRODUCT)
u.set_password(<PASSWORD>)
db.session.add(u)
u = User(username=EDITION_ADMIN_USERNAME,
permissions=Permission.ADMIN_EDITION)
u.set_password(<PASSWORD>)
db.session.add(u)
u = User(username=BUILD_UPLOADER_USERNAME,
permissions=Permission.UPLOAD_BUILD)
u.set_password(<PASSWORD>)
db.session.add(u)
u = User(username=BUILD_DEPRECATOR_USERNAME,
permissions=Permission.DEPRECATE_BUILD)
u.set_password(<PASSWORD>)
db.session.add(u)
db.session.commit()
def fin():
db.session.remove()
db.drop_all()
ctx.pop()
request.addfinalizer(fin)
return app
@pytest.fixture
def basic_client(empty_app):
"""Client with username/password auth, using the `app` application."""
client = TestClient(empty_app, DEFAULT_USERNAME, DEFAULT_PASSWORD)
return client
@pytest.fixture
def client(empty_app):
"""Client with token-based auth, using the `app` application."""
_c = TestClient(empty_app, DEFAULT_USERNAME, DEFAULT_PASSWORD)
r = _c.get('/token')
client = TestClient(empty_app, r.json['token'])
return client
@pytest.fixture
def anon_client(empty_app):
"""Anonymous client."""
client = TestClient(empty_app, '', '')
return client
@pytest.fixture
def product_client(empty_app):
"""Client with token-based auth with ADMIN_PRODUCT permissions."""
_c = TestClient(empty_app, PRODUCT_ADMIN_USERNAME, DEFAULT_PASSWORD)
r = _c.get('/token')
client = TestClient(empty_app, r.json['token'])
return client
@pytest.fixture
def edition_client(empty_app):
"""Client with token-based auth with ADMIN_EDITION permissions."""
_c = TestClient(empty_app, EDITION_ADMIN_USERNAME, DEFAULT_PASSWORD)
r = _c.get('/token')
client = TestClient(empty_app, r.json['token'])
return client
@pytest.fixture
def upload_build_client(empty_app):
"""Client with token-based auth with UPLOAD_BUILD permissions."""
_c = TestClient(empty_app, BUILD_UPLOADER_USERNAME, DEFAULT_PASSWORD)
r = _c.get('/token')
client = TestClient(empty_app, r.json['token'])
return client
@pytest.fixture
def deprecate_build_client(empty_app):
"""Client with token-based auth with DEPRECATE_BUILD permissions."""
_c = TestClient(empty_app, BUILD_DEPRECATOR_USERNAME, DEFAULT_PASSWORD)
r = _c.get('/token')
client = TestClient(empty_app, r.json['token'])
return client
```
#### File: ltd-keeper/tests/test_editions.py
```python
import pytest
from werkzeug.exceptions import NotFound
from app.exceptions import ValidationError
def test_editions(client):
# Add a sample product
p = {'slug': 'pipelines',
'doc_repo': 'https://github.com/lsst/pipelines_docs.git',
'title': 'LSST Science Pipelines',
'root_domain': 'lsst.io',
'root_fastly_domain': 'global.ssl.fastly.net',
'bucket_name': 'bucket-name'}
r = client.post('/products/', p)
product_url = r.headers['Location']
assert r.status == 201
# Create builds
r = client.post('/products/pipelines/builds/',
{'git_refs': ['master']})
assert r.status == 201
b1_url = r.json['self_url']
client.patch(b1_url, {'uploaded': True})
r = client.post('/products/pipelines/builds/',
{'git_refs': ['master']})
assert r.status == 201
b2_url = r.json['self_url']
client.patch(b2_url, {'uploaded': True})
# Setup an edition
e1 = {'tracked_refs': ['master'],
'slug': 'latest',
'title': 'Latest',
'build_url': b1_url}
r = client.post(product_url + '/editions/', e1)
e1_url = r.headers['Location']
r = client.get(e1_url)
assert r.status == 200
assert r.json['tracked_refs'][0] == e1['tracked_refs'][0]
assert r.json['slug'] == e1['slug']
assert r.json['title'] == e1['title']
assert r.json['build_url'] == b1_url
assert r.json['date_created'] is not None
assert r.json['date_ended'] is None
assert r.json['published_url'] == 'https://pipelines.lsst.io/v/latest'
# Re-build the edition
r = client.patch(e1_url, {'build_url': b2_url})
assert r.status == 200
assert r.json['build_url'] == b2_url
# Change the title with PATCH
r = client.patch(e1_url, {'title': "Development version"})
assert r.status == 200
assert r.json['title'] == 'Development version'
# Change the tracked_refs with PATCH
r = client.patch(e1_url, {'tracked_refs': ['tickets/DM-9999', 'master']})
assert r.status == 200
assert r.json['tracked_refs'][0] == 'tickets/DM-9999'
assert r.json['tracked_refs'][1] == 'master'
# Deprecate the edition
r = client.delete(e1_url)
assert r.status == 200
r = client.get(e1_url)
assert r.status == 200
assert r.json['date_ended'] is not None
# Deprecated editions no longer in the editions list
r = client.get(product_url + '/editions/')
assert r.status == 200
assert len(r.json['editions']) == 1 # only default edition (main) remains
# Verify we can't make a second 'main' edition
with pytest.raises(ValidationError):
r = client.post('/products/pipelines/editions/',
{'slug': 'main',
'tracked_refs': ['master'],
'title': 'Main'})
# Authorization tests: POST /products/<slug>/editions/ =======================
# Only the full admin client and the edition-authorized client should get in
def test_post_edition_auth_anon(anon_client):
r = anon_client.post('/products/test/editions/', {'foo': 'bar'})
assert r.status == 401
def test_post_edition_auth_product_client(product_client):
r = product_client.post('/products/test/editions/', {'foo': 'bar'})
assert r.status == 403
def test_post_edition_auth_edition_client(edition_client):
with pytest.raises(NotFound):
edition_client.post('/products/test/editions/', {'foo': 'bar'})
def test_post_edition_auth_builduploader_client(upload_build_client):
r = upload_build_client.post('/products/test/editions/', {'foo': 'bar'})
assert r.status == 403
def test_post_edition_auth_builddeprecator_client(deprecate_build_client):
r = deprecate_build_client.post('/products/test/editions/', {'foo': 'bar'})
assert r.status == 403
# Authorization tests: PATCH /editions/<id> ==================================
# Only the full admin client and the edition-authorized client should get in
def test_patch_edition_auth_anon(anon_client):
r = anon_client.patch('/editions/1', {'foo': 'bar'})
assert r.status == 401
def test_patch_edition_auth_product_client(product_client):
r = product_client.patch('/editions/1', {'foo': 'bar'})
assert r.status == 403
def test_patch_edition_auth_edition_client(edition_client):
with pytest.raises(NotFound):
edition_client.patch('/editions/1', {'foo': 'bar'})
def test_patch_edition_auth_builduploader_client(upload_build_client):
r = upload_build_client.patch('/editions/1', {'foo': 'bar'})
assert r.status == 403
def test_patch_edition_auth_builddeprecator_client(deprecate_build_client):
r = deprecate_build_client.patch('/editions/1', {'foo': 'bar'})
assert r.status == 403
# Authorization tests: DELETE /editions/<id> =================================
# Only the full admin client and the edition-authorized client should get in
def test_delete_edition_auth_anon(anon_client):
r = anon_client.delete('/editions/1')
assert r.status == 401
def test_delete_edition_auth_product_client(product_client):
r = product_client.delete('/editions/1')
assert r.status == 403
def test_delete_edition_auth_edition_client(edition_client):
with pytest.raises(NotFound):
edition_client.delete('/editions/1')
def test_delete_edition_auth_builduploader_client(upload_build_client):
r = upload_build_client.delete('/editions/1')
assert r.status == 403
def test_delete_edition_auth_builddeprecator_client(deprecate_build_client):
r = deprecate_build_client.delete('/editions/1')
assert r.status == 403
```
#### File: ltd-keeper/tests/test_utils.py
```python
import pytest
from app.exceptions import ValidationError
from app.utils import (auto_slugify_edition, validate_path_slug,
validate_product_slug)
@pytest.mark.parametrize(
'git_refs,expected',
[(['tickets/DM-1234'], 'DM-1234'),
(['tickets/LCR-758'], 'LCR-758'),
(['master'], 'master'),
(['u/rowen/r12_patch1'], 'u-rowen-r12-patch1'),
(['tickets/DM-1234', 'tickets/DM-5678'],
'tickets-DM-1234-tickets-DM-5678')])
def test_auto_slugify_edition(git_refs, expected):
assert expected == auto_slugify_edition(git_refs)
assert validate_path_slug(auto_slugify_edition(git_refs))
def test_validate_product_slug():
with pytest.raises(ValidationError):
validate_product_slug('DM-1234')
with pytest.raises(ValidationError):
validate_product_slug('DM_1234')
assert validate_product_slug('dm-1234') is True
``` |
{
"source": "jhockett/amplify-ci-support",
"score": 2
} |
#### File: android/stacks/maven_release_stack.py
```python
from aws_cdk import (
core,
aws_iam
)
from amplify_custom_resources import MavenPublisher
class MavenReleaseStack(core.Stack):
def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
required_props = ['github_source']
for prop in required_props:
if prop not in props:
raise RuntimeError(f"Parameter {prop} is required.")
codebuild_project_name_prefix = props['codebuild_project_name_prefix']
github_source = props['github_source']
owner = github_source['owner']
repo = github_source['repo']
base_branch = github_source['base_branch']
buildspec_path = props['buildspec_path'] if 'buildspec_path' in props else None
create_webhooks = props['create_webhooks']
policy = aws_iam.ManagedPolicy(self,
"SecretsAccessPolicy",
managed_policy_name=f"{codebuild_project_name_prefix}-SecretsAccessPolicy",
description="Policy used by the CodeBuild role to access secrets when uploading to Sonatype",
statements=[
aws_iam.PolicyStatement(
actions=["secretsmanager:GetSecretValue"],
effect=aws_iam.Effect.ALLOW,
resources=[
f"arn:aws:secretsmanager:{self.region}:{self.account}:secret:awsmobilesdk/android/signing*",
f"arn:aws:secretsmanager:{self.region}:{self.account}:secret:awsmobilesdk/android/sonatype*"
]
)
]
)
publisher = MavenPublisher(self, "ReleasePublisher", project_name=f"{codebuild_project_name_prefix}-ReleasePublisher",
github_owner=owner,
github_repo=repo,
base_branch=base_branch,
buildspec_path=buildspec_path,
create_webhooks=create_webhooks)
policy.attach_to_role(publisher.role)
```
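A hypothetical CDK app entry point showing how the stack might be instantiated; the prop values mirror the keys read in `__init__` above but are placeholders, not the project's actual configuration.

```python
# Sketch of wiring MavenReleaseStack into a CDK app (values are assumptions).
from aws_cdk import core

app = core.App()
MavenReleaseStack(app, "MavenRelease", props={
    "codebuild_project_name_prefix": "AmplifyAndroid",
    "github_source": {
        "owner": "aws-amplify",
        "repo": "amplify-android",
        "base_branch": "main",
    },
    "buildspec_path": "scripts/maven-release-publisher.yml",  # optional, assumed path
    "create_webhooks": False,
})
app.synth()
```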
#### File: cdk/scripts/auth.py
```python
from common import *
import json
import os
def get_auth_config():
is_update = True if get_category_config("auth") is not None else False
if(is_update):
auth_config_json_element_name = 'serviceModification'
user_pool_config_json_element_name = 'userPoolModification'
id_pool_config_json_element_name = 'identityPoolModification'
else:
auth_config_json_element_name = 'serviceConfiguration'
user_pool_config_json_element_name = 'userPoolConfiguration'
id_pool_config_json_element_name = 'identityPoolConfiguration'
auth_config = {
'version': 1,
'resourceName':'AndroidIntegTestAuth'
}
user_pool_config = {
'requiredSignupAttributes':['EMAIL', 'NAME', 'NICKNAME'],
'signinMethod':'USERNAME',
'userPoolGroups': [
{ 'groupName': 'Admins' },
{ 'groupName': 'Bloggers' },
{ 'groupName': 'Moderators' }
],
'writeAttributes': ['EMAIL', 'NAME', 'NICKNAME'],
'readAttributes':['EMAIL', 'NAME', 'NICKNAME'],
'refreshTokenPeriod': 365
}
id_pool_config = {
'unauthenticatedLogin': True,
'identityPoolName': 'androididpool'
}
auth_config[auth_config_json_element_name] = {
'serviceName': 'Cognito',
'includeIdentityPool': True
}
auth_config[auth_config_json_element_name][user_pool_config_json_element_name] = user_pool_config
auth_config[auth_config_json_element_name][id_pool_config_json_element_name] = id_pool_config
return auth_config
def config_auth(auth_config):
cmd = [AMPLIFY_COMMAND,
"add" if get_category_config('auth') is None else "update",
"auth",
"--headless"]
result = run_command(cmd, input=json.dumps(auth_config))
return result.returncode
```
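A hypothetical driver for the two helpers above: build the headless auth payload and apply it with the Amplify CLI via `config_auth` (which relies on the helpers imported from `common`).

```python
# Illustrative entry point; not part of the repository.
if __name__ == "__main__":
    exit_code = config_auth(get_auth_config())
    print(f"amplify auth configuration exited with code {exit_code}")
```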
#### File: cdk/stacks/amplify_deployer_stack.py
```python
import boto3
import base64
from botocore.exceptions import ClientError
from aws_cdk import (
aws_codebuild,
aws_iam,
core,
)
class AmplifyDeployer(core.Stack):
DEFAULT_GITHUB_OWNER = "aws-amplify"
DEFAULT_BRANCH = "refs/heads/main"
def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
build_file_path = 'src/integ_test_resources/android/amplify/integration/cdk/scripts/buildspec.yml'
github_repo = props['github_repo']
github_owner = self.DEFAULT_GITHUB_OWNER if 'github_owner' not in props else props['github_owner']
branch = self.DEFAULT_BRANCH if 'branch' not in props else props['branch']
build_environment = aws_codebuild.BuildEnvironment(build_image=aws_codebuild.LinuxBuildImage.AMAZON_LINUX_2_3,
privileged=True,
compute_type=aws_codebuild.ComputeType.SMALL)
project = aws_codebuild.Project(self,
props['project_name'],
source=aws_codebuild.Source.git_hub(owner=github_owner,
report_build_status=False,
repo=github_repo,
branch_or_ref=branch,
webhook=False), # Will need to setup creds to make this true
environment=build_environment,
build_spec=aws_codebuild.BuildSpec.from_source_filename(filename=build_file_path))
individual_actions = [
"amplify:Get*",
"amplify:List*",
"iam:CreateRole",
"iam:DeleteRole",
"iam:PutRolePolicy",
"iam:DeleteRolePolicy"
]
policy = aws_iam.ManagedPolicy(self,
"AmplifyDeployerLeastPrivilegePolicy",
managed_policy_name="AmplifyDeployerLeastPrivilegePolicy",
description="Policy used by the CodeBuild role that manages the creation of backend resources using the Amplify CLI",
# document=aws_iam.PolicyDocument(
statements=[
aws_iam.PolicyStatement(actions=individual_actions, effect=aws_iam.Effect.ALLOW, resources=["*"]),
]
# )
)
policy.attach_to_role(project.role)
project.role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess"))
project.role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name("AWSCloudFormationFullAccess"))
project.role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name('IAMReadOnlyAccess'))
project.role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name('AWSLambdaFullAccess'))
project.role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name('AWSAppSyncAdministrator'))
```
#### File: cdk/cdk_integration_tests_ios/cognito_idp_stack.py
```python
from aws_cdk import aws_iam, core
from common.common_stack import CommonStack
from common.region_aware_stack import RegionAwareStack
class CognitoIdpStack(RegionAwareStack):
def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self._supported_in_region = self.is_service_supported_in_region()
all_resources_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW, actions=["cognito-idp:CreateUserPool"], resources=["*"]
)
common_stack.add_to_common_role_policies(self, policy_to_add=all_resources_policy)
specified_resources_arn = "arn:aws:cognito-idp:{}:{}:userpool/*".format(
self.region, self.account
)
specified_resources_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=[
"cognito-idp:AdminConfirmSignUp",
"cognito-idp:AdminUpdateUserAttributes",
"cognito-idp:CreateUserPoolClient"
],
resources=[specified_resources_arn],
)
common_stack.add_to_common_role_policies(self, policy_to_add=specified_resources_policy)
```
#### File: cdk/cdk_integration_tests_ios/kms_stack.py
```python
from aws_cdk import aws_iam, core
from common.common_stack import CommonStack
from common.region_aware_stack import RegionAwareStack
class KmsStack(RegionAwareStack):
def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self._supported_in_region = self.is_service_supported_in_region()
all_resources_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=["kms:CreateKey"],
resources=["*"],
)
common_stack.add_to_common_role_policies(self, policy_to_add=all_resources_policy)
alias_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=["kms:CreateAlias"],
resources=[f"arn:aws:kms:{self.region}:{self.account}:alias*"],
)
common_stack.add_to_common_role_policies(self, policy_to_add=alias_policy)
key_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=[
"kms:CancelKeyDeletion",
"kms:CreateAlias",
"kms:Decrypt",
"kms:DescribeKey",
"kms:DisableKeyRotation",
"kms:Encrypt",
"kms:ScheduleKeyDeletion",
],
resources=[f"arn:aws:kms:{self.region}:{self.account}:key*"],
)
common_stack.add_to_common_role_policies(self, policy_to_add=key_policy)
```
#### File: custom_resources/iot_custom_authorizer_user_pass_function/iot_custom_authorizer_user_pass.py
```python
import base64
import json
import os
def handler(event, __):
print(f"### handler: {event}")
token = event["token"].lower()
mqtt_user = event["protocolData"]["mqtt"]["username"]
expected_username = os.environ["custom_auth_user_pass_username"]
expected_password = os.environ["custom_auth_user_pass_password"]
password = event["protocolData"]["mqtt"]["password"]
base64_decoded = base64.b64decode(password).decode("utf-8")
passwordMatches = expected_password == base64_decoded
effect = (
"Allow"
if token == "allow" and mqtt_user.startswith(expected_username) and passwordMatches
else "Deny"
)
response = make_auth_response(effect)
response_string = json.dumps(response)
print(f"### returning response: {response_string}")
return response_string
def make_auth_response(effect):
resource_arn = os.environ.get("RESOURCE_ARN", "*")
response = {
"isAuthenticated": True,
"principalId": "somePrincipalId",
"disconnectAfterInSeconds": 3600,
"refreshAfterInSeconds": 600,
"policyDocuments": [
{
"Version": "2012-10-17",
"Statement": [
{
"Action": ["iot:Connect", "iot:Subscribe", "iot:Publish", "iot:Receive"],
"Effect": effect,
"Resource": resource_arn,
}
],
}
],
}
return response
```
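A local smoke-test sketch for the authorizer; the event shape and environment variables below are assumptions derived from the fields read in `handler()`, and the credentials are placeholders.

```python
# Invoke the Lambda handler locally with a minimal, assumed event.
import base64
import json
import os

os.environ["custom_auth_user_pass_username"] = "testuser"
os.environ["custom_auth_user_pass_password"] = "testpassword"

event = {
    "token": "allow",
    "protocolData": {
        "mqtt": {
            "username": "testuser-device-1",
            "password": base64.b64encode(b"testpassword").decode("utf-8"),
        }
    },
}
response = json.loads(handler(event, None))
print(response["policyDocuments"][0]["Statement"][0]["Effect"])  # expected: Allow
```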
#### File: cdk/credential_rotation/iam_construct.py
```python
from aws_cdk import aws_iam, core
class IAMConstruct(core.Construct):
def __init__(
self,
scope: core.Construct,
construct_id: str,
bucket_arn: str,
cloudfront_arn: str,
**kwargs
) -> None:
super().__init__(scope, construct_id, **kwargs)
self.create_circleci_release_user()
self.create_circleci_release_process_role(
bucket_arn=bucket_arn, cloudfront_arn=cloudfront_arn
)
self.create_lambda_execution_role()
def create_circleci_release_user(self) -> None:
self.circleci_user = aws_iam.User(
self, "circleci_iam_user", user_name="CircleCIReleaseProcessIAMUser"
)
def create_circleci_release_process_role(self, bucket_arn: str, cloudfront_arn: str) -> None:
self.circleci_release_role = aws_iam.Role(
self,
"circleci_release_role",
assumed_by=self.circleci_user,
role_name="CircleCIReleaseProcessRole",
max_session_duration=core.Duration.hours(4),
)
bucket_resource = bucket_arn + "/aws-sdk-ios/*"
bucket_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=["s3:PutObject"],
resources=[bucket_resource],
)
self.circleci_release_role.add_to_policy(bucket_policy)
cloudfront_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=["cloudfront:CreateInvalidation"],
resources=[cloudfront_arn],
)
self.circleci_release_role.add_to_policy(cloudfront_policy)
def create_lambda_execution_role(self) -> None:
self.lambda_role = aws_iam.Role(
self,
"lambda_key_rotation_execution_role",
assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
role_name="LambdaKeyRotationExecutionRole",
)
self.lambda_role.add_managed_policy(
aws_iam.ManagedPolicy.from_aws_managed_policy_name(
"service-role/AWSLambdaBasicExecutionRole"
)
)
lambda_role_rotate_keys_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=["iam:CreateAccessKey", "iam:DeleteAccessKey"],
resources=[self.circleci_user.user_arn],
)
self.lambda_role.add_to_policy(lambda_role_rotate_keys_policy)
def add_policy_to_lambda_role(self, policy: aws_iam.PolicyStatement) -> None:
self.lambda_role.add_to_policy(policy)
```
#### File: src/destination/circleci.py
```python
import requests
from utils.retry import retry
from utils.secrets_manager_helper import retrieve_secret
CIRCLECI_URL_TEMPLATE = "https://circleci.com/api/v2/project/gh/{project_path}/envvar"
def update_environment_variables(variables: map, configuration: map):
"""Updates CircleCI environment variables
Args:
variables:
<list expected keys & values>
configuration:
<list expected keys & values>
Raises:
KeyError: if `configuration` does not contain the expected keys
RuntimeError: if `configuration` is falsy (e.g. `None`)
ValueError: if any required Lambda environment variables are missing.
"""
if not configuration:
raise RuntimeError("Configuration is required to update CircleCI environment variables")
github_path = configuration["github_path"]
circleci_api_token_secret_id_lambda_env_var_key = configuration[
"circleci_api_token_secret_id_lambda_env_var_key"
]
circleci_api_token = retrieve_secret(circleci_api_token_secret_id_lambda_env_var_key)
for key, value in variables.items():
update_env_vars(
env_var_name=key,
env_var_value=value,
token=circleci_api_token,
project_path=github_path,
)
def get_secret_value(secret_id: str, *, secretsmanager) -> str:
response = secretsmanager.get_secret_value(SecretId=secret_id)
api_key = response["SecretString"]
return api_key
@retry()
def update_env_vars(env_var_name: str, env_var_value: str, token: str, project_path: str):
url = CIRCLECI_URL_TEMPLATE.format(project_path=project_path)
headers = {"Circle-Token": token}
payload = {"name": env_var_name, "value": env_var_value}
response = requests.post(url, json=payload, headers=headers)
if not is_successful_response(response):
safe_content = response.text.replace(env_var_value, "*" * len(env_var_value))
raise RuntimeError(
"Could not update env var "
+ f"key={env_var_name} "
+ f"status_code={response.status_code} "
+ f"body={safe_content}"
)
def is_successful_response(response):
return response.status_code == 200 or response.status_code == 201
```
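An illustrative call to the module above; the variable values, project path and secret-id key are placeholders, and the Lambda is assumed to expose the named environment variable pointing at the Secrets Manager secret holding the CircleCI API token.

```python
# Sketch of pushing one environment variable to a CircleCI project.
variables = {"XCF_S3_BUCKET_NAME": "example-release-bucket"}
configuration = {
    "github_path": "aws-amplify/aws-sdk-ios",
    "circleci_api_token_secret_id_lambda_env_var_key": "CIRCLE_CI_IOS_SDK_API_TOKEN",
}
update_environment_variables(variables, configuration)
```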
#### File: lambda_functions/src/handler.py
```python
from destination import circleci
from models.destination_type import DestinationType
from models.source_type import SourceType
from source_data_generator import (
aws_session_credential_source,
lambda_env_var_data_source,
secrets_data_source,
)
def handler(event, context, *, iam=None, sts=None, secretsmanager=None):
"""
Invoked with the following event structure:
```
{
"sources": [
{
"type": "aws_session_cred",
"description": "Temporary AWS Credentials to upload the release artifacts to S3 and invalidate Cloudfront",
"configuration": {
"user_env_variable": "IAM_USERNAME",
"iam_role_env_variable": "IAM_ROLE"
},
"destination": {
"specifier": "aws-sdk-ios-cci",
"mapping_to_destination": [
{
"destination_key_name": "XCF_ACCESS_KEY_ID",
"result_value_key": "access_key_id"
},
{
"destination_key_name": "XCF_SECRET_ACCESS_KEY",
"result_value_key": "secret_access_key"
},
{
"destination_key_name": "XCF_SESSION_TOKEN",
"result_value_key": "session_token"
}
]
}
},
{
"type": "secrets_manager",
"description": "",
"configuration": {
"secret_key_env_variable": "GITHUB_CREDENTIALS_SECRET"
},
"destination": {
"specifier": "aws-sdk-ios-cci",
"mapping_to_destination": [
{
"destination_key_name": "GITHUB_SPM_TOKEN",
"result_value_key": "GITHUB_SPM_TOKEN"
},
{
"destination_key_name": "GITHUB_SPM_USER",
"result_value_key": "GITHUB_SPM_USER"
}
]
}
},
{
"type": "lambda_env_variables",
"description": "",
"configuration": {
"lambda_env_var_key": "SPM_S3_BUCKET_NAME"
},
"destination": {
"specifier": "aws-sdk-ios-cci",
"mapping_to_destination": [
{
"destination_key_name": "XCF_S3_BUCKET_NAME"
}
]
}
}
],
"destinations": {
"aws-sdk-ios-cci": {
"type": "cci_env_variable",
"description": "Circle CI environment variable for AWS SDK iOS repo",
"github_path": "aws-amplify/aws-sdk-ios",
"circleci_api_token_secret_id_lambda_env_var_key": "CIRCLE_CI_IOS_SDK_API_TOKEN"
}
}
}
```
"""
sources = event["sources"]
destinations = event["destinations"]
destination_values_map = {}
for source in sources:
source_type = source["type"]
destination_specifier = source["destination"]["specifier"]
destination_mapping = source["destination"]["mapping_to_destination"]
configuration = source["configuration"]
source_map = {}
if source_type == SourceType.AWS_SESSION_CREDENTIALS:
source_map = aws_session_credential_source.generate_session_credentials(configuration)
elif source_type == SourceType.SECRETS_MANAGER:
source_map = secrets_data_source.retrieve_secrets(configuration)
elif source_type == SourceType.LAMBDA_ENVIRONMENT_VARIABLE:
source_map = lambda_env_var_data_source.retrieve_lambda_env_var_value(configuration)
mapped_result = {}
for item in destination_mapping:
destination_key_name = item["destination_key_name"]
result_value_key = item.get("result_value_key", "result")
if result_value_key in source_map:
mapped_result[destination_key_name] = source_map[result_value_key]
destination_values_map.setdefault(destination_specifier, {}).update(mapped_result)
for name, destination_configuration in destinations.items():
destination_type = destination_configuration["type"]
mapped_result = destination_values_map.get(name, {})
if destination_type == DestinationType.CIRCLECI_ENVIRONMENT_VARIABLE:
circleci.update_environment_variables(mapped_result, destination_configuration)
```
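A trimmed local-invocation sketch; the event mirrors the structure documented in the handler docstring, using only a `lambda_env_variables` source, and all values are placeholders.

```python
# Minimal assumed event for exercising handler() outside AWS.
event = {
    "sources": [
        {
            "type": "lambda_env_variables",
            "description": "",
            "configuration": {"lambda_env_var_key": "SPM_S3_BUCKET_NAME"},
            "destination": {
                "specifier": "aws-sdk-ios-cci",
                "mapping_to_destination": [
                    {"destination_key_name": "XCF_S3_BUCKET_NAME"}
                ],
            },
        }
    ],
    "destinations": {
        "aws-sdk-ios-cci": {
            "type": "cci_env_variable",
            "description": "Circle CI environment variable for AWS SDK iOS repo",
            "github_path": "aws-amplify/aws-sdk-ios",
            "circleci_api_token_secret_id_lambda_env_var_key": "CIRCLE_CI_IOS_SDK_API_TOKEN",
        }
    },
}
handler(event, None)
```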
#### File: src/source_data_generator/secrets_data_source.py
```python
import json
from typing import Dict
from utils.secrets_manager_helper import retrieve_secret
def retrieve_secrets(configuration: map) -> Dict[str, str]:
if not configuration:
raise RuntimeError("Configuration is required to retrieve secrets")
secret_key_env_variable = configuration["secret_key_env_variable"]
secret_value = retrieve_secret(secret_key_env_variable)
try:
json_value = json.loads(secret_value)
return json_value
except (json.decoder.JSONDecodeError):
return {"result": secret_value}
```
#### File: lambda_functions/test/test_circleci.py
```python
import unittest
from unittest.mock import Mock, call, patch
from src.destination import circleci
access_key_id = "AKIAIOSFODNN7EXAMPLE"
secret_access_key = "<KEY>"
session_token = (
"<KEY>"
+ "<KEY>"
+ "<KEY>"
+ "<KEY>"
+ "<KEY>"
+ "<KEY>"
+ "<KEY>
)
class TestCircleCIDestination(unittest.TestCase):
def test_generate_credential_with_null_variables(self):
with self.assertRaises(RuntimeError):
circleci.update_environment_variables(variables=None, configuration=None)
def test_generate_credential_with_null_configuration(self):
with self.assertRaises(RuntimeError):
circleci.update_environment_variables(
variables=self.mock_variables(), configuration=None
)
@patch("src.destination.circleci.requests.post")
@patch("src.destination.circleci.retrieve_secret")
def test_updates_variables(self, mock_retrieve_secret, post):
post.return_value = Mock()
post.return_value.status_code = 200
mock_retrieve_secret.return_value = "SEKRET!"
circleci.update_environment_variables(
variables=self.mock_variables(), configuration=self.mock_configuration()
)
url = "https://circleci.com/api/v2/project/gh/aws-amplify/aws-sdk-ios/envvar"
header = {"Circle-Token": "<PASSWORD>!"}
values = {
"XCF_ACCESS_KEY_ID": access_key_id,
"XCF_SECRET_ACCESS_KEY": secret_access_key,
"XCF_SESSION_TOKEN": session_token,
}
for i, (key, value) in enumerate(values.items()):
expected_json = {"name": key, "value": value}
expected = call(url, json=expected_json, headers=header)
self.assertEqual(post.mock_calls[i], expected)
def mock_variables(self):
return {
"XCF_ACCESS_KEY_ID": access_key_id,
"XCF_SECRET_ACCESS_KEY": secret_access_key,
"XCF_SESSION_TOKEN": session_token,
}
def mock_configuration(self):
return {
"type": "cci_env_variable",
"description": "Circle CI environment variable for AWS SDK iOS repo",
"github_path": "aws-amplify/aws-sdk-ios",
"circleci_api_token_secret_id_lambda_env_var_key": "CIRCLE_CI_IOS_SDK_API_TOKEN",
}
if __name__ == "__main__":
unittest.main()
```
#### File: lambda_functions/test/test_secrets_data_source.py
```python
import unittest
from unittest.mock import patch
from src.source_data_generator import secrets_data_source
class TestSecretsDataSource(unittest.TestCase):
def test_null_environment_value(self):
with self.assertRaises(RuntimeError):
secrets_data_source.retrieve_secrets(configuration=None)
@patch("src.source_data_generator.secrets_data_source.retrieve_secret")
def test_valid_result(self, mock_retrieve_secret):
mock_retrieve_secret.return_value = "SEKRET!"
configuration = {"secret_key_env_variable": "secret_key"}
result = secrets_data_source.retrieve_secrets(configuration)
self.assertIsNotNone(result)
@patch("src.source_data_generator.secrets_data_source.retrieve_secret")
def test_valid_result_string(self, mock_retrieve_secret):
mock_retrieve_secret.return_value = "SEKRET!"
configuration = {"secret_key_env_variable": "secret_key"}
result = secrets_data_source.retrieve_secrets(configuration)
self.assertTrue(isinstance(result, dict))
secret_value = result["result"]
self.assertEqual(secret_value, "SEKRET!")
@patch("src.source_data_generator.secrets_data_source.retrieve_secret")
def test_valid_result_json(self, mock_retrieve_secret):
mock_retrieve_secret.return_value = """{"GITHUB_SPM_RELEASE_USER": "user",
"GITHUB_SPM_RELEASE_TOKEN": "token"}
"""
configuration = {"secret_key_env_variable": "secret_key"}
result = secrets_data_source.retrieve_secrets(configuration)
self.assertTrue(isinstance(result, dict))
secret_value = result["GITHUB_SPM_RELEASE_USER"]
secret_token = result["GITHUB_SPM_RELEASE_TOKEN"]
self.assertEqual(secret_value, "user")
self.assertEqual(secret_token, "token")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jhockx/data-pipeline",
"score": 3
} |
#### File: data-pipeline/adcpipeline/pipeline.py
```python
import logging
import os
import re
from abc import ABC
from typing import Callable, Dict, List, Iterator, Optional
import pandas as pd
import yaml
from pandas import DataFrame
logger = logging.getLogger(__name__)
class PipelineBase(ABC):
_methods_settings: List[Dict] = []
_method_list: List[Callable] = []
def __get_lambda_method(self, setting: Dict) -> Callable:
"""
Args:
setting: The key is the name of the method, the value is a dict containing the argument names and
corresponding values.
Returns:
A lambda containing a method with method parameters to be called when the lambda is called.
"""
method_name, method_params = list(setting.items())[0]
# Consistency checks
if len(setting.items()) > 1:
raise ValueError('There should be only one dict available for setting')
if not isinstance(method_name, str):
raise TypeError('The method name should be a string')
if method_params is not None:
for key in method_params.keys():
if not isinstance(key, str):
raise TypeError('Argument names for methods should be strings')
# Get lambda
method = getattr(self, method_name)
if method_params is None:
return lambda: method()
else:
return lambda: method(**method_params)
@property
def method_settings(self) -> List[Dict]:
"""
Returns:
A list of dicts containing the methods and corresponding arguments that will be called (in order) when
the pipeline is run. Format: [{<method_name>: {<argument_name>: <argument_value>}}, ...]
"""
return self._methods_settings
@method_settings.setter
def method_settings(self, methods_settings: List[Dict]) -> None:
"""
This method saves the list of dicts as a property and converts all methods and corresponding arguments to callable
lambdas. These lambdas are saved in the property 'method_list'.
Args:
methods_settings: A list of dicts containing the methods and corresponding arguments that will be called
(in order) when the pipeline is run. Format: [{<method_name>: {<argument_name>: <argument_value>}}, ...]
"""
self._methods_settings = methods_settings
self._method_list = []
for setting in methods_settings:
self._method_list.append(self.__get_lambda_method(setting))
@property
def method_list(self) -> List[Callable]:
"""
Returns:
A list of callable lambdas, as defined by the property 'method_settings'. These are called in order
when the pipeline is run.
"""
return self._method_list
def __init__(self, df: Optional[DataFrame], method_settings: List, filename: str = None) -> None:
self.df = df
self.method_settings = method_settings
self.step = 0
self.path = None
if filename is None:
return
if len(re.split(r'/\\', filename)) == 1: # Filename has no directory component; cache it under ./cache
if filename is not None and not os.path.exists('cache'):
os.makedirs('cache')
self.path = os.path.join('cache/', f"{filename}")
self.path = os.path.join(f"{self.path}.pkl")
@classmethod
def from_yaml_file(cls, df: DataFrame, path: str, filename: str = None):
"""
This is a factory method to instantiate this class by loading the settings from a yaml file.
Format of yaml file should be:
pipeline:
- <method_name>: {<argument_name>: <argument_value>}
- <method_name>: {<argument_name>: <argument_value>}
- ...
Args:
df: This is your data in a DataFrame format.
path: Path to yaml file.
filename: Path/filename for caching
Returns:
Instance of this class.
"""
with open(file=path, mode='r') as f:
settings = yaml.safe_load(f.read())['pipeline']
return cls(df=df, method_settings=settings, filename=filename)
def __call__(self) -> None:
self.run()
def __repr__(self) -> str:
return str(self.method_settings)
def __getitem__(self, i: int) -> Dict:
return self.method_settings[i]
def __setitem__(self, i: int, setting: Dict) -> None:
self.method_settings[i] = setting
self._method_list[i] = self.__get_lambda_method(setting)
def __delitem__(self, i: int) -> None:
del self.method_settings[i]
del self.method_list[i]
def __len__(self) -> int:
return len(self.method_settings)
def __iter__(self) -> Iterator[Dict]:
return iter(self.method_settings)
def __reversed__(self) -> List[Dict]:
return list(reversed(self.method_settings))
def insert(self, index: int, setting: Dict) -> None:
self.method_settings.insert(index, setting)
self.method_list.insert(index, self.__get_lambda_method(setting))
def run(self) -> None:
"""
This will call all the lambdas (in order) saved in the 'method_list'. These methods can be set with the
property 'method_settings'.
"""
logger.info(f'Running pipeline using the following settings: {self.method_settings}')
for method in self.method_list:
method()
if self.path is not None and self.df is not None:
self.df.to_pickle(self.path)
def run_or_load(self, load_cache_from_step: int = None) -> None:
"""
Instead of running the pipeline, load the result from the pipeline from a cache file. Useful functionality
if you have not modified the pipeline, but want to use the output. Beware that if you change the pipeline, you
should re-run the pipeline first or remove the cache. Otherwise this method will load incorrect results.
Alternatively, you can provide the load_cache_from_step parameter to load the first N steps from cache and only
execute the steps after that.
Args:
load_cache_from_step (Optional): Number of initial steps to load from cache before executing the
remaining steps of the pipeline. Each method counts as a single step.
"""
if self.path is None:
raise ValueError("Mode not possible without a valid filename attribute in class initiation")
if load_cache_from_step is not None:
assert isinstance(load_cache_from_step, int), "Please provide an int as the load_cache_from_step parameter"
step_path = f"{self.path.rstrip('.pkl')}_step{load_cache_from_step}.pkl"
if os.path.isfile(step_path):
self.df = pd.read_pickle(step_path)
for method in self.method_list[load_cache_from_step:]:
method()
else:
for idx, method in enumerate(self.method_list):
if idx == load_cache_from_step and self.df is not None:
self.df.to_pickle(step_path)
method()
else:
if os.path.isfile(self.path):
self.df = pd.read_pickle(self.path)
else:
self.run()
``` |
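A minimal concrete pipeline (with illustrative method names and data, not part of the package) showing the `method_settings` format, plus the matching YAML accepted by `from_yaml_file`.

```python
# Sketch of a concrete pipeline built on PipelineBase.
import pandas as pd


class MyPipeline(PipelineBase):
    def fill_missing(self, value=0):
        self.df = self.df.fillna(value)

    def drop_column(self, name):
        self.df = self.df.drop(columns=[name])


settings = [
    {'fill_missing': {'value': 0}},
    {'drop_column': {'name': 'temp'}},
]
df = pd.DataFrame({'a': [1, None], 'temp': [2, 3]})
pipeline = MyPipeline(df=df, method_settings=settings)
pipeline.run()

# The equivalent pipeline.yml for from_yaml_file would be:
#
# pipeline:
#   - fill_missing: {value: 0}
#   - drop_column: {name: temp}
```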
{
"source": "jhockx/home-assistant-plots",
"score": 3
} |
#### File: energy_plots/src/utils.py
```python
from datetime import date
import pandas as pd
class NoInfluxDataError(Exception):
pass
def get_df_current_month(client, entity, unit, first_day_of_the_month, last_day_of_the_month):
# Get daily data
result = client.query(f"SELECT entity_id, value FROM homeassistant.infinite.{unit} WHERE entity_id = '{entity}' "
f"AND time >= '{first_day_of_the_month.strftime('%Y-%m-%dT%H:%M:%SZ')}'")
if not result:
raise NoInfluxDataError
df = result[unit]
df = df.sort_index().resample('D').max()
# Filter data this month
df = df[df.index.month == first_day_of_the_month.month]
# Add empty value on the last day of the month for the plot if it doesn't exist
if df.empty is False and df.index[-1] != last_day_of_the_month:
df = df.append(pd.DataFrame(index=[last_day_of_the_month], data=[[entity, 0]], columns=['entity_id', 'value']),
sort=True)
return df
def get_df_current_year(client, entity, unit, now):
# Get daily data
result = client.query(f"SELECT entity_id, value FROM homeassistant.infinite.{unit} WHERE entity_id = '{entity}' "
f"AND time >= '{now.strftime('%Y-%m-%dT%H:%M:%SZ')}' - 365d")
if not result:
raise NoInfluxDataError
df = result[unit]
df = df.sort_index().resample('D').max()
df = df.sort_index().resample('M', kind='period').sum().to_timestamp() # returns first day of each month
# Filter data this year
beginning_of_current_year = pd.to_datetime(date(now.year, 1, 1))
end_of_current_year = pd.to_datetime(date(now.year, 12, 1))
df = df[df.index.year == now.year]
df['entity_id'] = entity
# Add empty value on the first and last day of the year for the plot if it doesn't exist
if df.empty is False and df.index[0] != beginning_of_current_year:
df = df.append(pd.DataFrame(index=[beginning_of_current_year], data=[[entity, 0]],
columns=['entity_id', 'value']), sort=True)
if df.empty is False and df.index[-1] != end_of_current_year:
df = df.append(pd.DataFrame(index=[end_of_current_year], data=[[entity, 0]],
columns=['entity_id', 'value']), sort=True)
df = df.sort_index()
return df
``` |
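A hedged usage sketch; it assumes an InfluxDB 1.x `DataFrameClient` pointed at the Home Assistant database, and the entity and unit names are placeholders.

```python
# Illustrative call into the helpers above (connection details are assumptions).
from datetime import date, datetime

import pandas as pd
from influxdb import DataFrameClient

client = DataFrameClient(host='localhost', port=8086, database='homeassistant')
now = datetime.utcnow()
first_day = pd.Timestamp(date(now.year, now.month, 1))
last_day = first_day + pd.offsets.MonthEnd(1)

df_month = get_df_current_month(client, 'daily_yield', 'kWh', first_day, last_day)
df_year = get_df_current_year(client, 'daily_yield', 'kWh', now)
```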
{
"source": "jhockx/python-snippets",
"score": 4
} |
#### File: jhockx/python-snippets/python_snippets.py
```python
from collections import OrderedDict
def list_diff(list1, list2) -> list:
"""
Get the difference between two lists
:arg list1: First list for the difference
:arg list2: Second list for the difference
"""
return list(list(set(list1) - set(list2)) + list(set(list2) - set(list1)))
def list_drop_duplicates(li: list, keep: str = 'first') -> list:
"""
Drop duplicates from a (ordered) list
:param li: List to drop duplicates from
:param keep: Keep first or last occurrence of the unique items
"""
if keep == 'first':
return list(OrderedDict((x, True) for x in li).keys())
elif keep == 'last':
li.reverse()
li = list(OrderedDict((x, True) for x in li).keys())
li.reverse()
return li
else:
raise ValueError(f'Cannot parse {keep} as argument for keep. This should be either "first" or "last"')
``` |
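Illustrative calls to both helpers; note that `list_diff` builds its result from set differences, so the element order of its output is not guaranteed.

```python
# Quick demonstration of the two list helpers.
print(list_diff([1, 2, 3], [2, 3, 4]))                      # e.g. [1, 4]
print(list_drop_duplicates([1, 2, 1, 3, 2], keep='first'))  # [1, 2, 3]
print(list_drop_duplicates([1, 2, 1, 3, 2], keep='last'))   # [1, 3, 2]
```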
{
"source": "jhod0/cluster_toolkit",
"score": 3
} |
#### File: cluster_toolkit/cluster_toolkit/bias.py
```python
import cluster_toolkit
from cluster_toolkit import _ArrayWrapper
import numpy as np
# from .peak_height import *
def bias_at_M(M, k, P, Omega_m, delta=200):
"""Tinker et al. 2010 bais at mass M [Msun/h].
Args:
M (float or array like): Mass in Msun/h.
k (array like): Wavenumbers of power spectrum in h/Mpc comoving.
P (array like): Power spectrum in (Mpc/h)^3 comoving.
Omega_m (float): Matter density fraction.
delta (int; optional): Overdensity, default is 200.
Returns:
float or array like: Halo bias.
"""
M = _ArrayWrapper(M, 'M')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
if k.shape != P.shape:
raise ValueError('k and P must have the same shape')
bias = _ArrayWrapper.zeros_like(M)
cluster_toolkit._lib.bias_at_M_arr(M.cast(), len(M), delta,
k.cast(), P.cast(), len(k),
Omega_m, bias.cast())
return bias.finish()
def bias_at_R(R, k, P, delta=200):
"""Tinker 2010 bais at mass M [Msun/h] corresponding to radius R [Mpc/h comoving].
Args:
R (float or array like): Lagrangian radius in Mpc/h comoving.
k (array like): Wavenumbers of power spectrum in h/Mpc comoving.
P (array like): Power spectrum in (Mpc/h)^3 comoving.
delta (int; optional): Overdensity, default is 200.
Returns:
float or array like: Halo bias.
"""
R = _ArrayWrapper(R, 'R')
k = _ArrayWrapper(k)
P = _ArrayWrapper(P)
bias = _ArrayWrapper.zeros_like(R)
cluster_toolkit._lib.bias_at_R_arr(R.cast(), len(R), delta,
k.cast(), P.cast(), len(k),
bias.cast())
return bias.finish()
def bias_at_nu(nu, delta=200):
"""Tinker 2010 bais at peak height nu.
Args:
nu (float or array like): Peak height.
delta (int; optional): Overdensity, default is 200.
Returns:
float or array like: Halo bias.
"""
nu = _ArrayWrapper(nu, 'nu')
bias = _ArrayWrapper.zeros_like(nu)
cluster_toolkit._lib.bias_at_nu_arr(nu.cast(), len(nu), delta,
bias.cast())
return bias.finish()
def dbiasdM_at_M(M, k, P, Omega_m, delta=200):
"""d/dM of Tinker et al. 2010 bais at mass M [Msun/h].
Args:
M (float or array like): Mass in Msun/h.
k (array like): Wavenumbers of power spectrum in h/Mpc comoving.
P (array like): Power spectrum in (Mpc/h)^3 comoving.
Omega_m (float): Matter density fraction.
delta (int; optional): Overdensity, default is 200.
Returns:
float or array like: Derivative of the halo bias.
"""
M = _ArrayWrapper(M, 'M')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
deriv = _ArrayWrapper.zeros_like(M)
cluster_toolkit._lib.dbiasdM_at_M_arr(M.cast(), len(M), delta, k.cast(),
P.cast(), len(k), Omega_m,
deriv.cast())
return deriv.finish()
def _bias_at_nu_FREEPARAMS(nu, A, a, B, b, C, c, delta=200):
"""A special function used only for quickly computing best fit parameters
for the halo bias models.
"""
nu = _ArrayWrapper(nu, allow_multidim=True)
bias = _ArrayWrapper.zeros_like(nu)
cluster_toolkit._lib.bias_at_nu_arr_FREEPARAMS(nu.cast(), len(nu), delta,
A, a, B, b, C, c,
bias.cast())
return bias.finish()
```
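A sketch of a typical call; the linear matter power spectrum would normally come from a Boltzmann code such as CAMB or CLASS, so the k grid and the P file below are placeholders.

```python
# Illustrative halo-bias call with a placeholder power spectrum.
import numpy as np
from cluster_toolkit import bias

k = np.logspace(-5, 1, 1000)   # h/Mpc comoving, placeholder grid
P = np.loadtxt("P_lin.txt")    # (Mpc/h)^3, hypothetical file matching k
M = np.logspace(12, 15, 50)    # Msun/h
b = bias.bias_at_M(M, k, P, Omega_m=0.3)
```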
#### File: cluster_toolkit/cluster_toolkit/massfunction.py
```python
import cluster_toolkit
from cluster_toolkit import _ArrayWrapper, _handle_gsl_error
import numpy as np
def dndM_at_M(M, k, P, Omega_m, d=1.97, e=1.0, f=0.51, g=1.228):
"""Tinker et al. 2008 appendix C mass function at a given mass.
Default behavior is for :math:`M_{200m}` mass definition.
NOTE: by default, this function is only valid at :math:`z=0`. For use
at higher redshifts either recompute the parameters yourself, or
wait for this behavior to be patched.
Args:
M (float or array like): Mass in Msun/h.
k (array like): Wavenumbers of the matter power spectrum in h/Mpc comoving.
P_lin (array like): Linear matter power spectrum in (Mpc/h)^3 comoving.
Omega_m (float): Matter density fraction.
d (float; optional): First Tinker parameter. Default is 1.97.
e (float; optional): Second Tinker parameter. Default is 1.
f (float; optional): Third Tinker parameter. Default is 0.51.
g (float; optional): Fourth Tinker parameter. Default is 1.228.
Returns:
float or array like: Mass function :math:`dn/dM`.
"""
M = _ArrayWrapper(M, 'M')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
dndM = _ArrayWrapper.zeros_like(M)
cluster_toolkit._lib.dndM_at_M_arr(M.cast(), len(M), k.cast(),
P.cast(), len(k), Omega_m,
d, e, f, g, dndM.cast())
return dndM.finish()
def G_at_M(M, k, P, Omega_m, d=1.97, e=1.0, f=0.51, g=1.228):
"""Tinker et al. 2008 appendix C multiplicity funciton G(M) as
a function of mass. Default behavior is for :math:`M_{200m}` mass
definition.
Args:
M (float or array like): Mass in Msun/h.
k (array like): Wavenumbers of the matter power spectrum in h/Mpc comoving.
P_lin (array like): Linear matter power spectrum in (Mpc/h)^3 comoving.
Omega_m (float): Matter density fraction.
d (float; optional): First Tinker parameter. Default is 1.97.
e (float; optional): Second Tinker parameter. Default is 1.
f (float; optional): Third Tinker parameter. Default is 0.51.
g (float; optional): Fourth Tinker parameter. Default is 1.228.
Returns:
float or array like: Halo multiplicity :math:`G(M)`.
"""
M = _ArrayWrapper(M, 'M')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
G = _ArrayWrapper.zeros_like(M)
cluster_toolkit._lib.G_at_M_arr(M.cast(), len(M),
k.cast(), P.cast(), len(k),
Omega_m, d, e, f, g, G.cast())
return G.finish()
def G_at_sigma(sigma, d=1.97, e=1.0, f=0.51, g=1.228):
"""Tinker et al. 2008 appendix C multiplicity funciton G(sigma) as
a function of sigma.
NOTE: by default, this function is only valid at :math:`z=0`. For use
at higher redshifts either recompute the parameters yourself, or
wait for this behavior to be patched.
Args:
sigma (float or array like): RMS variance of the matter density field.
d (float; optional): First Tinker parameter. Default is 1.97.
e (float; optional): Second Tinker parameter. Default is 1.
f (float; optional): Third Tinker parameter. Default is 0.51.
g (float; optional): Fourth Tinker parameter. Default is 1.228.
Returns:
float or array like: Halo multiplicity G(sigma).
"""
sigma = _ArrayWrapper(sigma, 'sigma')
G = _ArrayWrapper.zeros_like(sigma)
cluster_toolkit._lib.G_at_sigma_arr(sigma.cast(), len(sigma),
d, e, f, g, G.cast())
return G.finish()
def n_in_bins(edges, Marr, dndM):
"""Tinker et al. 2008 appendix C binned mass function.
Args:
edges (array like): Edges of the mass bins.
Marr (array like): Array of locations that dndM has been evaluated at.
dndM (array like): Array of dndM.
Returns:
numpy.ndarray: number density of halos in the mass bins. Length is :code:`len(edges)-1`.
"""
edges = _ArrayWrapper(edges, 'edges')
n = _ArrayWrapper.zeros(len(edges)-1)
Marr = _ArrayWrapper(Marr, 'Marr')
dndM = _ArrayWrapper(dndM, 'dndM')
rc = cluster_toolkit._lib.n_in_bins(edges.cast(), len(edges),
Marr.cast(), dndM.cast(), len(Marr),
n.cast())
_handle_gsl_error(rc, n_in_bins)
return n.finish()
def n_in_bin(Mlo, Mhi, Marr, dndM):
"""Tinker et al. 2008 appendix C binned mass function.
Args:
Mlo (float): Lower mass edge.
Mhi (float): Upper mass edge.
Marr (array like): Array of locations that dndM has been evaluated at.
dndM (array like): Array of dndM.
Returns:
float: number density of halos in the mass bin.
"""
return np.squeeze(n_in_bins([Mlo, Mhi], Marr, dndM))
def _dndM_sigma2_precomputed(M, sigma2, dsigma2dM, Omega_m, d=1.97, e=1.0, f=0.51, g=1.228):
M = _ArrayWrapper(M, allow_multidim=True)
sigma2 = _ArrayWrapper(sigma2, allow_multidim=True)
dsigma2dM = _ArrayWrapper(dsigma2dM, allow_multidim=True)
dndM = _ArrayWrapper.zeros_like(M)
cluster_toolkit._lib.dndM_sigma2_precomputed(M.cast(), sigma2.cast(),
dsigma2dM.cast(), len(M),
Omega_m, d, e, f, g,
dndM.cast())
return dndM.finish()
```
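A sketch of the binned mass function: comoving number density of halos in two mass bins, again assuming a precomputed linear power spectrum (placeholder k grid and P file).

```python
# Illustrative mass-function call with placeholder inputs.
import numpy as np
from cluster_toolkit import massfunction

k = np.logspace(-5, 1, 1000)                 # h/Mpc comoving, placeholder grid
P = np.loadtxt("P_lin.txt")                  # (Mpc/h)^3, hypothetical file matching k
M = np.logspace(12, 16, 200)                 # Msun/h
dndM = massfunction.dndM_at_M(M, k, P, Omega_m=0.3)
edges = np.array([1e14, 5e14, 1e15])         # Msun/h bin edges
n = massfunction.n_in_bins(edges, M, dndM)   # halos per (Mpc/h)^3 in each bin
```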
#### File: cluster_toolkit/cluster_toolkit/peak_height.py
```python
import cluster_toolkit
from cluster_toolkit import _ArrayWrapper, _handle_gsl_error
import numpy as np
def sigma2_at_M(M, k, P, Omega_m):
"""RMS variance in top hat sphere of lagrangian radius R [Mpc/h comoving] corresponding to a mass M [Msun/h] of linear power spectrum.
Args:
M (float or array like): Mass in Msun/h.
k (array like): Wavenumbers of power spectrum in h/Mpc comoving.
P (array like): Power spectrum in (Mpc/h)^3 comoving.
Omega_m (float): Omega_matter, matter density fraction.
Returns:
float or array like: RMS variance of top hat sphere.
"""
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
if isinstance(M, list) or isinstance(M, np.ndarray):
M = _ArrayWrapper(M, allow_multidim=True)
s2 = _ArrayWrapper.zeros_like(M)
rc = cluster_toolkit._lib.sigma2_at_M_arr(M.cast(), len(M), k.cast(), P.cast(), len(k), Omega_m, s2.cast())
_handle_gsl_error(rc, sigma2_at_M)
return s2.finish()
else:
return cluster_toolkit._lib.sigma2_at_M(M, k.cast(), P.cast(), len(k), Omega_m)
def sigma2_at_R(R, k, P):
"""RMS variance in top hat sphere of radius R [Mpc/h comoving] of linear power spectrum.
Args:
R (float or array like): Radius in Mpc/h comoving.
k (array like): Wavenumbers of power spectrum in h/Mpc comoving.
P (array like): Power spectrum in (Mpc/h)^3 comoving.
Returns:
float or array like: RMS variance of a top hat sphere.
"""
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
if isinstance(R, list) or isinstance(R, np.ndarray):
R = _ArrayWrapper(R)
s2 = _ArrayWrapper.zeros_like(R)
rc = cluster_toolkit._lib.sigma2_at_R_arr(R.cast(), len(R), k.cast(), P.cast(), len(k), s2.cast())
_handle_gsl_error(rc, sigma2_at_R)
return s2.finish()
else:
return cluster_toolkit._lib.sigma2_at_R(R, k.cast(), P.cast(), len(k))
def nu_at_M(M, k, P, Omega_m):
"""Peak height of top hat sphere of lagrangian radius R [Mpc/h comoving] corresponding to a mass M [Msun/h] of linear power spectrum.
Args:
M (float or array like): Mass in Msun/h.
k (array like): Wavenumbers of power spectrum in h/Mpc comoving.
P (array like): Power spectrum in (Mpc/h)^3 comoving.
Omega_m (float): Omega_matter, matter density fraction.
Returns:
nu (float or array like): Peak height.
"""
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
if isinstance(M, list) or isinstance(M, np.ndarray):
M = _ArrayWrapper(M)
nu = _ArrayWrapper.zeros_like(M)
cluster_toolkit._lib.nu_at_M_arr(M.cast(), len(M), k.cast(), P.cast(), len(k), Omega_m, nu.cast())
return nu.finish()
else:
return cluster_toolkit._lib.nu_at_M(M, k.cast(), P.cast(), len(k), Omega_m)
def nu_at_R(R, k, P):
"""Peak height of top hat sphere of radius R [Mpc/h comoving] of linear power spectrum.
Args:
R (float or array like): Radius in Mpc/h comoving.
k (array like): Wavenumbers of power spectrum in h/Mpc comoving.
P (array like): Power spectrum in (Mpc/h)^3 comoving.
Returns:
float or array like: Peak height.
"""
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
if isinstance(R, list) or isinstance(R, np.ndarray):
R = _ArrayWrapper(R)
nu = _ArrayWrapper.zeros_like(R)
cluster_toolkit._lib.nu_at_R_arr(R.cast(), len(R), k.cast(), P.cast(), len(k), nu.cast())
return nu.finish()
else:
return cluster_toolkit._lib.nu_at_R(R, k.cast(), P.cast(), len(k))
def dsigma2dM_at_M(M, k, P, Omega_m):
"""Derivative w.r.t. mass of RMS variance in top hat sphere of
lagrangian radius R [Mpc/h comoving] corresponding to a mass
M [Msun/h] of linear power spectrum.
Args:
M (float or array like): Mass in Msun/h.
k (array like): Wavenumbers of power spectrum in h/Mpc comoving.
P (array like): Power spectrum in (Mpc/h)^3 comoving.
Omega_m (float): Omega_matter, matter density fraction.
Returns:
float or array like: d/dM of RMS variance of top hat sphere.
"""
P = _ArrayWrapper(P, allow_multidim=True)
k = _ArrayWrapper(k, allow_multidim=True)
if isinstance(M, list) or isinstance(M, np.ndarray):
M = _ArrayWrapper(M, allow_multidim=True)
ds2dM = _ArrayWrapper.zeros_like(M)
rc = cluster_toolkit._lib.dsigma2dM_at_M_arr(M.cast(), len(M), k.cast(),
P.cast(), len(k), Omega_m,
ds2dM.cast())
_handle_gsl_error(rc, dsigma2dM_at_M)
return ds2dM.finish()
else:
return cluster_toolkit._lib.dsigma2dM_at_M(M, k.cast(), P.cast(),
len(k), Omega_m)
def _calc_sigma2_at_R(R, k, P, s2):
"""Direct call to vectorized version of RMS variance in top hat
sphere of radius R [Mpc/h comoving] of linear power spectrum.
"""
R = _ArrayWrapper(R, allow_multidim=True)
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
s2 = _ArrayWrapper(s2, allow_multidim=True)
cluster_toolkit._lib.sigma2_at_R_arr(R.cast(), len(R), k.cast(), P.cast(), len(k), s2.cast())
def _calc_sigma2_at_M(M, k, P, Omega_m, s2):
"""Direct call to vectorized version of RMS variance in top hat sphere of lagrangian radius R [Mpc/h comoving] corresponding to a mass M [Msun/h] of linear power spectrum.
"""
M = _ArrayWrapper(M, allow_multidim=True)
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
s2 = _ArrayWrapper(s2, allow_multidim=True)
rc = cluster_toolkit._lib.sigma2_at_M_arr(M.cast(), len(M), k.cast(), P.cast(), len(k), Omega_m, s2.cast())
_handle_gsl_error(rc, _calc_sigma2_at_M)
def _calc_nu_at_R(R, k, P, nu):
"""Direct call to vectorized version of peak height of R.
"""
R = _ArrayWrapper(R, allow_multidim=True)
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
nu = _ArrayWrapper(nu, allow_multidim=True)
rc = cluster_toolkit._lib.nu_at_R_arr(R.cast(), len(R), k.cast(), P.cast(), len(k), nu.cast())
_handle_gsl_error(rc, _calc_nu_at_R)
def _calc_nu_at_M(M, k, P, Omega_m, nu):
"""Direct call to vectorized version of peak height of M.
"""
M = _ArrayWrapper(M, allow_multidim=True)
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
nu = _ArrayWrapper(nu, allow_multidim=True)
rc = cluster_toolkit._lib.nu_at_M_arr(M.cast(), len(M), k.cast(), P.cast(), len(k), Omega_m, nu.cast())
_handle_gsl_error(rc, _calc_nu_at_M)
```
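A brief usage sketch for the peak-height helpers above (the functions are assumed to be in scope; `k` and `P` are stand-ins for a real linear power spectrum):
```python
import numpy as np
# k [h/Mpc] and P [(Mpc/h)^3] are placeholders; in practice they come from a
# Boltzmann code such as CAMB or CLASS.
# k, P = np.loadtxt("matter_power.txt", unpack=True)   # hypothetical file
M = np.logspace(12, 15, 30)   # halo masses in Msun/h
Omega_m = 0.3
# sigma2 = sigma2_at_M(M, k, P, Omega_m)   # RMS variance in matched top-hat spheres
# nu = nu_at_M(M, k, P, Omega_m)           # peak height; increases with halo mass
```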
#### File: cluster_toolkit/cluster_toolkit/xi.py
```python
import cluster_toolkit
from cluster_toolkit import _ArrayWrapper, _handle_gsl_error
import numpy as np
def xi_nfw_at_r(r, M, c, Omega_m, delta=200):
"""NFW halo profile correlation function.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
M (float): Mass in Msun/h
c (float): Concentration
Omega_m (float): Omega_matter, matter fraction of the density
delta (int; optional): Overdensity, default is 200
Returns:
float or array like: NFW halo profile.
"""
r = _ArrayWrapper(r, 'r')
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_nfw(r.cast(), len(r), M, c, delta,
Omega_m, xi.cast())
return xi.finish()
def xi_einasto_at_r(r, M, conc, alpha, om, delta=200, rhos=-1.):
"""Einasto halo profile.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
M (float): Mass in Msun/h; not used if rhos is specified
conc (float): Concentration
alpha (float): Profile exponent
om (float): Omega_matter, matter fraction of the density
delta (int): Overdensity, default is 200
rhos (float): Scale density in Msun h^2/Mpc^3 comoving; optional
Returns:
float or array like: Einasto halo profile.
"""
r = _ArrayWrapper(r, 'r')
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_einasto(r.cast(), len(r), M, rhos,
conc, alpha, delta, om, xi.cast())
return xi.finish()
def xi_mm_at_r(r, k, P, N=500, step=0.005, exact=False):
"""Matter-matter correlation function.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
k (array like): Wavenumbers of power spectrum in h/Mpc comoving
P (array like): Matter power spectrum in (Mpc/h)^3 comoving
N (int; optional): Quadrature step count, default is 500
step (float; optional): Quadrature step size, default is 5e-3
exact (boolean): Use the slow, exact calculation; default is False
Returns:
float or array like: Matter-matter correlation function
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(r)
if not exact:
rc = cluster_toolkit._lib.calc_xi_mm(r.cast(), len(r), k.cast(),
P.cast(), len(k), xi.cast(),
N, step)
_handle_gsl_error(rc, xi_mm_at_r)
else:
if r.arr.max() > 1e3:
raise Exception("max(r) cannot be >1e3 for numerical stability.")
rc = cluster_toolkit._lib.calc_xi_mm_exact(r.cast(), len(r),
k.cast(), P.cast(),
len(k), xi.cast())
_handle_gsl_error(rc, xi_mm_at_r)
return xi.finish()
def xi_2halo(bias, xi_mm):
"""2-halo term in halo-matter correlation function
Args:
bias (float): Halo bias
xi_mm (float or array like): Matter-matter correlation function
Returns:
float or array like: 2-halo term in halo-matter correlation function
"""
xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(xi_mm)
cluster_toolkit._lib.calc_xi_2halo(len(xi_mm), bias, xi_mm.cast(),
xi.cast())
return xi.finish()
def xi_hm(xi_1halo, xi_2halo, combination="max"):
"""Halo-matter correlation function
Note: at the moment you can combine the 1-halo and 2-halo terms by either taking the max of the two or the sum of the two. The 'combination' field must be set to either 'max' (default) or 'sum'.
Args:
xi_1halo (float or array like): 1-halo term
xi_2halo (float or array like, same size as xi_1halo): 2-halo term
combination (string; optional): specifies how the 1-halo and 2-halo terms are combined, default is 'max' which takes the max of the two
Returns:
float or array like: Halo-matter correlation function
"""
if combination == "max":
switch = 0
elif combination == 'sum':
switch = 1
else:
raise Exception("Combinations other than maximum not implemented yet")
xi_1halo = _ArrayWrapper(xi_1halo, allow_multidim=True)
xi_2halo = _ArrayWrapper(xi_2halo, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(xi_1halo)
cluster_toolkit._lib.calc_xi_hm(len(xi_1halo), xi_1halo.cast(),
xi_2halo.cast(), xi.cast(), switch)
return xi.finish()
def xi_DK(r, M, conc, be, se, k, P, om, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
"""Diemer-Kravtsov 2014 profile.
Args:
r (float or array like): radii in Mpc/h comoving
M (float): mass in Msun/h
conc (float): Einasto concentration
be (float): DK transition parameter
se (float): DK transition parameter
k (array like): wavenumbers in h/Mpc
P (array like): matter power spectrum in [Mpc/h]^3
Omega_m (float): matter density fraction
delta (float): overdensity of matter. Optional, default is 200
        rhos (float): Einasto density. Optional, default is computed from the mass
alpha (float): Einasto parameter. Optional, default is computed from peak height
beta (float): DK 2-halo parameter. Optional, default is 4
gamma (float): DK 2-halo parameter. Optional, default is 8
Returns:
float or array like: DK profile evaluated at the input radii
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_DK(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, xi.cast())
return xi.finish()
def xi_DK_appendix1(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
"""Diemer-Kravtsov 2014 profile, first form from the appendix, eq. A3.
Args:
r (float or array like): radii in Mpc/h comoving
M (float): mass in Msun/h
conc (float): Einasto concentration
be (float): DK transition parameter
se (float): DK transition parameter
k (array like): wavenumbers in h/Mpc
P (array like): matter power spectrum in [Mpc/h]^3
Omega_m (float): matter density fraction
bias (float): halo bias
xi_mm (float or array like): matter correlation function at r
delta (float): overdensity of matter. Optional, default is 200
        rhos (float): Einasto density. Optional, default is computed from the mass
alpha (float): Einasto parameter. Optional, default is computed from peak height
beta (float): DK 2-halo parameter. Optional, default is 4
gamma (float): DK 2-halo parameter. Optional, default is 8
Returns:
float or array like: DK profile evaluated at the input radii
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True)
    xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_DK_app1(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, bias, xi_mm.cast(), xi.cast())
return xi.finish()
def xi_DK_appendix2(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
"""Diemer-Kravtsov 2014 profile, second form from the appendix, eq. A4.
Args:
r (float or array like): radii in Mpc/h comoving
M (float): mass in Msun/h
conc (float): Einasto concentration
be (float): DK transition parameter
se (float): DK transition parameter
k (array like): wavenumbers in h/Mpc
P (array like): matter power spectrum in [Mpc/h]^3
Omega_m (float): matter density fraction
bias (float): halo bias
xi_mm (float or array like): matter correlation function at r
delta (float): overdensity of matter. Optional, default is 200
        rhos (float): Einasto density. Optional, default is computed from the mass
alpha (float): Einasto parameter. Optional, default is computed from peak height
beta (float): DK 2-halo parameter. Optional, default is 4
gamma (float): DK 2-halo parameter. Optional, default is 8
Returns:
float or array like: DK profile evaluated at the input radii
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k)
P = _ArrayWrapper(P)
xi_mm = _ArrayWrapper(xi_mm)
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_DK_app2(r.cast(), len(r), M, rhos, conc, be,
se, alpha, beta, gamma, delta,
k.cast(), P.cast(), len(k), om, bias,
xi_mm.cast(), xi.cast())
return xi.finish()
``` |
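A minimal usage sketch for the correlation-function helpers above (assuming the compiled `cluster_toolkit` library is installed; `k`/`P` and the halo bias are placeholders that would normally come from a Boltzmann code and a bias model):
```python
import numpy as np
r = np.logspace(-2, 2, 50)        # Mpc/h comoving
M, c, Omega_m = 1e14, 5.0, 0.3    # Msun/h, concentration, matter fraction
xi_1h = xi_nfw_at_r(r, M, c, Omega_m)
# xi_mm = xi_mm_at_r(r, k, P)                       # needs a matter power spectrum
# xi_2h = xi_2halo(halo_bias, xi_mm)
# xi_full = xi_hm(xi_1h, xi_2h, combination="max")  # combine 1-halo and 2-halo terms
```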
{
"source": "jhod0/SIDMpy",
"score": 3
} |
#### File: sidmpy/Profiles/deflection_angle_util.py
```python
import numpy as np
from scipy.integrate import quad
from scipy.interpolate import interp1d
def r3d(r2d, z):
"""
Three dimensional density as a function of z and two-d density r2d
"""
return np.sqrt(r2d ** 2 + z ** 2)
def integrand_mproj(z, r2d, rhofunc, args):
"""
The integrand for the projection integral
sigma(R) = \int_{-\infty}^{\infty} \rho\left(r\left(R, z\right)\right) dz
"""
try:
return 2 * rhofunc(r3d(r2d, z), *args)
except:
return 2 * rhofunc(r3d(r2d, z), args)
def integrand_deflection(r, rhofunc, args):
"""
The integrand for the deflection integral
deflection(R) \sim \frac{2}{R} \int_0^{R} r * sigma(r) dr
"""
return r * projected_mass(r, rhofunc, args)
def projected_mass(R2D, rho_function, function_args):
"""
Computes the projection integral
    :param R2D: projected (2d) radius
    :param rho_function: callable returning the 3d density at a given radius
    :param function_args: extra arguments passed to rho_function
    :return: the projected (line-of-sight integrated) density at R2D
"""
return quad(integrand_mproj, 0, 1000, args=(R2D, rho_function, function_args))[0]
def deflection_point(args):
"""
Computes the deflection angle at R
    :param args: tuple of (R, rho_function, function_args), as assembled by deflection()
    :return: the deflection angle at R
"""
(R, rho_function, function_args) = args
return (2 / R) * quad(integrand_deflection, 0, R, args=(rho_function, function_args))[0]
def deflection(Rvalues, rho_function, function_args,
use_pool=False, nproc=10):
"""
:param Rvalues: r coordinates in 3d
:param rho_function: a function that outputs the 3d density given a 3d r coordinate
Must be of the form
def rho_function(r3d, arg1, arg2, ...):
return density_at_r3d
or equivalently
def rho_function(r3d, *function_args):
return density_at_r3d
:param function_args: a tuple (arg1, arg2, ...)
:param use_pool: use multi-processing
:return: deflection angles evaluated at Rvalues
"""
args = []
for k, ri in enumerate(Rvalues):
args.append((ri, rho_function, function_args))
if use_pool:
from multiprocessing.pool import Pool
pool = Pool(nproc)
defangle = pool.map(deflection_point, args)
pool.close()
else:
defangle = [deflection_point(args_i) for args_i in args]
return np.array(defangle)
def deflection_multiprocessing(args):
return deflection(*args)
def deflection_from_profile(Rvalues, rho_3D_array, r_evaluate):
"""
    :param Rvalues: three dimensional r coordinates at which the density is tabulated
    :param rho_3D_array: the density at each Rvalue
    :param r_evaluate: the coordinates at which to evaluate the deflection angles
    :return: the deflection angle at each r_evaluate coordinate
"""
rho_interp = interp1d(Rvalues, rho_3D_array)
def _dummy_interp_function(x, *args, **kwargs):
"""
Required since the density function must take some arguments, but interp1d takes only one argument
"""
return rho_interp(x)
return deflection(r_evaluate, _dummy_interp_function, function_args=None)
``` |
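A small usage sketch for the projection/deflection helpers above. The density profile below is an arbitrary cored example chosen for illustration, not one shipped with SIDMpy, and the nested quadrature makes this slow for many radii:
```python
import numpy as np
def rho_example(r, rho0, rc):
    # arbitrary cored profile, used only to exercise the integrals
    return rho0 / (1.0 + (r / rc) ** 2)
R = np.logspace(-2, 1, 5)                                      # projected radii
sigma = [projected_mass(ri, rho_example, (1e7, 0.5)) for ri in R]
alpha = deflection(R, rho_example, function_args=(1e7, 0.5))
# deflection(..., use_pool=True, nproc=4) runs the per-radius integrals in parallel
```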
{
"source": "jhodges10/custom-actions-app-python",
"score": 2
} |
#### File: custom-actions-app-python/lib/video_handler.py
```python
import subprocess
import shutil
import os
import math
import urllib
from requests.exceptions import HTTPError
from random import randint
from pprint import pprint
from timecode import Timecode
from frameioclient import FrameioClient
from dotenv import load_dotenv
from pathlib import Path # python3 only
def render_and_upload_slate(**kwargs):
print("Slate processing has begun...")
# Create temp directory
if os.path.isdir(os.path.join(os.path.curdir, "temp")):
pass
else:
os.mkdir("temp")
# First try to grab ENV from system state
token = os.environ.get("FRAMEIO_TOKEN")
if token == None:
try:
print("Failure to load .env file... Trying one directory up.")
cur_dir = Path.cwd()
env_path = cur_dir.parents[0] / '.env'
print(env_path)
load_dotenv(dotenv_path=env_path, verbose=False)
token = os.environ.get("FRAMEIO_TOKEN")
if token == None:
raise HTTPError
except HTTPError as e:
print(e)
print("Failure to load .env file... Trying one directory up.")
cur_dir = Path.cwd()
env_path = cur_dir.parents[1] / '.env'
load_dotenv(dotenv_path=env_path, verbose=False)
token = os.environ.get("FRAMEIO_TOKEN")
finally:
client = FrameioClient(token)
else:
client = FrameioClient(token)
# Get asset info
asset_info = client.get_asset(kwargs['resource_id'])
# TODO add handling of version stacks here (really just get the asset id of the latest version and replace asset_info with that)
# pprint(asset_info)
og_asset_url = asset_info['original']
# Download original frame.io video
print("Downloading video...")
dl_path = os.path.join(os.path.curdir, 'temp', asset_info['name'])
urllib.request.urlretrieve(og_asset_url, dl_path)
print("Video downloaded. Continuing...")
resolution = {
"width": asset_info['transcodes']['original_width'],
"height": asset_info['transcodes']['original_height']
}
slate_path = generate_slate(client=kwargs['client'], fps=asset_info['fps'],
duration=asset_info['duration'], project=kwargs['project'], resolution=resolution)
# Merge new slate with video
ul_filepath = merge_slate_with_video(slate_path, dl_path)
# Upload new video to Frame.io
upload_to_frameio(ul_filepath, asset_info, client)
# Clean-up temp folder
shutil.rmtree("temp")
return True
def generate_slate(**kwargs):
print("Generating slate...")
# Crate slate w/ FFMPEG
movie_path = f"temp/new_slate_{randint(1,100)}.mp4"
tc = Timecode(kwargs['fps'], f"00:00:{kwargs['duration']}")
slate_string = """-y -i lib/8s_blank_slate.mp4 -vf \
'drawtext=fontfile=lib/AvenirNext.ttc: \
text={}: fontcolor=white: fontsize=62: box=0: \
x=1114:y=351, \
drawtext=fontfile=lib/AvenirNext.ttc: \
text={}: fontcolor=white: fontsize=62: box=0: \
x=1114: y=551, \
drawtext=fontfile=lib/AvenirNext.ttc: \
text={}: fontcolor=white: fontsize=62: box=0: \
x=1114: y=742, scale={}:{}, fps=fps={}' \
-pix_fmt yuv420p {} \
""".format(kwargs['client'].upper(), kwargs['project'].upper(), str(tc).replace(":", "\\\\\\\\\\\\:"), kwargs['resolution']['width'], kwargs['resolution']['height'], kwargs['fps'], movie_path)
# x=1118: y=742' -vf scale={}:{} \ -- backup line in case getting rid of the additional call for -vf doesn't work
# add '-an' to end of FFMPEG script, before output specified in order to remove audio from slate.
black_slate_string = """-y -i lib/2s_black.mp4 -vf 'scale={}:{}, fps=fps={}' -pix_fmt yuv420p temp/temp_black.mp4 \
""".format(kwargs['resolution']['width'], kwargs['resolution']['height'], kwargs['fps'])
with open("FFMPEG_log.log", "a") as output:
# Generate actual slate
subprocess.call(
"""ffmpeg {}""".format(
slate_string),
shell=True, stdout=output, stderr=output
)
# Generate 2s of black
subprocess.call(
"""ffmpeg {}""".format(
black_slate_string),
shell=True, stdout=output, stderr=output
)
print("Slate generation completed. Continuing...")
return movie_path
def merge_slate_with_video(slate_path, video_path):
# Process w/ FFMPEG
with open("FFMPEG_log.log", "a") as output:
# Generate intermediate transport streams to prevent re-encoding of h.264
print("Generating intermediate1.ts")
subprocess.call(
"""ffmpeg -y -i '{}' -c copy -bsf:v h264_mp4toannexb -f mpegts ./temp/intermediate1.ts""".format(
slate_path),
shell=True, stdout=output, stderr=output
)
print("Done Generating intermediate1.ts")
print("Creating intermediate2.ts")
subprocess.call(
"""ffmpeg -y -i ./temp/temp_black.mp4 -c copy -bsf:v h264_mp4toannexb -f mpegts ./temp/intermediate2.ts""",
shell=True, stdout=output, stderr=output
)
print("Done Generating intermediate2.ts")
print("Creating intermediate3.ts")
subprocess.call(
"""ffmpeg -y -i '{}' -c copy -bsf:v h264_mp4toannexb -f mpegts ./temp/intermediate3.ts""".format(
video_path),
shell=True, stdout=output, stderr=output
)
print("Done Generating intermediate3.ts")
print("Beginning merge...")
# Merge together transport streams
subprocess.call(
"""ffmpeg -y -i 'concat:./temp/intermediate1.ts|./temp/intermediate2.ts|./temp/intermediate3.ts' -c copy -bsf:a aac_adtstoasc ./temp/slated_output.mp4""",
shell=True, stdout=output, stderr=output
)
print("Merge completed... Ready to upload!")
return "temp/slated_output.mp4"
def upload_to_frameio(final_video_path, asset_info, client):
# Rename file to original file name
new_name = asset_info['name'].split(
'.')[0] + '_s' + '.' + asset_info['name'].split('.')[1]
ul_path = os.path.join(os.curdir, 'temp', new_name)
os.rename(os.path.join(os.curdir, final_video_path), ul_path)
# Get parent asset and project
parent_id = asset_info['parent_id']
# project_id = asset_info['project_id']
filesize = os.path.getsize(ul_path)
# Upload
asset = client.create_asset(
parent_asset_id=parent_id,
name=new_name,
type="file",
filetype="video/quicktime",
filesize=filesize
)
with open(ul_path, "rb") as file:
print("Starting upload...")
try:
client.upload(asset, file)
except HTTPError:
print(HTTPError)
print("Failed uploading")
return False
print("Upload completed!")
return True
if __name__ == "__main__":
# Initialize FIO Class
try:
env_path = Path('') / '.env'
load_dotenv(dotenv_path=env_path, verbose=False)
except Exception as e:
print(e)
print("Failure to load .env file... Trying one directory up.")
env_path = Path('..') / '.env'
load_dotenv(dotenv_path=env_path, verbose=False)
token = os.environ.get("FRAMEIO_TOKEN")
client = FrameioClient(token)
asset_info = client.get_asset('a1a27d9b-181a-4005-b176-27d74cef8150')
pprint(asset_info)
upload_to_frameio("temp/slated_output.mp4", asset_info, client)
``` |
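A hypothetical invocation of the slate workflow above (it assumes `FRAMEIO_TOKEN` is available in the environment or a .env file and that ffmpeg is on the PATH; the asset id and names are placeholders):
```python
render_and_upload_slate(
    resource_id='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',  # Frame.io asset id (placeholder)
    client='Example Client',
    project='Example Project',
)
```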
{
"source": "jhodges10/slack-dash-vote-check",
"score": 2
} |
#### File: slack-dash-vote-check/extended_libs/s3_integration.py
```python
import boto3
# Note: AWS_ACCESS_KEY_ID, AWS_SECRET_KEY and file_key are referenced below but are
# not defined in this snippet; the original module is assumed to supply them
# (for example from a config module or environment variables).
bucket = 'dash-cache-images'
# Upload local file to S3
def upload(byte_stream):
s3 = boto3.client(
's3',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_KEY
)
print('S3 Client Initiated')
s3.upload_file(byte_stream, bucket, file_key)
# s3.Bucket('review-app-jh').put_object(Key=filename, Body=data)
print('S3 Upload Completed')
return s3, file_key
# This wraps the upload and URL creation for the Graph
def add_and_upload_simple(byte_stream):
s3, uploaded_file_id = upload(byte_stream)
url = gen_URL(bucket, uploaded_file_id, s3)
return url
# Generate URL from object ID
def gen_URL(bucket, key, s3):
url = s3.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': bucket,
'Key': key
}
)
return url
``` |
{
"source": "jhoe123/Elastos.Hive.Node",
"score": 3
} |
#### File: hive/main/hive_manage.py
```python
from hive.settings import hive_setting
from hive.util.server_response import ServerResponse
import logging
logger = logging.getLogger("HiveManage")
class HiveManage:
def __init__(self):
self.app = None
self.response = ServerResponse("HiveManage")
def init_app(self, app):
self.app = app
def get_hive_version(self):
data = {"version": hive_setting.VERSION}
print("version:" + hive_setting.VERSION)
logger.debug("version:" + hive_setting.VERSION)
return self.response.response_ok(data)
def get_hive_commit_hash(self):
data = {"commit_hash": hive_setting.LAST_COMMIT}
print("commit_hash:" + hive_setting.LAST_COMMIT)
logger.debug("commit_hash:" + hive_setting.LAST_COMMIT)
return self.response.response_ok(data)
```
#### File: hive/main/__init__.py
```python
import threading
from . import view, view_db, view_file, view_scripting, view_payment, interceptor, scheduler, view_internal, \
view_backup, view_pubsub
import logging
from hive.util.constants import HIVE_MODE_TEST
from ..settings import hive_setting
from ..util.did.did_init import init_did_backend
from ..util.payment.vault_service_manage import count_vault_storage_job
logging.getLogger().level = logging.INFO
class RefreshVaultStorageUsageThread(threading.Thread):
def __init__(self):
super().__init__()
def run(self):
# Reset the storage size of all vaults when initialize.
try:
logging.info(f'[RefreshVaultStorageUsageThread] Start init all vaults usage.')
count_vault_storage_job()
logging.info(f'[RefreshVaultStorageUsageThread] Init vault usage successfully')
except Exception as e:
logging.error(f'[RefreshVaultStorageUsageThread] Init vault usage failed {str(e)}')
def init_app(app, mode):
hive_setting.init_config()
logging.getLogger('v1_init').info('enter init_app')
init_did_backend()
interceptor.init_app(app)
view.init_app(app)
view_db.init_app(app)
view_file.init_app(app)
view_scripting.init_app(app)
view_payment.init_app(app)
view_internal.init_app(app, mode)
view_backup.init_app(app, mode)
view_pubsub.init_app(app, mode)
if mode == HIVE_MODE_TEST:
scheduler.scheduler_init(app, paused=True)
else:
scheduler.scheduler_init(app, paused=False)
RefreshVaultStorageUsageThread().start()
logging.getLogger('v1_init').info('leave init_app')
```
#### File: hive/main/view.py
```python
from flask import Blueprint, request, jsonify
from hive.main.hive_auth import HiveAuth
from hive.main.hive_manage import HiveManage
h_auth = HiveAuth()
h_manage = HiveManage()
main = Blueprint('main', __name__)
def init_app(app):
h_auth.init_app(app)
h_manage.init_app(app)
app.register_blueprint(main)
@main.route('/api/v1/echo', methods=['POST'])
def echo():
content = request.get_json()
return jsonify(content)
# hive version
@main.route('/api/v1/hive/version', methods=['GET'])
def get_hive_version():
return h_manage.get_hive_version()
# hive commit hash
@main.route('/api/v1/hive/commithash', methods=['GET'])
def get_hive_commit_hash():
return h_manage.get_hive_commit_hash()
# did auth
@main.route('/api/v1/did/sign_in', methods=['POST'])
def access_request():
return h_auth.sign_in()
@main.route('/api/v1/did/auth', methods=['POST'])
def request_did_auth():
return h_auth.request_did_auth()
@main.route('/api/v1/did/check_token', methods=['POST'])
def check_token():
return h_auth.check_token()
@main.route('/api/v1/did/backup_auth', methods=['POST'])
def backup_auth():
return h_auth.backup_auth()
@main.route('/api/v1/did/check_backup_token', methods=['POST'])
def check_backup_token():
return h_auth.check_backup_token()
@main.route('/api/v1/did/<did_base58>/<app_id_base58>/callback', methods=['POST'])
def did_auth_callback(did_base58, app_id_base58):
return h_auth.did_auth_callback(did_base58, app_id_base58)
```
#### File: hive/tools/creat_default_vault_for_registered_user.py
```python
from hive.util.constants import DID_INFO_REGISTER_COL, DID_INFO_DB_NAME, DID
from hive.util.did_mongo_db_resource import create_db_client
from hive.util.payment.payment_config import PaymentConfig
from hive.util.payment.vault_service_manage import get_vault_service, setup_vault_service
def create_vault_of_did(did):
service = get_vault_service(did)
if service:
return
free_info = PaymentConfig.get_free_vault_info()
setup_vault_service(did, free_info["maxStorage"], free_info["serviceDays"])
def create_all_vault():
connection = create_db_client()
db = connection[DID_INFO_DB_NAME]
col = db[DID_INFO_REGISTER_COL]
infos = col.find()
for did_info in infos:
if DID in did_info:
create_vault_of_did(did_info[DID])
if __name__ == '__main__':
create_all_vault()
```
#### File: hive/util/auth.py
```python
from hive.util.constants import DID, APP_ID
from hive.main import view
def did_auth():
info, err = view.h_auth.get_token_info()
if info:
if APP_ID in info:
return info[DID], info[APP_ID]
else:
return info[DID], None
else:
return None, None
def did_auth2():
""" Only for src part. """
info, err = view.h_auth.get_token_info()
did = info[DID] if info else None
app_did = info[APP_ID] if info and APP_ID in info else None
return did, app_did, err
```
#### File: util/flask_rangerequest/_utils.py
```python
from flask import abort
def parse_range_header(range_header: str, target_size: int) -> list:
end_index = target_size - 1
if range_header is None:
return [(0, end_index)]
bytes_ = 'bytes='
if not range_header.startswith(bytes_):
abort(416)
ranges = []
for range_ in range_header[len(bytes_):].split(','):
split = range_.split('-')
if len(split) == 1:
try:
start = int(split[0])
end = end_index
except ValueError:
abort(416)
elif len(split) == 2:
start, end = split[0], split[1]
if not start:
# parse ranges of the form "bytes=-100" (i.e., last 100 bytes)
end = end_index
try:
start = end - int(split[1]) + 1
except ValueError:
abort(416)
else:
# parse ranges of the form "bytes=100-200"
try:
start = int(start)
if not end:
end = target_size
else:
end = int(end)
except ValueError:
abort(416)
if end < start:
abort(416)
end = min(end, end_index)
else:
abort(416)
ranges.append((start, end))
# merge the ranges
merged = []
ranges = sorted(ranges, key=lambda x: x[0])
for range_ in ranges:
# initial case
if not merged:
merged.append(range_)
else:
# merge ranges that are adjacent or overlapping
if range_[0] <= merged[-1][1] + 1:
merged[-1] = (merged[-1][0], max(range_[1], merged[-1][1]))
else:
merged.append(range_)
return merged
```
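For reference, a few inputs and the merged ranges the parser above produces for a 1000-byte resource (invalid headers trigger Flask's abort(416)):
```python
parse_range_header(None, 1000)                  # -> [(0, 999)]   whole resource
parse_range_header('bytes=0-99,50-150', 1000)   # -> [(0, 150)]   overlapping ranges merged
parse_range_header('bytes=-100', 1000)          # -> [(900, 999)] last 100 bytes
```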
#### File: util/pubsub/subscriber.py
```python
from datetime import datetime
import pymongo
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from hive.settings import hive_setting
from hive.util.constants import DID_INFO_DB_NAME, SUB_MESSAGE_COLLECTION, SUB_MESSAGE_PUB_DID, \
SUB_MESSAGE_PUB_APPID, SUB_MESSAGE_CHANNEL_NAME, SUB_MESSAGE_SUB_DID, SUB_MESSAGE_SUB_APPID, \
SUB_MESSAGE_MODIFY_TIME, SUB_MESSAGE_DATA, SUB_MESSAGE_TIME, SUB_MESSAGE_SUBSCRIBE_ID
from hive.util.pubsub.publisher import pubsub_get_subscribe_id
def sub_setup_message_subscriber(pub_did, pub_appid, channel_name, sub_did, sub_appid):
if hive_setting.MONGO_URI:
uri = hive_setting.MONGO_URI
connection = MongoClient(uri)
else:
connection = MongoClient(hive_setting.MONGODB_URI)
db = connection[DID_INFO_DB_NAME]
col = db[SUB_MESSAGE_COLLECTION]
_id = pubsub_get_subscribe_id(pub_did, pub_appid, channel_name, sub_did, sub_appid)
dic = {
"_id": _id,
SUB_MESSAGE_PUB_DID: pub_did,
SUB_MESSAGE_PUB_APPID: pub_appid,
SUB_MESSAGE_CHANNEL_NAME: channel_name,
SUB_MESSAGE_SUB_DID: sub_did,
SUB_MESSAGE_SUB_APPID: sub_appid,
SUB_MESSAGE_MODIFY_TIME: datetime.utcnow().timestamp()
}
try:
ret = col.insert_one(dic)
except DuplicateKeyError:
return None
subscribe_id = ret.inserted_id
return subscribe_id
def sub_remove_message_subscriber(pub_did, pub_appid, channel_name, sub_did, sub_appid):
if hive_setting.MONGO_URI:
uri = hive_setting.MONGO_URI
connection = MongoClient(uri)
else:
connection = MongoClient(hive_setting.MONGODB_URI)
db = connection[DID_INFO_DB_NAME]
col = db[SUB_MESSAGE_COLLECTION]
query = {
SUB_MESSAGE_PUB_DID: pub_did,
SUB_MESSAGE_PUB_APPID: pub_appid,
SUB_MESSAGE_CHANNEL_NAME: channel_name,
SUB_MESSAGE_SUB_DID: sub_did,
SUB_MESSAGE_SUB_APPID: sub_appid,
}
col.delete_many(query)
def sub_get_message_subscriber(pub_did, pub_appid, channel_name, sub_did, sub_appid):
if hive_setting.MONGO_URI:
uri = hive_setting.MONGO_URI
connection = MongoClient(uri)
else:
connection = MongoClient(hive_setting.MONGODB_URI)
db = connection[DID_INFO_DB_NAME]
col = db[SUB_MESSAGE_COLLECTION]
_id = pubsub_get_subscribe_id(pub_did, pub_appid, channel_name, sub_did, sub_appid)
query = {
"_id": _id
}
info = col.find_one(query)
return info
def sub_add_message(pub_did, pub_appid, channel_name, sub_did, sub_appid, message, message_time):
if hive_setting.MONGO_URI:
uri = hive_setting.MONGO_URI
connection = MongoClient(uri)
else:
connection = MongoClient(hive_setting.MONGODB_URI)
db = connection[DID_INFO_DB_NAME]
col = db[SUB_MESSAGE_COLLECTION]
_id = pubsub_get_subscribe_id(pub_did, pub_appid, channel_name, sub_did, sub_appid)
dic = {
SUB_MESSAGE_SUBSCRIBE_ID: _id,
SUB_MESSAGE_PUB_DID: pub_did,
SUB_MESSAGE_PUB_APPID: pub_appid,
SUB_MESSAGE_CHANNEL_NAME: channel_name,
SUB_MESSAGE_SUB_DID: sub_did,
SUB_MESSAGE_SUB_APPID: sub_appid,
SUB_MESSAGE_DATA: message,
SUB_MESSAGE_TIME: message_time
}
ret = col.insert_one(dic)
return ret
def sub_pop_messages(pub_did, pub_appid, channel_name, sub_did, sub_appid, limit):
if hive_setting.MONGO_URI:
uri = hive_setting.MONGO_URI
connection = MongoClient(uri)
else:
connection = MongoClient(hive_setting.MONGODB_URI)
db = connection[DID_INFO_DB_NAME]
col = db[SUB_MESSAGE_COLLECTION]
_id = pubsub_get_subscribe_id(pub_did, pub_appid, channel_name, sub_did, sub_appid)
query = {
SUB_MESSAGE_SUBSCRIBE_ID: _id,
}
cursor = col.find(query).sort(SUB_MESSAGE_TIME, pymongo.ASCENDING).limit(limit)
message_list = list()
message_ids = list()
for message in cursor:
data = {
"message": message[SUB_MESSAGE_DATA],
"time": message[SUB_MESSAGE_TIME]
}
message_list.append(data)
message_ids.append(message["_id"])
if message_ids:
__remove_messages(message_ids)
return message_list
def __remove_messages(message_ids):
if hive_setting.MONGO_URI:
uri = hive_setting.MONGO_URI
connection = MongoClient(uri)
else:
connection = MongoClient(hive_setting.MONGODB_URI)
db = connection[DID_INFO_DB_NAME]
col = db[SUB_MESSAGE_COLLECTION]
col.delete_many({"_id": {"$in": message_ids}})
```
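A rough end-to-end sketch of the subscriber helpers above (requires a reachable MongoDB configured through hive_setting; all DIDs, app DIDs and the channel name are made up):
```python
from datetime import datetime
pub, pub_app, sub, sub_app = 'did:elastos:pubX', 'pub-app', 'did:elastos:subY', 'sub-app'
sub_setup_message_subscriber(pub, pub_app, 'news', sub, sub_app)
sub_add_message(pub, pub_app, 'news', sub, sub_app, {'title': 'hello'}, datetime.utcnow().timestamp())
messages = sub_pop_messages(pub, pub_app, 'news', sub, sub_app, limit=10)
# -> [{'message': {'title': 'hello'}, 'time': ...}]; popped messages are deleted from the collection
```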
#### File: utils/did/entity.py
```python
import logging
import os
import base58
from src.utils.consts import DID
from src.utils.http_exception import BadRequestException, HiveException
from src.settings import hive_setting
from src.utils_v1.common import gene_temp_file_name
from src.utils.did.did_wrapper import DIDStore, DIDDocument, RootIdentity, Issuer, Credential, JWTBuilder
class Entity:
def __init__(self, name, mnemonic=None, passphrase=None, storepass=None, need_resolve=True, from_file=False, file_content=None):
"""
:param file_content: base58
"""
passphrase, storepass = '<PASSWORD>' if passphrase is None else passphrase, 'password' if storepass is None else storepass
self.name = name
store_dir = hive_setting.DID_DATA_STORE_PATH + os.sep + self.name
self.did_store: DIDStore = DIDStore(store_dir, storepass)
self.did: DID
self.doc: DIDDocument
if from_file:
            assert file_content, 'Entity.__init__: file_content must be provided.'
self.did, self.doc = self.init_did_from_file(file_content, passphrase)
else:
            assert mnemonic, 'Entity.__init__: mnemonic must be provided.'
self.did, self.doc = self.init_did_from_mnemonic(mnemonic, passphrase, need_resolve)
self.did_str = str(self.did)
self.issuer: Issuer = self.did_store.create_issuer(self.did)
def init_did_from_file(self, file_content: str, passphrase: str) -> (DID, DIDDocument):
try:
return self.load_existed_did()
except HiveException as e:
            logging.info('Entity.init_did_from_file: no existing DID in the store; importing it first')
self.load_did_to_store(file_content, passphrase)
return self.load_existed_did()
def load_did_to_store(self, file_content: str, passphrase: str):
try:
file_content_str = base58.b58decode(file_content).decode('utf8')
except Exception as e:
            raise RuntimeError('load_did_to_store: invalid value of NODE_CREDENTIAL')
file_path = gene_temp_file_name()
with open(file_path, 'w') as f:
ret_val = f.write(file_content_str)
f.flush()
try:
self.did_store.import_did(file_path.as_posix(), passphrase)
except Exception as ex:
raise ex
finally:
file_path.unlink()
def load_existed_did(self):
dids = self.did_store.list_dids()
if not dids:
raise BadRequestException(msg='Entity.init_from_file: no did in store')
return dids[0], self.did_store.load_did(dids[0])
def init_did_from_mnemonic(self, mnemonic: str, passphrase: str, need_resolve: bool):
root_identity = self.did_store.get_root_identity(mnemonic, passphrase)
return self.init_did_by_root_identity(root_identity, need_resolve=need_resolve)
def init_did_by_root_identity(self, root_identity: RootIdentity, need_resolve=True):
did, doc = root_identity.get_did_0(), None
        if self.did_store.contains_did(did):
# direct get
return did, self.did_store.load_did(did)
if need_resolve:
# resolve, then get
root_identity.sync_0()
return did, self.did_store.load_did(did)
# create, then get
doc = root_identity.new_did_0()
return doc.get_subject(), doc
def get_name(self):
return self.name
def get_doc(self) -> DIDDocument:
return self.doc
def get_did_string(self) -> str:
return self.did_str
def create_credential(self, type_, props, owner_did: DID = None) -> Credential:
did = owner_did if owner_did else self.did
return self.issuer.create_credential_by_string(did, self.name, type_, props, self.doc.get_expires())
def create_presentation_str(self, vc: Credential, nonce: str, realm: str) -> str:
return self.did_store.create_presentation(self.did, 'jwtvp', nonce, realm, vc).to_json()
def create_vp_token(self, vp_json, subject, hive_did: str, expire) -> str:
return self.create_jwt_token(subject, hive_did, expire, 'presentation', vp_json)
def create_jwt_token(self, subject: str, audience_did_str: str, expire: int, claim_key: str, claim_value: any, claim_json: bool = True) -> str:
builder: JWTBuilder = self.did_store.get_jwt_builder(self.doc)
return builder.create_token(subject, audience_did_str, expire, claim_key, claim_value, claim_json=claim_json)
```
#### File: src/utils/http_response.py
```python
import traceback
import logging
import typing as t
from flask import request, make_response, jsonify
from flask_restful import Api
from sentry_sdk import capture_exception
from src.utils.http_exception import HiveException, InternalServerErrorException
class HiveApi(Api):
@staticmethod
def _get_resp_success_code():
codes = {
'GET': 200,
'PUT': 200,
'PATCH': 200,
'POST': 201,
'DELETE': 204,
}
assert request.method in codes
return codes[request.method]
def make_response(self, data, *args, **kwargs):
""" Custom response for success response.
:param data: the data returned by the API class method.
:return: response object
"""
resp = super().make_response(data, *args, **kwargs)
resp.status_code = HiveApi._get_resp_success_code()
return resp
def handle_error(self, e):
""" Convert any exception (HiveException and Exception) to error response message. """
ex = e
if not hasattr(e, 'get_error_dict') or not hasattr(e, 'code'):
if hasattr(e, 'code'):
ex = HiveException(e.code, -1, str(e))
else:
msg = f'V2 internal error: {str(e)}, {traceback.format_exc()}'
logging.getLogger('http response').error(msg)
capture_exception(error=Exception(f'V2 UNEXPECTED: {msg}'))
ex = InternalServerErrorException(msg=msg)
return jsonify(ex.get_error_dict()), ex.code
def response_stream(f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
""" A decorator which makes the endpoint support the stream response """
def wrapper(*args, **kwargs):
ret_value = f(*args, **kwargs)
if not ret_value or type(ret_value) is dict:
return ret_value
response = make_response(ret_value)
response.headers['content-type'] = 'application/octet-stream'
return response
return wrapper
```
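A hypothetical endpoint using the response_stream decorator above; the resource class and file path are illustrative only:
```python
from flask_restful import Resource
class ExampleDownload(Resource):
    @response_stream
    def get(self, name):
        with open(f'/tmp/{name}', 'rb') as f:   # placeholder storage location
            return f.read()                     # non-dict return values become an octet-stream response
```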
#### File: src/utils/node_settings.py
```python
from pathlib import Path
from src.settings import hive_setting
from src.utils_v1.common import did_tail_part
def _st_get_vault_path(user_did):
"""
Get the root dir of the vault.
:param user_did: The user DID.
:return: Path: the path of the vault root.
"""
path = Path(hive_setting.VAULTS_BASE_DIR)
if path.is_absolute():
path = path / did_tail_part(user_did)
else:
path = path.resolve() / did_tail_part(user_did)
return path.resolve()
def st_get_ipfs_cache_path(user_did):
"""
Get the root dir of the IPFS cache files.
:param user_did: The user DID
:return: Path: the path of the cache root.
"""
return _st_get_vault_path(user_did) / 'ipfs_cache'
```
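For illustration (the DID is a placeholder), the cache helper above resolves to a directory under the configured vaults base dir:
```python
cache_dir = st_get_ipfs_cache_path('did:elastos:example1234567890')
# -> Path('<VAULTS_BASE_DIR>/<did tail>/ipfs_cache')
```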
#### File: src/view/auth.py
```python
from flask_restful import Resource
from src.modules.auth import auth
from src.utils.http_exception import InvalidParameterException
from src.utils.http_request import params
class SignIn(Resource):
def __init__(self):
self.auth = auth.Auth()
def post(self):
""" Sign in with the application instance DID to get the challenge string.
.. :quickref: 01 Authentication; Sign in with app instance DID
**Request**:
.. code-block:: json
{
"id": "<the user’s did_document>",
}
**Response OK**:
.. sourcecode:: http
HTTP/1.1 201 Created
.. code-block:: json
{
“challenge”: “<the authentication challenge encoded in JWT>”
}
**Response Error**:
.. sourcecode:: http
HTTP/1.1 400 Bad Request
"""
doc, msg = params.get_dict('id')
if msg or not doc:
raise InvalidParameterException()
return self.auth.sign_in(doc)
class Auth(Resource):
def __init__(self):
self.auth = auth.Auth()
def post(self):
""" Auth to get the access token for the user DID and the application DID.
.. :quickref: 01 Authentication; Get the access token.
**Request**:
.. code-block:: json
{
"challenge_response": "<the response for the authentication challenge encoded in JWT>",
}
**Response OK**:
.. sourcecode:: http
HTTP/1.1 201 Created
.. code-block:: json
{
“token”: “<the access token encoded in JWT>”
}
**Response Error**:
.. sourcecode:: http
HTTP/1.1 400 Bad Request
"""
challenge_response, msg = params.get_str('challenge_response')
if msg:
raise InvalidParameterException(msg=msg)
return self.auth.auth(challenge_response)
class BackupAuth(Resource):
def __init__(self):
self.auth = auth.Auth()
def post(self):
""" Get the access token for the vault service node. """
challenge_response, msg = params.get_str('challenge_response')
if msg or not challenge_response:
raise InvalidParameterException()
return self.auth.backup_auth(challenge_response)
```
#### File: Elastos.Hive.Node/tests/subscription_test.py
```python
import unittest
from tests.utils.http_client import HttpClient
from tests import init_test
class SubscriptionTestCase(unittest.TestCase):
def __init__(self, method_name='runTest'):
super().__init__(method_name)
init_test()
self.cli = HttpClient(f'/api/v2')
self.backup_cli = HttpClient(f'/api/v2', is_backup_node=True)
def test01_vault_subscribe(self):
response = self.cli.put('/subscription/vault')
self.assertTrue(response.status_code in [200, 455])
@unittest.skip
def test02_vault_activate(self):
response = self.cli.post('/subscription/vault?op=activation')
self.assertEqual(response.status_code, 201)
def test03_vault_get_info(self):
response = self.cli.get('/subscription/vault')
self.assertEqual(response.status_code, 200)
def test04_vault_get_app_stats(self):
response = self.cli.get('/subscription/vault/app_stats')
self.assertTrue(response.status_code in [200, 404])
@unittest.skip
def test05_vault_deactivate(self):
response = self.cli.post('/subscription/vault?op=deactivation')
self.assertEqual(response.status_code, 201)
def test06_vault_unsubscribe(self):
response = self.cli.delete('/subscription/vault')
self.assertEqual(response.status_code, 204)
response = self.cli.delete('/subscription/vault')
self.assertEqual(response.status_code, 404)
def test07_price_plan(self):
response = self.cli.get('/subscription/pricing_plan?subscription=all&name=Free')
self.assertEqual(response.status_code, 200)
self.assertTrue('backupPlans' in response.json())
self.assertTrue('pricingPlans' in response.json())
def test08_backup_subscribe(self):
response = self.backup_cli.put('/subscription/backup')
self.assertTrue(response.status_code in [200, 455])
def test09_backup_get_info(self):
response = self.backup_cli.get('/subscription/backup')
self.assertEqual(response.status_code, 200)
def test10_backup_unsubscribe(self):
response = self.backup_cli.delete('/subscription/backup')
self.assertEqual(response.status_code, 204)
response = self.backup_cli.delete('/subscription/backup')
self.assertEqual(response.status_code, 404)
if __name__ == '__main__':
unittest.main()
```
#### File: Elastos.Hive.Node/tests_v1/hive_auth_test.py
```python
import json
import unittest
import logging
from flask import appcontext_pushed, g
import flask_unittest
from contextlib import contextmanager
from src.utils.did.eladid import ffi, lib
from src.utils.did.did_wrapper import DID, Credential
from src import create_app
from hive.util.constants import HIVE_MODE_TEST
from hive.util.did.v1_entity import V1Entity
from tests_v1 import test_common
logger = logging.getLogger()
logger.level = logging.DEBUG
class DIDApp(V1Entity):
def __init__(self, name, mnemonic=None, passphrase=None):
V1Entity.__init__(self, name, mnemonic=mnemonic, passphrase=passphrase)
def issue_auth(self, app) -> Credential:
props = {'appDid': app.appId}
return super().create_credential('AppIdCredential', props, owner_did=DID(app.get_did()))
def issue_backup_auth(self, hive1_did, host, hive2_did):
props = {'sourceDID': hive1_did, 'targetHost': host, 'targetDID': hive2_did}
return super().create_credential('BackupCredential', props, owner_did=DID.from_string(hive1_did)).vc
class DApp(V1Entity):
access_token = "<PASSWORD>"
appId = test_common.app_id
def __init__(self, name, appId=None, mnemonic=None, passphrase=None):
if (appId is not None):
self.appId = appId
V1Entity.__init__(self, name, mnemonic=mnemonic, passphrase=passphrase, need_resolve=False)
def access_api_by_token(self):
return self.access_token
def set_access_token(self, token):
self.access_token = token
class HiveAuthTestCase(flask_unittest.ClientTestCase):
app = create_app(mode=HIVE_MODE_TEST)
@classmethod
def setUpClass(cls):
logging.getLogger("HiveAuthTestCase").debug("Setting up HiveAuthTestCase\n")
@classmethod
def tearDownClass(cls):
logging.getLogger("HiveAuthTestCase").debug("\n\nShutting down HiveAuthTestCase")
def setUp(self, client):
logging.getLogger("HiveAuthTestCase").info("\n")
self.app.config['TESTING'] = True
self.content_type = ("Content-Type", "application/json")
self.json_header = [self.content_type, ]
def tearDown(self, client):
logging.getLogger("HiveAuthTestCase").info("\n")
def parse_response(self, r):
try:
v = json.loads(r.get_data())
except json.JSONDecodeError:
v = None
return v, r.status_code
def assert200(self, status):
self.assertEqual(status, 200)
def assert201(self, status):
self.assertEqual(status, 201)
def test_a_echo(self, client):
logging.getLogger("HiveAuthTestCase").debug("\nRunning test_a_echo")
r, s = self.parse_response(
client.post('/api/v1/echo', data=json.dumps({"key": "value"}), headers=self.json_header)
)
logging.getLogger("HiveAuthTestCase").debug(f"\nr:{r}")
self.assert200(s)
def __test_auth_common(self, client, didapp, testapp):
# sign_in
doc = lib.DIDStore_LoadDID(testapp.get_did_store(), testapp.get_did())
doc_str = ffi.string(lib.DIDDocument_ToJson(doc, True)).decode()
logging.getLogger("HiveAuthTestCase").debug(f"\ndoc_str: {doc_str}")
doc = json.loads(doc_str)
rt, s = self.parse_response(
client.post('/api/v1/did/sign_in', data=json.dumps({"document": doc,}), headers=self.json_header)
)
self.assert200(s)
self.assertEqual(rt["_status"], "OK")
jwt = rt["challenge"]
# print(jwt)
jws = lib.DefaultJWSParser_Parse(jwt.encode())
if not jws:
assert False, ffi.string(lib.DIDError_GetLastErrorMessage()).decode()
aud = ffi.string(lib.JWT_GetAudience(jws)).decode()
self.assertEqual(aud, testapp.get_did_string())
nonce = ffi.string(lib.JWT_GetClaim(jws, "nonce".encode())).decode()
hive_did = ffi.string(lib.JWT_GetIssuer(jws)).decode()
lib.JWT_Destroy(jws)
# auth
vc = didapp.issue_auth(testapp)
vp_json = testapp.create_presentation_str(vc, nonce, hive_did)
auth_token = testapp.create_vp_token(vp_json, "DIDAuthResponse", hive_did, 60)
# print(auth_token)
logging.getLogger("HiveAuthTestCase").debug(f"\nauth_token: {auth_token}")
rt, s = self.parse_response(
client.post('/api/v1/did/auth', data=json.dumps({"jwt": auth_token,}), headers=self.json_header)
)
self.assert200(s)
self.assertEqual(rt["_status"], "OK")
token = rt["access_token"]
jws = lib.DefaultJWSParser_Parse(token.encode())
aud = ffi.string(lib.JWT_GetAudience(jws)).decode()
self.assertEqual(aud, testapp.get_did_string())
issuer = ffi.string(lib.JWT_GetIssuer(jws)).decode()
lib.JWT_Destroy(jws)
# print(token)
logging.getLogger("HiveAuthTestCase").debug(f"\ntoken: {token}")
testapp.set_access_token(token)
# auth_check
# token = test_common.get_auth_token()
self.json_header = [
("Authorization", "token " + token),
self.content_type,
]
rt, s = self.parse_response(client.post('/api/v1/did/check_token', headers=self.json_header))
self.assert200(s)
self.assertEqual(rt["_status"], "OK")
return token, hive_did
def test_b_auth(self, client):
logging.getLogger("HiveAuthTestCase").debug("\nRunning test_b_auth")
didapp = DIDApp("didapp", "clever bless future fuel obvious black subject cake art pyramid member clump")
testapp = DApp("testapp", test_common.app_id,
"chimney limit involve fine absent topic catch chalk goat era suit leisure")
self.__test_auth_common(client, didapp, testapp)
def test_c_auth(self, client):
logging.getLogger("HiveAuthTestCase").debug("\nRunning test_c_auth")
didapp = DIDApp("didapp", "clever bless future fuel obvious black subject cake art pyramid member clump")
testapp1 = DApp("testapp", test_common.app_id, "chimney limit involve fine absent topic catch chalk goat era suit leisure")
testapp2 = DApp("testapp2", test_common.app_id2, "chimney limit involve fine absent topic catch chalk goat era suit leisure", "")
# testapp3 = DApp("testapp3", "appid3", "license mango cluster candy payment prefer video rice desert pact february rabbit")
token = self.__test_auth_common(client, didapp, testapp1)
token2 = self.__test_auth_common(client, didapp, testapp2)
logging.getLogger("HiveAuthTestCase").debug(f"\ntoken: {token}")
# self.__test_auth_common(didapp, testapp3)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JhoelWit/SCoPP",
"score": 4
} |
#### File: JhoelWit/SCoPP/latlongcartconv.py
```python
from math import sin, cos, sqrt, atan2, radians, degrees
import numpy as np
class LLCCV:
"""
Cartesian to geographical coordinates and vice versa object. Each instance of the class has an origin, and uses this
origin to perform transformations to convert between cartesian space (meters) and geographical space (latitude,
longitude).
"""
def __init__(self, origin):
"""
Parameters:
----------
        origin: list, tuple - origin point; index 0 is treated as longitude and index 1 as latitude (degrees)
"""
self.R = 6373.0 # approximate radius of earth in km
self.lon1 = radians(origin[0])
self.lat1 = radians(origin[1])
self.conversion_boundaries = [[], []]
self.conversion_vector = None
def get_cartesian(self, point):
"""
Parameters:
----------
        point: list, tuple - point to convert to cartesian space; index 0 is treated as longitude and index 1 as latitude (degrees)
Returns:
----------
x, y: list - point converted to cartesian
"""
lon2 = radians(point[0])
lat2 = radians(point[1])
dlon = lon2 - self.lon1
dlat = lat2 - self.lat1
a = sin(dlat / 2) ** 2 + cos(self.lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = self.R * c
theta = atan2(dlat, dlon)
x = round(distance * cos(theta) * 1000)
y = round(distance * sin(theta) * 1000)
self.conversion_boundaries[0].append([x, y])
self.conversion_boundaries[1].append([degrees(lon2), degrees(lat2)])
return [x, y]
def get_geographic(self, point):
"""
Parameters:
----------
point: list, tuple - point for conversion to geographical space
Returns:
----------
x, y: list - point converted to geographical space
"""
if self.conversion_vector is not None:
point = list(point)
point_mat = np.zeros((2, 6))
point.extend([1])
point_mat[0, 0:3] = point
point_mat[1, 3:] = point
converted = np.matmul(point_mat, self.conversion_vector)
return converted[1][0], converted[0][0]
else:
point = list(point)
cart_bounds = np.concatenate(
(np.array(self.conversion_boundaries[0]), np.ones((len(self.conversion_boundaries[0]), 1))), axis=1)
geo_bounds = np.array(self.conversion_boundaries[1])
geo_bounds_vec = []
for item in geo_bounds:
for value in item:
geo_bounds_vec.append([np.transpose(value)])
cart_bounds_mat = np.zeros((2 * len(self.conversion_boundaries[0]), 6))
row_count = 0
for item in cart_bounds:
cart_bounds_mat[row_count, 0:3] = item
cart_bounds_mat[row_count + 1, 3:] = item
row_count += 2
cart_bounds_mat_inv = np.matmul(np.linalg.inv((np.matmul(np.transpose(cart_bounds_mat), cart_bounds_mat))),
np.transpose(cart_bounds_mat))
self.conversion_vector = np.matmul(cart_bounds_mat_inv, geo_bounds_vec)
point_mat = np.zeros((2, 6))
point.extend([1])
point_mat[0, 0:3] = point
point_mat[1, 3:] = point
converted = np.matmul(point_mat, self.conversion_vector)
return converted[1][0], converted[0][0]
``` |
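A minimal round-trip sketch for the converter above. The coordinates are arbitrary placeholders; note the constructor reads index 0 as longitude and index 1 as latitude, and get_geographic needs at least three non-collinear points converted to cartesian first so it can fit its least-squares mapping:
```python
converter = LLCCV((-71.0589, 42.3601))              # (longitude, latitude) of the local origin
xy = converter.get_cartesian((-71.0570, 42.3610))   # -> [x, y] in meters
converter.get_cartesian((-71.0550, 42.3620))        # add more boundary points for the fit
converter.get_cartesian((-71.0600, 42.3590))
lat, lon = converter.get_geographic(xy)             # ~ (42.3610, -71.0570), up to meter-level rounding
```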
{
"source": "jhoelzl/Wikipedia",
"score": 3
} |
#### File: Wikipedia/tests/session_test.py
```python
import unittest
import wikipedia
class TestSession(unittest.TestCase):
def test_session(self):
""" Test the new_session function """
wikipedia.new_session()
s1 = wikipedia.wikipedia.SESSION
self.assertIsNotNone(s1)
wikipedia.new_session()
s2 = wikipedia.wikipedia.SESSION
self.assertIsNotNone(s2)
self.assertNotEqual(s1, s2)
``` |
{
"source": "jhofker/ha-wyzeapi",
"score": 2
} |
#### File: custom_components/wyzeapi/light.py
```python
import asyncio
import logging
from datetime import timedelta
# Import the device class from the component that you want to support
from typing import Any
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
LightEntity
)
from homeassistant.const import ATTR_ATTRIBUTION
from . import DOMAIN
from .wyzeapi.client import WyzeApiClient
from .wyzeapi.devices import Bulb
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Wyze"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Wyze Light platform."""
_LOGGER.debug("""Creating new WyzeApi light component""")
_ = config
_ = discovery_info
wyzeapi_client: WyzeApiClient = hass.data[DOMAIN]["wyzeapi_account"]
# Add devices
bulbs = await wyzeapi_client.list_bulbs()
async_add_entities([HAWyzeBulb(wyzeapi_client, bulb) for bulb in bulbs], True)
class HAWyzeBulb(LightEntity):
"""Representation of a Wyze Bulb."""
__client: WyzeApiClient
__light: Bulb
__just_updated = False
def __init__(self, client: WyzeApiClient, light: Bulb):
"""Initialize a Wyze Bulb."""
self.__light = light
self.__client = client
@property
def should_poll(self) -> bool:
return True
@staticmethod
def translate(value, left_min, left_max, right_min, right_max):
if value is None:
return None
# Figure out how 'wide' each range is
left_span = left_max - left_min
right_span = right_max - right_min
# Convert the left range into a 0-1 range (float)
value_scaled = float(value - left_min) / float(left_span)
# Convert the 0-1 range into a value in the right range.
return right_min + (value_scaled * right_span)
def turn_on(self, **kwargs: Any) -> None:
asyncio.get_event_loop().run_until_complete(self.__client.turn_on(self.__light))
self.__light.switch_state = 1
self.__just_updated = True
def turn_off(self, **kwargs: Any) -> None:
asyncio.get_event_loop().run_until_complete(self.__client.turn_off(self.__light))
self.__light.switch_state = 0
self.__just_updated = True
@property
def name(self):
"""Return the display name of this light."""
# self._name = "wyzeapi_"+self._device_mac+"_"+ self._name
return self.__light.nick_name
@property
def unique_id(self):
return self.__light.mac
@property
def available(self):
"""Return the connection status of this light"""
return self.__light.available
@property
def device_state_attributes(self):
"""Return device attributes of the entity."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"state": self.is_on,
"available": self.available,
"device model": self.__light.product_model,
"ssid": self.__light.ssid,
"ip": self.__light.ip,
"rssi": self.__light.rssi,
"mac": self.unique_id
}
@property
def brightness(self):
"""Return the brightness of the light.
This method is optional. Removing it indicates to Home Assistant
that brightness is not supported for this light.
"""
return self.translate(self.__light.brightness, 1, 100, 1, 255)
@property
def color_temp(self):
"""Return the CT color value in mired."""
return self.translate(self.__light.color_temp, 2700, 6500, 500, 140)
@property
def is_on(self):
"""Return true if light is on."""
return self.__light.switch_state == 1
@property
def supported_features(self):
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on.
You can skip the brightness part if your light does not support
brightness control.
"""
self.__light.brightness = self.translate(kwargs.get(ATTR_BRIGHTNESS), 1, 255, 1, 100)
self.__light.color_temp = self.translate(kwargs.get(ATTR_COLOR_TEMP), 500, 140, 2700, 6500)
await self.__client.turn_on(self.__light)
self.__light.switch_state = 1
self.__just_updated = True
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self.__client.turn_off(self.__light)
self.__light.switch_state = 0
self.__just_updated = True
async def async_update(self):
"""Fetch new state data for this light.
This is the only method that should fetch new data for Home Assistant.
"""
_LOGGER.debug("Updating Light: {}".format(self.name))
if self.__just_updated:
self.__just_updated = False
return
self.__light = await self.__client.update(self.__light)
```
#### File: wyzeapi/devices/switch.py
```python
from typing import Dict
from .base_networked_device import BaseNetworkedDevice
from ..interfaces import ISwitchable, IUpdatable
class Switch(BaseNetworkedDevice, ISwitchable, IUpdatable):
switch_state: int
def __init__(self, nick_name, product_model, mac, switch_state, rssi, ssid, ip):
super().__init__(nick_name, product_model, mac, ssid, rssi, ip)
self.switch_state = switch_state
self.__rssi = rssi
self.__ssid = ssid
self.__ip = ip
@staticmethod
def switch_on_props(**kwargs) -> Dict:
return {"P3": "1"}
@staticmethod
def switch_off_props(**kwargs) -> Dict:
return {"P3": "0"}
def prop_map(self) -> Dict:
return {
"P3": ("switch_state", "int"),
"P5": ("avaliable", "int"),
"P1612": ("rssi", "str"),
}
``` |
{
"source": "jhofmann/yubiauth",
"score": 2
} |
#### File: jhofmann/yubiauth/release.py
```python
from distutils import log
from distutils.core import Command
from distutils.errors import DistutilsSetupError
import os
import re
from datetime import date
class release(Command):
description = "create and release a new version"
user_options = [
('keyid', None, "GPG key to sign with"),
('skip-tests', None, "skip running the tests"),
('pypi', None, "publish to pypi"),
]
boolean_options = ['skip-tests', 'pypi']
def initialize_options(self):
self.keyid = None
self.skip_tests = 0
self.pypi = 0
def finalize_options(self):
self.cwd = os.getcwd()
self.fullname = self.distribution.get_fullname()
self.name = self.distribution.get_name()
self.version = self.distribution.get_version()
def _verify_version(self):
with open('NEWS', 'r') as news_file:
line = news_file.readline()
now = date.today().strftime('%Y-%m-%d')
if not re.search(r'Version %s \(released %s\)' % (self.version, now),
line):
raise DistutilsSetupError("Incorrect date/version in NEWS!")
def _verify_tag(self):
if os.system('git tag | grep -q "^%s\$"' % self.fullname) == 0:
raise DistutilsSetupError(
"Tag '%s' already exists!" % self.fullname)
def _sign(self):
if os.path.isfile('dist/%s.tar.gz.asc' % self.fullname):
# Signature exists from upload, re-use it:
sign_opts = ['--output dist/%s.tar.gz.sig' % self.fullname,
'--dearmor dist/%s.tar.gz.asc' % self.fullname]
else:
# No signature, create it:
sign_opts = ['--detach-sign', 'dist/%s.tar.gz' % self.fullname]
if self.keyid:
sign_opts.insert(1, '--default-key ' + self.keyid)
self.execute(os.system, ('gpg ' + (' '.join(sign_opts)),))
if os.system('gpg --verify dist/%s.tar.gz.sig' % self.fullname) != 0:
raise DistutilsSetupError("Error verifying signature!")
def _tag(self):
tag_opts = ['-s', '-m ' + self.fullname, self.fullname]
if self.keyid:
tag_opts[0] = '-u ' + self.keyid
self.execute(os.system, ('git tag ' + (' '.join(tag_opts)),))
def _do_call_publish(self, cmd):
self._published = os.system(cmd) == 0
def _publish(self):
web_repo = os.getenv('YUBICO_GITHUB_REPO')
if web_repo and os.path.isdir(web_repo):
artifacts = [
'dist/%s.tar.gz' % self.fullname,
'dist/%s.tar.gz.sig' % self.fullname
]
cmd = '%s/publish %s %s %s' % (
web_repo, self.name, self.version, ' '.join(artifacts))
self.execute(self._do_call_publish, (cmd,))
if self._published:
self.announce("Release published! Don't forget to:", log.INFO)
self.announce("")
self.announce(" (cd %s && git push)" % web_repo, log.INFO)
self.announce("")
else:
self.warn("There was a problem publishing the release!")
else:
self.warn("YUBICO_GITHUB_REPO not set or invalid!")
self.warn("This release will not be published!")
def run(self):
if os.getcwd() != self.cwd:
raise DistutilsSetupError("Must be in package root!")
self._verify_version()
self._verify_tag()
self.execute(os.system, ('git2cl > ChangeLog',))
if not self.skip_tests:
self.run_command('check')
# Nosetests calls sys.exit(status)
try:
self.run_command('nosetests')
except SystemExit as e:
if e.code != 0:
raise DistutilsSetupError("There were test failures!")
self.run_command('sdist')
if self.pypi:
cmd_obj = self.distribution.get_command_obj('upload')
cmd_obj.sign = True
if self.keyid:
cmd_obj.identity = self.keyid
self.run_command('upload')
self._sign()
self._tag()
self._publish()
self.announce("Release complete! Don't forget to:", log.INFO)
self.announce("")
self.announce(" git push && git push --tags", log.INFO)
self.announce("")
```
#### File: yubiauth/core/controller.py
```python
from yubiauth.util.controller import Controller
from yubiauth.util.model import Session
from yubiauth.core.model import User, YubiKey, AttributeAssociation
from numbers import Integral
import logging
log = logging.getLogger(__name__)
__all__ = [
'YubiAuth'
]
class YubiAuth(Controller):
"""
Main class for interacting with the YubiAuth backend.
"""
def __init__(self, session=Session()):
super(YubiAuth, self).__init__(session)
def query_users(self, **kwargs):
"""
Performs a query on all available users.
Gets a list of all users matching the filter, represented as dicts
containing id and name.
Filtering is done by supplying keyword arguments, where each key-value
pair will match an Attribute for the user.
A special keyword "yubikey" will create a filter on users assigned
to the YubiKey with the prefix of the value given.
Example:
# Get users with the YubiKey ccccccccccce:
query_users(yubikey='ccccccccccce')
# Get users with the attribute 'area' equal to 'Stockholm'
# AND the attribute 'admin' equal to 'True':
query_users(area='Stockholm', admin='True')
For more advanced querying, use the session attribute directly.
@return: A list of users
@rtype: list
"""
query = self.session.query(User.id, User.name)
if 'yubikey' in kwargs:
query = query.filter(User.yubikeys.any(prefix=kwargs['yubikey']))
del kwargs['yubikey']
for key in kwargs:
query = query.filter(
User._attribute_association.has(
AttributeAssociation._attributes.any(
key=key,
value=kwargs[key]
)
)
)
result = query.all()
log.debug('User query: %r resulted in %d matches', kwargs, len(result))
return [{'id': row[0], 'name': row[1]} for row in result]
def query_yubikeys(self, **kwargs):
"""
Performs a query on all available YubiKeys.
Gets a list of all YubiKeys matching the filter.
Filtering is done by supplying keyword arguments, where each key-value
pair will match an Attribute for the YubiKey.
Example:
# Get YubiKey with the attribute 'revoke' equal to 'foo':
query_yubikeys(revoke='foo')
@return: A list of YubiKeys
@rtype: list
"""
query = self.session.query(YubiKey)
for key in kwargs:
query = query.filter(
YubiKey._attribute_association.has(
AttributeAssociation._attributes.any(
key=key,
value=kwargs[key]
)
)
)
result = query.all()
log.debug('YubiKey query: %r resulted in %d matches', kwargs,
len(result))
return result
def get_user(self, user_username_or_id):
"""
Does a lookup for a user based on a username or ID.
For practicality also checks if the argument is a C{User},
in which case it is returned as-is.
@param user_username_or_id: A username or user ID
@type user_username_or_id: C{User} or string or int
"""
if isinstance(user_username_or_id, User):
return user_username_or_id
else:
query = self.session.query(User)
try:
if isinstance(user_username_or_id, Integral):
user = query.get(user_username_or_id)
if user:
log.debug('User lookup id=%d successful',
user_username_or_id)
return user
else:
user = query.filter(User.name == user_username_or_id).one()
log.debug('User lookup username=%s successful',
user_username_or_id)
return user
except:
pass
log.debug('User lookup on: %r of type: %s failed', user_username_or_id,
type(user_username_or_id))
raise LookupError('User not found!')
def get_yubikey(self, prefix):
"""
Gets a YubiKey by its prefix.
@param prefix: A YubiKey prefix
@type prefix: string
"""
key = self.session.query(YubiKey).filter(
YubiKey.prefix == prefix).one()
log.debug('YubiKey lookup prefix=%s successful', prefix)
return key
def create_user(self, username, password):
"""
Creates a new user.
@param username: A unique username to give the new user.
@type username: string
@param password: The password to give the new user.
@type password: string
@return: The created user.
@rtype: C{User}
"""
try:
self.get_user(username)
except:
user = User(username, password)
self.session.add(user)
log.info('User created: %s', user.name)
return user
log.error('Create user failed! Username exists: %s',
username)
raise ValueError('A user with that username already exists!')
``` |
{
"source": "jhogan/boog",
"score": 4
} |
#### File: jhogan/boog/boog.py
```python
import copy
import os
import curses
import time
import sys
false=False
true=True
class char:
def __init__(self, x, y, char):
self.x = x
self.y = y
self.letter = char
def str(self):
return "%s,%s %s" % (self.x, self.y, self.letter)
class board:
def __init__(self):
# create matrix with 5 elements for both dimensions
# to hold char objects
self.chars=[None]*5
for i in range(5):
self.chars[i] = [None] * 5
y=0
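# disabled block below: would populate the board from a newline-delimited string,
# but __init__ receives no such string (str here is the builtin)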
if false:
for line in str.split("\n"):
x=0
for c in line:
self.chars[y][x] = char(x, y, c)
x += 1
y += 1
def isvalid(self):
for x in range(5):
for y in range(5):
c = self.getchar(x,y)
if c == None or (c.letter.isupper() and c.letter != 'Q'):
return false
return true
def getchar(self, x, y):
return self.chars[y][x]
def setchar(self, x, y, c):
self.chars[y][x] = char(x, y, c)
def str(self):
r=''
for w in self.words:
r+= "%s\n" % w.str()
return r
class word:
def __init__(self):
self.word=[]
def str(self):
r=""
for c in self.word:
l = c.letter
if l == 'Q':
l='qu'
r+=l
return r
def append(self, c):
self.word.append(c)
def contains(self, char):
for c in self.word:
if c is char:
return True
return False
def pop(self):
self.word.pop()
def len(self):
return len(self.word)
def graph(self, board):
r=""
for x in range(5):
for y in range(5):
c = board.getchar(x,y)
inword=false
for c0 in self.word:
if c.x == c0.x and c.y == c0.y:
r += c.letter.upper()
inword=true
break
if not inword:
r += c.letter.lower()
r += "\n"
return r
class words:
def __init__(self):
self.words=[]
def clear(self):
self.words=[]
def append(self, word):
self.words.append(word)
self.raiseonappend(word)
def raiseonappend(self, word):
self.onappend(word)
def uniqappend(self, word):
if not self.contains(word):
self.append(word)
def contains(self, word):
for w in self.words:
if word.str() == w.str():
return true
return false
def str(self):
r=''
for w in self.words:
r+= "%s\n" % w.str()
return r
def graph(self, board):
r=''
for w in self.words:
r += "%s\n\n\n\n" % w.graph(board)
return r
def sort(self):
new=[]
smalllist=copy.copy(self.words)
lennew = len(new)
lenwords = len(self.words)
while lennew < lenwords:
smallest=None
for w in smalllist:
if smallest == None or w.len() < smallest:
smallest = w.len()
smallestword = w
smalllist.remove(smallestword)
new.append(smallestword)
lennew += 1
new.reverse()
self.words=new
class finger:
def __init__(self, board):
self.b=board
self.word=word()
self.reset()
def raiseonboardupd(self):
self.onboardupd(self)
def reset(self):
self.startchar=None
def nextstart(self):
if self.startchar == None:
self.startchar = self.b.getchar(0,0)
else:
x=self.startchar.x
y=self.startchar.y
if x < 4:
x += 1
elif y < 4:
x = 0
y += 1
else:
return false # we would be at the end
self.startchar = self.b.getchar(x,y)
self.x=self.startchar.x
self.y=self.startchar.y
#print "starting at (%s,%s)" % (self.x, self.y)
self.word=word()
self.word.append(self.b.getchar(self.x, self.y))
return true
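# move one cell in the given direction ('u', 'd', 'l', 'r', or a diagonal such as 'ul'/'dr');
# the move is rejected if it leaves the 5x5 board or revisits a letter already in the current word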
def mv(self, direction):
xincr=0
yincr=0
d0=direction[0]
if len(direction) == 2:
if direction[1] == 'l':
xincr=-1
else:
xincr=1 # assume 'r'
if d0 == 'u':
yincr=-1
elif d0 == 'd':
yincr=1
elif d0 == 'l':
xincr=-1
elif d0 == 'r':
xincr=1
prevchar = self.b.getchar(self.x, self.y)
self.x = self.x + xincr
self.y = self.y + yincr
if self.x < 0 or self.y < 0 or self.x > 4 or self.y > 4:
self.x=prevchar.x
self.y=prevchar.y
return false
char = self.b.getchar(self.x, self.y)
if self.word.contains(char):
self.x=prevchar.x
self.y=prevchar.y
return False
self.word.append(char)
return true
def curchar(self):
return self.b.getchar(self.x, self.y)
def revert(self):
self.word.word.pop()
if len(self.word.word) > 0:
c = self.word.word[-1]
self.x = c.x
self.y = c.y
else:
self.x = None
self.y = None
def strword(self):
r=""
for i in range(self.word.len()):
l=self.word.word[i].letter
if l == 'Q':
l='qu'
r += l
return r
def str(self):
r=""
for y in range(5):
for x in range(5):
char = self.b.getchar(x,y)
letter = char.letter
for c in self.word.word:
if c is char:
letter = letter.upper()
r += letter + ' '
r += "\n"
return r
class boogler:
def __init__(self, dict, board):
self.words=words()
self.dict = dict
self.b = board
self.f = finger(self.b)
self.depth = 0
def find(self):
self.words.clear()
self.f.reset()
while self.f.nextstart():
self.find_()
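# depth-first search from the finger's current square: keep extending while the
# dictionary still holds words with this prefix, record any dictionary word longer
# than three letters, then backtrack one square at a time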
def find_(self):
#print "depth: %s" % self.depth
self.depth +=1
if self.dict.startswith(self.f.strword()):
for d in ('d', 'u', 'l', 'r', 'dl', 'dr', 'ul', 'ur'):
if self.f.mv(d):
#self.f.raiseonboardupd()
#print self.f.str()
strword = self.f.strword()
if len(strword) > 3:
#print "--reverting--"
#print self.f.str()
if self.dict.find(strword):
self.words.uniqappend(copy.deepcopy(self.f.word))
self.find_()
self.f.revert()
self.depth -=1
def str(self):
return self.words.str()
def graph(self):
return self.words.graph(self.b)
class dict:
def __init__(self, file):
self.d={}
self.l=[]
f = open(file)
try:
for w in f:
if w[0].islower():
self.d[w.rstrip()] = ''
self.l.append(w.rstrip())
self.l.sort()
finally:
f.close()
def find(self, k):
return (k in self.d)
def len(self):
return len(self.d)
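# binary search over the sorted word list for any entry beginning with str;
# falls through (implicitly returning None, i.e. falsy) when no word has that prefix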
def startswith(self, str):
hi=len(self.l)
lo=0
while lo < hi:
mid = (lo+hi)//2
word=self.l[mid]
if word.startswith(str):
return true
elif str < word:
hi = mid
else:
lo = mid+1
class cscr:
def cboard_onenter(self, obj):
if self.board.isvalid():
self.msgstat("finding")
self.boogler.find()
self.msgstat("sorting")
self.boogler.words.sort()
self.msgstat("displaying")
self.wrdlst.refresh(None)
self.msgstat(None)
def wrdlst_onchange(self, c):
self.cboard.graph(c.word)
def __init__(self):
self.stdscr = curses.initscr()
curses.start_color()
curses.noecho()
curses.cbreak()
self.stdscr.keypad(1)
def run(self):
self.msgstat("loading dictionary")
d = dict('/usr/share/dict')
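# NOTE: dict() reads a plain word-list file; on many systems that list is
# /usr/share/dict/words rather than the directory given here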
self.msgstat()
self.board = board()
self.boogler = boogler(d, self.board)
self.cwidgets=cwidgets()
# widget: cboard
cb=cboard(self, self.boogler)
cb.top = 3
cb.left = 3
self.cwidgets.append(cb)
self.cboard = cb
cb.onenter = self.cboard_onenter
# widget: wrdlst
h=self.stdscr.getmaxyx()[0]-3
w=self.stdscr.getmaxyx()[1]-20
wl = wrdlst(self, self.boogler, 3, 15, w, h )
wl.onchange = self.wrdlst_onchange
self.boogler.words.onappend=wl.onappend
self.cwidgets.append(wl)
self.wrdlst=wl
self.cwidgets.show()
def msgstat(self, msg=None):
self.stdscr.addstr(0,0, ' ' * 40)
if msg != None:
self.stdscr.addstr(0,0, msg, curses.A_REVERSE)
self.stdscr.refresh()
def destroy(self):
self.stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
class cwidgets:
def __init__(self):
self.widgets=[]
self.TAB='\t'
self._curwidget=None
def firstwidget(self):
for w in self.widgets:
if w.tabord == 0:
return w
def lastwidget(self):
maxwidget = self.firstwidget()
for w in self.widgets:
if w.tabord > maxwidget.tabord:
maxwidget = w
return maxwidget.tabord
def curwidget(self, v=None):
if v!=None:
self._curwidget=v
if self._curwidget == None:
self._curwidget = self.firstwidget()
return self._curwidget
def show(self):
while 1:
w=self.curwidget()
r = w.setfocus()
if r == ord("\t"):
w = self.curwidget(self.next())
# fixme
if r == "":
w = self.curwidget(self.prev())
def prev(self):
curtab = self.curwidget().tabord
for w in self.widgets:
if curtab - 1 == w.tabord:
return w
return self.lastwidget()
def next(self):
curtab = self.curwidget().tabord
for w in self.widgets:
if curtab + 1 == w.tabord:
return w
return self.firstwidget()
def append(self, w):
if w.tabord == None:
w.tabord = self.maxtab() + 1
self.widgets.append(w)
def maxtab(self):
max=-1
for w in self.widgets:
if max < w.tabord:
max = w.tabord
return max
class cwidget:
def __init__(self, cscr):
self.cscr=cscr
self.stdscr=cscr.stdscr
self.tabord=None
class cboard(cwidget):
def __init__(self, cscr, boogler):
self.x=0
self.y=0
self.cmdmode=false
self.top=self.left=0
self.board=boogler.b
self.boogler=boogler
cwidget.__init__(self, cscr)
boogler.f.onboardupd=self.onboardupd
def clean(self):
self.x=self.y=0
done=false
while not done:
c = self.board.getchar(self.x,self.y)
self.stdscr.addstr(self.offy(), self.offx(), c.letter)
self.mvnext()
done = (self.y==0 and self.x==0)
def cx(self):
return self.left + self.x
def cy(self):
return self.top + self.y
def setfocus(self):
while 1:
c = self.stdscr.getch(self.cy(), self.cx())
if not self.cmdmode:
if c == curses.KEY_LEFT:
self.mv('l')
elif c == curses.KEY_RIGHT:
self.mv('r')
elif c == curses.KEY_DOWN:
self.mv('d')
elif c == curses.KEY_UP:
self.mv('u')
elif c == 263: # BS
self.backspace()
elif c == 27: # ESC
self.cmdmode=true
elif c == ord("\n"):
self.onenter(self)
elif c == ord("\t"):
return c
elif c in (range(97, 123) + [81]): # [a-zQ]
self.stdscr.addstr(self.cy(), self.cx(), chr(c))
self.board.setchar(self.x, self.y, chr(c))
if self.board.isvalid():
self.cscr.msgstat()
else:
self.cscr.msgstat('board invalid')
self.boogler.f.reset()
self.mvnext()
else:
if c in (curses.KEY_LEFT, ord('h')):
self.mv('l')
elif c in (curses.KEY_RIGHT, ord('l')):
self.mv('r')
elif c in (curses.KEY_DOWN, ord('j')):
self.mv('d')
elif c in (curses.KEY_UP, ord('k')):
self.mv('u')
elif c == ord('a'):
self.mvnext()
self.cmdmode=false
elif c == ord('i'):
self.cmdmode=false
def mvnext(self):
if self.x < 4:
self.mv('r')
else:
self.enter()
def enter(self):
if self.y < 4:
self.x=0
self.y+=1
elif self.x == 4:
self.x=self.y=0
def backspace(self):
if self.x>0:
self.x -= 1
elif self.y>0:
self.x=4
self.y -=1
def mv(self, d):
xincr=yincr=0
if d == 'u':
yincr=-1
elif d == 'd':
yincr=1
elif d == 'l':
xincr=-1
elif d == 'r':
xincr=1
self.x = self.x + xincr
self.y = self.y + yincr
if self.x < 0 or self.y < 0 or self.x > 4 or self.y > 4:
self.x = self.x - xincr
self.y = self.y - yincr
def graph(self, w):
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
for y in range(5):
for x in range(5):
sx=x+3
sy=y+3
c = self.board.getchar(x,y)
inword=false
for c0 in w.word:
if c.x == c0.x and c.y == c0.y:
if c0 is w.word[-1]:
cp=curses.color_pair(6)
elif c0 is w.word[0]:
cp = curses.color_pair(1)
else:
cp=curses.color_pair(2)
inword=true
break
if not inword:
cp=curses.color_pair(0)
self.stdscr.addstr(sy, sx, c.letter, cp)
self.stdscr.refresh()
def onboardupd(self, f):
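# returns immediately: the incremental per-move redraw below is disabled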
return
#time.sleep(0)
self.cscr.wrdlst.updwrd(f.word.str())
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
i=1
for y in range(5):
for x in range(5):
sx=x+3
sy=y+3
c = f.b.getchar(x,y)
inword=false
for c0 in f.word.word:
if c.x == c0.x and c.y == c0.y:
if c0 is f.word.word[-1]:
pass
#self.stdscr.addstr(sy, sx, c.letter, curses.color_pair(6))
else:
pass
#self.stdscr.addstr(sy, sx, c.letter, curses.color_pair(i))
inword=true
if i < 2: i += 1
break
if not inword:
pass
#self.stdscr.addstr(sy, sx, c.letter, curses.color_pair(0))
self.stdscr.refresh()
class wrdlst(cwidget):
def __init__(self, cscr, boogler, top, left, width, hight):
self.boogler=boogler
self.win=curses.newwin(hight, width, top, left)
self.win.keypad(1)
self.win.border('|', '|', '-', '-', '+', '+', '+', '+')
self.win.refresh()
self.x=self.y=1
self.page=self.maxpage=0
self.cellmargin=1
self._cells=None
cwidget.__init__(self, cscr)
self._curcell=None
def setfocus(self):
while 1:
curses.curs_set(0)
self.curcell()
c = self.win.getch(0,0)
if c == ord("\t"):
curses.curs_set(1)
return c
elif c in (curses.KEY_LEFT, ord('h')):
self.mv('l')
elif c in (curses.KEY_RIGHT, ord('l')):
self.mv('r')
elif c in (curses.KEY_DOWN, ord('j')):
self.mv('d')
elif c in (curses.KEY_UP, ord('k')):
self.mv('u')
elif c == curses.KEY_NPAGE:
if self.page < self.maxpage:
self.page += 1
self.refresh()
self.curcell(None, true)
elif c == curses.KEY_PPAGE:
if self.page != 0:
self.page -= 1
self.refresh()
self.curcell(None, true)
def getcell(self, x, y, page):
for c in self._cells:
if c.x == x and c.y == y and c.page == page:
return c
return None
def curcell(self, c=None, refresh=false):
if c==None:
if refresh or self._curcell == None:
for c in self._cells:
#if c.y==0 and c.x==0:
if c.page == self.page:
self.refresh(c)
self._curcell=c
self.onchange(c)
break
else:
self._curcell=c
self.onchange(c)
self.refresh(c)
return self._curcell
def mv(self, d):
cc = self.curcell()
if cc == None: return
if d == 'u':
c = self.getcell(cc.x, cc.y - 1, cc.page)
elif d == 'd':
c = self.getcell(cc.x, cc.y + 1, cc.page)
elif d == 'l':
c = self.getcell(cc.x - 1, cc.y, cc.page)
elif d == 'r':
c = self.getcell(cc.x + 1, cc.y, cc.page)
if c != None:
self.curcell(c)
def clear(self):
self._cells=None
self.win.erase()
self.win.border('|', '|', '-', '-', '+', '+', '+', '+')
self.win.refresh()
self.x=self.y=1
def append(self, w):
# don't need to use w param
# we have ref to boogler.words
#self.refresh(w)
pass
def updwrd(self, w):
#self.win.addstr(self.y+1, self.x, ' ' * 20)
#self.win.addstr(self.y+1, self.x, w)
#self.win.refresh()
pass
def onappend(self,w):
self.append(w)
def refresh(self, hlc=None):
self.x=self.cellmargin
self.clear()
cells = self.cells(true) #force refresh
for col in range(self.columncnt()):
lastc=None
for c in cells:
if c.page == self.page and c.x==col:
self.y=c.y+self.cellmargin
if hlc != None and hlc.x == c.x and hlc.y == c.y:
self.win.addstr(self.y, self.x, c.word.str(), curses.A_REVERSE )
else:
self.win.addstr(self.y, self.x, c.word.str())
lastc=c
if lastc != None:
self.x += (lastc.len() + self.cellmargin)
self.win.refresh()
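# lay the found words out in columns: fill a column top to bottom, start a new
# column when the window height is reached, and start a new page once the columns
# would overflow the window width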
def cells(self, refresh=false):
if self._cells == None or refresh:
maxyx=self.win.getmaxyx()
maxy=maxyx[0]
maxx=maxyx[1]
self.maxpage=x=y=page=0
self._cells=[]
for w in self.boogler.words.words:
c = wrdlst.cell(w, self)
c.y=y; c.x=x; c.page=page
if c.y == maxy-self.cellmargin-2:
if self.cellslen(page) > maxx-10:
page+=1
self.maxpage=page
y=x=0
else:
x+=1
y=0
else:
y+=1
self._cells.append(c)
return self._cells
def cellslen(self, page):
len=0
for c in self.cells():
if c.y==0 and c.page == page:
for colord in range(self.columncnt()):
if c.x==colord:
len+=(c.len() + self.cellmargin)
break
return len
def columncnt(self):
maxx=0
for c in self.cells():
if maxx < c.x:
maxx = c.x
return maxx+1
def row(self, ordinal):
r=[]
for c in self.cells():
if c.x == ordinal:
r.append(c)
return r
class cell:
def __init__(self, word, wrdlst):
self.word=word
self.wrdlst=wrdlst
self.x=self.y=self.page=None
def str(self):
return self.word.str()
def len(self):
maxlen=0
for c in self.wrdlst.row(self.x):
#if c.page != self.page: continue
wrdlen=len(c.word.str())
if wrdlen > maxlen:
maxlen = wrdlen
return maxlen
c=cscr()
try:
c.run()
except KeyboardInterrupt:
c.destroy()
except:
c.destroy()
print sys.exc_info()[0]
``` |
{
"source": "jhogan/commonpy",
"score": 2
} |
#### File: jhogan/commonpy/message.py
```python
import smtplib
from email.message import EmailMessage
from email.headerregistry import Address
from entities import *
from pdb import set_trace; B=set_trace
from configfile import configfile
# TODO Write Tests
class email(entity):
def __init__(self, type):
self._type = type
self._from = emailaddresses()
self._to = emailaddresses()
self._subject = ''
self._text = ''
@property
def from_(self):
return self._from
@from_.setter
def from_(self, v):
self._from = v
@property
def to(self):
return self._to
@to.setter
def to(self, v):
self._to = v
@property
def text(self):
return self._text
@text.setter
def text(self, v):
self._text = v
@property
def type(self):
return self._type
def send(self):
# TODO Add demandvalid()
msg = EmailMessage()
msg.set_content(self.text)
msg['Subject'] = self.subject
msg['From'] = self.from_.toaddress()
msg['To'] = self.to.toaddresses()
accts = configfile.getinstance().accounts
accts = getattr(accts.smtpaccounts, self.type)
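# try each configured SMTP account in order; the first one that accepts the
# message wins, failures are logged and the next account is tried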
for acct in accts:
try:
smtp = smtplib.SMTP(acct.host, acct.port)
#smtp.set_debuglevel(1)
smtp.login(acct.username, acct.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
# Don't include username here because it can be the same
# as password (e.g., smtp.postmarkapp.com).
# Use a separate name so the EmailMessage in `msg` is left
# intact for the next account.
errmsg = 'Failed sending email using: ' + \
str(acct.host) + ':' + str(acct.port)
self.log.exception(errmsg)
continue
break
class emailaddresses(entities):
def toaddresses(self):
r = []
for addr in self:
r.append(addr.toaddress())
return r
class emailaddress(entity):
def __init__(self, email, displayname):
self._email = email
self._displayname = displayname
def toaddress(self):
return Address(self.displayname, self.username, self.domain)
@property
def email(self):
return self._email
@property
def displayname(self):
return self._displayname
@property
def username(self):
return self.email.split('@')[0]
@property
def domain(self):
return self.email.split('@')[1]
def __repr__(self):
r = super().__repr__() + ' '
r += self.displayname + ' <' + self.email + '>'
return r
```
#### File: jhogan/commonpy/tester.py
```python
from entities import *
from pprint import pprint
from textwrap import dedent
from types import FunctionType
import inspect
import json
import pdb; B=pdb.set_trace
import pprint
import sys
import uuid
from pprint import pprint
from configfile import configfile
# TODO Ensure tester.py won't run in non-dev environment
class invoketesteventargs(eventargs):
def __init__(self, meth):
self.method = meth
class testers(entities):
def __init__(self, initial=None):
self.oninvoketest = event()
super().__init__(initial=initial)
self.breakonexception = False
def run(self, tu=None):
testclass, testmethod, *_ = tu.split('.') + [None] if tu else [None] * 2
cfg = configfile.getinstance()
if cfg.isloaded and cfg.inproduction:
raise Exception("Won't run in production environment.")
for subcls in tester.__subclasses__():
if testclass and subcls.__name__ != testclass:
continue
inst = subcls()
inst.testers = self
self += inst
for meth in subcls.__dict__.items():
if type(meth[1]) != FunctionType: continue
if meth[0][0] == '_': continue
if testmethod and testmethod != meth[0]:
continue
try:
eargs = invoketesteventargs(meth)
self.oninvoketest(self, eargs)
getattr(inst, meth[0])()
except Exception as ex:
if self.breakonexception:
print(ex)
pdb.post_mortem(ex.__traceback__)
inst._failures += failure(ex, assert_=meth[0])
print('')
def __str__(self):
return self._tostr(str, includeHeader=False)
class tester(entity):
def __init__(self):
self._failures = failures()
self.testers = None
def assertFull(self, actual, msg=None):
if type(actual) != str or actual.strip() == '':
self._failures += failure()
def full(self, actual, msg=None):
if type(actual) != str or actual.strip() == '':
self._failures += failure()
def assertEmpty(self, o, msg=None):
if o != '': self._failures += failure()
def empty(self, o, msg=None):
if o != '': self._failures += failure()
def assertUuid(self, id, msg=None):
if isinstance(id, uuid.UUID):
raise ValueError('Assert type instead')
try:
uuid.UUID(str(id), version=4)
except ValueError:
self._failures += failure()
def uuid(self, id, msg=None):
if isinstance(id, uuid.UUID):
raise ValueError('Assert type instead')
try:
uuid.UUID(str(id), version=4)
except ValueError:
self._failures += failure()
def assertTrue(self, actual, msg=None):
if type(actual) != bool:
raise ValueError('actual must be bool')
if not actual: self._failures += failure()
def true(self, actual, msg=None):
if type(actual) != bool:
raise ValueError('actual must be bool')
if not actual: self._failures += failure()
def assertTruthy(self, actual, msg=None):
if not actual: self._failures += failure()
def assertFalse(self, actual, msg=None):
if type(actual) != bool:
raise ValueError('actual must be bool')
if actual: self._failures += failure()
def false(self, actual, msg=None):
if type(actual) != bool:
raise ValueError('actual must be bool')
if actual: self._failures += failure()
def assertFalsey(self, actual, msg=None):
if actual: self._failures += failure()
def assertFail(self, msg=None):
self._failures += failure()
def fail(self, msg=None):
self._failures += failure()
def assertPositive(self, actual):
if actual < 0: self._failures += failure()
def assertIsInstance(self, expect, actual, msg=None):
if not isinstance(expect, actual): self._failures += failure()
def assertType(self, expect, actual, msg=None):
if type(actual) is not expect: self._failures += failure()
def type(self, expect, actual, msg=None):
if type(actual) is not expect: self._failures += failure()
def assertEq(self, expect, actual, msg=None):
if expect != actual: self._failures += failure()
def eq(self, expect, actual, msg=None):
if expect != actual: self._failures += failure()
def assertNe(self, expect, actual, msg=None):
if expect == actual: self._failures += failure()
def assertGt(self, expect, actual, msg=None):
if not (expect > actual): self._failures += failure()
def assertGe(self, expect, actual, msg=None):
if not (expect >= actual): self._failures += failure()
def assertLt(self, expect, actual, msg=None):
if not (expect < actual): self._failures += failure()
def assertLe(self, expect, actual, msg=None):
if not (expect <= actual): self._failures += failure()
def assertIs(self, expect, actual, msg=None):
if expect is not actual: self._failures += failure()
def assertNone(self, o, msg=None):
if o != None: self._failures += failure()
def none(self, o, msg=None):
if o != None: self._failures += failure()
def assertNotNone(self, o, msg=None):
if o == None: self._failures += failure()
def assertZero(self, actual):
if len(actual) != 0: self._failures += failure()
def assertOne(self, actual):
if len(actual) != 1: self._failures += failure()
def one(self, actual):
if len(actual) != 1: self._failures += failure()
def assertTwo(self, actual):
if len(actual) != 2: self._failures += failure()
def two(self, actual):
if len(actual) != 2: self._failures += failure()
def assertThree(self, actual):
if len(actual) != 3: self._failures += failure()
def three(self, actual):
if len(actual) != 3: self._failures += failure()
def nine(self, actual):
if len(actual) != 9: self._failures += failure()
def ten(self, actual):
if len(actual) != 10: self._failures += failure()
def eleven(self, actual):
if len(actual) != 11: self._failures += failure()
def assertCount(self, expect, actual, msg=None):
if expect != len(actual): self._failures += failure()
def count(self, expect, actual, msg=None):
if expect != len(actual): self._failures += failure()
def assertValid(self, ent):
v = ent.isvalid
if type(v) != bool:
raise Exception('invalid property must be a boolean')
if not v:
self._failures += failure(ent=ent)
def valid(self, ent):
v = ent.isvalid
if type(v) != bool:
raise Exception('invalid property must be a boolean')
if not v:
self._failures += failure(ent=ent)
def assertInValid(self, ent):
v = ent.isvalid
if type(v) != bool:
raise Exception('invalid property must be a boolean')
if v:
self._failures += failure(ent=ent)
def assertBroken(self, ent, prop, rule):
if not ent.brokenrules.contains(prop, rule):
self._failures += failure()
def broken(self, ent, prop, rule):
if not ent.brokenrules.contains(prop, rule):
self._failures += failure()
@property
def failures(self):
return self._failures
def __str__(self):
if self.failures.isempty:
r = ''
ok = 'pass'
else:
r = '\n'
ok = 'FAIL'
name = self.__class__.__name__
r += "[{}]{}{}".format(name, ' ' * (72 - len(name)), ok)
if self.failures.isempty:
return r
return r + "\n" + self.failures._tostr(includeHeader=False) + "\n"
@staticmethod
def preserve(str):
return dedent(str)[1:-1]
def post(self, cls, meth, args):
import app
body = {
'__class': cls,
'__method': meth,
'__args': args,
}
def sres(statuscode0, resheads0):
global statuscode
global resheads
statuscode, resheads = statuscode0, resheads0
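# minimal WSGI environ for an in-process request; wsgi.input carries the
# already-built body dict directly, so no real server or socket is involved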
env= {
'content_length': len(body),
'content_type': 'application/x-www-form-urlencoded',
'http_accept': '*/*',
'http_host': '127.0.0.0:8000',
'http_user_agent': 'tester/1.0',
'path_info': '/',
'query_string': '',
'raw_uri': '/',
'remote_addr': '192.168.127.12',
'remote_port': '43130',
'request_method': 'post',
'script_name': '',
'server_name': '172.31.9.64',
'server_port': '8000',
'server_protocol': 'http/1.1',
'server_software': 'gunicorn/19.4.5',
'gunicorn.socket': None,
'wsgi.errors': None,
'wsgi.file_wrapper': None,
'wsgi.input': body,
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0)
}
app.app.breakonexception = self.testers.breakonexception
iter = app.app(env, sres)
for body in iter:
body = body.decode('utf-8')
body = json.loads(body)
statusmessage = statuscode
statuscode0 = int(statuscode[:3])
return httpresponse(statuscode0, statusmessage, resheads, body)
class httpresponse(entity):
def __init__(self, statuscode, statusmessage, headers, body):
self.statuscode = statuscode
self.statusmessage = statusmessage
self.headers = headers
self.body = body
@property
def brokenrules(self):
brs = brokenrules()
if self.statuscode < 200 or self.statuscode > 400:
brs += brokenrule('Status code is not valid: ' + str(self.statuscode))
if self.body['__exception']:
brs += brokenrule('Exception was returned')
return brs
def hasbrokenrule(self, prop, type=None, msg=None):
brs = self.body['__brokenrules']
for br in brs:
if br['property'] != prop:
continue
if type != None:
try:
if br['type'] != type:
continue
except KeyError:
continue
if msg != None:
try:
if msg != br['message']:
continue
except KeyError:
continue
return True
return False
def __str__(self):
r = self.statusmessage + '\n'
for hdr in self.headers:
r += hdr[0] + ': ' + hdr[1] + '\n'
r += pprint.pformat(self.body)
return r
class failures(entities):
pass
class failure(entity):
def __init__(self, cause=None, assert_=None, ent=None):
self._assert = assert_
self.cause = cause
self.entity = ent
if not cause:
stack = inspect.stack()
self._assert = stack[1][3]
self._test = stack[2][3]
self._line = stack[2][2]
try:
self._expect = inspect.getargvalues(stack[1][0])[3]['expect']
except KeyError:
pass
try:
self._actual = inspect.getargvalues(stack[1][0])[3]['actual']
except KeyError:
pass
try:
self._message = inspect.getargvalues(stack[1][0])[3]['msg']
except KeyError:
pass
def __str__(self):
if self.cause:
r = "{}: {} in {}".format(self.cause.__class__.__name__,
repr(self.cause),
self._assert)
else:
r = "{} in {} at {}".format(self._assert, self._test, self._line)
if hasattr(self,'_expect'):
r += "\nexpect: " + repr(self._expect)
r += "\nactual: " + repr(self._actual)
if hasattr(self, '_message') and self._message != None:
r += "\nmessage: " + self._message
if self.entity:
for br in self.entity.brokenrules:
r += "\n - " + str(br)
return r
``` |
{
"source": "jhogan/epiphenomenon-py",
"score": 2
} |
#### File: jhogan/epiphenomenon-py/crm.py
```python
from pdb import set_trace; B=set_trace
import db
from entities import brokenrules
class leads(db.dbentities):
def __init__(self, rset=None):
super().__init__()
if rset != None:
for r in rset:
self += lead(r)
@property
def _table(self):
return 'leads'
def getunemailed():
sql = """
select *
from leads
where emailed_at is null
"""
conn = db.connections.getinstance().default
res = conn.query(sql)
ls = leads()
for r in res:
ls += lead(r)
return ls
class lead(db.dbentity):
def __init__(self, v=None):
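# v may be a row id (int), a db.dbresultset/db.dbresult to hydrate from,
# or None for a new, unsaved lead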
super().__init__()
if type(v) == int:
sql = 'select * from leads where id = %s'
conn = db.connections.getinstance().default
v = conn.query(sql, (v,))
if v == None:
ls = [''] * 5 + [None]
self._marknew()
elif type(v) == db.dbresultset:
if v.hasone:
ls = list(v.first)
else:
ls = [None] * 6  # six fields are unpacked below
self._markold()
elif type(v) == db.dbresult:
ls = list(v)
self._markold()
self._id, self._name, self._email, \
self._subject, self._message, \
self._emailed_at = ls
@property
def name(self):
return self._name
@name.setter
def name(self, v):
return self._setvalue('_name', v, 'name')
@property
def subject(self):
return self._subject
@subject.setter
def subject(self, v):
return self._setvalue('_subject', v, 'subject')
@property
def email(self):
return self._email
@email.setter
def email(self, v):
return self._setvalue('_email', v, 'email')
@property
def message(self):
return self._message
@message.setter
def message(self, v):
return self._setvalue('_message', v, 'message')
@property
def emailed_at(self):
return self._emailed_at
@emailed_at.setter
def emailed_at(self, v):
return self._setvalue('_emailed_at', v, 'emailed_at')
def _insert(self, cur=None):
insert = """
insert into leads
values(null, %s, %s, %s, %s, %s);
"""
conn = db.connections.getinstance().default
args = (self.name, self.email, self.subject,
self.message, self.emailed_at)
res = conn.query(insert, args, cur)
self._id = res.lastrowid
def _update(self):
sql = """
update leads set
name = %s, email = %s,
subject = %s, message = %s, emailed_at = %s
where id = %s;
"""
conn = db.connections.getinstance().default
args = (self.name, self.email, self.subject,
self.message, self.emailed_at, self.id)
res = conn.query(sql, args)
# TODO: This should be _delete(). Have the super class call it from
# its delete() method.
def delete(self):
if not self.id:
raise Exception("Can't delete lead.")
sql = 'delete from leads where id = %s'
conn = db.connections.getinstance().default
v = conn.query(sql, (self.id,))
@property
def brokenrules(self):
brs = brokenrules()
brs.demand(self, 'name', isfull=True, maxlen=50)
brs.demand(self, 'email', isemail=True, maxlen=50)
brs.demand(self, 'message', isfull=True, maxlen=1000)
if self.emailed_at != None:
brs.demand(self, 'emailed_at', isdate=True)
return brs
def _create(self):
return """
create table leads(
id int(6) unsigned auto_increment primary key,
name varchar(255) not null,
email varchar(255) not null,
subject varchar(255) not null,
message text not null,
emailed_at datetime
)
"""
def _alter(self):
r = []
r.append('alter table leads '
' modify column emailed_at datetime(6)')
return r
def __str__(self):
r = 'Name: ' + str(self.name) + '\n'
r += 'Email: ' + str(self.email) + '\n'
r += 'Subject: ' + str(self.subject) + '\n'
r += 'Message: ' + str(self.message) + '\n'
return r
def __repr__(self):
r = super().__repr__()
r += ' id: ' + str(self.id)
r += ' name: ' + self.name
r += ' email: ' + self.email
return r
``` |
{
"source": "jhogan/tincture",
"score": 2
} |
#### File: jhogan/tincture/app.py
```python
from configfile import configfile
from functools import reduce
from pprint import pprint
import json
import os
import pdb; B=pdb.set_trace
import re
import sys
import traceback
class application:
Logformat = '"{0} {1}.{2}({3})" {4} {5}'
def __init__(self):
self.clear()
self.breakonexception = False
def clear(self):
self._requestbody = None
self._requestsize = None
self._requestdata = None
self._class = None
self._method = None
@property
def environment(self):
return self._env
def demandvalid(self):
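# the request body must be JSON naming a __class defined in ctrl.py and a
# public __method on it; anything else is rejected here before dispatch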
if len(self.requestbody) == 0:
raise http400('No data in body of request message.')
try:
reqdata = self.requestdata
except json.JSONDecodeError as ex:
raise http400(str(ex))
try:
cls = reqdata['__class']
except KeyError:
msg = 'The class value was not supplied.'
raise http404(msg)
try:
meth = self.method
except KeyError:
msg = 'The method value was not supplied.'
raise ValueError(msg)
if meth[0] == '_':
raise http403('Invalid method.')
try:
import ctrl
except ImportError as ex:
raise ImportError('Error importing controller: ' + str(ex))
msg = 'Class not found'
try:
cls = sys.modules['ctrl'].__dict__[cls]
except:
raise http404(msg)
if not isinstance(cls, type) or cls.__module__ != ctrl.__name__:
raise http404(msg)
@property
def requestsize(self):
if self._requestsize == None:
try:
self._requestsize = int(self.environment.get('CONTENT_LENGTH', 0))
except ValueError:
self._requestsize = 0
return self._requestsize
@property
def requestbody(self):
if self._requestbody == None:
wsgiinput = self.environment['wsgi.input']
if type(wsgiinput) == dict:
self._requestbody = wsgiinput
else:
reqsize = self.requestsize
self._requestbody = self.environment['wsgi.input'].read(reqsize).decode('utf-8')
return self._requestbody
@property
def requestdata(self):
if self._requestdata == None:
reqbody = self.requestbody
if type(reqbody) == dict:
self._requestdata = reqbody
else:
self._requestdata = json.loads(reqbody)
return self._requestdata
@property
def class_(self):
if self._class == None:
reqdata = self.requestdata
cls = reqdata['__class']
self._class = reduce(getattr, cls.split('.'), sys.modules['ctrl'])
return self._class
@property
def method(self):
if self._method == None:
reqdata = self.requestdata
self._method = reqdata['__method']
return self._method
def __call__(self, env, sres):
statuscode = None
log = None
try: log = configfile.getinstance().logs.default
except: pass
try:
self.clear()
self._env = env
self.demandvalid()
reqdata = self.requestdata
cls, meth = self.class_, self.method
obj = cls(self)
data = getattr(obj, meth)()
data = [] if data == None else data
try:
br = data['__brokenrules']
if len(br):
statuscode = '422 Unprocessable Entity'
except KeyError:
# If the action returned no __brokenrules
data['__brokenrules'] = ''
data['__exception'] = None
except Exception as ex:
if self.breakonexception:
if not isinstance(ex, httperror):
print(ex)
pdb.post_mortem(ex.__traceback__)
if log: log.exception('')
if isinstance(ex, httperror):
statuscode = ex.statuscode
else:
statuscode = '500 Internal Server Error'
# Get the stack trace
tb = traceback.format_exception(None, None, ex.__traceback__)
# The top and bottom of the stack trace don't correspond to frames, so remove them
tb.pop(); tb.pop(0)
tb = [re.split('\n +', f.strip()) for f in tb]
data = {'__exception': type(ex).__name__ + ': ' + str(ex),
'__traceback': tb,
'__brokenrules': []
}
else:
if not statuscode:
statuscode = '200 OK'
finally:
# Log
try:
d, env = self.requestdata, self.environment
addr, cls, meth, = env['REMOTE_ADDR'], d['__class'], d['__method']
args, st, ua = str(d['__args']), statuscode[:3], env['HTTP_USER_AGENT']
log.info (application.Logformat.format(addr, cls, meth, '', st, ua))
log.debug(application.Logformat.format(addr, cls, meth, args, st, ua))
except:
pass
# Return data
data = json.dumps(data)
data = bytes(data, 'utf-8')
resheads=[
('Content-Length', str(len(data))),
('Content-Type', 'application/json'),
('Access-Control-Allow-Origin', '*'),
]
sres(statuscode, resheads)
return iter([data])
class httperror(Exception):
def __init__(self, statuscode, msg):
self.statuscode = statuscode
self.message = msg
def __repr__(self):
return self.message
class http422(httperror):
def __init__(self, msg):
super().__init__('422 Unprocessable Entity', msg)
class http404(httperror):
def __init__(self, msg):
super().__init__('404 Not Found', msg)
class http403(httperror):
def __init__(self, msg):
super().__init__('403 Forbidden', msg)
class http401(httperror):
def __init__(self, msg):
super().__init__('401 Unauthorized', msg)
class http400(httperror):
def __init__(self, msg):
super().__init__('400 Bad Request', msg)
class controller:
def __init__(self, app):
self._app = app
@property
def application(self):
return self._app
@property
def data(self):
return self.application.requestdata
@property
def _arguments(self):
return self.application.requestdata['__args']
def getargument(self, arg):
args = self._arguments
try:
return args[arg]
except KeyError:
return None
@staticmethod
def convertbrokenrules(ent):
brs = []
for br in ent.brokenrules:
brs.append({
'property': br.property,
'message': br.message,
'type': br.type
})
return brs
app = application()
``` |
{
"source": "jhogg/dlive-osc-gateway",
"score": 2
} |
#### File: allenheath/dlive/rpcproxy.py
```python
from __future__ import print_function
__all__ = ['init_rpc_proxy']
#
# Map MixRack version string to internal proxy versions
#
class ProxyConfig(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def get(self, key, default=None):
return self.__dict__.get(key, default)
CONFIG_130 = [
ProxyConfig(name='proxyDCA', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x0019, _addr2=0x0019-1,
),
ProxyConfig(name='proxyInput', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x001b, _addr2=0x001b-1,
),
ProxyConfig(name='proxyFXReturn', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x001c, _addr2=0x001c-1,
),
ProxyConfig(name='proxyFXSend', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x001d, _addr2=0x001d-1,
),
ProxyConfig(name='proxyMix', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x001e, _addr2=0x001e-1,
),
ProxyConfig(name='proxyInputProcessing', klass='proxyInputProcessing',
_deviceid=0x0001, _addr1=0x0102, _addr2=0x0102-1,
inserta_ofs = 0x000a,
insertb_ofs = 0x000b,
delay_ofs = 0x000f,
),
]
CONFIG_140 = [
ProxyConfig(name='proxyDCA', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x001b, _addr2=0x001b-1,
),
ProxyConfig(name='proxyInput', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x001d, _addr2=0x001d-1,
),
ProxyConfig(name='proxyFXReturn', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x001e, _addr2=0x001e-1,
),
ProxyConfig(name='proxyFXSend', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x001f, _addr2=0x001f-1,
),
ProxyConfig(name='proxyMix', klass='proxyBasic',
_deviceid=0x0001, _addr1=0x0020, _addr2=0x0020-1,
),
ProxyConfig(name='proxyInputProcessing', klass='proxyInputProcessing',
_deviceid=0x0001, _addr1=0x0145, _addr2=0x0145-1,
inserta_ofs = 0x000a,
insertb_ofs = 0x000b,
delay_ofs = 0x000f,
),
]
VERSION_MAPPING = {
'V1.30 - Rev. 27648': CONFIG_130,
'V1.31 - Rev. XXXXX': CONFIG_130, # FIXME: Need 1.31 Build #
'V1.40 - Rev. 30551': CONFIG_140,
}
class UnknownFirmwareVersion(Exception):
"""Raised when the MixRack reports a firmware build with no entry in VERSION_MAPPING."""
def init_rpc_proxy(console, version):
print("init_rpc_proxy(%s)" % version)
# --- Verify firmware version is recognized
if not version in VERSION_MAPPING:
raise UnknownFirmwareVersion(version)
# --- Dynamically create proxies
for entry in VERSION_MAPPING[version]:
obj = globals()[entry.get('klass')](console, entry)
console.__dict__[entry.get('name')] = obj
# -----------------------------------------------------------------------------
class proxyBase(object):
def configure(self, console, config):
self._console = console
self._config = config
for key in ['_deviceid', '_addr1', '_addr2', '_method']:
self.__dict__[key] = config.get(key, None)
#self._deviceid, self._addr1, self._addr2 = 0x0001, 0x001d, 0x001c # 1.40 TEST
def sysex_send(self, deviceid=None, addr1=None, addr2=None, method=None, data = [], **kw):
deviceid = deviceid or self._deviceid
addr1 = (addr1 or self._addr1) + kw.get('addr1_ofs', 0)
addr2 = (addr2 or self._addr2) + kw.get('addr2_ofs', kw.get('addr1_ofs', 0))
method = (method or self._method) + kw.get('method_ofs', 0)
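# frame layout: 0xF0, 16-bit device id, 16-bit addr1, 16-bit addr2,
# 16-bit method, 16-bit payload length, payload bytes, 0xF7 terminator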
len1 = len(data) // 256
len2 = len(data) % 256
sysex = [ 0xf0,
deviceid//256, deviceid%256,
addr1//256, addr1%256,
addr2//256, addr2%256, # Typically addr1 - 0x0001
method//256, method%256,
len1, len2]
sysex.extend(data)
sysex.append(0xf7)
fmt_data = ' '.join('%02x' % i for i in sysex)
print("sysex [ %s ]" % (fmt_data))
return sysex
class proxyBasic(proxyBase):
def __init__(self, console, config):
self.configure(console, config)
def set(self, method, data):
self._console.queue_command(self.sysex_send(method=method, data=data))
class proxyInputProcessing(proxyBase):
def __init__(self, console, config):
self.configure(console, config)
self._inserta_ofs = config.get('inserta_ofs')
self._insertb_ofs = config.get('insertb_ofs')
self._delay_ofs = config.get('delay_ofs')
def inserta(self, channel_idx, method, data):
addr1_ofs = (channel_idx) * 0x10 + self._inserta_ofs
self._console.queue_command(self.sysex_send(addr1_ofs=addr1_ofs, method=method, data=data))
def insertb(self, channel_idx, method, data):
addr1_ofs = (channel_idx) * 0x10 + self._insertb_ofs
self._console.queue_command(self.sysex_send(addr1_ofs=addr1_ofs, method=method, data=data))
def delay(self, channel_idx, method, data):
addr1_ofs = (channel_idx) * 0x10 + self._delay_ofs
self._console.queue_command(self.sysex_send(addr1_ofs=addr1_ofs, method=method, data=data))
``` |
{
"source": "jhogsett/linkit",
"score": 2
} |
#### File: linkit/python/app_template.py
```python
import sys
import argparse
import os
#import time
#import datetime
import terminal_colors as tc
import app_ui as ui
import utils
# ----------------------------------------
def begin():
pass
def run():
pass
# ----------------------------------------
# ========================================
global app_description, verbose_mode, quiet_mode
app_description = None
verbose_mode = None
quiet_mode = None
def get_options():
global app_description, verbose_mode
app_description = "(application template) - Apollo Lighting System v.0.1 1-0-2019"
parser = argparse.ArgumentParser(description=app_description)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="display verbose info (False)")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", help="don't use terminal colors (False)")
args = parser.parse_args()
verbose_mode = args.verbose
quiet_mode = args.quiet
def validate_options():
pass
def introduction():
ui.app_description(app_description)
ui.report_verbose("verbose mode")
ui.report_verbose()
def initialize():
get_options()
validate_options()
tc.begin(quiet_mode)
ui.begin(verbose_mode, quiet_mode)
begin()
introduction()
def loop():
run()
# ========================================
# ========================================
if __name__ == '__main__':
initialize()
try:
loop()
except KeyboardInterrupt:
sys.exit("\nExiting...\n")
except Exception:
raise
```
#### File: linkit/python/beep_tester.py
```python
import sys
import argparse
import os
#import time
#import datetime
import terminal_colors as tc
import app_ui as ui
import utils
import led_command as lc
# ----------------------------------------
music = 1.059463094359295
duration = 1000
start = 309 #309
end = 4500
def begin():
lc.begin(verbose_mode);
def run():
# for n in range(400, 4400, 5):
# for n in range(1800, 2000, 1):
# for n in range(2300, 2500, 3):
# for n in range(2900, 2950, 1):
# for n in range(3100, 3500, 3):
freq = start
while freq < end:
print int(freq)
lc.command(str(int(freq)) + "," + str(duration) + ":ton")
freq = freq * music
# ----------------------------------------
# ========================================
global app_description, verbose_mode, quiet_mode
app_description = None
verbose_mode = None
quiet_mode = None
def get_options():
global app_description, verbose_mode
app_description = "(application template) - Apollo Lighting System v.0.1 1-0-2019"
parser = argparse.ArgumentParser(description=app_description)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="display verbose info (False)")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", help="don't use terminal colors (False)")
args = parser.parse_args()
verbose_mode = args.verbose
quiet_mode = args.quiet
def validate_options():
pass
def introduction():
ui.app_description(app_description)
ui.report_verbose("verbose mode")
ui.report_verbose()
def initialize():
get_options()
validate_options()
tc.begin(quiet_mode)
ui.begin(verbose_mode, quiet_mode)
begin()
introduction()
def loop():
run()
# ========================================
# ========================================
if __name__ == '__main__':
initialize()
try:
loop()
except KeyboardInterrupt:
sys.exit("\nExiting...\n")
except Exception:
raise
```
#### File: linkit/python/circleci5.py
```python
import serial
import time
import requests
import json
import sys
import logging
import os
script_path = os.getcwd()
log_path = "/root/dev/linkit/python/circleci.log"
print "logging to " + log_path
logging.basicConfig(filename=log_path, level=logging.INFO, format='%(asctime)s %(message)s')
logging.info("Circleci5.py started")
job_limit = 16
max_job = job_limit - 1
token = sys.argv[1]
url = 'https://circleci.com/api/v1.1/recent-builds?circle-token=' + token + '&limit=' + str(job_limit) + '&offset=0'
request_frequency = 15
s = None
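# CircleCI status/lifecycle/outcome values mapped to the LED color (and
# optional effect) commands sent to the strip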
color_map = {
"running": "ltblue|breathe",
"success": "green",
"finished": "green",
"fixed": "seafoam",
"failed": "red",
"no_tests": "gray",
"retried": "yellow",
"timedout": "yellow|blink",
"canceled": "gray",
"not_run": "gray",
"queued": "purple|breathe",
"scheduled": "purple",
"not_running": "pink",
"missing": "gray",
"spacer": "black",
"masterfailed": "red|blink",
"masterfixed": "green|blink",
"infrastructure_fail": "pink",
"jerryrunning": "magenta|breathe"
}
inter_command_delay = 0.01
def command(cmd_text):
s.write((cmd_text + '|').encode())
time.sleep(inter_command_delay)
def get_color_cmd(color_cmd_text):
if color_cmd_text is None:
return "missing"
else:
return color_map.get(color_cmd_text, "black")
def color_command(color_cmd_text):
color_cmd = get_color_cmd(color_cmd_text)
command(color_cmd)
def color_command3(color_cmd_text):
color_cmd = get_color_cmd(color_cmd_text)
for i in range(0, 3):
command(color_cmd)
def spacer():
color_command("spacer")
def fix_missing(value):
if value is None:
return "missing"
else:
return value
def setup():
global s
s = serial.Serial("/dev/ttyS0", 57600)
time.sleep(0.1)
command("erase")
def loop():
try:
r = requests.get(url)
r = r.text.encode('utf-8')
j = json.loads(r)
command("pause");
for x in range(0, job_limit):
st = fix_missing(j[x]['status'])
lc = fix_missing(j[x]['lifecycle'])
oc = fix_missing(j[x]['outcome'])
br = j[x]['branch']
rp = j[x]['reponame']
cn = j[x]['committer_name']
master = br == 'master'
orders = rp == 'orders'
orders_master = master and orders
jh = cn == 'jerry'
logging.info("st:" + st + " lc:" + lc + " oc:" + oc + " br:" + br + " rp:" + rp)
if st == 'failed':
if orders_master:
color_command3('masterfailed')
else:
color_command3('failed')
elif st == 'success':
color_command3('success')
elif st == 'running':
if jh:
color_command3('jerryrunning')
else:
color_command3('running')
elif st == 'fixed':
if orders_master:
color_command3('masterfixed')
else:
color_command3('fixed')
elif oc == 'infrastructure_fail':
color_command3('infrastructure_fail')
else:
color_command(st)
color_command(oc)
color_command(lc)
spacer()
command("continue");
command("flush");
time.sleep(request_frequency)
except requests.exceptions.ConnectionError:
logging.error("Connection error - retrying")
command("pause|blue|blink|flood|continue")
time.sleep(15)
except Exception:
logging.error(sys.exc_info()[0])
command("pause|yellow|blink|flood|continue")
raise
if __name__ == '__main__':
setup()
while True:
loop()
```
#### File: linkit/python/circleci6.py
```python
import serial
import time
import requests
import json
import sys
import logging
import os
import traceback
import sys
script_path = os.getcwd()
log_path = "/root/dev/linkit/python/circleci.log"
print "logging to " + log_path
logging.basicConfig(filename=log_path, level=logging.INFO, format='%(asctime)s %(message)s')
logging.info("Circleci5.py started")
global job_limit
job_limit = 18
max_job = job_limit - 1
max_leds = job_limit * 4
try:
token = sys.argv[1]
except Exception:
sys.exit("\nNo API key supplied!\n")
global url
url = 'https://circleci.com/api/v1.1/recent-builds?circle-token=' + token + '&limit=' + str(job_limit) + '&offset=0'
request_frequency = 15
s = None
color_map = {
"running": "ltblue:breathe",
"success": "green",
"finished": "green",
"fixed": "seafoam",
"failed": "red",
"no_tests": "dkgray",
"retried": "pink",
"timedout": "yellow:blink",
"canceled": "orange",
"not_run": "gray",
"queued": "purple:blink",
"scheduled": "ltgreen:breathe",
"not_running": "purple:blink",
"missing": "dkgray",
"spacer": "black",
"masterfailed": "red:blink",
"masterfixed": "green:blink",
"infrastructure_fail": "yellow:blink",
"jerryrunning": "magenta:breathe"
}
def flush_input():
s.read(s.inWaiting())
def wait_for_ack():
while s.inWaiting() == 0:
pass
flush_input()
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def get_color_cmd(color_cmd_text):
if color_cmd_text is None:
return "missing"
else:
return color_map.get(color_cmd_text, "black")
def color_command(color_cmd_text):
color_cmd = get_color_cmd(color_cmd_text)
command(color_cmd)
def color_command3(color_cmd_text):
color_cmd = get_color_cmd(color_cmd_text)
for i in range(0, 3):
command(color_cmd)
def spacer():
color_command("spacer")
def fix_missing(value):
if value is None:
return "missing"
else:
return value
build_keys = None
def setup():
global s, build_keys, job_limit, url
s = serial.Serial("/dev/ttyS0", 115200)
build_keys = {}
time.sleep(0.1)
flush_input()
command(":::reset")
if len(sys.argv) > 2:
command(sys.argv[2])
if len(sys.argv) > 3:
job_limit = int(sys.argv[3]) / 4
url = 'https://circleci.com/api/v1.1/recent-builds?circle-token=' + token + '&limit=' + str(job_limit) + '&offset=0'
oldest_first = True
def translate_position(pos):
if oldest_first:
return (job_limit - 1) - pos
else:
return pos
def get_value(json, value_name):
try:
return fix_missing(json[value_name])
except Exception:
return "missing"
def loop():
global build_keys
try:
r = requests.get(url)
r = r.text.encode('utf-8')
j = json.loads(r)
command("pause");
insert_count = 0
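# count builds that were not on the strip last pass (4 LEDs per build) so the
# existing pixels can be shifted to make room before redrawing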
for x in range(0, job_limit):
y = translate_position(x)
key = get_value(j[y], 'build_url')
if not build_keys.has_key(key):
insert_count += 4
if insert_count > 0:
command(str(max_leds - insert_count) + "," + str(insert_count) + ":pshifto")
build_keys = {}
for x in range(0, job_limit):
y = translate_position(x)
key = get_value(j[y], 'build_url')
build_keys[key] = True
st = get_value(j[y], 'status')
lc = get_value(j[y], 'lifecycle')
oc = get_value(j[y], 'outcome')
br = get_value(j[y], 'branch')
rp = get_value(j[y], 'reponame')
cn = get_value(j[y], 'committer_name')
master = br == 'master'
orders = rp == 'orders'
orders_master = master and orders
jh = cn == 'jerry'
logging.info("st:" + st + " lc:" + lc + " oc:" + oc + " br:" + br + " rp:" + rp + " cn:" + cn)
if st == 'failed':
if orders_master:
color_command3('masterfailed')
else:
color_command3('failed')
elif st == 'success':
color_command3('success')
elif st == 'running':
if jh:
color_command3('jerryrunning')
else:
color_command3('running')
elif st == 'fixed':
if orders_master:
color_command3('masterfixed')
else:
color_command3('fixed')
elif oc == 'infrastructure_fail':
color_command3('infrastructure_fail')
elif oc == 'no_tests':
color_command3('no_tests')
elif oc == 'timedout':
color_command3('timedout')
elif oc == 'canceled':
color_command3('canceled')
elif st == 'not_run':
color_command3('not_run')
elif st == 'not_running':
color_command3('not_running')
elif st == 'queued':
color_command3('queued');
else:
color_command(st)
color_command(oc)
color_command(lc)
spacer()
command("continue:flush")
time.sleep(request_frequency)
except requests.exceptions.ConnectionError:
logging.error("Connection error - retrying")
command("pause:blue:blink:flood:continue")
time.sleep(15)
except KeyboardInterrupt:
sys.exit("\nExiting...\n")
except Exception:
logging.error(sys.exc_info()[0])
logging.error(traceback.format_tb(sys.exc_info()[2]))
command("pause:yellow:blink:flood:continue")
raise
if __name__ == '__main__':
setup()
while True:
loop()
```
#### File: linkit/python/disc_speed_test.py
```python
import serial
import time
import random
s = None
num_leds = 93
play_time = 0.0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command("::pau")
command("rst:era")
num_colors = 12
colors = [ "red", "org", "yel", "lgr", "grn", "sea", "cyn", "lbl", "blu", "pur", "mag", "pnk", "blk", "rnd" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
for i in xrange(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
color = "black"
last_color = "black"
def display1():
global color, last_color
while True:
color = random_color()
if color != last_color:
break
command(color + ":flo:flu")  # use the color chosen above so consecutive flashes differ
command("blk:flo:flu")
last_color = color
def display2():
command(random_color() + ":92:repeat:flush")
command("black:92:repeat:flush")
def loop():
display1()
if __name__ == '__main__':
setup()
while True:
loop()
```
#### File: linkit/python/fading_colors.py
```python
import serial
import time
import random
import sys
s = None
num_leds = 100
play_time = 15.0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
s.write((':::' + cmd_text + ':').encode())
wait_for_ack()
def clean_up():
command("reset")
def setup():
global s, play_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
command("pause:reset:pause")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
play_time = int(sys.argv[2])
def loop():
try:
command("random:flood:cfade")
time.sleep(play_time)
except KeyboardInterrupt:
sys.exit("\nExiting...\n")
clean_up()
except Exception:
clean_up()
raise
if __name__ == '__main__':
setup()
while True:
loop()
```
#### File: linkit/python/flower20.py
```python
import serial
import time
import random
import sys
s = None
num_leds = 93
play_time = 0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s, ticks, play_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command(":::pause:reset:erase")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
play_time = float(sys.argv[2])
command("6:zone:red:7:repeat:white:7:repeat:red:7:repeat:white:7:repeat")
command("5:zone:red:5:repeat:white:5:repeat:red:5:repeat:white:5:repeat")
command("4:zone:red:3:repeat:white:3:repeat:red:3:repeat:white:3:repeat")
command("3:zone:red:2:repeat:white:2:repeat:red:2:repeat:white:2:repeat")
command("2:zone:red:1:repeat:white:1:repeat:red:1:repeat:white:1:repeat")
num_colors = 12
colors = [ "red", "orange", "yellow", "ltgreen", "green", "seafoam", "cyan", "ltblue", "blue", "purple", "magenta", "pink", "black", "random" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
for i in xrange(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
def place_color(zone, color):
command(str(zone) + ":zone:" + color + ":blink" + str(zone) + ":flood")
def place_colors():
place_color(6, chosen_colors[0])
place_color(5, chosen_colors[1])
place_color(4, chosen_colors[2])
place_color(3, chosen_colors[3])
place_color(2, chosen_colors[4])
place_color(1, chosen_colors[5])
def display():
place_colors()
command("flush")
global idx
idx = -1
def do_zone(zone):
command(str(zone) + ":zone:rotate")
def loop():
for i in range(2, 7):
do_zone(i)
command("flush")
if __name__ == '__main__':
setup()
while True:
loop()
```
#### File: linkit/python/keyboard.py
```python
import time
import app_ui as ui
import utils
import led_command as lc
import tones
global poll_frequency, long_press, too_short_press
global sleep_time, key_callback, verbose_mode
verbose_mode = False
poll_frequency = 20
long_press = 50
too_short_press = 3
sleep_time = 1.0 / poll_frequency
def begin(callback=None, verbose_mode_=False):
global key_callback, verbose_mode
if callback == None:
callback = default_callback
key_callback = callback
verbose_mode = verbose_mode_
lc.begin() #verbose_mode)
lc.stop_all()
tones.begin(verbose_mode)
enable_keyboard()
tones.store_long_press_tone()
def default_callback(key, long_press):
print "key: {} long press: {}".format(key, long_press)
tones.long_activate() if long_press else tones.activate()
def poll_forever():
while(True):
poll_once()
def poll_wait():
time.sleep(sleep_time)
def poll_once():
poll()
poll_wait()
def poll():
result = lc.command_str("4:key")
args = result.split(",")
if len(args) < 2:
return
key = int(args[0])
if key:
count = int(args[1])
if count <= too_short_press:
return
key_callback(key, True if count >= long_press else False)
# ========================================
def send(command):
lc.command("::3:pau:" + command + ":3:cnt:1:cnt")
ui.report_verbose_alt("sent: " + command)
def enable_keyboard():
send(":::pau")
send("3,-1,-1:key")
send("4,0:cfg:1:cnt:4:cnt")
```
#### File: linkit/python/multicast.py
```python
import socket
from socket import error as socket_error
import struct
import threading
import time
import app_ui as ui
global host_name, verbose_mode, multicast_group_ip, multicast_port, timeout_in_seconds, multicast_group, num_times, no_keys, msg_delay
global responses, background_threads
responses = {}
background_threads = []
def begin(host_name_, verbose_mode_=False, multicast_group_ip_=None, multicast_port_=None, timeout_in_seconds_=None, num_times_=None, msg_delay_=None):
global host_name, verbose_mode, multicast_group_ip, multicast_port, timeout_in_seconds, multicast_group, num_times, no_keys, msg_delay
host_name = host_name_
verbose_mode = verbose_mode_
multicast_group_ip = multicast_group_ip_ or '172.16.31.10'
multicast_port = multicast_port_ or 10000
timeout_in_seconds = timeout_in_seconds_ or 0.1
num_times = num_times_ or 15
no_keys = False
msg_delay = msg_delay_ or 0.001
multicast_group = (multicast_group_ip, multicast_port)
ui.begin(verbose_mode)
def conclude():
wait_for_active_threads()
def broadcast(message, regex=None):
message = create_key(message, regex)
send_background_message(message)
def received():
return responses
# ========================================
def create_key(message, regex=None):
key = []
key.append(host_name)
key.append(str(time.time()))
if regex:
key.append(regex)
keystr = "/".join(key)
return keystr + ";" + message
# ========================================
def cast_socket():
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set a timeout so the socket does not block indefinitely when trying
# to receive data.
sock.settimeout(timeout_in_seconds)
# Set the time-to-live for messages to 1 so they do not go past the
# local network segment.
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
return sock
def send_socket_message(sock, message, times):
global responses
responses = {}
for n in range(0, times):
ui.report_verbose('sending "%s"' % message)
sent = sock.sendto(message, multicast_group)
while True:
try:
data, server = sock.recvfrom(256)
except socket.timeout:
break
ip = server[0].strip("'")
responses[ip] = data
ui.report_verbose('received "%s" from %s' % (data, ip))
if n < (times - 1):
time.sleep(msg_delay * (2 ** n))
def send_message(message):
sock = cast_socket()
send_socket_message(sock, message, num_times)
sock.close()
def handle_background_message(message):
send_message(message)
thread = threading.current_thread()
background_threads.remove(thread)
ui.report_verbose("terminating thread: " + str(thread))
def send_background_message(message):
thread = threading.Thread(target=handle_background_message, args=(message, ))
ui.report_verbose("new thread: " + str(thread))
background_threads.append(thread)
thread.start()
def wait_for_active_threads():
if(len(background_threads) > 0):
ui.report_warn("waiting for active threads to terminate...")
for t in background_threads:
t.join()
############################################################################
############################################################################
#if __name__ == '__main__':
# setup()
# try:
# run()
# except KeyboardInterrupt:
# pass
# sys.exit("\nExiting...\n")
#
# finally:
# wait_for_active_threads()
# conclude()
# sys.exit("\nExiting...\n")
```
#### File: linkit/python/progmac.py
```python
import serial
import time
import sys
import terminal_colors as tc
import led_command as lc
import argparse
import app_ui as ui
import macro_compiler as mc
import math
import progmac_utils as pu
import utils
global app_description, verbose_mode, debug_mode, macro_count, program, macro_run_number, presets, dryrun, bytes_programmed, show_output, show_tables, quiet_mode, allow_mutability, no_led_show
app_description = None
verbose_mode = None
debug_mode = None
macro_count = 0
program = None
macro_run_number = None
presets = None
dryrun = None
bytes_programmed = None
show_output = None
show_tables = None
quiet_mode = None
allow_mutability = None
no_led_show = None
global device_profile, num_leds, starting_macro, num_macro_bytes, ending_macro, number_of_macros, char_buffer_size
global number_of_fine_zones, number_of_colors, number_of_sequencers, show_preprocessed
device_profile = None
num_leds = None
starting_macro = None
num_macro_bytes = None
ending_macro = None
number_of_macros = None
char_buffer_size = None
number_of_fine_zones = None
number_of_colors = None
number_of_sequencers = None
print_macros = None
show_preprocessed = None
def get_options():
global verbose_mode, debug_mode, program, macro_run_number, presets, dryrun, show_output, show_tables, num_macro_bytes_override, starting_macro_override, ending_macro_override, char_buffer_override, quiet_mode, allow_mutability
global print_macros, no_led_show, show_preprocessed
parser = argparse.ArgumentParser(description=app_description)
parser.add_argument("-m", "--macro", type=int, dest="macro", default=10, help="macro number to run after programming (10)")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="display verbose info (False)")
parser.add_argument("-d", "--debug", dest="debug", action="store_true", help="display verbose info (False)")
parser.add_argument("-r", "--dryrun", dest="dryrun", action="store_true", help="process the script but don't actually program the device (False)")
parser.add_argument("-o", "--show-output", dest="show_output", action="store_true", help="display compiled script (False)")
parser.add_argument("-t", "--show-tables", dest="show_tables", action="store_true", help="display compilation dictionaries (False)")
parser.add_argument("-b", "--bytes-per-macro", type=int, dest="bytes_per_macro", default=0, help="bytes per macro override (none)")
parser.add_argument("-s", "--starting-macro", type=int, dest="starting_macro", default=0, help="starting macro override (none)")
parser.add_argument("-e", "--ending-macro", type=int, dest="ending_macro", default=0, help="ending macro override (none)")
parser.add_argument("-c", "--char-buffer", type=int, dest="char_buffer", default=0, help="char buffer size override (none)")
parser.add_argument("-p", "--print-macros", dest="print_macros", action="store_true", help="print current macros on device (False)")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", help="don't use terminal colors (False)")
parser.add_argument("-a", "--allow-mutability", dest="mutability", action="store_true", help="allow variable values to be changed (False)")
parser.add_argument("-n", "--no-led-show", dest="no_led_show", action="store_true", help="don't write to the display while programming (False)")
parser.add_argument("-w", "--show-preprocessed", dest="show_preprocessed", action="store_true", help="Show preprocessed script (False)")
parser.add_argument("program", nargs="?", help="program to transmit")
parser.add_argument("presets", nargs=argparse.REMAINDER, help="resolved=value presets (None)")
args = parser.parse_args()
program = args.program
macro_run_number = args.macro
verbose_mode = args.verbose
debug_mode = args.debug
quiet_mode = args.quiet
num_macro_bytes_override = args.bytes_per_macro
starting_macro_override = args.starting_macro
ending_macro_override = args.ending_macro
char_buffer_override = args.char_buffer
presets = args.presets
dryrun = args.dryrun
show_output = args.show_output
show_tables = args.show_tables
print_macros = args.print_macros
allow_mutability = args.mutability
no_led_show = args.no_led_show
show_preprocessed = args.show_preprocessed
def initialize():
global app_description, bytes_programmed
global device_profile, num_leds, starting_macro, num_macro_bytes, ending_macro, number_of_macros, char_buffer_size, number_of_fine_zones, number_of_colors, number_of_sequencers, last_macro_bytes, total_macro_bytes
app_description = "Apollo Lighting System - Macro Programmer v.3.0 4-0-2018"
get_options()
if not validate_options():
sys.exit("\nExiting...\n")
tc.begin(quiet_mode)
ui.begin(verbose_mode, quiet_mode)
bytes_programmed = 0
lc.begin(False) #verbose_mode)
if dryrun:
lc.pause()
else:
lc.stop_all()
device_profile = lc.get_device_profile()
num_leds = device_profile["NUM-LEDS"]
starting_macro = device_profile["START-MACRO"]
total_macro_bytes = device_profile["TOTAL-MACRO-BYTES"]
num_macro_bytes = device_profile["NUM-MACRO-BYTES"]
last_macro_bytes = device_profile["LAST-MACRO-BYTES"]
ending_macro = device_profile["END-MACRO"]
number_of_macros = device_profile["NUM-MACRO-BYTES"]
char_buffer_size = device_profile["CHAR-BUFFER-SIZE"]
number_of_fine_zones = device_profile["NUM-FINE-ZONES"]
number_of_colors = device_profile["NUM-PALETTE-COLORS"]
number_of_sequencers = device_profile["NUM-SEQUENCERS"]
if num_macro_bytes_override != 0:
num_macro_bytes = num_macro_bytes_override
if starting_macro_override != 0:
starting_macro = starting_macro_override
if ending_macro_override != 0:
ending_macro = ending_macro_override
if char_buffer_override != 0:
char_buffer_size = char_buffer_override
all_presets = utils.merge_dicts(device_profile, get_command_line_presets())
mc.begin(lc, verbose_mode, quiet_mode, all_presets, starting_macro, ending_macro, number_of_sequencers, num_macro_bytes, char_buffer_size, last_macro_bytes, allow_mutability)
if dryrun:
lc.resume()
def get_command_line_presets():
result = {}
for preset in presets:
args = preset.split("=")
if len(args) < 2:
ui.report_error("command line preset '" + preset + "' is not a key=value pair - ignored")
else:
result[args[0]] = args[1]
return result
# returns True if they're valid
def validate_options():
errors = False
if not print_macros:
if not program:
ui.report_error("Must specify a progam to upload")
errors = True
return not errors
def set_script(script_text):
global macro_count, bytes_programmed
try:
bytes = lc.command_int(script_text);
bytes_programmed += bytes
ui.report_verbose("programmed: " + script_text)
ui.report_verbose_alt("bytes: " + str(bytes))
if not no_led_show:
lc.command_str(str(bytes % number_of_colors) + ":pal:mir:flu")
macro_count += 1
if not debug_mode:
if not verbose_mode:
ui.write(tc.green('.'))
else:
macro_number = int(script_text.split(":")[0])
macro, carry_over_macro = lc.get_macro(macro_number)
print macro
except ValueError as e:
print str(e) + " - retrying"
set_script(script_text)
#def import_file(program_name):
# script = []
# show_comments = True
# program_name = utils.locate_file(program_name, ".mac")
# file = open(program_name, "r")
# for line in file:
# line = line.strip()
# if len(line) == 0:
# continue
# if line[0] == "#":
# if show_comments:
# print tc.yellow(line[1:].strip())
# continue
# else:
# if show_comments:
# print
# show_comments = False
# script.append(line)
# return script
def program_macros(program_name):
compiled_script = ""
ui.report_info_header("1. Compiling ")
compilation_succeeded = True
try:
compiled_script = mc.compile_file(program_name)
except ValueError, e:
print
ui.report_error("Fatal error compiling script. Reported error: ")
ui.report_error_alt(str(e))
ui.report_error("more error details:")
pu.print_script(mc.get_saved_bad_script())
compilation_succeeded = False
print
if verbose_mode:
ui.report_verbose("compiled script:")
for script_text in compiled_script:
ui.report_verbose_alt3(script_text)
script_ok = False
if compilation_succeeded:
compilation_valid = mc.compilation_valid(compiled_script)
if not mc.compilation_valid(compiled_script):
print
ui.report_error("Compilation failed!")
if not verbose_mode:
print
pu.print_script(compiled_script)
script_ok = compilation_valid
if compilation_valid:
if not dryrun:
ui.report_info_header("2. Recording ")
if verbose_mode:
print
for script_text in compiled_script:
set_script(script_text)
print
ui.report_info_header("3. Verifying ")
script_ok = pu.verify_programming(compiled_script)
if show_tables:
print
pu.print_table("Presets", mc.get_presets())
pu.print_table("Includes", mc.get_includes())
pu.print_table("Resolved Values", mc.get_resolved())
pu.print_table("Unresolved Macros", mc.get_unresolved())
pu.print_table("Final Macro Numbers", mc.get_final_macro_numbers())
pu.print_table("Macros", mc.get_macros())
pu.print_table("Translation", mc.get_translation())
if show_preprocessed and not verbose_mode:
print
ui.report_info("preprocessed script:")
preprocessed = mc.get_preprocessed()
for line in preprocessed:
ui.report_info_alt(line)
if show_output and not verbose_mode:
print
ui.report_info("compiled script:")
for script_text in compiled_script:
ui.report_info_alt3(script_text)
return script_ok
# --------------------------------------------------------------------------
def introduction():
ui.app_description(app_description)
ui.report_verbose("verbose mode")
ui.report_verbose("debug mode: " + str(debug_mode))
ui.report_verbose("macro bytes override: " + str(num_macro_bytes_override))
ui.report_verbose("start macro override: " + str(starting_macro_override))
ui.report_verbose("end macro override: " + str(ending_macro_override))
ui.report_verbose("char buffer override: " + str(char_buffer_override))
ui.report_verbose()
ui.report_info(ui.intro_entry("Number of LEDs", num_leds))
ui.report_info(ui.intro_entry("Number of macros", (ending_macro - starting_macro) + 1))
ui.report_info(ui.intro_entry("Number of sequencers", number_of_sequencers))
ui.report_info(ui.intro_entry("Bytes per macro", num_macro_bytes))
ui.report_info(ui.intro_entry("First macro", starting_macro))
ui.report_info(ui.intro_entry("Last macro", ending_macro))
ui.report_info(ui.intro_entry("Char buffer size", char_buffer_size))
if not print_macros:
ui.report_info("program: " + tc.green(program))
if len(presets) > 0:
for preset in presets:
ui.report_info("command-line preset: " + tc.yellow(preset))
print
if dryrun:
ui.report_warn("Dry-run enabled. The device will not be programmed.")
print
def summary():
total_macros = (ending_macro - starting_macro) + 1
# count the number of wasted macros
wasted_macros = 0
for key in mc.get_final_macro_numbers():
if "-" in str(key):
wasted_macros += 1
wasted_macros_percent = (100.0 * wasted_macros / total_macros)
used_macros = macro_count + wasted_macros
remaining_macros = total_macros - used_macros
used_macros_percent = (100.0 * used_macros / total_macros)
remaining_macros_percent = (100.0 * remaining_macros / total_macros)
remaining_sequencers = mc.remaining_sequencers()
used_sequencers = number_of_sequencers - remaining_sequencers
remaining_sequencers_percent = round(100.0 * remaining_sequencers / number_of_sequencers)
used_sequencers_percent = round(100.0 * used_sequencers / number_of_sequencers)
total_bytes_programmed = used_macros * num_macro_bytes
if ending_macro in mc.get_final_macro_numbers().keys():
# reduce by the missing bytes in the ending macro
total_bytes_programmed -= (num_macro_bytes - last_macro_bytes)
remaining_macro_bytes = total_macro_bytes - total_bytes_programmed
used_bytes_percent = round(100.0 * total_bytes_programmed / total_macro_bytes)
remaining_bytes_percent = round(100.0 * remaining_macro_bytes / total_macro_bytes)
bytes_used_per_macro = round(bytes_programmed / used_macros) if int(used_macros) > 0 else 0
bytes_used_per_macro_percent = round(100.0 * bytes_used_per_macro / num_macro_bytes)
print
print tc.green("%d Macros successfully programmed" % macro_count)
print
print tc.yellow("%d Used / %d free macros (%d%% / %d%%)" % (used_macros, remaining_macros, used_macros_percent, remaining_macros_percent))
print tc.yellow("%d Used / %d free macro bytes (%d%% / %d%%)" % (total_bytes_programmed, remaining_macro_bytes, used_bytes_percent, remaining_bytes_percent))
print tc.yellow("%d Used / %d free sequencers (%d%% / %d%%)" % (used_sequencers, remaining_sequencers, used_sequencers_percent, remaining_sequencers_percent))
print tc.cyan("%d Bytes per macro (%d%% efficiency)" % (bytes_used_per_macro, bytes_used_per_macro_percent))
print tc.cyan("%d Carry over macros (%d%% of total)" % (wasted_macros, wasted_macros_percent))
print
def upload_programs():
return program_macros(program)
def run_default_macro():
if dryrun:
pass
else:
resolved = mc.get_resolved()
final_macro_numbers = mc.get_final_macro_numbers()
if "%play-macro" in resolved:
run_macro_name = resolved["%play-macro"]
if run_macro_name in resolved:
orig_macro_number = resolved[run_macro_name]
if "'" in str(orig_macro_number):
orig_macro_number = int(orig_macro_number[1:-1]) # remove '
if orig_macro_number in final_macro_numbers:
run_macro_number = final_macro_numbers[orig_macro_number]
ui.report_info("Running macro: " + run_macro_name + " (" + str(run_macro_number) + ")")
lc.run_macro(run_macro_number)
else:
ui.report_verbose("Skipping run macro: " + str(orig_macro_number) + " (not found)")
else:
ui.report_verbose("Skipping run macro: " + str(run_macro_name) + " (not found)")
else:
lc.run_macro(macro_run_number)
#def print_device_macros():
# macros = lc.get_macros()
# ui.report_info("Macros on device:")
# for macro in macros:
# ui.report_info_alt(macro)
############################################################################
def setup():
initialize()
introduction()
def loop():
if upload_programs():
run_default_macro()
summary()
sys.exit()
if __name__ == '__main__':
setup()
if print_macros:
pu.print_device_macros()
sys.exit()
while True:
# try:
loop()
# except KeyboardInterrupt:
# sys.exit("\nExiting...\n")
# except Exception:
# raise
```
#### File: linkit/python/selfdemo.py
```python
import serial
import time
import random
s = None
def flush_input():
s.read(s.inWaiting())
def wait_for_ack():
while s.inWaiting() == 0:
pass
flush_input()
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s
s = serial.Serial("/dev/ttyS0", 115200)
def loop():
command("demo")
time.sleep(30)
if __name__ == '__main__':
setup()
while True:
loop()
```
#### File: linkit/python/test1.py
```python
import serial
import time
s = None
def setup():
global s
s = serial.Serial("/dev/ttyS0", 57600)
s.write("erase\0".encode())
def loop():
s.write("red\0".encode())
time.sleep(1)
s.write("green\0".encode())
time.sleep(1)
s.write("blue\0".encode())
time.sleep(1)
s.write("orange\0".encode())
time.sleep(1)
if __name__ == '__main__':
setup()
while True:
loop()
```
#### File: linkit/python/uno_tester.py
```python
import sys
import argparse
import os
import time
#import datetime
import terminal_colors as tc
import app_ui as ui
import utils
import led_command as lc
poll_frequency = 20
long_press = 50
too_short_press = 3
short_press_tone = 2800
short_press_duration = 50
alt_press_tone = 2200
alt_press_duration = 50
long_press_tone = 3100
long_press_duration = 200
# ----------------------------------------
def begin():
lc.begin(verbose_mode);
lc.stop_all()
lc.command("4,0:cfg:1:cnt:3:cnt")
lc.command_str("3,-1,0:key:0:set:" + str(long_press_tone) + "," + str(long_press_duration) + ":ton")
#lc.command("3,-1,-1:key")
def short_beep():
lc.command(str(short_press_tone) + "," + str(short_press_duration) + ":ton")
def alt_beep():
lc.command(str(alt_press_tone) + "," + str(alt_press_duration) + ":ton")
def long_beep():
lc.command(str(long_press_tone) + "," + str(long_press_duration) + ":ton")
def long_beep2():
lc.command(str(long_press_tone) + "," + str(long_press_duration) + ":ton")
time.sleep(long_press_duration / 1000.0)
lc.command(str(long_press_tone) + "," + str(long_press_duration) + ":ton")
num_keys = 16
num_rows = 4
num_cols = 4
global keys
keys = [False for i in range(num_keys)]
colors = ["blu", "blu", "blu", "blu", "grn", "grn", "grn", "grn", "yel", "yel", "yel", "yel", "red", "red", "red", "red"]
effects = ["" for n in range(num_keys)]
positions = [0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15]
column_selected = [False, False, False, False]
no_effect = ""
def set_column_effect(column, effect):
for row in range(num_rows):
index = (row * num_cols) + column
effects[index] = effect
def set_column_blink(column):
for row in range(num_rows):
index = (row * num_cols) + column
effects[index] = "bl" + str(column + 1)
def clear_column(column):
column_selected[column] = False
for row in range(num_rows):
index = (row * num_cols) + column
keys[index] = False
effects[index] = no_effect
def on_command_key(key, long_press):
column = key - 17
if long_press:
if column_selected[column]:
column_selected[column] = False
set_column_effect(column, no_effect)
else:
column_selected[column] = True
set_column_blink(column)
else:
clear_column(column)
long_beep()
render()
def render():
# lc.attention(False);
lc.command("::pau")
lc.push_command("era:")
for i in range(num_keys):
if keys[i]:
lc.push_command(str(positions[i]) + ":pos:" + colors[i] + ":" + effects[i] + ":rst:")
lc.push_command("1,1:flu")
lc.push_command()
lc.command("1:cnt")
def on_key(key, long_press):
global keys
if key > 16:
return on_command_key(key, long_press)
key -= 1
if keys[key]:
keys[key] = False
alt_beep()
else:
keys[key] = True
short_beep()
render()
def run():
sleep_time = 1.0 / poll_frequency
while(True):
time.sleep(sleep_time)
result = lc.command_str("4:key")
args = result.split(",")
if len(args) < 2:
continue
key = int(args[0])
if key:
count = int(args[1])
if count <= too_short_press:
continue
on_key(key, True if count >= long_press else False)
def run_test():
sleep_time = 1.0 / poll_frequency
while(True):
time.sleep(sleep_time)
result = lc.command_str("4:key")
args = result.split(",")
if len(args) < 2:
continue
key = int(args[0])
count = int(args[1])
if key:
if count <= too_short_press:
print "debounce"
continue
press = ""
if count >= long_press:
lc.command(str(long_press_tone) + "," + str(long_press_duration) + ":ton")
press = "long"
else:
lc.command(str(short_press_tone) + "," + str(short_press_duration) + ":ton")
press = "short"
print "Key: " + str(key) + " " + press + " press"
# ----------------------------------------
# ========================================
global app_description, verbose_mode, quiet_mode
app_description = None
verbose_mode = None
quiet_mode = None
def get_options():
global app_description, verbose_mode
app_description = "(application template) - Apollo Lighting System v.0.1 1-0-2019"
parser = argparse.ArgumentParser(description=app_description)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="display verbose info (False)")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", help="don't use terminal colors (False)")
args = parser.parse_args()
verbose_mode = args.verbose
quiet_mode = args.quiet
def validate_options():
pass
def introduction():
ui.app_description(app_description)
ui.report_verbose("verbose mode")
ui.report_verbose()
def initialize():
get_options()
validate_options()
tc.begin(quiet_mode)
ui.begin(verbose_mode, quiet_mode)
begin()
introduction()
def loop():
run()
# ========================================
# ========================================
if __name__ == '__main__':
initialize()
try:
loop()
except KeyboardInterrupt:
sys.exit("\nExiting...\n")
except Exception:
raise
``` |
{
"source": "jhogstrom/pkglic",
"score": 2
} |
#### File: pkglic/pkglic/pkglic.py
```python
import os
import sys
import requests
import json
import argparse
import logging
import datetime
import re
from typing import List, Type
import concurrent.futures
from lxml import etree
from collections import defaultdict
from jinja2 import Template
# from pprint import pprint
try:
from .authinfo import PROGRAM_NAME, AUTHOR, VERSION
except ImportError:
from authinfo import PROGRAM_NAME, AUTHOR, VERSION
SORTORDER = {
0: lambda x: [x.type(), x.name],
1: lambda x: [x.license, x.name],
2: lambda x: [x.type(), x.license],
3: "g"
}
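# Note (added): entries 0-2 are sort-key lambdas; the string "g" is a sentinel that
# print_package_info() checks for in order to group output by license instead of sorting.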
logger = logging.getLogger(PROGRAM_NAME)
logger.setLevel(logging.DEBUG)
class PackageInfo:
def __init__(self, name: str, version: str, filename: str):
self.name = name
self.version = version
self.filename = filename
self.license = "NOT_DOWNLOADED"
self.licenseurl = ""
self.author = None
self.author_email = None
self.home_page = None
self.summary = None
self.whitelisted = False
self.remapped = False
def __str__(self):
name = self.name
if self.version is not None:
name += f" {self.version}"
props = []
if self.licenseurl:
props.append(self.licenseurl)
if self.remapped:
props.append(f"(remapped from '{self.orglicense}')")
if self.whitelisted:
props.append("(whitelisted)")
return f"[{self.type()}] {name:30} {self.license} {' '.join(props)}".strip()
def clean_license(self, license: str) -> str:
MAXLEN = 50
license = license.replace('"', '')
if license == "":
return "NOT_SPECIFIED"
if license.startswith("GNU LESSER GENERAL PUBLIC LICENSE"):
return "LGPL"
if len(license) > MAXLEN:
return license[:MAXLEN] + "..."
return license
def asjson(self):
return {
"name": self.name,
"version": self.version,
"license": self.license,
"licenseurl": self.licenseurl,
"author": self.author,
"author_email": self.author_email,
"home_page": self.home_page,
"summary": self.summary
}
class NpmPackageInfo(PackageInfo):
@property
def url(self) -> str:
return f"https://registry.npmjs.org/{self.name}/{self.version}"
@classmethod
def type(self) -> str:
return "js"
def update_metadata(self, s: str) -> None:
d = json.loads(s)
self.license = self.clean_license(d.get("license", "NOT_SPECIFIED"))
author = d.get("author")
if author is not None:
self.author = author.get("name")
self.author_email = author.get("email")
self.home_page = d.get("home_page")
self.summary = d.get("description")
@classmethod
def can_parse(self, filename: str) -> bool:
return "package.json" in filename
@classmethod
def parse(self, filename: str) -> List[PackageInfo]:
"""
Create information for all packages found in filename.
Filename is expected to be in the package.json format.
Args:
filename (str): Name of file to read.
Returns:
list: List of PackageInfo instances.
"""
def clean_version(v: str) -> str:
# https://docs.npmjs.com/about-semantic-versioning
return v.replace(".x", ".0").replace("~", "").replace("^", "")
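# Illustrative examples of the simplification above (added; version strings are assumed,
# not taken from a real package.json):
#   "^1.2.3"  -> "1.2.3"
#   "~2.0.x"  -> "2.0.0"
# Full semver ranges such as ">=1.0.0 <2.0.0" are not handled here.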
dependencies = json.load(open(filename)).get("dependencies", {})
return [NpmPackageInfo(name, clean_version(version), filename) for name, version in dependencies.items()]
class PythonPackageInfo(PackageInfo):
@property
def url(self) -> str:
if self.version is not None:
return f"https://pypi.org/pypi/{self.name}/{self.version}/json"
return f"https://pypi.org/pypi/{self.name}/json"
@classmethod
def type(self) -> str:
return "py"
def update_metadata(self, s: str) -> None:
d = json.loads(s)
self.license = self.clean_license(d.get("info", {}).get("license", "NOT_SPECIFIED"))
self.version = d.get("info", {}).get("version", "??")
self.author = d.get("info", {}).get("author")
self.author_email = d.get("info", {}).get("author_email")
self.home_page = d.get("info", {}).get("home_page")
self.summary = d.get("info", {}).get("summary")
@classmethod
def can_parse(self, filename: str) -> bool:
return "requirements.txt" in filename
@classmethod
def parse(self, filename: str) -> List[PackageInfo]:
"""
Create information for all packages found in filename.
Filename is expected to be in the requirements.txt format.
Args:
filename (str): Name of file to read.
Returns:
list: List of PackageInfo instances.
"""
files = [_.split("#")[0].strip() for _ in open(filename).readlines() if not _.startswith("#")]
fetchinfo = []
name_regexp = "(.*)[=>~]="
version_regexp = "[=>~]=(.*)"
re_name = re.compile(name_regexp)
re_version = re.compile(version_regexp)
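# Illustrative (added; the package name is an assumed example): for a requirements line
# such as "requests>=2.25.1 ; python_version>'3.5'", the splits in the loop below strip
# the environment marker and extras, and the two regexes yield name="requests" and
# version="2.25.1".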
for f in files:
f = f.split(";")[0].split(",")[0].split("[")[0]
name = re_name.search(f)
version = re_version.search(f)
if version is not None:
version = version.group(1).strip()
if name is None:
name = f.strip()
else:
name = name.group(1).strip()
fetchinfo.append(PythonPackageInfo(name, version, filename))
return fetchinfo
class CSharpPackageInfo(PackageInfo):
@property
def url(self) -> str:
# https://docs.microsoft.com/en-us/nuget/api/package-base-address-resource
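# Example of the URL shape produced below (added; package name and version are assumed
# for illustration): "Newtonsoft.Json" 12.0.3 resolves to
# https://api.nuget.org/v3-flatcontainer/newtonsoft.json/12.0.3/newtonsoft.json.nuspec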
return f"https://api.nuget.org/v3-flatcontainer/{self.name}/{self.version}/{self.name}.nuspec".lower()
@classmethod
def type(self) -> str:
return "cs"
@classmethod
def can_parse(self, filename: str) -> bool:
return ".csproj" in filename
@classmethod
def parse(self, filename: str) -> List[PackageInfo]:
"""
Create information for all packages found in filename.
Filename is expected to be in the *.csproj format.
Args:
filename (str): Name of file to read.
Returns:
list: List of PackageInfo instances.
"""
doc = etree.parse(filename)
packagenode = doc.xpath("//PackageReference")
return [CSharpPackageInfo(
n.xpath("./@Include")[0],
n.xpath("./@Version")[0],
filename) for n in packagenode]
def update_metadata(self, s: str) -> None:
# https://docs.microsoft.com/en-us/nuget/reference/nuspec
# print(s)
doc = etree.XML(s)
#
# There is a namespace defined, but not explicitly used.
# This made all my xpath queries return None until I mapped the
# None-namespace to a proper (but arbitrary) prefix, and used that prefix
# when querying.
# After downloading a bunch of packages, it turned out that not all
# nuspec files contain the namespace definition, hence the conditional
# insertion of the prefixed namespace (and use of prefix).
#
nsmap = {}
namespace = ""
if None in doc.nsmap:
nsmap["nuget"] = doc.nsmap[None]
namespace = "nuget:"
# licenseUrl is deprecated, but there are still traces of it in many packages
licenseurls = doc.xpath(f"//{namespace}licenseUrl/text()", namespaces=nsmap)
if len(licenseurls) > 0:
self.licenseurl = licenseurls[0]
license = doc.xpath(f"//{namespace}license", namespaces=nsmap)
if len(license) > 0:
lictype = license[0].xpath("./@type")[0]
licvalue = license[0].xpath("./text()")[0]
if lictype == "expression":
self.license = self.clean_license(licvalue)
else:
self.license = f"{lictype}: {licvalue}"
else:
self.license = "NOT_SPECIFIED"
self.author = (doc.xpath(f"//{namespace}authors/text()", namespaces=nsmap) + [None])[0]
self.home_page = (doc.xpath(f"//{namespace}projectUrl/text()", namespaces=nsmap) + [None])[0]
self.summary = (doc.xpath(f"//{namespace}description/text()", namespaces=nsmap) + [None])[0]
class NugetPackageInfo(CSharpPackageInfo):
@classmethod
def can_parse(self, filename: str) -> bool:
return "packages.config" in filename
@classmethod
def parse(self, filename: str) -> List[PackageInfo]:
"""
Create information for all packages found in filename.
Filename is expected to be in the packages.config format.
Args:
filename (str): Name of file to read.
Returns:
list: List of PackageInfo instances.
"""
doc = etree.parse(filename)
packagenode = doc.xpath("//package")
return [CSharpPackageInfo(
n.xpath("./@id")[0],
n.xpath("./@version")[0],
filename) for n in packagenode]
#
# Register parser classes here to make them participate in the fun.
#
PARSERS: List[Type] = [
CSharpPackageInfo,
PythonPackageInfo,
NpmPackageInfo,
NugetPackageInfo]
def update_package_info(package_info: PackageInfo, verbose: bool) -> None:
"""
Fetch metadata for package_info and update the license property.
Args:
package_info (PackageInfo): Instance containing package information.
verbose (bool): Increase verbosity if True.
"""
if verbose:
print(f"Downloading {package_info.name} from {package_info.filename}.")
r = requests.get(package_info.url)
if r.status_code == 404:
logger.warning(f"{package_info.name} - not found at {package_info.url}")
package_info.license = "404_NOT_FOUND"
return
package_info.update_metadata(r.content)
def fetch_package_infos(fetchinfo: List[PackageInfo], verbose: bool) -> None:
"""
Initiate workers that fetch metadata for all packages.
Args:
fetchinfo (list): List of packages to fetch metadata for.
verbose (bool): Increase verbosity if True.
"""
if not verbose:
print("Fetching package meta data...")
with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
futures = [executor.submit(update_package_info, info, verbose) for info in fetchinfo]
[f.result() for f in futures]
print()
def get_parser(filename: str, default_type: str) -> Type:
"""
Get a parser for the file `filename`.
Args:
filename (str): Path to file.
default_type (str): Default if not possible to determine type based on name.
Returns:
Returns a parser that can parse the file type.
"""
for p in PARSERS:
if p.can_parse(filename):
return p
for p in PARSERS:
if p.type() == default_type:
return p
def acquire_package_info(
filenames: List[str],
default_type: str,
verbose: bool,
exclude_packages: List[str],
make_unique: bool) -> List[PackageInfo]:
"""
Download the metadata for all packages.
Args:
default_type (str): Default type if not possible to determine from file names.
verbose (bool): Increase verbosity if True.
exclude_packages: (List[str]): List of packages to exclude from checking.
Returns:
list: List of PackageInfo instances.
"""
packages = []
for filename in filenames:
parser = get_parser(filename, default_type)
packages += parser.parse(filename)
packages = [p for p in packages if p.name not in exclude_packages]
if make_unique:
unique = []
for p in packages:
if all([_.name != p.name for _ in unique]):
unique.append(p)
print(f"Eliminated {len(packages) - len(unique)} duplicates based on name.")
packages = unique
fetch_package_infos(packages, verbose)
return packages
def print_package_info(packages: List[PackageInfo], sortkey) -> None:
"""
Print the package info ordered by the lambda function sortkey.
Args:
packages (list): List of PackageInfo instances
sortkey: Lambda-function used to sort the output.
"""
if sortkey == "g":
groups = defaultdict(list)
for p in packages:
groups[f"{p.license} {p.licenseurl}"].append(p)
for key, plist in groups.items():
print(f"{key}")
for p in plist:
print(f"\t* {p}")
print()
else:
for package in sorted(packages, key=sortkey):
print(package)
print(f"\n{len(packages)} packages checked.")
def detect_unwanted_licenses(packages: List[PackageInfo], unwanted_licenses: List[str]) -> bool:
"""
Detect if any of the licenses are 'unwanted' and if so print them.
Args:
packages (List[PackageInfo]): List of PackageInfo instances.
unwanted_licenses (List[str]): List of licenses that are unwanted.
Returns:
bool: Returns true if there are unwanted licenses present
"""
unwanted_upper = [_.upper() for _ in unwanted_licenses]
unwanted = [p for p in packages if p.license in unwanted_upper and not p.whitelisted]
if unwanted:
print("\nUnwanted license(s) detected!")
for p in sorted(unwanted, key=SORTORDER[0]):
print(f"[{p.type()}] '{p.name}' from {p.filename} uses unwanted license '{p.license}'.")
return True
return False
def init_argparse() -> argparse.ArgumentParser:
"""
Initiate argument parser.
"""
parser = argparse.ArgumentParser(prog=PROGRAM_NAME, fromfile_prefix_chars='@')
parser.add_argument(
'-f', "--files",
metavar='file',
action='append',
required=True,
help='input files to scan.')
parser.add_argument(
"-t", "--type",
choices=[p.type() for p in PARSERS],
default=None,
help="Assume <type> for all --files if not guessable by filename.")
parser.add_argument(
"--uniq",
action="store_true",
help="Remove duplicate packages.")
parser.add_argument(
"-x", "--exclude",
metavar="file|package",
action='append',
default=[],
help="Do not check (or list) excluded packages.")
parser.add_argument(
"-u", "--unwanted",
metavar="package",
action='append',
default=[],
help="Exit with errorlevel on these license types.")
parser.add_argument(
"-w", "--whitelist",
metavar="file|package",
action='append',
default=[],
help="Read whitelisted packages form <file>.")
parser.add_argument(
"-o", "--order",
type=int,
choices=SORTORDER.keys(),
default=list(SORTORDER.keys())[0],
help="Which fields to use to sort output; 0: type, name; 1: license, name; 2: type, license; 3: group by license.")  # noqa: E501
parser.add_argument(
"--json",
metavar="file",
default=None,
help="Output as json-string to <file>.")
parser.add_argument(
"--credits",
metavar="file",
default=None,
help="Generate a credits file.")
parser.add_argument(
"--creditstemplate",
metavar="file",
default=None,
help="Template used to generate credits file.")
parser.add_argument(
"-v", "--verbose",
action="store_true",
help="Increase verbosity.")
# Automatically add the parameter file args.txt if it exists.
if os.path.exists("args.txt") and "@args.txt" not in sys.argv:
sys.argv.append("@args.txt")
return parser
def print_packages_to_json(packages: List[PackageInfo], filename: str) -> None:
"""
Generate a json-string with all packages and write to 'filename'.
Args:
packages (List[PackageInfo]): List of PackageInfo
filename (str): File to write to.
"""
if filename is None:
return
res = {
"generator": f"{PROGRAM_NAME} {VERSION} (c) {AUTHOR} 2021",
"generated": datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),
"packages": [p.asjson() for p in packages]
}
with open(filename, "w") as f:
f.write(json.dumps(res))
print(f"\nJson-data written to '{filename}'.")
def get_whitelisted(whitelisted: List[str]) -> dict:
"""
Examine the content of whitelisted and return a json structure with whitelisting.
Args:
whitelisted (List[str]): List of strings with either files or packages.
Returns:
dict: Dictionary with whitelist info.
"""
def update_res(package: str) -> str:
if package.strip().startswith("#"):
return
parts = package.split(":")
if len(parts) == 1:
parts.append("*")
licmap = parts[1].split("->")
if len(licmap) == 1:
licmap.append("")
res[parts[0].strip()] = {"expect": licmap[0].strip(), "mapto": licmap[1].strip()}
if not whitelisted:
logger.debug("No whitelisted packages.")
return None
res = {}
for w in whitelisted:
if not os.path.exists(w):
update_res(w)
else:
with open(w) as f:
try:
res.update(json.load(f))
except json.decoder.JSONDecodeError:
res = {}
logger.debug(f"{w} not in json format. Attempting line-by-line.")
f.seek(0)
for line in f.readlines():
if line.strip().startswith("#"):
continue
parts = line.split(":")
if len(parts) == 1:
parts.append("*")
licmap = parts[1].split("->")
if len(licmap) == 1:
licmap.append("")
res[parts[0].strip()] = {"expect": licmap[0].strip(), "mapto": licmap[1].strip()}
return res
except Exception as e:
logger.exception(e)
return res
def apply_whitelisting(packages: List[PackageInfo], whitelisting: dict) -> List[PackageInfo]:
"""
Apply the whitelisting information and update `packages`.
Args:
packages (List[PackageInfo]): List of PackageInfo instances.
whitelisting (dict): Whitelisting information.
Returns:
List[PackageInfo]: Updated list of PackageInfo with whitelisting information applied.
"""
if whitelisting is None:
return packages
res = []
for p in packages:
if p.name not in whitelisting:
res.append(p)
continue
whitelist = whitelisting[p.name]
if whitelist.get("expect", "*") == "*" and whitelist.get("mapto", "") == "":
p.whitelisted = True
elif whitelist.get("expect", "*") == "*":
p.whitelisted = True
p.orglicense = p.license
p.license = whitelist.get("mapto", "")
p.remapped = True
elif p.license == whitelist.get("expect", "*") and whitelist.get("mapto", "") == "":
p.whitelisted = True
elif p.license == whitelist.get("expect", "*"):
p.whitelisted = True
p.orglicense = p.license
p.license = whitelist.get("mapto", "")
p.remapped = True
res.append(p)
return res
def get_excluded(exclude_list: List[str]) -> List[str]:
"""
Retrieve excluded packages from `exclude_list`.
Args:
exclude_list (List[str]): Filenames containing packages to exclude or individual packages.
Returns:
List[str]: List of packages not to check.
"""
res = []
for exclude in exclude_list:
if os.path.exists(exclude):
with open(exclude) as f:
res.extend([p.strip() for p in f.readlines() if not p.strip().startswith("#")])
else:
if "," in exclude:
res.extend([p.strip() for p in exclude.split(",")])
else:
res.append(exclude)
return res
def verify_file(filename: str, *, required: bool = True, argname: str = "") -> None:
"""
Verify the filename is set and if required also existing. If validation fails the
application with terminate with an error code.
Args:
filename (str): Name of file
required (bool, optional): Indicates if the filename must be set. Defaults to True.
argname (str, optional): Name of parameter - used in error message. Defaults to "".
"""
if filename is None:
if required:
print(f"Missing parameter '{argname}'. Cannot continue.")
exit(1)
return
if not os.path.exists(filename):
print(f"The file '{filename}' could not be found. Terminating.")
exit(1)
def create_credits_file(packages: List[PackageInfo], sortkey, outfile: str, templatefile: str) -> None:
"""
Create a summary file based on `templatefile` and write it to `outfile`.
Data will be sorted according to sortkey.
Args:
packages (List[PackageInfo]): List of packages.
sortkey ([type]): Lambda to sort the packages.
outfile (str): File into which to write the output.
templatefile (str): Template used for expansion.
"""
with open(templatefile) as f:
template = Template(f.read())
data = {
"packages": sorted(packages, key=sortkey),
"program": PROGRAM_NAME,
"author": AUTHOR}
with open(outfile, "w") as f:
f.write(template.render(data))
print(f"Credits written to '{outfile}'.")
def validate_args(args) -> None:
"""
Validates command lline arguments.
Args:
args: The command line arguments namespace.
"""
for f in args.files:
verify_file(f)
if args.credits is not None:
if args.creditstemplate is None:
here = os.path.abspath(os.path.dirname(__file__))
args.creditstemplate = os.path.join(here, "creditstemplate.txt")
verify_file(args.creditstemplate, argname="creditstemplate")
if args.creditstemplate is not None and args.credits is None:
print(f"Need --credits <file> if --creditstemplate is specified ('{args.creditstemplate}').")
exit(1)
def main():
"""
Main function of the script.
"""
print(f"{PROGRAM_NAME} {VERSION} - (c) {AUTHOR} 2021.")
print(f"Executed {datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}.\n")
parser = init_argparse()
args = parser.parse_args()
validate_args(args)
exclude_packages = get_excluded(args.exclude)
whitelisting = get_whitelisted(args.whitelist)
packages = acquire_package_info(
args.files,
args.type,
args.verbose,
exclude_packages,
args.uniq)
packages = apply_whitelisting(packages, whitelisting)
print_package_info(packages, SORTORDER[args.order])
print_packages_to_json(packages, args.json)
if args.credits is not None:
create_credits_file(
packages,
SORTORDER[args.order if args.order in [0, 1, 2] else 0],
args.credits,
args.creditstemplate)
# Do the detection last. This means the json-file and credits file *will* be generated even if there
# are packages with unwanted licenses.
if detect_unwanted_licenses(packages, args.unwanted):
print("Exiting with error level!")
exit(1)
if __name__ == '__main__':
main()
``` |
{
"source": "jhol0613/pacbot2018",
"score": 3
} |
#### File: pacbot2018/python/motorModule.py
```python
import os
import robomodules as rm
from messages import *
import RPi.GPIO as GPIO
import time
import signal
import sys
ADDRESS = os.environ.get("BIND_ADDRESS","localhost")
PORT = os.environ.get("BIND_PORT", 11293)
FREQUENCY = 0
LEFT_PWM = 32
LEFT_1 = 36
LEFT_2 = 38
RIGHT_PWM = 33
RIGHT_1 = 35
RIGHT_2 = 37
BACKWARD = 0
FORWARD = 1
LEFT_MOTOR = 0
RIGHT_MOTOR = 1
class MotorModule(rm.ProtoModule):
def __init__(self, addr, port):
print("Initializing Motors...")
self.subscriptions = [MsgType.TWIST]
super().__init__(addr, port, message_buffers, MsgType, FREQUENCY, self.subscriptions)
self.initializeMotors()
self.leftSpeed = 0
self.rightSpeed = 0
self.leftDir = 0
self.rightDir = 0
print("Motors Initialized")
def msg_received(self, msg, msg_type):
# This gets called whenever any message is received
if msg_type == MsgType.TWIST:
self.processTwist(msg.velocity, msg.omega)
def tick(self):
# this function will get called in a loop with FREQUENCY frequency
return
def initializeMotors(self):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LEFT_PWM, GPIO.OUT)
GPIO.setup(LEFT_1, GPIO.OUT)
GPIO.setup(LEFT_2, GPIO.OUT)
GPIO.setup(RIGHT_PWM, GPIO.OUT)
GPIO.setup(RIGHT_1, GPIO.OUT)
GPIO.setup(RIGHT_2, GPIO.OUT)
self.right_pwm = GPIO.PWM(RIGHT_PWM, 100)
self.left_pwm = GPIO.PWM(LEFT_PWM, 100)
self.right_pwm.start(0)
self.left_pwm.start(0)
self.setDirection(LEFT_MOTOR, FORWARD)
self.setDirection(RIGHT_MOTOR, FORWARD)
time.sleep(1)
def setDirection(self, motor, direction):
if motor == LEFT_MOTOR:
if direction == FORWARD:
GPIO.output(LEFT_1, True)
GPIO.output(LEFT_2, False)
else:
GPIO.output(LEFT_1, False)
GPIO.output(LEFT_2, True)
else:
if direction == FORWARD:
GPIO.output(RIGHT_1, True)
GPIO.output(RIGHT_2, False)
else:
GPIO.output(RIGHT_1, False)
GPIO.output(RIGHT_2, True)
# Takes linear and rotational values and converts into signals for left and right motor
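# Added note: this is plain differential-drive mixing - the rotational term is added to the
# left wheel and subtracted from the right. Assumed example: linSpeed=50, rotSpeed=20 gives
# left=70 forward and right=30 forward, i.e. an arc toward the right (assuming the usual
# left/right wheel placement).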
def processTwist(self, linSpeed, rotSpeed):
leftSpeed = linSpeed
rightSpeed = linSpeed
leftSpeed += rotSpeed
rightSpeed -= rotSpeed
if leftSpeed >= 0:
self.setDirection(LEFT_MOTOR, FORWARD)
else:
self.setDirection(LEFT_MOTOR, BACKWARD)
if rightSpeed >= 0:
self.setDirection(RIGHT_MOTOR, FORWARD)
else:
self.setDirection(RIGHT_MOTOR, BACKWARD)
# If speeds beyond limits, set to limits. Otherwise set to calculated speed
if abs(leftSpeed) > 100:
self.left_pwm.ChangeDutyCycle(100)
else:
self.left_pwm.ChangeDutyCycle(abs(leftSpeed))
if abs(rightSpeed) > 100:
self.right_pwm.ChangeDutyCycle(100)
else:
self.right_pwm.ChangeDutyCycle(abs(rightSpeed))
def destroy(*args):
GPIO.cleanup()
print("Motor module safely terminated")
sys.exit()
def main():
signal.signal(signal.SIGINT, destroy)
signal.signal(signal.SIGTERM, destroy)
module = MotorModule(ADDRESS, PORT)
module.run()
if __name__ == "__main__":
main()
```
#### File: robomodules/comm/asyncProto.py
```python
import asyncio
from .constants import *
from robomodules.comm import pack_msg
class AsyncProto(asyncio.Protocol):
def __init__(self):
self.transport = None
def connection_made(self, transport):
self.transport = transport
self.__length = 0
self.__buffer = b""
self.__msg_type = -1
def connection_lost(self, exception):
if exception:
print(repr(exception))
def data_received(self, data):
self.__buffer += data
while self.__buffer:
if not self.__length and len(self.__buffer) > SIZE_HEADER.size:
magic, msg_type, self.__length = SIZE_HEADER.unpack(
self.__buffer[:SIZE_HEADER.size])
self.__buffer = self.__buffer[SIZE_HEADER.size:]
self.__msg_type = msg_type
if magic != MAGIC_HEADER:
self.transport.close()
if hasattr(self, "connect"):
self.loop.call_soon(self.connect)
return
elif self.__length and len(self.__buffer) >= self.__length:
self.msg_received(self.__buffer[:self.__length], self.__msg_type)
self.__buffer = self.__buffer[self.__length:]
self.__length = 0
self.__msg_type = -1
else:
# Not enough data has been buffered and read to form a complete
# header or message
return
def write(self, msg, msg_type):
if self.transport:
self.transport.write(pack_msg(msg, msg_type))
def msg_received(self, data):
raise NotImplementedError()
```
#### File: pacbot2018/python/ultrasonicSensorModule.py
```python
import os, random
import robomodules as rm
from messages import *
import RPi.GPIO as GPIO
import time
import signal
import sys
ADDRESS = os.environ.get("BIND_ADDRESS","localhost")
PORT = os.environ.get("BIND_PORT", 11293)
FREQUENCY = 15
TIMEOUT_DISTANCE = 14 # Centimeters
TRIG_PINS = [7, 11, 15, 21, 23]
ECHO_PINS = [8, 12, 16, 22, 24]
# Indices for accessing trigger and echo pins
FRT_CTR = 0
FRT_LFT = 1
FRT_RGT = 2
REAR_LFT = 3
REAR_RGT = 4
class UltrasonicSensorModule(rm.ProtoModule):
def __init__(self, addr, port):
print("Initializing Ultrasonic Sensors...")
super().__init__(addr, port, message_buffers, MsgType, FREQUENCY)
self.initializeSensors()
print("Ultrasonic Sensors Initialized")
def msg_received(self, msg, msg_type):
# This gets called whenever any message is received
# This module only sends data, so we ignore incoming messages
return
def tick(self):
# this function will get called in a loop with FREQUENCY frequency
# start = time.time()
msg = UltrasonicArray()
msg.front_center = self.pulse(FRT_CTR)
msg.front_left = self.pulse(FRT_LFT)
msg.front_right = self.pulse(FRT_RGT)
msg.rear_left = self.pulse(REAR_LFT)
msg.rear_right = self.pulse(REAR_RGT)
# end = time.time()
# print("Measurement time: ", end-start)
msg = msg.SerializeToString()
self.write(msg, MsgType.ULTRASONIC_ARRAY)
# end = time.time()
# print("Tick time: ", end-start)
def initializeSensors(self):
GPIO.setmode(GPIO.BOARD)
for pin in TRIG_PINS:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, False)
for pin in ECHO_PINS:
GPIO.setup(pin, GPIO.IN)
time.sleep(1)
def pulse(self, sensor):
GPIO.output(TRIG_PINS[sensor], True)
time.sleep(0.00001)
GPIO.output(TRIG_PINS[sensor], False)
pulse_start = 0
distance = TIMEOUT_DISTANCE
while GPIO.input(ECHO_PINS[sensor])==0:
pulse_start = time.time()
distance = (time.time() - pulse_start) * 17150
if distance > TIMEOUT_DISTANCE:
return TIMEOUT_DISTANCE
while GPIO.input(ECHO_PINS[sensor])==1:
distance = (time.time() - pulse_start) * 17150
if distance > TIMEOUT_DISTANCE:
return TIMEOUT_DISTANCE
distance = round(distance, 2)
time.sleep(.001)
return distance
# Pulses 2 sensors at once to save time
# def pulse2(self, sensor1, sensor2):
# GPIO.output(TRIG_PINS[sensor1], True)
# time.sleep(0.00001)
# GPIO.output(TRIG_PINS[sensor1], False)
# GPIO.output(TRIG_PINS[sensor2], True)
# time.sleep(0.00001)
# GPIO.output(TRIG_PINS[sensor2], False)
# while GPIO.input(ECHO_PINS[sensor])==0:
# pulse_start = time.time()
# while GPIO.input(ECHO_PINS[sensor])==1:
# distance = (time.time() - pulse_start) * 17150
# if distance > TIMEOUT_DISTANCE:
# return TIMEOUT_DISTANCE
# distance = round(distance, 2)
# return distance
def destroy(*args):
GPIO.cleanup()
print("Ultrasonic sensor module safely terminated")
sys.exit()
def main():
signal.signal(signal.SIGINT, destroy)
signal.signal(signal.SIGTERM, destroy)
module = UltrasonicSensorModule(ADDRESS, PORT)
module.run()
if __name__ == "__main__":
main()
``` |
{
"source": "JhoLee/django-lecture_manager",
"score": 2
} |
#### File: django-lecture_manager/accounts/forms.py
```python
import django.contrib.auth.forms as auth_forms
from django import forms
from django.contrib.auth.models import User
from django.forms import ModelForm
from accounts.models import Profile
class SignupForm(auth_forms.UserCreationForm):
username = forms.EmailField(max_length=254, help_text='이메일 가입만 가능합니다.', label="ID")
name = forms.CharField(help_text='실명으로 적어주세요.', max_length=100, label='이름')
id_number = forms.IntegerField(help_text='숫자로만 적어주세요.', label='직번/학번')
ROLE_CHOICES = (
("0", '학생'),
('1', '교수'),
)
role = forms.ChoiceField(help_text='선택해주세요.', choices=ROLE_CHOICES, label='신분', initial=0)
class Meta:
model = User
fields = ('username', 'password1', 'password2', 'name', 'id_number', 'role',)
def __init__(self, *args, **kwargs):
super(auth_forms.UserCreationForm, self).__init__(*args, **kwargs)
self.fields['password1'].help_text = "최소 8자 이상"
# form-control
# Ref. https://stackoverflow.com/questions/31627253/django-modelform-with-bootstrap
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
})
class SigninForm(auth_forms.AuthenticationForm):
username = forms.EmailField(widget=forms.EmailInput(attrs={'autofocus': True}))
# form=control
# Ref. https://stackoverflow.com/questions/31627253/django-modelform-with-bootstrap
def __init__(self, *args, **kwargs):
super(SigninForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
})
class UpdateUserProfileForm(ModelForm):
class Meta:
model = Profile
fields = ('id_number', 'name',)
def __init__(self, *args, **kwargs):
super(UpdateUserProfileForm, self).__init__(*args, **kwargs)
self.fields['id_number'].label = "직번/학번"
self.fields['name'].label = "이름"
# form-control
# Ref. https://stackoverflow.com/questions/31627253/django-modelform-with-bootstrap
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
})
class ChangePasswordForm(auth_forms.PasswordChangeForm):
# form=control
# Ref. https://stackoverflow.com/questions/31627253/django-modelform-with-bootstrap
def __init__(self, *args, **kwargs):
super(ChangePasswordForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
})
```
#### File: django-lecture_manager/lecture/forms.py
```python
from django import forms
from django.forms import ModelForm
from .models import Course, Notice, Enrollment, NoticeComment
class CourseForm(forms.ModelForm):
class Meta:
model = Course
fields = ['name', 'semester', 'year', 'description', ]
def __init__(self, *args, **kwargs):
super(CourseForm, self).__init__(*args, **kwargs)
self.fields['name'].label = "강의명"
self.fields['semester'].label = "학기"
self.fields['year'].label = "년도"
self.fields['description'].label = "강의개요"
def save(self, commit=True):
self.instance = super().save(commit=commit)
return self.instance
class NoticeForm(forms.ModelForm):
class Meta:
model = Notice
fields = ['title', 'content', 'file', ]
def __init__(self, *args, **kwargs):
super(NoticeForm, self).__init__(*args, **kwargs)
self.fields['title'].label = "제목"
self.fields['content'].label = "내용"
self.fields['file'].label = "첨부파일"
self.fields['file'].required = False
def save(self, commit=True):
self.instance = super().save(commit=commit)
return self.instance
class NoticeCommentForm(forms.ModelForm):
class Meta:
model = NoticeComment
fields = ['title', 'content', 'file', ]
def __init__(self, *args, **kwargs):
super(NoticeCommentForm, self).__init__(*args, **kwargs)
self.fields['title'].label = "제목"
self.fields['content'].label = "내용"
self.fields['file'].label = "첨부파일"
self.fields['file'].required = False
def save(self, commit=True):
self.instance = super().save(commit=commit)
return self.instance
```
#### File: django-lecture_manager/lecture/views.py
```python
from django.contrib import messages
from django.shortcuts import render, get_object_or_404, redirect
from lecture.forms import CourseForm, NoticeForm, NoticeCommentForm
from lecture.models import Course, Enrollment, Notice, NoticeComment
def index(request):
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
context = {}
if check_role(user) == "교수":
# 교수
my_courses = get_professors_courses(user)
context["my_courses"] = my_courses
context['course_count'] = len(my_courses)
return render(request, 'lecture/index_professor.html', context)
elif check_role(user) == "학생":
# get course list
my_courses = get_students_courses(user)
course_count = len(my_courses)
context["my_courses"] = my_courses
context["course_count"] = course_count
return render(request, 'lecture/index_student.html', context)
else:
message = 'unknown'
my_courses = []
def course_index(request, course_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, pk=course_id)
context["course"] = course
if check_role(user) == "학생":
enrollment = Enrollment.objects.filter(course=course)
if not enrollment.filter(student=user).exists():
return redirect('lecture:course_join', course_id)
if check_role(user) == "교수" and course.professor.id != user.id:
return redirect('lecture:index')
try:
notices = Notice.objects.all().filter(course=course)
notices = notices.order_by('-pub_dt')[:3]
except Notice.DoesNotExist:
notices = []
context["notices"] = notices
return render(request, 'lecture/course/course_index.html', context=context)
def course_create(request):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
if request.method == "POST":
course_form = CourseForm(request.POST)
if course_form.is_valid():
course = course_form.save(commit=False)
course.professor = user
course.save()
return redirect('lecture:course_index', course.id)
else:
messages.error(request, "무언가 잘못 되었습니다. 다시 시도해주세요.")
context['course_form'] = course_form
else:
course_form = CourseForm()
context['course_form'] = course_form
return render(request, 'lecture/course/course_create.html', context)
def course_update(request, course_id):
context = {}
course = get_object_or_404(Course, pk=course_id)
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
if request.user.id != course.professor.id:
return redirect('lecture:course_index', course_id)
context['course'] = course
context['user'] = user
if request.method == "POST":
course_form = CourseForm(request.POST or None, instance=course)
if course_form.is_valid():
new_course = course_form.save(commit=False)
new_course.professor = user
new_course.save()
messages.info(request, "강의 수정 완료!")
return redirect('lecture:course_index', course.id)
else:
course_form = CourseForm(instance=course)
context['course_form'] = course_form
return render(request, 'lecture/course/course_update.html', context)
def course_delete(request, course_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, id=course_id)
if request.user.id != course.professor.id:
return redirect('lecture:course_index', course_id)
course.delete()
messages.info(request, "강의 삭제 완료!")
return redirect('lecture:index')
def course_join(request, course_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
if check_role(user) == "교수":
messages.error(request, "권한이 없습니다.")
return redirect('lecture:index')
is_enrolled = Enrollment.objects.filter(student=user, course=course_id).exists()
if is_enrolled:
context['is_enrolled'] = True
return render(request, 'lecture/course/course_join.html', context)
else:
if request.method == "POST":
_id = request.POST['_id']
course = get_object_or_404(Course, pk=_id)
Enrollment.objects.create(course=course, student=user)
messages.info(request, '강의 참가 완료!')
return redirect('lecture:course_index', _id)
context['is_enrolled'] = False
course = get_object_or_404(Course, pk=course_id)
context['course'] = course
return render(request, 'lecture/course/course_join.html', context)
def notice_index(request, course_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, pk=course_id)
context['course'] = course
user = request.user
notices = Notice.objects.all().filter(course=course)
context['notices'] = notices
return render(request, 'lecture/notice/notice_index.html', context)
def notice_create(request, course_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, pk=course_id)
context['course'] = course
notices = Notice.objects.all().filter(course=course)
context['notices'] = notices
if request.method == "POST":
notice_form = NoticeForm(request.POST, request.FILES)
if notice_form.is_valid():
notice = notice_form.save(commit=False)
notice.publisher = user
notice.course = course
notice.save()
return redirect('lecture:notice_index', course.id)
else:
notice_form = NoticeForm()
context['notice_form'] = notice_form
return render(request, 'lecture/notice/notice_create.html', context)
def notice_read(request, course_id, notice_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, id=course_id)
notice = get_object_or_404(Notice, id=notice_id)
comments = get_notice_comment_list(request, course_id, notice_id)
context["user"] = user
context["course"] = course
context["notice"] = notice
context["comments"] = comments
return render(request, 'lecture/notice/notice_read.html', context=context)
def notice_update(request, course_id, notice_id):
context = {}
course = get_object_or_404(Course, pk=course_id)
notice = get_object_or_404(Notice, pk=notice_id)
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
if request.user.id != notice.publisher.id:
return redirect('lecture:notice_read', course_id, notice_id)
context["course"] = course
context["notice"] = notice
context["user"] = user
if request.method == "POST":
notice_update_form = NoticeForm(request.POST or None, request.FILES or None, instance=notice)
if notice_update_form.is_valid():
new_notice = notice_update_form.save(commit=False)
new_notice.course = course
new_notice.publisher = request.user
new_notice.save()
return redirect('lecture:notice_read', course.id, new_notice.id)
else:
notice_update_form = NoticeForm(instance=notice)
context["notice_update_form"] = notice_update_form
return render(request, 'lecture/notice/notice_update.html', context=context)
def notice_delete(request, course_id, notice_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, id=course_id)
notice = get_object_or_404(Notice, id=notice_id)
if request.user.id != notice.publisher.id:
return redirect('lecture:notice_read', course_id, notice_id)
notice.delete()
messages.info(request, "게시글 삭제 완료!")
return redirect('lecture:notice_index', course.id)
def get_notice_comment_list(request, course_id, notice_id):
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
notice = get_object_or_404(Notice, id=notice_id)
comments = NoticeComment.objects.all().filter(notice=notice)
return comments
def notice_comment_create(request, course_id, notice_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, pk=course_id)
notice = get_object_or_404(Notice, pk=notice_id)
context['course'] = course
context['notice'] = notice
user = request.user
if request.method == "POST":
comment_form = NoticeCommentForm(request.POST, request.FILES)
if comment_form.is_valid():
comment = comment_form.save(commit=False)
comment.writer = user
comment.notice = notice
comment.save()
return redirect('lecture:notice_read', course.id, notice.id)
else:
comment_form = NoticeCommentForm()
context['comment_form'] = comment_form
return render(request, 'lecture/notice/notice_comment_create.html', context)
def notice_comment_update(request, course_id, notice_id, comment_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, id=course_id)
notice = get_object_or_404(Notice, id=notice_id)
comment = get_object_or_404(NoticeComment, id=comment_id)
if request.user.id != comment.writer.id:
return redirect('lecture:notice_read', course_id, notice_id)
context["course"] = course
context["notice"] = notice
context["comment"] = comment
initial_data = {
'title': comment.title,
'content': comment.content,
'file': comment.file,
}
form = NoticeCommentForm(request.POST, request.FILES, initial=initial_data, instance=comment)
if request.method == "POST":
if form.is_valid():
comment = form.save(commit=False)
comment.writer = request.user
comment.notice = notice
comment.save()
return redirect('lecture:notice_read', course.id, notice.id)
else:
form = NoticeCommentForm(instance=comment)
context['comment_update_form'] = form
return render(request, 'lecture/notice/notice_comment_update.html', context=context)
def notice_comment_delete(request, course_id, notice_id, comment_id):
context = {}
user = request.user
if not check_if_authenticated(request):
return redirect('accounts:login')
course = get_object_or_404(Course, id=course_id)
notice = get_object_or_404(Notice, id=notice_id)
comment = get_object_or_404(NoticeComment, id=comment_id)
comment.delete()
return redirect('lecture:notice_read', course.id, notice_id)
def check_if_authenticated(request):
if request.user.is_authenticated is False:
messages.error(request, "로그인 하셔야 합니다.")
return False
else:
return True
def get_students_courses(student):
try:
my_courses = []
my_enrollments = Enrollment.objects.filter(
student=student.profile.id
)
for enrollment in my_enrollments:
my_courses.append(enrollment.course)
except Enrollment.DoesNotExist:
my_courses = []
return my_courses
def get_professors_courses(professor):
try:
my_courses = Course.objects.filter(
professor=professor.profile.id
)
except Course.DoesNotExist:
my_courses = []
return my_courses
def check_role(user):
if user.profile.role == 0:
return "학생"
elif user.profile.role == 1:
return "교수"
else:
return "unknown"
def course_search(request):
context = {}
user = request.user
if check_role(user) == "교수":
return redirect('lecture:index')
available_courses = get_available_course_list(user)
context['available_courses'] = available_courses
return render(request, 'lecture/search_course.html', context)
def get_available_course_list(user):
if check_role(user) == "교수":
return None
elif check_role(user) == "학생":
courses = Course.objects.all()
enroll = Enrollment.objects.all().filter(student=user)
for en in enroll:
courses = courses.exclude(pk=en.course.id)
return courses
``` |
{
"source": "JhoLee/ecampus-manager",
"score": 2
} |
#### File: ecampus-manager/sources/lectures.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.Qt import QMessageBox, QSize, QIcon
from PyQt5.QtWidgets import QDialog, QListWidgetItem
from main import show_messagebox
from PyQt5.QtGui import QStandardItemModel, QStandardItem
class Ui_Lectures(QDialog):
def __init__(self, manager):
super().__init__()
self.manager = manager
self.lectures = self.manager.lectures
self.items = QStandardItemModel()
self.setFixedSize(QSize(272, 200))
self.setWindowIcon(QIcon('../resources/breadzip.ico'))
self.is_clicked_selection = False
def setupUi(self):
self.setObjectName("Lectures")
self.resize(272, 200)
self.lst_lectures = QtWidgets.QListWidget(self)
self.lst_lectures.setGeometry(QtCore.QRect(10, 30, 251, 121))
font = QtGui.QFont()
font.setFamily("Malgun Gothic")
font.setPointSize(10)
self.lst_lectures.setFont(font)
self.lst_lectures.setObjectName("lst_lectures")
self.btn_select_subject = QtWidgets.QPushButton(self)
self.btn_select_subject.setGeometry(QtCore.QRect(180, 160, 81, 31))
font = QtGui.QFont()
font.setFamily("Malgun Gothic")
font.setPointSize(10)
self.btn_select_subject.setFont(font)
self.btn_select_subject.setObjectName("btn_start")
self.label_3 = QtWidgets.QLabel(self)
self.label_3.setGeometry(QtCore.QRect(10, 10, 251, 16))
font = QtGui.QFont()
font.setFamily("Malgun Gothic")
font.setPointSize(10)
self.label_3.setFont(font)
self.label_3.setText("<html><head/><body><p align=\"justify\">수강할 과목을 선택하십시오.</p></body></html>")
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setWordWrap(True)
self.label_3.setObjectName("label_3")
_translate = QtCore.QCoreApplication.translate
self.setWindowTitle(_translate("Lectures", "Dialog"))
self.btn_select_subject.setText(_translate("Lectures", "과목 선택"))
self.setWindowTitle(_translate("Login", "수강과목 선택 :: KNUT 빵셔틀"))
# self.items = QStandardItemModel()
# for lecture in self.lectures:
# self.items.appendRow(QStandardItem(lecture.text))
# self.lst_lectures.setModel(self.items)
for lec in self.lectures:
self.lst_lectures.addItem(lec.text)
QtCore.QMetaObject.connectSlotsByName(self)
self.btn_select_subject.clicked.connect(self.select)
def select(self):
self.is_clicked_selection = True
self.close()
```
#### File: ecampus-manager/sources/manager.py
```python
import datetime
import re
import sys
import time
from sources.utils import check_os, check_chrome_version, load_webdriver, load_auth_info
ECAMPUS_PATH = {
'MAIN': 'https://ecampus.ut.ac.kr',
'LECTURE_ROOM': 'https://ecampus.ut.ac.kr/lms/class/courseSchedule/doListView.dunet'
}
class EcampusManager(object):
def __init__(self, debug=False, show_chrome=False):
"""
:param debug:
:param show_chrome:
"""
self.__id = None
self.__pw = None
# system
self.os = check_os()
self.version = check_chrome_version(self.os)
# selenium
self.driver = load_webdriver(show_chrome)
self.main_window = None
# list
self.lecture = None
self.course = None
# current status
self.lectures = []
self.courses = [] # full list of courses
self.attendable_courses = [] # courses that still need to be attended
self.attendable_courses_info = []
# log
self.logs = []
self.log_level = 2
self.language = 'KOREAN'
self.msg = self.message()
if debug:
self.log_level = 3
else:
self.log_level = 2
@property
def id(self):
return self.__id
@id.setter
def id(self, value):
self.__id = value
@property
def pw(self):
return self.__pw
@pw.setter
def pw(self, value):
self.__pw = value
@property
def courses(self):
return self.__courses
@courses.setter
def courses(self, courses):
# store on a private attribute (as with __id/__pw above); assigning to
# self.courses here would re-enter the setter and recurse forever
self.__courses = courses
def log(self, message, level='INFO'):
"""
Push messages into a list(queue).
self.log_level define how deep to show.
:param message: message to log.
:param level: log level
:type message: str
:type level: str or int
"""
LOG_LEVEL = {'NONE': 0, 'WARN': 1, 'INFO': 2, 'DEBUG': 3}
if isinstance(level, str):
_level = LOG_LEVEL[level.upper()]
elif isinstance(level, int):
_level = level
else:
_level = 0
message = "({:%H:%M:%S}) [{}] {}".format(datetime.datetime.now(), level, message)
if _level <= self.log_level:
self.logs.append(message)
print(message)
def open_page(self, path):
self.driver.get(path)
window = self.driver.current_window_handle
return window
def open_main(self):
"""
Open main page (https://ecampus.ut.ac.kr)
After open, save this winodw to self.main_window
"""
if self.main_window is not None:
self.driver.switch_to.window(self.main_window)
time.sleep(1)
self.driver.get(ECAMPUS_PATH['MAIN'])
time.sleep(1.5)
self.main_window = self.driver.current_window_handle
self.log("Opened main page.", 'DEBUG')
def login(self):
"""
Login with self.id, self.pw
"""
try:
self.logout()
except:
pass
self.log("{} ID: {}".format(self.msg['LOGIN_TRY'], self.id), 'DEBUG')
input_id = self.driver.find_element_by_id('id')
input_pw = self.driver.find_element_by_id('pass')
input_id.send_keys(self.id)
input_pw.send_keys(self.pw)
self.driver.execute_script('login_proc()')
time.sleep(7)
self.log("{}, {}".format(self.msg['HELLO'], self.id))
def login_check(self):
self.log("Checking login...", 'debug')
msg = ''
try:
alert = self.driver.switch_to_alert()
msg += alert.text
self.log("Login Failed.", 'debug')
self.log(msg, 'debug')
alert.accept()
is_success = False
except:
self.log("Login Success!", 'debug')
is_success = True
return is_success, msg
def logout(self):
self.driver.switch_to.window(self.main_window)
time.sleep(1)
self.driver.find_element_by_id('btn_logout').click()
time.sleep(1)
self.log("{}".format(self.msg['LOGOUT_SUCCESS']), 'DEBUG')
def change_display_language(self, lang='english'):
"""
Change display-language into English to find elements by text.
:param lang:
"""
self.log("Changing display-language into English for qualified action...", 'debug')
time.sleep(1)
select_lang = self.driver.find_element_by_xpath("//select[@name='lang']/option[text()='ENGLISH']")
select_lang.click()
time.sleep(5)
self.log("Display-language has been changed to {}".format(lang), 'debug')
def get_lectures(self, year):
"""
Get lecture list from panel, and save them to self.lectures
:param year: A keyword to find lecture.
"""
self.log("Crawling lectures info...", 'DEBUG')
self.driver.switch_to.window(self.main_window)
time.sleep(3)
panel = self.driver.find_element_by_id('selfInfoAfter')
lecture_list = panel.find_element_by_class_name('lecInfo')
self.lectures = lecture_list.find_elements_by_xpath("//a[contains(., '{}')]".format(year))
self.log("{} {}".format(self.msg['COURSES_TO_ATTEND'], len(self.lectures)), 'info')
def open_lecture(self, lecture_idx):
self.lecture = self.lectures[lecture_idx]
lecture_name = self.lecture.text
self.log("Opening the lecture room for '{}'.".format(lecture_name), 'DEBUG')
self.driver.switch_to.window(self.main_window)
time.sleep(1)
self.lecture.click()
time.sleep(3)
self.driver.get(ECAMPUS_PATH['LECTURE_ROOM'])
time.sleep(3)
self.log("Lecture room for '{}' was opened.".format(lecture_name), 'DEBUG')
def crawl_courses(self, lecture_idx):
# TODO: Modify to use this method in main.
self.driver.switch_to.window(self.main_window)
time.sleep(2)
self.open_lecture(lecture_idx)
self.change_display_language('English') # TODO: Modify to use Korean instead.
courses_link = self.driver.find_elements_by_xpath("//a[contains(., 'Lecture view')]")
courses_element = [course_link.find_element_by_xpath("../..") for course_link in courses_link]
courses = []
for course in courses_element:
datas = course.find_elements_by_tag_name('td')
title = datas[1].text
lecture_time = datas[2].text
period = datas[3].text
status = datas[4].text
link = datas[5].find_element_by_class_name('lectureWindow')
progress = self.extract_progress(status)
time_left = self.compute_left_time(lecture_time=lecture_time, progress=progress)
courses.append(
{
'title': title,
'time': int(lecture_time[:-6]),
'period': period,
'status': status,
'link': link,
'progress': progress,
'time_left': time_left,
}
)
return courses
def get_attendable_courses(self, lecture_idx):
self.log("강의 목록을 가져옵니다.", 'DEBUG')
self.driver.switch_to.window(self.main_window)
time.sleep(2)
self.open_lecture(lecture_idx)
self.change_display_language('English')
attendable_courses_link = self.driver.find_elements_by_xpath("//a[contains(., 'Lecture view')]")
attendable_courses = [course_link.find_element_by_xpath("../..") for course_link in attendable_courses_link]
# self.courses = []
for course in attendable_courses:
datas = course.find_elements_by_tag_name('td')
title = datas[1].text
lecture_time = datas[2].text
period = datas[3].text
status = datas[4].text
link = datas[5].find_element_by_class_name('lectureWindow')
progress = self.extract_progress(status)
time_left = self.compute_left_time(lecture_time=lecture_time, progress=progress)
if status != 'Complete':
self.attendable_courses.append(
{
'title': title,
'time': int(lecture_time[:-6]),
'period': period,
'status': status,
'link': link,
'progress': progress,
'time_left': time_left,
}
)
self.courses.append(
{
'title': title,
'time': int(lecture_time[:-6]),
'period': period,
'status': status,
'link': link,
'progress': progress,
'time_left': time_left,
}
)
self.log("강의 목록 조회 완료!", 'info')
if len(self.attendable_courses) == 0:
self.log("더 이상 출석할 강의가 없습니다!", 'info')
else:
self.log("출석해야할 강의 수: {}".format(len(self.attendable_courses)))
# self.print_courses_info()
self.get_attendable_courses_info()
for info in self.attendable_courses_info:
self.log(info, 'INFO')
self.log("전체 강의 수: {}".format(len(self.courses)))
def get_course_info(self, course):
# course = self.courses[course_idx]
course_info = \
"""
###########################################
title: {title}
time: {time} Minutes
period: {period}
status: {status}
time left: {left_min} Minutes and {left_sec} Seconds
############################################
""".format(
title=course['title'],
time=course['time'],
period=course['period'].replace('\n', ''),
status=course['status'],
left_min=course['time_left'] // 60 + 2,
left_sec=course['time_left'] % 60).replace('\t', ' ')
return course_info
def get_attendable_courses_info(self):
self.attendable_courses_info = []
for idx, course in enumerate(self.attendable_courses):
# self.attendable_courses_info.append( \
# """
# ###########################################
# title: {title}
# time: {time} Minutes
# period: {period}
# status: {status}
# time left: {left_min} Minutes and {left_sec} Seconds
# ############################################
# """.format(
# title=course['title'],
# time=course['time'],
# period=course['period'].replace('\n', ''),
# status=course['status'],
# left_min=course['time_left'] // 60 + 2,
# left_sec=course['time_left'] % 60).replace('\t', ' '))
self.attendable_courses_info.append(self.get_course_info(course))
def attend_course(self, course_idx):
# self.course = self.courses[course_idx]
self.course = self.attendable_courses.pop(course_idx)
self.course['progress'] = self.extract_progress(self.course['status'])
self.course['time_left'] = self.compute_left_time(lecture_time=self.course['time'], progress=self.course['progress'])
# self.log("Opening the course '{}' for {} min {} sec.".format(
# self.course['title'],
# self.course['time_left'] // 60 + 2,
# self.course['time_left'] % 60))
self.driver.switch_to.window(self.main_window)
time.sleep(2)
self.attend_time = time.time()
self.finish_time = (self.attend_time + self.course['time_left']) + 120
finish_time = time.gmtime(self.finish_time)
finish_time = "{}:{}".format(finish_time.tm_hour, finish_time.tm_min)
self.course['link'].click()
time.sleep(4)
self.lecture_window = self.driver.window_handles[-1]
self.log("Lecture opened. It will be finished at {}".format(finish_time), 'debug')
# TODO: Convert to thread.
# TODO: Or while loop..?
# Todo: Or checking main's remain time
time.sleep(self.course['time_left'])
self.driver.switch_to.window(self.lecture_window)
time.sleep(3)
self.log("Time Over!!", "debug")
if len(self.driver.window_handles) > 1:
self.driver.close()
self.driver.switch_to.window(self.main_window)
self.log("{} '{}'".format(self.msg['COURSE_END'], self.course.text), 'info')
def attend_all_courses(self):
self.log("Attending all courses in the lecture..", 'debug')
self.driver.switch_to.window(self.main_window)
self.driver.implicitly_wait(1)
for idx, course in enumerate(self.attendable_courses):
self.attend_course(idx)
self.log("현재 강의 내의 모든 영상 출석 완료!", 'info')
@staticmethod
def extract_progress(status: str):
progress = re.findall('\d+', status)
if len(progress) > 0:
return int(progress[0])
else:
return 0
@staticmethod
def compute_left_time(lecture_time, progress):
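# lecture_time is given in minutes and progress in percent, so the remaining
# time is lecture_time * (100 - progress) / 100 minutes; converting to seconds
# (* 60 / 100) is folded into the single factor * 6 // 10. For example
# (hypothetical numbers), a 30-minute lecture at 40% progress leaves
# 30 * 60 * 6 // 10 = 1080 seconds, i.e. 18 minutes.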
time_left = lecture_time * (100 - progress) * 6 // 10
return time_left
def message(self):
language_order = {'KOREAN': 0, 'ENGLISH': 1}
_lang = language_order[self.language]
messages = {
'LOGIN_TRY': [
"로그인 시도중...",
"Loggin in..."
],
'LOGIN_SUCCESS': [
"로그인 되었습니다.",
"Logged in.",
],
'LOGOUT_SUCCESS': [
"로그아웃 되었습니다.",
"Logout success.",
],
'HELLO': [
"안녕하세요",
"Hello",
],
'COURSES_TO_ATTEND': [
"이번주에 출석해야 하는 강의 수:",
"Courses to attend due this week:"
],
'COURSE_END': [
"강의가 끝났습니다.",
"Course ended.",
],
}
messages = {key: message[_lang] for key, message in messages.items()}
return messages
if __name__ == "__main__":
print("Testing...")
manager = EcampusManager(debug=True, show_chrome=False)
sj = 'secrets.json'
manager.id, manager.pw = load_auth_info(sj)
manager.login()
manager.get_lectures(year=2020)
lecture_range = range(len(manager.lectures))
choice = int(input("Select the number you want to attend >> "))
while choice not in lecture_range:
manager.log("Please enter a number in {}~{}".format(0, len(manager.lectures) - 1), 'warn')
choice = int(input("Select the number you want >> "))
manager.get_attendable_courses(lecture_idx=choice)
if len(manager.courses) > 0:
manager.attend_course(course_idx=0)
print("Finish.")
manager.driver.close()
sys.exit()
``` |
{
"source": "jholewinski/ics-12-overlapped-tiling",
"score": 2
} |
#### File: scripts/benchrunner/simulator.py
```python
import math
class Simulator:
def __init__(self, cfg):
self.config = cfg
class ModelOne(Simulator):
def __init__(self, cfg):
Simulator.__init__(self, cfg)
```
#### File: ics-12-overlapped-tiling/scripts/utils.py
```python
import os
import subprocess
import sys
import time
try:
import yaml
except:
print('Please install PyYAML')
sys.exit(1)
def _parse_range(value):
if isinstance(value, int):
return [value]
elif len(value) == 3:
return range(value[0], value[1]+1, value[2])
elif len(value) == 2:
return range(value[0], value[1]+1, 1)
elif len(value) == 1:
return [value[0]]
else:
print('Unable to handle object')
sys.exit(1)
def _query_range(data, name, default):
try:
value = data[name]
return _parse_range(value)
except KeyError:
return default
def run_experiment(filename):
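# A rough sketch of the YAML layout this function expects, inferred from the
# keys read below (all concrete values are hypothetical):
#
#   dimensions: 2
#   outfile: results.txt
#   binary: ./stencil.x
#   phase_limit: 0                      # optional
#   counters: "counter_a,counter_b"     # optional, comma-separated
#   parameters:
#     problem_size: [128, 512, 128]     # [start, stop, step], [start, stop], or a single int
#     time_steps: 64
#     elements_per_thread: [1, 4]
#     time_tile_size: [1, 4]
#     block_size_x: [16, 32, 16]
#     block_size_y: 16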
try:
handle = open(filename)
except IOError,e:
print(e)
sys.exit(1)
data = yaml.load(handle)
handle.close()
dimensions = data['dimensions']
outfile = data['outfile']
binary = data['binary']
parameters = data['parameters']
elements_per_thread = _query_range(parameters, 'elements_per_thread', [1])
problem_size = _query_range(parameters, 'problem_size', [128])
time_steps = _query_range(parameters, 'time_steps', [64])
time_tile_size = _query_range(parameters, 'time_tile_size', [1])
block_size_x = _query_range(parameters, 'block_size_x', [16])
if dimensions > 1:
block_size_y = _query_range(parameters, 'block_size_y', [16])
if dimensions > 2:
block_size_z = _query_range(parameters, 'block_size_z', [8])
else:
block_size_z = [1]
else:
block_size_y = [1]
block_size_z = [1]
try:
phase_limit = data['phase_limit']
except:
phase_limit = 0
try:
counters = data['counters'].split(',')
except:
counters = []
output = open(outfile, 'w')
num_runs = len(problem_size) * len(time_steps) * len(elements_per_thread) \
* len(time_tile_size) * len(block_size_x) * len(block_size_y) \
* len(block_size_z)
print('Number of Runs: %d' % num_runs)
curr = 0
total_start = time.time()
# Run through each permutation
for ps in problem_size:
for ts in time_steps:
for elems in elements_per_thread:
for tt in time_tile_size:
for bsx in block_size_x:
for bsy in block_size_y:
for bsz in block_size_z:
# Before each run, blow away the nv cache
os.system('rm -rf ~/.nv/')
curr = curr + 1
print('Running %d of %d' % (curr, num_runs))
args = [binary,
'-n',
'%d' % ps,
'-t',
'%d' % ts,
'-e',
'%d' % elems,
'-s',
'%d' % tt,
'-x',
'%d' % bsx,
'-p',
'%d' % phase_limit,
'-w',
'/tmp/temp-kernel.cl']
if dimensions > 1:
args.append('-y')
args.append('%d' % bsy)
if dimensions > 2:
args.append('-z')
args.append('%d' % bsz)
args = ' '.join(args)
proc = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Keep a watchdog on the process
start_time = time.time()
while proc.poll() == None:
time.sleep(0.1)
now = time.time()
elapsed = now - start_time
if elapsed > 15.0:
print('Watchdog timer expired!')
proc.terminate()
proc.wait()
break
end_time = time.time()
if proc.returncode != 0:
print('- FAILURE:')
print(proc.stdout.read())
print(proc.stderr.read())
else:
for line in proc.stdout.readlines():
output.write('%d#%s' % (curr, line))
output.flush()
elapsed = end_time - start_time
total = time.time() - total_start
if proc.returncode == 0:
ret = subprocess.call('~/projects/llvm/tests/dump-cl-binary.x /tmp/temp-kernel.cl', shell=True)
assert(ret == 0)
ret = subprocess.call('ptxas -arch sm_20 /tmp/temp-kernel.cl.ptx -o /tmp/temp-kernel.o', shell=True)
assert(ret == 0)
proc = subprocess.Popen('cuobjdump -sass /tmp/temp-kernel.o', shell=True, stdout=subprocess.PIPE)
(sass_out, _) = proc.communicate()
assert(proc.returncode == 0)
num_fadd = sass_out.count('FADD')
num_fmul = sass_out.count('FMUL')
num_ffma = sass_out.count('FFMA')
num_mufu = sass_out.count('MUFU')
num_fsetp = sass_out.count('FSETP')
output.write('%d#num_fp: %d\n' % (curr, (num_fadd+num_fmul+num_ffma+num_fsetp)))
output.write('%d#num_sfu: %d\n' % (curr, num_mufu))
for cnt in counters:
if proc.returncode == 0:
with open('/tmp/experiment-profiler.conf', 'w') as conf:
conf.write(cnt)
prof_args = 'COMPUTE_PROFILE=1 COMPUTE_PROFILE_CONFIG=/tmp/experiment-profiler.conf COMPUTE_PROFILE_LOG=/tmp/prof.log COMPUTE_PROFILE_CSV=1 %s' % args
proc = subprocess.Popen(prof_args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Keep a watchdog on the process
start_time = time.time()
while proc.poll() == None:
time.sleep(0.1)
now = time.time()
elapsed = now - start_time
if elapsed > 15.0:
print('Watchdog timer expired!')
proc.terminate()
proc.wait()
break
end_time = time.time()
if proc.returncode != 0:
print('- FAILURE:')
print(proc.stdout.read())
print(proc.stderr.read())
else:
all_values = []
with open('/tmp/prof.log') as log:
for line in log.readlines():
line = line.strip()
if line.startswith('kernel_func'):
value = line.split(',')[-1]
all_values.append(float(value))
#for line in proc.stdout.readlines():
# output.write('%d#%s' % (curr, line))
value_avg = float(sum(all_values)) / float(len(all_values))
output.write('%d#%s: %f\n' % (curr, cnt.strip(), value_avg))
output.flush()
elapsed = end_time - start_time
total = time.time() - total_start
seconds_per_run = total / float(curr)
remaining_runs = float(num_runs) - float(curr)
remaining_secs = seconds_per_run * \
remaining_runs
remaining_secs = int(remaining_secs)
remaining_mins = remaining_secs / 60
remaining_secs = remaining_secs % 60
remaining_hrs = remaining_mins / 60
remaining_mins = remaining_mins % 60;
print('Elapsed: %f Total: %f Remaining: %d:%d:%d' % \
(elapsed, total, remaining_hrs,
remaining_mins, remaining_secs))
output.close()
``` |
{
"source": "jholkeboer/check-me-in",
"score": 2
} |
#### File: factual/query/resolve.py
```python
from .read import Read
class Resolve(Read):
def __init__(self, api, table, values={}):
Read.__init__(self, api, 't/' + table + '/resolve', values)
def values(self, values):
return self._copy({'values': values})
def _copy(self, params):
return Resolve(self.api, self.merge_params(params))
```
#### File: factual/utils/utils.py
```python
def circle(lat, lon, radius):
"""
Creates an API-ready circle from the given latitude, longitude,
and radius parameters
"""
return {'$circle': {'$center': [lat, lon], '$meters': radius}}
def point(lat, lon):
"""
Creates an API-ready point from the given latitude and longitue.
"""
return {'$point': [lat, lon]}
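# A quick illustration of the helpers above (coordinates are hypothetical):
#
#   circle(34.05, -118.25, 1000)
#   # -> {'$circle': {'$center': [34.05, -118.25], '$meters': 1000}}
#   point(34.05, -118.25)
#   # -> {'$point': [34.05, -118.25]}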
try:
isinstance('', basestring)
def is_str(obj):
return isinstance(obj, basestring)
except NameError:
def is_str(obj):
return isinstance(obj, str)
```
#### File: WTForms-0.6/tests/ext_dateutil.py
```python
from datetime import datetime, date
from unittest import TestCase
from wtforms.form import Form
from wtforms.ext.dateutil.fields import DateTimeField, DateField
class DummyPostData(dict):
def getlist(self, key):
v = self[key]
if not isinstance(v, (list, tuple)):
v = [v]
return v
class DateutilTest(TestCase):
class F(Form):
a = DateTimeField()
b = DateField(default=lambda: date(2004, 9, 12))
c = DateField(parse_kwargs=dict(yearfirst=True, dayfirst=False))
def test_form_input(self):
f = self.F(DummyPostData(a='2008/09/12 4:17 PM', b='04/05/06', c='04/05/06'))
self.assertEqual(f.a.data, datetime(2008, 9, 12, 16, 17))
self.assertEqual(f.a._value(), '2008/09/12 4:17 PM')
self.assertEqual(f.b.data, date(2006, 4, 5))
self.assertEqual(f.c.data, date(2004, 5, 6))
self.assert_(f.validate())
f = self.F(DummyPostData(a='<NAME>'))
self.assert_(not f.validate())
def test_defaults_display(self):
f = self.F(a=datetime(2001, 11, 15))
self.assertEqual(f.a.data, datetime(2001, 11, 15))
self.assertEqual(f.a._value(), u'2001-11-15 00:00')
self.assertEqual(f.b.data, date(2004, 9, 12))
self.assertEqual(f.b._value(), u'2004-09-12')
self.assertEqual(f.c.data, None)
self.assert_(f.validate())
if __name__ == '__main__':
from unittest import main
main()
```
#### File: WTForms-0.6/tests/widgets.py
```python
from unittest import TestCase
from wtforms.widgets import html_params
from wtforms.widgets import *
class DummyField(object):
def __init__(self, data, name='f', label='', id='', type='TextField'):
self.data = data
self.name = name
self.label = label
self.id = id
self.type = type
_value = lambda x: x.data
__unicode__ = lambda x: x.data
__call__ = lambda x, **k: x.data
__iter__ = lambda x: iter(x.data)
iter_choices = lambda x: iter(x.data)
class HTMLParamsTest(TestCase):
def test(self):
self.assertEqual(html_params(foo=9, k='wuuu'), u'foo="9" k="wuuu"')
self.assertEqual(html_params(class_='foo'), u'class="foo"')
self.assertEqual(html_params(class__='foo'), u'class_="foo"')
self.assertEqual(html_params(for_='foo'), u'for="foo"')
class ListWidgetTest(TestCase):
def test(self):
# ListWidget just expects an iterable of field-like objects as its
# 'field' so that is what we will give it
field = DummyField([DummyField(x, label='l' + x) for x in ['foo', 'bar']], id='hai')
self.assertEqual(ListWidget()(field), u'<ul id="hai"><li>lfoo: foo</li><li>lbar: bar</li></ul>')
w = ListWidget(html_tag='ol', prefix_label=False)
self.assertEqual(w(field), u'<ol id="hai"><li>foo lfoo</li><li>bar lbar</li></ol>')
class TableWidgetTest(TestCase):
def test(self):
field = DummyField([DummyField(x, label='l' + x) for x in ['foo', 'bar']], id='hai')
self.assertEqual(TableWidget()(field), u'<table id="hai"><tr><th>lfoo</th><td>foo</td></tr><tr><th>lbar</th><td>bar</td></tr></table>')
class BasicWidgetsTest(TestCase):
"""Test most of the basic input widget types"""
field = DummyField('foo', name='bar', label='label', id='id')
def test_html_marking(self):
html = TextInput()(self.field)
self.assert_(hasattr(html, '__html__'))
self.assert_(html.__html__() is html)
def test_text_input(self):
self.assertEqual(TextInput()(self.field), u'<input id="id" name="bar" type="text" value="foo" />')
def test_password_input(self):
self.assert_(u'type="password"' in PasswordInput()(self.field))
self.assert_(u'value=""' in PasswordInput()(self.field))
self.assert_(u'value="foo"' in PasswordInput(hide_value=False)(self.field))
def test_hidden_input(self):
self.assert_(u'type="hidden"' in HiddenInput()(self.field))
def test_checkbox_input(self):
self.assertEqual(CheckboxInput()(self.field, value='v'), '<input checked="checked" id="id" name="bar" type="checkbox" value="v" />')
field2 = DummyField(False)
self.assert_(u'checked' not in CheckboxInput()(field2))
def test_radio_input(self):
pass # TODO
def test_textarea(self):
# Make sure textareas escape properly and render properly
f = DummyField('hi<>bye')
self.assertEqual(TextArea()(f), '<textarea id="" name="f">hi&lt;&gt;bye</textarea>')
class SelectTest(TestCase):
field = DummyField([('foo', 'lfoo', True), ('bar', 'lbar', False)])
def test(self):
self.assertEqual(Select()(self.field),
u'<select id="" name="f"><option selected="selected" value="foo">lfoo</option><option value="bar">lbar</option></select>')
self.assertEqual(Select(multiple=True)(self.field),
'<select id="" multiple="multiple" name="f"><option selected="selected" value="foo">lfoo</option><option value="bar">lbar</option></select>')
if __name__ == '__main__':
from unittest import main
main()
```
#### File: ext/django/fields.py
```python
from wtforms import widgets
from wtforms.fields import SelectFieldBase
from wtforms.validators import ValidationError
__all__ = (
'ModelSelectField', 'QuerySetSelectField',
)
class QuerySetSelectField(SelectFieldBase):
"""
Given a QuerySet either at initialization or inside a view, will display a
select drop-down field of choices. The `data` property actually will
store/keep an ORM model instance, not the ID. Submitting a choice which is
not in the queryset will result in a validation error.
Specifying `label_attr` in the constructor will use that property of the
model instance for display in the list, else the model object's `__str__`
or `__unicode__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for the blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(self, label=u'', validators=None, queryset=None, label_attr='', allow_blank=False, blank_text=u'', **kwargs):
super(QuerySetSelectField, self).__init__(label, validators, **kwargs)
self.label_attr = label_attr
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if queryset is not None:
self.queryset = queryset.all() # Make sure the queryset is fresh
def _get_data(self):
if self._formdata is not None:
for obj in self.queryset:
if obj.pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for obj in self.queryset:
label = self.label_attr and getattr(obj, self.label_attr) or obj
yield (obj.pk, label, obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = int(valuelist[0])
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.queryset:
if self.data == obj:
break
else:
raise ValidationError('Not a valid choice')
class ModelSelectField(QuerySetSelectField):
"""
Like a QuerySetSelectField, except takes a model class instead of a
queryset and lists everything in it.
"""
def __init__(self, label=u'', validators=None, model=None, **kwargs):
super(ModelSelectField, self).__init__(label, validators, queryset=model._default_manager.all(), **kwargs)
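# A minimal usage sketch for the two fields above; the Author/Book models and
# their attributes are hypothetical:
#
#   class BookForm(Form):
#       author = QuerySetSelectField(label_attr='name', allow_blank=True)
#       book = ModelSelectField(model=Book, label_attr='title')
#
#   # per the docstring, a QuerySetSelectField's queryset is typically assigned
#   # inside the view, e.g. form.author.queryset = Author.objects.filter(active=True)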
``` |
{
"source": "jholkeboer/cs496week2",
"score": 2
} |
#### File: jholkeboer/cs496week2/main.py
```python
import time
import json
from flask import Flask
from flask import render_template, redirect, request
from flaskext import wtf
from flaskext.wtf import validators
from google.appengine.ext import db
from flaskext import wtf
from flaskext.wtf import validators
from models import Recipe, Ingredient
from factual import Factual
from factual.utils import circle
app = Flask(__name__)
app.secret_key='unsafe'
app.DEBUG=True
app.CSRF_ENABLED=True
app.CSRF_SESSION_LKEY='unsafe'
class RecipeForm(wtf.Form):
name = wtf.TextField('Name')
under30 = wtf.BooleanField('Under 30 minutes to make?')
category = wtf.RadioField('Category', choices=[('breakfast','Breakfast'),('lunch','Lunch'),('dinner','Dinner')])
ingredients = wtf.TextAreaField('Ingredients')
instructions = wtf.TextAreaField('Instructions')
#####################################
# FACTUAL API ACCESS FOR ASSIGNMENT 4
#####################################
# load creds from client_secrets.json
secrets_file = open('client_secrets.json', 'r')
factual_creds = json.loads(secrets_file.read())
secrets_file.close()
api_key = factual_creds['api_key']
api_secret = factual_creds['api_secret']
factual = Factual(api_key, api_secret)
# route to hit Factual api
# based on Factual documentation: https://github.com/Factual/factual-python-driver
@app.route('/mobile/factual', methods=['POST'])
def factual_restaurants():
latitude = float(request.form.get('latitude'))
longitude = float(request.form.get('longitude'))
search_term = request.form.get('search_term')
if not latitude or not longitude:
return json.dumps({"error": "No location provided"}), 500
if not search_term:
return json.dumps({"error": "No search term provided"}), 500
radius = 1000
places = factual.table('places')
# look up restaurants near location
result = places.search(search_term).geo(circle(latitude, longitude, radius)).data()
return json.dumps(result), 200
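# A hedged sketch of how a client might call this endpoint (host, port, and
# values are hypothetical); the route expects form-encoded latitude, longitude,
# and search_term fields:
#
#   requests.post("http://localhost:8080/mobile/factual",
#                 data={"latitude": 45.52, "longitude": -122.68,
#                       "search_term": "pizza"})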
# route to display JQuery mobile page
@app.route('/mobile/home', methods=['GET'])
def mobile_home():
return render_template('mobile_home.html')
#####################################
# API ROUTES FOR ASSIGNMENT 3 PART 2
#####################################
@app.route('/recipes/all', methods=['GET'])
def api_all_recipes():
# returns the names and keys of all recipes in the system
recipes = db.GqlQuery("SELECT * FROM Recipe")
recipeList = []
for r in recipes:
recipeList.append({'name': r.name, 'key': str(r.key())})
return json.dumps({'status': 'OK', 'recipes': recipeList}), 200
@app.route('/recipes', methods=['GET'])
def recipes():
# get a specific recipe
key = request.args.get('key')
recipe = Recipe.get(key)
print recipe
if not recipe:
return json.dumps({'error': 'Recipe not found'}), 500
recipe_object = {'name': recipe.name,
'prepTime': recipe.prepTime,
'instructions': recipe.instructions}
ingredientList = []
for i in recipe.ingredients:
ing = Ingredient.get(i)
ingredientList.append({'ing_name': ing.name, 'ing_key': str(ing.key())})
recipe_object['ingredients'] = ingredientList
return json.dumps({'status': 'OK', 'recipe': recipe_object}), 200
@app.route('/recipes/new', methods=['POST'])
def recipes_new():
# inserts a new recipe
if not request.args.get('ingredients'):
return json.dumps({"error": "No ingredients provided"}), 500
if not request.args.get('instructions'):
return json.dumps({"error": "No instructions provided"}), 500
recipeName = request.args.get('name')
if not recipeName:
return json.dumps({"error": "No name provided"}), 500
prepTime = request.args.get('prepTime')
if not prepTime:
return json.dumps({"error": "No prepTime provided"}), 500
category = request.args.get('category')
ingredientList = request.args.get('ingredients').split(',')
ingredientList = [x.lower() for x in ingredientList]
instructionList = request.args.get('instructions').split(',')
# error handling
if prepTime.isdigit():
prepTime = int(prepTime)
if prepTime <= 0:
return json.dumps({"error": "Invalid prep time"}), 500
else:
return json.dumps({"error": "Invalid prep time"}), 500
if category not in ['breakfast', 'lunch', 'dinner']:
return json.dumps({"error": "Invalid category. Valid categories are breakfast, lunch, and dinner"}), 500
if len(ingredientList) < 1:
return json.dumps({"error": "Need at least one ingredient"}), 500
if len(instructionList) < 1:
return json.dumps({"error": "Need at least one instruction"}), 500
# get ingredient keys
ingredient_keys = []
new_ingredients = []
for ingName in ingredientList:
ings = db.GqlQuery("SELECT * FROM Ingredient WHERE name = :1", ingName)
result_count = 0
for item in ings:
result_count += 1
ingredient_keys.append(item.key())
if result_count == 0:
new_ingredients.append(ingName)
newRecipe = Recipe(name = recipeName,
prepTime = prepTime,
category = category,
ingredients = [],
instructions = instructionList)
newRecipe.put()
recipeKey = newRecipe.key()
# create new ingredients if necessary
for n in new_ingredients:
newIngredient = Ingredient(name=n, usedIn=[recipeKey])
newIngredient.put()
ingredient_keys.append(newIngredient.key())
newRecipe.ingredients = ingredient_keys
newRecipe.put()
return json.dumps({'status': "OK", 'message': "Recipe saved successfully"}), 200
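# A hedged example call for this endpoint (host and values are hypothetical);
# note that the handler reads its inputs from query-string arguments rather
# than the POST body:
#
#   requests.post("http://localhost:8080/recipes/new",
#                 params={"name": "Pancakes", "prepTime": 20,
#                         "category": "breakfast",
#                         "ingredients": "flour,milk,eggs",
#                         "instructions": "mix batter,cook on griddle"})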
@app.route('/ingredients', methods=['GET'])
def ingredients():
# returns the list of Recipe keys which include the given ingredient name
ingredientName = request.args.get('ingredientName')
if not ingredientName:
return json.dumps({'error': "Invalid request parameter."}), 500
# see if the ingredient exists in the database
ingredient = db.GqlQuery("SELECT * FROM Ingredient WHERE name = :1", ingredientName)
result_count = 0
for item in ingredient:
result_count += 1
ingredient_key = item.key()
if not ingredient_key:
return json.dumps({'error': "Ingredient not found"}), 500
# look up recipes that contain that ingredient
recipe_count = 0
recipe_list = []
recipes = db.GqlQuery("SELECT * FROM Recipe WHERE ingredients = :1", ingredient_key)
for r in recipes:
recipe_count += 1
print r
recipe_list.append(str(r.key()))
return json.dumps({'status': 'OK', 'recipeList': recipe_list}), 200
@app.route('/recipes/add_ingredient', methods=['PUT'])
def recipes_add_ingredient():
# associates an ingredient with a recipe
recipe_key = request.args.get('recipe')
ingredient_key = request.args.get('ingredient')
recipe = Recipe.get(recipe_key)
if not recipe:
return json.dumps({'error': 'Recipe not found'}), 500
ingredient = Ingredient.get(ingredient_key)
if not ingredient:
return json.dumps({'error': 'Ingredient not found.'}), 500
if ingredient.key() in recipe.ingredients:
return json.dumps({'status': 'Ingredient already in recipe.'}), 200
# if the ingredient is not already associated, associate it
recipe.ingredients.append(ingredient.key())
recipe.put()
# add the recipe to the ingredient's list of recipes that use it
if recipe.key() not in ingredient.usedIn:
ingredient.usedIn.append(recipe.key())
ingredient.put()
return json.dumps({'status': 'Associated ingredient and recipe successfully'}), 200
@app.route('/recipes/delete', methods=['DELETE'])
def recipes_delete():
# deletes a recipe from the datastore
key = request.args.get('recipe')
recipe = Recipe.get(key)
if not recipe:
return json.dumps({'error': 'Attempted to delete recipe that does not exist'}), 500
recipe.delete()
# remove recipe from 'usedIn' property of ingredients
ingredients = db.GqlQuery("SELECT * FROM Ingredient WHERE usedIn = :1", recipe.key())
for i in ingredients:
i.usedIn.remove(recipe.key())
i.put()
return json.dumps({'status': 'Deleted successfully'}), 200
#################################
# LEGACY WEB INTERFACE ROUTES from previous assignment
#################################
@app.route('/')
def hello():
return redirect('/all')
@app.route('/edit', methods=['GET','POST'])
def edit():
key = request.args.get('key')
recipe = Recipe.get(key)
form = RecipeForm()
category = str(recipe.category)
print category
if request.method == 'GET':
recipe = Recipe.get(key)
if recipe:
form.name.data = recipe.name
form.under30.data = recipe.under30
# form.category.default = recipe.category
# form.ingredients.data = recipe.ingredients
form.instructions.data = recipe.instructions
form.key = recipe.key()
elif form.validate_on_submit():
recipe.name = form.name.data
recipe.under30 = form.under30.data
recipe.category = form.category.data
recipe.ingredients = form.ingredients.data
recipe.instructions = form.instructions.data
recipe.put()
print "Recipe edited."
time.sleep(1)
return redirect('/all')
return render_template('edit_recipes.html', recipe=recipe, form=form, category=category)
@app.route('/delete', methods=['GET'])
def add():
key = request.args.get('key')
print 'deleting key %s' % key
recipe = Recipe.get(key)
recipe.delete()
time.sleep(1)
return redirect('/all')
@app.route('/all', methods=['GET'])
def all():
# recipes = Recipe.all()
recipes = db.GqlQuery("SELECT * FROM Recipe")
for r in recipes:
for i in r.ingredients:
print i
return render_template('view_recipes.html', recipes=recipes)
@app.errorhandler(404)
def page_not_found(e):
return '404 Not Found.', 404
@app.errorhandler(500)
def application_error(e):
return 'Error: {}'.format(e), 500
``` |
{
"source": "JHollandaise/Warp-firmware",
"score": 3
} |
#### File: boot/ksdk1.1.0/HX711_scale_monitor.py
```python
import sys
import pylink
import time
import matplotlib.pyplot as plt
# the worker threads below use the low-level thread module ('_thread' on Python 3)
try:
import thread
except ImportError:
import _thread as thread
def main(target_device):
"""Creates an interactive terminal to the target via RTT.
The main loop opens a connection to the JLink, and then connects
to the target device. RTT is started, the number of buffers is presented,
and then two worker threads are spawned: one for read, and one for write.
The main loops sleeps until the JLink is either disconnected or the
user hits ctrl-c.
Args:
target_device (string): The target CPU to connect to.
Returns:
Always returns ``0`` or a JLinkException.
Raises:
JLinkException on error.
"""
jlink = pylink.JLink()
print("connecting to JLink...")
jlink.open()
print("connecting to %s..." % target_device)
jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
jlink.connect(target_device)
print("connected, starting RTT...")
jlink.rtt_start()
while True:
try:
num_up = jlink.rtt_get_num_up_buffers()
num_down = jlink.rtt_get_num_down_buffers()
print("RTT started, %d up bufs, %d down bufs." % (num_up, num_down))
break
except pylink.errors.JLinkRTTException:
time.sleep(0.1)
try:
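# read_rtt and write_rtt are the read/write worker functions described in the
# docstring above; they are not included in this excerpt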
thread.start_new_thread(read_rtt, (jlink,))
thread.start_new_thread(write_rtt, (jlink,))
while jlink.connected():
time.sleep(1)
print("JLink disconnected, exiting...")
except KeyboardInterrupt:
print("ctrl-c detected, exiting...")
pass
if __name__ == "__main__":
sys.exit(main(sys.argv[1]))
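# Example invocation (the device name is hypothetical and must match the CPU
# string that the J-Link tools expect for the attached target):
#
#   python HX711_scale_monitor.py MKL03Z32xxx4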
``` |
{
"source": "jholland-usgs/asl-java-tools",
"score": 3
} |
#### File: asl-java-tools/pytools/Database.py
```python
try:
import hashlib
sha1 = hashlib.sha1
except:
import sha
sha1 = sha.new
import base64
import psycopg2
import sys
class Database:
def __init__(self, conString=None):
self.db = None
self.cur = None
if conString:
self.select_database(conString)
def __del__(self):
self.close()
# ===== Public Methods ===========================
def select_database(self, conString):
self.close()
host, user, pwd, db, port = conString.split(',')
self.db = psycopg2.connect(host=host, user=user, password=pwd, database=db, port=port)
self.cur = self.db.cursor()
def close(self):
if self.cur:
self.cur.close()
del self.cur
if self.db:
self.db.close()
del self.db
def execute(self, query, data=None):
if data is not None:
self.cur.execute(query, data)
else:
self.cur.execute(query)
def select(self, query, data=None):
if data is not None:
self.cur.execute(query, data)
else:
self.cur.execute(query)
return self.cur.fetchall()
def insert(self, query, data=None, commit=True):
if data is not None:
self.cur.execute(query, data)
else:
self.cur.execute(query)
if commit:
self.db.commit()
def insert_many(self, query, iterator, commit=True):
print query
#print iterator
self.cur.executemany(query, iterator)
if commit:
self.db.commit()
def delete(self, query, commit=True):
self.cur.execute(query)
if commit:
self.db.commit()
def interrupt(self):
self.db.interrupt()
def run_script(self, script):
return self.cur.executescript(script)
def commit(self):
self.db.commit()
# ===== Private Methods ==========================
def _hash(self, text):
sha_obj = sha1()
sha_obj.update(text)
return base64.urlsafe_b64encode(sha_obj.digest())
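# A minimal usage sketch (connection values are hypothetical); note that the
# connection string is a single comma-separated "host,user,password,database,port":
#
#   db = Database("localhost,asl,secret,metrics,5432")
#   rows = db.select("SELECT * FROM station WHERE network = %s", ("IU",))
#   db.close()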
``` |
{
"source": "jholland-usgs/stationxml1.0-1.1-converter",
"score": 3
} |
#### File: stationxml1.0-1.1-converter/stationxml/xslt_converter.py
```python
import argparse
import glob
import os
from lxml import etree
def parseargs():
"""
Parse commandline arguments
Returns
-------
args parsed from input
"""
parser = argparse.ArgumentParser(prog="xslt_converter.py")
parser.add_argument("xslt",
help="The xslt file to convert input to output with")
parser.add_argument("input",
help="Either the directory or file to be converted. Wildcards are permitted. This does restrict to .xml if directory provided.")
parser.add_argument("output",
help="The directory to output converted xml files into")
parser.add_argument("-f", "--force_completion", help="Force completion despite failures", action='store_true')
args = parser.parse_args()
return args
def convert_single_file(transform, input_file_path, output_dir):
"""
Convert a single file with provided transform and write it out to the output_dir with the same basename.
Parameters
----------
transform etree.XSLT produced transform
input_file_path str Path to the input file
output_dir str Path to the output directory. Existing files with the same name will be overwritten.
Returns
-------
"""
with open(input_file_path, 'r') as input_file:
xml_in = etree.parse(input_file)
xml_out = transform(xml_in)
file_out = open(os.path.join(output_dir, os.path.basename(input_file_path)), 'wb')
file_out.write(etree.tostring(xml_out, pretty_print=True, xml_declaration=True, encoding='UTF-8'))
def convert_from_paths(xslt, input, output, force_completion=False):
"""
Convert all files in either a directory or a wildcard path matching glob. Output is written to a directory with a matching filename
Parameters
----------
xslt str Path to XSLT transform file
input str Path to either a directory or wildcardable file path
output str Path to output directory
force_completion bool Upon exception, continue conversions.
Returns
-------
"""
try:
xslt_file = open(xslt, 'r')
xslt_parsed = etree.parse(xslt_file)
transform = etree.XSLT(xslt_parsed)
except Exception as e:
print("Error parsing XSLT file: {}".format(xslt))
print(str(e))
return
try:
if os.path.isdir(input):
input = input + '/*.xml'
input_paths = glob.iglob(input)
except Exception as e:
print("Error processing input path(s)")
print(str(e))
return
try:
if not os.path.isdir(output):
print("Output must be a directory")
return
except Exception as e:
print("Error processing output folder")
print(str(e))
return
for file in input_paths:
try:
convert_single_file(transform, file, output)
except Exception as e:
print("Error converting file: {}".format(file))
print(str(e))
if force_completion:
continue
else:
return
def main():
args = parseargs()
convert_from_paths(**vars(args))
if __name__ == "__main__":
main()
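# Example invocation (file and directory names are hypothetical):
#
#   python xslt_converter.py stationxml-1.0-to-1.1.xsl ./input_xml ./converted --force_completion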
``` |
{
"source": "jhollowed/gammaProf",
"score": 3
} |
#### File: gammaProf/shearfit/lensing_system.py
```python
import pdb
import warnings
import numpy as np
from scipy import stats
import astropy.units as units
import astropy.constants as const
from astropy.cosmology import WMAP7
class obs_lens_system:
"""
This class constructs an object representing an observer-lens-source system, which contains the
lens redshift, and data vectors for the background sources of a given lens, including the lensing
geometry, and methods to perform computations on that data.
Parameters
----------
zl : float
The redshift of the lens.
cosmo : object, optional
An astropy cosmology object (defaults to `WMAP7`).
Attributes
----------
zl : float
The redshift of the lens.
has_sources : boolean
Whether or not the background population has been set for this instance
(`False` until `set_background()` is called).
bg_theta1 : float array
The source lens-centric azimuthal angular coordinates, in arcseconds
(uninitialized until `set_background()` is called).
bg_theta2 : float array
The source lens-centric coaltitude angular coordinates, in arcseconds
(uninitialized until `set_background()` is called).
zs : float array
Redshifts of background sources
(uninitialized until `set_background()` is called).
r : float array
Projected separation of each source at the redshift `zl`, in comoving :math:`\\text{Mpc}`
(uninitialized until `set_background()` is called).
y1 : float array
The real component of the source shears.
y2 : float array
The imaginary component of the source shears.
yt : float array
The source tangential shears.
k : float array
The source convergences.
Methods
-------
set_background(theta1, theta2, zs, y1, y2)
Defines and assigns background source data vectors to attributes of the lens object.
get_background()
Returns the source population data vectors to the caller, as a list.
calc_sigma_crit()
Computes the critical surface density at the redshift `zl`.
"""
def __init__(self, zl, cosmo=WMAP7):
self.zl = zl
self._cosmo = cosmo
self._has_sources = False
self._has_shear12 = False
self._has_shear1 = False
self._has_shear2 = False
self._has_kappa = False
self._has_rho = False
self._has_radial_cuts = False
self._rmin = None
self._rmax = None
self._theta1 = None
self._theta2 = None
self._zs = None
self._r = None
self._phi = None
self._y1 = None
self._y2 = None
self._yt = None
self._k =None
def _check_sources(self):
"""
Checks that set_background has been called (intended to be called before any
operations on the attributes initialized by `set_background()`).
"""
assert(self._has_sources), 'sources undefined; first run set_background()'
def set_radial_cuts(self, rmin=None, rmax=None):
'''
Sets a class-wide radial mask which will be applied to data vectors returned from
`get_background()`, `calc_delta_sigma()`, `calc_delta_sigma_binned()`, and `calc_sigma_crit()`.
Parameters
----------
rmin : float, optional
Sources with halo-centric radial distances less than this value will be removed by
application of the mask constructed from this function. Defaults to None, in which case
rmin is set to `0` (i.e. nothing is masked on the lower end of the radial distribution).
rmax : float, optional
Sources with halo-centric radial distances greater than this value will be removed by
application of the mask constructed from this function. Defaults to None, in which case
rmax is set to coincide with the maximum source radial distance (i.e. nothing is masked
on the upper end of the radial distribution).
'''
self._check_sources()
if(rmin is None): rmin = 0
if(rmax is None): rmax = np.max(self._r)
self._radial_mask = np.logical_and(self._r >= rmin, self._r <= rmax)
def set_background(self, theta1, theta2, zs, y1=None, y2=None, yt=None, k=None, rho=None):
'''
Defines and assigns background source data vectors to attributes of the lens object,
including the angular positions, redshifts, projected radial distances from
the lens center in proper :math:`\\text{Mpc}`, and shear components. The user
should either pass the shear components `y1` and `y2`, or the tangential shear `yt`;
if both or neither are passed, an exception will be raised.
Parameters
----------
theta1 : float array
The source lens-centric azimuthal angular coordinates, in arcseconds.
theta2 : float_array
The source lens-centric coaltitude angular coordinates, in arcseconds.
zs : float array
The source redshifts.
y1 : float array, optional
The shear component :math:`\\gamma_1`. Must be passed along with `y2`, unless passing `yt`.
y2 : float array, optional
The shear component :math:`\\gamma_2`. Must be passed along with `y1`, unless passing `yt`.
yt : float array, optional
The tangential shear :math:`\\gamma_T`. Must be passed if not passing `y1` and `y2`.
k : float array, optional
The convergence :math:`\\kappa`. Not needed for any computations of this class, but is
offered as a convenience. Defaults to `None`.
rho : float array, optional
The matter density at the projected source positions on the lens plane. Not needed for
any computations of this class, but is offered as a convenience; intended use is in the
case that the user wishes to fit `\\delta\\Sigma` directly to the projected mass density
on the grid (output prior to ray-tracing). Defaults to `None`.
'''
# make sure shear was passed correctly -- either tangential, or components, not both
# the _has_shear12 attribute will be used to communicate to other methods which usage
# has been invoked
if((y1 is None and y2 is None and yt is None) or
((y1 is not None or y2 is not None) and yt is not None)):
raise Exception('Either y1 and y2 must be passed, or yt must be passed, not both.')
# initialize source data vectors
self._theta1 = np.array((np.pi/180) * (theta1/3600))
self._theta2 = np.array((np.pi/180) * (theta2/3600))
self._zs = np.array(zs)
self._y1 = np.array(y1)
self._y2 = np.array(y2)
self._yt = np.array(yt)
self._k = np.array(k)
self._rho = np.array(rho)
# set flags and compute additional quantities
if(yt is None): self._has_shear12 = True
if(k is not None): self._has_kappa = True
if(rho is not None): self._has_rho = True
self._has_sources = True
self._comp_bg_quantities()
self.set_radial_cuts(None, None)
def _comp_bg_quantities(self):
"""
Computes background source quantites that depend on the data vectors initialized in
set_background (this function meant to be called from the setter method of each
source property).
"""
self._check_sources()
# compute halo-centric projected radial separation of each source, in proper Mpc
#self._r = np.linalg.norm([np.tan(self._theta1), np.tan(self._theta2)], axis=0) * \
# self._cosmo.comoving_distance(self.zl).value
#arcsec_per_Mpc = (self._cosmo.arcsec_per_kpc_proper(self.zl)).to( units.arcsec / units.Mpc )
#angular_sep_arcsec = np.linalg.norm([180/np.pi * self._theta1 * 3600,
# 180/np.pi * self._theta2 * 3600], axis=0) * units.arcsec
#self._r = (angular_sep_arcsec / arcsec_per_Mpc).value
# Projected distance in proper Mpc; Wright & Brainerd, under Eq.10
self._r = np.linalg.norm([self._theta1, self._theta2], axis=0) * \
self._cosmo.angular_diameter_distance(self.zl).value
if(self._has_shear12):
# compute tangential shear yt
self._phi = np.arctan(self._theta2/self._theta1)
#self._yt = -(self._y1 * np.cos(2*self._phi) +
# self._y2*np.sin(2*self._phi))
self._yt = np.sqrt(self._y1**2 + self._y2**2)
def get_background(self):
'''
Returns the source population data vectors to the caller as a numpy
rec array, sorted in ascending order with respect to the halo-centric
radial distance
Returns
-------
bg : 2d numpy array
A list of the source population data vectors (2d numpy array), with
labeled columns.
If shear components are being used (see docstring for `set_background()`,
then the contents of the return array is
[theta1, theta2, r, zs, y1, y2, yt], where theta1 and theta2 are the
halo-centric angular positions of the sources in arcseconds, r is the
halo-centric projected radial distance of each source in proper
:math:`\\text{Mpc}`, zs are the source redshifts, y1 and y2 are the
shear components of the sources, and yt are the source tangential shears.
If only the tangential shear is being used, then y1 and y2 are omitted
'''
self._check_sources()
bg_arrays = [(180/np.pi * self._theta1 * 3600),
(180/np.pi * self._theta2 * 3600),
self._r, self._zs, self._yt]
bg_dtypes = [('theta1',float), ('theta2',float), ('r',float),
('zs',float), ('yt',float)]
if(self._has_shear12):
bg_arrays.append(self._y1)
bg_arrays.append(self._y2)
bg_dtypes.append(('y1', float))
bg_dtypes.append(('y2', float))
if(self._has_kappa):
bg_arrays.append(self._k)
bg_dtypes.append(('k', float))
if(self._has_rho):
bg_arrays.append(self._rho)
bg_dtypes.append(('rho', float))
bg_arrays = [arr[self._radial_mask] for arr in bg_arrays]
bg = np.rec.fromarrays(bg_arrays, dtype = bg_dtypes)
return bg
@property
def cosmo(self): return self._cosmo
@cosmo.setter
def cosmo(self, value):
self._cosmo = value
self._comp_bg_quantities()
@property
def r(self): return self._r
@r.setter
def r(self, value):
raise Exception('Cannot change source \'r\' value; update angular positions instead')
@property
def theta1(self): return self._theta1
@theta1.setter
def theta1(self, value):
self._theta1 = value
self._comp_bg_quantities()
@property
def theta2(self): return self._theta2
@theta2.setter
def theta2(self, value):
self._theta2 = value
self._comp_bg_quantities()
@property
def zs(self): return self._zs
@zs.setter
def zs(self, value):
self._zs = value
self._comp_bg_quantities()
@property
def k(self): return self._k
@k.setter
def k(self, value):
self._k = value
if(value is None): self._has_kappa = False
else: self._has_kappa = True
@property
def y1(self): return self._y1
@y1.setter
def y1(self, value):
if(not self._has_shear12):
raise Exception('object initialized with yt rather than y1,y2; cannot call y1 setter')
else:
self._y1 = value
self._comp_bg_quantities()
@property
def y2(self): return self._y2
@y2.setter
def y2(self, value):
if(not self._has_shear12):
raise Exception('object initialized with yt rather than y1,y2; cannot call y2 setter')
else:
self._y2 = value
self._comp_bg_quantities()
@property
def yt(self): return self._yt
@yt.setter
def yt(self, value):
self._yt = value
if(self._has_shear12 or self._has_shear1 or self._has_shear2):
warnings.warn('Warning: setting class attribute yt, but object was initialized'
'with y1,y2 (or y1/y2 setters were called); shear components y1'
'and y2 being set to None')
self._has_shear12 = False
self._y1= None
self._y2 = None
self._comp_bg_quantities()
@property
def get_radial_cuts(self): return [self._rmin, self._rmax]
def _k_rho(self):
'''
Rescales the convergence on the lens plane into a matter density. This is mostly offered
for debugging purposes, and is only really meaningful in the case that the input is the
raytracing of a single lens plane (otherwise the recovered matter density is cumulative,
in some sense, across the line of sight).
Returns
-------
rho : float or float array
The projected mass density at the source positions on the lens plane
'''
rho = self._k * self.calc_sigma_crit()
return rho
def calc_sigma_crit(self, zs=None):
'''
Computes :math:`\\Sigma_\\text{c}(z_s)`, the critical surface density as a function of source
redshift :math:`z_s`, at the lens redshift :math:`z_l`, in proper :math:`M_{\\odot}/\\text{pc}^2`,
assuming a flat cosmology
Parameters
----------
zs : float or float array, optional
A source redshift (or array of redshifts). If None (default), then use background
source redshifts given at object instantiation, `self.zs`
Returns
-------
Sigma_crit : float or float array
The critical surface density, :math:`\\Sigma_\\text{c}`, in proper
:math:`M_{\\odot}/\\text{pc}^2`
'''
if(zs is None):
self._check_sources()
zs = self._zs[self._radial_mask]
# G in Mpc^3 M_sun^-1 Gyr^-2,
# speed of light C in Mpc Gyr^-1
# distance to lens Dl and source Ds in proper Mpc
# --> warning: this assumes a flat cosmology; or that angular diameter distance = proper distance
G = const.G.to(units.Mpc**3 / (units.M_sun * units.Gyr**2)).value
C = const.c.to(units.Mpc / units.Gyr).value
Ds = self._cosmo.angular_diameter_distance(zs).value
Dl = self._cosmo.angular_diameter_distance(self.zl).value
Dls = Ds - Dl
# critical surface mass density Σ_c in proper M_sun/pc^2;
# the final division by 1e12 converts the area unit from Mpc^2 to pc^2
sigma_crit = (C**2/(4*np.pi*G) * (Ds)/(Dl*Dls))
sigma_crit = sigma_crit / (1e12)
return sigma_crit
def calc_delta_sigma(self):
'''
Computes :math:`\\Delta\\Sigma = \\gamma\\Sigma_c`, the differential surface density at the lens
redshift :math:`z_l`, in proper :math:`M_{\\odot}/\\text{pc}^2`, assuming a flat cosmology.
Returns
-------
delta_sigma : float or float array
The differential surface density, :math:`\\Delta\\Sigma = \\gamma\\Sigma_c`, in proper
:math:`M_{\\odot}/\\text{pc}^2`
'''
self._check_sources()
yt = self._yt[self._radial_mask]
sigma_crit = self.calc_sigma_crit()
delta_sigma = yt*sigma_crit
return delta_sigma
def calc_delta_sigma_binned(self, nbins, return_edges=False, return_std=False, return_gradients=False):
'''
Computes :math:`\\Delta\\Sigma = \\gamma\\Sigma_c`, the differential surface density at the lens
redshift :math:`z_l`, in proper :math:`M_{\\odot}/\\text{pc}^2`, assuming a flat cosmology.
Parameters
----------
nbins : int
Number of bins to place the data into. The bin edges will be distributed uniformly in radial space
(i.e. the bin widths will be constant, rather than bin areas)
return_edges : bool, optional
Whether or not to return the resulting bin edges. Defaults to False.
return_std : bool, optional
Whether or not to return the standard deviation and standard error of the mean of each bin.
Defaults to False.
return_gradients : bool, optional
Whether or not to return the approximate gradient of each bin. The gradient is computed by
fitting a linear form to each bin's data, and returning the slope parameter. Defaults to False.
Returns
-------
delta_sigma : float or float array
The differential surface density, :math:`\\Delta\\Sigma = \\gamma\\Sigma_c`, in proper
:math:`M_{\\odot}/\\text{pc}^2`
'''
self._check_sources()
# load data and sort by increasing radial distance
r = self._r[self._radial_mask]
sorter = np.argsort(r)
r = r[sorter]
yt = self._yt[self._radial_mask][sorter]
sigma_crit = self.calc_sigma_crit()[sorter]
delta_sigma = yt*sigma_crit
# get bin means
[r_mean, bin_edges, _] = stats.binned_statistic(r, r, statistic='mean', bins=nbins)
[delta_sigma_mean,_,_] = stats.binned_statistic(r, delta_sigma, statistic='mean', bins=nbins)
return_arrays = [r_mean, delta_sigma_mean]
return_cols = ['r_mean', 'delta_sigma_mean']
# and standard deviations, errors of the mean
if(return_std):
[delta_sigma_std,_,_] = stats.binned_statistic(r, delta_sigma, statistic='std', bins=nbins)
[delta_sigma_count,_,_] = stats.binned_statistic(r, delta_sigma, statistic='count', bins=nbins)
delta_sigma_se = delta_sigma_std / delta_sigma_count
[r_std,_,_] = stats.binned_statistic(r, r, statistic='std', bins=nbins)
[r_count,_,_] = stats.binned_statistic(r, r, statistic='count', bins=nbins)
r_se = r_std / r_count
return_arrays.extend([r_std, r_se, delta_sigma_std, delta_sigma_se])
return_cols.extend(['r_std', 'r_se_mean', 'delta_sigma_std', 'delta_sigma_se_mean'])
# return bin edges
if(return_edges):
return_arrays.append(bin_edges)
return_cols.append('bin_edges')
# return bin gradient and errors... compute these manually
if(return_gradients):
bin_gradients = np.zeros(nbins)
for i in range(nbins):
bin_mask = np.logical_and(r > bin_edges[i], r < bin_edges[i+1])
if(np.sum(bin_mask) == 0): bin_gradients[i] = float('NaN')
else:
ds, dr = delta_sigma[bin_mask], r[bin_mask]
bin_gradients[i],_ = np.polyfit(dr, ds, 1)
return_arrays.append(bin_gradients)
return_cols.append('bin_grad')
# gather for return
bin_dict = {}
for i in range(len(return_arrays)): bin_dict[return_cols[i]] = return_arrays[i]
return bin_dict
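# ------------------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; all numbers below are hypothetical).
# It only illustrates the call sequence described in the class docstring: instantiate with a
# lens redshift, attach a background source population, then compute the binned profile.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n = 200
    lens = obs_lens_system(zl=0.3)
    lens.set_background(theta1=rng.uniform(-60, 60, n),   # arcsec
                        theta2=rng.uniform(-60, 60, n),   # arcsec
                        zs=rng.uniform(0.6, 1.2, n),
                        yt=np.abs(rng.normal(0.05, 0.01, n)))
    profile = lens.calc_delta_sigma_binned(nbins=8, return_std=True)
    print(profile['r_mean'], profile['delta_sigma_mean'])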
``` |
{
"source": "jhollowe-forks/python-mapswipe-workers",
"score": 3
} |
#### File: mapswipe_workers/locust_files/load_testing.py
```python
import datetime
import json
import random
from uuid import uuid4
from locust import HttpUser, between, task
from mapswipe_workers.definitions import logger
from mapswipe_workers.utils import user_management
class MapSwipeUser(HttpUser):
# assuming that users need between 30 sec and 120 sec to map a group
wait_time = between(30, 120)
def set_up_user(self):
# check if is already signed in
if self.signed_in_user is None:
logger.info("user is not signed in. Will create a new user.")
# create user if not exists
user = user_management.create_user(self.email, self.username, self.password)
self.user_id = user.uid
# sign in user
self.signed_in_user = user_management.sign_in_with_email_and_password(
self.email, self.password
)
logger.info("Created a new user.")
else:
logger.info("user is already signed in.")
pass
def create_mock_result(self, group):
"""Create a result object for a build area project.
The result values are generated randomly.
"""
start_time = datetime.datetime.utcnow().isoformat()[0:-3] + "Z"
end_time = datetime.datetime.utcnow().isoformat()[0:-3] + "Z"
x_min = int(group["xMin"])
x_max = int(group["xMax"])
y_min = int(group["yMin"])
y_max = int(group["yMax"])
results = {}
for x in range(x_min, x_max):
for y in range(y_min, y_max):
task_id = f"18-{x}-{y}"
result = random.choices([0, 1, 2, 3])[0] # no, yes, maybe, bad_imagery
results[task_id] = result
data = {
"results": results,
"startTime": start_time,
"endTime": end_time,
}
return data
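# For illustration only (hypothetical group bounds, not from a real project): with
# xMin=100, xMax=102, yMin=200, yMax=202 the returned payload would look like
# {"results": {"18-100-200": 1, "18-100-201": 0, "18-101-200": 3, "18-101-201": 2},
#  "startTime": "2021-04-01T12:00:00.000Z", "endTime": "2021-04-01T12:00:45.000Z"}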
def set_firebase_db(self, path, data, token=None):
"""Upload results to Firebase using REST api."""
request_ref = f"{path}.json?auth={token}"
headers = {"content-type": "application/json; charset=UTF-8"}
self.client.put(
request_ref, headers=headers, data=json.dumps(data).encode("utf-8")
)
logger.info(f"set data in firebase for {path}.json")
@task
def map_a_group(self):
"""Get a group from Firebase for this user and "map" it.
Make sure that this user has not worked on this group before.
Get the group and create mock results.
Upload results to Firebase.
"""
# get the groups that need to be mapped
path = f"/v2/groups/{self.project_id}"
# make sure to set '&' at the end of the string
custom_arguments = 'orderBy="requiredCount"&limitToLast=15&'
new_groups = user_management.get_firebase_db(
path, custom_arguments, self.signed_in_user["idToken"]
)
# get the groups the user has worked on already
path = f"/v2/users/{self.user_id}/contributions/{self.project_id}"
# make sure to set & at the end of the string
custom_arguments = "shallow=True&"
existing_groups = user_management.get_firebase_db(
path, custom_arguments, self.signed_in_user["idToken"]
)
# pick group for mapping
# Get difference between new_groups and existing groups.
# We should get the groups the user has not yet worked on.
if existing_groups is None:
next_group_id = random.choice(list(new_groups.keys()))
else:
existing_groups.pop(
"taskContributionCount", None
) # need to remove this since it is not a groupId
remaining_group_ids = list(
set(new_groups.keys()) - set(existing_groups.keys())
)
next_group_id = random.choice(remaining_group_ids)
# get group object
next_group = new_groups[next_group_id]
# create mock result for this group
result = self.create_mock_result(next_group)
# upload results in firebase
path = f"/v2/results/{self.project_id}/{next_group_id}/{self.user_id}"
self.set_firebase_db(path, result, self.signed_in_user["idToken"])
def on_start(self):
"""Set up user and define project when user starts running."""
self.project_id = "-MYg8CEf2k1-RitN62X0"
random_string = uuid4()
self.email = f"<EMAIL>"
self.username = f"test_{random_string}"
self.password = "<PASSWORD>"
self.user_id = None
self.signed_in_user = None
self.set_up_user()
```
#### File: project_types/tile_map_service_grid/tutorial.py
```python
from mapswipe_workers.definitions import ProjectType, logger
from mapswipe_workers.project_types.base.tile_server import BaseTileServer
from mapswipe_workers.project_types.base.tutorial import BaseTutorial
from mapswipe_workers.utils import tile_functions as t
class Tutorial(BaseTutorial):
"""The subclass for an TMS Grid based Tutorial."""
def __init__(self, tutorial_draft):
# this will create the basis attributes
super().__init__(tutorial_draft)
self.projectType = tutorial_draft["projectType"]
self.zoomLevel = int(tutorial_draft.get("zoomLevel", 18))
self.tileServer = vars(BaseTileServer(tutorial_draft["tileServer"]))
self.tutorial_tasks = tutorial_draft["tutorialTasks"]
self.groups = dict()
self.tasks = dict()
# get TileServerB for change detection and completeness type
if self.projectType in [3, 4]:
self.tileServerB = vars(BaseTileServer(tutorial_draft["tileServerB"]))
def create_tutorial_groups(self):
"""Create group for the tutorial based on provided examples in geojson file."""
# load examples/tasks from file
number_of_screens = len(self.screens)
# create the groups dict to be uploaded in Firebase
self.groups[101] = {
"xMax": 100
+ (2 * number_of_screens)
- 1, # this depends on the number of screens/tasks to show
"xMin": 100, # this will be always set to 100
"yMax": 131074, # this is set to be at the equator
"yMin": 131072, # this is set to be at the equator
"requiredCount": 5, # not needed from backend perspective, maybe for client
"finishedCount": 0, # not needed from backend perspective, maybe for client
"groupId": 101, # a tutorial has only one group
"projectId": self.projectId,
"numberOfTasks": len(
self.tutorial_tasks
), # this depends on the number of screens/tasks to show
"progress": 0, # not needed from backend perspective, maybe for client
}
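# Worked example (hypothetical screen count): with 5 tutorial screens the group spans
# xMin=100 .. xMax=100 + 2*5 - 1 = 109, i.e. two tile columns per screen, while yMin/yMax
# stay fixed at the equator rows 131072..131074 used for all tutorials.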
if self.projectType in [ProjectType.CHANGE_DETECTION.value]:
# need to adjust xMax and yMax for Change Detection projects
# since they use a different view with only one tile per screen
self.groups[101]["xMax"] = str(100 + (number_of_screens - 1))
self.groups[101]["yMax"] = str(self.groups[101]["yMin"])
logger.info(
f"{self.projectId}"
f" - create_tutorial_groups - "
f"created groups dictionary"
)
def create_tutorial_tasks(self):
"""Create the tasks dict based on provided examples in geojson file."""
self.tasks = dict()
self.tasks[101] = list()
number_of_screens = len(self.screens)
for screen in range(1, number_of_screens + 1):
# get all tasks for this screen
raw_tasks_screen = [
d
for d in self.tutorial_tasks["features"]
if d["properties"]["screen"] in [screen]
]
# sort by tile_x and tile_y
raw_tasks_screen_sorted = sorted(
raw_tasks_screen,
key=lambda k: (k["properties"]["tile_x"], k["properties"]["tile_y"]),
)
for i, raw_task in enumerate(raw_tasks_screen_sorted):
tile_x = raw_task["properties"]["tile_x"]
tile_y = raw_task["properties"]["tile_y"]
if i < 3: # get adjusted tile_x to fit in tutorial data schema
tile_x_tutorial = self.groups[101]["xMin"] + (2 * (screen - 1))
else:
tile_x_tutorial = self.groups[101]["xMin"] + (2 * (screen - 1)) + 1
if i in [0, 3]: # get adjusted tile_y to fit in tutorial data schema
tile_y_tutorial = self.groups[101]["yMin"]
elif i in [1, 4]:
tile_y_tutorial = self.groups[101]["yMin"] + 1
elif i in [2, 5]:
tile_y_tutorial = int(self.groups[101]["yMin"]) + 2
task = {
"taskId_real": f"{self.zoomLevel}-{tile_x}-{tile_y}",
"taskId": f"{self.zoomLevel}-{tile_x_tutorial}-{tile_y_tutorial}",
"taskX": tile_x_tutorial, # need to set correctly based on screen
"taskY": tile_y_tutorial, # need to set correctly based on screen
"groupId": 101, # a tutorial has only one group
"projectId": self.projectId,
"referenceAnswer": raw_task["properties"]["reference"],
"screen": raw_task["properties"]["screen"],
"url": t.tile_coords_zoom_and_tileserver_to_url(
tile_x, tile_y, self.zoomLevel, self.tileServer
),
}
# Completeness and Change Detection projects use a second tile image url
if self.projectType in [
ProjectType.CHANGE_DETECTION.value,
ProjectType.COMPLETENESS.value,
]:
task["urlB"] = t.tile_coords_zoom_and_tileserver_to_url(
tile_x, tile_y, self.zoomLevel, self.tileServerB
)
self.tasks[101].append(task)
logger.info(
f"{self.projectId}"
f" - create_tutorial_tasks - "
f"created tasks dictionary"
)
```
#### File: mapswipe_workers/utils/api_calls.py
```python
from xml.etree import ElementTree
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from mapswipe_workers.definitions import (
OHSOME_API_LINK,
OSM_API_LINK,
OSMCHA_API_KEY,
OSMCHA_API_LINK,
CustomError,
logger,
)
def remove_troublesome_chars(string: str):
"""Remove chars that cause trouble when pushed into postgres."""
if type(string) is not str:
return string
troublesome_chars = {'"': "", "'": "", "\n": " "}
for k, v in troublesome_chars.items():
string = string.replace(k, v)
return string
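# Illustrative example (hypothetical input, not from the original code):
# remove_troublesome_chars('He said "don\'t"\nbye') -> 'He said dont bye'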
def retry_get(url, retries=3, timeout=4, to_osmcha: bool = False):
"""Retry a query for a variable amount of tries."""
retry = Retry(total=retries)
with requests.Session() as session:
session.mount("https://", HTTPAdapter(max_retries=retry))
if to_osmcha:
headers = {"Authorization": f"Token {OSMCHA_API_KEY}"}
return session.get(url, timeout=timeout, headers=headers)
else:
return session.get(url, timeout=timeout)
def geojsonToFeatureCollection(geojson: dict) -> dict:
"""Take a GeoJson and wrap it in a FeatureCollection."""
if geojson["type"] != "FeatureCollection":
collection = {
"type": "FeatureCollection",
"features": [{"type": "feature", "geometry": geojson}],
}
return collection
return geojson
def chunks(arr, n_objects):
"""Return a list of list with n_objects in each sublist."""
return [
arr[i * n_objects : (i + 1) * n_objects]
for i in range((len(arr) + n_objects - 1) // n_objects)
]
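# Illustrative example (added for clarity): chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]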
def query_osmcha(changeset_ids: list, changeset_results):
"""Get data from changesetId."""
id_string = ",".join(map(str, changeset_ids))
url = OSMCHA_API_LINK + f"changesets/?ids={id_string}"
response = retry_get(url, to_osmcha=True)
if response.status_code != 200:
err = f"osmcha request failed: {response.status_code}"
logger.warning(f"{err}")
logger.warning(response.json())
raise CustomError(err)
response = response.json()
for feature in response["features"]:
changeset_results[int(feature["id"])] = {
"username": remove_troublesome_chars(feature["properties"]["user"]),
"userid": feature["properties"]["uid"],
"comment": remove_troublesome_chars(feature["properties"]["comment"]),
"editor": remove_troublesome_chars(feature["properties"]["editor"]),
}
return changeset_results
def query_osm(changeset_ids: list, changeset_results):
"""Get data from changesetId."""
id_string = ",".join(map(str, changeset_ids))
url = OSM_API_LINK + f"changesets?changesets={id_string}"
response = retry_get(url)
if response.status_code != 200:
err = f"osm request failed: {response.status_code}"
logger.warning(f"{err}")
logger.warning(response.json())
raise CustomError(err)
tree = ElementTree.fromstring(response.content)
for changeset in tree.iter("changeset"):
id = changeset.attrib["id"]
username = remove_troublesome_chars(changeset.attrib["user"])
userid = changeset.attrib["uid"]
comment = created_by = None
for tag in changeset.iter("tag"):
if tag.attrib["k"] == "comment":
comment = tag.attrib["v"]
if tag.attrib["k"] == "created_by":
created_by = tag.attrib["v"]
changeset_results[int(id)] = {
"username": remove_troublesome_chars(username),
"userid": userid,
"comment": remove_troublesome_chars(comment),
"editor": remove_troublesome_chars(created_by),
}
return changeset_results
def remove_noise_and_add_user_info(json: dict) -> dict:
"""Delete unwanted information from properties."""
logger.info("starting filtering and adding extra info")
batch_size = 100
# remove noise
changeset_results = {}
missing_rows = {
"@changesetId": 0,
"@lastEdit": 0,
"@osmId": 0,
"@version": 0,
}
for feature in json["features"]:
new_properties = {}
for attribute in missing_rows.keys():
try:
new_properties[attribute.replace("@", "")] = feature["properties"][
attribute
]
except KeyError:
missing_rows[attribute] += 1
changeset_results[new_properties["changesetId"]] = None
feature["properties"] = new_properties
# add info
len_osm = len(changeset_results.keys())
batches = int(len(changeset_results.keys()) / batch_size) + 1
logger.info(
f"""{len_osm} changesets will be queried in roughly {batches} batches from osmCHA""" # noqa E501
)
chunk_list = chunks(list(changeset_results.keys()), batch_size)
for i, subset in enumerate(chunk_list):
changeset_results = query_osmcha(subset, changeset_results)
progress = round(100 * ((i + 1) / len(chunk_list)), 1)
logger.info(f"finished query {i+1}/{len(chunk_list)}, {progress}")
missing_ids = [i for i, v in changeset_results.items() if v is None]
chunk_list = chunks(missing_ids, batch_size)
batches = int(len(missing_ids) / batch_size) + 1
logger.info(
f"""{len(missing_ids)} changesets where missing from osmCHA and are now queried via osmAPI in {batches} batches""" # noqa E501
)
for i, subset in enumerate(chunk_list):
changeset_results = query_osm(subset, changeset_results)
progress = round(100 * ((i + 1) / len(chunk_list)), 1)
logger.info(f"finished query {i+1}/{len(chunk_list)}, {progress}")
for feature in json["features"]:
changeset = changeset_results[int(feature["properties"]["changesetId"])]
for attribute_name in ["username", "comment", "editor", "userid"]:
if attribute_name == "userid":
feature["properties"][attribute_name] = int(changeset[attribute_name])
else:
feature["properties"][attribute_name] = changeset[attribute_name]
logger.info("finished filtering and adding extra info")
if any(x > 0 for x in missing_rows.values()):
logger.warning(f"features missing values:\n{missing_rows}")
return json
def ohsome(request: dict, area: str, properties=None) -> dict:
"""Request data from Ohsome API."""
url = OHSOME_API_LINK + request["endpoint"]
data = {"bpolys": area, "filter": request["filter"]}
if properties:
data["properties"] = properties
logger.info("Target: " + url)
logger.info("Filter: " + request["filter"])
response = requests.post(url, data=data)
if response.status_code != 200:
err = f"ohsome request failed: {response.status_code}"
logger.warning(
f"{err} - check for errors in filter or geometries - {request['filter']}"
)
logger.warning(response.json())
raise CustomError(err)
else:
logger.info("Query succesfull.")
response = response.json()
if properties:
response = remove_noise_and_add_user_info(response)
return response
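# Illustrative call (the endpoint and filter strings are assumptions, not taken from this repo):
# ohsome({"endpoint": "elements/geometry", "filter": "building=*"},
#        area=bpolys_geojson_str, properties="tags")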
```
#### File: mapswipe_workers/python_scripts/add_project_geometries_to_api.py
```python
import ogr
from mapswipe_workers import auth
from mapswipe_workers.definitions import DATA_PATH
from mapswipe_workers.utils import geojson_functions
def add_project_geometries_to_api():
"""Load project geometries from postgres and save as geojson."""
# load from postgres
pg_db = auth.postgresDB()
sql_query = """
SELECT
project_id
,ST_AsText(geom) as geom
FROM projects
"""
data = pg_db.retr_query(sql_query)
print(len(data))
# save as geojson one by one
for project in data:
project_id = project[0]
wkt_geom = project[1]
outfile = (
f"{DATA_PATH}/api/project_geometries/project_geom_{project_id}.geojson"
)
try:
geometries = [ogr.CreateGeometryFromWkt(wkt_geom)]
geojson_functions.create_geojson_file(geometries, outfile)
except Exception:
print(f"got an error for {project_id}")
# just ignore if this fails
pass
add_project_geometries_to_api()
```
#### File: mapswipe_workers/python_scripts/add_usernames_in_firebase.py
```python
from firebase_admin import auth
from mapswipe_workers.auth import firebaseDB
from mapswipe_workers.definitions import logger
def update_username(uid):
"""Set username in Firebase DB based on auth.display_name for user id."""
fb_db = firebaseDB()
try:
user = auth.get_user(uid)
username = user.display_name
# only set username for users with display_name
if not username:
logger.info(f"user {uid} has no display_name in firebase.")
else:
ref = fb_db.reference(f"v2/users/{user.uid}/username")
ref.set(username)
logger.info(f"updated username for user {uid}: {username}")
except auth.UserNotFoundError:
logger.info(f"could not find user {uid} in firebase to update username.")
def get_all_users():
"""Get the user ids from all users in Firebase DB."""
fb_db = firebaseDB()
users = fb_db.reference("v2/users/").get(shallow=True)
uid_list = users.keys()
return uid_list
if __name__ == "__main__":
"""Get all user ids from Firebase and update username based on auth.display_name."""
uid_list = get_all_users()
for uid in uid_list:
update_username(uid)
```
#### File: tests/integration/test_create_footprint_project.py
```python
import unittest
import set_up
import tear_down
from click.testing import CliRunner
from mapswipe_workers import auth, mapswipe_workers
from mapswipe_workers.utils.create_directories import create_directories
class TestCreateProject(unittest.TestCase):
def setUp(self):
self.project_id = [
set_up.create_test_project_draft("footprint", "footprint_TMId"),
set_up.create_test_project_draft("footprint", "footprint_aoiFile"),
set_up.create_test_project_draft("footprint", "footprint_link"),
]
create_directories()
def tearDown(self):
for element in self.project_id:
tear_down.delete_test_data(element)
def test_create_footprint_project(self):
runner = CliRunner()
result = runner.invoke(mapswipe_workers.run_create_projects)
if result.exit_code != 0:
raise result.exception
pg_db = auth.postgresDB()
for element in self.project_id:
query = "SELECT project_id FROM projects WHERE project_id = %s"
result = pg_db.retr_query(query, [element])[0][0]
self.assertEqual(result, element)
# check if usernames made it to postgres
if element != "test_footprint_link":
query = """
SELECT count(*)
FROM tasks
WHERE project_id = %s
and project_type_specifics->'properties'->'username' is not null
"""
result = pg_db.retr_query(query, [element])[0][0]
self.assertGreater(result, 0)
fb_db = auth.firebaseDB()
ref = fb_db.reference(f"/v2/projects/{element}")
result = ref.get(shallow=True)
self.assertIsNotNone(result)
ref = fb_db.reference(f"/v2/groups/{element}")
result = ref.get(shallow=True)
self.assertIsNotNone(result)
# Footprint projects have tasks in Firebase
ref = fb_db.reference(f"/v2/tasks/{element}")
result = ref.get(shallow=True)
self.assertIsNotNone(result)
if __name__ == "__main__":
unittest.main()
```
#### File: tests/integration/test_update_project_data.py
```python
import unittest
from mapswipe_workers.firebase_to_postgres import update_data
class TestUpdateProjectData(unittest.TestCase):
def setUp(self):
self.project_ids = [
"-MRuCvru6yfFrJZxWn_z",
"-MOXET8nkuvw2AqGpQDQ",
"-MCGa66fcuFS6fi9M8-M"
]
def test_custom_project_ids(self):
"""Test update users when no users are in postgres yet."""
update_data.update_project_data(self.project_ids)
# self.assertIsNotNone(result)
def test_non_archived_projects(self):
"""Test update users when no users are in postgres yet."""
update_data.update_project_data()
# self.assertIsNotNone(result)
if __name__ == "__main__":
unittest.main()
```
#### File: scripts/v1_to_v2/generate_copy_to_csv.py
```python
import sys
def get_query(project_ids):
clause = 'WHERE project_id in (select project_id from results group by project_id)'
clause_import = 'WHERE import_id in (select importkey as import_id from projects)'
clause_group = 'WHERE project_id in (select project_id from projects group by project_id) and project_id in (select project_id from results group by project_id)'
if project_ids is None:
pass
elif len(project_ids) == 1:
clause = f'{clause} AND project_id = {project_ids[0]}'
clause_import = f', projects p {clause_import} AND p.project_id = {project_ids[0]} AND p.importkey = i.import_id'
clause_group = f'{clause_group} AND project_id = {project_ids[0]}'
else:
clause = f'{clause} AND project_id = {project_ids[0]}'
clause_group = f'{clause_group} AND project_id = {project_ids[0]}'
clause_import = f', projects p {clause_import} AND p.project_id = {project_ids[0]} AND p.importkey = i.import_id'
for project_id in project_ids[1:]:
clause = clause + f' OR project_id = {project_id}'
clause_group = clause_group + f' OR project_id = {project_id}'
clause_import = clause_import + f' OR p.project_id = {project_id} AND p.importkey = i.import_id'
query = f'-- Export v1 MapSwipe data to csv.\n' \
f'-- Rename attributes to conform to v2.\n' \
f'\copy (SELECT archive, image, importkey as "import_id", isfeatured AS "is_featured", lookfor AS "look_for", name, progress, projectdetails AS "project_details", project_id, project_type, state AS "status", info AS "project_type_specifics" FROM projects {clause}) TO projects.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n' \
f'\copy (SELECT i.import_id, i.info FROM imports i {clause_import}) TO imports.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n' \
f'\copy (SELECT project_id, group_id as "v1_group_id", count as "number_of_tasks", completedcount as "finished_count", verificationcount as "required_count", info as "project_type_specifics" FROM groups {clause_group} ) TO groups.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n' \
f'\copy (SELECT project_id, group_id as "v1_group_id", task_id, info as "project_type_specifics" FROM tasks {clause_group} LIMIT 10000000) TO tasks1.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n' \
f'\copy (SELECT project_id, group_id as "v1_group_id", task_id, info as "project_type_specifics" FROM tasks {clause_group} OFFSET 10000000 LIMIT 10000000) TO tasks2.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n' \
f'\copy (SELECT project_id, group_id as "v1_group_id", task_id, info as "project_type_specifics" FROM tasks {clause_group} OFFSET 20000000 LIMIT 10000000) TO tasks3.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n' \
f'\copy (SELECT project_id, group_id as "v1_group_id", task_id, info as "project_type_specifics" FROM tasks {clause_group} OFFSET 30000000) TO tasks4.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n' \
f'\copy (SELECT user_id, username FROM users) TO users.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n'
return query
def get_result_query(project_ids):
clause = 'WHERE project_id in (SELECT project_id FROM projects GROUP BY project_id) AND user_id in (SELECT user_id FROM users)'
if project_ids is None:
pass
elif len(project_ids) == 1:
clause = f'{clause} AND project_id = {project_ids[0]}'
else:
clause = f'{clause} AND project_id = {project_ids[0]}'
for project_id in project_ids[1:]:
clause = clause + f' OR project_id = {project_id}'
query = f'-- Export v1 MapSwipe data to csv.\n' \
f'-- Rename attributes to conform to v2.\n' \
f'\copy (SELECT project_id, task_id, user_id, timestamp as "timeint", info FROM results {clause}) TO results.csv WITH (FORMAT CSV, DELIMITER ",", HEADER TRUE);\n'
return query
if __name__ == '__main__':
if len(sys.argv) == 1:
query = get_query(None)
result_query = get_result_query(None)
else:
query = get_query(sys.argv[1:])
result_query = get_result_query(sys.argv[1:])
with open('copy_to_csv.sql', 'w') as f:
f.write(query)
with open('copy_results_to_csv.sql', 'w') as f:
f.write(result_query)
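# Illustrative invocation (the project ids are hypothetical):
# python generate_copy_to_csv.py 124 7935
# writes copy_to_csv.sql and copy_results_to_csv.sql restricted to those project ids;
# with no arguments the generated \copy statements export all projects that have results.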
``` |
{
"source": "jhollowe/groceri.es",
"score": 3
} |
#### File: groceri.es/app/config.py
```python
import os
class Config:
SECRET_KEY = ''
SQLALCHEMY_DATABASE_URI = 'sqlite:///db/app.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
LANGUAGES = ['en', 'nl']
def __init__(self):
for name, var in os.environ.items():
if hasattr(Config, name):
setattr(Config, name, var)
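# Illustrative behaviour (the value below is hypothetical): exporting
# SQLALCHEMY_DATABASE_URI=postgresql://user:pass@db/groceries before the app instantiates
# Config() makes that value override the sqlite default, since only names already defined
# on the class are copied in from the environment.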
``` |
{
"source": "jhollowe/pyad",
"score": 2
} |
#### File: pyad/pyad/adsearch.py
```python
from __future__ import absolute_import
from .adquery import *
from .adbase import *
_ad_query_obj = ADQuery()
def by_cn(cn, search_base=None, options={}):
if not search_base:
if not ADBase.default_domain:
raise Exception("Unable to detect default domain. Must specify search base.")
search_base = ADBase.default_domain
_ad_query_obj.reset()
_ad_query_obj.execute_query(where_clause=("CN = '%s'" % cn),
base_dn=search_base,
options=options,
type="GC")
return _ad_query_obj.get_single_result()['distinguishedName']
def by_upn(upn, search_base=None, options={}):
if not search_base:
if not ADBase.default_forest:
raise Exception("Unable to detect default forest. Must specify search base.")
search_base = ADBase.default_forest
_ad_query_obj.reset()
_ad_query_obj.execute_query(where_clause=("userPrincipalName = '%s'" % upn),
base_dn=search_base,
type="GC",
options=options)
return _ad_query_obj.get_single_result()['distinguishedName']
def by_sid(sid, search_base=None, options={}):
if not search_base:
if not ADBase.default_domain:
raise Exception("Unable to detect default domain. Must specify search base.")
search_base = ADBase.default_domain
_ad_query_obj.reset()
_ad_query_obj.execute_query(where_clause=("objectSid = '%s'" % sid),
base_dn=search_base,
options=options,
type="GC")
return _ad_query_obj.get_single_result()['distinguishedName']
def all_results_by_cn(cn, search_base=None, options={}):
if not search_base:
if not ADBase.default_domain:
raise Exception("Unable to detect default domain. Must specify search base.")
search_base = ADBase.default_domain
_ad_query_obj.reset()
_ad_query_obj.execute_query(where_clause=("CN = '%s'" % cn),
base_dn=search_base,
options=options,
type="GC")
return [result['distinguishedName'] for result in _ad_query_obj.get_all_results()]
def all_results_by_upn(upn, search_base=None, options={}):
if not search_base:
if not ADBase.default_forest:
raise Exception("Unable to detect default forest. Must specify search base.")
search_base = ADBase.default_forest
_ad_query_obj.reset()
_ad_query_obj.execute_query(where_clause=("userPrincipalName = '%s'" % upn),
base_dn=search_base,
type="GC",
options=options)
return [result['distinguishedName'] for result in _ad_query_obj.get_all_results()]
def all_results_by_sid(sid, search_base=None, options={}):
if not search_base:
if not ADBase.default_domain:
raise Exception("Unable to detect default domain. Must specify search base.")
search_base = ADBase.default_domain
_ad_query_obj.reset()
_ad_query_obj.execute_query(where_clause=("objectSid = '%s'" % sid),
base_dn=search_base,
options=options,
type="GC")
return [result['distinguishedName'] for result in _ad_query_obj.get_all_results()]
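# Illustrative usage (names, domain and result DNs are hypothetical; requires a
# domain-joined Windows host so the default domain/forest can be detected):
# by_cn("jsmith")                                   # -> e.g. 'CN=jsmith,OU=Staff,DC=example,DC=com'
# by_upn("jsmith@example.com", search_base="DC=example,DC=com")
# all_results_by_sid("S-1-5-21-...-1104")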
```
#### File: pyad/tests/tests_adbase.py
```python
from __future__ import absolute_import
from .pyadunittest import *
class TestADBase(ADTestCase):
def setUp(self):
# set all defaults back to their default
pyad.adbase.ADBase.default_ldap_server = None
pyad.adbase.ADBase.default_gc_server = None
pyad.adbase.ADBase.default_ldap_port = None
pyad.adbase.ADBase.default_gc_port = None
def test_detected_forest(self):
self.assertEqual(pyad.adbase.ADBase.default_domain, self.SANDBOX_DOMAIN)
def test_detected_domain(self):
self.assertEqual(pyad.adbase.ADBase.default_forest, self.SANDBOX_FOREST)
def test_set_defaults(self):
pyad.adbase.set_defaults(ldap_server = 'iowadc1', ldap_port = 389)
self.assertEqual(pyad.adbase.ADBase.default_ldap_server, 'iowadc1')
self.assertEqual(pyad.adbase.ADBase.default_ldap_port, 389)
```
#### File: jhollowe/pyad/setup.py
```python
import os
import os.path
from setuptools import setup
def read(fname):
if os.path.exists(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
else:
return ''
setup(
name = "pyad",
version = "0.5.15",
author = "<NAME>",
author_email = "<EMAIL>",
maintainer = "<NAME>",
maintainer_email = "<EMAIL>",
download_url = "https://github.com/zakird/pyad/",
url = "https://zakird.com/pyad",
description = "An Object-Oriented Active Directory management framework built on ADSI",
license = "Apache License, Version 2.0",
keywords = "python microsoft windows active directory AD adsi",
packages=[
'pyad'
],
long_description = read('README.rst'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: System Administrators",
"Natural Language :: English",
"Operating System :: Microsoft :: Windows",
"Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP"
],
install_requires=[
'setuptools',
'future'
]
)
``` |
{
"source": "jholmes/tc1",
"score": 3
} |
#### File: jholmes/tc1/seisDataLogger.py
```python
from obspy.core import Stream, Trace, UTCDateTime
from seisDataLoggerDaemon import Daemon
from threading import Thread
from time import strftime
import serial, sys, os
import numpy as np
class DataLoggerDaemon(Daemon):
def run(self):
baseTime = UTCDateTime()
# Assign all the configuration variables
port = self.config.get ("connection","port")
baud = self.config.getint ("connection","baudrate")
tout = self.config.getfloat("connection","timeout")
interval = self.config.getint ("data","interval")
offset = self.config.getint ("calibration","offset")
network = self.config.get ("device","network")
station = self.config.get ("device","station")
location = self.config.get ("device","location")
channel = self.config.get ("device","channel")
samprate = self.config.getfloat("device","samplerate")
dataqual = self.config.get ("device","dataquality")
sampleIdx = 0
traceData = np.array([])
self.logger.debug("["+ strftime('%X') + "] connecting...")
rawData = serial.Serial(port,baud,timeout=tout)
self.logger.debug("["+ strftime('%X') + "] listening for incoming data...")
while True: # main acquisition loop
while (rawData.inWaiting()==0): #Wait here until there is data
pass #do nothing
dataPointString = rawData.readline()
try:
traceData = np.append(traceData, int(dataPointString))
except ValueError:
offset = int(np.mean(traceData))
self.logger.debug("["+ strftime('%X') + "] * Bad value received. Replacing with current mean...")
traceData = np.append(traceData, offset)
sampleIdx = sampleIdx + 1
currentTime = UTCDateTime()
elapsedTime = (currentTime - baseTime)
# Write the data after x seconds
if elapsedTime >= (interval + (baseTime.microsecond / 1e6)):
# Fill header attributes
stats = {'network': network,
'station': station,
'location': location,
'channel': channel,
'npts': len(traceData),
'sampling_rate': samprate,
'mseed': {'dataquality': dataqual},
'starttime': baseTime}
# Save the file using a different thread.
worker = Thread(target=self._writeData, args=(traceData, stats, baseTime))
worker.setDaemon(True)
worker.start()
baseTime = currentTime
sampleIdx = 0
traceData = np.array([])
def _writeData(self, traceData, stats, timeObj):
streamObj = Stream([Trace(data=traceData, header=stats)])
filename = self._prepareFilename(timeObj)
offset = int(np.mean(streamObj.traces[0].data))
streamObj.traces[0].data = np.array([x - offset for x in streamObj.traces[0].data])
self.logger.debug("["+ strftime('%X') + "] Saving %d samples (corrected by %d) to %s..." % (len(traceData), offset, filename))
streamObj.write(filename, format='MSEED')
def _prepareFilename(self, timeObj):
datapath = self.config.get("file","datapath")
filepath = datapath +"%d/%d/" % (timeObj.year, timeObj.julday)
try:
if not os.path.exists(filepath):
os.makedirs(filepath)
except OSError as exception:
self.logger.debug("["+ strftime('%X') + "] * Error preparing path: (%d) %s" % (exception.errno, exception.strerror))
network = self.config.get("device","network")
station = self.config.get("device","station")
channel = self.config.get("device","channel")
filename = network+"."+station+".%02d%02d%4d_%02d%02d%02d." % \
(timeObj.day,timeObj.month,timeObj.year,
timeObj.hour,timeObj.minute,timeObj.second) +channel+".mseed"
return (filepath+filename)
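# Illustrative result (hypothetical config with network=AM, station=TC1, channel=EHZ and
# datapath=/srv/tc1/data/): a trace starting 2015-05-03 14:25:01 UTC is written to
# /srv/tc1/data/2015/123/AM.TC1.03052015_142501.EHZ.mseed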
def normalize(v):
norm=np.linalg.norm(v)
if norm==0:
return v
return v/norm
if __name__ == "__main__":
daemon = DataLoggerDaemon('/srv/tc1/run/seis_data_logger.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
```
#### File: jholmes/tc1/seisPlotAvg.py
```python
import serial, argparse
import numpy as np
from collections import deque
import ConfigParser as cp
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# plot class
class AnalogPlot:
# constr
def __init__(self, strPort, maxLen):
# open serial port
self.ser = serial.Serial(strPort, 9600, timeout=.1)
self.ax = deque([0.0]*maxLen)
self.ay = deque([0.0]*maxLen)
self.maxLen = maxLen
config = cp.RawConfigParser()
config.read("/srv/tc1/conf/datalogger.props.cfg")
self.offset = config.getint("calibration","offset")
self.dataArr = np.array([])
self.sec = 0
# add to buffer
def addToBuf(self, buf, val):
if len(buf) < self.maxLen:
buf.append(val)
else:
buf.pop()
buf.appendleft(val)
# add data
def add(self, data):
assert(len(data) == 2)
self.addToBuf(self.ax, data[0])
self.addToBuf(self.ay, data[1])
# update plot
def update(self, frameNum, a0, a1):
try:
line = self.ser.readline()
try:
datapt = float(line)
#datapt = datapt - 33487
if (datapt > (self.offset * 2) or datapt < (self.offset / 2)):
return self.offset
self.dataArr = np.append(self.dataArr, datapt)
self.sec = self.sec + 1
data = [self.sec, self.dataArr.mean()]
# print data
self.add(data)
print "x: %d y: %d val: %d" % (data[0], data[1], datapt)
a0.set_data(range(self.maxLen), self.ax)
a1.set_data(range(self.maxLen), self.ay)
except ValueError:
pass
except KeyboardInterrupt:
print('exiting')
return a0,
# clean up
def close(self):
# close serial
self.ser.flush()
self.ser.close()
# main() function
def main():
config = cp.RawConfigParser()
config.read("/srv/tc1/conf/datalogger.props.cfg")
offset = config.getint("calibration","offset")
strPort = '/dev/ttyACM0'
print('reading from serial port %s...' % strPort)
# plot parameters
analogPlot = AnalogPlot(strPort, 100)
print('plotting data...')
# set up animation
fig = plt.figure()
ax = plt.axes(xlim=(0, 100), ylim=(offset-10, offset+10))
a0, = ax.plot([], [])
a1, = ax.plot([], [])
anim = animation.FuncAnimation(fig, analogPlot.update,
fargs=(a0, a1),
interval=10)
# show plot
plt.show()
# clean up
analogPlot.close()
print('exiting.')
# call main
if __name__ == '__main__':
main()
``` |
{
"source": "jhol/symbiflow-arch-defs",
"score": 2
} |
#### File: symbiflow-arch-defs/utils/icebox-rr_graph-import.py
```python
from os.path import commonprefix
import icebox
import getopt, sys, re
import operator
from collections import namedtuple, OrderedDict
from functools import reduce
import lxml.etree as ET
mode_384 = False
mode_5k = False
mode_8k = False
def usage():
print("""
Usage: icebox_chipdb [options] [bitmap.asc]
-3
create chipdb for 384 device
-5
create chipdb for 5k device
-8
create chipdb for 8k device
""")
sys.exit(0)
VERBOSE=True
try:
opts, args = getopt.getopt(sys.argv[1:], "358")
except:
usage()
device_name = '1k'
for o, a in opts:
if o == "-8":
mode_8k = True
device_name = '8k'
elif o == "-5":
mode_5k = True
device_name = '5k'
elif o == "-3":
mode_384 = True
device_name = '384'
else:
usage()
ic = icebox.iceconfig()
if mode_8k:
ic.setup_empty_8k()
elif mode_5k:
ic.setup_empty_5k()
elif mode_384:
ic.setup_empty_384()
else:
ic.setup_empty_1k()
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
_TilePos = namedtuple('T', ['x', 'y'])
class TilePos(_TilePos):
_sentinal = []
def __new__(cls, x, y=_sentinal, *args):
if y is cls._sentinal:
if len(x) == 2:
x, y = x
else:
raise TypeError("TilePos takes 2 positional arguments not {}".format(x))
assert isinstance(x, int), "x must be an int not {!r}".format(x)
assert isinstance(y, int), "y must be an int not {!r}".format(y)
return _TilePos.__new__(cls, x=x, y=y)
class GlobalName(tuple):
def __new__(cls, *args, **kw):
return super(GlobalName, cls).__new__(cls, args, **kw)
def __init__(self, *args, **kw):
pass
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Root of XML
# ------------------------------
"""
<rr_graph tool_name="" tool_version="" tool_comment="">
"""
rr_graph = ET.Element(
'rr_graph',
dict(tool_name="icebox", tool_version="???", tool_comment="Generated for iCE40 {} device".format(device_name)),
)
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Create the switch types
# ------------------------------
"""
<switches>
<switch id="0" name="my_switch" buffered="1"/>
<timing R="100" Cin="1233-12" Cout="123e-12" Tdel="1e-9"/>
<sizing mux_trans_size="2.32" buf_size="23.54"/>
</switch>
</switches>
"""
switches = ET.SubElement(rr_graph, 'switches')
# Buffer switch drives an output net from a possible list of input nets.
buffer_id = 0
switch_buffer = ET.SubElement(
switches, 'switch',
{'id': str(buffer_id), 'name': 'buffer', 'buffered': "1", 'type': "mux"},
)
switch_buffer_sizing = ET.SubElement(
switch_buffer, 'sizing',
{'mux_trans_size': "2.32", 'buf_size': "23.54"},
)
# Routing switch connects two nets together to form a span12er wire.
routing_id = 1
switch_routing = ET.SubElement(
switches, 'switch',
{'id': str(routing_id), 'name': 'routing', 'buffered': "0", 'type': "mux"},
)
switch_routing_sizing = ET.SubElement(
switch_routing, 'sizing',
{'mux_trans_size': "2.32", 'buf_size': "23.54"},
)
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Build the segment list
# ------------------------------
"""fpga_arch
<segment name="unique_name" length="int" type="{bidir|unidir}" freq="float" Rmetal="float" Cmetal="float">
content
</segment>
<!-- The sb/cb pattern does not actually match the iCE40 you need to manually generate rr_graph -->
<!-- Span 4 wires which go A -> A+5 (IE Span 4 tiles) -->
<segment name="span4" length="5" type="bidir" freq="float" Rmetal="float" Cmetal="float">
<sb type="pattern">1 1 1 1 1</sb>
<cb type="pattern">1 1 1 1</cb>
</segment>
<segment name="span12" length="13" type="bidir" freq="float" Rmetal="float" Cmetal="float">
<sb type="pattern">1 1 1 1 1 1 1 1 1 1 1 1 1</sb>
<cb type="pattern">1 1 1 1 1 1 1 1 1 1 1 1</cb>
</segment>
<segments>
<segment id="0" name="global">
<timing R_per_meter="101" C_per_meter="2.25000004521955232483776399022e-14"/>
</segment>
<segment id="1" name="span12"> <!-- span12 ->
<timing R_per_meter="101" C_per_meter="2.25000004521955232483776399022e-14"/>
</segment>
<segment id="2" name="span4"> <!-- span4 -->
<timing R_per_meter="101" C_per_meter="2.25000004521955232483776399022e-14"/>
</segment>
<segment id="3" name="local">
<timing R_per_meter="101" C_per_meter="2.25000004521955232483776399022e-14"/>
</segment>
<segment id="4" name="neighbour">
<timing R_per_meter="101" C_per_meter="2.25000004521955232483776399022e-14"/>
</segment>
</segments>
"""
segment_types = OrderedDict([
('global', {}),
('span12', {}),
('span4', {}),
('local', {}),
('direct', {}),
])
segments = ET.SubElement(rr_graph, 'segments')
for sid, (name, attrib) in enumerate(segment_types.items()):
seg = ET.SubElement(segments, 'segment', {'id':str(sid), 'name':name})
segment_types[name] = sid
ET.SubElement(seg, 'timing', {'R_per_meter': "101", 'C_per_meter':"1.10e-14"})
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Mapping dictionaries
globalname2netnames = {}
globalname2node = {}
globalname2nodeid = {}
netname2globalname = {}
def add_globalname2localname(globalname, pos, localname):
global globalname2netnames
assert isinstance(globalname, GlobalName), "{!r} must be a GlobalName".format(globalname)
assert isinstance(pos, TilePos), "{!r} must be a TilePos".format(pos)
nid = (pos, localname)
if nid in netname2globalname:
assert globalname == netname2globalname[nid], (
"While adding global name {} found existing global name {} for {}".format(
globalname, netname2globalname[nid], nid))
return
netname2globalname[nid] = globalname
if globalname not in globalname2netnames:
globalname2netnames[globalname] = set()
if nid not in globalname2netnames[globalname]:
globalname2netnames[globalname].add(nid)
print("Adding alias for {} is tile {} - {}".format(globalname, pos, localname))
else:
print("Existing alias for {} is tile {} - {}".format(globalname, pos, localname))
def localname2globalname(pos, localname, default=None):
"""Convert from a local name to a globally unique name."""
assert isinstance(pos, TilePos), "{!r} must be a TilePos".format(pos)
nid = (pos, localname)
return netname2globalname.get(nid, default)
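# Illustrative round trip (tile position and net names below are assumptions, matching the
# globalname_pin() convention used later in this script):
# gname = GlobalName("pin", TilePos(1, 1), 'lutff_0/in_0')
# add_globalname2localname(gname, TilePos(1, 1), 'lutff_0/in_0')
# localname2globalname(TilePos(1, 1), 'lutff_0/in_0')  # -> gname
# localname2globalname(TilePos(1, 1), 'unknown_net')   # -> None (the default)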
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Nodes
# --------------------------------
# The rr_nodes tag stores information about each node for the routing resource
# graph. These nodes describe each wire and each logic block pin as represented
# by nodes.
# type - Indicates whether the node is a wire or a logic block.
# * CHANX and CHANY describe a horizontal and vertical channel.
# * SOURCE and SINK describes where nets begin and end.
# * OPIN represents an output pin.
# * IPIN represents an input pin.
# direction
# If the node represents a track (CHANX or CHANY), this field represents its
# direction as {INC_DIR | DEC_DIR | BI_DIR}.
# In other cases this attribute should not be specified.
# -- All channels are BI_DIR in the iCE40
"""
<node id="1536" type="CHANX" direction="BI_DIR" capacity="1">
<loc xlow="1" ylow="0" xhigh="4" yhigh="0" ptc="0"/>
<timing R="404" C="1.25850014003753285507514192432e-13"/>
<segment segment_id="0"/>
</node>
<node id="1658" type="CHANY" direction="BI_DIR" capacity="1">
<loc xlow="4" ylow="1" xhigh="4" yhigh="4" ptc="0"/>
<timing R="404" C="1.01850006293396910805881816486e-13"/>
<segment segment_id="0"/>
</node>
<edge src_node="1536" sink_node="1609" switch_id="1"/>
<edge src_node="1536" sink_node="1618" switch_id="0"/>
<edge src_node="1536" sink_node="1623" switch_id="1"/>
<edge src_node="1536" sink_node="1632" switch_id="0"/>
<edge src_node="1536" sink_node="1637" switch_id="1"/>
<edge src_node="1536" sink_node="1645" switch_id="0"/>
<edge src_node="1536" sink_node="1650" switch_id="1"/>
<edge src_node="1536" sink_node="1658" switch_id="0"/>
<node id="1658" type="CHANY" direction="BI_DIR" capacity="1">
<loc xlow="4" ylow="1" xhigh="4" yhigh="4" ptc="0"/>
<timing R="404" C="1.01850006293396910805881816486e-13"/>
<segment segment_id="0"/>
</node>
<node id="1659" type="CHANY" direction="BI_DIR" capacity="1">
<loc xlow="4" ylow="1" xhigh="4" yhigh="1" ptc="1"/>
<timing R="101" C="6.0040006007264917764487677232e-14"/>
<segment segment_id="0"/>
</node>
"""
nodes = ET.SubElement(rr_graph, 'rr_nodes')
def add_node(globalname, attribs):
"""Add node with globalname and attributes."""
assert isinstance(globalname, GlobalName), "{!r} should be a GlobalName".format(globalname)
# Add common attributes
attribs['capacity'] = str(1)
# Work out the ID for this node and add to the mapping
attribs['id'] = str(len(globalname2node))
node = ET.SubElement(nodes, 'node', attribs)
# Stash in the mappings
assert globalname not in globalname2node
assert globalname not in globalname2nodeid
globalname2node[globalname] = node
globalname2nodeid[globalname] = attribs['id']
# Add some helpful comments
if VERBOSE:
node.append(ET.Comment(" {} ".format(globalname)))
return node
# Edges -----------------------------------------------------------------
edges = ET.SubElement(rr_graph, 'rr_edges')
def add_edge(src_globalname, dst_globalname, bidir=False):
if bidir:
add_edge(src_globalname, dst_globalname)
add_edge(dst_globalname, src_globalname)
return
assert isinstance(src_globalname, GlobalName), "src {!r} should be a GlobalName".format(src_globalname)
assert isinstance(dst_globalname, GlobalName), "dst {!r} should be a GlobalName".format(dst_globalname)
src_node_id = globalname2nodeid[src_globalname]
dst_node_id = globalname2nodeid[dst_globalname]
attribs = {
'src_node': str(src_node_id),
'sink_node': str(dst_node_id),
'switch_id': str(0),
}
e = ET.SubElement(edges, 'edge', attribs)
# Add some helpful comments
if VERBOSE:
e.append(ET.Comment(" {} -> {} ".format(src_globalname, dst_globalname)))
globalname2node[src_globalname].append(ET.Comment(" this -> {} ".format(dst_globalname)))
globalname2node[dst_globalname].append(ET.Comment(" {} -> this ".format(src_globalname)))
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Channels (node) ----------------------------------------------------
channels = {}
for y in range(ic.max_y+1):
channels[(-1,y)] = {}
for x in range(ic.max_x+1):
channels[(x,-1)] = {}
def add_channel(globalname, nodetype, start, end, idx, segtype):
assert isinstance(globalname, GlobalName), "{!r} should be a GlobalName".format(globalname)
assert isinstance(start, TilePos), "{!r} must be a TilePos".format(start)
assert isinstance(end, TilePos), "{!r} must be a TilePos".format(end)
x_start = start[0]
y_start = start[1]
x_end = end[0]
y_end = end[1]
if nodetype == 'CHANY':
assert x_start == x_end
channel = (x_start, -1)
w_start, w_end = y_start, y_end
elif nodetype == 'CHANX':
assert y_start == y_end
channel = (-1, y_start)
w_start, w_end = x_start, x_end
else:
assert False
assert channel in channels, "{} not in {}".format(channel, channels)
if w_start < w_end:
chandir = "INC_DIR"
elif w_start > w_end:
chandir = "DEC_DIR"
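    # NOTE: chandir is informational only at this point; the attribs below always emit BI_DIR channels.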
if idx not in channels[channel]:
channels[channel][idx] = []
channels[channel][idx].append(globalname)
attribs = {
'direction': 'BI_DIR',
'type': nodetype,
}
node = add_node(globalname, attribs)
# <loc xlow="int" ylow="int" xhigh="int" yhigh="int" side="{LEFT|RIGHT|TOP|BOTTOM}" ptc="int">
# xlow, xhigh, ylow, yhigh - Integer coordinates of the ends of this routing source.
# ptc - This is the pin, track, or class number that depends on the rr_node type.
# side - { LEFT | RIGHT | TOP | BOTTOM }
# For IPIN and OPIN nodes specifies the side of the grid tile on which the node
# is located. Purely cosmetic?
ET.SubElement(node, 'loc', {
'xlow': str(x_start), 'ylow': str(y_start),
'xhigh': str(x_end), 'yhigh': str(y_end),
'ptc': str(idx),
})
ET.SubElement(node, 'segment', {'segment_id': str(segtype)})
print("Adding channel {} from {} -> {} pos {}".format(globalname, start, end, idx))
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Pins
# ------------------------------
def globalname_pin(pos, localname):
return GlobalName("pin", TilePos(*pos), localname)
"""
def iceboxname_pin(tiletype, localname):
if tiletype == 'IO':
prefix = 'io['
if localname.startswith(prefix):
return 'io_{}/{}'.format(
localname[len(prefix):len(prefix)+1],
localname[localname.split('.')[-1]],
)
else:
return 'io_global/{}'.format(localname)
elif tiletype == "LOGIC":
prefix = 'lut['
if localname.startswith(prefix):
a, b = localname.split('.')
prefix2 = 'in['
if b.startswith(prefix2):
return 'lutff_{}/{}'.format(
localname[len(prefix):len(prefix)+1],
b
)
else:
return 'lutff_{}/{}'.format(
localname[len(prefix):len(prefix)+1],
b
)
else:
return 'lutff_global/{}'.format(localname)
"""
def pos_to_vpr(pos):
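    # icebox coordinates are shifted by +1 because the VPR grid reserves a border of EMPTY tiles
    # (see the grid generation below, which iterates over range(ic.max_x+3) / range(ic.max_y+3)).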
return [pos[0] + 1, pos[1] + 1]
def add_pin(pos, localname, dir, idx):
"""Add an pin at index i to tile at pos."""
"""
<node id="0" type="SINK" capacity="1">
<loc xlow="0" ylow="1" xhigh="0" yhigh="1" ptc="0"/>
<timing R="0" C="0"/>
</node>
<node id="2" type="IPIN" capacity="1">
<loc xlow="0" ylow="1" xhigh="0" yhigh="1" side="TOP" ptc="0"/>
<timing R="0" C="0"/>
</node>
"""
gname = globalname_pin(pos, localname)
gname_pin = GlobalName(*gname, 'pin')
add_globalname2localname(gname, pos, localname)
vpos = pos_to_vpr(pos)
if dir == "out":
# Sink node
attribs = {
'type': 'SINK',
}
node = add_node(gname, attribs)
ET.SubElement(node, 'loc', {
'xlow': str(vpos[0]), 'ylow': str(vpos[1]),
'xhigh': str(vpos[0]), 'yhigh': str(vpos[1]),
'ptc': str(idx),
})
ET.SubElement(node, 'timing', {'R': str(0), 'C': str(0)})
# Pin node
attribs = {
'type': 'IPIN',
}
node = add_node(gname_pin, attribs)
ET.SubElement(node, 'loc', {
'xlow': str(vpos[0]), 'ylow': str(vpos[1]),
'xhigh': str(vpos[0]), 'yhigh': str(vpos[1]),
'ptc': str(idx),
'side': 'TOP',
})
ET.SubElement(node, 'timing', {'R': str(0), 'C': str(0)})
# Edge between pin node
add_edge(gname, gname_pin)
elif dir == "in":
# Source node
attribs = {
'type': 'SOURCE',
}
node = add_node(gname, attribs)
ET.SubElement(node, 'loc', {
'xlow': str(vpos[0]), 'ylow': str(vpos[1]),
'xhigh': str(vpos[0]), 'yhigh': str(vpos[1]),
'ptc': str(idx),
})
ET.SubElement(node, 'timing', {'R': str(0), 'C': str(0)})
# Pin node
attribs = {
'type': 'OPIN',
}
node = add_node(gname_pin, attribs)
ET.SubElement(node, 'loc', {
'xlow': str(vpos[0]), 'ylow': str(vpos[1]),
'xhigh': str(vpos[0]), 'yhigh': str(vpos[1]),
'ptc': str(idx),
'side': 'TOP',
})
ET.SubElement(node, 'timing', {'R': str(0), 'C': str(0)})
# Edge between pin node
add_edge(gname_pin, gname)
else:
assert False, "Unknown dir of {} for {}".format(dir, gname)
print("Adding pin {} on tile {}@{}".format(gname, pos, idx))
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Local Tracks
# ------------------------------
def globalname_track_local(pos, g, i):
return GlobalName("local", TilePos(*pos), (g, i))
def localname_track_local(pos, g, i):
return 'local_g{}_{}'.format(g, i)
#def iceboxname_track_local(pos, g, i):
# return 'local_g{}_{}'.format(g, i)
def globalname_track_glb2local(pos, i):
return GlobalName("glb2local", TilePos(*pos), i)
def localname_track_glb2local(pos, i):
return 'glb2local_{}'.format(i)
#def iceboxname_track_glb2local(pos, i):
# return 'gbl2local_{}'.format(i)
"""
def _add_local(globalname, pos, ptc):
attribs = {
'direction': 'BI_DIR',
'type': 'CHANX',
}
node = add_node(globalname, attribs)
ET.SubElement(node, 'loc', {
'xlow': str(pos.x), 'ylow': str(pos.y),
'xhigh': str(pos.x), 'yhigh': str(pos.y),
'ptc': str(ptc),
})
ET.SubElement(node, 'segment', {'segment_id': str('local')})
"""
LOCAL_TRACKS_PER_GROUP = 8
LOCAL_TRACKS_MAX_GROUPS = 4
GBL2LOCAL_MAX_TRACKS = 4
SPAN4_MAX_TRACKS = 48
SPAN12_MAX_TRACKS = 24
GLOBAL_MAX_TRACKS = 8
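# With these defaults each tile's CHANY ptc slots are: local tracks 0-31 (4 groups of 8),
# glb2local tracks 32-35; span wires are placed above that via x_channel_offset further below.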
def add_track_local(pos, g, i):
lname = localname_track_local(pos, g, i)
gname = globalname_track_local(pos, g, i)
idx = g * (LOCAL_TRACKS_PER_GROUP) + i
#print("Adding local track {} on tile {}@{}".format(gname, pos, idx))
add_channel(gname, 'CHANY', pos, pos, idx, 'local')
add_globalname2localname(gname, pos, lname)
def add_track_gbl2local(pos, i):
lname = localname_track_glb2local(pos, i)
gname = globalname_track_glb2local(pos, i)
idx = LOCAL_TRACKS_MAX_GROUPS * (LOCAL_TRACKS_PER_GROUP) + i
#print("Adding glb2local {} track {} on tile {}@{}".format(i, gname, pos, idx))
add_channel(gname, 'CHANY', pos, pos, idx, 'gbl2local')
add_globalname2localname(gname, pos, lname)
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def tiles(ic):
for x in range(ic.max_x+1):
for y in range(ic.max_y+1):
yield TilePos(x, y)
all_tiles = list(tiles(ic))
corner_tiles = set()
for x in (0, ic.max_x):
for y in (0, ic.max_y):
corner_tiles.add((x, y))
# Should we just use consistent names instead?
tile_name_map = {"IO" : "PIO", "LOGIC" : "PLB", "RAMB" : "RAMB", "RAMT" : "RAMT"}
# Add the tiles
# ------------------------------
tile_types = {
"PIO": {
"id": 1,
"pin_map": OrderedDict([
('outclk', ('in', 0)),
('inclk', ('in', 1)),
('cen', ('in', 2)),
('latch', ('in', 3)),
('io[0].d_in_0', ('out', 4)),
('io[0].d_in_1', ('out', 5)),
('io[0].d_out_0', ('in', 6)),
('io[0].d_out_1', ('in', 7)),
('io[0].out_enb', ('in', 8)),
('io[1].d_in_0', ('out', 10)),
('io[1].d_in_1', ('out', 11)),
('io[1].d_out_0', ('in', 12)),
('io[1].d_out_1', ('in', 13)),
('io[1].out_enb', ('in', 14)),
]),
'size': (1, 1),
},
"PLB": {
"id": 2,
"pin_map": OrderedDict([
('lut[0].in[0]', ('in', 0)),
('lut[0].in[1]', ('in', 1)),
('lut[0].in[2]', ('in', 2)),
('lut[0].in[3]', ('in', 3)),
('lut[1].in[0]', ('in', 4)),
('lut[1].in[1]', ('in', 5)),
('lut[1].in[2]', ('in', 6)),
('lut[1].in[3]', ('in', 7)),
('lut[2].in[0]', ('in', 8)),
('lut[2].in[1]', ('in', 9)),
('lut[2].in[2]', ('in', 10)),
('lut[2].in[3]', ('in', 11)),
('lut[3].in[0]', ('in', 12)),
('lut[3].in[1]', ('in', 13)),
('lut[3].in[2]', ('in', 14)),
('lut[3].in[3]', ('in', 15)),
('lut[4].in[0]', ('in', 16)),
('lut[4].in[1]', ('in', 17)),
('lut[4].in[2]', ('in', 18)),
('lut[4].in[3]', ('in', 19)),
('lut[5].in[0]', ('in', 20)),
('lut[5].in[1]', ('in', 21)),
('lut[5].in[2]', ('in', 22)),
('lut[5].in[3]', ('in', 23)),
('lut[6].in[0]', ('in', 24)),
('lut[6].in[1]', ('in', 25)),
('lut[6].in[2]', ('in', 26)),
('lut[6].in[3]', ('in', 27)),
('lut[7].in[0]', ('in', 28)),
('lut[7].in[1]', ('in', 29)),
('lut[7].in[2]', ('in', 30)),
('lut[7].in[3]', ('in', 31)),
('cen', ('in', 32)),
('s_r', ('in', 33)),
('lut[0].out', ('out', 34)),
('lut[1].out', ('out', 35)),
('lut[2].out', ('out', 36)),
('lut[3].out', ('out', 37)),
('lut[4].out', ('out', 38)),
('lut[5].out', ('out', 39)),
('lut[6].out', ('out', 40)),
('lut[7].out', ('out', 41)),
('clk', ('in', 32)),
]),
'size': (1, 1),
},
"RAMB": {
"id": 3,
"pin_map": OrderedDict([
('rdata[0]', ('out', 0)),
('rdata[1]', ('out', 0)),
('rdata[2]', ('out', 0)),
('rdata[3]', ('out', 0)),
('rdata[4]', ('out', 0)),
('rdata[5]', ('out', 0)),
('rdata[6]', ('out', 0)),
('rdata[7]', ('out', 0)),
('waddr[0]', ('in', 0)),
('waddr[1]', ('in', 0)),
('waddr[2]', ('in', 0)),
('waddr[3]', ('in', 0)),
('waddr[4]', ('in', 0)),
('waddr[5]', ('in', 0)),
('waddr[6]', ('in', 0)),
('waddr[7]', ('in', 0)),
('waddr[8]', ('in', 0)),
('waddr[9]', ('in', 0)),
('waddr[10]', ('in', 0)),
('mask[0]', ('in', 0)),
('mask[1]', ('in', 0)),
('mask[2]', ('in', 0)),
('mask[3]', ('in', 0)),
('mask[4]', ('in', 0)),
('mask[5]', ('in', 0)),
('mask[6]', ('in', 0)),
('mask[7]', ('in', 0)),
('wdata[0]', ('in', 0)),
('wdata[1]', ('in', 0)),
('wdata[2]', ('in', 0)),
('wdata[3]', ('in', 0)),
('wdata[4]', ('in', 0)),
('wdata[5]', ('in', 0)),
('wdata[6]', ('in', 0)),
('wdata[7]', ('in', 0)),
('we', ('in', 0)),
('wclk', ('in', 0)),
('wclke', ('in', 0)),
]),
'size': (1, 1),
},
"RAMT": {
"id": 4,
"pin_map": OrderedDict([
('rdata[8]', ('out', 0)),
('rdata[9]', ('out', 0)),
('rdata[10]', ('out', 0)),
('rdata[11]', ('out', 0)),
('rdata[12]', ('out', 0)),
('rdata[13]', ('out', 0)),
('rdata[14]', ('out', 0)),
('rdata[15]', ('out', 0)),
('raddr[0]', ('in', 0)),
('raddr[1]', ('in', 0)),
('raddr[2]', ('in', 0)),
('raddr[3]', ('in', 0)),
('raddr[4]', ('in', 0)),
('raddr[5]', ('in', 0)),
('raddr[6]', ('in', 0)),
('raddr[7]', ('in', 0)),
('raddr[8]', ('in', 0)),
('raddr[9]', ('in', 0)),
('raddr[10]', ('in', 0)),
('mask[8]', ('in', 0)),
('mask[9]', ('in', 0)),
('mask[10]', ('in', 0)),
('mask[11]', ('in', 0)),
('mask[12]', ('in', 0)),
('mask[13]', ('in', 0)),
('mask[14]', ('in', 0)),
('mask[15]', ('in', 0)),
('wdata[8]', ('in', 0)),
('wdata[9]', ('in', 0)),
('wdata[10]', ('in', 0)),
('wdata[11]', ('in', 0)),
('wdata[12]', ('in', 0)),
('wdata[13]', ('in', 0)),
('wdata[14]', ('in', 0)),
('wdata[15]', ('in', 0)),
('re', ('in', 0)),
('rclk', ('in', 0)),
('rclke', ('in', 0)),
]),
'size': (1, 1),
},
}
print()
print("Generate tiles types")
print("="*75)
"""
<block_types>
<block_type id="0" name="io" width="1" height="1">
<pin_class type="input">
0 1 2 3
</pin_class>
<pin_class type="output">
4 5 6 7
</pin_class>
</block_type>
</block_types>
"""
tt = ET.SubElement(rr_graph, 'block_types')
for tile_name, tile_desc in tile_types.items():
print("{}".format(tile_name))
tile = ET.SubElement(
tt, 'block_type',
{'id': str(tile_desc['id']),
'name': tile_name,
'width': str(tile_desc["size"][0]),
'height': str(tile_desc["size"][1]),
})
#pins_in = ET.SubElement(tile, 'pin_class', {'type': 'input'})
#pins_out = ET.SubElement(tile, 'pin_class', {'type': 'output'})
# ------------------------------
grid = ET.SubElement(rr_graph, 'grid')
print()
print("Generate grid")
print("="*75)
for x in range(ic.max_x+3):
for y in range(ic.max_y+3):
tx = x - 1
ty = y - 1
block_type_id = 0
if tx >= 0 and tx <= ic.max_x and ty >= 0 and ty <= ic.max_y and (tx,ty) not in corner_tiles:
block_type_id = tile_types[tile_name_map[ic.tile_type(tx, ty)]]["id"]
grid_loc = ET.SubElement(
grid, 'grid_loc',
{'x': str(x),
'y': str(y),
'block_type_id': str(block_type_id),
'width_offset': "0",
'height_offset': "0",
})
print()
print("Generate tiles (with pins and local tracks)")
print("="*75)
for x, y in all_tiles:
# Corner tile == Empty
if (x,y) in corner_tiles:
continue
pos = TilePos(x, y)
tile_type = tile_types[tile_name_map[ic.tile_type(pos.x, pos.y)]]
tid = (pos, tile_type)
attribs = {
'x': str(pos.x), 'y': str(pos.y),
'block_type_id': tile_type["id"],
'width_offset': str(tile_type["size"][0]-1), 'height_offset': str(tile_type["size"][1]-1),
}
# Add pins for the tile
print()
print("{}: Adding pins".format(tid))
print("-"*75)
for idx, (name, (dir, _)) in enumerate(tile_type["pin_map"].items()):
add_pin(pos, name, dir, idx)
# Add the local tracks
    if ic.tile_type(pos.x, pos.y) == "IO":
groups_local = (2, LOCAL_TRACKS_PER_GROUP)
groups_glb2local = 0
else:
groups_local = (LOCAL_TRACKS_MAX_GROUPS, LOCAL_TRACKS_PER_GROUP)
groups_glb2local = GBL2LOCAL_MAX_TRACKS
print()
print("{}: Adding local tracks".format(tid))
print("-"*75)
for g in range(0, groups_local[0]):
for i in range(0, groups_local[1]):
add_track_local(pos, g, i)
if groups_glb2local:
print()
print("{}: Adding glb2local tracks".format(tid))
print("-"*75)
for i in range(0, groups_glb2local):
add_track_gbl2local(pos, i)
# Nets
# ------------------------------
def globalname_net(pos, name):
return netname2globalname[(pos, name)]
def _calculate_globalname_net(group):
tiles = set()
names = set()
assert group
for x, y, name in group:
if name.startswith('lutff_'): # Actually a pin
lut_idx, pin = name.split('/')
if lut_idx == "lutff_global":
return GlobalName("pin", TilePos(x, y), pin)
else:
if '_' in pin:
pin, pin_idx = pin.split('_')
return GlobalName("pin", TilePos(x, y), "lut[{}].{}[{}]".format(lut_idx[len("lutff_"):], pin, pin_idx).lower())
else:
return GlobalName("pin", TilePos(x, y), "lut[{}].{}".format(lut_idx[len("lutff_"):], pin).lower())
elif name.startswith('io_'): # Actually a pin
io_idx, pin = name.split('/')
if io_idx == "io_global":
return GlobalName("pin", TilePos(x, y), pin)
else:
return GlobalName("pin", TilePos(x, y), "io[{}].{}".format(io_idx[len("io_"):], pin).lower())
elif name.startswith('ram/'): # Actually a pin
name = name[len('ram/'):]
if '_' in name:
pin, pin_idx = name.split('_')
return GlobalName("pin", TilePos(x, y), "{}[{}]".format(pin, pin_idx).lower())
else:
return GlobalName("pin", TilePos(x, y), name.lower())
if not name.startswith('sp4_r_v_'):
tiles.add(TilePos(x, y))
names.add(name)
if not tiles:
tiles.add(TilePos(x, y))
assert names, "No names for {}".format(names)
wire_type = []
if len(tiles) == 1:
pos = tiles.pop()
name = names.pop().lower()
if name.startswith('local_'):
m = re.match("local_g([0-3])_([0-7])", name)
assert m, "{!r} didn't match local regex".format(name)
g = int(m.group(1))
i = int(m.group(2))
assert name == localname_track_local(pos, g, i)
return globalname_track_local(pos, g, i)
elif name.startswith('glb2local_'):
m = re.match("glb2local_([0-3])", name)
assert m, "{!r} didn't match glb2local regex".format(name)
i = int(m.group(1))
assert name == localname_track_glb2local(pos, i), "{!r} != {!r}".format(
name, localname_track_glb2local(pos, i))
return globalname_track_glb2local(pos, i)
# Special case when no logic to the right....
elif name.startswith('sp4_r_v_') or name.startswith('neigh_op_'):
m = re.search("_([0-9]+)$", name)
wire_type += ["channel", "stub", name]
wire_type += ["span4"]
wire_type += [(pos, int(m.group(1)), pos, 1)]
return GlobalName(*wire_type)
print("Unknown only local net {}".format(name))
return None
# Global wire, as only has one name?
elif len(names) == 1:
wire_type = ['global', '{}_tiles'.format(len(tiles)), names.pop().lower()]
# Work out the type of wire
if not wire_type:
for n in names:
if n.startswith('span4_horz_'):
if wire_type and 'horizontal' not in wire_type:
wire_type = ['channel', 'span4', 'corner']
break
else:
wire_type = ['channel', 'span4', 'horizontal']
if n.startswith('span4_vert_'):
if wire_type and 'vertical' not in wire_type:
wire_type = ['channel', 'span4', 'corner']
break
else:
wire_type = ['channel', 'span4', 'vertical']
if n.startswith('sp12_h_'):
wire_type = ['channel', 'span12', 'horizontal']
break
if n.startswith('sp12_v_'):
wire_type = ['channel', 'span12', 'vertical']
break
if n.startswith('sp4_h_'):
wire_type = ['channel', 'span4','horizontal']
break
if n.startswith('sp4_v_'):
wire_type = ['channel', 'span4', 'vertical']
break
if n.startswith('neigh_op'):
#wire_type = ['direct', 'neighbour']
break
if n == 'carry_in':
wire_type = ['direct', 'carrychain',]
break
if not wire_type:
return None
if 'channel' in wire_type:
xs = set()
ys = set()
es = set()
for x, y in tiles:
xs.add(x)
ys.add(y)
es.add(ic.tile_pos(x, y))
if 'horizontal' in wire_type:
# Check for constant y value
assert len(ys) == 1, repr((ys, names))
y = ys.pop()
start = TilePos(min(xs), y)
end = TilePos(max(xs), y)
offset = min(xs)
delta = end[0] - start[0]
elif 'vertical' in wire_type:
# Check for constant x value
assert len(xs) in (1, 2), repr((xs, names))
x = xs.pop()
start = TilePos(x, min(ys))
end = TilePos(x, max(ys))
offset = min(ys)
delta = end[1] - start[1]
elif 'corner' in wire_type:
assert len(es) == 2, (es, group)
if 't' in es:
if 'l' in es:
# +--
# |
assert min(xs) == 0
#assert (0,max(ys)) in tiles, tiles
start = TilePos(0,min(ys))
end = TilePos(max(xs), max(ys))
delta = max(ys)-min(ys)+min(xs)
elif 'r' in es:
# --+
# |
#assert (max(xs), max(ys)) in tiles, tiles
start = TilePos(min(xs), max(ys))
end = TilePos(max(xs), min(ys))
delta = max(xs)-min(xs) + max(ys)-min(ys)
else:
assert False
elif 'b' in es:
if 'l' in es:
# |
# +--
assert min(xs) == 0
assert min(ys) == 0
#assert (0,0) in tiles, tiles
start = TilePos(0,max(ys))
end = TilePos(max(xs), 0)
delta = max(xs) + max(ys)-min(ys)
elif 'r' in es:
# |
# --+
assert min(ys) == 0
#assert (max(xs), 0) in tiles, tiles
start = TilePos(min(xs), 0)
end = TilePos(max(xs), max(ys))
delta = max(xs)-min(xs) + max(ys)
else:
assert False
else:
assert False, 'Unknown span corner wire {}'.format((es, group))
offset = 0 # FIXME: ????
elif 'neighbour' in wire_type:
x = list(sorted(xs))[int(len(xs)/2)+1]
y = list(sorted(ys))[int(len(ys)/2)+1]
return None
elif 'carrychain' in wire_type:
assert len(xs) == 1
assert len(ys) == 2
start = TilePos(min(xs), min(ys))
end = TilePos(min(xs), max(ys))
delta = 1
return None
else:
assert False, 'Unknown span wire {}'.format((wire_type, group))
assert start in tiles
assert end in tiles
n = None
for x, y, name in group:
if x == start[0] and y == start[1]:
n = int(name.split("_")[-1])
break
assert n is not None
if "span4" in wire_type:
max_channels = SPAN4_MAX_TRACKS
max_span = 4
elif "span12" in wire_type:
max_channels = SPAN12_MAX_TRACKS
max_span = 12
finish_per_offset = int(max_channels / max_span)
filled = (max_channels - ((offset * finish_per_offset) % max_channels))
idx = (filled + n) % max_channels
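        # Worked example: for a span4 wire (48 tracks, span 4) finish_per_offset = 12;
        # with offset = 2 and n = 5, filled = 48 - (24 % 48) = 24 and idx = (24 + 5) % 48 = 29.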
#wire_type.append('{:02}-{:02}x{:02}-{:02}x{:02}'.format(delta, start[0], start[1], end[0], end[1]))
wire_type.append((start, idx, end, delta))
return GlobalName(*wire_type)
# ------------------------------
print()
print("Calculating nets")
print("="*75)
def filter_name(localname):
if localname.endswith('cout') or localname.endswith('lout'):
return True
if localname.startswith('padout_') or localname.startswith('padin_'):
return True
if localname in ("fabout","carry_in","carry_in_mux"):
return True
return False
def filter_localnames(group):
fgroup = []
for x,y,name in group:
if not ic.tile_has_entry(x, y, name):
print("Skipping {} on {},{}".format(name, x,y))
continue
if filter_name(name):
continue
fgroup.append((x, y, name))
return fgroup
def add_net_global(i):
lname = 'glb_netwk_{}'.format(i)
gname = GlobalName('global', '248_tiles', lname)
add_channel(gname, 'CHANY', TilePos(0, 0), TilePos(0, 0), i, 'global')
for i in range(0, 8):
add_net_global(i)
add_channel(GlobalName('global', 'fabout'), 'CHANY', TilePos(0, 0), TilePos(0, 0), 0, 'global')
# ------------------------------
all_group_segments = ic.group_segments(all_tiles, connect_gb=False)
for group in sorted(all_group_segments):
fgroup = filter_localnames(group)
if not fgroup:
continue
print()
gname = _calculate_globalname_net(tuple(fgroup))
if not gname:
print('Could not calculate global name for', group)
continue
if gname[0] == "pin":
alias_type = "pin"
assert gname in globalname2netnames, gname
else:
alias_type = "net"
if gname not in globalname2netnames:
print("Adding net {}".format(gname))
            print(gname, group)
for x, y, netname in fgroup:
add_globalname2localname(gname, TilePos(x, y), netname)
# Create the channels
# -------------------
print()
print("Adding span channels")
print("-"*75)
x_channel_offset = LOCAL_TRACKS_MAX_GROUPS * (LOCAL_TRACKS_PER_GROUP) + GBL2LOCAL_MAX_TRACKS
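# = 4*8 + 4 = 36 with the defaults above: span tracks in vertical (CHANY) channels start
# after the per-tile local and glb2local ptc slots; horizontal (CHANX) channels start at 0.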
y_channel_offset = 0
def add_track_span(globalname):
start, idx, end, delta = globalname[-1]
x_start = start[0]
y_start = start[1]
x_end = end[0]
y_end = end[1]
if x_start == x_end:
nodetype = 'CHANY'
assert "vertical" in globalname or "stub" in globalname
idx += x_channel_offset
elif y_start == y_end:
nodetype = 'CHANX'
assert "horizontal" in globalname or "stub" in globalname
idx += y_channel_offset
else:
return
if 'span4' in globalname:
segtype = 'span4'
elif 'span12' in globalname:
segtype = 'span12'
idx += SPAN4_MAX_TRACKS #+ 1
elif 'local' in globalname:
segtype = 'local'
else:
assert False, globalname
add_channel(globalname, nodetype, start, end, idx, segtype)
for globalname in sorted(globalname2netnames.keys()):
if globalname[0] != "channel":
continue
add_track_span(globalname)
print()
print()
print()
print("Channel summary")
print("="*75)
for channel in sorted(channels):
print()
print(channel)
print("-"*75)
m = max(channels[channel])
for idx in range(0, m+1):
print()
print(idx)
if idx not in channels[channel]:
print("-"*5)
continue
for track in channels[channel][idx]:
if track in globalname2netnames:
print(track, globalname2netnames[track])
else:
print(track, None)
print()
print("Generate channels")
print("="*75)
# TODO check this
chwm = LOCAL_TRACKS_MAX_GROUPS * (LOCAL_TRACKS_PER_GROUP+1) + GBL2LOCAL_MAX_TRACKS + SPAN4_MAX_TRACKS + SPAN12_MAX_TRACKS + GLOBAL_MAX_TRACKS
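# With the constants above this evaluates to 4*(8+1) + 4 + 48 + 24 + 8 = 120 tracks per channel.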
chans = ET.SubElement(rr_graph, 'channels')
chan = ET.SubElement(
chans, 'channel',
{'chan_width_max': str(chwm),
'x_min': str(0),
'x_max': str(chwm),
'y_min': str(0),
'y_max': str(chwm),
})
for i in range(4):
x_list = ET.SubElement(
chans, 'x_list',
{'index': str(i),
'info': str(chwm)
})
y_list = ET.SubElement(
chans, 'y_list',
{'index': str(i),
'info': str(chwm)
})
# Generating edges
# ------------------------------
# These need to match the architecture definition given to vpr.
# rr_edges
# rr_edges tag that encloses information about all the edges between nodes.
# Each rr_edges tag contains multiple subtags:
# <edge src_node="int" sink_node="int" switch_id="int"/>
# This subtag repeats every edge that connects nodes together in the graph.
# Required Attributes:
# * src_node, sink_node
# The index for the source and sink node that this edge connects to.
# * switch_id
# The type of switch that connects the two nodes.
"""
<rr_edges>
<edge src_node="0" sink_node="1" switch_id="0"/>
<edge src_node="1" sink_node="2" switch_id="0"/>
</rr_edges>
"""
print()
print("Generating edges")
print("="*75)
for x, y in all_tiles:
pos = TilePos(x, y)
if pos in corner_tiles:
continue
print()
print(x, y)
print("-"*75)
for entry in ic.tile_db(x, y):
if not ic.tile_has_entry(x, y, entry):
continue
switch_type = entry[1]
if switch_type not in ("routing", "buffer"):
continue
rtype = entry[1]
src_localname = entry[2]
dst_localname = entry[3]
if filter_name(src_localname) or filter_name(dst_localname):
continue
src_globalname = localname2globalname(pos, src_localname, default='???')
dst_globalname = localname2globalname(pos, dst_localname, default='???')
src_nodeid = globalname2nodeid.get(src_globalname, None)
dst_nodeid = globalname2nodeid.get(dst_globalname, None)
if src_nodeid is None or dst_nodeid is None:
print("Skipping {} ({}, {}) -> {} ({}, {})".format(
(pos, src_localname), src_globalname, src_nodeid,
(pos, dst_localname), dst_globalname, dst_nodeid,
))
else:
add_edge(src_globalname, dst_globalname, switch_type == "routing")
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
f = open('rr_graph.xml', 'w')
f.write(ET.tostring(rr_graph, pretty_print=True).decode('utf-8'))
f.close()
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# 'local_'
# 'neigh_'
# ((11, 10, 'neigh_op_tnr_0'),
# (11, 11, 'neigh_op_rgt_0'),
# (11, 12, 'neigh_op_bnr_0'),
#
# (12, 10, 'neigh_op_top_0'),
# (12, 11, 'lutff_0/out'),
# (12, 12, 'neigh_op_bot_0'),
#
# (13, 10, 'logic_op_tnl_0'),
# (13, 11, 'logic_op_lft_0'),
# (13, 12, 'logic_op_bnl_0'))
# (11,12) | (12,12) | (13,12)
# --------+---------+--------
# (11,11) | (12,11) | (13,11)
# --------+---------+--------
# (11,10) | (12,10) | (13,10)
# bnr | bot | l bnl
# --------+---------+--------
# rgt |lutff/out| l lft
# --------+---------+--------
# tnr | top | l tnl
# channel, multiple tiles
# 'sp12_'
# 'sp4_'
# pin, one tile
# 'lutff_'
# sp4_v
# (11, 12, 'sp4_r_v_b_10'), (12, 12, 'sp4_v_b_10'),
# (11, 11, 'sp4_r_v_b_23'), (12, 11, 'sp4_v_b_23'),
# (11, 10, 'sp4_r_v_b_34'), (12, 10, 'sp4_v_b_34'),
# (11, 9, 'sp4_r_v_b_47'), (12, 9, 'sp4_v_b_47'),
# (12, 8, 'sp4_v_t_47'),
# sp4_h
# ((5, 9, 'sp4_h_r_9'),
# (6, 9, 'sp4_h_r_20'),
# (7, 9, 'sp4_h_r_33'),
# (8, 9, 'sp4_h_r_44'),
# (9, 9, 'sp4_h_l_44'))
# ((0, 1, 'glb_netwk_2'),
# (0, 2, 'glb_netwk_2'),
# (0, 3, 'glb_netwk_2'),
# ...
# ((0, 1, 'io_global/latch'),
# (0, 2, 'io_global/latch'),
# (0, 3, 'io_global/latch'),
# (0, 4, 'io_global/latch'),
# (0, 5, 'io_global/latch'),
# (0, 6, 'io_global/latch'),
# (0, 7, 'fabout'),
# (0, 7, 'io_global/latch'),
# (0, 8, 'io_global/latch'),
# (0, 9, 'io_global/latch'),
# (0, 10, 'io_global/latch'),
# (0, 11, 'io_global/latch'),
# (0, 12, 'io_global/latch'),
# (0, 13, 'io_global/latch'),
# (0, 14, 'io_global/latch'),
# (0, 15, 'io_global/latch'),
# (0, 16, 'io_global/latch'))
# .buffer X Y DST_NET_INDEX CONFIG_BITS_NAMES
# CONFIG_BITS_VALUES_1 SRC_NET_INDEX_1
# .routing X Y DST_NET_INDEX CONFIG_BITS_NAMES
# CONFIG_BITS_VALUES_1 SRC_NET_INDEX_1
```
#### File: utils/vlog/xmlinc.py
```python
import lxml.etree as ET
import os
xi_url = "http://www.w3.org/2001/XInclude"
xi_include = "{{{}}}include".format(xi_url)
ET.register_namespace('xi', xi_url)
def make_relhref(outfile, href):
outpath = os.path.dirname(os.path.abspath(outfile))
relpath = os.path.relpath(os.path.dirname(os.path.abspath(href)), outpath)
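    # e.g. outfile="build/arch.xml", href="primitives/lut.xml" -> "../primitives/lut.xml" (hypothetical paths)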
return os.path.join(relpath, os.path.basename(href))
def include_xml(parent, href, outfile, xptr = None):
"""
Generate an XML include, using a relative path.
Inputs
------
parent : XML element to insert include into
href : path to included file
outfile : path to output file, for relative path generation
xptr : optional value for xpointer attribute
"""
xattrs = {'href': make_relhref(outfile, href)}
if xptr is not None:
xattrs["xpointer"] = xptr
return ET.SubElement(parent, xi_include, xattrs)
```
#### File: vlog/yosys/run.py
```python
import os, subprocess, sys, re
import tempfile, json
import yosys.utils
def get_yosys():
"""Return how to execute Yosys: the value of $YOSYS if set, otherwise just
`yosys`."""
return os.getenv("YOSYS", "yosys")
def get_output(params):
"""Run Yosys with given command line parameters, and return stdout as a string"""
cmd = [get_yosys()] + params
return subprocess.check_output(cmd).decode("utf-8")
defines = []
def add_define(defname):
"""Add a Verilog define to the list of defines to set in Yosys"""
defines.append(defname)
def get_defines():
"""Return a list of set Verilog defines, as a list of arguments to pass to Yosys `read_verilog`"""
return " ".join(["-D" + _ for _ in defines])
def commands(commands, infiles = []):
"""Run a given string containing Yosys commands
Inputs
-------
commands : string of Yosys commands to run
infiles : list of input files
"""
commands = "read_verilog {} {}; ".format(get_defines(), " ".join(infiles)) + commands
params = ["-q", "-p", commands]
return get_output(params)
def script(script, infiles = []):
"""Run a Yosys script given a path to the script
Inputs
-------
script : path to Yosys script to run
infiles : list of input files
"""
params = ["-q", "-s", script] + infiles
return get_output(params)
def vlog_to_json(infiles, flatten = False, aig = False, mode = None, module_with_mode = None):
"""
Convert Verilog to a JSON representation using Yosys
Inputs
-------
infiles : list of input files
flatten : set to flatten output hierarchy
aig : generate And-Inverter-Graph modules for gates
mode : set to a value other than None to use `chparam` to set the value of the MODE parameter
module_with_mode : the name of the module to apply `mode` to
"""
prep_opts = "-flatten" if flatten else ""
json_opts = "-aig" if aig else ""
if mode is not None:
mode_str = 'chparam -set MODE "{}" {}; '.format(mode, module_with_mode)
else:
mode_str = ""
cmds = "{}prep {}; write_json {}".format(mode_str, prep_opts, json_opts)
j = yosys.utils.strip_yosys_json(commands(cmds, infiles))
"""with open('dump.json', 'w') as dbg:
print(j,file=dbg)"""
return json.loads(j)
def extract_pin(module, pstr, _regex=re.compile(r"([^/]+)/([^/]+)")):
"""
Extract the pin from a line of the result of a Yosys select command, or
None if the command result is irrelevant (e.g. does not correspond to the
correct module)
Inputs
-------
module: Name of module to extract pins from
pstr: Line from Yosys select command (`module/pin` format)
"""
    m = _regex.match(pstr)
if m and m.group(1) == module:
return m.group(2)
else:
return None
def do_select(infiles, module, expr):
"""
Run a Yosys select command (given the expression and input files) on a module
and return the result as a list of pins
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
expr: Yosys selector expression for select command
"""
"""TODO: All of these functions involve a fairly large number of calls to Yosys
Although performance here is unlikely to be a major priority any time soon,
it might be worth investigating better options?"""
outfile = tempfile.mktemp()
sel_cmd = "prep -top {} -flatten; cd {}; select -write {} {}" .format(module, module, outfile, expr)
commands(sel_cmd, infiles)
pins = []
with open(outfile, 'r') as f:
for net in f:
snet = net.strip()
if(len(snet) > 0):
pin = extract_pin(module, snet)
if pin is not None:
pins.append(pin)
os.remove(outfile)
return pins
def get_combinational_sinks(infiles, module, innet):
"""Return a list of output ports which are combinational sinks of a given
input.
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
innet: Name of input net to find sinks of
"""
return do_select(infiles, module, "{} %coe* o:* %i {} %d".format(innet, innet))
def list_clocks(infiles, module):
"""Return a list of clocks in the module
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
"""
return do_select(infiles, module, "c:* %x:+[CLK] a:CLOCK=1 %u c:* %d")
def get_clock_assoc_signals(infiles, module, clk):
"""Return the list of signals associated with a given clock.
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
clk: Name of clock to find associated signals
"""
return do_select(infiles, module, "select -list {} %x* i:* o:* %u %i a:ASSOC_CLOCK={} %u {} %d".format(clk, clk, clk))
``` |
{
"source": "jholt13/relocatable-python",
"score": 2
} |
#### File: relocatable-python/locallibs/get.py
```python
from __future__ import print_function
import os
import shutil
import subprocess
import sys
import tempfile
CURL = "/usr/bin/curl"
DITTO = "/usr/bin/ditto"
PKGUTIL = "/usr/sbin/pkgutil"
DEFAULT_BASEURL = "https://www.python.org/ftp/python/%s/python-%s%s-macosx%s.pkg"
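# e.g. DEFAULT_BASEURL % ("2.7.15", "2.7.15", "", "10.9")
#   -> "https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.9.pkg"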
DEFAULT_PYTHON_VERSION = "2.7.15"
DEFAULT_OS_VERSION = "10.9"
DEFAULT_CANDIDATE_VERSION = ""
class FrameworkGetter(object):
"""Handles getting the Python.org pkg and extracting the framework"""
downloaded_pkg_path = ""
expanded_path = ""
def __init__(
self,
candidate_version=DEFAULT_CANDIDATE_VERSION,
python_version=DEFAULT_PYTHON_VERSION,
os_version=DEFAULT_OS_VERSION,
base_url=DEFAULT_BASEURL,
):
self.candidate_version = candidate_version
self.python_version = python_version
self.os_version = os_version
self.base_url = base_url
self.destination = ""
def __del__(self):
"""Clean up"""
if self.expanded_path:
shutil.rmtree(self.expanded_path)
if self.downloaded_pkg_path:
os.unlink(self.downloaded_pkg_path)
def download(self):
"""Downloads a macOS installer pkg from python.org.
Returns path to the download."""
url = self.base_url % (
self.python_version,
self.python_version,
self.candidate_version,
self.os_version,
)
(file_handle, destination_path) = tempfile.mkstemp()
os.close(file_handle)
cmd = [CURL, "-o", destination_path, url]
print("Downloading %s..." % url)
subprocess.check_call(cmd)
self.downloaded_pkg_path = destination_path
def expand(self):
"""Uses pkgutil to expand our downloaded pkg. Returns a path to the
expanded contents."""
self.expanded_path = self.downloaded_pkg_path + "__expanded__"
cmd = [
PKGUTIL,
"--expand",
self.downloaded_pkg_path,
self.expanded_path,
]
print("Expanding %s..." % self.downloaded_pkg_path)
subprocess.check_call(cmd)
def extract_framework(self):
"""Extracts the Python framework from the expanded pkg"""
payload = os.path.join(
self.expanded_path, "Python_Framework.pkg/Payload"
)
cmd = [DITTO, "-xz", payload, self.destination]
print("Extracting %s to %s..." % (payload, self.destination))
subprocess.check_call(cmd)
def download_and_extract(self, destination="."):
"""Downloads and extracts the Python framework.
Returns path to the framework."""
destination = os.path.expanduser(destination)
if os.path.basename(destination) != "Python.framework":
destination = os.path.join(destination, "Python.framework")
if os.path.exists(destination):
print(
"Destination %s already exists!" % destination, file=sys.stderr
)
return None
self.destination = destination
try:
self.download()
self.expand()
self.extract_framework()
return destination
except subprocess.CalledProcessError as err:
print("%s" % err, file=sys.stderr)
return None
``` |
{
"source": "jholtmann/surveyor",
"score": 2
} |
#### File: surveyor/products/microsoft_defender_for_endpoints.py
```python
import configparser
import json
import logging
import os
import requests
from common import Product, Tag, Result
class DefenderForEndpoints(Product):
"""
Surveyor implementation for product "Microsoft Defender For Endpoint"
"""
product: str = 'dfe'
creds_file: str # path to credential configuration file
_token: str # AAD access token
def __init__(self, profile: str, creds_file: str, **kwargs):
if not os.path.isfile(creds_file):
raise ValueError(f'Credential file {creds_file} does not exist')
self.creds_file = creds_file
super().__init__(self.product, profile, **kwargs)
def _authenticate(self):
config = configparser.ConfigParser()
config.sections()
config.read(self.creds_file)
if self.profile not in config:
raise ValueError(f'Profile {self.profile} is not present in credential file')
section = config[self.profile]
if 'tenantId' not in section or 'appId' not in section or 'appSecret' not in section:
raise ValueError(f'Credential file must contain tenantId, appId, and appSecret values')
self._token = self._get_aad_token(section['tenantId'], section['appId'], section['appSecret'])
def _get_aad_token(self, tenant_id: str, app_id: str, app_secret: str):
"""
Retrieve an authentication token from Azure Active Directory using app ID and secret.
"""
self.log.debug(f'Acquiring AAD access token for tenant {tenant_id} and app {app_id}')
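        # OAuth2 client-credentials grant against the AAD v1 token endpoint for the Defender for Endpoint API.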
body = {
"resource": 'https://api.securitycenter.windows.com',
"client_id": app_id,
"client_secret": app_secret,
"grant_type": "client_credentials"
}
url = f"https://login.windows.net/{tenant_id}/oauth2/token"
response = requests.get(url, data=body)
response.raise_for_status()
return response.json()['access_token']
def _post_advanced_query(self, data: dict, headers: dict) -> list[Result]:
results = set()
try:
url = "https://api.securitycenter.microsoft.com/api/advancedqueries/run"
response = requests.post(url, data=json.dumps(data).encode('utf-8'), headers=headers)
if response.status_code == 200:
for res in response.json()["Results"]:
result = Result(res["DeviceName"], res["AccountName"], res["ProcessCommandLine"], res["FolderPath"],
(res["Timestamp"],))
results.add(result)
else:
self._echo(f"Received status code: {response.status_code} (message: {response.json()})")
except KeyboardInterrupt:
self._echo("Caught CTRL-C. Rerun surveyor")
except Exception as e:
self._echo(f"There was an exception {e}")
self.log.exception(e)
return list(results)
def _get_default_header(self):
return {
"Authorization": 'Bearer ' + self._token,
"Content-Type": 'application/json',
"Accept": 'application/json'
}
def process_search(self, tag: Tag, base_query: dict, query: str) -> None:
query = query + self.build_query(base_query)
query = "DeviceEvents " + query + \
" | project DeviceName, AccountName, ProcessCommandLine, FolderPath, Timestamp "
query = query.rstrip()
self.log.debug(f'Query: {query}')
query = {'Query': query}
results = self._post_advanced_query(data=query, headers=self._get_default_header())
self._add_results(list(results), tag)
def nested_process_search(self, tag: Tag, criteria: dict, base_query: dict) -> None:
results = set()
query_base = self.build_query(base_query)
try:
for search_field, terms in criteria.items():
all_terms = ', '.join(f"'{term}'" for term in terms)
if search_field == 'process_name':
query = f" | where FileName has_any ({all_terms})"
elif search_field == "filemod":
query = f" | where FileName has_any ({all_terms})"
elif search_field == "ipaddr":
query = f" | where RemoteIP has_any ({all_terms})"
elif search_field == "cmdline":
query = f" | where ProcessCommandLine has_any ({all_terms})"
elif search_field == "digsig_publisher":
query = f" | where Signer has_any ({all_terms})"
elif search_field == "domain":
query = f" | where RemoteUrl has_any ({all_terms})"
elif search_field == "internal_name":
query = f" | where ProcessVersionInfoInternalFileName has_any ({all_terms})"
else:
self._echo(f'Query filter {search_field} is not supported by product {self.product}',
logging.WARNING)
continue
query = "union DeviceEvents, DeviceFileCertificateInfo, DeviceProcessEvents" + query_base + query \
+ " | project DeviceName, AccountName, ProcessCommandLine, FolderPath, Timestamp "
query = query.rstrip()
self.log.debug(f'Query: {query}')
data = {'Query': query}
for entry in self._post_advanced_query(data=data, headers=self._get_default_header()):
results.add(entry)
except KeyboardInterrupt:
self._echo("Caught CTRL-C. Returning what we have...")
self._add_results(list(results), tag)
def build_query(self, filters: dict) -> str:
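        # Illustrative: {'days': 7, 'hostname': 'WS01'} builds
        # '| where Timestamp > ago(7d)| where DeviceName contains "WS01"'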
query_base = ''
for key, value in filters.items():
if key == 'days':
query_base += f'| where Timestamp > ago({value}d)'
elif key == 'minutes':
query_base += f'| where Timestamp > ago({value}m)'
elif key == 'hostname':
query_base += f'| where DeviceName contains "{value}"'
elif key == 'username':
query_base += f'| where AccountName contains "{value}"'
else:
self._echo(f'Query filter {key} is not supported by product {self.product}', logging.WARNING)
return query_base
def get_other_row_headers(self) -> list[str]:
return ['Timestamp']
```
#### File: surveyor/products/sentinel_one.py
```python
import configparser
import json
import logging
import os
import time
from tqdm import tqdm
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Optional, Tuple, Callable
import requests
from requests.adapters import HTTPAdapter
from common import Product, Tag, Result
from help import datetime_to_epoch_millis
@dataclass
class Query:
start_date: datetime
end_date: datetime
parameter: Optional[str]
operator: Optional[str]
search_value: Optional[str]
full_query: Optional[str] = None
PARAMETER_MAPPING: dict[str, str] = {
'process_name': 'ProcessName',
'ipaddr': 'IP',
'cmdline': 'CmdLine',
'digsig_publisher': 'SrcProcPublisher',
'domain': 'Url',
'internal_name': 'TgtFileInternalName'
}
class SentinelOne(Product):
"""
Surveyor implementation for product "SentinelOne"
"""
product: str = 's1'
creds_file: str # path to credential configuration file
_token: str # AAD access token
_url: str # URL of SentinelOne console
_site_id: Optional[str] # Site ID for SentinelOne
_account_id: Optional[str] # Account ID for SentinelOne
_session: requests.Session
_queries: dict[Tag, list[Query]]
_last_request: float
_site_ids: list[str]
def __init__(self, profile: str, creds_file: str, account_id: Optional[list[str]] = None,
site_id: Optional[list[str]] = None, account_name: Optional[list[str]] = None, **kwargs):
if not os.path.isfile(creds_file):
raise ValueError(f'Credential file {creds_file} does not exist')
self.creds_file = creds_file
self._queries = dict()
self._last_request = 0.0
super().__init__(self.product, profile, **kwargs)
config = configparser.ConfigParser()
config.read(self.creds_file)
# instantiate site_ids and account_ids if not set
site_ids = site_id if site_id else list()
account_ids = account_id if account_id else list()
account_names = account_name if account_name else list()
# extract account/site ID from configuration if set
if 'account_id' in config[profile] and config[profile]['account_id'] not in account_ids:
account_ids.append(config[profile]['account_id'])
if 'site_id' in config[profile] and config[profile]['site_id'] not in site_ids:
site_ids.append(config[profile]['site_id'])
if 'account_name' in config[profile] and config[profile]['account_name'] not in account_names:
account_names.append(config[profile]['account_name'])
# determine site IDs to query (default is all)
self._site_ids = site_ids
# ensure specified site IDs are valid
site_response_data = self._get_all_paginated_data(self._build_url('/web/api/v2.1/sites'),
params={'siteIds': ','.join(site_ids)},
add_default_params=False)
existing_site_ids = set[int]()
for response in site_response_data:
for site in response['sites']:
existing_site_ids.add(site['id'])
for scope_id in self._site_ids:
if scope_id not in existing_site_ids:
raise ValueError(f'Site with ID {scope_id} does not exist')
# get site IDs for each specified account id
for scope_id in account_ids:
for response in self._get_all_paginated_data(self._build_url('/web/api/v2.1/sites'),
params={'accountId': scope_id},
add_default_params=False):
for site in response['sites']:
if site['id'] not in self._site_ids:
self._site_ids.append(site['id'])
for name in account_names:
for response in self._get_all_paginated_data(self._build_url('/web/api/v2.1/sites'),
params={'name': name},
add_default_params=False):
for site in response['sites']:
if site['id'] not in self._site_ids:
self._site_ids.append(site['id'])
self.log.debug(f'Site IDs: {self._site_ids}')
def _authenticate(self):
config = configparser.ConfigParser()
config.read(self.creds_file)
if self.profile not in config:
raise ValueError(f'Profile {self.profile} is not present in credential file')
section = config[self.profile]
# ensure configuration has required fields
if 'url' not in section:
raise ValueError(f'S1 configuration invalid, ensure "url" is specified')
if 'site_id' not in section and 'account_id' not in section:
raise ValueError(f'S1 configuration invalid, specify a site_id or account_id')
# extract required information from configuration
if 'token' in section:
self._token = section['token']
else:
if 'S1_TOKEN' not in os.environ:
raise ValueError(f'S1 configuration invalid, specify "token" configuration value or "S1_TOKEN" '
f'environment variable')
self._token = os.environ['S1_TOKEN']
self._site_id = section['site_id'] if 'site_id' in section else None
self._account_id = section['account_id'] if 'account_id' in section else None
self._url = section['url'].rstrip('/')
if not self._url.startswith('https://'):
raise ValueError(f'URL must start with "https://"')
# create a session and a pooled HTTPAdapter
self._session = requests.session()
self._session.mount('https://', HTTPAdapter(pool_connections=10, pool_maxsize=10, max_retries=3))
# test API key by retrieving the sensor count, which is a fast operation
data = self._session.get(self._build_url('/web/api/v2.1/agents/count'),
headers=self._get_default_header(),
params=self._get_default_body()).json()
if 'errors' in data:
if data['errors'][0]['code'] == 4010010:
raise ValueError(f'Failed to authenticate to SentinelOne: {data}')
else:
raise ValueError(f'Error when authenticating to SentinelOne: {data}')
def _build_url(self, stem: str):
"""
Assemble URL for SentinelOne API query using base URI and URI stem.
"""
if not stem.startswith('/'):
stem = '/' + stem
return self._url + stem
def _get_default_body(self) -> dict:
"""
Get the default request body for a SentinelOne API query.
"""
return {"siteIds": [self._site_id]} if self._site_id else {"accountIds": [self._account_id]}
def _get_default_header(self):
"""
Get the default header for a SentinelOne API query.
"""
return {"Authorization": f"ApiToken {self._token}", "Content-Type": "application/json"}
def build_query(self, filters: dict) -> Tuple[str, datetime, datetime]:
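        # Illustrative: {'days': 3, 'username': 'alice'} returns
        # (' UserName containscis "alice"', utcnow - 3 days, utcnow)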
to_date = datetime.utcnow()
from_date = to_date - timedelta(days=14)
query_base = ''
for key, value in filters.items():
if key == 'days':
from_date = to_date - timedelta(days=value)
elif key == 'minutes':
from_date = to_date - timedelta(minutes=value)
elif key == 'hostname':
if query_base:
query_base += ' AND '
query_base += f' EndpointName containscis "{value}"'
elif key == 'username':
if query_base:
query_base += ' AND '
query_base += f' UserName containscis "{value}"'
else:
self._echo(f'Query filter {key} is not supported by product {self.product}', logging.WARNING)
# S1 requires the date range to be supplied in the query request, not the query text
# therefore we return the from/to dates separately
return query_base, from_date, to_date
def _get_all_paginated_data(self, url: str, params: Optional[dict] = None, headers: Optional[dict] = None,
key: str = 'data', after_request: Optional[Callable] = None, limit: int = 1000,
no_progress: bool = True, progress_desc: str = 'Retrieving data',
add_default_params: bool = True) -> list[dict]:
"""
Get and return all paginated data from the response, making additional queries if necessary.
:param url: URL to make GET request to.
:param params: Additional parameters for GET request
:param limit: Number of items to query per page.
        :param headers: Additional headers for GET request.
:param key: Dictionary key in which result data resides.
:param after_request: Optional callable that is executed after each pagination request. The callable is
passed the response to the last API call.
:param no_progress: Suppress progress bar.
:param progress_desc: Specify description for progress bar.
:param add_default_params: Whether _get_default_body() should be added to parameter set.
:returns: List containing data from all pages.
"""
if params is None:
params = dict()
if add_default_params:
params.update(self._get_default_body())
params['limit'] = limit
if headers is None:
headers = dict()
headers.update(self._get_default_header())
data = list[dict]()
total: int = 0
next_cursor = True
with tqdm(desc=progress_desc, disable=not self._tqdm_echo or no_progress) as p_bar:
while next_cursor:
response = self._session.get(url, params=params, headers=headers)
if after_request:
# execute after request callback
after_request(response)
response.raise_for_status()
call_data = response.json()[key]
if not isinstance(call_data, list):
call_data = [call_data]
self.log.debug(f'Got {len(call_data)} results in page')
data.extend(call_data)
pagination_data = response.json()['pagination']
# update progress bar
if pagination_data['totalItems'] > total:
total = pagination_data['totalItems']
p_bar.reset(total=total)
p_bar.update(len(call_data))
next_cursor = pagination_data['nextCursor']
params['cursor'] = next_cursor
return data
def _get_dv_events(self, query_id: str) -> list[dict]:
"""
Retrieve events associated with a SentinelOne Deep Visibility query ID.
"""
p_bar = tqdm(desc='Running query', disable=not self._tqdm_echo, total=100)
try:
last_progress_status = 0
while True:
query_status_response = self._session.get(self._build_url('/web/api/v2.1/dv/query-status'),
params={'queryId': query_id}, headers=self._get_default_header())
query_status_response.raise_for_status()
data = query_status_response.json()['data']
self.log.debug(str(data))
p_bar.update(data['progressStatus'] - last_progress_status)
last_progress_status = data['progressStatus']
if data['progressStatus'] == 100 or data['responseState'] == 'FAILED':
if data['responseState'] == 'FAILED':
raise ValueError(f'S1QL query failed with message "{data["responseError"]}"')
p_bar.close()
return self._get_all_paginated_data(self._build_url('/web/api/v2.1/dv/events'),
params={'queryId': query_id},
no_progress=False,
progress_desc='Retrieving query results')
else:
# query-status endpoint has a one request per second rate limit
time.sleep(1)
except Exception as e:
p_bar.close()
raise e
def process_search(self, tag: Tag, base_query: dict, query: str) -> None:
build_query, from_date, to_date = self.build_query(base_query)
query = query + build_query
self._echo(f'Built Query: {query}')
if tag not in self._queries:
self._queries[tag] = list()
query = Query(from_date, to_date, None, None, None, query)
self._queries[tag].append(query)
def nested_process_search(self, tag: Tag, criteria: dict, base_query: dict):
query_base, from_date, to_date = self.build_query(base_query)
try:
for search_field, terms in criteria.items():
all_terms = ', '.join(f'"{term}"' for term in terms)
if search_field not in PARAMETER_MAPPING:
self._echo(f'Query filter {search_field} is not supported by product {self.product}',
logging.WARNING)
continue
parameter = PARAMETER_MAPPING[search_field]
search_value = all_terms
if len(terms) > 1:
search_value = f'({all_terms})'
operator = 'in contains anycase'
else:
operator = 'contains'
if tag not in self._queries:
self._queries[tag] = list()
self._queries[tag].append(Query(from_date, to_date, parameter, operator, search_value))
except KeyboardInterrupt:
self._echo("Caught CTRL-C. Returning what we have...")
def _process_queries(self):
"""
Process all cached queries.
"""
start_date = datetime.utcnow()
end_date = start_date
# determine earliest start date
for tag, queries in self._queries.items():
for query in queries:
if query.start_date < start_date:
start_date = query.start_date
try:
# queries with certain operators can be combined into a more compact query format
# key is a tuple of the query operator and parameter
            # value is a list of tuples where each tuple contains the query tag and search value
combined_queries = dict[Tuple[str, str], list[Tuple[Tag, str]]]()
# tuple contains tag and full query
# these chunks will be combined with OR statements and executed
query_text = list[Tuple[Tag, str]]()
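            # e.g. two single-term 'contains' ProcessName queries ("cmd.exe" and "powershell.exe") are
            # merged below into: ProcessName in contains anycase ("cmd.exe", "powershell.exe")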
for tag, queries in self._queries.items():
for query in queries:
if query.operator == 'contains':
key = (query.operator, query.parameter)
if query.operator not in combined_queries:
combined_queries[key] = list()
combined_queries[key].append((tag, query.search_value))
else:
full_query = query.parameter + ' ' + query.operator + ' ' + query.search_value
query_text.append((tag, full_query))
# merge combined queries and add them to query_text
data: list[Tuple[Tag, str]]
for (operator, parameter), data in combined_queries.items():
if operator == 'contains':
full_query = f'{parameter} in contains anycase ({", ".join(x[1] for x in data)})'
tag = Tag(','.join(tag[0].tag for tag in data), ','.join(tag[0].data for tag in data))
query_text.append((tag, full_query))
else:
                    raise NotImplementedError(f'Combining operator "{operator}" queries is not supported')
# all queries that need to be executed are now in query_text
# execute queries in chunks
chunk_size = 10
# merge queries into one large query and execute it
for i in range(0, len(query_text), chunk_size):
# do not chain more than 10 ORs in a S1QL query
merged_tags = set[Tag]()
merged_query = ''
for tag, query in query_text[i:i + chunk_size]:
# combine queries with ORs
if merged_query:
merged_query += ' OR '
merged_query += query
# add tags to set to de-duplicate
merged_tags.add(tag)
# merge all query tags into a single string
merged_tag = Tag(','.join(tag.tag for tag in merged_tags), ','.join(tag.data for tag in merged_tags))
if len(self._site_ids):
# restrict query to specified sites
# S1QL does not support restricting a query to a specified account ID
merged_query = f'SiteID in contains ("' + '", "'.join(self._site_ids) + f'") AND ({merged_query})'
# build request body for DV API call
params = self._get_default_body()
params.update({
"fromDate": datetime_to_epoch_millis(start_date),
"isVerbose": False,
"queryType": ['events'], # options: 'events', 'procesState'
"limit": 20000,
"toDate": datetime_to_epoch_millis(end_date),
"query": merged_query
})
self.log.debug(f'Query params: {params}')
# ensure we do not submit more than one request every 60 seconds to comply with rate limit
                seconds_since_last_request = time.time() - self._last_request
                if seconds_since_last_request < 60:
                    sleep_seconds = 60 - seconds_since_last_request
self.log.debug(f'Sleeping for {sleep_seconds}')
time.sleep(sleep_seconds)
# start deep visibility API call
query_response = self._session.post(self._build_url('/web/api/v2.1/dv/init-query'),
headers=self._get_default_header(), data=json.dumps(params))
self._last_request = time.time()
body = query_response.json()
if 'errors' in body and any(('could not parse query' in x['detail'] for x in body['errors'])):
raise ValueError(f'S1 could not parse query "{merged_query}"')
self.log.debug(query_response.json())
query_response.raise_for_status()
query_id = body['data']['queryId']
self.log.info(f'Query ID is {query_id}')
events = self._get_dv_events(query_id)
self.log.debug(f'Got {len(events)} events')
self._results[merged_tag] = list()
for event in events:
hostname = event['endpointName']
username = event['srcProcUser']
path = event['processImagePath']
command_line = event['srcProcCmdLine']
additional_data = (event['eventTime'], event['siteId'], event['siteName'])
result = Result(hostname, username, path, command_line, additional_data)
self._results[merged_tag].append(result)
self._queries.clear()
except KeyboardInterrupt:
self._echo("Caught CTRL-C. Returning what we have . . .")
def get_results(self, final_call: bool = True) -> dict[Tag, list[Result]]:
self.log.debug('Entered get_results')
# process any unprocessed queries
if final_call and len(self._queries) > 0:
self.log.debug(f'Executing additional _process_queries')
self._process_queries()
return self._results
def get_other_row_headers(self) -> list[str]:
return ['Event Time', 'Site ID', 'Site Name']
``` |
{
"source": "jhomble/electron435",
"score": 3
} |
#### File: copct-master/monroe_corpus/monroe_domain.py
```python
from monroe_static import locs, watercos, powercos, poslocs, sleaders, gens, food, pcrews
from monroe_utils import unify, single_unify
"""
M: maximum length of v for all (u,v) in causal relation
"""
M = 6
def mid_causes(v):
"""
Encodes all mid-level causal relations in the knowledge base.
Inputs:
v: A sequence of tasks in the form (state, taskname, parameters)
Each state has the form (objects, facts)
Outputs:
g: The set of all possible causes of v, each also in the form (state, taskname, parameters).
"""
states = tuple(s for (s,t,x) in v) # states (each of the form (objs, facts))
tasknames = tuple(t for (s,t,x) in v) # Task names
params = tuple((None,)+x for (s,t,x) in v) # Parameter lists, leading None for task name offset
g = set()
"""
;; clean-up-hazard
(:method (clean-up-hazard ?from ?to)
very-hazardous ;; just call the feds
((hazard-seriousness ?from ?to very-hazardous))
((!call fema))
normal ;; we can take care of it
((hazard-team ?ht))
((get-to ?ht ?from) (!clean-hazard ?ht ?from ?to)))
"""
if tasknames == ('!CALL',) and params[0][1] == 'FEMA':
m = unify(states[0][1], ('HAZARD-SERIOUSNESS', None, None, 'VERY-HAZARDOUS'))
for (fromloc, toloc) in m:
g.add((states[0],'CLEAN-UP-HAZARD', (fromloc, toloc)))
if tasknames == ('GET-TO','!CLEAN-HAZARD'):
fromloc, toloc = params[1][2], params[1][3]
if fromloc == params[0][2]:
g.add((states[0],'CLEAN-UP-HAZARD', (fromloc, toloc)))
# Missing get-to
if tasknames == ('!CLEAN-HAZARD',):
fromloc, toloc = params[0][2], params[0][3]
g.add((states[0],'CLEAN-UP-HAZARD', (fromloc, toloc)))
"""
;; block-road - blocks off a road
(:method (block-road ?from ?to)
normal
((police-unit ?police))
(:unordered (set-up-cones ?from ?to)
(get-to ?police ?from)))
"""
if tasknames == ('SET-UP-CONES','GET-TO'):
fromloc, toloc = params[0][1], params[0][2]
if fromloc == params[1][2]:
g.add((states[0],'BLOCK-ROAD', (fromloc, toloc)))
if tasknames == ('GET-TO','SET-UP-CONES'):
fromloc, toloc = params[1][1], params[1][2]
if fromloc == params[0][2]:
g.add((states[0],'BLOCK-ROAD', (fromloc, toloc)))
# Missing get-to
if tasknames == ('SET-UP-CONES',):
fromloc, toloc = params[0][1], params[0][2]
g.add((states[0],'BLOCK-ROAD', (fromloc, toloc)))
"""
;; unblock-road - unblocks a road
(:method (unblock-road ?from ?to)
normal
()
((take-down-cones ?from ?to)))
"""
if tasknames == ('TAKE-DOWN-CONES',):
fromloc, toloc = params[0][1], params[0][2]
g.add((states[0],'UNBLOCK-ROAD', (fromloc, toloc)))
"""
;; get-electricity provides electricity to a site (if not already there)
(:method (get-electricity ?loc)
already-has-electricity ;; do nothing
((not (no-electricity ?loc)))
()
no-electricity
()
((generate-temp-electricity ?loc))
)
"""
if tasknames == ('GENERATE-TEMP-ELECTRICITY',):
loc = params[0][1]
g.add((states[0],'GET-ELECTRICITY', (loc,)))
"""
;; repair-pipe
(:method (repair-pipe ?from ?to) ;; repairs a pipe at location
normal
((water-crew ?crew))
((get-to ?crew ?from)
(set-up-cones ?from ?to)
(open-hole ?from ?to)
(!replace-pipe ?crew ?from ?to)
(close-hole ?from ?to)
(take-down-cones ?from ?to)))
"""
if tasknames == ('GET-TO','SET-UP-CONES','OPEN-HOLE','!REPLACE-PIPE','CLOSE-HOLE','TAKE-DOWN-CONES'):
fromloc, toloc = params[0][2], params[1][2]
if (fromloc == params[1][1] == params[2][1] == params[3][2] == params[4][1] == params[5][1]) and (toloc == params[2][2] == params[3][3] == params[4][2] == params[5][2]):
g.add((states[0],'REPAIR-PIPE', (fromloc, toloc)))
# Missing get-to
if tasknames == ('SET-UP-CONES','OPEN-HOLE','!REPLACE-PIPE','CLOSE-HOLE','TAKE-DOWN-CONES'):
fromloc, toloc = params[0][1], params[0][2]
if (fromloc == params[1][1] == params[2][2] == params[3][1] == params[4][1]) and (toloc == params[2][3] == params[3][2] == params[4][2]):
g.add((states[0],'REPAIR-PIPE', (fromloc, toloc)))
"""
;; open-hole
(:method (open-hole ?from ?to) ;; opens a hole in the street
normal
((backhoe ?backhoe))
((get-to ?backhoe ?from)
(!dig ?backhoe ?from)))
"""
if tasknames == ('GET-TO','!DIG'):
fromloc = params[0][2]
if fromloc == params[1][2]:
for toloc in poslocs:
g.add((states[0],'OPEN-HOLE', (fromloc, toloc)))
# Missing get-to
if tasknames == ('!DIG',):
fromloc = params[0][2]
for toloc in poslocs:
g.add((states[0],'OPEN-HOLE', (fromloc, toloc)))
"""
;; close-hole
(:method (close-hole ?from ?to) ;; opens a hole in the street
normal
((backhoe ?backhoe))
((get-to ?backhoe ?from)
(!fill-in ?backhoe ?from)))
"""
if tasknames == ('GET-TO','!FILL-IN'):
fromloc = params[0][2]
if fromloc == params[1][2]:
for toloc in poslocs:
g.add((states[0],'CLOSE-HOLE', (fromloc, toloc)))
# Missing get-to
if tasknames == ('!FILL-IN',):
fromloc = params[0][2]
for toloc in poslocs:
g.add((states[0],'CLOSE-HOLE', (fromloc, toloc)))
"""
;; set-up-cones
(:method (set-up-cones ?from ?to) ;; sets up orange cones at road
normal
((work-crew ?crew))
((get-to ?crew ?from) (!place-cones ?crew)))
"""
if tasknames == ('GET-TO','!PLACE-CONES'):
fromloc = params[0][2]
for toloc in poslocs:
g.add((states[0],'SET-UP-CONES', (fromloc, toloc)))
# Missing get-to
if tasknames == ('!PLACE-CONES',):
crew = params[0][1]
m = unify(states[0][1], ('ATLOC', crew, None))
# crew could be at both a town and posloc within a town
if len(m)==1:
fromloc = m.pop()[0]
for toloc in poslocs:
g.add((states[0],'SET-UP-CONES', (fromloc, toloc)))
else:
for fromloc in poslocs:
for toloc in poslocs:
g.add((states[0],'SET-UP-CONES', (fromloc, toloc)))
"""
;; take-down-cones
(:method (take-down-cones ?from ?to) ;; takes down cones
normal
((work-crew ?crew))
((get-to ?crew ?from) (!pickup-cones ?crew)))
"""
if tasknames == ('GET-TO','!PICKUP-CONES'):
fromloc = params[0][2]
for toloc in poslocs:
g.add((states[0],'TAKE-DOWN-CONES', (fromloc, toloc)))
# Missing get-to
if tasknames == ('!PICKUP-CONES',):
crew = params[0][1]
m = unify(states[0][1], ('ATLOC', crew, None))
# crew could be at both a town and posloc within a town
if len(m)==1:
fromloc = m.pop()[0]
for toloc in poslocs:
g.add((states[0],'TAKE-DOWN-CONES', (fromloc, toloc)))
else:
for fromloc in poslocs:
for toloc in poslocs:
g.add((states[0],'TAKE-DOWN-CONES', (fromloc, toloc)))
"""
;; clear-wreck
(:method (clear-wreck ?from ?to) ;; gets rid of a wreck in any loc
normal
((wrecked-vehicle ?from ?to ?veh) (garbage-dump ?dump))
((tow-to ?veh ?dump)))
"""
if tasknames == ('TOW-TO',):
m = unify(states[0][1], ('WRECKED-VEHICLE', None, None, None))
for (fromloc, toloc, veh) in m:
g.add((states[0],'CLEAR-WRECK', (fromloc, toloc)))
"""
;; tow-to - tows a vehicle somewhere
(:method (tow-to ?veh ?to)
normal
((tow-truck ?ttruck) (vehicle ?veh) (atloc ?veh ?vehloc))
((get-to ?ttruck ?vehloc)
(!hook-to-tow-truck ?ttruck ?veh)
(get-to ?ttruck ?to)
(!unhook-from-tow-truck ?ttruck ?veh)))
"""
if tasknames == ('GET-TO','!HOOK-TO-TOW-TRUCK','GET-TO','!UNHOOK-FROM-TOW-TRUCK'):
veh, toloc = params[1][2], params[2][2]
g.add((states[0],'TOW-TO', (veh, toloc)))
# Missing get-to branches
if tasknames == ('!HOOK-TO-TOW-TRUCK','GET-TO','!UNHOOK-FROM-TOW-TRUCK'):
veh, toloc = params[0][2], params[1][2]
g.add((states[0],'TOW-TO', (veh, toloc)))
if tasknames == ('GET-TO','!HOOK-TO-TOW-TRUCK','!UNHOOK-FROM-TOW-TRUCK'):
veh = params[1][2]
for toloc in ['BRIGHTON-DUMP','HENRIETTA-DUMP']:
g.add((states[0],'TOW-TO', (veh, toloc)))
if tasknames == ('!HOOK-TO-TOW-TRUCK','!UNHOOK-FROM-TOW-TRUCK'):
veh = params[0][2]
for toloc in ['BRIGHTON-DUMP','HENRIETTA-DUMP']:
g.add((states[0],'TOW-TO', (veh, toloc)))
"""
;; clear-tree
(:method (clear-tree ?tree) ;; this gets rid of a tree in any loc
normal
((tree-crew ?tcrew) (tree ?tree)
(atloc ?tree ?treeloc))
((get-to ?tcrew ?treeloc) (!cut-tree ?tcrew ?tree)
(remove-blockage ?tree)))
"""
if tasknames == ('GET-TO','!CUT-TREE','REMOVE-BLOCKAGE'):
tree = params[1][2]
g.add((states[0],'CLEAR-TREE', (tree,)))
# Missing get-to
if tasknames == ('GET-TO','!CUT-TREE'):
tree = params[1][2]
g.add((states[0],'CLEAR-TREE', (tree,)))
if tasknames == ('!CUT-TREE','REMOVE-BLOCKAGE'):
tree = params[0][2]
g.add((states[0],'CLEAR-TREE', (tree,)))
    if tasknames == ('!CUT-TREE',):
tree = params[0][2]
g.add((states[0],'CLEAR-TREE', (tree,)))
"""
;; remove-blockage
(:method (remove-blockage ?stuff)
move-to-side-of-street
((work-crew ?crew) (atloc ?stuff ?loc))
((get-to ?crew ?loc)
(!carry-blockage-out-of-way ?crew ?stuff)))
"""
if tasknames == ('GET-TO','!CARRY-BLOCKAGE-OUT-OF-WAY'):
stuff = params[1][2]
g.add((states[0],'REMOVE-BLOCKAGE', (stuff,)))
# Missing get-to
if tasknames == ('!CARRY-BLOCKAGE-OUT-OF-WAY',):
stuff = params[0][2]
g.add((states[0],'REMOVE-BLOCKAGE', (stuff,)))
"""
(:method (remove-blockage ?stuff)
carry-away
((garbage-dump ?dump))
((get-to ?stuff ?dump)))
"""
if tasknames == ('GET-TO',):
dump = params[0][2]
if dump in ('HENRIETTA-DUMP','BRIGHTON-DUMP'):
stuff = params[0][1]
g.add((states[0],'REMOVE-BLOCKAGE', (stuff,)))
"""
;; declare-curfew
(:method (declare-curfew ?town)
normal
()
(:unordered (!call EBS) (!call police-chief)))
"""
if tasknames == ('!CALL', '!CALL'):
if 'EBS' in (params[0][1], params[1][1]):
for town in locs:
g.add((states[0],'DECLARE-CURFEW', (town,)))
"""
;; generate-temp-electricity
(:method (generate-temp-electricity ?loc)
with-generator
((generator ?gen))
((make-full-fuel ?gen) (get-to ?gen ?loc) (!hook-up ?gen ?loc)
(!turn-on ?gen)))
"""
if tasknames == ('MAKE-FULL-FUEL','GET-TO','!HOOK-UP','!TURN-ON'):
loc = params[1][2]
if loc == params[2][2]:
g.add((states[0],'GENERATE-TEMP-ELECTRICITY', (loc,)))
# Missing get-to
if tasknames == ('MAKE-FULL-FUEL','!HOOK-UP','!TURN-ON'):
loc = params[1][2]
g.add((states[0],'GENERATE-TEMP-ELECTRICITY', (loc,)))
"""
;; make-full-fuel - makes sure arg1 is full of fuel
(:method (make-full-fuel ?gen)
with-gas-can
((gas-can ?gc) (atloc ?gen ?genloc) (service-station ?ss))
((get-to ?gc ?ss) (add-fuel ?ss ?gc) (get-to ?gc ?genloc)
(!pour-into ?gc ?gen)))
"""
if tasknames == ('GET-TO','ADD-FUEL','GET-TO','!POUR-INTO'):
gen = params[3][2]
if params[0][1] == params[1][2] == params[2][1]:
g.add((states[0],'MAKE-FULL-FUEL', (gen,)))
"""
(:method (make-full-fuel ?gen)
at-service-station
((service-station ?ss))
((get-to ?gen ?ss) (add-fuel ?ss ?gen)))
"""
if tasknames == ('GET-TO','ADD-FUEL'):
if params[0][1] == params[1][2] and params[0][2] == params[1][1]:
gen = params[0][1]
g.add((states[0],'MAKE-FULL-FUEL', (gen,)))
# Missing get-to
if tasknames == ('ADD-FUEL',):
gen = params[0][2]
g.add((states[0],'MAKE-FULL-FUEL', (gen,)))
"""
;; add-fuel (at service-station)
(:method (add-fuel ?ss ?obj)
normal
()
(:unordered (!pay ?ss) (!pump-gas-into ?ss ?obj)))
"""
if tasknames in (('!PAY','!PUMP-GAS-INTO'), ('!PUMP-GAS-INTO','!PAY')):
ss = params[0][1]
if len(params[0]) > 2:
obj = params[0][2]
else:
obj = params[1][2]
g.add((states[0],'ADD-FUEL', (ss, obj)))
"""
;; repair-line
(:method (repair-line ?crew ?lineloc)
with-tree
((tree ?tree) (atloc ?tree ?lineloc)
(atloc ?crew ?lineloc))
((shut-off-power ?crew ?lineloc)
(:unordered (clear-tree ?tree)
(!remove-wire ?crew ?lineloc))
(!string-wire ?crew ?lineloc) (turn-on-power ?crew ?lineloc))
"""
if tasknames in (('SHUT-OFF-POWER','CLEAR-TREE','!REMOVE-WIRE','!STRING-WIRE','TURN-ON-POWER'),('SHUT-OFF-POWER','!REMOVE-WIRE','CLEAR-TREE','!STRING-WIRE','TURN-ON-POWER')):
crew, lineloc = params[0][1], params[0][2]
g.add((states[0],'REPAIR-LINE', (crew, lineloc)))
"""
without-tree
((atloc ?crew ?lineloc))
((shut-off-power ?crew ?lineloc)
(!remove-wire ?crew ?lineloc)
(!string-wire ?crew ?lineloc) (turn-on-power ?crew ?lineloc)))
"""
if tasknames == ('SHUT-OFF-POWER','!REMOVE-WIRE','!STRING-WIRE','TURN-ON-POWER'):
crew, lineloc = params[0][1], params[0][2]
g.add((states[0],'REPAIR-LINE', (crew, lineloc)))
"""
;; shut-off-power
(:method (shut-off-power ?crew ?loc)
normal
((in-town ?loc ?town) (powerco-of ?town ?powerco))
(!call ?powerco))
"""
if tasknames == ('!CALL',) and params[0][1] in powercos:
for obj in states[0][0]:
if obj in pcrews:
for loc in powercos[params[0][1]]:
g.add((states[0],'SHUT-OFF-POWER', (obj, loc)))
"""
;; turn-on-power
(:method (turn-on-power ?crew ?loc)
normal
((in-town ?loc ?town) (powerco-of ?town ?powerco))
(!call ?powerco))
"""
if tasknames == ('!CALL',) and params[0][1] in powercos:
for obj in states[0][0]:
if obj in pcrews:
for loc in powercos[params[0][1]]:
g.add((states[0],'TURN-ON-POWER', (obj, loc)))
"""
;; shut-off-water
(:method (shut-off-water ?from ?to)
normal
((in-town ?from ?town) (waterco-of ?town ?waterco))
((!call ?waterco)))
"""
if tasknames == ('!CALL',) and params[0][1] in watercos:
for fromloc in watercos[params[0][1]]:
for toloc in poslocs:
g.add((states[0],'SHUT-OFF-WATER', (fromloc, toloc)))
"""
;; turn-on-water
(:method (turn-on-water ?from ?to)
normal
((in-town ?from ?town) (waterco-of ?town ?waterco))
((!call ?waterco)))
"""
if tasknames == ('!CALL',) and params[0][1] in watercos:
for fromloc in watercos[params[0][1]]:
for toloc in poslocs:
g.add((states[0],'TURN-ON-WATER', (fromloc, toloc)))
"""
;; emt-treat
(:method (emt-treat ?person)
emt
((emt-crew ?emt) (atloc ?person ?personloc))
((get-to ?emt ?personloc) (!treat ?emt ?person)))
"""
if tasknames == ('GET-TO', '!TREAT'):
person = params[1][2]
g.add((states[0],'EMT-TREAT', (person,)))
# Missing get-to:
if tasknames == ('!TREAT',):
person = params[0][2]
g.add((states[0],'EMT-TREAT', (person,)))
"""
;; stabilize
(:method (stabilize ?person)
emt
()
((emt-treat ?person)))
"""
if tasknames == ('EMT-TREAT',):
person = params[0][1]
g.add((states[0],'STABILIZE', (person,)))
"""
;; get-to
(:method (get-to ?obj ?place)
already-there
((atloc ?obj ?place))
())
(:method (get-to ?person ?place)
person-drives-themself
((not (atloc ?person ?place))
(person ?person) (vehicle ?veh) (atloc ?veh ?vehloc)
(atloc ?person ?vehloc))
((drive-to ?person ?veh ?place)))
"""
if tasknames == ('DRIVE-TO',):
person, place = params[0][1], params[0][3]
g.add((states[0],'GET-TO', (person, place)))
"""
(:method (get-to ?veh ?place)
vehicle-gets-driven
((not (atloc ?veh ?place))
(person ?person)
(vehicle ?veh) (atloc ?veh ?vehloc)
(atloc ?person ?vehloc)
)
((drive-to ?person ?veh ?place)))
"""
if tasknames == ('DRIVE-TO',):
veh, place = params[0][2], params[0][3]
g.add((states[0],'GET-TO', (veh, place)))
"""
(:method (get-to ?obj ?place)
as-cargo
((not (atloc ?obj ?place))
(vehicle ?veh)
(atloc ?obj ?objloc) (fit-in ?obj ?veh)
(not (non-ambulatory ?obj)))
((get-to ?veh ?objloc) (get-in ?obj ?veh) (get-to ?veh ?place)
(get-out ?obj ?veh))
"""
if tasknames == ('GET-TO','GET-IN','GET-TO','GET-OUT'):
veh, obj, place = params[0][1], params[1][1], params[2][2]
if (veh == params[1][2] == params[2][1] == params[3][2]) and (obj == params[3][1]):
g.add((states[0],'GET-TO', (obj, place)))
if obj[:3]=='GEN': # deal with monroe bug?
g.add((states[0],'GET-TO', (obj, 'TEXACO1')))
# Missing get-to
if tasknames == ('GET-IN','GET-TO','GET-OUT'):
veh, obj, place = params[0][2], params[0][1], params[1][2]
if (veh == params[0][2] == params[1][1] == params[2][2]) and (obj == params[2][1]):
g.add((states[0],'GET-TO', (obj, place)))
if obj[:3]=='GEN': # monroe bug?
g.add((states[0],'GET-TO', (obj, 'TEXACO1')))
if tasknames == ('GET-TO','GET-IN','GET-OUT'):
veh, obj = params[1][2], params[1][1]
if (veh == params[0][1] == params[2][2]) and (obj == params[2][1]):
m = unify(states[2][1], ('ATLOC', veh, None))
if len(m)==1:
place = m.pop()[0]
g.add((states[0],'GET-TO', (obj, place)))
else:
for loc in poslocs:
g.add((states[0],'GET-TO',(obj,loc)))
if obj[:3]=='GEN': # monroe bug?
g.add((states[0],'GET-TO', (obj, 'TEXACO1')))
if tasknames == ('GET-IN','GET-OUT'):
veh, obj = params[1][2], params[1][1]
if (veh == params[0][2]) and (obj == params[0][1]):
m = unify(states[1][1], ('ATLOC', veh, None))
if len(m)==1:
place = m.pop()[0]
g.add((states[0],'GET-TO', (obj, place)))
else:
m = unify(states[1][1], ('ATLOC', obj, None))
if len(m)==1:
place = m.pop()[0]
g.add((states[0],'GET-TO', (obj, place)))
else:
for loc in poslocs:
g.add((states[0],'GET-TO',(obj,loc)))
if obj[:3]=='GEN': # monroe bug?
g.add((states[0],'GET-TO', (obj, 'TEXACO1')))
"""
with-ambulance ;; same as above, just with ambulance
((not (atloc ?obj ?place))
(atloc ?obj ?objloc) (ambulance ?veh) (fit-in ?obj ?veh)
)
((get-to ?veh ?objloc) (stabilize ?obj) (get-in ?obj ?veh)
(get-to ?veh ?place) (get-out ?obj ?veh))
)
"""
if tasknames == ('GET-TO','STABILIZE','GET-IN','GET-TO','GET-OUT'):
veh, obj, place = params[0][1], params[1][1], params[3][2]
if (veh == params[2][2] == params[3][1] == params[4][2]) and (obj == params[2][1] == params[4][1]):
g.add((states[0],'GET-TO', (obj, place)))
# Missing get-to
if tasknames == ('STABILIZE','GET-IN','GET-TO','GET-OUT'):
veh, obj, place = params[1][2], params[0][1], params[2][2]
if (veh == params[2][1] == params[3][2]) and (obj == params[1][1] == params[3][1]):
g.add((states[0],'GET-TO', (obj, place)))
"""
(:method (drive-to ?person ?veh ?loc)
normal
((person ?person) (vehicle ?veh) (atloc ?veh ?vehloc)
(atloc ?person ?vehloc) (can-drive ?person ?veh))
((!navegate-vehicle ?person ?veh ?loc)))
"""
if tasknames == ('!NAVEGATE-VEHICLE',):
person, veh, loc = params[0][1], params[0][2], params[0][3]
g.add((states[0],'DRIVE-TO', (person, veh, loc)))
"""
(:method (get-in ?obj ?veh)
ambulatory-person
((atloc ?obj ?objloc) (atloc ?veh ?objloc)
(person ?obj) (not (non-ambulatory ?obj)))
(!climb-in ?obj ?veh)
"""
if tasknames == ('!CLIMB-IN',):
obj, veh = params[0][1], params[0][2]
g.add((states[0],'GET-IN', (obj, veh)))
"""
load-in
((atloc ?obj ?objloc) (atloc ?veh ?objloc)
(person ?person) (can-lift ?person ?obj))
((get-to ?person ?objloc) (!load ?person ?obj ?veh)))
"""
if tasknames == ('GET-TO', '!LOAD'):
obj, veh = params[1][2], params[1][3]
g.add((states[0],'GET-IN', (obj, veh)))
# Missing get-to
if tasknames == ('!LOAD',):
obj, veh = params[0][2], params[0][3]
g.add((states[0],'GET-IN', (obj, veh)))
"""
(:method (get-out ?obj ?veh)
ambulatory-person
((person ?obj) (not (non-ambulatory ?obj)))
(!climb-out ?obj ?veh)
"""
if tasknames == ('!CLIMB-OUT',):
obj, veh = params[0][1], params[0][2]
g.add((states[0],'GET-OUT', (obj, veh)))
"""
unload
((atloc ?veh ?vehloc) (person ?person) (can-lift ?person ?obj))
((get-to ?person ?vehloc) (!unload ?person ?obj ?veh)))
"""
if tasknames == ('GET-TO', '!UNLOAD'):
obj, veh = params[1][2], params[1][3]
g.add((states[0],'GET-OUT', (obj, veh)))
# Missing get-to
if tasknames == ('!UNLOAD',):
obj, veh = params[0][2], params[0][3]
g.add((states[0],'GET-OUT', (obj, veh)))
return g
def top_causes(v):
"""
Encodes all top-level causal relations in the knowledge base.
Inputs:
v: A sequence of tasks in the form (state, taskname, parameters)
Each state has the form (objects, facts)
Outputs:
g: The set of all possible causes of v, each also in the form (state, taskname, parameters).
"""
states = tuple(s for (s,t,x) in v) # states (each of the form (objs, facts))
tasknames = tuple(t for (s,t,x) in v)
params = tuple((None,)+x for (s,t,x) in v) # Leading None for task name offset
g = set()
"""
;;set-up-shelter sets up a shelter at a certain location
(:method (set-up-shelter ?loc)
normal
((shelter-leader ?leader)
(not (assigned-to-shelter ?leader ?other-shelter))
(food ?food))
((get-electricity ?loc) (get-to ?leader ?loc) (get-to ?food ?loc)))
"""
if tasknames == ('GET-ELECTRICITY','GET-TO','GET-TO'):
loc = params[0][1]
if loc == params[1][2] == params[2][2]:
if params[1][1] in sleaders and params[2][1] in food:
g.add((states[0],'SET-UP-SHELTER', (loc,)))
# Missing get-elecricity
if tasknames == ('GET-TO','GET-TO'):
loc = params[0][2]
if loc == params[1][2]:
if params[0][1] in sleaders and params[1][1] in food:
g.add((states[0],'SET-UP-SHELTER', (loc,)))
"""
;;fix-water-main
(:method (fix-water-main ?from ?to)
normal
()
((shut-off-water ?from ?to)
(repair-pipe ?from ?to)
(turn-on-water ?from ?to)))
"""
if tasknames == ('SHUT-OFF-WATER','REPAIR-PIPE','TURN-ON-WATER'):
fromloc, toloc = params[0][1], params[0][2]
if (fromloc == params[1][1] == params[2][1]) and (toloc == params[1][2] == params[2][2]):
g.add((states[0],'FIX-WATER-MAIN', (fromloc, toloc)))
"""
;; clear-road-hazard - cleans up a hazardous spill
(:method (clear-road-hazard ?from ?to)
normal
()
((block-road ?from ?to)
(clean-up-hazard ?from ?to)
(unblock-road ?from ?to)))
"""
if tasknames == ('BLOCK-ROAD','CLEAN-UP-HAZARD','UNBLOCK-ROAD'):
fromloc, toloc = params[0][1], params[0][2]
if (fromloc == params[1][1] == params[2][1]) and (toloc == params[1][2] == params[2][2]):
g.add((states[0],'CLEAR-ROAD-HAZARD',(fromloc,toloc)))
"""
;; clear-road-wreck - gets a wreck out of the road
(:method (clear-road-wreck ?from ?to)
normal
()
((set-up-cones ?from ?to)
(clear-wreck ?from ?to)
(take-down-cones ?from ?to)))
"""
if tasknames == ('SET-UP-CONES','CLEAR-WRECK','TAKE-DOWN-CONES'):
fromloc, toloc = params[0][1], params[0][2]
if (fromloc == params[1][1] == params[2][1]) and (toloc == params[1][2] == params[2][2]):
g.add((states[0],'CLEAR-ROAD-WRECK', (fromloc, toloc)))
"""
;; clear-road-tree
(:method (clear-road-tree ?from ?to) ;; clears a tree that's in the road
normal
((tree-blocking-road ?from ?to ?tree))
((set-up-cones ?from ?to)
(clear-tree ?tree)
(take-down-cones ?from ?to)))
"""
if tasknames == ('SET-UP-CONES','CLEAR-TREE','TAKE-DOWN-CONES'):
fromloc, toloc = params[0][1], params[0][2]
if (fromloc == params[2][1]) and (toloc == params[2][2]):
g.add((states[0],'CLEAR-ROAD-TREE', (fromloc, toloc)))
"""
;; plow-road
(:method (plow-road ?from ?to)
plow
((road-snowy ?from ?to)
(snowplow ?plow)
(atloc ?plow ?plowloc)
(plowdriver ?driver)
)
((get-to ?driver ?plowloc)
(!navegate-snowplow ?driver ?plow ?from) ;; must use nav-snowplow
;; since regular cars can't drive if snowy
(!engage-plow ?driver ?plow)
(!navegate-snowplow ?driver ?plow ?to)
(!disengage-plow ?driver ?plow)))
"""
if tasknames == ('GET-TO','!NAVEGATE-SNOWPLOW','!ENGAGE-PLOW','!NAVEGATE-SNOWPLOW','!DISENGAGE-PLOW'):
fromloc, toloc = params[1][3], params[3][3]
if params[0][1] == params[1][1] == params[2][1] == params[3][1] == params[4][1]:
g.add((states[0],'PLOW-ROAD',(fromloc,toloc)))
# Missing get-to:
if tasknames == ('!NAVEGATE-SNOWPLOW','!ENGAGE-PLOW','!NAVEGATE-SNOWPLOW','!DISENGAGE-PLOW'):
fromloc, toloc = params[0][3], params[2][3]
if params[0][1] == params[1][1] == params[2][1] == params[3][1]:
g.add((states[0],'PLOW-ROAD',(fromloc,toloc)))
"""
;;quell-riot
(:method (quell-riot ?loc)
with-police
((in-town ?loc ?town)
(police-unit ?p1) (police-unit ?p2) (not (equal ?p1 ?p2)))
((declare-curfew ?town) (get-to ?p1 ?loc) (get-to ?p2 ?loc)
(!set-up-barricades ?p1) (!set-up-barricades ?p2)))
"""
if tasknames == ('DECLARE-CURFEW','GET-TO','GET-TO','!SET-UP-BARRICADES','!SET-UP-BARRICADES'):
loc = params[1][2]
if loc == params[2][2]:
g.add((states[0],'QUELL-RIOT',(loc,)))
# Missing get-to
if tasknames == ('DECLARE-CURFEW','GET-TO','!SET-UP-BARRICADES','!SET-UP-BARRICADES',):
loc = params[1][2]
g.add((states[0],'QUELL-RIOT',(loc,)))
if tasknames == ('DECLARE-CURFEW','!SET-UP-BARRICADES','!SET-UP-BARRICADES',):
p2 = params[2][1]
m = unify(states[2][1], ('ATLOC', p2, None))
if len(m) == 1:
loc = m.pop()[0]
g.add((states[0],'QUELL-RIOT',(loc,)))
else:
p1 = params[1][1]
m = unify(states[1][1], ('ATLOC', p1, None))
if len(m)==1:
loc = m.pop()[0]
g.add((states[0],'QUELL-RIOT',(loc,)))
else:
for loc in poslocs:
g.add((states[0],'QUELL-RIOT',(loc,)))
"""
;;provide-temp-heat
(:method (provide-temp-heat ?person)
to-shelter
((person ?person) (shelter ?shelter))
((get-to ?person ?shelter)))
"""
if tasknames == ('GET-TO',):
person = params[0][1]
if len(person) >= 6 and person[:6]=='PERSON':
g.add((states[0],'PROVIDE-TEMP-HEAT',(person,)))
"""
(:method (provide-temp-heat ?person)
local-electricity
((person ?person) (atloc ?person ?ploc))
((generate-temp-electricity ?ploc) (!turn-on-heat ?ploc)))
"""
if tasknames == ('GENERATE-TEMP-ELECTRICITY','!TURN-ON-HEAT'):
for obj in states[0][0]:
if len(obj) >= 6 and obj[:6] == 'PERSON':
g.add((states[0],'PROVIDE-TEMP-HEAT',(obj,)))
"""
;;fix-power-line
(:method (fix-power-line ?lineloc)
normal
((power-crew ?crew) (power-van ?van))
((get-to ?crew ?lineloc) (get-to ?van ?lineloc)
(repair-line ?crew ?lineloc)))
"""
if tasknames == ('GET-TO','GET-TO','REPAIR-LINE'):
lineloc = params[2][2] # params[0][2] need not be lineloc, monroe bug?
if lineloc == params[1][2] and params[0][1]==params[2][1]:
if params[0][1] in pcrews:
g.add((states[0],'FIX-POWER-LINE', (lineloc,)))
# Missing get-to
if tasknames in [('GET-TO','REPAIR-LINE'),('REPAIR-LINE',)]:
lineloc = params[-1][2]
if lineloc == params[0][2]:
g.add((states[0],'FIX-POWER-LINE', (lineloc,)))
"""
;;provide-medical-attention
(:method (provide-medical-attention ?person)
in-hospital
((hospital ?hosp) (has-condition ?person ?cond)
(not (hospital-doesnt-treat ?hosp ?cond)))
((get-to ?person ?hosp) (!treat-in-hospital ?person ?hosp)))
"""
if tasknames == ('GET-TO','!TREAT-IN-HOSPITAL'):
person = params[0][1]
if (person == params[1][1]):
g.add((states[0],'PROVIDE-MEDICAL-ATTENTION', (person,)))
# Missing get-to
if tasknames == ('!TREAT-IN-HOSPITAL',):
person = params[0][1]
g.add((states[0],'PROVIDE-MEDICAL-ATTENTION', (person,)))
"""
(:method (provide-medical-attention ?person)
simple-on-site
((has-condition ?person ?cond) (not (serious-condition ?cond)))
((emt-treat ?person)))
"""
if tasknames == ('EMT-TREAT',):
person = params[0][1]
g.add((states[0],'PROVIDE-MEDICAL-ATTENTION', (person,)))
return g
def causes(v):
"""
Full causal relation (both mid- and top-level)
"""
return top_causes(v) | mid_causes(v)
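# Minimal usage sketch (assumes copct and the preprocessed corpus are importable,
# as they are in monroe_experiments.py):
#   import copct
#   from monroe_corpus.monroe_corpus import corpus
#   u, v, w = corpus[0]                      # ground truth covers and observed actions
#   status, tlcovs, g = copct.explain(causes, w, M=M)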
def main():
pass
if __name__ == "__main__":
main()
```
#### File: copct-master/monroe_corpus/monroe_preprocessing.py
```python
from monroe_static import locs, watercos, powercos, poslocs, sleaders, gens, food, pcrews
from monroe_utils import unify, single_unify
def parse_monroe(infilename='monroe5000.txt', outfilename='monroe5000.py'):
"""
Rewrite the Monroe corpus lisp data file as a tuple in a python script.
All symbols are converted to strings.
Inputs:
infilename: filename from which the lisp data is read
outfilename: filename to which the python script is written
"""
infile = open(infilename,"r")
outfile = open(outfilename,"w")
outfile.write("corpus = (\n")
syntax_chars = "() \t\n" # lisp syntax
previous_char = " "
for line in infile:
for char in line:
if (previous_char in syntax_chars) != (char in syntax_chars):
# just changed between syntax and symbol, quote symbol for python
outfile.write("\"")
# separate symbols with commas for python
if char in syntax_chars: outfile.write(",")
# separate sub-lists with commas for python
if previous_char == ")": outfile.write(",")
# write current character and advance
outfile.write(char)
previous_char = char
outfile.write(")")
infile.close()
outfile.close()
def populate_states_from_op(pre_states, op):
"""
Infers additional facts that must have been true in the previous states for the op to be applied successfully.
Returns the states with the additional facts added.
This implementation has a separate case for every operator in the Monroe domain.
Inputs:
op: a grounded operator of the form (name, arg1, arg2, ...)
pre_states: the states leading up to the application of the operator.
pre_states[i] is the i^{th} state, of the form (objs, facts).
objs is a list of possible parameter values, facts is a list of relations over those objects.
Outputs:
states[i]: the states with additional facts added.
if op is a primitive action, the last element is a new state after op was applied.
"""
objs = pre_states[-1][0]
# facts true before and after operator is applied (may be altered)
pre_facts = set(pre_states[-1][1])
task = op[0]
"""
(:operator (!navegate-vehicle ?person ?veh ?loc)
((person ?person) (vehicle ?veh) (atloc ?veh ?vehloc)
(atloc ?person ?vehloc) (can-drive ?person ?veh)
(not (wrecked-car ?veh)))
((atloc ?veh ?vehloc) (atloc ?person ?vehloc))
((atloc ?veh ?loc) (atloc ?person ?loc)))
"""
if task == '!NAVEGATE-VEHICLE':
person, veh, loc = op[1:]
for s in range(len(pre_states)):
pre_states[s] = (objs, tuple(set(pre_states[s][1]) | set((('PERSON', person), ('VEHICLE', veh)))))
post_facts = pre_facts | set((('ATLOC', veh, loc), ('ATLOC', person, loc)))
vehloc, = single_unify(pre_facts, ('ATLOC', veh, None), ('ATLOC', person, None))
if vehloc is not None:
pre_facts |= set((('ATLOC', veh, vehloc), ('ATLOC', person, vehloc)))
post_facts -= set((('ATLOC', veh, vehloc), ('ATLOC', person, vehloc)))
pre_states[-1] = (objs, tuple(pre_facts))
post_state = (objs, tuple(post_facts))
return pre_states + [post_state]
"""
(:operator (!climb-in ?obj ?veh)
((atloc ?obj ?objloc) (atloc ?veh ?objloc) (fit-in ?obj ?veh))
((atloc ?obj ?objloc))
((atloc ?obj ?veh)))
"""
if task == '!CLIMB-IN':
obj, veh = op[1:]
post_facts = pre_facts | set((('ATLOC', obj, veh),))
objloc, = single_unify(pre_facts, ('ATLOC', obj, None), ('ATLOC', veh, None))
if objloc is not None:
pre_facts.add(('ATLOC', obj, objloc))
post_facts.discard(('ATLOC', obj, objloc))
pre_states[-1] = (objs, tuple(pre_facts))
post_state = (objs, tuple(post_facts))
return pre_states + [post_state]
"""
(:operator (!climb-out ?obj ?veh)
((atloc ?obj ?veh) (atloc ?veh ?vehloc))
((atloc ?obj ?veh))
((atloc ?obj ?vehloc)))
"""
if task == '!CLIMB-OUT':
obj, veh = op[1:]
pre_facts.add(('ATLOC', obj, veh))
post_facts = pre_facts - set((('ATLOC', obj, veh),))
vehloc, = single_unify(pre_facts, ('ATLOC', veh, None))
if vehloc is not None:
post_facts.add(('ATLOC', obj, vehloc))
pre_states[-1] = (objs, tuple(pre_facts))
post_state = (objs, tuple(post_facts))
return pre_states + [post_state]
"""
(:operator (!load ?person ?obj ?veh)
((atloc ?obj ?objloc)
(atloc ?veh ?objloc)
(atloc ?person ?objloc)
(fit-in ?obj ?veh))
((atloc ?obj ?objloc))
((atloc ?obj ?veh)))
"""
if task == '!LOAD':
person, obj, veh = op[1:]
for s in range(len(pre_states)):
pre_states[s] = (objs, tuple(set(pre_states[s][1]) | set((('FIT-IN', obj, veh),))))
post_facts = set(pre_facts) | set((('ATLOC', obj, veh),))
objloc, = single_unify(pre_facts, *[('ATLOC', param, None) for param in op[1:]])
if objloc is not None:
pre_facts |= set(tuple(('ATLOC', param, objloc) for param in op[1:]))
post_facts.discard(('ATLOC', obj, objloc))
pre_states[-1] = (objs, tuple(pre_facts))
post_state = (objs, tuple(post_facts))
return pre_states + [post_state]
"""
(:operator (!unload ?person ?obj ?veh)
((atloc ?obj ?veh) (atloc ?veh ?vehloc) (atloc ?person ?vehloc))
((atloc ?obj ?veh))
((atloc ?obj ?vehloc)))
"""
if task == '!UNLOAD':
person, obj, veh = op[1:]
pre_facts |= set((('ATLOC', obj, veh),))
post_facts = set(pre_facts) - set((('ATLOC', obj, veh),))
vehloc, = single_unify(pre_facts, *[('ATLOC', param, None) for param in [veh, person]])
if vehloc is not None:
pre_facts |= set(tuple(('ATLOC', param, vehloc) for param in [veh, person]))
post_facts.add(('ATLOC', obj, vehloc))
pre_states[-1] = (objs, tuple(pre_facts))
post_state = (objs, tuple(post_facts))
return pre_states + [post_state]
"""
(:operator (!treat ?emt ?person)
((atloc ?person ?ploc) (atloc ?emt ?ploc))
()
())
"""
if task == '!TREAT':
emt, person = op[1:]
ploc, = single_unify(pre_facts, *[('ATLOC', param, None) for param in [emt, person]])
if ploc is not None:
pre_facts |= set(tuple(('ATLOC', param, ploc) for param in [emt, person]))
post_facts = set(pre_facts)
pre_states[-1] = (objs, tuple(pre_facts))
post_state = (objs, tuple(post_facts))
return pre_states + [post_state]
"""
(:operator (!treat-in-hospital ?person ?hospital)
((atloc ?person ?hospital))
()
())
"""
    if task == '!TREAT-IN-HOSPITAL':
pre_facts |= set((('ATLOC', op[1], op[2]),))
post_facts = set(pre_facts)
pre_states[-1] = (objs, tuple(pre_facts))
post_state = (objs, tuple(post_facts))
return pre_states + [post_state]
"""
;;set-up-shelter sets up a shelter at a certain location
(:method (set-up-shelter ?loc)
normal
((shelter-leader ?leader)
(not (assigned-to-shelter ?leader ?other-shelter))
(food ?food))
((get-electricity ?loc) (get-to ?leader ?loc) (get-to ?food ?loc)))
"""
if task == 'SET-UP-SHELTER': return pre_states # could do better with tree?
"""
;;fix-water-main
(:method (fix-water-main ?from ?to)
normal
()
((shut-off-water ?from ?to)
(repair-pipe ?from ?to)
(turn-on-water ?from ?to)))
"""
if task == 'FIX-WATER-MAIN': return pre_states # no information
"""
;; clear-road-hazard - cleans up a hazardous spill
(:method (clear-road-hazard ?from ?to)
normal
()
((block-road ?from ?to)
(clean-up-hazard ?from ?to)
(unblock-road ?from ?to)))
"""
if task == 'CLEAR-ROAD-HAZARD': return pre_states # no information
"""
;; clear-road-wreck - gets a wreck out of the road
(:method (clear-road-wreck ?from ?to)
normal
()
((set-up-cones ?from ?to)
(clear-wreck ?from ?to)
(take-down-cones ?from ?to)))
"""
if task == 'CLEAR-ROAD-WRECK': return pre_states # no information
"""
;; clear-road-tree
(:method (clear-road-tree ?from ?to) ;; clears a tree that's in the road
normal
((tree-blocking-road ?from ?to ?tree))
((set-up-cones ?from ?to)
(clear-tree ?tree)
(take-down-cones ?from ?to)))
"""
if task == 'CLEAR-ROAD-TREE': return pre_states # no information not already in subs
"""
;; plow-road
(:method (plow-road ?from ?to)
plow
((road-snowy ?from ?to)
(snowplow ?plow)
(atloc ?plow ?plowloc)
(plowdriver ?driver)
)
((get-to ?driver ?plowloc)
(!navegate-snowplow ?driver ?plow ?from) ;; must use nav-snowplow
;; since regular cars can't drive if snowy
(!engage-plow ?driver ?plow)
(!navegate-snowplow ?driver ?plow ?to)
(!disengage-plow ?driver ?plow)))
"""
if task == 'PLOW-ROAD': return pre_states # road-snowy worth it?
"""
;;quell-riot
(:method (quell-riot ?loc)
with-police
((in-town ?loc ?town)
(police-unit ?p1) (police-unit ?p2) (not (equal ?p1 ?p2)))
((declare-curfew ?town) (get-to ?p1 ?loc) (get-to ?p2 ?loc)
(!set-up-barricades ?p1) (!set-up-barricades ?p2)))
"""
if task == 'QUELL-RIOT': return pre_states #
"""
;;provide-temp-heat
(:method (provide-temp-heat ?person)
to-shelter
((person ?person) (shelter ?shelter))
((get-to ?person ?shelter)))
(:method (provide-temp-heat ?person)
local-electricity
((person ?person) (atloc ?person ?ploc))
((generate-temp-electricity ?ploc) (!turn-on-heat ?ploc)))
"""
if task == 'PROVIDE-TEMP-HEAT': return pre_states #
"""
;;fix-power-line
(:method (fix-power-line ?lineloc)
normal
((power-crew ?crew) (power-van ?van))
((get-to ?crew ?lineloc) (get-to ?van ?lineloc)
(repair-line ?crew ?lineloc)))
"""
if task == 'FIX-POWER-LINE': return pre_states #
"""
;;provide-medical-attention
(:method (provide-medical-attention ?person)
in-hospital
((hospital ?hosp) (has-condition ?person ?cond)
(not (hospital-doesnt-treat ?hosp ?cond)))
((get-to ?person ?hosp) (!treat-in-hospital ?person ?hosp)))
(:method (provide-medical-attention ?person)
simple-on-site
((has-condition ?person ?cond) (not (serious-condition ?cond)))
((emt-treat ?person)))
"""
if task == 'PROVIDE-MEDICAL-ATTENTION': return pre_states
"""
;;;;;;;;;;;;;;;;;;; subgoals ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; clean-up-hazard
(:method (clean-up-hazard ?from ?to)
very-hazardous ;; just call the feds
((hazard-seriousness ?from ?to very-hazardous))
((!call fema))
normal ;; we can take care of it
((hazard-team ?ht))
((get-to ?ht ?from) (!clean-hazard ?ht ?from ?to)))
"""
if task == 'CLEAN-UP-HAZARD':
# kludge: should only add if child is call fema (needs tree not just op)
fromloc, toloc = op[1:]
pre_states[-1] = (objs, tuple(set(pre_states[-1][1]) | set((('HAZARD-SERIOUSNESS', fromloc, toloc, 'VERY-HAZARDOUS'),))))
return pre_states
"""
;; block-road - blocks off a road
(:method (block-road ?from ?to)
normal
((police-unit ?police))
(:unordered (set-up-cones ?from ?to)
(get-to ?police ?from)))
"""
if task == 'BLOCK-ROAD': return pre_states #
"""
;; unblock-road - unblocks a road
(:method (unblock-road ?from ?to)
normal
()
((take-down-cones ?from ?to)))
"""
if task == 'UNBLOCK-ROAD': return pre_states #
"""
;; get-electricity provides electricity to a site (if not already there)
(:method (get-electricity ?loc)
already-has-electricity ;; do nothing
((not (no-electricity ?loc)))
()
no-electricity
()
((generate-temp-electricity ?loc))
)
"""
if task == 'GET-ELECTRICITY': return pre_states #
"""
;; repair-pipe
(:method (repair-pipe ?from ?to) ;; repairs a pipe at location
normal
((water-crew ?crew))
((get-to ?crew ?from)
(set-up-cones ?from ?to)
(open-hole ?from ?to)
(!replace-pipe ?crew ?from ?to)
(close-hole ?from ?to)
(take-down-cones ?from ?to)))
"""
if task == 'REPAIR-PIPE': return pre_states
"""
;; open-hole
(:method (open-hole ?from ?to) ;; opens a hole in the street
normal
((backhoe ?backhoe))
((get-to ?backhoe ?from)
(!dig ?backhoe ?from)))
"""
if task == 'OPEN-HOLE': return pre_states # want toloc but no way to get it
"""
;; close-hole
(:method (close-hole ?from ?to) ;; opens a hole in the street
normal
((backhoe ?backhoe))
((get-to ?backhoe ?from)
(!fill-in ?backhoe ?from)))
"""
if task == 'CLOSE-HOLE': return pre_states # want toloc but no way to get it
"""
;; set-up-cones
(:method (set-up-cones ?from ?to) ;; sets up orange cones at road
normal
((work-crew ?crew))
((get-to ?crew ?from) (!place-cones ?crew)))
"""
if task == 'SET-UP-CONES': return pre_states # want toloc but no way to get it
"""
;; take-down-cones
(:method (take-down-cones ?from ?to) ;; takes down cones
normal
((work-crew ?crew))
((get-to ?crew ?from) (!pickup-cones ?crew)))
"""
if task == 'TAKE-DOWN-CONES': return pre_states # want toloc but no way to get it
"""
;; clear-wreck
(:method (clear-wreck ?from ?to) ;; gets rid of a wreck in any loc
normal
((wrecked-vehicle ?from ?to ?veh) (garbage-dump ?dump))
((tow-to ?veh ?dump)))
"""
if task == 'CLEAR-WRECK':
# kludge - can't get ?veh, use None as placeholder (it's never used by causes function)
fromloc, toloc = op[1:]
pre_states[-1] = (objs, tuple(pre_facts | set((('WRECKED-VEHICLE', fromloc, toloc, None),))))
return pre_states
"""
;; tow-to - tows a vehicle somewhere
(:method (tow-to ?veh ?to)
normal
((tow-truck ?ttruck) (vehicle ?veh) (atloc ?veh ?vehloc))
((get-to ?ttruck ?vehloc)
(!hook-to-tow-truck ?ttruck ?veh)
(get-to ?ttruck ?to)
(!unhook-from-tow-truck ?ttruck ?veh)))
"""
if task == 'TOW-TO': return pre_states #
"""
;; clear-tree
(:method (clear-tree ?tree) ;; this gets rid of a tree in any loc
normal
((tree-crew ?tcrew) (tree ?tree)
(atloc ?tree ?treeloc))
((get-to ?tcrew ?treeloc) (!cut-tree ?tcrew ?tree)
(remove-blockage ?tree)))
"""
if task == 'CLEAR-TREE': return pre_states #
"""
;; remove-blockage
(:method (remove-blockage ?stuff)
move-to-side-of-street
((work-crew ?crew) (atloc ?stuff ?loc))
((get-to ?crew ?loc)
(!carry-blockage-out-of-way ?crew ?stuff)))
(:method (remove-blockage ?stuff)
carry-away
((garbage-dump ?dump))
((get-to ?stuff ?dump)))
"""
if task == 'REMOVE-BLOCKAGE': return pre_states #
"""
;; declare-curfew
(:method (declare-curfew ?town)
normal
()
(:unordered (!call EBS) (!call police-chief)))
"""
    if task == 'DECLARE-CURFEW': return pre_states
"""
;; generate-temp-electricity
(:method (generate-temp-electricity ?loc)
with-generator
((generator ?gen))
((make-full-fuel ?gen) (get-to ?gen ?loc) (!hook-up ?gen ?loc)
(!turn-on ?gen)))
"""
if task == 'GENERATE-TEMP-ELECTRICITY': return pre_states #
"""
;; make-full-fuel - makes sure arg1 is full of fuel
(:method (make-full-fuel ?gen)
with-gas-can
((gas-can ?gc) (atloc ?gen ?genloc) (service-station ?ss))
((get-to ?gc ?ss) (add-fuel ?ss ?gc) (get-to ?gc ?genloc)
(!pour-into ?gc ?gen)))
(:method (make-full-fuel ?gen)
at-service-station
((service-station ?ss))
((get-to ?gen ?ss) (add-fuel ?ss ?gen)))
"""
if task == 'MAKE-FULL-FUEL': return pre_states #
"""
;; add-fuel (at service-station)
(:method (add-fuel ?ss ?obj)
normal
()
(:unordered (!pay ?ss) (!pump-gas-into ?ss ?obj)))
"""
if task == 'ADD-FUEL': return pre_states
"""
;; repair-line
(:method (repair-line ?crew ?lineloc)
with-tree
((tree ?tree) (atloc ?tree ?lineloc)
(atloc ?crew ?lineloc))
((shut-off-power ?crew ?lineloc)
(:unordered (clear-tree ?tree)
(!remove-wire ?crew ?lineloc))
(!string-wire ?crew ?lineloc) (turn-on-power ?crew ?lineloc))
without-tree
((atloc ?crew ?lineloc))
((shut-off-power ?crew ?lineloc)
(!remove-wire ?crew ?lineloc)
(!string-wire ?crew ?lineloc) (turn-on-power ?crew ?lineloc)))
"""
if task == 'REPAIR-LINE': return pre_states #
"""
;; shut-off-power
(:method (shut-off-power ?crew ?loc)
normal
((in-town ?loc ?town) (powerco-of ?town ?powerco))
(!call ?powerco))
"""
if task == 'SHUT-OFF-POWER': return pre_states # narrow loc to town through fixed state in causes
"""
;; turn-on-power
(:method (turn-on-power ?crew ?loc)
normal
((in-town ?loc ?town) (powerco-of ?town ?powerco))
(!call ?powerco))
"""
if task == 'TURN-ON-POWER': return pre_states # narrow loc to town through fixed state in causes
"""
;; shut-off-water
(:method (shut-off-water ?from ?to)
normal
((in-town ?from ?town) (waterco-of ?town ?waterco))
((!call ?waterco)))
"""
if task == 'SHUT-OFF-WATER': return pre_states # narrow loc to town through fixed state in causes
"""
;; turn-on-water
(:method (turn-on-water ?from ?to)
normal
((in-town ?from ?town) (waterco-of ?town ?waterco))
((!call ?waterco)))
"""
if task == 'TURN-ON-WATER': return pre_states # narrow loc to town through fixed state in causes
"""
;; emt-treat
(:method (emt-treat ?person)
emt
((emt-crew ?emt) (atloc ?person ?personloc))
((get-to ?emt ?personloc) (!treat ?emt ?person)))
"""
if task == 'EMT-TREAT': return pre_states
"""
;; stabilize
(:method (stabilize ?person)
emt
()
((emt-treat ?person)))
"""
if task == 'STABILIZE': return pre_states
"""
;; get-to
(:method (get-to ?obj ?place)
already-there
((atloc ?obj ?place))
())
(:method (get-to ?person ?place)
person-drives-themself
((not (atloc ?person ?place))
(person ?person) (vehicle ?veh) (atloc ?veh ?vehloc)
(atloc ?person ?vehloc))
((drive-to ?person ?veh ?place)))
(:method (get-to ?veh ?place)
vehicle-gets-driven
((not (atloc ?veh ?place))
(person ?person)
(vehicle ?veh) (atloc ?veh ?vehloc)
(atloc ?person ?vehloc)
)
((drive-to ?person ?veh ?place)))
(:method (get-to ?obj ?place)
as-cargo
((not (atloc ?obj ?place))
(vehicle ?veh)
(atloc ?obj ?objloc) (fit-in ?obj ?veh)
(not (non-ambulatory ?obj)))
((get-to ?veh ?objloc) (get-in ?obj ?veh) (get-to ?veh ?place)
(get-out ?obj ?veh))
with-ambulance ;; same as above, just with ambulance
((not (atloc ?obj ?place))
(atloc ?obj ?objloc) (ambulance ?veh) (fit-in ?obj ?veh)
)
((get-to ?veh ?objloc) (stabilize ?obj) (get-in ?obj ?veh)
(get-to ?veh ?place) (get-out ?obj ?veh))
)
"""
if task == 'GET-TO': return pre_states # all info in subs except for nop case
"""
(:method (drive-to ?person ?veh ?loc)
normal
((person ?person) (vehicle ?veh) (atloc ?veh ?vehloc)
(atloc ?person ?vehloc) (can-drive ?person ?veh))
((!navegate-vehicle ?person ?veh ?loc)))
"""
if task == 'DRIVE-TO': return pre_states # all info in subs
"""
(:method (get-in ?obj ?veh)
ambulatory-person
((atloc ?obj ?objloc) (atloc ?veh ?objloc)
(person ?obj) (not (non-ambulatory ?obj)))
(!climb-in ?obj ?veh)
load-in
((atloc ?obj ?objloc) (atloc ?veh ?objloc)
(person ?person) (can-lift ?person ?obj))
((get-to ?person ?objloc) (!load ?person ?obj ?veh)))
"""
if task == 'GET-IN': return pre_states # all info in subs
"""
(:method (get-out ?obj ?veh)
ambulatory-person
((person ?obj) (not (non-ambulatory ?obj)))
(!climb-out ?obj ?veh)
unload
((atloc ?veh ?vehloc) (person ?person) (can-lift ?person ?obj))
((get-to ?person ?vehloc) (!unload ?person ?obj ?veh)))
"""
if task == 'GET-OUT': return pre_states # all info in subs
# remaining operators (all primitive, empty preconds/adds/deletes)
return pre_states + pre_states[-1:]
def extract_leaves(tree):
"""
Extract the leaves of a plan decomposition tree in the Monroe corpus.
Inputs:
tree: the plan tree, of the form (node, subtree1, subtree2, ...)
node is a grounded operator of the form (name, arg1, arg2, ...)
Outputs:
leaves[i]: The i^{th} leaf, also a grounded operator of the form (name, arg1, arg2, ...)
"""
if type(tree[0])==str: # base case, "tree" is a node
return (tree,)
else: # recursive case, tree is a tree, recurse on subtrees
return reduce(lambda x,y: x+y, map(extract_leaves, tree[1:]))
def extract_objects(tree):
"""
Extract all "objects," the arguments occurring in any operator in a plan decomposition tree.
This omits static objects always present in every plan of the corpus (locations, etc)
Inputs:
tree: the plan tree, as in extract_leaves
Outputs:
objs: the set of all distinct objects occurring in the tree
"""
objs = set()
if type(tree[0])==str: # base case, "tree" is a node
objs |= set(tree[1:])
else: # recursive case
objs |= set(tree[0][1:])
for sub in tree[1:]:
objs |= extract_objects(sub)
objs -= set(locs) | set(watercos) | set(powercos) # remove static objects
return objs
def extract_children(tree):
"""
Extract the immediate child nodes of a tree root
Inputs:
tree: a plan decomposition tree
Outputs:
children: the immediate child nodes of root (with their own subtrees omitted)
"""
return tuple(child if type(child[0])==str else child[0] for child in tree[1:])
# def search_tree(tree):
# # used to rule out empty-case of get-to
# if type(tree[0]) != str:
# if tree[0][0]=='GET-TO' and len(tree)==1: return True
# return any([search_tree(sub) for sub in tree[1:]])
# return False
def populate_tree_states(leading_states, next_tree):
"""
Uses populate_states_from_op on every operator in a plan tree.
Implementation is recursive; should be called at the top level with:
leading_states = [(objs, ())]
next_tree = the full plan tree
Inputs:
leading_states: a list of states leading up to next_tree
next_tree: the next plan tree of operators being applied
Outputs:
states: leading states with new facts added, and new states resulting from the next_tree
"""
if type(next_tree[0])==str: # base case, "tree" is primitive operator
states = populate_states_from_op(leading_states, next_tree) # = pre_states + [post_state]
else: # recursive case, process each op in next_tree, starting with root
states = populate_states_from_op(leading_states, next_tree[0]) # = pre_states
for sub in next_tree[1:]:
states = populate_tree_states(states, sub) # = pre_states + post_states
return states
def preprocess_plan(plan_tree):
"""
Preprocess a single plan tree from the corpus, populating intermediate states.
The returned sequences contain elements of the form (state, task_name, (arg1, arg2, ...))
Inputs:
plan_tree: a plan tree from the monroe corpus, in python tuple format (as written by parse_monroe).
Outputs:
u: the top-level ground-truth (singleton) sequence
v: the immediate child sequence of u (ground-truth for modified Monroe experiments)
w: the bottom-level observed actions
"""
# pull out data
root = plan_tree[0]
children = extract_children(plan_tree)
objs = extract_objects(plan_tree)
actions = extract_leaves(plan_tree)
states = populate_tree_states([(tuple(objs), ())], plan_tree)
# recover the action indices covered by each child, so that the correct intermediate states are associated
indices = [0]
for subtree in plan_tree[1:]:
indices.append(indices[-1] + len(extract_leaves(subtree)))
# convert to (state, task, args) format
u = ((states[0], root[0], root[1:]),)
v = tuple((states[indices[k]], children[k][0], children[k][1:]) for k in range(len(children)))
w = tuple((states[i], actions[i][0], actions[i][1:]) for i in range(len(actions)))
return u, v, w
if __name__ == "__main__":
# Parse Monroe lisp to python
print('Parsing lisp...')
parse_monroe()
# preprocess each plan tree
print('Preprocessing plan trees...')
from monroe5000 import corpus
corpus = tuple(preprocess_plan(plan_tree) for plan_tree in corpus)
# Write preprocessed corpus to file
print('Writing to file...')
corpus_file = open('monroe_corpus.py','w')
corpus_file.write('corpus = [')
for example in corpus:
corpus_file.write('%s,\n'%str(example))
corpus_file.write(']\n')
corpus_file.close()
```
#### File: electron435/copct-master/monroe_experiments.py
```python
import time
import pickle as pkl
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import copct
import monroe_corpus.monroe_domain as md
from monroe_corpus.monroe_corpus import corpus
def run_sample(M, causes, u_correct, w, verbose=True, timeout=600, timeout_irr=300, max_tlcovs=13000000):
"""
Run experimental evaluation on one sample plan.
Inputs:
M: domain constant (maximum length of any effect sequence in the causal relation)
causes: handle to the causes function encoding the domain
u_correct: the "correct" cover against which copct is tested
w: sequence of observed actions (the plan) to be covered by copct
verbose: if True, print copct verbose output
timeout: timeout for explain
        timeout_irr: timeout for irredundancy
max_tlcovs: maximum top-level cover count for explain
Outputs:
result: dictionary with various key:value pairs summarizing the outcomes of the experiment.
"""
# run copct
start = time.clock()
status, tlcovs, g = copct.explain(causes, w, M=M, verbose=verbose, timeout=timeout, max_tlcovs=max_tlcovs)
runtime = time.clock()-start
# record execution info
result = {}
result["runtime"] = runtime
result["status"] = status
if not status == "Success": return result
# top-level results
result["correct"] = u_correct in [u for (u,_,_,_,_) in tlcovs]
result["|tlcovs|"] = len(tlcovs)
print("correct=%s, |tlcovs|=%d"%(result["correct"], result["|tlcovs|"]))
# compare parsimony criteria
criteria = [("_mc", copct.minCardinalityTLCovers),
("_md", copct.maxDepthTLCovers),
("_xd", copct.minimaxDepthTLCovers),
("_fsn", copct.minForestSizeTLCovers),
("_fsx", copct.maxForestSizeTLCovers),
("_mp", copct.minParametersTLCovers)]
for (label, fun) in criteria:
pruned_tlcovs, extremum = fun(tlcovs)
correct = u_correct in [u for (u,_,_,_,_) in pruned_tlcovs]
count = len(pruned_tlcovs)
result["correct%s"%label] = correct
result["|tlcovs%s|"%label] = count
result["extremum%s"%label] = extremum
print("%s: correct=%s, count=%d, extremum=%d"%(label, correct, count, extremum))
# special handling for irredundancy
status, tlcovs_irr = copct.irredundantTLCovers(tlcovs, timeout=timeout_irr)
result["irr_success"] = status
if not status: return result
result["correct_irr"] = u_correct in [u for (u,_,_,_,_) in tlcovs_irr]
result["|tlcovs_irr|"] = len(tlcovs_irr)
print("correct_irr=%s, count_irr=%d"%(result["correct_irr"], result["|tlcovs_irr|"]))
return result
def run_experiments(use_original=True, num_samples=None, filename=None, verbose=True, timeout=600, timeout_irr=300, max_tlcovs=13000000):
"""
Run experiments on many samples in the corpus.
Inputs:
use_original: if True, run on the original corpus, otherwise run on the modified
num_samples: number of randomly chosen sample plans from the corpus to use. Defaults to all of them.
filename: name of file in which to save results.
Defaults to "monroe_results.pkl" or "monroe_results_modified.pkl" depending on use_original flag.
verbose, timeout, timeout_irr, max_tlcovs: additional parameters for run_sample
Outputs:
results[s]: dictionary or results for s^th sample plan
"""
# Setup
samples = np.random.permutation(len(corpus))
if num_samples is not None:
samples = samples[:num_samples]
if filename is None:
if use_original: filename = "monroe_results.pkl"
else: filename = "monroe_results_modified.pkl"
# Run experiments
results = {}
for s in range(len(samples)):
sample = samples[s]
print("Starting sample %d of %d (plan # %d in %s corpus)..."%(s, len(samples), sample, "original" if use_original else "modified"))
if use_original:
u_correct = corpus[sample][0]
causes = md.causes
else:
u_correct = corpus[sample][1]
causes = md.mid_causes
w = corpus[sample][2]
results[sample] = run_sample(md.M, causes, u_correct, w, verbose=verbose, timeout=timeout, max_tlcovs=max_tlcovs, timeout_irr=timeout_irr)
results_file = open(filename, "w")
pkl.dump(results, results_file)
results_file.close()
print("%d of %d samples processed..."%(s+1, len(samples)))
return results
def show_results(filename="monroe_results.pkl"):
"""
Print/plot results shown in publications
Inputs:
filename: name of file where results are saved
"""
# load results
f = open(filename, "r")
results = pkl.load(f)
f.close()
# accuracy
for criterion in ["","_mc","_irr","_md","_xd", "_mp", "_fsn", "_fsx"]:
r = {s:results[s] for s in results if "correct%s"%criterion in results[s] and results[s]["correct"]}
num_correct = len([s for s in r if r[s]["correct%s"%criterion]])
if len(r) > 0:
print("%s: %d of %d (%.1f %%)"%(criterion, num_correct, len(r), 100.0*num_correct/len(r)))
else:
print("%s: %d of %d"%(criterion, num_correct, len(r)))
# specificity
counts = {}
for criterion in ["_mc", "_irr", "_md", "_xd", "_mp"]:
#counts[criterion] = [results[s]["|tlcovs%s|"%criterion] for s in results if "|tlcovs%s|"%criterion in results[s] and results[s]['correct']]
counts[criterion] = [results[s]["|tlcovs%s|"%criterion] for s in results if "|tlcovs%s|"%criterion in results[s]]
# count summaries
print("%d of %d samples have >= 100 MC covers"%(np.count_nonzero(np.array(counts["_mc"])>=100), len(counts["_mc"])))
print("%d of %d samples have 1 MC cover"%(np.count_nonzero(np.array(counts["_mc"])==1), len(counts["_mc"])))
print("%d samples (~90 %%) <= %d MC covers"%(int(np.floor(0.9*len(counts["_mc"]))), np.sort(counts["_mc"])[int(np.floor(0.9*len(counts["_mc"])))]))
print("%d of %d samples have 1 MP cover"%(np.count_nonzero(np.array(counts["_mp"])==1), len(counts["_mp"])))
print("%d samples (~90 %%) <= %d MP covers"%(int(np.floor(0.9*len(counts["_mp"]))), np.sort(counts["_mp"])[int(np.floor(0.9*len(counts["_mp"])))]))
# top-level vs irredundant
r = {k:results[k] for k in results if '|tlcovs_irr|' in results[k]}
print('top-level == irr in %d plans'%(len([k for k in r if r[k]['|tlcovs|'] == r[k]['|tlcovs_irr|']])))
# histogram
fig = plt.figure()
fig.subplots_adjust(bottom=0.15)
bins = np.arange(7)
greys = [str(g) for g in np.linspace(0.0,1.0,len(counts))]
_,bins,_ = plt.hist([np.log10(counts[c]) for c in ["_mc", "_irr", "_md", "_xd", "_mp"]], bins=bins, color=greys)
plt.xlabel('# of covers found')
plt.ylabel('# of times in corpus')
plt.legend(['MC','IR','MD','XD','MP'])
fig.canvas.draw()
ax = plt.gca()
old_ticks = ax.get_xticks()
new_ticks = []
labels = []
mpl.rcParams['mathtext.default'] = 'regular'
for b in range(len(bins)-1):
new_ticks.append(bins[b])
labels.append("|")
new_ticks.append(0.5*(bins[b]+bins[b+1]))
if b < len(bins)-2:
labels.append("$10^%d-10^%d$"%(b, b+1))
else:
labels.append("$>10^%d$"%b)
new_ticks.append(bins[-1])
labels.append("|")
ax.set_xticks(new_ticks)
ax.set_xticklabels(labels, rotation=0)
ax.set_ylim([0, len(results)])
plt.show()
# scatter
fig = plt.figure()
ax = plt.gca()
fig.subplots_adjust(bottom=0.15)
ax.scatter(np.log2([r[k]['|tlcovs|'] for k in r]), np.log2([r[k]['|tlcovs_irr|'] for k in r]))
plt.xlabel("# of top-level covers")
plt.ylabel("# of irredundant top-level covers")
ax.set_xlim([-0.5, 16])
ax.set_ylim([-0.5, 16])
fig.canvas.draw()
labels = ["$2^{%d}$"%x for x in (2*np.arange(-1, 9))]
ax.set_xticklabels(labels, rotation=0)
ax.set_yticklabels(labels, rotation=0)
fig.canvas.draw()
return results
def show_u_precise(filename="monroe_results.pkl"):
"""
Print/plot full experiment precision results shown in TCDS
Inputs:
filename: name of file where results are saved
"""
# load results
f = open(filename, "r")
results = pkl.load(f)
f.close()
# specificity
counts = {}
for criterion in ["", "_mc"]:
counts[criterion] = [results[s]["|tlcovs%s|"%criterion] for s in results if "|tlcovs%s|"%criterion in results[s]]
# count summaries
print("%d of %d samples have >= 100 MC covers"%(np.count_nonzero(np.array(counts["_mc"])>=100), len(counts["_mc"])))
print("%d of %d samples have 1 MC cover"%(np.count_nonzero(np.array(counts["_mc"])==1), len(counts["_mc"])))
print("%d samples (~90 %%) <= %d MC covers"%(int(np.floor(0.9*len(counts["_mc"]))), np.sort(counts["_mc"])[int(np.floor(0.9*len(counts["_mc"])))]))
# histogram
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
fig = plt.figure()
fig.subplots_adjust(bottom=0.15)
# greys = [str(g) for g in np.linspace(0.0,1.0,len(counts))]
# _,bins,_ = plt.hist([np.log2(counts[c]) for c in ["", "_mc"]], color=greys)
plt.hist([np.log2(counts[""]), np.log2(counts["_mc"])], bins=25, color=['white','black'])
plt.xlabel('# of covers found')
plt.ylabel('# of testing examples')
plt.legend(['Top-level covers','MC top-level covers'])
ax = plt.gca()
xticks = ax.get_xticks()
xtick_labels = []
mpl.rcParams['mathtext.default'] = 'regular'
for t in xticks:
xtick_labels.append('%d'%(2**t))
xtick_labels[-1] = ''
ax.set_xticklabels(xtick_labels, rotation=0)
plt.show()
return results
if __name__ == "__main__":
full_experiments = raw_input("Run full experiments? May use up to 32GB of RAM and over a week of CPU time. [y/n]")
# Run experiments.
if full_experiments == "y":
run_experiments() # original
run_experiments(use_original=False) # modified
else:
run_experiments(num_samples=50, max_tlcovs=1000) # original
run_experiments(num_samples=50, max_tlcovs=1000, use_original=False) # modified
# Show results
plt.ion()
results = show_results() # original
results_modified = show_results(filename="monroe_results_modified.pkl") # modified
raw_input("Enter to close...")
```
#### File: python_causal_compiler/compiler/Lexer.py
```python
from Token import Token
# Token Types
(LPAREN, RPAREN, COMMA, LBRACK, RBRACK, LCURLY, RCURLY, SEMI,
EQUALS, LESSTHAN, GREATERTHAN, LESSEQUAL, GREATEREQUAL, AND, OR, COLON, ID, INTEGER, CAUSES, DOT, QUOTE,
RULES, TYPE, ALL, CONT, IF, NOTEQUAL, STATE, PYTHON, DASH, EOF) = (
'LPAREN', 'RPAREN', 'COMMA', 'LBRACK', 'RBRACK', 'LCURLY',
'RCURLY', 'SEMI', 'EQUALS', 'LESSTHAN','GREATERTHAN', 'LESSEQUAL', 'GREATEREQUAL', 'AND', 'OR', 'COLON', 'ID',
'INTEGER', 'CAUSES', 'DOT', 'QUOTE', 'RULES', 'TYPE',
'ALL', 'CONT', 'IF', 'NOTEQUAL', 'STATE', 'PYTHON', 'DASH', 'EOF'
)
# Automatically tokenizes certain reserved keywords
RESERVED_KEYWORDS = {
'RULES': Token('RULES', 'RULES'),
'TYPE': Token('TYPE', 'TYPE'),
'ALL': Token('ALL', 'ALL'),
'CONT': Token('CONT', 'CONT'),
'STATE': Token('STATE', 'STATE'),
'PYTHON': Token('PYTHON', 'PYTHON'),
'if': Token('IF', 'IF'),
}
## Custom Lexer
#
# @brief The Lexer transforms the raw input text from the custom
# language into a list of tokens
class Lexer(object):
## Constructor
#
# @param text input causal knowledge text
def __init__(self, text):
## @var text
# Raw input code in custom language
self.text = text
## @var pos
# current index/position in input text
self.pos = 0
## @var current_char
# character at the current index/position
self.current_char = self.text[self.pos]
## Lexer Error
#
# @brief Notifies user of use of invalid/unrecognized character
#
# @retval none
def error(self):
raise Exception('Invalid character: {c}'.format(
c = self.current_char
))
## Advance
#
# @brief Changes current position and adjusts current character
# appropriately. Current character is equal to None if the
# position is at the end of the input text
#
# @retval none
def advance(self):
self.pos += 1
if self.pos > len(self.text) - 1:
self.current_char = None
else:
self.current_char = self.text[self.pos]
## Skip Whitespace
#
# @brief Ignores whitespace. Input can have arbitrary spacing.
#
# @retval none
def skip_whitespace(self):
while self.current_char is not None and self.current_char.isspace():
self.advance()
## Integer
#
# @brief Identifies digits/strings of digits and returns them as integers
#
# @retval Integer integer representation of the character sequence
def integer(self):
result = ''
while self.current_char is not None and self.current_char.isdigit():
result += self.current_char
self.advance()
return int(result)
## Peek
#
# @brief Returns the next character without actually moving the current
# position. This is needed for certain Lexing decisions.
#
# @retval char next character after current_char
def peek(self):
peek_pos = self.pos + 1
if peek_pos > len(self.text) - 1:
return None
else:
return self.text[peek_pos]
## ID
#
# @brief Look in keywords for the given ID and return the corresponding
# token
#
#
# @retval Token token representing an id
def _id(self):
result = ''
while self.current_char is not None and (self.current_char.isalnum() or self.current_char == '-' or self.current_char == '_'):
result += self.current_char
self.advance()
result = result.replace('_', ' ')
token = RESERVED_KEYWORDS.get(result, Token(ID, result))
result2 = ''
if token.type == PYTHON:
self.advance()
self.advance()
while self.current_char != '#':
result2 += self.current_char
self.advance()
self.advance()
self.advance()
token = Token(PYTHON, result2)
return token
## Get Next Token
#
# Tokenizes the entire input
#
#
# @retval Token the next token
def get_next_token(self):
while self.current_char is not None:
if self.current_char.isspace():
self.skip_whitespace()
continue
if self.current_char.isalpha():
return self._id()
if self.current_char.isdigit():
return Token(INTEGER, self.integer())
if self.current_char == ':' and self.peek() == '=':
self.advance()
self.advance()
return Token(CAUSES, ':=')
if self.current_char == ';':
self.advance()
return Token(SEMI, ';')
if self.current_char == '-':
self.advance()
return Token(DASH, '-')
if self.current_char == '(':
self.advance()
return Token(LPAREN, '(')
if self.current_char == ')':
self.advance()
return Token(RPAREN, ')')
if self.current_char == '[':
self.advance()
return Token(LBRACK, '[')
if self.current_char == ']':
self.advance()
return Token(RBRACK, ']')
if self.current_char == ',':
self.advance()
return Token(COMMA, ',')
if self.current_char == '{':
self.advance()
return Token(LCURLY, '{')
if self.current_char == '}':
self.advance()
return Token(RCURLY, '}')
if self.current_char == '=':
self.advance()
return Token(EQUALS, '=')
if self.current_char == '!' and self.peek() == '=':
self.advance()
self.advance()
return Token(NOTEQUAL,'!=')
if self.current_char == '<' and self.peek() != '=':
self.advance()
return Token(LESSTHAN,'<')
if self.current_char == '>' and self.peek() != '=':
self.advance()
return Token(GREATERTHAN, '>')
if self.current_char == '>' and self.peek() == '=':
self.advance()
self.advance()
return Token(GREATEREQUAL,'>=')
if self.current_char == '<' and self.peek() == '=':
self.advance()
self.advance()
return Token(LESSEQUAL, '<=')
if self.current_char == '{':
self.advance()
return Token(LCURLY, '{')
if self.current_char == '&' and self.peek() == '&':
self.advance()
self.advance()
return Token(AND, '&&')
if self.current_char == '|' and self.peek() == '|':
self.advance()
self.advance()
return Token(OR, '||')
if self.current_char == ':':
self.advance()
return Token(COLON, ':')
if self.current_char == '.':
self.advance()
return Token(DOT, '.')
if self.current_char == '\'':
self.advance()
return Token(QUOTE, '\'')
self.error()
return Token(EOF, None)
```
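A minimal usage sketch of the Lexer above, mirroring the `lexer_print_tokens` helper used in this project's tests; the rule text is an illustrative stand-in, not an entry from the project's real causal-knowledge files.
```python
from Lexer import Lexer

# Tokenize a small made-up rule written in the custom causal language.
source = "RULES { stack-block(x, y) := pick-up(x), put-down(x, y); }"
lexer = Lexer(source)

token = lexer.get_next_token()
while token.type != 'EOF':
    print(token)  # e.g. Token(RULES, 'RULES'), Token(LCURLY, '{'), Token(ID, 'stack-block'), ...
    token = lexer.get_next_token()
```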
#### File: python_causal_compiler/compiler/run_acceptance.py
```python
from Lexer import Lexer
from Parser import Parser
from Facility_Domain_Compiler import Facility_Domain_Compiler
from Imitation_Compiler import Imitation_Compiler
import os
import sys
## Make Facility Domain
#
# Outputs the first python file that defines CO-PCT tree
def make_facility_domain(interpreter):
# out_file = os.path.normpath("./python_causal_compiler/compiler/output/acceptance.py")
out_file = os.path.normpath("./acceptance.py")
facility_domain_py = open(out_file, "w")
# This block should just be all the text requisite for the file not including cause stuff
# TODO: Make sure the template is right
print('Opening Facility Domain Template')
path = os.path.normpath("../../python_causal_compiler/compiler/templates/acceptance_template.txt")
template = open(path, "r").read()
# Actually compile input
print('Running Facility Domain Compiler')
result, M = interpreter.interpret()
inserted = template.replace(' # INSERT CAUSES HERE', result)
inserted = inserted.replace('M = 0 # INSERT M HERE', 'M = '+M)
facility_domain_py.write(inserted)
facility_domain_py.close()
return result
## Make Imitation
#
# Outputs the second python file that uses pyhop to traverse the
# CO-PCT tree
def make_imitation(interpreter):
out_file = os.path.normpath("./python_causal_compiler/compiler/output/imitation.py")
imitation_py = open(out_file, "w")
# This block should just be all the text requisite for the file not including cause stuff
# TODO: Make sure the template is right
print('Opening Imitation Template')
path = os.path.normpath("./python_causal_compiler/compiler/templates/imitation_template2.txt")
template = open(path, "r").read()
# Actually compile input
# TODO: Use the compiler for imitation, not the same one as facility domain!
print('Running Imitation Compiler')
result = interpreter.interpret()
inserted = template.replace('# INSERT METHODS HERE', result)
imitation_py.write(inserted)
imitation_py.close()
return result
## Run Facility Domain
#
# Runs the Facility Domain Compiler
def run_facility_domain(text):
print('Making Lexer')
lexer = Lexer(text)
print('Making Parser')
parser = Parser(lexer)
print('Making Facility Domain Compiler')
facility_compiler = Facility_Domain_Compiler(parser)
make_facility_domain(facility_compiler)
## Run Imitation
#
# Runs the Imitation Compiler
def run_imitation(text):
print('Making Lexer')
lexer = Lexer(text)
print('Making Parser')
parser = Parser(lexer)
print('Making Imitation Compiler')
imitation_compiler = Imitation_Compiler(parser)
make_imitation(imitation_compiler)
def main():
print('Opening Causal Input')
if len(sys.argv) < 3:
text = sys.argv[1]
else:
input_path = os.path.normpath(sys.argv[1])
text = open(input_path, 'r').read()
run_facility_domain(text)
#run_imitation(text)
if __name__ == '__main__':
main();
```
#### File: python_causal_compiler/compiler/Token.py
```python
class Token(object):
## Constructor
#
# @param type possible token type: VAR, COMMA, CAUSES, etc.
# @param value possible token value: int, float, string, ...
def __init__(self, type, value):
## @var type
# possible token type: VAR, COMMA, CAUSES, etc.
self.type = type
## @var value
# possible token value: int, float, string, None
self.value = value
## To String
#
# @retval String
def __str__(self):
return 'Token({type}, {value})'.format(
type = self.type,
value = repr(self.value)
)
def __repr__(self):
return self.__str__()
```
#### File: electron435/python_causal_compiler/test.py
```python
from compiler import Lexer
from compiler import Parser
from compiler import Facility_Domain_Compiler
from compiler import Imitation_Compiler
import difflib
def lexer_print_tokens(lexer):
token = lexer.get_next_token()
while token.type != 'EOF':
print (token)
token = lexer.get_next_token()
def test(textname, resultsname, testname0, keyname0, testname1, keyname1):
text = open(textname, 'r').read()
testresults = open(resultsname, 'a')
testresults.write("TESTING %s:\n" % textname)
makeFacility = True;
makeImitation = True;
testresults.write("makeFacility...\n")
if makeFacility:
testresults.write("\tLexer...\n")
facility_lexer = Lexer.Lexer(text)
testresults.write("\tParser...\n")
facility_parser = Parser.Parser(facility_lexer)
testresults.write("\tCompiler...\n")
facility_compiler = Facility_Domain_Compiler.Facility_Domain_Compiler(facility_parser)
result = facility_compiler.interpret()[0];
fc_file = open(testname0, 'w')
fc_file.write("%s" % result)
testlines = result.strip().splitlines()
keylines = open(keyname0, 'r').read().strip().splitlines()
difflinecount = 0;
# Print diffs of the files if they aren't equal
for line in difflib.unified_diff(keylines, testlines, fromfile=keyname0, tofile=testname0, lineterm=''):
if (difflinecount == 0):
testresults.write("--------- FAILED ---------\nDIFF:\n")
testresults.write("%s\n" % line)
difflinecount += 1
# No diffs, so must be same file
if (difflinecount == 0):
testresults.write("--------- PASSED ---------\n\n")
testresults.write("makeImitation...\n")
if makeImitation:
testresults.write("\tLexer...\n")
imitation_lexer = Lexer.Lexer(text)
testresults.write("\tParser...\n")
imitation_parser = Parser.Parser(imitation_lexer)
testresults.write("\tCompiler...\n")
imitation_compiler = Imitation_Compiler.Imitation_Compiler(imitation_parser)
result = imitation_compiler.interpret();
ic_file = open(testname1, 'w')
ic_file.write("%s" % result)
testlines = result.strip().splitlines()
keylines = open(keyname1, 'r').read().strip().splitlines()
difflinecount = 0;
# Print diffs of the files if they aren't equal
for line in difflib.unified_diff(keylines, testlines, fromfile=keyname1, tofile=testname1, lineterm=''):
if (difflinecount == 0):
testresults.write("--------- FAILED ---------\nDIFF:\n")
testresults.write("%s\n" % line)
difflinecount += 1
# No diffs, so must be same file
if (difflinecount == 0):
testresults.write("--------- PASSED ---------\n\n")
def main():
testresults = open('testfiles/RESULTS.txt', 'w')
test('causes.txt', 'testfiles/RESULTS.txt', 'testfiles/facility_compiler_test.txt', 'testfiles/facility_compiler_key.txt', 'testfiles/imitation_compiler_test.txt','testfiles/imitation_compiler_key.txt')
test('testfiles/test_conditionals.txt', 'testfiles/RESULTS.txt', 'testfiles/cond_fac_test.txt', 'testfiles/cond_fac_key.txt', 'testfiles/cond_imit_test.txt', 'testfiles/cond_imit_key.txt')
if __name__ == '__main__':
main();
``` |
{
"source": "Jhon-19/KGSystem",
"score": 2
} |
#### File: app/main/graph_utils.py
```python
from app import graph
from json import dumps, loads
from ..models import Node, Link, UserNodeAdd, UserLinkAdd
from flask_login import current_user
# Used to prevent adding duplicate nodes
# Edge ids and node ids are not the same
node_id_list = []
link_id_list = []
user_node_id_list = []
user_link_id_list = []
def clear_id_list():
global node_id_list, link_id_list, user_node_id_list, user_link_id_list
node_id_list = []
link_id_list = []
user_node_id_list = []
user_link_id_list = []
def format_node_adds(node_adds):
node_list = []
format_node_add_list(node_adds, node_list)
return dumps(node_list)
def format_link_adds(link_adds):
link_list = []
for link_add in link_adds:
current_id = link_add.user_link_id
if is_link_available(link_add):
link = Link(
current_id,
link_add.Type,
link_add.start_node,
link_add.end_node,
link_list
).get_dict()
link_list.append(link)
user_link_id_list.append(current_id)
return dumps(link_list)
def query_by_neo_id(neo_id):
find_node = 'MATCH (n)-[rel]-(m) ' \
'WHERE ID(n) = %s ' \
'RETURN ID(n) AS target_node_ID, ' \
'n AS target_node, ' \
'LABELS(n) AS target_node_label , ' \
'ID(rel) AS link_ID, TYPE(rel) AS link, ' \
'ID(m) AS neighbor_ID, m AS neighbor, ' \
'LABELS(m) AS neighbor_label, ' \
'ID(STARTNODE(rel)) AS start_node_ID, ' \
'ID(ENDNODE(rel)) AS end_node_ID' \
% neo_id
results = graph.run(find_node).data()
# print(dumps(results[0]))
results_json = loads(dumps(results))
node_list = []
link_list = []
for record in results_json:
id1 = record['target_node_ID']
if id1 not in node_id_list:
node1 = Node(
id1,
record['target_node_label'],
record_node=record['target_node']
).get_dict()
node_id_list.append(id1)
node_list.append(node1)
id2 = record['neighbor_ID']
if id2 not in node_id_list:
node2 = Node(
id2,
record['neighbor_label'],
record_node=record['neighbor']
).get_dict()
node_id_list.append(id2)
node_list.append(node2)
id_link = record['link_ID']
if id_link not in link_id_list:
link = Link(
id_link,
record['link'],
                record['start_node_ID'],  # in d3.js the id defaults to the node's index in the nodes array
record['end_node_ID'],
link_list
).get_dict()
link_id_list.append(id_link)
link_list.append(link)
return dumps({'nodes': node_list, 'links': link_list})
def query_by_user_id(user_id):
node_list = []
link_list = []
query_user_nodes_links(node_list, link_list, node_id=user_id)
return dumps({'nodes': node_list, 'links': link_list})
def query_by_node_name(node_name):
node_list = []
link_list = []
    # Query the user database
query_user_nodes_links(node_list, link_list, node_name=node_name)
find_node = 'MATCH (n)-[rel]-(m) ' \
'WHERE n.name = "%s" OR n.title = "%s" ' \
'RETURN ID(n) AS target_node_ID, ' \
'n AS target_node, ' \
'LABELS(n) AS target_node_label , ' \
'ID(rel) AS link_ID, TYPE(rel) AS link, ' \
'ID(m) AS neighbor_ID, m AS neighbor, ' \
'LABELS(m) AS neighbor_label, ' \
'ID(STARTNODE(rel)) AS start_node_ID, ' \
'ID(ENDNODE(rel)) AS end_node_ID' \
% (node_name, node_name)
results = graph.run(find_node).data()
# print(dumps(results[0]))
results_json = loads(dumps(results))
for record in results_json:
id1 = record['target_node_ID']
if id1 not in node_id_list:
node1 = Node(
id1,
record['target_node_label'],
record_node=record['target_node']
).get_dict()
node_id_list.append(id1)
node_list.append(node1)
id2 = record['neighbor_ID']
if id2 not in node_id_list:
node2 = Node(
id2,
record['neighbor_label'],
record_node=record['neighbor']
).get_dict()
node_id_list.append(id2)
node_list.append(node2)
id_link = record['link_ID']
if id_link not in link_id_list:
link = Link(
id_link,
record['link'],
record['start_node_ID'],
record['end_node_ID'],
link_list
).get_dict()
link_id_list.append(id_link)
link_list.append(link)
return dumps({'nodes': node_list, 'links': link_list})
def query_one_by_neo_id(neo_id):
find_one_node = 'MATCH (n) WHERE ID(n) = %s ' \
'RETURN ID(n) AS target_node_ID,' \
' n AS target_node, LABELS(n) AS target_node_label' \
% neo_id
results = graph.run(find_one_node).data()
# print(dumps(results[0]))
results_json = loads(dumps(results[0]))
target_node = Node(
results_json['target_node_ID'],
results_json['target_node_label'],
record_node=results_json['target_node']
).get_dict()
return target_node
def query_user_nodes_links(node_list, link_list, node_name=None, node_id=None):
    # Query the user database
user_nodes = []
if node_name != None:
user_nodes = find_user_node_by_name(node_name)
elif node_id != None:
user_nodes = find_user_node_by_id(node_id)
user_links = UserLinkAdd.query.filter_by(
username=current_user.username,
).all()
    format_node_add_list(user_nodes, node_list)  # arrange into a list of dicts (similar to a JSON array)
if len(node_list) == 0:
return
target_node_id = node_list[0]['ID']
for user_link in user_links:
current_id = user_link.user_link_id
if is_related_link(user_link, target_node_id):
link = Link(
current_id,
user_link.Type,
user_link.start_node,
user_link.end_node,
link_list
).get_dict()
link_id_list.append(current_id)
link_list.append(link)
if user_link.start_node == target_node_id:
current_node_id = user_link.end_node
else:
current_node_id = user_link.start_node
if int(current_node_id) >= 0 and current_node_id not in node_id_list:
node_list.append(query_one_by_neo_id(current_node_id))
node_id_list.append(current_node_id)
elif int(current_node_id) < 0 and current_node_id not in user_node_id_list:
format_node_add_list(find_user_node_by_id(current_node_id), node_list)
def format_node_add_list(user_nodes, node_list):
for user_node in user_nodes:
current_id = user_node.user_node_id
if current_id not in user_node_id_list:
node = Node(
current_id,
[user_node.Label],
title=user_node.title
).get_dict()
user_node_id_list.append(current_id)
node_list.append(node)
def find_user_link_by_id(link_id):
return UserLinkAdd.query.filter_by(
username=current_user.username,
user_link_id=link_id
).all()
def find_user_node_by_id(node_id):
return UserNodeAdd.query.filter_by(
username=current_user.username,
user_node_id=int(node_id)
).all()
def find_user_node_by_name(node_name):
return UserNodeAdd.query.filter_by(
username=current_user.username,
title=node_name
).all()
def is_related_link(user_link, target_node_id):
return (user_link.user_link_id not in user_link_id_list) and \
(user_link.start_node == target_node_id or
user_link.end_node == target_node_id)
def is_link_available(link):
return (link.user_link_id not in user_link_id_list) and \
(link.start_node in user_node_id_list
and link.end_node in user_node_id_list) or \
(link.start_node in user_node_id_list
and link.end_node in node_id_list) or \
(link.start_node in node_id_list
and link.end_node in user_node_id_list) or \
(link.start_node in node_id_list
and link.end_node in node_id_list)
def is_node_in_list(target_node_id, id_list):
    if target_node_id in id_list:
        return True
    return False
```
#### File: KGSystem/app/models.py
```python
from . import db
from . import login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True, nullable=False)
password_hash = db.Column(db.String(128), nullable=False)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<User %r>' % self.username
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class UserNodeChange(db.Model):
__tablename__ = 'user_node_changes'
id = db.Column(db.Integer, primary_key=True)
neo_id = db.Column(db.Integer, nullable=False)
time = db.Column(db.BIGINT)
username = db.Column(db.String(20), nullable=False)
property = db.Column(db.String(10), nullable=False)
property_value = db.Column(db.String(255), nullable=False)
def __repr__(self):
return '<User %s, Neo_Id %r>' % (self.username, self.neo_id)
class UserLinkChange(db.Model):
__tablename__ = 'user_link_changes'
id = db.Column(db.Integer, primary_key=True)
neo_link_id = db.Column(db.Integer, nullable=False)
time = db.Column(db.BIGINT)
username = db.Column(db.String(20), nullable=False)
property = db.Column(db.String(10), nullable=False)
property_value = db.Column(db.String(255), nullable=False)
def __repr__(self):
return '<User %s, Neo_Id %r>' \
% (self.username, self.neo_link_id)
class UserNodeAdd(db.Model):
__tablename__ = 'user_node_adds'
id = db.Column(db.Integer, primary_key=True)
time = db.Column(db.BIGINT)
username = db.Column(db.String(20), nullable=False)
user_node_id = db.Column(db.Integer, nullable=False)
Label = db.Column(db.String(10), nullable=False)
title = db.Column(db.String(10), nullable=False)
def __repr__(self):
return '<User %s, Neo_Id %r>' % (self.username, self.user_node_id)
class UserLinkAdd(db.Model):
__tablename__ = 'user_link_adds'
id = db.Column(db.Integer, primary_key=True)
time = db.Column(db.BIGINT)
username = db.Column(db.String(20), nullable=False)
user_link_id = db.Column(db.Integer, nullable=False)
Type = db.Column(db.String(10), nullable=False)
start_node = db.Column(db.Integer, nullable=False)
end_node = db.Column(db.Integer, nullable=False)
def __repr__(self):
return '<User %s, Neo_Id %r, StartNode %s, EndNode %s>' \
% (self.username, self.user_link_id, self.start_node, self.end_node)
# Define the JSON format of the Neo4j data exchanged between the front end and back end
class Node():
def __init__(self, ID, label_list, record_node=None, title=''):
self.ID = ID
self.Label = self.format_labels(label_list)
if record_node != None:
self.title = self.add_node_title(record_node)
else:
self.title = title
def get_dict(self):
node = {
'ID': self.ID,
'Label': self.Label,
'title': self.title
}
return node
def format_labels(self, label_list):
return ', '.join(label_list)
def add_node_title(self, record_node):
title = ''
if 'title' in record_node:
title = record_node['title']
elif 'name' in record_node:
title = record_node['name']
return title
class Link():
def __init__(self, ID, Type, source, target, link_list):
self.ID = ID
self.Type = Type
self.source = source
self.target = target
self.link_list = link_list
        self.link_order = 0  # if there are two edges between the same nodes, record the order of each edge
self.order_link()
def get_dict(self):
link = {
'ID': self.ID,
'Type': self.Type,
            'source': self.source,  # in d3.js the id defaults to the node's index in the nodes array
'target': self.target,
'link_order': self.link_order
}
return link
def order_link(self):
for link in self.link_list:
if self.is_same_link(link):
self.link_order += 1
        if int(self.ID) < 0:  # edges added by the user are stored in the reverse direction
self.link_order = -self.link_order-1
def is_same_link(self, link2):
return (self.source == link2['source'] and self.target == link2['target']) \
or (self.source == link2['target'] and self.target == link2['source'])
``` |
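A small sketch of how the `Node` and `Link` helpers above produce the d3.js-style payload that `graph_utils` returns; the labels and ids are made up, and the import path assumes the package layout shown in this repository.
```python
from app.models import Node, Link

links = []
# A node built from an explicit title and one built from a Neo4j record dict.
n1 = Node(1, ['Person'], title='Alice').get_dict()                  # {'ID': 1, 'Label': 'Person', 'title': 'Alice'}
n2 = Node(2, ['Movie', 'Film'], record_node={'title': 'Matrix'}).get_dict()

# link_order stays 0 for the first edge between a pair of nodes.
link = Link(10, 'ACTED_IN', 1, 2, links).get_dict()
links.append(link)

payload = {'nodes': [n1, n2], 'links': links}
```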
{
"source": "Jhon3/BestRoute_Graphs",
"score": 3
} |
#### File: Jhon3/BestRoute_Graphs/dataSet.py
```python
import numpy as np
import csv
import matplotlib.pyplot as plt
import networkx as nx
class DataSet:
def __init__(self, file):
self.file = file
self.dataSet = [[0]*27 for i in range(27)]
self.verticeState = {
0: "RN",
1: "PE",
2: "BA",
3: "PI",
4: "MA",
5: "CE",
6: "SE",
7: "AL",
8: "PB",
9: "AC",
10: "AP",
11: "AM",
12: "DF",
13: "ES",
14: "GO",
15: "MT",
16: "MS",
17: "MG",
18: "PA",
19: "PR",
20: "RJ",
21: "RS",
22: "RO",
23: "RR",
24: "SC",
25: "SP",
26: "TO"
}
self.loadCSV()
    def loadCSV(self):  # Loads the CSV as a list of lists
with open(self.file) as f:
data = [list(line) for line in csv.reader(f)]
newData = self.allRotuleToRepresentation(data)
cont = 0
for d in newData:
cont = cont+1
self.dataSet[int(d[0])][int(d[1])] = float(d[2])
    def getDataSet(self):  # Returns the data set already in matrix form
return self.dataSet
    def allRotuleToRepresentation(self, data):  # Converts the labels into numbers for each vertex
for d in data:
for representation, rotule in self.verticeState.items():
if rotule == d[0]:
d[0] = str(representation)
if rotule == d[1]:
d[1] = str(representation)
return data
    def rotuleToRepresentation(self, rotuleOrRepresentation):  # Returns the representation of a label
vertice = None
if type(rotuleOrRepresentation) is str:
for representation, rotule in self.verticeState.items():
if rotule == rotuleOrRepresentation:
vertice = representation
elif type(rotuleOrRepresentation) is int:
vertice = self.verticeState.get(rotuleOrRepresentation)
return vertice
def getStates(self):
return list(self.verticeState.values())
```
#### File: Jhon3/BestRoute_Graphs/dijkstra.py
```python
from dataSet import DataSet
import numpy as np
class Dijkstra:
def __init__(self, source, target, graph, graphSize):
self.source = source
self.target = target
self.graph = graph
self.graphSize = graphSize
def dijkstraRoute(self):
m = self.graphSize
i = 0
vert = 0
k = 0
newDist = 0
minn = 0
        M = [0 for x in range(m)]  # Tracks whether each vertex has already been visited
        L = [9999999999 for x in range(m)]  # "Infinity"; holds the length of the shortest path found so far
        A = [-1 for x in range(m)]  # Stores the shortest path between source and target (predecessors)
route = []
vert = self.source
L[vert] = 0
        while(vert != self.target and vert != -1): # while the target has not been reached and the path still exists
            for i in range(m): # iterate over the vertices adjacent to vert
                if self.graph[vert][i] != 0 and M[i] == 0: # if the edge exists and has not been visited yet
                    newDist = L[vert] + self.graph[vert][i] # take the cost of edge (vert, i) and add it to the existing length
                    if (L[i]>newDist): # if the length stored for vertex i is greater than the computed one, update it
L[i] = newDist
A[i] = vert
M[vert] = 1
minn = 999999999
vert = -1
            for i in range(m): # update the current vertex
if M[i] == 0 and L[i] < minn:
minn = L[i]
vert = i
        if vert == self.target: # Get the cost from the source to the target
            cost = L[self.target]
            route.append(self.target) # Include the target in the path
            while vert != self.source: # Build the path from back to front
route.append(A[vert])
vert = A[vert]
return (list(reversed(route)), cost)
return None
``` |
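An end-to-end sketch combining the two classes above; the CSV filename and the RN→BA query are assumptions for illustration — the file is expected to hold `origin,destination,distance` rows using the state abbreviations from `verticeState`.
```python
from dataSet import DataSet
from dijkstra import Dijkstra

ds = DataSet('distancias.csv')            # placeholder path; rows look like "RN,PB,290.0"
graph = ds.getDataSet()                   # 27x27 adjacency matrix of distances

source = ds.rotuleToRepresentation('RN')  # state label -> vertex index
target = ds.rotuleToRepresentation('BA')
result = Dijkstra(source, target, graph, len(graph)).dijkstraRoute()
if result is not None:
    route, cost = result
    print([ds.rotuleToRepresentation(v) for v in route], cost)
```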
{
"source": "jhonaelramos/Odoo-14-Development-Essentials",
"score": 2
} |
#### File: Odoo-14-Development-Essentials/client_app/library_odoorpc.py
```python
import odoorpc
class LibraryAPI():
def __init__(self, host, port, db, user, pwd):
self.api = odoorpc.ODOO(host, port=port)
self.api.login(db, user, pwd)
self.uid = self.api.env.uid
self.model = "library.book"
self.Model = self.api.env[self.model]
def _execute(self, method, arg_list, kwarg_dict=None):
return self.api.execute(
self.model,
method, *arg_list, **kwarg_dict)
def search_read(self, title=None):
domain = [("name", "ilike", title)] if title else []
fields = ["id", "name"]
return self.Model.search_read(domain, fields)
def create(self, title):
vals = {"name": title}
return self.Model.create(vals)
def write(self, id, title):
vals = {"name": title}
self.Model.write(id, vals)
def unlink(self, id):
return self.Model.unlink(id)
``` |
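A usage sketch for the `LibraryAPI` wrapper above, assuming a reachable Odoo server; host, port, database and credentials are placeholders.
```python
from library_odoorpc import LibraryAPI

api = LibraryAPI('localhost', 8069, 'library', 'admin', 'admin')  # placeholder connection details

book_id = api.create('Brave New World')
print(api.search_read('Brave'))                   # e.g. [{'id': ..., 'name': 'Brave New World'}]
api.write(book_id, 'Brave New World (1932)')
api.unlink(book_id)
```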
{
"source": "JhonAI13/Curso_python",
"score": 4
} |
#### File: Curso_python/Curso-em-video/Aula_100.py
```python
from random import randrange
def sorteando(num):
print('Soteando 5 valores da lista: ', end='')
for c in range(0, 5):
n = randrange(10)
num.append(n)
print(f'{n} ', end=' ')
print('Pronto!')
def somando_pares(num):
soma = 0
for v in range(0, len(num)):
if num[v] / 2 == num[v] // 2:
soma += num[v]
print(f'Somando os valores pares de {num}, temos {soma}.')
num = list()
sorteando(num)
somando_pares(num)
```
#### File: Curso_python/Curso-em-video/Aula_103.py
```python
def ficha(nome='<DESCONHECIDO>', gols=0):
if gols.isnumeric():
gols = int(gols)
else:
gols = 0
if nome.strip() == '':
nome = '<DESCONHECIDO>'
print(f"O jogador {nome} fez {gols} gol(s) no campeonato.")
a = str(input('Nome do jogador: '))
b = str(input('Número de Gols:'))
ficha(a, b)
```
#### File: Curso_python/Curso-em-video/Aula_97.py
```python
def entre_linhas(mensagem):
l = len(mensagem) + 2
print('-' * l)
print(f' {mensagem}')
print('-' * l)
entre_linhas('<NAME>')
entre_linhas('Curso de python no youtube')
``` |
{
"source": "jhonalex06/Python_kids",
"score": 4
} |
#### File: Python_kids/scripts/example_10.py
```python
import turtle
t = turtle.Turtle()
#Screen Configuration
screen = turtle.Screen()
screen.setup(width = 1.0, height = 1.0)
def draw_leaf(size):
leaf_points = [[0, 7], [-1.2, 4.4], [-3, 5], [-2, 1],
[-4, 3], [-4.3, 1.5], [-7, 2.5], [-5.5, 0],
[-7, -1], [-3.3, -3.4], [-4, -5], [-0.2, -4.2],
[-0.2, -8], [0.2, -8], [0.2, -4.2], [4, -5],
[3.3, -3.4], [7, -1], [5.5, 0], [7, 2.5],
[4.3, 1.5], [4, 3], [2, 1], [3, 5],
[1.2, 4.4], [0, 7]]
t.color('red')
t.begin_fill()
for (x, y) in leaf_points:
t.goto(x * size, y * size)
t.end_fill()
draw_leaf(size=40)
turtle.done()
``` |
{
"source": "Jhonan01/Brick",
"score": 3
} |
#### File: bricks/index/routes.py
```python
from flask import request, render_template, session
from bricks.index import index_bp
from flask import Flask, url_for
from bricks.models import Usuario, Itens, Imagens, db, Lances
from werkzeug.security import generate_password_hash, check_password_hash
@index_bp.route('/index', methods = ['GET', 'POST'])
def index():
if request.method == 'POST':
query = Usuario.select(Usuario.usuario_login).where(Usuario.usuario_login == request.form.get('usuario')).get()
if('{0.usuario_login}'.format(query) == request.form.get('usuario')):
queryS = Usuario.select(Usuario.usuario_senha)
print(queryS)
for aux in queryS:
print(aux.usuario_senha)
if(check_password_hash (aux.usuario_senha, request.form.get('senha')) == True):
session['usuario'] = request.form.get('usuario')
usuario_local = request.form.get('usuario')
return render_template('index.html', logado='Você está logado')
return render_template('index.html', logado='Você não está logado')
@index_bp.route('/pesquisa', methods = ['GET', 'POST'])
def pesquisa():
if request.method == 'POST':
procuraTitulo = Itens.select().where(Itens.itens_categoria == request.form.get('pesquisa'))
d=[]
for iten in procuraTitulo:
imagens = Imagens.select().where(Imagens.imagens_id_itens_id == iten.id)
img=[]
j=0
for imagen in imagens:
img.insert(j,imagen.imagens_name_hash)
j+= 1
e = {
'id': iten.id,
'id_user': iten.itens_id_users_id,
'titulo': iten.itens_titulo,
'descricao': iten.itens_descricao,
'imagens': img
}
d.append(e)
quantiO = len(d)
return render_template('pesquisa.html', resultadoTitulo=d, quantidadeObjetos = quantiO)
return render_template('index.html')
@index_bp.route('/carregar_oferta/', methods = ['GET', 'POST'])
def carregar_oferta():
queryU = Usuario.get(Usuario.usuario_login == session['usuario'])
selecionarItens = Itens.select().where(Itens.itens_id_users_id == queryU.id)
s=[]
for a in selecionarItens:
selecionarImagens = Imagens.select().where(Imagens.imagens_id_itens_id == a.id)
imagem=[]
t=0
for teste in selecionarImagens:
imagem.insert(t, teste.imagens_name_hash)
t+=1
b = {
'id': a.id,
'id_user_lancador': a.itens_id_users_id,
'id_user_alvo': request.args.get('id_user_alvo'),
'id_item_alvo': request.args.get('id_item_alvo'),
'titulo': a.itens_titulo,
'descricao': a.itens_descricao,
'imagens': imagem
}
s.append(b)
print(s)
return render_template('carregar_oferta.html',si = s)
```
#### File: BricksO/bricks/__init__.py
```python
from flask import Flask
from bricks.config import Config
from peewee import *
db = SqliteDatabase('dados.db')
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
from bricks.lance import lance_bp
app.register_blueprint(lance_bp)
from bricks.login import login_bp
app.register_blueprint(login_bp)
from bricks.cadastro_usuario import cadastroUsuario_bp
app.register_blueprint(cadastroUsuario_bp)
from bricks.index import index_bp
app.register_blueprint(index_bp)
from bricks.incluir_item import incluirItem_bp
app.register_blueprint(incluirItem_bp)
return app
``` |
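A minimal sketch of how the application factory above would typically be launched, e.g. from a `run.py` at the project root; host and port are assumptions.
```python
from bricks import create_app

app = create_app()

if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000, debug=True)
```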
{
"source": "JhonArroyo/fastapi-python",
"score": 2
} |
#### File: JhonArroyo/fastapi-python/app.py
```python
from fastapi import FastAPI, status
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
#from fastapi.responses import HTMLResponse
from starlette.requests import Request
from starlette.status import HTTP_200_OK
from route.usser import usser
from typing import Optional
app = FastAPI(
title="FastAPI & MySQL",
description="This is my first RestAPI using Python",
version="0.1.0",
openapi_tags=[{
"name":"User_Internal",
"description": "Only for Debugger"
}]
)
templates = Jinja2Templates(directory="html")
app.mount("/css", StaticFiles(directory="css"), name="css")
@app.get("/", status_code=status.HTTP_200_OK, tags=["Index"])
async def html_index(request: Request):
return templates.TemplateResponse("index.html", context={"request": request})
app.include_router(usser)
``` |
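A development-only sketch for serving the FastAPI app above with Uvicorn (the equivalent of running `uvicorn app:app --reload` on the command line); host and port are assumptions.
```python
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app:app", host="127.0.0.1", port=8000, reload=True)
```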
{
"source": "jhonasiv/MPNet",
"score": 2
} |
#### File: python/lcmtypes/environment_t.py
```python
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
import lcmtypes.region_3d_t
class environment_t(object):
__slots__ = ["operating", "goal", "num_obstacles", "obstacles"]
__typenames__ = ["lcmtypes.region_3d_t", "lcmtypes.region_3d_t", "int32_t", "lcmtypes.region_3d_t"]
__dimensions__ = [None, None, None, ["num_obstacles"]]
def __init__(self):
self.operating = lcmtypes.region_3d_t()
self.goal = lcmtypes.region_3d_t()
self.num_obstacles = 0
self.obstacles = []
def encode(self):
buf = BytesIO()
buf.write(environment_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
assert self.operating._get_packed_fingerprint() == lcmtypes.region_3d_t._get_packed_fingerprint()
self.operating._encode_one(buf)
assert self.goal._get_packed_fingerprint() == lcmtypes.region_3d_t._get_packed_fingerprint()
self.goal._encode_one(buf)
buf.write(struct.pack(">i", self.num_obstacles))
for i0 in range(self.num_obstacles):
assert self.obstacles[i0]._get_packed_fingerprint() == lcmtypes.region_3d_t._get_packed_fingerprint()
self.obstacles[i0]._encode_one(buf)
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != environment_t._get_packed_fingerprint():
raise ValueError("Decode error")
return environment_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = environment_t()
self.operating = lcmtypes.region_3d_t._decode_one(buf)
self.goal = lcmtypes.region_3d_t._decode_one(buf)
self.num_obstacles = struct.unpack(">i", buf.read(4))[0]
self.obstacles = []
for i0 in range(self.num_obstacles):
self.obstacles.append(lcmtypes.region_3d_t._decode_one(buf))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if environment_t in parents: return 0
newparents = parents + [environment_t]
tmphash = (0x8caabc2a2ba0f9c7+ lcmtypes.region_3d_t._get_hash_recursive(newparents)+ lcmtypes.region_3d_t._get_hash_recursive(newparents)+ lcmtypes.region_3d_t._get_hash_recursive(newparents)) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if environment_t._packed_fingerprint is None:
environment_t._packed_fingerprint = struct.pack(">Q", environment_t._get_hash_recursive([]))
return environment_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
```
#### File: python/lcmtypes/graph_t.py
```python
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
import lcmtypes.vertex_t
import lcmtypes.edge_t
class graph_t(object):
__slots__ = ["num_vertices", "vertices", "num_edges", "edges"]
__typenames__ = ["int32_t", "lcmtypes.vertex_t", "int32_t", "lcmtypes.edge_t"]
__dimensions__ = [None, ["num_vertices"], None, ["num_edges"]]
def __init__(self):
self.num_vertices = 0
self.vertices = []
self.num_edges = 0
self.edges = []
def encode(self):
buf = BytesIO()
buf.write(graph_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">i", self.num_vertices))
for i0 in range(self.num_vertices):
assert self.vertices[i0]._get_packed_fingerprint() == lcmtypes.vertex_t._get_packed_fingerprint()
self.vertices[i0]._encode_one(buf)
buf.write(struct.pack(">i", self.num_edges))
for i0 in range(self.num_edges):
assert self.edges[i0]._get_packed_fingerprint() == lcmtypes.edge_t._get_packed_fingerprint()
self.edges[i0]._encode_one(buf)
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != graph_t._get_packed_fingerprint():
raise ValueError("Decode error")
return graph_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = graph_t()
self.num_vertices = struct.unpack(">i", buf.read(4))[0]
self.vertices = []
for i0 in range(self.num_vertices):
self.vertices.append(lcmtypes.vertex_t._decode_one(buf))
self.num_edges = struct.unpack(">i", buf.read(4))[0]
self.edges = []
for i0 in range(self.num_edges):
self.edges.append(lcmtypes.edge_t._decode_one(buf))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if graph_t in parents: return 0
newparents = parents + [graph_t]
tmphash = (0x49189ad7b639b453+ lcmtypes.vertex_t._get_hash_recursive(newparents)+ lcmtypes.edge_t._get_hash_recursive(newparents)) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if graph_t._packed_fingerprint is None:
graph_t._packed_fingerprint = struct.pack(">Q", graph_t._get_hash_recursive([]))
return graph_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
```
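A round-trip sketch using only the generated LCM API shown above; the message is left empty so no `vertex_t`/`edge_t` field details need to be assumed.
```python
from lcmtypes.graph_t import graph_t

msg = graph_t()                  # defaults: 0 vertices, 0 edges
data = msg.encode()              # packed fingerprint followed by the fields

decoded = graph_t.decode(data)
print(decoded.num_vertices, decoded.num_edges)   # 0 0
```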
#### File: MPNet/enet/data_loader.py
```python
import os
from abc import ABC
from typing import Iterable
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
project_path = f"{os.path.abspath(__file__).split('mpnet')[0]}mpnet"
def load_perms(num, start_point=0):
perms = np.loadtxt(f'{project_path}/obs/perm.csv', delimiter=',')
assert num + start_point < len(perms), f"Dataset has shape {perms.shape}. Received request for " \
f"{num + start_point} data points."
perms = perms.reshape((-1, 7, 2))
return perms[start_point: start_point + num]
def create_samples(perm_unit, cached_perm={}):
samples = []
for obs in perm_unit:
if tuple(obs) not in cached_perm.keys():
sample = np.random.uniform(obs - 2.5, obs + 2.5, (200, 2))
samples.append(sample)
cached_perm[tuple(obs)] = sample
else:
samples.append(cached_perm[tuple(obs)])
samples = np.array(samples).flatten()
return samples
class EnvDataset(Dataset, ABC):
def __init__(self, size, start_point=0):
super().__init__()
self.perms = load_perms(size, start_point)
self.cached_perms = {}
def __len__(self):
return len(self.perms)
def __getitem__(self, item):
if torch.is_tensor(item):
item = item.tolist()
if isinstance(item, slice) or isinstance(item, Iterable):
samples = []
for perm in self.perms[item]:
sample = create_samples(perm, self.cached_perms)
samples.append(sample)
return np.array(samples)
else:
sample = create_samples(self.perms[item], self.cached_perms)
return torch.from_numpy(sample)
def loader(num_envs, batch_size, start_point=0):
batch_size = int(batch_size)
dataset = EnvDataset(num_envs, start_point)
if batch_size > 1:
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4, shuffle=True, pin_memory=True)
else:
dataloader = DataLoader(dataset)
return dataloader
if __name__ == '__main__':
data = loader(300, 100, 0)
for n, (inp, ref) in enumerate(data):
print(inp)
```
#### File: MPNet/MPNet/neuralplanner.py
```python
import argparse
import os
from copy import deepcopy
import numpy as np
import torch
from shapely.geometry import LineString, MultiPolygon, Point, Polygon
# from MPNet.visualizer.visualizer import plot_path
project_path = f"{os.path.abspath(__file__).split('mpnet')[0]}mpnet"
def is_in_collision(x, env):
x = Point(x)
for obstacle in env:
if obstacle.contains(x):
return True
return False
def steer_to(start, end, env):
start = Point(start)
end = Point(end)
line = LineString([start, end])
for polygon in env:
if polygon.intersects(line):
return False
else:
return True
def feasibility_check(path, env) -> bool:
for i in range(0, len(path[:-1])):
ind = steer_to(path[i], path[i + 1], env)
if not ind:
return False
return True
def lvc(path, env):
# Iterate from the first beacon state to the second to last.
for i in range(0, len(path) - 1):
# Iterate from the last beacon state to the ith.
for j in range(len(path) - 1, i + 1, -1):
ind = steer_to(path[i], path[j], env)
if ind:
pc = []
for k in range(0, i + 1):
pc.append(path[k])
for k in range(j, len(path)):
pc.append(path[k])
return lvc(pc, env)
return path
def remove_invalid_beacon_states(path, env):
new_path = []
for state in path:
if not is_in_collision(state, env):
new_path.append(state)
else:
try:
new_path[-1] = np.mean([new_path[-1], new_path[-2]], axis=0)
except IndexError:
pass
for n in range(len(new_path) - 1, 0, -1):
if is_in_collision(new_path[n], env):
try:
new_path[n + 1] = np.mean([new_path[n + 1], new_path[n + 2]], axis=0)
except IndexError:
pass
new_path = np.array(new_path)
return new_path
def replan_path(previous_path, env, data_input, pnet, num_tries=12):
path = remove_invalid_beacon_states(previous_path, env)
feasible = feasibility_check(path, env)
tries = 0
target_reached = False
while not feasible and tries < num_tries:
tries += 1
replanned_path = [path[0]]
# Iterate through each consecutive beacon state
for i in range(0, len(path) - 1):
steerable, start, goal = bidirectional_replan_setup(env, path[i], path[i + 1], data_input)
if steerable:
replanned_path.append(path[i + 1])
else:
target_reached, rpath_1, rpath_2 = bidirectional_planning(pnet, start, goal, env)
replanned_path = list(np.concatenate([replanned_path, rpath_1, rpath_2[::-1]]))
if not target_reached:
if i < len(path) - 1:
replanned_path = np.concatenate([replanned_path, path[i + 1:]])
return False, replanned_path
replanned_path = np.array(replanned_path)
filtered_path, indexes = np.unique(replanned_path, axis=0, return_index=True)
filtered_path = filtered_path[np.argsort(indexes)]
lvc_replanned_path = lvc(filtered_path, env)
lvc_replanned_path = np.array(lvc_replanned_path)
feasible = feasibility_check(lvc_replanned_path, env)
if feasible:
path = lvc_replanned_path
break
elif not target_reached:
return False, lvc_replanned_path
else:
path = np.array(filtered_path)
path = remove_invalid_beacon_states(path, env)
if not feasible:
path = np.array(filtered_path)
return feasible, path
# Checks if it's necessary to replan this section
def bidirectional_replan_setup(env, start_point, goal_point, model_input):
start = deepcopy(model_input)
start[-4:] = torch.as_tensor([*start_point, *goal_point])
goal = deepcopy(start)
goal[-4:] = goal[[-2, -1, -4, -3]]
steerable = steer_to(start_point, goal_point, env)
return steerable, start, goal
def bidirectional_planning(pnet, origin, goal, env, steps=100):
result_1 = deepcopy(origin[-4:-2])
result_2 = deepcopy(goal[-4:-2])
path_1, path_2 = [result_1.numpy()], [result_2.numpy()]
tree, target_reached = False, False
step = 0
while not target_reached and step < steps:
step += 1
if not tree:
result_1 = pnet(origin)
result_1 = result_1.data.detach()
path_1.append(result_1.numpy())
origin[-4:-2] = result_1
goal[-2:] = result_1
else:
result_2 = pnet(goal)
result_2 = result_2.data.detach()
path_2.append(result_2.numpy())
goal[-4:-2] = result_2
origin[-2:] = result_2
tree = not tree
target_reached = steer_to(result_1.numpy(), result_2.numpy(), env)
return target_reached, path_1, path_2
def bidirectional_planner(pnet, env, model_input):
origin = deepcopy(model_input)
goal = deepcopy(origin)
goal[-4:] = goal[[-2, -1, -4, -3]]
target_reached, path_1, path_2 = bidirectional_planning(pnet, origin, goal, env)
return target_reached, path_1, path_2
def plan(pnet, env, data_input, detailed_results=False):
env = env_npy_to_polygon(env)
target_reached, path_1, path_2 = bidirectional_planner(pnet, env, data_input)
path = np.concatenate([path_1, path_2[::-1]])
if target_reached:
lvc_path = np.array(lvc(path, env))
feasible = feasibility_check(lvc_path, env)
if not feasible:
result, replanned_path = replan_path(lvc_path, env, data_input, pnet)
if not detailed_results:
return f"Replan {'Success' if result else 'Failure'}", replanned_path
else:
final_path = lvc(replanned_path, env)
return f"Replan {'Success' if result else 'Failure'}", path, lvc_path, replanned_path, final_path
else:
if not detailed_results:
return "Success", path
else:
return "Success", path, lvc_path, None, None
else:
if not detailed_results:
return "Failure", None
else:
return "Failure", path, None, None, None
def env_npy_to_polygon(env):
obstacles = []
for obstacle in env:
x, y = obstacle
obstacles.extend([[x - 2.5, y - 2.5], [x - 2.5, y + 2.5], [x + 2.5, y + 2.5], [x + 2.5, y - 2.5],
[x - 2.5, y - 2.5], [None, None]])
obstacles = np.array(obstacles)
obstacle = []
obstacles_polygon = []
for point in obstacles:
if None in point:
polygon = Polygon(obstacle)
obstacles_polygon.append(polygon)
obstacle = []
else:
obstacle.append(tuple(point))
env = MultiPolygon(obstacles_polygon)
return env
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--pnet", default="", type=str)
parser.add_argument("--enet", default="", type=str)
parser.add_argument("--num_envs", default=1, type=int)
parser.add_argument("--num_trajs", default=1, type=int)
args = parser.parse_args()
```
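A small sketch of the geometry helpers above in isolation, assuming a single obstacle centred at the origin (`env_npy_to_polygon` pads each centre by ±2.5) and the Shapely 1.x iteration behaviour the module itself relies on; the import path is an assumption.
```python
import numpy as np
from MPNet.neuralplanner import env_npy_to_polygon, is_in_collision, steer_to

env = env_npy_to_polygon(np.array([[0.0, 0.0]]))   # one 5x5 square obstacle at the origin

print(is_in_collision([0.0, 0.0], env))            # True  - the point lies inside the obstacle
print(steer_to([-5.0, 0.0], [5.0, 0.0], env))      # False - the segment crosses the obstacle
print(steer_to([-5.0, 5.0], [5.0, 5.0], env))      # True  - the segment clears it
```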
#### File: MPNet/MPNet/performance.py
```python
import argparse
import json
import os
from time import time
import pandas as pd
from enet.data_loader import load_perms
from neuralplanner import plan
from pnet.data_loader import loader
from pnet.model import PNet
project_path = f"{os.path.abspath(__file__).split('mpnet')[0]}mpnet"
def setup_data(dataset, data, paths_per_env, selected=None):
mapping = {}
if not selected:
env_ids = pd.unique(data["Env ID"])
for env_id in env_ids:
df = data[data["Env ID"] == env_id]
if env_id == 100:
paths_per_env *= 10
selected_path_ids = df.sample(n=paths_per_env).index.tolist()
inputs = []
references = []
for path_id in selected_path_ids:
data_input, _, reference_path, _ = dataset[path_id, True]
inputs.append(data_input)
references.append(reference_path)
mapping[env_id] = {"input": inputs, "reference": references, "selected_ids": selected_path_ids}
else:
for env_id, selected_path_ids in selected.items():
inputs = []
references = []
for path_id in selected_path_ids:
data_input, _, reference_path, _ = dataset[path_id, True]
inputs.append(data_input)
references.append(reference_path)
mapping[int(env_id)] = {"input": inputs, "reference": references, "selected_ids": selected_path_ids}
return mapping
def run(args):
pnets = []
datasets = {}
dataset_mapping = {}
for n, model in enumerate(args.pnet):
pnets.append(PNet.load_from_checkpoint(model))
pnets[-1].freeze()
enet_ckpt = pnets[-1].training_config['enet']
enet_key = os.path.basename(enet_ckpt)
if enet_key not in datasets:
datasets[enet_key] = loader(enet_ckpt, f"{project_path}/valEnv", 110, 0, 1, True)
dataset_mapping[n] = datasets[enet_key]
pnets_basenames = [os.path.basename(p).split('.')[0] for p in args.pnet]
envs = load_perms(110, 0)
try:
with open(f"{project_path}/{args.output}/selected_results.json", "r") as f:
selected_results = json.load(f)
except FileNotFoundError:
selected_results = {}
for pnet, dataset, name in zip(pnets, dataset_mapping.values(), pnets_basenames):
data = pd.DataFrame(dataset.path_files, columns=["Env ID", "Path", "State ID"])
data['index'] = data.index
data = data[data["State ID"] == 0].drop(columns=["State ID"])
try:
with open(f"{project_path}/{args.output}/selected.json", "r") as f:
selected = json.load(f)
overall_data = setup_data(dataset, data, 50, selected)
except (FileNotFoundError, json.JSONDecodeError) as e:
overall_data = setup_data(dataset, data, 50)
selected = {int(k): list(overall_data[k]["selected_ids"]) for k in overall_data}
selected_start_n_goals = {int(k): [[list(ref[0]), list(ref[-1])] for ref in overall_data[k]["reference"]]
for k in overall_data}
with open(f"{project_path}/{args.output}/selected.json", "w") as f:
json.dump(selected, f)
with open(f"{project_path}/{args.output}/selected_points.json", "w") as f:
json.dump(selected_start_n_goals, f)
try:
with open(f"{project_path}/{args.output}/{name}.json", "r") as f:
results = json.load(f)
except (FileNotFoundError, json.JSONDecodeError):
results = {"seen": {"Success": 0, "Failure": 0, "Replan Success": 0, "Replan Failure": 0
},
"unseen": {"Success": 0, "Failure": 0, "Replan Success": 0, "Replan Failure": 0,
}, "Time": {"Total" : [], "Success": [], "Failure": [],
"Replan Success": [], "Replan Failure": []}}
try:
paths = pd.read_json(f"{project_path}/{args.output}/paths.json", orient='table')
except ValueError:
paths = pd.DataFrame([], columns=['seen', 'model', 'id', 'env_id', 'result', 'initial', 'goal', 'bidir',
'lvc', 'replan', 'final'])
paths = paths.set_index(['model', 'id'])
for env_id, mapping in overall_data.items():
start_idx = len(paths.query(f'env_id == {env_id} and model == "{name}"'))
for data_input, selected_id in zip(mapping["input"][start_idx:], mapping["selected_ids"][start_idx:]):
if selected_id not in selected_results:
selected_results[selected_id] = {"Success" : [], "Failure": [], "Replan Success": [],
"Replan Failure": []}
start = time()
result, path, lvc_path, replanned, final_path = plan(pnet, envs[env_id], data_input,
detailed_results=True)
duration = time() - start
results["seen" if env_id < 100 else "unseen"][result] += 1
results["Time"]["Total"].append(duration)
results["Time"][result].append(duration)
paths = paths.append(pd.DataFrame(
[[env_id < 100, name, selected_id, env_id, result, data_input[-4:-2], data_input[-2:], path,
lvc_path, replanned, final_path]],
columns=['seen', 'model', 'id', 'env_id', 'result', 'initial', 'goal', 'bidir',
'lvc', 'replan', 'final']).set_index(['model', 'id']))
selected_results[selected_id][result].append(name)
paths.to_json(f"{project_path}/{args.output}/paths.json", default_handler=str, orient='table')
with open(f"{project_path}/{args.output}/{name}.json", "w") as f:
json.dump(results, f)
with open(f"{project_path}/{args.output}/selected_results.json", "w") as f:
json.dump(selected_results, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--pnet", default="", nargs="+", type=str)
parser.add_argument("--output", default="data", type=str)
args = parser.parse_args()
run(args)
``` |
{
"source": "jhonasiv/rl-algorithms",
"score": 2
} |
#### File: 1 - Assault-v0/src/model.py
```python
import functools
import operator
from typing import Iterable
import torch
from torch import nn
from torchtyping import TensorType, patch_typeguard
from typeguard import typechecked
from rlalgs.value_based.model import DQNModel
patch_typeguard()
class AlienDQN(DQNModel):
def __init__(self, convolutional_layers: nn.Sequential, linear_layers: list,
input_dim: Iterable, device: torch.device):
super().__init__(convolutional_layers, device)
self._input_dim = input_dim
reshape_layer = self.get_last_conv_size()
first_linear_layer = [n for n, l in enumerate(linear_layers) if isinstance(l, nn.Linear)][0]
self._linear = nn.Sequential(
nn.Linear(reshape_layer, linear_layers[first_linear_layer].in_features),
*linear_layers).to(device)
@typechecked
def forward(self, x: TensorType[..., "batch"]) -> TensorType[..., "batch"]:
x = (x.movedim(-1, 0) / 255.).to(self.device)
x = self._model(x)
x = x.view(x.size(0), -1)
x = self._linear(x)
return x.T
def get_last_conv_size(self):
num_features_before_fcnn = functools.reduce(operator.mul, list(
self._model.cpu()(torch.rand(1, *self._input_dim[::-1])).shape))
self._model = self._model.to(self.device)
return num_features_before_fcnn
```
#### File: rlalgs/utils/functions.py
```python
import math
def constant_decay_function(variable, rate):
result = variable * rate
return result
def exponential_function(a, x, k, b, exp):
"""
Exponential function where
y = a * e^(-k * (x / b)^exp)
"""
result = a * math.exp(-k * (x / b) ** exp)
return result
def casted_exponential_function(a, x, k, b, exp):
"""
Exponential function where x is casted to int
y = a * e^(-k * int((x / b)^exp))
"""
result = a * math.exp(-k * int((x / b) ** exp))
return result
``` |
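A quick illustration of the decay helpers above, e.g. for an epsilon-greedy exploration schedule; the constants and the import path are arbitrary assumptions.
```python
from rlalgs.utils.functions import constant_decay_function, exponential_function

# Exponential schedule: eps = 1.0 * e^(-5 * (step / 10000)^1)
for step in (0, 2500, 5000, 10000):
    eps = exponential_function(a=1.0, x=step, k=5.0, b=10000, exp=1)
    print(step, round(eps, 4))

# Simple multiplicative decay applied once per episode.
epsilon = 1.0
for _ in range(3):
    epsilon = constant_decay_function(epsilon, rate=0.99)
print(epsilon)   # ~0.9703
```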