metadata | text
---|---
{
"source": "jlibovicky/char-nmt-fairseq",
"score": 2
} |
#### File: char-nmt-fairseq/char_scripts/eval_with_bootsrap_resampling.py
```python
import argparse
import random
import numpy as np
import scipy.stats
import sacrebleu
from tqdm import trange
from comet.models import download_model
def load_file(fh):
sentences = []
for line in fh:
sentences.append(line.strip())
fh.close()
return sentences
def confidence_interval(data, confidence=0.95):
m, se = np.mean(data), scipy.stats.sem(data)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., len(data) - 1)
return m, h
def main():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("src", type=argparse.FileType("r"))
parser.add_argument("ref", type=argparse.FileType("r"))
parser.add_argument("hyp", type=argparse.FileType("r"))
parser.add_argument("--use-comet", default=False, action="store_true")
parser.add_argument("--use-bertscore", default=False, action="store_true")
parser.add_argument("--n-samples", default=1000, type=int)
parser.add_argument("--confidence", default=0.95, type=int)
args = parser.parse_args()
srcs = load_file(args.src)
refs = load_file(args.ref)
hyps = load_file(args.hyp)
assert len(srcs) == len(refs) == len(hyps)
bleu_score = sacrebleu.BLEU().corpus_score(hyps, [refs], n_bootstrap=args.n_samples)
print(f"BLEU {bleu_score.score:.4f} {bleu_score._ci:.4f}")
chrf_score = sacrebleu.CHRF().corpus_score(hyps, [refs], n_bootstrap=args.n_samples)
print(f"chrF {chrf_score.score / 100:.6f} {chrf_score._ci / 100:.6f}")
if args.use_comet:
    comet = download_model("wmt-large-da-estimator-1719")
    comet_data = [
        {"src": src, "mt": hyp, "ref": ref}
        for src, ref, hyp in zip(srcs, refs, hyps)]
    comet_res = comet.predict(comet_data, cuda=True, show_progress=True)[1]
    comet_mean, comet_int = confidence_interval(comet_res)
    print(f"COMET {comet_mean:.6f} {comet_int:.6f}")
if __name__ == "__main__":
main()
```
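A standalone sketch of the t-distribution confidence interval that `confidence_interval` above computes, run on dummy scores (the values are illustrative only and not from the repository):
```python
import numpy as np
import scipy.stats

# Dummy segment-level scores standing in for the COMET outputs used above.
scores = np.random.normal(loc=0.4, scale=0.05, size=100)
mean, sem = np.mean(scores), scipy.stats.sem(scores)
# Half-width of the 95% confidence interval, as in confidence_interval().
half_width = sem * scipy.stats.t.ppf((1 + 0.95) / 2.0, len(scores) - 1)
print(f"{mean:.6f} {half_width:.6f}")
```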
#### File: char-nmt-fairseq/char_scripts/lemmas_pickle_to_json.py
```python
import argparse
import json
import pickle
import logging
import sys
from lemmas_from_training_data import LemmaStat
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def main():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"train_data_stats", type=argparse.FileType("rb"),
help="Pickle file with lemmas and forms from train data.")
args = parser.parse_args()
logging.info("Loading lemma stats from training data.")
lemma_stats = pickle.load(args.train_data_stats)
logging.info("Loaded.")
print("[")
logging.info("Iterate over hypotheses and references.")
for stat in lemma_stats.values():
if stat.count < 5:
continue
print(json.dumps(stat.to_dict()) + ",")
print("]")
if __name__ == "__main__":
main()
``` |
{
"source": "jlibovicky/char-nmt",
"score": 3
} |
#### File: jlibovicky/char-nmt/noisy_slope.py
```python
import argparse
import math
import os
import sys
import numpy as np
from scipy.stats import linregress
def load_file(path):
with open(path) as f:
return np.array([float(line.strip()) for line in f])
def main():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"files", nargs="+", type=str, help="File with numbers.")
args = parser.parse_args()
non_existing = [path for path in args.files if not os.path.exists(path)]
if non_existing:
print(f"Files do not exists: {', '.join(non_existing)}",
file=sys.stderr)
exit(1)
if len(args.files) < 2:
print("Provide at least two series of numbers", file=sys.stderr)
exit(1)
all_series = [load_file(path) for path in args.files]
for path, series in zip(args.files, all_series):
if len(series) != 11:
print(f"Missing measurements in {path}.", file=sys.stderr)
exit(1)
noise_probabilities = np.arange(0, 1.1, 0.1)
for i, series in enumerate(all_series):
slope, intercept, r_value, p_value, std_err = linregress(
    noise_probabilities, series)
print(slope / intercept)
if __name__ == "__main__":
main()
``` |
{
"source": "jlibovicky/char-nmt-two-step-decoder",
"score": 3
} |
#### File: jlibovicky/char-nmt-two-step-decoder/bigram_tokenizer.py
```python
import typing
from typing import List, Union
from collections import Counter
import numpy as np
import torch
from tqdm import trange
from char_tokenizer import BaseTokenizer, SPECIAL_SYMBOLS, postprocess_idx_list
class BigramTokenizer(BaseTokenizer):
def batch_encode_plus(
self,
text: Union[str, List[str]], # the sentence to be encoded
add_special_tokens: bool = True, # Add [CLS] and [SEP]
max_length: int = 512, # maximum length of a sentence
truncation: bool = False,
pad_to_max_length: bool =True, # Add [PAD]s
return_attention_mask: bool = True, # Generate the attention mask
return_tensors: str = "pt"):
if not add_special_tokens:
raise ValueError(
"Bigram tokenizer does not work without special symbols.")
if isinstance(text, str):
text = [text]
idx_list = []
for sent in text:
char_list = list(sent)
if add_special_tokens:
char_list = ["<s>"] + char_list + ["</s>"]
token_list = [self.bos_token_id]
for i in range(len(char_list) - 1):
bigram = char_list[i] + char_list[i + 1]
if bigram in self.str_to_idx:
token_list.append(self.str_to_idx[bigram])
else:
token_list.append(
self.str_to_idx.get(
char_list[i + 1], self.unk_token_id))
if max_length is not None and len(token_list) > max_length:
if truncation:
token_list = token_list[:max_length]
else:
raise ValueError(
"The sequence is too long and trunkation is disabled.")
idx_list.append(token_list)
return postprocess_idx_list(
idx_list, pad_to_max_length, return_tensors, return_attention_mask)
def decode(
self,
token_ids: Union[int, List[int], np.ndarray, torch.Tensor]) -> str:
if isinstance(token_ids, int):
token_ids = [token_ids]
if isinstance(token_ids, np.ndarray):
assert len(token_ids.shape) == 1
if isinstance(token_ids, torch.Tensor):
assert len(token_ids.shape) == 1
chars = []
for char_id in token_ids:
if char_id == self.bos_token_id:
continue
if char_id in [self.eos_token_id, self.pad_token_id]:
break
str_form = self.idx_to_str[char_id]
if str_form.endswith("</s>"):
break
chars.append(str_form[-1])
return "".join(chars)
def from_data(
text: List[str],
max_vocab: int = None,
max_lines: int = None,
min_frequency: int = None) -> BigramTokenizer:
"""Create char-level tokenizer from data."""
unigram_counter: typing.Counter[str] = Counter()
bigram_counter: typing.Counter[str] = Counter()
len_limit = len(text)
if max_lines is not None:
len_limit = min(max_lines, len_limit)
pbar = trange(len_limit, unit="sentences")
for _, sent in zip(pbar, text):
if not sent:
continue
unigram_counter.update(sent)
bigram_counter.update([f"<s>{sent[0]}", f"{sent[-1]}</s>"])
bigram_counter.update([
sent[j] + sent[j + 1] for j in range(len(sent) -1)])
pbar.close()
if min_frequency is not None:
bigram_counter = Counter({
    key: count for key, count in bigram_counter.items()
    if count > min_frequency})
if max_vocab is None:
vocab_list = list(unigram_counter.keys()) + list(bigram_counter.keys())
else:
vocab_list = [
tok for tok, _ in unigram_counter.most_common(max_vocab)]
vocab_list += [
tok for tok, _ in bigram_counter.most_common(max_vocab ** 2)]
vocab = SPECIAL_SYMBOLS + vocab_list
return BigramTokenizer(vocab)
```
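A minimal, self-contained illustration of the bigram scheme that `BigramTokenizer` encodes and decodes above; it does not use the repository's `BaseTokenizer`, and the helper name is made up for this sketch:
```python
def sentence_bigrams(sent: str) -> list:
    # Wrap the sentence in boundary markers and emit one bigram per position,
    # mirroring the <s>/</s> handling in batch_encode_plus above.
    chars = ["<s>"] + list(sent) + ["</s>"]
    return [chars[i] + chars[i + 1] for i in range(len(chars) - 1)]

print(sentence_bigrams("cat"))  # ['<s>c', 'ca', 'at', 't</s>']
```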
#### File: jlibovicky/char-nmt-two-step-decoder/custom_chrf.py
```python
from typing import List
from collections import Counter
import string
import numpy as np
def pairwise_chrf(sentences: List[str], order: int = 6, beta: float = 2.0):
# 1. represent each sentence as n-grams
sentences = [
s.translate(str.maketrans("", "", string.whitespace))
for s in sentences]
sent_n_grams = [[
Counter([sent[i:i + o]
for i in range(len(sent) - o + 1)])
for o in range(1, order + 1)]
for sent in sentences]
# 2. prepare precision table
precisions = np.ones((len(sentences), len(sentences)))
# 3. compute the precisions
for i, sent_a in enumerate(sent_n_grams):
for j, sent_b in enumerate(sent_n_grams):
if i >= j:
continue
avg_precision = 0.0
avg_recall = 0.0
effective_order = 0
for ngrams_a, ngrams_b in zip(sent_a, sent_b):
a_count = sum(ngrams_a.values())
b_count = sum(ngrams_b.values())
common_count = sum((ngrams_a & ngrams_b).values())
if a_count > 0 and b_count > 0:
avg_precision += common_count / a_count
avg_recall += common_count / b_count
effective_order += 1
if effective_order == 0:
avg_precision, avg_recall = 0.0, 0.0
else:
avg_precision /= effective_order
avg_recall /= effective_order
precisions[i, j] = avg_precision
precisions[j, i] = avg_recall
# 4. recall is transposed precision
recalls = precisions.T
# 5. compute score
beta_sq = beta ** 2
scores = (
(1 + beta_sq) * precisions * recalls /
((beta_sq * precisions) + recalls))
# 6. mask out pairs where precision or recall is zero (avoids NaN scores)
scores = np.where(
    (precisions == 0) | (recalls == 0),
np.zeros((len(sentences), len(sentences))),
scores)
return scores
```
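A small usage sketch of `pairwise_chrf` on made-up sentences (not from the repository); each off-diagonal entry combines character n-gram precision and recall for one sentence pair, so the two similar sentences score much higher against each other than against the unrelated one:
```python
sents = ["the cat sat on the mat", "the cat sat on a mat", "dogs bark loudly"]
scores = pairwise_chrf(sents)
print(scores.round(3))  # entries for the pair (0, 1) are high; row/column 2 stays low
```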
#### File: jlibovicky/char-nmt-two-step-decoder/label_smoothing.py
```python
import torch
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
class SmoothCrossEntropyLoss(_WeightedLoss):
def __init__(self, weight=None, reduction="mean", smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth_one_hot(targets:torch.Tensor, n_classes:int, smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = torch.empty(size=(targets.size(0), n_classes),
device=targets.device) \
.fill_(smoothing /(n_classes-1)) \
.scatter_(1, targets.data.unsqueeze(1), 1.-smoothing)
return targets
def forward(self, inputs, targets):
targets = SmoothCrossEntropyLoss._smooth_one_hot(
targets, inputs.size(-1), self.smoothing)
lsm = F.log_softmax(inputs, -1)
if self.weight is not None:
lsm = lsm * self.weight.unsqueeze(0)
loss = -(targets * lsm).sum(-1)
if self.reduction == "sum":
loss = loss.sum()
elif self.reduction == "mean":
loss = loss.mean()
return loss
```
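A minimal usage sketch of `SmoothCrossEntropyLoss` with assumed shapes (a batch of 8 examples and 100 classes); this is illustrative and not part of the original module:
```python
import torch

logits = torch.randn(8, 100)            # unnormalized class scores
targets = torch.randint(0, 100, (8,))   # gold class indices
criterion = SmoothCrossEntropyLoss(smoothing=0.1)
loss = criterion(logits, targets)       # scalar, since reduction="mean"
```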
#### File: jlibovicky/char-nmt-two-step-decoder/lr_scheduler.py
```python
from torch.optim.lr_scheduler import _LRScheduler
class NoamLR(_LRScheduler):
"""Implements the Noam Learning rate schedule.
This corresponds to increasing the learning rate linearly for the first
``warmup_steps`` training steps, and decreasing it thereafter
proportionally to the inverse square root of the step number, scaled by the
inverse square root of the dimensionality of the model. Time will tell if
this is just madness or it's actually important.
Parameters
----------
warmup_steps: ``int``, required.
The number of steps to linearly increase the learning rate.
"""
def __init__(self, optimizer, warmup_steps):
self.warmup_steps = warmup_steps
super().__init__(optimizer)
def get_lr(self):
last_epoch = max(1, self.last_epoch)
scale = (
self.warmup_steps ** 0.5 *
min(last_epoch ** (-0.5),
last_epoch * self.warmup_steps ** (-1.5)))
return [base_lr * scale for base_lr in self.base_lrs]
```
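A minimal usage sketch of `NoamLR` with a toy model and assumed hyperparameters (not from the repository); the scheduler multiplies each base learning rate by the Noam factor on every step:
```python
import torch

model = torch.nn.Linear(512, 512)
optimizer = torch.optim.Adam(model.parameters(), lr=1.0)  # base_lr gets scaled by NoamLR
scheduler = NoamLR(optimizer, warmup_steps=4000)
for step in range(5):
    optimizer.step()
    scheduler.step()  # learning rate grows linearly during warm-up
```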
#### File: jlibovicky/char-nmt-two-step-decoder/seq_to_seq.py
```python
from typing import List, Tuple, Union
import torch
import torch.nn as nn
from encoder import Encoder, VanillaEncoder
from decoder import Decoder, VanillaDecoder
T = torch.Tensor
def compute_attention_entropy(
att_matrix: T, query_mask: T) -> float:
# att matrix is: batch x heads x q_len x k_len
# first entropy of each distribution, non-existing key positions
# must be masked out
prenorm_entropies = -(torch.log(att_matrix) * att_matrix)
prenorm_entropies[prenorm_entropies.isnan()] = 0.0
distr_entropies = prenorm_entropies.sum(3)
# shape: batch x head x q_len
# now average over relevant query positions
batch_head_entropies = (
distr_entropies * query_mask.unsqueeze(1)).sum(2) / query_mask.sum()
return batch_head_entropies.mean(0).mean(0).cpu().numpy()
class Seq2SeqModel(nn.Module):
def __init__(
self, vocab_size: Union[int, Tuple[int, int]],
conv_filters: List[int],
nar_output: bool = False,
char_embedding_dim: int = 128,
dim: int = 512,
shrink_factor: int = 5,
charformer_block_size: int = 5,
highway_layers: int = 2,
char_ff_layers: int = 2,
ff_dim: int = None,
layers: int = 6,
attention_heads: int = 8,
dropout: float = 0.1,
char_process_type: str = "conv",
vanilla_encoder: bool = False,
vanilla_decoder: bool = False,
share_char_repr: bool = False) -> None:
super().__init__()
self.layers = layers
if isinstance(vocab_size, tuple):
src_vocab_size, tgt_vocab_size = vocab_size
else:
src_vocab_size, tgt_vocab_size = vocab_size, vocab_size
if vanilla_encoder:
self.encoder: Union[Encoder, VanillaEncoder] = VanillaEncoder(
char_vocabulary_size=src_vocab_size,
dim=dim,
layers=layers,
ff_dim=ff_dim,
attention_heads=attention_heads,
dropout=dropout)
else:
self.encoder = Encoder(
vocab_size=src_vocab_size,
char_embedding_dim=char_embedding_dim,
conv_filters=conv_filters,
dim=dim,
shrink_factor=shrink_factor,
charformer_block_size=charformer_block_size,
highway_layers=highway_layers,
char_ff_layers=char_ff_layers,
ff_dim=ff_dim, layers=layers,
attention_heads=attention_heads,
dropout=dropout,
decoder_style_padding=share_char_repr,
char_process_type=char_process_type)
if vanilla_decoder:
self.decoder: Union[Decoder, VanillaDecoder] = VanillaDecoder(
char_vocabulary_size=tgt_vocab_size,
dim=dim,
layers=layers,
ff_dim=ff_dim,
attention_heads=attention_heads,
dropout=dropout,
encoder=self.encoder if ( # type: ignore
share_char_repr and vanilla_encoder) else None)
else:
self.decoder = Decoder(
char_vocabulary_size=tgt_vocab_size,
char_embedding_dim=char_embedding_dim,
conv_filters=conv_filters,
nar_output=nar_output,
dim=dim,
shrink_factor=shrink_factor,
highway_layers=highway_layers,
char_ff_layers=char_ff_layers,
layers=layers,
ff_dim=ff_dim,
attention_heads=attention_heads,
char_process_type=char_process_type,
dropout=dropout,
encoder=self.encoder if # type: ignore
share_char_repr else None)
def forward(
self, src_batch: T, src_mask: T, tgt_batch: T, tgt_mask: T,
loss_function: nn.Module,
log_details: bool = False) -> Tuple[T, T]:
encoded, enc_mask, enc_attention = self.encoder(src_batch, src_mask)
loss, details = self.decoder(
encoded, enc_mask, tgt_batch, tgt_mask, loss_function,
log_details=log_details)
if log_details:
details["enc_attentions"] = enc_attention
details["enc_attention_entropies"] = [
compute_attention_entropy(att, enc_mask)
for att in enc_attention]
shrinked_mask = details["decoder_mask"]
details["dec_attention_entropies"] = [
compute_attention_entropy(att, shrinked_mask)
for att in details["decoder_self_attention"]]
details["encdec_attention_entropies"] = [
compute_attention_entropy(att, shrinked_mask)
for att in details["decoder_self_attention"]]
return loss, details
@torch.no_grad()
def greedy_decode(
self, src_batch: T, input_mask: T,
eos_token_id: int, max_len: int = 400) -> Tuple[T, T]:
encoder_states, encoded_mask, _ = self.encoder(src_batch, input_mask)
decoded, mask = self.decoder.greedy_decode(
encoder_states, encoded_mask, eos_token_id, max_len=max_len)
return decoded, mask
@torch.no_grad()
def sample(
self, src_batch: T, input_mask: T,
n_samples: int,
eos_token_id: int, max_len: int = 400) -> List[Tuple[T, T]]:
encoder_states, encoded_mask, _ = self.encoder(src_batch, input_mask)
return [
self.decoder.greedy_decode(
encoder_states, encoded_mask, eos_token_id,
max_len=max_len,
sample=True)
for _ in range(n_samples)]
@torch.no_grad()
def beam_search(
self, src_batch: T, input_mask: T,
eos_token_id: int,
beam_size: int = 5,
len_norm: float = 0.5,
max_len: int = 400) -> Tuple[T, T]:
encoder_states, encoded_mask, _ = self.encoder(src_batch, input_mask)
decoded, mask = self.decoder.beam_search(
encoder_states, encoded_mask, eos_token_id,
beam_size=beam_size, len_norm=len_norm, max_len=max_len)
return decoded, mask
@property
def char_level_param_count(self) -> int:
"""Number of parameters in character processing layers."""
relevant_parts = []
if hasattr(self.encoder, "embeddings"):
relevant_parts = [self.encoder.embeddings]
if isinstance(self.encoder, Encoder):
relevant_parts.append(self.encoder.char_encoder)
if isinstance(self.decoder, VanillaDecoder):
relevant_parts.append(self.decoder.transformer.embeddings)
else:
relevant_parts.extend([
self.decoder.nar_proj, self.decoder.output_proj])
if not self.decoder.nar_output:
relevant_parts.append(self.decoder.char_decoder_rnn)
if self.decoder.char_embeddings not in relevant_parts:
relevant_parts.extend([
self.decoder.char_embeddings, self.decoder.char_encoder])
char_parameters = {
p for part in relevant_parts for p in part.parameters()}
return sum(p.numel() for p in char_parameters)
``` |
{
"source": "jlibovicky/deep-learning-stories-analysis",
"score": 3
} |
#### File: data/plain_text_scripts/plaintext_charlesmartin14.py
```python
import sys
import re
def main():
has_started = False
for line in sys.stdin:
line = line.rstrip()
if line == '# Machine Learning':
continue
if re.match(r'^# [A-Za-z]', line):
has_started = True
if (re.match(r'.*Share this.*', line) or
re.match(r'[0-9]+ Comments$', line) or
line == '1 Comment' or
line == 'about these ads' or
line == 'About these ads' or
line == 'Leave a comment'):
exit()
if has_started:
print(line)
if __name__ == "__main__":
main()
``` |
{
"source": "jli/brin-overlap",
"score": 3
} |
#### File: jli/brin-overlap/brin_parser_test.py
```python
from datetime import datetime
from brin_parser import parse_datetime_tuple
def test_parse_datetime_tuple_millis_tz() -> None:
res = parse_datetime_tuple(
"{2016-11-02 05:41:14.537+00 .. 2019-01-20 22:59:06.511+00}"
)
assert res[0] == datetime(2016, 11, 2, 5, 41, 14)
assert res[1] == datetime(2019, 1, 20, 22, 59, 6)
def test_parse_datetime_tuple_millis_no_tz() -> None:
res = parse_datetime_tuple("{2016-11-02 05:41:14.537 .. 2019-01-20 22:59:06.511}")
assert res[0] == datetime(2016, 11, 2, 5, 41, 14)
assert res[1] == datetime(2019, 1, 20, 22, 59, 6)
def test_parse_datetime_tuple_no_millis_no_tz() -> None:
res = parse_datetime_tuple("{2016-11-02 05:41:14 .. 2019-01-20 22:59:06}")
assert res[0] == datetime(2016, 11, 2, 5, 41, 14)
assert res[1] == datetime(2019, 1, 20, 22, 59, 6)
``` |
{
"source": "jlice/leetcode-publisher",
"score": 2
} |
#### File: leetcode-publisher/src/app.py
```python
import glob
import json
import logging
import os
import re
import shutil
import sqlite3
import subprocess
import sys
from collections import defaultdict
from datetime import datetime
import yaml
from jinja2 import Template
from dao import Dao
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from leetcode import UserCN, UserEN
LP_PREFIX = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def console(*args, **kwargs):
sep = kwargs.get('sep') or ' '
logging.info(sep.join(map(str, args)))
args = tuple(str(arg).encode(sys.stdout.encoding, 'ignore').decode(sys.stdout.encoding) for arg in args)
print(*args, **kwargs)
class RepoGen:
def __init__(self, conf):
self.conf = conf
self.user = None
self.all_submissions = []
self.new_ac_submissions = defaultdict(list)
self.new_ac_title_slugs = set()
self.solutions = defaultdict(list)
self.questions = {}
self.notes = {}
self.likes = {}
self.templates = {'solution': ''}
self.summary = None
self.dao = Dao(sqlite3.connect(os.path.join(LP_PREFIX, '_cache', 'leetcode.db')))
self.dao.prepare()
def main(self):
self.logger()
console('{0} leetcode publisher start {0}'.format('=' * 20))
# noinspection PyBroadException
try:
if self.login():
console('> Login successful!')
self.prepare_templates()
self.fetch_notes()
self.prepare_submissions()
self.prepare_solutions()
self.prepare_questions()
# self.prepare_likes()
self.prepare_render()
self.render_readme()
self.render_problems()
self.copy_source()
deploy_ret = self.deploy()
self.after_deploy(deploy_ret)
else:
console('> Login failed!')
except Exception as e:
logging.exception(e)
console('{0} leetcode publisher end {0}'.format('=' * 20))
@staticmethod
def logger():
log_file = os.path.join(LP_PREFIX, '_cache', 'log', '%s.log' % datetime.now().strftime('%Y-%m-%d'))
os.makedirs(os.path.dirname(log_file), exist_ok=True)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_file, encoding='utf-8')
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter('%(asctime)s %(levelname)-7s | %(message)s'))
root.addHandler(fh)
sh = logging.StreamHandler()
sh.setLevel(logging.WARNING)
root.addHandler(sh)
def login(self):
self.conf['account']['domain'] = self.conf['account'].get('domain', 'en').lower()
domain = self.conf['account']['domain'].lower()
if domain == 'cn':
self.user = UserCN()
elif domain == 'en':
self.user = UserEN()
else:
raise ValueError("Unrecognized domain: '{}'".format(domain))
return self.user.login(self.conf['account']['user'], self.conf['account']['password'])
def prepare_templates(self):
self.get_solution_template()
def get_solution_template(self):
solution_txt = os.path.join(LP_PREFIX, 'templ', 'solution.txt')
if os.path.isfile(solution_txt):
with open(solution_txt, encoding='utf8') as fp:
self.templates['solution'] = fp.read()
def __submissions(self):
for subm in self.dao.get_submissions():
self.all_submissions.append({
'code': subm[0],
'compare_result': subm[1],
'id': subm[2],
'is_pending': subm[3],
'lang': subm[4],
'memory': subm[5],
'runtime': subm[6],
'status_display': subm[7],
'timestamp': subm[8],
'title': subm[9],
'url': subm[10]
})
self.all_submissions.sort(key=lambda sub: sub['timestamp'], reverse=True)
all_submission_ids = [submission['id'] for submission in self.all_submissions]
submission_offset = None
submission_offset_filename = os.path.join(LP_PREFIX, '_cache', 'submission_offset.txt')
if os.path.isfile(submission_offset_filename):
with open(submission_offset_filename, 'r', encoding='utf8') as f:
submission_offset = f.read().strip()
if submission_offset:
submission_offset = int(submission_offset)
has_next = True
stop_flag = False
page = 0
while has_next and not stop_flag:
page += 1
new_submissions = []
print('\r> Get submission record of page %d ' % page, end='', flush=True)
j = self.user.submissions(page)
has_next = j['has_next']
for sd in j['submissions_dump']:
if sd['id'] in all_submission_ids:
cache_pos = all_submission_ids.index(sd['id'])
if cache_pos >= 0:
for idx in range(cache_pos, len(all_submission_ids)):
submission = self.all_submissions[idx]
if submission_offset and submission['id'] <= submission_offset:
break
yield submission
stop_flag = True
break
if submission_offset and sd['id'] <= submission_offset:
stop_flag = True
break
new_submissions.append(sd)
yield sd
new_submissions.sort(key=lambda sub: sub['timestamp'], reverse=True)
self.dao.insert_submissions(new_submissions)
print('\r', end='', flush=True)
console('> Get submission record completed! ')
def prepare_submissions(self):
for sd in self.__submissions():
if sd['status_display'] != 'Accepted':
continue
if sd['title'] in self.new_ac_submissions:
if sd['lang'] in [sub['lang'] for sub in self.new_ac_submissions[sd['title']]]:
continue
self.new_ac_submissions[sd['title']].append(sd)
def get_pin_solutions(self):
pin_solutions = dict()
for slug, note in self.notes.items():
pin_solutions[slug] = list(map(int, re.findall(r'<!--&(\d+)-->', note)))
return pin_solutions
def prepare_solutions(self):
self.summary = self.summary or self.user.summary()
title_slug_map = dict()
for stat in self.summary['stat_status_pairs']:
title_slug_map[stat['stat']['question__title']] = stat['stat']['question__title_slug']
solu_file = os.path.join(LP_PREFIX, '_cache', 'solutions.json')
if os.path.exists(solu_file):
with open(solu_file, 'r', encoding='utf-8') as f:
self.solutions = json.load(f)
pin_solutions = self.get_pin_solutions()
submissions = defaultdict(list)
for submission in self.all_submissions:
if submission['title'] in title_slug_map:
submissions[title_slug_map[submission['title']]].append(submission)
console('> Get solutions')
counter_init, counter = 0, 0
for title, sublist in self.new_ac_submissions.items():
for sub in sublist[::-1]:
solu = None
timestamp = None
if title_slug_map.get(title):
if title_slug_map[title] in self.solutions:
for solution in self.solutions[title_slug_map[title]]:
if solution['submission_id'] == sub['id']:
solu = solution
solu['id'] = solu['submission_id']
break
if sub['title'] in title_slug_map and title_slug_map[sub['title']] in submissions:
for solution in submissions[title_slug_map[title]]:
if solution['id'] == sub['id']:
timestamp = solution['timestamp']
if '.beats' not in self.templates['solution'] and (
"'beats'" not in self.templates['solution']):
solu = solution
solu['submission_id'] = solu['id']
solu['title_slug'] = title_slug_map[title]
solu['language'] = solu['lang']
break
if solu is None:
solu = self.user.solution(sub['id'])
solu['id'] = solu['submission_id']
solu['lang'] = solu['language']
counter += 1
solu['timestamp'] = timestamp
console(title)
slug = solu['title_slug']
self.new_ac_title_slugs.add(slug)
if slug not in self.solutions:
self.solutions[slug] = [solu]
else:
for i in range(len(self.solutions[slug]) - 1, -1, -1):
if self.solutions[slug][i]['language'] == solu['language']:
if solu['submission_id'] not in pin_solutions.get(slug, []):
self.solutions[slug].pop(i)
if solu['id'] not in [subm.get('id', subm.get('submission_id')) for subm in self.solutions[slug]]:
self.solutions[slug].insert(0, solu)
if counter - counter_init > 50:
with open(solu_file, 'w', encoding='utf-8') as f:
json.dump(self.solutions, f)
counter_init = counter
# fetch remain pin solutions
for slug, solution_ids in pin_solutions.items():
for solution_id in solution_ids:
if solution_id not in self.solutions.get(slug, {}):
if solution_id not in [subm.get('id', subm.get('submission_id')) for subm in self.solutions[slug]]:
solution = self.user.solution(solution_id)
console(solution['title'])
self.solutions[slug].append(solution)
with open(solu_file, 'w', encoding='utf-8') as f:
json.dump(self.solutions, f)
def prepare_questions(self):
for que in self.dao.get_questions():
self.questions[que[10]] = {
'content': que[0],
'difficulty': que[1],
'dislikes': que[2],
'likes': que[3],
'questionFrontendId': que[4],
'questionId': que[5],
'similarQuestions': que[6],
'stats': que[7],
'status': que[8],
'title': que[9],
'titleSlug': que[10],
'topicTags': eval(que[11]),
'translatedContent': que[12],
'translatedTitle': que[13],
}
cn_user = UserCN() # Chinese version comes with translation
en_user = UserEN()
console('> Fix questionFrontendId')
for slug, question in self.questions.items():
try:
front_id = int(question['questionFrontendId'])
except ValueError:
pass
else:
if front_id > 5000:
console(slug)
self.questions[slug]['questionFrontendId'] = en_user.question(slug)['questionFrontendId']
console('> Get questions')
question_buffer = []
for slug in self.solutions:
if slug not in self.questions:
console(slug)
# if the question is not available on LeetCode China, fall back to the LeetCode main site
question_buffer.append(cn_user.question(slug) or en_user.question(slug))
self.questions[slug] = question_buffer[-1]
if len(question_buffer) == 100:
self.dao.insert_questions(question_buffer)
question_buffer = []
self.dao.insert_questions(question_buffer)
def fetch_notes(self):
console('> Get notes')
notes = self.user.notes()
self.notes = {
obj['question']['titleSlug']: obj['content'] for obj in notes
}
def prepare_notes(self):
"""Deprecated. Because of `fetch_notes`"""
note_file = os.path.join(LP_PREFIX, '_cache', 'notes.json')
if os.path.exists(note_file):
with open(note_file, 'r', encoding='utf-8') as f:
self.notes = json.load(f)
console('> Get notes')
for slug in self.solutions:
if slug not in self.notes or slug in self.new_ac_title_slugs:
console(slug)
self.notes[slug] = self.user.note(self.questions[slug]['questionId'])
with open(note_file, 'w', encoding='utf-8') as f:
json.dump(self.notes, f)
def prepare_likes(self):
like_file = os.path.join(LP_PREFIX, '_cache', 'likes.json')
if os.path.exists(like_file):
with open(like_file, 'r', encoding='utf-8') as f:
self.likes = json.load(f)
console('> Get likes')
for slug in self.solutions:
if slug not in self.likes or slug in self.new_ac_title_slugs:
console(slug)
self.likes[slug] = self.user.likes(slug)
with open(like_file, 'w', encoding='utf-8') as f:
json.dump(self.likes, f)
@staticmethod
def prepare_render():
# delete folder "repo"
shutil.rmtree(os.path.join(LP_PREFIX, 'repo'), ignore_errors=True)
os.makedirs(os.path.join(LP_PREFIX, 'repo'))
os.makedirs(os.path.join(LP_PREFIX, 'repo', 'problems'))
def render_readme(self):
self.summary = self.summary or self.user.summary()
console('> Render README.md')
# This determines how to sort the problems
ques_sort = sorted(
[(ques['questionFrontendId'], ques['titleSlug']) for ques in self.questions.values()],
key=lambda x: -int(x[0]))
# You can customize the template
tmpl = Template(open(os.path.join(LP_PREFIX, 'templ', 'README.md.txt'), encoding='utf-8').read())
readme = tmpl.render(questions=[self.questions[slug] for _, slug in ques_sort], likes=self.likes,
date=datetime.now(), summary=self.summary, conf=self.conf)
with open(os.path.join(LP_PREFIX, 'repo', 'README.md'), 'w', encoding='utf-8') as f:
f.write(readme)
def render_problems(self):
console('> Render problems')
# You can customize the template
tmpl = Template(
open(os.path.join(LP_PREFIX, 'templ', 'question.md.txt'), encoding='utf-8').read())
pin_solutions = self.get_pin_solutions()
# template for single solution
solution_templ = Template(self.templates['solution'])
for slug in self.solutions:
question = self.questions[slug]
note = self.notes.get(slug, "")
answer = note.replace('\n', '\n\n')
solutions = self.solutions[slug]
pins = pin_solutions.get(slug, [])
for solution in solutions:
submission_id = solution['submission_id']
if submission_id in pins:
answer = answer.replace('<!--&%s-->' % submission_id, solution_templ.render(solution=solution))
else:
answer += '\n\n%s\n' % solution_templ.render(solution=solution)
content = tmpl.render(question=question, note=note, solutions=solutions,
date=datetime.now(), conf=self.conf, answer=answer)
if sys.platform != 'win32':
content = content.replace('\r\n', '\n')
filename = '%s-%s.md' % (question['questionFrontendId'], slug)
with open(os.path.join(LP_PREFIX, 'repo', 'problems', filename), 'w', encoding='utf-8') as f:
f.write(content)
@staticmethod
def copy_source():
console('> Copy resources')
repo = os.path.join(LP_PREFIX, 'repo')
for src in glob.glob(os.path.join(LP_PREFIX, '_source', '*')):
console(os.path.relpath(src, LP_PREFIX))
dst = os.path.join(repo, os.path.basename(src))
if os.path.isdir(src):
if not os.path.isdir(dst):
shutil.copytree(src, dst)
else:
console("Directory '%s' already exist." % os.path.relpath(dst, LP_PREFIX))
else:
shutil.copy(src, repo)
def deploy(self):
if self.conf.get('repo'):
console('> Deploy to git repository')
repo = os.path.join(LP_PREFIX, 'repo')
cmds = []
os.chdir(repo)
shutil.rmtree(os.path.join(repo, '.git'), ignore_errors=True)
cmds.append('git init')
cmds.append('git add .')
cmds.append('git commit -m "Auto Deployment"')
for remote in self.conf['repo']:
cmds.append('git push -f -q %s master:master' % remote)
for cmd in cmds:
console(cmd)
try:
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError:
console("Get error when run '%s'" % cmd)
return False
return True
def after_deploy(self, deploy_ret):
if deploy_ret:
submission_offset_filename = os.path.join(LP_PREFIX, '_cache', 'submission_offset.txt')
with open(submission_offset_filename, 'w', encoding='utf8') as f:
f.write('%s\n' % max([submission['id'] for submission in self.all_submissions]))
self.dao.close()
def _main():
conf_file = os.path.join(LP_PREFIX, 'config.yml')
if os.path.isfile(conf_file):
for ec in ('utf-8', 'gb18030', 'gb2312', 'gbk'):
try:
with open(conf_file, encoding=ec) as fp:
try:
# noinspection PyUnresolvedReferences
from yaml import FullLoader
conf = yaml.load(fp, Loader=FullLoader)
except ImportError:
conf = yaml.load(fp)
break
except UnicodeDecodeError:
continue
except yaml.YAMLError:
print('File does not conform to the YAML format specification: %s' % conf_file)
rg = RepoGen(conf)
rg.main()
else:
print('File does not exist: %s' % conf_file)
if __name__ == '__main__':
_main()
```
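For reference, a hypothetical configuration matching what `RepoGen` reads from `config.yml` (keys inferred from the code above; all values are placeholders):
```python
conf = {
    "account": {
        "domain": "cn",            # "cn" for leetcode.cn, "en" for leetcode.com
        "user": "your-username",
        "password": "your-password",
    },
    # Optional: git remotes the rendered repository is force-pushed to.
    "repo": ["git@github.com:your-name/leetcode-solutions.git"],
}
# RepoGen(conf).main() then logs in, collects submissions, and renders the repo.
```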
#### File: leetcode-publisher/src/dao.py
```python
class Dao:
def __init__(self, conn):
self.conn = conn
self.cur = conn.cursor()
def prepare(self):
self.cur.execute('''
CREATE TABLE IF NOT EXISTS submission (
code TEXT,
compare_result TEXT,
id INTEGER PRIMARY KEY,
is_pending TEXT,
lang TEXT,
memory TEXT,
runtime TEXT,
status_display TEXT,
timestamp INTEGER,
title TEXT,
url TEXT
)''')
self.cur.execute('''
CREATE TABLE IF NOT EXISTS question (
content TEXT,
difficulty TEXT,
dislikes INTEGER,
likes INTEGER,
questionFrontendId TEXT,
questionId TEXT PRIMARY KEY,
similarQuestions TEXT,
`stats` TEXT,
status TEXT,
title TEXT,
titleSlug TEXT,
topicTags TEXT,
translatedContent TEXT,
translatedTitle TEXT
)''')
def close(self):
self.cur.close()
self.conn.close()
def insert_submissions(self, submissions):
data = []
for submission in submissions:
data.append((
submission['code'],
submission['compare_result'],
submission['id'],
submission['is_pending'],
submission['lang'],
submission['memory'],
submission['runtime'],
submission['status_display'],
submission['timestamp'],
submission['title'],
submission['url']
))
self.cur.executemany('''
INSERT INTO submission VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', data)
self.conn.commit()
def insert_questions(self, questions):
data = []
for question in questions:
data.append((
question['content'],
question['difficulty'],
question['dislikes'],
question['likes'],
question['questionFrontendId'],
question['questionId'],
question['similarQuestions'],
question['stats'],
question['status'],
question['title'],
question['titleSlug'],
str([tag['name'] for tag in question['topicTags']]),
question['translatedContent'],
question['translatedTitle']
))
self.cur.executemany('''
INSERT INTO question VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', data)
self.conn.commit()
def get_submissions(self):
self.cur.execute('''SELECT * FROM submission''')
return self.cur.fetchall()
def get_questions(self):
self.cur.execute('''SELECT * FROM question''')
return self.cur.fetchall()
``` |
{
"source": "jlichter/pyClarion",
"score": 2
} |
#### File: pyClarion/components/propagators.py
```python
__all__ = [
"MaxNodes", "Repeater", "Lag", "ThresholdSelector", "ActionSelector",
"BoltzmannSelector", "Constants", "Stimulus"
]
from ..base import ConstructType, Symbol, Process, chunk, feature, lag
from .. import numdicts as nd
from .utils import group_by_dims
from typing import (
Tuple, Mapping, Set, NamedTuple, FrozenSet, Optional, Union, Dict,
Sequence, Container
)
from typing import Iterable, Any
from itertools import chain
from copy import copy
########################
### Node Propagators ###
########################
class MaxNodes(Process):
"""Computes the maximum recommended activation for each node in a pool."""
_serves = ConstructType.nodes
_ctype_map = {
ConstructType.features: ConstructType.feature,
ConstructType.chunks: ConstructType.chunk
}
def __init__(self, sources: Sequence[Symbol]):
super().__init__(expected=sources)
self.accept = ConstructType.node
def entrust(self, path):
super().entrust(path)
self.accept = self._ctype_map[self.client[-1].ctype]
def call(self, inputs):
data = self.extract_inputs(inputs)
d = nd.ew_max(*data)
d = nd.keep(d, func=lambda f: f.ctype in self.accept)
d = nd.squeeze(d)
return d
########################
### Flow Propagators ###
########################
class Repeater(Process):
"""Copies the output of a single source construct."""
_serves = (
ConstructType.flow_in | ConstructType.flow_h | ConstructType.buffer
)
def __init__(self, source: Symbol) -> None:
super().__init__(expected=(source,))
def call(self, inputs):
d, = self.extract_inputs(inputs)
return d
class Lag(Process):
"""Lags strengths for given set of features."""
_serves = ConstructType.flow_in | ConstructType.flow_bb
def __init__(self, source: Symbol, max_lag=1):
"""
Initialize a new `Lag` propagator.
:param source: Pool of features from which to compute lagged strengths.
:param max_lag: Do not compute lags beyond this value.
"""
super().__init__(expected=(source,))
self.max_lag = max_lag
def call(self, inputs):
d, = self.extract_inputs(inputs)
d = nd.transform_keys(d, func=lag, val=1)
d = nd.keep(d, func=self._filter)
return d
def _filter(self, f):
return f.ctype in ConstructType.feature and f.lag <= self.max_lag
############################
### Terminus Propagators ###
############################
class ThresholdSelector(Process):
"""
Propagator for extracting nodes above a threshold.
Targets feature nodes by default.
"""
_serves = ConstructType.terminus
def __init__(self, source: Symbol, threshold: float = 0.85):
super().__init__(expected=(source,))
self.threshold = threshold
def call(self, inputs):
d, = self.extract_inputs(inputs)
d = nd.threshold(d, th=self.threshold, keep_default=True)
return d
class BoltzmannSelector(Process):
"""Selects a chunk according to a Boltzmann distribution."""
_serves = ConstructType.terminus
def __init__(self, source, temperature=0.01, threshold=0.25):
"""
Initialize a ``BoltzmannSelector`` instance.
:param temperature: Temperature of the Boltzmann distribution.
"""
super().__init__(expected=(source,))
self.temperature = temperature
self.threshold = threshold
def call(self, inputs):
"""
Select chunks through an activation-based competition.
Selection probabilities vary with chunk strengths according to a
Boltzmann distribution.
"""
strengths, = self.extract_inputs(inputs)
thresholded = nd.threshold(strengths, th=self.threshold)
probabilities = nd.boltzmann(thresholded, self.temperature)
d = nd.draw(probabilities, n=1)
d = nd.with_default(d, default=0)
return d
class ActionSelector(Process):
"""
Selects actions and parameters according to Boltzmann distributions.
Action and parameter features are selected from a given client interface.
For parameter features, if a parameter feature is found to be of a
singleton dimension (i.e., a dimension with only one value), it is treated
like a continuous parameter and its strength is included in the output. If
the parameter dimension has multiple values, one among them is
stochastically selected through a Boltzmann distribution, as with action
commands.
"""
_serves = ConstructType.terminus
def __init__(self, source, interface, temperature):
"""
Initialize an ``ActionSelector`` instance.
:param dims: Registered action dimensions.
:param temperature: Temperature of the Boltzmann distribution.
"""
if source.ctype not in ConstructType.features:
raise ValueError("Expected source to be of ctype 'features'.")
super().__init__(expected=(source,))
self.interface = interface
self.temperature = temperature
def call(self, inputs):
"""
Select actionable chunks for execution.
Selection probabilities vary with feature strengths according to a
Boltzmann distribution. Probabilities for each target dimension are
computed separately.
"""
strengths, = self.extract_inputs(inputs)
cmds_by_dims = group_by_dims(self.interface.cmds)
params_by_dims = group_by_dims(self.interface.params)
items_by_dims = chain(cmds_by_dims.items(), params_by_dims.items())
d = nd.MutableNumDict(default=0)
for dim, fs in items_by_dims:
if len(fs) == 1: # output strength of singleton param dim
assert dim in params_by_dims
f, = fs
d[f] = strengths[f]
else: # select value for cmd dim or multivalue param dim
assert 1 < len(fs)
ipt = nd.NumDict({f: strengths[f] for f in fs})
prs = nd.boltzmann(ipt, self.temperature)
selection = nd.draw(prs, n=1)
d.update(selection)
return d
##########################
### Buffer Propagators ###
##########################
class Constants(Process):
"""
Outputs a constant activation pattern.
Useful for setting defaults and testing. Provides methods for updating
constants through external intervention.
"""
_serves = ConstructType.basic_construct
def __init__(self, strengths = None) -> None:
self._check_default(strengths)
super().__init__()
self.strengths = nd.squeeze(strengths) if strengths is not None else nd.NumDict(default=0.0)
def call(self, inputs):
"""Return stored strengths."""
return self.strengths
@staticmethod
def _check_default(strengths):
if strengths is not None and strengths.default != 0.0:
msg = "Unexpected default '{}', expected '0'."
raise ValueError(msg.format(strengths.default))
class Stimulus(Process):
"""Propagates externally provided stimulus."""
_serves = ConstructType.buffer
def __init__(self):
super().__init__()
self.stimulus = nd.MutableNumDict(default=0.0)
def input(self, data):
self.stimulus.update(data)
self.stimulus.squeeze()
def call(self, inputs):
d = self.stimulus
self.stimulus = nd.MutableNumDict(default=0.0)
return d
```
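A conceptual sketch of the Boltzmann selection described in the `BoltzmannSelector` and `ActionSelector` docstrings above, written in plain numpy rather than with pyClarion's `nd.boltzmann`/`nd.draw`:
```python
import numpy as np

strengths = np.array([0.9, 0.5, 0.2])   # activation strengths of competing items
temperature = 0.01
logits = strengths / temperature
probs = np.exp(logits - logits.max())    # subtract the max for numerical stability
probs /= probs.sum()
winner = np.random.choice(len(strengths), p=probs)  # index of the selected item
```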
#### File: pyClarion/components/rules.py
```python
__all__ = ["Rule", "Rules", "AssociativeRules", "ActionRules"]
from ..base.symbols import ConstructType, Symbol, rule, chunk
from ..base.components import Process
from .. import numdicts as nd
from typing import (
Mapping, MutableMapping, TypeVar, Generic, Type, Dict, FrozenSet, Set,
Tuple, overload, cast
)
from types import MappingProxyType
class Rule(object):
"""Represents a rule form."""
__slots__ = ("_conc", "_weights")
def __init__(
self, conc: chunk, *conds: chunk, weights: Dict[chunk, float] = None
):
"""
Initialize a new rule.
If conditions contains items that do not appear in weights, weights is
extended to map each of these items to a weight of 1. If weights is
None, every cond is assigned a weight of 1.
If the weights sum to more than 1.0, they are renormalized such that
each weight w is mapped to w / sum(weights.values()).
:param conclusion: A chunk symbol for the rule conclusion.
:param conditions: A sequence of chunk symbols representing rule
conditions.
:param weights: An optional mapping from condition chunk symbols
to condition weights.
"""
# preconditions
if weights is not None:
if not set(conds).issuperset(weights):
ValueError("Keys of arg `weights` do not match conds.")
if not all(0 < v for v in weights.values()):
ValueError("Weights must be strictly positive.")
ws = nd.MutableNumDict(weights)
ws.extend(conds, value=1.0)
w_sum = nd.val_sum(ws)
if w_sum > 1.0:
ws /= w_sum
self._conc = conc
self._weights = nd.freeze(ws)
# postconditions
assert set(self._weights) == set(conds), "Each cond must have a weight."
assert nd.val_sum(ws) <= 1, "Inferred weights must sum to one or less."
def __repr__(self) -> str:
return "Rule(conc={}, weights={})".format(self.conc, self.weights)
def __eq__(self, other) -> bool:
if isinstance(other, Rule):
b = (
self.conc == other.conc and
nd.isclose(self.weights, other.weights)
)
return b
else:
return NotImplemented
@property
def conc(self) -> chunk:
"""Conclusion of rule."""
return self._conc
@property
def weights(self) -> nd.NumDict:
"""Conditions and condition weights of rule."""
return self._weights
def strength(self, strengths: nd.NumDict) -> float:
"""
Compute rule strength given condition strengths.
The rule strength is computed as the weighted sum of the condition
strengths in strengths.
Implementation based on p. 60 and p. 73 of Anatomy of the Mind.
"""
weighted = nd.keep(strengths, keys=self.weights) * self.weights
return nd.val_sum(weighted)
Rt = TypeVar("Rt", bound="Rule")
class Rules(MutableMapping[rule, Rt], Generic[Rt]):
"""A simple rule database."""
@overload
def __init__(self: "Rules[Rule]") -> None:
...
@overload
def __init__(self: "Rules[Rule]", *, max_conds: int) -> None:
...
@overload
def __init__(self, *, rule_type: Type[Rt]) -> None:
...
@overload
def __init__(self, *, max_conds: int, rule_type: Type[Rt]) -> None:
...
@overload
def __init__(
self, data: Mapping[rule, Rt], max_conds: int, rule_type: Type[Rt]
) -> None:
...
def __init__(
self,
data: Mapping[rule, Rt] = None,
max_conds: int = None,
rule_type: Type[Rt] = None
) -> None:
if data is None:
data = dict()
else:
data = dict(data)
self._data = data
self.max_conds = max_conds
self.Rule = rule_type if rule_type is not None else Rule
self._add_promises: MutableMapping[rule, Rt] = dict()
self._del_promises: Set[rule] = set()
def __repr__(self):
repr_ = "{}({})".format(type(self).__name__, repr(self._data))
return repr_
def __len__(self):
return len(self._data)
def __iter__(self):
yield from iter(self._data)
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, val):
self._validate_rule_form(val)
if isinstance(val, self.Rule):
self._data[key] = val
else:
msg = "This rule database expects rules of type '{}'."
raise TypeError(msg.format(self.Rule.__name__))
def __delitem__(self, key):
del self._data[key]
@property
def add_promises(self):
"""A view of promised additions."""
return MappingProxyType(self._add_promises)
@property
def del_promises(self):
"""A view of promised deletions."""
return frozenset(self._del_promises)
def define(
self,
r: rule,
conc: chunk,
*conds: chunk,
weights: Dict[chunk, float] = None
) -> rule:
"""
Add a new rule.
Returns the rule symbol.
"""
self[r] = self.Rule(conc, *conds, weights=weights)
return r
def contains_form(self, form):
"""
Check if the rule set contains a given rule form.
See Rule for details on rule forms.
"""
return any(form == entry for entry in self.values())
def request_add(self, r, form):
"""
Inform self of a new rule to be applied at a later time.
Adds the new rule on call to self.step().
If r is already member of self, will overwrite the existing rule. Does
not check for duplicate forms.
If an update is already registered for rule r, will throw an error.
Does not validate the rule form before registering the request.
Validation occurs at update time.
"""
if r in self._add_promises or r in self._del_promises:
msg = "Rule {} already registered for a promised update."
raise ValueError(msg.format(r))
else:
self._add_promises[r] = form
def request_del(self, r: rule) -> None:
"""
Inform self of an existing rule to be removed at update time.
The rule will be removed on call to step().
If r is not already a member of self, will raise an error.
"""
if r in self._add_promises or r in self._del_promises:
msg = "Rule {} already registered for a promised update."
raise ValueError(msg.format(r))
elif r not in self:
raise ValueError("Cannot delete non-existent rule.")
else:
self._del_promises.add(r)
def step(self):
"""Apply any promised updates."""
for r in self._del_promises:
del self[r]
self._del_promises.clear()
self.update(self._add_promises)
self._add_promises.clear()
def _validate_rule_form(self, form):
if self.max_conds is not None and len(form.weights) > self.max_conds:
msg = "Received rule with {} conditions; maximum allowed is {}."
raise ValueError(msg.format(len(form.weights), self.max_conds))
class RuleDBUpdater(Process):
"""Applies requested updates to a client Rules instance."""
_serves = ConstructType.updater
def __init__(self, rules: "Rules") -> None:
"""Initialize a Rules.Updater instance."""
super().__init__()
self.rules = rules
def __call__(
self, inputs: Mapping[Tuple[Symbol, ...], nd.NumDict]
) -> nd.NumDict:
"""Resolve all outstanding rule database update requests."""
self.rules.step()
return super().call(inputs)
class AssociativeRules(Process):
"""
Propagates activations among chunks through associative rules.
The strength of the conclusion is calculated as a weighted sum of condition
strengths. In cases where there are multiple rules with the same conclusion,
the maximum is taken.
Implementation based on p. 73-74 of Anatomy of the Mind.
"""
_serves = ConstructType.flow_tt
def __init__(self, source: Symbol, rules: Rules) -> None:
super().__init__(expected=(source,))
self.rules = rules
def call(
self, inputs: Mapping[Tuple[Symbol, ...], nd.NumDict]
) -> nd.NumDict:
strengths, = self.extract_inputs(inputs)
d = nd.MutableNumDict(default=0.0)
for r, form in self.rules.items():
s_r = form.strength(strengths)
d[form.conc] = max(d[form.conc], s_r)
d[r] = s_r
d.squeeze()
assert d.default == 0, "Unexpected output default."
return d
class ActionRules(Process):
"""
Propagates activations from condition to action chunks using action rules.
Action rules compete to be selected based on their rule strengths, which is
equal to the product of an action rule's weight and the strength of its
condition chunk. The rule strength of the selected action is then
propagated to its conclusion.
"""
_serves = ConstructType.flow_tt
def __init__(
self, source: Symbol, rules: Rules, temperature: float = .01
) -> None:
if rules.max_conds is None or rules.max_conds > 1:
msg = "Rule database must not accept multiple condition rules."
raise ValueError(msg)
super().__init__(expected=(source,))
self.rules = rules
self.temperature = temperature
def call(
self, inputs: Mapping[Tuple[Symbol, ...], nd.NumDict]
) -> nd.NumDict:
strengths, = self.extract_inputs(inputs)
d = nd.MutableNumDict(default=0)
for r, form in self.rules.items():
d[r] = form.strength(strengths)
probabilities = nd.boltzmann(d, self.temperature)
selection = nd.draw(probabilities, n=1)
d *= selection
d.squeeze()
d.max(nd.transform_keys(d, func=lambda r: self.rules[r].conc))
# postcondition
assert d.default == 0, "Unexpected output default."
return d
```
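A conceptual sketch of the weighted-sum rule strength computed by `Rule.strength` above, using plain Python dictionaries rather than pyClarion's `NumDict` and chunk symbols:
```python
weights = {"cond_a": 0.5, "cond_b": 0.5}    # condition weights, normalized to sum <= 1
strengths = {"cond_a": 0.8, "cond_b": 0.4}  # current condition activations
rule_strength = sum(w * strengths.get(c, 0.0) for c, w in weights.items())
print(rule_strength)  # 0.5 * 0.8 + 0.5 * 0.4 == 0.6
```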
#### File: pyClarion/components/utils.py
```python
__all__ = [
"group_by", "group_by_ctype", "group_by_dims", "group_by_tags",
"group_by_vals", "group_by_lags"
]
from ..base import ConstructType, Symbol, feature
from typing import Tuple, Dict, Hashable, Iterable, Callable, TypeVar
T = TypeVar("T")
K = TypeVar("K")
#########################
### GROUPING FUNCTIONS ##
#########################
def group_by(
iterable: Iterable[T], key: Callable[[T], K]
) -> Dict[K, Tuple[T, ...]]:
"""Return a dict grouping items in iterable by values of the key func."""
groups: dict = {}
for item in iterable:
k = key(item)
groups.setdefault(k, []).append(item)
return {k: tuple(v) for k, v in groups.items()}
def group_by_ctype(
symbols: Iterable[Symbol]
) -> Dict[ConstructType, Tuple[Symbol, ...]]:
"""
Construct a dict grouping symbols by their construct types.
Returns a dict where each construct type is mapped to a tuple of symbols of
that type. Does not check for duplicates.
"""
# Ignore type of key due to mypy false alarm. - Can
key = Symbol.ctype.fget # type: ignore
return group_by(iterable=symbols, key=key)
def group_by_dims(
features: Iterable[feature]
) -> Dict[Tuple[Hashable, int], Tuple[feature, ...]]:
"""
Construct a dict grouping features by their dimensions.
Returns a dict where each dim is mapped to a tuple of features of that dim.
Does not check for duplicate features.
:param features: An iterable of features to be grouped by dimension.
"""
# Ignore type of key due to mypy false alarm. - Can
key = feature.dim.fget # type: ignore
return group_by(iterable=features, key=key)
def group_by_tags(
features: Iterable[feature]
) -> Dict[Hashable, Tuple[feature, ...]]:
"""
Construct a dict grouping features by their tags.
Returns a dict where each tag is mapped to a tuple of features with that tag.
Does not check for duplicate features.
:param features: An iterable of features to be grouped by tag.
"""
# Ignore type of key due to mypy false alarm. - Can
key = feature.tag.fget # type: ignore
return group_by(iterable=features, key=key)
def group_by_vals(
features: Iterable[feature]
) -> Dict[Hashable, Tuple[feature, ...]]:
"""
Construct a dict grouping features by their values.
Returns a dict where each value is mapped to a tuple of features that have
that value. Does not check for duplicate features.
:param features: An iterable of features to be grouped by value.
"""
# Ignore type of key due to mypy false alarm. - Can
key = feature.val.fget # type: ignore
return group_by(iterable=features, key=key)
def group_by_lags(
features: Iterable[feature]
) -> Dict[Hashable, Tuple[feature, ...]]:
"""
Construct a dict grouping features by their lags.
Returns a dict where each lag value is mapped to a tuple of features of
that lag value. Does not check for duplicate features.
:param features: An iterable of features to be grouped by lag.
"""
# Ignore type of key due to mypy false alarm. - Can
key = feature.lag.fget # type: ignore
return group_by(iterable=features, key=key)
```
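A minimal usage sketch of the generic `group_by` helper defined above, on made-up data:
```python
words = ["apple", "avocado", "banana", "blueberry", "cherry"]
by_initial = group_by(words, key=lambda w: w[0])
# {'a': ('apple', 'avocado'), 'b': ('banana', 'blueberry'), 'c': ('cherry',)}
```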
#### File: tests/test_base/test_components.py
```python
import pyClarion.base as clb
import pyClarion.numdicts as nd
import unittest
import unittest.mock as mock
class TestProcess(unittest.TestCase):
@mock.patch.object(clb.Process, "_serves", clb.ConstructType.chunks)
def test_check_inputs_accepts_good_input_structure(self):
process = clb.Process(
expected=[clb.buffer("wm"), clb.terminus("selection")]
)
inputs = {
clb.buffer("wm"): nd.NumDict(default=0),
clb.terminus("selection"): nd.NumDict(default=0),
clb.terminus("selection2"): nd.NumDict(default=0)
}
process.check_inputs(inputs)
@mock.patch.object(clb.Process, "_serves", clb.ConstructType.chunks)
def test_check_inputs_rejects_incomplete_input(self):
process = clb.Process(
expected=[clb.chunks("in"), clb.terminus("selection")]
)
with self.assertRaises(RuntimeError):
inputs = {
# clb.buffer("wm"): nd.NumDict(default=0),
clb.terminus("selection"): nd.NumDict(default=0),
clb.terminus("selection2"): nd.NumDict(default=0)
}
process.check_inputs(inputs)
class TestWrappedProcess(unittest.TestCase):
pass
``` |
{
"source": "jlidtke/remctl",
"score": 2
} |
#### File: remctl/python/remctl.py
```python
from typing import Iterable, Optional, Text, Tuple, Union
import _remctl
VERSION = "3.17"
RemctlOutput = Tuple[
str, Optional[bytes], Optional[int], Optional[int], Optional[int]
]
# Exception classes.
class RemctlError(Exception):
"""The underlying remctl library has returned an error."""
pass
class RemctlProtocolError(RemctlError):
"""A remctl protocol error occurred.
This exception is only used with the remctl.remctl() simple interface;
for the full interface, errors are returned as a regular output token.
"""
pass
class RemctlNotOpenedError(RemctlError):
"""No open connection to a server."""
pass
# Simple interface.
class RemctlSimpleResult:
"""An object holding the results from the simple interface."""
def __init__(self):
# type: () -> None
self.stdout = None # type: Optional[bytes]
self.stderr = None # type: Optional[bytes]
self.status = None # type: Optional[int]
def remctl(
host, # type: str
port=None, # type: Optional[Union[int, str]]
principal=None, # type: Optional[str]
command=[], # type: Iterable[Union[Text, bytes]]
):
# type: (...) -> RemctlSimpleResult
"""Simple interface to remctl.
Connect to HOST on PORT, using PRINCIPAL as the server principal for
authentication, and issue COMMAND. Returns the result as a
RemctlSimpleResult object, which has three attributes. stdout holds the
complete standard output, stderr holds the complete standard error, and
status holds the exit status.
"""
if port is None:
port = 0
else:
try:
port = int(port)
except ValueError:
raise TypeError("port must be a number: " + repr(port))
if (port < 0) or (port > 65535):
raise ValueError("invalid port number: " + repr(port))
if isinstance(command, (bytes, str, bool, int, float)):
raise TypeError("command must be a sequence or iterator")
# Convert the command to a list of bytes.
mycommand = [i if isinstance(i, bytes) else i.encode() for i in command]
if len(mycommand) < 1:
raise ValueError("command must not be empty")
# At this point, things should be sane. Call the low-level interface.
output = _remctl.remctl(host, port, principal, mycommand)
if output[0] is not None:
raise RemctlProtocolError(output[0])
result = RemctlSimpleResult()
setattr(result, "stdout", output[1])
setattr(result, "stderr", output[2])
setattr(result, "status", output[3])
return result
# Complex interface.
class Remctl:
def __init__(
self,
host=None, # type: Optional[str]
port=None, # type: Optional[Union[int, str]]
principal=None, # type: Optional[str]
):
# type: (...) -> None
self.r = _remctl.remctl_new()
self.opened = False
if host:
self.open(host, port, principal)
def set_ccache(self, ccache):
# type: (str) -> None
if not _remctl.remctl_set_ccache(self.r, ccache):
raise RemctlError(self.error())
def set_source_ip(self, source):
# type: (str) -> None
if not _remctl.remctl_set_source_ip(self.r, source):
raise RemctlError(self.error())
def set_timeout(self, timeout):
# type: (int) -> None
if not _remctl.remctl_set_timeout(self.r, timeout):
raise RemctlError(self.error())
def open(self, host, port=None, principal=None):
# type: (str, Optional[Union[int, str]], Optional[str]) -> None
if port is None:
port = 0
else:
try:
port = int(port)
except ValueError:
raise TypeError("port must be a number: " + repr(port))
if (port < 0) or (port > 65535):
raise ValueError("invalid port number: " + repr(port))
# At this point, things should be sane. Call the low-level interface.
if not _remctl.remctl_open(self.r, host, port, principal):
raise RemctlError(self.error())
self.opened = True
def command(self, comm):
# type: (Iterable[Union[Text, bytes]]) -> None
if not self.opened:
raise RemctlNotOpenedError("no currently open connection")
if isinstance(comm, (bytes, str, bool, int, float)):
raise TypeError("command must be a sequence or iterator")
# Convert the command to a list of strings.
commlist = [i if isinstance(i, bytes) else i.encode() for i in comm]
if len(commlist) < 1:
raise ValueError("command must not be empty")
# At this point, things should be sane. Call the low-level interface.
if not _remctl.remctl_commandv(self.r, commlist):
raise RemctlError(self.error())
def output(self):
# type: () -> RemctlOutput
if not self.opened:
raise RemctlNotOpenedError("no currently open connection")
output = _remctl.remctl_output(self.r)
if len(output) == 0:
raise RemctlError(self.error())
return output
def noop(self):
# type: () -> None
if not self.opened:
raise RemctlNotOpenedError("no currently open connection")
if not _remctl.remctl_noop(self.r):
raise RemctlError(self.error())
def close(self):
# type: () -> None
del self.r
self.r = None
self.opened = False
def error(self):
# type: () -> str
if not self.r:
# We do this instead of throwing an exception so that callers
# don't have to handle an exception when they are trying to find
            # out why an exception occurred.
return "no currently open connection"
return _remctl.remctl_error(self.r)
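# Minimal sketch of the full interface (host name and command are illustrative
# placeholders; real code would loop over output() tokens and check errors):
#
#     r = Remctl("remctl.example.com")
#     r.command(["test", "echo", "hello"])
#     token = r.output()
#     r.close()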
``` |
{
"source": "jlieberherr/learning-pt-routing",
"score": 2
} |
#### File: tests/a_default/ba_gtfs_parser_test.py
```python
from datetime import date
from zipfile import ZipFile
import pytest
from scripts.classes import Stop, Footpath, TripType
from scripts.gtfs_parser import (get_service_available_at_date_per_service_id,
get_trip_available_at_date_per_trip_id,
parse_gtfs,
create_beeline_footpaths)
from scripts.connectionscan_router import make_transitive
from scripts.helpers.funs import hhmmss_to_sec
PATH_GTFS_TEST_SAMPLE = "tests/resources/gtfsfp20192018-12-05_small.zip"
def test_gtfs_parser():
cs_data = parse_gtfs(PATH_GTFS_TEST_SAMPLE, date(2019, 1, 18), beeline_distance=200)
# stops
assert 89 + 11 == len(cs_data.stops_per_id)
def check_stop(stop_id, exp_code, exp_name, exp_easting, exp_northing, exp_is_station, exp_parent_station_id):
a_stop = cs_data.stops_per_id[stop_id]
assert stop_id == a_stop.id
assert exp_code == a_stop.code
assert exp_name == a_stop.name
assert exp_easting == a_stop.easting
assert exp_northing == a_stop.northing
assert exp_is_station == a_stop.is_station
assert (exp_parent_station_id if exp_parent_station_id else None) == a_stop.parent_station_id
check_stop("8500218:0:7", "", "Olten", 7.90768978414808, 47.3522319182299, False, "8500218P")
check_stop("8587654", "", "Glattbrugg, Glatthof", 8.56762812456551, 47.434511142518, False, None)
check_stop("8594553", "", "Op<NAME>", 8.57155376235766, 47.4326456250948, False, None)
check_stop("8501008P", "", "Genève", 6.14245533484329, 46.2102053471586, True, None)
# footpaths
assert (168 + (2 * 35) + (89 + 11) + 10) == len(cs_data.footpaths_per_from_to_stop_id)
def check_footpath(from_stop_id, to_stop_id, exp_walking_time):
a_footpath = cs_data.footpaths_per_from_to_stop_id[(from_stop_id, to_stop_id)]
assert from_stop_id == a_footpath.from_stop_id
assert to_stop_id == a_footpath.to_stop_id
assert exp_walking_time == a_footpath.walking_time
check_footpath("8500218:0:8", "8500218:0:7", 300)
check_footpath("8503000:0:34", "8503000:0:14", 420)
check_footpath("8501026:0:3", "8501026:0:1", 120)
check_footpath("8500218:0:7", "8500218P", 0)
check_footpath("8500218P", "8500218:0:7", 0)
check_footpath("8500218P", "8500218P", 0)
check_footpath("8503000:0:34", "8503000:0:34", 0)
# trips
def check_trip(trip_id, exp_nb_connections, exp_first_stop_id, exp_last_stop_id, exp_dep_first_stop,
exp_arr_last_stop, exp_trip_type=None):
trip = cs_data.trips_per_id[trip_id]
assert exp_nb_connections == len(trip.connections)
first_con = trip.connections[0]
last_con = trip.connections[-1]
assert exp_first_stop_id == first_con.from_stop_id
assert exp_last_stop_id == last_con.to_stop_id
assert hhmmss_to_sec(exp_dep_first_stop) == first_con.dep_time
assert hhmmss_to_sec(exp_arr_last_stop) == last_con.arr_time
if exp_trip_type is not None:
assert exp_trip_type == trip.trip_type
check_trip("2.TA.1-85-j19-1.1.H", 16, "8572668", "8572648", "06:01:00", "06:23:00")
check_trip("1.TA.1-85-j19-1.1.H", 16, "8572668", "8572648", "05:31:00", "05:53:00", exp_trip_type=TripType.UNKNOWN)
def check_connection_on_trip(trip_id, connection_index, exp_from_stop_id, exp_to_stop_id, exp_dep_time_hhmmss,
exp_arr_time_hhmmss):
trip = cs_data.trips_per_id[trip_id]
con = trip.connections[connection_index]
assert exp_from_stop_id == con.from_stop_id
assert exp_to_stop_id == con.to_stop_id
assert hhmmss_to_sec(exp_dep_time_hhmmss) == con.dep_time
assert hhmmss_to_sec(exp_arr_time_hhmmss) == con.arr_time
check_connection_on_trip("2.TA.1-85-j19-1.1.H", 0, "8572668", "8502095", "06:01:00", "06:04:00")
check_connection_on_trip("2.TA.1-85-j19-1.1.H", 1, "8502095", "8572666", "06:04:00", "06:05:00")
check_connection_on_trip("2.TA.1-85-j19-1.1.H", 1, "8502095", "8572666", "06:04:00", "06:05:00")
check_connection_on_trip("2.TA.1-85-j19-1.1.H", 15, "8572656", "8572648", "06:18:00", "06:23:00")
with pytest.raises(KeyError):
# noinspection PyStatementEffect
cs_data.trips_per_id["3.TA.90-73-Y-j19-1.2.H"]
with pytest.raises(KeyError):
# noinspection PyStatementEffect
cs_data.trips_per_id["471.TA.26-759-j19-1.5.R"]
def test_get_service_available_at_date_per_service_id_get_trip_available_at_date_per_trip_id():
with ZipFile(PATH_GTFS_TEST_SAMPLE, "r") as zip_file:
# calendar.txt and # calendar_dates.txt
service_available_at_date_per_service_id = \
get_service_available_at_date_per_service_id(zip_file, date(2019, 1, 18))
# 2019-01-18 was a friday
assert 46 == len(service_available_at_date_per_service_id)
assert service_available_at_date_per_service_id["TA+b0001"]
assert not service_available_at_date_per_service_id["TA+b02i1"]
assert not service_available_at_date_per_service_id["TA+b00va"] # removed by calendar_dates.txt
assert service_available_at_date_per_service_id["TA+b02ro"]
assert not service_available_at_date_per_service_id["TA+b03ur"] # removed by calendar_dates.txt
# trips.txt
trip_available_at_date_per_trip_id, route_id_per_trip_id = \
get_trip_available_at_date_per_trip_id(zip_file, service_available_at_date_per_service_id)
assert 2272 == len(trip_available_at_date_per_trip_id)
assert trip_available_at_date_per_trip_id["1.TA.1-85-j19-1.1.H"]
assert trip_available_at_date_per_trip_id["2.TA.1-85-j19-1.1.H"]
assert not trip_available_at_date_per_trip_id["471.TA.26-759-j19-1.5.R"]
assert not trip_available_at_date_per_trip_id["6.TA.6-1-j19-1.6.R"]
assert trip_available_at_date_per_trip_id["18.TA.6-1-j19-1.17.H"]
assert not trip_available_at_date_per_trip_id["41.TA.6-1-j19-1.37.R"]
assert not trip_available_at_date_per_trip_id["3.TA.90-73-Y-j19-1.2.H"]
assert 2272 == len(route_id_per_trip_id)
assert "1-85-j19-1" == route_id_per_trip_id["2.TA.1-85-j19-1.1.H"]
assert "26-759-j19-1" == route_id_per_trip_id["6.TA.26-759-j19-1.1.R"]
assert "6-1-j19-1" == route_id_per_trip_id["41.TA.6-1-j19-1.37.R"]
def test_create_beeline_footpaths():
wattwil = Stop("Wattwil", "", "", 9.0864591001338, 47.2994765484827)
wattwil_bahnhof = Stop("Wattwil, Bahnhof", "", "", 9.08679147678897, 47.2994582722627)
pontresina = Stop("Pontresina", "", "", 9.89607473321206, 46.4906506582138)
pontresina_bahnhof = Stop("Pontresina, Bahnhof", "", "", 9.89608371636491, 46.4910341056312)
pontresina_post = Stop("Pontresina, Post", "", "", 9.90654010627351, 46.4880901497136)
bern = Stop("Bern", "", "", 7.43911954873327, 46.9488249647708)
bern_bahnhof = Stop("<NAME>", "", "", 7.44020651022721, 46.948107473715)
stops = [
wattwil,
wattwil_bahnhof,
pontresina,
pontresina_bahnhof,
pontresina_post,
bern,
bern_bahnhof
]
stops_per_id = {s.id: s for s in stops}
footpaths = {
Footpath(wattwil.id, wattwil_bahnhof.id, 120),
Footpath(wattwil_bahnhof.id, wattwil.id, 120),
}
footpaths_per_from_to_stop_id = {(f.from_stop_id, f.to_stop_id): f for f in footpaths}
create_beeline_footpaths(stops_per_id, footpaths_per_from_to_stop_id, beeline_distance=100, walking_speed=2 / 3.6)
assert 120 == footpaths_per_from_to_stop_id[wattwil.id, wattwil_bahnhof.id].walking_time
assert 120 == footpaths_per_from_to_stop_id[wattwil_bahnhof.id, wattwil.id].walking_time
assert (pontresina.id, pontresina_bahnhof.id) in footpaths_per_from_to_stop_id
assert (pontresina_bahnhof.id, pontresina.id) in footpaths_per_from_to_stop_id
assert (pontresina.id, pontresina_post.id) not in footpaths_per_from_to_stop_id
assert (pontresina_post.id, pontresina.id) not in footpaths_per_from_to_stop_id
assert (bern.id, bern_bahnhof.id) not in footpaths_per_from_to_stop_id
assert (bern_bahnhof.id, bern.id) not in footpaths_per_from_to_stop_id
def test_make_transitive_simple():
footpaths = [
Footpath("s1", "s2", 60),
Footpath("s2", "s3", 70),
]
footpaths_per_from_to_stop_id = {(f.from_stop_id, f.to_stop_id): f for f in footpaths}
make_transitive(footpaths_per_from_to_stop_id)
assert 3 == len(footpaths_per_from_to_stop_id)
footpath_s1_s3 = footpaths_per_from_to_stop_id[("s1", "s3")]
assert "s1" == footpath_s1_s3.from_stop_id
assert "s3" == footpath_s1_s3.to_stop_id
assert 130 == footpath_s1_s3.walking_time
def test_make_transitive_two_iterations():
footpaths = [
Footpath("s1", "s2", 60),
Footpath("s2", "s3", 70),
Footpath("s3", "s4", 70),
]
footpaths_per_from_to_stop_id = {(f.from_stop_id, f.to_stop_id): f for f in footpaths}
make_transitive(footpaths_per_from_to_stop_id)
assert 6 == len(footpaths_per_from_to_stop_id)
assert 130 == footpaths_per_from_to_stop_id[("s1", "s3")].walking_time
assert 200 == footpaths_per_from_to_stop_id[("s1", "s4")].walking_time
assert 140 == footpaths_per_from_to_stop_id[("s2", "s4")].walking_time
def test_make_transitive_change_time():
footpaths = [
Footpath("s1", "s2", 60),
Footpath("s2", "s3", 70),
Footpath("s1", "s3", 140),
]
footpaths_per_from_to_stop_id = {(f.from_stop_id, f.to_stop_id): f for f in footpaths}
make_transitive(footpaths_per_from_to_stop_id)
assert 3 == len(footpaths_per_from_to_stop_id)
assert 130 == footpaths_per_from_to_stop_id[("s1", "s3")].walking_time
def test_make_transitive_nothing_to_do():
footpaths = [
Footpath("s1", "s2", 60),
Footpath("s2", "s3", 70),
Footpath("s1", "s3", 90),
]
footpaths_per_from_to_stop_id = {(f.from_stop_id, f.to_stop_id): f for f in footpaths}
make_transitive(footpaths_per_from_to_stop_id)
assert 3 == len(footpaths_per_from_to_stop_id)
```
#### File: tests/a_default/ca_connectionscan_data_test.py
```python
from datetime import date
import pytest
from scripts.classes import Connection, Footpath, Stop, Trip
from scripts.connectionscan_router import ConnectionScanData
from scripts.gtfs_parser import parse_gtfs
from scripts.helpers.my_logging import log_end
from tests.a_default.ba_gtfs_parser_test import PATH_GTFS_TEST_SAMPLE
def test_connectionscan_data_constructor_basic():
stops_per_id = {
"1": Stop("1", "c1", "n1", 0.0, 0.0),
"2": Stop("2", "c2", "n2", 1.0, 1.0),
"2a": Stop("2a", "c2a", "n2a", 1.1, 1.1),
"3": Stop("3", "c3", "n3", 3.0, 3.0),
}
footpaths_per_from_to_stop_id = {
("1", "1"): Footpath("1", "1", 60),
("2", "2"): Footpath("2", "2", 70),
("2a", "2a"): Footpath("2a", "2a", 71),
("3", "3"): Footpath("3", "3", 80),
("2", "2a"): Footpath("2", "2a", 75),
("2a", "2"): Footpath("2a", "2", 75),
}
con_1_1 = Connection("t1", "1", "2", 60, 70)
con_1_2 = Connection("t1", "2", "3", 72, 80)
con_2_1 = Connection("t2", "2", "3", 50, 59)
con_2_2 = Connection("t2", "3", "1", 60, 72)
trips_per_id = {
"t1": Trip("t1", [con_1_1, con_1_2]),
"t2": Trip("t2", [con_2_1, con_2_2])
}
cs_data = ConnectionScanData(stops_per_id, footpaths_per_from_to_stop_id, trips_per_id)
assert 4 == len(cs_data.stops_per_id)
    assert 6 == len(cs_data.footpaths_per_from_to_stop_id)
assert 2 == len(cs_data.trips_per_id)
assert [con_2_1, con_1_1, con_2_2, con_1_2] == cs_data.sorted_connections
def test_connectionscan_data_constructor_stop_id_not_consistent():
with pytest.raises(ValueError):
ConnectionScanData({"s1": Stop("s2", "", "", 0.0, 0.0)}, {}, {})
    log_end(additional_message="test failed successfully")
def test_connectionscan_data_constructor_from_stop_id_in_footpath_not_consistent():
with pytest.raises(ValueError):
ConnectionScanData({"s1": Stop("s1", "", "", 0.0, 0.0), "s2": Stop("s2", "", "", 0.0, 0.0)},
{("s2", "s2"): Footpath("s1", "s1", 60)}, {})
    log_end(additional_message="test failed successfully")
def test_connectionscan_data_constructor_to_stop_id_in_footpath_not_consistent():
with pytest.raises(ValueError):
ConnectionScanData({"s1": Stop("s1", "", "", 0.0, 0.0), "s2": Stop("s2", "", "", 0.0, 0.0)},
{("s2", "s1"): Footpath("s2", "s2", 60)}, {})
    log_end(additional_message="test failed successfully")
def test_connectionscan_data_constructor_stops_in_footpath_and_stops_not_consistent():
with pytest.raises(ValueError):
ConnectionScanData({"s1": Stop("s1", "", "", 0.0, 0.0)}, {("s1", "s2"): Footpath("s1", "s2", 60)}, {})
    log_end(additional_message="test failed successfully")
def test_connectionscan_data_constructor_trip_id_not_consistent():
with pytest.raises(ValueError):
ConnectionScanData({}, {}, {"t1": Trip("t", [])})
    log_end(additional_message="test failed successfully")
def test_connectionscan_data_constructor_stop_ids_in_trips_not_consistent_with_stops():
with pytest.raises(ValueError):
ConnectionScanData({"s1": Stop("s1", "", "", 0.0, 0.0)}, {},
{"t": Trip("t", [Connection("t", "s1", "s2", 30, 40)])})
    log_end(additional_message="test failed successfully")
def test_connectionscan_data_constructor_stops_per_name():
cs_data = parse_gtfs(PATH_GTFS_TEST_SAMPLE, date(2019, 1, 18))
assert "8507000P" == cs_data.stops_per_name["Bern"].id
assert "8502886" == cs_data.stops_per_name["Kirchleerau-Moosleerau, Post"].id
```
#### File: tests/a_default/cb_connectionscan_core_test.py
```python
from scripts.classes import Connection, Footpath, Stop, Trip
from scripts.connectionscan_router import ConnectionScanData
from scripts.helpers.funs import seconds_to_hhmmss, hhmmss_to_sec
fribourg = Stop("1", "FR", "Fribourg/Freiburg", 0.0, 0.0)
bern = Stop("2", "BN", "Bern", 0.0, 0.0)
zuerich_hb = Stop("3", "ZUE", "Zürich HB", 0.0, 0.0)
winterthur = Stop("4", "W", "Winterthur", 0.0, 0.0)
st_gallen = Stop("5", "SG", "St. Gallen", 0.0, 0.0)
interlaken_ost = Stop("6", "IO", "Interlaken Ost", 0.0, 0.0)
basel_sbb = Stop("7", "BS", "Basel SBB", 0.0, 0.0)
chur = Stop("8", "CH", "Chur", 0.0, 0.0)
thusis = Stop("9", "TH", "Thusis", 0.0, 0.0)
samedan = Stop("10", "SAM", "Samedan", 0.0, 0.0)
st_moritz = Stop("11", "SM", "St. Moritz", 0.0, 0.0)
bern_duebystrasse = Stop("12", "", "Bern, Dübystrasse", 0.0, 0.0)
koeniz_zentrum = Stop("13", "", "Köniz, Zentrum", 0.0, 0.0)
bern_bahnhof = Stop("14", "", "Bern, Bahnhof", 0.0, 0.0)
ostermundigen_bahnhof = Stop("15", "", "Ostermundigen, Bahnhof", 0.0, 0.0)
samedan_bahnhof = Stop("16", "", "Samedan, Bahnhof", 0.0, 0.0)
samedan_spital = Stop("17", "", "Samedan, Spital", 0.0, 0.0)
def create_test_connectionscan_data():
stops_per_id = {s.id: s for s in [
fribourg,
bern,
zuerich_hb,
winterthur,
st_gallen,
interlaken_ost,
basel_sbb,
chur,
thusis,
samedan,
st_moritz,
bern_duebystrasse,
koeniz_zentrum,
bern_bahnhof,
ostermundigen_bahnhof,
samedan_bahnhof,
samedan_spital,
]}
footpaths_per_from_stop_to_stop_id = {(s.id, s.id): Footpath(s.id, s.id, 2 * 60) for s in stops_per_id.values()}
footpaths_per_from_stop_to_stop_id[(zuerich_hb.id, zuerich_hb.id)] = Footpath(zuerich_hb.id, zuerich_hb.id, 7 * 60)
footpaths_per_from_stop_to_stop_id[(bern.id, bern.id)] = Footpath(bern.id, bern.id, 5 * 60)
footpaths_per_from_stop_to_stop_id[(bern_bahnhof.id, bern.id)] = Footpath(bern_bahnhof.id, bern.id, 5 * 60)
footpaths_per_from_stop_to_stop_id[(bern.id, bern_bahnhof.id)] = Footpath(bern.id, bern_bahnhof.id, 5 * 60)
footpaths_per_from_stop_to_stop_id[(chur.id, chur.id)] = Footpath(chur.id, chur.id, 4 * 60)
footpaths_per_from_stop_to_stop_id[(samedan.id, samedan_bahnhof.id)] = Footpath(samedan.id, samedan_bahnhof.id,
3 * 60)
footpaths_per_from_stop_to_stop_id[(samedan_bahnhof.id, samedan.id)] = Footpath(samedan_bahnhof.id, samedan.id,
3 * 60)
trips = []
trips += get_forth_and_back_trips(
[fribourg, bern, zuerich_hb, winterthur, st_gallen],
[22 * 60, 56 * 60, 26 * 60, 35 * 60],
[6 * 60, 9 * 60, 3 * 60],
hhmmss_to_sec("05:34:00"),
32,
30 * 60
)
trips += get_forth_and_back_trips(
[interlaken_ost, bern, basel_sbb],
[52 * 60, 55 * 60],
[12 * 60],
hhmmss_to_sec("05:00:00"),
16,
60 * 60
)
trips += get_forth_and_back_trips(
[basel_sbb, zuerich_hb, chur],
[53 * 60, 75 * 60],
[11 * 60],
hhmmss_to_sec("05:33:00"),
16,
60 * 60
)
trips += get_forth_and_back_trips(
[chur, thusis, samedan, st_moritz],
[30 * 60, 75 * 60, 12 * 60],
[2 * 60, 6 * 60],
hhmmss_to_sec("05:58:00"),
16,
60 * 60
)
trips += get_forth_and_back_trips(
[koeniz_zentrum, bern_duebystrasse, bern_bahnhof, ostermundigen_bahnhof],
[6 * 60, 7 * 60, 15 * 60],
[0, 0],
hhmmss_to_sec("05:00:00"),
10 * 16,
6 * 60
)
trips += get_forth_and_back_trips(
[samedan_bahnhof, samedan_spital],
[7 * 60],
[],
hhmmss_to_sec("15:00:00"),
1,
24 * 60 * 60
)
return ConnectionScanData(stops_per_id, footpaths_per_from_stop_to_stop_id, {t.id: t for t in trips})
def create_trips(stops, running_times, stop_times, first_departure, nb_trips, headway):
trips = []
for trip_index in range(nb_trips):
dep_first_stop = first_departure + trip_index * headway
trip_id = "{}_{}_{}_{}".format(stops[0].name, stops[-1].name, seconds_to_hhmmss(dep_first_stop), trip_index)
cons = []
arr = None
for stop_index in range(len(stops) - 1):
dep = dep_first_stop if stop_index == 0 else arr + stop_times[stop_index - 1]
arr = dep + running_times[stop_index]
cons += [Connection(trip_id, stops[stop_index].id, stops[stop_index + 1].id, dep, arr)]
trips += [Trip(trip_id, cons)]
return trips
def test_create_trips():
dep_first_trip_first_stop = 5 * 60 * 60 + 42 * 60
trips_fri_sg = create_trips(
[fribourg, bern, zuerich_hb, winterthur, st_gallen],
[14 * 60, 58 * 60, 20 * 60, 38 * 60],
[6 * 60, 5 * 60, 3 * 60],
dep_first_trip_first_stop,
32,
30 * 60)
assert len(trips_fri_sg) == 32
assert "1" == trips_fri_sg[3].connections[0].from_stop_id
assert "2" == trips_fri_sg[3].connections[0].to_stop_id
assert "2" == trips_fri_sg[3].connections[1].from_stop_id
assert "3" == trips_fri_sg[3].connections[1].to_stop_id
assert "4" == trips_fri_sg[3].connections[-1].from_stop_id
assert "5" == trips_fri_sg[3].connections[-1].to_stop_id
assert "08:12:00" == seconds_to_hhmmss(trips_fri_sg[5].connections[0].dep_time)
assert "08:26:00" == seconds_to_hhmmss(trips_fri_sg[5].connections[0].arr_time)
assert "08:32:00" == seconds_to_hhmmss(trips_fri_sg[5].connections[1].dep_time)
assert "09:30:00" == seconds_to_hhmmss(trips_fri_sg[5].connections[1].arr_time)
assert "09:35:00" == seconds_to_hhmmss(trips_fri_sg[5].connections[2].dep_time)
assert "09:55:00" == seconds_to_hhmmss(trips_fri_sg[5].connections[2].arr_time)
assert "09:58:00" == seconds_to_hhmmss(trips_fri_sg[5].connections[3].dep_time)
assert "10:36:00" == seconds_to_hhmmss(trips_fri_sg[5].connections[3].arr_time)
def get_forth_and_back_trips(stops, running_times, stop_times, dep_first_trip, nb_trips, headway):
return create_trips(
stops,
running_times,
stop_times,
dep_first_trip,
nb_trips,
headway) + create_trips(
list(reversed(stops)),
list(reversed(running_times)),
list(reversed(stop_times)),
dep_first_trip,
nb_trips,
headway)
def test_get_forth_and_back_trips():
dep_first_trip_first_stop = 5 * 60 * 60 + 42 * 60
trips = get_forth_and_back_trips(
[fribourg, bern, zuerich_hb, winterthur, st_gallen],
[14 * 60, 58 * 60, 20 * 60, 38 * 60],
[6 * 60, 5 * 60, 3 * 60],
dep_first_trip_first_stop,
32,
30 * 60)
assert len(trips) == 64
trips_fri_sg = trips[:32]
trips_sg_fri = trips[32:65]
assert "1" == trips_fri_sg[0].connections[0].from_stop_id
assert "5" == trips_fri_sg[-1].connections[-1].to_stop_id
assert "5" == trips_sg_fri[0].connections[0].from_stop_id
assert "1" == trips_sg_fri[-1].connections[-1].to_stop_id
```
#### File: b_routers/task_1/ba_routing_unoptimized_earliest_arrival_test.py
```python
from scripts.connectionscan_router import ConnectionScanCore
from scripts.helpers.funs import seconds_to_hhmmss, hhmmss_to_sec
from tests.a_default.cb_connectionscan_core_test import (bern, zuerich_hb, samedan, samedan_spital, bern_duebystrasse,
basel_sbb, st_gallen, ostermundigen_bahnhof, bern_bahnhof)
from tests.a_default.cb_connectionscan_core_test import create_test_connectionscan_data
def test_unoptimized_earliest_arrival_bern_zuerich_hb():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "08:58:00" == seconds_to_hhmmss(
cs_core.route_earliest_arrival(bern.id, zuerich_hb.id, hhmmss_to_sec("07:35:00")))
assert "08:58:00" == seconds_to_hhmmss(
cs_core.route_earliest_arrival(bern.id, zuerich_hb.id, hhmmss_to_sec("08:02:00")))
assert cs_core.route_earliest_arrival(bern.id, zuerich_hb.id, hhmmss_to_sec("23:33:00")) is None
def test_unoptimized_earliest_arrival_bern_samedan():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "12:45:00" == seconds_to_hhmmss(
cs_core.route_earliest_arrival(bern.id, samedan.id, hhmmss_to_sec("08:30:00")))
assert cs_core.route_earliest_arrival(bern.id, samedan.id, hhmmss_to_sec("21:00:00")) is None
def test_unoptimized_earliest_arrival_bern_samedan_spital():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "15:07:00" == seconds_to_hhmmss(
cs_core.route_earliest_arrival(bern.id, samedan_spital.id, hhmmss_to_sec("07:30:00")))
def test_unoptimized_earliest_arrival_bern_duebystrasse_samedan():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "12:45:00" == seconds_to_hhmmss(
cs_core.route_earliest_arrival(bern_duebystrasse.id, samedan.id, hhmmss_to_sec("07:30:00")))
def test_unoptimized_earliest_arrival_basel_st_gallen():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "09:41:00" == seconds_to_hhmmss(
cs_core.route_earliest_arrival(basel_sbb.id, st_gallen.id, hhmmss_to_sec("07:30:00")))
def test_unoptimized_earliest_arrival_bern_duebystrasse_ostermundigen_bahnhof():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "12:34:00" == seconds_to_hhmmss(
cs_core.route_earliest_arrival(bern_duebystrasse.id, ostermundigen_bahnhof.id, hhmmss_to_sec("12:09:46")))
def test_unoptimized_earliest_arrival_bern_bern():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "12:09:46" == seconds_to_hhmmss(cs_core.route_earliest_arrival(bern.id, bern.id, hhmmss_to_sec("12:09:46")))
def test_unoptimized_earliest_arrival_bern_bern_bahnhof():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "12:14:46" == seconds_to_hhmmss(
cs_core.route_earliest_arrival(bern.id, bern_bahnhof.id, hhmmss_to_sec("12:09:46")))
def test_unoptimized_earliest_arrival_by_name_bern_bern_bahnhof():
cs_data = create_test_connectionscan_data()
cs_core = ConnectionScanCore(cs_data)
assert "12:14:46" == seconds_to_hhmmss(
cs_core.route_earliest_arrival_by_name(bern.name, bern_bahnhof.name, "12:09:46"))
``` |
{
"source": "jlieberherr/python-playground",
"score": 3
} |
#### File: python-playground/tests/test_route_aggregation.py
```python
import unittest
from scripts.route_aggregation import aggregate_routes, get_routes_per_subsequent_stop_tuples, \
get_subsequent_stop_tuples
ROUTE_PER_ID = {
1: (1, 2, 3),
2: (1, 2, 3, 7),
3: (1, 2, 3, 4, 5, 6),
4: (16, 1, 2, 3, 4),
5: (4, 5, 6, 8),
6: (9, 10, 11, 12),
7: (10, 11, 12),
8: (13, 14, 15),
}
class RouteAggregationTests(unittest.TestCase):
def test_aggregate_routes_3(self):
aggregated_routes = aggregate_routes(ROUTE_PER_ID)
        print(aggregated_routes)
self.assertEquals(3, len(aggregated_routes))
self.assertTrue({1, 2, 3, 4, 5} in aggregated_routes)
self.assertTrue({6, 7} in aggregated_routes)
self.assertTrue({8} in aggregated_routes)
def test_aggregate_routes_4(self):
aggregated_routes = aggregate_routes(ROUTE_PER_ID, nb_subsequent_stops=4)
        print(aggregated_routes)
self.assertTrue({1} in aggregated_routes)
self.assertTrue({2} in aggregated_routes)
self.assertTrue({3, 4} in aggregated_routes)
self.assertTrue({5} in aggregated_routes)
self.assertTrue({6} in aggregated_routes)
self.assertTrue({7} in aggregated_routes)
self.assertTrue({8} in aggregated_routes)
def test_aggregate_routes_5(self):
aggregated_routes = aggregate_routes(ROUTE_PER_ID, nb_subsequent_stops=5)
        print(aggregated_routes)
self.assertTrue({1} in aggregated_routes)
self.assertTrue({2} in aggregated_routes)
self.assertTrue({3} in aggregated_routes)
self.assertTrue({4} in aggregated_routes)
self.assertTrue({5} in aggregated_routes)
self.assertTrue({6} in aggregated_routes)
self.assertTrue({7} in aggregated_routes)
self.assertTrue({8} in aggregated_routes)
def test_get_routes_per_subsequent_stop_tuples(self):
routes_per_subsequent_stop_tuples = get_routes_per_subsequent_stop_tuples(ROUTE_PER_ID, 3)
self.assertEquals({1, 2, 3, 4}, routes_per_subsequent_stop_tuples[(1, 2, 3)])
self.assertEquals({3, 4}, routes_per_subsequent_stop_tuples[(2, 3, 4)])
self.assertEquals({2}, routes_per_subsequent_stop_tuples[(2, 3, 7)])
self.assertEquals({5}, routes_per_subsequent_stop_tuples[(5, 6, 8)])
self.assertTrue((1, 2, 4) not in routes_per_subsequent_stop_tuples)
def test_get_subsequent_stop_tuples(self):
subsequent_stop_tuples = get_subsequent_stop_tuples((1, 2, 3, 4, 5, 6), 3)
self.assertEquals({(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)}, subsequent_stop_tuples)
def test_get_subsequent_stop_tuples_short(self):
subsequent_stop_tuples = get_subsequent_stop_tuples((1, 2), 3)
self.assertEquals(set(), subsequent_stop_tuples)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jlieberherr/swiss-train-delay-distributions",
"score": 2
} |
#### File: swiss-train-delay-distributions/scripts/preprocessing.py
```python
import logging
from settings import init_logging, get_path_to_output
log = logging.getLogger(__name__)
def run_preprocessing():
init_logging(get_path_to_output(), "preprocessing.log")
log.info("log something")
return 3
if __name__ == "__main__":
run_preprocessing()
``` |
{
"source": "JLiekenbrock/lyrics-visualiser",
"score": 3
} |
#### File: lyrics-visualiser/tests/test_nlp.py
```python
from components import nlp
import numpy as np
import pandas as pd
from pandas import testing as tm
testlyrics = open('./tests/testdata/testsong.txt').read()
testlyricsclean = pd.read_pickle("tests/testdata/testsongclean")
testlyricsdist = np.load("tests/testdata/distances.npy")
def test_clean_lyrics():
tm.assert_series_equal(nlp.clean_lyrics(testlyrics), testlyricsclean)
def test_distances():
np.testing.assert_array_equal(nlp.distances(testlyricsclean), testlyricsdist)
``` |
{
"source": "jliendo/sdn_apps",
"score": 2
} |
#### File: jliendo/sdn_apps/fw_switch.py
```python
from ryu.lib.ip import ipv4_to_bin
from switch import Switch
class FWSwitch(Switch):
def __init__(self, *args, **kwargs):
super(FWSwitch, self).__init__(*args, **kwargs)
def permit_packet(self, dp, parser, pkt):
if pkt is not None:
if pkt.src_ip == "10.0.0.1" and pkt.dst_ip == "10.0.0.2":
match = parser.OFPMatch(
eth_type=0x0800,
ipv4_src="10.0.0.1",
ipv4_dst="10.0.0.2",
)
actions = []
priority = 10
self.add_flow(dp, priority, match, actions)
return False
return True
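# Note (assumption about switch.py, which is not shown here): Switch provides
# add_flow and calls permit_packet before learning/forwarding a packet. A flow
# entry installed with an empty action list drops matching packets in
# OpenFlow 1.3, so 10.0.0.1 -> 10.0.0.2 traffic is blocked at the switch and
# returning False tells the base class not to forward this packet either.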
```
#### File: jliendo/sdn_apps/hub.py
```python
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import HANDSHAKE_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
class Hub(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(Hub, self).__init__(*args, **kwargs)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
self.add_table_miss_entry(ev)
def add_table_miss_entry(self, ev):
dp = ev.msg.datapath
ofproto = dp.ofproto
parser = dp.ofproto_parser
match = parser.OFPMatch()
priority = 0
actions = [
parser.OFPActionOutput(
ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER
)
]
self.add_flow(dp, priority, match, actions)
def add_flow(self, dp, priority, match, actions):
ofproto = dp.ofproto
parser = dp.ofproto_parser
inst = [
parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS,
actions
)
]
mod = parser.OFPFlowMod(
datapath=dp,
priority=priority,
match=match,
instructions=inst
)
dp.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
in_port = msg.match['in_port']
dp = msg.datapath
ofp = dp.ofproto
ofp_parser = dp.ofproto_parser
actions = [
ofp_parser.OFPActionOutput(
ofp.OFPP_FLOOD
)
]
out = ofp_parser.OFPPacketOut(
datapath=dp,
buffer_id=msg.buffer_id,
in_port=in_port,
actions=actions,
)
dp.send_msg(out)
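# Rough usage sketch (command line is illustrative): the app can be started
# with ryu-manager, e.g.
#
#     ryu-manager hub.py
#
# and every packet that reaches the controller via the table-miss entry is
# flooded out of all ports, which is the classic hub behaviour.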
``` |
{
"source": "jliendo/topodiscovery",
"score": 3
} |
#### File: jliendo/topodiscovery/arp_response.py
```python
from pox.core import core
from pox.lib.revent import *
import pox.openflow.libopenflow_01 as of
from scapy.all import *
"""
We want to ARP response all ARP requests with our own IP/mac. The idea is to
test an algorithm in which there is no need to broadcast ARP requests through
the network to locate dpid/port of an IP address
ARP is an IP location services (dpid/port). All that there is to ARP is to
provide the host with a valid mac-address to where to send the IP packet. The
idea is to have the controller be this L2 "default-gateway" and let other
controller componentes (i.e. routing) to sort out where to send the host's IP
packets. As soon as the controller figures out where in the network is the IP
address located, then it does install flows to have bi-directional data-flowing.
Only then the controller gets out of the way of the flowing of data between
host-1 and host-2.
"""
log = core.getLogger()
class ArpResponse( EventMixin ):
def __init__(self):
# listen to all events from core
core.openflow.addListeners(self)
# mac address of the controller
# XXX how do we assign a MAC to the controller?
self.controller_mac = '00:00:ca:fe:ba:be'
# XXX do we have to preemptively install a flow so all ARP packets are sent to
# the controller???
def _handle_PacketIn(self, event):
pkt = Ether(event.data)
# if not an ARP packet then nothing to see, move along...
if not pkt[Ether].type == 0x0806:
return
# type is ARP, but do we really have an ARP packet?
if not ARP in pkt:
log.error('ARP_RESPONSE: Received bad ARP Packet')
return
# is it an ARP request?
if pkt[ARP].op == 1:
# XXX Have to check if the src hwaddr and paddr are already in the
# gmat, if not, then add it?
is_at = [x['mac'] for x in core.discovery.gmat if x['ip'] == pkt[ARP].pdst].pop()
src = is_at
dst = pkt[Ether].src
type = pkt[Ether].type
# we are proxy'ing for the pdst
hwsrc = is_at
psrc = pkt[ARP].pdst
hwdst = pkt[ARP].hwsrc
            pdst = pkt[ARP].psrc
# arp-reply
op = 2
arp_reply = Ether(src=src, dst=dst, type=type)/\
ARP(hwsrc=hwsrc, psrc=psrc, hwdst=hwdst, pdst=pdst, op=op)
# create openflow message
log.debug('ARP_RESPONSE: Got ARP who-has for %s. Sent %s is-at %s' % (pkt[ARP].pdst, pkt[ARP].pdst, is_at))
msg = of.ofp_packet_out()
# send the arp reply from the same port the request was received
msg.actions.append(of.ofp_action_output(port = event.port))
msg.data = bytes(arp_reply)
event.connection.send(msg)
if pkt[ARP].op == 2:
# XXX got arp-response packet, refresh gmat?
log.debug('ARP_RESPONSE: got ARP Reply packet')
def launch():
if core.hasComponent('discovery'):
component = ArpResponse()
core.register('arp_response', component)
log.debug("ARP_RESPONSE: arp_response component registered")
else:
log.debug("ARP_RESPONSE: arp_response *not* loaded. Missing dependencies")
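# Launch sketch (assumption: the discovery component in this repository
# registers itself as core.discovery, which this module depends on):
#
#     ./pox.py discovery arp_response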
``` |
{
"source": "jliev/wealthbot_chatterbot",
"score": 2
} |
#### File: admin/models/subclass.py
```python
from django.db import models
class Subclass(models.Model):
class Meta:
db_table = 'subclasses'
asset_class = models.ForeignKey('webo_admin.AssetClass', on_delete=models.CASCADE, related_name='subclasses')
owner = models.ForeignKey('user.User', on_delete=models.CASCADE, blank=True, null=True)
source = models.ForeignKey('webo_admin.Subclass', on_delete=models.CASCADE, related_name='targets', blank=True, null=True)
name = models.CharField(max_length=255)
expected_performance = models.FloatField()
priority = models.IntegerField(blank=True, null=True)
tolerance_band = models.IntegerField(blank=True, null=True)
def __str__(self):
return str(self.pk) + ": " + self.name
```
#### File: wealthbot/chat/views.py
```python
from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
from datetime import datetime
from django.http import HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from client.forms import PortfolioForm
from client.models import ClientAccount, AccountGroup
from client.managers.clientPortfolioManager import ClientPortfolioManager
from client.managers.portfolioInformationManager import PortfolioInformationManager
def index(request):
return render(request, 'chat/index.html', {})
def room(request, room_name):
return render(request, 'chat/room.html', {
'room_name_json': mark_safe(json.dumps(room_name))
})
def portfolio(request):
clientPortfolioManager = ClientPortfolioManager()
# Get the user object
client = request.user
print('------index-----', client)
ria = client.profile.ria_user
# Get client's portfolio
clientPortfolio = clientPortfolioManager.getCurrentPortfolio(client=client)
if clientPortfolio is None:
clientPortfolio = clientPortfolioManager.getActivePortfolio(client=client)
if clientPortfolio is None:
raise Http404()
companyInformation = ria.riacompanyinformation
portfolio = clientPortfolio.portfolio
isQualified = manageQualified(
session=request.session,
companyInformation=companyInformation,
isQualified=True,
)
isFinal = False
# If client has final portfolio
if clientPortfolio.isAdvisorApproved():
isFinal = True
if client.profile.registration_step < 4:
profile = client.profile
profile.registration_step = 4
profile.save()
elif clientPortfolio.isProposed():
existWorkflow = None # Skip implementing workflow at this moment
portfolioInformationManager = PortfolioInformationManager()
clientAccounts = ClientAccount.objects.filter(client=client)
retirementAccounts = ClientAccount.objects.filter(client=client,
groupType__group__name=AccountGroup.GROUP_EMPLOYER_RETIREMENT)
form = PortfolioForm()
# Skip document at this moment
documents = {
'ria_investment_management_agreement': '#',
}
portfolio_information = portfolioInformationManager.getPortfolioInformation(user=client, model=portfolio,
isQualified=isQualified)
client.appointedBillingSpec.calcFeeTier()
data = {
'is_final': isFinal,
'client': client,
'client_accounts': clientAccounts,
'total': ClientAccount.getTotalScoreByClient(client=client),
'ria_company_information': companyInformation,
'has_retirement_account': True if retirementAccounts.exists() else False,
'portfolio_information': portfolio_information,
'show_sas_cash': containsSasCash(clientAccounts),
'is_use_qualified_models': companyInformation.is_use_qualified_models,
'form': form,
'signing_date': datetime.now(),
'documents': documents,
'action': 'client_portfolio',
}
return render(request, 'chat/portfolio_index.html', data)
def containsSasCash(accounts=None):
if accounts is not None:
for account in accounts:
if account.sas_cash is not None:
if account.sas_cash > 0:
return True
return False
def manageQualified(session, companyInformation, isQualified):
isUseQualified = companyInformation.is_use_qualified_models
if isUseQualified:
if isQualified != '':
setIsQualifiedModel(session=session, value=isQualified)
isQualified = getIsQualifiedModel(session=session)
else:
isQualified = False
return isQualified
```
#### File: client/forms/clientAccount.py
```python
from django import forms
from client.models import ClientAccount, AccountGroup, AccountGroupType
class ClientAccountForm(forms.ModelForm):
client = None
group = None
isAllowRetirementPlan = False
contributionTypes = []
contribution_type = forms.ChoiceField(
choices=contributionTypes,
widget=forms.RadioSelect(
)
)
class Meta:
model = ClientAccount
fields = (
'groupType',
'financial_institution',
'value',
'monthly_contributions',
'monthly_distributions',
'contribution_type',
)
def __init__(self, *args, **kwargs):
# print('1. Get into ClientAccountForm constructor')
self.client = kwargs.pop('user')
if 'group' in kwargs:
self.group = kwargs.pop('group')
else:
self.group = AccountGroup.GROUP_EMPLOYER_RETIREMENT
# print('1b. Group is %s' % self.group)
self.isAllowRetirementPlan = self.client.profile.ria_user.riacompanyinformation.is_allow_retirement_plan
if 'validateAdditionalFields' in kwargs:
self.validateAdditionalFields = kwargs.pop('validateAdditionalFields')
else:
self.validateAdditionalFields = True
# print('2. Construct contribution_type dict')
self.contributionTypes = [
('contributions', 'Contributions'),
('distributions', 'Distributions'),
('neither', 'Neither'),
]
# print('3. Call the parent modelform constructor')
super(ClientAccountForm, self).__init__(*args, **kwargs)
# print('4. Build different form depending on the account group')
# Build the form depending on the account group
if self.group == AccountGroup.GROUP_FINANCIAL_INSTITUTION:
self.buildFormForFinancialInstitution()
elif self.group == AccountGroup.GROUP_DEPOSIT_MONEY:
self.buildFormForDepositMoney()
elif self.group == AccountGroup.GROUP_OLD_EMPLOYER_RETIREMENT:
self.buildFormForOldEmployerRetirement()
elif self.group == AccountGroup.GROUP_EMPLOYER_RETIREMENT:
self.buildFormForEmployerRetirement()
else:
self.buildFormForManually()
contributionTypes = self.contributionTypes
# print('5. Build the contribution_type form')
if (self.group, self.group) in AccountGroup.getGroupChoices():
# print('6. Valid account group')
group = self.group
data = self.instance
# print('7. Data instance is')
# print(data)
# print('8. Form data have')
# print(self.data)
if self.data:
# Prepopulated form being submit
if 'contribution_type' in self.data:
# print('From field contribution_type is %s' % self.data['contribution_type'])
if self.data['contribution_type'] == "contributions":
self.fields['monthly_contributions'].label = 'Estimated Monthly Contributions'
del self.fields['monthly_distributions']
elif self.data['contribution_type'] == "distributions":
self.fields['monthly_distributions'].label = 'Estimated Monthly Distributions'
del self.fields['monthly_contributions']
else:
del self.fields['monthly_contributions']
del self.fields['monthly_distributions']
else:
if hasattr(data, 'monthly_contributions'):
self.fields['monthly_contributions'].label = 'Estimated Monthly Contributions'
self.fields['monthly_contributions'].initial = data.monthly_contributions
elif hasattr(data, 'monthly_distributions'):
self.fields['monthly_distributions'].label = 'Estimated Monthly Distributions'
self.fields['monthly_distributions'].initial = data.monthly_distributions
if 'contribution_type' in self.fields:
del self.fields['contribution_type']
else:
# New form being prepopulated
if (data is not None) and (data.pk is not None):
# If the client has chosen the contribution type
# print("Data instance exists with pk = %d" % data.pk)
if data.monthly_contributions is not None:
self.fields['monthly_contributions'].label = 'Estimated Monthly Contributions'
self.fields['monthly_contributions'].initial = data.monthly_contributions
elif data.monthly_distributions is not None:
self.fields['monthly_distributions'].label = 'Estimated Monthly Distributions'
self.fields['monthly_distributions'].initial = data.monthly_distributions
else:
# Else display the contribution type choices
# print("Data instance or pk not exists")
if group == AccountGroup.GROUP_EMPLOYER_RETIREMENT:
# Do not allow withdrawal for retirement plan
contributionTypes = [
('contributions', 'Contributions'),
('neither', 'None'),
]
self.fields['contribution_type'] = forms.ChoiceField(
choices=contributionTypes,
widget=forms.RadioSelect(
)
)
def save(self, commit=True):
clientAccount = super(ClientAccountForm, self).save(commit=False)
#type = self.cleaned_data['groupType']
#typeObj = AccountType.objects.get(name=type)
#groupObj = AccountGroup.objects.get(name=self.group)
#groupType = AccountGroupType.objects.get(group=groupObj, type=typeObj)
#clientAccount.groupType = groupType
if commit:
clientAccount.save()
return clientAccount
def buildFormForFinancialInstitution(self):
group = self.group
isAllowRetirementPlan = self.isAllowRetirementPlan
# Get the list of tuple of account types corresponding to the group
choices = [('', 'Select Type')]
groupObj = AccountGroup.objects.get(name=self.group)
types = AccountGroupType.objects.filter(group=groupObj)
for type in types:
choice = (type.type.name, type.type.name)
choices.append(choice)
self.fields['groupType'] = forms.ChoiceField(
choices=choices,
)
self.fields['groupType'].label = 'Account Type:'
self.fields['financial_institution'].label = 'Financial Institution:'
self.fields['value'].label = 'Estimated Value'
def buildFormForDepositMoney(self):
group = self.group
isAllowRetirementPlan = self.isAllowRetirementPlan
# Get the list of tuple of account types corresponding to the group
choices = [('', 'Select Type')]
groupObj = AccountGroup.objects.get(name=self.group)
types = AccountGroupType.objects.filter(group=groupObj)
for type in types:
# print("Type Choice")
# print(type.type.name)
choice = (type.type.name, type.type.name)
choices.append(choice)
self.fields['groupType'] = forms.ChoiceField(
choices=choices,
)
self.fields['groupType'].label = 'Account Type:'
del self.fields['financial_institution']
self.fields['value'].label = 'Estimated Value'
def buildFormForOldEmployerRetirement(self):
group = self.group
isAllowRetirementPlan = self.isAllowRetirementPlan
# Get the list of tuple of account types corresponding to the group
choices = [('', 'Select Type')]
groupObj = AccountGroup.objects.get(name=self.group)
        types = AccountGroupType.objects.filter(group=groupObj).order_by('type__pk').reverse()
for type in types:
choice = (type.type.name, type.type.name)
choices.append(choice)
self.fields['groupType'] = forms.ChoiceField(
choices=choices,
)
self.fields['groupType'].label = 'Account Type:'
self.fields['financial_institution'].label = 'Former Employer:'
self.fields['value'].label = 'Estimated Value'
def buildFormForEmployerRetirement(self):
pass
```
#### File: client/forms/clientProfile.py
```python
from django import forms
from django.contrib.auth.forms import UserCreationForm
from user.models import Profile
class PercentageField(forms.fields.FloatField):
widget = forms.fields.TextInput(attrs={"class": "form-control"})
def is_number(self, val):
if val is None:
return False
try:
float(val)
return True
except ValueError:
return False
# def prepare_value(self, value):
# val = super(PercentageField, self).prepare_value(value)
# if is_number(val) and not isinstance(val, str):
# return str((float(val)*100))
# return val
    def to_python(self, value):
        val = super(PercentageField, self).to_python(value)
        if self.is_number(val=val):
            return val / 100
        return val
def prepare_value(self, value):
val = super(PercentageField, self).prepare_value(value)
if self.is_number(val=val) and not isinstance(val, str):
return str((float(val)*100))
return val
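# Behaviour sketch for PercentageField (values are illustrative): the form is
# meant to show whole percentages while the model stores fractions, so
# prepare_value turns a stored 0.25 into the string "25.0" for display and
# to_python turns a submitted "25" back into 0.25.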
class ClientProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = (
'first_name',
'last_name',
'street',
'city',
'is_different_address',
'mailing_street',
'mailing_city',
'birth_date',
'phone_number',
'marital_status',
'annual_income',
'estimated_income_tax',
'liquid_net_worth',
'employment_type',
)
widgets = {
'employment_type': forms.RadioSelect,
}
def __init__(self, *args, **kwargs):
super(ClientProfileForm, self).__init__(*args, **kwargs)
self.fields['first_name'].widget.attrs = {
'class': 'input-medium form-control',
'placeholder': '<NAME>',
}
self.fields['last_name'].widget.attrs = {
'class': 'input-medium form-control',
'placeholder': '<NAME>',
}
self.fields['birth_date'].widget = forms.TextInput(
attrs={
'class': 'jq-date input-small form-control',
'placeholder': 'MM-DD-YYYY',
}
)
self.fields['birth_date'].input_formats = ['%m-%d-%Y',]
self.fields['birth_date'].required = True
self.fields['marital_status'].widget.attrs = {
'class': 'form-control',
'id': 'wealthbot_client_bundle_profile_type_marital_status',
'placeholder': 'Choose an Option',
}
self.fields['street'].widget.attrs = {
'class': 'form-control',
}
self.fields['city'].widget.attrs = {
'class': 'form-control',
}
self.fields['is_different_address'].widget = forms.CheckboxInput(
attrs={
'class': 'form-control',
'id': 'wealthbot_client_bundle_profile_type_is_different_address'
}
)
self.fields['phone_number'].widget.attrs = {
'class': 'form-control',
'placeholder': '(###) ###-####',
'data-mask-type': 'phone'
}
self.fields['annual_income'].widget.attrs = {
'class': 'form-control',
'placeholder': 'Choose an Option',
}
self.fields['estimated_income_tax'] = PercentageField()
self.fields['liquid_net_worth'].widget.attrs = {
'class': 'form-control',
'placeholder': 'Choose an Option',
}
self.fields['employment_type'].required = True
self.fields['mailing_street'].widget.attrs = {
'class': 'form-control',
}
self.fields['mailing_city'].widget.attrs = {
'class': 'form-control',
}
def save(self, commit=True):
profile = super(ClientProfileForm, self).save(commit=False)
if commit:
profile.save()
return profile
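# Typical usage sketch in a view (names are illustrative, not taken from this
# project):
#
#     form = ClientProfileForm(request.POST, instance=request.user.profile)
#     if form.is_valid():
#         form.save()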
```
#### File: client/forms/typedClientAccount.py
```python
from django import forms
from client.forms import ClientAccountForm
from client.models import AccountGroup, AccountGroupType
class TypedClientAccountForm(ClientAccountForm):
groupType = None
def __init__(self, *args, **kwargs):
self.groupType = kwargs.pop('groupType')
super(TypedClientAccountForm, self).__init__(*args, **kwargs)
def buildFormForFinancialInstitution(self):
if self.groupType is not None:
self.fields['groupType'].widget = forms.HiddenInput()
self.fields['groupType'].initial = self.groupType.type.name
else:
group = self.group
isAllowRetirementPlan = self.isAllowRetirementPlan
# Get the list of tuple of account types corresponding to the group
choices = [('', 'Select Type')]
groupObj = AccountGroup.objects.get(name=self.group)
types = AccountGroupType.objects.filter(group=groupObj)
for type in types:
choice = (type.type.name, type.type.name)
choices.append(choice)
self.fields['groupType'] = forms.ChoiceField(
choices=choices,
)
self.fields['groupType'].label = 'Account Type:'
self.fields['financial_institution'].label = 'Financial Institution:'
self.fields['value'].label = 'Estimated Deposit:'
def buildFormForDepositMoney(self):
if self.groupType is not None:
self.fields['groupType'].widget = forms.HiddenInput()
self.fields['groupType'].initial = self.groupType.type.name
else:
group = self.group
isAllowRetirementPlan = self.isAllowRetirementPlan
# Get the list of tuple of account types corresponding to the group
choices = [('', 'Select Type')]
groupObj = AccountGroup.objects.get(name=self.group)
types = AccountGroupType.objects.filter(group=groupObj)
for type in types:
choice = (type.type.name, type.type.name)
choices.append(choice)
self.fields['groupType'] = forms.ChoiceField(
choices=choices,
)
self.fields['groupType'].label = 'Account Type:'
del self.fields['financial_institution']
self.fields['value'].label = 'Estimated Deposit:'
```
#### File: client/managers/riskTolerance.py
```python
class RiskTolerance(object):
user = None
userAnswers = []
points = None
def __init__(self, user, userAnswers):
self.user = user
self.userAnswers = userAnswers
self.points = None
# Get answers points
def getPoints(self):
if self.points is None:
self.calculatePoints()
return self.points
# Returns suggested portfolio.
def getSuggestedPortfolio(self, allowedModels):
result = None
models = []
points = self.getPoints()
for model in allowedModels:
rating = model.risk_rating
if points == rating:
return model
if points > rating:
models.append(model)
if not models:
models = allowedModels
        # Get the model with risk rating closest to the points
tmpDiff = None
ratingDiff = None
for model in models:
rating = model.risk_rating
tmpDiff = abs(points - rating)
if result is None:
ratingDiff = tmpDiff
result = model
else:
if tmpDiff < ratingDiff:
ratingDiff = tmpDiff
result = model
return result
# Recalculate answers points
def calculatePoints(self):
points = 50
for userAnswer in self.userAnswers:
points += userAnswer.answer.point
# map the points to correct the scale problem in original wealthbot code
if points < 41:
points = 1
elif points < 50:
points = 2
elif points < 59:
points = 3
else:
points = 4
self.points = points
# Get ria
def getRia(self):
if self.user.hasRole('ROLE_CLIENT'):
return self.user.profile.ria_user
return self.user
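# Usage sketch (user, user_answers and allowed_models are placeholders): the
# summed answer points are mapped onto a 1-4 scale and the suggested model is
# the allowed one whose risk_rating is closest, preferring ratings that do not
# exceed the points:
#
#     tolerance = RiskTolerance(user, user_answers)
#     model = tolerance.getSuggestedPortfolio(allowed_models)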
```
#### File: client/models/accountGroupType.py
```python
from django.db import models
from client.models import AccountGroup, AccountType
class AccountGroupType(models.Model):
class Meta:
db_table = 'client_account_group_types'
group = models.ForeignKey(AccountGroup, on_delete=models.CASCADE)
type = models.ForeignKey(AccountType, on_delete=models.CASCADE)
def __str__(self):
return str(self.pk) + ": " + self.group.name + " - " + self.type.name
```
#### File: client/models/accountType.py
```python
from django.db import models
class AccountType(models.Model):
class Meta:
db_table = 'client_account_types'
name = models.CharField(max_length=255)
def __str__(self):
return str(self.pk) + ": " + self.name
```
#### File: client/models/clientPortfolio.py
```python
from django.db import models
from admin.models import CeModel
class ClientPortfolio(models.Model):
class Meta:
db_table = 'client_portfolio'
client = models.ForeignKey('user.User', on_delete=models.CASCADE)
portfolio = models.ForeignKey(CeModel, on_delete=models.CASCADE)
# ENUM values status column
STATUS_PROPOSED = 'proposed'
STATUS_ADVISOR_APPROVED = 'advisor approved'
STATUS_CLIENT_ACCEPTED = 'client accepted'
STATUS_CHOICES = (
(STATUS_PROPOSED, 'proposed'),
(STATUS_ADVISOR_APPROVED, 'advisor approved'),
(STATUS_CLIENT_ACCEPTED, 'client accepted'),
)
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default=STATUS_PROPOSED)
is_active = models.BooleanField(default=True)
approved_at = models.DateField(blank=True, null=True)
accepted_at = models.DateField(blank=True, null=True)
created_at = models.DateField(auto_now_add=True)
def __str__(self):
return str(self.pk) + ": " + self.client.username + ' - ' + self.portfolio.name
# Is proposed portfolio.
def isProposed(self):
return (self.status == self.STATUS_PROPOSED)
# Is advisor approved portfolio.
def isAdvisorApproved(self):
return (self.status == self.STATUS_ADVISOR_APPROVED)
# Is client accepted portfolio.
def isClientAccepted(self):
return (self.status == self.STATUS_CLIENT_ACCEPTED)
```
#### File: client/views/activityManager.py
```python
from client.models import Activity
from user.models import Profile
from django.db.models.signals import post_save
def saveActivityByObject(sender, **kwargs):
# print('Get into saveActivityByObject call')
# print(kwargs['instance'].registration_step)
if kwargs['created']:
# print('Get into create activity call')
client = kwargs['instance'].user
message = client.getActivityMessage()
# print(client)
# print(message)
if client is not None and message is not None:
# print('Go ahead to create the activity object')
# Create activity object
activity = Activity(
client_user=client,
client_status=client.profile.client_status,
first_name=client.profile.first_name,
last_name=client.profile.last_name,
ria_user=client.profile.ria_user,
message=message,
)
# Save activity object
# print('Save the activity object')
activity.save()
post_save.connect(saveActivityByObject, sender=Profile)
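# Note (assumption about project wiring): the receiver above only fires if this
# module is imported at startup, e.g. from an AppConfig.ready() hook or an
# existing import in client.views, so the post_save signal gets connected.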
```
#### File: ria/templatetags/phone_format.py
```python
from django import template
register = template.Library()
@register.filter(name='phone_number')
def phone_number(number):
"""Convert a 10 character string into (xxx) xxx-xxxx."""
#print(number)
#first = number[0:3]
#second = number[3:6]
#third = number[6:10]
#return '(' + first + ')' + ' ' + second + '-' + third
return number
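# Template usage sketch (variable name is illustrative):
#
#     {% load phone_format %}
#     {{ profile.phone_number|phone_number }}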
```
#### File: ria/views/billing.py
```python
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
@login_required
def billingIndex(request):
return HttpResponse('Not implemented yet, please come back later!')
```
#### File: ria/views/changeProfile.py
```python
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
@login_required
def changeProfileIndex(request, tab='company_profile'):
return HttpResponse('Not implemented yet, please come back later!')
@login_required
def profile(request, tab='profile'):
return HttpResponse('Not implemented yet, please come back later!')
```
#### File: ria/views/profile.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def companyProfile(request):
return HttpResponse('Not implemented yet, please come back later!')
```
#### File: user/models/profile.py
```python
from django.db import models
from django.core.exceptions import PermissionDenied
from django.http import Http404
from client.models import ClientAdditionalContact
class Profile(models.Model):
class Meta:
db_table = 'user_profiles'
user = models.OneToOneField('user.User', on_delete=models.CASCADE)
ria_user = models.ForeignKey('user.User', related_name='ria', on_delete=models.CASCADE, blank=True, null=True)
registration_step = models.IntegerField(default=0)
company = models.CharField(max_length=255, blank=True, null=True)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255, blank=True, null=True)
middle_name = models.CharField(max_length=255, blank=True, null=True)
nick_name = models.CharField(max_length=255, blank=True, null=True)
street = models.CharField(max_length=255, blank=True, null=True)
city = models.CharField(max_length=255, blank=True, null=True)
state_id = models.IntegerField(default=0, blank=True, null=True)
zip = models.CharField(max_length=255, blank=True, null=True)
is_different_address = models.BooleanField(null=True)
mailing_street = models.CharField(max_length=255, blank=True, null=True)
mailing_city = models.CharField(max_length=255, blank=True, null=True)
mailing_state_id = models.IntegerField(default=0, blank=True, null=True)
mailing_zip = models.CharField(max_length=255, blank=True, null=True)
birth_date = models.DateField(blank=True, null=True)
phone_number = models.CharField(max_length=255, blank=True, null=True)
CLIENT_MARITAL_STATUS_SINGLE = 'Single'
CLIENT_MARITAL_STATUS_MARRIED = 'Married'
CLIENT_MARITAL_STATUS_DIVORCED = 'Divorced'
CLIENT_MARITAL_STATUS_SEPARATED = 'Separated'
CLIENT_MARTIAL_STATUS_CHOICES = (
(CLIENT_MARITAL_STATUS_SINGLE, 'Single'),
(CLIENT_MARITAL_STATUS_MARRIED, 'Married'),
(CLIENT_MARITAL_STATUS_DIVORCED, 'Divorced'),
(CLIENT_MARITAL_STATUS_SEPARATED, 'Separated'),
)
marital_status = models.CharField(choices=CLIENT_MARTIAL_STATUS_CHOICES, max_length=20, blank=True, null=True)
CLIENT_ANNUAL_INCOME_VALUE1 = '$0-$50,000'
CLIENT_ANNUAL_INCOME_VALUE2 = '$50,001-$75,000'
CLIENT_ANNUAL_INCOME_VALUE3 = '$75,001-$100,000'
CLIENT_ANNUAL_INCOME_VALUE4 = '$100,001-$150,000'
CLIENT_ANNUAL_INCOME_VALUE5 = '$150,001-$250,000'
CLIENT_ANNUAL_INCOME_VALUE6 = '$250,001 +'
CLIENT_ANNUAL_INCOME_CHOICES = (
(CLIENT_ANNUAL_INCOME_VALUE1, '$0-$50,000'),
(CLIENT_ANNUAL_INCOME_VALUE2, '$50,001-$75,000'),
(CLIENT_ANNUAL_INCOME_VALUE3, '$75,001-$100,000'),
(CLIENT_ANNUAL_INCOME_VALUE4, '$100,001-$150,000'),
(CLIENT_ANNUAL_INCOME_VALUE5, '$150,001-$250,000'),
(CLIENT_ANNUAL_INCOME_VALUE6, '$250,001 +'),
)
annual_income = models.CharField(choices=CLIENT_ANNUAL_INCOME_CHOICES, max_length=50, blank=True, null=True)
estimated_income_tax = models.CharField(max_length=50, blank=True, null=True)
CLIENT_LIQUID_NET_WORTH_VALUE1 = '$0-$25,000'
CLIENT_LIQUID_NET_WORTH_VALUE2 = '$25,001-$50,000'
CLIENT_LIQUID_NET_WORTH_VALUE3 = '$50,001-$100,000'
CLIENT_LIQUID_NET_WORTH_VALUE4 = '$100,001-$200,000'
CLIENT_LIQUID_NET_WORTH_VALUE5 = '$200,001-$350,000'
CLIENT_LIQUID_NET_WORTH_VALUE6 = '$350,001-$700,000'
CLIENT_LIQUID_NET_WORTH_VALUE7 = '$700,001-$1,000,000'
CLIENT_LIQUID_NET_WORTH_VALUE8 = '$1,000,000 +'
CLIENT_LIQUID_NET_WORTH_CHOICES = (
(CLIENT_LIQUID_NET_WORTH_VALUE1, '$0-$25,000'),
(CLIENT_LIQUID_NET_WORTH_VALUE2, '$25,001-$50,000'),
(CLIENT_LIQUID_NET_WORTH_VALUE3, '$50,001-$100,000'),
(CLIENT_LIQUID_NET_WORTH_VALUE4, '$100,001-$200,000'),
(CLIENT_LIQUID_NET_WORTH_VALUE5, '$200,001-$350,000'),
(CLIENT_LIQUID_NET_WORTH_VALUE6, '$350,001-$700,000'),
(CLIENT_LIQUID_NET_WORTH_VALUE7, '$700,001-$1,000,000'),
(CLIENT_LIQUID_NET_WORTH_VALUE8, '$1,000,000 +'),
)
liquid_net_worth = models.CharField(choices=CLIENT_LIQUID_NET_WORTH_CHOICES, max_length=50, blank=True, null=True)
CLIENT_EMPLOYMENT_TYPE_EMPLOYED = 'Employed'
CLIENT_EMPLOYMENT_TYPE_SELF_EMPLOYED = 'Self-Employed'
CLIENT_EMPLOYMENT_TYPE_RETIRED = 'Retired'
CLIENT_EMPLOYMENT_TYPE_UNEMPLOYED = 'Unemployed'
CLIENT_EMPLOYMENT_TYPE_CHOICES = (
(CLIENT_EMPLOYMENT_TYPE_EMPLOYED, 'Employed'),
(CLIENT_EMPLOYMENT_TYPE_SELF_EMPLOYED, 'Self-Employed'),
(CLIENT_EMPLOYMENT_TYPE_RETIRED, 'Retired'),
(CLIENT_EMPLOYMENT_TYPE_UNEMPLOYED, 'Unemployed'),
)
employment_type = models.CharField(choices=CLIENT_EMPLOYMENT_TYPE_CHOICES, default=CLIENT_EMPLOYMENT_TYPE_EMPLOYED, max_length=50, blank=False, null=True)
suggested_portfolio_id = models.IntegerField(default=0, blank=True, null=True)
questionnaire_step = models.SmallIntegerField(default=0, blank=True, null=True)
withdraw_age = models.IntegerField(default=0, blank=True, null=True)
CLIENT_SOURCE_WEB = 'web'
CLIENT_SOURCE_IN_HOUSE = 'in-house'
CLIENT_SOURCE_CHOICES = (
(CLIENT_SOURCE_WEB, 'web'),
(CLIENT_SOURCE_IN_HOUSE, 'in-house'),
)
client_source = models.CharField(choices=CLIENT_SOURCE_CHOICES, max_length=10, blank=True, null=True)
CLIENT_ACCOUNT_MANAGED_ACCOUNT = 1
CLIENT_ACCOUNT_MANAGED_HOUSEHOLDER = 2
CLIENT_ACCOUNT_MANAGED_CHOICES = (
(CLIENT_ACCOUNT_MANAGED_ACCOUNT, 'Account Level'),
(CLIENT_ACCOUNT_MANAGED_HOUSEHOLDER, 'Householder Level'),
)
client_account_managed = models.SmallIntegerField(choices=CLIENT_ACCOUNT_MANAGED_CHOICES, blank=True, null=True)
CLIENT_STATUS_PROSPECT = 1
CLIENT_STATUS_CLIENT = 2
CLIENT_STATUS_CHOICES = (
(CLIENT_STATUS_PROSPECT, 'prospect'),
(CLIENT_STATUS_CLIENT, 'client'),
)
client_status = models.SmallIntegerField(choices=CLIENT_STATUS_CHOICES, blank=True, null=True)
PAYMENT_METHOD_DIRECT_DEBIT = 1
PAYMENT_METHOD_OUTSIDE_PAYMENT = 2
PAYMENT_METHOD_CHOICES = (
(PAYMENT_METHOD_DIRECT_DEBIT, 'Direct debit'),
(PAYMENT_METHOD_OUTSIDE_PAYMENT, 'Outside payment'),
)
paymentMethod = models.IntegerField(choices=PAYMENT_METHOD_CHOICES, default=0, blank=True, null=True)
def __str__(self):
return str(self.pk) + ": " + self.user.username
# Returns true if user is client with marital_status =
# CLIENT_MARITAL_STATUS_MARRIED and false otherwise.
def isMarried(self):
clientRole = 'ROLE_CLIENT'
if not self.user.hasRole(role=clientRole):
raise PermissionDenied('User does not have role: %s' % clientRole)
status = self.marital_status
if status == self.CLIENT_MARITAL_STATUS_MARRIED:
return True
return False
clientRegistrationSteps = [
'Created Login',
'Risk questionnaire',
'Information Intake',
'Suggested Portfolio',
'Advisor Approved Portfolio',
'Approved Portfolio',
'Application Screen',
'Completed All Applications',
]
def clientRegistrationStep(self):
return self.clientRegistrationSteps[self.registration_step]
# Get last_name
def lastName(self):
return self.last_name
# Get first_name
def firstName(self):
return self.first_name
# Get middle_name
def middleName(self):
return self.middle_name
# Get birth_date
def birthDate(self):
return self.birth_date
# Get marital_status
def maritalStatus(self):
return self.marital_status
# Get spouse_first_name
def spouseFirstName(self):
for contact in self.user.clientadditionalcontact_set.all():
if contact.type == ClientAdditionalContact.TYPE_SPOUSE:
return contact.spouse_first_name
return
# Get spouse_middle_name
def spouseMiddleName(self):
for contact in self.user.clientadditionalcontact_set.all():
if contact.type == ClientAdditionalContact.TYPE_SPOUSE:
return contact.spouse_middle_name
return
# Get spouse_last_name
def spouseLastName(self):
for contact in self.user.clientadditionalcontact_set.all():
if contact.type == ClientAdditionalContact.TYPE_SPOUSE:
return contact.spouse_last_name
return
# Get spouse_birth_date
def spouseBirthDate(self):
for contact in self.user.clientadditionalcontact_set.all():
if contact.type == ClientAdditionalContact.TYPE_SPOUSE:
return contact.spouse_birth_date
return
# Get phone_number
def phoneNumber(self):
return self.phone_number
# Get employment_type
def employmentType(self):
return self.employment_type
# Get annual_income
def annualIncome(self):
return self.annual_income
# Get liquid_net_worth
def liquidNetWorth(self):
return self.liquid_net_worth
# Get estimated_income_tax in percent format
def estimatedIncomeTaxPercent(self):
return "{0:.0%}".format(float(self.estimated_income_tax))
# Get client_account_managed as string.
def clientAccountManagedAsString(self):
level = self.client_account_managed
if level is None:
            return self.ria_user.riacompanyinformation.getAccountManagementAsString()
for choice in self.CLIENT_ACCOUNT_MANAGED_CHOICES:
if choice[0] == level:
return choice[1].lower()
raise Http404("Value of client account managed for key : %d doesn't exist." % level)
```
#### File: user/views/client.py
```python
from django.http import Http404
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from user.models import User, Profile
from client.models import ClientSettings
from admin.models import BillingSpec
from user.forms import ClientRegistrationForm
#import json
def registration(request, ria_id):
    # If the user is already logged in, redirect to a page to continue registration
print(request.user)
if request.user.is_authenticated:
redirectUrl = redirectIfUserExist(request.user)
if redirectUrl is not None:
return redirect(redirectUrl)
# Check if the ria_id exists in users table or not
ria = get_object_or_404(User, pk=ria_id)
    # If it exists, check whether the ria user has a valid role
if not ria.hasRole('ROLE_RIA'):
raise Http404("Ria user does not exist.")
# Check the group owned by the ria
    group = None
# Create the registration form
if request.method == 'POST':
print("post message:-------look at me-------------", request.POST)
form = ClientRegistrationForm(request.POST)
if form.is_valid():
form.instance.username = form.cleaned_data['email']
user = form.save(commit=False) # Get user obj for info assignment
# Validate the password
password = form.cleaned_data.get('password1')
try:
validate_password(password, user)
except ValidationError as e:
form.add_error('password1', e)
context = {
'form': form,
'ria': ria
}
return render(request, 'user/client_registration.html', context)
# Assign the email information
user.email = form.cleaned_data['email']
# Assign the group information
if group is None:
group = Group.objects.get(name="All")
# Assign the billing spec information
billingSpec = BillingSpec.objects.get(master=True, owner=ria)
user.appointedBillingSpec = billingSpec
user = form.save() # Save once to have valid user id for below many-to-many relation with group
user.groups.add(group)
user.save()
# Create and assign the user profile
profile = Profile(user=user, first_name=form.cleaned_data['first_name'],
last_name=form.cleaned_data['last_name'], registration_step=0)
# Assign the RIA information
profile.ria_user = ria
profile.client_status = Profile.CLIENT_STATUS_PROSPECT
profile.save()
# Create and assign the user client settings
clientSettings = ClientSettings(client=user)
clientSettings.save()
# Authenticate and login the newly created user
username = form.cleaned_data.get('email')
user = authenticate(username=username, password=password)
# that is when we have an actual user
print(user)
print(request.user)
login(request, user)
print(request.user)
return redirect('rx_client_profile_step_one')
else:
form = ClientRegistrationForm()
# Display the registration form
# params = {
# 'ria_id': ria.id
# }
# if group is not None:
# params['group'] = group.name
context = {
'form': form,
'ria': ria
# 'ria_company': ria.riacompanyinformation,
# 'params': params,
}
return render(request, 'user/client_registration.html', context)
#@login_required
def redirectIfUserExist(user):
# Return the redirect label from the given registration_step
if hasattr(user, 'profile'):
return {
1 : 'rx_client_profile_step_two',
2 : 'rx_client_profile_step_three',
3 : 'rx_client_finish_registration',
}.get(user.profile.registration_step, 'rx_client_profile_step_one')
``` |
{
"source": "j-liew/CMPUT404-assignment-webserver",
"score": 3
} |
#### File: j-liew/CMPUT404-assignment-webserver/server.py
```python
import socketserver
import os
# Copyright 2020 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2020 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
#print ("Got a request of: %s\n" % self.data)
request = self.data.decode().split()
# ignore any empty requests
if len(request) < 1:
return
method = request[0]
requestPath = request[1]
# only accept GET requests
if method != 'GET':
self.sendMethodNotAllowed()
return
# www folder path
basePath = os.getcwd() + '/www'
# verify that client is requesting from www folder
requestAbsPath = os.path.abspath(basePath + requestPath)
if requestAbsPath[:len(basePath)] != basePath:
self.sendNotFound()
return
# process request
while True:
try:
# open requested file
path = basePath + requestPath
f = open(path, 'r')
fileType = requestPath.split('.')[-1]
fileSize = os.path.getsize(path)
self.sendOk(f, fileType, fileSize)
except (FileNotFoundError, NotADirectoryError):
self.sendNotFound()
except IsADirectoryError:
# serve default page of directory
if requestPath[-1] == '/':
requestPath += 'index.html'
continue
# otherwise, use a redirect to correct the path ending
else:
newLocation = 'http://127.0.0.1:8080' + requestPath + '/'
self.sendRedirect(newLocation)
break
def sendOk(self, fileHandle, fileType, fileSize):
content = fileHandle.read()
status = 'HTTP/1.1 200 OK\r\n'
contentType = ''
if fileType == 'html':
contentType = 'Content-Type: text/html\r\n'
elif fileType == 'css':
contentType = 'Content-Type: text/css\r\n'
contentLength = 'Content-Length: ' + str(fileSize) + '\r\n'
headerEnd = '\r\n'
response = status + contentType + contentLength + headerEnd + content
self.request.sendall(bytes(response, 'utf-8'))
def sendRedirect(self, newLocation):
status = 'HTTP/1.1 301 Moved Permanently\r\n'
location = 'Location: ' + newLocation + '\r\n'
headerEnd = '\r\n'
response = status + location + headerEnd
self.request.sendall(bytes(response, 'utf-8'))
def sendNotFound(self):
content = "<h1>404 Not Found</h1>\n"
status = 'HTTP/1.1 404 Not Found\r\n'
contentType = 'Content-Type: text/html\r\n'
contentLength = 'Content-Length: ' + str(len(bytes(content, 'utf-8'))) + '\r\n'
headerEnd = '\r\n'
response = status + contentType + contentLength + headerEnd + content
self.request.sendall(bytes(response, 'utf-8'))
def sendMethodNotAllowed(self):
content = '<h1>405 Method Not Allowed</h1>\n'
status = 'HTTP/1.1 405 Method Not Allowed\r\n'
allow = 'Allow: GET\r\n'
contentType = 'Content-Type: text/html\r\n'
contentLength = 'Content-Length: ' + str(len(bytes(content, 'utf-8'))) + '\r\n'
headerEnd = '\r\n'
        response = status + allow + contentType + contentLength + headerEnd + content
self.request.sendall(bytes(response, 'utf-8'))
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
``` |
{
"source": "jlifts/social-bots",
"score": 3
} |
#### File: jlifts/social-bots/justlk.py
```python
import tweepy
import logging
from config import create_api
import json
import time
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
class TListener(tweepy.StreamListener):
def __init__(self, api):
self.api = api
self.me = api.me()
def on_status(self, tweet):
logger.info(f"The tweet ids {tweet.id}")
# ignores replies and if I'm the author
if tweet.in_reply_to_status_id is not None or \
tweet.user.id == self.me.id:
return
# Liking if not liked yet
if not tweet.favorited:
try:
tweet.favorite()
time.sleep(180)
except Exception as e:
logger.error("There was a fav error", exec_info=True)
def on_error(self, status):
logger.error(status)
def main(keywords):
api = create_api()
tweets_listener = TListener(api)
stream = tweepy.Stream(api.auth, tweets_listener)
stream.filter(track=keywords, languages=["en"])
if __name__ == "__main__":
main(["#Crypto", "Ethereum", "$ETH", "DeFi", "#DeFi",
"programming", "bankless", "#NFT", "#Chainlink", "$LINK"])
```
#### File: jlifts/social-bots/my_autofn.py
```python
from multiprocessing import Process
import tweepy
import time
import logging
import json
import requests
from config import create_api
from follow import follow_followers
from justlk import TListener
from rply_BTC import check_mentions
#from rply_ETH import check_mention
from tagged import check_mentioning
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
#add in bitcoin current price retrieval
#Powered by CoinDesk
response = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')
data = response.json()
#print(data["bpi"]["USD"]["rate"])
#If you want to see the price uncomment
#add in ethereum current price retrieval
response = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')
data = response.json()
#print(data["bpi"]["USD"]["rate"])
#If you want to see the price uncomment
def streamer():
api = create_api()
tweets_listener = TListener(api)
stream = tweepy.Stream(api.auth, tweets_listener)
stream.filter(track=["crypto", "ethereum", "DeFi", "science","covid","coffee", "bankless"], languages=["en"])
def follow():
api = create_api()
while True:
follow_followers(api)
logger.info("Sleeping...")
time.sleep(180)
def BTC():
api = create_api()
since_id = 1
while True:
since_id = check_mentions(api, ["BTC", "Bitcoin"], since_id)
logger.info("Searching...")
time.sleep(1000)
def tagged():
api = create_api()
since_ids = 1
while True:
since_ids = check_mentioning(api, since_ids)
logger.info("Searching...")
time.sleep(300)
#def ETH():
#api = create_api()
#since_ids = 1
#while True:
#since_ids = check_mention(api, ["ETH", "Ethereum", "Ether", "the best crypto"], since_ids)
#logger.info("Searching...")
#time.sleep(1000)
if __name__ == "__main__":
p1 = Process(target=streamer)
p1.start()
p2 = Process(target=follow)
p2.start()
p3 = Process(target=tagged)
p3.start()
#p4 = Process(target=reply)
#p4.start()
p5 = Process(target=BTC)
p5.start()
#p6 = Process(target=ETH)
#p6.start()
``` |
{
"source": "JLightning/xo",
"score": 2
} |
#### File: xo/contrib/django-settings.py
```python
from django.db.backends.signals import connection_created
def activate_foreign_keys(sender, connection, **kwargs):
"""Enable integrity constraint with sqlite."""
if connection.vendor == 'sqlite':
cursor = connection.cursor()
cursor.execute('PRAGMA foreign_keys = ON;')
connection_created.connect(activate_foreign_keys)
``` |
{
"source": "jlikhuva/loompy",
"score": 2
} |
#### File: loompy/loompy/cell_calling.py
```python
import logging
import numpy as np
import scipy.sparse as sparse
import scipy.stats as sp_stats
# Simple Good-Turing estimator.
# Based on S implementation in
# <NAME> & <NAME> (1995) Good-turing frequency estimation without tears,
# Journal of Quantitative Linguistics, 2:3, 217-237, DOI: 10.1080/09296179508590051
class SimpleGoodTuringError(Exception):
pass
def _averaging_transform(r, nr):
d = np.concatenate((np.ones(1, dtype=int), np.diff(r)))
dr = np.concatenate((
0.5 * (d[1:] + d[0:-1]),
np.array((d[-1],), dtype=float),
))
return nr.astype(float) / dr
def _rstest(r, coef):
return r * np.power(1 + 1 / r, 1 + coef)
def simple_good_turing(xr, xnr):
"""Make a Simple Good-Turing estimate of the frequencies.
Args:
xr (np.array(int)): Non-zero item frequencies
xnr (np.array(int)): Non-zero frequencies of frequencies
Returns:
(rstar (np.array(float)), p0 (float)):
rstar: The adjusted non-zero frequencies
p0: The total probability of unobserved items
"""
xr = xr.astype(float)
xnr = xnr.astype(float)
xN = np.sum(xr * xnr)
# Get Linear Good-Turing estimate
xnrz = _averaging_transform(xr, xnr)
slope, intercept, _, _, _ = sp_stats.linregress(np.log(xr), np.log(xnrz))
if slope > -1:
        raise SimpleGoodTuringError("The log-log slope is > -1 (%.3f); the SGT estimator is not applicable to these data." % slope)
xrst = _rstest(xr, slope)
xrstrel = xrst / xr
# Get traditional Good-Turing estimate
xrtry = xr == np.concatenate((xr[1:] - 1, np.zeros(1)))
xrstarel = np.zeros(len(xr))
xrstarel[xrtry] = (xr[xrtry] + 1) / xr[xrtry] * np.concatenate((xnr[1:], np.zeros(1)))[xrtry] / xnr[xrtry]
# Determine when to switch from GT to LGT estimates
tursd = np.ones(len(xr))
for i in range(len(xr)):
if xrtry[i]:
tursd[i] = float(i + 2) / xnr[i] * np.sqrt(xnr[i + 1] * (1 + xnr[i + 1] / xnr[i]))
xrstcmbrel = np.zeros(len(xr))
useturing = True
for r in range(len(xr)):
if not useturing:
xrstcmbrel[r] = xrstrel[r]
else:
if np.abs(xrstrel[r] - xrstarel[r]) * (1 + r) / tursd[r] > 1.65:
xrstcmbrel[r] = xrstarel[r]
else:
useturing = False
xrstcmbrel[r] = xrstrel[r]
# Renormalize the probabilities for observed objects
sumpraw = np.sum(xrstcmbrel * xr * xnr / xN)
xrstcmbrel = xrstcmbrel * (1 - xnr[0] / xN) / sumpraw
p0 = xnr[0] / xN
return (xr * xrstcmbrel, p0)
def sgt_proportions(frequencies):
"""Use Simple Good-Turing estimate to adjust for unobserved items
Args:
frequencies (np.array(int)): Nonzero frequencies of items
Returns:
(pstar (np.array(float)), p0 (float)):
pstar: The adjusted non-zero proportions
p0: The total probability of unobserved items
"""
if len(frequencies) == 0:
raise ValueError("Input frequency vector is empty")
if np.count_nonzero(frequencies) != len(frequencies):
raise ValueError("Frequencies must be greater than zero")
freqfreqs = np.bincount(frequencies.astype(np.int64))
assert freqfreqs[0] == 0
use_freqs = np.flatnonzero(freqfreqs)
if len(use_freqs) < 10:
raise SimpleGoodTuringError("Too few non-zero frequency items (%d). Aborting SGT." % len(use_freqs))
rstar, p0 = simple_good_turing(use_freqs, freqfreqs[use_freqs])
# rstar contains the smoothed frequencies.
# Map each original frequency r to its smoothed rstar.
rstar_dict = dict(zip(use_freqs, rstar))
rstar_sum = np.sum(freqfreqs[use_freqs] * rstar)
rstar_i = np.fromiter((rstar_dict[f] for f in frequencies), dtype=float, count=len(frequencies))
pstar = (1 - p0) * (rstar_i / rstar_sum)
assert np.isclose(p0 + np.sum(pstar), 1)
return (pstar, p0)
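# Illustrative use of the estimator above (hypothetical variable name): given
# `gene_counts`, an integer array of per-gene UMI counts with zero-count genes
# already removed, the smoothed proportions and the unseen-item mass are obtained as
#
#   pstar, p0 = sgt_proportions(gene_counts)
#
# where pstar sums to 1 - p0, so p0 is the probability reserved for genes that
# were never observed in the input.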
def adjust_pvalue_bh(p):
""" Multiple testing correction of p-values using the Benjamini-Hochberg procedure """
descending = np.argsort(p)[::-1]
# q = p * N / k where p = p-value, N = # tests, k = p-value rank
scale = float(len(p)) / np.arange(len(p), 0, -1)
q = np.minimum(1, np.minimum.accumulate(scale * p[descending]))
# Return to original order
return q[np.argsort(descending)]
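# Worked example (illustrative, not part of the original module): for
# p = np.array([0.01, 0.04, 0.03, 0.5]) and N = 4 tests, the raw q-values
# p * N / rank computed on the sorted p-values [0.01, 0.03, 0.04, 0.5] are
# [0.04, 0.06, 0.053, 0.5]; taking the running minimum from the largest p-value
# downwards and mapping back to the input order gives
# adjust_pvalue_bh(p) ≈ [0.04, 0.053, 0.053, 0.5].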
def eval_multinomial_loglikelihoods(matrix, profile_p, max_mem_gb=0.1):
"""Compute the multinomial log PMF for many barcodes
Args:
matrix (scipy.sparse.csc_matrix): Matrix of UMI counts (feature x barcode)
profile_p (np.ndarray(float)): Multinomial probability vector
max_mem_gb (float): Try to bound memory usage.
Returns:
log_likelihoods (np.ndarray(float)): Log-likelihood for each barcode
"""
gb_per_bc = float(matrix.shape[0] * matrix.dtype.itemsize) / (1024**3)
bcs_per_chunk = max(1, int(round(max_mem_gb / gb_per_bc)))
num_bcs = matrix.shape[1]
loglk = np.zeros(num_bcs)
for chunk_start in range(0, num_bcs, bcs_per_chunk):
chunk = slice(chunk_start, chunk_start + bcs_per_chunk)
matrix_chunk = matrix[:, chunk].transpose().toarray()
n = matrix_chunk.sum(1)
loglk[chunk] = sp_stats.multinomial.logpmf(matrix_chunk, n, p=profile_p)
return loglk
def simulate_multinomial_loglikelihoods(profile_p, umis_per_bc, num_sims=1000, jump=1000, n_sample_feature_block=1000000, verbose=False):
"""Simulate draws from a multinomial distribution for various values of N.
Uses the approximation from Lun et al. ( https://www.biorxiv.org/content/biorxiv/early/2018/04/04/234872.full.pdf )
Args:
profile_p (np.ndarray(float)): Probability of observing each feature.
umis_per_bc (np.ndarray(int)): UMI counts per barcode (multinomial N).
num_sims (int): Number of simulations per distinct N value.
jump (int): Vectorize the sampling if the gap between two distinct Ns exceeds this.
n_sample_feature_block (int): Vectorize this many feature samplings at a time.
Returns:
(distinct_ns (np.ndarray(int)), log_likelihoods (np.ndarray(float)):
distinct_ns is an array containing the distinct N values that were simulated.
log_likelihoods is a len(distinct_ns) x num_sims matrix containing the
simulated log likelihoods.
"""
distinct_n = np.flatnonzero(np.bincount(umis_per_bc.astype(np.int64)))
loglk = np.zeros((len(distinct_n), num_sims), dtype=float)
sampled_features = np.random.choice(len(profile_p), size=n_sample_feature_block, p=profile_p, replace=True)
k = 0
log_profile_p = np.log(profile_p)
for sim_idx in range(num_sims):
curr_counts = np.ravel(sp_stats.multinomial.rvs(distinct_n[0], profile_p, size=1))
curr_loglk = sp_stats.multinomial.logpmf(curr_counts, distinct_n[0], p=profile_p)
loglk[0, sim_idx] = curr_loglk
for i in range(1, len(distinct_n)):
step = distinct_n[i] - distinct_n[i - 1]
if step >= jump:
# Instead of iterating for each n, sample the intermediate ns all at once
curr_counts += np.ravel(sp_stats.multinomial.rvs(step, profile_p, size=1))
curr_loglk = sp_stats.multinomial.logpmf(curr_counts, distinct_n[i], p=profile_p)
assert not np.isnan(curr_loglk)
else:
# Iteratively sample between the two distinct values of n
for n in range(distinct_n[i - 1] + 1, distinct_n[i] + 1):
j = sampled_features[k]
k += 1
if k >= n_sample_feature_block:
# Amortize this operation
sampled_features = np.random.choice(len(profile_p), size=n_sample_feature_block, p=profile_p, replace=True)
k = 0
curr_counts[j] += 1
curr_loglk += log_profile_p[j] + np.log(float(n) / curr_counts[j])
loglk[i, sim_idx] = curr_loglk
return distinct_n, loglk
def compute_ambient_pvalues(umis_per_bc, obs_loglk, sim_n, sim_loglk):
"""Compute p-values for observed multinomial log-likelihoods
Args:
umis_per_bc (nd.array(int)): UMI counts per barcode
obs_loglk (nd.array(float)): Observed log-likelihoods of each barcode deriving from an ambient profile
sim_n (nd.array(int)): Multinomial N for simulated log-likelihoods
sim_loglk (nd.array(float)): Simulated log-likelihoods of shape (len(sim_n), num_simulations)
Returns:
pvalues (nd.array(float)): p-values
"""
assert len(umis_per_bc) == len(obs_loglk)
assert sim_loglk.shape[0] == len(sim_n)
# Find the index of the simulated N for each barcode
sim_n_idx = np.searchsorted(sim_n, umis_per_bc)
num_sims = sim_loglk.shape[1]
num_barcodes = len(umis_per_bc)
pvalues = np.zeros(num_barcodes)
for i in range(num_barcodes):
num_lower_loglk = np.sum(sim_loglk[sim_n_idx[i], :] < obs_loglk[i])
pvalues[i] = float(1 + num_lower_loglk) / (1 + num_sims)
return pvalues
def estimate_profile_sgt(matrix, barcode_indices, nz_feat):
""" Estimate a gene expression profile by Simple Good Turing.
Args:
raw_mat (sparse matrix): Sparse matrix of all counts
barcode_indices (np.array(int)): Barcode indices to use
nz_feat (np.array(int)): Indices of features that are non-zero at least once
Returns:
profile (np.array(float)): Estimated probabilities of length len(nz_feat).
"""
# Initial profile estimate
prof_mat = matrix[:, barcode_indices]
profile = np.ravel(prof_mat[nz_feat, :].sum(axis=1))
zero_feat = np.flatnonzero(profile == 0)
# Simple Good Turing estimate
p_smoothed, p0 = sgt_proportions(profile[np.flatnonzero(profile)])
# Distribute p0 equally among the zero elements.
p0_i = p0 / len(zero_feat)
profile_p = np.repeat(p0_i, len(nz_feat))
profile_p[np.flatnonzero(profile)] = p_smoothed
assert np.isclose(profile_p.sum(), 1.0)
return profile_p
# Construct a background expression profile from barcodes with <= T UMIs
def est_background_profile_sgt(matrix, use_bcs):
""" Estimate a gene expression profile on a given subset of barcodes.
Use Good-Turing to smooth the estimated profile.
Args:
matrix (scipy.sparse.csc_matrix): Sparse matrix of all counts
use_bcs (np.array(int)): Indices of barcodes to use (col indices into matrix)
Returns:
profile (use_features, np.array(float)): Estimated probabilities of length use_features.
"""
# Use features that are nonzero anywhere in the data
use_feats = np.flatnonzero(np.asarray(matrix.sum(1)))
# Estimate background profile
bg_profile_p = estimate_profile_sgt(matrix, use_bcs, use_feats)
return (use_feats, bg_profile_p)
# <NAME>'s version (Aug 2019)
def call_cells(matrix: sparse.csr_matrix, expected_n_cells: int = 5000) -> np.ndarray:
"""
Determine likely true cells among the barcodes by contrasting with the ambient RNA profile
Args:
matrix: expression matrix
expected_n_cells: expected number of true cells in the sample
Returns:
        (max_ambient_umis (float), pvalues_adj (np.ndarray(float))):
            max_ambient_umis: UMI count below which barcodes were treated as ambient RNA
            pvalues_adj: BH-adjusted p-value per barcode; small values indicate likely true cells
"""
n_barcodes = matrix.shape[1]
expected_n_cells = min(expected_n_cells, n_barcodes // 5)
total_umis = np.array(matrix.sum(axis=0))[0] # total UMIs per barcode
# upper limit of UMIs for barcodes considered ambient, calculated as greatest UMI count after removing twice the expected number of cells
max_ambient_umis = np.percentile(total_umis, 100 * (n_barcodes - expected_n_cells * 2) / n_barcodes)
# median number of UMIs among the top expected_n_cells barcodes
median_initial_umis = np.median(total_umis[total_umis > np.percentile(total_umis, 100 * (n_barcodes - expected_n_cells) / n_barcodes)])
min_cell_umis = int(max(500, median_initial_umis * 0.1)) # 10% of median, but at least 500 UMIs
# Ambient RNA beads, covering the range 20 to max_amient_umis
ambient_bcs = (total_umis < max_ambient_umis) & (total_umis > 20)
if ambient_bcs.sum() == 0:
# No beads were ambient, because cells had very low UMIs
logging.warning("No ambient RNA beads were found; maybe sample had too few cells?")
return max_ambient_umis, np.ones_like(total_umis)
try:
eval_features, ambient_profile_p = est_background_profile_sgt(matrix, ambient_bcs)
except SimpleGoodTuringError as e:
logging.error(e)
return max_ambient_umis, np.ones_like(total_umis)
# Evaluate candidate barcodes
eval_bcs = total_umis > min_cell_umis
eval_mat = matrix[eval_features, :][:, eval_bcs]
# Compute observed log-likelihood of barcodes being generated from ambient RNA
obs_loglk = eval_multinomial_loglikelihoods(eval_mat, ambient_profile_p)
# Simulate log likelihoods
distinct_ns, sim_loglk = simulate_multinomial_loglikelihoods(ambient_profile_p, total_umis[eval_bcs], num_sims=1000, verbose=True)
# Compute p-values
pvalues = compute_ambient_pvalues(total_umis[eval_bcs], obs_loglk, distinct_ns, sim_loglk)
pvalues_adj = adjust_pvalue_bh(pvalues)
pvalues_adj_all = np.ones_like(total_umis)
pvalues_adj_all[eval_bcs] = pvalues_adj
return max_ambient_umis, pvalues_adj_all
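# Minimal usage sketch (hypothetical variable names; `counts` is assumed to be a
# feature x barcode scipy.sparse matrix of raw UMI counts):
#
#   max_ambient_umis, adj_pvalues = call_cells(counts, expected_n_cells=5000)
#   cell_mask = adj_pvalues < 0.01   # barcodes unlikely to be ambient-only
#   n_cells = int(cell_mask.sum())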
```
#### File: loompy/loompy/loom_view.py
```python
from typing import *
import loompy
import numpy as np
class LoomView:
"""
An in-memory loom dataset
"""
def __init__(self, layers: loompy.LayerManager, row_attrs: loompy.AttributeManager, col_attrs: loompy.AttributeManager, row_graphs: loompy.GraphManager, col_graphs: loompy.GraphManager, *, filename: str = "", file_attrs: loompy.GlobalAttributeManager = None) -> None:
self.filename = filename
self.view = loompy.ViewManager(self)
self.layers = layers
self.shape = [layer.shape for (name, layer) in layers.items()][0]
self.ra = row_attrs
self.ca = col_attrs
self.row_graphs = row_graphs
self.col_graphs = col_graphs
self.attrs = file_attrs
# Compatibility with loompy v1.x
self.layer = layers
self.row_attrs = row_attrs
self.col_attrs = col_attrs
def __getitem__(self, slice_: Union[str, Tuple[Union[int, np.ndarray, slice], Union[int, np.ndarray, slice]]]) -> np.ndarray:
"""
Get a slice of the main matrix.
Args:
slice: A 2D slice object (see http://docs.h5py.org/en/latest/high/dataset.html) or np.ndarrays or ints
Returns:
A numpy matrix
"""
if type(slice_) is str:
return self.layers[slice_]
else:
return self.layers[""][slice_]
def _repr_html_(self) -> str:
"""
Return an HTML representation of the loom view, showing the upper-left 10x10 corner.
"""
return loompy.to_html(self)
def permute(self, ordering: np.ndarray, *, axis: int) -> None:
"""
Permute the view, by permuting its layers, attributes and graphs
Args:
ordering (np.ndarray): The desired ordering along the axis
axis (int): 0, permute rows; 1, permute columns
"""
if axis not in (0, 1):
raise ValueError("Axis must be 0 (rows) or 1 (columns)")
for layer in self.layers.values():
layer._permute(ordering, axis=axis)
if axis == 0:
if self.row_graphs is not None:
for g in self.row_graphs.values():
g._permute(ordering)
for a in self.row_attrs.values():
a._permute(ordering)
elif axis == 1:
if self.col_graphs is not None:
for g in self.col_graphs.values():
g._permute(ordering)
for a in self.col_attrs.values():
a._permute(ordering)
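# Minimal usage sketch (illustrative; `ds` is assumed to be an open loompy connection,
# "spliced" an existing layer name and TotalUMI an existing column attribute):
#
#   view = ds.view[:, ds.ca.ClusterID == 3]              # materialize an in-memory view
#   main = view[:, :]                                    # main matrix (tuple slice)
#   spliced = view["spliced"]                            # a named layer (string key)
#   view.permute(np.argsort(view.ca.TotalUMI), axis=1)   # reorder columns in place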
``` |
{
"source": "jlillest/geodjango-tigerleaflet-example",
"score": 2
} |
#### File: geodjango-tigerleaflet-example/config/views.py
```python
from django.views.generic import TemplateView
from tigerleaflet.models import State
class Index(TemplateView):
template_name = "pages/country.html"
def get_context_data(self, **kwargs):
return { 'title': "Welcome to the tigerleaflet demo!"}
class StateView(TemplateView):
template_name = "pages/state.html"
def get_context_data(self, **kwargs):
state_code = self.kwargs['state']
state_name = State.objects.get(usps_code=state_code.upper()).name
context = { 'title': "Showing " + state_name,
'state': state_code
}
return context
class CountyView(TemplateView):
template_name = "pages/county.html"
def get_context_data(self, **kwargs):
state_code = self.kwargs['state']
county = self.kwargs['county']
state_name = State.objects.get(usps_code=state_code.upper()).name
county_name = county.replace('_', ' ').title()
context = { 'title' : county_name + ", " + state_name,
'state' : state_code,
'county': county,
}
return context
``` |
{
"source": "jlim13/football_755",
"score": 3
} |
#### File: football_755/755_project/play_agent.py
```python
import sys
sys.path.insert(0, '/app/football/')
from gfootball.env import football_env
from gfootball.env import config
print (football_env.__file__)
#0 -> my team (left)
#1 -> opposing team (right)
class ObservationDebugger:
def __init__(self):
#only conerned with left player
self.observations = {} #key,value -> step, observation
self.step_ct = 0
def process_observation(self, step):
this_obs, this_action = self.observations[step]
this_ball_owned_team = this_obs['ball_owned_team']
this_ball_owned_player = this_obs['left_agent_controlled_player']
print (this_ball_owned_team, this_ball_owned_player, step, this_obs['score'], this_action )
for i in range(30):
prev_obs, prev_action = self.observations[step-i-1]
prev_ball_owned_team = prev_obs['ball_owned_team']
prev_ball_owned_player = prev_obs['left_agent_controlled_player']
print (prev_ball_owned_team, prev_ball_owned_player, step-i-1, prev_obs['score'], prev_action)
exit()
def add_observation(self, obs, action):
self.observations[self.step_ct] = (obs, action)
self.step_ct += 1
class Rectangle(object):
def __init__(self, xrange, yrange, zrange):
self.xrange = xrange # (xmin, xmax)
self.yrange = yrange
self.zrange = zrange
def contains_point(self, p):
if not all(hasattr(p, loc) for loc in 'xyz'):
raise TypeError("Can only check if 3D points are in the rect")
return all([self.xrange[0] <= p.x <= self.xrange[1],
self.yrange[0] <= p.y <= self.yrange[1],
self.zrange[0] <= p.z <= self.zrange[1]])
class Point(object):
def __init__(self, x, y ,z):
self.x = x
self.y = y
self.z = z
def __iter__(self):
yield from (self.x, self.y, self.z)
def __str__(self):
return "str {} {} {}".format(self.x, self.y, self.z)
ckpt_path = 'corner_ckpt_all/00200'
players = ["ppo2_cnn:left_players=1,policy=impala_cnn,checkpoint={0}".format(ckpt_path)]
cfg = config.Config({
'action_set':'default',
'dump_full_episodes': False,
'real_time':False,
'players' : players,
'level':'academy_pass_and_shoot_with_keeper'
})
env = football_env.FootballEnv(cfg)
env.reset()
obsDebugger = ObservationDebugger()
my_score = 0
opp_score = 0
step = 0
total_diff = 0.0
total_eps = 0
Opponent_GOAL = Rectangle(xrange = (.7, 1.1), yrange = (-.12,.12), zrange = (0, 2.5))
while True:
obs, rew, done, info = env.step([])
ball_pos = obs['ball']
# ball_point = Point(ball_pos[0], ball_pos[1], ball_pos[2])
# ball_on_targ = Opponent_GOAL.contains_point(ball_point)
# if not rew == 0:
# print (rew)
# print (info)
# exit()
if rew == 1.0:
my_score += 1
if rew == -1.0:
opp_score += 1
if done:
diff = my_score - opp_score
total_diff += diff
my_score = 0
opp_score = 0
env.reset()
total_eps += 1
if total_eps == 100:
break
print (total_diff)
print (total_diff/total_eps)
print (ckpt_path)
``` |
{
"source": "jlim13/lifelines",
"score": 2
} |
#### File: lifelines/examples/mixture_cure_model.py
```python
from lifelines.fitters import ParametricRegressionFitter
from autograd.scipy.special import expit
from autograd import numpy as np
from lifelines.utils.safe_exp import safe_exp
from lifelines.datasets import load_rossi
exp = safe_exp
dot = np.dot
class MixtureCureModel(ParametricRegressionFitter):
"""
Models two "cure" possibilities: default, repay
"""
_fitted_parameter_names = ["beta_repay", "c_repay", "c_default", "beta_default"]
def _cumulative_hazard(self, params, T, Xs):
p_default_ = exp(dot(Xs["c_default"], params["c_default"]))
p_repay_ = exp(dot(Xs["c_repay"], params["c_repay"]))
p_cure_ = 1.0
p_default = p_default_ / (p_cure_ + p_default_ + p_repay_)
p_repay = p_repay_ / (p_cure_ + p_default_ + p_repay_)
p_cure = p_cure_ / (p_cure_ + p_default_ + p_repay_)
# cox like hazards.
sf_default = exp(-exp(dot(Xs["beta_default"], params["beta_default"])) * T ** 2)
sf_repay = exp(-exp(dot(Xs["beta_repay"], params["beta_repay"])) * T)
sf_cure = 1.0
return -np.log((p_repay * sf_repay) + (p_default * sf_default) + (p_cure * sf_cure))
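# Sketch of what the cumulative hazard above encodes (for orientation; not part of the
# original example). The population survival function is the mixture
#
#   S(t) = p_cure * 1
#        + p_repay   * exp(-exp(x' beta_repay)   * t)
#        + p_default * exp(-exp(x' beta_default) * t^2)
#
# where the mixture weights (p_cure, p_repay, p_default) are a softmax over the linear
# predictors x' c_repay and x' c_default, with the cure class as the zero-score reference,
# and the returned cumulative hazard is H(t) = -log S(t).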
swf = MixtureCureModel(penalizer=0.001)
rossi = load_rossi()
rossi["intercept"] = 1.0
rossi["week"] = rossi["week"] / 54.0
covariates = {
"beta_default": rossi.columns,
"beta_repay": rossi.columns,
"c_default": rossi.columns,
"c_repay": rossi.columns,
}
swf.fit(rossi, "week", event_col="arrest", regressors=covariates, timeline=np.linspace(0, 2))
swf.print_summary(2)
``` |
{
"source": "jlim13/pytorch-CycleGAN-and-pix2pix",
"score": 3
} |
#### File: pytorch-CycleGAN-and-pix2pix/data/semialigned_dataset.py
```python
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import torchvision
import numpy as np
class SemiAlignedDataset(BaseDataset):
"""
    This dataset class loads a mixture of aligned (paired) and unaligned (unpaired) data.
    It requires four directories under the dataroot: 'unaligned/trainA' and 'unaligned/trainB'
    for unpaired images, plus 'aligned/trainA' and 'aligned/trainB' where paired images from
    the two domains share a common <label> parent directory.
    You can train the model with the dataset flag '--dataroot /path/to/data'.
    Similarly, you need to prepare the corresponding 'testA' and 'testB' directories at test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
#unaligned data
self.unaligned_dir_A = os.path.join(opt.dataroot,'unaligned', opt.phase + 'A') # create a path '/path/to/data/trainA'
self.unaligned_dir_B = os.path.join(opt.dataroot, 'unaligned', opt.phase + 'B') # create a path '/path/to/data/trainB'
self.unaligned_A_paths = sorted(make_dataset(self.unaligned_dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.unaligned_B_paths = sorted(make_dataset(self.unaligned_dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.unaligned_A_size = len(self.unaligned_A_paths) # get the size of dataset A
self.unaligned_B_size = len(self.unaligned_B_paths) # get the size of dataset B
#aligned data
self.aligned_dir_A = os.path.join(opt.dataroot,'aligned', opt.phase + 'A') # create a path '/path/to/data/trainA'
self.aligned_dir_B = os.path.join(opt.dataroot, 'aligned', opt.phase + 'B') # create a path '/path/to/data/trainB'
self.aligned_A_paths = sorted(make_dataset(self.aligned_dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.aligned_B_paths = sorted(make_dataset(self.aligned_dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.aligned_A_size = len(self.aligned_A_paths) # get the size of dataset A
self.aligned_B_size = len(self.aligned_B_paths) # get the size of dataset B
#create a dict to easily map pairs
#when we call __getitem__ for aligned, we will sample from aligned_A_paths
self.aligned_glossary = {}
for im in self.aligned_B_paths:
label = im.split('/')[-2]
if not label in self.aligned_glossary:
self.aligned_glossary[label] = [im]
else:
self.aligned_glossary[label].append(im)
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
flip = random.randint(0, 1)
if flip == 0: #unaligned
A_path = self.unaligned_A_paths[index % self.unaligned_A_size] # make sure index is within then range
if self.opt.serial_batches: # make sure index is within then range
index_B = index % self.unaligned_B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.unaligned_B_size - 1)
B_path = self.unaligned_B_paths[index_B]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
# apply image transformation
A = self.transform_A(A_img)
B = self.transform_B(B_img)
else: #aligned
A_path = self.aligned_A_paths[index % self.aligned_A_size]
label = A_path.split('/')[-2]
aligned_B_paths = self.aligned_glossary[label]
if self.opt.serial_batches: # make sure index is within then range
print ("here")
index_B = index % len(aligned_B_paths)
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, len(aligned_B_paths) - 1)
B_path = aligned_B_paths[index_B]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
# A_img = torchvision.transforms.functional.crop(A_img, top = 300 , left =0, height = 632-300 , width = 312)
# B_img = torchvision.transforms.functional.crop(B_img, top = 300 , left =0, height = 632-300 , width = 312)
# apply image transformation
A = self.transform_A(A_img)
B = self.transform_B(B_img)
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different number of images,
we take a maximum of
"""
return max(self.unaligned_A_size, self.unaligned_B_size)
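# Expected directory layout for `--dataroot /path/to/data` with `--phase train`,
# reconstructed from the path handling in __init__ (file names illustrative):
#
#   /path/to/data/unaligned/trainA/*.jpg
#   /path/to/data/unaligned/trainB/*.jpg
#   /path/to/data/aligned/trainA/<label>/*.jpg
#   /path/to/data/aligned/trainB/<label>/*.jpg
#
# Aligned pairs are matched through the <label> parent directory shared by A and B.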
``` |
{
"source": "jlim13/pytorch.sngan_projection",
"score": 2
} |
#### File: jlim13/pytorch.sngan_projection/evaluation.py
```python
import os
import numpy as np
import torchvision
import metrics.fid
import utils
def prune_dict(real_dict):
pruned_dict = {x: [] for x in range(len(real_dict.keys()))}
for k,v in real_dict.items():
flattened_vals = [item for sublist in v for item in sublist]
flattened_vals = np.asarray(flattened_vals)
pruned_dict[k] = flattened_vals
return pruned_dict
def evaluate(args, current_iter, gen, device,
inception_model=None, eval_iter=None,to_save=False):
"""Evaluate model using 100 mini-batches."""
calc_fid = (inception_model is not None) and (eval_iter is not None)
num_batches = args.n_eval_batches
gen.eval()
fake_list, real_list = [], []
conditional = args.cGAN
class_fake_dict = {x: [] for x in range(args.num_classes)}
class_real_dict = {x: [] for x in range(args.num_classes)}
for i in range(1, num_batches + 1):
if conditional:
class_id = i % args.num_classes
else:
class_id = None
fake = utils.generate_images(
gen, device, args.batch_size, args.gen_dim_z,
args.gen_distribution, class_id=class_id
)
if calc_fid and i <= args.n_fid_batches:
real_data_sample = next(eval_iter)
for real_class_label in range(args.num_classes):
real_labels = real_data_sample[1].cpu().numpy()
these_real_labels = real_labels[real_labels == real_class_label]
these_real_ims = real_data_sample[0].cpu().numpy()[real_labels == real_class_label]
class_real_dict[real_class_label].append(these_real_ims)
real_list.append((real_data_sample[0].cpu().numpy() + 1.0) / 2.0)
class_fake_dict[class_id].append((fake.cpu().numpy() + 1.0) / 2.0)
fake_list.append((fake.cpu().numpy() + 1.0) / 2.0)
if to_save:
# Save generated images.
root = args.eval_image_root
if conditional:
root = os.path.join(root, "class_id_{:04d}".format(i))
if not os.path.isdir(root):
os.makedirs(root)
fn = "image_iter_{:07d}_batch_{:04d}.png".format(current_iter, i)
torchvision.utils.save_image(
fake, os.path.join(root, fn), nrow=4, normalize=True, scale_each=True
)
#prune dicts
class_real_dict = prune_dict(class_real_dict)
class_fake_dict = prune_dict(class_fake_dict)
#calc intra-FID scores
for class_idx in range(args.num_classes):
real_images = class_real_dict[class_idx]
fake_images = class_fake_dict[class_idx]
print ("Class Number: {} | Number of real images {}. Number of fake images {}".format(class_idx, len(real_images), len(fake_images)))
mu_fake, sigma_fake = metrics.fid.calculate_activation_statistics(
fake_images, inception_model, args.batch_size, device=device
)
mu_real, sigma_real = metrics.fid.calculate_activation_statistics(
real_images, inception_model, args.batch_size, device=device
)
fid_score = metrics.fid.calculate_frechet_distance(
mu_fake, sigma_fake, mu_real, sigma_real
)
print ("Class Label {} || Fid Score {}".format(class_idx, fid_score))
# Calculate FID scores
if calc_fid:
fake_images = np.concatenate(fake_list)
real_images = np.concatenate(real_list)
print ("Number of real images {}. Number of fake images {}".format(len(real_images), len(fake_images)))
mu_fake, sigma_fake = metrics.fid.calculate_activation_statistics(
fake_images, inception_model, args.batch_size, device=device
)
mu_real, sigma_real = metrics.fid.calculate_activation_statistics(
real_images, inception_model, args.batch_size, device=device
)
fid_score = metrics.fid.calculate_frechet_distance(
mu_fake, sigma_fake, mu_real, sigma_real
)
else:
fid_score = -1000
gen.train()
return fid_score
``` |
{
"source": "jlim262/ps3-controller",
"score": 3
} |
#### File: jlim262/ps3-controller/main.py
```python
from ps3_controller import PS3Controller
from event_handler import EventHandler
class MyHandler(EventHandler):
def __init__(self):
super().__init__()
def handle_x(self):
return {'stop':True}
if __name__ == "__main__":
handler = MyHandler()
controller = PS3Controller(handler)
controller.run()
print('Program is stopped')
``` |
{
"source": "jlim262/py-socket-programming",
"score": 4
} |
#### File: jlim262/py-socket-programming/tcp_chat_client.py
```python
import socket
import sys
import argparse
host = 'localhost'
data_payload = 2048
def dumb_chat_client(port):
""" A dumb chat client """
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the server
print(f"Connecting to {host} port {port}")
sock.connect((host, port))
# Send data
try:
while True:
# Send data
message = input("> ")
if message == 'q':
break
sock.sendall(message.encode('utf-8'))
# Look for the response
data = sock.recv(data_payload)
print(f"server> {data.decode()}")
except socket.error as e:
print(f"Socket error: {str(e)}")
except Exception as e:
print(f"Other exception: {str(e)}")
finally:
print("Closing connection to the server")
sock.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Socket Server Example')
parser.add_argument('--port', action="store",
dest="port", type=int, required=True)
given_args = parser.parse_args()
port = given_args.port
dumb_chat_client(port)
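# Example run (assuming a matching chat/echo server from this collection is already
# listening on the chosen port; the reply shown depends on that server):
#
#   $ python tcp_chat_client.py --port 9900
#   Connecting to localhost port 9900
#   > hello
#   server> ...
#   > q        # 'q' ends the loop and closes the connection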
``` |
{
"source": "jlim262/shri-kit",
"score": 2
} |
#### File: motion_arbiter/src/sentence_classifier.py
```python
import os
import pickle
import re
import warnings
warnings.filterwarnings('ignore', category=RuntimeWarning)
import nltk.data
import rospkg
import rospy
from nltk import NaiveBayesClassifier, classify, pos_tag, word_tokenize
from nltk.corpus import stopwords
from mind_msgs.msg import EntitiesIndex, Reply, ReplyAnalyzed
STOPWORDS = set(stopwords.words('english'))
def dialogue_act_features(sentence):
features = {}
sentence_filtered = re.sub("[\'.,#!?:-]", '', sentence)
for word in word_tokenize(sentence_filtered):
if word not in STOPWORDS:
features['contains({})'.format(word.lower())] = True
return features
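# For example (illustrative): dialogue_act_features("Open the door") returns
# {'contains(open)': True, 'contains(door)': True} -- punctuation is stripped and
# lowercase stopwords such as "the" are dropped (the membership test is
# case-sensitive, so capitalised words always pass the filter).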
class SentenceClassifier:
def __init__(self):
self.sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
try:
with open(rospkg.RosPack().get_path('motion_arbiter') + '/config/classifier.pickle') as f:
self.classifier = pickle.load(f)
except IOError as e:
rospy.logerr(e)
exit(-1)
rospy.loginfo('Loaded classifier succeed.')
rospy.Subscriber('reply', Reply, self.handle_domain_reply)
self.pub_reply_analyzed = rospy.Publisher('reply_analyzed', ReplyAnalyzed, queue_size=10)
rospy.loginfo("\033[93m[%s]\033[0m initialized." % rospy.get_name())
def handle_domain_reply(self, msg):
sents = self.sent_detector.tokenize(msg.reply.strip())
msg = ReplyAnalyzed()
msg.header.stamp = rospy.Time.now()
for sent in sents:
# sperate tags and text
sent_tags = re.findall('(%[^}]+%)', sent)
sent_text = re.sub('(%[^}]+%)', '', sent).strip()
# if task manager select intent we use it, or we use classifier for select intent
result = ''
remain_tags = ''
if not any('sm=' in tag for tag in sent_tags):
feature = dialogue_act_features(sent_text)
result = self.classifier.classify(feature)
if sent_tags != []:
remain_tags = sent_tags[0]
else:
tag_text = sent_tags[0].strip('{}').split('|')
matching = [s for s in tag_text if "sm=" in s]
if len(matching) > 1:
rospy.logwarn('Only one sm tags allowed...')
result = matching[0].split('=')[1]
for s in tag_text:
if not "sm=" in s:
remain_tags += s + '|'
if remain_tags != '':
remain_tags = '{' + remain_tags.rstrip('|') + '}'
# select entities
entity = EntitiesIndex()
for i in pos_tag(word_tokenize(sent_text)):
if(i[1] in ['RB', 'PRP', 'NN', 'PRP$']):
entity.entity.append(i[0])
entity.entity_index.append(sent_text.index(i[0]))
msg.entities.append(entity)
msg.sents.append(remain_tags + ' ' + sent_text)
msg.act_type.append(result + '/%d'%len(sent_text))
self.pub_reply_analyzed.publish(msg)
if __name__ == '__main__':
rospy.init_node('sentence_classifier', anonymous=False)
m = SentenceClassifier()
rospy.spin()
```
#### File: motion_renderer/src/motion_renderer.py
```python
import json
import random
import rospy
import actionlib
from std_msgs.msg import String
from mind_msgs.msg import RenderSceneAction, RenderSceneFeedback, RenderSceneResult
from mind_msgs.msg import RenderItemAction, RenderItemGoal
from mind_msgs.srv import GetInstalledGestures
class MotionRenderer:
is_rendering = {}
render_client = {}
cb_start = {}
cb_done = {}
def __init__(self):
rospy.init_node('motion_renderer', anonymous=False)
self.server = actionlib.SimpleActionServer('render_scene', RenderSceneAction, self.execute_callback, False)
self.server.register_preempt_callback(self.preempt_callback)
self.server.start()
rospy.loginfo('\033[94m[%s]\033[0m wait for render item...'%rospy.get_name())
try:
rospy.wait_for_service('get_installed_gestures')
except rospy.exceptions.ROSException as e:
rospy.logerr(e)
quit()
self.gazefocus_pub = rospy.Publisher('gaze_focusing', String, queue_size=10)
self.render_client['say'] = actionlib.SimpleActionClient('render_speech', RenderItemAction)
self.render_client['say'].wait_for_server()
self.render_client['sm'] = actionlib.SimpleActionClient('render_gesture', RenderItemAction)
self.render_client['sm'].wait_for_server()
self.get_motion = rospy.ServiceProxy('get_installed_gestures', GetInstalledGestures)
json_data = self.get_motion()
self.motion_tag = json.loads(json_data.gestures)
rospy.loginfo('\033[94m[%s]\033[0m success to get motion_tag from gesture server' % rospy.get_name())
self.render_client['expression'] = actionlib.SimpleActionClient('render_facial_expression', RenderItemAction)
self.render_client['expression'].wait_for_server()
rospy.loginfo('\033[94m[%s]\033[0m ready to connect facial_expression'%rospy.get_name())
self.render_client['screen'] = actionlib.SimpleActionClient('render_screen', RenderItemAction)
self.render_client['screen'].wait_for_server()
rospy.loginfo('\033[94m[%s]\033[0m ready to connect screen'%rospy.get_name())
self.render_client['mobility'] = actionlib.SimpleActionClient('render_mobility', RenderItemAction)
self.render_client['mobility'].wait_for_server()
rospy.loginfo('\033[94m[%s]\033[0m ready to connect mobility'%rospy.get_name())
self.render_client['sound'] = actionlib.SimpleActionClient('render_sound', RenderItemAction)
self.render_client['sound'].wait_for_server()
rospy.loginfo('\033[94m[%s]\033[0m ready to connect sound'%rospy.get_name())
# Status flags
self.is_rendering['say'] = False
self.is_rendering['sm'] = False
self.is_rendering['expression'] = False
self.is_rendering['screen'] = False
self.is_rendering['sound'] = False
self.is_rendering['mobility'] = False
self.return_to_last_expression = False
# Register callback functions.
self.cb_start['say'] = self.handle_render_say_start
self.cb_done['say'] = self.handle_render_say_done
self.cb_start['sm'] = self.handle_render_sm_start
self.cb_done['sm'] = self.handle_render_sm_done
self.cb_start['expression'] = self.handle_render_expression_start
self.cb_done['expression'] = self.handle_render_expression_done
self.cb_start['screen'] = self.handle_render_screen_start
self.cb_done['screen'] = self.handle_render_screen_done
self.cb_start['mobility'] = self.handle_render_mobility_start
self.cb_done['mobility'] = self.handle_render_mobility_done
self.cb_start['sound'] = self.handle_render_sound_start
self.cb_done['sound'] = self.handle_render_sound_done
rospy.loginfo("\033[94m[%s]\033[0m initialized." % rospy.get_name())
rospy.spin()
def handle_render_say_done(self, state, result):
self.is_rendering['say'] = False
def handle_render_say_start(self):
self.is_rendering['say'] = True
def handle_render_sm_done(self, state, result):
self.is_rendering['sm'] = False
def handle_render_sm_start(self):
self.is_rendering['sm'] = True
def handle_render_expression_done(self, state, result):
self.is_rendering['expression'] = False
def handle_render_expression_start(self):
self.is_rendering['expression'] = True
def handle_render_screen_done(self, state, result):
self.is_rendering['screen'] = False
def handle_render_screen_start(self):
self.is_rendering['screen'] = True
def handle_render_mobility_done(self, state, result):
self.is_rendering['mobility'] = False
def handle_render_mobility_start(self):
self.is_rendering['mobility'] = True
def handle_render_sound_done(self, state, result):
self.is_rendering['sound'] = False
def handle_render_sound_start(self):
self.is_rendering['sound'] = True
def preempt_callback(self):
rospy.logwarn('\033[94m[%s]\033[0m rendering preempted.' % rospy.get_name())
for k in self.is_rendering.keys():
if self.is_rendering[k]:
self.render_client[k].cancel_all_goals()
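    # Shape of the RenderScene goal payload handled by execute_callback below,
    # reconstructed from the dictionary accesses in this file (keys optional,
    # values illustrative; 'screen', 'mobility' and 'sound' follow the same
    # {'render', 'offset'} form as 'say' and 'sm'):
    #
    #   {
    #     "say":        {"render": "Hello there", "offset": 0.0},
    #     "sm":         {"render": "greeting",    "offset": 0.5},
    #     "expression": {"render": "happiness",   "offset": 0.0},
    #     "gaze":       {"render": "person_1",    "offset": 0.2},
    #     "br":         {"time": 1},
    #     "emotion":    {"current_emotion": "neutral"}
    #   }
    #
    # Items other than 'br' and 'emotion' are rendered in ascending order of 'offset'.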
def execute_callback(self, goal):
rospy.loginfo('\033[94m[%s]\033[0m rendering started.' % rospy.get_name())
result = RenderSceneResult()
render_scene = json.loads(goal.render_scene)
render_scene_time = {}
for k, v in render_scene.items():
if k != 'emotion' and k != 'br' and v['render'] != '':
render_scene_time[k] = v['offset']
try:
if render_scene['expression'] != {}:
# render_scene['expression']['render'] = render_scene['expression']['render'].rstrip('~')
self.return_to_last_expression = False
except KeyError:
pass
delay_time = render_scene['br']['time']
for i in range(delay_time * 10):
rospy.sleep(0.1)
# Sort by delay time
scene_item_sorted_by_time = sorted(render_scene_time, key=render_scene_time.get)
first_offset_time = render_scene[scene_item_sorted_by_time[0]]['offset']
rospy.sleep(first_offset_time)
for i in range(0, len(scene_item_sorted_by_time) - 1):
if scene_item_sorted_by_time[i] == 'gaze':
focusing_name = render_scene[scene_item_sorted_by_time[i]]['render']
self.gazefocus_pub.publish(focusing_name)
else:
item_goal = RenderItemGoal()
item_goal.name = scene_item_sorted_by_time[i]
item_goal.data = render_scene[scene_item_sorted_by_time[i]]['render']
# if item_goal.data == '':
# continue
self.render_client[scene_item_sorted_by_time[i]].send_goal(
goal=item_goal,
done_cb=self.cb_done[scene_item_sorted_by_time[i]],
active_cb=self.cb_start[scene_item_sorted_by_time[i]])
while not rospy.is_shutdown() and not self.is_rendering[scene_item_sorted_by_time[i]]:
rospy.sleep(0.01)
pass
delta_time = render_scene[scene_item_sorted_by_time[i+1]]['offset'] - render_scene[scene_item_sorted_by_time[i]]['offset']
rospy.sleep(delta_time)
if scene_item_sorted_by_time[-1] == 'gaze':
focusing_name = render_scene[scene_item_sorted_by_time[-1]]['render']
self.gazefocus_pub.publish(focusing_name)
else:
item_goal = RenderItemGoal()
item_goal.name = scene_item_sorted_by_time[-1]
item_goal.data = render_scene[scene_item_sorted_by_time[-1]]['render']
self.render_client[scene_item_sorted_by_time[-1]].send_goal(
goal=item_goal,
done_cb=self.cb_done[scene_item_sorted_by_time[-1]],
active_cb=self.cb_start[scene_item_sorted_by_time[-1]])
while not rospy.is_shutdown() and not self.is_rendering[scene_item_sorted_by_time[-1]]:
rospy.sleep(0.01)
pass
while not rospy.is_shutdown():
rendering = False
for i in scene_item_sorted_by_time:
if i != 'gaze':
rendering = rendering or self.is_rendering[i]
if not rendering:
break
rospy.sleep(0.1)
self.gazefocus_pub.publish('')
if self.return_to_last_expression:
item_goal = RenderItemGoal()
item_goal.name = 'expression'
item_goal.data = render_scene['emotion']['current_emotion']
self.render_client['expression'].send_goal(
goal=item_goal,
done_cb=self.cb_done['expression'],
active_cb=self.cb_start['expression'])
while not rospy.is_shutdown() and not self.is_rendering['expression']:
rospy.sleep(0.01)
while not rospy.is_shutdown() and self.is_rendering['expression']:
rospy.sleep(0.01)
self.return_to_last_expression = False
'''
if goal.emotion == 'neutral':
self.pub_face_emotion.publish(
SetFacialExpression.NEUTRAL, goal.emotion_intensity)
elif goal.emotion == 'happiness':
self.pub_face_emotion.publish(
SetFacialExpression.HAPPINESS, goal.emotion_intensity)
elif goal.emotion == 'surprise':
self.pub_face_emotion.publish(
SetFacialExpression.HAPPINESS, goal.emotion_intensity)
elif goal.emotion == 'anger':
self.pub_face_emotion.publish(
SetFacialExpression.HAPPINESS, goal.emotion_intensity)
elif goal.emotion == 'sadness':
self.pub_face_emotion.publish(
SetFacialExpression.HAPPINESS, goal.emotion_intensity)
elif goal.emotion == 'disgust':
self.pub_face_emotion.publish(
SetFacialExpression.HAPPINESS, goal.emotion_intensity)
elif goal.emotion == 'fear':
self.pub_face_emotion.publish(
SetFacialExpression.HAPPINESS, goal.emotion_intensity)
if goal.gesture != '':
# When robot requested play gesture, the idle motion is disabled temporary
self.is_playing_now = True
# print goal.gesture
recv_data = goal.gesture.split(':')
if recv_data[0] == 'sm':
# print recv_data
if recv_data[1] in self.motion_tag:
gesture_name = self.motion_tag[recv_data[1]][
random.randrange(0, len(self.motion_tag[recv_data[1]]))]
else:
gesture_name = recv_data[1]
elif recv_data[0] == 'pm':
gesture_name = recv_data[1]
gesture_goal = GestureActionGoal(gesture=gesture_name)
self.gesture_client.send_goal(goal=gesture_goal, done_cb=self.gesture_done_cb,
feedback_cb=self.gesture_playing_cb, active_cb=self.gesture_start_cb)
# rospy.sleep(2)
if goal.say != '':
# When robot is speaking, the speech_recognition is disabled temporary
self.is_speaking_now = True
self.is_gesture_only = False
speech_goal = SpeechActionGoal(say=goal.say)
self.speech_client.send_goal(goal=speech_goal, done_cb=self.speech_done_cb,
feedback_cb=self.speech_speaking_cb, active_cb=self.speech_start_cb)
else:
# Gesture Only
self.is_gesture_only = True
while self.is_speaking_now or self.is_playing_now:
# rospy.logwarn('%d %d'%(self.is_speaking_now, self.is_playing_now))
if self.is_gesture_only:
rospy.sleep(0.2)
continue
if not self.is_speaking_now and self.is_playing_now:
self.sync_count_gesture += 1
if self.sync_count_gesture > 3:
self.gesture_client.cancel_all_goals()
self.sync_count_gesture = 0
rospy.sleep(0.2)
self.pub_face_emotion.publish(SetFacialExpression.PREVIOUS_FACE, 1.0)
'''
rospy.loginfo('\033[94m[%s]\033[0m rendering completed.' % rospy.get_name())
result.result = True
self.server.set_succeeded(result)
if __name__ == '__main__':
m = MotionRenderer()
``` |
{
"source": "jlim262/text2emotional-speech",
"score": 2
} |
#### File: src/dataset/tweet_index_dataset.py
```python
import os
import numpy as np
import pandas as pd
import torch
from torch.nn import functional as F
from transformers import AutoTokenizer, AutoConfig
from built.registry import Registry
@Registry.register(category='dataset')
class TweetIndexDatasetBase(torch.utils.data.Dataset):
def __init__(self, transformer_path, csv_path, transformer_type='roberta', train=False, split='train', max_len=96, inference=False):
df = pd.read_csv(csv_path)
self.sentiment2target = {'neutral': 0, 'positive': 1, 'negative': 2}
self.df = df.dropna().reset_index(drop=True)
self.max_len = max_len
self.labeled = 'selected_text' in self.df
self.transformer_type = transformer_type
self.tokenizer = self.get_tokenizer(self.transformer_type)
self.inference = inference
def get_tokenizer(self, transformer):
config = AutoConfig.from_pretrained(transformer)
tokenizer = AutoTokenizer.from_pretrained(
transformer, config=config)
return tokenizer
def __getitem__(self, index):
row = self.df.iloc[index]
try:
tweet = " " + " ".join(row.text.lower().split())
except:
raise RuntimeError(f'{row}')
sentiment = row.sentiment
encoded = self.tokenizer(
tweet,
sentiment,
max_length=self.max_len,
padding='max_length',
truncation=True,
add_special_tokens=True,
return_attention_mask=True,
return_token_type_ids=True,
return_offsets_mapping=True,
return_tensors='pt')
input_ids = encoded['input_ids']
attention_mask = encoded['attention_mask']
token_type_ids = encoded['token_type_ids']
offsets = encoded['offset_mapping']
char_centers = [(x[0] + x[1]) / 2 for x in offsets]
inputs = {}
inputs['input_ids'] = torch.squeeze(input_ids)
inputs['attention_mask'] = torch.squeeze(attention_mask)
inputs['token_type_ids'] = torch.squeeze(token_type_ids)
inputs['offsets'] = torch.squeeze(offsets)
inputs['tweet'] = tweet
targets = {}
if self.inference:
targets = torch.tensor(np.nan)
else:
# target = torch.tensor(self.sentiment2target[row.sentiment])
            # squeeze away the batch dimension so get_target_idx can unpack (start, end) offset pairs
            start_idx, end_idx, selected_text = self.get_target_idx(row, tweet, torch.squeeze(offsets, 0))
targets['start_idx'] = start_idx
targets['end_idx'] = end_idx
targets['selected_text'] = selected_text
return inputs, targets
# ids, masks, tweet, offsets, sentiment_id, sentiment_target, char_centers = self.get_input_data(
# row)
# data['ids'] = ids
# data['masks'] = masks
# data['tweet'] = tweet
# data['offsets'] = offsets
# data['sentiment_id'] = sentiment_id
# data['sentiment_target'] = sentiment_target
# data['char_centers'] = char_centers
# target['sentiment_id'] = sentiment_id
# target['sentiment_target'] = sentiment_target
# if self.labeled:
# start_idx, end_idx, selected_text = self.get_target_idx(
# row, tweet, offsets)
# data['start_idx'] = start_idx
# data['end_idx'] = end_idx
# data['selected_text'] = selected_text
# target['start_idx'] = start_idx
# target['end_idx'] = end_idx
# target['selected_text'] = selected_text
# target['offsets'] = offsets
# target['tweet'] = tweet
# return data, target
def __len__(self):
return len(self.df)
def get_target_idx(self, row, tweet, offsets):
start_idx = 0
end_idx = 0
try:
tweet = " " + " ".join(str(tweet).split())
selected_text = " ".join(row.selected_text.lower().split())
if len(selected_text) != selected_text.count(' '):
start_idx = tweet.find(selected_text)
end_idx = start_idx + len(selected_text)
char_targets = [0] * len(tweet)
            if start_idx is not None and end_idx is not None:
for ct in range(start_idx, end_idx):
char_targets[ct] = 1
target_idx = []
for j, (offset1, offset2) in enumerate(offsets):
if sum(char_targets[offset1: offset2]) > 0:
target_idx.append(j)
start_idx = target_idx[0]
end_idx = target_idx[-1]
except:
print("selected_text is empty with spaces")
return start_idx, end_idx, selected_text
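# A minimal usage sketch, assuming a csv with `text`, `sentiment` and `selected_text`
# columns and a Hugging Face model id; the paths and names below are placeholders:
#
#   from torch.utils.data import DataLoader
#   dataset = TweetIndexDatasetBase(
#       transformer_path="roberta-base", csv_path="data/train.csv",
#       transformer_type="roberta-base", max_len=96)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)
#   inputs, targets = next(iter(loader))
#   # inputs["input_ids"] is (32, 96); targets carries token-level start/end indices.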
```
#### File: src/dataset/tweet_sentiment_dataset.py
```python
import os
import numpy as np
import pandas as pd
import torch
from torch.nn import functional as F
from transformers import AutoTokenizer, AutoConfig
from built.registry import Registry
@Registry.register(category='dataset')
class TweetSentimentDataset(torch.utils.data.Dataset):
def __init__(self, transformer_path, csv_path, transformer_type='roberta', train=False, split='train', max_len=96, inference=False):
df = pd.read_csv(csv_path)
self.sentiment2target = {'neutral': 0, 'positive': 1, 'negative': 2}
self.df = df.dropna().reset_index(drop=True)
self.max_len = max_len
self.labeled = 'selected_text' in self.df
self.transformer_type = transformer_type
self.tokenizer = self.get_tokenizer(self.transformer_type)
self.inference = inference
def get_tokenizer(self, transformer):
config = AutoConfig.from_pretrained(transformer)
tokenizer = AutoTokenizer.from_pretrained(
transformer, config=config)
return tokenizer
def __getitem__(self, index):
row = self.df.iloc[index]
try:
tweet = " " + " ".join(row.text.lower().split())
except:
raise RuntimeError(f'{row}')
encoded = self.tokenizer(
tweet,
max_length=self.max_len,
padding='max_length',
truncation=True,
return_attention_mask=True,
return_token_type_ids=True,
return_tensors='pt')
input_ids = encoded['input_ids']
attention_mask = encoded['attention_mask']
token_type_ids = encoded['token_type_ids']
inputs = {}
inputs['input_ids'] = torch.squeeze(input_ids)
inputs['attention_mask'] = torch.squeeze(attention_mask)
inputs['token_type_ids'] = torch.squeeze(token_type_ids)
inputs['tweet'] = tweet
if self.inference:
target = torch.tensor(np.nan)
else:
target = torch.tensor(self.sentiment2target[row.sentiment])
return inputs, target
def __len__(self):
return len(self.df)
```
#### File: src/logger/tweet_logger.py
```python
import os
import logging
import torch
import numpy as np
from sklearn import metrics
from built.logger import LoggerBase
from built.registry import Registry
@Registry.register(category="hooks")
class BaseLogger(LoggerBase):
def log_extras(self, inputs: any, targets: any, outputs: any):
pass
```
#### File: src/splitter/traintest_splitter.py
```python
import pandas as pd
from sklearn.model_selection import train_test_split
from built.registry import Registry
# @Registry.register(category="splitter")
class TraintestSplitter(object):
def __init__(self, csv_path, train_csv_path, test_csv_path, ratio=0.8, shuffle=True, random_state=42):
self.csv_path = csv_path
self.train_csv_path = train_csv_path
self.test_csv_path = test_csv_path
self.ratio = ratio
self.shuffle = shuffle
self.random_state = random_state
self.target_key = None
def split(self):
df = self.read_file()
train, test = train_test_split(
df, train_size=self.ratio, shuffle=self.shuffle, random_state=self.random_state, stratify=df[self.target_key])
train.to_csv(self.train_csv_path, index=False)
test.to_csv(self.test_csv_path, index=False)
def read_file(self):
pass
@Registry.register(category="splitter")
class SentimentDataSplitter(TraintestSplitter):
def __init__(self, csv_path, train_csv_path, test_csv_path, ratio=0.8, shuffle=True, random_state=42):
super().__init__(csv_path, train_csv_path,
test_csv_path, ratio, shuffle, random_state)
self.target_key = 'sentiment'
def read_file(self):
return pd.read_csv(self.csv_path)
@Registry.register(category="splitter")
class EmotionDataSplitter(TraintestSplitter):
def __init__(self, csv_path, train_csv_path, test_csv_path, ratio=0.8, shuffle=True, random_state=42):
super().__init__(csv_path, train_csv_path,
test_csv_path, ratio, shuffle, random_state)
self.target_key = 'Emotion'
def read_file(self):
return pd.read_csv(self.csv_path, sep='\t', encoding='utf-16')
@Registry.register(category="splitter")
class Emotion13DataSplitter(TraintestSplitter):
def __init__(self, csv_path, train_csv_path, test_csv_path, ratio=0.8, shuffle=True, random_state=42):
super().__init__(csv_path, train_csv_path,
test_csv_path, ratio, shuffle, random_state)
self.target_key = 'Emotion'
def read_file(self):
return pd.read_csv(self.csv_path)
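# A minimal usage sketch; the csv paths are placeholders and the input csv is assumed
# to contain a `sentiment` column for stratification:
#
#   splitter = SentimentDataSplitter(
#       csv_path="data/tweets.csv",
#       train_csv_path="data/train.csv",
#       test_csv_path="data/test.csv",
#       ratio=0.8)
#   splitter.split()  # writes stratified train/test csv files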
``` |
{
"source": "jlim262/toy-cyclegan",
"score": 3
} |
#### File: jlim262/toy-cyclegan/cyclegan.py
```python
import os
import torch
import torch.nn as nn
import itertools
from torch.nn import init
from collections import OrderedDict
from networks import Generator, Discriminator
from utils.image_pool import ImagePool
from utils.util import mkdirs
class Cyclegan():
"""
This class defines the cyclegan model, loss functions and optimizers.
A is the source domain and B is the target domain.
Generators:
self.netG_A: generates fake_B by self.netG_A(real_A)
self.netG_B: generates fake_A by self.netG_B(real_B)
Descriminators:
self.netD_A: discriminates between fake_B and real_B
self.netD_B: discriminates between fake_A and real_A
"""
def __init__(self, device):
self.device = device
self.netG_A = self.__init_weights(Generator(3, use_dropout=False).to(self.device))
self.netG_B = self.__init_weights(Generator(3, use_dropout=False).to(self.device))
self.netD_A = self.__init_weights(Discriminator(3).to(self.device))
self.netD_B = self.__init_weights(Discriminator(3).to(self.device))
self.criterion_gan = nn.MSELoss()
self.criterion_cycle = nn.L1Loss()
self.criterion_idt = nn.L1Loss()
self.optimizer_G = torch.optim.Adam(itertools.chain(
self.netG_A.parameters(), self.netG_B.parameters()), lr=0.0002, betas=(0.5, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(
self.netD_A.parameters(), self.netD_B.parameters()), lr=0.0002, betas=(0.5, 0.999))
self.optimizers = [self.optimizer_G, self.optimizer_D]
self.fake_A_pool = ImagePool(50)
self.fake_B_pool = ImagePool(50)
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
self.lambda_A = 10
self.lambda_B = 10
self.lambda_idt = 0.5
self.save_dir = './models'
def get_current_losses(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
# float(...) works for both scalar tensor and float number
errors_ret[name] = float(getattr(self, 'loss_' + name))
return errors_ret
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def __init_weights(self, net, init_gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
init.normal_(m.weight.data, 0.0, init_gain)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func)
return net
def __optimize_G(self, real_A, real_B):
self.set_requires_grad([self.netD_A, self.netD_B], False)
self.optimizer_G.zero_grad()
def generator_loss(real_source, target_generator, target_discriminator):
# generator should generate a fake image which can fool discriminator
fake_target = target_generator(real_source)
prediction_of_fake_target = target_discriminator(fake_target)
loss = self.criterion_gan(
prediction_of_fake_target, torch.tensor(1.0).to(self.device).expand_as(prediction_of_fake_target))
return loss
def cycle_loss(real_source, target_generator, source_generator):
fake_target = target_generator(real_source)
reconstructed_source = source_generator(fake_target)
loss = self.criterion_cycle(reconstructed_source, real_source)
return loss
def identity_loss(real_source, target_generator):
fake_target = target_generator(real_source)
loss = self.criterion_idt(fake_target, real_source)
return loss
self.loss_G_A = generator_loss(real_A, self.netG_A, self.netD_A)
self.loss_G_B = generator_loss(real_B, self.netG_B, self.netD_B)
self.loss_cycle_A = cycle_loss(real_A, self.netG_A, self.netG_B) * self.lambda_A
self.loss_cycle_B = cycle_loss(real_B, self.netG_B, self.netG_A) * self.lambda_B
self.loss_idt_A = identity_loss(real_B, self.netG_A) * self.lambda_B * self.lambda_idt
self.loss_idt_B = identity_loss(real_A, self.netG_B) * self.lambda_A * self.lambda_idt
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
self.loss_G.backward()
self.optimizer_G.step()
def __optimize_D(self, real_A, real_B):
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad()
def discriminator_loss(real_source, real_target, fake_target_pool, target_generator, target_discriminator):
# discriminator should predict fake_target as False because fake_target is not a real image
fake_target = target_generator(real_source)
fake_target = fake_target_pool.query(fake_target)
prediction_of_fake_target = target_discriminator(
fake_target.detach())
loss_fake = self.criterion_gan(
prediction_of_fake_target, torch.tensor(0.0).to(self.device).expand_as(prediction_of_fake_target))
# Also, discriminator should predict real_target as True because real_target is a real image
prediction_of_real_target = target_discriminator(real_target)
loss_real = self.criterion_gan(
prediction_of_real_target, torch.tensor(1.0).to(self.device).expand_as(prediction_of_real_target))
loss = (loss_fake + loss_real) * 0.5
return loss
# optimize netD_A
self.loss_D_A = discriminator_loss(real_A, real_B, self.fake_B_pool, self.netG_A, self.netD_A)
self.loss_D_A.backward()
# optimize netD_B
self.loss_D_B = discriminator_loss(real_B, real_A, self.fake_A_pool, self.netG_B, self.netD_B)
self.loss_D_B.backward()
self.optimizer_D.step()
def optimize_parameters(self, real_A, real_B):
real_A = real_A.to(self.device)
real_B = real_B.to(self.device)
self.__optimize_G(real_A, real_B)
self.__optimize_D(real_A, real_B)
def forward(self, real_A, real_B):
real_A = real_A.to(self.device)
real_B = real_B.to(self.device)
return self.netG_A(real_A), self.netG_B(real_B)
def save_networks(self, epoch):
mkdirs(self.save_dir)
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if torch.cuda.is_available():
torch.save(net.cpu().state_dict(), save_path)
net.to(self.device)
else:
torch.save(net.cpu().state_dict(), save_path)
def load_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
print('loading the model from %s' % load_path)
state_dict = torch.load(load_path, map_location=self.device)
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
net.load_state_dict(state_dict)
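# A minimal training-loop sketch, assuming `paired_loader` yields (real_A, real_B)
# image batches; the epoch count is a placeholder:
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model = Cyclegan(device)
#   for epoch in range(200):
#       for real_A, real_B in paired_loader:
#           model.optimize_parameters(real_A, real_B)
#       print(model.get_current_losses())
#       model.save_networks(epoch)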
``` |
{
"source": "jlimmm/distiller",
"score": 2
} |
#### File: distiller/distiller/thresholding.py
```python
import torch
import numpy as np
from distiller.norms import *
__all__ = ["threshold_mask", "GroupThresholdMixin",
"group_threshold_binary_map", "group_threshold_mask"]
def threshold_mask(param, threshold):
"""Create a threshold mask for the provided parameter tensor using
magnitude thresholding.
Arguments:
param: a parameter tensor which should be pruned.
threshold: the pruning threshold.
Returns:
prune_mask: The pruning mask.
"""
return torch.gt(torch.abs(param), threshold).type(param.type())
class GroupThresholdMixin(object):
"""A mixin class to add group thresholding capabilities
TODO: this does not need to be a mixin - it should be made a simple function. We keep this until we refactor
"""
def group_threshold_mask(self, param, group_type, threshold, threshold_criteria):
ret = group_threshold_mask(param, group_type, threshold, threshold_criteria)
if isinstance(ret, tuple):
return ret[0]
return ret
def group_threshold_binary_map(param, group_type, threshold, threshold_criteria):
"""Return a threshold binary map for the provided parameter and group type.
This function thresholds a parameter tensor, using the provided threshold.
Thresholding is performed by breaking the parameter tensor into groups as
specified by group_type, computing the norm of each group instance using
threshold_criteria, and then thresholding that norm. The result is called
binary_map and contains 1s where the group norm was larger than the threshold
value, zero otherwise.
Args:
param: The parameter to mask
group_type: The elements grouping type (structure).
          One of: 2D, 3D, Channels, Rows, Cols
threshold: The threshold
threshold_criteria: The thresholding criteria.
('Mean_Abs', 'Mean_L1', 'L1') - thresholds the entire element group using the mean of the
absolute values of the tensor elements.
          ('Mean_L2', 'L2') - thresholds the entire element group using the L2 norm
'Max' - thresholds the entire group using the magnitude of the largest
element in the group.
Returns:
binary_map
"""
if isinstance(threshold, torch.Tensor):
threshold = threshold.item()
length_normalized = 'Mean' in threshold_criteria
if threshold_criteria in ('Mean_Abs', 'Mean_L1', 'L1'):
norm_fn = l1_norm
elif threshold_criteria in ('Mean_L2', 'L2'):
norm_fn = l2_norm
elif threshold_criteria == 'Max':
norm_fn = max_norm
else:
raise ValueError("Illegal threshold_criteria %s", threshold_criteria)
if group_type == '2D':
assert param.dim() == 4, "This thresholding is only supported for 4D weights"
thresholds = param.new_full((param.size(0) * param.size(1),), threshold)
norms = kernels_norm(param, norm_fn, length_normalized=length_normalized)
elif group_type == 'Rows':
assert param.dim() == 2, "This regularization is only supported for 2D weights"
thresholds = param.new_full((param.size(0),), threshold)
norms = sub_matrix_norm(param, norm_fn, group_len=1, length_normalized=length_normalized, dim=1)
elif group_type == 'Cols':
assert param.dim() == 2, "This regularization is only supported for 2D weights"
thresholds = param.new_full((param.size(1),), threshold)
norms = sub_matrix_norm(param, norm_fn, group_len=1, length_normalized=length_normalized, dim=0)
elif group_type == '3D' or group_type == 'Filters':
assert param.dim() == 4 or param.dim() == 3, "This pruning is only supported for 3D and 4D weights"
n_filters = param.size(0)
thresholds = param.new_full((n_filters,), threshold)
norms = filters_norm(param, norm_fn, length_normalized=length_normalized)
elif group_type == 'Channels':
assert param.dim() == 4, "This thresholding is only supported for 4D weights"
n_channels = param.size(1)
thresholds = param.new_full((n_channels,), threshold)
norms = channels_norm(param, norm_fn, length_normalized=length_normalized)
binary_map = norms.gt(thresholds).type(param.type())
return binary_map
def group_threshold_mask(param, group_type, threshold, threshold_criteria, binary_map=None):
"""Return a threshold mask for the provided parameter and group type.
Args:
param: The parameter to mask
group_type: The elements grouping type (structure).
          One of: 2D, 3D, Channels, Rows, Cols
threshold: The threshold
threshold_criteria: The thresholding criteria.
'Mean_Abs' thresholds the entire element group using the mean of the
absolute values of the tensor elements.
'Max' thresholds the entire group using the magnitude of the largest
element in the group.
binary_map:
Returns:
(mask, binary_map)
"""
assert group_type in ('2D', 'Rows', 'Cols', '3D', 'Filters', 'Channels')
if binary_map is None:
binary_map = group_threshold_binary_map(param, group_type, threshold, threshold_criteria)
# Now let's expand back up to a 4D mask
return expand_binary_map(param, group_type, binary_map)
def expand_binary_map(param, group_type, binary_map):
"""Expands a binary_map to the shape of the provided parameter.
Args:
param: The parameter to mask
group_type: The elements grouping type (structure).
          One of: 2D, 3D, 4D, Channels, Rows, Cols
binary_map: the binary map that matches the specified `group_type`
Returns:
(mask, binary_map)
"""
assert group_type in ('2D', 'Rows', 'Cols', '3D', 'Filters', '4D', 'Channels')
assert binary_map is not None
# Now let's expand back up to a 4D mask
if group_type == 'Channels' and param.dim() == 2:
group_type = 'Cols'
if group_type == '2D':
a = binary_map.expand(param.size(2) * param.size(3),
param.size(0) * param.size(1)).t()
return a.view(*param.shape), binary_map
elif group_type == 'Rows':
return binary_map.expand(param.size(1), param.size(0)).t(), binary_map
elif group_type == 'Cols':
return binary_map.expand(*param.shape), binary_map
elif group_type == '3D' or group_type == 'Filters':
a = binary_map.expand(np.prod(param.shape[1:]), param.size(0)).t()
return a.view(*param.shape), binary_map
elif group_type == 'Channels':
num_filters, num_channels = param.size(0), param.size(1)
a = binary_map.expand(num_filters, num_channels)
c = a.unsqueeze(-1)
d = c.expand(num_filters, num_channels, param.size(2) * param.size(3)).contiguous()
return d.view(*param.shape), binary_map
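# A minimal usage sketch; the weight shape and threshold value are arbitrary placeholders:
#
#   weights = torch.randn(64, 32, 3, 3)   # a 4D convolution weight tensor
#   mask, binary_map = group_threshold_mask(
#       weights, group_type="Filters", threshold=0.2, threshold_criteria="Mean_L1")
#   pruned = weights * mask               # zeroes out whole filters whose mean L1 norm is below 0.2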
```
#### File: auto_compression/amc/parallel-finetune.py
```python
import os
import glob
import math
import shutil
import torch
import torch.multiprocessing as multiprocessing
from torch.multiprocessing import Process, set_start_method
import pandas as pd
from collections import OrderedDict
import csv
import distiller.apputils.image_classifier as classifier
class _CSVLogger(object):
def __init__(self, fname, headers):
"""Create the CSV file and write the column names"""
with open(fname, 'w') as f:
writer = csv.writer(f)
writer.writerow(headers)
self.fname = fname
def add_record(self, fields):
# We close the file each time to flush on every write, and protect against data-loss on crashes
with open(self.fname, 'a') as f:
writer = csv.writer(f)
writer.writerow(fields)
f.flush()
class FTStatsLogger(_CSVLogger):
def __init__(self, fname):
headers = ['dir', 'name', 'macs', 'search_top1', 'top1', 'top5', 'loss']
super().__init__(fname, headers)
class FinetuningTask(object):
def __init__(self, args):
self.args = args
def __call__(self, data_loader):
return finetune_checkpoint(*self.args, data_loader)
# Boiler-plat code (src: https://pymotw.com/2/multiprocessing/communication.html)
class FinetuningProcess(Process):
def __init__(self, task_queue, result_queue, data_loader):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.result_queue = result_queue
self.data_loader = data_loader
def run(self):
proc_name = self.name
while True:
next_task = self.task_queue.get()
if next_task is None:
print('%s: Exiting' % proc_name)
self.task_queue.task_done()
break
print('executing on %s: %s' % (proc_name, next_task))
answer = next_task(self.data_loader)
self.task_queue.task_done()
self.result_queue.put(answer)
return
def finetune_directory(ft_dir, stats_file, app_args, cleanup_ft_dir=False, checkpoints=None):
"""Fine tune all the checkpoint files we find in the immediate-directory specified.
For each checkpoint file we find, we create and queue a FinetuningTask.
A FinetuningProcess will pickup the FinetuningTask and process it.
"""
print("Fine-tuning directory %s" % ft_dir)
if not checkpoints:
# Get a list of the checkpoint files
checkpoints = glob.glob(os.path.join(ft_dir, "*checkpoint.pth.tar"))
assert checkpoints
# We create a subdirectory, where we will write all of our output
ft_output_dir = os.path.join(ft_dir, "ft")
os.makedirs(ft_output_dir, exist_ok=True)
print("Writing results to directory %s" % ft_output_dir)
app_args.output_dir = ft_output_dir
# Multi-process queues
tasks = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
# Create and launch the fine-tuning processes
processes = []
n_processes = min(app_args.processes, len(checkpoints))
for i in range(n_processes):
# Pre-load the data-loaders of each fine-tuning process once
app = classifier.ClassifierCompressor(app_args, script_dir=os.path.dirname(__file__))
data_loader = classifier.load_data(app.args)
# Delete log directories
shutil.rmtree(app.logdir)
processes.append(FinetuningProcess(tasks, results, data_loader))
# Start the process
processes[-1].start()
n_gpus = torch.cuda.device_count()
# Enqueue all of the fine-tuning tasks
for (instance, ckpt_file) in enumerate(checkpoints):
tasks.put(FinetuningTask(args=(ckpt_file, instance%n_gpus, app_args)))
# Push an end-of-tasks marker
for i in range(len(processes)):
tasks.put(None)
# Wait until all tasks finish
tasks.join()
# Start printing results
results_dict = OrderedDict()
while not results.empty():
result = results.get()
results_dict[result[0]] = result[1]
# Read the results of the AMC experiment (we'll want to use some of the data)
import pandas as pd
df = pd.read_csv(os.path.join(ft_dir, "amc.csv"))
assert len(results_dict) > 0
# Log some info for each checkpoint
    for ckpt_name in sorted(results_dict.keys()):
net_search_results = df[df["ckpt_name"] == ckpt_name[:-len("_checkpoint.pth.tar")]]
search_top1 = net_search_results["top1"].iloc[0]
normalized_macs = net_search_results["normalized_macs"].iloc[0]
log_entry = (ft_output_dir, ckpt_name, normalized_macs,
search_top1, *results_dict[ckpt_name])
print("%s <> %s: %.2f %.2f %.2f %.2f %.2f" % log_entry)
stats_file.add_record(log_entry)
if cleanup_ft_dir:
# cleanup: remove the "ft" directory
shutil.rmtree(ft_output_dir)
def finetune_checkpoint(ckpt_file, gpu, app_args, loaders):
    # Usually when we train, we also want to look at and graph the validation score of each epoch.
    # When we run many fine-tuning sessions at once, we don't care to look at the validation score.
    # However, we want to perform a sort-of "early-stopping" in which we use the checkpoint of the
    # best performing training epoch, and not the checkpoint created by the last epoch.
    # We determine the "best" checkpoint by looking at the validation accuracy.
name = os.path.basename(ckpt_file)
print("Fine-tuning checkpoint %s" % name)
app_args.gpus = str(gpu)
app_args.name = name
app_args.deprecated_resume = ckpt_file
app = classifier.ClassifierCompressor(app_args, script_dir=os.path.dirname(__file__))
app.train_loader, app.val_loader, app.test_loader = loaders
best = [float("-inf"), float("-inf"), float("inf")]
for epoch in range(app.args.epochs):
validate = epoch >= math.floor((1 - app.args.validate_enable_factor) * app.args.epochs)
top1, top5, loss = app.train_validate_with_scheduling(epoch, validate=validate, verbose=False)
if validate:
if top1 > best[0]:
best = [top1, top5, loss]
if app.args.validate_enable_factor == 0:
# We did not validate, so our score is the performance on the Test dataset
return (name, app.test())
return (name, best)
if __name__ == '__main__':
def get_immediate_subdirs(a_dir):
subdirs = [os.path.join(a_dir, name) for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name)) and name != "ft"]
subdirs.sort()
return subdirs
def add_parallel_args(argparser):
group = argparser.add_argument_group('parallel fine-tuning')
group.add_argument('--processes', type=int, default=4,
help="Number of parallel experiment processes to run in parallel")
group.add_argument('--scan-dir', metavar='DIR', required=True, help='path to checkpoints')
group.add_argument('--output-csv', metavar='DIR', required=True,
help='Name of the CSV file containing the output')
group.add_argument('--top-performing-chkpts', action='store_true', default=False,
help='Fine tune only the best performing discovered checkpoints (sorted by search-Top1)')
group.add_argument('--validate-enable-factor', type=float, default=0.2,
help="What portion of the epochs to validate (0=never validate; 1=validate after every "
"epoch; 0<factor<1 validate the last factor*ags.epochs epcohs."
"In the latter case, the reported score for the fine-tuned model is the "
"best performing validation score.")
return argparser
def get_best_checkpoints(ft_dir, best_nets=5):
df_amc_results = pd.read_csv(os.path.join(ft_dir, "amc.csv"))
top1_sorted_df_amc_results = df_amc_results.sort_values(by=['top1'], ascending=False)
top1_sorted_df_amc_results = top1_sorted_df_amc_results[0:best_nets]
checkpoints = [os.path.join(ft_dir, ckpt + "_checkpoint.pth.tar")
for ckpt in top1_sorted_df_amc_results.ckpt_name]
return checkpoints
try:
set_start_method('forkserver')
except RuntimeError:
pass
# Parse arguments
argparser = classifier.init_classifier_compression_arg_parser()
add_parallel_args(argparser)
app_args = argparser.parse_args()
print("Starting fine-tuning")
stats_file = FTStatsLogger(os.path.join(app_args.scan_dir, app_args.output_csv))
ft_dirs = get_immediate_subdirs(app_args.scan_dir)
for ft_dir in ft_dirs:
checkpoints = None
if app_args.top_performing_chkpts:
checkpoints = get_best_checkpoints(ft_dir)
finetune_directory(ft_dir, stats_file, app_args, checkpoints=checkpoints)
```
#### File: rl_libs/hanlab/hanlab_if.py
```python
from examples.auto_compression.amc.rl_libs.hanlab.agent import DDPG, train
import logging
msglogger = logging.getLogger()
class ArgsContainer(object):
def __init__(self):
pass
class RlLibInterface(object):
"""Interface to a hanlab DDPG impelementation."""
def solve(self, env, args):
msglogger.info("AMC: Using hanlab")
agent_args = ArgsContainer()
agent_args.bsize = args.batch_size
agent_args.tau = 0.01
agent_args.discount = 1.
agent_args.epsilon = 50000
agent_args.init_delta = 0.5
agent_args.delta_decay = 0.95
agent_args.warmup = env.amc_cfg.ddpg_cfg.num_heatup_episodes
agent_args.lr_c = env.amc_cfg.ddpg_cfg.critic_lr
agent_args.lr_a = env.amc_cfg.ddpg_cfg.actor_lr
agent_args.hidden1 = 300
agent_args.hidden2 = 300
agent_args.rmsize = env.amc_cfg.ddpg_cfg.replay_buffer_size
agent_args.window_length = 1
agent_args.train_episode = (env.amc_cfg.ddpg_cfg.num_heatup_episodes +
env.amc_cfg.ddpg_cfg.num_training_episodes)
agent_args.output = "."
agent = DDPG(args.observation_len, 1, agent_args)
train(agent_args.train_episode, agent, env, agent_args.output, agent_args.warmup)
```
#### File: seq2seq/models/encoder.py
```python
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from distiller.modules import *
import seq2seq.data.config as config
class ResidualRecurrentEncoder(nn.Module):
def __init__(self, vocab_size, hidden_size=128, num_layers=8, bias=True,
dropout=0, batch_first=False, embedder=None):
super(ResidualRecurrentEncoder, self).__init__()
self.batch_first = batch_first
self.rnn_layers = nn.ModuleList()
self.rnn_layers.append(
nn.LSTM(hidden_size, hidden_size, num_layers=1, bias=bias,
batch_first=batch_first, bidirectional=True))
self.rnn_layers.append(
nn.LSTM((2 * hidden_size), hidden_size, num_layers=1, bias=bias,
batch_first=batch_first))
for _ in range(num_layers - 2):
self.rnn_layers.append(
nn.LSTM(hidden_size, hidden_size, num_layers=1, bias=bias,
batch_first=batch_first))
self.dropout = nn.Dropout(p=dropout)
if embedder is not None:
self.embedder = embedder
else:
self.embedder = nn.Embedding(vocab_size, hidden_size,
padding_idx=config.PAD)
# Adding submodules for basic ops to allow quantization:
self.eltwiseadd_residuals = nn.ModuleList([EltwiseAdd() for _ in range(2, len(self.rnn_layers))])
def forward(self, inputs, lengths):
x = self.embedder(inputs)
# bidirectional layer
x = pack_padded_sequence(x, lengths.cpu().numpy(),
batch_first=self.batch_first)
x, _ = self.rnn_layers[0](x)
x, _ = pad_packed_sequence(x, batch_first=self.batch_first)
# 1st unidirectional layer
x = self.dropout(x)
x, _ = self.rnn_layers[1](x)
# the rest of unidirectional layers,
# with residual connections starting from 3rd layer
for i in range(2, len(self.rnn_layers)):
residual = x
x = self.dropout(x)
x, _ = self.rnn_layers[i](x)
x = self.eltwiseadd_residuals[i-2](x, residual)
return x
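# A minimal forward-pass sketch; vocabulary size, batch size and sequence lengths are
# arbitrary placeholders (lengths must be sorted in decreasing order for packing):
#
#   import torch
#   encoder = ResidualRecurrentEncoder(vocab_size=32000, hidden_size=128, num_layers=4)
#   tokens = torch.randint(0, 32000, (10, 2))   # (seq_len, batch) because batch_first=False
#   lengths = torch.tensor([10, 8])
#   context = encoder(tokens, lengths)          # -> (seq_len, batch, hidden_size)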
```
#### File: GNMT/seq2seq/utils.py
```python
from contextlib import contextmanager
import os
import logging.config
import numpy as np
import torch
from torch.nn.utils.rnn import pack_padded_sequence
import seq2seq.data.config as config
def barrier():
""" Calls all_reduce on dummy tensor."""
if torch.distributed.is_initialized():
torch.distributed.all_reduce(torch.cuda.FloatTensor(1))
torch.cuda.synchronize()
def get_rank():
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
@contextmanager
def sync_workers():
""" Gets distributed rank and synchronizes workers at exit"""
rank = get_rank()
yield rank
barrier()
def setup_logging(log_file='log.log'):
"""Setup logging configuration
"""
class RankFilter(logging.Filter):
def __init__(self, rank):
self.rank = rank
def filter(self, record):
record.rank = self.rank
return True
rank = get_rank()
rank_filter = RankFilter(rank)
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(rank)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
filename=log_file,
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(rank)s: %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.getLogger('').addFilter(rank_filter)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, skip_first=True):
self.reset()
self.skip = skip_first
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
if self.skip:
self.skip = False
else:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def batch_padded_sequences(seq, batch_first=False, sort=False):
if sort:
key = lambda item: len(item[1])
indices, seq = zip(*sorted(enumerate(seq), key=key, reverse=True))
else:
indices = range(len(seq))
lengths = [len(sentence) for sentence in seq]
batch_length = max(lengths)
seq_tensor = torch.LongTensor(batch_length, len(seq)).fill_(config.PAD)
for idx, sentence in enumerate(seq):
end_seq = lengths[idx]
seq_tensor[:end_seq, idx].copy_(sentence[:end_seq])
if batch_first:
seq_tensor = seq_tensor.t()
return seq_tensor, lengths, indices
def debug_tensor(tensor, name):
logging.info(name)
tensor = tensor.float().cpu().numpy()
logging.info('MIN: {min} MAX: {max} AVG: {mean} STD: {std} NAN: {nans} INF: {infs}'
.format(min=tensor.min(), max=tensor.max(), mean=tensor.mean(),
std=tensor.std(), nans=np.isnan(tensor).sum(), infs=np.isinf(tensor).sum()))
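# Minimal usage sketches for the helpers above:
#
#   meter = AverageMeter(skip_first=False)
#   for loss in (0.9, 0.7, 0.5):
#       meter.update(loss)
#   print(meter.avg)   # -> 0.7
#
#   padded, lengths, indices = batch_padded_sequences(
#       [torch.LongTensor([1, 2, 3]), torch.LongTensor([4, 5])], batch_first=True, sort=True)
#   # padded has shape (2, 3); the shorter sentence is padded with config.PAD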
``` |
{
"source": "jlimsf/FewShotMotionTransfer",
"score": 2
} |
#### File: jlimsf/FewShotMotionTransfer/validate.py
```python
import os
import traceback
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
# TransferDataSet and the project-local `utils` helpers are imported from this
# repository's own modules (their exact import paths are not shown in this excerpt).
def inference(model, config, device_idxs=[0]):
config['phase'] = 'inference'
config['hflip'] = False
dataset = TransferDataSet(config['target_root'], config['source_root'], config)
data_loader = DataLoader(dataset, batch_size=config['batchsize'], num_workers=4, pin_memory=True, shuffle=False)
device = torch.device("cuda:" + str(device_idxs[0]))
image_size = config['resize']
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
folder = os.path.join(config["output_folder"], config["name"])
if not os.path.exists(folder):
os.system("mkdir -p "+folder)
print ("Writing to folder: {}".format(folder))
writer = cv2.VideoWriter(os.path.join(folder, config['output_name']), fourcc, 24, (image_size*3, image_size))
print (config['output_name'])
with torch.no_grad():
try:
iterator = tqdm(enumerate(data_loader), total=len(data_loader))
for i, data in iterator:
data_gpu = {key: item.to(device) for key, item in data.items()}
mask, fake_image, real_image, body, coordinate, texture = model(data_gpu, "inference")
label = utils.d_colorize(data_gpu["body"]).cpu().numpy()
B, _, H, W = coordinate.size()
real_image = data['image'].cpu().numpy()
fake_image = np.clip(fake_image.cpu().numpy(), 0, 1)
outputs = np.concatenate((real_image, label, fake_image), axis=3)
for output in outputs:
write_image = (output[::-1].transpose((1, 2, 0)) * 255).astype(np.uint8)
writer.write(write_image)
except Exception as e:
print(traceback.format_exc())
writer.release()
writer.release()
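# A minimal invocation sketch; every value below is a placeholder, `model` is assumed to be
# a trained network wrapper, and TransferDataSet may require additional config keys:
#
#   config = {
#       "target_root": "data/target", "source_root": "data/source",
#       "batchsize": 4, "resize": 256, "output_folder": "results",
#       "name": "demo", "output_name": "transfer.mp4",
#   }
#   inference(model, config, device_idxs=[0])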
``` |
{
"source": "jlin27/Ax",
"score": 3
} |
#### File: ax/core/base.py
```python
from datetime import datetime
import numpy as np
import pandas as pd
from ax.utils.common.equality import (
datetime_equals,
equality_typechecker,
same_elements,
)
from ax.utils.common.typeutils import numpy_type_to_python_type
class Base(object):
"""Metaclass for core Ax classes."""
@equality_typechecker
def __eq__(self, other: "Base"):
for field in self.__dict__.keys():
self_val = getattr(self, field)
other_val = getattr(other, field)
self_val = numpy_type_to_python_type(self_val)
other_val = numpy_type_to_python_type(other_val)
if type(self_val) != type(other_val):
return False
if field == "_experiment":
# prevent infinite loop when checking equality of Trials
equal = self_val.name == other_val.name
elif isinstance(self_val, list):
equal = same_elements(self_val, other_val)
elif isinstance(self_val, np.ndarray):
equal = np.array_equal(self_val, other_val)
elif isinstance(self_val, datetime):
equal = datetime_equals(self_val, other_val)
elif isinstance(self_val, pd.DataFrame):
equal = self_val.equals(other_val)
else:
equal = self_val == other_val
if not equal:
return False
return True
```
#### File: ax/plot/slice.py
```python
from copy import deepcopy
from typing import Any, Dict, Optional
import numpy as np
from ax.core.observation import ObservationFeatures
from ax.modelbridge.base import ModelBridge
from ax.plot.base import AxPlotConfig, AxPlotTypes
from ax.plot.helper import (
TNullableGeneratorRunsDict,
get_fixed_values,
get_grid_for_parameter,
get_plot_data,
get_range_parameter,
)
def plot_slice(
model: ModelBridge,
param_name: str,
metric_name: str,
generator_runs_dict: TNullableGeneratorRunsDict = None,
relative: bool = False,
density: int = 50,
slice_values: Optional[Dict[str, Any]] = None,
fixed_features: Optional[ObservationFeatures] = None,
) -> AxPlotConfig:
"""Plot predictions for a 1-d slice of the parameter space.
Args:
model: ModelBridge that contains model for predictions
param_name: Name of parameter that will be sliced
metric_name: Name of metric to plot
generator_runs_dict: A dictionary {name: generator run} of generator runs
whose arms will be plotted, if they lie in the slice.
relative: Predictions relative to status quo
density: Number of points along slice to evaluate predictions.
slice_values: A dictionary {name: val} for the fixed values of the
other parameters. If not provided, then the status quo values will
be used if there is a status quo, otherwise the mean of numeric
parameters or the mode of choice parameters. Ignored if
fixed_features is specified.
fixed_features: An ObservationFeatures object containing the values of
features (including non-parameter features like context) to be set
in the slice.
"""
if generator_runs_dict is None:
generator_runs_dict = {}
parameter = get_range_parameter(model, param_name)
grid = get_grid_for_parameter(parameter, density)
plot_data, raw_data, cond_name_to_parameters = get_plot_data(
model=model, generator_runs_dict=generator_runs_dict, metric_names={metric_name}
)
if fixed_features is not None:
slice_values = fixed_features.parameters
else:
fixed_features = ObservationFeatures(parameters={})
fixed_values = get_fixed_values(model, slice_values)
prediction_features = []
for x in grid:
predf = deepcopy(fixed_features)
predf.parameters = fixed_values.copy()
predf.parameters[param_name] = x
prediction_features.append(predf)
f, cov = model.predict(prediction_features)
f_plt = f[metric_name]
sd_plt = np.sqrt(cov[metric_name][metric_name])
config = {
"arm_data": plot_data,
"arm_name_to_parameters": cond_name_to_parameters,
"f": f_plt,
"fit_data": raw_data,
"grid": grid,
"metric": metric_name,
"param": param_name,
"rel": relative,
"setx": fixed_values,
"sd": sd_plt,
"is_log": parameter.log_scale,
}
return AxPlotConfig(config, plot_type=AxPlotTypes.SLICE)
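# A minimal usage sketch, assuming `model` is a ModelBridge fitted on an experiment with a
# range parameter named "lr" and a metric named "accuracy" (both names are placeholders):
#
#   config = plot_slice(model, param_name="lr", metric_name="accuracy", density=50)
#   # `config` is an AxPlotConfig that Ax's rendering utilities can turn into a figure.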
``` |
{
"source": "jlin27/ClassyVision-1",
"score": 2
} |
#### File: dataset/transforms/util.py
```python
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torchvision.transforms as transforms
from . import ClassyTransform, build_transforms, register_transform
class ImagenetConstants:
"""Constant variables related to the image classification.
MEAN: often used to be subtracted from image RGB value. Computed on ImageNet.
STD: often used to divide the image RGB value after mean centering. Computed
on ImageNet.
CROP_SIZE: the size of image cropping which is often the input to deep network.
RESIZE: the size of rescaled image.
"""
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
CROP_SIZE = 224
RESIZE = 256
@register_transform("apply_transform_to_key")
class ApplyTransformToKey:
"""Serializable class that applies a transform to a key specified field in samples.
"""
def __init__(self, transform: Callable, key: Union[int, str] = "input") -> None:
"""The constructor method of ApplyTransformToKey class.
Args:
transform: a callable function that takes sample data of type dict as input
key: the key in sample whose corresponding value will undergo
the transform
"""
self.key: Union[int, str] = key
self.transform: Callable = transform
@classmethod
def from_config(cls, config: Dict[str, Any]):
transform = build_transforms(config["transforms"])
return cls(transform=transform, key=config["key"])
def __call__(
self, sample: Union[Tuple[Any], Dict[str, Any]]
) -> Union[Tuple[Any], Dict[str, Any]]:
"""Updates sample by applying a transform to the value at the specified key.
Args:
sample: input sample which will be transformed
"""
if sample is None:
return sample
# Asserts + deal with tuple immutability
convert_to_tuple = False
if isinstance(sample, dict):
assert (
self.key in sample
), "This transform only supports dicts with key '{}'".format(self.key)
elif isinstance(sample, (tuple, list)):
assert self.key < len(
sample
), "This transform only supports tuples / lists with key less "
"than {length}, key provided {key}".format(length=len(sample), key=self.key)
# Convert to list for transformation
if isinstance(sample, tuple):
convert_to_tuple = True
sample = list(sample)
sample[self.key] = self.transform(sample[self.key])
if convert_to_tuple:
sample = tuple(sample)
return sample
@register_transform("imagenet_augment")
class ImagenetAugmentTransform(ClassyTransform):
"""The default image transform with data augmentation.
It is often useful for training models on Imagenet. It sequentially resizes
the image into a random scale, takes a random spatial cropping, randomly flips
the image horizontally, transforms PIL image data into a torch.Tensor and
normalizes the pixel values by mean subtraction and standard deviation division.
"""
def __init__(
self,
crop_size: int = ImagenetConstants.CROP_SIZE,
mean: List[float] = ImagenetConstants.MEAN,
std: List[float] = ImagenetConstants.STD,
):
"""The constructor method of ImagenetAugmentTransform class.
Args:
crop_size: expected output size per dimension after random cropping
mean: a 3-tuple denoting the pixel RGB mean
std: a 3-tuple denoting the pixel RGB standard deviation
"""
self.transform = transforms.Compose(
[
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
]
)
def __call__(self, img):
"""Callable function which applies the tranform to the input image.
Args:
image: input image that will undergo the transform
"""
return self.transform(img)
@register_transform("imagenet_no_augment")
class ImagenetNoAugmentTransform(ClassyTransform):
"""The default image transform without data augmentation.
It is often useful for testing models on Imagenet. It sequentially resizes
the image, takes a central cropping, transforms PIL image data into a
torch.Tensor and normalizes the pixel values by mean subtraction and standard
deviation division.
"""
def __init__(
self,
resize: int = ImagenetConstants.RESIZE,
crop_size: int = ImagenetConstants.CROP_SIZE,
mean: List[float] = ImagenetConstants.MEAN,
std: List[float] = ImagenetConstants.STD,
):
"""The constructor method of ImagenetNoAugmentTransform class.
Args:
resize: expected image size per dimension after resizing
crop_size: expected size for a dimension of central cropping
mean: a 3-tuple denoting the pixel RGB mean
std: a 3-tuple denoting the pixel RGB standard deviation
"""
self.transform = transforms.Compose(
[
transforms.Resize(resize),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
]
)
def __call__(self, img):
"""Callable function which applies the tranform to the input image.
Args:
image: input image that will undergo the transform
"""
return self.transform(img)
@register_transform("generic_image_transform")
class GenericImageTransform(ClassyTransform):
"""Default transform for images used in the classification task
This transform does several things. First, it expects a tuple or
list input (torchvision datasets supply tuples / lists). Second,
it applies a user-provided image transforms to the first entry in
the tuple (again, matching the torchvision tuple format). Third,
it transforms the tuple to a dict sample with entries "input" and
"target".
The defaults are for the standard imagenet augmentations
This is just a convenience wrapper to cover the common
use-case. You can get the same behavior by composing `torchvision
transforms <https://pytorch.org/docs/stable/torchvision/transforms.html>`_
+ :class:`ApplyTransformToKey` + :class:`TupleToMapTransform`.
"""
def __init__(
self, transform: Optional[Callable] = None, split: Optional[str] = None
):
"""Constructor for GenericImageTransfrom
Only one of the two arguments (*transform*, *split*) should be specified.
Args:
transform: A callable or ClassyTransform to be applied to the image only
split: 'train' or 'test'
"""
assert (
split is None or transform is None
), "If split is not None then transform must be None"
assert split in [None, "train", "test"], (
"If specified, split should be either 'train' or 'test', "
"instead got {}".format(split)
)
self._transform = transform
if split is not None:
self._transform = (
ImagenetAugmentTransform()
if split == "train"
else ImagenetNoAugmentTransform()
)
@classmethod
def from_config(cls, config: Dict[str, Any]):
transform = None
if "transforms" in config:
transform = build_transforms(config["transforms"])
split = config.get("split")
return cls(transform, split)
def __call__(self, sample: Tuple[Any]):
"""Applied transform to sample
Args:
sample: A tuple with length >= 2. The first entry should
be the image data, the second entry should be the
target data.
"""
image = sample[0]
transformed_image = (
self._transform(image) if self._transform is not None else image
)
new_sample = {"input": transformed_image, "target": sample[1]}
# Any additional metadata is just appended under index of tuple
if len(sample) > 2:
for i in range(2, len(sample)):
new_sample[str(i)] = sample[i]
return new_sample
@register_transform("tuple_to_map")
class TupleToMapTransform(ClassyTransform):
"""A transform which maps image data from tuple to dict.
This transform has a list of keys (key1, key2, ...),
takes a sample of the form (data1, data2, ...) and
returns a sample of the form {key1: data1, key2: data2, ...}
It is useful for mapping output from datasets like the `PyTorch
ImageFolder <https://github.com/pytorch/vision/blob/master/torchvision/
datasets/folder.py#L177>`_ dataset (tuple) to dict with named data fields.
If sample is already a dict with the required keys, pass sample through.
"""
def __init__(self, list_of_map_keys: List[str]):
"""The constructor method of TupleToMapTransform class.
Args:
list_of_map_keys: a list of dict keys that in order will be mapped
to items in the input data sample list
"""
self._map_keys = list_of_map_keys
def __call__(self, sample):
"""Transform sample from type tuple to type dict.
Args:
sample: input sample which will be transformed
"""
# If already a dict/map with appropriate keys, exit early
if isinstance(sample, dict):
for key in self._map_keys:
assert (
key in sample
), "Sample {sample} must be a tuple or a dict with keys {keys}".format(
sample=str(sample), keys=str(self._map_keys)
)
return sample
assert len(sample) == len(self._map_keys), (
"Provided sample tuple must have same number of keys "
"as provided to transform"
)
output_sample = {}
for idx, s in enumerate(sample):
output_sample[self._map_keys[idx]] = s
return output_sample
DEFAULT_KEY_MAP = TupleToMapTransform(["input", "target"])
def build_field_transform_default_imagenet(
config: Optional[List[Dict[str, Any]]],
default_transform: Optional[Callable] = None,
split: Optional[bool] = None,
key: Union[int, str] = "input",
key_map_transform: Optional[Callable] = DEFAULT_KEY_MAP,
) -> Callable:
"""Returns a ApplyTransformToKey which applies a transform on the specified key.
The transform is built from the config, if it is not None.
Otherwise, uses one of the two mutually exclusive args: If
default_transform is not None, it is used. If split is not None,
imagenet transforms are used, using augmentation for "train", no
augmentation otherwise.
This function also provides an additional
function for mapping from tuples (or other keys) to a desired set
of keys
Args:
config: field transform config
default_transform: used if config is None
split: split for dataset, e.g. "train" or "test"
key: Key to apply transform to
key_map_transform: Used to produce desired map / keys
(e.g. for torchvision datasets, default samples is a
tuple so this argument can be used to map
(input, target) -> {"input": input, "target": target})
"""
assert (
default_transform is None or split is None
), "Can only specify one of default_transform and split"
if config is None:
if default_transform is not None:
transform = default_transform
elif split is not None:
transform = (
ImagenetAugmentTransform()
if split == "train"
else ImagenetNoAugmentTransform()
)
else:
raise ValueError("No transform config provided with no defaults")
else:
transform = build_transforms(config)
transform = ApplyTransformToKey(transform, key=key)
if key_map_transform is None:
return transform
return transforms.Compose([key_map_transform, transform])
def default_unnormalize(img):
"""Default unnormalization transform which undo the "transforms.Normalize".
Specially, it cancels out mean subtraction and standard deviation division.
Args:
img (torch.Tensor): image data to which the transform will be applied
"""
# TODO T39752655: Allow this to be configurable
img = img.clone()
for channel, std, mean in zip(img, ImagenetConstants.STD, ImagenetConstants.MEAN):
channel.mul_(std).add_(mean)
return img
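# A minimal usage sketch; `pil_image` and `target` stand in for one item of a torchvision
# ImageFolder-style dataset:
#
#   transform = build_field_transform_default_imagenet(config=None, split="train")
#   sample = transform((pil_image, target))
#   # sample is now {"input": normalized image tensor, "target": target}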
```
#### File: classy_vision/heads/__init__.py
```python
import copy
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from .classy_head import ClassyHead
FILE_ROOT = Path(__file__).parent
HEAD_REGISTRY = {}
HEAD_CLASS_NAMES = set()
def register_head(name):
"""Registers a ClassyHead subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyHead from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyHead subclass, like this:
.. code-block:: python
@register_head("my_head")
class MyHead(ClassyHead):
...
To instantiate a head from a configuration file, see
:func:`build_head`."""
def register_head_cls(cls):
if name in HEAD_REGISTRY:
raise ValueError("Cannot register duplicate head ({})".format(name))
if not issubclass(cls, ClassyHead):
raise ValueError(
"Head ({}: {}) must extend ClassyHead".format(name, cls.__name__)
)
if cls.__name__ in HEAD_CLASS_NAMES:
raise ValueError(
"Cannot register head with duplicate class name ({})".format(
cls.__name__
)
)
HEAD_REGISTRY[name] = cls
HEAD_CLASS_NAMES.add(cls.__name__)
return cls
return register_head_cls
def build_head(config):
"""Builds a ClassyHead from a config.
This assumes a 'name' key in the config which is used to determine what
head class to instantiate. For instance, a config `{"name": "my_head",
"foo": "bar"}` will find a class that was registered as "my_head"
(see :func:`register_head`) and call .from_config on it."""
assert "name" in config, "Expect name in config"
assert "unique_id" in config, "Expect a global unique id in config"
assert config["name"] in HEAD_REGISTRY, "unknown head"
name = config["name"]
head_config = copy.deepcopy(config)
del head_config["name"]
return HEAD_REGISTRY[name].from_config(head_config)
# automatically import any Python files in the heads/ directory
import_all_modules(FILE_ROOT, "classy_vision.heads")
from .fully_connected_head import FullyConnectedHead # isort:skip
from .fully_convolutional_linear_head import FullyConvolutionalLinearHead # isort:skip
from .identity_head import IdentityHead # isort:skip
__all__ = [
"ClassyHead",
"FullyConnectedHead",
"FullyConvolutionalLinearHead",
"IdentityHead",
"build_head",
"register_head",
]
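# A minimal usage sketch; the registered head name and config keys below are assumptions
# based on the FullyConnectedHead import above, not values taken from this file:
#
#   head = build_head({
#       "name": "fully_connected",
#       "unique_id": "default_head",
#       "num_classes": 1000,
#       "in_plane": 2048,
#   })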
```
#### File: classy_vision/losses/classy_loss.py
```python
from typing import Any, Dict
import torch.nn as nn
class ClassyLoss(nn.Module):
"""
Base class to calculate the loss during training.
This implementation of :class:`torch.nn.Module` allows building
the loss object from a configuration file.
"""
def __init__(self):
"""
Constructor for ClassyLoss.
"""
super(ClassyLoss, self).__init__()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyLoss":
"""Instantiates a ClassyLoss from a configuration.
Args:
config: A configuration for a ClassyLoss.
Returns:
A ClassyLoss instance.
"""
raise NotImplementedError()
def forward(self, output, target):
"""
Compute the loss for the provided sample.
Refer to :class:`torch.nn.Module` for more details.
"""
raise NotImplementedError
def get_optimizer_params(self, bn_weight_decay=False):
"""Gets optimizer params.
The default implementation is very simple. Most losses have no learned
parameters, so this is rarely needed.
"""
params = [
param for param in self.parameters(recurse=True) if param.requires_grad
]
return {"regularized_params": params, "unregularized_params": []}
def get_classy_state(self) -> Dict[str, Any]:
"""Get the state of the ClassyLoss.
The returned state is used for checkpointing. Note that most losses are
stateless and do not need to save any state.
Returns:
A state dictionary containing the state of the loss.
"""
return self.state_dict()
def set_classy_state(self, state: Dict[str, Any]) -> None:
"""Set the state of the ClassyLoss.
Args:
            state: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the loss from a checkpoint. Note
that most losses are stateless and do not need to load any state.
"""
return self.load_state_dict(state)
def has_learned_parameters(self) -> bool:
"""Does this loss have learned parameters?"""
return any(
len(params) > 0 for (_, params) in self.get_optimizer_params().items()
)
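# A minimal subclass sketch showing the pieces a concrete loss provides; `register_loss`
# is assumed to be importable from classy_vision.losses, mirroring register_head:
#
#   @register_loss("my_cross_entropy")
#   class MyCrossEntropy(ClassyLoss):
#       @classmethod
#       def from_config(cls, config):
#           return cls()
#
#       def forward(self, output, target):
#           return nn.functional.cross_entropy(output, target)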
``` |
{
"source": "jlin27/pytorch-dp",
"score": 3
} |
#### File: pytorch-dp/examples/mnist.py
```python
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchdp import PrivacyEngine
from torchvision import datasets, transforms
from tqdm import tqdm
# Precomputed characteristics of the MNIST dataset
MNIST_MEAN = 0.1307
MNIST_STD = 0.3081
class SampleConvNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = nn.Conv2d(16, 32, 4, 2)
self.fc1 = nn.Linear(32 * 4 * 4, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = F.relu(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = F.relu(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = F.relu(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
def name(self):
return "SampleConvNet"
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
criterion = nn.CrossEntropyLoss()
losses = []
for _batch_idx, (data, target) in enumerate(tqdm(train_loader)):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
losses.append(loss.item())
if not args.disable_dp:
epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent(args.delta)
print(
f"Train Epoch: {epoch} \t"
f"Loss: {np.mean(losses):.6f} "
f"(ε = {epsilon:.2f}, δ = {args.delta}) for α = {best_alpha}"
)
else:
print(f"Train Epoch: {epoch} \t Loss: {np.mean(losses):.6f}")
def test(args, model, device, test_loader):
model.eval()
criterion = nn.CrossEntropyLoss()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in tqdm(test_loader):
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target).item() # sum up batch loss
pred = output.argmax(
dim=1, keepdim=True
) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n".format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
return correct / len(test_loader.dataset)
def main():
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"-b",
"--batch-size",
type=int,
default=64,
metavar="B",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1024,
metavar="TB",
help="input batch size for testing (default: 1024)",
)
parser.add_argument(
"-n",
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"-r",
"--n-runs",
type=int,
default=1,
metavar="R",
help="number of runs to average on (default: 1)",
)
parser.add_argument(
"--lr",
type=float,
default=.1,
metavar="LR",
help="learning rate (default: .1)",
)
parser.add_argument(
"--sigma",
type=float,
default=1.0,
metavar="S",
help="Noise multiplier (default 1.0)",
)
parser.add_argument(
"-c",
"--max-per-sample-grad_norm",
type=float,
default=1.0,
metavar="C",
help="Clip per-sample gradients to this norm (default 1.0)",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
metavar="D",
help="Target delta (default: 1e-5)",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
help="GPU ID for this process (default: 'cuda')",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="Save the trained model (default: false)",
)
parser.add_argument(
"--disable-dp",
action="store_true",
default=False,
help="Disable privacy training and just train with vanilla SGD",
)
parser.add_argument(
"--data-root",
type=str,
default="../mnist",
help="Where MNIST is/will be stored",
)
args = parser.parse_args()
device = torch.device(args.device)
kwargs = {"num_workers": 1, "pin_memory": True}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
args.data_root,
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((MNIST_MEAN,), (MNIST_STD,))]
),
),
batch_size=args.batch_size,
shuffle=True,
**kwargs,
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
args.data_root,
train=False,
transform=transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((MNIST_MEAN,), (MNIST_STD,))]
),
),
batch_size=args.test_batch_size,
shuffle=True,
**kwargs,
)
run_results = []
for _ in range(args.n_runs):
model = SampleConvNet().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0)
if not args.disable_dp:
privacy_engine = PrivacyEngine(
model,
batch_size=args.batch_size,
sample_size=len(train_loader.dataset),
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier=args.sigma,
max_grad_norm=args.max_per_sample_grad_norm,
)
privacy_engine.attach(optimizer)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
run_results.append(test(args, model, device, test_loader))
if len(run_results) > 1:
print("Accuracy averaged over {} runs: {:.2f}% ± {:.2f}%".format(
len(run_results),
np.mean(run_results) * 100,
np.std(run_results) * 100
)
)
repro_str = (
f"{model.name()}_{args.lr}_{args.sigma}_"
f"{args.max_per_sample_grad_norm}_{args.batch_size}_{args.epochs}"
)
torch.save(run_results, f"run_results_{repro_str}.pt")
if args.save_model:
torch.save(model.state_dict(), f"mnist_cnn_{repro_str}.pt")
if __name__ == "__main__":
main()
``` |
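The `MNIST_MEAN` / `MNIST_STD` constants near the top of `mnist.py` are described as precomputed. As a hedged aside (the dataset root below is a placeholder), values like these can be reproduced directly from the raw training images:

```python
# Sketch only: recompute the normalization constants used by the example script.
import torch
from torchvision import datasets, transforms

ds = datasets.MNIST("../mnist", train=True, download=True,
                    transform=transforms.ToTensor())
pixels = torch.stack([img for img, _ in ds])      # [60000, 1, 28, 28], values in [0, 1]
print(pixels.mean().item(), pixels.std().item())  # approximately 0.1307, 0.3081
```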
{
"source": "jlin816/wge",
"score": 3
} |
#### File: miniwob-sandbox/turk-api/fix-email-nlp-demos.py
```python
import sys, os, shutil, re, argparse, json, gzip
from codecs import open
from collections import defaultdict, Counter
EMAIL_INBOX_PATTERNS = [
('delete', r'Find the email by (.*) and click the trash icon to (.*) it\.', ['by', 'task']),
('forward', r'Find the email by (.*) and (.*) that email to (.*)\.', ['by', 'task', 'to']),
('important', r'Find the email by (.*) and click the (.*) icon to mark it as important\.', ['by', 'task']),
('reply', r'Find the email by (.*) and (.*) to them with the text "(.*)"\.', ['by', 'task', 'message']),
]
def extract_email_inbox(utterance):
for task, regex, keys in EMAIL_INBOX_PATTERNS:
match = re.match(regex, utterance)
if match:
return dict(list(zip(keys, match.groups())))
raise ValueError('Bad email-inbox utterance: {}'.format(utterance))
NL_TEMPLATES = [
'Find the email by (?P<NAME>[^ ]*) and forward that email to (?P<DEST>[^ ]*).',
'Locate the email by (?P<NAME>[^ ]*). Forward that email to (?P<DEST>[^ ]*).',
'Look for the email from (?P<NAME>[^ ]*) and forward to (?P<DEST>[^ ]*).',
'Forward to (?P<DEST>[^ ]*) the email from (?P<NAME>[^ ]*).',
'Send (?P<DEST>[^ ]*) the email you got from (?P<NAME>[^ ]*).',
'Go to the email by (?P<NAME>[^ ]*). Send it to (?P<DEST>[^ ]*).',
'Send to (?P<DEST>[^ ]*) the email you got from (?P<NAME>[^ ]*).',
'Forward the email from (?P<NAME>[^ ]*) to (?P<DEST>[^ ]*).',
'Forward to (?P<DEST>[^ ]*) the email from (?P<NAME>[^ ]*).',
'Send (?P<DEST>[^ ]*) the email from (?P<NAME>[^ ]*).',
'Please find the message by (?P<NAME>[^ ]*), then send it to (?P<DEST>[^ ]*).',
'Please forward the information from (?P<NAME>[^ ]*) to (?P<DEST>[^ ]*).',
'(?P<DEST>[^ ]*) wants the email you got from (?P<NAME>[^ ]*).',
'(?P<DEST>[^ ]*) wants the email (?P<NAME>[^ ]*) sent to you.',
'The mail by (?P<NAME>[^ ]*) should be forwarded to (?P<DEST>[^ ]*).',
'Please forward to (?P<DEST>[^ ]*) the email by (?P<NAME>[^ ]*).',
'Give (?P<DEST>[^ ]*) the message you received from (?P<NAME>[^ ]*),',
'Forward the mail by (?P<NAME>[^ ]*) to (?P<DEST>[^ ]*).',
'Go to the message from (?P<NAME>[^ ]*) and send it to (?P<DEST>[^ ]*).',
'(?P<DEST>[^ ]*) is waiting for the email by (?P<NAME>[^ ]*).',
'(?P<NAME>[^ ]*) wants his or her message to be sent to (?P<DEST>[^ ]*).',
'I want the mail by (?P<NAME>[^ ]*) to be sent to (?P<DEST>[^ ]*).',
'Forward to (?P<DEST>[^ ]*) the email you got from (?P<NAME>[^ ]*).',
'Please forward the message from (?P<NAME>[^ ]*) to (?P<DEST>[^ ]*).',
'Please find the mail by (?P<NAME>[^ ]*). Forward it to (?P<DEST>[^ ]*).',
'Navigate to the message from (?P<NAME>[^ ]*) and send it to (?P<DEST>[^ ]*).',
'Forward (?P<DEST>[^ ]*) the email from (?P<NAME>[^ ]*).',
'Forward (?P<DEST>[^ ]*) the message (?P<NAME>[^ ]*) sent you.',
'Send (?P<DEST>[^ ]*) the information (?P<NAME>[^ ]*) sent to you.',
'Search for the mail (?P<NAME>[^ ]*) sent you and send it to (?P<DEST>[^ ]*).',
]
NL_TEMPLATES = [re.compile(x) for x in NL_TEMPLATES]
def extract_email_inbox_forward_nl(utterance):
for regex in NL_TEMPLATES:
match = regex.match(utterance)
if match:
return {
'by': match.group('NAME'),
'to': match.group('DEST'),
}
raise ValueError('Bad email-inbox utterance: {}'.format(utterance))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=['all', 'forward'])
parser.add_argument('filename')
args = parser.parse_args()
with gzip.open(args.filename) as fin:
data = json.load(fin)
utterance = data['utterance']
if args.mode == 'all':
data['fields'] = extract_email_inbox(utterance)
elif args.mode == 'forward':
data['fields'] = extract_email_inbox_forward_nl(utterance)
outfile = args.filename.replace('.json.gz', '-fixed.json.gz')
    with gzip.open(outfile, 'wt') as fout:  # text mode so json.dump can write str under Python 3
json.dump(data, fout, separators=(',', ':'))
print('{} -> {}'.format(args.filename, outfile), file=sys.stderr)
if __name__ == '__main__':
main()
```
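Both extractors above return a plain dict of slot values. Assuming the module above is importable, a hedged usage sketch (the utterances are made up but follow the listed templates):

```python
# Illustrative usage of the extractors defined above; the utterances are invented.
print(extract_email_inbox("Find the email by Alice and forward that email to Bob."))
# -> {'by': 'Alice', 'task': 'forward', 'to': 'Bob'}
print(extract_email_inbox_forward_nl("Forward the email from Alice to Bob."))
# -> {'by': 'Alice', 'to': 'Bob'}
```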
#### File: miniwob-sandbox/turk-api/run.py
```python
import sys, os, shutil, argparse, re, json, traceback
from codecs import open
from configparser import RawConfigParser
from collections import defaultdict
import boto
from boto.mturk.connection import MTurkRequestError
from boto.mturk.price import Price
from boto.mturk.qualification import (Qualifications, Requirement,
LocaleRequirement, AdultRequirement,
NumberHitsApprovedRequirement,
PercentAssignmentsAbandonedRequirement,
PercentAssignmentsApprovedRequirement,
PercentAssignmentsRejectedRequirement,
PercentAssignmentsReturnedRequirement,
PercentAssignmentsSubmittedRequirement)
from boto.mturk.question import ExternalQuestion
# 1 Batch has many HITs; 1 HIT has many assignments
# The Worker accepts an assignment, then either submits or returns it.
# The Requester reviews an assignment
# There are 5 HIT states:
# - Assignable:
# The HIT is not expired *AND* at least 1 assignment has not been accepted
# I.e., a Worker can accept an assignment from this HIT.
# Extending a HIT will change the HIT to Assignable state.
# - Unassignable: <-- This name is the worst name ever
# No assignment is in open state,
# and at least one assignment is in assigned state.
# I.e., no Workers can accept an assignment from this HIT.
# - Reviewable:
# *ALL* assignments of the HIT are submitted *OR* the HIT expired.
# The Requester can get the list of all reviewable HITs by calling
# GetReviewableHITs.
# Note that even though all assignments are approved or rejected, the HIT
# will still be in Reviewable state. The Requester must call DisposeHIT
# to dispose the HIT.
# - Reviewing (optional):
# The Requester manually called SetHITAsReviewing on the HIT.
# Benefit: GetReviewableHITs does not return HITs in Reviewing state.
# - Disposed:
# The HIT has been deleted and can no longer be retrieved.
# (HITs are automatically disposed after 120 days.)
# There are 6 assignment states:
# - Open (a Worker can accept the assignment)
# - Expired
# - Assigned (a Worker is working on the assignment)
# - Submitted (a Worker has submitted the assignment)
# - Approved
# - Rejected
class MTurkWrapper(object):
SANDBOX = 'mechanicalturk.sandbox.amazonaws.com'
PREVIEW_REAL = 'https://www.mturk.com/mturk/preview?groupId='
PREVIEW_SANDBOX = 'https://workersandbox.mturk.com/mturk/preview?groupId='
def __init__(self, sandbox=True):
self.sandbox = sandbox
def load(self):
if self.sandbox:
print("Using SANDBOX ...")
self.mtc = boto.connect_mturk(host=MTurkWrapper.SANDBOX)
else:
print("Using REAL MTURK!")
self.mtc = boto.connect_mturk()
def get_account_balance(self):
return self.mtc.get_account_balance()
################ CREATE HIT ################
def _replace_variables(self, s, variables):
for key, value in variables.items():
s = re.sub(r'\$\{' + key + r'\}', str(value), s, flags=re.I)
assert '${' not in s, s
return s
def create_batch(self, properties, variables_list, maxhits=None):
'''Create a new batch of HITs.
Return (list of hit_ids, hit_type_id).
'variables_list' is a list of dicts {key1: value1, key2: value2, ...}
Strings of the form '${key1}' in question and annotation
will be replaced with value1 and so on.
'''
if maxhits:
variables_list = variables_list[:maxhits]
if input('Creating %d HITs. Continue? (y/N) '
% len(variables_list)).lower() != 'y':
return
# Register the HIT type
if 'hittypeid' in properties:
hit_type_id = properties['hittypeid']
else:
result = self.mtc.register_hit_type(
properties['title'], properties['description'],
properties['reward'], properties['assignmentduration'],
properties['keywords'], properties['autoapprovaldelay'],
properties['qualifications'])
hit_type_id = result[0].HITTypeId
# Reading parameters for individual HITs
hit_ids = []
for i, variables in enumerate(variables_list):
question = ExternalQuestion(
self._replace_variables(properties['url'], variables),
properties['frameheight'])
annotation = self._replace_variables(
properties['annotation'], variables)
if isinstance(properties['assignments'], int):
max_assignments = properties['assignments']
else:
max_assignments = properties['assignments'][i]
if max_assignments <= 0:
print('(%5d/%5d)' % (i + 1, len(variables_list)), end=' ')
print('Skipped because assignments <= 0')
continue
result = self.mtc.create_hit(
hit_type=hit_type_id, question=question,
annotation=annotation, lifetime=properties['hitlifetime'],
max_assignments=max_assignments)
hit_id = result[0].HITId
hit_ids.append(hit_id)
assert hit_type_id == result[0].HITTypeId
print('(%5d/%5d)' % (i + 1, len(variables_list)), end=' ')
print('Created HIT', hit_id)
print(('DONE! %d HITs created. Preview the HITs here:'
% len(variables_list)))
if self.sandbox:
print(MTurkWrapper.PREVIEW_SANDBOX + hit_type_id)
else:
print(MTurkWrapper.PREVIEW_REAL + hit_type_id)
return hit_ids, hit_type_id
def extend_batch(self, hit_ids, assignments_increment=None,
expiration_increment=None):
'''Extend a batch of HITs.'''
print('Extending batch ...')
print('Assignment +=', assignments_increment)
print('Expiration +=', expiration_increment)
for i, hit_id in enumerate(hit_ids):
self.mtc.extend_hit(hit_id,
assignments_increment=assignments_increment,
expiration_increment=expiration_increment)
print('(%5d/%5d)' % (i + 1, len(hit_ids)), end=' ')
print('Extended', hit_id)
print('Done!')
################ GET RESULTS ################
def get_batch(self, hit_ids, status=None):
'''Return a list of SUBMITTED assignments in the batch.
Parameter 'status' can be one of
- None (everything)
- 'Submitted' (neither approved nor rejected yet)
- 'Approved'
- 'Rejected'
- 'Approved,Rejected' (either approved or rejected)
'''
print('Getting submitted assignments ...')
assignments = []
total_max_assignments = 0
for i, hit_id in enumerate(hit_ids):
result_set = self.mtc.get_assignments(hit_id, status, page_size=100)
hit = self.mtc.get_hit(hit_id)[0]
max_assignments = int(hit.MaxAssignments)
total_max_assignments += max_assignments
print('(%5d/%5d)' % (i + 1, len(hit_ids)), end=' ')
print(hit_id, ':', result_set.NumResults, '/', max_assignments, 'assignments')
assignments.extend(result_set)
print('DONE! %d / %d assignments retrieved.' % (len(assignments), total_max_assignments))
return assignments
################ APPROVE / REJECT ################
def _read_mapping(self, mapping):
'''Return a list of (id, reason)
mapping can be one of the following:
- list or tuple of ids (reason = None)
- dict from string (id) to string (reason)
- dict from string (reason) to list or tuple (ids)
'''
if isinstance(mapping, (list, tuple)):
return [(x, None) for x in mapping]
elif isinstance(mapping, dict):
items = list(mapping.items())
if isinstance(items[0][1], (list, tuple)):
return [(x, reason) for (reason, ids) in items for x in ids]
else:
return items
assert False, 'mapping has incorrect type %s' % type(mapping)
def approve_assignments(self, mapping):
mapping = self._read_mapping(mapping)
if input('Approving %d assignments. Continue? (y/N) '
% len(mapping)).lower() != 'y':
return
for assignment_id, reason in mapping:
try:
self.mtc.approve_assignment(assignment_id, reason)
print('Approved %s (%s)' % (assignment_id, reason))
except Exception as e:
print(e)
def reject_assignments(self, mapping):
mapping = self._read_mapping(mapping)
if input('Rejecting %d assignments. Continue? (y/N) '
% len(mapping)).lower() != 'y':
return
for assignment_id, reason in mapping:
self.mtc.reject_assignment(assignment_id, reason)
print('Rejected %s (%s)' % (assignment_id, reason))
def approve_rejected_assignments(self, mapping):
mapping = self._read_mapping(mapping)
if input('Resurrecting %d assignments. Continue? (y/N) '
% len(mapping)).lower() != 'y':
return
for assignment_id, reason in mapping:
self.mtc.approve_rejected_assignment(assignment_id, reason)
print('Resurrected %s (%s)' % (assignment_id, reason))
def grant_bonus(self, data):
'''data = list of (worker_id, assignment_id, bonus_amount, reason)'''
if input('Granting bonus to %d Turkers. Continue? (y/N) '
% len(data)).lower() != 'y':
return
for worker_id, assignment_id, bonus_amount, reason in data:
bonus_amount = Price(float(bonus_amount))
self.mtc.grant_bonus(worker_id, assignment_id, bonus_amount, reason)
print('Granted %s to %s (%s)' % (bonus_amount, worker_id, reason))
def block_workers(self, mapping):
mapping = self._read_mapping(mapping)
pass
def unblock_workers(self, mapping):
mapping = self._read_mapping(mapping)
pass
################ CLEAN UP ################
def delete_batch(self, hit_ids):
'''Delete the HITs:
- Try to dispose the HIT.
- If failed (because the conditions of dispose_hit are not met),
expire the HIT, approve the remaining assignments, and
re-dispose the HIT.
'''
if input('Deleting %d HITs. Continue? (y/N) '
% len(hit_ids)).lower() != 'y':
return
for i, hit_id in enumerate(hit_ids):
status = self.mtc.get_hit(hit_id)[0].HITStatus
if status == 'Disposed':
print('(%5d/%5d)' % (i + 1, len(hit_ids)), end=' ')
print('HIT', hit_id, 'already disposed.')
continue
try:
self.mtc.dispose_hit(hit_id)
print('(%5d/%5d)' % (i + 1, len(hit_ids)), end=' ')
print('Disposed HIT', hit_id)
except MTurkRequestError as e:
print('Trying to dispose HIT', hit_id, '...')
try:
self.mtc.expire_hit(hit_id)
result_set = self.mtc.get_assignments(
hit_id, 'Submitted', page_size=100)
if len(result_set) > 0:
print('Approving %d assignments ...' % len(result_set))
for assignment in result_set:
self.mtc.approve_assignment(assignment.AssignmentId)
self.mtc.dispose_hit(hit_id)
print('(%5d/%5d)' % (i + 1, len(hit_ids)), end=' ')
print('Disposed HIT', hit_id)
except MTurkRequestError as e:
traceback.print_exc()
exit(1)
print('DONE! %d HITs disposed.' % len(hit_ids))
def early_expire_hits(self, hit_ids):
'''Expire several HITs'''
if input('Expiring %d HITs. Continue? (y/N) '
% len(hit_ids)).lower() != 'y':
return
for i, hit_id in enumerate(hit_ids):
self.mtc.expire_hit(hit_id)
print('(%5d/%5d)' % (i + 1, len(hit_ids)), end=' ')
print('Expired HIT', hit_id)
print('DONE! %d HITs expired.' % len(hit_ids))
def dispose_batch(self, hit_ids):
'''Dispose HITs such that
- the HIT is in REVIEWABLE state, and
- all assignments approved or rejected.
If not all conditions are met, an error is thrown.
Warning: After disposing the HIT, the Requester can no longer approve
the rejected assignments.
The results can still be downloaded until 120 days after.
'''
pass
def disable_hit(self, hit_ids):
'''Deal with HITs that are NOT REVIEWABLE:
- Remove HITs from marketplace
- Approve all submitted assignments (+ Pay workers)
(that haven't been accepted or rejected),
- Dispose of the HITs and all assignment data.
        Assignment results data CANNOT be retrieved in the future!
'''
pass
################ EMERGENCY ################
def get_all_hits(self):
'''Return the list of all (HIT id, HIT type id)'''
for x in self.mtc.get_all_hits():
print('%s\t%s' % (x.HITId, x.HITTypeId))
################################################################
class RecordWrapper(object):
def __init__(self, basedir):
assert os.path.isdir(basedir)
self.basedir = basedir
self.dirname = os.path.basename(os.path.realpath(basedir))
def _get_filename(self, extension, check=False):
filename = os.path.join(self.basedir, self.dirname + '.' + extension)
if check and os.path.exists(filename):
confirm = input('%s exists. Overwrite? (Yes/No/Rename) ' % filename)
if confirm.lower() == 'r':
suffix = 0
while os.path.exists(filename + '.conflict.' + str(suffix)):
suffix += 1
return filename + '.conflict.' + str(suffix)
if confirm.lower() != 'y':
return None
return filename
TIME_MULTIPLIERS = {'s': 1, 'm': 60, 'h': 60 * 60, 'd': 60 * 60 * 24,
'w': 60 * 60 * 24 * 7}
def _parse_time(self, timespec):
if timespec[-1] in RecordWrapper.TIME_MULTIPLIERS:
return int(float(timespec[:-1]) *
RecordWrapper.TIME_MULTIPLIERS[timespec[-1]])
return int(timespec)
QUALIFICATIONS = {'adult': AdultRequirement,
'numapproved': NumberHitsApprovedRequirement,
'%abandoned': PercentAssignmentsAbandonedRequirement,
'%approved': PercentAssignmentsApprovedRequirement,
'%rejected': PercentAssignmentsRejectedRequirement,
'%returned': PercentAssignmentsReturnedRequirement,
'%submitted': PercentAssignmentsSubmittedRequirement}
COMPARATORS = {'<': 'LessThan', '<=': 'LessThanOrEqualTo',
'>': 'GreaterThan', '>=': 'GreaterThanOrEqualTo',
'=': 'EqualTo', '!=': 'NotEqualTo'}
def read_config(self):
'''Return (properties, variables_list)'''
filename = self._get_filename('config')
parser = RawConfigParser()
parser.read(filename)
properties = {}
if parser.has_option('properties', 'hittypeid'):
properties['hittypeid'] = parser.get('properties', 'hittypeid')
else:
# Create a new HIT Type ID if not present
for key in ('title', 'description', 'keywords'):
properties[key] = parser.get('properties', key)
properties['reward'] = Price(parser.getfloat('properties', 'reward'))
for key in ('assignmentduration', 'autoapprovaldelay'):
properties[key] = self._parse_time(parser.get('timing', key))
# Qualifications
requirements = []
if parser.has_option('qualifications', 'locale'):
requirements.append(LocaleRequirement(
'EqualTo', parser.get('qualifications', 'locale'), True))
for key in RecordWrapper.QUALIFICATIONS:
if parser.has_option('qualifications', key):
value = parser.get('qualifications', key)
comparator = ''.join(x for x in value if not x.isdigit())
value = int(value[len(comparator):])
requirements.append(RecordWrapper.QUALIFICATIONS[key](
RecordWrapper.COMPARATORS[comparator], value, True))
properties['qualifications'] = Qualifications(requirements)
# Other properties
properties['annotation'] = parser.get('properties', 'annotation')
properties['assignments'] = parser.get('properties', 'assignments')
try:
properties['assignments'] = int(properties['assignments'])
except ValueError:
properties['assignments'] = self.read_assignment_amounts(properties['assignments'])
properties['hitlifetime'] = self._parse_time(parser.get('timing', 'hitlifetime'))
# Question
properties['url'] = parser.get('question', 'url')
properties['frameheight'] = parser.get('question', 'frameheight')
# Input
n = parser.getint('input', 'numhits')
if isinstance(properties['assignments'], list):
assert len(properties['assignments']) == n, (len(properties['assignments']), n)
variables_list = [dict() for i in range(n)]
for key in parser.options('input'):
if key != 'numhits':
value = parser.get('input', key)
if value[0] == '[':
value = json.loads(value)
assert len(value) == n
for i in range(n):
variables_list[i][key] = value[i]
elif '-' in value:
start, end = [int(x) for x in value.split('-')]
assert end - start + 1 == n
for i in range(n):
variables_list[i][key] = start + i
else:
for i in range(n):
variables_list[i][key] = value
return properties, variables_list
def read_assignment_amounts(self, suffix):
filename = self._get_filename(suffix)
with open(filename, 'r', 'utf8') as fin:
return [int(x) for x in fin if x.strip()]
def read_increments(self):
'''Return (assignments_increment, expiration_increment)'''
a_i = input('Assignment increment: ')
try:
a_i = int(a_i) or None
except:
print('Invalid input "%s". Set to None.' % a_i)
a_i = None
e_i = input('Expiration increment: ')
try:
e_i = self._parse_time(e_i) or None
except:
print('Invalid input "%s". Set to None.' % e_i)
e_i = None
print('>>> Assignment +=', a_i)
print('>>> Expiration +=', e_i)
if input('Is this OK? (Yes/No) ').lower()[:1] == 'y':
return (a_i, e_i)
return self.read_increments()
def write_success(self, hit_ids, hit_type_id):
filename = self._get_filename('success', check=True)
if not filename:
return
with open(filename, 'w', 'utf8') as fout:
print('\t'.join(('hitId', 'hitTypeId')), file=fout)
for hit_id in hit_ids:
print('\t'.join((hit_id, hit_type_id)), file=fout)
def read_success(self):
'''Return HIT IDs'''
with open(self._get_filename('success')) as fin:
return [line.split()[0] for line in fin.readlines()[1:]]
def read_expire(self):
'''Return HIT IDs'''
with open(self._get_filename('expire')) as fin:
return [line.split()[0] for line in fin.readlines()[1:]]
ASSIGNMENT_FIELDS = (
'AssignmentId', 'WorkerId', 'HITId',
'AssignmentStatus', # Submitted / Approved / Rejected
'AcceptTime', 'SubmitTime', 'AutoApprovalTime',
'ApprovalTime', 'RejectionTime',
)
def write_results(self, assignments):
filename = self._get_filename('results', check=False)
if not filename:
return
records = []
statistics = defaultdict(int)
for assignment in assignments:
statistics[assignment.AssignmentStatus] += 1
record = {'metadata': {}, 'answers': {}}
for key in RecordWrapper.ASSIGNMENT_FIELDS:
try:
record['metadata'][key] = getattr(assignment, key)
except AttributeError:
pass # Ignore field
for answer in assignment.answers[0]:
record['answers'][answer.qid] = answer.fields[0]
records.append(record)
with open(filename, 'w', 'utf8') as fout:
json.dump(records, fout, ensure_ascii=False, indent=2,
separators=(',', ': '), sort_keys=True)
print(('Wrote %d records to %s' % (len(records), filename)))
for key, value in statistics.items():
print('%12s: %6d / %6d (%8.3f%%)' % (key, value, len(records),
value * 100.0 / len(records)))
def read_results(self):
'''Return a list of {'metadata': {...}, 'answers': {...}}'''
filename = self._get_filename('results')
with open(filename, 'r', 'utf8') as fin:
return json.load(fin)
def _read_approve_or_reject(self, fin):
'''Return a mapping from assignment_id to reason
Format:
# Reason for assignment IDs below <-- The first one is optional
Assignment ID
Assignment ID
...
        # Reason for assignment IDs below
Assignment ID
...
'''
mapping = {}
reason = ''
for line in fin:
line = line.strip()
if line.startswith('#'):
reason = line[1:].strip()
elif line:
mapping[line] = reason
return mapping
def read_approve(self):
filename = self._get_filename('approve')
if not os.path.exists(filename):
return None
with open(filename, 'r', 'utf8') as fin:
return self._read_approve_or_reject(fin)
def read_reject(self):
filename = self._get_filename('reject')
if not os.path.exists(filename):
return None
with open(filename, 'r', 'utf8') as fin:
return self._read_approve_or_reject(fin)
def read_tsv(self, extension):
"""If all else fails..."""
filename = self._get_filename(extension)
if not os.path.exists(filename):
return None
with open(filename, 'r', 'utf8') as fin:
return [x.strip().split('\t') for x in fin if x.strip()]
################################################################
class Actions(object):
ACTIONS = ('getbalance', 'create', 'extend', 'get', 'clean',
'grade', 'approve', 'reject', 'expire', 'bonus',
'getallhits')
def __init__(self, sandbox=True, basedir=None):
self.mturk_w = MTurkWrapper(sandbox=sandbox)
if basedir:
self.record_w = RecordWrapper(basedir)
else:
self.record_w = None
def getbalance(self, args):
""" Print the balance and exit.
Does not require any file, but you still need to specify a dummy directory
in the command line.
To get real MTurk balance, add the --real flag.
"""
self.mturk_w.load()
print(self.mturk_w.get_account_balance())
def create(self, args):
""" Create a batch of HITs.
Requires [name].config containing the HIT configurations.
See the example config file.
Creates [name].success containing created HIT IDs.
Make sure you have enough balance first.
Otherwise it is pretty difficult to fix the error.
"""
properties, variables_list = self.record_w.read_config()
print('=' * 40)
for key in sorted(properties):
print(key, ':', properties[key])
print('=' * 40)
self.mturk_w.load()
response = self.mturk_w.create_batch(
properties, variables_list, maxhits=args.maxhits)
if response:
hit_ids, hit_type_id = response
self.record_w.write_success(hit_ids, hit_type_id)
def extend(self, args):
""" Extend an existing batch of HITs.
Requires [name].success containing HIT IDs (created by |create|).
Creates a new [name].success file; the old file will be backed up.
You will be prompted to enter the amount of time and assignments per HIT to add.
Either fields can be left blank.
Time = number of seconds, but you can use shorthands like 1d (= 1 day)
"""
hit_ids = self.record_w.read_success()
assignments_increment, expiration_increment =\
self.record_w.read_increments()
self.mturk_w.load()
self.mturk_w.extend_batch(hit_ids,
assignments_increment=assignments_increment,
expiration_increment=expiration_increment)
def get(self, args):
""" Retrieve Turker's work for a batch of HITs.
Requires [name].success containing HIT IDs (created by |create|).
Creates [name].results, a JSON file containing the results.
"""
hit_ids = self.record_w.read_success()
self.mturk_w.load()
assignments = self.mturk_w.get_batch(hit_ids)
self.record_w.write_results(assignments)
def clean(self, args):
""" Remove a batch of HITs from Amazon permanently.
Requires [name].success containing HIT IDs (created by |create|).
You should only call |clean| on sandbox tasks.
For the real tasks, just leave it on Amazon.
"""
hit_ids = self.record_w.read_success()
self.mturk_w.load()
self.mturk_w.delete_batch(hit_ids)
def grade(self, args):
""" Perform |reject| and then |approve|. (Shortcut)
Requires at least one of [name].approve and [name].reject
See |approve| and |reject| for file description.
After all assignments are approved or rejected, back up the [name].approve
and [name].reject by renaming them as [name].approve-## and [name].reject-##
(## = number).
"""
mapping_rej = self.record_w.read_reject()
mapping_app = self.record_w.read_approve()
if not (mapping_rej or mapping_app):
print('Nothing to reject or approve.')
exit(0)
i = 1
while os.path.exists(self.record_w._get_filename('approve-%02d' % i)) \
or os.path.exists(self.record_w._get_filename('reject-%02d' % i)):
i += 1
print('Reject, Approve, and move files to ...-%02d' % i)
self.mturk_w.load()
if mapping_rej:
self.mturk_w.reject_assignments(mapping_rej)
shutil.move(self.record_w._get_filename('reject'),
self.record_w._get_filename('reject-%02d' % i))
else:
print('No assignment to reject.')
if mapping_app:
self.mturk_w.approve_assignments(mapping_app)
shutil.move(self.record_w._get_filename('approve'),
self.record_w._get_filename('approve-%02d' % i))
else:
print('No assignment to approve.')
def approve(self, args):
""" Approve assignments from the given list.
It is better to use |grade| since it also handles |reject| and backs up files.
Requires [name].approve containing one assignment ID per line.
To give a feedback message to the approved assignments, add a comment line
in [name].approve (like "# Your answer is awesome."). All assignments
after that comment line will have that message. Later comment lines
override the previous ones.
"""
mapping = self.record_w.read_approve()
self.mturk_w.load()
self.mturk_w.approve_assignments(mapping)
def reject(self, args):
""" Reject assignments from the given list.
It is better to use |grade| since it also handles |approve| and backs up files.
Requires [name].reject containing one assignment ID per line.
To give a feedback message to the rejected assignments, add a comment line
in [name].reject (like "# Your answer is nonsense."). All assignments
after that comment line will have that message. Later comment lines
override the previous ones.
"""
mapping = self.record_w.read_reject()
self.mturk_w.load()
self.mturk_w.reject_assignments(mapping)
def expire(self, args):
""" Immediately expire a batch of HITs.
Requires [name].success containing HIT IDs (created by |create|).
"""
hit_ids = self.record_w.read_expire()
self.mturk_w.load()
self.mturk_w.early_expire_hits(hit_ids)
def bonus(self, args):
""" Give bonus to workers in a list.
Requires [name].bonus containing one worker ID per line.
To give a feedback message to the approved workers, add a comment line
in [name].bonus (like "# Your work is awesome."). All workers
after that comment line will have that message. Later comment lines
override the previous ones.
"""
data = self.record_w.read_tsv('bonus')
self.mturk_w.load()
self.mturk_w.grant_bonus(data)
def getallhits(self, args):
""" Get the list of all HITs ever published in the account.
If something fails, use this as a last resort for debugging stuff.
"""
self.mturk_w.load()
self.mturk_w.get_all_hits()
################################################################
class CustomArgumentParser(argparse.ArgumentParser):
def error(self, message):
print('ERROR:', message, file=sys.stderr)
self.print_help()
sys.exit(2)
if __name__ == '__main__':
parser = CustomArgumentParser()
parser.add_argument('--real', action='store_false', dest='sandbox', default=True,
help="Use the real MTurk instead of sandbox")
parser.add_argument('--maxhits', type=int,
help='Maximum number of HITs (for debugging in sandbox)')
parser.add_argument('dir',
help="Base directory")
parser.add_argument('action',
help="action to take (%s)" % ', '.join(Actions.ACTIONS) +
" Read the Action class in run.py to see what each action does")
args = parser.parse_args()
# If action comes before dir (typo) ...
if not os.path.exists(args.dir) and os.path.exists(args.action):
args.dir, args.action = args.action, args.dir
# Perform action
actions = Actions(args.sandbox, args.dir)
if hasattr(actions, args.action.lower()):
getattr(actions, args.action.lower())(args)
else:
print("Action '%s' not recognized" % args.action)
``` |
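For reference, the `.approve` / `.reject` files consumed by `_read_approve_or_reject` are plain text: comment lines starting with `#` set the feedback message for all assignment IDs that follow. A small self-contained sketch of that mapping (IDs and messages are made up):

```python
# Sketch mirroring the parsing loop in RecordWrapper._read_approve_or_reject.
approve_text = """# Thanks, great work!
ASSIGNMENT_ID_1
ASSIGNMENT_ID_2
# Late submission, approved anyway.
ASSIGNMENT_ID_3
"""
mapping, reason = {}, ''
for line in approve_text.splitlines():
    line = line.strip()
    if line.startswith('#'):
        reason = line[1:].strip()
    elif line:
        mapping[line] = reason
print(mapping)
# {'ASSIGNMENT_ID_1': 'Thanks, great work!', 'ASSIGNMENT_ID_2': 'Thanks, great work!',
#  'ASSIGNMENT_ID_3': 'Late submission, approved anyway.'}
```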
{
"source": "jlind062/flippin_flask",
"score": 3
} |
#### File: flippin/scrapers/scraper_craigslist.py
```python
import scrapy
import sys
import time
from scrapy.crawler import CrawlerProcess
class CraigslistSpider(scrapy.Spider):
name = "craigslist_spider"
def __init__(self, *a, **kw):
super(CraigslistSpider, self).__init__(*a, **kw)
self.start_urls = kw.get("start_urls")
self.city = kw.get("city")
self.time = time.strftime('%Y-%m-%d %H:%M:%S')
self.allowed_domains = ["craigslist.ca", "craigslist.com"]
print(self.start_urls)
def parse(self, response):
item_selector = '//p[@class="result-info"]'
for listings in response.xpath(item_selector):
price_selector = ".//span[@class='result-price']/text()"
name_selector = 'a ::text'
address_selector = 'a/@href'
url = listings.xpath(address_selector).extract_first()
yield {
'name': listings.css(name_selector).extract_first().strip(),
'price': self.clean_price(listings.xpath(price_selector).extract_first()),
'address': url,
'city': self.city,
'category_code': self.get_category_code(url),
'city_code': self.get_city_code(url),
'business': self.is_business(url),
'scan_date': self.time
}
next_page_selector = './/link[@rel="next"]/@href'
next_page = response.xpath(next_page_selector).extract_first()
if next_page:
yield scrapy.Request(
response.urljoin(next_page),
callback=self.parse
)
@staticmethod
def __after_nth_substring(string, substring, n):
# return the string after nth occurrence of the substring
string_trim = string
for i in range(0, n):
string_trim = string_trim[string_trim.find(substring) + 1:]
return string_trim
@staticmethod
def clean_price(price):
# if possible, remove the $, commas, and convert price to a float
if price is None:
return "Invalid"
else:
price = price.replace(",", "")
return float(price[1:])
@staticmethod
def get_city_code(url):
# returns the city code which is stored between the 3rd and 4th /
url = CraigslistSpider.__after_nth_substring(url, '/', 3)
return url[:url.index('/')]
@staticmethod
def is_business(url):
# the listing is marked as a business posting if the poster identifier's
# 3rd character is a d. Identifier is stored after the 4th /
if CraigslistSpider.__after_nth_substring(url, '/', 4)[2] == 'd':
return 1
else:
return 0
@staticmethod
def get_category_code(url):
        # returns the category code which is stored between the 4th and 5th /
url = CraigslistSpider.__after_nth_substring(url, '/', 4)
return url[:url.index('/')]
class Main:
@staticmethod
def run(**kwargs):
process = CrawlerProcess({
'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'FEED_FORMAT': 'csv',
'LOG_ENABLED': kwargs.get('logging'),
'DOWNLOAD_DELAY': '2',
'RANDOMIZE_DOWNLOAD_DELAY': '2'
})
Main.add_crawler(process, path=kwargs.get('path'), start_urls=[kwargs.get('start_urls')],
city=kwargs.get('city'))
process.start()
@staticmethod
def add_crawler(crawler_process, **kwargs):
# adds the spider to the crawler process by adding custom spider to process
# creating custom spider class based off of original spider to override the custom_settings
# class variable which is instantiated before the __init__ of the original spider class
# this hacky fix is required to run multiple spiders with different feed exports
custom_spider = type("custom_spider", (CraigslistSpider,),
{"custom_settings": {'FEED_URI': kwargs.get('path')}})
crawler_process.crawl(custom_spider, start_urls=kwargs.get('start_urls'),
city=kwargs.get('city'))
if __name__ == "__main__":
Main.run(path=sys.argv[1], logging=sys.argv[3],
city=sys.argv[4], start_urls=sys.argv[2])
```
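The URL handling in the spider above leans on `__after_nth_substring`, which simply drops everything up to and including the n-th occurrence of a substring. A tiny standalone sketch (the sample URL is made up; real craigslist paths may differ):

```python
# Standalone sketch of the slicing helper used by the spider above.
def after_nth_substring(string, substring, n):
    for _ in range(n):
        string = string[string.find(substring) + 1:]
    return string

print(after_nth_substring("https://example.org/van/ele/d/123.html", "/", 3))
# -> "van/ele/d/123.html"
# per the comments in the spider, the city code here would be "van" and the category code "ele"
```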
#### File: flippin_flask/flippin/upload_results.py
```python
import csv
import re
import os
import scrape
import json
class CSVManager:
def __init__(self, **kwargs):
self.path = kwargs.get('path')
self.sources = kwargs.get('sources')
if not self.get_csvs():
raise NotADirectoryError("There is no scan data in directory '%s" % self.path)
self.csvs = self.get_csvs()
def get_csvs(self):
# get all the csv from the results directory
csvs = []
for file in os.listdir(self.path):
if file[:file.find(" ")] in self.sources:
csvs.append(self.path + "/" + file)
return csvs
def write_csvs(self, db):
# writes data from every csv
for path in self.csvs:
self.write_data(path, db)
@staticmethod
def write_data(path, db):
def check_valid(data):
# checks to see if price is valid and that there is no item with the same url in the db
            if data[1] != "Invalid" and data[6] == '0':  # compare by value, not identity
if db.check_primary(db.save_table, "address", data[2]) == 0:
return True
else:
return False
# writes all the data
with open(path) as f:
reader = csv.reader(f)
for row in reader:
if check_valid(row) is True:
                    source = re.findall(r"(?<=/)[A-Za-z]+(?=\s)", path)[0]
db.write_scan([row[0].replace("'", "").encode('ascii', 'replace').decode(),
int(row[1][:row[1].find('.')]), row[2], row[7][:row[7].find(' ')],
row[4], row[3], row[7], "1", source])
def move_scans(self):
# puts all the csv files from the results directory to the old subdirectory
# which contains a folder for each source
for path in self.csvs:
filename = self.__after_nth_substring(path, '/', 6)
source = filename[:filename.index(' ')]
subdirectory = path[:path.index(filename)] + "old/" + source + "/"
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
new_path = subdirectory + filename
os.rename(path, new_path)
@staticmethod
def __after_nth_substring(string, substring, n):
# return the string after nth occurrence of the substring
string_trim = string
for i in range(0, n):
string_trim = string_trim[string_trim.find(substring) + 1:]
return string_trim
def main():
# writes all the valid items from the scan csvs to the server
with open('config.json', 'r') as f:
config = json.load(f)
results_db = scrape.ScanDBManager(config["LOCAL"], config["BASIC"])
scan_sources = results_db.get_sources()
manager = CSVManager(path=config["BASIC"]["result_path"],
sources=scan_sources)
print(manager.path)
print(manager.sources)
print(manager.csvs)
manager.write_csvs(results_db)
manager.move_scans()
if __name__ == "__main__":
main()
``` |
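`write_data` above derives the listing source from the result file name with a lookbehind/lookahead regex. A hedged illustration (the path is made up but mirrors the `"<source> <date>.csv"` naming that `get_csvs` assumes):

```python
# Illustration only: extracting the source name from a result-file path.
import re
path = "flippin/results/craigslist 2020-05-01.csv"
print(re.findall(r"(?<=/)[A-Za-z]+(?=\s)", path)[0])  # -> "craigslist"
```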
{
"source": "jlindbloom/fmax",
"score": 3
} |
#### File: fmax/models/models.py
```python
import numpy as np
import pymc3 as pm
import fmax as fm
class ForecastModel:
"""Model class for handling forecasting given
observed sequence of attempts.
"""
def __init__(self,
record_data,
time_index=None,
fcast_len = 0,
prior_parameters = {
'mu' : {
'mean' : 11,
'std' : 3,
},
'sigma' : {
'lam' : 1
}
},
kind="max",
attempt_distribution="gaussian",
train="all",
fcast_test_data=None,
drop_first=False
):
self.record_data = record_data
self.kind = kind
self.prior_parameters = prior_parameters
self.attempt_distribution = attempt_distribution
self.n_obs = len(record_data)
self.fcast_len = fcast_len
self.train = train
self.fcast_test_data = fcast_test_data
self.drop_first = drop_first
# Make sure fcast_len and test_data agree
if self.fcast_test_data is not None:
assert fcast_len == len(self.fcast_test_data), "fcast_len must equal len(fcast_test_data)."
# Handle time index
if time_index is None:
self.master_index = [i for i in range(self.n_obs)]
            self.fcast_index = [i for i in range(self.n_obs, self.n_obs + self.fcast_len)]
self.master_with_fcast_index = [i for i in range(self.n_obs+self.fcast_len)]
self.tot_index_len = self.n_obs + self.fcast_len
else:
print("I haven't done this case yet...")
self.master_index = time_index
# Split into training and testing:
if self.train == "all":
self.train_data = self.record_data
self.train_index = self.master_index
self.test_data = None
self.test_index = None
self.fcast_index = [i for i in range(len(self.train_data), len(self.train_data) + self.fcast_len)]
else:
idx_train_max = int(train*len(self.master_index))
self.train_data = self.record_data[:idx_train_max]
self.train_index = self.master_index[:idx_train_max]
self.test_data = self.record_data[idx_train_max:]
self.test_index = self.master_index[idx_train_max:]
self.fcast_index = [i for i in range(len(self.train_index) + self.fcast_len)]
# Get jump/flat data
self.jump_data, self.flat_data = fm.jump_flat_split(self.train_data, kind=self.kind)
# Init PyMC3 model
self.init_pymc_model(self.prior_parameters)
def init_pymc_model(self, prior_parameters):
""" Create a PyMC3 model
"""
# Define model
with pm.Model() as self.pymc_model:
# Initialize priors for the distribution of each attempt
attempts_mean_mu = prior_parameters['mu']['mean']
attempts_mean_sigma = prior_parameters['mu']['std']
attempts_stdev_lam = prior_parameters['sigma']['lam']
priors = {
'mu' : pm.Normal('mu', mu=attempts_mean_mu, sigma=attempts_mean_sigma),
'sigma' : pm.Exponential('sigma', lam=attempts_stdev_lam),
}
# Get random sampling and likelihood for the kind of attempt
loglike = fm.get_loglikelihood_fn(
attempts = self.attempt_distribution,
kind = self.kind,
)
#loglike = fm.gumbel_attempts_min
# Create switch variable between posterior predictive and forecasting
# 0 is posterior predictive, 1 is forecasting
random_switch = pm.Data('random_switch', 0.0)
# Random sampler
posterior_predictive_sampler = fm.get_random_fn(
attempts=self.attempt_distribution,
kind=self.kind,
n_periods=self.fcast_len,
past_obs=self.train_data,
)
likelihood = pm.DensityDist('path',
loglike, random=posterior_predictive_sampler,
observed = {'jump_data':self.jump_data,
'flat_data':self.flat_data,
**priors}
)
# Track the log likelihood on the holdout set if available
if self.fcast_test_data is not None:
holdout_jump_data, holdout_flat_data = fm.jump_flat_split(self.fcast_test_data, kind=self.kind)
log_like_holdout = pm.Deterministic("log_like_holdout", loglike(holdout_jump_data, holdout_flat_data, **priors))
def fit(self,
chains=2,
draws=20000,
tune=5000,
):
"""Fits a PyMC model to the training data.
"""
with self.pymc_model:
self.trace = pm.sample(draws=draws,
chains=chains,
tune=tune,
cores=1,
target_accept=0.99,
return_inferencedata=True,
idata_kwargs={"density_dist_obs": False}
)
def forecast(self):
"""Samples the posterior predictive (includes past and future).
"""
with self.pymc_model:
pm.set_data({'random_switch':1})
self.forecast_ppc = pm.sample_posterior_predictive(self.trace)
self.forecast_samples = self.forecast_ppc['path']
def posterior_predictive(self):
""" Samples the posterior predictive distributions of the observations.
"""
with self.pymc_model:
pm.set_data({'random_switch':0})
if self.fcast_test_data is None:
self.posterior_predictive_ppc = pm.sample_posterior_predictive(self.trace)
else:
self.posterior_predictive_ppc = pm.sample_posterior_predictive(self.trace, var_names=['path', 'log_like_holdout'])
self.posterior_predictive_samples = self.posterior_predictive_ppc['path']
class WeibullForecastModel:
"""Model class for handling forecasting given
observed sequence of attempts.
"""
def __init__(self,
record_data,
time_index=None,
fcast_len = 0,
prior_parameters = {
'mu' : {
'mean' : 11,
'std' : 3,
},
'sigma' : {
'lam' : 1
}
},
kind="max",
attempt_distribution="gaussian",
train="all",
fcast_test_data=None,
drop_first=False
):
self.record_data = record_data
self.kind = kind
self.prior_parameters = prior_parameters
self.attempt_distribution = attempt_distribution
self.n_obs = len(record_data)
self.fcast_len = fcast_len
self.train = train
self.fcast_test_data = fcast_test_data
self.drop_first = drop_first
# Make sure fcast_len and test_data agree
if self.fcast_test_data is not None:
assert fcast_len == len(self.fcast_test_data), "fcast_len must equal len(fcast_test_data)."
# Handle time index
if time_index is None:
self.master_index = [i for i in range(self.n_obs)]
            self.fcast_index = [i for i in range(self.n_obs, self.n_obs + self.fcast_len)]
self.master_with_fcast_index = [i for i in range(self.n_obs+self.fcast_len)]
self.tot_index_len = self.n_obs + self.fcast_len
else:
print("I haven't done this case yet...")
self.master_index = time_index
# Split into training and testing:
if self.train == "all":
self.train_data = self.record_data
self.train_index = self.master_index
self.test_data = None
self.test_index = None
self.fcast_index = [i for i in range(len(self.train_data), len(self.train_data) + self.fcast_len)]
else:
idx_train_max = int(train*len(self.master_index))
self.train_data = self.record_data[:idx_train_max]
self.train_index = self.master_index[:idx_train_max]
self.test_data = self.record_data[idx_train_max:]
self.test_index = self.master_index[idx_train_max:]
self.fcast_index = [i for i in range(len(self.train_index) + self.fcast_len)]
# Get jump/flat data
self.jump_data, self.flat_data = fm.jump_flat_split(self.train_data, kind=self.kind)
if self.drop_first:
self.jump_data = self.jump_data[1:]
# Init PyMC3 model
self.init_pymc_model(self.prior_parameters)
def init_pymc_model(self, prior_parameters):
""" Create a PyMC3 model
"""
# Define model
with pm.Model() as self.pymc_model:
# Initialize priors for the distribution of each attempt
alpha_lower = prior_parameters["alpha"]["lower"]
alpha_upper = prior_parameters["alpha"]["upper"]
beta_lower = prior_parameters["beta"]["lower"]
beta_upper = prior_parameters["beta"]["upper"]
priors = {
#'mu' : pm.Normal('mu', mu=attempts_mean_mu, sigma=attempts_mean_sigma),
#'sigma' : pm.Exponential('sigma', lam=attempts_stdev_lam),
"alpha": pm.Uniform("alpha", lower=alpha_lower, upper=alpha_upper),
"beta": pm.Uniform("beta", lower=beta_lower, upper=beta_upper)
}
# Get random sampling and likelihood for the kind of attempt
loglike = fm.get_loglikelihood_fn(
attempts = self.attempt_distribution,
kind = self.kind
)
#loglike = fm.gumbel_attempts_min
# Create switch variable between posterior predictive and forecasting
# 0 is posterior predictive, 1 is forecasting
random_switch = pm.Data('random_switch', 0.0)
# Random sampler
posterior_predictive_sampler = fm.get_random_fn(
attempts=self.attempt_distribution,
kind=self.kind,
n_periods=self.fcast_len,
past_obs=self.train_data,
)
likelihood = pm.DensityDist('path',
loglike, random=posterior_predictive_sampler,
observed = {'jump_data':self.jump_data,
'flat_data':self.flat_data,
**priors}
)
# Track the log likelihood on the holdout set if available
if self.fcast_test_data is not None:
holdout_jump_data, holdout_flat_data = fm.jump_flat_split(self.fcast_test_data, kind=self.kind)
log_like_holdout = pm.Deterministic("log_like_holdout", loglike(holdout_jump_data, holdout_flat_data, **priors))
def fit(self,
chains=2,
draws=20000,
tune=5000,
):
"""Fits a PyMC model to the training data.
"""
with self.pymc_model:
self.trace = pm.sample(draws=draws,
chains=chains,
tune=tune,
cores=1,
target_accept=0.99,
return_inferencedata=True,
idata_kwargs={"density_dist_obs": False}
)
def forecast(self):
"""Samples the posterior predictive (includes past and future).
"""
with self.pymc_model:
pm.set_data({'random_switch':1})
self.forecast_ppc = pm.sample_posterior_predictive(self.trace)
self.forecast_samples = self.forecast_ppc['path']
def posterior_predictive(self):
""" Samples the posterior predictive distributions of the observations.
"""
with self.pymc_model:
pm.set_data({'random_switch':0})
if self.fcast_test_data is None:
self.posterior_predictive_ppc = pm.sample_posterior_predictive(self.trace)
else:
self.posterior_predictive_ppc = pm.sample_posterior_predictive(self.trace, var_names=['path', 'log_like_holdout'])
self.posterior_predictive_samples = self.posterior_predictive_ppc['path']
``` |
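A hedged usage sketch of `ForecastModel` above — the record values, forecast horizon, and sampler settings are made up, and only the attributes visible in the class are used:

```python
# Illustrative usage only; values are invented and sampling settings kept small.
import numpy as np

records = np.array([10.5, 10.5, 11.0, 11.0, 11.3])   # a running-maximum style series
model = ForecastModel(records, fcast_len=5, kind="max", train="all")
model.fit(chains=2, draws=2000, tune=1000)   # NUTS sampling of mu and sigma
model.forecast()                             # sample future record paths
model.posterior_predictive()                 # sample paths over the observed period
print(model.forecast_samples.shape)
```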
{
"source": "jlindero/lab1",
"score": 3
} |
#### File: lab1/src/line_follower.py
```python
import collections
import sys
import rospy
import numpy as np
from geometry_msgs.msg import PoseArray, PoseStamped
from ackermann_msgs.msg import AckermannDriveStamped
import utils
# The topic to publish control commands to
PUB_TOPIC = '/vesc/high_level/ackermann_cmd_mux/input/nav_0'
'''
Follows a given plan using constant velocity and PID control of the steering angle
'''
class LineFollower:
"""
Initializes the line follower
plan: A list of length T that represents the path that the robot should follow
Each element of the list is a 3-element numpy array of the form [x,y,theta]
pose_topic: The topic that provides the current pose of the robot as a PoseStamped msg
plan_lookahead: If the robot is currently closest to the i-th pose in the plan,
then it should navigate towards the (i+plan_lookahead)-th pose in the plan
translation_weight: How much the error in translation should be weighted in relation
to the error in rotation
rotation_weight: How much the error in rotation should be weighted in relation
to the error in translation
kp: The proportional PID parameter
ki: The integral PID parameter
kd: The derivative PID parameter
error_buff_length: The length of the buffer that is storing past error values
speed: The speed at which the robot should travel
"""
def __init__(self, plan, pose_topic, plan_lookahead, translation_weight,
rotation_weight, kp, ki, kd, error_buff_length, speed):
# Store the passed parameters
self.plan = plan
self.plan_lookahead = plan_lookahead
# Normalize translation and rotation weights
self.translation_weight = translation_weight / (translation_weight+rotation_weight)
self.rotation_weight = rotation_weight / (translation_weight+rotation_weight)
self.kp = kp
self.ki = ki
self.kd = kd
# The error buff stores the error_buff_length most recent errors and the
# times at which they were received. That is, each element is of the form
# [time_stamp (seconds), error]. For more info about the data struct itself, visit
# https://docs.python.org/2/library/collections.html#collections.deque
self.error_buff = collections.deque(maxlen=error_buff_length)
self.speed = speed
# YOUR CODE HERE
self.cmd_pub = # Create a publisher to PUB_TOPIC
self.pose_sub = # Create a subscriber to pose_topic, with callback 'self.pose_cb'
'''
Computes the error based on the current pose of the car
cur_pose: The current pose of the car, represented as a numpy array [x,y,theta]
Returns: (False, 0.0) if the end of the plan has been reached. Otherwise, returns
(True, E) - where E is the computed error
'''
def compute_error(self, cur_pose):
"""
Find the first element of the plan that is in front of the robot, and remove
any elements that are behind the robot. To do this:
Loop over the plan (starting at the beginning) For each configuration in the plan
If the configuration is behind the robot, remove it from the plan
Will want to perform a coordinate transformation to determine if
the configuration is in front or behind the robot
If the configuration is in front of the robot, break out of the loop
"""
while len(self.plan) > 0:
# YOUR CODE HERE
pass
# Check if the plan is empty. If so, return (False, 0.0)
# YOUR CODE HERE
# At this point, we have removed configurations from the plan that are behind
# the robot. Therefore, element 0 is the first configuration in the plan that is in
# front of the robot. To allow the robot to have some amount of 'look ahead',
# we choose to have the robot head towards the configuration at index 0 + self.plan_lookahead
# We call this index the goal_index
goal_idx = min(0+self.plan_lookahead, len(self.plan)-1)
# Compute the translation error between the robot and the configuration at goal_idx in the plan
# YOUR CODE HERE
# Compute the total error
# Translation error was computed above
# Rotation error is the difference in yaw between the robot and goal configuration
# Be careful about the sign of the rotation error
# YOUR CODE HERE
error = # self.translation_weight * translation_error + self.rotation_weight * rotation_error
return True, error
'''
Uses a PID control policy to generate a steering angle from the passed error
error: The current error
Returns: The steering angle that should be executed
'''
def compute_steering_angle(self, error):
now = rospy.Time.now().to_sec() # Get the current time
# Compute the derivative error using the passed error, the current time,
# the most recent error stored in self.error_buff, and the most recent time
# stored in self.error_buff
# YOUR CODE HERE
# Add the current error to the buffer
self.error_buff.append((error, now))
# Compute the integral error by applying rectangular integration to the elements
# of self.error_buff: https://chemicalstatistician.wordpress.com/2014/01/20/rectangular-integration-a-k-a-the-midpoint-rule/
# YOUR CODE HERE
# Compute the steering angle as the sum of the pid errors
# YOUR CODE HERE
return #self.kp*error + self.ki*integ_error + self.kd * deriv_error
'''
Callback for the current pose of the car
msg: A PoseStamped representing the current pose of the car
This is the exact callback that we used in our solution, but feel free to change it
'''
def pose_cb(self, msg):
cur_pose = np.array([msg.pose.position.x,
msg.pose.position.y,
utils.quaternion_to_angle(msg.pose.orientation)])
success, error = self.compute_error(cur_pose)
if not success:
# We have reached our goal
self.pose_sub = None # Kill the subscriber
self.speed = 0.0 # Set speed to zero so car stops
delta = self.compute_steering_angle(error)
# Setup the control message
ads = AckermannDriveStamped()
ads.header.frame_id = '/map'
ads.header.stamp = rospy.Time.now()
ads.drive.steering_angle = delta
ads.drive.speed = self.speed
# Send the control message
self.cmd_pub.publish(ads)
def main():
rospy.init_node('line_follower', anonymous=True) # Initialize the node
"""
Load these parameters from launch file
We provide suggested starting values of params, but you should
tune them to get the best performance for your system
Look at constructor of LineFollower class for description of each var
'Default' values are ones that probably don't need to be changed (but you could for fun)
'Starting' values are ones you should consider tuning for your system
"""
# YOUR CODE HERE
plan_topic = # Default val: '/planner_node/car_plan'
pose_topic = # Default val: '/sim_car_pose/pose'
plan_lookahead = # Starting val: 5
translation_weight = # Starting val: 1.0
rotation_weight = # Starting val: 0.0
    kp = # Starting val: 1.0
ki = # Starting val: 0.0
kd = # Starting val: 0.0
error_buff_length = # Starting val: 10
speed = # Default val: 1.0
raw_input("Press Enter to when plan available...") # Waits for ENTER key press
# Use rospy.wait_for_message to get the plan msg
# Convert the plan msg to a list of 3-element numpy arrays
# Each array is of the form [x,y,theta]
# Create a LineFollower object
# YOUR CODE HERE
rospy.spin() # Prevents node from shutting down
if __name__ == '__main__':
main()
```
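The skeleton above leaves the PID terms to be filled in. As a generic, hedged sketch of the derivative and integral pieces described in `compute_steering_angle` (one possible reading of the hints, not the lab's official solution; names and gains are assumptions):

```python
# Generic PID sketch; error_buff is assumed to hold (error, time) pairs as in the class above.
def pid_steering(error, now, error_buff, kp=1.0, ki=0.0, kd=0.0):
    deriv_error = 0.0
    if len(error_buff) > 0:
        last_error, last_time = error_buff[-1]
        dt = now - last_time
        if dt > 0:
            deriv_error = (error - last_error) / dt
    error_buff.append((error, now))
    # approximate the error integral with rectangles of averaged height over each time slice
    integ_error = 0.0
    buff = list(error_buff)
    for (e0, t0), (e1, t1) in zip(buff, buff[1:]):
        integ_error += 0.5 * (e0 + e1) * (t1 - t0)
    return kp * error + ki * integ_error + kd * deriv_error
```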
#### File: lab1/src/utils.py
```python
import rospy
import numpy as np
from std_msgs.msg import Header
from geometry_msgs.msg import Quaternion
from nav_msgs.srv import GetMap
import tf.transformations
import tf
import matplotlib.pyplot as plt
'''
Convert an angle in radians into a quaternion message.
In:
angle: The yaw angle in radians
Out:
The Quaternion message
'''
def angle_to_quaternion(angle):
return Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))
'''
Convert a quaternion message into an angle in radians.
In:
q: The quaternion message
Out:
The yaw angle
'''
def quaternion_to_angle(q):
x, y, z, w = q.x, q.y, q.z, q.w
roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))
return yaw
'''
Returns a rotation matrix that applies the passed angle (in radians)
In:
theta: The desired rotation angle
Out:
The corresponding rotation matrix
'''
def rotation_matrix(theta):
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
''' Get the map from the map server
In:
map_topic: The service topic that will provide the map
Out:
map_img: A numpy array with dimensions (map_info.height, map_info.width).
A zero at a particular location indicates that the location is impermissible
A one at a particular location indicates that the location is permissible
map_info: Info about the map, see
http://docs.ros.org/kinetic/api/nav_msgs/html/msg/MapMetaData.html
for more info
'''
def get_map(map_topic):
rospy.wait_for_service(map_topic)
map_msg = rospy.ServiceProxy(map_topic, GetMap)().map
array_255 = np.array(map_msg.data).reshape((map_msg.info.height, map_msg.info.width))
map_img = np.zeros_like(array_255, dtype=bool)
map_img[array_255==0] = 1
return map_img, map_msg.info
'''
Convert a pose in the world to a pixel location in the map image
In:
pose: The pose in the world to be converted. Should be a list or tuple of the
form [x,y,theta]
map_info: Info about the map (returned by get_map)
Out:
The corresponding pose in the pixel map - has the form [x,y,theta]
where x and y are integers
'''
def world_to_map(pose, map_info):
scale = map_info.resolution
angle = -quaternion_to_angle(map_info.origin.orientation)
config = [0.0,0.0,0.0]
# translation
config[0] = (1.0/float(scale))*(pose[0] - map_info.origin.position.x)
config[1] = (1.0/float(scale))*(pose[1] - map_info.origin.position.y)
config[2] = pose[2]
# rotation
c, s = np.cos(angle), np.sin(angle)
# we need to store the x coordinates since they will be overwritten
temp = np.copy(config[0])
config[0] = int(c*config[0] - s*config[1])
config[1] = int(s*temp + c*config[1])
config[2] += angle
return config
'''
Convert a pixel location in the map to a pose in the world
In:
pose: The pixel pose in the map. Should be a list or tuple of the form [x,y,theta]
map_info: Info about the map (returned by get_map)
Out:
The corresponding pose in the world - has the form [x,y,theta]
'''
def map_to_world(pose,map_info):
scale = map_info.resolution
angle = quaternion_to_angle(map_info.origin.orientation)
# rotate
config = np.array([pose[0],map_info.height-pose[1],pose[2]])
# rotation
c, s = np.cos(angle), np.sin(angle)
# we need to store the x coordinates since they will be overwritten
temp = np.copy(config[0])
config[0] = c*config[0] - s*config[1]
config[1] = s*temp + c*config[1]
# scale
config[:2] *= float(scale)
# translate
config[0] += map_info.origin.position.x
config[1] += map_info.origin.position.y
config[2] += angle
return config
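# Illustrative note (not part of the original module): with a map whose origin
# is at (0, 0), no rotation, and a resolution of 0.05 m/px, world_to_map turns
# the world pose [1.0, 2.0, theta] into the pixel pose [20, 40, theta], since
# 1 m / 0.05 m-per-pixel = 20 px; map_to_world goes the other way, from pixel
# coordinates back to world coordinates (note the y-axis flip via map_info.height).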
``` |
{
"source": "j-lindfors/bitesofpy-challenges",
"score": 3
} |
#### File: bitesofpy-challenges/97/bite_97.py
```python
from collections import defaultdict
import os
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
# prep data
tmp = os.getenv("TMP", "/tmp")
page = 'us_holidays.html'
holidays_page = os.path.join(tmp, page)
urlretrieve(
f'https://bites-data.s3.us-east-2.amazonaws.com/{page}',
holidays_page
)
with open(holidays_page) as f:
content = f.read()
holidays = defaultdict(list)
def get_us_bank_holidays(content=content):
"""Receive scraped html output, make a BS object, parse the bank
holiday table (css class = list-table), and return a dict of
keys -> months and values -> list of bank holidays"""
soup = BeautifulSoup(content, "html.parser")
list_table = soup.find_all("tr")
for i in list_table[1:]:
month = i.time.string[-5:-3]
event = i.a.string.strip()
holidays[month].append(event)
return holidays
if __name__ == '__main__':
result = get_us_bank_holidays(content)
print(result)
``` |
{
"source": "jlindo33/moviepy",
"score": 4
} |
#### File: audio/fx/multiply_volume.py
```python
from moviepy.decorators import audio_video_fx
@audio_video_fx
def multiply_volume(clip, factor):
"""Returns a clip with audio volume multiplied by the
value `factor`. Can be applied to both audio and video clips.
This effect is loaded as a clip method when you use moviepy.editor,
so you can just write ``clip.multiply_volume(2)``
Examples
--------
>>> from moviepy import AudioFileClip
>>> music = AudioFileClip('music.ogg')
    >>> new_clip = music.multiply_volume(2)  # doubles audio volume
    >>> new_clip = music.multiply_volume(0.5)  # halves audio volume
"""
return clip.transform(
lambda get_frame, t: factor * get_frame(t), keep_duration=True
)
```
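The docstring above only shows the audio-clip case; the sketch below is a hedged example of applying the same fx to a video clip's soundtrack. `movie.mp4` is a placeholder file name, not a file in this repository.
```python
# Hedged sketch: multiply_volume applied to a video clip via Clip.fx.
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.audio.fx.multiply_volume import multiply_volume

video = VideoFileClip("movie.mp4")           # placeholder input file
quiet = video.fx(multiply_volume, 0.3)       # soundtrack reduced to 30% volume
quiet.write_videofile("movie_quiet.mp4")
```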
#### File: video/fx/margin.py
```python
import numpy as np
from moviepy.decorators import apply_to_mask
from moviepy.video.VideoClip import ImageClip
@apply_to_mask
def margin(
clip,
margin_size=None,
left=0,
right=0,
top=0,
bottom=0,
color=(0, 0, 0),
opacity=1.0,
):
"""
Draws an external margin all around the frame.
    :param margin_size: if not ``None``, the new clip has a margin of
        ``margin_size`` pixels on the left, right, top, and bottom.
    :param left, right, top, bottom: width of the margin in pixels
        in each of these directions.
    :param color: color of the margin.
    :param opacity: opacity of the margin's mask. Setting
        this value to 0 yields fully transparent margins.
if (opacity != 1.0) and (clip.mask is None) and not (clip.is_mask):
clip = clip.add_mask()
if margin_size is not None:
left = right = top = bottom = margin_size
def make_bg(w, h):
new_w, new_h = w + left + right, h + top + bottom
if clip.is_mask:
shape = (new_h, new_w)
bg = np.tile(opacity, (new_h, new_w)).astype(float).reshape(shape)
else:
shape = (new_h, new_w, 3)
bg = np.tile(color, (new_h, new_w)).reshape(shape)
return bg
if isinstance(clip, ImageClip):
im = make_bg(clip.w, clip.h)
im[top : top + clip.h, left : left + clip.w] = clip.img
return clip.image_transform(lambda pic: im)
else:
def filter(gf, t):
pic = gf(t)
h, w = pic.shape[:2]
im = make_bg(w, h)
im[top : top + h, left : left + w] = pic
return im
return clip.transform(filter)
```
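A hedged usage sketch for the ``margin`` fx above; the file names are placeholders and the import path may differ between moviepy versions.
```python
# Hedged usage sketch for margin(); "video.mp4" is a placeholder path.
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.fx.margin import margin

clip = VideoFileClip("video.mp4")
framed = margin(clip, margin_size=20, color=(0, 0, 0))   # 20 px black border all around
banner = margin(clip, top=40, color=(255, 255, 255), opacity=0.5)  # translucent top strip
framed.write_videofile("framed.mp4")
```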
#### File: moviepy/tests/test_AudioClips.py
```python
import os
import numpy as np
import pytest
from moviepy.audio.AudioClip import (
AudioArrayClip,
AudioClip,
CompositeAudioClip,
concatenate_audioclips,
)
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.utils import close_all_clips
from tests.test_helper import TMP_DIR
def test_audioclip():
make_frame = lambda t: [np.sin(440 * 2 * np.pi * t)]
audio = AudioClip(make_frame, duration=2, fps=22050)
audio.write_audiofile(os.path.join(TMP_DIR, "audioclip.mp3"), bitrate="16")
assert os.path.exists(os.path.join(TMP_DIR, "audioclip.mp3"))
clip = AudioFileClip(os.path.join(TMP_DIR, "audioclip.mp3"))
# TODO Write better tests; find out why the following fail
# assert clip.duration == 2
# assert clip.fps == 22050
# assert clip.reader.bitrate == 16
close_all_clips(locals())
def test_audioclip_io():
# Generate a random audio clip of 4.989 seconds at 44100 Hz,
# and save it to a file.
input_array = np.random.random((220000, 2)) * 1.98 - 0.99
clip = AudioArrayClip(input_array, fps=44100)
clip.write_audiofile(os.path.join(TMP_DIR, "random.wav"))
# Load the clip.
# The loaded clip will be slightly longer because the duration is rounded
# up to 4.99 seconds.
# Verify that the extra frames are all zero, and the remainder is identical
# to the original signal.
clip = AudioFileClip(os.path.join(TMP_DIR, "random.wav"))
output_array = clip.to_soundarray()
np.testing.assert_array_almost_equal(
output_array[: len(input_array)], input_array, decimal=4
)
assert (output_array[len(input_array) :] == 0).all()
close_all_clips(locals())
def test_concatenate_audioclips_render():
"""Concatenated AudioClips through ``concatenate_audioclips`` should return
a clip that can be rendered to a file.
"""
make_frame_440 = lambda t: [np.sin(440 * 2 * np.pi * t)]
make_frame_880 = lambda t: [np.sin(880 * 2 * np.pi * t)]
clip_440 = AudioClip(make_frame_440, duration=0.01, fps=44100)
clip_880 = AudioClip(make_frame_880, duration=0.000001, fps=22050)
concat_clip = concatenate_audioclips((clip_440, clip_880))
concat_clip.write_audiofile(os.path.join(TMP_DIR, "concatenate_audioclips.mp3"))
assert concat_clip.duration == clip_440.duration + clip_880.duration
close_all_clips(locals())
def test_concatenate_audioclips_CompositeAudioClip():
"""Concatenated AudioClips through ``concatenate_audioclips`` should return
a CompositeAudioClip whose attributes should be consistent:
- Returns CompositeAudioClip.
- Their fps is taken from the maximum of their audios.
    - Audios are placed one after the other:
        - Duration is the sum of their durations.
        - Ends are the accumulated sum of their durations.
        - Starts are the accumulated sum of their durations, shifted so that
          the first start is 0 and the last cumulative value is unused.
- Channels are the max channels of their clips.
"""
frequencies = [440, 880, 1760]
durations = [2, 5, 1]
fpss = [44100, 22050, 11025]
clips = [
AudioClip(
lambda t: [np.sin(frequency * 2 * np.pi * t)], duration=duration, fps=fps
)
for frequency, duration, fps in zip(frequencies, durations, fpss)
]
concat_clip = concatenate_audioclips(clips)
# should return a CompositeAudioClip
assert isinstance(concat_clip, CompositeAudioClip)
# fps of the greatest fps passed into it
assert concat_clip.fps == 44100
    # audios placed one after the other
assert concat_clip.duration == sum(durations)
assert list(concat_clip.ends) == list(np.cumsum(durations))
    assert list(concat_clip.starts) == list(np.cumsum([0, *durations[:-1]]))
# channels are maximum number of channels of the clips
assert concat_clip.nchannels == max(clip.nchannels for clip in clips)
close_all_clips(locals())
def test_CompositeAudioClip_by__init__():
"""The difference between the CompositeAudioClip returned by
``concatenate_audioclips`` and a CompositeAudioClip created using the class
    directly, is that audios in ``concatenate_audioclips`` are played one after
    the other, while AudioClips passed to CompositeAudioClip can be played at
    different times, depending on their ``start`` attributes.
"""
frequencies = [440, 880, 1760]
durations = [2, 5, 1]
fpss = [44100, 22050, 11025]
starts = [0, 1, 2]
clips = [
AudioClip(
lambda t: [np.sin(frequency * 2 * np.pi * t)], duration=duration, fps=fps
).with_start(start)
for frequency, duration, fps, start in zip(frequencies, durations, fpss, starts)
]
compound_clip = CompositeAudioClip(clips)
# should return a CompositeAudioClip
assert isinstance(compound_clip, CompositeAudioClip)
# fps of the greatest fps passed into it
assert compound_clip.fps == 44100
# duration depends on clips starts and durations
ends = [start + duration for start, duration in zip(starts, durations)]
assert compound_clip.duration == max(ends)
assert list(compound_clip.ends) == ends
assert list(compound_clip.starts) == starts
# channels are maximum number of channels of the clips
assert compound_clip.nchannels == max(clip.nchannels for clip in clips)
close_all_clips(locals())
def test_concatenate_audioclip_with_audiofileclip():
# stereo A note
make_frame = lambda t: np.array(
[np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)]
).T
clip1 = AudioClip(make_frame, duration=1, fps=44100)
clip2 = AudioFileClip("media/crunching.mp3")
concat_clip = concatenate_audioclips((clip1, clip2))
concat_clip.write_audiofile(
os.path.join(TMP_DIR, "concat_clip_with_file_audio.mp3")
)
assert concat_clip.duration == clip1.duration + clip2.duration
def test_concatenate_audiofileclips():
clip1 = AudioFileClip("media/crunching.mp3").subclip(1, 4)
# Checks it works with videos as well
clip2 = AudioFileClip("media/big_buck_bunny_432_433.webm")
concat_clip = concatenate_audioclips((clip1, clip2))
concat_clip.write_audiofile(os.path.join(TMP_DIR, "concat_audio_file.mp3"))
assert concat_clip.duration == clip1.duration + clip2.duration
close_all_clips(locals())
def test_audioclip_mono_max_volume():
# mono
make_frame_440 = lambda t: np.sin(440 * 2 * np.pi * t)
clip = AudioClip(make_frame_440, duration=1, fps=44100)
max_volume = clip.max_volume()
assert isinstance(max_volume, float)
assert max_volume > 0
@pytest.mark.parametrize(("nchannels"), (2, 4, 8, 16))
@pytest.mark.parametrize(("channel_muted"), ("left", "right"))
def test_audioclip_stereo_max_volume(nchannels, channel_muted):
def make_frame(t):
frame = []
# build channels (one of each pair muted)
for i in range(int(nchannels / 2)):
if channel_muted == "left":
# if muted channel is left, [0, sound, 0, sound...]
frame.append(np.sin(t * 0))
frame.append(np.sin(440 * 2 * np.pi * t))
else:
# if muted channel is right, [sound, 0, sound, 0...]
frame.append(np.sin(440 * 2 * np.pi * t))
frame.append(np.sin(t * 0))
return np.array(frame).T
clip = AudioClip(make_frame, fps=44100, duration=1)
max_volume = clip.max_volume(stereo=True)
# if `stereo == True`, `AudioClip.max_volume` returns a Numpy array`
assert isinstance(max_volume, np.ndarray)
assert len(max_volume) == nchannels
# check channels muted and with sound
for i, channel_max_volume in enumerate(max_volume):
if i % 2 == 0:
if channel_muted == "left":
assert channel_max_volume == 0
else:
assert channel_max_volume > 0
else:
if channel_muted == "right":
assert channel_max_volume == 0
else:
assert channel_max_volume > 0
if __name__ == "__main__":
pytest.main()
```
#### File: moviepy/tests/test_Clip.py
```python
import copy
import pytest
from moviepy.Clip import Clip
from moviepy.video.VideoClip import BitmapClip
def test_clip_equality():
bitmap = [["RR", "RR"], ["RB", "RB"]]
different_bitmap = [["RR", "RB"], ["RB", "RB"]]
clip = BitmapClip(bitmap, fps=1)
same_clip = BitmapClip(bitmap, fps=1)
different_clip = BitmapClip(different_bitmap, fps=1)
assert clip == same_clip
assert clip != different_clip
@pytest.mark.parametrize(
"copy_func",
(
lambda clip: clip.copy(),
lambda clip: copy.copy(clip),
lambda clip: copy.deepcopy(clip),
),
ids=(
"clip.copy()",
"copy.copy(clip)",
"copy.deepcopy(clip)",
),
)
def test_clip_copy(copy_func):
"""Clip must be copied with `.copy()` method, `copy.copy()` and
`copy.deepcopy()` (same behaviour).
"""
clip = Clip()
other_clip = Clip()
# shallow copy of clip
for attr in clip.__dict__:
setattr(clip, attr, "foo")
copied_clip = copy_func(clip)
# assert copied attributes
for attr in copied_clip.__dict__:
assert getattr(copied_clip, attr) == getattr(clip, attr)
# other instances are not edited
assert getattr(copied_clip, attr) != getattr(other_clip, attr)
if __name__ == "__main__":
pytest.main()
```
#### File: moviepy/tests/test_compositing.py
```python
import os
import pytest
from moviepy.utils import close_all_clips
from moviepy.video.compositing.CompositeVideoClip import clips_array
from moviepy.video.compositing.concatenate import concatenate_videoclips
from moviepy.video.fx.resize import resize
from moviepy.video.VideoClip import BitmapClip, ColorClip
from tests.test_helper import TMP_DIR
def test_clips_array():
red = ColorClip((1024, 800), color=(255, 0, 0))
green = ColorClip((1024, 800), color=(0, 255, 0))
blue = ColorClip((1024, 800), color=(0, 0, 255))
video = clips_array([[red, green, blue]])
with pytest.raises(ValueError): # duration not set
video.fx(resize, width=480).write_videofile(
os.path.join(TMP_DIR, "test_clips_array.mp4")
)
close_all_clips(locals())
def test_clips_array_duration():
    # NOTE: it is unclear exactly which behaviour this test is meant to pin
    # down; if you know, please replace this comment.
red = ColorClip((256, 200), color=(255, 0, 0))
green = ColorClip((256, 200), color=(0, 255, 0))
blue = ColorClip((256, 200), color=(0, 0, 255))
video = clips_array([[red, green, blue]]).with_duration(5)
with pytest.raises(AttributeError): # fps not set
video.write_videofile(os.path.join(TMP_DIR, "test_clips_array.mp4"))
# this one should work correctly
red.fps = green.fps = blue.fps = 30
video = clips_array([[red, green, blue]]).with_duration(5)
video.write_videofile(os.path.join(TMP_DIR, "test_clips_array.mp4"))
close_all_clips(locals())
def test_concatenate_self():
clip = BitmapClip([["AAA", "BBB"], ["CCC", "DDD"]], fps=1)
target = BitmapClip([["AAA", "BBB"], ["CCC", "DDD"]], fps=1)
concatenated = concatenate_videoclips([clip])
concatenated.write_videofile(os.path.join(TMP_DIR, "test_concatenate_self.mp4"))
assert concatenated == target
def test_concatenate_floating_point():
"""
>>> print("{0:.20f}".format(1.12))
1.12000000000000010658
This test uses duration=1.12 to check that it still works when the clip duration is
represented as being bigger than it actually is. Fixed in #1195.
"""
clip = ColorClip([100, 50], color=[255, 128, 64], duration=1.12).with_fps(25.0)
concat = concatenate_videoclips([clip])
concat.write_videofile(os.path.join(TMP_DIR, "concat.mp4"), preset="ultrafast")
close_all_clips(locals())
``` |
{
"source": "jlindsey/vault_update",
"score": 3
} |
#### File: vault_update/vault_update/cli.py
```python
import argparse
import json
import os
import sys
import tempfile
from subprocess import check_output, call, CalledProcessError
__vault_url__ = os.environ['VAULT_URL']
def parse_args():
parser = argparse.ArgumentParser(description='Update an entry in Vault')
parser.add_argument('key', metavar='KEY',
help='vault entry keypath (excluding the leading "secret/")')
return parser.parse_args()
def main():
args = parse_args()
try:
raw_val = check_output(['vault', 'read', '-address=%s' % __vault_url__,
'-format=json', 'secret/%s' % args.key])
except CalledProcessError:
print("ERR: Unable to read vault data")
sys.exit(1)
data = json.loads(raw_val)['data']
temp = tempfile.NamedTemporaryFile(suffix='.json', delete=False)
with temp.file as f:
f.write(json.dumps(data, indent=2, separators=(',', ': ')))
try:
call([os.environ['EDITOR'], temp.name])
except CalledProcessError:
print("ERR: Unable to open your EDITOR")
sys.exit(1)
with open(temp.name, 'r') as f:
try:
data = json.loads(f.read())
except ValueError as e:
print("ERR: Unable to parse JSON")
print(e.message)
sys.exit(1)
vault_args = ['vault', 'write', '-address=%s' % __vault_url__, 'secret/%s' % args.key]
for key, val in data.iteritems():
vault_args.append('%s=%s' % (key, val))
try:
call(vault_args)
except CalledProcessError:
print("ERR: Unable to write data back to Vault key")
sys.exit(1)
``` |
{
"source": "jlin/inventory",
"score": 2
} |
#### File: inventory/api/tests.py
```python
from django.test import TestCase
from django.test.client import Client
try:
import json
except:
from django.utils import simplejson as json
from MacroExpansion import MacroExpansion
from KeyValueTree import KeyValueTree
from truth.models import Truth, KeyValue as TruthKeyValue
class TestMacroExpansion(TestCase):
fixtures = ['testdata.json']
def test_import(self):
try:
from MacroExpansion import MacroExpansion
except:
raise(BaseException('Unable to import Macro Expansion'))
try:
from KeyValueTree import KeyValueTree
except:
raise(BaseException('Unable to import KeyValueTree'))
def test_key_value_not_found(self):
m = MacroExpansion('host:fake-hostname2:ip_address')
self.assertEqual(m.output(),'10.99.32.1')
def test_key_value_found(self):
m = MacroExpansion('host:fake-hostname2:ip_address')
self.assertEqual(m.output(),'10.99.32.1')
#TODO Add checks for setting every property of a system through the api
class SystemApi(TestCase):
fixtures = ['testdata.json']
new_hostname = 'new_hostname999'
    def setUp(self):
self.client = Client()
def test_get_system_not_found_by_id(self):
resp = self.client.get('/api/system/-1/', follow=True)
self.assertEqual(404, resp.status_code)
def test_get_system_by_id(self):
resp = self.client.get('/api/system/1/', follow=True)
self.assertEqual(200, resp.status_code)
def test_get_system_by_hostname(self):
resp = self.client.get('/api/system/asfdasfasfasdfasfasdfsadf/', follow=True)
self.assertEqual(404, resp.status_code)
resp = self.client.get('/api/system/fake-hostname2/', follow=True)
self.assertEqual(200, resp.status_code)
def test_key_value_tree(self):
tree = KeyValueTree('fake-hostname2').final
self.assertEqual(tree['nic.0.ipv4_address.0'],'10.99.32.1')
def test_key_value_api(self):
resp = self.client.get('/api/keyvalue/?keystore=fake-hostname2', follow=True)
#print resp.content
self.assertEqual(json.loads(resp.content)['truth:test:cluster_name'], 'Test Cluster Name')
self.assertEqual(json.loads(resp.content)['host:fake-hostname1:nic.0.ipv4_address.0'], u'10.99.32.3')
resp = self.client.get('/api/keyvalue/?key=ip_address', follow=True)
self.assertEqual(json.loads(resp.content)['host:fake-hostname1:ip_address'], '10.99.32.3')
resp = self.client.get('/api/keyvalue/?key=cluster_owner', follow=True)
self.assertEqual(json.loads(resp.content)['truth:test:cluster_owner'], 'The Cluster Owner')
resp = self.client.get('/api/keyvalue/?value=10.99.32.3', follow=True)
self.assertEqual(json.loads(resp.content)['host:fake-hostname1:ip_address'], '10.99.32.3')
class DHCPApi(TestCase):
fixtures = ['testdata.json']
    def setUp(self):
self.client = Client()
def test_get_single_scope(self):
resp = self.client.get('/api/keyvalue/?key_type=dhcp_scopes', follow=True)
scope_list = json.loads(resp.content)
self.assertEqual(scope_list[0]['dhcp.is_scope'], 'True')
self.assertEqual(scope_list[0]['dhcp.scope.start'], '10.0.1.0')
self.assertEqual(scope_list[0]['dhcp.scope.end'], '10.0.1.255')
self.assertEqual(scope_list[0]['dhcp.scope.name'], 'phx-vlan73')
def test_get_second_scope(self):
resp = self.client.get('/api/keyvalue/?key_type=dhcp_scopes', follow=True)
scope_list = json.loads(resp.content)
"""self.assertEqual(scope_list[1]['dhcp.is_scope'], 'True')
self.assertEqual(scope_list[1]['dhcp.scope.start'], '10.0.0.0')
self.assertEqual(scope_list[1]['dhcp.scope.end'], '10.0.0.255')
self.assertEqual(scope_list[1]['dhcp.scope.name'], 'phx-vlan81')"""
def test_get_multiple_scopes(self):
resp = self.client.get('/api/keyvalue/?key_type=dhcp_scopes', follow=True)
scope_list = json.loads(resp.content)
"""self.assertEqual(scope_list[0]['dhcp.is_scope'], 'True')
self.assertEqual(scope_list[0]['dhcp.scope.start'], '10.0.1.0')
self.assertEqual(scope_list[0]['dhcp.scope.end'], '10.0.1.255')
self.assertEqual(scope_list[0]['dhcp.scope.name'], 'phx-vlan73')
self.assertEqual(scope_list[1]['dhcp.is_scope'], 'True')
self.assertEqual(scope_list[1]['dhcp.scope.start'], '10.0.0.0')
self.assertEqual(scope_list[1]['dhcp.scope.end'], '10.0.0.255')
self.assertEqual(scope_list[1]['dhcp.scope.name'], 'phx-vlan81')"""
def test_get_system_by_scope(self):
resp = self.client.get('/api/keyvalue/?key_type=system_by_scope&scope=phx-vlan73', follow=True)
system_list = json.loads(resp.content)
self.assertEqual(system_list[0]['nic.0.mac_address.0'],'00:00:00:00:00:AA')
self.assertEqual(system_list[0]['nic.0.ipv4_address.0'],'10.99.32.1')
self.assertEqual(system_list[0]['nic.1.mac_address.0'],'00:00:00:00:00:BB')
self.assertEqual(system_list[0]['nic.1.ipv4_address.0'],'10.99.32.2')
def test_get_adapters_by_system(self):
resp = self.client.get('/api/keyvalue/?key_type=adapters_by_system&system=fake-hostname2', follow=True)
system_list = json.loads(resp.content)
#print system_list
def test_delete_network_adapter(self):
resp = self.client.delete('/api/keyvalue/1/', {'system_hostname':'fake-hostname2', 'adapter_number':'0', 'key_type':'delete_network_adapter'}, follow=True)
#print "The content is %s" % resp.content
```
#### File: inventory/api_v1/system_handler.py
```python
from piston.handler import BaseHandler, rc
from systems.models import System, SystemRack,SystemStatus,NetworkAdapter,KeyValue,ServerModel,Allocation
from truth.models import Truth, KeyValue as TruthKeyValue
from dhcp.DHCP import DHCP as DHCPInterface
from dhcp.models import DHCP
from MacroExpansion import MacroExpansion
from KeyValueTree import KeyValueTree
import re
try:
import json
except:
from django.utils import simplejson as json
from django.test.client import Client
from django.db.models import Q
from settings import API_ACCESS
class SystemHandler(BaseHandler):
allowed_methods = API_ACCESS
model = System
#fields = ('id', 'asset_tag', 'oob_ip', 'hostname', 'operating_system', ('system_status',('status', 'id')))
exclude = ()
def read(self, request, system_id=None):
model = System
base = model.objects
#return base.get(id=453)
if 'name_search' in request.GET:
try:
s = System.objects.filter(hostname__contains=request.GET['name_search'])
except:
resp = rc.NOT_FOUND
return resp
if s is not None:
return s
if 'search' in request.GET:
search_q = Q()
has_criteria = False
systems = None
if 'asset_tag' in request.GET:
has_criteria = True
search_q &= Q(asset_tag=request.GET['asset_tag'])
if 'serial' in request.GET:
has_criteria = True
search_q &= Q(serial=request.GET['serial'])
if 'rack_order' in request.GET:
has_criteria = True
search_q &= Q(rack_order=request.GET['rack_order'])
if 'switch_ports' in request.GET:
has_criteria = True
search_q &= Q(switch_ports=request.GET['switch_ports'])
if 'system_rack_id' in request.GET:
has_criteria = True
try:
sr = SystemRack.objects.get(id=request.GET['system_rack_id'])
search_q &= Q(system_rack=sr)
except:
pass
if has_criteria:
systems = System.with_related.filter(search_q).order_by('hostname')
if systems is not None and len(systems) > 0:
return systems
else:
resp = rc.NOT_FOUND
return resp
elif system_id:
try:
try:
s = System.objects.get(id=system_id)
except:
pass
try:
s = System.objects.get(hostname=system_id)
except:
pass
if s is not None:
return s
except:
resp = rc.NOT_FOUND
return resp
else:
#return base.filter(id_gt=400) # Or base.filter(...)
return base.all()
def create(self, request, system_id=None):
s = System()
s.hostname = system_id
try:
s.save()
resp = rc.CREATED
resp.write('json = {"id":%i, "hostname":"%s"}' % (s.id, s.hostname))
except:
resp = rc.BAD_REQUEST
resp.write('Unable to Create Host')
return resp
def delete(self, request, system_id=None):
try:
try:
s = System.objects.get(id=system_id)
except:
pass
try:
s = System.objects.get(hostname=system_id)
except:
pass
id = s.id
hostname = s.hostname
s.delete()
resp = rc.ALL_OK
resp.write('json = {"id":%i, "hostname":"%s"}' % (id, hostname))
except:
resp = rc.NOT_FOUND
resp.write("Unable to find system")
return resp
def update(self, request, system_id=None):
model = System
if request.method == 'PUT':
try:
try:
s = System.objects.get(id=system_id)
except:
pass
try:
s = System.objects.get(hostname=system_id)
except:
pass
if 'allocation' in request.POST:
try:
sa = Allocation.objects.get(id=request.POST['allocation'])
s.allocation = sa
except Exception, e:
pass
resp = rc.NOT_FOUND
resp.write("Server Not Found %s" % e)
if 'server_model' in request.POST:
try:
sm = ServerModel.objects.get(id=request.POST['server_model'])
s.server_model = sm
except:
pass
#resp = rc.NOT_FOUND
#resp.write("Server Not Found")
if 'system_status' in request.POST:
ss = None
try:
ss = SystemStatus.objects.get(status=request.POST['system_status'])
s.system_status = ss
except:
pass
if ss is None:
try:
ss = SystemStatus.objects.get(id=request.POST['system_status'])
s.system_status = ss
except:
pass
if 'system_rack' in request.POST:
try:
sr = SystemRack.objects.get(id=request.POST['system_rack'])
s.system_rack = sr
except:
pass
#resp = rc.NOT_FOUND
#resp.write("System Rack Not Found")
if 'location' in request.POST:
s.location = request.POST['location']
if 'asset_tag' in request.POST:
s.asset_tag = request.POST['asset_tag']
if 'switch_ports' in request.POST:
s.switch_ports = request.POST['switch_ports']
if 'serial' in request.POST:
s.serial = request.POST['serial']
if 'rack_order' in request.POST:
s.rack_order = request.POST['rack_order']
if 'purchase_price' in request.POST:
s.purchase_price = request.POST['purchase_price']
if 'oob_ip' in request.POST:
s.oob_ip = request.POST['oob_ip']
if 'hostname' in request.POST:
s.hostname = request.POST['hostname']
if 'notes' in request.POST:
s.notes = request.POST['notes']
s.save()
resp = rc.ALL_OK
resp.write('json = {"id":%i, "hostname":"%s"}' % (s.id, s.hostname))
except:
resp = rc.NOT_FOUND
resp.write("System Updated")
return resp
```
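A hedged sketch of exercising the search branches above with Django's test client; the URL prefix follows the one used in `api/tests.py`, and the asset tag and serial values are invented.
```python
# Hedged sketch; URL prefix and field values are assumptions for illustration.
from django.test.client import Client

c = Client()
# substring search on hostname
resp = c.get('/api/system/1/', {'name_search': 'fake-hostname'}, follow=True)
# field search: passing 'search' enables AND-ed criteria such as asset_tag/serial
resp = c.get('/api/system/1/',
             {'search': '', 'asset_tag': '1234', 'serial': 'ABC123'},
             follow=True)
```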
#### File: inventory/api_v2/dhcp_handler.py
```python
from piston.handler import BaseHandler, rc
from systems.models import System, SystemRack,SystemStatus,NetworkAdapter,KeyValue,ScheduledTask
from truth.models import Truth, KeyValue as TruthKeyValue
from dhcp.DHCP import DHCP as DHCPInterface
from dhcp.models import DHCP
from MacroExpansion import MacroExpansion
from KeyValueTree import KeyValueTree
import re
try:
import json
except:
from django.utils import simplejson as json
from django.test.client import RequestFactory
from settings import API_ACCESS
from api_v2.keyvalue_handler import KeyValueHandler
factory = RequestFactory()
class DHCPHandler(BaseHandler):
allowed_methods = API_ACCESS
exclude = ()
def read(self, request, dhcp_scope=None, dhcp_action=None):
if dhcp_scope and dhcp_action == 'get_scopes':
tasks = []
for task in ScheduledTask.objects.get_all_dhcp():
tasks.append(task.task)
#ScheduledTask.objects.delete_all_dhcp()
return tasks
if dhcp_scope and dhcp_action == 'get_scopes_with_names':
truths = Truth.objects.select_related().filter(keyvalue__key='is_dhcp_scope',keyvalue__value='True').order_by('name')
truth_list = []
for t in truths:
truth_list.append({'name':t.name.strip(),'description':t.description.strip()})
return truth_list
if dhcp_scope and dhcp_action == 'view_hosts':
scope_options = []
h = KeyValueHandler()
request = factory.get('/api/v2/keyvalue/?key_type=system_by_scope&scope=%s' %
dhcp_scope, follow=True)
hosts = h.read(request)
adapter_list = []
for host in hosts:
if 'hostname' in host:
the_url = '/api/v2/keyvalue/?key_type=adapters_by_system_and_scope&dhcp_scope=%s&system=%s' % (dhcp_scope, host['hostname'])
try:
request = factory.get('/api/v2/keyvalue/?key_type=adapters_by_system_and_scope&dhcp_scope=%s&system=%s'
% (dhcp_scope, host['hostname']))
adapter_list.append(h.read(request))
except:
pass
d = DHCPInterface(scope_options, adapter_list)
return d.get_hosts()
def create(self, request, dhcp_scope=None, dhcp_action=None):
if dhcp_scope and dhcp_action == 'add_scheduled_task':
try:
task = ScheduledTask(type='dhcp',task=dhcp_scope)
task.save()
except Exception, e:
pass
#print e
return rc.ALL_OK
else:
return rc.NOT_FOUND
def delete(self, request, dhcp_scope=None, dhcp_action=None):
if dhcp_scope and dhcp_action == 'remove_scheduled_task':
try:
task = ScheduledTask.objects.get(type='dhcp',task=dhcp_scope)
task.delete()
except:
pass
return rc.ALL_OK
```
#### File: inventory/base/views.py
```python
from django.contrib import messages
from django.forms import ValidationError
from django.db import IntegrityError
from django.shortcuts import redirect, render, get_object_or_404
from django.views.generic import DeleteView
from django.views.generic import DetailView
from django.views.generic import CreateView
from django.views.generic import UpdateView
from django.views.generic import ListView
class BaseListView(ListView):
"""
Inherit ListView to specify our pagination.
"""
template_name = 'base/list.html'
class BaseDetailView(DetailView):
template_name = 'base/detail.html'
extra_context = None
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context['form_title'] = "{0} Details".format(
self.form_class.Meta.model.__name__
)
# extra_context takes precedence over original values in context
if self.extra_context:
context = dict(context.items() + self.extra_context.items())
return context
class BaseCreateView(CreateView):
template_name = "base/form.html"
extra_context = None
def post(self, request, *args, **kwargs):
try:
obj = super(BaseCreateView, self).post(request, *args, **kwargs)
# redirect back to form if errors
except (IntegrityError, ValidationError), e:
messages.error(request, str(e))
request.method = 'GET'
return super(BaseCreateView, self).get(request, *args, **kwargs)
return obj
def get(self, request, *args, **kwargs):
return super(BaseCreateView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
context['form_title'] = "Create {0}".format(
self.form_class.Meta.model.__name__
)
# extra_context takes precedence over original values in context
if self.extra_context:
context = dict(context.items() + self.extra_context.items())
return context
class BaseUpdateView(UpdateView):
template_name = "base/form.html"
extra_context = None
def __init__(self, *args, **kwargs):
super(UpdateView, self).__init__(*args, **kwargs)
def get_form(self, form_class):
form = super(BaseUpdateView, self).get_form(form_class)
return form
def post(self, request, *args, **kwargs):
try:
obj = super(BaseUpdateView, self).post(request, *args, **kwargs)
except ValidationError, e:
messages.error(request, str(e))
request.method = 'GET'
return super(BaseUpdateView, self).get(request, *args, **kwargs)
return obj
def get(self, request, *args, **kwargs):
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Add extra template variables such as form title
"""
context = super(UpdateView, self).get_context_data(**kwargs)
context['form_title'] = "Update {0}".format(
self.form_class.Meta.model.__name__
)
# extra_context takes precedence over original values in context
if self.extra_context:
context = dict(context.items() + self.extra_context.items())
return context
class BaseDeleteView(DeleteView):
template_name = 'base/confirm_delete.html'
success_url = '/'
def get_object(self, queryset=None):
obj = super(BaseDeleteView, self).get_object()
return obj
def delete(self, request, *args, **kwargs):
# Get the object to delete
obj = get_object_or_404(
self.form_class.Meta.model, pk=kwargs.get('pk', 0)
)
try:
view = super(BaseDeleteView, self).delete(request, *args, **kwargs)
except ValidationError, e:
messages.error(request, "Error: {0}".format(' '.join(e.messages)))
return redirect(obj)
messages.success(request, "Deletion Successful")
return view
class Base(DetailView):
def get(self, request, *args, **kwargs):
return render(request, "base/base.html")
```
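A hedged sketch of how these base views might be subclassed in an app; ``Widget`` and ``WidgetForm`` are invented names that do not exist in this repository.
```python
# Hypothetical subclassing of the base views above; Widget/WidgetForm are invented.
from base.views import BaseCreateView, BaseListView
from myapp.models import Widget      # assumed model, for illustration only
from myapp.forms import WidgetForm   # assumed ModelForm with Meta.model = Widget

class WidgetCreateView(BaseCreateView):
    form_class = WidgetForm
    # extra_context entries take precedence over the defaults from get_context_data()
    extra_context = {'form_title': 'Register a Widget'}

class WidgetListView(BaseListView):
    model = Widget
    queryset = Widget.objects.all()
```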
#### File: core/group/models.py
```python
from django.db import models
from core.mixins import ObjectUrlMixin
from core.keyvalue.base_option import DHCPKeyValue, CommonOption
class Group(models.Model, ObjectUrlMixin):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
#parent_group = models.ForeignKey('Group', null=True, blank=True)
def details(self):
return (
("Name", self.name),
)
class Meta:
db_table = "group"
unique_together = ("name",)
def __str__(self):
return "{0}".format(self.name)
def __repr__(self):
return "<Group: {0}>".format(self)
@classmethod
def get_api_fields(cls):
return ['name']
class GroupKeyValue(DHCPKeyValue, CommonOption):
obj = models.ForeignKey(Group, related_name='keyvalue_set', null=False)
class Meta:
db_table = "group_key_value"
unique_together = ("key", "value")
def _aa_description(self):
return
```
#### File: core/group/views.py
```python
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from core.group.models import Group
from core.group.forms import GroupForm
from core.views import (
CoreDeleteView, CoreListView, CoreCreateView, CoreUpdateView
)
class GroupView(object):
model = Group
queryset = Group.objects.all()
form_class = GroupForm
class GroupDeleteView(GroupView, CoreDeleteView):
pass
class GroupListView(GroupView, CoreListView):
template_name = "core/core_list.html"
class GroupCreateView(GroupView, CoreCreateView):
template_name = "core/core_form.html"
class GroupUpdateView(GroupView, CoreUpdateView):
template_name = "group/group_edit.html"
def group_detail(request, group_pk):
group = get_object_or_404(Group, pk=group_pk)
attrs = group.keyvalue_set.all()
return render(request, "group/group_detail.html", {
"group": group,
"attrs": attrs
})
```
#### File: hwadapter/migrations/0001_initial.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'HWAdapter'
db.create_table('hwadapter', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('enable_dhcp', self.gf('django.db.models.fields.BooleanField')(default=True)),
('name', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
('mac', self.gf('django.db.models.fields.CharField')(max_length=17)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['group.Group'], null=True, blank=True)),
('sreg', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='hwadapter_set', null=True, to=orm['static.StaticReg'])),
))
db.send_create_signal('hwadapter', ['HWAdapter'])
# Adding unique constraint on 'HWAdapter', fields ['mac', 'sreg']
db.create_unique('hwadapter', ['mac', 'sreg_id'])
# Adding model 'HWAdapterKeyValue'
db.create_table('hwadapter_key_value', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.CharField')(max_length=255)),
('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
('is_option', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_statement', self.gf('django.db.models.fields.BooleanField')(default=False)),
('has_validator', self.gf('django.db.models.fields.BooleanField')(default=False)),
('obj', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keyvalue_set', to=orm['hwadapter.HWAdapter'])),
))
db.send_create_signal('hwadapter', ['HWAdapterKeyValue'])
# Adding unique constraint on 'HWAdapterKeyValue', fields ['key', 'value', 'obj']
db.create_unique('hwadapter_key_value', ['key', 'value', 'obj_id'])
def backwards(self, orm):
# Removing unique constraint on 'HWAdapterKeyValue', fields ['key', 'value', 'obj']
db.delete_unique('hwadapter_key_value', ['key', 'value', 'obj_id'])
# Removing unique constraint on 'HWAdapter', fields ['mac', 'sreg']
db.delete_unique('hwadapter', ['mac', 'sreg_id'])
# Deleting model 'HWAdapter'
db.delete_table('hwadapter')
# Deleting model 'HWAdapterKeyValue'
db.delete_table('hwadapter_key_value')
models = {
'domain.domain': {
'Meta': {'object_name': 'Domain', 'db_table': "'domain'"},
'delegated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'master_domain': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['domain.Domain']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'purgeable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'soa': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['soa.SOA']", 'null': 'True', 'blank': 'True'})
},
'group.group': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'Group', 'db_table': "'group'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'hwadapter.hwadapter': {
'Meta': {'unique_together': "(('mac', 'sreg'),)", 'object_name': 'HWAdapter', 'db_table': "'hwadapter'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '17'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'sreg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'hwadapter_set'", 'null': 'True', 'to': "orm['static.StaticReg']"})
},
'hwadapter.hwadapterkeyvalue': {
'Meta': {'unique_together': "(('key', 'value', 'obj'),)", 'object_name': 'HWAdapterKeyValue', 'db_table': "'hwadapter_key_value'"},
'has_validator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_option': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_statement': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'obj': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keyvalue_set'", 'to': "orm['hwadapter.HWAdapter']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'soa.soa': {
'Meta': {'unique_together': "(('primary', 'contact', 'description'),)", 'object_name': 'SOA', 'db_table': "'soa'"},
'contact': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'expire': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1209600'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'minimum': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
'primary': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'refresh': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
'retry': ('django.db.models.fields.PositiveIntegerField', [], {'default': '86400'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2013062501'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'})
},
'static.staticreg': {
'Meta': {'unique_together': "(('ip_upper', 'ip_lower', 'label', 'domain'),)", 'object_name': 'StaticReg', 'db_table': "'static_reg'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['domain.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ip_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'ip_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'ip_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'reverse_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reverse_staticreg_set'", 'null': 'True', 'to': "orm['domain.Domain']"}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']", 'null': 'True', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['view.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'systems.allocation': {
'Meta': {'ordering': "['name']", 'object_name': 'Allocation', 'db_table': "u'allocations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "u'locations'"},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'systems.operatingsystem': {
'Meta': {'ordering': "['name', 'version']", 'object_name': 'OperatingSystem', 'db_table': "u'operating_systems'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.servermodel': {
'Meta': {'ordering': "['vendor', 'model']", 'object_name': 'ServerModel', 'db_table': "u'server_models'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.system': {
'Meta': {'object_name': 'System', 'db_table': "u'systems'"},
'allocation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Allocation']", 'null': 'True', 'blank': 'True'}),
'asset_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'change_password': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_dhcp_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_dns_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_nagios_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_puppet_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_switch': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'licenses': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'oob_ip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'oob_switch_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.OperatingSystem']", 'null': 'True', 'blank': 'True'}),
'patch_panel_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rack_order': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'ram': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'server_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.ServerModel']", 'null': 'True', 'blank': 'True'}),
'switch_ports': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system_rack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemRack']", 'null': 'True', 'blank': 'True'}),
'system_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemStatus']", 'null': 'True', 'blank': 'True'}),
'system_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemType']", 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'warranty_end': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'warranty_start': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'systems.systemrack': {
'Meta': {'ordering': "['name']", 'object_name': 'SystemRack', 'db_table': "u'system_racks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemstatus': {
'Meta': {'ordering': "['status']", 'object_name': 'SystemStatus', 'db_table': "u'system_statuses'"},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'color_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemtype': {
'Meta': {'object_name': 'SystemType', 'db_table': "u'system_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'view.view': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'View', 'db_table': "'view'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['hwadapter']
```
#### File: core/keyvalue/tests.py
```python
from django.test import TestCase
from django.test.client import Client
from core.group.models import Group
from mozdns.tests.utils import create_fake_zone
from core.registration.static.models import StaticReg
from systems.tests.utils import create_fake_host
class KVApiTests(TestCase):
def setUp(self):
self.c = Client()
create_fake_zone('10.in-addr.arpa', suffix='')
root_domain = create_fake_zone('foobar.mozilla.com', suffix='')
system = create_fake_host(hostname="asdf.mozilla.com")
sreg = StaticReg.objects.create(
label='foo', domain=root_domain, system=system,
ip_type='4', ip_str='10.0.0.0'
)
g = Group.objects.create(name="foo")
self.test_objs = (
('groupkeyvalue', g),
('staticregkeyvalue', sreg),
('keyvalue', system),
)
def testCRUD(self):
for obj_class, o in self.test_objs:
self.do_stuff(obj_class, o)
def do_stuff(self, obj_class, o):
key = 'foo'
value = 'bar'
create = '/en-US/core/keyvalue/api/{kv_class}/{obj_pk}/create/'.format(
kv_class=obj_class, obj_pk=o.pk
)
detail = '/en-US/core/keyvalue/api/{kv_class}/{obj_pk}/list/'.format(
kv_class=obj_class, obj_pk=o.pk
)
resp1 = self.c.post(create, {'key': key, 'value': value})
self.assertEqual(resp1.status_code, 201)
resp2 = self.c.post(create, {'key': key, 'value': value})
self.assertEqual(resp2.status_code, 400)
resp3 = self.c.get(detail)
self.assertEqual(resp3.status_code, 200)
resp4 = self.c.get(detail)
self.assertEqual(resp4.status_code, 200)
        self.assertEqual(1, len(o.keyvalue_set.all()))
kv = o.keyvalue_set.all()[0]
update = '/en-US/core/keyvalue/api/{kv_class}/{kv_pk}/update/'.format(
kv_class=obj_class, kv_pk=kv.pk
)
new_value = "happy magic"
resp5 = self.c.post(update, {'key': key, 'value': new_value})
self.assertEqual(resp5.status_code, 200)
kv = o.keyvalue_set.get(pk=kv.pk)
self.assertEqual(kv.value, new_value)
# Does bad update do what it's supposed to?
resp6 = self.c.post(update, {'key': key, 'value': ''})
self.assertEqual(resp6.status_code, 400)
kv = o.keyvalue_set.get(pk=kv.pk)
self.assertEqual(kv.value, new_value) # Should be no change
delete = '/en-US/core/keyvalue/api/{kv_class}/{kv_pk}/delete/'.format(
kv_class=obj_class, kv_pk=kv.pk
)
resp6 = self.c.post(delete, {'key': key, 'value': new_value})
self.assertEqual(resp6.status_code, 204)
self.assertEqual(0, len(o.keyvalue_set.all()))
class TestCaseUtils(object):
def localize_url(self, url):
if 'en-US' not in url:
url = url.replace('mozdns', 'en-US/mozdns')
return url
```
#### File: core/network/utils.py
```python
from core.network.models import Network
def calc_networks(network, nq=None):
network.update_network()
eldars = []
sub_networks = []
if not nq:
nq = Network.objects.all()
for pnet in nq.order_by('prefixlen', 'ip_upper', 'ip_lower'):
pnet.update_network()
if pnet.pk == network.pk:
continue
if pnet.network.overlaps(network.network):
if pnet.prefixlen > network.prefixlen:
sub_networks.append(pnet)
else:
eldars.append(pnet)
return eldars, sub_networks
def calc_parent(network):
eldars, sub_net = calc_networks(network)
if not eldars:
return []
parent = list(reversed(sorted(eldars, key=lambda n: n.prefixlen)))[0]
return parent
def calc_parent_str(network_str, ip_type):
network = Network(network_str=network_str, ip_type=ip_type)
return calc_parent(network)
def calc_networks_str(network_str, ip_type):
network = Network(network_str=network_str, ip_type=ip_type)
return calc_networks(network)
def calc_top_level_networks(site):
networks = list(
site.network_set.order_by('prefixlen', 'ip_upper', 'ip_lower')
)
nq = Network.objects.filter(site=site)
tlns = []
while True:
try:
cur = networks.pop(0)
except IndexError:
break
super_nets, sub_nets = calc_networks(cur, nq=nq)
if not super_nets:
tlns.append(cur)
for sn in sub_nets:
try:
networks.remove(sn)
except ValueError:
pass # The network might have a different site
def ncmp(n1, n2):
pd = n1.prefixlen - n2.prefixlen
if pd != 0:
return pd
n_u_d = n1.ip_upper - n2.ip_upper
if n_u_d != 0:
return n_u_d
return n1.ip_lower - n2.ip_lower
#return sorted(tlns, cmp=lambda n1, n2: int(ncmp(n1, n2) % 2))
return tlns
```
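A hedged usage sketch for the helpers above; the CIDR blocks are invented and a populated ``Network`` table is assumed.
```python
# Hedged usage sketch; assumes Network rows such as 10.0.0.0/16 and 10.0.1.0/24 exist.
from core.network.utils import calc_networks_str, calc_parent_str

supernets, subnets = calc_networks_str('10.0.0.0/22', '4')
# supernets -> overlapping networks with an equal or shorter prefix (e.g. 10.0.0.0/16)
# subnets   -> overlapping networks with a longer prefix (e.g. 10.0.1.0/24)

parent = calc_parent_str('10.0.1.0/24', '4')
# the most specific containing network, or [] if there is none
```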
#### File: range/tests/ip_chooser_tests.py
```python
from django.core.urlresolvers import reverse
from django.test import Client
from django.test import TestCase
from core.range.models import Range
from core.network.models import Network
from core.site.models import Site
from core.vlan.models import Vlan
from core.range.ip_choosing_utils import (
calc_template_ranges, integrate_real_ranges
)
import ipaddr
import simplejson as json
class ChooserOverlapTests(TestCase):
def setUp(self):
self.n1 = Network.objects.create(
network_str='10.8.0.0/24', ip_type='4'
)
def test_contained_in_template(self):
# 1 to 15 is templated to be special purpose
r1 = Range.objects.create(
start_str='10.8.0.2', end_str='10.8.0.14', network=self.n1
)
trs = calc_template_ranges(self.n1)
rs = integrate_real_ranges(self.n1, trs)
self.assertEqual(len(rs), len(trs))
rs = sorted(rs, key=lambda r: int(ipaddr.IPv4Address(r['start'])))
self.assertEqual(r1.start_str, rs[0]['start'])
self.assertEqual(r1.end_str, rs[0]['end'])
self.assertEqual(r1.pk, rs[0]['pk'])
def test_not_in_template(self):
# 1 to 15 is templated to be special purpose
r1 = Range.objects.create(
start_str='10.8.0.2', end_str='10.8.0.14', network=self.n1
)
trs = calc_template_ranges(self.n1)
trs = sorted(trs, key=lambda r: int(ipaddr.IPv4Address(r['start'])))
# remove the first range that would have conflicted
trs.pop(0)
rs = integrate_real_ranges(self.n1, trs)
rs = sorted(rs, key=lambda r: int(ipaddr.IPv4Address(r['start'])))
self.assertEqual(r1.start_str, rs[0]['start'])
self.assertEqual(r1.end_str, rs[0]['end'])
self.assertEqual(r1.pk, rs[0]['pk'])
def test_overlaps_two_ranges(self):
# 1 to 15 is templated to be special purpose
# 16 to 127 is templated to be multi-host pools
r1 = Range.objects.create(
start_str='10.8.0.10', end_str='10.8.0.100', network=self.n1
)
trs = calc_template_ranges(self.n1)
n_trs = len(trs)
rs = integrate_real_ranges(self.n1, trs)
# We should have lost one range
self.assertEqual(n_trs - 1, len(rs))
rs = sorted(rs, key=lambda r: int(ipaddr.IPv4Address(r['start'])))
self.assertEqual(r1.start_str, rs[0]['start'])
self.assertEqual(r1.end_str, rs[0]['end'])
self.assertEqual(r1.pk, rs[0]['pk'])
class ChooserTests(TestCase):
def setUp(self):
"""
We need to setup a realistic set of objects to test the related engine
sites = s1 s2 s3
vlans = v1 v2 v3
networks = n1 n2 n3 n4 n5
Site Relationships:
n1 -> s1
n2 -> s1
n3 -> s3
n4 -> s2
n5 -> s2
n6 -> None
n7 -> s2
n8 -> None
Vlan Relationships:
n1 -> v1
n2 -> v1
n3 -> v2
n4 -> v3
n5 -> v3
n6 -> v3
n7 -> None
n8 -> None
"""
self.client = Client()
#sites = s1 s2 s3
self.s1 = Site.objects.create(full_name="s1")
self.s2 = Site.objects.create(full_name="s2")
self.s3 = Site.objects.create(full_name="s3")
#vlans = v1 v2 v3
self.v1 = Vlan.objects.create(name="v1", number=1)
self.v2 = Vlan.objects.create(name="v2", number=2)
self.v3 = Vlan.objects.create(name="v3", number=3)
        #networks = n1 n2 n3 n4 n5 n6 n7 n8
#Relationships:
n_t = "10.0.{0}.0/24" # network_str template
#n1 -> s1
#n1 -> v1
self.n1 = Network.objects.create(
network_str=n_t.format(1), site=self.s1, vlan=self.v1, ip_type='4'
)
#n2 -> s1
#n2 -> v1
self.n2 = Network.objects.create(
network_str=n_t.format(2), site=self.s2, vlan=self.v2, ip_type='4'
)
#n3 -> s3
#n3 -> v2
self.n3 = Network.objects.create(
network_str=n_t.format(3), site=self.s3, vlan=self.v2, ip_type='4'
)
#n4 -> s2
#n4 -> v3
self.n4 = Network.objects.create(
network_str=n_t.format(4), site=self.s2, vlan=self.v3, ip_type='4'
)
#n5 -> s2
#n5 -> v3
self.n5 = Network.objects.create(
network_str=n_t.format(5), site=self.s2, vlan=self.v3, ip_type='4'
)
#n6 -> None
#n6 -> v3
self.n6 = Network.objects.create(
network_str=n_t.format(6), site=None, vlan=self.v3, ip_type='4'
)
#n7 -> s2
#n7 -> None
self.n7 = Network.objects.create(
network_str=n_t.format(7), site=self.s2, vlan=None, ip_type='4'
)
#n8 -> None
#n8 -> None
self.n8 = Network.objects.create(
network_str=n_t.format(8), site=None, vlan=None, ip_type='4'
)
def test_one_site(self):
state = {
'networks': [],
'sites': [self.s1.pk],
'vlans': [],
}
state['choice'] = ['site', self.s1.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
r = json.loads(resp.content)
self.assertEqual(1, len(r['sites']))
self.assertEqual(self.s1.pk, r['sites'][0]['value'])
self.assertEqual(0, len(r['vlans']))
self.assertEqual(0, len(r['networks']))
def test_two_sites(self):
state = {
'networks': [],
'sites': [self.s1.pk, self.s2.pk],
'vlans': [],
}
state['choice'] = ['site', self.s1.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
r = json.loads(resp.content)
self.assertEqual(1, len(r['sites']))
self.assertEqual(self.s1.pk, r['sites'][0]['value'])
self.assertEqual(0, len(r['vlans']))
self.assertEqual(0, len(r['networks']))
def test_two_sites_un_related_net(self):
state = {
'networks': [self.n3.pk],
'sites': [self.s1.pk, self.s2.pk],
'vlans': [],
}
state['choice'] = ['site', self.s1.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
r = json.loads(resp.content)
self.assertEqual(1, len(r['sites']))
self.assertEqual(self.s1.pk, r['sites'][0]['value'])
self.assertEqual(0, len(r['vlans']))
self.assertEqual(0, len(r['networks']))
def test_two_sites_un_related_nets_and_vlans(self):
state = {
'networks': [self.n3.pk, self.n4.pk],
'sites': [self.s1.pk, self.s2.pk],
'vlans': [self.n3.vlan.pk, self.n4.vlan.pk],
}
state['choice'] = ['site', self.s1.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
r = json.loads(resp.content)
self.assertEqual(1, len(r['sites']))
self.assertEqual(self.s1.pk, r['sites'][0]['value'])
self.assertEqual(0, len(r['vlans']))
self.assertEqual(0, len(r['networks']))
def test_related_site_network_vlan(self):
n = self.s1.network_set.all()[0]
state = {
'networks': [n.pk],
'sites': [self.s1.pk],
'vlans': [n.vlan.pk]
}
state['choice'] = ['site', self.s1.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
r = json.loads(resp.content)
self.assertEqual(1, len(r['networks']))
self.assertEqual(n.pk, r['networks'][0]['value'])
self.assertEqual(1, len(r['sites']))
self.assertEqual(self.s1.pk, r['sites'][0]['value'])
self.assertEqual(1, len(r['vlans']))
self.assertEqual(n.vlan.pk, r['vlans'][0]['value'])
def test_related_site_network_vlan_with_unrelated_objects(self):
state = {
'networks': [self.n1.pk, self.n4.pk],
'sites': [self.s1.pk, self.s2.pk],
'vlans': [self.n1.vlan.pk]
}
state['choice'] = ['site', self.s1.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
r = json.loads(resp.content)
self.assertEqual(1, len(r['networks']))
self.assertEqual(self.n1.pk, r['networks'][0]['value'])
self.assertEqual(1, len(r['sites']))
self.assertEqual(self.s1.pk, r['sites'][0]['value'])
self.assertEqual(1, len(r['vlans']))
self.assertEqual(self.n1.vlan.pk, r['vlans'][0]['value'])
def test_choose_network_with_no_site(self):
state = {
'networks': [self.n6.pk],
'sites': [self.s1.pk, self.s2.pk],
'vlans': [self.n6.vlan.pk]
}
state['choice'] = ['network', self.n6.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
r = json.loads(resp.content)
self.assertEqual(1, len(r['networks']))
self.assertEqual(self.n6.pk, r['networks'][0]['value'])
self.assertEqual(0, len(r['sites']))
self.assertEqual(1, len(r['vlans']))
self.assertEqual(self.n6.vlan.pk, r['vlans'][0]['value'])
def test_choose_network_with_no_site_no_vlan(self):
state = {
'networks': [self.n8.pk],
'sites': [self.s1.pk, self.s2.pk],
'vlans': [self.n6.vlan.pk]
}
state['choice'] = ['network', self.n8.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
r = json.loads(resp.content)
self.assertEqual(1, len(r['networks']))
self.assertEqual(self.n8.pk, r['networks'][0]['value'])
self.assertEqual(0, len(r['sites']))
self.assertEqual(0, len(r['vlans']))
def test_related_site_network_vlan_multiple_choices(self):
state = {
'networks': [self.n1.pk, self.n3.pk],
'sites': [self.n1.site.pk, self.n3.site.pk],
'vlans': [self.n1.vlan.pk, self.n3.vlan.pk]
}
state['choice'] = ['site', self.s1.pk]
resp = self.client.post(
'/en-US' + reverse('find-related'),
json.dumps(state),
content_type='application/json'
)
self.assertEqual(200, resp.status_code)
new_state = json.loads(resp.content)
self.assertEqual(1, len(new_state['networks']))
self.assertEqual(self.n1.pk, new_state['networks'][0]['value'])
self.assertEqual(1, len(new_state['sites']))
self.assertEqual(self.n1.site.pk, new_state['sites'][0]['value'])
self.assertEqual(1, len(new_state['vlans']))
self.assertEqual(self.n1.vlan.pk, new_state['vlans'][0]['value'])
# Make another choice
new_state['choice'] = ['vlan', self.n1.vlan.pk]
self.assertEqual(1, len(new_state['networks']))
self.assertEqual(self.n1.pk, new_state['networks'][0]['value'])
self.assertEqual(1, len(new_state['sites']))
self.assertEqual(self.n1.site.pk, new_state['sites'][0]['value'])
self.assertEqual(1, len(new_state['vlans']))
self.assertEqual(self.n1.vlan.pk, new_state['vlans'][0]['value'])
```
#### File: static/tests/A_tests.py
```python
from django.test import TestCase
from django.core.exceptions import ValidationError
from core.registration.static.models import StaticReg
from systems.tests.utils import create_fake_host
from mozdns.domain.models import Domain
from mozdns.address_record.models import AddressRecord
from mozdns.ip.utils import ip_to_domain_name
class AStaticRegTests(TestCase):
def create_domain(self, name, ip_type=None, delegated=False):
if ip_type is None:
ip_type = '4'
if name in ('arpa', 'in-addr.arpa', 'ip6.arpa'):
pass
else:
name = ip_to_domain_name(name, ip_type=ip_type)
d = Domain(name=name, delegated=delegated)
d.clean()
self.assertTrue(d.is_reverse)
return d
def setUp(self):
Domain.objects.all().delete()
self.arpa = self.create_domain(name='arpa')
self.arpa.save()
self.i_arpa = self.create_domain(name='in-addr.arpa')
self.i_arpa.save()
self.c = Domain(name="ccc")
self.c.save()
self.f_c = Domain(name="foo.ccc")
self.f_c.save()
self.r1 = self.create_domain(name="10")
self.r1.save()
self.n = create_fake_host(hostname="foo.mozilla.com")
self.n.clean()
self.n.save()
def do_add_sreg(self, label, domain, ip_str, ip_type='4'):
r = StaticReg(
label=label, domain=domain, ip_str=ip_str,
ip_type=ip_type, system=self.n
)
r.clean()
r.save()
repr(r)
return r
def do_add_a(self, label, domain, ip_str, ip_type='4'):
a = AddressRecord(label=label, domain=domain, ip_str=ip_str,
ip_type=ip_type)
a.clean()
a.save()
return a
def do_delete(self, r):
ip_str = r.ip_str
fqdn = r.fqdn
r.delete()
self.assertFalse(
AddressRecord.objects.filter(ip_str=ip_str, fqdn=fqdn))
def test1_conflict_add_sreg_first(self):
# Add an sreg and make sure A can't exist.
label = "foo4"
domain = self.f_c
ip_str = "10.0.0.2"
kwargs = {'label': label, 'domain': domain, 'ip_str': ip_str}
self.do_add_sreg(**kwargs)
kwargs = {'label': label, 'domain': domain, 'ip_str': ip_str}
self.assertRaises(ValidationError, self.do_add_a, **kwargs)
def test1_conflict_add_A_first(self):
# Add an A and make sure an sreg can't exist.
label = "foo5"
domain = self.f_c
ip_str = "10.0.0.2"
kwargs = {'label': label, 'domain': domain, 'ip_str': ip_str}
self.do_add_a(**kwargs)
kwargs = {'label': label, 'domain': domain, 'ip_str': ip_str}
self.assertRaises(ValidationError, self.do_add_sreg, **kwargs)
def test2_conflict_add_sreg_first(self):
# Add an sreg and update an existing A to conflict. Test for exception.
label = "fo99"
domain = self.f_c
ip_str = "10.0.0.2"
kwargs = {'label': label, 'domain': domain, 'ip_str': ip_str}
self.do_add_sreg(**kwargs)
ip_str = "10.0.0.3"
kwargs = {'label': label, 'domain': domain, 'ip_str': ip_str}
a = self.do_add_a(**kwargs)
a.ip_str = "10.0.0.2"
self.assertRaises(ValidationError, a.save)
def test2_conflict_add_A_first(self):
# Add an A and update an existing sreg to conflict. Test for
# exception.
label = "foo98"
domain = self.f_c
ip_str = "10.0.0.2"
# Add A
kwargs = {'label': label, 'domain': domain, 'ip_str': ip_str}
self.do_add_a(**kwargs)
# Add StaticReg with diff IP
ip_str = "10.0.0.3"
kwargs = {'label': label, 'domain': domain, 'ip_str': ip_str}
sreg = self.do_add_sreg(**kwargs)
# Conflict the IP on the sreg
sreg.ip_str = "10.0.0.2"
self.assertRaises(ValidationError, sreg.save)
```
#### File: static/tests/system_integration.py
```python
from django.test import TestCase
from core.registration.static.models import StaticReg
from core.registration.static.models import StaticRegKeyValue
from systems.tests.utils import create_fake_host
from mozdns.domain.models import Domain
from mozdns.address_record.models import AddressRecord
from mozdns.ip.utils import ip_to_domain_name
class SystemIntegrationTests(TestCase):
def create_domain(self, name, ip_type=None, delegated=False):
if ip_type is None:
ip_type = '4'
if name in ('arpa', 'in-addr.arpa', 'ip6.arpa'):
pass
else:
name = ip_to_domain_name(name, ip_type=ip_type)
d = Domain(name=name, delegated=delegated)
d.clean()
self.assertTrue(d.is_reverse)
return d
def setUp(self):
self.arpa = self.create_domain(name='arpa')
self.arpa.save()
self.i_arpa = self.create_domain(name='in-addr.arpa')
self.i_arpa.save()
self.c = Domain(name="ccc")
self.c.save()
self.f_c = Domain(name="foo.ccc")
self.f_c.save()
self.r1 = self.create_domain(name="10")
self.r1.save()
self.n = create_fake_host(hostname="foo.mozilla.com")
self.n.clean()
self.n.save()
def do_add(self, label, domain, ip_str, ip_type='4'):
r = StaticReg(
label=label, domain=domain, ip_str=ip_str, ip_type=ip_type,
system=self.n
)
r.clean()
r.save()
repr(r)
return r
def do_delete(self, r):
ip_str = r.ip_str
fqdn = r.fqdn
r.delete()
self.assertFalse(
AddressRecord.objects.filter(ip_str=ip_str, fqdn=fqdn))
def test1_create(self):
label = "foo"
domain = self.f_c
ip_str = "10.0.0.2"
kwargs = {'label': label, 'domain': domain,
'ip_str': ip_str}
sreg = self.do_add(**kwargs)
sreg.update_attrs()
def bad_get():
sreg.attrs.primary
self.assertRaises(AttributeError, bad_get)
x = StaticRegKeyValue.objects.filter(key='primary', obj=sreg)
self.assertFalse(x)
sreg.attrs.primary = '1'
self.assertEqual(sreg.attrs.primary, '1')
x = StaticRegKeyValue.objects.filter(key='primary', obj=sreg)
self.assertEqual(x[0].value, '1')
def test6_create(self):
label = "foo"
domain = self.f_c
ip_str = "10.0.0.2"
kwargs = {'label': label, 'domain': domain,
'ip_str': ip_str}
sreg = self.do_add(**kwargs)
sreg.update_attrs()
sreg.update_attrs()
sreg.update_attrs()
def bad_get():
sreg.attrs.primary
self.assertRaises(AttributeError, bad_get)
sreg.attrs.primary = '1'
self.assertEqual(sreg.attrs.primary, '1')
def test2_create(self):
label = "foo"
domain = self.f_c
ip_str = "10.0.0.2"
kwargs = {'label': label, 'domain': domain,
'ip_str': ip_str}
sreg = self.do_add(**kwargs)
sreg.update_attrs()
sreg.attrs.primary = '2'
self.assertEqual(sreg.attrs.primary, '2')
del sreg.attrs.primary
def bad_get():
sreg.attrs.primary
self.assertRaises(AttributeError, bad_get)
sreg.attrs.primary = '3'
self.assertEqual(sreg.attrs.primary, '3')
def test1_del(self):
label = "foo"
domain = self.f_c
ip_str = "10.0.0.2"
kwargs = {'label': label, 'domain': domain,
'ip_str': ip_str}
sreg = self.do_add(**kwargs)
sreg.update_attrs()
sreg.attrs.primary = '88'
self.assertEqual(sreg.attrs.primary, '88')
del sreg.attrs.primary
def bad_get():
sreg.attrs.primary
self.assertRaises(AttributeError, bad_get)
```
#### File: search/compiler/invdsl.py
```python
from parsley import wrapGrammar
from ometa.grammar import OMeta
from ometa.runtime import OMetaBase
from core.search.compiler.invparsley import grammar
name = 'InvDSL'
B = OMeta.makeGrammar(grammar, name=name).createParserClass(
OMetaBase, globals()
)
class ICompiler(B):
def directive(self, e, d, v):
raise NotImplemented()
def regexpr(self, r):
raise NotImplemented()
def text(self, t):
raise NotImplemented()
def compile(self, initial, values):
raise NotImplemented()
def OR_op(self, a, b):
raise NotImplemented()
def AND_op(self, a, b):
raise NotImplemented()
def NOT_op(self, a):
raise NotImplemented()
class DebugCompiler(ICompiler):
def directive(self, e, d, v):
return (e, (d, v))
def regexpr(self, r):
return r
def text(self, t):
return t
def compile(self, initial, values):
ret = initial
for op, value in values:
ret = op(ret, value)
return ret
def OR_op(self):
return lambda a, b: ('OR', a, b)
def AND_op(self):
return lambda a, b: ('AND', a, b)
def NOT_op(self):
return lambda a: ('NOT', a)
def make_debug_compiler():
return wrapGrammar(DebugCompiler)
```
#### File: search/compiler/invfilter.py
```python
import operator
import re
import ipaddr
from itertools import izip
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.fields import FieldDoesNotExist
from django.db.models import Q
from django.db.models.related import RelatedObject
from mozdns.address_record.models import AddressRecord
from mozdns.cname.models import CNAME
from mozdns.domain.models import Domain
from mozdns.mx.models import MX
from mozdns.nameserver.models import Nameserver
from mozdns.ptr.models import PTR
from mozdns.srv.models import SRV
from mozdns.soa.models import SOA
from mozdns.sshfp.models import SSHFP
from mozdns.txt.models import TXT
from mozdns.view.models import View
from core.registration.static.models import StaticReg
from core.site.models import Site
from core.hwadapter.models import HWAdapter
from core.service.models import Service
from core.network.models import Network
from core.network.utils import calc_networks_str
from core.utils import IPFilter, one_to_two
from core.vlan.models import Vlan
from core.utils import start_end_filter, resolve_ip_type
from core.search.utils import objects_to_Q
from systems.models import System, SystemRack, Allocation
class BadDirective(Exception):
pass
class BadType(Exception):
pass
searchables = (
('A', AddressRecord),
('CNAME', CNAME),
('DOMAIN', Domain),
('HWADAPTER', HWAdapter),
('MX', MX),
('NS', Nameserver),
('PTR', PTR),
('SOA', SOA),
('SRV', SRV),
('SSHFP', SSHFP),
('SREG', StaticReg),
('SYS', System),
('RACK', SystemRack),
('TXT', TXT),
('NET', Network),
('SITE', Site),
('VLAN', Vlan),
('SERVICE', Service),
('ALLOCATION', Allocation),
)
dsearchables = dict(searchables)
class _Filter(object):
"""The Base class of different filters. Implement these methods"""
ntype = "FILTER"
def __str__(self):
return self.value
def __repr__(self):
return "<{0}>".format(self)
def compile_Q(self):
pass
def build_filter(filter_, fields, filter_type):
# rtucker++
final_filter = Q()
for t in fields:
final_filter = final_filter | Q(
**{"{0}__{1}".format(t, filter_type): filter_})
return final_filter
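# Illustrative example (not part of the original source): assuming a model
# whose search_fields are ('hostname', 'notes'), build_filter('foo',
# ('hostname', 'notes'), 'icontains') returns roughly
# Q(hostname__icontains='foo') | Q(notes__icontains='foo'),
# i.e. one ORed lookup per listed field.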
class TextFilter(_Filter):
def __init__(self, rvalue):
self.value = rvalue
def compile_Q(self):
# Value is the search term
result = []
for name, Klass in searchables:
result.append(
build_filter(self.value, Klass.search_fields, 'icontains')
)
return result
class REFilter(TextFilter):
num_match = re.compile("\{(\d+)-(\d+)\}")
def _expand_number_regex(self, value):
"""
We want to turn something like /hp-node{31-40}.phx1 into
'/hp-node(31|32|33|34|35|36|37|38|39|40).phx1'
"""
matches = self.num_match.findall(value)
for low, high in matches:
padding = min(len(low), len(high))
if int(low) >= int(high):
continue
new_value = ""
for i in xrange(int(low), int(high) + 1):
new_value += "{0}|".format(str(i).rjust(padding, '0'))
new_value = '(' + new_value.strip('|') + ')'
value = value.replace('{{{0}-{1}}}'.format(low, high), new_value)
return value
def compile_Q(self):
result = []
value = self._expand_number_regex(self.value)
for name, Klass in searchables:
result.append(build_filter(value, Klass.search_fields, 'regex'))
return result
def verify_dattribute(base_class, dattribute, dvalue):
"""
We have to make sure that this field the user is specifying makes sense to
send down to django. For example, if the user searches for
sys.operating_system__wat, we should make sure a 'wat' field exists on the
operating_system class.
We are preemptively looking for these types of errors, as opposed to
waiting for django to raise a FieldError, because this way we can more
easily send helpful compiler errors to the user.
"""
# split on relational boundaries
field_path = dattribute.split('__')
cur_class = base_class
prev_field_name = ''
should_end = False # becomes True when a non-relational field is seen
for field_name in field_path:
if should_end:
# On the last iteration we saw a non-relational field. Any more
# field lookups after are going to fail, so raise an error.
raise BadDirective(
"The '{0}' in '{1}' is invalid because '{2}' isn't a "
"relational field so has not field called '{0}'"
.format(field_name, dattribute, prev_field_name)
)
try:
field = cur_class._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
possible_fields = cur_class._meta.get_all_field_names()
raise BadDirective(
"The field '{0}' isn't a field on the {1} class. Possible "
"fields are {2}".format(
field_name, cur_class.__name__, ', '.join(possible_fields)
)
)
if hasattr(field, 'rel') and bool(field.rel):
# the field is relational
cur_class = field.rel.to
elif hasattr(field, 'model') and isinstance(field, RelatedObject):
# its a related set
cur_class = field.model
else:
# If we re-enter this loop a BadDirective error will be raised
should_end = True
prev_field_name = field_name
# Note: If it is a relational set field the user _can_ ask whether or not
# the field is "NULL"
if not should_end and dvalue.upper() != "NULL":
# The for loop above left off looking at a relational field. The user
# did not specify a field on the class, so raise an error.
possible_fields = cur_class._meta.get_all_field_names()
raise BadDirective(
"You need to specify a field to search on the {0} class. Your "
"choices are {1}. For example: {2}".format(
cur_class.__name__,
', '.join(possible_fields),
"{0}__{1}".format(dattribute, possible_fields[0])
)
)
def build_dattribute_qset(dclass, eq, dattribute, dvalue, r_opts=set()):
# r_opts = restricted operators. Operators that should not be supported.
if eq in r_opts:
raise BadDirective(
"The {0} operator is not supported when searching the {1} "
"attribute".format(eq, dattribute)
)
verify_dattribute(dclass, dattribute, dvalue)
if dvalue.startswith('/'):
regex = True
dvalue = dvalue[1:]
else:
regex = False
if eq == "~":
if regex:
raise BadDirective(
"Combining the fuzzy search (~) with a regex patter (/) makes "
"no sense!"
)
search = {"{0}{1}".format(dattribute, "__icontains"): dvalue}
elif eq == '>':
search = {"{0}__gt".format(dattribute): dvalue}
elif eq == '>=':
search = {"{0}__gte".format(dattribute): dvalue}
elif eq == '<':
search = {"{0}__lt".format(dattribute): dvalue}
elif eq == '<=':
search = {"{0}__lte".format(dattribute): dvalue}
elif regex:
search = {"{0}__regex".format(dattribute): dvalue}
else:
# If dvalue is the string "NULL" tack on "__isnull" to dattribute and
# make dvalue=True. This will allow the user to search for NULL values.
# If they want to actually search for the string null (which is
# unlikely), they can use a regex match.
if dvalue.upper() == "NULL":
search = {"{0}__isnull".format(dattribute): True}
else:
search = {dattribute: dvalue}
return Q(**search)
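# Illustrative mapping (not part of the original source): a query such as
# sys.warranty_start>=2014-01-01 reaches this function with eq='>=',
# dattribute='warranty_start', dvalue='2014-01-01' and yields
# Q(warranty_start__gte='2014-01-01'); with the plain '=' operator and a
# dvalue of 'NULL' it instead yields Q(warranty_start__isnull=True).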
def build_dattribute_qsets(directive, eq, dattribute, dvalue):
dclass = dsearchables[directive] # our caller should have checked this
search_q = build_dattribute_qset(
dclass, eq, dattribute, dvalue
)
result = []
for name, Klass in searchables:
if Klass == dclass:
result.append(search_q)
else:
result.append(None)
return result
class DirectiveFilter(_Filter):
def __init__(self, eq, directive, dvalue):
self.eq = eq
self.directive, self.dattribute = directive
self.dvalue = dvalue
def strict_equality_guard(self):
if self.eq != '=' and self.eq != '=:':
raise BadDirective(
"The {0} directive only supports the strict equality operator "
"(i.e. '=').".format(self.directive)
)
def compile_Q(self):
# The way self.strict_equality_guard() is being called can be improved
# by moving all the build_* methods into DirectiveFilter class and
# using a @strict_equality_guard decorator. Right now it's a bit
# redundant.
if self.directive == 'view':
self.strict_equality_guard()
return build_view_qsets(self.dvalue)
elif self.directive == 'network':
self.strict_equality_guard()
return build_network_qsets(self.dvalue)
elif self.directive == 'vlan':
self.strict_equality_guard()
return build_vlan_qsets(self.dvalue)
elif self.directive == 'zone':
self.strict_equality_guard()
return build_zone_qsets(self.dvalue)
elif self.directive == 'range':
self.strict_equality_guard()
return build_range_qsets(self.dvalue)
elif self.directive == 'type':
self.strict_equality_guard()
return build_rdtype_qsets(self.dvalue)
elif self.directive == 'site':
self.strict_equality_guard()
return build_site_qsets(self.dvalue)
elif self.directive == 'ip':
self.strict_equality_guard()
return build_ip_qsets(self.dvalue)
elif self.directive == 'service' and not self.dattribute:
return build_service_qsets(self.eq, self.dattribute, self.dvalue)
# If we haven't already hit a hardcoded directive, try searching
# searchables to see if we can introspect one of the tables using
# dattribute
elif self.dattribute and self.directive.upper() in dsearchables:
return build_dattribute_qsets(
self.directive.upper(), self.eq, self.dattribute, self.dvalue
)
else:
raise BadDirective(
"Unknown Directive '{0}'".format(self.directive)
)
def build_rdtype_qsets(rdtype):
"""This function needs to filter out all records of a certain rdtype (like
A or CNAME). Any filter produced here has to be able to be negated. We use
the fact that every object has a pk > -1. When a qset is negated the query
becomes pk <= -1.
"""
rdtype = rdtype.upper() # Let's get consistent
select = Q(pk__gt=-1)
no_select = Q(pk__lte=-1)
result = []
found_type = False
for name, Klass in searchables:
if name == rdtype:
result.append(select)
found_type = True
else:
result.append(no_select)
if not found_type:
raise BadType("Type '{0}' does not exist!".format(rdtype))
return result
def _get_deps(service, seen, depth=-1):
if not depth:
return []
providers = []
for dep in service.providers.all():
if dep.provider in seen:
continue
providers.append(dep.provider)
seen.append(dep.provider)
providers += _get_deps(dep.provider, seen)
return providers
def build_service_qsets(eq, dattribute, dvalue):
if not dattribute: # no introspection
service_q = Q(name=dvalue)
else:
# TODO: why is my linter complaining about {x, y, ..} being invalid
# set syntax?
r_opts = set(['>', '>=', '<', '<='])
service_q = build_dattribute_qset(
Service, eq, dattribute, dvalue, r_opts
)
services = Service.objects.filter(service_q)
if not services.exists():
raise BadDirective(
"There no services found when searching for '{0}'.".format(dvalue)
)
service_qs = map(build_service_single_qset, services)
return reduce(_combine, service_qs)
def build_service_single_qset(service):
"""
Filter and return the objects associated with a service. Pull in all
objects the service depends upon.
"""
dependencies = _get_deps(service, [service])
dependencies.append(service)
service_q = objects_to_Q(dependencies)
system_q = Q(pk__lte=-1) # by default match nothing
for s in dependencies:
system_q = system_q | objects_to_Q(s.systems.all())
allocation_q = Q(pk__lte=-1) # by default match nothing
for s in dependencies:
allocation_q = allocation_q | objects_to_Q(s.allocations.all())
# Get necessary services
result = []
for name, Klass in searchables:
if name == 'SERVICE':
result.append(service_q)
elif name == 'SYS':
result.append(system_q)
elif name == 'ALLOCATION':
result.append(allocation_q)
else:
result.append(None)
return result
def build_view_qsets(view_name):
"""Filter based on DNS views."""
view_name = view_name.lower() # Let's get consistent
try:
view = View.objects.get(name=view_name)
except ObjectDoesNotExist:
raise BadDirective("'{0}' isn't a valid view.".format(view_name))
view_filter = Q(views=view) # This will slow queries down due to joins
q_sets = []
select = Q(pk__gt=-1)
for name, Klass in searchables:
if name == 'SOA':
q_sets.append(select) # SOA's are always public and private
elif hasattr(Klass, 'views'):
q_sets.append(view_filter)
else:
q_sets.append(None)
return q_sets
def build_ipf_qsets(q):
"""Filter based on IP address views.
:param q: A filter for a certain IP or IP range
:type q: Q
"""
q_sets = []
for name, Klass in searchables:
if name == 'A' or name == 'SREG' or name == 'PTR':
q_sets.append(q)
else:
q_sets.append(None)
return q_sets
def build_range_qsets(range_):
try:
start, end = range_.split(',')
except ValueError:
raise BadDirective("Specify a range using the format: start,end")
start_ip_type, _ = resolve_ip_type(start)
end_ip_type, _ = resolve_ip_type(end)
if start_ip_type != end_ip_type or not start_ip_type or not end_ip_type:
raise BadDirective("Couldn not resolve IP type of {0} and "
"{1}".format(start, end))
try:
istart, iend, ipf_q = start_end_filter(start, end, start_ip_type)
except (ValidationError, ipaddr.AddressValueError), e:
raise BadDirective(str(e))
return build_ipf_qsets(ipf_q)
def build_ip_qsets(ip_str):
"""
Possible objects returned:
* A/PTR/SREG
* Network
* Vlan
* Site
"""
ip_type, Klass = resolve_ip_type(ip_str)
NetworkCls = ipaddr.IPv4Network if ip_type == '4' else ipaddr.IPv6Network
try:
ip = NetworkCls(ip_str).network
network_str = str(NetworkCls(ip_str))
except ipaddr.AddressValueError:
raise BadDirective("{0} isn't a valid "
"IP address.".format(ip_str))
except ipaddr.NetmaskValueError, e:
raise BadDirective(
"The netmask '{0}' doesn't make any sense.".format(e)
)
try:
network = Network.objects.get(network_str=network_str)
network_q = objects_to_Q([network])
site = network.site
vlan = network.vlan
except Network.DoesNotExist:
parents, children = calc_networks_str(str(NetworkCls(ip_str)), ip_type)
network_q = objects_to_Q(parents) | objects_to_Q(children)
# Find the site. This will be the site of the smallest network that is
# in parents or if there are no parents, the largest child.
site = None
vlan = None
for parent in reversed(parents):
if parent.site:
site = parent.site
vlan = parent.vlan
break
if not site:
for child in children:
if child.site:
site = child.site
vlan = child.vlan
break
ip_upper, ip_lower = one_to_two(int(ip))
ipf_qs = build_ipf_qsets(Q(ip_upper=ip_upper, ip_lower=ip_lower))
q_sets = []
for q, (name, Klass) in izip(ipf_qs, searchables):
if name == 'NET':
q_sets.append(network_q)
elif name == 'SITE' and site:
q_sets.append(Q(pk=site.pk))
elif name == 'VLAN' and vlan:
q_sets.append(Q(pk=vlan.pk))
else:
q_sets.append(q)
return q_sets
def build_network_qsets(network_str):
ip_type, Klass = resolve_ip_type(network_str)
try:
network = Klass(network_str)
ipf = IPFilter(network.network, network.broadcast, ip_type)
except (ipaddr.AddressValueError, ipaddr.NetmaskValueError):
raise BadDirective(
"{0} isn't a valid network.".format(network_str)
)
return build_ipf_qsets(ipf.Q)
def build_site_single_qset(site):
site_q = build_ipf_qsets(site.compile_Q())
q_sets = []
for q, (name, Klass) in izip(site_q, searchables):
if name in ('RACK', 'NET'):
q_sets.append(Q(site=site))
elif name == 'SITE':
q_sets.append(Q(pk=site.pk) | Q(parent=site))
else:
q_sets.append(q)
return q_sets
def _combine(q1, q2):
"""
Given two lists of Q objects, return one list of ORed Q objects
"""
q_sets = []
for x, y in izip(q1, q2):
# q1 and q2 should have symmetry with regards to where None values
# exist
if not (x and y):
q_sets.append(None)
else:
q_sets.append(x | y)
return q_sets
def build_site_qsets(site_name):
# Look for more specific results first
sites = Site.objects.filter(full_name=site_name)
if not sites:
sites = Site.objects.filter(name=site_name)
if not sites:
raise BadDirective(
"{0} isn't a valid site.".format(site_name)
)
site_qs = map(build_site_single_qset, sites)
return reduce(_combine, site_qs)
def resolve_vlans(vlan_str):
"""
case 0: vlan_str is <name>,<number>
case 1: vlan_str is <number>,<name>
case 2: vlan_str is <number>
case 3: vlan_str is <name>
"""
try:
# Case 0
vlan_name, vlan_number = vlan_str.split(',')
if vlan_name.isdigit():
vlan_name, vlan_number = vlan_number, vlan_name
vlans = Vlan.objects.filter(number=vlan_number, name=vlan_name)
except ValueError:
# Case 1 and 2
if vlan_str.isdigit():
vlans = Vlan.objects.filter(number=vlan_str)
else:
vlans = Vlan.objects.filter(name=vlan_str)
if not vlans.exists():
raise BadDirective(
"{0} doesn't resolve to a vlan Inventory knows "
"about.".format(vlan_str)
)
return vlans
def make_vlan_q_set(vlan):
ip_qs = build_ipf_qsets(vlan.compile_Q())
q_sets = []
for q, (name, Klass) in izip(ip_qs, searchables):
if name in ('NET'):
q_sets.append(vlan.compile_network_Q())
elif name == 'VLAN':
q_sets.append(Q(pk=vlan.pk))
else:
q_sets.append(q)
return q_sets
def build_vlan_qsets(vlan_str):
"""
To use this directive you should use the 'vlan=:' directive. Vlans have a
number and a name, and some vlans have the same number/name. If you specify
a vlan name that maps back to two different vlans, both vlans and their
corresponding objects will be displayed. To specify a vlan number -and-
name, comma-separate (no spaces in between) the number and name. Some
examples::
vlan=:foo,23
vlan=:23,foo
vlan=:foo
vlan=:23
"""
vlans = resolve_vlans(vlan_str)
vlan_qs = map(make_vlan_q_set, vlans)
def OR(l1, l2):
q_sets = []
for i, j in izip(l1, l2):
if i is None and j is None:
q_sets.append(None)
else:
q_sets.append(i | j)
return q_sets
return reduce(OR, vlan_qs)
def build_zone_qsets(zone):
"""The point of this filter is to first find the root of a dns zone
specified by zone and then build a query to return all records in this
zone.
"""
try:
root_domain = Domain.objects.get(name=zone)
# This might not actually be the root of a zone, but functionally we
# don't really care.
except ObjectDoesNotExist:
raise BadDirective("'{0}' part of a valid zone.".format(zone))
if not root_domain.soa:
raise BadDirective("'{0}' part of a valid zone.".format(zone))
def _get_zone_domains(domain):
domains = [domain]
for sub_domain in domain.domain_set.filter(soa=domain.soa):
domains += _get_zone_domains(sub_domain)
return domains
zone_domains = _get_zone_domains(root_domain)
domains = [Q(domain=domain) for domain in zone_domains]
reverse_domains = [Q(reverse_domain=domain) for domain in zone_domains]
zone_query = reduce(operator.or_, domains, Q())
reverse_zone_query = reduce(operator.or_, reverse_domains, Q())
result = []
for name, Klass in searchables:
if hasattr(Klass, 'domain'):
result.append(zone_query)
elif hasattr(Klass, 'reverse_domain'):
result.append(reverse_zone_query)
elif name == 'SOA':
result.append(Q(pk=root_domain.soa.pk))
else:
result.append(None)
return result
```
#### File: search/compiler/invschema.py
```python
from django.db.models.related import RelatedObject
from core.search.compiler.invfilter import dsearchables, searchables
def discover_for_class(dclass, depth=3):
"""
Loop over all fields on a class and add them to the field_names list. If a
relational field is found, recursively call discover_for_class to find the
fields on the related class. Keep track of depth so we don't get into
a recursive loop or end up with overly verbose output.
"""
field_names = []
if not depth:
return field_names
opts = dclass._meta
# Appending a related model's fields to field_names after the
# non-relational fields will make the output prettier and more organized
relational_fields = []
for field in opts.fields:
if hasattr(field, 'rel') and bool(field.rel):
# the field is relational
relational_fields.append((field, field.rel.to))
elif hasattr(field, 'model') and isinstance(field, RelatedObject):
# its a related set
relational_fields.append((field, field.model))
else:
field_names.append(field.name)
# recursively sort out the related fields we saw
for rfield, klass in relational_fields:
field_names += map(
lambda ifield: "{0}__{1}".format(rfield.name, ifield),
discover_for_class(klass, depth=depth - 1)
)
return field_names
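# Illustrative output (assumed, not part of the original source): for a class
# with a plain 'name' field and a relational 'site' field, this returns
# something like ['name', 'site__name', 'site__full_name', ...], i.e. related
# fields are flattened with Django's '__' lookup separator down to `depth`.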
def prepend_dtype(search_fields, dtype):
return map(lambda field: "{0}.{1}".format(dtype, field), search_fields)
def discover_help():
system_search_fields = discover_for_class(dsearchables['SYS'])
# get rid of system_rack__location because we don't use it anymore.
system_search_fields = filter(
lambda field: "system_rack__location" not in field,
system_search_fields
)
service_search_fields = discover_for_class(dsearchables['SERVICE'])
return {
'SYS': prepend_dtype(system_search_fields, 'sys'),
'SERVICE': prepend_dtype(service_search_fields, 'service')
}
def discover_all():
base_schema = discover_help()
for class_name, Klass in searchables:
if class_name in base_schema:
continue
search_fields = discover_for_class(Klass)
base_schema[class_name] = prepend_dtype(
search_fields, class_name.lower()
)
return base_schema
# Cache the schema so we don't have to recalculate
# HELP_SEARCH_SCHEMA is printed to html
HELP_SEARCH_SCHEMA = discover_help()
# SEARCH_SCHEMA is never dumped completely. A user can ask specifically for a
# class' schema and have it be looked up here.
SEARCH_SCHEMA = discover_all()
```
#### File: search/tests/test_system_search.py
```python
from django.test import TestCase
from systems.tests.utils import create_fake_host
from systems.models import System
from core.search.compiler.django_compile import compile_to_django
from core.search.compiler.invschema import discover_all
class SystemTests(TestCase):
def setUp(self):
System.objects.all().delete()
self.hostname = "searching.mozilla.com"
self.notes = "foo bar baz"
s = create_fake_host(hostname=self.hostname)
s.notes = self.notes
s.save()
self.status = s.system_status.status
def cleanUp(self):
System.objects.all().delete()
def test_system_field_search_status(self):
res, error = compile_to_django(
"sys.system_status__status={0}".format(self.status)
)
self.assertFalse(error)
self.assertEqual(1, res['SYS'].count())
self.assertEqual(self.hostname, res['SYS'][0].hostname)
def test_system_field_search_hostname(self):
res, error = compile_to_django(
"sys.hostname={0}".format(self.hostname)
)
self.assertFalse(error)
self.assertEqual(1, res['SYS'].count())
self.assertEqual(self.hostname, res['SYS'][0].hostname)
def test_system_field_search_notes(self):
res, error = compile_to_django(
'sys.notes="{0}"'.format(self.notes)
)
self.assertFalse(error)
self.assertEqual(1, res['SYS'].count())
self.assertEqual(self.hostname, res['SYS'][0].hostname)
def test_system_field_search_notes_re(self):
res, error = compile_to_django(
'sys.notes=/^foo'.format()
)
self.assertFalse(error)
self.assertEqual(1, res['SYS'].count())
self.assertEqual(self.hostname, res['SYS'][0].hostname)
def test_system_field_search_notes_fuzzy(self):
res, error = compile_to_django(
'sys.notes~bar'.format()
)
self.assertFalse(error)
self.assertEqual(1, res['SYS'].count())
self.assertEqual(self.hostname, res['SYS'][0].hostname)
def test_system_field_search_null_system_rack(self):
res, error = compile_to_django(
'sys.system_rack=Null'
)
self.assertFalse(error)
self.assertEqual(1, res['SYS'].count())
self.assertEqual(self.hostname, res['SYS'][0].hostname)
def test_system_search_schema(self):
self.assertTrue('sys.system_rack__site__name' in discover_all()['SYS'])
def test_system_compile_inequality_search(self):
res, error = compile_to_django(
#'sys.warranty_end<2014-01-01 AND sys.warranty_start>=2014-01-01'
'sys.warranty_end<2014-01-01 AND sys.warranty_start>2014-01-01'
)
self.assertFalse(error)
```
#### File: core/service/constants.py
```python
def make_choices(cs):
dcs = dict(
(l.lower(), l) for l in cs
)
dcs[''] = 'Unknown' # add the default
return dcs
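# Illustrative example (not part of the original source):
# make_choices(('High', 'Low')) == {'high': 'High', 'low': 'Low',
# '': 'Unknown'} -- lower-cased keys map to display values, plus a default.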
USAGE_FREQUENCY = make_choices((
'Constantly',
'Daily',
'Periodicly',
'Occasionally',
'Rarely',
'Never',
'Unknown'
))
IMPACT = make_choices((
'High',
'Low',
'Medium',
))
```
#### File: service/migrations/0001_initial.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Dependency'
db.create_table('service_dependency', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('dependant', self.gf('django.db.models.fields.related.ForeignKey')(related_name='providers', to=orm['service.Service'])),
('provider', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dependants', to=orm['service.Service'])),
))
db.send_create_signal('service', ['Dependency'])
# Adding model 'Service'
db.create_table('service', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('parent_service', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='subservices', null=True, to=orm['service.Service'])),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['site.Site'], null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('alias', self.gf('django.db.models.fields.CharField')(max_length=511, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=511, blank=True)),
('category', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('business_owner', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('tech_owner', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('used_by', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('usage_frequency', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('impact', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('service', ['Service'])
# Adding M2M table for field allocations on 'Service'
m2m_table_name = db.shorten_name('service_allocations')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('service', models.ForeignKey(orm['service.service'], null=False)),
('allocation', models.ForeignKey(orm['systems.allocation'], null=False))
))
db.create_unique(m2m_table_name, ['service_id', 'allocation_id'])
# Adding M2M table for field systems on 'Service'
m2m_table_name = db.shorten_name('service_systems')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('service', models.ForeignKey(orm['service.service'], null=False)),
('system', models.ForeignKey(orm['systems.system'], null=False))
))
db.create_unique(m2m_table_name, ['service_id', 'system_id'])
def backwards(self, orm):
# Deleting model 'Dependency'
db.delete_table('service_dependency')
# Deleting model 'Service'
db.delete_table('service')
# Removing M2M table for field allocations on 'Service'
db.delete_table(db.shorten_name('service_allocations'))
# Removing M2M table for field systems on 'Service'
db.delete_table(db.shorten_name('service_systems'))
models = {
'service.dependency': {
'Meta': {'object_name': 'Dependency'},
'dependant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'providers'", 'to': "orm['service.Service']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependants'", 'to': "orm['service.Service']"})
},
'service.service': {
'Meta': {'object_name': 'Service', 'db_table': "'service'"},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '511', 'blank': 'True'}),
'allocations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['systems.Allocation']", 'symmetrical': 'False', 'blank': 'True'}),
'business_owner': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'depends_on': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['service.Service']", 'symmetrical': 'False', 'through': "orm['service.Dependency']", 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '511', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impact': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'parent_service': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subservices'", 'null': 'True', 'to': "orm['service.Service']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['site.Site']", 'null': 'True', 'blank': 'True'}),
'systems': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['systems.System']", 'symmetrical': 'False', 'blank': 'True'}),
'tech_owner': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'usage_frequency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'used_by': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'site.site': {
'Meta': {'unique_together': "(('full_name',),)", 'object_name': 'Site', 'db_table': "'site'"},
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['site.Site']", 'null': 'True', 'blank': 'True'})
},
'systems.allocation': {
'Meta': {'ordering': "['name']", 'object_name': 'Allocation', 'db_table': "u'allocations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "u'locations'"},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'systems.operatingsystem': {
'Meta': {'ordering': "['name', 'version']", 'object_name': 'OperatingSystem', 'db_table': "u'operating_systems'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.servermodel': {
'Meta': {'ordering': "['vendor', 'model']", 'object_name': 'ServerModel', 'db_table': "u'server_models'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.system': {
'Meta': {'object_name': 'System', 'db_table': "u'systems'"},
'allocation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Allocation']", 'null': 'True', 'blank': 'True'}),
'asset_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'change_password': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_dhcp_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_dns_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_nagios_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_puppet_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_switch': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'licenses': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'oob_ip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'oob_switch_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.OperatingSystem']", 'null': 'True', 'blank': 'True'}),
'patch_panel_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rack_order': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'ram': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'server_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.ServerModel']", 'null': 'True', 'blank': 'True'}),
'switch_ports': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system_rack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemRack']", 'null': 'True', 'blank': 'True'}),
'system_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemStatus']", 'null': 'True', 'blank': 'True'}),
'system_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemType']", 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'warranty_end': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'warranty_start': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'systems.systemrack': {
'Meta': {'ordering': "['name']", 'object_name': 'SystemRack', 'db_table': "u'system_racks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Location']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['site.Site']", 'null': 'True'})
},
'systems.systemstatus': {
'Meta': {'ordering': "['status']", 'object_name': 'SystemStatus', 'db_table': "u'system_statuses'"},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'color_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemtype': {
'Meta': {'object_name': 'SystemType', 'db_table': "u'system_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['service']
```
#### File: core/site/forms.py
```python
from django import forms
from django.core.exceptions import ValidationError
from core.site.models import Site
class SiteForm(forms.ModelForm):
full_name = forms.CharField()
class Meta:
model = Site
include = ('full_name',)
exclude = ('parent', 'name')
def validate_unique(self):
try:
self.instance.validate_unique()
except ValidationError, e:
if 'full_name' in e.message_dict:
e.message_dict['__all__'] = e.message_dict['full_name']
self._update_errors(e.message_dict)
```
#### File: core/site/views.py
```python
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from core.site.models import Site
from core.site.forms import SiteForm
from core.site.utils import get_vlans
from core.views import CoreDeleteView, CoreListView
from core.views import CoreCreateView, CoreUpdateView
class SiteView(object):
model = Site
queryset = Site.objects.all().order_by('name')
form_class = SiteForm
class SiteDeleteView(SiteView, CoreDeleteView):
success_url = '/core/site/'
def delete_site(request, site_pk):
get_object_or_404(Site, pk=site_pk)
if request.method == 'POST':
return render(request, 'site/site_confirm_delete.html')
else:
return render(request, 'site/site_confirm_delete.html')
class SiteListView(SiteView, CoreListView):
template_name = 'site/site_list.html'
class SiteCreateView(SiteView, CoreCreateView):
template_name = 'core/core_form.html'
class SiteUpdateView(SiteView, CoreUpdateView):
template_name = 'site/site_edit.html'
def site_detail(request, site_pk):
from systems.models import SystemStatus
# TODO, make this a top level import when SystemStatus is in its own app
site = get_object_or_404(Site, pk=site_pk)
return render(request, 'site/site_detail.html', {
'site': site,
'vlans': get_vlans(site),
'child_sites': site.site_set.all(),
'attrs': site.keyvalue_set.all(),
'statuses': SystemStatus.objects.all()
})
```
#### File: inventory/core/utils.py
```python
from django.db.models import Q
from django.core.exceptions import ValidationError
from settings.local import people_who_need_to_know_about_failures
from settings.local import inventorys_email
from email.mime.text import MIMEText
import ipaddr
import smtplib
import re
import urllib
# http://dev.mysql.com/doc/refman/5.0/en/miscellaneous-functions.html
# Prevent this case http://people.mozilla.com/~juber/public/t1_t2_scenario.txt
# TODO, put this in a try/except and always unlock things
def locked_function(lock_name, timeout=10):
def decorator(f):
def new_function(*args, **kwargs):
from django.db import connection
cursor = connection.cursor()
cursor.execute(
"SELECT GET_LOCK('{lock_name}', {timeout});".format(
lock_name=lock_name, timeout=timeout
)
)
ret = f(*args, **kwargs)
cursor.execute(
"SELECT RELEASE_LOCK('{lock_name}');".format(
lock_name=lock_name
)
)
return ret
return new_function
return decorator
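# Usage sketch (assumed, not part of the original source); the lock name and
# timeout below are illustrative only:
#
# @locked_function('inventory.critical_section', timeout=30)
# def critical_section():
#     ...  # runs while holding the MySQL named lock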
def fail_mail(content, subject='Inventory is having issues.',
to=people_who_need_to_know_about_failures,
from_=inventorys_email):
"""Send email about a failure."""
if not to:
return
msg = MIMEText(content)
msg['Subject'] = subject
msg['From'] = inventorys_email
# msg['To'] = to
s = smtplib.SMTP('localhost')
s.sendmail(from_, to, msg.as_string())
s.quit()
class IPFilterSet(object):
"""The IPFilterSet expects that all IPFilters added to it are of the same
type. This might be useful later.
"""
def __init__(self):
self.ipfs = []
def add(self, ipf):
self.ipfs.append(ipf)
def pprint(self):
for ipf in self.ipfs:
print ipf
def pprint_intersect(self):
for intersect in self.calc_intersect():
print intersect
def calc_intersect(self):
"""
This is where the magic comes from. Given a list of IPFilter objects,
figure the ranges that are common to all the IPFilters, and create a
new list of IPFilter objects that represent this range.
"""
def trim(self, r, rs, ip_type):
if not (rs and r):
return r
r1 = rs[0]
rx = self.intersect(r, r1, ip_type)
return self.trim(rx, rs[1:], ip_type)
def intersect(self, r1, r2, ip_type):
"""
Cases:
* Subset or equal
* Left intersect
* Right intersect
* No intersect
"""
if r1.start > r2.end:
return None
# We have intersection somewhere.
if r1.start == r2.start and r1.end == r2.end:
# r1 is subset of r2
# Low High
# r1 |---------|
# r2 |---------|
# rx |---------|
return r1
if r1.start > r2.start and r1.end < r2.end:
# r1 is subset of r2
# Low High
# r1 |-------|
# r2 |---------|
# rx |---------|
return r1
if r1.start > r2.start and r1.end > r2.start:
# Low High
# r1 |---------|
# r2 |---------|
# rx |------|
return IPFilter(None, ip_type, r1.start_upper, r1.start_lower,
r2.end_upper, r2.end_lower)
if r1.start < r2.start and r1.end < r2.end:
# Low High
# r1 |---------|
# r2 |---------|
# rx |------|
return IPFilter(None, ip_type, r2.start_upper, r2.start_lower,
r1.end_upper, r1.end_lower)
class IPFilter(object):
def __init__(self, start, end, ip_type, object_=None):
self.object_ = object_ # The composite object (it can be None)
self.ip_type = ip_type
self.start, self.end, self.Q = start_end_filter(start, end, ip_type)
def __str__(self):
return "{0} -- {1}".format(self.start, self.end)
def __repr__(self):
return str(self)
def start_end_filter(start, end, ip_type):
ip_type = ip_type
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
istart = IPKlass(start)
iend = IPKlass(end)
if int(istart) > int(iend):
raise ValidationError("start cannot be greater than end")
start_upper, start_lower = one_to_two(int(istart))
end_upper, end_lower = one_to_two(int(iend))
# Equal uppers. Lower must be within.
if start_upper == end_upper:
q = Q(ip_upper=start_upper,
ip_lower__gte=start_lower,
ip_lower__lte=end_lower,
ip_type=ip_type)
else:
q = Q(ip_upper__gt=start_upper, ip_upper__lt=end_upper,
ip_type=ip_type)
return istart, iend, q
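# Illustrative usage (not part of the original source):
# istart, iend, q = start_end_filter('10.0.0.10', '10.0.0.20', '4')
# returns both endpoints as ipaddr.IPv4Address objects and a Q restricted to
# ip_upper/ip_lower values between them (the two 64-bit halves produced by
# one_to_two below).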
def overlap(r1, r2, ip_type=None, cast_to_int=False):
if cast_to_int:
if ip_type == '4':
IP = ipaddr.IPv4Address
elif ip_type == '6':
IP = ipaddr.IPv6Address
else:
raise Exception('Not using overlap right. Missing ip_type')
to_int = lambda r: (int(IP(r[0])), int(IP(r[1])))
return _overlap(to_int(r1), to_int(r2))
else:
return _overlap(r1, r2)
def _overlap(r1, r2):
# Make r1 always larger than r2
size = lambda r: abs(r[0] - r[1])
if size(r1) > size(r2):
(r1_start, r1_end), (r2_start, r2_end) = r1, r2
else:
# They could be the same size
(r1_start, r1_end), (r2_start, r2_end) = r2, r1
if r1_start > r2_end or r1_end < r2_start: # no overlap
return None
if r1_start <= r2_start and r1_end >= r2_end:
# r2 is subset of r1 or equal
# Low High
# r1 |---------|
# r2 |-------|
# rx |---------|
# OR
# Low High
# r1 |---------|
# r2 |---------|
# rx |---------|
return r2
if r1_start >= r2_start and r1_end >= r2_end:
# Low High
# r1 |-----------|
# r2 |---------|
# rx |------|
return r1_start, r2_end
if r1_start <= r2_start and r1_end <= r2_end:
# Low High
# r1 |-----------|
# r2 |---------|
# rx |------|
return r2_start, r1_end
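# Worked examples (illustrative, not part of the original source):
# _overlap((1, 10), (5, 20)) -> (5, 10)   # partial overlap
# _overlap((1, 10), (12, 20)) -> None     # disjoint ranges
# _overlap((1, 10), (3, 7))  -> (3, 7)    # r2 contained in r1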
def networks_to_Q(networks):
"""Take a list of network objects and compile a Q that matches any object
that exists in one of those networks."""
q = Q(pk__lt=-1)
for network in networks:
network.update_ipf()
q = q | network.ipf.Q
return q
def two_to_four(start, end):
start_upper = start >> 64
start_lower = start & (1 << 64) - 1
end_upper = end >> 64
end_lower = end & (1 << 64) - 1
return start_upper, start_lower, end_upper, end_lower
def one_to_two(ip):
return (ip >> 64, ip & (1 << 64) - 1)
def two_to_one(upper, lower):
return long(upper << 64) + long(lower)
def four_to_two(start_upper, start_lower, end_upper, end_lower):
start = (start_upper << 64) + start_lower
end = (end_upper << 64) + end_lower
return start, end
def int_to_ip(ip, ip_type):
"""A wrapper that converts a 32 or 128 bit integer into human readable IP
format."""
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
return str(IPKlass(ip))
def ip_to_int(ip, ip_type):
"""A wrapper that converts a string to 32 or 128 bit integer"""
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
return int(IPKlass(ip))
def resolve_ip_type(ip_str):
if ip_str.find(':') > -1:
Klass = ipaddr.IPv6Network
ip_type = '6'
elif ip_str.find('.') > -1:
Klass = ipaddr.IPv4Network
ip_type = '4'
else:
Klass = None
ip_type = None
return ip_type, Klass
def to_a(text, obj, use_absolute_url=True):
if use_absolute_url:
return "<a href='{0}'>{1}</a>".format(obj.get_absolute_url(), text)
else:
return "<a href='{0}'>{1}</a>".format(obj, text)
def create_key_index(kvs):
index = {}
for kv in kvs:
index[kv['key']] = kv
return index
def mozillian(name):
return "https://mozillians.org/en-US/search/?q={0}".format(
urllib.quote_plus(name)
)
def mozillian_a(name):
return "<a href='{0}'>{1}</a>".format(mozillian(re.escape(name)), name)
```
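A quick standalone check of the 64-bit split that `one_to_two` and `two_to_one` implement above (a sketch only; it reimplements the two helpers so it runs without the Django project or the `ipaddr` package):

```python
# Sketch: round-trip a 128-bit integer through the upper/lower 64-bit split
# used to store IPv6 addresses as two database columns.
def one_to_two(ip):
    return (ip >> 64, ip & (1 << 64) - 1)

def two_to_one(upper, lower):
    return (upper << 64) + lower

ip = 0x20010db8000000000000000000000001  # 2001:db8::1 as an integer
upper, lower = one_to_two(ip)
assert two_to_one(upper, lower) == ip
print(upper, lower)  # the halves stored in ip_upper / ip_lower
```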
#### File: core/vlan/models.py
```python
from django.db import models
from django.db.models import Q
from core.mixins import ObjectUrlMixin, CoreDisplayMixin
from mozdns.domain.models import Domain
from core.utils import networks_to_Q
from core.keyvalue.models import KeyValue
class Vlan(models.Model, ObjectUrlMixin, CoreDisplayMixin):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
number = models.PositiveIntegerField()
search_fields = ('name', 'number')
template = (
"{name:$lhs_just} {rdtype:$rdtype_just} {number:$rhs_just}"
)
class Meta:
db_table = "vlan"
unique_together = ("name", "number")
def __str__(self):
return "{0} {1}".format(self.number, self.name)
def __repr__(self):
return "<Vlan {0}>".format(str(self))
@classmethod
def get_api_fields(cls):
return ['name', 'number']
@property
def rdtype(self):
return 'VLAN'
def details(self):
return (
("Name", self.name),
("Number", self.number),
)
def compile_Q(self):
"""Compile a Django Q that will match any IP inside this vlan."""
return networks_to_Q(self.network_set.all())
def compile_network_Q(self):
"""
Compile a Django Q that will match all networks that relate to this
vlan.
"""
def combine(q, n):
return q | Q(pk=n.pk)
return reduce(combine, self.network_set.all(), Q(pk__lt=-1))
def find_domain(self):
"""
This member function will look at all the Domain objects and attempt
to find an appropriate domain that corresponds to this VLAN.
"""
for network in self.network_set.all():
if network.site:
expected_name = "{0}.{1}.mozilla.com".format(
self.name, network.site.get_site_path())
try:
domain = Domain.objects.get(name=expected_name)
except Domain.DoesNotExist:
continue
return domain.name
return None
class VlanKeyValue(KeyValue):
obj = models.ForeignKey(Vlan, related_name='keyvalue_set', null=False)
class Meta:
db_table = "vlan_key_value"
unique_together = ("key", "value")
def _aa_description(self):
return
```
#### File: inventory/decommission/views.py
```python
from django.http import HttpResponse
from django.core.exceptions import ValidationError
from django.db import transaction
from decommission.decommission_utils import BadData, decommission_host
import MySQLdb
import simplejson as json
import reversion
def decommission_(main_blob, load_json=True):
try:
if load_json:
json_blob = json.loads(main_blob)
else:
json_blob = main_blob
except ValueError, e: # Can't find JSONDecodeError
return None, {'errors': str(e)}
try:
systems = json_blob['systems']
except (KeyError, TypeError):
return None, {'errors': 'Main JSON needs to have a key "systems".'}
options = json_blob.get('options', {})
opts = {
'decommission_system_status': 'decommissioned',
'decommission_sreg': True,
'convert_to_sreg': True,
'remove_dns': True
}
opts.update(options)
commit = json_blob.get('commit', False)
comment = json_blob.get('comment', '')
if not isinstance(systems, list):
return None, {'errors': 'Was expecting {"systems": [...]}'}
@transaction.commit_manually
def do_decommission():
messages = []
if reversion.revision_context_manager.is_active():
reversion.set_comment(comment)
try:
for i, hostname in enumerate(systems):
messages += decommission_host(hostname, opts, comment)
except BadData, e:
transaction.rollback()
return None, {
'errors': 'Found an issue while processing system with '
'hostname {0}. {1}'.format(hostname, e.msg)
}
except ValidationError, e:
transaction.rollback()
field_errors = ''
if hasattr(e, 'message_dict'):
for field, errors in e.message_dict.iteritems():
field_errors += "{0}: {1} ".format(field, ' '.join(errors))
else:
field_errors = ', '.join(e.messages)
transaction.rollback()
return None, {
'errors': 'Found an issue while processing system with '
'hostname {0}. {1}'.format(hostname, field_errors)
}
except MySQLdb.Warning, e:
transaction.rollback()
return None, {
'errors': 'Found an issue while processing system with '
'hostname {0}. {1}'.format(hostname, e.message)
}
except Exception, e:
transaction.rollback()
return None, {
'errors': 'Please tell someone about this error: {0}'.format(e), # noqa
}
else:
if commit:
transaction.commit()
else:
transaction.rollback()
json_blob['messages'] = messages
return json_blob, None
return do_decommission()
def decommission(request):
raw_data = request.raw_post_data
if not raw_data:
return HttpResponse(json.dumps({'errors': 'what do you want?'}))
systems, errors = decommission_(raw_data)
return HttpResponse(
json.dumps(systems or errors), status=400 if errors else 200
)
```
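For reference, a sketch of the JSON payload `decommission_` expects, reconstructed from the keys it reads above; the hostname and comment are made up:

```python
# Hypothetical payload for decommission_(); keys mirror what the view reads.
import json

payload = {
    "systems": ["host1.example.mozilla.com"],  # made-up hostname
    "options": {"remove_dns": True},           # merged over the opts defaults
    "commit": False,                           # False keeps the dry-run rollback
    "comment": "decommission via API",
}
print(json.dumps(payload, indent=2))
# decommission_(json.dumps(payload)) returns (json_blob, None) on success
# or (None, {'errors': ...}) on failure.
```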
#### File: inventory/decorators/printqueries.py
```python
import os, time
COLORS = {'blue':34, 'cyan':36, 'green':32, 'grey':30, 'magenta':35, 'red':31, 'white':37, 'yellow':33}
RESET = '\033[0m'
def print_queries(filter=None):
""" Print all queries executed in this funnction. """
def wrapper1(func):
def wrapper2(*args, **kwargs):
from django.db import connection
sqltime, longest, numshown = 0.0, 0.0, 0
initqueries = len(connection.queries)
starttime = time.time()
result = func(*args, **kwargs)
for query in connection.queries[initqueries:]:
sqltime += float(query['time'].strip('[]s'))
longest = max(longest, float(query['time'].strip('[]s')))
if not filter or filter in query['sql']:
numshown += 1
querystr = colored('\n[%ss] ' % query['time'], 'yellow')
querystr += colored(query['sql'], 'blue')
print querystr
numqueries = len(connection.queries) - initqueries
numhidden = numqueries - numshown
runtime = round(time.time() - starttime, 3)
proctime = round(runtime - sqltime, 3)
print colored("------", 'blue')
print colored('Total Time: %ss' % runtime, 'yellow')
print colored('Proc Time: %ss' % proctime, 'yellow')
print colored('Query Time: %ss (longest: %ss)' % (sqltime, longest), 'yellow')
print colored('Num Queries: %s (%s hidden)\n' % (numqueries, numhidden), 'yellow')
return result
return wrapper2
return wrapper1
def colored(text, color=None):
""" Colorize text {red, green, yellow, blue, magenta, cyan, white}. """
if os.getenv('ANSI_COLORS_DISABLED') is None and 1 == 2:
fmt_str = '\033[%dm%s'
if color is not None:
text = fmt_str % (COLORS[color], text)
text += RESET
return text
```
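A minimal usage sketch for the decorator above (the import path and the model are assumptions based on this repository's layout):

```python
# Sketch: wrap any function that touches the ORM to print its SQL and timings.
from decorators.printqueries import print_queries

@print_queries(filter='systems_system')  # only show queries containing this substring
def fetch_hosts():
    from systems.models import System
    return list(System.objects.all()[:10])

# Calling fetch_hosts() prints each matching query plus the total, processing
# and query-time summary lines emitted by the decorator.
```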
#### File: inventory/dhcp/compare_tests.py
```python
from django.test import TestCase
from django.test.client import Client
import json
from dhcp.DHCPHash import DHCPHash, compare_lists, DHCPHashCompare
class DHCPMigrateTest(TestCase):
fixtures = ['testdata.json']
def setUp(self):
self.new_file = """
host foofake1.db.phx1.mozilla.com {
hardware ethernet AA:BB:46:83:BA:F0;
fixed-address 10.99.99.11;
filename "foo.bar.tar.gz";
option host-name "foofake1.db.phx1.mozilla.com";
option domain-name-servers "10.0.0.1,10.0.0.2";
option domain-name "mozilla.com";
}
host foofake1.db.phx1.mozilla.com {
hardware ethernet AA:BB:46:83:BA:F4;
fixed-address 10.99.99.11;
}
host foofake2.db.phx1.mozilla.com {
hardware ethernet AA:BB:05:72:18:38;
fixed-address 10.99.99.12;
}"""
self.client = Client()
def test1_hash_string_accepted(self):
d = DHCPHash(self.new_file)
self.assertEqual(d.list_string, self.new_file)
"""def test2_remove_formatting(self):
d = DHCPHash(self.new_file)
unformatted_string = "host foofake1.db.phx1.mozilla.com-asdfasdfasdfdsfa"
unformatted_string += ' {hardware ethernet AA:BB:46:83:BA:F0;fixed-address 10.99.99.11;}\n'
unformatted_string += 'host foofake1.db.phx1.mozilla.com-asdfadsf {hardware ethernet AA:BB:46:83:BA:F4;fixed-address 10.99.99.11;}\n'
unformatted_string += 'host foofake2.db.phx1.mozilla.com-asdfasdf {hardware ethernet AA:BB:05:72:18:38;fixed-address 10.99.99.12;}\n'
unformatted = d.remove_formatting(d.list_string)"""
def test3_test_split(self):
d = DHCPHash(self.new_file)
unformatted = d.remove_formatting(d.list_string)
the_list = d.split_lines(unformatted)
self.assertEqual(len(the_list), 3)
def test4_create_hash(self):
d = DHCPHash(self.new_file)
unformatted = d.remove_formatting(d.list_string)
the_list = d.split_lines(unformatted)
hashed_list = d.hash_list(the_list)
self.assertEqual(hashed_list[0]['host'], 'foofake1.db.phx1.mozilla.com')
self.assertEqual(hashed_list[0]['hardware ethernet'], 'AA:BB:46:83:BA:F0')
self.assertEqual(hashed_list[0]['fixed-address'], '10.99.99.11')
self.assertEqual(hashed_list[0]['option domain-name'], 'mozilla.com')
self.assertEqual(hashed_list[0]['option host-name'], 'foofake1.db.phx1.mozilla.com')
self.assertEqual(hashed_list[0]['option domain-name-servers'], '10.0.0.1,10.0.0.2')
def test5_host_missing_from_one_list(self):
d = DHCPHash(self.new_file)
unformatted = d.remove_formatting(d.list_string)
the_list = d.split_lines(unformatted)
hashed_list = d.hash_list(the_list)
second_hash = list(hashed_list)
self.assertEqual(hashed_list, second_hash)
second_hash.pop()
self.assertNotEqual(hashed_list, second_hash)
resp = compare_lists(hashed_list, second_hash)
self.assertNotEqual(resp, None)
def test6_host_different_from_one_list(self):
a = DHCPHash(self.new_file)
b = DHCPHash(self.new_file)
a_hashed_list = a.get_hash()
b_hashed_list = b.get_hash()
self.assertEqual(a_hashed_list, b_hashed_list)
self.assertEqual(compare_lists(a_hashed_list, b_hashed_list), None)
self.assertNotEqual(id(a_hashed_list), id(b_hashed_list))
a_hashed_list[0]['host'] = 'im.fake.yep'
self.assertNotEqual(compare_lists(a_hashed_list, b_hashed_list), None)
def test7_hardware_ethernet_different_from_one_list(self):
a = DHCPHash(self.new_file)
b = DHCPHash(self.new_file)
a_hashed_list = a.get_hash()
b_hashed_list = b.get_hash()
self.assertEqual(a_hashed_list, b_hashed_list)
self.assertEqual(compare_lists(a_hashed_list, b_hashed_list), None)
self.assertNotEqual(id(a_hashed_list), id(b_hashed_list))
a_hashed_list[0]['hardware ethernet'] = '00:00:00:AA:BB:AB'
self.assertNotEqual(compare_lists(a_hashed_list, b_hashed_list), None)
def test8_extra_option_in_one_list_different_from_one_list(self):
a = DHCPHash(self.new_file)
b = DHCPHash(self.new_file)
a_hashed_list = a.get_hash()
b_hashed_list = b.get_hash()
self.assertEqual(a_hashed_list, b_hashed_list)
self.assertEqual(compare_lists(a_hashed_list, b_hashed_list), None)
self.assertNotEqual(id(a_hashed_list), id(b_hashed_list))
a_hashed_list[1]['filename'] = 'asdfasfdasdf.tar.gz'
self.assertNotEqual(compare_lists(a_hashed_list, b_hashed_list), None)
def test9_initial_dhcp_hash_compare(self):
a = DHCPHash(self.new_file)
b = DHCPHash(self.new_file)
a_hashed_list = a.get_hash()
b_hashed_list = b.get_hash()
dc = DHCPHashCompare(a_hashed_list, 'KeyValue List', b_hashed_list, 'StaticINTR Generated')
identical, lists = dc.compare_lists(a_hashed_list, b_hashed_list)
self.assertTrue(identical)
self.assertEqual(lists[0], lists[1])
def test10_initial_dhcp_hash_compare_missing_host(self):
a = DHCPHash(self.new_file)
b = DHCPHash(self.new_file)
a_hashed_list = a.get_hash()
b_hashed_list = b.get_hash()
del a_hashed_list[1]
del b_hashed_list[1]
del a_hashed_list[1]
## Pick the first object from the hash and give it a new and different value
a_hashed_list[0]['hardware ethernet'] = '00:00:00:00:00:00'
a_hashed_list[0]['fixed-address'] = '10.0.0.1'
dc = DHCPHashCompare(a_hashed_list, 'KeyValue List', b_hashed_list, 'StaticINTR Generated')
identical, lists = dc.compare_lists(a_hashed_list, b_hashed_list)
self.assertFalse(identical)
msg = dc.analyze()
print msg
```
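Outside the test suite, the comparison flow these tests exercise boils down to the following sketch (the scope text is a shortened version of the fixture above):

```python
# Sketch of the DHCPHash comparison flow: identical scope files hash equal.
from dhcp.DHCPHash import DHCPHash, compare_lists

scope_text = """
host foofake2.db.phx1.mozilla.com {
        hardware ethernet AA:BB:05:72:18:38;
        fixed-address 10.99.99.12;
}"""
a = DHCPHash(scope_text).get_hash()
b = DHCPHash(scope_text).get_hash()
print(compare_lists(a, b))  # None means the two scope files agree
```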
#### File: inventory/dhcp/views.py
```python
from django.core.exceptions import ValidationError
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
try:
import json
except:
from django.utils import simplejson as json
import _mysql_exceptions
import models
import forms
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import NetworkAdapter
#import adapters.dhcp
from middleware.restrict_to_remote import allow_anyone
from DHCP import DHCP
from django.test.client import RequestFactory
from django.template.defaulttags import URLNode
from django.conf import settings
from jinja2.filters import contextfilter
from django.utils import translation
from libs.jinja import jinja_render_to_response
from api_v2.keyvalue_handler import KeyValueHandler
from core.registration.static.models import StaticReg
from core.dhcp.render import render_sregs
factory = RequestFactory()
def showall(request):
dhcp_scopes = models.DHCP.objects.all()
h = KeyValueHandler()
request = factory.get('/en-US/api/keyvalue/?key=is_dhcp_scope', follow=True)
obj = h.read(request)
dhcp_scopes = []
for key in obj.iterkeys():
dhcp_scopes.append(key.split(":")[1])
return jinja_render_to_response('dhcp/index.html', {
'dhcp_scopes': dhcp_scopes,
})
def new(request):
error_message = ''
if request.method == 'POST':
form = forms.AddDHCPScopeForm(request.POST)
if form.is_valid():
truth_exists = False
try:
tmp = Truth.objects.get(name=form.cleaned_data['scope_name'])
truth_exists = True
except:
pass
if truth_exists is False:
t = Truth(name=form.cleaned_data['scope_name'], description=form.cleaned_data['scope_description'])
t.save()
k = TruthKeyValue(truth=t,key='is_dhcp_scope',value='True')
k.save()
return redirect('/dhcp/edit/%s' % t.name)
else:
error_message = 'DHCP Scope Exists'
else:
form = forms.AddDHCPScopeForm()
return jinja_render_to_response('dhcp/new.html', {
"form": form ,
"error_message":error_message
})
def override_file(request, dhcp_scope):
if request.method == 'POST':
form = forms.DHCPScopeOverrideForm(request.POST)
if form.is_valid():
do = models.DHCPOverride.objects.get(dhcp_scope=dhcp_scope)
do.override_text = form.cleaned_data['override_text']
do.save()
#s = form.save()
#return redirect('/dhcp/show/')
else:
try:
do = models.DHCPOverride.objects.get(dhcp_scope=dhcp_scope)
except:
do = models.DHCPOverride(dhcp_scope=dhcp_scope)
do.save()
form = forms.DHCPScopeOverrideForm(initial={'dhcp_scope':dhcp_scope, 'override_text':do.override_text})
return jinja_render_to_response('dhcp/override.html', {
"form": form,
'dhcp_scope':dhcp_scope
},
RequestContext(request))
def showfile(request, dhcp_scope):
#scope = get_object_or_404(models.DHCP, pk=id)
#truth = Truth.objects.get(name='phx-vlan73')
#scope = TruthKeyValue(truth=truth)
try:
d = models.DHCPFile.objects.get(dhcp_scope=dhcp_scope)
content = d.file_text
except Exception, e:
content = """This file has not been stored in inventory yet.
To get it stored, make an innocuous change to a host's key/value entry.
An example would be to change the nic name from nic0 to nic1 then back to nic0 again and click save.
Once the file gets regenerated, it will be stored here"""
sregs = StaticReg.objects.filter(
hwadapter_set__keyvalue_set__key='dhcp_scope',
hwadapter_set__keyvalue_set__value=dhcp_scope
)
content += '\n\n' + render_sregs(sregs)
output = content.replace("\n","<br />")
return render_to_response('dhcp/showfile.html', {
"output": output
},
RequestContext(request))
def create(request):
if request.method == 'POST':
form = forms.AddDHCPScopeForm(request.POST)
if form.is_valid():
pass
#s = form.save()
#return redirect('/dhcp/show/')
else:
form = forms.AddDHCPScopeForm()
return render_to_response('dhcp/new.html', {
"form": form
},
RequestContext(request))
def edit(request, dhcp_scope):
h = KeyValueHandler()
trequest = factory.get('/api/keyvalue/?keystore=%s' % dhcp_scope, follow=True)
instance = h.read(trequest)
initial = {}
initial['scope_name'] = dhcp_scope
##A bunch of try/except blocks to create key/value pairs if one does not exist
try:
initial['scope_start'] = instance['dhcp.scope.start']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.scope.start', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['scope_start'] = ''
try:
initial['scope_end'] = instance['dhcp.scope.end']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.scope.end', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['scope_end'] = ''
try:
initial['scope_netmask'] = instance['dhcp.scope.netmask']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.scope.netmask', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['scope_netmask'] = ''
try:
initial['pool_start'] = instance['dhcp.pool.start']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.pool.start', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['pool_start'] = ''
try:
initial['pool_end'] = instance['dhcp.pool.end']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.pool.end', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['pool_end'] = ''
try:
initial['ntp_server1'] = instance['dhcp.option.ntp_server.0']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.option.ntp_server.0', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['ntp_server1'] = ''
try:
initial['ntp_server2'] = instance['dhcp.option.ntp_server.1']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.option.ntp_server.1', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['ntp_server2'] = ''
try:
initial['router'] = instance['dhcp.option.router.0']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.option.router.0', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['router'] = ''
try:
initial['domain_name'] = instance['dhcp.option.domain_name.0']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.option.domain_name.0', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['domain_name'] = ''
try:
initial['dns_server1'] = instance['dhcp.dns_server.0']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.dns_server.0', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['dns_server1'] = ''
try:
initial['dns_server2'] = instance['dhcp.dns_server.1']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.dns_server.1', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['dns_server2'] = ''
try:
initial['allow_booting'] = instance['dhcp.pool.allow_booting.0']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.pool.allow_booting.0', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['allow_booting'] = ''
try:
initial['allow_bootp'] = instance['dhcp.pool.allow_bootp.0']
except:
trequest = factory.post('/en-US/api/keyvalue/%s/' % dhcp_scope, {'key':'dhcp.pool.allow_bootp.0', 'value':'', 'truth_name':dhcp_scope}, follow=True)
h.create(trequest)
initial['allow_bootp'] = ''
if request.method == 'POST':
form = forms.EditDHCPScopeForm(request.POST)
if form.is_valid():
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.scope.start', 'value':form.cleaned_data['scope_start']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.scope.end', 'value':form.cleaned_data['scope_end']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.scope.netmask', 'value':form.cleaned_data['scope_netmask']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.pool.start', 'value':form.cleaned_data['pool_start']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.pool.end', 'value':form.cleaned_data['pool_end']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.option.ntp_server.0', 'value':form.cleaned_data['ntp_server1']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.option.ntp_server.1', 'value':form.cleaned_data['ntp_server2']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.dns_server.0', 'value':form.cleaned_data['dns_server1']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.dns_server.1', 'value':form.cleaned_data['dns_server2']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.option.domain_name.0', 'value':form.cleaned_data['domain_name']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.option.router.0', 'value':form.cleaned_data['router']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.pool.allow_booting.0', 'value':form.cleaned_data['allow_booting']}, follow=True)
h.update(trequest, dhcp_scope)
trequest = factory.post('/en-US/api/v2/keyvalue/%s/' % dhcp_scope, {'truth_id': dhcp_scope, 'key':'dhcp.pool.allow_bootp.0', 'value':form.cleaned_data['allow_bootp']}, follow=True)
h.update(trequest, dhcp_scope)
else:
form = forms.EditDHCPScopeForm(initial=initial)
return jinja_render_to_response('dhcp/edit.html', {
"form": form,
'dhcp_scope': dhcp_scope
})
def delete(request, dhcp_scope):
try:
scope = Truth.objects.get(name=dhcp_scope)
TruthKeyValue.objects.filter(truth=scope).delete()
scope.delete()
return redirect('/dhcp/show/')
except:
return redirect('/dhcp/show/')
```
#### File: inventory/libs/DHCPHelper.py
```python
from systems.models import ScheduledTask, KeyValue, System
import re
class DHCPHelper(object):
scopes_to_generate = []
def __init__(self):
pass
def get_scopes_to_generate(self):
return ScheduledTask.objects.get_all_dhcp()
def systems_by_scope(self, scope):
keyvalue_pairs = KeyValue.objects.filter(key__contains='dhcp_scope',value=scope).filter(key__startswith='nic.')
#Iterate through the list and get all of the key/value pairs
tmp_list = []
for row in keyvalue_pairs:
keyvalue = KeyValue.objects.filter(obj=row.obj)
tmp_dict = {}
for kv in keyvalue:
tmp_dict[kv.key] = kv.value
tmp_dict['hostname'] = row.obj.hostname
appendable = True
for the_items in tmp_list:
if 'hostname' not in the_items:
appendable = True
elif the_items['hostname'] == row.obj.hostname:
appendable = False
if appendable is True:
tmp_list.append(tmp_dict)
return tmp_list
def adapters_by_system_and_scope(self, system, scope):
dhcp_scope = scope
system = System.objects.get(hostname=system)
keyvalue_pairs = KeyValue.objects.filter(key__startswith='nic.').filter(obj=system).order_by('key')
#Iterate through the list and get all of the key/value pairs
tmp_dict = {}
adapter_ids = []
final_list = []
for kv in keyvalue_pairs:
tmp_dict[kv.key] = kv.value
for k in tmp_dict.iterkeys():
matches = re.match('nic\.(\d+).*',k)
if matches is not None:
dhcp_scope_match = 'nic.%s.dhcp_scope.0' % matches.group(1)
ip_address_match = 'nic.%s.ipv4_address.0' % matches.group(1)
if matches.group(1) not in adapter_ids and ip_address_match in tmp_dict and dhcp_scope_match in tmp_dict and tmp_dict[dhcp_scope_match] == dhcp_scope:
adapter_ids.append(matches.group(1))
adapter_ids.sort()
for a in adapter_ids:
adapter_name = ''
mac_address = ''
dhcp_hostname = ''
dhcp_filename = ''
dhcp_domain_name = ''
ipv4_address = ''
dhcp_domain_name_servers = ''
if 'nic.%s.ipv4_address.0' % a in tmp_dict:
ipv4_address = tmp_dict['nic.%s.ipv4_address.0' % a]
if 'nic.%s.name.0' % a in tmp_dict:
adapter_name = tmp_dict['nic.%s.name.0' % a]
if 'nic.%s.mac_address.0' % a in tmp_dict:
mac_address = tmp_dict['nic.%s.mac_address.0' % a]
if 'nic.%s.dhcp_hostname.0' % a in tmp_dict and 'nic.%s.option_hostname.0' % a not in tmp_dict:
dhcp_hostname = tmp_dict['nic.%s.dhcp_hostname.0' % a]
if 'nic.%s.option_hostname.0' % a in tmp_dict:
dhcp_hostname = tmp_dict['nic.%s.option_hostname.0' % a]
if 'nic.%s.dhcp_filename.0' % a in tmp_dict:
dhcp_filename = tmp_dict['nic.%s.dhcp_filename.0' % a]
if 'nic.%s.dhcp_domain_name.0' % a in tmp_dict:
dhcp_domain_name = tmp_dict['nic.%s.dhcp_domain_name.0' % a]
if 'nic.%s.dhcp_domain_name_servers.0' % a in tmp_dict:
dhcp_domain_name_servers = tmp_dict['nic.%s.dhcp_domain_name_servers.0' % a]
final_list.append({'system_hostname':system.hostname, 'ipv4_address':ipv4_address, 'adapter_name':adapter_name, 'mac_address':mac_address, 'option_hostname': dhcp_hostname, 'dhcp_hostname':dhcp_hostname, 'dhcp_filename':dhcp_filename, 'dhcp_domain_name':dhcp_domain_name, 'dhcp_domain_name_servers':dhcp_domain_name_servers})
return final_list
```
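The `nic.<adapter>.<field>.<index>` key/value convention that `adapters_by_system_and_scope` walks can be illustrated in isolation (the key names below are made up but follow that pattern):

```python
import re

# Made-up keys in the nic.<adapter>.<field>.<index> convention; a regex like
# the one in adapters_by_system_and_scope() pulls out the adapter numbers.
keys = ['nic.0.mac_address.0', 'nic.0.ipv4_address.0', 'nic.1.ipv4_address.0']
adapter_ids = sorted({re.match(r'nic\.(\d+)\..*', k).group(1) for k in keys})
print(adapter_ids)  # ['0', '1']
```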
#### File: inventory/libs/Rack.py
```python
from KeyValueTree import KeyValueTree
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import KeyValue as KeyValue
from django.test.client import RequestFactory
from api_v2.keyvalue_handler import KeyValueHandler
import json
factory = RequestFactory()
class Rack:
rack_name = None
tree = None
kv = None
ru = None
width = None
systems = []
ethernet_patch_panel_24 = []
ethernet_patch_panel_48 = []
def __init__(self, rack_name):
self.systems = []
self.rack_name = rack_name
self.kv = Truth.objects.select_related('truth_key_value').get(name=self.rack_name)
self.system_list = KeyValue.objects.select_related('system').filter(value__contains="truth:%s" % (self.rack_name))
self.ethernet_patch_panel_24 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 24)
self.ethernet_patch_panel_48 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 48)
import pdb
h = KeyValueHandler()
for s in self.system_list:
request = factory.get('/api/v2/keyvalue/?keystore=%s' % (s.system.hostname), follow=True)
tree = h.read(request)
system_ru = self._get_system_ru(tree)
system_image = self._get_system_image(tree)
system_slot = self._get_system_slot(tree)
self.systems.append({
"system_name":s.system.hostname,
"system_id":s.system.id,
"system_ru":system_ru,
"system_image":system_image,
'system_slot':system_slot,
'operating_system':str(s.system.operating_system),
'server_model': str(s.system.server_model),
'oob_ip': str(s.system.oob_ip),
})
self.systems = sorted(self.systems, key=lambda k: k['system_slot'])
try:
self.ru = self.kv.keyvalue_set.get(key='rack_ru').value
except:
self.ru = 42
try:
self.width = self.kv.keyvalue_set.get(key='rack_width').value
except:
self.width = 30
def _get_ethernet_patch_panels(self, tree, type, port_count):
ret = []
for i in tree.keyvalue_set.all():
match_string = "%i_port_%s_patch_panel" % (port_count, type)
if str(i.key) == match_string:
ret.append(i.value)
return ret
def _get_system_ru(self, tree):
for i in tree.iterkeys():
try:
if 'system_ru' in i.split(':'):
return tree[i]
except:
pass
return 4
def _get_system_image(self, tree):
for i in tree.iterkeys():
try:
if 'system_image' in i.split(':'):
return tree[i]
except:
pass
return None
def _get_system_slot(self, tree):
for i in tree.iterkeys():
try:
if 'system_slot' in i.split(':'):
return tree[i]
except:
pass
return 1
```
#### File: inventory/mcsv/resolver.py
```python
from django.core.exceptions import (
MultipleObjectsReturned, ValidationError, FieldError
)
from systems import models as sys_models
import datetime
import re
class Generics(object):
def generic_integer(self, name, values, default=None):
def validate(s, value):
if not value.isdigit():
raise ValidationError(
"{0} {1} was not the right type".format(name, value)
)
setattr(s, name, value)
return s
bundle = {
'name': name,
'values': values,
'handler': validate
}
return bundle
def generic_float(self, name, values, default=None):
def validate(s, value):
try:
value = str(float(value))
except ValueError:
raise ValidationError(
"{0} {1} coult not be coerced into a float".format(
name, value)
)
setattr(s, name, value)
return s
bundle = {
'name': name,
'values': values,
'handler': validate
}
return bundle
def generic_char(self, name, values, default=None):
bundle = {
'name': name,
'values': values,
'handler': lambda s, c: setattr(s, name, c) or s
}
return bundle
def generic_kevalue(self, re_patterns):
"""
Validate a keyvalue header
"""
def patterns_match(value):
for pattern in re_patterns:
if pattern.match(value):
return True
return False
def create_kv(s, key, value):
return sys_models.KeyValue.objects.get_or_create(
obj=s, key=key, value=value
)[0]
bundle = {
'name': 'key_value',
'match_func': patterns_match,
'handler': create_kv
}
return bundle
class Resolver(Generics):
def make_tagger(tagged_methods):
def tag(func):
tagged_methods[func.__name__] = func
return func
return tag
metas = {}
meta = make_tagger(metas)
system_attrs = {}
system_attr = make_tagger(system_attrs)
system_relateds = {}
system_related = make_tagger(system_relateds)
system_kvs = {}
system_kv = make_tagger(system_kvs)
system_kv_patterns = []
for key_type in (
'mac_address', 'ip_address', 'name', 'hostname', 'dhcp_scope',
'option_hostname', 'dhcp_filename', 'dhcp_domain_name',
'dhcp_domain_name_servers'
):
system_kv_patterns.append('nic.\d+.{0}.\d+'.format(key_type))
system_kv_patterns.append('mgmt.\d+.{0}.\d+'.format(key_type))
system_kv_patterns.append('system.hostname.alias.\d+')
@meta
def primary_attribute(self, **kwargs):
def _primary_attribute(s, header, value, **kwargs):
try:
_, s._primary_attr = map(
lambda s: s.strip(), header.split('%')
)
except ValueError:
raise ValidationError(
"The primary_attribute header must be in the form "
"'primary_attribute%<system-attribute-header>'"
)
s._primary_value = getattr(
self.get_related(header, value, sys_models.System),
s._primary_attr
)
return s
bundle = {
'name': 'primary_attribute',
'filter_fields': ['asset_tag', 'hostname'],
'values': ['primary_attribute'],
'handler': _primary_attribute,
}
return bundle
@system_kv
def all_system_keyvalue(self, **kwargs):
patterns = []
for key_pattern in self.system_kv_patterns:
patterns.append(re.compile(key_pattern))
return self.generic_kevalue(patterns)
@system_attr
def rack_order(self, **kwargs):
name = 'rack_order'
values = ['rack_order']
return self.generic_float(name, values, **kwargs)
@system_attr
def notes(self, **kwargs):
name = 'notes'
values = ['notes']
return self.generic_char(name, values, **kwargs)
@system_attr
def license(self, **kwargs):
name = 'license'
values = ['license']
return self.generic_char(name, values, **kwargs)
@system_attr
def asset_tag(self, **kwargs):
name = 'asset_tag'
values = ['asset_tag']
return self.generic_char(name, values, **kwargs)
@system_attr
def serial(self, **kwargs):
name = 'serial'
values = ['serial']
return self.generic_char(name, values, **kwargs)
@system_attr
def switch_ports(self, **kwargs):
name = 'switch_ports'
values = ['switch_ports']
return self.generic_char(name, values, **kwargs)
@system_attr
def patch_panel_port(self, **kwargs):
name = 'patch_panel_port'
values = ['patch_panel_port']
return self.generic_char(name, values, **kwargs)
@system_attr
def purchase_price(self, **kwargs):
name = 'purchase_price'
values = ['purchase_price']
return self.generic_char(name, values, **kwargs)
@system_attr
def ram(self, **kwargs):
name = 'ram'
values = ['ram']
return self.generic_char(name, values, **kwargs)
@system_attr
def oob_switch_port(self, **kwargs):
name = 'oob_switch_port'
values = ['oob_switch_&_port', 'oob_switch_ports']
return self.generic_char(name, values, **kwargs)
@system_attr
def oob_ip(self, **kwargs):
name = 'oob_ip'
values = ['oob_ip']
return self.generic_char(name, values, **kwargs)
@system_attr
def hostname(self, **kwargs):
name = 'hostname'
values = ['host_name', 'hostname']
return self.generic_char(name, values, **kwargs)
@system_attr
def purchase_date(self, **kwargs):
name = 'purchase_date'
values = ['purchase_date']
return self.generic_char(name, values, **kwargs)
def gen_parse_date(self, field):
def parse_date(s, value, **kwargs):
d = datetime.datetime.strptime(value, "%Y-%m-%d").date()
setattr(s, field, d)
return s
return parse_date
@system_attr
def warranty_start(self, **kwargs):
name = 'warranty_start'
values = ['warranty_start']
bundle = self.generic_char(name, values, **kwargs)
bundle['handler'] = self.gen_parse_date(name)
return bundle
@system_attr
def warranty_end(self, **kwargs):
name = 'warranty_end'
values = ['warranty_end']
bundle = self.generic_char(name, values, **kwargs)
bundle['handler'] = self.gen_parse_date(name)
return bundle
def cannot_find(self, field, value):
raise ValidationError(
"Unfortunatly, we could not determine a {0} to use given the "
"value '{1}'".format(field, value)
)
def get_related_simple(self, field, value, Klass):
search = {field: value}
obj = self.get_realted_from_dict(search, Klass)
if obj:
return obj
obj = self.get_related_from_pk(value, Klass)
if obj:
return obj
self.cannot_find(field, value)
def get_related(self, field, value, Klass, delimiter='%'):
"""
Try to find delimited headers, fall back to normal get_related_simple
if they don't exist.
"""
fields = map(lambda s: s.strip(), field.split('%'))
if '%' not in field or len(fields) < 1:
raise ValidationError(
"We need to determine what fields to search for when looking "
"for objects coresponding to the {0} header. Please specify "
"some filter fields by doing something like: "
"{0}%({1})'".format(
field, ' | '.join(self.get_field_names(Klass))
)
)
fields = fields[1:]
values = map(lambda s: s.strip(), value.split('%'))
search = dict(zip(fields, values))
try:
obj = self.get_realted_from_dict(search, Klass)
except FieldError, e:
raise Exception(
"When trying to use resolve a(n) {0}, got the error "
"{1}".format(Klass.__name__, str(e))
)
if obj:
return obj
self.cannot_find(field, value)
def get_realted_from_dict(self, search, Klass):
try:
return Klass.objects.get(**search)
except (MultipleObjectsReturned, Klass.DoesNotExist):
pass
def get_related_from_pk(self, value, Klass):
try:
return Klass.objects.get(pk=value)
except Klass.DoesNotExist:
pass
# XXX this should really be in the classes themselves
def get_field_names(self, Klass):
return [field.name for field in Klass._meta.fields]
@system_related
def systemrack(self, **kwargs):
def _systemrack(s, header, value):
s.system_rack = self.get_related(
header, value, sys_models.SystemRack
)
return s
filter_fields = self.get_field_names(sys_models.SystemRack)
filter_fields[filter_fields.index('location')] = 'location__name'
bundle = {
'name': 'systemrack',
'filter_fields': filter_fields,
'values': ['system_rack'],
'handler': _systemrack
}
return bundle
@system_related
def system_status(self, **kwargs):
def _system_status(s, header, value):
s.system_status = self.get_related(
header, value, sys_models.SystemStatus
)
return s
bundle = {
'name': 'systemstatus',
'filter_fields': self.get_field_names(sys_models.SystemStatus),
'values': ['system_status'],
'handler': _system_status
}
return bundle
@system_related
def server_model(self, **kwargs):
def _server_model(s, header, value):
s.server_model = self.get_related(
header, value, sys_models.ServerModel
)
return s
bundle = {
'name': 'server_model',
'filter_fields': self.get_field_names(sys_models.ServerModel),
'values': ['server_model'],
'handler': _server_model
}
return bundle
@system_related
def operating_system(self, **kwargs):
def _operating_system(s, header, value):
sm = self.get_related(header, value, sys_models.OperatingSystem)
s.operating_system = sm
return s
bundle = {
'name': 'operating_system',
'filter_fields': self.get_field_names(sys_models.OperatingSystem),
'values': ['operating_system'],
'handler': _operating_system
}
return bundle
@system_related
def allocation(self, **kwargs):
def _allocation(s, header, value):
s.allocation = self.get_related(
header, value, sys_models.Allocation
)
return s
bundle = {
'name': 'allocation',
'filter_fields': self.get_field_names(sys_models.Allocation),
'values': ['allocation'],
'handler': _allocation
}
return bundle
@system_related
def system_type(self, **kwargs):
def _system_type(s, header, value):
s.system_type = self.get_related(
header, value, sys_models.SystemType
)
return s
bundle = {
'name': 'system_type',
'filter_fields': self.get_field_names(sys_models.SystemType),
'values': ['system_type'],
'handler': _system_type
}
return bundle
```
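The `%`-delimited header convention that `get_related` parses above can be checked in isolation (the filter fields and values below are made up):

```python
# Standalone sketch of the '%'-delimited header parsing in Resolver.get_related:
# the first token names the related column, the rest become ORM filter fields.
header = "system_rack % location__name % name"   # made-up header
value = "phx1 % rack-101"                        # made-up value
fields = [s.strip() for s in header.split('%')][1:]
values = [s.strip() for s in value.split('%')]
print(dict(zip(fields, values)))  # {'location__name': 'phx1', 'name': 'rack-101'}
```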
#### File: inventory/migrate_dns/import_utils.py
```python
from migrate_dns.zone_migrate import populate_forward_dns, populate_reverse_dns
from dns import zone
from iscpy.iscpy_dns.named_importer_lib import MakeNamedDict
from mozdns.view.models import View
import settings
import os
# Add zones that should not be imported here
black_list = (
'svc.mozilla.com',
'services.mozilla.com',
)
PRIVATE = os.path.join(settings.ZONE_PATH, "config/zones.private")
PUBLIC = os.path.join(settings.ZONE_PATH, "config/zones.public")
def show_possible_imports(zones_file, view):
CONFIG = os.path.join(settings.ZONE_PATH, zones_file)
zones = MakeNamedDict(open(CONFIG).read())
m_c = ('python manage.py dns_migrate_single_zone {view} {zone_name} '
'$ZONES_PREFIX/{fname}')
for zone_name, zone_meta in zones['orphan_zones'].iteritems():
print m_c.format(
view=view, zone_name=zone_name, fname=zone_meta['file']
)
def do_import():
private_zones = MakeNamedDict(open(PRIVATE).read())
public_zones = MakeNamedDict(open(PUBLIC).read())
View.objects.get_or_create(name='public')
View.objects.get_or_create(name='private')
for zone_name, zone_meta in private_zones['orphan_zones'].iteritems():
if zone_name in black_list:
continue
handle_zone(zone_name, zone_meta, False, True)
for zone_name, zone_meta in public_zones['orphan_zones'].iteritems():
if zone_name in black_list:
continue
handle_zone(zone_name, zone_meta, True, False)
def migrate_single_zone(view_name, zone_name, zone_file):
if view_name not in ('public', 'private', 'both'):
print "view must be 'public' or 'private'"
return
zone_meta = {'file': zone_file}
if view_name == 'private':
handle_zone(zone_name, zone_meta, False, True)
elif view_name == 'public':
handle_zone(zone_name, zone_meta, True, False)
elif view_name == 'both':
handle_zone(zone_name, zone_meta, True, True)
def get_zone_data(zone_name, filepath, dirpath):
cwd = os.getcwd()
os.chdir(dirpath)
mzone = zone.from_file(filepath, zone_name, relativize=False)
os.chdir(cwd)
return mzone
def handle_zone(zone_name, zone_meta, public, private):
if not zone_meta['file']:
print "No zone file for {0}".format(zone_name)
return
print "Importing {0}. View: {1}".format(zone_name,
'public' if public else 'private')
mzone = get_zone_data(zone_name, zone_meta['file'], settings.ZONE_PATH)
views = []
if public:
views.append(View.objects.get(name='public'))
if private:
views.append(View.objects.get(name='private'))
if zone_name.endswith(('in-addr.arpa', 'ip6.arpa')):
direction = 'reverse'
else:
direction = 'forward'
if direction == 'reverse':
populate_reverse_dns(mzone, zone_name, views)
else:
populate_forward_dns(mzone, zone_name, views)
```
#### File: inventory/migrate_dns/zone_migrate.py
```python
from mozdns.address_record.models import AddressRecord
from mozdns.cname.models import CNAME
from mozdns.domain.models import Domain
from mozdns.mx.models import MX
from mozdns.nameserver.models import Nameserver
from mozdns.ptr.models import PTR
from mozdns.soa.models import SOA
from mozdns.srv.models import SRV
from mozdns.txt.models import TXT
from mozdns.domain.utils import *
from mozdns.ip.models import ipv6_to_longs
from mozdns.view.models import View
from mozdns.utils import ensure_domain
from settings import ZONE_PATH
import dns
import dns.zone
import pdb
import ipaddr
from copy import deepcopy
import datetime
def buildzone3(job):
if job == "external":
from migrate_dns.migrate.zone_configs.external import external
configs = external
if job == "private_reverse":
from migrate_dns.migrate.zone_configs.private_reverse import private_reverse
configs = private_reverse
if job == "net":
from migrate_dns.migrate.zone_configs.mozilla_net import mozilla_net
configs = mozilla_net
if job == "org":
from migrate_dns.migrate.zone_configs.mozilla_org import mozilla_org
configs = mozilla_org
if job == "com":
from migrate_dns.migrate.zone_configs.mozilla_com_dc_zone_config import mozilla_com_dcs
configs = mozilla_com_dcs
build_from_config(configs)
def build_from_config(configs):
for config in configs:
zone_path = config['path']
root_domain_name = config['zone_name']
name_reversed = config['name_reversed']
ztype = config['direction']
view = config['view']
relative_path = config['relative_path']
migrate_zone(root_domain_name, name_reversed, zone_path, ztype, view, relative_path)
def migrate_zone(root_domain_name, name_reversed, zone_path, ztype, view, relative_path):
if view == "both":
private , _ = View.objects.get_or_create(name="private")
public , _ = View.objects.get_or_create(name="public")
views = [private, public]
else:
view_obj, _ = View.objects.get_or_create(name=view)
views = [view_obj]
try:
if ztype == 'r':
if name_reversed:
root_domain_name = '.'.join(reversed(root_domain_name.split('.'))) + ".in-addr.arpa"
else:
root_domain_name = '.'.join(root_domain_name.split('.')) + ".in-addr.arpa"
svn_zone = collect_svn_zone(root_domain_name, zone_path, ZONE_PATH)
except dns.zone.NoSOA, e:
print "----------------------"
print "ERROR: NoSOA()"
print zone_path
print "----------------------"
return
if ztype == 'f':
print "++ Migrating {0} {1}".format(root_domain_name, zone_path)
populate_forward_dns(svn_zone, root_domain_name, views)
if ztype == 'r':
print "++ Migrating {0} {1}".format(root_domain_name, zone_path)
populate_reverse_dns(svn_zone, root_domain_name, views)
def null_zone_tree(domain, clobber_soa):
"""Starting at domain, change any domain's soa that is clobber_soa to None.
"""
if domain.soa is None:
pass # Keep searching (even though you won't find anything)
elif domain.soa == clobber_soa:
pass # Kill it with fire!
elif domain.soa != clobber_soa:
return # We hit a new zone. Leave it alone
else:
# Oh fuck
pdb.set_trace()
pass
# Let's go deeper. (TWSS)
for child_domain in domain.domain_set.all():
null_zone_tree(child_domain, clobber_soa)
domain.soa = None
domain.save()
return
def color_zone_tree(domain, clobber_soa, new_soa):
"""
This function will take the domain tree and give domain's their 'assumed'
correct soa. This is an in order traversal.
"""
if domain.soa == clobber_soa:
pass # We are changing this soa
elif domain.soa == new_soa:
pass # We still need to check the child domains
elif domain.soa is None and not domain.delegated:
pass # This domain doesn't have an soa and isn't delegated. It's
# likely it was just created. Set it's SOA.
elif domain.soa != clobber_soa and domain.soa is not None:
return # It's a different zone. We've arrived at our base case.
else:
# Oh fuck
pdb.set_trace()
pass
domain.soa = new_soa
domain.save()
# Let's go deeper. (TWSS)
for child_domain in domain.domain_set.all():
color_zone_tree(child_domain, clobber_soa, new_soa)
return
def populate_forward_dns(zone, root_domain_name, views):
soa = migrate_soa(zone, root_domain_name)
root_domain = ensure_domain(root_domain_name, force=True)
migrate_A(zone, root_domain, soa, views)
migrate_AAAA(zone, root_domain, soa, views)
migrate_CNAME(zone, root_domain, soa, views)
migrate_NS(zone, root_domain, soa, views)
migrate_MX(zone, root_domain, soa, views)
migrate_TXT(zone, root_domain, soa, views)
migrate_SRV(zone, root_domain, soa, views)
if root_domain.soa == soa:
clobber_soa = None
else:
clobber_soa = root_domain.soa
null_zone_tree(root_domain, clobber_soa)
color_zone_tree(root_domain, clobber_soa, soa)
def populate_reverse_dns(zone, root_domain_name, views):
ensure_domain("arpa", force=True)
ensure_domain("in-addr.arpa", force=True)
ensure_domain("ip6.arpa", force=True)
soa = migrate_soa(zone, root_domain_name)
root_domain = ensure_domain(root_domain_name, force=True)
migrate_NS(zone, root_domain, soa, views)
migrate_MX(zone, root_domain, soa, views)
migrate_PTR(zone, root_domain, soa, views)
if root_domain.soa == soa:
clobber_soa = None
else:
clobber_soa = root_domain.soa
null_zone_tree(root_domain, clobber_soa)
color_zone_tree(root_domain, root_domain.soa, soa)
def migrate_PTR(zone, root_domain, soa, views):
for (name, ttl, rdata) in zone.iterate_rdatas('PTR'):
fqdn = rdata.target.to_text().strip('.')
if fqdn.find('unused') != -1:
print "Skipping "+name.to_text()+" "+fqdn
continue
# 4.3.2.1.IN-ADDR.ARPA. --> 1.2.3.4
name = name.to_text().lower().strip('.')
if name.endswith('.in-addr.arpa'):
ip_type = '4'
ip_str = name.replace('.in-addr.arpa','')
ip_str = '.'.join(list(reversed(ip_str.split('.'))))
ip_upper, ip_lower = 0, ipaddr.IPv4Address(ip_str)
elif name.endswith('.ip6.arpa'):
ip_type = '6'
ip_str = name.replace('.ip6.arpa','')
chunks = [''.join(ip_str.split('.')[i:i+4]) for i in xrange(0, len(ip_str.split('.')), 4)]
ip_str = ':'.join(chunks)[::-1]
ip_upper, ip_lower = ipv6_to_longs(ip_str)
else:
print "We so fucked. Lol"
pdb.set_trace()
continue
if ip_str == '10.2.171.IN':
print "Skipping "+ip_str+" "+fqdn
continue
print str(name) + " PTR " + str(fqdn)
ptr = PTR.objects.filter(name=fqdn, ip_upper=ip_upper,
ip_lower=ip_lower, ip_type=ip_type)
if ptr:
ptr = ptr[0]
else:
ptr = PTR(
name=fqdn, ip_str=ip_str, ip_type=ip_type,
description=rdata.comment
)
ptr.full_clean()
ptr.save()
if views:
for view in views:
ptr.views.add(view)
ptr.save()
def migrate_soa(zone, root_domain_name):
for (name, ttl, rdata) in zone.iterate_rdatas('SOA'):
print str(name) + " SOA " + str(rdata)
exists = SOA.objects.filter(minimum=rdata.minimum,
contact=rdata.rname.to_text().strip('.'),
primary=rdata.mname.to_text().strip('.'), description="SOA for"
" {0}".format(root_domain_name))
if exists:
soa = exists[0]
else:
new_serial = int(datetime.datetime.now().strftime("%Y%m%d01"))
soa = SOA(serial=new_serial, minimum=rdata.minimum,
contact=rdata.rname.to_text().strip('.'),
primary=rdata.mname.to_text().strip('.'), description="SOA for"
" {0}".format(root_domain_name))
soa.clean()
soa.save()
return soa
def migrate_A(zone, root_domain, soa, views):
names = []
for (name, ttl, rdata) in zone.iterate_rdatas('A'):
names.append((name.to_text().strip('.'), rdata, ttl))
sorted_names = list(sorted(names, cmp=lambda n1, n2: -1 if len(n1[0].split('.')) > len(n2[0].split('.')) else 1))
for name, rdata, ttl in sorted_names:
if not ttl:
ttl = 3600
print str(name) + " A " + str(rdata)
if name.startswith("unusedspace"):
print "Skipping {0} A {1}".format(name, rdata)
continue
exists_domain = Domain.objects.filter(name=name)
if exists_domain:
label = ''
domain = exists_domain[0]
else:
label = name.split('.')[0]
domain_name = '.'.join(name.split('.')[1:])
domain = ensure_domain(domain_name, force=True)
if AddressRecord.objects.filter(
label=label, domain=domain, ip_str=rdata.to_text(),
ip_type='4').exists():
a = AddressRecord.objects.get(
label=label, domain=domain, ip_str=rdata.to_text(),
ip_type='4'
)
else:
a = AddressRecord.objects.create(
label=label, domain=domain, ip_str=rdata.to_text(), ip_type='4',
description=rdata.comment, ttl=ttl
)
for view in views:
a.views.add(view)
a.save()
def migrate_AAAA(zone, root_domain, soa, views):
for (name, ttl, rdata) in zone.iterate_rdatas('AAAA'):
name = name.to_text().strip('.')
print str(name) + " AAAA " + str(rdata)
exists_domain = Domain.objects.filter(name=name)
if exists_domain:
label = ''
domain = exists_domain[0]
else:
label = name.split('.')[0]
if label.startswith('unused'):
continue
domain_name = '.'.join(name.split('.')[1:])
domain = ensure_domain(domain_name, force=True)
ip_upper, ip_lower = ipv6_to_longs(rdata.to_text())
if AddressRecord.objects.filter(label=label,
domain=domain, ip_upper=ip_upper, ip_lower=ip_lower,
ip_type='6').exists():
a = AddressRecord.objects.get(
label=label, domain=domain, ip_type='6', ip_upper=ip_upper,
ip_lower=ip_lower
)
else:
a = AddressRecord(
label=label, domain=domain, ip_str=rdata.to_text(),
ip_type='6', description=rdata.comment, ttl=ttl
)
a.clean()
a.save()
for view in views:
a.views.add(view)
a.save()
def migrate_NS(zone, root_domain, soa, views):
for (name, ttl, rdata) in zone.iterate_rdatas('NS'):
name = name.to_text().strip('.')
print str(name) + " NS " + str(rdata)
domain_name = '.'.join(name.split('.')[1:])
domain = ensure_domain(name, force=True)
if Nameserver.objects.filter(domain=domain,
server=rdata.target.to_text().strip('.')):
ns = Nameserver.objects.get(
domain=domain, server=rdata.target.to_text().strip('.'),
)
else:
ns = Nameserver.objects.create(
domain=domain, server=rdata.target.to_text().strip('.'),
description=rdata.comment, ttl=ttl
)
for view in views:
ns.views.add(view)
ns.save()
def migrate_MX(zone, root_domain, soa, views):
for (name, ttl, rdata) in zone.iterate_rdatas('MX'):
name = name.to_text().strip('.')
print str(name) + " MX " + str(rdata)
exists_domain = Domain.objects.filter(name=name)
if exists_domain:
label = ''
domain = exists_domain[0]
else:
label = name.split('.')[0]
domain_name = '.'.join(name.split('.')[1:])
domain = ensure_domain(domain_name, force=True)
priority = rdata.preference
server = rdata.exchange.to_text().strip('.')
if MX.objects.filter(label=label, domain=domain, server=server,
priority=priority):
mx = MX.objects.get(
label=label, domain=domain, server=server, priority=priority,
)
else:
mx = MX.objects.create(
label=label, domain=domain, server=server, priority=priority,
ttl=ttl, description=rdata.comment
)
for view in views:
mx.views.add(view)
mx.save()
def migrate_CNAME(zone, root_domain, soa, views):
for (name, ttl, rdata) in zone.iterate_rdatas('CNAME'):
name = name.to_text().strip('.')
print str(name) + " CNAME " + str(rdata)
exists_domain = Domain.objects.filter(name=name)
if exists_domain:
label = ''
domain = exists_domain[0]
else:
label = name.split('.')[0]
domain_name = name.split('.')[1:]
domain = ensure_domain('.'.join(domain_name), force=True)
data = rdata.target.to_text().strip('.')
if CNAME.objects.filter(label=label, domain=domain,
target=data).exists():
cn = CNAME.objects.get(
label=label, domain=domain, target=data
)
else:
cn = CNAME(
label=label, domain=domain, target=data,
description=rdata.comment, ttl=ttl
)
cn.full_clean()
cn.save()
for view in views:
cn.views.add(view)
cn.save()
def migrate_TXT(zone, root_domain, soa, views):
for (name, ttl, rdata) in zone.iterate_rdatas('TXT'):
name = name.to_text().strip('.')
print str(name) + " TXT " + str(rdata)
exists_domain = Domain.objects.filter(name=name)
if exists_domain:
label = ''
domain = exists_domain[0]
else:
label = name.split('.')[0]
domain_name = name.split('.')[1:]
domain = ensure_domain('.'.join(domain_name), force=True)
data = rdata.to_text().strip('"')
if TXT.objects.filter(label=label, domain=domain,
txt_data=data).exists():
txt = TXT.objects.get(
label=label, domain=domain, txt_data=data
)
else:
txt = TXT(
label=label, domain=domain, txt_data=data,
description=rdata.comment, ttl=ttl
)
txt.full_clean()
txt.save()
for view in views:
txt.views.add(view)
txt.save()
def migrate_SRV(zone, root_domain, soa, views):
for (name, ttl, rdata) in zone.iterate_rdatas('SRV'):
target = rdata.target.to_text().strip('.')
port = rdata.port
weight = rdata.weight
prio = rdata.priority
name = name.to_text().strip('.')
print str(name) + " SRV " + str(rdata)
exists_domain = Domain.objects.filter(name=name)
if exists_domain:
label = ''
domain = exists_domain[0]
else:
label = name.split('.')[0]
domain_name = name.split('.')[1:]
domain = ensure_domain('.'.join(domain_name), force=True)
if SRV.objects.filter(label = label, domain = domain,
target=target, port=port, weight=weight,
priority=prio).exists():
srv = SRV.objects.get(
label=label, domain=domain, target=target, port=port,
weight=weight, priority=prio
)
else:
srv = SRV(
label=label, domain=domain, target=target, port=port,
weight=weight, priority=prio, description=rdata.comment,
ttl=ttl
)
srv.full_clean()
srv.save()
for view in views:
srv.views.add(view)
srv.save()
def get_clobbered(domain_name):
classes = [MX, AddressRecord, CNAME, TXT, SRV]
clobber_objects = [] # Objects that have the same name as a domain
for Klass in classes:
objs = Klass.objects.filter(fqdn=domain_name)
for obj in objs:
obj_views = [view.name for view in obj.views.all()]
new_obj = deepcopy(obj)
new_obj.id = None
clobber_objects.append((new_obj, obj_views))
if Klass == AddressRecord:
kwargs = {"check_cname": False}
else:
kwargs = {}
obj.delete(**kwargs)
return clobber_objects
```
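The reverse-zone name handling inside `migrate_PTR` can be sanity-checked on its own with the stdlib only (the record name is made up):

```python
# Standalone check of the in-addr.arpa -> dotted-quad conversion in migrate_PTR.
name = '11.99.99.10.in-addr.arpa'
ip_str = name.replace('.in-addr.arpa', '')
ip_str = '.'.join(reversed(ip_str.split('.')))
print(ip_str)  # 10.99.99.11
```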
#### File: inventory/mozdns/api.py
```python
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from tastypie import fields
from tastypie.exceptions import HydrationError
from tastypie.resources import Resource
from tastypie.resources import ModelResource
from mozdns.domain.models import Domain
from mozdns.cname.models import CNAME
from mozdns.cname.forms import CNAMEForm
from mozdns.view.models import View
from tastypie.validation import FormValidation
from tastypie.authorization import Authorization
from tastypie.api import Api
import pdb
class CommonDNSResource(Resource):
domain = fields.CharField() # User passes string, in hydrate we find a
# domain
views = fields.ListField(null=True, blank=True)
# User passes list of view names, in hydrate we
# make these the actual views
def dehydrate(self, bundle):
# Every DNS Resource should have a domain
bundle.data['views'] = [view.name for view in bundle.obj.views.all()]
bundle.data['domain'] = bundle.obj.domain.name
return bundle
def hydrate(self, bundle):
# Every DNS Resource should have a domain
domain_name = bundle.data.get('domain', '')
try:
domain = Domain.objects.get(name=domain_name)
except ObjectDoesNotExist:
raise HydrationError("Couldn't find domain "
"{0}".format(domain_name))
bundle.data['domain'] = domain
return bundle
def obj_update(self, bundle, request=None, skip_errors=False, **kwargs):
pdb.set_trace()
obj = bundle.obj
views = self.extract_views(bundle)
bundle = self.full_hydrate(bundle)
if bundle.errors:
self.error_response(bundle.errors, request)
self.apply_commit(
obj, bundle.data) # bundle should only have valid data.
# If it doesn't errors will
# be thrown
self.apply_custom_hydrate(obj, bundle, action='update')
return self.save_commit(request, obj, bundle, views)
def extract_views(self, bundle):
views = []
# We have to remove views from data because those need to be added
# later in a separate step
for view_name in bundle.data.pop('views', []):
try:
views.append(View.objects.get(name=view_name))
except ObjectDoesNotExist:
raise HydrationError("Couldn't find the view "
"{0}".format(view_name))
return views
def apply_commit(self, obj, commit_data):
for k, v in commit_data.iteritems():
if k == 'resource_uri':
continue
setattr(obj, k, v)
return obj
def obj_create(self, bundle, request=None, **kwargs):
"""
A generic version of creating a dns object. The strategy is simple: get
bundle.data to the point where we call Class(**bundle.data) which
creates an object. We then clean it and then save it. Finally we save
any views that were in bundle.
"""
Klass = self._meta.object_class
views = self.extract_views(bundle)
bundle = self.full_hydrate(bundle)
if bundle.errors:
self.error_response(bundle.errors, request)
# Create the Object
try:
obj = Klass(**bundle.data)
except ValueError, e:
pdb.set_trace()
except TypeError, e:
bundle.errors['error_messages'] = e.message
self.error_response(bundle.errors, request)
return self.save_commit(request, obj, bundle, views)
def save_commit(self, request, obj, bundle, views):
try:
obj.full_clean()
except ValidationError, e:
bundle.errors['error_messages'] = str(e)
self.error_response(bundle.errors, request)
except Exception, e:
pdb.set_trace()
obj.save()
# We remove the views so that deletion works.
orig_views = [view for view in obj.views.all()]
for view in orig_views:
obj.views.remove(view)
for view in views:
obj.views.add(view)
# Now save those views we saved
bundle.obj = obj
return bundle
def apply_custom_hydrate(self, obj, bundle, action=None):
print "No custom hydrate"
return bundle
class CNAMEResource(CommonDNSResource, ModelResource):
class Meta:
queryset = CNAME.objects.all()
fields = CNAME.get_api_fields() + ['domain', 'views']
authorization = Authorization()
allowed_methods = ['get', 'post', 'patch']
validation = FormValidation(form_class=CNAMEForm)
v1_dns_api = Api(api_name="v1_dns")
v1_dns_api.register(CNAMEResource())
```
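Because `CNAMEResource` is registered on `v1_dns_api`, clients talk to it through tastypie's standard REST endpoints; `hydrate()` resolves the `domain` name and `extract_views()` turns the `views` list into `View` objects. A hedged client-side sketch follows; the host and mount point of the API are assumptions, not documented paths:

```python
# Assumed URL layout: wherever the project includes v1_dns_api.urls.
import json
import requests

payload = {
    "label": "www",
    "domain": "example.mozilla.com",   # resolved to a Domain in hydrate()
    "target": "origin.mozilla.com",
    "views": ["public", "private"],    # resolved to View objects in extract_views()
}
resp = requests.post(
    "https://inventory.example.com/mozdns/api/v1_dns/cname/",
    data=json.dumps(payload),
    headers={"content-type": "application/json"},
)
print resp.status_code   # 201 on success; failures carry 'error_messages'
```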
#### File: inventory/mozdns/mixins.py
```python
from settings import MOZDNS_BASE_URL
from gettext import gettext as _
from string import Template
class DisplayMixin(object):
# Knobs
justs = {
'pk_just': 10,
'rhs_just': 1,
'ttl_just': 1,
'rdtype_just': 4,
'rdclass_just': 3,
'prio_just': 1,
'lhs_just': 40,
'extra_just': 1
}
def bind_render_record(self, pk=False, show_ttl=False):
template = Template(self.template).substitute(**self.justs)
bind_name = self.fqdn + "."
if show_ttl:
ttl_ = self.ttl
else:
ttl_ = '' if self.ttl is None else self.ttl
return template.format(
bind_name=bind_name, rdtype=self.rdtype, rdclass='IN',
ttl_=ttl_, **vars(self)
)
class ObjectUrlMixin(object):
"""
This is a mixin that adds important url methods to a model. This
class uses the ``_meta.db_table`` instance variable of an object to
calculate URLs. Because of this, you must use the app label of your
class when declaring urls in your urls.py.
"""
# TODO. using app_label breaks shit. Go through all the models and
# assign a better field. Something like "url handle". TODO2. Using
# db_table for now. It looks weird, but it works.
def get_absolute_url(self):
return self.get_fancy_edit_url()
def get_history_url(self):
return "/reversion_compare/history_view/{0}/{1}/".format(
self.rdtype, self.pk
)
def get_edit_url(self):
"""
Return the edit url of an object.
"""
return self.get_fancy_edit_url()
def get_fancy_edit_url(self):
return MOZDNS_BASE_URL + _(
"/record/update/{0}/{1}/").format(self.rdtype, self.pk)
def get_delete_url(self):
"""
Return the delete url of an object.
"""
return MOZDNS_BASE_URL + "/{0}/{1}/delete/".format(
self._meta.db_table, self.pk
)
def get_create_url(self):
"""
Return the create url of the type of object.
"""
return MOZDNS_BASE_URL + "/{0}/create/".format(self._meta.db_table)
def get_delete_redirect_url(self):
return '/core/search/'
class DBTableURLMixin(object):
def get_fancy_edit_url(self):
return self.get_edit_url()
def get_edit_url(self):
"""
        Return the edit url of an object.
"""
return MOZDNS_BASE_URL + "/{0}/{1}/update/".format(
self._meta.db_table, self.pk
)
def get_delete_url(self):
"""
Return the delete url of an object.
"""
return MOZDNS_BASE_URL + "/{0}/{1}/delete/".format(
self._meta.db_table, self.pk
)
def get_absolute_url(self):
"""
        Return the absolute url of an object.
"""
return MOZDNS_BASE_URL + "/{0}/{1}/".format(
self._meta.db_table, self.pk
)
def get_create_url(self):
"""
Return the create url of the type of object.
"""
return MOZDNS_BASE_URL + "/{0}/create/".format(self._meta.db_table)
```
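The two URL mixins only need an `rdtype` or `_meta.db_table` plus a `pk` to build their URLs. A tiny illustrative sketch of the URL shapes; the fake class below exists only for demonstration, real records supply these attributes through their models:

```python
# Illustrative only -- not part of the codebase.
class FakeRecord(ObjectUrlMixin):
    rdtype = 'CNAME'
    pk = 42

r = FakeRecord()
print r.get_absolute_url()   # MOZDNS_BASE_URL + '/record/update/CNAME/42/'
print r.get_history_url()    # '/reversion_compare/history_view/CNAME/42/'
```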
#### File: management/commands/bindbuild.py
```python
from mozdns.mozbind.build import build_dns
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
build_dns()
```
#### File: mozdns/mozbind/models.py
```python
from django.db import models
class DNSBuildRun(models.Model):
"""
    Every time the DNS build scripts are run, one of these objects is
created to track which zones we have built and which zones we haven't built
(since nothing has changed in them). :class:`BuildManifest` objects
relate back to a :class:`DNSBuildRun` instance and represent one zone's
state.
"""
log = models.TextField()
# stats_json = models.JSONField("stats", max_length=max_length)
def record(self, root_domain, soa, zfiles, zhash):
bm = BuildManifest(zname=root_domain.name, files=','.join(zfiles),
zhash=zhash, build_run=self)
bm.save()
return bm
def stash(self, k, v):
self.stats_json[k] = v
def get_manifests(self, **kwargs):
return BuildManifest.objects.filter(build_run=self, **kwargs)
class BuildManifest(models.Model):
max_length = 256
zname = models.CharField(max_length=max_length)
files = models.CharField(max_length=max_length)
zhash = models.CharField(max_length=max_length)
build_run = models.ForeignKey(DNSBuildRun)
# stats_json = models.JSONField("stats", max_length=max_length)
def stash(self, k, v):
self.stats_json[k] = v
```
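A `DNSBuildRun` is meant to be created once per build, with `record()` called as each zone is written, and the manifests queried back afterwards. A hedged usage sketch; the real call sites live in the builder and may differ, and `root_domain` and `soa` are assumed to be existing objects:

```python
# Hedged usage sketch; root_domain and soa are assumed project objects.
run = DNSBuildRun.objects.create(log="")
run.record(root_domain, soa,
           zfiles=['inv_zones/example.com.public'], zhash='abc123')
for manifest in run.get_manifests():
    print manifest.zname, manifest.zhash
```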
#### File: mozbind/tests/build_tests.py
```python
import os
import subprocess
import shutil
import shlex
from django.test.client import Client
from django.test import TestCase
from mozdns.soa.models import SOA
from mozdns.domain.models import Domain
from mozdns.address_record.models import AddressRecord
from mozdns.view.models import View
from mozdns.tests.utils import random_label, random_byte
from mozdns.mozbind.builder import DNSBuilder, BuildError
from mozdns.delete_zone.utils import delete_zone_helper
from mozdns.tests.utils import create_fake_zone
from core.task.models import Task
from settings.dnsbuilds import TEST_PREFIX
TEST_PREFIX = TEST_PREFIX.rstrip('/')
class MockBuildScriptTests(TestCase):
def setUp(self):
Task.objects.all().delete()
for soa in SOA.objects.all():
delete_zone_helper(soa.root_domain.name)
Domain.objects.get_or_create(name="arpa")
Domain.objects.get_or_create(name="in-addr.arpa")
self.r1, _ = Domain.objects.get_or_create(name="10.in-addr.arpa")
Domain.objects.get_or_create(name="com")
Domain.objects.get_or_create(name="mozilla.com")
        self.client = Client()
# Build file system assets
self.stage_dir = '{0}/stage/inv_zones/'.format(TEST_PREFIX)
self.svn_dir = '{0}/dnsconfig/'.format(TEST_PREFIX)
self.prod_dir = '{0}/dnsconfig/inv_zones/'.format(TEST_PREFIX)
self.prod_dir2 = '{0}/dnsconfig/inv_zones2/'.format(TEST_PREFIX)
self.svn_repo = '{0}/svn_repo'.format(TEST_PREFIX)
self.lock_file = '{0}/lock.fake'.format(TEST_PREFIX)
self.stop_update = '{0}/stop.update'.format(TEST_PREFIX)
self.re_test_file = '{0}/re_test'.format(TEST_PREFIX)
#os.chdir(os.path.join(TEST_PREFIX, ".."))
if os.path.isdir(TEST_PREFIX):
shutil.rmtree(TEST_PREFIX)
os.makedirs(TEST_PREFIX)
#os.makedirs(self.svn_repo)
command_str = "svnadmin create {0}".format(self.svn_repo)
rets = subprocess.Popen(shlex.split(command_str),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = rets.communicate()
self.assertEqual(0, rets.returncode, stderr)
command_str = "svn co file://{0} {1}".format(self.svn_repo,
self.prod_dir)
rets = subprocess.Popen(shlex.split(command_str),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = rets.communicate()
self.assertEqual(0, rets.returncode, stderr)
command_str = "svn co file://{0} {1}".format(self.svn_repo,
self.prod_dir2)
rets = subprocess.Popen(shlex.split(command_str),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = rets.communicate()
self.assertEqual(0, rets.returncode, stderr)
def get_post_data(self, random_str):
"""Return a valid set of data"""
return {
'root_domain': '{0}.{0}.mozilla.com'.format(
random_label() + random_str),
'soa_primary': 'ns1.mozilla.com',
'soa_contact': 'noc.mozilla.com',
'nameserver_1': 'ns1.mozilla.com',
'nameserver_2': 'ns2.mozilla.com',
'nameserver_3': 'ns3.mozilla.com',
'ttl_1': random_byte(),
'ttl_2': random_byte(),
'ttl_3': random_byte(),
}
def test_build_zone(self):
create_fake_zone('asdf1')
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update)
b.build_dns()
create_fake_zone('asdf2')
b.build_dns()
create_fake_zone('asdf3')
create_fake_zone('asdf4')
b.build_dns()
create_fake_zone('asdf5')
b.build_dns()
def test_change_a_record(self):
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update)
b.svn_lines_changed(b.PROD_DIR)
b.PUSH_TO_PROD = False
root_domain = create_fake_zone('asdfz1')
b.build_dns() # This won't check anything in since PUSH_TO_PROD==False
self.assertEqual((28, 0), b.svn_lines_changed(b.PROD_DIR))
b.PUSH_TO_PROD = True
b.build_dns() # This checked stuff in
# no lines should have changed
b.build_dns()
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
# Now add a record.
a, c = AddressRecord.objects.get_or_create(
label='', domain=root_domain, ip_str="10.0.0.1", ip_type='4'
)
a.views.add(View.objects.get_or_create(name='private')[0])
if not c:
a.ttl = 8
a.save()
# We just updated a zone so a full build shouldn't be triggered
self.assertFalse(Task.dns_full.all())
# we should see one zone being rebuilt
self.assertEqual(1, Task.dns_incremental.all().count())
self.assertTrue(SOA.objects.get(pk=root_domain.soa.pk).dirty)
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
b.PUSH_TO_PROD = False # Task isn't deleted
        b.build_dns()  # Serial gets incremented
# Since push-to-prod is false, we should still see the tasks in the
# same state
self.assertFalse(Task.dns_full.all())
self.assertEqual(1, Task.dns_incremental.all().count())
self.assertEqual(
SOA.objects.get(pk=root_domain.soa.pk).serial, tmp_serial + 1
)
# The dirty bit should still be true because we didn't check things in
self.assertTrue(SOA.objects.get(pk=root_domain.soa.pk).dirty)
# added new record (1) and new serials (2 for both views), old serials
# removed.
self.assertEqual((3, 2), b.svn_lines_changed(b.PROD_DIR))
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
b.PUSH_TO_PROD = True
b.build_dns()
# Since push-to-prod is true all tasks should be back 0
self.assertFalse(Task.dns_full.all())
self.assertFalse(Task.dns_incremental.all())
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
        # Serial is again incremented because PUSH_TO_PROD was False during the
        # last build, so we should still see this soa being rebuilt.
self.assertEqual(
SOA.objects.get(pk=root_domain.soa.pk).serial, tmp_serial + 1
)
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
# no lines should have changed if we would have built again
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
b.PUSH_TO_PROD = False
b.build_dns()
# Nothing changed
self.assertFalse(Task.dns_full.all())
self.assertFalse(Task.dns_incremental.all())
self.assertEqual(SOA.objects.get(pk=root_domain.soa.pk).serial,
tmp_serial)
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
def test_one_file_svn_lines_changed(self):
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update)
test_file = os.path.join(self.prod_dir, 'test')
with open(test_file, 'w+') as fd:
fd.write('line 1\n')
lc = b.svn_lines_changed(self.prod_dir)
self.assertEqual((1, 0), lc)
b.svn_checkin(lc)
with open(test_file, 'w+') as fd:
fd.write('line 1\nline 2\n')
lc = b.svn_lines_changed(self.prod_dir)
self.assertEqual((1, 0), lc)
b.svn_checkin(lc)
with open(test_file, 'w+') as fd:
fd.write('line 1\n')
lc = b.svn_lines_changed(self.prod_dir)
self.assertEqual((0, 1), lc)
b.svn_checkin(lc)
def test_too_many_config_lines_changed(self):
create_fake_zone('asdf86')
root_domain1 = create_fake_zone('asdf87')
root_domain2 = create_fake_zone('asdf88')
root_domain3 = create_fake_zone('asdf89')
create_fake_zone('asdf90')
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=True,
STOP_UPDATE_FILE=self.stop_update)
self.assertTrue(Task.dns_full.all())
self.assertFalse(Task.dns_incremental.all().count())
b.build_dns()
self.assertFalse(Task.dns_full.all())
self.assertFalse(Task.dns_incremental.all())
# deleting one ns
for ns in root_domain1.nameserver_set.all():
ns.delete()
self.assertTrue(Task.dns_full.all())
self.assertEqual(1, Task.dns_incremental.all().count())
b.build_dns() # One zone removed should be okay
for ns in root_domain2.nameserver_set.all():
ns.delete()
for ns in root_domain3.nameserver_set.all():
ns.delete()
b.PUSH_TO_PROD = True
self.assertRaises(BuildError, b.build_dns)
def test_two_file_svn_lines_changed(self):
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update)
test1_file = os.path.join(self.prod_dir, 'test1')
test2_file = os.path.join(self.prod_dir, 'test2')
with open(test1_file, 'w+') as fd:
fd.write('line 1.1\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((1, 0), lc)
b.svn_checkin(lc)
with open(test1_file, 'w+') as fd:
fd.write('line 1.1\nline 1.2\n')
with open(test2_file, 'w+') as fd:
fd.write('line 2.1\nline 2.2\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((3, 0), lc)
b.svn_checkin(lc)
with open(test1_file, 'w+') as fd:
fd.write('line 1\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((1, 2), lc)
b.svn_checkin(lc)
with open(test1_file, 'w+') as fd:
fd.write('line 1.1\nline 1.2\n')
with open(test2_file, 'w+') as fd:
fd.write('line 2.3\nline 2.4\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((4, 3), lc)
b.svn_checkin(lc)
def test_svn_conflict(self):
"""
        This uses tasks as a black box measurement to see if conflicts are
being handled
"""
root_domain = create_fake_zone('conflict')
b1 = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=True,
STOP_UPDATE_FILE=self.stop_update)
b1.build_dns() # This checked stuff in
# Check the repo out somewhere else
command_str = "svn co file://{0} {1}".format(
self.svn_repo, self.prod_dir2
)
b1.shell_out(command_str)
# Calculate the path to the zone file so we can tamper with it.
fm = b1.get_file_meta(
View.objects.get(name='public'), root_domain,
root_domain.soa
)
# Make local changes
fname = fm['prod_fname'].replace(self.prod_dir, self.prod_dir2)
with open(fname, 'a') as fd:
fd.write(";foobar")
# Check those changes in.
b1.PROD_DIR = self.prod_dir2 # Cheat and swap the dirs
b1.vcs_checkin()
b1.PROD_DIR = self.prod_dir # Fix our little cheat
b1.FORCE_BUILD = True # Force a build
# Add something to the end of the file to cause a collision
a = AddressRecord.objects.create(
label="zeenada", domain=root_domain, ip_type='4',
ip_str='255.0.0.0'
)
a.views.add(View.objects.get(name='public'))
# We should have conflicts here. See if we detect it by
# counting how many tasks need to be serviced. If the number remains
# the same that means we aborted the build due to a conflict
pre_task_count = Task.objects.all().count()
b1.build_dns()
post_task_count = Task.objects.all().count()
self.assertEqual(pre_task_count, post_task_count)
# Conflicts should be resolved. Let's see if we build successfully
pre_task_count = Task.objects.all().count()
b1.build_dns()
post_task_count = Task.objects.all().count()
self.assertTrue(pre_task_count != 0)
self.assertEqual(0, post_task_count)
def test_orphan_soa(self):
SOA.objects.create(
primary='foo.foo', contact='foo.foo', description='SOA for testing'
)
b1 = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=True,
STOP_UPDATE_FILE=self.stop_update)
b1.build_dns()
def svn_info(self):
command_str = "svn info {0}".format(self.prod_dir)
rets = subprocess.Popen(shlex.split(command_str),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = rets.communicate()
self.assertEqual(0, rets.returncode)
def test_build_svn(self):
print "This will take a while, be patient..."
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=True)
b.build_dns()
#self.svn_info()
s = SOA.objects.all()
if len(s) > 0:
s[0].dirty = True
s[0].save()
b.build_dns()
#self.svn_info()
b.build_dns()
#self.svn_info()
def test_svn_lines_changed(self):
pass
def test_build_staging(self):
if os.path.isdir(self.stage_dir):
shutil.rmtree(self.stage_dir)
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file)
b.build_staging()
# Make sure it made the staging dir
self.assertTrue(os.path.isdir(self.stage_dir))
# Ensure if fails if the directory exists
self.assertRaises(BuildError, b.build_staging)
# There shouldn't be errors because force=True
b.build_staging(force=True)
self.assertTrue(os.path.isdir(self.stage_dir))
b.clear_staging()
self.assertFalse(os.path.isdir(self.stage_dir))
self.assertRaises(BuildError, b.clear_staging)
b.clear_staging(force=True)
self.assertFalse(os.path.isdir(self.stage_dir))
def test_lock_unlock(self):
if os.path.exists(self.lock_file):
os.remove(self.lock_file)
b1 = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file)
b2 = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file)
b3 = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file)
self.assertFalse(os.path.exists(self.lock_file))
self.assertTrue(b1.lock())
self.assertTrue(os.path.exists(self.lock_file))
self.assertTrue(b1.unlock())
self.assertTrue(b1.lock())
self.assertFalse(b2.lock())
self.assertFalse(b2.lock())
self.assertTrue(b1.unlock())
self.assertTrue(b2.lock())
self.assertFalse(b1.lock())
self.assertTrue(b2.unlock())
self.assertTrue(b3.lock())
self.assertFalse(b1.lock())
self.assertFalse(b2.lock())
self.assertFalse(b1.unlock())
self.assertFalse(b2.unlock())
self.assertTrue(b3.unlock())
self.assertTrue(b1.lock())
self.assertTrue(b1.unlock())
def test_stop_update(self):
if os.path.exists(self.stop_update):
os.remove(self.stop_update)
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file,
STOP_UPDATE_FILE=self.stop_update)
open(self.stop_update, 'w+').close()
try:
self.assertTrue(b.stop_update_exists())
finally:
os.remove(self.stop_update)
def test_sanity_checks(self):
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file,
STOP_UPDATE_FILE=self.stop_update)
with open(self.re_test_file, 'w+') as fd:
fd.write("mozilla.com. IN A 172.16.31.10")
SANITY_CHECKS = [
(self.re_test_file, (
(r'^mozilla\.com\.\s+(\d+\s+)?IN\s+A\s+'),
)),
]
b.re_sanity_check(SANITY_CHECKS)
with open(self.re_test_file, 'w+') as fd:
fd.write("foo.com. IN A 172.16.31.10")
self.assertRaises(BuildError, b.re_sanity_check, SANITY_CHECKS)
```
#### File: mozbind/tests/dirty_soa.py
```python
from django.test import TestCase
from mozdns.soa.models import SOA
from mozdns.srv.models import SRV
from mozdns.txt.models import TXT
from mozdns.ptr.models import PTR
from mozdns.mx.models import MX
from mozdns.cname.models import CNAME
from mozdns.address_record.models import AddressRecord
from mozdns.nameserver.models import Nameserver
from mozdns.tests.utils import create_fake_zone
from mozdns.delete_zone.views import delete_zone_helper
from core.registration.static.models import StaticReg
from systems.tests.utils import create_fake_host
from core.task.models import Task
class AddRemoveSOATests(TestCase):
def test_new_zone(self):
self.assertFalse(Task.dns_incremental.all())
self.assertFalse(Task.dns_full.all())
root_domain = create_fake_zone("asdfasd.mozilla.com", suffix="")
self.assertEqual(1, Task.dns_full.all().count())
Task.dns_full.all().delete()
domain_name = root_domain.name
delete_zone_helper(domain_name)
self.assertEqual(1, Task.dns_full.all().count())
class DirtySOATests(TestCase):
def setUp(self):
self.r1 = create_fake_zone("10.in-addr.arpa", suffix="")
self.sr = self.r1.soa
self.sr.dirty = False
self.sr.save()
self.dom = create_fake_zone("bgaz", suffix="")
self.soa = self.dom.soa
self.soa.dirty = False
self.soa.save()
self.rdom = create_fake_zone("123.in-addr.arpa", suffix="")
self.rsoa = self.r1.soa
self.rsoa.dirty = False
self.rsoa.save()
self.s = create_fake_host(hostname="foo.mozilla.com")
self.s.save()
Task.dns_full.all().delete()
def test_print_soa(self):
self.assertTrue(self.soa.bind_render_record() not in ('', None))
self.assertTrue(self.rsoa.bind_render_record() not in ('', None))
def generic_dirty(self, Klass, create_data, update_data, local_soa,
tdiff=1, full=False):
Task.dns_incremental.all().delete() # Delete all tasks
local_soa.dirty = False
local_soa.save()
rec = Klass(**create_data)
rec.full_clean()
rec.save()
self.assertTrue(rec.bind_render_record() not in ('', None))
local_soa = SOA.objects.get(pk=local_soa.pk)
self.assertTrue(local_soa.dirty)
self.assertEqual(tdiff, Task.dns_incremental.all().count())
if full:
self.assertTrue(Task.dns_full.all().count())
else:
self.assertFalse(Task.dns_full.all().count())
# Now try updating
Task.dns_incremental.all().delete() # Delete all tasks
Task.dns_full.all().delete() # Delete all tasks
local_soa.dirty = False
local_soa.save()
local_soa = SOA.objects.get(pk=local_soa.pk)
self.assertFalse(local_soa.dirty)
for k, v in update_data.iteritems():
setattr(rec, k, v)
rec.save()
local_soa = SOA.objects.get(pk=local_soa.pk)
self.assertTrue(local_soa.dirty)
self.assertEqual(tdiff, Task.dns_incremental.all().count())
if full:
self.assertTrue(Task.dns_full.all().count())
else:
self.assertFalse(Task.dns_full.all().count())
# Now delete
Task.dns_incremental.all().delete() # Delete all tasks
Task.dns_full.all().delete() # Delete all tasks
local_soa.dirty = False
local_soa.save()
local_soa = SOA.objects.get(pk=local_soa.pk)
self.assertFalse(local_soa.dirty)
rec.delete()
local_soa = SOA.objects.get(pk=local_soa.pk)
self.assertTrue(local_soa.dirty)
self.assertEqual(tdiff, Task.dns_incremental.all().count())
if full:
self.assertTrue(Task.dns_full.all().count())
else:
self.assertFalse(Task.dns_full.all().count())
def test_dirty_a(self):
create_data = {
'label': 'asdf',
'domain': self.dom,
'ip_str': '10.2.3.1',
'ip_type': '4'
}
update_data = {
'label': 'asdfx',
}
self.generic_dirty(AddressRecord, create_data, update_data, self.soa)
def test_dirty_sreg(self):
create_data = {
'label': 'asdf1',
'domain': self.dom,
'ip_str': '10.2.3.1',
'ip_type': '4',
'system': self.s,
}
update_data = {
'label': 'asdfx1',
}
self.generic_dirty(
StaticReg, create_data, update_data, self.soa, tdiff=2
)
def test_dirty_cname(self):
create_data = {
'label': 'asdf2',
'domain': self.dom,
'target': 'foo.bar.com',
}
update_data = {
'label': 'asdfx2',
}
self.generic_dirty(CNAME, create_data, update_data, self.soa)
def test_dirty_ptr(self):
create_data = {
'ip_str': '10.2.3.4',
'ip_type': '4',
'name': 'foo.bar.com',
}
update_data = {
'label': 'asdfx2',
}
self.generic_dirty(PTR, create_data, update_data, local_soa=self.sr)
def test_dirty_mx(self):
create_data = {
'label': '',
'domain': self.dom,
'priority': 10,
'server': 'foo.bar.com',
}
update_data = {
'label': 'asdfx3',
}
self.generic_dirty(MX, create_data, update_data, self.soa)
def test_dirty_ns(self):
create_data = {
'domain': self.dom,
'server': 'foo.bar.com',
}
update_data = {
'label': 'asdfx4',
}
# We expect nameserver changes to trigger a full rebuild
self.generic_dirty(
Nameserver, create_data, update_data, self.soa, full=True
)
def test_dirty_soa(self):
self.soa.dirty = False
self.soa.refresh = 123
self.soa.save()
self.assertTrue(self.soa.dirty)
def test_dirty_srv(self):
create_data = {
'label': '_asdf7',
'domain': self.dom,
'priority': 10,
'port': 10,
'weight': 10,
'target': 'foo.bar.com',
}
update_data = {
'label': '_asdfx4',
}
self.generic_dirty(SRV, create_data, update_data, self.soa)
def test_dirty_txt(self):
create_data = {
'label': 'asdf8',
'domain': self.dom,
'txt_data': 'some shit',
}
update_data = {
'label': 'asdfx5',
}
self.generic_dirty(TXT, create_data, update_data, self.soa)
```
#### File: mozdns/mozbind/zone_builder.py
```python
from django.db.models import Q
from mozdns.address_record.models import AddressRecord
from mozdns.cname.models import CNAME
from mozdns.mx.models import MX
from mozdns.nameserver.models import Nameserver
from mozdns.ptr.models import PTR
from mozdns.srv.models import SRV
from mozdns.txt.models import TXT
from mozdns.sshfp.models import SSHFP
from mozdns.view.models import View
from core.registration.static.models import StaticReg
from gettext import gettext as _
from core.utils import fail_mail
DEFAULT_TTL = 3600
def render_soa_only(soa, root_domain):
params = {
'ttl': soa.ttl,
'root_domain': root_domain.name,
'primary': soa.primary,
'contact': soa.contact,
'refresh': str(soa.refresh),
'retry': str(soa.retry),
'expire': str(soa.expire),
'minimum': soa.minimum
}
BUILD_STR = _("$TTL {minimum}\n"
"{root_domain}. {ttl} IN SOA {primary}. {contact}. (\n"
"\t\t{{serial}} ; Serial\n"
"\t\t{refresh} ; Refresh\n"
"\t\t{retry} ; Retry\n"
"\t\t{expire} ; Expire\n"
"\t\t{minimum} ; Minimum\n"
")\n\n".format(**params))
return BUILD_STR
def render_rdtype(rdtype_set, **kwargs):
BUILD_STR = ""
for obj in rdtype_set:
BUILD_STR += _(obj.bind_render_record(**kwargs) + "\n")
return BUILD_STR
def _render_forward_zone(default_ttl, nameserver_set, mx_set,
addressrecord_set, interface_set, cname_set, srv_set,
txt_set, sshfp_set):
BUILD_STR = ""
BUILD_STR += render_rdtype(nameserver_set)
BUILD_STR += render_rdtype(mx_set)
BUILD_STR += render_rdtype(txt_set)
BUILD_STR += render_rdtype(sshfp_set)
BUILD_STR += render_rdtype(srv_set)
BUILD_STR += render_rdtype(cname_set)
BUILD_STR += render_rdtype(interface_set, rdtype='A/AAAA')
BUILD_STR += render_rdtype(addressrecord_set)
return BUILD_STR
def render_forward_zone(view, mega_filter):
data = _render_forward_zone(
default_ttl=DEFAULT_TTL,
nameserver_set=Nameserver.objects.
filter(mega_filter).
filter(views__name=view.name).
order_by('server'),
mx_set=MX.objects.
filter(mega_filter).
filter(views__name=view.name).
order_by('id'),
addressrecord_set=AddressRecord.objects.
filter(mega_filter).
filter(views__name=view.name).
order_by('pk', 'ip_type', 'fqdn', 'ip_upper', 'ip_lower'),
interface_set=StaticReg.objects.
filter(decommissioned=False).
filter(mega_filter).
filter(views__name=view.name).
order_by('pk', 'ip_type', 'fqdn', 'ip_upper', 'ip_lower'),
cname_set=CNAME.objects.
filter(mega_filter).
filter(views__name=view.name).
order_by('fqdn'),
srv_set=SRV.objects.
filter(mega_filter).
filter(views__name=view.name).
order_by('pk', 'fqdn'),
txt_set=TXT.objects.
filter(mega_filter).
filter(views__name=view.name).
order_by('pk', 'fqdn'),
sshfp_set=SSHFP.objects.
filter(mega_filter).
filter(views__name=view.name).
order_by('pk', 'fqdn'),
)
return data
def _render_reverse_zone(default_ttl, nameserver_set, mx_set, interface_set,
ptr_set):
BUILD_STR = ''
BUILD_STR += render_rdtype(nameserver_set)
BUILD_STR += render_rdtype(mx_set)
BUILD_STR += render_rdtype(ptr_set)
BUILD_STR += render_rdtype(interface_set, reverse=True, rdtype='PTR')
return BUILD_STR
def render_reverse_zone(view, domain_mega_filter, rdomain_mega_filter):
data = _render_reverse_zone(
default_ttl=DEFAULT_TTL,
nameserver_set=Nameserver.objects.
filter(domain_mega_filter).
filter(views__name=view.name).
order_by('server'),
mx_set=MX.objects.
filter(domain_mega_filter).
filter(views__name=view.name).order_by('id'),
interface_set=StaticReg.objects.
filter(decommissioned=False).
filter(rdomain_mega_filter).
filter(views__name=view.name).
order_by('pk', 'ip_type', 'label', 'ip_upper', 'ip_lower'),
ptr_set=PTR.objects.
filter(rdomain_mega_filter).
filter(views__name=view.name).
order_by('pk', 'ip_upper', 'ip_lower'),
)
return data
def build_zone_data(view, root_domain, soa, logf=None):
"""
This function does the heavy lifting of building a zone. It coordinates
getting all of the data out of the db into BIND format.
:param soa: The SOA corresponding to the zone being built.
:type soa: SOA
:param root_domain: The root domain of this zone.
:type root_domain: str
"""
ztype = 'reverse' if root_domain.is_reverse else 'forward'
if (soa.has_record_set(view=view, exclude_ns=True) and
not root_domain.nameserver_set.filter(views=view).exists()):
msg = ("The {0} zone has a records in the {1} view, but there are "
"no nameservers in that view. Use the search string 'zone=:{0} "
"view=:{1}' to find the troublesome records".format(
root_domain, view.name)
)
fail_mail(msg, subject="Shitty edge case detected.")
logf('LOG_WARNING', msg)
domains = soa.domain_set.all().order_by('name')
    # Build the mega filter!
domain_mega_filter = Q(domain=root_domain)
for domain in domains:
domain_mega_filter = domain_mega_filter | Q(domain=domain)
rdomain_mega_filter = Q(reverse_domain=root_domain)
for reverse_domain in domains:
rdomain_mega_filter = rdomain_mega_filter | Q(
reverse_domain=reverse_domain)
soa_data = render_soa_only(soa=soa, root_domain=root_domain)
try:
if ztype == "forward":
view_data = render_forward_zone(view, domain_mega_filter)
else:
view_data = render_reverse_zone(view, domain_mega_filter,
rdomain_mega_filter)
except View.DoesNotExist:
view_data = ""
if view_data:
view_data = soa_data + view_data
return view_data
```
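Note that `render_soa_only` deliberately leaves `{serial}` unformatted so the builder can substitute the freshly incremented serial just before writing the file. Below is a hedged sketch of how a caller might combine these helpers for one view; `root_domain` and `soa` are assumed to be existing objects, and the real builder also handles staging, locking, and check-in:

```python
# Hedged sketch; assumes root_domain/soa already exist and logging is ignored.
from mozdns.view.models import View

public = View.objects.get(name='public')
zone_text = build_zone_data(public, root_domain, soa,
                            logf=lambda level, msg: None)
zone_file = zone_text.format(serial=soa.get_incremented_serial())
```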
#### File: mozdns/mx/search_indexes.py
```python
from haystack import indexes
from mozdns.mx.models import MX
from mozdns.mozdns_index import MozdnsIndex
class MXIndex(MozdnsIndex, indexes.Indexable):
server = indexes.CharField(model_attr='server')
def get_model(self):
return MX
```
#### File: mozdns/ptr/forms.py
```python
from django import forms
from mozdns.ptr.models import PTR
from mozdns.forms import BaseForm
class PTRForm(BaseForm):
def delete_instance(self, instance):
instance.delete()
class Meta:
model = PTR
exclude = ('ip', 'reverse_domain', 'ip_upper',
'ip_lower')
include = ('name', 'ip_str', 'ip_type', 'ttl', 'views', 'description')
widgets = {'views': forms.CheckboxSelectMultiple}
```
#### File: mozdns/ptr/views.py
```python
from django.core.exceptions import ObjectDoesNotExist
from mozdns.views import MozdnsCreateView
from mozdns.views import MozdnsDeleteView
from mozdns.views import MozdnsDetailView
from mozdns.views import MozdnsListView
from mozdns.views import MozdnsUpdateView
from mozdns.ptr.forms import PTRForm
from mozdns.ptr.models import PTR
from mozdns.domain.models import Domain
from core.network.utils import calc_parent_str
class PTRView(object):
model = PTR
form_class = PTRForm
queryset = PTR.objects.all()
class PTRDeleteView(PTRView, MozdnsDeleteView):
""" """
class PTRDetailView(PTRView, MozdnsDetailView):
""" """
template_name = "ptr/ptr_detail.html"
class PTRCreateView(PTRView, MozdnsCreateView):
def get_form(self, *args, **kwargs):
initial = self.get_form_kwargs()
if 'ip_type' in self.request.GET and 'ip_str' in self.request.GET:
ip_str = self.request.GET['ip_str']
ip_type = self.request.GET['ip_type']
network = calc_parent_str(ip_str, ip_type)
if network and network.vlan and network.site:
expected_name = "{0}.{1}.mozilla.com".format(
network.vlan.name, network.site.get_site_path())
try:
domain = Domain.objects.get(name=expected_name)
except ObjectDoesNotExist:
domain = None
if domain:
initial['initial'] = {'ip_str': ip_str,
'name': "." + domain.name,
'ip_type': ip_type}
else:
initial['initial'] = {'ip_str': ip_str, 'ip_type': ip_type}
return PTRForm(**initial)
class PTRUpdateView(PTRView, MozdnsUpdateView):
""" """
class PTRListView(PTRView, MozdnsListView):
""" """
```
#### File: soa/tests/increment.py
```python
from django.test import TestCase
from mozdns.soa.models import SOA
import datetime
class SOAIncrementTests(TestCase):
def to_date(self, date):
year, month, day = map(lambda s: int(s), (
date[:4], date[4:6], date[6:]
))
return datetime.date(year, month, day)
def test_serial_equal_date(self):
# Case ==
serial = '2111111111'
date = self.to_date('21111111')
new_serial = SOA.calc_serial(serial, date)
self.assertEqual(int(serial) + 1, new_serial)
def test_serial_less_than_date(self):
# Case serial < date
serial = '2111101111'
date_str = '21111111'
date = self.to_date(date_str)
new_serial = SOA.calc_serial(serial, date)
self.assertEqual(int(date_str + '00'), new_serial)
def test_serial_greater_than_date(self):
# Case serial > date
serial = '2111111111'
date = self.to_date('21111011')
new_serial = SOA.calc_serial(serial, date)
self.assertEqual(int(serial) + 1, new_serial)
def test_incremented_serial(self):
soa = SOA.objects.create(
description="foobar baz", contact='fooba.mozilla.com',
primary='ns1.mozilla.com')
old_serial = soa.serial
new_serial = soa.get_incremented_serial()
self.assertEqual(old_serial + 1, new_serial)
def test_day_greater_than_month_max_serial(self):
        # If we start the day at YYYYMMDD00 and we have so many changes
        # that we end up with a DD > 31 (worst case) or MM > 12, we will have
        # issues incrementing the date because the date won't be parsed by
# datetime.date
soa = SOA.objects.create(
description="foobar baz", contact='<EMAIL>',
primary='ns1.mozilla.com', serial='2012083200')
new_serial = soa.get_incremented_serial()
correct_serial = int(datetime.datetime.now().strftime('%Y%m%d00'))
self.assertEqual(correct_serial, new_serial)
```
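The three `calc_serial` cases above boil down to one rule: today's serial floor is `YYYYMMDD00`, and if the stored serial is already at or past that floor, just add one. A standalone restatement of that rule; the canonical logic lives in `SOA.calc_serial` / `SOA.get_incremented_serial` and may handle more edge cases:

```python
# Restatement of the rule exercised by the tests above; illustrative only.
import datetime

def next_serial(current_serial, today=None):
    today = today or datetime.date.today()
    floor = int(today.strftime('%Y%m%d00'))
    if int(current_serial) < floor:
        return floor                    # first change of the day
    return int(current_serial) + 1      # otherwise just bump the counter
```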
#### File: inventory/mozdns/super_views.py
```python
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from mozdns.address_record.models import AddressRecord
from mozdns.ptr.models import PTR
from mozdns.domain.models import Domain
import simplejson as json
import pdb
def mozdns_home(request):
domains = Domain.objects.filter(
is_reverse=False).order_by('name').order_by('soa__description')
return render(request, 'mozdns/mozdns.html', {
'domains': domains,
})
def commit_record(request):
pdb.set_trace()
commit_data = json.loads(request.raw_post_data)
record_type = commit_data.pop("rtype", None)
if not record_type:
commit_data["errors"] = {"__all__": "No record type."}
return HttpResponse(commit_data)
if record_type == "A":
commit_data = add_ip_type_to_commit(commit_data)
commit_data = add_domain_to_commit(commit_data)
Klass = AddressRecord
elif record_type == "PTR":
commit_data = add_ip_type_to_commit(commit_data)
Klass = PTR
try:
obj = Klass(**commit_data)
except ValueError, e:
commit_data["errors"] = e.message_dict
return return_without_domain(commit_data)
try:
obj.full_clean()
except ValidationError, e:
commit_data["errors"] = e.message_dict
print commit_data
return return_without_domain(commit_data)
try:
obj.save()
except ValidationError, e:
commit_data["errors"] = e.message_dict
return return_without_domain(commit_data)
commit_data['success'] = obj.get_absolute_url()
return return_without_domain(commit_data)
def return_without_domain(commit_data):
if "domain" in commit_data:
commit_data["domain"] = commit_data["domain"].name
return_data = json.dumps(commit_data)
return HttpResponse(return_data)
def add_ip_type_to_commit(commit_data):
# Let's guess the IP type. ':' means IPv6
ip_str = commit_data.get("ip_str", "")
if ip_str.find(':') > -1:
commit_data["ip_type"] = '6'
else:
commit_data["ip_type"] = '4'
return commit_data
def add_domain_to_commit(commit_data):
commit_data["domain"] = get_object_or_404(Domain,
name=commit_data["domain"])
return commit_data
```
#### File: mozdns/txt/tests.py
```python
from django.test import TestCase
from django.core.exceptions import ValidationError
from mozdns.txt.models import TXT
from mozdns.domain.models import Domain
class TXTTests(TestCase):
def setUp(self):
self.o = Domain(name="org")
self.o.save()
self.o_e = Domain(name="oregonstate.org")
self.o_e.save()
def do_generic_add(self, data):
txt = TXT(**data)
txt.__repr__()
txt.save()
self.assertTrue(txt.details())
self.assertTrue(txt.get_absolute_url())
self.assertTrue(txt.get_edit_url())
self.assertTrue(txt.get_delete_url())
rtxt = TXT.objects.filter(**data)
self.assertTrue(len(rtxt) == 1)
return txt
def do_remove(self, data):
txt = self.do_generic_add(data)
txt.delete()
rmx = TXT.objects.filter(**data)
self.assertTrue(len(rmx) == 0)
def test_add_remove_txt(self):
label = "asdf"
data = "asdf"
data = {'label': label, 'txt_data': data, 'domain': self.o_e}
self.do_generic_add(data)
label = "asdf"
data = "asdfasfd"
data = {'label': label, 'txt_data': data, 'domain': self.o_e}
self.do_generic_add(data)
label = "df"
data = "aasdf"
data = {'label': label, 'txt_data': data, 'domain': self.o_e}
self.do_generic_add(data)
label = "12314"
data = "dd"
data = {'label': label, 'txt_data': data, 'domain': self.o}
self.do_generic_add(data)
def test_bad_data(self):
label = "asdf"
data = '"dfa f'
data = {'label': label, 'txt_data': data, 'domain': self.o_e}
self.assertRaises(ValidationError, self.do_generic_add, data)
```
#### File: inventory/MozInvAuthorization/BaseACL.py
```python
from django.core.exceptions import PermissionDenied
class BaseACL(object):
request = None
user = None
def __init__(self, request):
self.request = request
if request.user.username and request.user.username != '':
self.user = request.user.username
else:
self.user = request.META['REMOTE_USER']
def check_create(self, allowed = None):
pass
def check_read(self, allowed = None):
pass
def check_update(self, allowed = None):
pass
def check_delete(self, allowed = None):
pass
"""
    check_for_permission currently just looks at a setting var;
    its main purpose for existence is to allow easy extension to look for group membership via LDAP
"""
def check_for_permission(self, user, acl_list):
if user is None or user == '' or user not in acl_list:
raise PermissionDenied('You do not have permission to delete this license.')
```
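The note in the class body says `check_for_permission` exists so subclasses can swap in a group lookup (for example LDAP) instead of a flat settings list. A hedged sketch of such an extension, reusing the module's `PermissionDenied` import; `fetch_ldap_groups` is a hypothetical helper that Inventory does not ship:

```python
# Hedged sketch; fetch_ldap_groups() is hypothetical, not part of the codebase.
class LdapBackedACL(BaseACL):
    def check_for_permission(self, user, acl_list):
        groups = fetch_ldap_groups(user)          # hypothetical LDAP lookup
        if user in acl_list or set(groups) & set(acl_list):
            return
        raise PermissionDenied(
            'You do not have permission to perform this action.')
```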
#### File: inventory/MozInvAuthorization/UnmanagedSystemACL.py
```python
from django.core.exceptions import PermissionDenied
from MozInvAuthorization.BaseACL import BaseACL
from settings import USER_SYSTEM_ALLOWED_DELETE
class UnmanagedSystemACL(BaseACL):
def __init__(self, request):
self.request = request
if request.user.username and request.user.username != '':
self.user = self.request.user.username
else:
self.user = self.request.META['REMOTE_USER']
def check_delete(self, allowed = None):
if allowed:
allowed = allowed
else:
allowed = USER_SYSTEM_ALLOWED_DELETE
self.check_for_permission(self.user, allowed)
```
#### File: inventory/oncall/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from oncall.models import OncallAssignment
from oncall.forms import OncallForm
from oncall.constants import ONCALL_TYPES
import simplejson as json
def getoncall(request, oncall_type):
"""
Returns information about who is oncall. Oncall types include 'desktop',
'sysadmin', and 'services'.
Use ?format=<format> to determine the format of the response.
Format 'json':
{
"irc_nic": <IRC nick>,
"ldap_username": <Username>,
"pager_type": <Pager type>,
"pager_number": <Pager number>,
"epager_address": <Epager address>
}
Format 'delimited':
<IRC nick>:<Username>:<Pager type>:<Pager number>:<Epager address>
Format 'meta':
The field names returned by 'delimited'
You can use the 'meta' format like you would use the first line of a CSV to
determine what fields are being returned by 'delimited'.
"""
if oncall_type not in ONCALL_TYPES:
return HttpResponse('nobody')
profile = OncallAssignment.objects.get(
oncall_type=oncall_type
).user.get_profile()
format = request.GET.get('format', 'basic')
if format == 'basic':
response = profile.irc_nick
elif format in ('json', 'delimited', 'meta'):
attrs = (
("irc_nic", profile.irc_nick or ''),
("ldap_username", profile.user.username or ''),
("pager_type", profile.pager_type or ''),
("pager_number", profile.pager_number or ''),
("epager_address", profile.epager_address or '')
)
if format == 'json':
response = json.dumps(dict(attrs))
elif format == 'delimited':
response = ':'.join([el[1] for el in attrs])
elif format == 'meta':
response = ':'.join([el[0] for el in attrs])
return HttpResponse(response)
def oncall(request):
changes = []
if request.method == 'POST':
form = OncallForm(request.POST)
if form.is_valid():
changes = form.save()
else:
return render(
request,
'oncall/oncall_form.html',
{'form': form, 'changes': changes}
)
initial = {}
for onct in ONCALL_TYPES:
try:
cur = OncallAssignment.objects.get(oncall_type=onct)
cur_onc_name = cur.user.username
except OncallAssignment.DoesNotExist:
cur_onc_name = ''
initial[onct] = cur_onc_name
form = OncallForm(initial=initial)
return render(
request,
'oncall/oncall_form.html',
{'form': form, 'changes': changes}
)
```
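As the docstring notes, the 'meta' format is the header row for 'delimited', so a client can zip the two responses together into a dict. A hedged sketch; the host name and mount point are assumptions:

```python
# Hedged client-side sketch; the URL is an assumption.
import requests

base = "https://inventory.example.com/oncall/getoncall/sysadmin/"
fields = requests.get(base, params={"format": "meta"}).text.split(":")
values = requests.get(base, params={"format": "delimited"}).text.split(":")
oncall = dict(zip(fields, values))
print oncall.get("ldap_username"), oncall.get("pager_number")
```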
#### File: inventory/reports/forms.py
```python
from django import forms
from django.forms.extras.widgets import SelectDateWidget
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
import models
from django.forms.widgets import RadioSelect, CheckboxSelectMultiple
from django.template.defaultfilters import pluralize
from django.contrib.humanize.templatetags.humanize import apnumber
from systems.models import Allocation, SystemStatus, OperatingSystem
from core.site.models import Site
class MultiSelectFormField(forms.MultipleChoiceField):
widget = forms.CheckboxSelectMultiple
def __init__(self, *args, **kwargs):
self.max_choices = kwargs.pop('max_choices', 0)
super(MultiSelectFormField, self).__init__(*args, **kwargs)
def clean(self, value):
if not value and self.required:
raise forms.ValidationError(self.error_messages['required'])
if value and self.max_choices and len(value) > self.max_choices:
raise forms.ValidationError('You must select a maximum of %s choice%s.'
% (apnumber(self.max_choices), pluralize(self.max_choices)))
return value
class ReportForm(forms.Form):
system_type = MultiSelectFormField(
required=True,
choices=[
('SYSTEM', 'SYSTEM'),
#('UNMANAGED', 'UNMANAGED'),
] )
output = forms.ChoiceField(
required=False,
choices=[
('SCREEN', 'SCREEN'),
('CSV', 'CSV'),
] )
system_status = forms.MultipleChoiceField(
required=False,
widget=CheckboxSelectMultiple(attrs={'class': 'system_status'}),
choices=[('-1', 'All')] + [(m.id, m) for m in SystemStatus.objects.all()])
site = forms.MultipleChoiceField(
required=False,
widget=CheckboxSelectMultiple(attrs={'class': 'system_site'}),
choices=[('-1', 'All')] + [(m.id, m) for m in Site.objects.all()])
allocation = forms.ChoiceField(
required=False,
choices=[('', 'All')] + [(m.id, m)
for m in Allocation.objects.all()])
operating_system = forms.CharField(
max_length=72,
required = False
)
server_models = forms.CharField(
max_length=72,
required = False
)
```
#### File: inventory/reversion_compare/helpers.py
```python
import difflib
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
try:
# http://code.google.com/p/google-diff-match-patch/
from diff_match_patch import diff_match_patch
except ImportError:
google_diff_match_patch = False
else:
google_diff_match_patch = True
dmp = diff_match_patch()
#google_diff_match_patch = False # manually disable, for testing
def highlight_diff(diff_text):
"""
Simple highlight a diff text in the way pygments do it ;)
"""
html = ['<pre class="highlight">']
for line in diff_text.splitlines():
line = escape(line)
if line.startswith("+"):
line = '<ins>%s</ins>' % line
elif line.startswith("-"):
line = '<del>%s</del>' % line
html.append(line)
html.append("</pre>")
html = "\n".join(html)
return html
SEMANTIC = 1
EFFICIENCY = 2
# Change from ndiff to unified_diff if old/new values are more than X lines:
LINE_COUNT_4_UNIFIED_DIFF = 4
def format_range(start, stop):
"""
Convert range to the "ed" format
difflib._format_range_unified() is new in python 2.7
see also: https://github.com/jedie/django-reversion-compare/issues/5
"""
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{0}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{0},{1}'.format(beginning, length)
def unified_diff(a, b, n=3, lineterm='\n'):
r"""
    similar to the original difflib.unified_diff except:
- no fromfile/tofile and no fromfiledate/tofiledate info lines
- newline before diff control lines and not after
Example:
>>> for line in unified_diff('one two three four'.split(),
... 'zero one tree four'.split(), lineterm=''):
... print line # doctest: +NORMALIZE_WHITESPACE
@@ -1,4 +1,4 @@
+zero
one
-two
-three
+tree
four
"""
started = False
for group in difflib.SequenceMatcher(None, a, b).get_grouped_opcodes(n):
first, last = group[0], group[-1]
file1_range = format_range(first[1], last[2])
file2_range = format_range(first[3], last[4])
if not started:
started = True
yield '@@ -{0} +{1} @@'.format(file1_range, file2_range)
else:
yield '{0}@@ -{1} +{2} @@'.format(lineterm, file1_range, file2_range)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag in ('replace', 'delete'):
for line in a[i1:i2]:
yield '-' + line
if tag in ('replace', 'insert'):
for line in b[j1:j2]:
yield '+' + line
def html_diff(value1, value2, cleanup=SEMANTIC):
"""
    Generates a diff using google-diff-match-patch if it is available, or ndiff as a fallback.
    The cleanup parameter can be SEMANTIC, EFFICIENCY or None to clean up the diff
    for greater human readability.
"""
value1 = force_unicode(value1)
value2 = force_unicode(value2)
if google_diff_match_patch:
# Generate the diff with google-diff-match-patch
diff = dmp.diff_main(value1, value2)
if cleanup == SEMANTIC:
dmp.diff_cleanupSemantic(diff)
elif cleanup == EFFICIENCY:
dmp.diff_cleanupEfficiency(diff)
elif cleanup is not None:
raise ValueError("cleanup parameter should be one of SEMANTIC, EFFICIENCY or None.")
html = dmp.diff_prettyHtml(diff)
        html = html.replace("¶<br>", "</br>")  # IMHO the paragraph marks are needless
else:
# fallback: use built-in difflib
value1 = value1.splitlines()
value2 = value2.splitlines()
if len(value1) > LINE_COUNT_4_UNIFIED_DIFF or len(value2) > LINE_COUNT_4_UNIFIED_DIFF:
diff = unified_diff(value1, value2, n=2)
else:
diff = difflib.ndiff(value1, value2)
diff_text = "\n".join(diff)
html = highlight_diff(diff_text)
html = mark_safe(html)
return html
def compare_queryset(first, second):
"""
Simple compare two querysets (used for many-to-many field compare)
XXX: resort results?
"""
result = []
for item in set(first).union(set(second)):
if item not in first: # item was inserted
item.insert = True
elif item not in second: # item was deleted
item.delete = True
result.append(item)
return result
if __name__ == "__main__":
import doctest
print doctest.testmod(
# verbose=True
verbose=False
)
```
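A minimal usage sketch for the helpers above; the exact markup differs depending on whether google-diff-match-patch is installed, since `html_diff` falls back to difflib otherwise:

```python
# Minimal usage sketch; output markup depends on the installed diff backend.
old = "ttl: 3600\nviews: public"
new = "ttl: 300\nviews: public, private"
print html_diff(old, new)
print highlight_diff("\n".join(unified_diff(old.splitlines(),
                                            new.splitlines())))
```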
#### File: inventory/reversion_compare/__init__.py
```python
import os
import time
import warnings
import subprocess
__version__ = (0, 3, 4)
VERSION_STRING = '.'.join(str(part) for part in __version__)
#VERBOSE = True
VERBOSE = False
def _error(msg):
if VERBOSE:
warnings.warn(msg)
return ""
def get_commit_timestamp(path=None):
if path is None:
path = os.path.abspath(os.path.dirname(__file__))
try:
process = subprocess.Popen(
# %ct: committer date, UNIX timestamp
["/usr/bin/git", "log", "--pretty=format:%ct", "-1", "HEAD"],
shell=False, cwd=path,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
)
except Exception, err:
return _error("Can't get git hash: %s" % err)
process.wait()
returncode = process.returncode
if returncode != 0:
return _error(
"Can't get git hash, returncode was: %r"
" - git stdout: %r"
" - git stderr: %r"
% (returncode, process.stdout.readline(), process.stderr.readline())
)
output = process.stdout.readline().strip()
try:
timestamp = int(output)
except Exception, err:
return _error("git log output is not a number, output was: %r" % output)
try:
return time.strftime(".%m%d", time.gmtime(timestamp))
except Exception, err:
return _error("can't convert %r to time string: %s" % (timestamp, err))
VERSION_STRING += get_commit_timestamp()
if __name__ == "__main__":
print VERSION_STRING
```
#### File: inventory/scripts/decomm_scl1.py
```python
__import__('inventory_context')
from django.db.models import Q
from systems.models import System
from systems.models import KeyValue
from systems.models import SystemStatus
from core.search.compiler.django_compile import compile_to_django
decomm = SystemStatus.objects.get(status='decommissioned')
system_map = [False] * System.objects.all().order_by('-pk')[0].pk
def mark_systems(search, key, value, extra=None):
print ""
print "-----------------------------------"
print "Systems getting Key: {0} Value: {1}".format(key, value)
systems = compile_to_django(search)[0]['SYS'].filter(
~Q(system_status=decomm)
)
for s in systems:
if extra:
extra(s, key, value)
else:
print s.hostname
system_map[s.pk] = True
KeyValue.objects.get_or_create(obj=s, key=key, value=value)
search_decomm = "(talos-r3 OR linux64-ix-slave OR linux-ix-slave OR releng-puppet OR buildbot-master) AND scl1 AND type=:sys" # noqa
mark_systems(search_decomm, 'decomm.scl1', 'decommission')
search_migrate = "(talos-r4 OR EDID OR panda OR foopy OR mobile-imaging) AND scl1 AND type=:sys" # noqa
mark_systems(search_migrate, 'decomm.scl1', 'migrate')
search_ = "(bld-centos6-hp OR bld-linux64-ix OR w64-ix) AND scl1 AND type=:sys" # noqa
mark_systems(search_, 'decomm.scl1', 'replace')
search_ = "/scl1.mozilla.(com|net|org) type=:sys" # noqa
def add_tbd(system, key, value):
if system_map[system.pk]:
return
print system.hostname
system_map[system.pk] = True # redundant
KeyValue.objects.get_or_create(obj=system, key=key, value=value)
mark_systems(search_, 'decomm.scl1', 'tbd', extra=add_tbd)
```
#### File: inventory/scripts/output_reverse_dns_zones.py
```python
import sys
import os
try:
import json
except:
from django.utils import simplejson as json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.base'
from django.test.client import Client
def main():
client = Client()
reverse_dns_zones = []
reverse_dns_zones = json.loads(client.get('/api/v2/reverse_dns/1/get_reverse_dns_zones/').content)
    output_dir = "/etc/dnsconfig-autodeploy"
for dns_zone in reverse_dns_zones:
dir = dns_zone.split("-")[0]
output_file = '-'.join(dns_zone.split("-")[1:])
final_destination_file = "%s/%s/%s" % (output_dir,dir, output_file)
output_text = client.get('/api/v2/reverse_dns/%s/view_hosts/' % dns_zone).content
output_text = output_text[1:-1]
#f = open(final_destination_file,"w")
output_text = output_text.replace("\\n","\n")
output_text = output_text.replace('\\"','"')
#print output_text
#f.write(output_text)
#f.close()
#if len(reverse_dns_zones) > 0:
# os.chdir(output_dir)
# os.system('/usr/bin/svn update')
# os.system('/usr/bin/svn add * --force')
# os.system('/usr/bin/svn commit -m "Autogenerated addition from inventory"')
#os.system('/usr/bin/git push origin master')
if __name__ == '__main__':
main()
```
#### File: inventory/scripts/set_3600_ttl_to_None.py
```python
from inventory_context import *
from django.db.models import Q
from core.search.compiler.invfilter import searchables
from mozdns.soa.models import SOA
def fix_ttl_for_rdtype(manager):
manager.objects.filter(Q(ttl=3600)).update(ttl=None)
manager.objects.filter(Q(ttl=0)).update(ttl=None)
rdtypes = ('A', 'CNAME', 'NS', 'TXT', 'SRV', 'PTR', 'SRV', 'MX', 'SSHFP')
for s_rdtype, manager in searchables:
if s_rdtype in rdtypes:
fix_ttl_for_rdtype(manager)
for s in SOA.objects.all():
s.minimum = 3600
s.save()
```
#### File: inventory/slurpee/tests.py
```python
from django.test import Client, TestCase
from slurpee.models import ExternalData
from slurpee.constants import P_OVERLAY
from systems.tests.utils import create_fake_host
import simplejson as json
class ExternalDataTests(TestCase):
def setUp(self):
serial = 'asdf'
self.external_serial = serial + 'asdf'
self.s = create_fake_host(
hostname='fakehost.mozilla.com', serial=serial
)
ExternalData.objects.create(
system=self.s,
name='serial',
source_name='serial',
data=self.external_serial, # conflict data
source='foo-source',
policy=P_OVERLAY
)
self.c = Client()
def test_conflicts_page(self):
"""Animals that can speak are correctly identified"""
resp = self.c.get(
"/slurpee/conflicts/?search={0}".format(self.s.hostname),
follow=True
)
self.assertEqual(200, resp.status_code)
def test_sync(self):
"""Animals that can speak are correctly identified"""
resp = self.c.post("/en-US/systems/sync_external_data/", {
'attr': 'serial',
'source': 'foo-source',
'system_pk': self.s.pk
})
self.assertEqual(200, resp.status_code, json.loads(resp.content))
# Refresh the object cache
s = self.s.__class__.objects.get(pk=self.s.pk)
self.assertEqual(self.external_serial, s.serial)
```
#### File: inventory/slurpee/views.py
```python
import MySQLdb
from django.shortcuts import render
from core.search.compiler.django_compile import search_type
from slurpee.constants import P_OVERLAY
def conflict_attrs(s):
return s.externaldata_set.filter(policy=P_OVERLAY)
def get_conflicts(s):
conflicts = []
for ed in conflict_attrs(s):
if s.external_data_conflict(ed.name):
conflicts.append(ed)
return conflicts
def show_conflicts(request):
"""
    Show systems whose external (slurped) data conflicts with the values
    stored in Inventory.
"""
if request.GET:
search = request.GET.get('search', '')
records, error = search_type(search, 'SYS')
try:
total = records.count()
records = records
except MySQLdb.OperationalError, e:
if "Got error " in str(e) and " from regexp" in str(e):
# This is nasty. If the user is using an invalid regex
                # pattern, the db might shit a brick
total = 0
records = []
else:
raise
return render(request, 'slurpee/conflicts.html', {
'search': search,
'total': total,
'records': records,
'get_conflicts': get_conflicts,
'getattr': getattr
})
else:
return render(request, 'slurpee/conflicts.html', {
'search': '',
'total': 0
})
```
#### File: inventory/systems/models.py
```python
from django.db import models
from django.db.models import Q
from django.core.exceptions import ValidationError
from django.db.models.signals import post_save
from django.db.models.query import QuerySet
from django.contrib.auth.models import User
from dhcp.models import DHCP
from settings import BUG_URL
from mozdns.validation import validate_name
from core.validation import validate_mac
from core.site.models import Site
from core.keyvalue.mixins import KVUrlMixin
from core.keyvalue.models import KeyValue as BaseKeyValue
from core.mixins import CoreDisplayMixin
from core.utils import create_key_index
from core.tests.utils import Refresher
import datetime
import re
import socket
import math
class QuerySetManager(models.Manager):
def get_query_set(self):
return self.model.QuerySet(self.model)
def __getattr__(self, attr, *args):
return getattr(self.get_query_set(), attr, *args)
class DirtyFieldsMixin(object):
def __init__(self, *args, **kwargs):
super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
post_save.connect(
self._reset_state, sender=self.__class__,
dispatch_uid='{0}-DirtyFieldsMixin-sweeper'.format(
self.__class__.__name__)
)
self._reset_state()
def _reset_state(self, *args, **kwargs):
self._original_state = self._as_dict()
def _as_dict(self):
return dict([
(f.attname, getattr(self, f.attname))
for f in self._meta.local_fields
])
def get_dirty_fields(self):
new_state = self._as_dict()
return dict([
(key, value) for key, value
in self._original_state.iteritems() if value != new_state[key]
])
class BuildManager(models.Manager):
def get_query_set(self):
return super(BuildManager, self).get_query_set().filter(
allocation__name='release'
)
class SystemWithRelatedManager(models.Manager):
def get_query_set(self):
objects = super(SystemWithRelatedManager, self).get_query_set()
return objects.select_related(
'operating_system',
'server_model',
'allocation',
'system_rack',
)
class Allocation(models.Model):
name = models.CharField(max_length=255, blank=False)
search_fields = ('name',)
class Meta:
db_table = u'allocations'
ordering = ['name']
def __unicode__(self):
return self.name
def __str__(self):
return self.name
@classmethod
def get_api_fields(cls):
return ('name',)
def clean(self):
        # Normalize our name
label_lists = self.name.split(':')
label_lists = [ll.split() for ll in label_lists]
label_lists = [map(lambda i: i.title(), ll) for ll in label_lists]
label_lists = [' '.join(ll) for ll in label_lists]
self.name = ' : '.join(label_lists)
def save(self, *args, **kwargs):
self.full_clean()
super(Allocation, self).save(*args, **kwargs)
class ScheduledTask(models.Model):
task = models.CharField(max_length=255, blank=False, unique=True)
type = models.CharField(max_length=255, blank=False)
objects = QuerySetManager()
class QuerySet(QuerySet):
def delete_all_reverse_dns(self):
self.filter(type='reverse_dns_zone').delete()
def delete_all_dhcp(self):
self.filter(type='dhcp').delete()
def dns_tasks(self):
return self.filter(type='dns')
def get_all_dhcp(self):
return self.filter(type='dhcp')
def get_all_reverse_dns(self):
return self.filter(type='reverse_dns_zone')
def get_next_task(self, type=None):
if type is not None:
try:
return self.filter(type=type)[0]
except:
return None
else:
return None
def get_last_task(self, type=None):
if type is not None:
try:
return self.filter(type=type)[-1]
except:
return None
else:
return None
class Meta:
db_table = u'scheduled_tasks'
ordering = ['task']
class Contract(models.Model):
contract_number = models.CharField(max_length=255, blank=True)
support_level = models.CharField(max_length=255, blank=True)
contract_link = models.CharField(max_length=255, blank=True)
phone = models.CharField(max_length=40, blank=True)
expiration = models.DateTimeField(null=True, blank=True)
system = models.ForeignKey('System')
created_on = models.DateTimeField(null=True, blank=True)
updated_on = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'contracts'
class Location(models.Model):
name = models.CharField(unique=True, max_length=255, blank=True)
address = models.TextField(blank=True, null=True)
note = models.TextField(blank=True, null=True)
class Meta:
db_table = u'locations'
ordering = ['name']
def __unicode__(self):
return self.name
def get_absolute_url(self):
return '/systems/locations/show/{0}/'.format(self.pk)
def get_edit_url(self):
return self.get_absolute_url()
class PortData(models.Model):
ip_address = models.CharField(max_length=15, blank=True)
port = models.IntegerField(blank=True)
protocol = models.CharField(max_length=3, blank=True)
state = models.CharField(max_length=13, blank=True)
service = models.CharField(max_length=64, blank=True)
version = models.CharField(max_length=128, blank=True)
def __unicode__(self):
return self.ip_address
class Meta:
db_table = u'port_data'
class AdvisoryData(models.Model):
ip_address = models.CharField(max_length=15, blank=True)
advisory = models.TextField(blank=True)
title = models.TextField(blank=True)
severity = models.FloatField(blank=True)
references = models.TextField(blank=True)
class Meta:
db_table = u'advisory_data'
def __unicode__(self):
return self.ip_address
class ApiManager(models.Manager):
def get_query_set(self):
results = super(ApiManager, self).get_query_set()
return results
class KeyValue(BaseKeyValue, KVUrlMixin):
obj = models.ForeignKey('System', null=True)
objects = models.Manager()
expanded_objects = ApiManager()
class Meta:
db_table = u'key_value'
def __unicode__(self):
return self.key if self.key else ''
def __repr__(self):
return "<{0}: '{1}'>".format(self.key, self.value)
def save(self, *args, **kwargs):
if re.match('^nic\.\d+\.mac_address\.\d+$', self.key):
self.value = self.value.replace('-', ':')
self.value = validate_mac(self.value)
if self.key is None:
self.key = ''
if self.value is None:
self.value = ''
super(KeyValue, self).save(*args, **kwargs)
class NetworkAdapter(models.Model):
system_id = models.IntegerField()
mac_address = models.CharField(max_length=255)
ip_address = models.CharField(max_length=255)
adapter_name = models.CharField(max_length=255)
system_id = models.CharField(max_length=255)
switch_port = models.CharField(max_length=128)
filename = models.CharField(max_length=64)
option_host_name = models.CharField(max_length=64)
option_domain_name = models.CharField(max_length=128)
dhcp_scope = models.ForeignKey(DHCP, null=True, blank=True)
switch_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'network_adapters'
def save(self, *args, **kwargs):
self.full_clean() # Calls field.clean() on all fields.
super(NetworkAdapter, self).save(*args, **kwargs)
def get_system_host_name(self):
systems = System.objects.filter(id=self.system_id)
if systems:
for system in systems:
return system.hostname
else:
return ''
class Mac(models.Model):
system = models.ForeignKey('System')
mac = models.CharField(unique=True, max_length=17)
class Meta:
db_table = u'macs'
class OperatingSystem(models.Model):
name = models.CharField(max_length=255, blank=True)
version = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'operating_systems'
ordering = ['name', 'version']
def __unicode__(self):
return "%s - %s" % (self.name, self.version)
@classmethod
def get_api_fields(cls):
return ('name', 'version')
class ServerModel(models.Model):
vendor = models.CharField(max_length=255, blank=True)
model = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True, null=True)
part_number = models.CharField(max_length=255, blank=True, null=True)
class Meta:
db_table = u'server_models'
ordering = ['vendor', 'model']
def __unicode__(self):
return u"%s - %s" % (self.vendor, self.model)
@classmethod
def get_api_fields(cls):
return ('vendor', 'model', 'part_number', 'description')
class SystemRack(models.Model):
name = models.CharField(max_length=255)
site = models.ForeignKey(Site, null=True)
location = models.ForeignKey('Location', null=True)
search_fields = ('name', 'site__name')
class Meta:
db_table = u'system_racks'
ordering = ['name']
def __str__(self):
return "%s - %s" % (
self.name, self.site.full_name if self.site else ''
)
def __unicode__(self):
return str(self)
@classmethod
def get_api_fields(cls):
return ('name', 'location', 'site')
def get_absolute_url(self):
return '/en-US/systems/racks/?rack={0}'.format(self.pk)
def get_edit_url(self):
return '/en-US/systems/racks/edit/{0}/'.format(self.pk)
def delete(self, *args, **kwargs):
self.system_set.clear()
super(SystemRack, self).delete(*args, **kwargs)
def systems(self):
return self.system_set.select_related().order_by('rack_order')
class SystemType(models.Model):
type_name = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'system_types'
def __unicode__(self):
return self.type_name
@classmethod
def get_api_fields(cls):
return ('type_name',)
class SystemStatus(models.Model):
status = models.CharField(max_length=255, blank=True)
color = models.CharField(max_length=255, blank=True)
color_code = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'system_statuses'
ordering = ['status']
def __unicode__(self):
return self.status
@classmethod
def get_api_fields(cls):
return ('status',)
class System(Refresher, DirtyFieldsMixin, CoreDisplayMixin, models.Model):
YES_NO_CHOICES = (
(0, 'No'),
(1, 'Yes'),
)
# Related Objects
operating_system = models.ForeignKey(
'OperatingSystem', blank=True, null=True)
allocation = models.ForeignKey('Allocation', blank=True, null=True)
system_type = models.ForeignKey('SystemType', blank=True, null=True)
system_status = models.ForeignKey('SystemStatus', blank=True, null=True)
server_model = models.ForeignKey('ServerModel', blank=True, null=True)
system_rack = models.ForeignKey('SystemRack', blank=True, null=True)
hostname = models.CharField(
unique=True, max_length=255, validators=[validate_name]
)
serial = models.CharField(max_length=255, blank=True, null=True)
created_on = models.DateTimeField(null=True, blank=True)
updated_on = models.DateTimeField(null=True, blank=True)
oob_ip = models.CharField(max_length=30, blank=True, null=True)
asset_tag = models.CharField(max_length=255, blank=True, null=True)
notes = models.TextField(blank=True, null=True)
licenses = models.TextField(blank=True, null=True)
rack_order = models.DecimalField(
null=True, blank=True, max_digits=6, decimal_places=2)
switch_ports = models.CharField(max_length=255, blank=True, null=True)
patch_panel_port = models.CharField(max_length=255, blank=True, null=True)
oob_switch_port = models.CharField(max_length=255, blank=True, null=True)
purchase_date = models.DateField(null=True, blank=True)
purchase_price = models.CharField(max_length=255, blank=True, null=True)
change_password = models.DateTimeField(null=True, blank=True)
ram = models.CharField(max_length=255, blank=True, null=True)
is_dhcp_server = models.IntegerField(
choices=YES_NO_CHOICES, blank=True, null=True)
is_dns_server = models.IntegerField(
choices=YES_NO_CHOICES, blank=True, null=True)
is_puppet_server = models.IntegerField(
choices=YES_NO_CHOICES, blank=True, null=True)
is_nagios_server = models.IntegerField(
choices=YES_NO_CHOICES, blank=True, null=True)
is_switch = models.IntegerField(
choices=YES_NO_CHOICES, blank=True, null=True)
warranty_start = models.DateField(blank=True, null=True, default=None)
warranty_end = models.DateField(blank=True, null=True, default=None)
objects = models.Manager()
build_objects = BuildManager()
with_related = SystemWithRelatedManager()
search_fields = (
"hostname", "serial", "notes", "asset_tag",
"oob_ip", "system_rack__site__full_name", "system_rack__name"
)
template = (
"{hostname:$lhs_just} {oob_ip_str:$rdtype_just} INV "
"{rdtype:$rdtype_just} {asset_tag_str} {serial_str}"
)
class Meta:
db_table = u'systems'
def __str__(self):
return self.hostname
@classmethod
def get_api_fields(cls):
return [
'operating_system', 'server_model', 'allocation', 'system_rack',
'system_type', 'system_status', 'hostname', 'serial', 'oob_ip',
'asset_tag', 'notes', 'rack_order', 'switch_ports',
'patch_panel_port', 'oob_switch_port', 'purchase_date',
'purchase_price', 'change_password', 'warranty_start',
'warranty_end',
]
@property
def primary_ip(self):
try:
first_ip = self.keyvalue_set.filter(
key__contains='ipv4_address').order_by('key')[0].value
return first_ip
except:
return None
@property
def primary_reverse(self):
try:
return str(socket.gethostbyaddr(self.primary_ip)[0])
except:
return None
@property
def notes_with_link(self):
if not self.notes:
return ''
notes = self.notes
pattern = '([bB]ug#?\D#?(\d+))'
matches = re.findall(pattern, notes)
for raw_text, bug_number in matches:
bug_url = '<a href="{0}{1}">{2}</a>'.format(
BUG_URL, bug_number, raw_text
)
notes = notes.replace(raw_text, bug_url, 1)
return notes
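# Illustrative example (hypothetical note text): with BUG_URL set to a bug
# tracker prefix, a note containing "bug 999204" is rewritten by
# notes_with_link as '<a href="<BUG_URL>999204">bug 999204</a>'.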
@classmethod
def field_names(cls):
return [field.name for field in cls._meta.fields]
@classmethod
def rack_ordering(cls, systems):
"""
A generator that sorts the systems by whole rack_order value (in
descending order) and then sub sorts the decimal part of rack_order in
ascending order.
I.e.
45.00
44.00
43.00
31.00
31.01
31.02
21.00
11.01
11.02
11.03
11.04
1.00
(See bug 999204)
"""
if isinstance(systems, QuerySet):
systems = list(systems)
systems = list(reversed(sorted(systems, key=lambda s: s.rack_order)))
i = 0
cur_integer = None
while True:
if i >= len(systems):
break
if systems[i].rack_order is None:
yield systems[i]
i += 1
continue
cur_integer = math.floor(systems[i].rack_order)
j = i
while (
(j + 1) < len(systems) and
systems[j + 1].rack_order is not None and
math.floor(systems[j + 1].rack_order) == cur_integer
):
j += 1
new_i = j + 1
while j >= i:
yield systems[j]
j -= 1
i = new_i
@classmethod
def get_bulk_action_list(cls, query, fields=None, show_related=True):
"""
Return a list of serialized system objects and their related objects to
be used in the bulk_action api.
This function will serialize and export StaticReg objects and their
accompanying HWAdapter objects
"""
if not fields:
fields = cls.get_api_fields() + ['pk']
# Pull in all system blobs and tally which pks we've seen. In one swoop
# pull in all staticreg blobs and put them with their systems.
sys_t_bundles = cls.objects.filter(query).values_list(*fields)
sys_d_bundles = {}
sys_pks = []
for t_bundle in sys_t_bundles:
d_bundle = dict(zip(fields, t_bundle))
system_hostname = d_bundle['hostname']
sys_d_bundles[system_hostname] = d_bundle
sys_d_bundles[system_hostname]['keyvalue_set'] = create_key_index(
cls.keyvalue_set.related.model.objects.filter(
obj=d_bundle['pk']
).values('key', 'value', 'pk')
)
if show_related:
sys_pks.append(d_bundle['pk'])
sys_q = Q(system__in=sys_pks)
# Note that CNAMEs are pulled in during this call
sreg_bundles = cls.staticreg_set.related.model.get_bulk_action_list(
sys_q
)
hw_q = Q(sreg__system__in=sys_pks)
hw_bundles = (
cls.staticreg_set.related.model.
hwadapter_set.related.model.get_bulk_action_list(hw_q)
)
# JOIN staticreg, hw_adapter ON sreg_pk
for sreg_pk, hw_bundle in hw_bundles.iteritems():
sreg_bundles[sreg_pk]['hwadapter_set'] = hw_bundle
for sreg_pk, sreg_bundle in sreg_bundles.iteritems():
system = sreg_bundle.pop('system__hostname')
sys_d_bundles[system].setdefault(
'staticreg_set', {}
)[sreg_bundle['name']] = sreg_bundle
return sys_d_bundles
@property
def rdtype(self):
return 'SYS'
def bind_render_record(self, **kwargs):
data = {
'oob_ip_str': self.oob_ip or 'None',
'asset_tag_str': self.asset_tag or 'None',
'serial_str': self.serial or 'None'
}
return super(System, self).bind_render_record(**data)
def save(self, *args, **kwargs):
self.save_history(kwargs)
self.full_clean()
super(System, self).save(*args, **kwargs)
def clean(self):
# Only do this validation on new systems. Current data is so poor that
# requiring existing systems to have this data is impossible
if self.pk:
return
if not self.is_vm():
self.validate_warranty()
self.validate_serial()
if not self.system_status:
self.system_status, _ = SystemStatus.objects.get_or_create(
status='building'
)
self.validate_allocation()
def is_vm(self):
if not self.system_type:
return False
return (
False if self.system_type.type_name.find('Virtual Server') == -1
else True
)
def validate_system_type(self):
if not self.system_type:
raise ValidationError(
"Server Type is a required field"
)
def validate_allocation(self):
if (not self.allocation and self.system_status and
self.system_status.status != 'decommissioned'):
raise ValidationError(
"Systems that are not decommissioned require an allocation"
)
def validate_serial(self):
if not self.serial:
raise ValidationError(
"Serial numbers are reruied for non VM systems"
)
def validate_warranty(self):
# If pk is None we are a new system. New systems are required to have
# their warranty data set
if self.pk is None and not bool(self.warranty_end):
raise ValidationError(
"Warranty Data is required for non virtual systems"
)
if bool(self.warranty_start) ^ bool(self.warranty_end):
raise ValidationError(
"Warranty must have a start and end date"
)
if not self.warranty_start:
return
if self.warranty_start.timetuple() > self.warranty_end.timetuple():
raise ValidationError(
"warranty start date should be before the end date"
)
def save_history(self, kwargs):
request = kwargs.pop('request', None)
try:
changes = self.get_dirty_fields()
if changes:
system = System.objects.get(id=self.id)
save_string = ''
for k, v in changes.items():
if k == 'system_status_id':
k = 'System Status'
ss = SystemStatus.objects.get(id=v)
v = ss
if k == 'operating_system_id':
k = 'Operating System'
ss = OperatingSystem.objects.get(id=v)
v = ss
if k == 'server_model_id':
k = 'Server Model'
ss = ServerModel.objects.get(id=v)
v = ss
save_string += '%s: %s\n\n' % (k, v)
try:
remote_user = request.META['REMOTE_USER']
except Exception:
remote_user = 'changed_user'
tmp = SystemChangeLog(
system=system,
changed_by=remote_user,
changed_text=save_string,
changed_date=datetime.datetime.now()
)
tmp.save()
except Exception:
pass
if not self.pk:
self.created_on = datetime.datetime.now()
self.updated_on = datetime.datetime.now()
def get_edit_url(self):
return "/systems/edit/{0}/".format(self.pk)
def get_absolute_url(self):
return "/systems/show/{0}/".format(self.pk)
def get_next_key_value_adapter(self):
"""
Return the first found adapter from the
key value store. This will go away,
once we are on the StaticReg
based system
"""
ret = {}
ret['mac_address'] = None
ret['ip_address'] = None
ret['num'] = None
ret['dhcp_scope'] = None
ret['name'] = 'nic0'
key_value = self.keyvalue_set.filter(
key__startswith='nic', key__icontains='mac_address')[0]
m = re.search('nic\.(\d+)\.mac_address\.0', key_value.key)
ret['num'] = int(m.group(1))
key_value_set = self.keyvalue_set.filter(
key__startswith='nic.%s' % ret['num'])
if len(key_value_set) > 0:
for kv in key_value_set:
m = re.search('nic\.\d+\.(.*)\.0', kv.key)
if m:
ret[m.group(1)] = str(kv.value)
return ret
else:
return False
def delete_key_value_adapter_by_index(self, index):
"""
Delete a set of key_value items by index
if index = 0
delete where keyvalue.name startswith nic.0
"""
self.keyvalue_set.filter(key__startswith='nic.%i' % index).delete()
return True
def external_data_conflict(self, attr):
if not hasattr(self, attr):
return False
val = getattr(self, attr)
if not val:
return False
for ed in self.externaldata_set.filter(name=attr):
if (attr == 'oob_ip' and
ed.data == val.strip().lstrip('ssh').strip()):
return False
elif ed.data.upper() != val.upper():
return True
return False
def get_updated_fqdn(self):
allowed_domains = [
'mozilla.com',
'scl3.mozilla.com',
'phx.mozilla.com',
'phx1.mozilla.com',
'mozilla.net',
'mozilla.org',
'build.mtv1.mozilla.com',
'build.mozilla.org',
]
reverse_fqdn = self.primary_reverse
if self.primary_ip and reverse_fqdn:
current_hostname = str(self.hostname)
if current_hostname and current_hostname != reverse_fqdn:
res = reverse_fqdn.replace(current_hostname, '').strip('.')
if res in allowed_domains:
self.update_host_for_migration(reverse_fqdn)
elif not self.primary_ip or not self.primary_reverse:
for domain in allowed_domains:
updated = False
if not updated:
try:
fqdn = socket.gethostbyaddr(
'%s.%s' % (self.hostname, domain)
)
if fqdn:
self.update_host_for_migration(fqdn[0])
updated = True
except Exception:
pass
if not updated:
pass
#print "Could not update hostname %s" % (self.hostname)
def update_host_for_migration(self, new_hostname):
if new_hostname.startswith(self.hostname):
kv = KeyValue(
obj=self, key='system.hostname.alias.0', value=self.hostname
)
kv.save()
try:
self.hostname = new_hostname
self.save()
except Exception, e:
print "ERROR - %s" % (e)
def get_switches(self):
return System.objects.filter(is_switch=1)
def check_for_adapter(self, adapter_id):
adapter_id = int(adapter_id)
if adapter_id in self.get_adapter_numbers():
return True
return False
def check_for_adapter_name(self, adapter_name):
adapter_name = str(adapter_name)
if adapter_name in self.get_nic_names():
return True
return False
def get_nic_names(self):
adapter_names = []
pairs = KeyValue.objects.filter(
obj=self, key__startswith='nic', key__contains='adapter_name'
)
for row in pairs:
m = re.match('^nic\.\d+\.adapter_name\.\d+', row.key)
if m:
adapter_names.append(str(row.value))
return adapter_names
def get_adapter_numbers(self):
nic_numbers = []
pairs = KeyValue.objects.filter(obj=self, key__startswith='nic')
for row in pairs:
m = re.match('^nic\.(\d+)\.', row.key)
if m:
match = int(m.group(1))
if match not in nic_numbers:
nic_numbers.append(match)
return nic_numbers
def get_adapter_count(self):
return len(self.get_adapter_numbers())
class SystemChangeLog(models.Model):
changed_by = models.CharField(max_length=255)
changed_date = models.DateTimeField()
changed_text = models.TextField()
system = models.ForeignKey(System)
class Meta:
db_table = u'systems_change_log'
class UserProfile(models.Model):
PAGER_CHOICES = (
('epager', 'epager'),
('sms', 'sms'),
)
user = models.ForeignKey(User, unique=True)
is_desktop_oncall = models.BooleanField()
is_sysadmin_oncall = models.BooleanField()
is_services_oncall = models.BooleanField()
is_mysqldba_oncall = models.BooleanField()
is_pgsqldba_oncall = models.BooleanField()
is_netop_oncall = models.BooleanField()
is_metrics_oncall = models.BooleanField()
current_desktop_oncall = models.BooleanField()
current_sysadmin_oncall = models.BooleanField()
current_services_oncall = models.BooleanField()
current_mysqldba_oncall = models.BooleanField()
current_pgsqldba_oncall = models.BooleanField()
current_netop_oncall = models.BooleanField()
current_metrics_oncall = models.BooleanField()
irc_nick = models.CharField(max_length=128, null=True, blank=True)
api_key = models.CharField(max_length=255, null=True, blank=True)
pager_type = models.CharField(
choices=PAGER_CHOICES, max_length=255, null=True, blank=True
)
pager_number = models.CharField(max_length=255, null=True, blank=True)
epager_address = models.CharField(max_length=255, null=True, blank=True)
objects = QuerySetManager()
class Meta:
db_table = u'user_profiles'
def __str__(self):
return "{0}".format(self.user.username)
def __repr__(self):
return "<UserProfile {0}>".format(self.user.username)
class QuerySet(QuerySet):
def get_all_desktop_oncall(self):
return self.filter(is_desktop_oncall=1)
def get_current_desktop_oncall(self):
return self.filter(current_desktop_oncall=1).select_related()
def get_all_services_oncall(self):
return self.filter(is_services_oncall=1)
def get_current_services_oncall(self):
return self.filter(current_services_oncall=1).select_related()
def get_all_sysadmin_oncall(self):
return self.filter(is_sysadmin_oncall=1)
def get_current_sysadmin_oncall(self):
return self.filter(current_sysadmin_oncall=1).select_related()
def get_all_metrics_oncall(self):
return self.filter(is_metrics_oncall=1)
def get_current_metrics_oncall(self):
return self.filter(current_metrics_oncall=1).select_related()
```
#### File: inventory/user_systems/forms.py
```python
from django import forms
from django.forms.extras.widgets import SelectDateWidget
from models import UnmanagedSystem, Owner, UserLicense, UnmanagedSystemType
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
from datetime import datetime, timedelta
def return_data_if_true(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
field_name = f.__name__.split("_", 1)[1]
data = self.cleaned_data[field_name]
if data:
return data
return f(self, *args, **kwargs)
return wrapper
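# Usage note: decorating a ModelForm clean_<field> method (see
# UserLicenseForm.clean_owner below) makes it short-circuit with the
# already-cleaned value when the field was filled in, so the fallback lookup
# in the wrapped method only runs when the field is empty.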
class CSVForm(forms.Form):
csv = forms.FileField()
class UserSystemForm(forms.ModelForm):
date_purchased = forms.DateField(widget=SelectDateWidget(years=range(1999,datetime.today().year + 2)), initial=datetime.now())
loaner_return_date = forms.DateField(widget=SelectDateWidget(), initial=datetime.now(), required=False)
system_type = forms.ModelChoiceField(
queryset=UnmanagedSystemType.objects.all(), empty_label="(Required)",
required=True
)
class Meta:
model = UnmanagedSystem
fields = ('owner',
'serial',
'asset_tag',
'date_purchased',
'system_type',
'cost',
'cost_center',
'is_loaned',
'is_loaner',
'bug_number',
'loaner_return_date',
'operating_system',
'server_model',
'notes')
class OwnerForm(forms.ModelForm):
class Meta:
model = Owner
fields = ['name', 'user_location', 'email', 'note']
class UserLicenseForm(forms.ModelForm):
purchase_date = forms.DateField(
required=False,
widget=SelectDateWidget(
years=range(1999, datetime.today().year + 2),
),
initial=datetime.now()
)
@return_data_if_true
def clean_owner(self):
name = self.data.get('js_owner_name')
#user_location = self.data.get('js_owner_user_location')
email = self.data.get('js_owner_email')
note = self.data.get('js_owner_note')
if name is not None:
owner, c = Owner.objects.get_or_create(
name = name,
#user_location=user_location,
email = email,
note = note)
return owner
class Meta:
model = UserLicense
fields = ['username', 'version', 'license_type', 'license_key',
'owner', 'user_operating_system', 'purchase_date']
```
#### File: inventory/user_systems/models.py
```python
from django.db import models
from systems.models import OperatingSystem, ServerModel
from datetime import datetime, timedelta, date
from django.db.models.query import QuerySet
from settings import USER_SYSTEM_ALLOWED_DELETE, FROM_EMAIL_ADDRESS, UNAUTHORIZED_EMAIL_ADDRESS, BUG_URL
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
# Create your models here.
YES_NO_CHOICES = (
(0, 'No'),
(1, 'Yes'),
)
OS_CHOICES = (
(1, 'Mac OS'),
(2, 'Windows'),
)
class QuerySetManager(models.Manager):
def get_query_set(self):
return self.model.QuerySet(self.model)
def __getattr__(self, attr, *args):
return getattr(self.get_query_set(), attr, *args)
class UserOperatingSystem(models.Model):
name = models.CharField(max_length=128, blank=False)
def __unicode__(self):
return self.name
class UnmanagedSystemType(models.Model):
name = models.CharField(max_length=128, blank=False)
def __unicode__(self):
return self.name
class Meta:
db_table = 'unmanaged_system_types'
class CostCenter(models.Model):
cost_center_number = models.IntegerField()
name = models.CharField(max_length=255, blank=True)
def __unicode__(self):
return '%s - %s' % (self.cost_center_number, self.name)
class Meta:
db_table = 'cost_centers'
class UnmanagedSystem(models.Model):
serial = models.CharField(max_length=255, blank=True)
asset_tag = models.CharField(max_length=255, blank=True)
operating_system = models.ForeignKey(OperatingSystem, blank=True, null=True)
owner = models.ForeignKey('Owner', blank=True, null=True)
system_type = models.ForeignKey('UnmanagedSystemType', blank=False, null=True)
server_model = models.ForeignKey(ServerModel, blank=True, null=True)
created_on = models.DateTimeField(null=True, blank=True)
updated_on = models.DateTimeField(null=True, blank=True)
date_purchased = models.DateField(null=True, blank=True)
cost = models.CharField(max_length=50, blank=True)
cost_center = models.ForeignKey('CostCenter', null=True, blank=True)
bug_number = models.CharField(max_length=255, blank=True)
notes = models.TextField(blank=True)
is_loaned = models.IntegerField(choices=YES_NO_CHOICES, blank=True, null=True)
is_loaner = models.IntegerField(choices=YES_NO_CHOICES, blank=True, null=True)
loaner_return_date = models.DateTimeField(null=True, blank=True)
objects = QuerySetManager()
search_fields = (
'serial',
'asset_tag',
'owner__name',
'server_model__vendor',
'notes',
'server_model__model'
)
def delete(self, *args, **kwargs):
super(UnmanagedSystem, self).delete(*args, **kwargs)
def save(self):
if not self.id:
self.created_on = datetime.now()
self.updated_on = datetime.now()
super(UnmanagedSystem, self).save()
def __unicode__(self):
try:
server_model = self.server_model
except ServerModel.DoesNotExist:
server_model = ""
return "%s - %s - %s" % (server_model, self.asset_tag, self.serial)
class QuerySet(QuerySet):
def get_all_loaners(self):
return self.filter(is_loaner=1)
def get_loaners_due(self):
return_date = date.today()
return self.filter(loaner_return_date__lte=return_date)
def get_bug_url(self):
bug_id = ''
if self.bug_number:
bug_id = self.bug_number
return "%s%s" % (BUG_URL, bug_id)
@models.permalink
def get_absolute_url(self):
return ('user-system-show', [self.id])
class Meta:
db_table = u'unmanaged_systems'
class History(models.Model):
change = models.CharField(max_length=1000)
changed_by = models.CharField(max_length=128, null=True, blank=True)
system = models.ForeignKey(UnmanagedSystem)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
"%s: %s" % (self.created, self.change)
class Meta:
ordering = ['-created']
class Owner(models.Model):
name = models.CharField(unique=True, max_length=255, blank=True)
address = models.TextField(blank=True)
note = models.TextField(blank=True)
user_location = models.ForeignKey('UserLocation', blank=True, null=True)
email = models.CharField(max_length=255, blank=True)
search_fields = (
'name',
'note',
'email',
)
def __unicode__(self):
return self.name
def upgradeable_systems(self):
return self.unmanagedsystem_set.filter(
date_purchased__lt=datetime.now() - timedelta(days=730))
@models.permalink
def get_absolute_url(self):
return ('owner-show', [self.id])
def delete(self):
UserLicense.objects.filter(owner=self).update(owner=None)
UnmanagedSystem.objects.filter(owner=self).update(owner=None)
super(Owner, self).delete()
class Meta:
db_table = u'owners'
ordering = ['name']
class UserLicense(models.Model):
username = models.CharField(max_length=255, blank=True)
version = models.CharField(max_length=255, blank=True)
license_type = models.CharField(max_length=255, blank=True)
license_key = models.CharField(max_length=255, blank=False)
owner = models.ForeignKey('Owner', blank=True, null=True)
purchase_date = models.DateField(blank=True, null=True, default=None)
#user_operating_system = models.IntegerField(choices=OS_CHOICES, blank=True, null=True)
user_operating_system = models.ForeignKey('UserOperatingSystem', blank=True, null=True)
search_fields = (
'username',
'version',
'license_type',
'license_key',
'owner__name',
'user_operating_system__name',
)
def delete(self, *args, **kwargs):
super(UserLicense, self).delete(*args, **kwargs)
def __unicode__(self):
return "%s - %s" % (self.license_type, self.license_key)
@models.permalink
def get_absolute_url(self):
return ('license-show', [self.id])
class Meta:
db_table = u'user_licenses'
ordering = ['license_type']
class UserLocation(models.Model):
city = models.CharField(unique=True, max_length=255, blank=True)
country = models.CharField(unique=True, max_length=255, blank=True)
created_at = models.DateTimeField(null=True, blank=True)
updated_at = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return "%s - %s" % (self.city, self.country)
class Meta:
db_table = u'user_locations'
```
#### File: inventory/user_systems/tests.py
```python
import sys
import os
_base = os.path.dirname(__file__)
site_root = os.path.realpath(os.path.join(_base, '../'))
sys.path.append(site_root)
import manage
from django.test import TestCase
import models
from test_utils import setup_test_environment,TestCase
setup_test_environment()
from django.contrib.auth.models import User
from django.test.client import Client
class OwnerTest(TestCase):
fixtures = ['user_systems_test_data']
def setUp(self):
self.client = Client()
def test_owner_list(self):
resp = self.client.get('/user_systems/owners/', follow=True)
print User.objects.all()
self.assertEqual(resp.status_code,200)
print resp.context[0]['owner_list']
self.assertTrue(len(resp.context[0]['owner_list']) > 0)
def test_owner_show(self):
resp = self.client.get('/user_systems/owners/show/1/', follow=True)
self.assertEqual(resp.status_code,200)
print resp.context[0]['owner_list']
self.assertTrue(len(resp.context[0]['owner_list']) > 0)
```
#### File: inventory/user_systems/views.py
```python
import csv
import operator
from django.template import RequestContext
from django.forms.extras.widgets import SelectDateWidget
from django.db import connection
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils import simplejson as json
from django.http import HttpResponse, HttpResponseRedirect
from django.core.mail import send_mail
import forms
import models
from systems import models as system_models
from datetime import datetime, timedelta
from libs import ldap_lib
import settings
from settings.local import USER_SYSTEM_ALLOWED_DELETE, FROM_EMAIL_ADDRESS, UNAUTHORIZED_EMAIL_ADDRESS
from settings import BUG_URL as BUG_URL
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect, get_object_or_404, render
from libs.jinja import render_to_response as render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from MozInvAuthorization.UnmanagedSystemACL import UnmanagedSystemACL
def license_version_search(request):
query = request.GET.get('query')
tmp = [str(m['version']) for m in models.UserLicense.objects.filter(version__icontains=query).values('version').distinct()]
versions = list(set(tmp))
ret_dict = {}
ret_dict['query'] = query
ret_dict['suggestions'] = versions
ret_dict['data'] = versions
return HttpResponse(json.dumps(ret_dict))
def license_type_search(request):
query = request.GET.get('query')
types = [m['license_type'] for m in models.UserLicense.objects.filter(license_type__icontains=query).values('license_type').distinct()]
ret_dict = {}
ret_dict['query'] = query
ret_dict['suggestions'] = types
ret_dict['data'] = types
return HttpResponse(json.dumps(ret_dict))
@csrf_exempt
def owners_quicksearch_ajax(request):
"""Returns systems sort table"""
search = request.POST['quicksearch']
filters = [Q(**{"%s__icontains" % t: search})
for t in models.Owner.search_fields]
owners = models.Owner.objects.filter(
reduce(operator.or_, filters))
return render_to_response('user_systems/owners_quicksearch.html', {
'owners': owners,
},
RequestContext(request))
@csrf_exempt
def license_edit(request, object_id):
license = get_object_or_404(models.UserLicense, pk=object_id)
if request.method == 'POST':
form = forms.UserLicenseForm(request.POST, instance=license)
if form.is_valid():
form.save()
return HttpResponseRedirect('/user_systems/licenses/')
else:
form = forms.UserLicenseForm(instance=license)
return render_to_response('user_systems/userlicense_form.html', {
'form': form,
},
RequestContext(request))
def owner_list(request):
owners = models.Owner.objects.select_related('user_location').all()
upgradeable_users = models.Owner.objects.filter(unmanagedsystem__date_purchased__lt=datetime.now() - timedelta(days=730)).distinct().count()
return render_to_response('user_systems/owner_list.html', {
'owner_list': owners,
'upgradeable_users':upgradeable_users,
},
RequestContext(request))
def owner_show(request, object_id):
owner = get_object_or_404(models.Owner, pk=object_id)
return render_to_response('user_systems/owner_detail.html', {
'owner': owner,
},
RequestContext(request))
def owner_delete(request, object_id):
owner = get_object_or_404(models.Owner, pk=object_id)
if request.method == "POST":
owner.delete()
return HttpResponseRedirect('/user_systems/owners/')
else:
return render_to_response('user_systems/owner_confirm_delete.html', {
'owner': owner,
},
RequestContext(request))
@csrf_exempt
def owner_edit(request, object_id):
owner = get_object_or_404(models.Owner, pk=object_id)
initial = {}
if request.method == 'POST':
form = forms.OwnerForm(request.POST, instance=owner)
if form.is_valid():
form.save()
return HttpResponseRedirect('/user_systems/owners/')
else:
form = forms.OwnerForm(instance=owner)
return render_to_response('user_systems/owner_form.html', {
'form': form,
},
RequestContext(request))
def owner_create(request):
initial = {}
if request.method == 'POST':
form = forms.OwnerForm(request.POST, initial=initial)
if form.is_valid():
form.save()
return HttpResponseRedirect('/user_systems/owners/')
else:
form = forms.OwnerForm(initial=initial)
return render_to_response('user_systems/owner_form.html', {
'form': form,
},
RequestContext(request))
def license_new(request):
initial = {}
if request.method == 'POST':
form = forms.UserLicenseForm(request.POST, initial=initial)
if form.is_valid():
form.save()
return HttpResponseRedirect('/user_systems/licenses/')
else:
form = forms.UserLicenseForm(initial=initial)
return render_to_response('user_systems/userlicense_form.html', {
'form': form,
},
RequestContext(request))
def license_quicksearch_ajax(request):
"""Returns systems sort table"""
# Try to get quicksearch from post
# If fail, try to get from GET
# return None otherwise
search = request.GET.get('quicksearch', None)
if search:
filters = [Q(**{"%s__icontains" % t: search})
for t in models.UserLicense.search_fields]
licenses = models.UserLicense.objects.filter(
reduce(operator.or_, filters))
else:
licenses = None
return render_to_response('user_systems/license_quicksearch.html', {
'licenses': licenses,
},
RequestContext(request))
@csrf_exempt
def user_system_quicksearch_ajax(request):
"""Returns systems sort table"""
search = request.POST['quicksearch']
filters = [
Q(**{"%s__icontains" % t: search})
for t in models.UnmanagedSystem.search_fields
]
systems = models.UnmanagedSystem.objects.filter(
reduce(operator.or_, filters)
)
# For some reason systems are referencing server model classes that do not
# exist. I have no idea how this happened but it did. If accessing the
# server model causes an error, set the server_model to None so we don't
# get an exception later during page loads.
# TODO: Clean up defunct server model references
for s in systems:
try:
s.server_model
except:
s.server_model = None
return render_to_response(
'user_systems/quicksearch.html',
{'systems': systems, 'BUG_URL': BUG_URL},
RequestContext(request)
)
@csrf_exempt
def user_system_view(request, template, data, instance=None):
if request.method == 'POST':
post_data = request.POST.copy()
owner, c = models.Owner.objects.get_or_create(
name=request.POST['owner_name'])
post_data['owner'] = owner.id
try:
os, c = models.OperatingSystem.objects.get_or_create(
name=request.POST['js_os_name'],
version=request.POST['js_os_version'])
post_data['operating_system'] = os.id
except KeyError:
pass
try:
server_model, c = models.ServerModel.objects.get_or_create(
vendor=request.POST['js_server_model_vendor'],
model=request.POST['js_server_model_model'])
post_data['server_model'] = server_model.id
except KeyError:
pass
if instance:
old_owner = instance.owner
old_serial = instance.serial
old_asset_tag = instance.asset_tag
old_notes = instance.notes
else:
old_owner = None
old_serial = None
old_asset_tag = None
old_notes = None
form = forms.UserSystemForm(post_data, instance=instance)
if form.is_valid():
saved_instance = form.save()
if not instance or old_notes != saved_instance.notes:
if old_notes:
models.History(
change="Notes changed from %s" % old_notes,
changed_by=get_changed_by(request),
system=saved_instance).save()
if saved_instance.notes:
models.History(
change="Notes changed to %s" % saved_instance.notes,
changed_by=get_changed_by(request),
system=saved_instance).save()
else:
models.History(
change="System has no Notes",
changed_by=get_changed_by(request),
system=saved_instance).save()
if not instance or old_asset_tag != saved_instance.asset_tag:
if old_asset_tag:
models.History(
change="Asset Tag changed from %s" % old_asset_tag,
changed_by=get_changed_by(request),
system=saved_instance).save()
if saved_instance.asset_tag:
models.History(
change="Asset Tag changed to %s" % saved_instance.asset_tag,
changed_by=get_changed_by(request),
system=saved_instance).save()
else:
models.History(
change="System has no Asset Tag",
changed_by=get_changed_by(request),
system=saved_instance).save()
if not instance or old_serial != saved_instance.serial:
if old_serial:
models.History(
change="Serial changed from %s" % old_serial,
changed_by=get_changed_by(request),
system=saved_instance).save()
if saved_instance.serial:
models.History(
change="Serial changed to %s" % saved_instance.serial,
changed_by=get_changed_by(request),
system=saved_instance).save()
else:
models.History(
change="System has no serial",
changed_by=get_changed_by(request),
system=saved_instance).save()
if not instance or old_owner != saved_instance.owner:
if old_owner:
models.History(
change="Owner changed from %s" % old_owner,
changed_by=get_changed_by(request),
system=saved_instance).save()
if saved_instance.owner:
models.History(
change="Owner changed to %s" % saved_instance.owner,
changed_by=get_changed_by(request),
system=saved_instance).save()
else:
models.History(
change="System has no owner",
changed_by=get_changed_by(request),
system=saved_instance).save()
return redirect('user-system-list')
else:
form = forms.UserSystemForm(instance=instance)
data['form'] = form
if settings.USE_LDAP:
the_owner_list = ldap_lib.get_all_names()
else:
the_owner_list = []
the_owner_list.append("STOCK")
the_owner_list.append("STOCK-SFO")
the_owner_list.append("STOCK-MTV")
the_owner_list.append("STOCK-TOR")
the_owner_list.append("STOCK-LON")
the_owner_list.append("STOCK-PAR")
the_owner_list.append("STOCK-PDX")
the_owner_list.append("desktop-mtv1")
the_owner_list.append("desktop-sfo1")
the_owner_list.append("desktop-tor1")
the_owner_list.append("desktop-lon1")
the_owner_list.append("desktop-par1")
the_owner_list.append("desktop-yvr1")
the_owner_list.append("Release Engineering")
data['owner_json'] = json.dumps(the_owner_list)
#data['owner_json'] = json.dumps(ldap_lib.get_all_names())
#data['owner_json'].append("Stock")
return render_to_response(template, data, RequestContext(request))
def get_changed_by(request):
try:
remote_user = request.META['REMOTE_USER']
except:
remote_user = None
return remote_user
#def license_new(request):
# return render_to_response('user_systems/userlicense_new.html')
def license_show(request, object_id):
license = get_object_or_404(models.UserLicense, pk=object_id)
return render_to_response('user_systems/userlicense_detail.html', {
'license': license,
},RequestContext(request) )
def license_index(request):
from settings import BUG_URL as BUG_URL
system_list = models.UserLicense.objects.select_related('owner').all()
paginator = Paginator(system_list, 25)
if 'page' in request.GET:
page = request.GET.get('page')
else:
page = 1
try:
systems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
systems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
systems = paginator.page(paginator.num_pages)
return render_to_response('user_systems/userlicense_list.html', {
'license_list': systems,
'BUG_URL': BUG_URL
},RequestContext(request) )
def user_system_index(request):
from settings import BUG_URL as BUG_URL
system_list = models.UnmanagedSystem.objects.select_related('owner', 'server_model', 'operating_system').order_by('owner__name')
paginator = Paginator(system_list, 25)
if 'page' in request.GET:
page = request.GET.get('page')
else:
page = 1
try:
systems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
systems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
systems = paginator.page(paginator.num_pages)
return render_to_response('user_systems/unmanagedsystem_list.html', {
'user_system_list': systems,
'BUG_URL': BUG_URL
},RequestContext(request) )
def license_delete(request, object_id):
license = get_object_or_404(models.UserLicense, pk=object_id)
try:
license.delete()
return HttpResponseRedirect( reverse('license-list') )
except PermissionDenied, e:
return render_to_response('user_systems/unauthorized_delete.html', {
'content': 'You do not have permission to delete this license',
},
RequestContext(request))
def unmanaged_system_delete(request, object_id):
#Dummy comment
user_system = get_object_or_404(models.UnmanagedSystem, pk=object_id)
if request.method == 'POST':
try:
acl = UnmanagedSystemACL(request)
acl.check_delete()
user_system_notes = user_system.notes
user_system.delete()
send_mail('System Deleted', '%s Deleted by %s\nSystem Notes:\n%s' % (user_system, request.user.username, user_system_notes), FROM_EMAIL_ADDRESS, UNAUTHORIZED_EMAIL_ADDRESS, fail_silently=False)
return HttpResponseRedirect( reverse('user-system-list') )
except PermissionDenied, e:
send_mail('Unauthorized System Delete Attempt', 'Unauthorized Attempt to Delete %s by %s' % (user_system, request.user.username), FROM_EMAIL_ADDRESS, UNAUTHORIZED_EMAIL_ADDRESS, fail_silently=False)
return render_to_response('user_systems/unauthorized_delete.html', {
'content': 'You do not have permission to delete this system',
},
RequestContext(request))
else:
return render_to_response('user_systems/unmanagedsystem_confirm_delete.html', {
'owner': user_system,
},
RequestContext(request))
def show_by_model(request, object_id):
system_list = models.UnmanagedSystem.objects.filter(server_model=models.ServerModel.objects.get(id=object_id))
if 'show_all' in request.GET:
paginator = Paginator(system_list, system_list.count())
else:
paginator = Paginator(system_list, 25)
if 'page' in request.GET:
page = request.GET.get('page')
else:
page = 1
try:
systems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
systems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
systems = paginator.page(paginator.num_pages)
return render_to_response('user_systems/unmanagedsystem_list.html', {
'user_system_list': systems,
'show_all': True,
},
RequestContext(request))
def user_system_show(request, object_id):
system = get_object_or_404(models.UnmanagedSystem, id=object_id)
#system = models.UnmanagedSystem.objects.select_related(
# 'owner', 'server_model', 'operating_system'
# ).filter(asset_tag=id).order_by('owner__name')
#system = get_object_or_404(models.UnmanagedSystem
return render_to_response('user_systems/unmanagedsystem_detail.html', {
'user_system': system,
'settings': settings,
},
RequestContext(request))
def user_system_show_by_asset_tag(request, id):
system = get_object_or_404(models.UnmanagedSystem, asset_tag=id)
#system = models.UnmanagedSystem.objects.select_related(
# 'owner', 'server_model', 'operating_system'
# ).filter(asset_tag=id).order_by('owner__name')
#system = get_object_or_404(models.UnmanagedSystem
return render_to_response('user_systems/unmanagedsystem_detail.html', {
'user_system': system,
},
RequestContext(request))
def user_system_new(request):
return user_system_view(
request,
'user_systems/unmanagedsystem_create.html',
{})
@csrf_exempt
def user_system_edit(request, id):
system = get_object_or_404(models.UnmanagedSystem, pk=id)
return user_system_view(
request,
'user_systems/unmanagedsystem_modify.html', {
'system': system},
system)
def user_system_csv(request):
systems = models.UnmanagedSystem.objects.all().order_by('owner__name')
try:
ref_split = request.META['HTTP_REFERER'].split('/')
type, id = ref_split[-3:-1]
if type == 'model':
systems = systems.filter(server_model__id=id)
except:
pass
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=user_systems.csv'
writer = csv.writer(response)
writer.writerow(['Owner', 'Location', 'Serial', 'Asset Tag',
'Operating System', 'Model', 'Date Purchased', 'Cost'])
for s in systems:
try:
location = s.owner.user_location
except AttributeError:
location = ''
writer.writerow([s.owner, location, s.serial, s.asset_tag,
s.operating_system, s.server_model, s.date_purchased, s.cost])
return response
def fillin_csv(request):
"""Important columns:
4: serial number
6: employee
7: location
"""
if request.method == 'POST':
f = forms.CSVForm(request.POST, request.FILES)
if f.is_valid():
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=ComputerEquipment.csv'
in_csv = csv.reader(f.cleaned_data['csv'].read().splitlines())
out_csv = csv.writer(response)
for row in in_csv:
if row[4]:
serials = [i.strip() for i in row[4].split(';')]
owners = [
str(i.owner)
for i in models.UnmanagedSystem.objects.filter(
serial__in=serials).filter(owner__isnull=False)]
locations = [
str(i.system_rack.location)
for i in system_models.System.objects.filter(
serial__in=serials).filter(system_rack__location__isnull=False)]
locations += [
str(i.owner.user_location)
for i in models.UnmanagedSystem.objects.filter(
serial__in=serials).filter(owner__user_location__isnull=False)]
if owners:
row[6] = "; ".join(owners)
if locations:
row[7] = "; ".join(locations)
out_csv.writerow(row)
return response
else:
f = forms.CSVForm()
return render_to_response(
'user_systems/fillin_csv.html',
{'form': f},
RequestContext(request))
``` |
{
"source": "jlinkemeyer/MLinPractice",
"score": 4
} |
#### File: code/feature_extraction/emoji_count.py
```python
from code.feature_extraction.feature_extractor import FeatureExtractor
import emoji
import numpy as np
class EmojiCount(FeatureExtractor):
"""Class for extracting the number of emojis within a tweet"""
# constructor
def __init__(self, input_column):
"""Initialize EmojiCount with the given input and output column."""
super().__init__([input_column], "{0}_emojicount".format(input_column))
# don't need to fit, so don't overwrite _set_variables()
def _get_values(self, inputs):
"""Compute the number of emojis in the tweet"""
emoji_count = []
for tweet in inputs[0]:
# extract the emojis from the tweet
extracted_emojis = ''.join(character for character in tweet if character in emoji.UNICODE_EMOJI['en'])
# count how many emojis were extracted
count = len(extracted_emojis)
# save the number of emojis in list emoji_count
emoji_count.append(count)
result = np.array(emoji_count).reshape(-1, 1)
return result
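# Example: a tweet containing three emoji characters yields 3 and a tweet
# without any yields 0; the reshape gives the (n_samples, 1) column shape
# used by the other feature extractors in this project.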
```
#### File: code/feature_extraction/posting_time.py
```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from code.feature_extraction.feature_extractor import FeatureExtractor
from code.util import POSTING_TIME_INTERVALS, POSTING_TIME_CATEGORIES, SUFFIX_AFTERNOON, SUFFIX_EVENING, SUFFIX_MORNING, SUFFIX_NIGHT
class PostingTime(FeatureExtractor):
"""Class for extracting the posting daytime (morning, noon, evening, or night)"""
# constructor
def __init__(self, input_column):
"""Initialize PostingTime with the given input and output column."""
super().__init__([input_column], "{0}_posting_time".format(input_column))
def get_feature_name(self):
"""
Overrides the get_feature_name method from the superclass and returns the
four feature names instead of just one.
"""
prefix = self._feature_name
return [prefix + SUFFIX_AFTERNOON, prefix + SUFFIX_EVENING, prefix + SUFFIX_MORNING, prefix + SUFFIX_NIGHT]
# don't need to fit, so don't overwrite _set_variables()
def _get_values(self, inputs):
"""Extract the one hot encoding of four daytimes based on the posting time"""
# Extract the posting hour from the provided posting time strings
posting_hours = pd.to_datetime(inputs[0], format = "%H:%M:%S").dt.hour
# Categorize the posting hours into different daytimes
posting_daytimes = pd.cut(posting_hours,
bins = POSTING_TIME_INTERVALS,
labels = POSTING_TIME_CATEGORIES,
include_lowest = True)
# Create a one hot encoder and fit it to the different categories
fit_categories = np.array(POSTING_TIME_CATEGORIES).reshape(-1, 1)
one_hot_encoder = OneHotEncoder()
one_hot_encoder_fitted = one_hot_encoder.fit(fit_categories)
# Reshape the categorized daytimes to match the one hot encoder input
# requirements, then transform categories to one hot encodings
posting_daytimes_reshaped = np.array(posting_daytimes).reshape(-1, 1)
result = one_hot_encoder_fitted.transform(posting_daytimes_reshaped).toarray()
return result
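# Illustrative output (assuming four entries in POSTING_TIME_CATEGORIES):
# each row is a one-hot vector such as [0., 0., 1., 0.], with columns ordered
# by the encoder's alphabetically sorted categories, which get_feature_name()
# above is presumably written to mirror.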
```
#### File: test/feature_extraction/bigrams_test.py
```python
import unittest
import pandas as pd
import numpy as np
from code.feature_extraction.bigrams import Bigrams
class BigramsTest(unittest.TestCase):
"""Test class to test the Bigrams feature functionality."""
def setUp(self):
"""Set up the variables and examples before running the tests."""
self.INPUT_COLUMN = "input"
self.bigrams = Bigrams(self.INPUT_COLUMN)
self.input_text = pd.DataFrame(["['this', 'is', 'just', 'a', 'test', 'to', 'see', 'if', 'it', 'works']",
"['this', 'is', 'just', 'a', 'test', 'to', 'see', 'if', 'it', 'works']",
"['my', 'cat', 'is', 'eating', 'the', 'whole', 'day', 'long']",
"['my', 'cat', 'is', 'eating', 'the', 'whole', 'day', 'long']",
"['cup', 'of', 'tee']"])
# call _set_variables and _get_values with the given input text
self.set_bigram_variables = self.bigrams._set_variables(self.input_text)
self.bigram_values = self.bigrams._get_values(self.input_text)
def test_input_columns(self):
"""Test if the input column is correct."""
self.assertListEqual(self.bigrams._input_columns, [self.INPUT_COLUMN])
def test_result(self):
"""Test if results are correct for different examples."""
output_text = np.array([True, True, True, True, False]).reshape(-1,1)
self.assertListEqual(list(self.bigram_values), list(output_text))
def test_length(self):
"""Test if the the output array has the correct length."""
output = 5
# the output length should be 5
self.assertEqual(len(self.bigram_values), output)
if __name__ == '__main__':
unittest.main()
```
#### File: test/feature_extraction/contains_photo_test.py
```python
import unittest
import pandas as pd
import numpy as np
from code.feature_extraction.contains_photo import ContainsPhoto
class ContainsPhotoTest(unittest.TestCase):
"""Test class to test the ContainsPhoto functionality."""
def setUp(self):
"""Set up the variables and examples before running the tests."""
self.INPUT_COLUMN = "input"
self.contains_photo = ContainsPhoto(self.INPUT_COLUMN)
self.inputs = pd.DataFrame(["[https://jooinn.com/images/dramatic-landscape-7.jpg",
"[]",
"[empty]"])
self.photo_values = self.contains_photo._get_values(self.inputs)
def test_input_columns(self):
"""Test if the input column is correct."""
self.assertListEqual(self.contains_photo._input_columns, [self.INPUT_COLUMN])
def test_length(self):
"""Test if the the output array has the correct length."""
output = 3
# the output length should be 3
self.assertEqual(len(self.photo_values), output)
def test_result(self):
"""Test if results are correct for different examples."""
output = np.array([True, False, False]).reshape(-1,1)
self.assertListEqual(list(self.photo_values), list(output))
if __name__ == '__main__':
unittest.main()
```
#### File: test/feature_extraction/hashtags_count_test.py
```python
import unittest
import numpy as np
from code.feature_extraction.hashtags_count import HashtagsCount
class HashtagsCountTest(unittest.TestCase):
"""Test class to test HashtagCount feature functionality."""
def setUp(self):
"""Set up the variables before running the tests."""
self.INPUT_COLUMN = "input"
self.hashtags_count = HashtagsCount(self.INPUT_COLUMN)
def test_input_columns(self):
"""Test the input column is correct."""
self.assertListEqual(self.hashtags_count._input_columns, [self.INPUT_COLUMN])
def test_result(self):
"""Test if results are correct for different examples."""
# Case 1: five hashtags in a string, following the data frame's
# structure for the hashtags column
input_text = np.array('[\'count\', \'the\', \'number\', \'of\', \'hastags\']').reshape(-1,1)
desired_output = 5
actual_output = self.hashtags_count._get_values(input_text)
error_message = 'HashtagCount does not count the number of hashtags correctly.'
self.assertEqual(actual_output, desired_output, error_message)
# Case 2: No hashtag in tweet.
input_text2 = np.array('[]').reshape(-1,1)
desired_output2 = 0
actual_output2 = self.hashtags_count._get_values(input_text2)
error_message2 = 'HashtagCount cannot deal with case where no hashtag is contained in a tweet.'
self.assertEqual(actual_output2, desired_output2, error_message2)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jlin/monero",
"score": 2
} |
#### File: tests/functional_tests/util_resources.py
```python
from __future__ import print_function
import subprocess
import psutil
def available_ram_gb():
ram_bytes = psutil.virtual_memory().available
kilo = 1024.0
ram_gb = ram_bytes / kilo**3
return ram_gb
def get_time_pi_seconds(cores, app_dir='.'):
app_path = '{}/cpu_power_test'.format(app_dir)
time_calc = subprocess.check_output([app_path, str(cores)])
decoded = time_calc.decode('utf-8')
milliseconds = int(decoded)
return milliseconds / 1000.0
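# Example usage (hypothetical paths): get_time_pi_seconds(4, app_dir='build')
# runs the cpu_power_test binary with the core count as its argument and
# returns the reported runtime converted to seconds.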
``` |
{
"source": "jlinoff/rsa_demo",
"score": 4
} |
#### File: rsa_demo/rsa_demo/eea.py
```python
import random
from typing import Tuple
def decompose(n: int) -> Tuple[int, int]:
'''
Citation: https://jeremykun.com/2013/06/16/miller-rabin-primality-test/
Citation: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
'''
exponentOfTwo = 0
while n % 2 == 0:
n = n // 2
exponentOfTwo += 1
return exponentOfTwo, n
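# Example: decompose(12) returns (2, 3) since 12 == 2**2 * 3; the Miller-Rabin
# tests below apply this to p - 1 to obtain the exponent/remainder pair.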
def isWitness(possibleWitness: int, p: int, exponent: int, remainder: int) -> bool:
'''
Citation: https://jeremykun.com/2013/06/16/miller-rabin-primality-test/
Citation: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
'''
possibleWitness = pow(possibleWitness, remainder, p)
if possibleWitness == 1 or possibleWitness == p - 1:
return False
for _ in range(exponent):
possibleWitness = pow(possibleWitness, 2, p)
if possibleWitness == p - 1:
return False
return True
def is_prime0(p: int, accuracy: int=100) -> bool:
'''
Citation: https://jeremykun.com/2013/06/16/miller-rabin-primality-test/
Citation: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
Miller-Rabin primality test.
'''
if p == 2 or p == 3: return True
if p < 2: return False
exponent, remainder = decompose(p - 1)
for _ in range(accuracy):
possibleWitness = random.randint(2, p - 2)
if isWitness(possibleWitness, p, exponent, remainder):
return False
return True
def is_prime1(n: int, t: int=8) -> bool:
"""
Citation: https://rosettacode.org/wiki/Miller%E2%80%93Rabin_primality_test#Python
Citation: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
Miller-Rabin primality test.
A return value of False means n is certainly not prime. A return value of
True means n is very likely a prime.
"""
#Miller-Rabin test for prime
if n==0 or n==1 or n==4 or n==6 or n==8 or n==9:
return False
if n==2 or n==3 or n==5 or n==7:
return True
s = 0
d = n-1
while d%2==0:
d>>=1
s+=1
assert(2**s * d == n-1)
def trial_composite(a):
if pow(a, d, n) == 1:
return False
for i in range(s):
if pow(a, 2**i * d, n) == n-1:
return False
return True
for _ in range(t): # number of trials
a = random.randrange(2, n)
if trial_composite(a):
return False
return True
def is_prime2(candidate: int, num_tests:int =128) -> bool:
'''
Citation: https://medium.com/@prudywsh/how-to-generate-big-prime-numbers-miller-rabin-49e6e6af32fb
Citation: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
Miller-Rabin primality test.
Test if a number is prime
Args:
candidate - the number to test
num_tests - the number of tests to do
return True if candidate is prime
'''
# Simple tests for small numbers.
if candidate in [2, 3, 5, 7]:
return True
if candidate <= 1 or candidate % 2 == 0:
return False
# Decompose to find the remainder and exponent.
exponent = 0
remainder = candidate - 1
while remainder & 1 == 0:
exponent += 1
remainder //= 2
assert(2 ** exponent * remainder == candidate - 1)
# Check witnesses.
for _ in range(num_tests):
accuracy = random.randrange(2, candidate - 1)
possible_witness = pow(accuracy, remainder, candidate)
if possible_witness not in (1, candidate - 1):
j = 1
while j < exponent and possible_witness != candidate - 1:
possible_witness = pow(possible_witness, 2, candidate)
if possible_witness == 1:
return False
j += 1
if possible_witness != candidate - 1:
return False
return True
def is_prime3(candidate: int, num_tests: int=128) -> bool:
'''
Citation: https://inventwithpython.com/rabinMiller.py
Citation: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
Miller-Rabin primality test.
Returns True if candidate is a prime number.
'''
remainder = candidate - 1
exponent = 0
while remainder % 2 == 0:
# keep halving remainder while it is even (and use exponent
# to count how many times we halve remainder)
remainder = remainder // 2
exponent += 1
# try to falsify candidate's primality num_tests times
for _ in range(num_tests):
accuracy = random.randrange(2, candidate - 1)
possible_witness = pow(accuracy, remainder, candidate)
if possible_witness != 1:
# this test does not apply if possible_witness is 1.
i = 0
while possible_witness != (candidate - 1):
if i == exponent - 1:
return False
i = i + 1
possible_witness = (possible_witness ** 2) % candidate
return True
def generate_prime_candidate(nbits: int) -> int:
'''
Generate a candidate prime number composed of n-bits.
Set the MSB and LSB to 1.
'''
candidate = random.getrandbits(nbits)
candidate |= (1 << (nbits - 1)) | 1
return candidate
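# Illustrative sketch, not part of the original module: the helpers above can be
# combined to produce an n-bit probable prime. The name `generate_prime` is an
# assumption made here for demonstration only.
def generate_prime(nbits: int) -> int:
    '''Generate an n-bit probable prime by testing random candidates.'''
    while True:
        candidate = generate_prime_candidate(nbits)
        if is_prime2(candidate):
            return candidate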
```
#### File: rsa_demo/rsa_demo/encrypt.py
```python
import argparse
import base64
import math
import os
import random
import struct
import sys
import textwrap
from typing import Tuple
from pyasn1.type import univ
from pyasn1.codec.der import decoder as der_decoder
# Is the sys.path.append really needed?
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from rsa_demo import __version__
from rsa_demo.utils import err, infov, infovv
def getopts() -> argparse.Namespace:
'''
Get the command line options.
'''
def gettext(string):
'''
Convert to upper case to make things consistent.
'''
lookup = {
'usage: ': 'USAGE:',
'positional arguments': 'POSITIONAL ARGUMENTS',
'optional arguments': 'OPTIONAL ARGUMENTS',
'show this help message and exit': 'Show this help message and exit.\n ',
}
return lookup.get(string, string)
argparse._ = gettext # to capitalize help headers
base = os.path.basename(sys.argv[0])
#name = os.path.splitext(base)[0]
usage = '\n {0} [OPTIONS]'.format(base)
desc = 'DESCRIPTION:{0}'.format('\n '.join(__doc__.split('\n')))
epilog = f'''\
EXAMPLES:
# Example 1: help
$ {base} --help
# Example 2: RSA encrypt a file
$ {base} -k test01.pub -i plaintext -o ciphertext
# Example 3: RSA encrypt a file keep the binary format.
$ {base} -k test01.pub -i plaintext -o ciphertext -b
'''
afc = argparse.RawTextHelpFormatter
parser = argparse.ArgumentParser(formatter_class=afc,
description=desc[:-2],
usage=usage,
epilog=epilog + ' \n')
parser.add_argument('-b', '--binary',
action='store_true',
help='''\
Do not base64 encode the output.
Generate a binary file.
''')
parser.add_argument('-i', '--input',
action='store',
type=str,
metavar=('FILE'),
help='''\
The file to encrypt.
''')
parser.add_argument('-k', '--key',
action='store',
required=True,
type=str,
metavar=('FILE'),
help='''\
The public key file. Two formats are supported:
1. SSH RSA public key format.
2. PKCS#1 (RSA) PEM public key format.
The program figures out the format.
''')
parser.add_argument('-o', '--output',
action='store',
type=str,
metavar=('FILE'),
help='''\
The encrypted file.
The default is stdout.
''')
parser.add_argument('-s', '--seed',
action='store',
type=int,
help='''\
Specify a random seed. This helps with demo's but
is not at all secure.
''')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='''\
Output a lot of information about the intermediate steps.
''')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s version {0}'.format(__version__),
help='''\
Show program's version number and exit.
''')
opts = parser.parse_args()
return opts
def read_pem_rsa_pubkey(path: str) -> Tuple[int, int]:
'''
Read the PEM encoded file.
'''
# Really cheap and cheerful approach.
with open(path, 'r') as ifp:
first, *b64, last = ifp.readlines()
assert first.strip() == '-----BEGIN RSA PUBLIC KEY-----'
assert last.strip() == '-----END RSA PUBLIC KEY-----'
b64_str = ''.join([o.strip() for o in b64])
b64_bytes = base64.b64decode(b64_str)
# This is decoded raw, with no structure, that is why
# recursion is disabled.
_, msg = der_decoder.decode(b64_bytes, asn1Spec=univ.Sequence(), recursiveFlag=False)
modulus, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
pubexp, _ = der_decoder.decode(msg, asn1Spec=univ.Integer())
return int(modulus), int(pubexp)
def read_ssh_rsa_pubkey(path: str) -> Tuple[str, int, int]:
'''
Read the SSH RSA public key file.
'''
with open(path, 'r') as ifp:
data_str = ifp.readlines()[0].split(None)[1]
data = base64.b64decode(data_str)
fields = []
while data:
# len fld is always 4 bytes
dlen = struct.unpack('>I', data[:4])[0]
val = data[4:dlen+4]
data = data[4+dlen:]
fields.append((dlen, val))
assert len(fields) == 3
alg = fields[0][1]
pubexp = int.from_bytes(fields[1][1], byteorder='big')
modulus = int.from_bytes(fields[2][1], byteorder='big')
return str(alg, 'utf-8'), pubexp, modulus
def read_public_key_file(opts: argparse.Namespace, path: str) -> Tuple[int, int]:
'''
Figure out which type of file this is and read it.
'''
infov(opts, f'opening key file: {path}')
with open(path, 'r') as ifp:
data = ifp.read()
if '-----BEGIN RSA PUBLIC KEY-----' in data:
infov(opts, f'key type: PKCS#1 (RSA) PEM public key file')
return read_pem_rsa_pubkey(path)
if 'ssh-rsa' in data:
infov(opts, f'key type: SSH RSA public key file')
_, pub, mod = read_ssh_rsa_pubkey(path)
return mod, pub
err(f'unrecognized file format in {path}.')
return -1, -1
def read_input(opts: argparse.Namespace) -> bytes:
'''
Read the input data.
There are two possible sources:
1. A file specified by the -i option.
2. stdin.
In both cases, all data is read into memory which
limits the file size to available memory.
'''
if opts.input:
with open(opts.input, 'rb') as ifp:
return ifp.read()
infov(opts, 'reading from stdin, type ^D on a new line to exit')
return bytes(sys.stdin.read(), 'utf-8')
def encrypt(opts: argparse.Namespace, modulus: int, pubexp: int) -> None:
'''
Encrypt the input using RSA.
'''
infov(opts, 'reading the input data')
plaintext = read_input(opts)
infov(opts, f'read {len(plaintext)} bytes')
num_bits = int(math.ceil(math.log(modulus, 2)))
bytes_per_block = num_bits // 8 # based on bits
infov(opts, f'num_bits: {num_bits}')
infov(opts, f'bytes/block: {bytes_per_block}')
assert bytes_per_block < 0xffff # we only allocate 2 bytes for padding
padding = 0
while len(plaintext) % bytes_per_block:
padding += 1
plaintext += b'x'
infov(opts, f'padding: {padding}')
assert (len(plaintext) % bytes_per_block) == 0
ciphertext = bytes([])
for i in range(0, len(plaintext), bytes_per_block):
end = i + bytes_per_block
block = plaintext[i:end]
# Convert the block to an integer for computation.
# Arbitrarily chose big endian because consistency is needed and
# 'big' is fewer letters than 'little'. Also because 'big' is
# 'network order'.
block_int = int.from_bytes(block, 'big')
# Encrypt.
# Use the fast modular exponentiation algorithm provided by
# python.
block_enc_int = int(pow(block_int, pubexp, modulus))
# Add to the encrypted bytes array.
# The MSB is always zero.
block_bytes = block_enc_int.to_bytes(bytes_per_block + 1, byteorder='big')
ciphertext += block_bytes
# Setup the prefix.
version = 0
prefix = bytes('joes-rsa', 'utf-8')
prefix += version.to_bytes(2, 'big')
prefix += padding.to_bytes(2, 'big')
ciphertext = prefix + ciphertext
# At this point the data is encrypted.
# If the user did not specify binary output, output in base64.
if opts.binary:
encb = ciphertext
encs = str(ciphertext)
mode = 'wb'
else:
b64 = base64.b64encode(ciphertext)
data_str = str(b64, 'utf-8').rstrip()
data_str = '\n'.join(textwrap.wrap(data_str, 64))
encs = f'''\
-----BEGIN JOES RSA ENCRYPTED DATA-----
{data_str}
-----END JOES RSA ENCRYPTED DATA-----
'''
mode = 'w'
# Write out the data.
if opts.output:
infov(opts, f'writing to {opts.output}')
with open(opts.output, mode) as ofp:
if opts.binary:
ofp.write(encb)
else:
ofp.write(encs)
else:
infov(opts, 'writing to stdout')
sys.stdout.write(encs)
def main() -> None:
'''
main
'''
opts = getopts()
if opts.seed:
random.seed(opts.seed)
modulus, pubexp = read_public_key_file(opts, opts.key)
infovv(opts, f'modulus: 0x{modulus:x}')
infovv(opts, f'pubexp : 0x{pubexp:x}')
encrypt(opts, modulus, pubexp)
infov(opts, 'done')
if __name__ == '__main__':
main()
```
#### File: rsa_demo/rsa_demo/read_rsa_ssh_public.py
```python
import base64
import struct
import sys
from typing import Tuple
def read_ssh_rsa_pubkey(path: str) -> Tuple[str, int, int]:
'''
Read the SSH RSA public key file.
'''
with open(path, 'r') as ifp:
data_str = ifp.readlines()[0].split(None)[1]
data = base64.b64decode(data_str)
fields = []
while data:
# len fld is always 4 bytes
dlen = struct.unpack('>I', data[:4])[0]
val = data[4:dlen+4]
data = data[4+dlen:]
fields.append((dlen, val))
assert len(fields) == 3
alg = fields[0][1]
pubexp = int.from_bytes(fields[1][1], byteorder='big')
modulus = int.from_bytes(fields[2][1], byteorder='big')
return str(alg, 'utf-8'), pubexp, modulus
def main() -> None:
'''main'''
for path in sys.argv[1:]:
print(f'{path}')
alg, pubexp, modulus = read_ssh_rsa_pubkey(path)
print(f' algorithm = {alg}')
print(f' pubexp = 0x{pubexp:x}')
print(f' modulus = 0x{modulus:x}')
if __name__ == '__main__':
main()
```
#### File: pyasn1/type/univ.py
```python
from typing import Any
class Integer():
def __init__(self, value: Any = None) -> None: ...
class Sequence():
def __init__(self) -> None: ...
def setComponentByPosition(self, idx: int, value: Any) -> None: ...
``` |
{
"source": "jlintula/pyircfs",
"score": 2
} |
#### File: pyircfs/lib/handler.py
```python
import connection, events
import time, logging
Event = events.Event
LOG_FILENAME = "pyircfs.log"
#logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
# datefmt='%m-%d %H:%M:%S',)
CHANCHARS = '*#+!&'
def is_channel(target):
return target[0] in CHANCHARS
class ConnectionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Handler:
def _find_handler_classes(self, key):
"""searches for a given key in classes in the events.py module
@return a list of (command, handler class) tuples"""
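        # For example (illustrative): with key='command_handlers', a class in
        # events.py defining command_handlers = ['JOIN'] is returned here as the
        # tuple ('JOIN', ThatClass).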
# there has to be a better way :O
classes = [] # list of (command, handler class) tuples
for i in events.__dict__.keys():
if hasattr(events.__dict__[i], key):
for j in getattr(events.__dict__[i], key):
classes.append((j, events.__dict__[i]))
return classes
def _get_handlers(self, htype, command):
"""returns a list of eventstore objects for given message, instantiates
ones from the handler class if needed and adds all other commands the same
class is able to receive to the object list
@param htype either 'reply' or 'command'
@param command the command/reply the store should be interested in
@return list of objects to send an event to"""
if htype == 'reply':
hlist = self.reply_handler_classes
slist = self.reply_stores
elif htype == 'command':
hlist = self.command_handler_classes
slist = self.command_stores
else:
return []
classes = []
objects = []
wildcard_added = False
# search if there are suitable objects already somewhere and use them if possible:
for i in slist:
if i[0] == command:
objects.append(i[1])
if i[0] == '*':
objects.append(i[1])
wildcard_added = True
if (len(objects) > 1 and wildcard_added) or (objects and not wildcard_added):
return objects
for i in hlist: # ok then, search for classes
if i[0] == command: # search for corresponding store objects
if not issubclass(i[1], events.PrivmsgStore):
# Privmsg/channel stores are created only on
# PRIVMSG/NOTICE, not in _handle_server_message
classes.append(i[1])
# now, classes contains only those classes that don't have instantiated objects
for i in classes:
# print "we are here with ", i
obj = self._create_new_store(i) # create a new store instance from the class
# find out what other commands and replies the same class
# supports and add them too:
for j in self.reply_handler_classes:
if j[1] == i:
self.reply_stores.append((j[0], obj))
for j in self.command_handler_classes:
if j[1] == i:
self.command_stores.append((j[0], obj))
#slist.append((command, obj))
objects.append(obj)
return objects
def _get_free_id(self):
"""returns a free unique id"""
self._next_id += 1
return self._next_id
def _create_new_store(self, class_, *args, **kwargs):
"""creates a new eventstore object and assigns it an unique id.
@param class_ the class to create an instance from
@param *args, **kwargs are passed to the class
@return the created object"""
id = self._get_free_id()
obj = class_(id=id, handler=self, *args, **kwargs)
self.all_stores[id] = obj
[x(obj) for x in self.new_store_callbacks]
return obj
def __init__(self):
self.command_handler_classes = self._find_handler_classes('command_handlers')
self.reply_handler_classes = self._find_handler_classes('reply_handlers')
self.command_stores = []
self.reply_stores = []
self.privmsg_stores = []
self.all_stores = {}
self.joined_when_disconnected = []
self.new_store_callbacks = []
self._next_id = 0
self.connection = None
self.connection_status = (0, '')
self.connection_status_timestamp = 0
self.nicknames = []
self.username = ""
self.nickname = ""
def connect(self, server, nicknames, username, realname, port=6667, password=""):
"""Tries to connect to the IRC server."""
#self.nickname = nickname
self.nicknames = nicknames
self.username = username
self.realname = realname
self.server = server
self.port = port
#self.connection = connection.connect(self, server, port)
self.connection = connection.Connection(server, port,
self.receive_message,
self.receive_status)
self.connection.start()
while not self.connection_status[0] == 1:
time.sleep(0.2)
if self.connection_status[0] == 103:
raise ConnectionError(self.connection_status[1])
if password:
self.send_command('PASS', password)
self.send_command('NICK', nicknames[0])
self.send_command('USER', '%s 0 * :%s' % (username, realname))
def __str__(self):
ret = "i'm a handler"
if self.connection:
ret += ", connected as %s" % self.connection
return ret
def remove_store(self, id):
"""removes all references to a store"""
try:
store = self.all_stores[id]
except KeyError:
raise ValueError("unknown store")
self.all_stores.pop(id)
while store in self.privmsg_stores:
self.privmsg_stores.remove(store)
for storelist in [self.reply_stores, self.command_stores]:
to_remove = []
for i in storelist:
if i[1] == store:
to_remove.append(i)
for i in to_remove:
storelist.remove(i)
# and finally tell the store about it
store.remove()
def get_store_id(self, store):
for id in self.all_stores:
if self.all_stores[id] == store:
return id
return None
def close(self):
self.connection.close()
def receive_message(self, message):
"""handles messages coming from the connection and hands them to
_handle_privmsg or _handle_server_message depending on message type"""
logging.debug("receive_message: received %s" % message)
tmp = message.split(' ')
# parses the received message to prefix/cmd/params:
if message[0] == ":":
prefix = tmp[0]
cmd = tmp[1]
params = ' '.join(tmp[2:])
else:
prefix = ""
cmd = tmp[0]
params = ' '.join(tmp[1:])
ev = Event(prefix=prefix, command=cmd, params=params)
#print "RECV: prefix %s cmd %s params %s " % (prefix, cmd, params)
if cmd == 'JOIN':
# JOINs are a special case
# - we need to create a privmsg store for them if one doesn't
# exist
self._get_privmsg_handlers(params[1:])
# now a store is created for the channel if one didn't exist
# already - we don't need the actual instance anywhere in here,
# but now _handle_server_message has somewhere to send the JOIN too
if cmd in ["PRIVMSG", "NOTICE"]:
self._handle_privmsg(ev)
else:
self._handle_server_message(ev)
def _handle_privmsg(self, event):
logging.debug("_handle_privmsg: event %s" % event)
#if event.params[0] in '*#+!&':
if is_channel(event.params[0]):
target = event.params.split()[0]
else:
try:
target = event.prefix[1:event.prefix.index('!')]
except ValueError: # no nickname could be found
logging.debug("hmm? couldn't extract nickname from event")
return
stores = self._get_privmsg_handlers(target)
[store.add_event(event) for store in stores]
def _get_privmsg_handlers(self, target):
logging.debug("ENTER _get_privmsg_handlers, target: %s" % target)
s = [x for x in self.privmsg_stores if x.target.lower() == target.lower()]
if not s:
logging.debug("_get_privmsg_handlers: no existing store found")
if is_channel(target[0]):
s.append(self._create_new_store(events.ChannelStore, target=target, name="_"+target))
replies = events.ChannelStore.reply_handlers
else:
s.append(self._create_new_store(events.PrivmsgStore, target=target, name="_"+target))
replies = events.PrivmsgStore.reply_handlers
self.privmsg_stores.append(s[-1])
for r in replies:
self.reply_stores.append((r, s[-1])) # TODO ADD ID
for i in self.reply_stores:
if i[0] == '*':
s.append(i[1])
logging.debug("_get_privmsg_handlers: returning stores: %s" % [str(x) for x in s])
return s
def _handle_server_message(self, event):
handlers = self._get_handlers('reply', event.command)
for h in handlers:
answer = h.add_event(event)
if answer:
[self.connection.send(msg) for msg in answer]
def send_command(self, command, params):
if self.connection_status[0] not in (1, 10) or \
(self.connection_status[0] == 1 and \
command not in ['PASS', 'USER', 'NICK']):
raise ConnectionError("not connected")
command = command.upper()
handlers = self._get_handlers('command', command)
if not handlers:
raise ValueError("unknown command")
for h in handlers:
to_send = h.generate_event(command, params)
if to_send:
for msg in to_send:
self.connection.send(msg)
def send_message(self, target, message, type="PRIVMSG"):
logging.debug("ENTER send_message: target %s message %s type %s" % (target, message, type))
#if message.startswith('wait'):
# time.sleep(10)
if not self.connection_status[0] == 10:
raise ConnectionError("not connected")
store = self._get_privmsg_handlers(target)[0]
logging.debug("send_message: store resolved as %s" % store)
to_send = store.generate_event(type, message)
logging.debug("send_message: to_send: %s" % to_send)
if to_send:
for msg in to_send:
self.connection.send(msg)
def send_notice(self, target, message):
self.send_message(target, message, type="NOTICE")
def create_privmsg_store(self, target):
self._get_privmsg_handlers(target)
def create_command_store(self, target):
if not self._get_handlers('command', target.upper()):
raise ValueError('unknown command')
def receive_status(self, statusno, statusdesc):
"""receives a tuple of status messages (number, description) from the connection object
and (maybe) acts accordingly
0: not connected
1: connecting (socket opened)
10: connection open and free to use
100: disconnected by user request
101: disconnected by server
102: disconnected for some other reason (?)
103: network error when connecting
104: all nicknames in use
105: bad server password
messages can be sent only when status == 10
(USER, PASS and NICK may be sent when status == 1)
"""
        #print "got some status: %s %s" % (statusno, statusdesc)
self.connection_status_timestamp = time.time()
self.connection_status = (statusno, statusdesc)
# when disconnected, save names of channels that were joined at the
# time, and send an informational event to them
if self.connection_status[0] in (100, 101, 102):
for i in self.privmsg_stores:
if isinstance(i, events.ChannelStore):
if i.joined:
self.joined_when_disconnected.append(i.target)
i.joined = False
disconnect_event = Event(prefix="", command="", params=statusdesc,
generated=True, informational=True)
for i in self.all_stores:
self.all_stores[i].add_event(disconnect_event)
    def reconnect(self):
        """if disconnected, reconnects to the server and rejoins channels
"""
if self.connection_status[0] == 10:
raise ValueError("already connected!")
self.connect(self.server, self.nicknames, self.username, self.realname, self.port)
for channel in self.joined_when_disconnected:
self.send_command('JOIN', channel)
self.joined_when_disconnected = []
def list_reply_stores(self):
"""returns list of unique reply stores"""
names = []
stores = []
for i in self.reply_stores:
if not i[1] in stores:
names.append(i[1].name)
stores.append(i[1])
return dict(zip(names, stores)) # I suppose names are unique
def list_command_stores(self):
"""returns list of unique command stores"""
names = []
stores = []
for i in self.command_stores:
if not i[1] in stores and not (hasattr(i[1], 'internal') and i[1].internal):
# do not list stores that are there already, and don't list those
# that are "internal" either (particularly PingES)
names.append(i[0])
stores.append(i[1])
return dict(zip([x.lower() for x in names], stores))
def list_privmsg_stores(self, filter=None):
"""returns list of unique privmsg stores
@param filter return only privmsg or channels if 'privmsg' or 'channel'"""
d = {}
for i in self.privmsg_stores:
if filter == 'privmsg':
if isinstance(i, events.PrivmsgStore):
d[i.target] = i
elif filter == 'channel':
if isinstance(i, events.ChannelStore):
d[i.target] = i
else:
d[i.target] = i
return d
def list_info_stores(self):
"""returns list of reply stores that don't take any commands,
aka "informational" stores (errors, etc?)"""
names = []
stores = []
for i in self.reply_stores:
found = False
for j in self.command_stores:
if j[1] == i[1]:
found = True
for j in self.privmsg_stores:
if j == i[1]:
found = True
if not found:
names.append(i[1].name)
stores.append(i[1])
return dict(zip(names, stores))
``` |
{
"source": "JLippai/PyMM1",
"score": 3
} |
#### File: JLippai/PyMM1/mm1.py
```python
import sys
import numpy as np
import matplotlib.pyplot as plt
import heapq
LAMBDA = 1.0
MU = 2.0
LIMIT_SWITCH = 0
LIMIT_VALUE = 10000
FIGURE_SAVE = 1
if len(sys.argv) > 4:
LAMBDA = float(sys.argv[1])
MU = float(sys.argv[2])
LIMIT_SWITCH = int(sys.argv[3])
if LIMIT_SWITCH == 0:
LIMIT_VALUE = int(sys.argv[4])
else:
LIMIT_SWITCH = 1
LIMIT_VALUE = int(sys.argv[4])
if len(sys.argv) > 5:
FIGURE_SAVE = int(sys.argv[5])
rho = LAMBDA/MU
rates = {'a':LAMBDA, 'd':MU}
infeasibleEvents = {'all':['INIT'], 0:['d']}
feasibleEvents = {'all':['a', 'd']}
QUEUE = 0
EVENTHEAP = [(0, 'INIT'),]
arrivals = []
departures = []
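# newLifetime below uses inverse-transform sampling: if U ~ Uniform(0, 1), then
# -ln(1 - U) / rate is exponentially distributed with the given rate.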
def newLifetime(event):
    """Generate a new event lifetime, exponentially distributed with the rate specified in the rates map"""
return -1/rates[event]*np.log(1 - np.random.rand())
def updateState(event, queues):
"""Update the queue length as a function of current state and triggering event.
Also updates the array of queue lengths.
"""
global QUEUE
QUEUE += (event[1] == 'a') - (event[1] == 'd')
queues.append(QUEUE)
def updateFeasibleEvents(outgoing_event, times):
""" Update the scheduled event list given the current queue length and the event (with lifetime)
of the event that triggered the current state. Also updates the time array times.
"""
global EVENTHEAP
global arrivals
global departures
global feasibleEvents
global infeasibleEvents
    if QUEUE not in feasibleEvents:
        feasibleEvents[QUEUE] = []
    if QUEUE not in infeasibleEvents:
        infeasibleEvents[QUEUE] = []
EVENTHEAP = [(time - outgoing_event[0], event) for (time, event) in EVENTHEAP if event not in infeasibleEvents[QUEUE]]
residualEvents = [event for (time, event) in EVENTHEAP]
for event in ((set(feasibleEvents[QUEUE]).union(feasibleEvents['all'])) - set(residualEvents)) - (set(infeasibleEvents[QUEUE]).union(infeasibleEvents['all'])):
heapq.heappush(EVENTHEAP, (newLifetime(event), event))
if (times == []):
times.append(outgoing_event[0])
else:
times.append(times[-1] + outgoing_event[0])
if (outgoing_event[1] == 'a'):
arrivals.append(times[-1])
else:
departures.append(times[-1])
def runSimulation():
"""Updates the system until the maximum time or number of departures is reached,
and returns the array of times at which events occurred, the array of the queue length
at those times, the average queue length, and the average system time
"""
departureCount = 0
times = []
queues = []
arrivalCountArray = [0]
while (True):
new_event = heapq.heappop(EVENTHEAP)
if (new_event[1] == 'd'):
departureCount += 1
arrivalCountArray.append(0)
elif (new_event[1] == 'a'):
arrivalCountArray.append(1)
updateState(new_event, queues)
updateFeasibleEvents(new_event, times)
if (LIMIT_SWITCH):
if (departureCount >= LIMIT_VALUE):
break
else:
if (times[-1] >= LIMIT_VALUE):
break
tarray = np.array(times)
qarray = np.array(queues)
q_substantive = qarray[:-1]
difft = np.diff(tarray)
u = np.sum(q_substantive*difft)
L = u/tarray[-1]
S = u/len(arrivals)
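    # By Little's law L = lambda * S: the time-averaged queue length equals the
    # arrival rate times the mean system time, which these two estimators mirror.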
return tarray, qarray, arrivalCountArray, L, S
def main():
tarray, qarray, arrivalCountArray, L, S = runSimulation()
print("lambda = %.1f, mu = %.1f, rho = %.4f\nAvg queue, Avg sys time\n%.6f, %.6f"%(LAMBDA, MU, rho, L, S))
if (FIGURE_SAVE):
if (LIMIT_SWITCH):
runtimeLabel = 'Departures'
else:
runtimeLabel = 'Time'
uarray = np.cumsum(qarray[:-1]*np.diff(tarray))
qdiff = np.diff(qarray)
plt.bar(tarray, qarray, edgecolor="none")
plt.title("Sample path of queue length vs. %s"%runtimeLabel.lower())
plt.xlabel("%s"%runtimeLabel)
plt.ylabel("Queue length")
plt.xlim(0, tarray[-1])
plt.savefig("samplepath.png")
plt.close()
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(tarray[1:], uarray/tarray[1:], label=r"$\bar x$")
plt.plot(tarray[1:], uarray/np.cumsum(arrivalCountArray[1:]), label=r"$\bar s$")
plt.legend()
plt.title(r"Estimators $\bar x$ and $\bar s$ as functions of %s"%runtimeLabel.lower())
plt.xlabel("%s"%runtimeLabel)
plt.ylabel("Estimators")
plt.xlim(0, tarray[-1])
plt.savefig("estimators.png")
plt.close()
if __name__ == '__main__':
main()
``` |
{
"source": "jlips24/SecretSanta",
"score": 3
} |
#### File: SecretSanta/helpers/EmailHelpers.py
```python
import smtplib
import os
from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from dotenv import load_dotenv
load_dotenv()
def read_template(filename):
with open(filename, 'r', encoding='utf-8') as template_file:
template_file_content = template_file.read()
return Template(template_file_content)
def send_emails(people, template, extra=None):
print("Sending Messages")
s = smtplib.SMTP(host=os.getenv("EMAIL_HOST"), port=int(os.getenv("EMAIL_PORT")))
s.starttls()
s.login(os.getenv("EMAIL_USER"), os.getenv("EMAIL_PASSWORD"))
message_number = 1
total_messages = len(people)
for person in people:
print(f"({round((message_number/total_messages)*100, 2)}%)Sending message to {person[0]} at {person[1]}: message {message_number} of {total_messages}")
msg = MIMEMultipart()
msg['From'] = "Open Source Christmas"
msg['To'] = person[1]
if (template == "SecretSanta"):
message_template = read_template('templates/SecretSantaEmail.txt')
            # Add in the recipient name and match name to the message template
            recipient = person[0]
            match_name = people[person[2]][0]
            message = message_template.substitute(RECIPRIENT_NAME=recipient, MATCH_NAME=match_name, MAX_MONEY=extra[0])
# Setup the parameters of the message
msg['Subject']="Your Secret Santa match is ready"
# Add in the message body
msg.attach(MIMEText(message, 'plain'))
elif (template == "WhiteElephant"):
message_template = read_template('templates/WhiteElephant.txt')
            # Add in the recipient name and turn number to the message template
            recipient = person[0]
            turn = person[2]
            message = message_template.substitute(RECIPRIENT_NAME=recipient, TURN_NUMBER=turn)
# Setup the parameters of the message
msg['Subject']="White Elephant info"
# Add in the message body
msg.attach(MIMEText(message, 'plain'))
elif (template == "WhiteElephantMaster"):
message_template = read_template('templates/WhiteElephantMaster.txt')
            # Add in the recipient name and the master list to the message template
            recipient = person[0]
            message = message_template.substitute(RECIPRIENT_NAME=recipient, PERSON_LIST=extra)
# Setup the parameters of the message
msg['Subject']="White Elephant info (master list)"
# Add in the message body
msg.attach(MIMEText(message, 'plain'))
# Send the message
try:
s.send_message(msg)
except smtplib.SMTPRecipientsRefused:
print(f"The mail server had an error sending the notification to {msg['To']}")
del msg
message_number += 1
``` |
{
"source": "JLira/ThinkPython2",
"score": 4
} |
#### File: ThinkPython2/code/right_justify.py
```python
def right_justify(s, n=70):
    # Pad with leading spaces so the last character of s lands in column n.
    return ' ' * (n - len(s)) + s
print(right_justify('monty'))
``` |
{
"source": "jlirochon/python-sdk",
"score": 2
} |
#### File: jlirochon/python-sdk/agent.py
```python
import socket
import os
import sys
import json
from blackfire.exceptions import *
import _blackfire_profiler as _bfext
from collections import defaultdict
from blackfire.utils import urlparse, get_logger, IS_PY3, parse_qsl, read_blackfireyml_content, \
replace_bad_chars, get_time, unquote, UC, unicode_or_bytes
log = get_logger(__name__)
_blackfire_keys = None
class Protocol(object):
MAX_RECV_SIZE = 4096
MAX_SEND_SIZE = 4096
ENCODING = 'utf-8'
HEADER_MARKER = '\n'
MARKER = '\n\n'
if IS_PY3:
HEADER_MARKER = bytes(HEADER_MARKER, ENCODING)
MARKER = bytes(MARKER, ENCODING)
class Connection(object):
def __init__(self, agent_socket, agent_timeout):
self.agent_socket = agent_socket
self.agent_timeout = agent_timeout
self._closed = False
self.agent_response = None
# parse & init sock params
sock_parsed = urlparse(self.agent_socket)
if sock_parsed.scheme == "unix":
family = socket.AF_UNIX
self._sock_addr = sock_parsed.path
elif sock_parsed.scheme == "tcp":
family = socket.AF_INET
# there are some URLs like: tcp://[::]:10666 which might contain
# `:` in the host section. That is why we use rsplit(...) below
host, port = sock_parsed.netloc.rsplit(':', 1)
# is this a IPv6 address?
if host.startswith('['):
host = host[1:-1]
family = socket.AF_INET6
self._sock_addr = (
host,
int(port),
)
else:
raise BlackfireApiException(
"Unsupported socket type. [%s]" % (sock_parsed.scheme)
)
# init the real socket
self._socket = socket.socket(family, socket.SOCK_STREAM)
self._socket.settimeout(self.agent_timeout)
# it is advised to disable NAGLE algorithm
try:
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except:
pass
def __del__(self):
try:
self.close()
except:
pass
def _contains_blackfireyaml_header(self, recv_wnd):
BFYAML_HDR = 'blackfire_yml=true'
if IS_PY3:
BFYAML_HDR = bytes(BFYAML_HDR, Protocol.ENCODING)
return BFYAML_HDR in recv_wnd
def connect(self, config=None):
# check if signature is valid even before connecting to the Agent
if config and _blackfire_keys and not _blackfire_keys.is_expired():
sig = replace_bad_chars(unquote(config.signature))
msg = config.challenge_raw
signature_verified = False
for key in _blackfire_keys:
signature_verified = _bfext._verify_signature(key, sig, msg)
log.debug("_verify_signature(key=%s, sig=%s, msg=%s) returned %s." % \
(key, sig, msg, signature_verified))
if signature_verified:
break
if not signature_verified:
raise BlackfireInvalidSignatureError(
'Invalid signature received. (%s)' % (sig)
)
log.debug('Signature verified.')
log.debug("Connecting to agent at %s." % str(self._sock_addr))
try:
self._socket.connect(self._sock_addr)
except Exception as e:
raise BlackfireApiException(
'Agent connection failed.[%s][%s]' % (e, self.agent_socket)
)
# if no config provided, it is APM case
if config:
self._write_prolog(config)
def close(self):
if self._closed:
return
self._socket.close()
self._closed = True
log.debug("Agent connection closed.")
def send(self, data):
# Agent expects data is written in chunks
try:
while (data):
self._socket.sendall(data[:Protocol.MAX_SEND_SIZE])
data = data[Protocol.MAX_SEND_SIZE:]
except Exception as e:
raise BlackfireApiException(
'Agent send data failed.[%s][%s]' % (e, data)
)
def recv(self):
result = ''
if IS_PY3:
result = bytes(result, Protocol.ENCODING)
try:
while (True):
data = self._socket.recv(Protocol.MAX_RECV_SIZE)
if not len(data):
# other side indicated no more data will be sent
raise Exception('Agent closed the connection.')
result += data
# when blackfire_yaml header is present in the recv_window
# do not try to read until Protocol.MARKER found. This will
# be a header only msg
if self._contains_blackfireyaml_header(result) and \
result.endswith(Protocol.HEADER_MARKER):
break
if result.endswith(Protocol.MARKER):
break
except Exception as e:
raise BlackfireApiException('Agent recv data failed.[%s]' % (e))
return result
def _write_prolog(self, config):
global _blackfire_keys
blackfire_yml = bool(int(config.args.get('flag_yml', '1')))
blackfire_yml_content = None
if blackfire_yml:
blackfire_yml_content = read_blackfireyml_content()
log.debug('Sending .blackfire.yml along with profile data.')
bf_probe_header = 'python-%s, config' % (sys.hexversion)
# recv timespan entries if timespan enabled
recv_timespan = bool(int(config.args.get('flag_timespan', '0')))
if recv_timespan:
bf_probe_header += ', timespan'
        # It is expected that the .blackfire.yml file may be missing even if the
        # flag requesting it is set.
if blackfire_yml_content:
bf_probe_header += ', blackfire_yml'
        # If blackfire.yml was asked for by builds/scenarios, the Agent will not
        # wait for any more data once the noop flag is seen.
if config.is_blackfireyml_asked():
bf_probe_header += ', noop'
if bool(int(config.args.get('no_pruning', '0'))):
bf_probe_header += ', no_pruning'
if bool(int(config.args.get('no_anon', '0'))):
bf_probe_header += ', no_anon'
headers = {
'Blackfire-Query':
'%s&signature=%s&%s' % (
config.challenge_raw,
config.signature,
config.args_raw,
),
'Blackfire-Probe':
bf_probe_header,
}
# add Blackfire-Auth header if server_id/server_token are defined as
# env. vars
bf_server_id = os.environ.get('BLACKFIRE_SERVER_ID')
bf_server_token = os.environ.get('BLACKFIRE_SERVER_TOKEN')
if bf_server_id and bf_server_token:
headers['Blackfire-Auth'
] = '%s:%s' % (bf_server_id, bf_server_token)
hello_req = BlackfireRequest(headers=headers)
self.send(hello_req.to_bytes())
log.debug("SEND hello_req ('%s')", hello_req.to_bytes())
response_raw = self.recv()
self.agent_response = BlackfireResponse().from_bytes(response_raw)
_blackfire_keys = self.agent_response.get_blackfire_keys()
if self.agent_response.status_code != BlackfireResponse.StatusCode.OK:
raise BlackfireApiException(
'Invalid response received from Agent. [%s]' %
(self.agent_response)
)
log.debug("RECV hello_req response. ('%s')", self.agent_response)
if self.agent_response.status_val_dict.get('blackfire_yml') == 'true':
blackfire_yml_req = BlackfireRequest(
headers={'Blackfire-Yaml-Size': len(blackfire_yml_content)},
data=blackfire_yml_content,
)
self.send(blackfire_yml_req.to_bytes())
log.debug(
"SEND blackfire_yml_req ('%s')", blackfire_yml_req.to_bytes()
)
# as we send blackfire_yml back, the first agent_response should include
# some extra params that might be changed with blackfire_yml file.
# e.x: fn-args, timespan entries, metric defs.
response_raw = self.recv()
blackfire_yml_response = BlackfireResponse(
).from_bytes(response_raw)
if blackfire_yml_response.status_code != BlackfireResponse.StatusCode.OK:
raise BlackfireApiException(
'Invalid response received from Agent to blackfire_yml request. [%s]'
% (blackfire_yml_response)
)
# There can be Blackfire-Fn-Args + Blackfire-Const, Blackfire-Keys all
# update the .args dict
self.agent_response.args.update(blackfire_yml_response.args)
log.debug(
"RECV blackfire_yml_req response. ('%s')",
blackfire_yml_response.to_bytes()
)
class BlackfireMessage(object):
def to_bytes(self):
pass
def save(self, path):
with open(path, "wb") as f:
f.write(self.to_bytes())
class BlackfireKeys(object):
def __init__(self, keys):
'''Parses the received Blackfire-Keys line and presents necessary fields
as attributes.
keys: a string that contains Blackfire-Keys entries.
e.g: max_age (secs);Key1, Key2, Key3
'''
self._keys_raw = keys
keys = keys.split(',')
max_age, key1 = keys[0].split(';')
keys = [key1] + keys[1:]
keys = list(map(replace_bad_chars, keys))
self._keys = keys
self._expiration_time = get_time() + int(max_age)
def is_expired(self):
return self._expiration_time <= get_time()
def __iter__(self):
return iter(self._keys)
def __repr__(self):
return "keys=%s, expiration_time=%s" % (
self._keys, self._expiration_time
)
class BlackfireResponseBase(BlackfireMessage):
TIMESPAN_KEY = 'Blackfire-Timespan'
FN_ARGS_KEY = 'Blackfire-Fn-Args'
CONSTANTS_KEY = 'Blackfire-Const'
BLACKFIRE_KEYS_KEY = 'Blackfire-Keys'
def get_blackfire_keys(self):
keys = self.args.get(self.BLACKFIRE_KEYS_KEY, [])
if len(keys) == 1: # defensive
# Blackfire-Keys is not repeated like other headers. Keys are sent
# in a single line as comma separated values
return BlackfireKeys(keys[0])
def get_timespan_selectors(self):
result = {'^': set(), '=': set()}
ts_selectors = self.args.get(self.TIMESPAN_KEY, [])
for ts_sel in ts_selectors:
if ts_sel[0] not in ['^', '=']:
log.warning("Ignoring invalid timespan selector '%s'.", ts_sel)
continue
result[ts_sel[0]].add(ts_sel[1:])
return result
def get_constants(self):
return self.args.get(self.CONSTANTS_KEY, [])
def get_instrumented_funcs(self):
result = {}
# convert the fn-args string to dict for faster lookups on C side
fn_args = self.args.get(self.FN_ARGS_KEY, [])
for fn_arg in fn_args:
fn_name, arg_ids_s = fn_arg.rsplit(" ", 1)
fn_name = fn_name.strip()
if fn_name in result:
log.warning(
"Function '%s' is already instrumented. Ignoring fn-args directive %s.",
fn_name, fn_arg
)
continue
arg_ids = []
for arg_id in arg_ids_s.strip().split(','):
if arg_id.isdigit():
arg_ids.append(int(arg_id))
else:
arg_ids.append(arg_id)
result[fn_name] = arg_ids
return result
class BlackfireRequest(BlackfireMessage):
__slots__ = 'headers', 'data'
def __init__(self, headers=None, data=None):
if not headers:
headers = {}
self.headers = {}
for k, v in headers.items():
# these headers are not expected to be lower-case
if k not in [
'Blackfire-Query', 'Blackfire-Probe', 'Blackfire-Yaml-Size'
]:
self.headers[k.lower()] = v
continue
self.headers[k] = v
self.data = data
def to_bytes(self):
result = ''
# There are multiple BlackfireRequest messages between Agent->Probe. If this
# message contains file-format or Blackfire-Query header, we make sure it is the first line
# in the protocol. While this is not mandatory, this is to comply with other
# probes.
if 'file-format' in self.headers:
result += 'file-format: %s\n' % (self.headers['file-format'])
if 'Blackfire-Query' in self.headers:
result += 'Blackfire-Query: %s\n' % (
self.headers['Blackfire-Query']
)
for k, v in self.headers.items():
if k in ['Blackfire-Query', 'file-format']:
continue
result += '%s: %s\n' % (UC(k), UC(v))
if len(self.headers):
result += '\n'
if self.data:
result += str(self.data)
# Py2 note:
# Py2 treats the string as ASCII encoded unless you explicitly do it.
# As we have used UC() on most of the headers passed to this function,
# we are safe to encode to Protocol.ENCODING directly here
return unicode_or_bytes(result)
def from_bytes(self, data):
data = data.decode(Protocol.ENCODING)
dsp = data.split(Protocol.MARKER.decode(Protocol.ENCODING))
header_lines = []
if len(dsp) == 3:
header_lines = dsp[0]
self.data = dsp[1] + '\n' + dsp[2] # timespan + trace?
elif len(dsp) == 2:
header_lines, self.data = dsp
elif len(dsp) == 1:
header_lines = dsp[0]
else:
raise BlackfireApiException(
'Invalid BlackfireRequest message. [%s]' % (data)
)
header_lines = header_lines.split('\n')
for line in header_lines:
spos = line.find(':')
if spos > -1:
self.headers[line[:spos].strip()] = line[spos + 1:].strip()
return self
def __repr__(self):
container_dict = {"headers": self.headers, "data": self.data}
return json.dumps(container_dict, indent=4)
class BlackfireAPMRequest(BlackfireRequest):
def to_bytes(self):
result = ''
# APM protocol requires the first header to be FileFormat
result += 'file-format: %s\n' % (self.headers['file-format'])
for k, v in self.headers.items():
if k == 'file-format':
continue
result += '%s: %s\n' % (k, v)
if self.data is not None:
result += str(self.data)
result += '\n\n'
if IS_PY3:
result = bytes(result, Protocol.ENCODING)
return result
class BlackfireAPMResponse(BlackfireResponseBase):
TIMESPAN_KEY = 'timespan'
FN_ARGS_KEY = 'fn-args'
def __init__(self):
self.args = defaultdict(list)
self.key_pages = []
self.raw_data = ''
self.update_config = False
def __repr__(self):
return self.raw_data
def from_bytes(self, data):
if IS_PY3:
data = data.decode(Protocol.ENCODING)
self.raw_data = data.strip()
lines = self.raw_data.split('\n')
# first line is the status line
resp = lines[0].split(':')
resp_type = resp[0]
resp_val = resp[1]
if resp_type == 'Blackfire-Error':
raise BlackfireAPMException(
'Agent could not send APM trace. reason=%s' % (resp_val)
)
resp_type = resp_type.strip()
self.status_val = resp_val.strip()
self.status_val_dict = dict(parse_qsl(self.status_val))
if 'false' in self.status_val_dict['success']:
raise BlackfireAPMStatusFalseException(
self.status_val_dict.get(
'error', "status=False and no error received from Agent."
)
)
self.update_config = False if self.status_val_dict.get(
'update_config', 'false'
) == 'false' else True
key_page = None
for line in lines[1:]:
line = line.strip()
# every key-page entry starts with `key-page(` and endswith `)`
if line.startswith('key-page('):
key_page = {}
continue
elif line.startswith(')'):
self.key_pages.append(key_page)
key_page = None
continue
# split only first occurrence
resp_key, resp_val = line.split(':', 1)
resp_key = resp_key.strip()
resp_val = resp_val.strip()
# are we parsing a key-page entry?
if key_page is not None:
key_page[resp_key] = resp_val
else:
# there are arguments which occur multiple times with different
# values (e.g: fn-args)
# e.g:
# timespan: =mysql_connect
# timespan: =mysql_query
# timespan: ^PDO::
# fn-args: file_get_contents 1,2
# fn-args: PDO::query 1
self.args[resp_key].append(resp_val)
return self
class BlackfireResponse(BlackfireResponseBase):
class StatusCode:
OK = 0
ERR = 1
def __init__(self):
self.status_code = BlackfireResponse.StatusCode.OK
self.status_val = None
self.raw_data = None
self.args = defaultdict(list)
def from_bytes(self, data):
if IS_PY3:
data = data.decode(Protocol.ENCODING)
self.status_code = BlackfireResponse.StatusCode.OK
self.raw_data = data.strip()
lines = self.raw_data.split('\n')
# first line is the status line
resp_type, resp_val = lines[0].split(':')
resp_type = resp_type.strip()
self.status_val = resp_val.strip()
self.status_val_dict = dict(parse_qsl(self.status_val))
if resp_type == 'Blackfire-Error':
self.status_code = BlackfireResponse.StatusCode.ERR
for line in lines[1:]:
resp_key, resp_val = line.split(':', 1)
resp_key = resp_key.strip()
resp_val = resp_val.strip()
# there are arguments which occur multiple times with different
# values (e.g: fn-args)
self.args[resp_key].append(resp_val)
return self
def to_bytes(self):
result = ''
# add the status line
if self.status_code == BlackfireResponse.StatusCode.ERR:
result += 'Blackfire-Error: '
elif self.status_code == BlackfireResponse.StatusCode.OK:
result += 'Blackfire-Response: '
result += self.status_val
# add .args
if len(self.args) > 0:
result += '\n'
for arg_key, arg_values in self.args.items():
for arg_val in arg_values:
result += '%s: %s\n' % (arg_key, arg_val)
if IS_PY3:
result = bytes(result, Protocol.ENCODING)
return result
def __repr__(self):
return "status_code=%s, args=%s, status_val=%s" % (
self.status_code, self.args, self.status_val
)
```
#### File: jlirochon/python-sdk/constants.py
```python
import sys
import sysconfig
from blackfire.utils import get_logger
log = get_logger(__name__)
def _get_sys_config_params(*args):
result = []
for arg in args:
v = sysconfig.get_config_var(arg)
if v:
result += [v.strip()]
return result
def _on_except(func=None, return_val=None):
def inner_func(func):
def _wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
return return_val
return _wrapper
return inner_func
class BlackfireConstants(object):
'''
    These constants are sent back to the Agent in `Constants` headers; they appear
    as runtime.constants in metric definitions, scenarios, etc.
'''
@classmethod
def get(cls, val):
fn = getattr(cls, val.lower(), None)
if fn is None:
log.error("Unsupported Blackfire-Const value. [%s]", val)
return None
return fn()
# Constant definitions
@classmethod
@_on_except(return_val="0.0.0")
def python_version(cls):
return "%d.%d.%d" % (
sys.version_info.major, sys.version_info.minor,
sys.version_info.micro
)
@classmethod
@_on_except()
def python_debug_flag(cls):
return bool(sysconfig.get_config_var('Py_DEBUG'))
@classmethod
@_on_except()
def python_pgo_flag(cls):
return '-fprofile-use' in _get_sys_config_params(
'PY_CFLAGS', 'PY_CFLAGS_NODIST'
)
@classmethod
@_on_except()
def django_version(cls):
import django
return django.get_version()
@classmethod
@_on_except()
def flask_version(cls):
import flask
return flask.__version__
@classmethod
@_on_except()
def django_debug_flag(cls):
from django.conf import settings
return settings.DEBUG
@classmethod
@_on_except()
def django_db_conn_max_age(cls):
from django.db import connection
return connection.settings_dict['CONN_MAX_AGE']
@classmethod
@_on_except()
def flask_debug_flag(cls):
from flask import current_app
return current_app.debug
@classmethod
@_on_except()
def is_flask_app(cls):
# current_app will throw an error if not called from a context a request
from flask import current_app
return True
@classmethod
@_on_except()
def is_django_app(cls):
from django.conf import settings
# configure() will throw error if Django app is configured properly
settings.configure()
return True
```
#### File: hooks/nw/__init__.py
```python
import threading
from blackfire.utils import ContextDict
_nw = ContextDict('bf_nw_counters')
class NwCounters:
def __init__(self):
self.i = 0
self.o = 0
def get_counters():
if _nw.get('enabled'):
counters = _nw.get('counters')
if counters is None:
counters = NwCounters()
_nw.set('counters', counters)
return counters
def enable():
"""
We need an API to explicitly enable() the `nw` hooks because BF itself uses
    socket APIs to communicate with the Agent. With this API, we make sure the
    counters only start after Agent communication, just before the profiled
    application starts.
"""
_nw.set('enabled', True)
def disable():
_nw.set('enabled', False)
```
#### File: python-sdk/hooks/utils.py
```python
import os
import sys
from blackfire import probe, generate_config, agent, apm
from blackfire.utils import get_logger, UC, unicode_or_bytes
from blackfire.exceptions import *
log = get_logger(__name__)
def format_exc_for_display(e):
# filename:lineno and exception message
try:
_, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
return "%s:%s %s" % (fname, exc_tb.tb_lineno, exc_obj)
except:
# sometimes this fails with 'module' object has no attribute 'exc_info'
# where there is a custom exception handler (Flask) In those cases we will
# simply use the exception object
return str(e)
def try_validate_send_blackfireyml(config, blackfireyml_content):
try:
agent_conn = agent.Connection(config.agent_socket, config.agent_timeout)
agent_conn.connect(config=config)
resp_line = str(agent_conn.agent_response.status_val)
if blackfireyml_content is None:
resp_line += '&no-blackfire-yaml'
else:
# convert .blackfire.yml contents to UTF-8 encoded string and get the
# length according to that. Otherwise blackfire-yml-size we set here
# might be inconsistent with Content-Length header
blackfireyml_content = UC(blackfireyml_content)
blackfireyml_content = unicode_or_bytes(blackfireyml_content)
resp_line += '&blackfire-yml-size=%d' % (len(blackfireyml_content))
return ('X-Blackfire-Response', resp_line)
except Exception as e:
log.exception(e)
def try_enable_probe(
query, client_id=None, client_token=None, title=None, ctx_var=None
):
probe_err = new_probe = None
try:
config = generate_config(
query=query,
client_id=client_id,
client_token=client_token,
title=title,
ctx_var=ctx_var
)
new_probe = probe.Probe(config=config)
new_probe.clear_traces()
new_probe.enable()
except BlackfireInvalidSignatureError:
# do not send error if signature validation fails
probe_err = BlackfireInvalidSignatureError
log.error("Signature validation failed. [%s]", config)
except Exception as e:
# TODO: Is this really quote or urlencode?
probe_err = ('X-Blackfire-Error', '101 ' + format_exc_for_display(e))
log.exception(e)
return probe_err, new_probe
def try_end_probe(
new_probe, response_status_code, response_len, controller_name, framework,
**kwargs
):
try:
agent_status_val = new_probe.get_agent_prolog_response().status_val
headers = {}
headers['Response-Code'] = response_status_code
headers['Response-Bytes'] = response_len
# custom transaction name overrides controller name setting
headers['controller-name'
] = new_probe.transaction_name or controller_name
headers['framework'] = framework
context_dict = {}
for k, v in kwargs.items():
if v:
context_dict[k] = v
headers['Context'] = context_dict
new_probe.end(headers=headers)
return ('X-Blackfire-Response', agent_status_val)
except Exception as e:
log.exception(e)
return ('X-Blackfire-Error', '101 ' + format_exc_for_display(e))
def try_apm_start_transaction(**kwargs):
try:
return apm._start_transaction(**kwargs)
except Exception as e:
log.exception(e)
def try_apm_stop_and_queue_transaction(**kwargs):
try:
apm._stop_and_queue_transaction(**kwargs)
except Exception as e:
log.exception(e)
def add_probe_response_header(http_response, probe_response):
# do not add any response header if signature is invalid
if probe_response is BlackfireInvalidSignatureError:
return
http_response[probe_response[0]] = probe_response[1]
``` |
{
"source": "jlirochon/splinter",
"score": 3
} |
#### File: splinter/request_handler/request_handler.py
```python
import sys
import base64
from .status_code import StatusCode
if sys.version_info[0] > 2:
from http import client as http_client
from urllib.parse import urlparse
else:
import httplib as http_client # NOQA
from urlparse import urlparse # NOQA
class RequestHandler(object):
def connect(self, url):
if not (url.startswith("file:") or url.startswith("about:")):
self.request_url = url
self._create_connection()
self._store_response()
self.conn.close()
else:
self.status_code = StatusCode(200, 'Ok')
def ensure_success_response(self):
"""
Guarantee the success on response.
If response is not success, raises an
:class:`HttpResponseError <splinter.request_handler.status_code.HttpResponseError>`
exception.
"""
self.status_code.is_valid_response()
def _store_response(self):
self.response = self.conn.getresponse()
self.status_code = StatusCode(self.response.status, self.response.reason)
def _create_connection(self):
self._parse_url()
if self.scheme == 'https':
self.conn = http_client.HTTPSConnection(self.host, self.port)
else:
self.conn = http_client.HTTPConnection(self.host, self.port)
self.conn.putrequest('GET', self.path)
self.conn.putheader('User-agent', 'python/splinter')
if self.auth:
self.conn.putheader("Authorization", "Basic %s" % self.auth)
self.conn.endheaders()
def _parse_url(self):
parsed_url = urlparse(self.request_url)
if parsed_url.username and parsed_url.password:
login = '%s:%s' % (parsed_url.username, parsed_url.password)
if sys.version_info[0] > 2:
self.auth = base64.standard_b64encode(login.encode('utf-8')).decode("utf-8")
else:
self.auth = base64.standard_b64encode(login)
else:
self.auth = None
self.host = parsed_url.hostname
self.port = parsed_url.port
self.path = parsed_url.path
self.scheme = parsed_url.scheme
if parsed_url.query:
self.path = parsed_url.path + "?" + parsed_url.query
```
#### File: splinter/tests/test_webdriver_phantomjs.py
```python
import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
class PhantomJSBrowserTest(WebDriverTests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("phantomjs")
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def setUp(self):
self.browser.visit(EXAMPLE_APP)
def test_get_alert(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_right_click(self):
with self.assertRaises(NotImplementedError):
self.browser.find_by_id('visible').right_click()
def test_double_click(self):
with self.assertRaises(NotImplementedError):
self.browser.find_by_id('visible').double_click()
def test_access_prompts_and_be_able_to_fill_then(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_access_confirm_and_accept_and_dismiss_them_using_with(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_access_confirm_and_accept_and_dismiss_them(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_access_alerts_using_with(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_access_alerts_and_accept_them(self):
with self.assertRaises(NotImplementedError):
self.browser.get_alert()
def test_can_work_on_popups(self):
# FIXME: Check https://github.com/detro/ghostdriver/issues/180 to see if
# we can implement this test
pass
class PhantomJSBrowserTestWithCustomHeaders(unittest.TestCase):
@classmethod
def setUpClass(cls):
custom_headers = {'X-Splinter-Customheaders-1': 'Hello',
'X-Splinter-Customheaders-2': 'Bye'}
cls.browser = Browser("phantomjs", custom_headers=custom_headers)
def test_create_a_phantomjs_with_custom_headers(self):
self.browser.visit(EXAMPLE_APP + 'headers')
self.assertTrue(
self.browser.is_text_present('X-Splinter-Customheaders-1: Hello'))
self.assertTrue(
self.browser.is_text_present('X-Splinter-Customheaders-2: Bye'))
@classmethod
def tearDownClass(cls):
cls.browser.quit()
``` |
{
"source": "jlisee/gitit-salt",
"score": 3
} |
#### File: extensions/_modules/cabal.py
```python
import logging
import os
import shutil
# Project imports
import salt
from salt.exceptions import CommandNotFoundError
log = logging.getLogger(__name__)
def _check_cabal_bin():
"""
Make sure we have cabal installed.
"""
which_result = __salt__['cmd.which_bin'](['cabal'])
if which_result is None:
raise CommandNotFoundError('Could not find a `cabal` binary')
def refresh_db(user=None):
"""
Downloads the latest hackage dependency information.
"""
_check_cabal_bin()
return __salt__['cmd.run_all']('cabal update', runas=user)
def version(*names, **kwargs):
"""
    Returns a dict mapping each package name to its list of installed versions, or None if not installed.
"""
    # Make sure we have cabal installed
_check_cabal_bin()
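    # For example (illustrative): if `cabal info bzlib` reports
    # "Versions installed: 0.5.0.4" the result is {'bzlib': ['0.5.0.4']},
    # while "[ Not installed ]" yields {'bzlib': None}.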
# Parse out the user argument
user = kwargs.get('user', None)
ret = {}
for name in names:
# Get the info about the package
res = __salt__['cmd.run_all']('cabal info %s' % name, runas=user)
# Check results
output = res['stdout']
# Parse out the installed line
lines = [l.strip() for l in output.split('\n')]
for line in lines:
            # Only work on valid lines
if line.count(':'):
# Break apart the line into the type and value sections
parts = line.split(':')
key = parts[0].strip()
value = (':'.join(parts[1:])).strip()
if key == 'Versions installed':
# Determine if the package is installed or not
if value == '[ Not installed ]':
package_name = None
else:
package_name = [i.strip() for i in value.split(',')]
# Store our output
ret[name] = package_name
return ret
# Store an internal version of this to deal with shadowing caused by
# function arguments
_version = version
def install(name, version=None, refresh=False, flags=None, user=None):
"""
Install the given cabal package.
"""
    # Make sure we have cabal installed
_check_cabal_bin()
# Make sure we have proper return
ret = {
'result' : True,
'comment' : '',
'changes' : {}
}
# Refresh as needed
if salt.utils.is_true(refresh):
res = refresh_db(user=user)
# Bail out if there was an error
if 0 != res['retcode']:
ret['result'] = False
ret['comment'] = 'update failed: %s' % res['stderr']
return ret
# Now lets install the package
package = name
# Form our flag string
if flags:
flags_str = ' --flags="%s"' % ' '.join(flags)
else:
flags_str = ''
# Form the package string with arguments first
if version:
package_str = '%s-%s' % (name, version)
else:
package_str = name
args = (package_str, flags_str)
res = __salt__['cmd.run_all']('cabal install %s%s' % args, runas=user)
output = res['stdout']
# Parse results to determine what we have done
if 0 == res['retcode']:
if output.count('Registering '):
# We have had success; parse the package version out of the return
# information, from a string like "Registering bzlib-0.5.0.4..."
lines = output.split('\n')
for line in lines:
if line.count('Registering ') and line.count(package_str):
_, name_version = line.split()
version_parts = name_version.split('-')
raw_version = ''.join(version_parts[1:])
version = raw_version[:-3]
# Now store the results
ret['changes'][name] = version
# Drop out
break
else:
# Failure gather the results and report them
ret['result'] = False
ret['comment'] = res['stderr']
return ret
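# Hedged usage sketch: as a Salt execution module this is normally invoked
# through the module loader or the CLI, roughly:
#   salt '*' cabal.install bzlib version=0.5.0.4 refresh=True user=builder
# On success ret['changes'] maps the package name to the version parsed from
# the "Registering <name>-<version>..." line.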
def uninstall(name, version, user=None):
"""
Uninstall the given cabal package: unregister it with ghc-pkg and remove
its library files from the cabal lib directory.
"""
# Make sure we have cabal installed
_check_cabal_bin()
# Make sure we have proper return
ret = {
'result' : True,
'comment' : '',
'changes' : {}
}
# Get the GHC version
res = __salt__['cmd.run_all']('ghc --numeric-version', runas=user)
ghc_version = res['stdout']
# Setup our changes dict to store what havoc we are about to wreak on cabal
ret['changes'] = {
'unregistered' : [],
'removed' : []
}
# Un-register the package
name_version = '%s-%s' % (name, version)
args = (ghc_version, name_version)
res = __salt__['cmd.run_all']('ghc-pkg-%s unregister %s' % args,
runas=user)
# Handle errors
if 0 != res['retcode']:
ret['result'] = False
ret['comment'] = 'ghc-unregister failed: %s' % res['stderr']
return ret
# Record removal
ret['changes']['unregistered'].append('ghc-unregister %s' % name_version)
# Remove library files
if user:
user_dir = os.path.expanduser('~' + user)
else:
user_dir = '/root'
pkg_dir = os.path.join(user_dir, '.cabal', 'lib', name_version)
version_dir = os.path.join(pkg_dir, 'ghc-%s' % ghc_version)
shutil.rmtree(version_dir)
ret['changes']['removed'].append('removed directory: ' + version_dir)
# Clear out possibly empty directory
if len(os.listdir(pkg_dir)) == 0:
shutil.rmtree(pkg_dir)
ret['changes']['removed'].append('removed directory: ' + pkg_dir)
# We are done
ret['comment'] += 'Un-installed %s. ' % name_version
return ret
``` |
{
"source": "jlisee/workmuch",
"score": 2
} |
#### File: jlisee/workmuch/timeutil.py
```python
import os
import ctypes
import math
import time as _time
# Begin nanosleep code
# Under the ZPL, Please see the full text: http://www.zope.org/Resources/ZPL
# Changes: by Joseph Lisee on Jan 20, 2008
try:
# Linux
try:
_libc = ctypes.CDLL("libc.so.6")
except OSError:
_libc = None
if _libc is None:
# MAC OS-X
try:
_libc = ctypes.CDLL("libc.dylib", ctypes.RTLD_GLOBAL)
except OSError:
raise ImportError
# Define the timespec structure in python
class _TIMESPEC(ctypes.Structure):
_fields_ = [('secs', ctypes.c_long),
('nsecs', ctypes.c_long),
]
_libc.nanosleep.argtypes = \
[ctypes.POINTER(_TIMESPEC), ctypes.POINTER(_TIMESPEC)]
def nanosleep(sec, nsec):
sleeptime = _TIMESPEC()
sleeptime.secs = sec
sleeptime.nsecs = nsec
remaining = _TIMESPEC()
_libc.nanosleep(sleeptime, remaining)
return (remaining.secs, remaining.nsecs)
except ImportError:
# if ctypes is not available or no reasonable library is found we provide
# a dummy which uses time.sleep
def nanosleep(sec, nsec):
_time.sleep(sec + (nsec * 0.000000001))
# End nanosleep code
def sleep(seconds):
"""
Sleeps the current thread the given number of seconds using nanosleep
@type seconds: float
@param seconds: The number of seconds to sleep
"""
# Round down to our seconds
secs = math.floor(float(seconds))
# Convert the remainder to nano seconds
nsecs = (seconds - secs) * 1e9
nanosleep(long(secs), long(nsecs))
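# Illustrative split performed by sleep(), assuming the ctypes nanosleep
# path is available:
#   sleep(1.25)  ->  nanosleep(1, 250000000)
#   sleep(0.5)   ->  nanosleep(0, 500000000)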
def time():
"""
Returns the time since program start
Due to some odd platform differences different time module functions
have different accuracies, on different platforms. The function takes
that into account.
@rtype: double
@return: Seconds since program start
"""
# This is most accurate on Linux and Mac
if 'posix' == os.name:
return _time.time()
# This one on Windows
else:
return _time.clock()
``` |
{
"source": "jlisee/xpkg",
"score": 3
} |
#### File: python/xpkg/core.py
```python
import json
import os
import tarfile
from collections import defaultdict
# Project Imports
from xpkg import build
from xpkg import linux
from xpkg import util
from xpkg import paths
xpkg_root_var = 'XPKG_ROOT'
xpkg_tree_var = 'XPKG_TREE'
xpkg_repo_var = 'XPKG_REPO'
xpkg_local_cache_var = 'XPKG_LOCAL_CACHE'
def parse_dependency(value):
"""
Basic support for version expressions. Right now it just parses
mypackage==1.0.0 -> ('mypackage', '1.0.0')
mypackage -> ('mypackage', None)
"""
# Split into parts
parts = value.split('==')
# We always have name
name = parts[0]
# Pull out the version, or report an error
if len(parts) == 1:
version = None
elif len(parts) == 2:
version = parts[1]
else:
raise Exception('Invalid package expression: "%s"' % value)
return (name, version)
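# Examples, following the docstring above:
#   parse_dependency('mypackage==1.0.0')  ->  ('mypackage', '1.0.0')
#   parse_dependency('mypackage')         ->  ('mypackage', None)
# Anything with more than one '==' raises the module's Exception.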
class Exception(BaseException):
pass
class InstallDatabase(object):
"""
Manages the on disk database of packages.
"""
def __init__(self, env_dir):
# Package db location
self._db_dir = self.db_dir(env_dir)
self._db_path = os.path.join(self._db_dir, 'db.yml')
# Create package database if it doesn't exist
if not os.path.exists(self._db_path):
self._create_db()
# Load database
self._load_db()
def _create_db(self):
"""
Create database
"""
# Create directory
if not os.path.exists(self._db_dir):
os.makedirs(self._db_dir)
# Create empty db file if needed
if not os.path.exists(self._db_path):
with open(self._db_path, 'w') as f:
f.write('')
def _load_db(self):
"""
Load DB from disk.
"""
self._db = util.yaml_load(open(self._db_path))
# Handle the empty database case
if self._db is None:
self._db = {}
# Build a list of directories and the counts of package that reference
# them
self._gen_dir_counts()
def _save_db(self):
"""
Save DB to disk.
"""
with open(self._db_path, 'w') as f:
util.yaml_dump(self._db, f)
def _gen_dir_counts(self):
"""
Generates reference counts of directories, that can be used to see
if a package is the last one using that directory.
"""
self._dirs = defaultdict(int)
for data in self._db.itervalues():
for d in data['dirs']:
self._dirs[d] += 1
def mark_installed(self, name, info):
"""
Marks the current package installed
"""
# Mark package with the current installed version
self._db[name] = info
# Save the data to disk
self._save_db()
def mark_removed(self, name):
"""
Marks the current package installed
"""
# Mark package with the current installed version
del self._db[name]
# Save the data to disk
self._save_db()
def iter_packages(self):
"""
Returns an iterator of (name, info) pairs
"""
for k in self._db.iteritems():
yield k
def get_info(self, name):
"""
Return the information on the installed package, returns None if it
doesn't exist.
"""
return self._db.get(name, None)
def installed(self, name, version=None):
"""
Returns true if the given package is installed, supplying no version
will return true if any version is installed.
"""
info = self.get_info(name)
if info:
if version:
return version == info.get('version', None)
else:
return True
else:
return False
def get_rdepends(self, name):
"""
Get all the packages which depend on this package
"""
rdepends = []
for pkg_name, info in self._db.iteritems():
deps = info.get('dependencies', [])
for dep in deps:
dep_name, version = parse_dependency(dep)
if dep_name == name:
rdepends.append(pkg_name)
return rdepends
def dir_references(self, d):
"""
Returns how many packages are using this directory.
"""
return self._dirs[d]
@staticmethod
def db_dir(root):
"""
Returns the db directory relative to the given root.
"""
return os.path.join(root, 'var', 'xpkg')
class Settings(object):
"""
Settings for the current environment.
TODO: add versioning to the on disk format
"""
def __init__(self, path):
"""
Create settings object with the stored settings from the given path.
"""
# Load the settings data if the file exists
if os.path.exists(path):
settings_data = util.yaml_load(open(path))
else:
settings_data = None
# Lookup data based on the presence of the configuration data
if settings_data is None:
toolset_dict = None
self.name = 'none'
else:
toolset_dict = settings_data.get('toolset', None)
self.name = settings_data.get('name', 'unknown')
# Create toolset if possible otherwise get the default
if toolset_dict is None:
self.toolset = build.Toolset.lookup_by_name(build.DefaultToolsetName)
else:
self.toolset = build.Toolset.create_from_dict(toolset_dict)
class Environment(object):
"""
This class manages the local package environment.
"""
SETTINGS_PATH = os.path.join('var', 'xpkg', 'env.yml')
@staticmethod
def init(env_dir, name, toolset_name=None):
"""
Initialize the environment in the given directory.
"""
# Bail out with an error if the environment already exists
if Environment.env_exists(env_dir):
raise Exception('Environment already exists in: %s' % env_dir)
# Create the empty db file (this triggers database file creation)
pdb = InstallDatabase(env_dir)
# Make sure we have a valid ld.so symlink
linux.update_ld_so_symlink(env_dir)
# Lookup our toolset and translate to dict
toolset = build.Toolset.lookup_by_name(toolset_name)
# Create our settings dict and write it disk
settings = {
'name' : name,
'toolset' : toolset.to_dict(),
}
# For path to our settings files, and save it
settings_path = os.path.join(env_dir, Environment.SETTINGS_PATH)
with open(settings_path, 'w') as f:
util.yaml_dump(settings, f)
def __init__(self, env_dir=None, create=False, tree_path=None,
repo_path=None, verbose=False):
"""
env_dir - path to the environment dir
create - create the environment if it does exist
tree_path - URL for a XPD tree
repo_path - URL for a XPA package archive
verbose - print all build commands to screen
"""
if env_dir is None:
if xpkg_root_var in os.environ:
self._env_dir = os.environ[xpkg_root_var]
else:
raise Exception("No XPKG_ROOT not defined, can't find environment")
else:
self._env_dir = env_dir
self.root = self._env_dir
self.verbose = verbose
# Error out if we are not creating an environment and this one does
# not exist
if not self.env_exists(self._env_dir) and not create:
raise Exception('No Xpkg environment found in "%s"' % self._env_dir)
# Create environment if needed
if not self.env_exists(self._env_dir) and create:
self.init(self._env_dir, 'default', build.DefaultToolsetName)
# If needed this will setup the empty environment
self._pdb = InstallDatabase(self._env_dir)
# Load the settings
settings = Settings(self.env_settings_path(self._env_dir))
self.name = settings.name
self.toolset = settings.toolset
def get_paths(base_path, env_var):
"""
Parse class argument and environment variables to get path.
"""
# Get the raw path from our given value, or the environment variable
raw_path = None
if base_path:
raw_path = base_path
elif env_var in os.environ:
raw_path = os.environ[env_var]
else:
raw_path = None
# Turn that raw path into a list
if raw_path:
paths = raw_path.split(':')
else:
paths = []
return paths
# Setup the package tree to either load from the given path or return
# no packages
self.tree_paths = get_paths(tree_path, xpkg_tree_var)
if len(self.tree_paths) == 1:
self._tree = FilePackageTree(self.tree_paths[0])
elif len(self.tree_paths) > 0:
trees = [FilePackageTree(t) for t in self.tree_paths]
self._tree = CombinePackageSource(trees)
else:
self._tree = EmptyPackageSource()
# Setup the package repository so we can install pre-compiled packages
self.repo_paths = get_paths(repo_path, xpkg_repo_var)
if len(self.repo_paths) == 1:
self._repo = FilePackageRepo(self.repo_paths[0])
elif len(self.repo_paths) > 0:
repos = [FilePackageRepo(t) for t in self.repo_paths]
self._repo = CombinePackageSource(repos)
else:
self._repo = EmptyPackageSource()
# Make sure the package cache is created
self._xpa_cache_dir = self.xpa_cache_dir(self._env_dir)
util.ensure_dir(self._xpa_cache_dir)
def install(self, input_val):
"""
Installs the desired input; this can be any of the following:
path/to/description/package.xpd
path/to/binary/package.xpa
package
package==version
"""
# Check to make sure the install is allowed
self._install_check(input_val)
# Install our input
if input_val.endswith('.xpa'):
# We have a binary package so install it
self._install_xpa(input_val)
elif input_val.endswith('.xpd'):
# Path is an xpd file; load it and then install
xpd = XPD(input_val)
self._install_xpd(xpd)
else:
# The input_val is a package name so parse out the desired version
# and name
name, version = self._parse_install_input(input_val)
# First try and find the xpa (pre-compiled) version of the package
xpa = self._repo.lookup(name, version)
if xpa:
# Install the XPA
self._install_xpa(xpa)
else:
# No binary package found, so let's try to find a description in
# the package tree
xpd_data = self._tree.lookup(name, version)
if xpd_data is None:
msg = "Cannot find description for package: %s" % input_val
raise Exception(msg)
# Install the XPD
self._install_xpd(xpd_data)
def build_xpd(self, xpd, dest_path, verbose=False):
"""
Builds the given package from its package description (XPD) data.
Returns the path to the package.
"""
# Determine if we are doing a verbose build
verbose_build = verbose or self.verbose
# Make sure all dependencies are properly installed
self._install_deps(xpd, build=True)
# Build the package and return the path
builder = build.BinaryPackageBuilder(xpd)
res = builder.build(dest_path, environment=self,
output_to_file=not verbose_build)
return res
def _install_xpd(self, xpd, build_into_env=False):
"""
Builds package and directly installs it into the given environment.
xpd - an XPD describing the package to install.
"""
# Make sure all dependencies are properly installed
self._install_deps(xpd)
if not build_into_env:
# Build the package as XPD and place it into our cache
print 'BUILDING(XPD): %s-%s' % (xpd.name, xpd.version)
xpa_paths = self.build_xpd(xpd, self._xpa_cache_dir)
# Now install from the xpa package(s) in our cache
for xpa_path in xpa_paths:
print 'INSTALLING(XPD from XPA): %s' % xpa_path
self._install_xpa(xpa_path)
else:
# Build the package(s) and install directly into our environment
builder = build.PackageBuilder(xpd)
infos = builder.build(self._env_dir, environment=self,
output_to_file=not self.verbose)
for info in infos:
self._mark_installed(info['name'], info)
def _install_xpa(self, path):
"""
Install the given binary Xpkg package.
"""
# Open up the package
if isinstance(path, XPA):
xpa = path
else:
xpa = XPA(path)
# Grab the meta data
info = xpa.info
# Make sure all dependencies are properly installed
self._install_deps(xpa)
print 'INSTALLING(XPA): %s-%s' % (info['name'], info['version'])
# Install the files into the target environment location
xpa.install(self._env_dir)
# Mark the package install
self._mark_installed(info['name'], info)
def _install_deps(self, info, build=False):
"""
Makes sure all the dependencies for the given package are properly
installed.
The object should have a property 'dependencies' which is a list of the
following form: ['package', 'package==1.2.2']
TODO: handle proper version checks someday
"""
# Get the full dep list based on whether we need the build dependencies
deps = info.dependencies
if build:
# Resolve all the build dependencies
build_deps = self._resolve_build_deps(info.build_dependencies)
deps = info.dependencies + build_deps
# Install or report a version conflict for each dependency as needed
for dep in deps:
# Parse the name and version out of the dependency expression
depname, version = self._parse_install_input(dep)
# Check whether the package is installed
installed, version_match = self._is_package_installed(depname, version)
if not installed:
# Not installed so install the package
self.install(dep)
elif installed and not version_match:
# Installed but we have the wrong version, so lookup the current
# package version and throw an error
current_version = self._pdb.get_info(depname)['version']
args = (info.name, info.version, depname, current_version,
version)
msg = '%s-%s requires package %s at version: %s, but: %s ' \
'is installed'
raise Exception(msg % args)
def _install_check(self, input_val):
"""
Checks for the package already being installed, or if there is a
conflicting version installed.
"""
# Get all the different packages that could be in an input (an XPD can
# describe multiple packages)
package_infos = self._load_package_info(input_val)
for name, version in package_infos:
# Check to see if we already have a version of that package
# installed and if so what version
installed, version_match = self._is_package_installed(name, version)
if installed:
current_version = self._pdb.get_info(name)['version']
# Bail out if we already have the package installed, or we already
# have a different version installed
if installed and version_match:
args = (name, current_version)
raise Exception('Package %s already at version: %s' % args)
elif installed:
args = (name, current_version, version)
msg = 'Package %s already at version: %s conflicts with: %s'
raise Exception(msg % args)
def _load_package_info(self, input_val):
"""
Gets all the package info based on the input value, which can be an
the path to a XPD, or XPA file, or package==version string.
"""
# Get all name version pairs from the input
packages = []
if input_val.endswith('.xpa'):
# Grab the name out of the XPA metadata
xpa = XPA(input_val)
name = xpa.info['name']
version = xpa.info['version']
packages.append((name, version))
elif input_val.endswith('.xpd'):
# Path is an xpd file; load it and then install
xpd_data = util.load_xpd(input_val)
# Check for all those package combinations
if 'packages' in xpd_data:
for name, data in xpd_data['packages'].iteritems():
# Default to main version if one doesn't exist
if data:
version = data.get('version', xpd_data['version'])
else:
version = xpd_data['version']
packages.append((name, version))
else:
packages.append((xpd_data['name'], xpd_data['version']))
else:
# The input_val must be a package name, so parse out the desired
# name and version
name, version = self._parse_install_input(input_val)
packages.append((name, version))
return packages
def _is_package_installed(self, name, version):
"""
Returns a tuple saying whether the package is installed, and if so
it's the proper version, example:
(installed, version_match, pkgname, version)
"""
installed = self._pdb.installed(name)
version_match = self._pdb.installed(name, version)
return (installed, version_match)
def _resolve_build_deps(self, build_deps):
"""
Uses the current toolset to resolve build dependencies. Any build
dependency started with "tl:" is resolved using the current toolset.
"""
# Use the toolset to resolve all of our deps
final_deps = []
for dep in build_deps:
if dep.startswith('tl:'):
# Use the toolset to translate the dep
new_dep = self.toolset.lookup_build_dep(dep[3:])
else:
# Not a toolset dep just include it directly
new_dep = dep
# Only include if we have a valid dep
if len(new_dep):
final_deps.append(new_dep)
return final_deps
def _mark_installed(self, name, info):
"""
Marks the package installed and updates the so link as needed.
"""
self._pdb.mark_installed(info['name'], info)
linux.update_ld_so_symlink(self._env_dir)
def remove(self, name):
"""
Removes the given package from the environment.
"""
# Determine if another package depends on this one
rdepends = self._pdb.get_rdepends(name)
if len(rdepends) > 0:
args = (name, ', '.join(rdepends))
raise Exception("Can't remove %s required by: %s" % args)
# Remove all the files from the db
info = self._pdb.get_info(name)
if info:
# First we remove the files
for f in sorted(info['files']):
full_path = os.path.join(self._env_dir, f)
# We use lexists to test for existence here, because we don't
# want to de-reference symbolic links, we want to know if the
# link file itself is present.
if os.path.lexists(full_path):
os.remove(full_path)
else:
# TODO: Log a warning here
print 'WARNING: package %s file not found: %s' % (name, full_path)
# Now remove the directories (reverse so we remove the deeper,
# dirs first)
# TODO: don't try to remove directories that are owned by other
# packages
for d in sorted(info['dirs'], reverse=True):
full_path = os.path.join(self._env_dir, d)
# We use lexists to test for existence here, because we don't
# want to de-reference symbolic links, we want to know if the
# link file itself is present.
if os.path.lexists(full_path):
if len(os.listdir(full_path)) == 0:
os.rmdir(full_path)
elif self._pdb.dir_references(d) == 1:
# Only warn when we are the last package referencing this dir
print 'WARNING: not removing dir, has files:',full_path
else:
# TODO: Log a warning here
print 'WARNING: package %s directory not found: %s' % (name, full_path)
# Remove the package from the database
self._pdb.mark_removed(name)
# Update the ld.so as needed
linux.update_ld_so_symlink(self._env_dir)
else:
print 'Package %s not installed.' % name
def jump(self, program='bash', args=[]):
"""
Jump into the desired environment
"""
# Setup the environment variables
self.apply_env_variables()
# Set up the PS1 (this doesn't work)
os.environ['PS1'] = '(xpkg:%s) \u@\h:\w\$' % self.name
# Step into shell
os.execvp(program, [program] + args)
def get_env_variables(self):
"""
TODO: make this pluggable so we can easily port this to multiple
platforms.
"""
# Set our path vars, defining different separators based on whether they
# are path lists or lists of compiler flags
cflags = '-I%s' % os.path.join(self._env_dir, 'include')
# Get our list of library directories
lib_dirs = [os.path.join(self._env_dir, 'lib')]
if util.is_64bit():
lib_dirs.append(os.path.join(self._env_dir, 'lib64'))
# For our LDFLAGS and LD_LIBRARY_PATH variables
ldflags = ' '.join(['-L%s' % l for l in lib_dirs])
ld_library_path = os.pathsep.join(lib_dirs)
# Default list of bin paths
bin_paths = [os.path.join(self._env_dir, 'bin')]
# Extra directories which we want on the path if they exist
extra_bin_dirs = ['usr/bin', 'usr/sbin', 'sbin']
for d in extra_bin_dirs:
full_path = os.path.join(self._env_dir, d)
if os.path.exists(full_path):
bin_paths.append(full_path)
env_paths = {
'PATH' : (os.pathsep.join(bin_paths), os.pathsep),
'LD_LIBRARY_PATH' : (ld_library_path, os.pathsep),
'CFLAGS' : (cflags, ' '),
'CCFLAGS' : (cflags, ' '),
'CPPFLAGS' : (cflags, ' '),
'LDFLAGS' : (ldflags, ' '),
}
return env_paths
def get_toolset_env_info(self):
subs = {'LD_SO_PATH' : paths.ld_linux_path(self._env_dir)}
return self.toolset.get_env_var_info(subs)
def apply_env_variables(self, overwrite=False):
"""
Change the current environment variables so that we can use the things
are in that environment.
overwrite - over write local environment variables, try to limit the
effect of other things installed on the system.
"""
env_paths = self.get_env_variables()
# Place the paths into our environment
for varname, pathinfo in env_paths.iteritems():
varpath, sep = pathinfo
cur_var = os.environ.get(varname, None)
if cur_var and not overwrite:
os.environ[varname] = varpath + sep + cur_var
else:
os.environ[varname] = varpath
# Setup the Xpkg path
os.environ[xpkg_root_var] = self._env_dir
# Apply toolset environment variables
# TODO: only use this sub on linux
subs = {'LD_SO_PATH' : paths.ld_linux_path(self._env_dir)}
self.toolset.apply_env_vars(subs)
def _parse_install_input(self, value):
"""
Basic support for version based installs. Right now it just parses
mypackage==1.0.0 -> ('mypackage', '1.0.0')
mypackage -> ('mypackage', None)
"""
return parse_dependency(value)
@staticmethod
def env_exists(env_dir):
"""
Returns true if the environment has been setup.
"""
return os.path.exists(Environment.env_settings_path(env_dir))
@staticmethod
def env_settings_path(env_dir):
"""
Full path to the settings dir.
"""
return os.path.join(env_dir, Environment.SETTINGS_PATH)
@staticmethod
def xpa_cache_dir(root):
"""
The directory we hold current built packages.
"""
return os.path.join(root, 'var', 'xpkg', 'cache')
@staticmethod
def local_cache_dir():
"""
Local user cache directory.
"""
if xpkg_local_cache_var in os.environ:
return os.environ[xpkg_local_cache_var]
else:
return os.path.expanduser(os.path.join('~', '.xpkg', 'cache'))
@staticmethod
def log_dir(root):
"""
The directory we place build logs
"""
return os.path.join(root, 'var', 'xpkg', 'log')
class XPA(object):
"""
Represents a package archive. The xpkg.yml format is:
{
'name' : 'hello',
'version' : '1.0.0',
'description' : 'My hello world package',
'dependencies' : ['libgreet'],
'dirs' : [
'bin'
],
'files' : [
'bin/hello'
],
'install_path_offsets' : {
'install_dir' : '/tmp/install-list',
'binary_files' : {
'bin/hello' : [12947, 57290]
},
'sub_binary_files' : {
'bin/hello' : [[1000, 1050], [7562, 7590, 7610]]
},
'text_files' : {
'share/hello/msg.txt' : [5, 100]
}
}
}
"""
def __init__(self, xpa_path, input_name=None, info=None):
"""
Parses the metadata out of the XPA file.
"""
# Ensure that the package exists before we open it
if not os.path.exists(xpa_path):
args = (input_name, xpa_path)
msg = 'XPA path for package "%s" does not exist: "%s"' % args
raise Exception(msg)
# Only save the XPA path so we don't keep the tarfile itself open
self._xpa_path = xpa_path
# If not given the manifest info, read it out of the XPA
if info is None:
# Read the manifest out of the XPA
self.info = self._read_info(xpa_path)
else:
self.info = info
self.name = self.info['name']
self.version = self.info['version']
self.dependencies = self.info.get('dependencies', [])
# We have no build deps, because we're already built, but we want to
# maintain a similar interface
self.build_dependencies = []
def install(self, path):
"""
Extract all the files in the package to the destination directory.
"""
# Extract all the files
with tarfile.open(self._xpa_path) as tar:
file_tar = tar.extractfile('files.tar.gz')
with tarfile.open(fileobj = file_tar) as file_tar:
file_tar.extractall(path)
# Fix up the install paths
self._fix_install_paths(path)
def _read_info(self, xpa_path):
"""
Read the manifest data out of the xpa_path.
"""
with tarfile.open(xpa_path) as tar:
# Pull out and parse the metadata
return util.yaml_load(tar.extractfile('xpkg.yml'))
def _fix_install_paths(self, dest_path):
"""
Given the package info go in and replace all occurrences of the original
install path with the new install path.
"""
offset_info = self.info['install_path_offsets']
# Make sure the type is a string, in case it became unicode somehow
# TODO: see if our caching layer is giving us unicode strings
install_dir = str(offset_info['install_dir'])
# Make sure we have enough space in binary files to replace the string
install_len = len(install_dir)
dest_len = len(dest_path)
if install_len < dest_len:
args = (dest_path, install_len)
msg = 'Destination path "%s" exceeds length limit of %d'
raise Exception(msg % args)
# Helper function for replacement
def replace_env_in_files(files, old, new, len_check=False,
replace=None):
"""
Read the full file, do the replace then write it out
len_check - when true it makes sure the file length hasn't changed;
this is important for binary files.
replace - an optional external function to use for replacement,
passed the file_path, contents, old, and new string.
"""
for file_path in files:
full_path = os.path.join(dest_path, file_path)
contents = open(full_path).read()
if replace:
results = replace(file_path, contents, old, new)
else:
results = contents.replace(old, new)
# Check to make sure the length hasn't changed
if len_check:
len_contents = len(contents)
len_results = len(results)
args = (len_contents, len_results)
msg = 'Len changed from %d to %d' % args
assert len_contents == len_results, msg
# Write out the final results
with open(full_path, 'w') as f:
f.write(results)
# Do a simple find and replace in all text files
replace_env_in_files(files = offset_info['text_files'],
old = install_dir,
new = dest_path)
# Create a null padded replacement string for complete instances of
# null binary strings only.
null_install_dir = install_dir + '\0'
null_install_len = len(null_install_dir)
padded_env = dest_path + ('\0' * (null_install_len - dest_len))
assert(len(padded_env) == len(null_install_dir))
# For binary replaces find and replace with a null padded string
replace_env_in_files(files = offset_info['binary_files'],
old = null_install_dir,
new = padded_env,
len_check = True)
# Define a function to do our binary substring replacements
def binary_sub_replace(file_path, contents, old, new):
"""
This is not very efficient at all, but it does the job for now.
"""
assert old == install_dir, "install dir not string to replace"
assert new == dest_path, "dest path not replacement string"
offsets = offset_info['sub_binary_files'][file_path]
for offset_list in offsets:
# Get the start of all our install strings and the location
# of the null terminator
first_offset = offset_list[0]
null_offset = offset_list[-1]
# Grab the original string
input_str = contents[first_offset:null_offset]
# Find and replace all the install strings
output_str = input_str.replace(install_dir, dest_path)
# Length of string we are editing
initial_len = len(input_str)
# Length of the string we are replacing it with
replace_len = len(output_str)
# Build a full replacement string null padding to make up the
# difference
replacer = output_str + ('\0' * (initial_len - replace_len))
# Now lets replace that
results = contents[0:first_offset] + replacer + contents[null_offset:]
# Make sure we haven't affected the length before moving on
assert len(contents) == len(results)
contents = results
return contents
# Do our binary substring replacements
replace_env_in_files(files = offset_info['sub_binary_files'],
old = install_dir,
new = dest_path,
len_check = True,
replace=binary_sub_replace)
class XPD(object):
"""
A Xpkg description file, it explains how to build one or more packages.
"""
def __init__(self, path, data=None):
"""
Load and parse the given XPD
"""
# Save path
self.path = path
# Load our data
if data is None:
self._data = util.load_xpd(path)
else:
self._data = data
# Read fields and define properties
self.name = self._data['name']
self.version = self._data['version']
self.dependencies = self._data.get('dependencies', [])
self.build_dependencies = self._data.get('build-dependencies', [])
self.description = self._data.get('description', '')
def packages(self):
"""
Return a list of all the packages in this file, each item contains:
{
'name' : 'package-name',
'version' : '1.2.4',
'description' : 'My awesome package',
'dirs' : ['dir'],
'files' : ['dir/a'],
'dependencies' : ['another-pkg'],
}
"""
results = []
if 'packages' in self._data:
results = self._get_multi_packages()
else:
results.append({
'name' : self.name,
'version' : self.version,
'description' : self.description,
'files' : [],
'dependencies' : self.dependencies,
})
return results
def _get_multi_packages(self):
"""
Get the package info for each sub package, sorted in an order such that
each package comes after the packages it depends on within this XPD.
"""
# Get all the internal packages
packages = self._data['packages']
pkg_names = set(packages.keys())
# Build a graph of the dependencies amongst the packages in this XPD
dep_graph = {}
for name, data in self._data['packages'].iteritems():
if data:
for dep in data.get('dependencies', []):
if dep in pkg_names:
dep_graph.setdefault(name, []).append(dep)
else:
dep_graph[name] = []
# Topologically sort them so we start with the package that has no
# dependencies
sorted_names = sorted(util.topological_sort(dep_graph))
# Produce the package data in sorted form
results = []
for pkg_name in sorted_names:
pkg_data = packages.get(pkg_name)
if pkg_data is None:
pkg_data = {}
# Look up the version and dependencies for this package, but fall
# back to the full package's values
results.append({
'name' : pkg_name,
'version' : pkg_data.get('version', self.version),
'description' : pkg_data.get('description', self.description),
'dirs' : pkg_data.get('dirs', []),
'files' : pkg_data.get('files', []),
'dependencies' : pkg_data.get('dependencies', self.dependencies),
})
return results
class EmptyPackageSource(object):
"""
A source of package descriptions or binary packages with nothing in it.
"""
def lookup(self, package, version=None):
return None
class CombinePackageSource(object):
"""
A simple way to query multiple package sources (trees, or repos).
"""
def __init__(self, sources):
self._sources = sources
def lookup(self, package, version=None):
"""
Get the most recent version of the package in any source, or the
version specified if it exists in any.
"""
if version:
# We have a version so search our trees in order until we find it
for source in self._sources:
result = source.lookup(package, version)
# Bail out if we have found the package
if result:
break
else:
# With no version we grab all versions of the package then get the
# most recent
# Grab all the package versions
pkgs = []
for source in self._sources:
result = source.lookup(package)
if result:
pkgs.append(result)
# If we have any packages sort by the version
if len(pkgs) > 0:
sorter = lambda a,b: util.compare_versions(a.version, b.version)
sorted_pkgs = sorted(pkgs, cmp=sorter)
# Get the data for the most recent version
result = sorted_pkgs[-1]
else:
result = None
return result
class FilePackageTree(object):
"""
Allows for named and versioned lookup of packages from a directory full of
descriptions.
"""
def __init__(self, path):
# Holds the package information
self._db = PackageDatabase()
# Make sure our path exists
if not os.path.exists(path):
raise Exception('Package tree path "%s" does not exist' % path)
# Create our cache
self._cache = FileParseCache(path, 'tree')
# Get information on all the descriptions found in the directory
for full_path in util.match_files(path, '*.xpd'):
self._load_xpd(full_path)
# Save cached info
self._cache.save_to_disk()
def lookup(self, package, version=None):
"""
Returns the xpd data for the desired package, None if the package is
not present.
"""
xpd_path = self._db.lookup(name=package, version=version)
if xpd_path:
result = XPD(xpd_path)
else:
result = None
return result
def _load_xpd(self, xpd_path):
"""
Loads the packages found in the given XPD
@todo - Handle erroneous input more robustly
"""
# Load the data through the cache
data = self._cache.load(xpd_path, lambda p: XPD(p)._data)
# Create the description
xpd = XPD(xpd_path, data=data)
# Store each package in for the description in our index
for package_data in xpd.packages():
# Read the version, defaulting to the full description version if there
# is none for this package
self._db.store(name=package_data['name'],
version=package_data['version'],
data=xpd_path)
class FilePackageRepo(object):
"""
Allows for named and versioned lookup of pre-built binary packages from a
directory full of them.
Caching the parse results as JSON is about 4 times faster than PyYAML
using the C loader.
"""
def __init__(self, path):
#print 'Build package repo from dir:',path
# Holds our information
self._db = PackageDatabase()
# Make sure our path exists
if not os.path.exists(path):
raise Exception('Package repo path "%s" does not exist' % path)
# Create our cache
cache = FileParseCache(path, 'repo')
# Get information on all the archives found in the directory
for full_path in util.match_files(path, '*.xpa'):
# Load the data through the cache
info = cache.load(full_path, lambda p: XPA(p).info)
xpa = XPA(full_path, info=info)
# Store the object in our repo
self._db.store(name=xpa.name, version=xpa.version, data=xpa)
# Save cached info
cache.save_to_disk()
def lookup(self, package, version=None):
"""
Returns the XPA representing the binary package; if it doesn't exist None is
returned.
"""
return self._db.lookup(name=package, version=version)
class FileParseCache(object):
"""
Cache for the tree and file parser. This takes advantage of the
speed of the JSON parser (and maybe some better future
optimized format)
"""
def __init__(self, path, name):
self._path = path
# Determine the path to our cache
cache_root = Environment.local_cache_dir()
hash_key = self._path + name
hash_file = 'md5-%s.json' % util.hash_string(hash_key)
self._cache_path = os.path.join(cache_root, name, hash_file)
# Load the cache from disk
self.load_from_disk()
def load(self, path, load_func):
"""
Loads data from a cache of this structure:
{
'full/path/to/repo/file.xpa' : {
'mtime' : 1339007845.0,
'data' : {
....
}
}
}
Arguments:
path - the file we are loading
load_func - takes path, returns dict we are caching
Returns the data for path, re-parsing it with load_func if the cache
entry is missing or stale.
"""
load = False
# Stat the desired file
mtime = os.stat(path).st_mtime
# Check for file in cache
if path in self._cache:
# If the current file is newer than the cache, load it
if mtime > self._cache[path]['mtime']:
load = True
else:
load = True
if load:
# Load data
data = load_func(path)
# Update the cache
self._cache[path] = {
'mtime' : mtime,
'data' : data,
}
else:
# Load from cache
data = self._cache[path]['data']
# Return the parsed data
return data
def load_from_disk(self):
"""
Load the cached JSON file.
"""
if os.path.exists(self._cache_path):
self._cache = json.load(open(self._cache_path))
else:
self._cache = {}
def save_to_disk(self):
"""
Saves XPA info manifests to JSON cache file.
"""
cache_dir, _ = os.path.split(self._cache_path)
util.ensure_dir(cache_dir)
with open(self._cache_path, 'w') as f:
json.dump(self._cache, f)
class PackageDatabase(object):
"""
Stores information about packages, right now just does version and name
look ups. Will eventually support more advanced queries.
"""
def __init__(self):
self._db = {}
def store(self, name, version, data):
"""
Stores the desired package data by name and version.
"""
self._db.setdefault(name, {})[version] = data
def lookup(self, name, version=None):
"""
Grabs the data for the specified package, returning either the specific
package, or the most recent version. If the version can't be found,
None is returned.
Currently the data is the path to the archive itself.
"""
# Get all versions of a package
versions = self._db.get(name, [])
res = None
if len(versions):
if version and (version in versions):
# Version specified and we have it
res = versions[version]
elif version is None:
# Sort the version data pairs
sorted_versions = sorted(
versions.items(),
cmp = lambda a,b: util.compare_versions(a[0], b[0]))
# Get the data for the most recent version
return sorted_versions[-1][1]
return res
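# Illustrative sketch of how the database is used (paths are made up):
#   db = PackageDatabase()
#   db.store('hello', '1.0.0', '/tree/hello-1.0.0.xpd')
#   db.store('hello', '1.2.0', '/tree/hello-1.2.0.xpd')
#   db.lookup('hello')           ->  '/tree/hello-1.2.0.xpd'  (most recent)
#   db.lookup('hello', '1.0.0')  ->  '/tree/hello-1.0.0.xpd'
#   db.lookup('missing')         ->  None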
``` |
{
"source": "jlittek/Anki-Vector",
"score": 3
} |
#### File: jlittek/Anki-Vector/Main.py
```python
import threading
import sys
import time
import anki_vector
import numpy as np
from random import randint
from cv2 import cv2
from anki_vector.util import distance_mm, speed_mmps, degrees, Angle, Pose
from anki_vector.events import Events
import math
def handle_object_observed(robot, event_type, event):
"""whenever the CustomMarker used for the goal comes into view the observed position becomes the new goal position
:param robot: instance of robot
:param event_type: default parameter
:param event: default parameter
"""
for obj in robot.world.visible_custom_objects:
if obj.custom_type == anki_vector.objects.CustomObjectTypes.CustomType00:
robot.goal_pose = obj.pose
def drive_for_search(robot):
"""Turn in place to look for the ball
:param robot: instance of the robot
"""
while True:
while robot.ball_not_found:
robot.motors.set_wheel_motors(-15,15)
time.sleep(randint(2,4))
def getMiddleOfElement_area(img, bildRGB):
"""analyze the "white spots" found in serach_ball
:param img: picture for analyzing
:param bildRGB: image for plotting the result on the screen
:return: whether the middle of the ball was found, its horizontal position, its area, and whether the robot is close to it
:rtype: bool, int, double, bool
"""
contours, hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
found_cont=False
for cnt in contours:
area = cv2.contourArea(cnt)
if area > 20:
if area > 3500:
print("BALLL")
return True, 640/2, area, True # Ball found and close to it
print(area)
try:
# Compute the middle of the area identified as the ball:
M = cv2.moments(cnt)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
cv2.circle(bildRGB, (cX, cY), 7, (255, 255, 255), -1)
return True, cX, area, False # Ball found, but not close enough
except:
pass
return False, 640/2, None, False # Ball not found
def change_direction(area, middle):
"""Turn towards the ball in order to drive straight to it.
Turn faster if the ball is at the border of the robots view, drive faster if the ball is far away
:param area: size from ball on captured image
:param middle: horizontal position from ball
"""
d = middle - 320
a = math.sqrt(50/area)/2
robot.motors.set_wheel_motors(80*d/320, -80*d/320)
robot.motors.set_wheel_motors(60*a+60, 60*a+60)
def search_ball(robot):
""" search ball on captured picture
:param robot: instance of robot
"""
print("searching ball")
# Counter how many camera images without finding the ball:
frames = 0
while True:
img = np.array(robot.camera.latest_image.raw_image)
bildRGB = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
bildBlur = cv2.GaussianBlur(bildRGB, (3,3), 1)
bildHSV = cv2.cvtColor(bildBlur, cv2.COLOR_BGR2HSV)
imgHSV = bildHSV
lower = np.array([0, 116, 148])
upper = np.array([30, 229, 255])
mask = cv2.inRange(imgHSV,lower,upper)
imgContour = img.copy()
success, middle, area, goal = getMiddleOfElement_area(mask, bildRGB)
# Show the camera image and the computed results from above:
cv2.namedWindow("Camera")
cv2.imshow("Camera", bildRGB)
cv2.namedWindow("Mask")
cv2.imshow("Mask", mask)
# Ball found?:
if success==True:
robot.ball_not_found = False
frames = 0
if robot.drivegoal==False:
robot.behavior.set_lift_height(1.0)
if goal==True and robot.drivegoal==False:
robot.behavior.set_lift_height(0.0)
robot.motors.stop_all_motors()
print("drive_to_goal")
robot.behavior.drive_straight(distance_mm(-150), speed_mmps(100))
print("I got the ball.")
x = robot.goal_pose.position.x-robot.pose.position.x
y = robot.pose.position.y
distance_to_goal = math.sqrt(x*x+y*y)
angle_to_goal = np.rad2deg(np.arcsin(x/distance_to_goal))
print("alpha:", angle_to_goal)
# Decide whether to turn clockwise or counterclockwise:
if y > 0:
robot.behavior.turn_in_place(degrees(-(90-angle_to_goal)), is_absolute=True)
else:
robot.behavior.turn_in_place(degrees((90-angle_to_goal)), is_absolute=True)
# Drive to the goal and check if it has been reached yet.
robot.motors.set_wheel_motors(100,100)
robot.drivegoal = True
drive_goal = threading.Thread(target=drive_to_goal, args=[robot, x, y])
drive_goal.start()
elif robot.drivegoal==False:
change_direction(area, middle)
else: # not found
frames = frames + 1
if(frames > 1): # Threshold to avoid false positives
robot.drivegoal = False
robot.ball_not_found = True
if cv2.waitKey(1) & 0xFF == ord('q'):
robot.disconnect()
sys.exit()
return False
def drive_to_goal(robot, x, y):
"""Check wether the robot is already at the goal. If so, stop, otherwise drive to goal
:param robot: instance of robot
:param x: vertical distance between goal and robot
:param y: horizontal distance between goal and robot
"""
while robot.drivegoal:
x = robot.goal_pose.position.x - robot.pose.position.x
y = robot.pose.position.y
if x < 50 and abs(y) < 50:
print("Goal")
robot.drivegoal = False
robot.disconnect()
sys.exit()
break
robot.motors.stop_all_motors()
return
def map(robot):
"""Map to track the robot's path during the game
:param robot: instance of robot
"""
map_height = 160*3
map_width = 100*3
blank_image = np.zeros(shape=[map_height, map_width, 3], dtype=np.uint8)
cv2.circle(blank_image, center=(150,map_height-15 *3), radius=4, color=(0, 255, 0), thickness=20) #Start
cv2.rectangle(blank_image,(40*3,0),(60*3,6),(255,0,0),12)
while True:
xcm = int(robot.pose.position.x/10)
ycm = int(robot.pose.position.y/10)
cv2.circle(blank_image, center=(150-ycm*3,map_height-(15*3+xcm*3)), radius=2, color=(0, 0, 255), thickness=2)
if(robot.ball_not_found):
cv2.putText(blank_image,"Ball not found ",(1,map_height-5),cv2.FONT_HERSHEY_SIMPLEX,0.4,(255,0,0))
else:
cv2.putText(blank_image,"Ball found ",(1,map_height-5),cv2.FONT_HERSHEY_SIMPLEX,0.4,(0,255,0))
cv2.namedWindow("map")
cv2.imshow("map", blank_image)
if cv2.waitKey(1) & 0xFF==ord('q'):
sys.exit()
def initialize():
"""Initialize the robot and the game constraints
:return: instance of Robot()
:rtype: robot
"""
robot = anki_vector.Robot()
robot.connect()
robot.camera.init_camera_feed()
robot.behavior.set_lift_height(0.0)
robot.behavior.set_head_angle(degrees(0))
robot.goal_pose = Pose(x=(160-15)*10, y=0, z=0, angle_z=anki_vector.util.Angle(degrees=0))
robot.events.subscribe(handle_object_observed, Events.object_observed)
robot.enable_custom_object_detection = True
robot.world.define_custom_wall(anki_vector.objects.CustomObjectTypes.CustomType00, anki_vector.objects.CustomObjectMarkers.Triangles5, width_mm=200.0, height_mm=300.0, marker_width_mm=170.0, marker_height_mm=170.0)
robot.behavior.say_text("I'm ready!")
robot.ball_not_found = True
robot.drivegoal = False
return robot
# Starting the robot and afterwards the game:
robot = initialize()
print("robot started")
# Starting the map:
initmap = threading.Thread(target=map, args=[robot])
initmap.start()
print("Map started")
# Starting searching Thread:
drive_around_thread = threading.Thread(target=drive_for_search, args=[robot])
drive_around_thread.start()
print("drive_around started")
search_ball(robot)
``` |
{
"source": "jlitzingerdev/mqttpacket",
"score": 2
} |
#### File: mqttpacket/v311/_builders.py
```python
import struct
from typing import Union # pylint: disable=unused-import
import attr
import six
from . import _constants
_CONNECT_REMAINING_LENGTH = 10
PROTOCOL_NAME = 'MQTT'.encode('utf-8')
def _check_none_or_text(_instance, attribute, value):
if value is not None and not isinstance(value, six.text_type):
raise TypeError('{} must be None or text'.format(attribute))
def _check_will_message(instance, _attribute, value):
if value is not None and instance.will_topic is None:
raise ValueError('Will topic must be set with will message')
def _check_will_topic(instance, _attribute, value):
if value is not None and instance.will_message is None:
raise ValueError('Will message must be set with will topic')
def _check_password(instance, _attribute, value):
if value is not None and instance.username is None:
raise ValueError('Password requires username.')
def _check_will_qos(instance, _attribute, value):
if value != 0x00 and instance.will_topic is None:
raise ValueError('Will QOS requires topic/message')
def encode_remainining_length(remaining_length):
# type: (int) -> bytes
"""Encode the remaining length for the packet.
:returns: Encoded remaining length
:rtype: bytes
"""
encoding = True
encoded_bytes = bytearray()
encoded_byte = 0
while encoding:
encoded_byte = remaining_length % 128
remaining_length //= 128
if remaining_length:
encoded_byte |= 0x80
else:
encoding = False
encoded_bytes.append(encoded_byte)
return bytes(encoded_bytes)
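# A few encodings implied by the algorithm above (see also the tests):
#   encode_remainining_length(0)      ->  b'\x00'
#   encode_remainining_length(127)    ->  b'\x7f'
#   encode_remainining_length(128)    ->  b'\x80\x01'
#   encode_remainining_length(16384)  ->  b'\x80\x80\x01'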
def encode_string(text):
"""Encode a string as per MQTT spec: two byte length, UTF-8 data"""
if not isinstance(text, six.text_type):
raise TypeError('text must be unicode')
encoded_text = text.encode('utf-8')
text_len = struct.pack('!H', len(encoded_text))
return b''.join([text_len, encoded_text])
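# Example: a two byte big-endian length followed by the UTF-8 data:
#   encode_string(u'MQTT')  ->  b'\x00\x04MQTT'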
@attr.s
class ConnectSpec(object):
"""
Data class for connection related options.
"""
username = attr.ib(
default=None,
validator=_check_none_or_text,
)
password = attr.ib(
default=None,
validator=[
_check_none_or_text,
_check_password,
],
)
will_topic = attr.ib(
default=None,
validator=[
_check_none_or_text,
_check_will_topic
],
)
will_message = attr.ib(
default=None,
validator=[
_check_none_or_text,
_check_will_message
]
)
will_qos = attr.ib(
default=0x00,
validator=[
attr.validators.in_(_constants.VALID_QOS),
_check_will_qos,
]
)
def flags(self):
"""Get the flags for this connect spec."""
flags = 0x02
if self.will_topic:
flags |= 0x04
flags |= (self.will_qos << 3)
if self.username:
flags |= 0x80
if self.password:
flags |= 0x40
return flags
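# Illustrative flag values (the clean session bit 0x02 is always set):
#   ConnectSpec().flags()                                               -> 0x02
#   ConnectSpec(username=u'u', password=u'p').flags()                   -> 0xc2
#   ConnectSpec(will_topic=u't', will_message=u'm', will_qos=1).flags() -> 0x0e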
def payload(self):
"""Return the encoded connect options."""
parts = []
if self.username:
parts.append(encode_string(self.username))
if self.password:
parts.append(encode_string(self.password))
if self.will_topic:
parts.append(encode_string(self.will_topic))
parts.append(encode_string(self.will_message))
return b''.join(parts)
def connect(client_id, keepalive=60, connect_spec=None):
"""Create a CONNECT packet
:param client_id: The id of the client.
:type client_id: unicode
:param keepalive: How long to keep the network alive, default
60s.
:type keepalive: int
:param connect_spec: The spec for this connection or None
:type connect_spec: mqttpacket.ConnectSpec
:returns: A connect packet.
:rtype: bytes
"""
remaining_length = 0
msg = six.int2byte(
(_constants.MQTT_PACKET_CONNECT << 4),
)
parts = [msg]
encoded_conn_spec = b''
if connect_spec is not None:
encoded_conn_spec = connect_spec.payload()
meta = struct.pack(
"!H4sBBH",
0x0004,
PROTOCOL_NAME,
_constants.PROTOCOL_LEVEL,
connect_spec.flags(),
keepalive,
)
remaining_length += len(encoded_conn_spec)
else:
meta = struct.pack(
"!H4sBBH",
0x0004,
PROTOCOL_NAME,
_constants.PROTOCOL_LEVEL,
0x02,
keepalive,
)
remaining_length += len(meta)
encoded_client_id = b''
if client_id:
encoded_client_id = encode_string(client_id)
remaining_length += len(encoded_client_id)
parts.append(encode_remainining_length(remaining_length))
parts.append(meta)
parts.append(encoded_client_id)
parts.append(encoded_conn_spec)
return b''.join(parts)
def pingreq():
"""
Create a PINGREQ packet.
"""
return b'\xc0\x00'
def _validate_qos(_instance, _attribute, value):
if not 0 <= value < 3:
raise ValueError('qos must be 0 <= qos < 3')
@attr.s(slots=True)
class SubscriptionSpec(object):
"""
A data class for a topicfilter qos pair.
"""
topicfilter = attr.ib(
validator=attr.validators.instance_of(six.text_type),
)
qos = attr.ib(
validator=_validate_qos,
)
_encoded = attr.ib(init=False)
def __attrs_post_init__(self):
self._encoded = self.topicfilter.encode('utf-8')
def remaining_len(self):
"""
Length for this spec.
"""
return 3 + len(self._encoded)
def to_bytes(self):
"""Encode this spec as bytes"""
return b''.join([
struct.pack('!H', len(self._encoded)),
self._encoded,
struct.pack('!B', self.qos)
])
def subscribe(packetid, topicspecs):
"""Create a subscribe packet.
:param packetid: The packet id to use (0 < packetid < 65535).
:param topicspecs: The list of SubscriptionSpec topic filter/QoS pairs.
"""
if not 0 < packetid < 65535:
raise ValueError('Packetid must be 0 < packetid < 65535')
remaining_len = 2 # packetid
for spec in topicspecs:
remaining_len += spec.remaining_len()
msg = six.int2byte(
(_constants.MQTT_PACKET_SUBSCRIBE << 4) | 0x02,
)
encoded_specs = [msg]
encoded_specs.append(encode_remainining_length(remaining_len))
encoded_specs.append(struct.pack('!H', packetid))
encoded_specs.extend(
[s.to_bytes() for s in topicspecs]
)
return b''.join(encoded_specs)
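# Illustrative packet, matching the single-subscription test in this repo:
#   subscribe(10, [SubscriptionSpec(u'test/1', 0x00)])
#     ->  b'\x82\x0b\x00\x0a\x00\x06test/1\x00'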
def disconnect():
# type: () -> bytes
"""Build a DISCONNECT packet."""
return struct.pack(
"!BB",
(_constants.MQTT_PACKET_DISCONNECT << 4),
0
)
def publish(topic, dup, qos, retain, payload, packet_id=None):
# type: (str, bool, int, bool, bytes, Union[None,int]) -> bytes
"""Build a PUBLISH packet.
"""
#remaining_len = (topiclen after encoding + 2) + (2 | 0 if packetid) + payload_len
if qos not in _constants.VALID_QOS:
raise ValueError('QoS must be 0, 1, or 2')
if not isinstance(topic, six.text_type):
raise ValueError('Topic must be a unicode string')
if qos > 0 and packet_id is None:
raise ValueError('QoS of 1 or 2 must have a packet id')
if qos == 0 and dup:
raise ValueError('Dup must not be set on QoS of 0')
if not isinstance(payload, bytes):
raise TypeError('Payload must be bytes')
remaining_len = len(payload)
encoded_packet_id = b''
if qos > 0:
remaining_len += _constants.PACKET_ID_LEN
encoded_packet_id = struct.pack('!H', packet_id)
encoded_topic = encode_string(topic)
remaining_len += len(encoded_topic)
rl = encode_remainining_length(remaining_len)
byte1 = _constants.MQTT_PACKET_PUBLISH << 4
byte1 |= (int(dup) << 3)
byte1 |= qos << 1
byte1 |= int(retain)
return b''.join((
six.int2byte(byte1),
rl,
encoded_topic,
encoded_packet_id,
payload
))
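# Illustrative packet (a retained QoS 0 publish with a made-up payload):
#   publish(u'test', False, 0, True, b'hello')
#     ->  b'\x31\x0b\x00\x04testhello'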
def unsubscribe(packet_id, topics):
# type: (int, List[str]) -> bytes
"""Build an UNSUBSCRIBE message for the specified topics."""
if not topics:
raise ValueError('At least one topic must be specified')
remaining_len = 2
encoded_packet_id = struct.pack('!H', packet_id)
encoded_topics = [encode_string(t) for t in topics]
for et in encoded_topics:
remaining_len += len(et)
parts = [six.int2byte((_constants.MQTT_PACKET_UNSUBSCRIBE << 4) | 0x1)]
parts.append(encode_remainining_length(remaining_len))
parts.append(encoded_packet_id)
parts.extend(encoded_topics)
return b''.join(parts)
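# Illustrative packet, matching the test in this repo:
#   unsubscribe(257, [u'a/b', u'c/d'])
#     ->  b'\xa1\x0c\x01\x01\x00\x03a/b\x00\x03c/d'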
```
#### File: mqttpacket/tests/test_building.py
```python
import binascii
import json
import six
import pytest
import mqttpacket.v311 as mqttpacket
def test_connect_basic():
"""
A connect packet with only a client id is properly constructed.
"""
expect = binascii.unhexlify(
b'101000044d5154540402003c000474657374'
)
packet = mqttpacket.connect(u'test')
assert packet == expect
assert isinstance(packet, bytes)
assert len(packet) == 18
assert six.indexbytes(packet, 0) == 16
assert six.indexbytes(packet, 9) == 0x02
assert packet[14:].decode('utf-8') == u'test'
def test_will_requirements():
"""
Will topic and will message must be set together.
"""
with pytest.raises(ValueError):
mqttpacket.ConnectSpec(
will_topic=u'foo',
)
with pytest.raises(ValueError):
mqttpacket.ConnectSpec(
will_message=u'my message',
)
def test_valid_will():
"""
A valid will topic/message spec sets flags and payload.
"""
cs = mqttpacket.ConnectSpec(
will_topic=u'my_will_topic',
will_message=u'my_will_message',
will_qos=1,
)
wt = u'my_will_topic'
wm = u'my_will_message'
assert cs.will_topic == wt
assert cs.will_message == wm
assert cs.flags() == 0x0e
assert len(cs.payload()) == 32
cs = mqttpacket.ConnectSpec(
will_topic=u'wt2',
will_message=u'wm2',
will_qos=2,
)
assert cs.will_topic == u'wt2'
assert cs.will_message == u'wm2'
assert cs.flags() == 0x16
def test_default_spec():
"""
A default spec has a remaining length of zero and
a clean session.
"""
cs = mqttpacket.ConnectSpec()
assert not cs.payload()
assert cs.flags() == 0x02
def test_will_must_be_unicode():
"""
Will topic and will message must be unicode.
"""
with pytest.raises(TypeError):
mqttpacket.ConnectSpec(
will_topic=b'foo',
will_message=u'bar'
)
with pytest.raises(TypeError):
mqttpacket.ConnectSpec(
will_topic=u'biz',
will_message=b'baz'
)
def test_will_qos_values():
"""
Will QOS can only be 0 - 2
"""
with pytest.raises(ValueError):
mqttpacket.ConnectSpec(
will_topic=u'biz',
will_message=u'baz',
will_qos=3
)
mqttpacket.ConnectSpec(
will_topic=u'my_will_topic',
will_message=u'my_will_message',
will_qos=1
)
mqttpacket.ConnectSpec(
will_topic=u'my_will_topic',
will_message=u'my_will_message',
will_qos=2
)
def test_connect_with_spec():
"""
A valid connect spec is properly encoded.
"""
cs = mqttpacket.ConnectSpec(
will_topic=u'my_will_topic',
will_message=u'my_will_message',
will_qos=1,
)
packet = mqttpacket.connect(u'test', connect_spec=cs)
assert isinstance(packet, bytes)
assert len(packet) == 50
assert six.indexbytes(packet, 0) == 16
assert six.indexbytes(packet, 9) == 0x0e
assert packet[14:18].decode('utf-8') == u'test'
def test_build_subscription_multiple():
"""
Multiple topic filters can be properly encoded.
This example is from the MQTT specification.
"""
specs = [
mqttpacket.SubscriptionSpec(u'a/b', 0x01),
mqttpacket.SubscriptionSpec(u'c/d', 0x02),
]
packet = mqttpacket.subscribe(10, specs)
assert isinstance(packet, bytes)
assert six.indexbytes(packet, 0) == 0x82
assert six.indexbytes(packet, 1) == 14
assert six.indexbytes(packet, 2) << 8 | six.indexbytes(packet, 3) == 10
assert six.indexbytes(packet, 4) << 8 | six.indexbytes(packet, 5) == 3
assert packet[6:9].decode('utf-8') == u'a/b'
assert six.indexbytes(packet, 9) == 0x01
assert six.indexbytes(packet, 10) << 8 | six.indexbytes(packet, 11) == 3
assert packet[12:15].decode('utf-8') == u'c/d'
assert six.indexbytes(packet, 15) == 0x02
def test_build_subscription_single():
"""
A single topic filter can be properly encoded.
"""
specs = [
mqttpacket.SubscriptionSpec(u'test/1', 0x00),
]
packet = mqttpacket.subscribe(10, specs)
assert isinstance(packet, bytes)
assert six.indexbytes(packet, 0) == 0x82
assert six.indexbytes(packet, 1) == 11
assert six.indexbytes(packet, 2) << 8 | six.indexbytes(packet, 3) == 10
assert six.indexbytes(packet, 4) << 8 | six.indexbytes(packet, 5) == 6
assert packet[6:12].decode('utf-8') == u'test/1'
assert six.indexbytes(packet, 12) == 0x00
def test_subscription_spec_multibyte():
"""
A topic with multibyte characters encoded as UTF uses
the encoded length.
"""
topic = u'super€'
spec = mqttpacket.SubscriptionSpec(
topic,
0
)
assert spec.remaining_len() == 11
assert spec.to_bytes() == b'\x00\x08\x73\x75\x70\x65\x72\xe2\x82\xac\x00'
def test_encode_single_byte_length():
"""
A length < 128 is encoded in a single byte.
"""
r = mqttpacket.encode_remainining_length(127)
assert r == b'\x7f'
r = mqttpacket.encode_remainining_length(0)
assert r == b'\x00'
def test_encode_two_byte_length():
"""
A length over 127 is encoded with two bytes.
"""
r = mqttpacket.encode_remainining_length(128)
assert r == b'\x80\x01'
r = mqttpacket.encode_remainining_length(16383)
assert r == b'\xff\x7f'
def test_encode_three_byte_length():
"""
A length over 16383 is encoded with three bytes.
"""
r = mqttpacket.encode_remainining_length(16384)
assert r == b'\x80\x80\x01'
r = mqttpacket.encode_remainining_length(2097151)
assert r == b'\xff\xff\x7f'
def test_encode_four_byte_length():
"""
A length over 2097151 is encoded with four bytes.
"""
r = mqttpacket.encode_remainining_length(2097152)
assert r == b'\x80\x80\x80\x01'
r = mqttpacket.encode_remainining_length(268435455)
assert r == b'\xff\xff\xff\x7f'
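# The remaining-length tests above exercise MQTT's variable-length scheme: each
# byte carries 7 value bits and the high bit (0x80) is a continuation flag. A
# minimal decoder sketch that reverses that encoding (an assumed helper, not
# part of mqttpacket's public API):
def _decode_remaining_length(data):
    """Decode an MQTT remaining-length field, returning (value, bytes_consumed)."""
    multiplier = 1
    value = 0
    for consumed, byte in enumerate(six.iterbytes(data), start=1):
        value += (byte & 0x7f) * multiplier
        if not byte & 0x80:
            return value, consumed
        multiplier *= 128
    raise ValueError('incomplete remaining length field')
def test_decode_remaining_length_roundtrip():
    """The sketch above round-trips the encoder's boundary values."""
    for length in (0, 127, 128, 16383, 16384, 2097151, 2097152, 268435455):
        encoded = mqttpacket.encode_remainining_length(length)
        assert _decode_remaining_length(encoded) == (length, len(encoded))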
def test_disconnect():
"""
A valid DISCONNECT packet is built.
"""
assert mqttpacket.disconnect() == b'\xe0\x00'
def test_publish():
"""
    A valid PUBLISH packet is successfully encoded.
"""
payload = {u'test': u'test'}
payload_str = json.dumps(payload).encode('utf-8')
publish = mqttpacket.publish(
u'test',
False,
0,
True,
payload_str
)
print(binascii.hexlify(publish))
assert six.indexbytes(publish, 0) == 49
assert six.indexbytes(publish, 1) == 22
expect = binascii.unhexlify(
b'31160004746573747b2274657374223a202274657374227d'
)
assert publish == expect
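# Byte-by-byte layout of the expected packet above (MQTT 3.1.1 PUBLISH):
#   0x31        fixed header: packet type PUBLISH (0x30) with the retain bit set
#   0x16        remaining length = 22 (2-byte topic length + 4-byte topic + 16-byte payload)
#   0x0004      topic length
#   74657374    topic "test"
#   7b22...7d   payload b'{"test": "test"}'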
def test_publish_nonzero_qos_requires_packetid():
"""
A PUBLISH packet with a QoS of 1 or 2 requires a packet id.
"""
with pytest.raises(ValueError):
mqttpacket.publish(
u'test',
False,
1,
True,
u'foo'.encode('utf-8')
)
with pytest.raises(ValueError):
mqttpacket.publish(
u'test',
False,
2,
True,
u'foo'.encode('utf-8')
)
def test_publish_qos_1():
"""
A publish with a QoS of 1 and a packet id are successfully encoded.
"""
publish = mqttpacket.publish(
u'test',
False,
1,
True,
u'foo'.encode('utf-8'),
packet_id=255
)
expect = binascii.unhexlify(
b'330b00047465737400ff666f6f'
)
assert publish == expect
def test_publish_qos_2():
"""
A publish with a QoS of 2 and a packet id are successfully encoded.
"""
publish = mqttpacket.publish(
u'test',
False,
2,
False,
u'foo'.encode('utf-8'),
packet_id=256
)
expect = binascii.unhexlify(
b'340b0004746573740100666f6f'
)
assert publish == expect
def test_publish_dup():
"""
A publish with dup set is successfully encoded
"""
publish = mqttpacket.publish(
u'test',
True,
1,
False,
u'foo'.encode('utf-8'),
packet_id=256
)
expect = binascii.unhexlify(
b'3a0b0004746573740100666f6f'
)
assert publish == expect
def test_publish_dup_requires_qos():
"""
Setting dup on PUBLISH requires nonzero QoS.
"""
with pytest.raises(ValueError):
mqttpacket.publish(
u'test',
True,
0,
False,
u'foo'.encode('utf-8'),
packet_id=256
)
def test_publish_payload_requires_bytes():
"""
PUBLISH payload must be bytes.
"""
with pytest.raises(TypeError):
mqttpacket.publish(
u'test',
False,
0,
False,
u'foo'
)
def test_pingreq():
"""A PINGREQ is properly encoded."""
ping = mqttpacket.pingreq()
assert ping == b'\xc0\x00'
def test_unsubscribe():
"""
An unsubscribe of two topics is successfully built.
"""
msg = mqttpacket.unsubscribe(257, [u'a/b', u'c/d'])
assert msg[:1] == b'\xa1'
assert six.indexbytes(msg, 1) == 12
assert msg[2:4] == b'\x01\x01'
assert msg[4:6] == b'\x00\x03'
assert msg[6:9] == u'a/b'.encode('utf-8')
assert msg[9:11] == b'\x00\x03'
assert msg[11:] == u'c/d'.encode('utf-8')
def test_unsubscribe_requires_one():
"""
At least one topic must be provided to unsubscribe.
"""
with pytest.raises(ValueError):
mqttpacket.unsubscribe(123, [])
``` |
{
"source": "jlitzingerdev/psh",
"score": 2
} |
#### File: psh/tests/test_mutators.py
```python
from lib2to3.pygram import python_grammar
import pytest
from psh import _common, _mutators
NO_IR_PRE = """
setup(name="bar")
"""
NO_IR_POST = """
setup(name="bar",install_requires=["baz"])
"""
def test_add_install_nothing():
"""If no install requires argument exists, one is added."""
tree = _common.parse_string(NO_IR_PRE, python_grammar)
_mutators.add_arg_to_install(tree, "baz")
assert str(tree) == NO_IR_POST
EMPTY_IR_PRE = """
setup(name="bar")
"""
EMPTY_IR_POST = """
setup(name="bar",install_requires=["rugz"])
"""
def test_add_to_empty():
"""Adding to and empty install requires inserts an entry"""
tree = _common.parse_string(EMPTY_IR_PRE, python_grammar)
_mutators.add_arg_to_install(tree, "rugz")
assert str(tree) == EMPTY_IR_POST
ONE_IR_PRE = """
setup(name="bar", install_requires=["biz"],
extras_require={'dev': ['back'],
})
"""
ONE_IR_POST = """
setup(name="bar", install_requires=["biz","barg==0.7.8"],
extras_require={'dev': ['back'],
})
"""
def test_add_install_one_exists():
"""Arguments are successfully added to an install_requires list of one"""
tree = _common.parse_string(ONE_IR_PRE, python_grammar)
_mutators.add_arg_to_install(tree, "barg==0.7.8")
assert ONE_IR_POST == str(tree), print(str(tree))
IR_PRE = """
setup(name="bar", install_requires=["biz", "buzz"],
extras_require={'dev': ['back'],
})
"""
IR_POST = """
setup(name="bar", install_requires=["biz", "buzz","foo"],
extras_require={'dev': ['back'],
})
"""
def test_add_install_to_many():
"""Arguments can be added to an existing list of > 1"""
tree = _common.parse_string(IR_PRE, python_grammar)
_mutators.add_arg_to_install(tree, "foo")
assert IR_POST == str(tree)
def test_add_install_exists():
"""Arguments can be added to an existing list of > 1"""
tree = _common.parse_string(IR_PRE, python_grammar)
with pytest.raises(_mutators.AlreadyExistsError):
_mutators.add_arg_to_install(tree, "buzz==0.3.4")
assert IR_PRE == str(tree)
``` |
{
"source": "jliu0501/FBPCS",
"score": 2
} |
#### File: fbpcs/entity/mpc_instance.py
```python
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional
from fbpcs.entity.container_instance import ContainerInstance
from fbpcs.entity.instance_base import InstanceBase
class MPCRole(Enum):
SERVER = "SERVER"
CLIENT = "CLIENT"
class MPCInstanceStatus(Enum):
UNKNOWN = "UNKNOWN"
CREATED = "CREATED"
STARTED = "STARTED"
COMPLETED = "COMPLETED"
FAILED = "FAILED"
CANCELED = "CANCELED"
@dataclass
class MPCInstance(InstanceBase):
instance_id: str
game_name: str
mpc_role: MPCRole
num_workers: int
server_ips: Optional[List[str]]
containers: List[ContainerInstance]
status: MPCInstanceStatus
game_args: Optional[List[Dict[str, Any]]]
@classmethod
def create_instance(
cls,
instance_id: str,
game_name: str,
mpc_role: MPCRole,
num_workers: int,
server_ips: Optional[List[str]] = None,
containers: Optional[List[ContainerInstance]] = None,
status: MPCInstanceStatus = MPCInstanceStatus.UNKNOWN,
game_args: Optional[List[Dict[str, Any]]] = None,
) -> "MPCInstance":
return cls(
instance_id,
game_name,
mpc_role,
num_workers,
server_ips,
containers or [],
status,
game_args,
)
def get_instance_id(self) -> str:
return self.instance_id
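# A minimal construction sketch for the dataclass above (all values hypothetical),
# relying only on the defaults encoded in create_instance:
#
#   instance = MPCInstance.create_instance(
#       instance_id="mpc_1234",
#       game_name="lift",
#       mpc_role=MPCRole.SERVER,
#       num_workers=2,
#   )
#   assert instance.status == MPCInstanceStatus.UNKNOWN
#   assert instance.containers == []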
```
#### File: fbpcs/service/log_cloudwatch.py
```python
from typing import Dict, Any, List, Optional
from fbpcs.entity.container_instance import ContainerInstance
from fbpcs.entity.log_event import LogEvent
from fbpcs.gateway.cloudwatch import CloudWatchGateway
from fbpcs.service.log import LogService
class CloudWatchLogService(LogService):
def __init__(
self,
log_group: str,
region: str = "us-west-1",
access_key_id: Optional[str] = None,
access_key_data: Optional[str] = None,
config: Optional[Dict[str, Any]] = None,
) -> None:
self.cloudwatch_gateway = CloudWatchGateway(
region, access_key_id, access_key_data, config
)
self.log_group = log_group
def fetch(self, log_path: str, start_time: int = 0) -> List[LogEvent]:
"""Fetch logs"""
return self.cloudwatch_gateway.get_log_events(
self.log_group, log_path, start_time
)
def get_log_path(self, container_instance: ContainerInstance) -> str:
return self.log_group[1:] + "/" + container_instance.instance_id.split("/")[-1]
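# get_log_path above strips the leading "/" from the log group and appends the last
# path segment of the container instance id, e.g. (hypothetical values)
# log_group="/ecs/onedocker" with an instance_id ending in "task/abc123" yields
# "ecs/onedocker/abc123".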
```
#### File: tests/service/test_onedocker.py
```python
import unittest
from unittest.mock import AsyncMock, MagicMock, patch
from fbpcs.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcs.error.pcs import PcsError
from fbpcs.service.onedocker import OneDockerService
class TestOneDockerService(unittest.TestCase):
@patch("fbpcs.service.container.ContainerService")
def setUp(self, MockContainerService):
container_svc = MockContainerService()
self.onedocker_svc = OneDockerService(container_svc)
def test_start_container(self):
mocked_container_info = ContainerInstance(
"arn:aws:ecs:region:account_id:task/container_id",
"192.0.2.0",
ContainerInstanceStatus.STARTED,
)
self.onedocker_svc.container_svc.create_instances_async = AsyncMock(
return_value=[mocked_container_info]
)
returned_container_info = self.onedocker_svc.start_container(
container_definition="task_def",
package_name="project/exe_name",
cmd_args="cmd_args",
)
self.assertEqual(returned_container_info, mocked_container_info)
def test_start_containers(self):
mocked_container_info = [
ContainerInstance(
"arn:aws:ecs:region:account_id:task/container_id_1",
"192.0.2.0",
ContainerInstanceStatus.STARTED,
),
ContainerInstance(
"arn:aws:ecs:region:account_id:task/container_id_2",
"192.0.2.1",
ContainerInstanceStatus.STARTED,
),
]
self.onedocker_svc.container_svc.create_instances_async = AsyncMock(
return_value=mocked_container_info
)
returned_container_info = self.onedocker_svc.start_containers(
container_definition="task_def",
package_name="project/exe_name",
cmd_args_list=["--k1=v1", "--k2=v2"],
)
self.assertEqual(returned_container_info, mocked_container_info)
def test_get_cmd(self):
package_name = "project/exe_name"
cmd_args = "--k1=v1 --k2=v2"
timeout = 3600
version = "0.1.0"
expected_cmd_without_arguments = (
"python3.8 -m onedocker.script.runner project/exe_name --version=latest"
)
expected_cmd_with_arguments = f"python3.8 -m onedocker.script.runner project/exe_name --exe_args='{cmd_args}' --version={version} --timeout={timeout}"
cmd_without_arguments = self.onedocker_svc._get_cmd(package_name)
cmd_with_arguments = self.onedocker_svc._get_cmd(
package_name, version, cmd_args, timeout
)
self.assertEqual(expected_cmd_without_arguments, cmd_without_arguments)
self.assertEqual(expected_cmd_with_arguments, cmd_with_arguments)
def test_stop_containers(self):
containers = [
"0cc43cdb-3bee-4407-9c26-c0e6ea5bee84",
"6b809ef6-c67e-4467-921f-ee261c15a0a2",
]
expected_results = [None, PcsError("instance id not found")]
self.onedocker_svc.container_svc.cancel_instances = MagicMock(
return_value=expected_results
)
self.assertEqual(
self.onedocker_svc.stop_containers(containers), expected_results
)
self.onedocker_svc.container_svc.cancel_instances.assert_called_with(containers)
``` |
{
"source": "jliu79/COM-Server",
"score": 3
} |
#### File: COM-Server/examples/server_example.py
```python
from com_server import Builtins, Connection, ConnectionResource, RestApiHandler
# make the Connection object
conn = Connection(baud=115200, port="/dev/ttyUSB0")
# conn = Connection(baud=115200, port="/dev/ttyUSB...") # if Linux; can be "/dev/ttyACM..."
# conn = Connection(baud=115200, port="/dev/cu.usbserial...")
# conn = Connection(baud=115200, port="COM...") # if Windows
# make the API Handler object; initialize it with the connection object
handler = RestApiHandler(conn)
# add built-in endpoints, does not need to assign to variable; initialize with handler object
Builtins(handler)
# NOTE: these endpoints CANNOT be used, regardless of adding built-ins or not
# - "/register" (GET) - Used to register an IP, all programs must reach this endpoint before interacting with other endpoints
# - "/recall" (GET) - Used to unregister IP and allow others to access the endpoints
# NOTE: these endpoints CANNOT be used after adding built-ins
# - `/send` (POST): Send something through the serial port
# - `/receive` (GET, POST): Respond with the most recent received string from the serial port
# - `/receive/all` (GET, POST): Returns the entire receive queue
# - `/get` (GET, POST): Respond with the first string from serial port after request
# - `/send/get_first` (POST): Responds with the first string response from the serial port after sending data, with data and parameters in request
# - `/get/wait` (POST): Waits until connection receives string data given in request
# - `/send/get` (POST): Continues sending something until connection receives data given in request
# - `/connected` (GET): Indicates if the serial port is currently connected or not
# - `/list_ports` (GET): Lists all available Serial ports
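# A minimal client-side sketch of the flow above, assuming this server is already
# running on http://0.0.0.0:8080 (hypothetical client code using the `requests`
# package; the exact request parameters are assumptions and it is not executed here):
#
# import requests
# requests.get("http://0.0.0.0:8080/register") # register this IP before anything else
# requests.post("http://0.0.0.0:8080/send", data={"data": "ping"}) # write to the serial port
# print(requests.get("http://0.0.0.0:8080/receive").json()) # most recent received string
# requests.get("http://0.0.0.0:8080/recall") # unregister so other clients can connect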
# adding a custom endpoint:
@handler.add_endpoint("/hello_world")
def hello_world(conn: Connection):
# create a function with a class within it, then return the class
class Hello_World_Endpoint(ConnectionResource):
# classes are implemented like flask_restful classes
# each method defines a request method (i.e. get() defines what happens when there is a GET request, post() defines what happens when there is a POST request, etc.)
# to access request parameters, import reqparse from flask_restful (i.e. "from flask_restful import reqparse")
# to abort a request, import abort from flask_restful (i.e. "from flask_restful import abort")
# for more information on flask_restful, see https://flask-restful.readthedocs.io/en/latest/
# for more information on Flask, see https://flask.palletsprojects.com/en/2.0.x/
def get(self):
return {
"Hello": "World!",
"Received": conn.receive_str()
}
return Hello_World_Endpoint
# start the Flask development server on http://0.0.0.0:8080
handler.run_dev(host="0.0.0.0", port=8080)
# start the waitress production server on http://0.0.0.0:8080
# handler.run_prod(host="0.0.0.0", port=8080)
# call disconnect(), as variable will not be used anymore
conn.disconnect()
```
#### File: src/com_server/__main__.py
```python
import sys
from docopt import docopt
from . import __version__, runner
PARSE = """COM_Server command line tool
A simple command line tool to start the API server that interacts
with the serial port in a development environment or a
production environment.
Usage:
com_server (-p | --serport) <serport> (-b | --baud) <baud> run [--env=<env>] [--host=<host>] [--port=<port>] [--s-int=<s-int>] [--to=<to>]
com_server -h | --help
com_server --version
Options:
-p, --serport The serial port to connect to. For MacOS, use the "cu.*" port rather than the "tty.*" port.
-b, --baud The baud rate of the serial connection.
--env=<env> Development or production environment. Value must be 'dev' or 'prod'. [default: dev].
--host=<host> The name of the host server (optional) [default: 0.0.0.0].
--port=<port> The port of the host server (optional) [default: 8080].
--s-int=<s-int> How long, in seconds, the program should wait between sending to serial port [default: 1].
--to=<to> How long, in seconds, the program should wait before exiting when performing time-consuming tasks [default: 1].
-h, --help Show help.
--version Show version.
"""
def _display_version() -> None:
print(f"COM_Server version: {__version__}")
sys.exit()
def main() -> None:
args = docopt(PARSE)
if (args["--version"]):
# if asking for version
_display_version()
if (args["run"]):
# if asking to run
baud = args["<baud>"].strip()
serport = args["<serport>"].strip()
env = args["--env"].strip()
host = args["--host"].strip()
port = args["--port"].strip()
timeout = args["--to"].strip()
send_interval = args["--s-int"].strip()
if (env not in ('dev', 'prod')):
print("Value of <env> must be \"dev\" or \"prod\".")
sys.exit(1)
runner.run(baud, serport, env, host, port, timeout, send_interval)
print("Exited")
``` |
{
"source": "jliu87/bagua",
"score": 3
} |
#### File: bagua/torch_api/bucket.py
```python
from __future__ import annotations
from bagua.torch_api.communication import get_backend
from typing import List, Callable, Optional
import bagua_core as B
import torch
from bagua.torch_api.tensor import BaguaTensor
from bagua.torch_api.utils import check_contiguous
from bagua.torch_api.communication import broadcast
class BaguaBucket:
def __init__(
self, tensors: List[BaguaTensor], name: str, flatten: bool, alignment: int = 1
) -> None:
"""
Create a Bagua bucket with a list of Bagua tensors.
Args:
tensors: A list of Bagua tensors to be put in the
bucket.
name: The unique name of the bucket.
flatten: If ``True``, flatten the input tensors so that they are
contiguous in memory.
            alignment: If `alignment > 1`, Bagua will add a padding tensor to
                the bucket so that the total number of elements in the bucket is
                divisible by the given alignment.
"""
self.tensors = tensors
"""
The tensors contained within the bucket.
"""
self.bagua_module_name = tensors[0].bagua_module_name
for tensor in self.tensors:
assert (
self.bagua_module_name == tensor.bagua_module_name
), "every tensor in the same bucket should have the same model name"
self._bagua_backend = get_backend(self.bagua_module_name)
self.name = name
"""
The bucket's name.
"""
self.padding_tensor = None
if alignment > 1:
padding = sum(tensor.numel() for tensor in self.tensors) % alignment
if padding > 0:
padding = alignment - padding
                # the padding tensor must be named bagua_padding_tensor, so that it is always marked as ready for communication in the backend
self.padding_tensor = torch.zeros(
padding, dtype=self.tensors[0].dtype, device=self.tensors[0].device
).to_bagua_tensor("bagua_padding_tensor_bucket_" + name)
self._all_tensors = (
self.tensors + [self.padding_tensor]
if self.padding_tensor is not None
else self.tensors
)
self.backend_tensor = None
self.flatten = flatten
if self.flatten:
self._flatten_()
self.backend_bucket = B.BaguaBucketPy(
name, [tensor._bagua_backend_tensor for tensor in self._all_tensors]
)
for tensor in self._all_tensors:
tensor._bagua_bucket = self
def flattened_tensor(self) -> BaguaTensor:
"""
        Returns a tensor contiguous in memory which contains the same data as the bucket's tensors and the padding tensor (if it exists).
"""
total_size = 0
for tensor in self._all_tensors:
total_size += tensor.numel()
flatten_tensor = torch.zeros(total_size, dtype=self._all_tensors[0].dtype).to(
self._all_tensors[0].device
)
offset = 0
for tensor in self._all_tensors:
# copy data
flatten_tensor[offset : offset + tensor.numel()] = tensor.data.reshape(-1)
offset += tensor.numel()
return flatten_tensor
def _flatten_(self):
"""
Flatten inner tensors in place.
"""
if self.check_flatten():
return
if len(self._all_tensors) == 0:
return
total_size = 0
for tensor in self._all_tensors:
total_size += tensor.numel()
flatten_tensor = torch.zeros(total_size, dtype=self._all_tensors[0].dtype).to(
self._all_tensors[0].device
)
flatten_storage = flatten_tensor.storage()
offset = 0
for tensor in self._all_tensors:
# copy data
flatten_tensor[offset : offset + tensor.numel()] = tensor.data.reshape(-1)
tensor.bagua_set_storage(flatten_storage, offset)
offset += tensor.numel()
# set backend tensor
self.backend_tensor = flatten_tensor
# check
assert self.check_flatten()
def check_flatten(self) -> bool:
"""
Returns:
True if the bucket's tensors are contiguous in memory.
"""
return check_contiguous(self._all_tensors)
def append_python_op(self, python_function: Callable[[str], None]) -> BaguaBucket:
"""
Append a Python operation to a bucket. A Python operation is a Python function that
takes the bucket's name and returns ``None``. It can do arbitrary things within the
function body.
The operations will be executed by the Bagua backend in the order they are appended
when all the tensors within the bucket are marked ready.
Args:
python_function: The Python operation function.
Returns:
The bucket itself.
"""
def wrapper_function_factory(pyop):
def wrapped_pyop(name):
with torch.cuda.stream(self._bagua_backend.stream):
return pyop(name)
return wrapped_pyop
self.backend_bucket.append_python_op(wrapper_function_factory(python_function))
return self
def append_centralized_synchronous_op(
self,
hierarchical: bool = False,
average: bool = True,
scattergather: bool = False,
compression: Optional[str] = None,
) -> BaguaBucket:
"""
Append a centralized synchronous operation to a bucket. It will sum or average the tensors in the bucket
for all workers.
The operations will be executed by the Bagua backend in the order they are appended
when all the tensors within the bucket are marked ready.
Args:
            hierarchical (bool): Enable hierarchical communication, which means the GPUs on the same machine
                will communicate with each other first. After that, machines do inter-node communication. This can
boost performance when the inter-node communication cost is high.
average (bool): If ``True``, the gradients on each worker are averaged. Otherwise, they are summed.
scattergather (bool): If ``True``, the communication between workers are done with scatter gather instead
of allreduce. This is required for using compression.
compression: If not ``None``, the tensors will be compressed for communication. Currently "MinMaxUInt8" is
supported.
Returns:
The bucket itself.
"""
if hierarchical:
self.backend_bucket.append_centralized_synchronous_op(
self._bagua_backend.internode_communicator,
self._bagua_backend.intranode_communicator,
hierarchical=hierarchical,
average=average,
scattergather=scattergather,
compression=compression,
)
else:
self.backend_bucket.append_centralized_synchronous_op(
self._bagua_backend.global_communicator,
None,
hierarchical=hierarchical,
average=average,
scattergather=scattergather,
compression=compression,
)
return self
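    # A minimal usage sketch for the method above (assumes an initialized Bagua
    # process group and an existing list of Bagua tensors; not executed here):
    #
    #   bucket = BaguaBucket(tensors, name="grads", flatten=True)
    #   bucket.append_centralized_synchronous_op(hierarchical=True, average=True)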
def append_decentralized_synchronous_op(
self,
peer_weight: BaguaTensor,
hierarchical: bool = True,
peer_selection_mode: str = "all",
) -> BaguaBucket:
"""
        Append a decentralized synchronous operation to a bucket. It will do gossip-style model averaging among workers.
        This operation is not inplace, which means the bucket weights are first copied to `peer_weight`, and the result of
decentralized averaging will be in `peer_weight`. To copy `peer_weight` back to `self`, call
:func:`decentralized_synchronous_op_copy_back_peer_weight`.
This operation will be executed by the Bagua backend in
the order they are appended when all the tensors within the bucket are marked ready.
Args:
peer_weight (BaguaTensor): A tensor used for averaging model with peers, should be of the same size
with the bucket tensors total size. Use ``self.flattened_tensor().to_bagua_tensor(...)`` to create such a tensor.
            hierarchical (bool): Enable hierarchical communication, which means the GPUs on the same machine
                will communicate with each other first. After that, machines do inter-node communication. This can
boost performance when the inter-node communication cost is high.
peer_selection_mode (str): Can be "all" or "shift_one". "all" means all workers' weights are averaged
in each communication step. "shift_one" means each worker selects a different peer to do weights average
in each communication step.
Returns:
The bucket itself.
"""
if hierarchical:
self.backend_bucket.append_decentralized_synchronous_op(
self._bagua_backend.internode_communicator,
self._bagua_backend.intranode_communicator,
hierarchical=hierarchical,
peer_selection_mode=peer_selection_mode,
peer_weight=peer_weight._bagua_backend_tensor,
)
else:
self.backend_bucket.append_decentralized_synchronous_op(
self._bagua_backend.global_communicator,
None,
hierarchical=hierarchical,
peer_selection_mode=peer_selection_mode,
peer_weight=peer_weight._bagua_backend_tensor,
)
return self
def decentralized_synchronous_op_copy_back_peer_weight(
self, peer_weight: BaguaTensor, hierarchical: bool = True
):
"""
Copy `peer_weight` back to bucket weights to end a decentralized synchronous operation.
See :func:`append_decentralized_synchronous_op` for more information.
Args:
peer_weight (BaguaTensor): A tensor used for averaging model with peers, should be of the same size
with the bucket tensors total size. Use ``self.flattened_tensor().to_bagua_tensor(...)`` to create such a tensor.
            hierarchical (bool): Enable hierarchical communication, which means the GPUs on the same machine
                will communicate with each other first. After that, machines do inter-node communication. This can
                boost performance when the inter-node communication cost is high. Must be the same as the `hierarchical` argument in
:func:`append_decentralized_synchronous_op`.
"""
intra_comm = self._bagua_backend.intranode_communicator
inter_comm = self._bagua_backend.internode_communicator
if not hierarchical or (inter_comm is not None):
self.backend_tensor.copy_(peer_weight)
if hierarchical:
broadcast(self.backend_tensor, 0, intra_comm)
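    # A minimal sketch of the decentralized flow described above (assumes an
    # initialized Bagua process group; the tensor name passed to `to_bagua_tensor`
    # is hypothetical):
    #
    #   peer_weight = bucket.flattened_tensor().to_bagua_tensor("peer_weight")
    #   bucket.append_decentralized_synchronous_op(peer_weight, hierarchical=True)
    #   # ... after the backend has executed the bucket's operations ...
    #   bucket.decentralized_synchronous_op_copy_back_peer_weight(peer_weight)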
def append_low_precision_decentralized_synchronous_op(
self,
weight: BaguaTensor,
left_peer_weight: BaguaTensor,
right_peer_weight: BaguaTensor,
hierarchical: bool = True,
compression: str = "MinMaxUInt8",
) -> BaguaBucket:
"""
Append a low precision decentralized synchronous operation to a bucket. It will compress the difference
of local models between two successive iterations and exchange them among workers.
The operations will be executed by the Bagua backend in the order they are appended
when all the tensors within the bucket are marked ready.
Args:
weight (BaguaTensor): Model replica of current worker's local model. It should be of the same size
with the bucket tensors total size. Use ``self.flattened_tensor().to_bagua_tensor(...)`` to create such a tensor.
left_peer_weight (BaguaTensor): Model replica of current worker's left peer. It should be of the same size
with the bucket tensors total size. Use ``self.flattened_tensor().to_bagua_tensor(...)`` to create such a tensor,
then copy the initializing weights of current worker's left peer to the tensor.
right_peer_weight (BaguaTensor): Model replica of current worker's right peer. It should be of the same size
                with the bucket tensors total size. Use ``self.flattened_tensor().to_bagua_tensor(...)`` to create such a tensor,
                then copy the initializing weights of current worker's right peer to the tensor.
            hierarchical (bool): Enable hierarchical communication, which means the GPUs on the same machine
                will communicate with each other first. After that, machines do inter-node communication. This can
boost performance when the inter-node communication cost is high.
compression (str): The way how tensors are compressed for communication. Currently "MinMaxUInt8" is supported.
Returns:
The bucket itself.
"""
if hierarchical:
self.backend_bucket.append_low_precision_decentralized_synchronous_op(
self._bagua_backend.internode_communicator,
self._bagua_backend.intranode_communicator,
hierarchical=hierarchical,
peer_selection_mode="ring",
compression=compression,
weight=weight._bagua_backend_tensor,
left_peer_weight=left_peer_weight._bagua_backend_tensor,
right_peer_weight=right_peer_weight._bagua_backend_tensor,
)
else:
self.backend_bucket.append_low_precision_decentralized_synchronous_op(
self._bagua_backend.global_communicator,
None,
hierarchical=hierarchical,
peer_selection_mode="ring",
compression=compression,
weight=weight._bagua_backend_tensor,
left_peer_weight=left_peer_weight._bagua_backend_tensor,
right_peer_weight=right_peer_weight._bagua_backend_tensor,
)
return self
def clear_ops(self) -> BaguaBucket:
"""
Clear the previously appended operations.
"""
self.backend_bucket.clear_ops()
return self
def bytes(self) -> int:
"""Returns the total number of bytes occupied by the bucket.
Returns:
int: number of bucket bytes
"""
return sum(tensor.numel() * tensor.element_size() for tensor in self.tensors)
```
#### File: bagua/torch_api/communication.py
```python
import logging
import multiprocessing
import bagua_core as B
from bagua.service import AutotuneService
from collections import defaultdict
from . import env
from .env import (
get_master_addr,
get_world_size,
get_rank,
get_local_rank,
get_local_size,
get_default_bucket_size,
get_bagua_service_port,
)
from .utils import flatten, unflatten, to_bagua_reduce_op
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
from bagua.service.autotune_service import AutotuneClient
from functools import lru_cache
@lru_cache(maxsize=None)
def get_hyperparameters_service_client():
hyperparameters_service_client = AutotuneClient(
get_master_addr(), get_bagua_service_port()
)
return hyperparameters_service_client
@lru_cache(maxsize=None)
def get_backend(model_name: str):
backend = B.BaguaCommBackendPy(100, device_id=get_local_rank())
backend.device_id = get_local_rank()
backend.stream = torch.cuda.Stream(priority=-1)
backend.store = c10d._get_default_store()
backend.internode_communicator = init_bagua_inter_communicator(
model_name=model_name,
stream=backend.stream,
leader_rank=0,
store=backend.store,
device_id=backend.device_id,
)
backend.intranode_communicator = init_bagua_intra_communicator(
model_name=model_name,
stream=backend.stream,
store=backend.store,
device_id=backend.device_id,
)
backend.global_communicator = init_bagua_communicator(
model_name=model_name,
stream=backend.stream,
store=backend.store,
device_id=backend.device_id,
)
return backend
def run_flask_app():
from flask import Flask
autotune_service = AutotuneService(
world_size=get_world_size(),
autotune_level=env.get_autotune_level(),
max_samples=env.get_autotune_max_samples(),
sampling_confidence_time_s=env.get_autotune_sampling_confidence_time_s(),
warmup_time_s=env.get_autotune_warmup_time_s(),
is_output_autotune_log=env.get_is_output_autotune_log(),
default_bucket_size=get_default_bucket_size(),
)
app = Flask(__name__)
app = autotune_service.setup_app(app)
log = logging.getLogger("werkzeug")
log.setLevel(logging.ERROR)
app.run(
host="0.0.0.0",
port=get_bagua_service_port(),
debug=False,
)
_autotune_server = None
def start_autotune_server():
"""Start autotune server in background."""
global _autotune_server
_autotune_server = multiprocessing.Process(target=run_flask_app)
_autotune_server.daemon = True
_autotune_server.start()
def init_process_group():
"""Initializes the PyTorch builtin distributed process group, and this will
also initialize the distributed package, should be executed before all the
APIs of bagua.
Raises:
RepeatedInitializationError: If you run this function repeatedly
Examples::
>>> import bagua.torch_api as bagua
>>> bagua.init_process_group()
>>> model = torch.nn.Sequential(
... torch.nn.Linear(D_in, H),
... torch.nn.ReLU(),
... torch.nn.Linear(H, D_out),
... )
>>> optimizer = torch.optim.SGD(
... model.parameters(),
... lr=0.01,
... momentum=0.9
... )
>>> model, optimizer = bagua_init(model, optimizer)
"""
if not dist.is_initialized():
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
) # fmt: off
if get_rank() == 0 and _autotune_server is None:
start_autotune_server()
def gen_nccl_unique_id(comm_type: str, root=0, store=None):
key = f"{comm_type}-{root}-unique_id"
if store is None:
store = c10d._get_default_store()
if get_rank() == root:
idstr = B.BaguaSingleCommunicatorPy.generate_nccl_unique_id_str()
store.set(key, idstr)
else:
idstr = store.get(key)
idstr = str(idstr, encoding="utf-8")
return idstr
def init_bagua_inter_communicator(
model_name: str, stream, leader_rank=0, store=None, device_id=None
):
if device_id is None:
device_id = get_local_rank()
nccl_unique_id = gen_nccl_unique_id(
f"bagua_inter_comm_{model_name}", root=leader_rank, store=store
)
if get_rank() % get_local_size() != leader_rank:
return None
comm = B.BaguaSingleCommunicatorPy(
rank=get_rank() // get_local_size(),
nranks=get_world_size() // get_local_size(),
device_id=device_id,
stream_ptr=stream.cuda_stream,
nccl_unique_id_str=nccl_unique_id,
)
comm.cuda_stream = stream
logging.debug(
"init bagua internode communicator ok, global rank: %s rank: %s",
dist.get_rank(),
comm.rank(),
)
return comm
def init_bagua_intra_communicator(model_name: str, stream, store=None, device_id=None):
if device_id is None:
device_id = get_local_rank()
nccl_unique_id = gen_nccl_unique_id(
f"bagua_intra_comm_{model_name}",
root=get_rank() // get_local_size() * get_local_size(),
store=store,
)
comm = B.BaguaSingleCommunicatorPy(
rank=get_rank() % get_local_size(),
nranks=get_local_size(),
device_id=device_id,
stream_ptr=stream.cuda_stream,
nccl_unique_id_str=nccl_unique_id,
)
comm.cuda_stream = stream
logging.debug(
"init bagua intranode communicator ok, global rank: %s rank: %s",
dist.get_rank(),
comm.rank(),
)
return comm
def init_bagua_communicator(model_name: str, stream, store=None, device_id=None):
if device_id is None:
device_id = get_local_rank()
nccl_unique_id = gen_nccl_unique_id(f"bagua_global_comm_{model_name}", store=store)
comm = B.BaguaSingleCommunicatorPy(
rank=get_rank(),
nranks=get_world_size(),
device_id=device_id,
stream_ptr=stream.cuda_stream,
nccl_unique_id_str=nccl_unique_id,
)
comm.cuda_stream = stream
logging.debug(
"init bagua global communicator ok, global rank: %s rank: %s",
dist.get_rank(),
comm.rank(),
)
return comm
def send(tensor, dst, comm: B.BaguaSingleCommunicatorPy = None):
r"""Sends a tensor to dst synchronously.
Args:
tensor (torch.Tensor): Data to be sent.
dst (int): Destination rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator
to work on. If None, the global bagua communicator will be used.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.send(tensor.to_bagua_tensor().bagua_backend_tensor(), dst)
torch.cuda.synchronize()
def recv(tensor, src, comm: B.BaguaSingleCommunicatorPy = None):
r"""Receives a tensor synchronously.
Args:
tensor (torch.Tensor): Tensor to fill with received data.
src (int): Source rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator
to work on. If None, the global bagua communicator will be used.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.recv(tensor.to_bagua_tensor().bagua_backend_tensor(), src)
torch.cuda.synchronize()
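# A minimal point-to-point sketch using the two helpers above (assumes
# init_process_group() has been called and at least two CUDA ranks exist;
# not executed here):
#
#   t = torch.zeros(4, device="cuda")
#   if get_rank() == 0:
#       send(t.fill_(1.0), dst=1)
#   elif get_rank() == 1:
#       recv(t, src=0)  # t now holds the values filled in by rank 0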
def broadcast_coalesced(tensors, src=0, comm: B.BaguaSingleCommunicatorPy = None):
for tensor in tensors:
assert tensor.device != torch.device(
"cpu"
), "input tensors must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
coalesced = flatten(tensors)
comm.broadcast(coalesced.to_bagua_tensor().bagua_backend_tensor(), src)
for buf, synced in zip(tensors, unflatten(coalesced, tensors)):
buf.copy_(synced)
# TODO: remove
torch.cuda.synchronize()
def broadcast(tensor, src=0, comm: B.BaguaSingleCommunicatorPy = None):
r"""Broadcasts the tensor to the whole communicator.
`tensor` must have the same number of elements in all processes
participating in the collective.
Args:
tensor (torch.Tensor): Data to be sent if `root` is the rank of
current process, and tensor to be used to save received data
otherwise.
src (int, optional): Source rank. Defaults to 0.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator
to work on. If None, the global bagua communicator will be used.
Defaults to None.
""" # noqa: W293
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.broadcast(tensor.to_bagua_tensor().bagua_backend_tensor(), src)
# TODO: remove
torch.cuda.synchronize()
def reduce(
send_tensor,
recv_tensor,
dst,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
r"""Reduces the tensor across all processes.
    Only the process with rank `dst` is going to receive the final result.
Args:
send_tensor (torch.Tensor): Input of the collective.
recv_tensor (torch.Tensor): Output of the collective, must have the same size of send_tensor.
dst (int): Destination rank.
op (optional): one of the values from `torch.distributed.ReduceOp`
enum. Specifies an operation used for element-wise reductions.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
""" # noqa: W293
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
dst,
to_bagua_reduce_op(op),
)
torch.cuda.synchronize()
def reduce_inplace(
tensor, dst, op=dist.ReduceOp.SUM, comm: B.BaguaSingleCommunicatorPy = None
):
r"""The inplace version of reduce."""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), dst, to_bagua_reduce_op(op)
)
torch.cuda.synchronize()
def allreduce_coalesced_inplace(
tensors,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
for tensor in tensors:
assert tensor.device != torch.device(
"cpu"
), "input tensors must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
coalesced = flatten(tensors)
comm.allreduce_inplace(
coalesced.to_bagua_tensor("allreduce_coalesced"), to_bagua_reduce_op(op)
)
for buf, synced in zip(tensors, unflatten(coalesced, tensors)):
buf.copy_(synced)
# TODO: remove
torch.cuda.synchronize()
def allreduce(
send_tensor,
recv_tensor,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Reduces the tensor data across all machines in such a way that all get
the final result. After the call recv_tensor is going to be bitwise identical
in all processes.
Args:
send_tensor (torch.Tensor): Input of the collective.
recv_tensor (torch.Tensor): Output of the collective, must have the same size of send_tensor.
op (optional): one of the values from `torch.distributed.ReduceOp` enum. Specifies an operation used for element-wise reductions.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
Examples:
>>> from bagua.torch_api import allreduce
>>> # All tensors below are of torch.int64 type.
>>> # We have 2 process groups, 2 ranks.
>>> send_tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
>>> recv_tensor = torch.zeros(2, dtype=torch.int64)
>>> send_tensor
tensor([1, 2]) # Rank 0
tensor([3, 4]) # Rank 1
>>> allreduce(send_tensor, recv_tensor)
>>> recv_tensor
tensor([4, 6]) # Rank 0
tensor([4, 6]) # Rank 1
>>> # All tensors below are of torch.cfloat type.
>>> # We have 2 process groups, 2 ranks.
>>> send_tensor = torch.tensor([1+1j, 2+2j], dtype=torch.cfloat) + 2 * rank * (1+1j)
>>> recv_tensor = torch.zeros(2, dtype=torch.cfloat)
>>> send_tensor
tensor([1.+1.j, 2.+2.j]) # Rank 0
tensor([3.+3.j, 4.+4.j]) # Rank 1
>>> allreduce(send_tensor, recv_tensor)
>>> recv_tensor
tensor([4.+4.j, 6.+6.j]) # Rank 0
tensor([4.+4.j, 6.+6.j]) # Rank 1
""" # noqa: E501
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.allreduce(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
to_bagua_reduce_op(op),
)
# TODO: remove
torch.cuda.synchronize()
def allreduce_inplace(
tensor,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of allreduce."""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.allreduce_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), to_bagua_reduce_op(op)
)
torch.cuda.synchronize()
def allgather(
send_tensor,
recv_tensor,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Gathers send_tensors from all machines to recv_tensor.
Args:
send_tensor (torch.Tensor): Input of the collective.
recv_tensor (torch.Tensor): Output of the collective, must have size send_tensor.size()*comm.nranks.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.allgather(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
)
torch.cuda.synchronize()
def allgather_inplace(
tensor,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of allgather."""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.allgather_inplace(tensor.to_bagua_tensor().bagua_backend_tensor())
torch.cuda.synchronize()
def gather(
send_tensor,
recv_tensor,
dst,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Gathers send_tensors from all machines to recv_tensor in a single process.
Args:
send_tensor (torch.Tensor): Input of the collective.
recv_tensor (torch.Tensor): Output of the collective, must have size send_tensor.size()*comm.nranks.
dst (int): Destination rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.gather(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
dst,
)
torch.cuda.synchronize()
def gather_inplace(
tensor,
count,
dst,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of gather.
Args:
        tensor (torch.Tensor): Input and output of the collective. For the dst process,
has size count*comm.nranks() and acts as recv_tensor above. For non-dst processes,
has size count and acts as send_tensor above.
count (int): The per-rank data count.
dst (int): Destination rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.gather_inplace(tensor.to_bagua_tensor().bagua_backend_tensor(), count, dst)
torch.cuda.synchronize()
def scatter(
send_tensor,
recv_tensor,
src,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Scatters send_tensor to all machines.
Args:
send_tensor (torch.Tensor): Input of the collective, must have size recv_tensor.size()*comm.nranks.
recv_tensor (torch.Tensor): Output of the collective.
src (int): Source rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.scatter(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
src,
)
torch.cuda.synchronize()
def scatter_inplace(
tensor,
count,
src,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of gather.
Args:
tensor (torch.Tensor): Input and output of the collective, For src process,
has size count*comm.nranks() and acts as send_tensor above. For non-src processes,
has size count and acts as recv_tensor above.
count (int): The per-rank data count.
src (int): Source rank.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert tensor.device != torch.device("cpu"), "input tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.scatter_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), count, src
)
torch.cuda.synchronize()
def reduce_scatter(
send_tensor,
recv_tensor,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""Reduces on send_tensor, then scatters send_tensor to all machines.
Args:
send_tensor (torch.Tensor): Input of the collective, must have size recv_tensor.size()*comm.nranks.
recv_tensor (torch.Tensor): Output of the collective.
op (optional): one of the values from `torch.distributed.ReduceOp` enum. Specifies an operation used for element-wise reductions.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce_scatter(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
to_bagua_reduce_op(op),
)
torch.cuda.synchronize()
def reduce_scatter_inplace(
tensor,
op=dist.ReduceOp.SUM,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of reduce_scatter.
Args:
        tensor (torch.Tensor): Input and output of the collective, must satisfy: `tensor.size() % comm.nranks == 0`.
op (optional): one of the values from `torch.distributed.ReduceOp` enum. Specifies an operation used for element-wise reductions.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert tensor.device != torch.device("cpu"), "send tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.reduce_scatter_inplace(
tensor.to_bagua_tensor().bagua_backend_tensor(), to_bagua_reduce_op(op)
)
torch.cuda.synchronize()
def alltoall(
send_tensor,
recv_tensor,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""All processes send data to all processes.
Args:
send_tensor (torch.Tensor): Input of the collective, must satisfy: `send_tensor.size() % comm.nranks == 0`.
recv_tensor (torch.Tensor): Output of the collective, must have the same size of send_tensor.
comm (B.BaguaSingleCommunicatorPy, optional): The bagua communicator to
work on. If None the global bagua communicator will be used.
Defaults to None.
"""
assert send_tensor.device != torch.device(
"cpu"
), "send tensor must be CUDA and dense"
assert recv_tensor.device != torch.device(
"cpu"
), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.alltoall(
send_tensor.to_bagua_tensor().bagua_backend_tensor(),
recv_tensor.to_bagua_tensor().bagua_backend_tensor(),
)
torch.cuda.synchronize()
def alltoall_inplace(
tensor,
comm: B.BaguaSingleCommunicatorPy = None,
):
"""The inplace version of alltoall."""
assert tensor.device != torch.device("cpu"), "recv tensor must be CUDA and dense"
if comm is None:
comm = get_backend("").global_communicator
event = torch.cuda.current_stream().record_event()
comm.cuda_stream.wait_event(event)
with torch.cuda.stream(comm.cuda_stream):
comm.alltoall_inplace(tensor.to_bagua_tensor().bagua_backend_tensor())
torch.cuda.synchronize()
```
#### File: torch_api/contrib/load_balancing_data_loader.py
```python
import torch
import math
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
from torch.utils.data.dataset import Dataset
from typing import Optional, Iterator, Callable
from collections import OrderedDict
__all__ = ["LoadBalancingDistributedSampler", "LoadBalancingDistributedBatchSampler"]
class LoadBalancingDistributedSampler(Sampler):
r"""Sampler that restricts data loading to a subset of the dataset.
    This sampler uses a `complexity_fn` to calculate each sample's computational
    complexity and makes each batch get a similar computational complexity.
    This is useful in scenarios like speech and NLP, where each batch has variable
    length and distributed training suffers from the straggler problem.
The usage is similar to :class:`torch.utils.data.DistributedSampler`, where each
process loads a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Args:
dataset: Dataset used for sampling.
complexity_fn(Callable): A function whose input is a sample and output is an integer as a
measure of the computational complexity of the sample.
num_replicas (int, optional): Number of processes participating in
distributed training. By default, :attr:`world_size` is retrieved from the
current distributed group.
rank (int, optional): Rank of the current process within :attr:`num_replicas`.
By default, :attr:`rank` is retrieved from the current distributed
group.
shuffle (bool, optional): If ``True`` (default), sampler will shuffle the
indices.
seed (int, optional): random seed used to shuffle the sampler if
:attr:`shuffle=True`. This number should be identical across all
processes in the distributed group. Default: ``0``.
drop_last (bool, optional): if ``True``, then the sampler will drop the
tail of the data to make it evenly divisible across the number of
replicas. If ``False``, the sampler will add extra indices to make
the data evenly divisible across the replicas. Default: ``False``.
        random_level (float, optional): A float between ``0`` and ``1`` that controls the extent
of load balance. ``0`` means the best load balance, while ``1`` means the opposite.
.. warning::
In distributed mode, calling the :meth:`set_epoch` method at
the beginning of each epoch **before** creating the :class:`DataLoader` iterator
is necessary to make shuffling work properly across multiple epochs. Otherwise,
the same ordering will be always used.
Example::
Define your `complexity_fn`, which accepts a dataset sample as its input and produces an integer
as the sample's computational complexity.
>>> dataset = torch.utils.data.TensorDataset(torch.randn(n, 2), torch.randperm(n))
>>> complexity_fn = lambda x: x[1]
Below is the usage of :class:`LoadBalancingDistributedSampler` and :class:`DataLoader`:
>>> sampler = bagua.torch_api.contrib.LoadBalancingDistributedSampler(
... dataset,
... complexity_fn=complexity_fn) if is_distributed else None
>>> loader = torch.utils.data.DataLoader(dataset,
... shuffle=(sampler is None),
... sampler=sampler)
>>>
>>> for epoch in range(start_epoch, n_epochs):
... if is_distributed:
... sampler.set_epoch(epoch)
... train(loader)
"""
def __init__(
self,
dataset: Dataset,
complexity_fn: Callable[..., int],
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = True,
seed: int = 0,
drop_last: bool = False,
random_level: float = 0,
) -> None:
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
if rank >= num_replicas or rank < 0:
raise ValueError(
"Invalid rank {}, rank should be in the interval"
" [0, {}]".format(rank, num_replicas - 1)
)
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.drop_last = drop_last
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
dataset_len = len(self.dataset) # type: ignore
if self.drop_last and dataset_len % self.num_replicas != 0: # type: ignore
# Split to nearest available length that is evenly divisible.
# This is to ensure each rank receives the same amount of data when
# using this Sampler.
self.num_samples = math.ceil(
# `type:ignore` is required because Dataset cannot provide a default __len__
# see NOTE in pytorch/torch/utils/data/sampler.py
(dataset_len - self.num_replicas)
/ self.num_replicas
)
else:
self.num_samples = math.ceil(dataset_len / self.num_replicas) # type: ignore
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.seed = seed
self.item_complexity_map = dict()
for item_index in range(dataset_len):
self.item_complexity_map[item_index] = complexity_fn(
self.dataset[item_index]
)
self.ordered_item_complexity_map = OrderedDict(
sorted(self.item_complexity_map.items(), key=lambda t: t[1])
)
max_complexity = max(self.item_complexity_map.values())
min_complexity = min(self.item_complexity_map.values())
if random_level < 0.0 or random_level > 1.0:
raise ValueError(
"Invalid random level {}, shoule be in the range [0.0, 1.0]".format(
random_level
)
)
self.random_number = int((max_complexity - min_complexity) * random_level + 1)
def shuffle_chunks(self):
def chunks_wrap_padding(lst, n):
"""Yield successive n-sized chunks from lst."""
num_chunks = max(1, self.num_samples)
num_elements = num_chunks * n
current_lst = []
for i in range(num_elements):
current_lst.append(lst[i % len(lst)])
if len(current_lst) == n:
yield current_lst
current_lst = []
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
if self.random_number > 0:
item_complexity_map = self.item_complexity_map.copy()
complexity_random_ints = torch.randint(
self.random_number, (len(item_complexity_map),), generator=g
).tolist()
for k, v in zip(item_complexity_map, complexity_random_ints):
item_complexity_map[k] += v
ordered_item_complexity_map = OrderedDict(
sorted(item_complexity_map.items(), key=lambda t: t[1])
)
else:
ordered_item_complexity_map = self.ordered_item_complexity_map
index_chunks = list(
chunks_wrap_padding(
list(ordered_item_complexity_map.keys()), self.num_replicas
)
)
chunk_indices = torch.randperm(len(index_chunks), generator=g).tolist() # type: ignore
else:
index_chunks = list(
chunks_wrap_padding(
list(self.ordered_item_complexity_map.keys()), self.num_replicas
)
)
chunk_indices = list(range(len(index_chunks))) # type: ignore
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.num_samples - len(chunk_indices)
if padding_size <= len(chunk_indices):
chunk_indices += chunk_indices[:padding_size]
else:
chunk_indices += (
chunk_indices * math.ceil(padding_size / len(chunk_indices))
)[:padding_size]
else:
# remove tail of data to make it evenly divisible.
chunk_indices = chunk_indices[: self.num_samples]
assert len(chunk_indices) == self.num_samples
return index_chunks, chunk_indices
def __iter__(self) -> Iterator:
index_chunks, chunk_indices = self.shuffle_chunks()
# subsample
indices = [index_chunks[i][self.rank] for i in chunk_indices]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self) -> int:
return self.num_samples
def set_epoch(self, epoch: int) -> None:
r"""
Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
use a different random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Args:
epoch (int): Epoch number.
"""
self.epoch = epoch
class LoadBalancingDistributedBatchSampler(Sampler):
r"""Wraps another load balance sampler to yield variable sized mini-batches.
Args:
sampler (LoadBalancingDistributedSampler): Load balance sampler.
batch_fn (Callable): Callable to yield mini-batch indices.
drop_last (bool): If ``True``, the sampler will drop the last few batches exceeding
the least number of batches among replicas, otherwises, the number of batches
on each replica will be padded to the same.
`batch_fn` will have the signature of
``def batch_fn(indices: List[int]) -> List[List[int]]``.
Example::
>>> from bagua.torch_api.contrib import LoadBalancingDistributedSampler, \
... LoadBalancingDistributedBatchSampler
>>>
>>> sampler = LoadBalancingDistributedSampler(dataset, complexity_fn=complexity_fn)
>>> batch_sampler = LoadBalancingDistributedBatchSampler(sampler, batch_fn=batch_fn)
>>> loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)
>>>
>>> for epoch in range(start_epoch, n_epochs):
... batch_sampler.set_epoch(epoch)
... train(loader)
"""
def __init__(
self,
sampler: LoadBalancingDistributedSampler,
batch_fn,
drop_last: bool = False,
) -> None:
if not isinstance(sampler, LoadBalancingDistributedSampler):
raise ValueError(
"sampler should be of LoadBalancingDistributedSampler type."
)
if sampler.drop_last:
raise ValueError("drop_last of sampler should be False")
self.sampler = sampler
self.batch_fn = batch_fn
self.drop_last = drop_last
self.num_replicas = self.sampler.num_replicas
self.rank = self.sampler.rank
self.generate_batches()
def generate_batches(self):
index_chunks, chunk_indices = self.sampler.shuffle_chunks()
batches = []
for rank in range(self.num_replicas):
sub_indices = [index_chunks[i][rank] for i in chunk_indices]
batches.append(self.batch_fn(sub_indices))
self.total_batch = (
max([len(b) for b in batches])
if not self.drop_last
else min([len(b) for b in batches])
)
# here {len(batches[self.rank]) - self.total_batch} batches dropped for
# rank {self.rank}
if self.total_batch < len(batches[self.rank]):
pass
self.padded_batches = [
batch + batch[: self.total_batch - len(batch)] for batch in batches
]
def __iter__(self):
return iter(self.padded_batches[self.rank])
def __len__(self):
return self.total_batch
def set_epoch(self, epoch: int) -> None:
r"""
Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
use a different random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Args:
epoch (int): Epoch number.
"""
self.sampler.set_epoch(epoch)
self.generate_batches()
```
#### File: bagua/torch_api/utils.py
```python
from collections import OrderedDict
import torch.distributed as dist
import torch
import math
import time
import logging
import numpy as np
from typing import Tuple, List
LOGGER = logging.getLogger(__name__)
flatten = torch._utils._flatten_dense_tensors
unflatten = torch._utils._unflatten_dense_tensors
def apply_flattened_call(bucket, call, extra_args=None):
coalesced = flatten(bucket)
if extra_args is not None:
call(coalesced, *extra_args)
else:
call(coalesced)
if call is dist.all_reduce:
coalesced /= dist.get_world_size()
for buf, synced in zip(bucket, unflatten(coalesced, bucket)):
buf.copy_(synced)
def _group_by_tensor_type(tensors):
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
return buckets
def apply_flattened_call_all(tensors, call):
"""
Apply call on a list of tensors.
"""
grouped_tensors = _group_by_tensor_type(tensors)
for tensors in grouped_tensors.values():
apply_flattened_call(tensors, call)
def align_size(size, align):
return int((size + align - 1) / align) * align
def check_contiguous(tensors):
data_ptr = None
for t in tensors:
if data_ptr is not None and t.data_ptr() != data_ptr:
return False
data_ptr = t.data_ptr() + t.numel() * t.element_size()
return True
def _get_params_flattened_aligned_size(params, align_bytes):
assert align_bytes == 1 or (
align_bytes % params[0].element_size() == 0
), "align bytes must be multiples of element size"
sizes = [p.numel() for p in params]
total_size = sum(sizes)
aligned_total_size = (
align_size(total_size * params[0].element_size(), align_bytes)
// params[0].element_size()
)
# padding to the last param
sizes[-1] += aligned_total_size - total_size
for p, sz in zip(params, sizes):
p.allocated_size = sz
return aligned_total_size
def flatten_module_params(params_list, align_bytes: int):
if len(params_list) == 0:
return
if not isinstance(params_list[0], list):
params_list = [params_list]
total_size = 0
for params in params_list:
total_size += _get_params_flattened_aligned_size(params, align_bytes)
logging.debug(
f"flatten {str(params_list[0][0].dtype).partition('.')[-1]} params aligned to {align_bytes} bytes, total numels: {total_size}"
)
flatten_weights_tensor = torch.zeros(total_size, dtype=params_list[0][0].dtype).to(
params_list[0][0].device
)
flatten_grads_tensor = torch.zeros(total_size, dtype=params_list[0][0].dtype).to(
params_list[0][0].device
)
flatten_weights_storage = flatten_weights_tensor.storage()
flatten_grads_storage = flatten_grads_tensor.storage()
def set_storage(param, weight_storage, grad_storage, storage_offset):
with torch.no_grad():
z = torch.zeros_like(param.data)
z.set_(weight_storage, storage_offset, param.shape)
param.data = z
t = torch.zeros_like(param.data)
t.set_(grad_storage, storage_offset, param.shape)
param.grad = t
offset = 0
for params in params_list:
for p in params:
# copy data
flatten_weights_tensor[offset : offset + p.numel()] = p.data.reshape(-1)
if p.grad is not None:
flatten_grads_tensor[offset : offset + p.numel()] = p.grad.data.reshape(
-1
)
else:
logging.debug(f"grad is none, {offset}")
# flatten
set_storage(p, flatten_weights_storage, flatten_grads_storage, offset)
offset += p.allocated_size
logging.debug(f"flatten param done {offset}")
# # check
for params in params_list:
weight_tensors = [p.data for p in params]
grad_tensors = [p.grad.data for p in params]
assert check_contiguous(weight_tensors)
assert check_contiguous(grad_tensors)
return new_param(flatten_weights_tensor, flatten_grads_tensor)
def collocate_params(params):
"""
`tensors` share the same storage
"""
if len(params) == 1:
return params[0]
logging.debug(f"fuse {len(params)} params")
sorted_params = sorted(params, key=lambda x: x.storage_offset())
start = None
offset = 0
for p in sorted_params:
weight = p.data
grad = p.grad.data
assert (
weight.storage_offset() == grad.storage_offset()
), "collocated weights and grads must have consistent storage offset"
if start is None:
start = offset = weight.storage_offset()
else:
assert (
offset == weight.storage_offset()
), "params collocated must be contiguous"
offset += (
p.bagua_tensor.num_elem_allocated()
if hasattr(p, "bagua_tensor")
else p.numel()
)
with torch.no_grad():
weight_tensor = torch.zeros(offset - start, dtype=params[0].dtype).to(
params[0].device
)
weight_tensor.set_(params[0].data.storage(), start, weight_tensor.shape)
grad_tensor = torch.zeros(offset - start, dtype=params[0].dtype).to(
params[0].device
)
grad_tensor.set_(params[0].grad.data.storage(), start, grad_tensor.shape)
return new_param(weight_tensor, grad_tensor)
def new_param(weight, grad):
with torch.no_grad():
p = torch.nn.Parameter(weight, requires_grad=False)
p.grad = grad
return p
def to_bagua_datatype(datatype):
if datatype == torch.float32:
return "f32"
elif datatype == torch.float16:
return "f16"
elif datatype == torch.uint8:
return "u8"
elif datatype == torch.long:
return "i64"
else:
raise ValueError(f"unsupported data type {datatype}.")
def to_bagua_reduce_op(torch_reduce_op):
if torch_reduce_op == dist.ReduceOp.SUM:
return 0
elif torch_reduce_op == dist.ReduceOp.PRODUCT:
return 1
elif torch_reduce_op == dist.ReduceOp.MIN:
return 2
elif torch_reduce_op == dist.ReduceOp.MAX:
return 3
elif torch_reduce_op == dist.ReduceOp.BOR:
return 7
elif torch_reduce_op == dist.ReduceOp.BAND:
return 8
elif torch_reduce_op == dist.ReduceOp.BXOR:
return 9
else:
raise ValueError(f"unsupported reduce op {torch_reduce_op}.")
def average_by_removing_extreme_values(raw_score_list):
score_list = np.asarray(raw_score_list)
# weed out warm up data
score_list = score_list[len(score_list) // 3 :]
def weed_out_outliers(X):
mean = np.mean(X)
std = np.std(X)
distance_from_mean = abs(X - mean)
        max_deviations = 1
        not_outlier = distance_from_mean < max_deviations * std
not_outliers = X[not_outlier]
if len(not_outliers) == 0:
return X
return not_outliers
score_list = weed_out_outliers(score_list)
# Repeat up to ten times
for i in range(10):
if np.std(score_list) < np.mean(score_list):
break
score_list = weed_out_outliers(score_list)
# score = np.mean(score_list) # TODO: @shjwudp check whether these are still needed
# std = np.std(score_list)
return np.mean(score_list), np.std(score_list), score_list.tolist()
class StatisticalAverage:
def __init__(
self,
last_update_time: float = time.time(),
records: List[float] = [],
record_tail: Tuple[float, float] = (0.0, 0.0), # [tail_len, tail_val]
) -> None:
"""Track and record the average over a period of time.
Args:
last_update_time (float, optional): last update time.
Defaults to time.time().
records (List[float], optional): statistical average value from
`last_update_time`, records[i] is the average value from
last_update_time to last_update_time + 2 ^ i (unit: seconds).
Defaults to [].
            record_tail (Tuple[float, float], optional): tail of record, first one
is tail length (unit: seconds), second one is tail average
value. Defaults to (0., 0.).
"""
self.last_update_time: float = last_update_time
self.records: List[float] = records
self.record_tail: Tuple[float, float] = record_tail
def record_seconds(self) -> float:
return 2.0 ** (len(self.records) - 1) if len(self.records) != 0 else 0.0
def total_recording_time(self) -> float:
(tail_seconds, _) = self.record_tail
return self.record_seconds() + tail_seconds
def get_records_mean(self, last_n_seconds: float) -> float:
if last_n_seconds <= 0.0:
return 0.0
records_seconds = self.record_seconds()
(tail_seconds, tail_mean) = self.record_tail
if len(self.records) == 0:
return tail_mean
if last_n_seconds < 1.0:
return self.records[0]
if last_n_seconds <= records_seconds:
floor_id = max(0, math.floor(math.log(last_n_seconds, 2.0)))
floor_time = 2.0 ** floor_id
if floor_id + 1 < len(self.records):
a, b = self.records[floor_id], self.records[floor_id + 1]
a_l, b_l = floor_time, floor_time * 2.0
mean = a + (b - a) * (last_n_seconds - a_l) / (b_l - a_l)
else:
mean = self.records[floor_id]
elif last_n_seconds <= records_seconds + tail_seconds:
a, b = self.records[-1], tail_mean
a_l, b_l = records_seconds, records_seconds + tail_seconds
mean = a + (b - a) * (last_n_seconds - a_l) / (b_l - a_l)
else:
mean = tail_mean
return mean
def record(self, val: float):
now = time.time()
time_dist: float = now - self.last_update_time
new_records: List[float] = []
new_tail: Tuple[float, float] = (0.0, 0.0)
for i in range(64):
coverage_period = 2.0 ** i
if coverage_period <= time_dist:
new_records.append(val)
elif coverage_period <= time_dist + self.total_recording_time():
record_contribution_percentage = time_dist / coverage_period
new_val = val * record_contribution_percentage + self.get_records_mean(
coverage_period - time_dist
) * (1.0 - record_contribution_percentage)
new_records.append(new_val)
if coverage_period > time_dist + self.total_recording_time():
break
else:
new_total_time = time_dist + self.total_recording_time()
report_contribution_percentage = time_dist / new_total_time
tail_len = new_total_time - 2.0 ** (len(new_records) - 1)
tail_val = val * report_contribution_percentage + self.get_records_mean(
self.total_recording_time()
) * (1.0 - report_contribution_percentage)
new_tail = (tail_len, tail_val)
break
self.last_update_time = now
self.records = new_records
self.record_tail = new_tail
def get(self, last_n_seconds: float) -> float:
time_dist = time.time() - self.last_update_time
if last_n_seconds <= time_dist:
if len(self.records) != 0:
return self.records[0]
else:
                (_, tail_mean) = self.record_tail
return tail_mean
return self.get_records_mean(last_n_seconds - time_dist)
def __str__(self) -> str:
return str(
{
"last_update_time": self.last_update_time,
"records": self.records,
"record_tail": self.record_tail,
}
)
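# --- Hypothetical usage sketch (added for illustration; not part of the original module).
# Assuming record() is called with each new observation, get(n) then returns the
# approximate mean of the observed values over the trailing n seconds.
if __name__ == "__main__":
    tracker = StatisticalAverage()
    for observed_speed in [10.0, 12.0, 11.0]:
        time.sleep(0.1)  # pretend some work happens between observations
        tracker.record(observed_speed)
    print(tracker.get(1.0))  # approximate mean over the last second
    print(tracker)           # shows last_update_time, records and record_tail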
```
#### File: tests/internal/compressor.py
```python
import torch
class MinMaxUInt8:
def __init__(self):
self.eps = 1e-7
self.quantization_level = 255.0
def compress(self, tensor: torch.Tensor) -> (torch.Tensor, torch.Tensor):
_min = torch.min(tensor)
_max = torch.max(tensor)
scale = self.quantization_level / (_max - _min + self.eps)
upper_bound = torch.round(_max * scale)
lower_bound = upper_bound - self.quantization_level
level = torch.round(tensor * scale)
level = torch.clamp(level, max=upper_bound)
_minmax = torch.zeros(2, dtype=tensor.dtype, device=tensor.device)
_minmax[0] = _min
_minmax[1] = _max
return _minmax, (level - lower_bound).to(torch.uint8)
def decompress(
self, _minmax: torch.Tensor, compressed: torch.Tensor
) -> torch.Tensor:
_min = _minmax[0]
_max = _minmax[1]
scale = self.quantization_level / (_max - _min + self.eps)
upper_bound = torch.round(_max * scale)
lower_bound = upper_bound - self.quantization_level
return (compressed.float() + lower_bound) / scale
if __name__ == "__main__":
x = torch.rand(100).cuda()
_minmax, compressed = MinMaxUInt8().compress(x)
decompressed = MinMaxUInt8().decompress(_minmax, compressed)
diff = x - decompressed
print(f"{diff}, {torch.norm(diff)}")
```
#### File: tests/torch_api/test_decentralized.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from tests.internal.common_utils import find_free_port
import unittest
import torch.multiprocessing as mp
import os
from bagua.torch_api.utils import flatten, unflatten
import bagua.torch_api as bagua
N_EPOCHS = 10
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=True)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
def _init_env(rank):
# set deterministic
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(rank)
# initialize subprocess env
os.environ["RANK"] = str(rank)
os.environ["LOCAL_RANK"] = str(rank)
def run_model(
rank, nprocs, hierarchical, peer_selection_mode, communication_interval, results
):
_init_env(rank)
# init bagua distributed process group
torch.cuda.set_device(rank)
bagua.init_process_group()
# construct model and optimizer, etc.
model = Net().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_fn = nn.MSELoss()
# wrap model
model = model.with_bagua(
[optimizer],
bagua.algorithms.decentralized.DecentralizedAlgorithm(
hierarchical=hierarchical,
peer_selection_mode=peer_selection_mode,
communication_interval=communication_interval,
),
)
ret = results[rank]
ret.init_weight.copy_(flatten([param.data for param in model.parameters()]))
for epoch in range(N_EPOCHS):
data = torch.randn(4, 2).cuda()
target = torch.randn(4, 4).cuda()
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
ret.bucket_weight.copy_(model.bagua_buckets[0]._peer_weight)
def run_torch_model(
rank,
nprocs,
hierarchical,
peer_selection_mode,
communication_interval,
results,
backend,
):
_init_env(rank)
# init torch distributed process group
torch.cuda.set_device(rank)
store = torch.distributed.FileStore("/tmp/filestore", nprocs)
torch.distributed.init_process_group(
world_size=nprocs, rank=rank, store=store, backend=backend
)
# construct model and optimizer, etc.
model = Net().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_fn = nn.MSELoss()
# wrap model
model = DecentralizedAlgor(
model, optimizer, hierarchical, peer_selection_mode, communication_interval
)
ret = results[rank]
ret.init_weight.copy_(flatten([param.data for param in model.parameters()]))
for epoch in range(N_EPOCHS):
data = torch.randn(4, 2).cuda()
target = torch.randn(4, 4).cuda()
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, target)
loss.backward()
model.step()
ret.bucket_weight.copy_(model.peer_weight)
class Result(object):
def __init__(self):
model = Net()
self.init_weight = flatten(
[torch.zeros_like(param.data) for param in model.parameters()]
)
self.bucket_weight = flatten(
[torch.zeros_like(param.data) for param in model.parameters()]
)
class DecentralizedAlgor(nn.Module):
def __init__(
self,
module,
optimizer,
hierarchical,
peer_selection_mode,
communication_interval,
):
super(DecentralizedAlgor, self).__init__()
self.module = module
self.optimizer = optimizer
self.hierarchical = hierarchical
self.peer_selection_mode = peer_selection_mode
self.communication_interval = communication_interval
self.step_count = 0
assert torch.distributed.is_initialized()
self.rank = torch.distributed.get_rank()
self.world_size = torch.distributed.get_world_size()
# broadcast parameters
for param in self.module.parameters():
torch.distributed.broadcast(param.data, src=0)
def _build_params(self):
return [param.data for param in list(self.module.parameters()).__reversed__()]
def communicate_with_peer(self):
if self.peer_selection_mode == "all":
torch.distributed.all_reduce(self.peer_weight)
self.peer_weight /= self.world_size
elif self.peer_selection_mode == "shift_one":
peer_rank = get_peer_rank(
self.peer_selection_mode,
self.rank,
self.world_size,
self.step_count,
self.communication_interval,
)
weight = self.weight.cpu()
peer_weight = self.peer_weight.cpu()
requests = []
requests.append(torch.distributed.isend(weight, peer_rank))
requests.append(torch.distributed.irecv(peer_weight, peer_rank))
for req in requests:
req.wait()
self.peer_weight = peer_weight.cuda()
self.weight = weight.cuda()
self.peer_weight += self.weight
self.peer_weight /= 2
else:
raise ValueError("Unsupported `peer_selection_mode`")
def _should_communicate(self):
return self.step_count % self.communication_interval == 0
def forward(self, *inputs, **kwargs):
if self._should_communicate():
self.weight = flatten(self._build_params())
self.peer_weight = flatten(self._build_params())
self.communicate_with_peer()
result = self.module(*inputs, **kwargs)
return result
def step(self):
if self._should_communicate():
params = self._build_params()
for buf, synced in zip(params, unflatten(self.peer_weight, params)):
buf.copy_(synced)
self.optimizer.step()
self.step_count += 1
def get_peer_rank(peer_selection_mode, rank, nranks, step, communication_interval):
comm_step = step // communication_interval
if peer_selection_mode == "shift_one":
if rank < nranks // 2:
return ((comm_step + rank) % ((nranks + 1) // 2)) + (nranks // 2)
else:
return (rank - (nranks // 2) - comm_step) % (nranks // 2)
else:
ValueError("Unsupported `peer_selection_mode`")
class TestLowPrecisionDecentralized(unittest.TestCase):
def run_test_locally(
self, nprocs, hierarchical, peer_selection_mode, communication_interval
):
if not torch.cuda.is_available():
print("skip tests since cuda is not available")
return
nprocs = torch.cuda.device_count()
os.environ["WORLD_SIZE"] = str(nprocs)
os.environ["LOCAL_WORLD_SIZE"] = str(nprocs)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(find_free_port())
os.environ["BAGUA_SERVICE_PORT"] = str(find_free_port())
results = [Result() for _ in range(nprocs)]
mp.spawn(
run_model,
nprocs=nprocs,
args=(
nprocs,
hierarchical,
peer_selection_mode,
communication_interval,
results,
),
)
for rank in range(nprocs):
if peer_selection_mode == "all":
peer_rank = (rank + 1) % nprocs
# all workers have equal weights
self.assertTrue(
torch.equal(
results[rank].bucket_weight,
results[peer_rank].bucket_weight,
)
)
elif peer_selection_mode == "shift_one":
peer_rank = get_peer_rank(
peer_selection_mode,
rank,
nprocs,
N_EPOCHS - 1,
communication_interval,
)
self.assertTrue(
torch.equal(
results[rank].bucket_weight, results[peer_rank].bucket_weight
)
)
else:
raise ValueError("illegal `peer_selection_mode`!")
def run_diff_locally(
self, nprocs, hierarchical, peer_selection_mode, communication_interval, backend
):
if not torch.cuda.is_available():
print("skip tests since cuda is not available")
return
os.environ["WORLD_SIZE"] = str(nprocs)
os.environ["LOCAL_WORLD_SIZE"] = str(nprocs)
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(find_free_port())
os.environ["BAGUA_SERVICE_PORT"] = str(find_free_port())
torch_results = [Result() for _ in range(nprocs)]
mp.spawn(
run_torch_model,
nprocs=nprocs,
args=(
nprocs,
hierarchical,
peer_selection_mode,
communication_interval,
torch_results,
backend,
),
)
bagua_results = [Result() for _ in range(nprocs)]
mp.spawn(
run_model,
nprocs=nprocs,
args=(
nprocs,
hierarchical,
peer_selection_mode,
communication_interval,
bagua_results,
),
)
for rank in range(nprocs):
self.assertTrue(
torch.all(
torch.isclose(
bagua_results[rank].init_weight,
torch_results[rank].init_weight,
)
).item()
)
self.assertTrue(
torch.all(
torch.isclose(
bagua_results[rank].bucket_weight,
torch_results[rank].bucket_weight,
)
).item()
)
def test_algorithm(self):
self.run_test_locally(
nprocs=8,
hierarchical=False,
peer_selection_mode="all",
communication_interval=1,
)
self.run_test_locally(
nprocs=8,
hierarchical=False,
peer_selection_mode="shift_one",
communication_interval=1,
)
self.run_test_locally(
nprocs=8,
hierarchical=False,
peer_selection_mode="shift_one",
communication_interval=2,
)
def test_compare(self):
self.run_diff_locally(
nprocs=8,
hierarchical=False,
peer_selection_mode="all",
communication_interval=1,
backend="gloo",
)
self.run_diff_locally(
nprocs=8,
hierarchical=False,
peer_selection_mode="shift_one",
communication_interval=1,
backend="gloo",
)
self.run_diff_locally(
nprocs=8,
hierarchical=False,
peer_selection_mode="shift_one",
communication_interval=2,
backend="gloo",
)
if __name__ == "__main__":
unittest.main()
```
#### File: .test_to_migrate/qsgd/utils.py
```python
from torch.distributions.bernoulli import Bernoulli
import torch
import cupy
from bagua.torch_api.utils import torch2cupy, cupy2torch
def qsgd_compress(t, pnorm="inf", quan_bits=8):
"""
Quantize float32 into uint8 using QSGD algorithm.
Arguments:
* `t` - Input PyTorch CUDA Tensor to be quantized.
* `pnorm` - Order of norm used for QSGD compression. Default value is `inf`.
* `quan_bits` - Number of quantization bits used for QSGD compression. Default
value is `8`. Other values like `1`,`2`,`4` are not supported at present.
Yields:
(Torch.tensor, Torch.tensor, Torch.tensor): Quantized PyTorch CUDA Tensors.
Examples:
```python
cupy_stream = cupy.cuda.ExternalStream(torch.cuda.current_stream().cuda_stream)
cupy_stream.use()
norm, sign, compressed_ints = qsgd_compress(tensor)
```
..note::
CuPy and PyTorch use different default CUDA streams. Force CuPy to use PyTorch
current CUDA stream to simplify stream synchronization.
"""
_quantization_level = (1 << quan_bits) - 1
norm = t.norm(float(pnorm))
sign = (torch.sign(t) + 1.0).bool()
_level = t / norm * _quantization_level
_bernoulli_probs = _level - _level.floor()
_incr = Bernoulli(probs=_bernoulli_probs).sample()
_compressed_floats = (_level.floor() + _incr.float()).abs()
compressed_floats = torch.clamp(_compressed_floats, max=_quantization_level)
compressed_ints = compressed_floats.byte()
packed_sign = _cupy_packbits(sign)
return norm, packed_sign, compressed_ints
def qsgd_decompress(norm, packed_sign, compressed_ints, quan_bits=8):
"""
The reverse of the ``qsgd_compress`` function.
Arguments:
* `norm` - Order of norm used in QSGD compression.
* `packed_sign` - Packed sign of quantized tensor.
* `compressed_ints` - Absolute value of quantized tensor.
* `quan_bits` - Number of quantization bits. Default value is 8. Other
      values like `1`,`2`,`4` are not supported at present.
Yields:
Torch.tensor: De-quantized tensor.
Examples:
```python
cupy_stream = cupy.cuda.ExternalStream(torch.cuda.current_stream().cuda_stream)
cupy_stream.use()
    tensor = qsgd_decompress(norm, sign, compressed_ints)
```
..note::
CuPy and PyTorch use different default CUDA streams. Force CuPy to use PyTorch
current CUDA stream to simplify stream synchronization.
"""
compressed_floats = compressed_ints.float()
_quantization_level = (1 << quan_bits) - 1
sign = _cupy_unpackbits(packed_sign, compressed_floats.size(0))
return norm * (sign.float() * 2 - 1) * compressed_floats / _quantization_level
def _cupy_packbits(tensor):
cupy_tensor = torch2cupy(tensor)
packed_cupy_tensor = cupy.packbits(cupy_tensor)
packed_tensor = cupy2torch(packed_cupy_tensor)
return packed_tensor
def _cupy_unpackbits(tensor, size):
cupy_tensor = torch2cupy(tensor)
unpacked_cupy_tensor = cupy.unpackbits(cupy_tensor)[0:size]
tensor = cupy2torch(unpacked_cupy_tensor)
return tensor
``` |
{
"source": "JLivingston01/JasonvsData",
"score": 3
} |
#### File: JLivingston01/JasonvsData/correlation functions.py
```python
def rsquaredfn(X,Y):
import pandas as pd
import numpy as np
R = pd.DataFrame()
R['X'] = X
R['Y'] = Y
MeanX = np.mean(R['X'])
    MeanY = np.mean(R['Y'])
R['A'] = R['X'] - MeanX
R['B'] = R['Y'] - MeanY
R['AXB'] = R['A']*R['B']
R['A2'] = R['A']*R['A']
R['B2'] = R['B']*R['B']
AXB = np.sum(R['AXB'])
A2 = np.sum(R['A2'])
B2 = np.sum(R['B2'])
correl = AXB/np.sqrt(A2*B2)
rsquared = correl*correl
return(rsquared)
## After importing an SKlearn predictive model, Returning adjusted R-Squared. Intended for regression problems.
## Can be adapted to return model coefficients/projections along with R-Squared
def adjustedrsquaredfn(model, training_data, target_data):
import pandas as pd
import numpy as np
lm = model()
lm.fit(training_data,target_data)
data = pd.DataFrame()
data['Target'] = target_data
data['Fit'] = lm.predict(training_data)
MeanX = np.mean(data['Target'])
MeanY = np.mean(data['Fit'])
data['A'] = data['Target'] - MeanX
data['B'] = data['Fit'] - MeanY
data['AXB'] = data['A']*data['B']
data['A2'] = data['A']*data['A']
data['B2'] = data['B']*data['B']
AXB = np.sum(data['AXB'])
A2 = np.sum(data['A2'])
B2 = np.sum(data['B2'])
correl = AXB/np.sqrt(A2*B2)
rsquared = correl*correl
    adjustedrsquared = 1-((1-rsquared)*(len(data['Target'])-1)/(len(data['Target'])-len(list(training_data.columns.values))-1))
return(adjustedrsquared)
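# --- Hypothetical usage sketch (added for illustration; the DataFrame below is made up).
# rsquaredfn takes two numeric series; adjustedrsquaredfn takes the model *class* itself
# (not an instance), the feature DataFrame and the target series.
if __name__ == "__main__":
    import pandas as pd
    from sklearn.linear_model import LinearRegression
    demo = pd.DataFrame({'x1': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
                         'x2': [2.0, 1.0, 4.0, 3.0, 6.0, 5.0],
                         'y': [3.1, 2.9, 7.2, 6.8, 11.1, 10.9]})
    print(rsquaredfn(demo['x1'], demo['y']))
    print(adjustedrsquaredfn(LinearRegression, demo[['x1', 'x2']], demo['y']))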
```
#### File: JLivingston01/JasonvsData/data_to_string.py
```python
class data_to_string:
def df_to_str(X):
string1 = []
for j in range(len(X[list(X.columns.values)[0]])):
for i in list(X.columns.values):
y = str(X[i][j])
string = i+"="+y
amp = "&"
string1.append(string)
if i != list(X.columns.values)[len(list(X.columns.values))-1]:
string1.append(amp)
lin = "\n"
if j != len(X[list(X.columns.values)[0]])-1:
string1.append(lin)
str1 = "".join(string1)
return(str1)
def df_to_html(X):
string = ['<table><tr>']
for j in list(X.columns.values):
x = '<th>'+j+'</th>'
string.append(x)
string.append('</tr>')
for i in range(len(X[list(X.columns.values)[0]])):
string.append('<tr>')
for k in list(X.columns.values):
x = str(X[k][i])
y = '<td>'+x+'</td>'
string.append(y)
string.append('</tr>')
string.append('</table>')
string1 = "".join(string)
return(string1)
def df_to_list_of_lists(df):
df2 = []
for i in range(len(df[list(df.columns.values)[0]])):
templist = []
for col in list(df.columns.values):
x = df[col][i]
templist.append(x)
df2.append(templist)
return df2
##transform data string in form 'category equal element elem_space lin_space' into Pandas DF
##example of query string 'date=2017-01-01&a=8&b=5&c=6\ndate=2017-01-02&a=3&b=3&c=0'
##casting would be manual after application
class strings_to_df:
def query_str_to_tbl(X,lin_space,equal,elem_space):
import pandas as pd
import numpy as np
rows = X.split(lin_space)
fields = []
data = []
for i in range(len(rows)):
row_n = rows[i].replace(equal+elem_space,equal+"nan"+elem_space)
row_n = rows[i].replace(equal,",")
row_n = row_n.replace(elem_space,",")
row1_sep = row_n.split(",")
for j in range(len(row1_sep)):
if i == 0:
if j % 2 == 0:
fields.append(row1_sep[j])
else:
data.append(row1_sep[j])
else:
if j % 2 != 0:
data.append(row1_sep[j])
data1 = pd.DataFrame()
for i in range(len(fields)):
tempfields = []
for j in range(len(rows)):
tempfields.append(data[i+j*len(fields)])
data1[fields[i]] = tempfields
data1 = data1.replace("nan",np.NaN)
return(data1)
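# --- Hypothetical usage sketch (added for illustration), parsing the example query string
# from the comment above with "\n" as the line separator, "=" as equal and "&" as the
# element separator. Casting of the resulting columns stays manual, as noted.
if __name__ == "__main__":
    example = 'date=2017-01-01&a=8&b=5&c=6\ndate=2017-01-02&a=3&b=3&c=0'
    parsed = strings_to_df.query_str_to_tbl(example, lin_space='\n', equal='=', elem_space='&')
    print(parsed)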
``` |
{
"source": "JLivingston01/py_research",
"score": 3
} |
#### File: py_research/scripts/football_analysis.py
```python
import pandas as pd
import os
import numpy as np
files = os.listdir()
files = [i for i in files if ("Premier_League" in i)&("_b" in i)]
dat = pd.DataFrame()
for i in files:
temp = pd.read_csv(i)
dat = dat.append(temp)
dat['date'] = pd.to_datetime(dat['date'])
dat.sort_values(by=['date'],ascending=True,inplace=True)
dat.reset_index(inplace=True,drop=True)
dat['homewin']=np.where(dat['team_home_90min_goals']>dat['team_away_90min_goals'],1,0)
dat['awaywin']=np.where(dat['team_home_90min_goals']<dat['team_away_90min_goals'],1,0)
dat['draw']=np.where(dat['team_home_90min_goals']==dat['team_away_90min_goals'],1,0)
seasons = dat['seasonName'].unique()
def process_seasons(season_name,dat):
teststr = """
home_record = dat[dat['seasonName'].str.contains("1999/2000")].groupby(
['homeName'])[['team_home_90min_goals','team_away_90min_goals','homewin','awaywin','draw']].cumsum()
home_record['team'] = dat['homeName']
home_record['date'] = dat['date']
home_record.columns=['gf','ga','wins','losses','draws','team','date']
away_record = dat[dat['seasonName'].str.contains("1999/2000")].groupby(
['awayName'])[['team_away_90min_goals','team_home_90min_goals','awaywin','homewin','draw']].cumsum()
away_record['team'] = dat['awayName']
away_record['date'] = dat['date']
away_record.columns=['gf','ga','wins','losses','draws','team','date']
season_name = seasons[0]
"""
home_game = dat[dat['seasonName']==season_name][
['homeName','date','team_home_90min_goals','team_away_90min_goals','homewin','awaywin','draw']].copy()
home_game.columns=['team','date','gf','ga','wins','losses','draws']
away_game = dat[dat['seasonName']==season_name][
['awayName','date','team_away_90min_goals','team_home_90min_goals','awaywin','homewin','draw']].copy()
away_game.columns=['team','date','gf','ga','wins','losses','draws']
season = home_game.append(away_game)
season.sort_values(by='date',ascending=True,inplace=True)
season.reset_index(inplace=True,drop=True)
form = season.groupby(['team'])[['wins','losses','draws']].rolling(window=5,min_periods=0).sum().reset_index()
form.set_index('level_1',inplace=True)
form.sort_values(by='level_1',inplace=True)
form['l5_points'] = form['wins']*3+form['draws']
cumeseason = season.groupby(['team'])[['gf','ga','wins','losses','draws']].cumsum()
cumeseason['l5_wins'] = form['wins']
cumeseason['l5_losses'] = form['losses']
cumeseason['l5_draws'] = form['draws']
cumeseason['l5_points'] = form['l5_points']
cumeseason['team']=season['team']
cumeseason['points'] = 3*cumeseason['wins']+1*cumeseason['draws']
cumeseason['round'] = cumeseason.groupby(['team']).cumcount()+1
cumeseason['season'] = season_name
check = """
cumeseason[cumeseason['team']=='Liverpool']
dat[(dat['seasonName']==season_name)&
((dat['homeName']=='Liverpool')|(dat['awayName']=='Liverpool'))].reset_index()
dat[(dat['seasonName']==season_name)&
((dat['round']==28))].reset_index()
season[season['team']=='Liverpool'].reset_index()
home_game[home_game['team']=='Liverpool'].reset_index()
away_game[away_game['team']=='Liverpool'].reset_index()"""
return cumeseason
season_name = seasons[-2]
cumeseasons = pd.DataFrame()
for season_name in seasons:
temp = process_seasons(season_name,dat)
cumeseasons=cumeseasons.append(temp)
cumeseasons['gd']=cumeseasons['gf']-cumeseasons['ga']
cumeseasons.sort_values(by=['points','gd'],ascending=[False,False],inplace=True)
cumeseasons['place']=cumeseasons.groupby(['season','round']).cumcount()+1
cumeseasons['relegation'] = np.where(cumeseasons['place']>17,1,0)
cumeseasons[(cumeseasons['team']=='Liverpool')&(cumeseasons['season'].str.contains("2019/2020"))]
cumeseasons[(cumeseasons['round']==25)&
(cumeseasons['season'].str.contains("2019/2020"))]
cumeseasons[(cumeseasons['round']==38)&
(cumeseasons['season'].str.contains("2019/2020"))]
dat[(dat['seasonName']=='Premier League - 2019/2020')&
((dat['homeName']=='Liverpool')|(dat['awayName']=='Liverpool'))]
week14=cumeseasons[cumeseasons['round']==14].copy()
week10=cumeseasons[cumeseasons['round']==10].copy()
week33=cumeseasons[cumeseasons['round']==33].copy()
week14['key']=week14['team']+" "+week14['season']
week10['key']=week10['team']+" "+week10['season']
week33['key']=week33['team']+" "+week33['season']
week10=week10[['key','gf','ga','gd','points','place','wins','losses','draws','l5_wins','l5_losses','l5_draws','l5_points']]
week14=week14[['key','gf','ga','gd','points','place','wins','losses','draws','l5_wins','l5_losses','l5_draws','l5_points']]
week33=week33[['key','relegation']]
week10.columns = ['key']+['week_10_'+i for i in week10.columns if i!='key']
week14.columns = ['key']+['week_14_'+i for i in week14.columns if i!='key']
M = week10.merge(week14,on=['key'],how='left')
M=M.merge(week33,on=['key'],how='left')
M['int']=1
val = M[
(M['key'].str.contains('2019'))|
(M['key'].str.contains('2018'))#|
# (M['key'].str.contains('2017'))
].copy()
train = M[~(
(M['key'].str.contains('2019'))|
(M['key'].str.contains('2018'))#|
# (M['key'].str.contains('2017'))
)].copy()
train=train[~train['key'].str.contains('2020')].copy()
ars = M[(M['key'].str.contains('Arsenal'))&(M.key.str.contains("2020/2021"))].copy()
np.mean(M[(M['week_14_l5_wins']==0)&
(M['week_14_l5_points']<=1)&
(M['week_14_points']<=14)&
(~M['key'].str.contains('2021'))]['relegation'])
val.corr()['relegation'].sort_values()
features=['week_10_gf', 'week_10_ga', 'week_10_gd', 'week_10_points',
'week_10_place', 'week_10_wins', 'week_10_losses', 'week_10_draws',
'week_10_l5_wins', 'week_10_l5_losses', 'week_10_l5_draws',
'week_10_l5_points', 'week_14_gf', 'week_14_ga', 'week_14_gd',
'week_14_points', 'week_14_place', 'week_14_wins',
'week_14_losses', 'week_14_draws', 'week_14_l5_wins',
'week_14_l5_losses', 'week_14_l5_draws', 'week_14_l5_points','int']
features=['week_10_l5_losses','week_14_place','week_10_place','week_14_losses','week_10_losses','week_10_ga','int']
kpi = 'relegation'
xt=train[features].copy()
xv=val[features].copy()
xtest = ars[features].copy()
yt = train[kpi]
yv = val[kpi]
yt_odds = yt/(1-yt)
yt_odds=np.where(yt_odds>=1,1e6,1e-6)
yt_log_odds = np.log(yt_odds)
yv_odds = yv/(1-yv)
yv_odds=np.where(yv_odds>=1,1e6,1e-6)
yv_log_odds = np.log(yv_odds)
#logistic
coefs = np.linalg.pinv(xt.T@xt)@(xt.T@yt_log_odds)
yt_log_odds_pred = xt@coefs
yv_log_odds_pred = xv@coefs
ytest_log_odds_pred = xtest@coefs
yt_odds_pred=np.exp(yt_log_odds_pred)
yv_odds_pred=np.exp(yv_log_odds_pred)
ytest_odds_pred=np.exp(ytest_log_odds_pred)
yt_pred = yt_odds_pred/(1+yt_odds_pred)
yv_pred = yv_odds_pred/(1+yv_odds_pred)
ytest_pred = ytest_odds_pred/(1+ytest_odds_pred)
plt.hist(yt_pred)
resultsv = pd.DataFrame()
resultsv['y_pred']=np.where(yv_pred>.33,1,0)
resultsv['y']=yv.values
resultst = pd.DataFrame()
resultst['y_pred']=np.where(yt_pred>.33,1,0)
resultst['y']=yt.values
resultst['accuracy']=np.where((resultst['y']==0)&(resultst['y_pred']==0),'tn',
np.where((resultst['y']==1)&(resultst['y_pred']==1),'tp',
np.where((resultst['y']==0)&(resultst['y_pred']==1),'fp',
np.where((resultst['y']==1)&(resultst['y_pred']==0),'fn','other'))))
resultsv['accuracy']=np.where((resultsv['y']==0)&(resultsv['y_pred']==0),'tn',
np.where((resultsv['y']==1)&(resultsv['y_pred']==1),'tp',
np.where((resultsv['y']==0)&(resultsv['y_pred']==1),'fp',
np.where((resultsv['y']==1)&(resultsv['y_pred']==0),'fn','other'))))
resultst.groupby(['accuracy']).count()
resultsv.groupby(['accuracy']).count()
val['pred']=yv_pred
val['pred']=np.where(val['pred']>=.33,1,0)
val[val['pred']==1]
ars
#sklearn logistic
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(fit_intercept=False).fit(xt,yt)
yv_pred = lr.predict(xv)
yt_pred = lr.predict(xt)
resultst = pd.DataFrame()
resultst['y_pred']=yt_pred
resultst['y']=yt.values
resultst['accuracy']=np.where((resultst['y']==0)&(resultst['y_pred']==0),'tn',
np.where((resultst['y']==1)&(resultst['y_pred']==1),'tp',
np.where((resultst['y']==0)&(resultst['y_pred']==1),'fp',
np.where((resultst['y']==1)&(resultst['y_pred']==0),'fn','other'))))
resultst.groupby(['accuracy']).count()
resultsv = pd.DataFrame()
resultsv['y_pred']=yv_pred
resultsv['y']=yv.values
resultsv['accuracy']=np.where((resultsv['y']==0)&(resultsv['y_pred']==0),'tn',
np.where((resultsv['y']==1)&(resultsv['y_pred']==1),'tp',
np.where((resultsv['y']==0)&(resultsv['y_pred']==1),'fp',
np.where((resultsv['y']==1)&(resultsv['y_pred']==0),'fn','other'))))
resultsv.groupby(['accuracy']).count()
#Tree
from sklearn.tree import DecisionTreeRegressor
rfc = DecisionTreeRegressor().fit(xt,yt)
yv_pred = rfc.predict(xv)
yt_pred = rfc.predict(xt)
ytest_pred = rfc.predict(xtest)
resultst = pd.DataFrame()
resultst['y_pred']=yt_pred
resultst['y']=yt.values
resultst['accuracy']=np.where((resultst['y']==0)&(resultst['y_pred']==0),'tn',
np.where((resultst['y']==1)&(resultst['y_pred']==1),'tp',
np.where((resultst['y']==0)&(resultst['y_pred']==1),'fp',
np.where((resultst['y']==1)&(resultst['y_pred']==0),'fn','other'))))
resultst.groupby(['accuracy']).count()
resultsv = pd.DataFrame()
resultsv['y_pred']=yv_pred
resultsv['y']=yv.values
resultsv['accuracy']=np.where((resultsv['y']==0)&(resultsv['y_pred']==0),'tn',
np.where((resultsv['y']==1)&(resultsv['y_pred']==1),'tp',
np.where((resultsv['y']==0)&(resultsv['y_pred']==1),'fp',
np.where((resultsv['y']==1)&(resultsv['y_pred']==0),'fn','other'))))
resultsv.groupby(['accuracy']).count()
importance = pd.DataFrame()
importance['imp'] = rfc.feature_importances_
importance['features'] = features
importance.sort_values(by='imp',ascending=False)
val['pred']=yv_pred
val[val['pred']==1]
ars
#linear
coefs = np.linalg.pinv(xt.T@xt)@(xt.T@yt)
yt_pred = xt@coefs
yv_pred = xv@coefs
ytest_pred = xtest@coefs
val['pred2']=yv_pred
val[val['pred']==1]
import matplotlib.pyplot as plt
plt.scatter(yt,yt_pred)
plt.scatter(yv,yv_pred)
```
#### File: py_research/scripts/moving_agent.py
```python
import numpy as np
import matplotlib.pyplot as plt
class agent:
"""
Agent has X and Y position, and fuel energy.
Fuel enables the agent to move up to manhattan(1,1) to find more fuel.
"""
def __init__(self,x,y,fuel,eats):
self.x=x
self.y=y
self.fuel=fuel
self.reach=.75
self.eats=eats
def get_posit(self):
return(np.array([self.x,self.y]))
def set_posit(self,x,y):
self.x=x
self.y=y
def get_fuel(self):
return(self.fuel)
def set_fuel(self,fuel):
self.fuel=fuel
def breathe(self,space):
        d=np.sum(abs(space[:,1:]-self.get_posit()),axis=1)
spacetemp=np.column_stack((space,d))
spacetemp=spacetemp[spacetemp[:,3].argsort()]
if len(spacetemp[(spacetemp[:,0]==self.eats)&
(spacetemp[:,3]<=self.reach)][:,0])>=1:
found=len(spacetemp[(spacetemp[:,0]==self.eats)&
(spacetemp[:,3]<=self.reach)][:,0])
self.fuel=found
spacetemp[:,0]=np.where((spacetemp[:,0]==self.eats)&
(spacetemp[:,3]<=self.reach),1-self.eats,spacetemp[:,0])
self.move()
else:
print("No Fuel in range")
return spacetemp[:,0:3]
def move(self):
if self.fuel >0:
self.fuel -= 1
posit=self.get_posit()
delta=np.random.uniform(-1,1,2)
self.set_posit(posit[0]+delta[0],posit[1]+delta[1])
else:
print("No Fuel: Can't Move")
a=np.random.binomial(1,.5,size=100)
b=np.random.normal(5,2.5,size=(100,2))
space=np.column_stack((a,b))
plt.scatter(space[:,1],space[:,2],color=np.where(space[:,0]==1,'red','blue'))
plt.show()
init_posit1=np.random.normal(5,1.5,size=2)
agent1=agent(init_posit1[0],init_posit1[1],2,1)
init_posit2=np.random.normal(5,1.5,size=2)
agent2=agent(init_posit2[0],init_posit2[1],2,0)
space=agent1.breathe(space)
agent1.get_fuel()
space=agent2.breathe(space)
agent2.get_fuel()
```
#### File: py_research/scripts/nn.py
```python
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
return 1/np.float64(1+np.exp(-x))
def createweights(s):
layers=len(s)
layer=0
weights=[]
while layer<layers-1:
w=np.random.normal(0,.05,(s[layer],s[layer+1]))
weights.append(w)
layer=layer+1
return weights
def createbias(s):
layers=len(s)
layer=0
bias=[]
while layer<layers-1:
w=np.random.normal(0,.05,(s[layer+1]))
bias.append(w)
layer=layer+1
return bias
def predict(train,weights,bias,s):
layers=len(s)
layer=0
predict_on=[train]
while layer<layers-1:
pred=sigmoid(predict_on[layer]@weights[layer]+bias[layer])
predict_on.append(pred)
layer=layer+1
return predict_on
def backprop(predict_on,y, weights,bias, s,lr=.01):
layers=len(s)
layer=layers-1
error=predict_on[layer]-y
while layer>0:
inn=predict_on[layer-1]
outt=predict_on[layer]
eoo=error*outt*(1-outt)
gradw=inn.T@eoo
gradb=eoo
weights[layer-1]=weights[layer-1]-lr*gradw.reshape(weights[layer-1].shape)
bias[layer-1]=bias[layer-1]-lr*np.sum(gradb,axis=0)
error=error@weights[layer-1].T
layer=layer-1
return weights,bias
x=np.array([[1,0],[0,1],[1,1],[0,0]])
y=np.array([[1],[1],[0],[0]])
s=[2,3,3,1]
weights=createweights(s=s)
bias=createbias(s=s)
errs = []
for i in range(100000):
predict_on=predict(x,weights, bias,s=s)
errs.append(np.sum(abs(predict_on[-1]-y)))
print(np.sum(abs(predict_on[-1]-y)))
weights,bias=backprop(predict_on,y, weights, bias, s=s,lr=1)
plt.plot(errs)
#Apply on digits
import pandas as pd
training_data = pd.read_csv("c:/users/jliv/downloads/mnist_train.csv")
testing_data = pd.read_csv("c:/users/jliv/downloads/mnist_test.csv")
training_labels = training_data['5']
testing_labels = testing_data['7']
training_data.drop(['5'],axis=1,inplace=True)
testing_data.drop(['7'],axis=1,inplace=True)
training_onehot_y = pd.DataFrame()
training_onehot_y['lab'] = training_labels
lr = np.array(list(range(10)))
for i in lr:
training_onehot_y[i]=np.where(training_onehot_y['lab']==i,1,0)
training_onehot_y.drop(['lab'],axis=1,inplace=True)
training_labels.unique()
testing_labels.unique()
testing_map={i:testing_labels.unique()[i] for i in range(len(testing_labels.unique()))}
training_map={i:training_labels.unique()[i] for i in range(len(training_labels.unique()))}
testing_onehot_y = pd.DataFrame()
testing_onehot_y['lab'] = testing_labels
lr = np.array(list(range(10)))
for i in lr:
testing_onehot_y[i]=np.where(testing_onehot_y['lab']==i,1,0)
testing_onehot_y.drop(['lab'],axis=1,inplace=True)
testing_onehot_y=np.array(testing_onehot_y)
training_onehot_y=np.array(training_onehot_y)
training_data = np.array(training_data)
testing_data = np.array(testing_data)
training_data_flat = training_data.reshape(59999,28*28)
testing_data_flat = testing_data.reshape(9999,28*28)
ex = training_data[1].reshape(28,28)
plt.imshow(ex)
s=[28*28,100,10]
weights=createweights(s=s)
bias=createbias(s=s)
errs = []
for i in range(30000):
predict_on=predict(training_data_flat,weights, bias,s=s)
errs.append(np.sum(abs(predict_on[-1]-training_onehot_y)))
if i%100 == 0:
print(i,":",np.sum(abs(predict_on[-1]-training_onehot_y)))
weights,bias=backprop(predict_on,training_onehot_y, weights, bias, s=s,lr=.00001)
for i in range(len(weights)):
np.save("c://users/jliv/documents/weights_layer_"+str(i),weights[i])
for i in range(len(bias)):
np.save("c://users/jliv/documents/bias_layer_"+str(i),bias[i])
train_pred = np.argmax(predict_on[-1],axis=1)
#train_labs = np.argmax(training_onehot_y,axis=1)
#train_pred_mapped=np.array(pd.Series(train_pred).map(training_map))
trainacc = sum(np.where(train_pred==training_labels,1,0))/len(train_pred)
testpredict_on = predict(testing_data_flat,weights,bias,s=s)
test_pred = np.argmax(testpredict_on[-1],axis=1)
#test_labs=np.argmax(testing_onehot_y,axis=1)
#test_pred_mapped=np.array(pd.Series(test_pred).map(testing_map))
testacc = sum(np.where(test_pred==testing_labels,1,0))/len(test_pred)
print(trainacc)
print(testacc)
```
#### File: py_research/scripts/pence_v_harris.py
```python
import datetime as dt
import oauth2
import pandas as pd
import json
import time
#import pymysql
def pull_tweets(symbols):
def req(query):
consumer = oauth2.Consumer(key='kq5bb4YfBfoLUXd90vCwq4RWX'.encode('utf-8'), secret='<KEY>'.encode('utf-8'))
token = oauth2.Token(key='<KEY>', secret='<KEY>')
client = oauth2.Client(consumer, token)
resp, content = client.request( query, method="GET", body=bytes("", "utf-8"), headers=None )
return content
tdf2 = pd.DataFrame()
for newstock in symbols[:]:
language = 'en'
startdates = [dt.datetime.strftime(dt.datetime.now()+dt.timedelta(-1+i),"%Y-%m-%d") for i in range(8)]
enddates = [dt.datetime.strftime(dt.datetime.now()+dt.timedelta(-0+i),"%Y-%m-%d") for i in range(8)]
#exclude = ['-Congrats','-Stand','-Laura','-Why']
exclude = ['']
#How = mixed, recent or popular
how = 'mixed'
searchterm = newstock
exclude = "%20".join(exclude)
times = []
text = []
retweet_cnt = []
fvrt_cnt = []
user = []
user_flwrs=[]
user_statuses = []
timezone = []
#home_timeline = req("https://api.twitter.com/1.1/application/rate_limit_status.json?resources=help,users,search,statuses")
#home_timeline = home_timeline.decode("utf-8")
#home_timeline = json.loads(home_timeline)
for startdate,enddate in zip(startdates[:],enddates[:]):
raw_query="lang={}&q={}%20-RT%20{}%20since%3A{}%20until%3A{}&result_type={}&count=1000&tweet_mode=extended&-filter=retweets".format(language,searchterm,exclude,startdate,enddate,how)
query = 'https://api.twitter.com/1.1/search/tweets.json?'+raw_query
home_timeline = req(query)
home_timeline = home_timeline.decode("utf-8")
home_timeline = json.loads(home_timeline)
statuses = home_timeline['statuses']
print(startdate,newstock,len(statuses))
#time.sleep(7)
for i in range(len(statuses)):
times.append(statuses[i]['created_at'])
try:
text.append(statuses[i]['retweeted_status']['full_text'])
except:
text.append(statuses[i]['full_text'])
fvrt_cnt.append(statuses[i]['favorite_count'])
retweet_cnt.append(statuses[i]['retweet_count'])
user.append(statuses[i]['user']['name'])
user_flwrs.append(statuses[i]['user']['followers_count'])
user_statuses.append(statuses[i]['user']['statuses_count'])
timezone.append(statuses[i]['user']['time_zone'])
tdf = pd.DataFrame({'time':times,'text':text,'retweets':retweet_cnt,'favorites':fvrt_cnt,
'user':user,'followers':user_flwrs,'user_statuses':user_statuses,'timezone':timezone})
tdf['ticker']=newstock
tdf.text = tdf['text'].apply(lambda x: "".join([i for i in x if (i.isalpha())|(i==" ")] ))
tdf['text']=tdf['text'].apply(lambda x: " ".join([i for i in x.split() if "http" not in i]))
tdf['date'] =tdf.time.apply(lambda x: dt.datetime.strftime(dt.datetime.strptime(x,"%a %b %d %H:%M:%S %z %Y"),"%Y-%m-%d"))
tdf2 = tdf2.append(tdf)
return tdf2
pence = pull_tweets(['<NAME>'])
harris = pull_tweets(['<NAME>'])
pd.set_option('display.max_columns',25)
pence = pence[pence['date']=='2020-10-08'].copy()
harris= harris[harris['date']=='2020-10-08'].copy()
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
pense_t = list(pence['text'])
harris_t = list(harris['text'])
pense_neg = []
pense_neu = []
pense_pos = []
pense_comp = []
harris_neg = []
harris_neu = []
harris_pos = []
harris_comp = []
for i in pense_t:
ss = sid.polarity_scores(i)
neg,neu,pos,comp = ss['neg'],ss['neu'],ss['pos'],ss['compound']
pense_neg.append(neg)
pense_neu.append(neu)
pense_pos.append(pos)
pense_comp.append(comp)
for i in harris_t:
ss = sid.polarity_scores(i)
neg,neu,pos,comp = ss['neg'],ss['neu'],ss['pos'],ss['compound']
harris_neg.append(neg)
harris_neu.append(neu)
harris_pos.append(pos)
harris_comp.append(comp)
import numpy as np
np.mean(pense_neg)
np.mean(harris_neg)
import matplotlib.pyplot as plt
bins = np.linspace(-1,1,30)
bins2 = np.linspace(0,1,20)
plt.hist(harris_pos,label='harris',alpha=.5,bins=bins2)
plt.hist(pense_pos,label='pence',alpha=.5,bins=bins2)
plt.title("positive")
plt.legend()
plt.show()
plt.hist(harris_neu,label='harris',alpha=.5,bins=bins2)
plt.hist(pense_neu,label='pence',alpha=.5,bins=bins2)
plt.title("neutral")
plt.legend()
plt.show()
plt.hist(harris_neg,label='harris',alpha=.5,bins=bins2)
plt.hist(pense_neg,label='pence',alpha=.5,bins=bins2)
plt.title("negative")
plt.legend()
plt.show()
plt.hist(harris_comp,label='harris',alpha=.5,bins=bins)
plt.hist(pense_comp,label='pence',alpha=.5,bins=bins)
plt.title("compound")
plt.legend()
plt.show()
```
#### File: py_research/scripts/pytorch_mnist.py
```python
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
#from torchvision import datasets
#from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
"""
Loading Mnist Digits locally.
I need to rename the columns clearly for myself, and the labels are integers in
the first column.
I'm going to reshape these flat digits into their 28*28 original written shapes.
"""
training_data = pd.read_csv("c:/users/jliv/downloads/mnist_train.csv")
testing_data = pd.read_csv("c:/users/jliv/downloads/mnist_test.csv")
cols = ['label']+['col_'+str(i) for i in range(len(training_data.columns)-1)]
training_data.columns = cols
testing_data.columns = cols
training_labels=training_data['label']
testing_labels=testing_data['label']
training_data.drop(['label'],inplace=True,axis=1)
testing_data.drop(['label'],inplace=True,axis=1)
training_data=np.array(training_data).reshape(59999,28,28)
testing_data=np.array(testing_data).reshape(9999,28,28)
training_labels=np.array(training_labels)
testing_labels=np.array(testing_labels)
"""
Pytorch doesn't expect onehot labels. The below code isn't necessary.
n_values = np.max(training_labels) + 1
training_labels_onehot=np.eye(n_values)[training_labels]
n_values = np.max(training_labels) + 1
testing_labels_onehot=np.eye(n_values)[testing_labels]
"""
plt.imshow(training_data[0])
plt.show()
"""
Float tensors should be of numpy type float32 before converting to tensor.
Integer lables do not need to be onehot, but will be cast to LongTensor before
creating torch dataset.
Numpy arrays will be converted to tensors using from_numpy.
Unsqueezing one dimension of images to explicitely pass images of one channel.
"""
training_data = training_data.astype(np.float32)
testing_data = testing_data.astype(np.float32)
training_labels = training_labels.astype(np.int)
testing_labels = testing_labels.astype(np.int)
training_data_torch = torch.from_numpy(training_data)
testing_data_torch = torch.from_numpy(testing_data)
training_labels = torch.from_numpy(training_labels)
testing_labels = torch.from_numpy(testing_labels)
training_data_torch = training_data_torch.unsqueeze(1)
testing_data_torch = testing_data_torch.unsqueeze(1)
training_labels = training_labels.type(torch.LongTensor)
testing_labels = testing_labels.type(torch.LongTensor)
train_ds = TensorDataset(training_data_torch, training_labels)
test_ds = TensorDataset(testing_data_torch, testing_labels)
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
"""
Configuration using Sequential.
"""
configuration = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10),
nn.Sigmoid()
)
configuration = nn.Sequential(
nn.Conv2d(1, 1, kernel_size=4, stride=2, padding=2),
nn.ReLU(),
nn.AdaptiveAvgPool2d(16),
nn.Flatten(),
nn.Linear(16*16, 8*8),
nn.ReLU(),
nn.Linear(8*8, 4*4),
nn.ReLU(),
nn.Linear(4*4, 10),
nn.Sigmoid()
)
configuration = nn.Sequential(
nn.Conv2d(1, 1, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.AdaptiveAvgPool2d(10),
nn.Flatten(),
nn.Linear(100, 64),
nn.ReLU(),
nn.Linear(64,32),
nn.ReLU(),
nn.Linear(32,10),
nn.Sigmoid()
)
"""
Define network class inheriting from nn.Module.
Configuration belongs in the __init__.
forward function returns the output of the network configuration.
Define model as the created network class and specify device using to()
"""
# Define model
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten=nn.Flatten()
self.stack = configuration
def forward(self, x):
#flatten = nn.Flatten()
#x=flatten(x)
logits = self.stack(x)
return logits
model = NeuralNetwork().to(device)
print(model)
"""
Define loss.
Define optimizer on model parameters and learning rate.
"""
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
"""
Train and test classes are for organization but can be done in loop.
Predict with model(X)
Evaluate loss with loss_fn(pred,y)
set gradient to zero
propagate loss backwards through network
optimize parameters
"""
def train(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
X, y = X.to(device), y.to(device)
# Compute prediction error
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test(dataloader, model):
size = len(dataloader.dataset)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= size
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
"""
Create a dataloader with model batch size for train and test.
Tell pytorch whether cuda is available or else send processing to CPU
"""
train_dataloader = DataLoader(train_ds, batch_size=1)
test_dataloader = DataLoader(test_ds, batch_size=1)
epochs = 10
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train(train_dataloader, model, loss_fn, optimizer)
test(test_dataloader, model)
print("Done!")
pred = model(testing_data_torch)
y_test=testing_labels
ypred=pred.argmax(axis=1)
(ypred == y_test).type(torch.float).sum().item()/len(ypred)
```
#### File: py_research/scripts/sentiment_analysis.py
```python
import pandas as pd
import numpy as np
txt = ['this is fake text',
"i don't care what this text says",
"please don't abandon me",
"it needs preprocessing but i really don't want to do that right now"]
txt = pd.DataFrame({'text':txt})
txt['txt'] = txt['text'].apply(lambda x: "".join([i for i in x if (i.isalpha())|(i==" ")]))
def get_emotions():
emotions=pd.read_csv("lexicons/nrc_emotions.csv")
emotions.drop(['Unnamed: 0'],axis=1, inplace=True)
emotions = emotions[np.sum(emotions,axis=1)>0].copy()
return emotions
def add_emotions(df):
emotions = get_emotions()
emotions.set_index('term',inplace=True)
dimensions = emotions.columns.values
df1=df.copy()
for i in dimensions:
temp = list(emotions[emotions[i]==1].index)
df1['emotions_'+i]=df1.txt.apply(lambda x: len([i for i in x.split() if i in temp]))
for i in dimensions:
df1['emotions_'+i+'_norm'] = df1['emotions_'+i]/np.sum(df1[['emotions_'+j for j in dimensions]],axis=1)
return df1
pd.set_option("display.max_columns",500)
add_emotions(txt)
``` |
{
"source": "jlivingstonsg/Cellbots-2019",
"score": 2
} |
#### File: android/python/cellbotRemote.py
```python
import ConfigParser
import os
import sys
import time
from threading import Thread
import android
import math
from threadedAndroid import droid
import utils
import xmpp
class RemoteState(object):
def __init__(self):
self.running = True
self.pauseSending = False
class RemoteUplink(object):
def __init__(self, remoteUplink, state):
self.remoteUplink = remoteUplink
self.state = state
self.previousMsg = ""
self.lastMsgTime = time.time()
self.previousToastMsg = ''
self.lastToastMsgTime = 0
# Send command out of uplink
def sendCmd(self, droid, msg, override=False):
if not self.state.pauseSending or override:
try:
# Don't send the same message repeatedly unless 1 second has passed
if msg != self.previousMsg or (time.time() > self.lastMsgTime + 1000):
self.remoteUplink.sendCmd(msg)
except IOError:
self.specialToast(droid, "Failed to send command to robot")
self.previousMsg=msg
self.lastMsgTime = time.time()
# Show a toast message on the device, rate-limited to avoid repeats
def specialToast(self, droid, msg):
try:
# Don't toast the same message repeatedly unless 5 seconds have passed
if msg != self.previousToastMsg or \
(time.time() > self.lastToastMsgTime + 5):
droid.makeToast(msg)
except:
pass
self.previousToastMsg=msg
self.lastToastMsgTime = time.time()
class CellbotRemote(Thread):
"""Cellbot remote control"""
def __init__(self, config, uplink, sendQuit=False):
Thread.__init__(self)
self.config = config
self.state = RemoteState()
self.remoteUplink = RemoteUplink(uplink, self.state)
self.droid = droid
self.optionsThread = RemoteCommandOptions(self.remoteUplink, self.state,
sendQuit)
self.optionsThread.daemon = True
def startOptions(self):
self.optionsThread.start()
def run(self):
self.droid.startSensing()
time.sleep(1.0) # give the sensors a chance to start up
while self.state.running:
try:
sensor_result = self.droid.readSensors()
pitch=float(sensor_result.result['pitch'])
roll=float(sensor_result.result['roll'])
except TypeError:
pitch = 0
roll = 0
self.remoteUplink.specialToast(self.droid, "Failed to read sensors")
# Convert the radians returned into degrees
pitch = pitch * 57.2957795
roll = roll * 57.2957795
# Assumes the phone is flat on the table for no speed and no turning
# Translate the pitch into a speed ranging from -100 (full backward) to
# 100 (full forward).
# Also support a gutter (dead spot) in the middle and buzz the phone when
# user is out of range.
if pitch > 50:
speed = 100
self.droid.vibrate((pitch -50) * 10)
self.remoteUplink.specialToast(self.droid, "Too far forward")
elif pitch < -50:
speed = -100
self.droid.vibrate(((pitch *-1) -50) * 10)
self.remoteUplink.specialToast(self.droid, "Too far backward")
elif -5 <= pitch <= 5:
speed = 0
else:
# Take the pitch, which ranges from -50 to 50, and multiply it by two
speed = pitch * 2
# Translate the roll into a direction ranging from -100 (full left) to 100
# (full right).
# Also support a gutter (dead spot) in the middle and buzz the phone when
# user is out of range.
if roll > 50:
direction = 100
self.droid.vibrate((roll -50) * 10)
self.remoteUplink.specialToast(self.droid, "Too far left")
elif roll < -50:
direction = -100
self.droid.vibrate(((roll *-1) -50) * 10)
self.remoteUplink.specialToast(self.droid, "Too far right")
elif -5 <= roll <= 5:
direction = 0
else:
# Take the roll, which ranges from -50 to 50, and multiply it by two
direction = roll * 2
# Reverse turning when going backwards to mimic what happens when steering
# a non-differential drive system
# where direction is really a "bias" and not a true turning angle.
if speed < 0:
direction = direction * -1
# Clamp speed and direction between -100 and 100 just in case the above
# lets something slip
speed = max(min(speed, 100), -100)
direction = max(min(direction, 100), -100)
# Apply acceleration scaling factor since linear use of the accelerometer
# goes too fast with minor tilts
scaledSpeed = math.pow(abs(speed) / 100.00, self.config.speedScaleFactor)
speed = math.copysign(scaledSpeed, speed) * 100.00
scaledDirection = math.pow(abs(direction) / 100.00,
self.config.directionScaleFactor)
direction = math.copysign(scaledDirection, direction) * 100.00
# Okay, speed and direction are now both in the range of -100:100.
# Speed=100 means to move forward at full speed. direction=100 means
# to turn right as much as possible.
# Treat direction as the X axis, and speed as the Y axis.
# If we're driving a differential-drive robot (each wheel moving forward
# or back), then consider the left wheel as the X axis and the right
# wheel as Y.
# If we do that, then we can translate [speed,direction] into [left,right]
# by rotating by -45 degrees.
# See the writeup at
# http://code.google.com/p/cellbots/wiki/TranslatingUserControls
# This actually rotates by 45 degrees and scales by 1.414, so that full
# forward = [100,100]
right = speed - direction
left = speed + direction
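# Illustrative worked example of the rotation above (values are examples, not
# from the original code): speed=100, direction=0 gives left=100, right=100
# (full forward); speed=0, direction=100 gives left=100, right=-100, i.e. the
# wheels spin in opposite directions for a turn in place.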
# But now that we've scaled, asking for full forward + full right turn
# means the motors need to go to 141. If we're asking for > 100, scale
# back without changing the proportion of forward/turning
if abs(left) > 100 or abs(right) > 100:
scale = 1.0
# if left is bigger, use it to get the scaling amount
if abs(left) > abs(right):
scale = 100.0 / abs(left)
else:
scale = 100.0 / abs(right)
left = int(scale * left)
right = int(scale * right)
command = "ws %d %d" % (left, right)
self.remoteUplink.sendCmd(self.droid, command)
time.sleep(0.25)
sys.exit()
# Give the user an option to try other actions while still using the remote as
# an accelerometer
class RemoteCommandOptions(Thread):
kCardinals = {
'North': '0', 'East': '90', 'West': '270', 'South': '180'
}
def __init__ (self, remoteUplink, remoteState, sendQuit=False):
""" Initialize remote command options thread.
This handles the remote control menu: it displays the menu, gets user input,
and sends commands.
Args:
remoteUplink: RemoteUplink object.
remoteState: RemoteState object, shared with CellbotRemote object.
sendQuit: If true, send quit command on exit.
"""
Thread.__init__(self)
self.remoteUplink = remoteUplink
self.state = remoteState
self.droid = droid
self.unlocked_droid = android.Android()
self.sendQuit = sendQuit
def run(self):
command = ''
msg = ''
while command != "Exit":
try:
command = utils.pickFromList(self.unlocked_droid,
"Pick an action (set down phone to pause)",
['Say Hello', 'Point Using Compass', 'Take Picture',
'Speak Location', 'Voice Command','Exit'])
except KeyError as e:
msg = "Sorry, please try that again. %s" % str(e)
self.droid.makeToast(msg)
else:
# Pause sending commands so that robot only does what user selected here
self.state.pauseSending = True
if command == "Take Picture":
self.remoteUplink.sendCmd(self.droid, "picture", True)
self.droid.ttsSpeak("Asking robot to take a picture")
self.droid.makeToast("Please wait, this may take a few seconds")
time.sleep(5)
msg = "Picture should be taken by now"
elif command == "Speak Location":
msg = "Speaking location"
self.remoteUplink.sendCmd(self.droid, "x", True)
elif command == "Voice Command":
try:
voiceCommand = droid.recognizeSpeech().result
self.remoteUplink.sendCmd(self.droid, voiceCommand, True)
msg = "Told the robot to " + voiceCommand
self.droid.makeToast(msg)
time.sleep(2)
except:
msg = "Could not understand"
elif command == "Point Using Compass":
msg = "This feature is currently not available on the robot."
self.droid.makeToast(msg)
# try:
# direction = utils.pickFromList(self.unlocked_droid,
# "Pick a direction", sorted([c for c in self.kCardinals]))
# except KeyError as e:
# msg = "Sorry, please try that again. %s" % str(e)
# self.droid.makeToast(msg)
# else:
# self.droid.ttsSpeak("Selected direction %s." % direction)
# cmd = "p " + self.kCardinals[direction]
# self.remoteUplink.sendCmd(self.droid, cmd, True)
# msg = "Asking robot to point " + direction
# self.droid.ttsSpeak(msg)
# time.sleep(2)
# msg = "Robot should be facing " + direction
elif command == "Say Hello":
msg = "Asking robot to say hello"
self.remoteUplink.sendCmd(self.droid, "hi", True)
elif command == "Exit":
msg = "Bye bye. Come again."
if self.sendQuit:
self.remoteUplink.sendCmd(self.droid, "q", True)
self.droid.ttsSpeak(msg)
time.sleep(1)
# This resumes sending of normal accelerometer stream of commands
self.state.pauseSending = False
self.remoteUplink.sendCmd(self.droid, "ws 0 0", True)
# This will exit the main loop as well. Remove this if you only want to exit
# the pop-up menu.
self.state.running = False
```
#### File: android/python/configuration.py
```python
__author__ = '<NAME> <<EMAIL>>'
__license__ = 'Apache License, Version 2.0'
"""Parse the configuration file, wrapping ConfigParser, and populate
class with the attributes that represent the configuration, asking the
operator to choose between different implemenations if necessary."""
import ConfigParser
import netip
from threadedAndroid import droid
import utils
class Configure(object):
"""A class to encapsulate the configuration of the Android device
for uplink and downlink. Parse the config and expose a simple, flat
configuration namespace."""
kModeRemote = "remote"
kModeRobot = "robot"
def __init__(self, configFilePath):
"""Setup the config file for reading and call Configure to
populate the class attributes.
Args:
configFilePath: path to the configuration .ini file
"""
self._config = ConfigParser.ConfigParser()
self.configFilePath = configFilePath
self._config.read(configFilePath)
self.Configure()
def getConfigFileValue(self, section, option, title,
valueList, saveToFile=True):
'''Get configurable options from the ini file, prompt user if they
aren't there, and save if needed.
Args:
section: In which section of the .ini will we find the value?
option: Which option in the section has the value?
title: Title for multi-selection dialogue.
valueList: Values to populate a multi-selection dialogue.
saveToFile: Should we save the selection to the .ini?
Example:
inputMethod = getConfigFileValue("control", "inputMethod",
"Select Input Method",
['commandByXMPP',
'commandByTelnet',
'commandByVoice'], False)
'''
# Check if option exists in the file
if self._config.has_option(section, option):
values = self._config.get(section, option)
values = values.split(',')
# Prompt the user to pick an option if the file specified more
# than one option
if len(values) > 1:
setting = utils.pickFromList(droid, title, values)
else:
setting = values[0]
else:
setting = ''
# Deal with blank or missing values by prompting user
if not setting or not self._config.has_option(section, option):
# Provide an empty text prompt if no list of values provided
if not valueList:
setting = droid.getInput(title).result
# Let the user pick from a list of values
else:
setting = utils.pickFromList(droid, title, valueList)
if saveToFile:
self._config.set(section, option, setting)
with open(self.configFilePath, 'wb') as configfile:
self._config.write(configfile)
# Strip whitespace and try turning numbers into floats
setting = setting.strip()
try:
setting = float(setting)
except ValueError:
pass
return setting
def Configure(self):
"""List of config values to get from file or to prompt user for."""
self.mode = self.getConfigFileValue("basics", "mode", "Select Mode",
[self.kModeRobot, self.kModeRemote])
self.inputMethod = self.getConfigFileValue("control", "inputMethod",
"Select Input Method",
['commandByXMPP',
'commandByTelnet',
'commandByVoice',
'commandBySelf'])
# TODO: Test that commandBySelf doesn't require mode to be set in the
# config file.
if self.mode == self.kModeRobot or self.inputMethod == "commandBySelf":
self.outputMethod = self.getConfigFileValue("control", "outputMethod",
"Select Output Method",
['outputSerial',
'outputBluetooth',
'outputBluetoothICreate'])
self.microcontroller = self.getConfigFileValue("basics",
"microcontroller",
"Microcontroller Type",
['arduino', 'serialservo',
'AVR_Stepper', 'icreate'])
self.audioOn = self._config.getboolean("basics", "audioOn")
self.currentSpeed = self._config.getint("basics", "currentSpeed")
self.cardinalMargin = self._config.getint("basics", "cardinalMargin")
self.phoneIP = netip.displayNoLo()
try:
self.bluetoothAddress = self._config.get("control", "bluetoothAddress")
except:
# TODO: Make defaults for this and everything.
self.bluetoothAddress = None
if self.inputMethod == "commandByJSON":
if self.mode == self.kModeRobot:
self.msgRelayUrl = "/".join([self._config.get("json", "msgRelayUrl"),
"device"])
elif self.mode == self.kModeRemote:
self.msgRelayUrl = "/".join([self._config.get("json", "msgRelayUrl"),
"controller"])
if self.mode == self.kModeRemote or self.inputMethod == "commandBySelf":
self.speedScaleFactor = self.getConfigFileValue(
"remote", "speedScaleFactor", "Speed scale factor", '', False)
self.directionScaleFactor = self.getConfigFileValue(
"remote", "directionScaleFactor", "Direction scale factor", '', False)
# Only get these settings if we are using XMPP
if self.inputMethod == "commandByXMPP":
self.xmppServer = self._config.get("xmpp", "server")
self.xmppPort = self._config.getint("xmpp", "port")
self.xmppRobotUsername = self.getConfigFileValue(
"xmpp", "robotUsername", "Robot chat username", '')
if self.mode == self.kModeRobot:
self.xmppRobotPassword = self.getConfigFileValue(
"xmpp", "robotPassword", "Robot chat password", '', False)
elif self.mode == self.kModeRemote:
self.xmppRemoteUsername = self.getConfigFileValue(
"xmpp", "remoteUsername", "Remote chat username", '')
self.xmppRemoteUserPassword = self.getConfigFileValue(
"xmpp", "remoteUserPassword", "Remote chat user password", '',
False)
if self.inputMethod == "commandByTelnet":
self.telnetPort = self._config.getint("telnet", "port")
if self.mode == self.kModeRemote:
self.robotHost = self.getConfigFileValue(
"telnet", "robotHost", "Robot hostname", '')
def Print(self):
import pprint
pprint.pprint(self.__dict__)
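# Minimal usage sketch (the .ini path is a placeholder, not a file from this
# repository):
# config = Configure('path/to/cellbot.ini')
# config.Print()  # dumps the flat namespace, e.g. config.mode, config.inputMethod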
```
#### File: android/python/downlinks.py
```python
__author__ = '<EMAIL> (<NAME>)'
import struct
import threading
import time
import base64
import threadedAndroid
class DownlinkError(Exception):
"""Exception for downlink errors"""
pass
def downlinkFactory(config):
"""Consult the configuration to determine which downlink to
instantiate, and return it
Args:
config -- global configuration object
Returns:
downlink -- configured downlink instance required to talk to the
robot
"""
if (config.outputMethod == "outputBluetoothASCII" or
config.outputMethod == "outputBluetooth"): # Backwards compat.
downlink = BluetoothDownlinkASCII(bluetoothAddress=config.bluetoothAddress)
elif config.outputMethod == "outputBluetoothBinary":
downlink = BluetoothDownlinkBinary(
bluetoothAddress=config.bluetoothAddress)
elif config.outputMethod == "outputBluetoothICreate":
downlink = BluetoothDownlinkICreate(
bluetoothAddress=config.bluetoothAddress)
else:
raise DownlinkError("Unknown downlink: '%s'" % config.outputMethod)
# TODO: make this a downlink obj
# print "Outputting elsewhere"
# serialReader.lifeline = re.compile(r"(\d) received")
# readerThread = serialReader()
# readerThread.start()
return downlink
class Downlink(object):
"""Base class that defines the interface for communication between
the RobotProtocol classes and the robot (arduino, etc).
The structure is as follows:
WriteCommand() should send a command immediately.
ReadReply() returns some structured msg based on the protocol, within
timeout seconds.
"""
def start(self):
pass
def WriteCommand(self, *msg):
raise NotImplementedError
def ReadReply(self, timeout):
raise NotImplementedError
def FlushInput(self):
raise NotImplementedError
# For debugging "ASCII," that isn't always.
def _StrToHex(str):
return "".join(["%x" % ord(x) for x in str])
class BluetoothDownlink(Downlink):
"""Parent class for BluetoothDownlink{ASCII,Binary}.
Just does initialization. This uses its own Android object since we
need to maintain state of the connection. """
def __init__(self, bluetoothAddress=None):
"""
Args:
bluetoothAddress: HEX string, or None.
"""
self.droid = threadedAndroid.LockedAndroid()
# Initialize Bluetooth outbound if configured for it
self.droid.toggleBluetoothState(True)
if bluetoothAddress:
self.droid.bluetoothConnect('00001101-0000-1000-8000-00805F9B34FB',
bluetoothAddress)
# this is a magic UUID for serial BT devices
else:
self.droid.bluetoothConnect('00001101-0000-1000-8000-00805F9B34FB')
# this is a magic UUID for serial BT devices
print "Initializing Bluetooth connection"
self.droid.makeToast("Initializing Bluetooth connection")
time.sleep(3) # TODO: remove magic number
# Child classes will implement WriteCommand() and ReadReply()
def _WaitForDataReady(self, deadline_time):
""" Wait for a deadline_time (epoch secs, float) for bluetoothReadReady.
Returns True if ready, False on timeout.
"""
while time.time() < deadline_time:
try:
dataReady = self.droid.bluetoothReadReady()
#print(dataReady)
if dataReady.result:
return True
time.sleep(0.02) # TODO: find a better way than polling.
continue
except:
print "BluetoothDownlink: reading failed"
return False
print "BluetoothDownlink: timeout on reading"
return False
def FlushInput(self):
self.droid.bluetoothSkipPendingInput()
class BluetoothDownlinkASCII(BluetoothDownlink):
"""Implementation of a BluetoothDownlink that writes/reads ascii.
"""
def __init__(self, bluetoothAddress=None):
super(BluetoothDownlinkASCII,self).__init__(
bluetoothAddress=bluetoothAddress)
self.buf = "" # Keep partially read messages
def WriteCommand(self, msg):
self.droid.bluetoothWrite(msg + '\n')
def ReadReply(self, timeout):
"""Read one line, keeping any partial line that's come in after that.
Returns: (key, val) tuple, or None on error or timeout.
"""
start_time = time.time()
deadline = start_time + timeout
# Read chunks and add them to buf.
# Once we've gotten a whole line, split it into key/val by colons.
# TODO: Can we just use bluetoothReadline()?
while self._WaitForDataReady(deadline):
try:
buf_partial = self.droid.bluetoothRead().result
except:
print "BTDownlinkASCII: reading failed"
return None
if buf_partial:
if not len(buf_partial): continue
#print "BTDownlinkASCII: Read Hex Chars: %s " % _StrToHex(buf_partial)
print "BTDownlinkASCII: Read Chars: %s " % buf_partial
self.buf += buf_partial
if not '\n' in self.buf: continue
# We know we have at least one line. Parse 1 line
npos = self.buf.find('\n')
msg_line = self.buf[:npos]
self.buf = self.buf[npos+1:]
print "Bot says: %s " % _StrToHex(msg_line)
if len(msg_line.strip()) < 2:
print "BTDownlinkASCII: Trouble parsing k/v pair -- too short"
return None
word_list = msg_line.split(':')
if len(word_list) != 2:
print "BTDownlinkASCII: Trouble parsing k/v pair"
return None
# Success
return (word_list[0], word_list[1])
# Timeout
return None
class BluetoothDownlinkBinary(BluetoothDownlink):
"""Implementation of a BluetoothDownlink, writes/reads the binary protocol.
The protocol spec is here:
https://docs.google.com/a/google.com/Doc?docid=0AeQa1c1ypp_UZGc1Y3A0dmdfMGNjcDl2ZmZj&hl=en
"""
def __init__(self, bluetoothAddress=None):
super(BluetoothDownlinkBinary,self).__init__(
bluetoothAddress=bluetoothAddress)
def WriteCommand(self, command, *args):
# TODO: Make the 'tag' value auto-increment, and then make a reader
# thread (in AVRBinaryProtocol) that associates reads w/ writes based on
# that tag.
bin = struct.pack(">HBB%dh" % len(args), command, 1, len(args), *args)
self.droid.bluetoothWriteBinary(bin.encode("base64"))
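# Illustrative note (not from the original source): a call such as
# WriteCommand(1, 10, -10) packs with format ">HBB2h", i.e. a big-endian
# 2-byte command, a 1-byte tag (hard-coded to 1 here), a 1-byte argument
# count, then each argument as a signed 16-bit short, before the bytes are
# base64-encoded for bluetoothWriteBinary.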
def ReadReply(self, timeout):
"""Read one message.
Not _guaranteed_ to return within timeout, but normally should.
Returns: tuple (command, (args,...)) or None.
"""
deadline = time.time() + timeout
if not self._WaitForDataReady(deadline):
return None
header = self.droid.bluetoothReadBinary(4).result
header = header.decode("base64")
command, tag, count = struct.unpack(">HBB", header)
if count > 0:
if not self._WaitForDataReady(deadline):
return None
# TODO: Should verify the length is what we expect
data = self.droid.bluetoothReadBinary(
count * 2).result.decode("base64")
args = struct.unpack(">%dh" % count, data)
else:
args = []
return (command, tuple(args))
# Listen for incoming serial responses. If this thread stops working,
# try rebooting.
class serialReader(threading.Thread):
def __init__ (self):
threading.Thread.__init__(self)
def run(self):
process = droid.getSerialIn()
while process:
try:
botReply = process.readline()
if botReply:
backoff = 2
if len(botReply.strip()) > 1:
splitReply = botReply.split(':')
if len(splitReply) == 2:
addToWhiteboard(splitReply[0], splitReply[1])
outputToOperator("Bot says %s " % botReply)
except:
print "Reader Thread Errored"
time.sleep(2)
class BluetoothDownlinkICreate(BluetoothDownlink):
"""Implementation of a BluetoothDownlink that writes/reads to iCreate."""
def __init__(self, bluetoothAddress=None):
super(BluetoothDownlinkICreate,self).__init__(
bluetoothAddress=bluetoothAddress)
def WriteCommand(self, msg):
encoded_data = base64.encodestring(msg)
self.droid.bluetoothWriteBinary(encoded_data)
def ReadReplyWithBuffer(self, timeout, bufferSize):
encoded_data = self.droid.bluetoothReadBinary(bufferSize)
return base64.decodestring(encoded_data.result)
```
#### File: android/python/icreatecellbot.py
```python
__author__ = '<NAME> <<EMAIL>>'
import os
import time
import logging
import math
import base64
import downlinks
import threading
import differentialDriveBot
class ICreateBot(object):
def addSensorTypes(self):
"""Adds the types of sensors this robot has. This base class will
contain any sensors the phone possesses.
As sensor implementations become more complicated, more processing
will be required to set up sensors, which will go into this
addSensorTypes() method, but hopefully mostly into init methods in
each SensorType class.
"""
self.parent.addSensorTypes()
def __init__(self, config, robotProto):
self.parent = differentialDriveBot.DifferentialDriveBot(config, robotProto)
self.config = config
self.robotProto = robotProto
##ACTIONS
def setWheelSpeeds(self, left, right):
"""Set wheel speeds.
Assumes two wheels -- only two wheels are needed to control turning.
Args:
left -- speed of left wheel
right -- speed of right wheel
"""
self.parent.setWheelSpeeds(left, right)
def moveForward(self, distance=None):
"""Moves robot forward.
Args:
distance -- distance to travel.
if distance==None, move forward indefinitely
"""
self.parent.moveForward(distance)
def moveBackward(self, distance=None):
"""Moves robot backward.
Args:
distance -- distance to travel.
if distance==None, move backward indefinitely
"""
self.parent.moveBackward(distance)
def turnLeft(self, angle=None):
"""Turns robot to the left.
Robot should resume previous speed after completing turn.
Args:
angle -- magnitude in degrees
"""
self.parent.turnLeft(angle)
def turnRight(self, angle=None):
"""Turns robot to the right.
Robot should resume previous speed after completing turn.
Args:
angle -- magnitude in degrees
"""
self.parent.turnRight(angle)
def stop(self):
"""Stops all robot motion."""
self.parent.stop()
def readLocation(self):
"""Returns the current location as reported by GPS."""
return self.parent.readLocation()
def startAudioRecording(self, fileName):
"""Starts recording audio."""
self.parent.startAudioRecording(fileName)
def stopAudioRecording(self):
"""Stops recording audio."""
self.parent.stopAudioRecording()
def sing(self, song):
"""Outputs audio.
Args:
song -- audio stream to output
"""
self.parent.sing(song)
def speak(self, speech, override=False):
self.parent.speak(speech, override)
def recognizeSpeech(self):
return self.parent.recognizeSpeech()
def captureImage(self, fileName, camera=None):
"""Capture an image.
Args:
fileName -- save the image to this file
camera -- indicates which camera to use.
if camera == None, capture n images with all n cameras
"""
self.parent.captureImage(fileName, camera)
def setVolume(self, newVolume):
"""Set the media volume (includes the synthesized voice).
Args:
newVolume -- a level between 0 and MaxMediaVolume
"""
self.parent.setVolume(newVolume)
def reset(self):
"""Reset hardware settings to default"""
self.robotProto.Reset()
def shutdown(self, msg="Exiting"):
"""Stop the servos if using a serial controller."""
self.parent.shutdown(msg)
def log(self, foobar):
"""Outputs to log.
Args:
foobar -- text to be logged
"""
self.parent.log(foobar)
# TODO: Require setMaximumSpeed(self, speed): Function
# TODO: Require changeSpeed(self, speed): Function
# TODO: Require turnToHeading(self, heading): Function
# TODO: Require writeRawCommand(self, rawCommand): Function
"""
def moveForward(self, distance=None):
self.speak("Moving forward.")
if distance:
self.sensor_monitor.get_distance() # Reset distance
total_distance = 0
self.setWheelSpeeds(self.max_power/4, self.max_power/4)
while(total_distance <= distance):
if(self.sensor_monitor.is_bumper_hit()):
break
total_distance += self.sensor_monitor.get_distance()
time.sleep(0.25)
self.stop()
else:
self.setWheelSpeeds(self.max_power, self.max_power)
def moveBackward(self, distance=None):
self.speak("Moving backward.")
if distance:
self.sensor_monitor.get_distance() # Reset distance
total_distance = 0
self.setWheelSpeeds(-1*self.max_power/4, -1*self.max_power/4)
while(total_distance <= distance):
if(self.sensor_monitor.is_bumper_hit()):
break
total_distance += math.fabs(self.sensor_monitor.get_distance())
time.sleep(0.25)
self.stop()
else:
self.setWheelSpeeds(-1*self.max_power, -1*self.max_power)
def turnLeft(self, angle=None):
self.speak("Turning left.")
if angle:
absAngle = math.fabs(angle)/3 # For some reason angle traveled is 3 fold
self.sensor_monitor.get_angle() # Reset angle
total_angle = 0
self.setWheelSpeeds(-1*self.max_power/10, self.max_power/10)
while(total_angle <= absAngle):
if(self.sensor_monitor.is_bumper_hit()):
break
total_angle += math.fabs(self.sensor_monitor.get_angle())
time.sleep(0.25)
self.stop()
else:
self.setWheelSpeeds(-1*self.max_power/5, self.max_power/5)
def turnRight(self, angle=None):
self.speak("Turning right.")
if angle:
absAngle = math.fabs(angle)/3 # For some reason angle traveled is 3 fold
self.sensor_monitor.get_angle() # Reset angle
total_angle = 0
self.setWheelSpeeds(self.max_power/10, -1*self.max_power/10)
while(total_angle <= absAngle):
if(self.sensor_monitor.is_bumper_hit()):
break
total_angle += math.fabs(self.sensor_monitor.get_angle())
time.sleep(0.25)
self.stop()
else:
self.setWheelSpeeds(self.max_power/5, -1*self.max_power/5)
def stop(self):
self.speak("Stopping.")
self.setWheelSpeeds(0, 0)
def setWheelSpeeds(self, left, right):
try:
self.robot.SetWheelVelocity(left, right)
print 'Setting wheel velocity to %r:%r' % (left, right)
except pyrobot.StateError, e:
print 'Trying to set wheel velocity in wrong state.'
def getSensors(self):
sensor_data = self.sensor_monitor.get_sensors().get_sensor_data()
d = {}
for key in sensor_data:
d[key] = sensor_data[key]
return d
"""
```
#### File: android/python/icreateservice.py
```python
import logging
import threading
import time
class Service(object):
"""A Service runs in a separate thread and can be started and stopped."""
def __init__(self):
self.name = self.__class__.__name__
self._join = False
self._thread = None
def Loop(self):
"""Should be overridden by subclass to define a single loop iteration."""
raise NotImplementedError
def _Loop(self):
"""Loop until asked to stop."""
while not self._join:
try:
self.Loop()
except:
logging.info('Exception in service %s.' % self.name)
raise
def Start(self):
"""Start up the service."""
if self._thread is not None:
logging.info('Restarting service %s.' % self.name)
self.Stop()
else:
logging.info('Starting service %s.' % self.name)
self._thread = threading.Thread(target=self._Loop)
self._thread.setDaemon(True)
self._thread.start()
def Stop(self):
"""Stop the service."""
self._join = True
if self._thread is not None:
self._thread.join()
self._thread = None
self._join = False
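# Hypothetical usage sketch (HeartbeatService is illustrative, not part of
# this repository): subclass Service, override Loop() with one iteration of
# work, then call Start() to run it on a daemon thread and Stop() to join it.
# class HeartbeatService(Service):
#     def Loop(self):
#         logging.info('beat')
#         time.sleep(1.0)
# HeartbeatService().Start()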
```
#### File: android/python/utils.py
```python
import time
def outputToOperator(msg, uplink=None):
"""Display information on screen and/or reply to the human operator"""
if uplink:
uplink.Write(msg)
print msg
def pickFromList(droid, title, options):
droid.dialogCreateAlert(title)
droid.dialogSetItems(options)
droid.dialogShow()
time.sleep(0.25)
response = droid.dialogGetResponse().result['item']
return options[response]
def log(droid, msg):
return droid.log(msg)
```
#### File: kamel-server/pykml/helpers.py
```python
from pykml.factory import KML_ElementMaker as K
from pykml.factory import GX_ElementMaker as GX
def separate_namespace(qname):
"Separates the namespace from the element"
import re
try:
namespace, element_name = re.search('^{(.+)}(.+)$', qname).groups()
except:
namespace = None
element_name = qname
return namespace, element_name
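# Example for illustration:
# separate_namespace('{http://www.opengis.net/kml/2.2}coordinates') returns
# ('http://www.opengis.net/kml/2.2', 'coordinates'); a plain name such as
# 'coordinates' has no namespace and returns (None, 'coordinates').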
def set_max_decimal_places(doc, max_decimals):
"""Sets the maximum number of decimal places used by KML elements
This method facilitates reducing the file size of a KML document.
"""
def replace_delimited_string_member(
delimited_str,
separator,
index_no,
decimal_places):
"Modify the number of decimal places for a delimited string member"
values = delimited_str.split(separator)
values[index_no] = str(round(float(values[index_no]), decimal_places))
return separator.join(values)
if max_decimals.has_key('longitude'):
data_type = 'longitude'
index_no = 0 # longitude is in the first position
# modify <longitude>
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}longitude"):
new_val = round(float(el.text), max_decimals[data_type])
el.getparent().longitude = K.longitude(new_val)
# modify <coordinates> elements
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}coordinates"):
vertex_str_list = []
for vertex in el.text.strip().split(' '):
vertex_str_list.append(
replace_delimited_string_member(
delimited_str=vertex,
separator=',',
index_no=index_no,
decimal_places=max_decimals[data_type]
)
)
el_new = K.coordinates(' '.join(vertex_str_list).strip())
el.getparent().replace(el, el_new)
# modify <gx:coords> elements
for el in doc.findall(".//{http://www.google.com/kml/ext/2.2}coord"):
el._setText(
replace_delimited_string_member(
delimited_str=el.text,
separator=' ',
index_no=index_no,
decimal_places=max_decimals[data_type]
)
)
if max_decimals.has_key('latitude'):
data_type = 'latitude'
index_no = 1 # latitude is in the second position
# modify <latitude> elements
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}latitude"):
new_val = round(float(el.text), max_decimals[data_type])
el.getparent().latitude = K.latitude(new_val)
# modify <coordinates> elements
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}coordinates"):
vertex_str_list = []
for vertex in el.text.strip().split(' '):
vertex_str_list.append(
replace_delimited_string_member(
delimited_str=vertex,
separator=',',
index_no=index_no,
decimal_places=max_decimals[data_type]
)
)
el_new = K.coordinates(' '.join(vertex_str_list).strip())
el.getparent().replace(el, el_new)
# modify <gx:coords> elements
for el in doc.findall(".//{http://www.google.com/kml/ext/2.2}coord"):
el._setText(
replace_delimited_string_member(
delimited_str=el.text,
separator=' ',
index_no=index_no,
decimal_places=max_decimals[data_type]
)
)
if max_decimals.has_key('altitude'):
data_type = 'altitude'
index_no = 2 # altitude is in the third position
# modify <altitude> elements
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}altitude"):
new_val = round(float(el.text), max_decimals[data_type])
el.getparent().altitude = K.altitude(new_val)
# modify <coordinates> elements
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}coordinates"):
vertex_str_list = []
for vertex in el.text.strip().split(' '):
vertex_str_list.append(
replace_delimited_string_member(
delimited_str=vertex,
separator=',',
index_no=index_no,
decimal_places=max_decimals[data_type]
)
)
el_new = K.coordinates(' '.join(vertex_str_list).strip())
el.getparent().replace(el, el_new)
# modify <gx:coords> elements
for el in doc.findall(".//{http://www.google.com/kml/ext/2.2}coord"):
el._setText(
replace_delimited_string_member(
delimited_str=el.text,
separator=' ',
index_no=index_no,
decimal_places=max_decimals[data_type]
)
)
if max_decimals.has_key('heading'):
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}heading"):
new_val = round(float(el.text), max_decimals['heading'])
el.getparent().heading = K.heading(new_val)
if max_decimals.has_key('tilt'):
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}tilt"):
new_val = round(float(el.text), max_decimals['tilt'])
el.getparent().tilt = K.tilt(new_val)
if max_decimals.has_key('range'):
for el in doc.findall(".//{http://www.opengis.net/kml/2.2}range"):
new_val = round(float(el.text), max_decimals['range'])
el.getparent().range = K.range(new_val)
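# Hedged usage sketch (the precision values are arbitrary examples, not taken
# from this repository): trim coordinates to 6 decimal places and camera
# angles to whole degrees with
# set_max_decimal_places(doc, {'longitude': 6, 'latitude': 6, 'altitude': 1,
#                              'heading': 0, 'tilt': 0, 'range': 0})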
```
#### File: python/kml_animator/process_animation.py
```python
index_page = '''
<html>
<head>
<title>X20 Rocket Launch</title>
<script src="https://www.google.com/jsapi"> </script>
<script src="http://earth-api-samples.googlecode.com/svn/trunk/lib/kmldomwalk.js" type="text/javascript"> </script>
<script type="text/javascript">
var ge;
var tour;
google.load("earth", "1");
function init() {
google.earth.createInstance('map3d', initCB, failureCB);
}
function initCB(instance) {
ge = instance;
ge.getWindow().setVisibility(true);
ge.getNavigationControl().setVisibility(ge.VISIBILITY_SHOW);
var href = '%s/x20.kml';
google.earth.fetchKml(ge, href, fetchCallback);
function fetchCallback(fetchedKml) {
// Alert if no KML was found at the specified URL.
if (!fetchedKml) {
setTimeout(function() {
alert('Bad or null KML');
}, 0);
return;
}
// Add the fetched KML into this Earth instance.
ge.getFeatures().appendChild(fetchedKml);
// Walk through the KML to find the tour object; assign to variable 'tour.'
walkKmlDom(fetchedKml, function() {
if (this.getType() == 'KmlTour') {
tour = this;
return false;
}
});
}
}
function failureCB(errorCode) {
}
// Tour control functions.
function enterTour() {
if (!tour) {
alert('No tour found!');
return;
}
ge.getTourPlayer().setTour(tour);
ge.getTourPlayer().play();
}
function pauseTour() {
ge.getTourPlayer().pause();
}
function resetTour() {
ge.getTourPlayer().reset();
}
function exitTour() {
ge.getTourPlayer().setTour(null);
}
google.setOnLoadCallback(init);
</script>
</head>
<body>
<div id="map3d" style="height: 700px; width: 1200px;"></div>
<div id ="controls">
<input type="button" onclick="enterTour()" value="Travel To Black Rock Desert and Launch X20 Rocket"/>
<input type="button" onclick="resetTour()" value="Reset"/>
</div>
</body>
</html>'''
#HEADER DEFINITION
header = '''<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2"
xmlns:gx="http://www.google.com/kml/ext/2.2"
xmlns:kml="http://www.opengis.net/kml/2.2"
xmlns:atom="http://www.w3.org/2005/Atom">
<Document>
<!-- HUNDRED K LINE! -->
<Style id="hundredk">
<LineStyle>
<color>7700ff00</color>
<Width>100</Width>
</LineStyle>
</Style>
<Placemark>
<styleUrl>#hundredk</styleUrl>
<LineString id="hundredk">
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>absolute</altitudeMode>
<coordinates>
-119.112413,40.853570,31669
-119.515,40.903570,31669
</coordinates>
</LineString>
</Placemark>
<Placemark id="hundredkballoon">
<name>100K feet above launch altitude</name>
<Style>
<IconStyle>
<Icon>
</Icon>
</IconStyle>
</Style>
<Point>
<gx:altitudeMode>absolute</gx:altitudeMode>
<coordinates>-119.3,40.87,31669</coordinates>
</Point>
</Placemark>
<Placemark id="achievementballoon">
<Style>
<IconStyle>
<Icon>
</Icon>
</IconStyle>
<BalloonStyle>
<bgColor>ff444444</bgColor>
<text><![CDATA[
<font face="sans-serif" color="white" size="+3"><b>Achievement Unlocked:</b> Reached Carmack Micro Prize altitude!</font>
]]></text>
</BalloonStyle>
</Style>
<description>
Achievement Unlocked: Reached Carmack Micro Prize altitude!
</description>
<Point>
<gx:altitudeMode>absolute</gx:altitudeMode>
<coordinates>-119.3,40.87,31669</coordinates>
</Point>
</Placemark>
<!-- WHOLEROCKET TRAIL -->
<Style id="wholeplume">
<LineStyle>
<color>ff0077ff</color>
<Width>10</Width>
</LineStyle>
</Style>
<Placemark>
<styleUrl>#wholeplume</styleUrl>
<LineString id="wholeTrack">
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>absolute</altitudeMode>
<coordinates>
%s
</coordinates>
</LineString>
</Placemark>
<!-- WHOLE MODEL -->
<Placemark>
<Model>
<altitudeMode>absolute</altitudeMode>
<Location id="wholeLocation">
<longitude>%f</longitude>
<latitude>%f</latitude>
<altitude>%f</altitude>
</Location>
<Orientation id="wholeOrientation">>
<heading>0</heading>
<tilt>0</tilt>
<roll>0</roll>
</Orientation>
<Scale id="wholeScale">
<x>10</x>
<y>10</y>
<z>10</z>
</Scale>
<Link>
<href>%s/whole.dae</href>
</Link>
</Model>
</Placemark>
<!-- SUSTAINERROCKET TRAIL -->
<Style id="sustainerplume">
<LineStyle>
<color>ff0077ff</color>
<Width>10</Width>
</LineStyle>
</Style>
<Placemark>
<styleUrl>#sustainerplume</styleUrl>
<LineString id="sustainerTrack">
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>absolute</altitudeMode>
<coordinates>
%s
</coordinates>
</LineString>
</Placemark>
<!-- SUSTAINER MODEL -->
<Placemark>
<Model>
<altitudeMode>absolute</altitudeMode>
<Location id="sustainerLocation">
<longitude>%f</longitude>
<latitude>%f</latitude>
<altitude>%f</altitude>
</Location>
<Orientation id="sustainerOrientation">>
<heading>0</heading>
<tilt>0</tilt>
<roll>0</roll>
</Orientation>
<Scale id="sustainerScale">
<x>0</x>
<y>0</y>
<z>0</z>
</Scale>
<Link>
<href>%s/sustainer.dae</href>
</Link>
</Model>
</Placemark>
<!-- BOOSTERROCKET TRAIL -->
<Style id="boosterplume">
<LineStyle>
<color>ff0077ff</color>
<Width>10</Width>
</LineStyle>
</Style>
<Placemark>
<styleUrl>#boosterplume</styleUrl>
<LineString id="boosterTrack">
<extrude>0</extrude>
<tessellate>1</tessellate>
<altitudeMode>absolute</altitudeMode>
<coordinates>
%s
</coordinates>
</LineString>
</Placemark>
<!-- BOOSTER MODEL -->
<Placemark>
<Model>
<altitudeMode>absolute</altitudeMode>
<Location id="boosterLocation">
<longitude>%f</longitude>
<latitude>%f</latitude>
<altitude>%f</altitude>
</Location>
<Orientation id="boosterOrientation">>
<heading>0</heading>
<tilt>0</tilt>
<roll>0</roll>
</Orientation>
<Scale id="boosterScale">
<x>0</x>
<y>0</y>
<z>0</z>
</Scale>
<Link>
<href>%s/booster.dae</href>
</Link>
</Model>
</Placemark>
<gx:Tour>
<name>X20 Rocket Launch</name>
<gx:Playlist>
<!-- Fly to our start location -->
<gx:FlyTo>
<gx:duration>%d</gx:duration>
<Camera>
<longitude>%f</longitude>
<latitude>%f</latitude>
<altitude>%f</altitude>
<heading>0</heading>
<tilt>90</tilt>
<roll>0</roll>
<altitudeMode>absolute</altitudeMode>
</Camera>
</gx:FlyTo>'''
#### END OF HEADER
def vehicle_kml(name, duration, long, lat, alt, heading, tilt, roll, track_coord, balloon_actions, scale):
vehicle_template = '''
<!-- Rocket -->
<gx:AnimatedUpdate>
<gx:duration>%d</gx:duration>
<Update>
<targetHref></targetHref>
<Change>
<Location targetId="%sLocation">
<longitude>%f</longitude>
<latitude>%f</latitude>
<altitude>%f</altitude>
</Location>
</Change>
<Change>
<Orientation targetId="%sOrientation">
<heading>%f</heading>
<tilt>%f</tilt>
<roll>%f</roll>
</Orientation>
</Change>
<Change>
<LineString targetId="%sTrack">
<coordinates>
%s
</coordinates>
</LineString>
</Change>
%s
<Change>
<Scale targetId="%sScale">
<x>%d</x>
<y>%d</y>
<z>%d</z>
</Scale>
</Change>
</Update>
</gx:AnimatedUpdate>
'''
return vehicle_template % (duration, name,long, lat, alt, name, heading, tilt, roll, name, track_coord, balloon_actions, name, scale, scale, scale)
def camera_kml(duration, long, lat, alt, heading, tilt, roll):
camera_template = '''
<!-- Camera -->
<gx:FlyTo>
<gx:duration>%d</gx:duration>
<gx:flyToMode>smooth</gx:flyToMode>
<Camera>
<longitude>%f</longitude>
<latitude>%f</latitude>
<altitude>%f</altitude>
<heading>%f</heading>
<tilt>%f</tilt>
<roll>%f</roll>
<altitudeMode>absolute</altitudeMode>
</Camera>
</gx:FlyTo>
'''
return camera_template % (duration, long, lat, alt, heading, tilt, roll)
tail = '''
<!-- Final Camera Zoom-->
<gx:FlyTo>
<gx:duration>2</gx:duration>
<gx:flyToMode>smooth</gx:flyToMode>
<Camera>
<longitude>-119.32</longitude>
<latitude>39.75</latitude>
<altitude>10000</altitude>
<heading>0</heading>
<tilt>90</tilt>
<roll>0</roll>
<altitudeMode>absolute</altitudeMode>
</Camera>
</gx:FlyTo>
</gx:Playlist>
</gx:Tour>
</Document>
</kml>
'''
import math
import sys
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
if sys.argv.__len__() == 1:
print "must specify a serverroot - eg http://www.corp.google.com/~wdavies"
sys.exit(1)
findex = open('index.html', 'w')
findex.write(index_page % sys.argv[1])
findex.close()
curr_time = 0
total_time = 0
log_time = 10
prev_long = 0
prev_lat = 0
prev_alt = 0
prev_degree = 0
fkml = open('x20.kml', 'w')
f = open('./input.kml', 'r')
coords = ""
balloon = ""
seperation = 0
sep_long = 0
sep_lat = 0
sep_alt = 0
for line in f:
long, lat, alt, time = line.strip().split("\t")
long = float(long)
lat = float(lat)
alt = float(alt)
hr, mi, sec = map(int, time.split(":"))
if curr_time == 0:
coords = "%f,%f,%f\n" % (long, lat, alt)
fkml.write(header % (coords, long, lat, alt, sys.argv[1],
coords, long, lat, alt, sys.argv[1],
coords, long, lat, alt, sys.argv[1],
5, (long - 0.0), (lat - 0.05), alt ))
start_time = hr * 3600 + mi * 60 + sec
prev_time = hr * 3600 + mi * 60 + sec
prev_long = long
prev_lat = lat
prev_alt = alt
curr_time = hr * 3600 + mi * 60 + sec
time_diff = curr_time - prev_time
total_time = curr_time - start_time
if alt == 33089:
balloon = '''
<Change>
<Placemark targetId="achievementballoon">
<gx:balloonVisibility>1</gx:balloonVisibility>
</Placemark>
</Change>'''
else:
balloon = '''<Change>
<Placemark targetId="achievementballoon">
<gx:balloonVisibility>0</gx:balloonVisibility>
</Placemark>
</Change>'''
sys.stderr.write("time: %d, (%d), alt: %d\n" % (curr_time - start_time, time_diff, alt))
if time_diff >= 10 or alt == 33089 or alt == 7533:
if alt == 7533.0 :
sys.stderr.write("SEPERATION ADJUSTMENT!\n")
long = -119.159620
lat = 40.859520
alt = 8279
sep_long = long
sep_lat = lat
sep_alt = alt
seperation = 1
sys.stderr.write("DISPLAY: time: %d, (%d), alt: %d\n" % (curr_time - start_time, time_diff, alt))
horiz = distance([prev_lat, prev_long], [lat, long])
vert = (alt - prev_alt)/1000
degree = math.degrees(math.atan(vert/horiz))
coords = coords + "%f,%f,%f\n" % (long, lat, alt)
if seperation == 0:
fkml.write(vehicle_kml("sustainer", log_time, long, lat, alt, 0, 0, 90-degree, coords, balloon, 0))
fkml.write(vehicle_kml("booster", log_time, long, lat, alt, 0, 0, 90-degree, coords, balloon, 10))
fkml.write(vehicle_kml("whole", log_time, long, lat, alt, 0, 0, 90-degree, coords, balloon, 10))
fkml.write(camera_kml(log_time, (long - 0.0), (lat - 0.1), alt*0.8, 0, 90, 0))
if seperation == 1:
fkml.write(vehicle_kml("sustainer", 1, long, lat, alt, 0, 0, 90-degree, coords, balloon, 10))
fkml.write(vehicle_kml("booster", 1, sep_long, sep_lat, sep_alt, 0, 0, 90-degree, "", balloon, 10))
fkml.write(vehicle_kml("whole", 1, long, lat, alt, 0, 0, 90-degree, "", balloon, 0))
fkml.write(camera_kml(1, (long - 0.0), (lat - 0.1), alt*0.8, 0, 90, 0))
if seperation > 1:
sep_alt = sep_alt * 0.35
fkml.write(vehicle_kml("sustainer", log_time, long, lat, alt, 0, 0, 90-degree, coords, balloon, 10))
fkml.write(vehicle_kml("booster", log_time, sep_long, sep_lat, sep_alt, 0, 0, 90-degree, "", balloon, 10))
fkml.write(vehicle_kml("whole", log_time, long, lat, alt, 0, 0, 90-degree, "", balloon, 0))
fkml.write(camera_kml(log_time, (long - 0.0), (lat - 0.3), alt*0.8, 0, 90, 0))
if seperation == 1:
seperation = 2
prev_time = curr_time
prev_long = long
prev_lat = lat
prev_alt = alt
prev_degree = degree
if log_time > 1:
log_time -= 1
fkml.write(tail)
fkml.close()
f.close()
``` |
{
"source": "jlixfeld/peering-manager",
"score": 2
} |
#### File: management/commands/update_peering_session_states.py
```python
import logging
from django.core.management.base import BaseCommand
from peering.models import InternetExchange
class Command(BaseCommand):
help = "Update peering session states for Internet Exchanges."
logger = logging.getLogger("peering.manager.peering")
def handle(self, *args, **options):
self.logger.info("Updating peering session states...")
internet_exchanges = InternetExchange.objects.all()
for internet_exchange in internet_exchanges:
internet_exchange.update_peering_session_states()
```
#### File: peering-manager/peering_manager/forms.py
```python
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from utils.forms import BootstrapMixin
class LoginForm(BootstrapMixin, AuthenticationForm):
"""
Bootstrapped login form.
"""
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.fields["username"].widget.attrs["placeholder"] = ""
self.fields["password"].widget.attrs["placeholder"] = ""
class UserPasswordChangeForm(BootstrapMixin, PasswordChangeForm):
pass
```
#### File: peering-manager/peering_manager/tests.py
```python
from django.urls import reverse
from utils.tests import ViewTestCase
class PeeringManagerViewsTestCase(ViewTestCase):
def test_homepage_view(self):
response = self.client.get(reverse("home"))
self.assertEqual(response.status_code, 200)
def test_login_view(self):
response = self.client.get(reverse("login"))
self.assertEqual(response.status_code, 200)
# Login
response = self.client.post(reverse("login"), self.credentials, follow=True)
# Should be logged in
self.assertTrue(response.context["user"].is_active)
self.assertEqual(response.status_code, 200)
def test_logout_view(self):
response = self.client.get(reverse("logout"))
# Without being logged in -> redirection
self.assertEqual(response.status_code, 302)
# Login
response = self.client.post(reverse("login"), self.credentials, follow=True)
# Should be logged in, so logout should work too
self.assertTrue(response.context["user"].is_active)
response = self.client.get(reverse("logout"))
self.assertEqual(response.status_code, 302)
def test_user_profile_view(self):
response = self.client.get(reverse("user_profile"))
# Without being logged in -> redirection
self.assertEqual(response.status_code, 302)
# Login
response = self.client.post(reverse("login"), self.credentials, follow=True)
# Should be logged in, so page should work
self.assertTrue(response.context["user"].is_active)
response = self.client.get(reverse("user_profile"))
self.assertEqual(response.status_code, 200)
def test_user_change_password_view(self):
response = self.client.get(reverse("user_change_password"))
# Without being logged in -> redirection
self.assertEqual(response.status_code, 302)
# Login
response = self.client.post(reverse("login"), self.credentials, follow=True)
# Should be logged in, so page should work
self.assertTrue(response.context["user"].is_active)
response = self.client.get(reverse("user_change_password"))
self.assertEqual(response.status_code, 200)
def test_error500_view(self):
with self.assertRaises(Exception):
self.client.get("/error500/")
``` |
{
"source": "jljacoblo/MindMapGenerator",
"score": 3
} |
#### File: jljacoblo/MindMapGenerator/docx2tree.py
```python
from __future__ import annotations
import os
import io
import shutil
import re
import warnings
from typing import List, Set
from PIL import Image
from docx.text.paragraph import Paragraph
from docx.package import OpcPackage
class Node:
def __init__(self, level: int, context: List[Paragraph], parent: Node):
self.level = level
self.context = context
self.parent = parent
self.children = []
def add(self, node: Node):
self.children.append(node)
def is_normal(self):
return self.context is not None and len(self.context) > 0 and self.context[0].style.name.split()[0].lower() == 'normal'
def __repr__(self):
results = '- ' * self.level
if self.context and isinstance(self.context, List):
results += self.context[0].text
for i in range(1,len(self.context)):
results += os.linesep + '- ' * self.level + '\t\t' + self.context[i].text
results += os.linesep
for c in self.children:
results += repr(c)
return results
def get_tags(self) -> List[str]:
if not self.parent or len(self.context) <= 0: return []
node = self.parent
results = []
while node:
if (len(node.context) > 0):
tag = re.sub('[^A-Z]+', '_', node.context[0].text, 0, re.I).lower()
results += [tag]
node = node.parent
return results
def get_branch_str(self) -> str:
node = self.parent
result = '- ' * node.level + node.context[0].text
while node.parent:
node = node.parent
if len(node.context) > 0:
result = '- ' * node.level + node.context[0].text + os.linesep + result
else:
result = 'root ' + os.linesep + result
return result.replace(os.linesep, '<br>').replace('\t', ' ')
def convert_paragraph_to_html(self, paragraph: Paragraph, hide_bold: bool) -> str:
result = ''
if not self.is_normal() or not paragraph.text.replace(' ', ''):
return ''
for r in paragraph.runs:
if r.bold:
result += '<b>' + ('_' * len(r.text) if hide_bold else r.text) + '</b>'
elif r.italic:
result += '<i>' + ('_' * len(r.text) if hide_bold else r.text) + '</i>'
else:
result += r.text
return result.replace(os.linesep, '<br>').replace('\t', ' ')
class PhotoNode(Node):
def __init__(self, level: int, image_name: str, image_index: int, show_on_children_level: int, context: List[Paragraph], parent: Node):
super(PhotoNode, self).__init__(level, context, parent)
self.imageName = image_name
self.imageIndex = image_index
self.showOnChildrenLevel = show_on_children_level
def __repr__(self):
results = '- ' * self.level
if self.imageName:
results += self.imageName + " ShowOnChildren : " + str(self.showOnChildrenLevel) + os.linesep
for c in self.children:
results += repr(c)
return results
def get_image_index(package: OpcPackage, imageName: str) -> int:
document = package.main_document_part.document
for i in range(len(document.paragraphs)):
if i >= len(package.image_parts._image_parts):
raise Exception('The Save function from Microsoft Word creates a different image name for each image, please use Google Docs and export as a .docx file only')
if imageName in package.image_parts._image_parts[i].partname:
return i
return -1
def get_image_name(paragraph: Paragraph ):
if not paragraph.runs:
return ""
cur_xml = paragraph.runs[0].element.xml
regex_match = re.search("image[0-9]*.[a-zA-Z]+", cur_xml)
if regex_match:
return regex_match.group(0)
return ""
def convert_paragraphs_to_tree(package: OpcPackage) -> Node:
"""
Convert a docx file package into internal Node tree structure
Support features:
1) Each line in Document is converted into a Node
2) Combine multiple lines into 1 Node with "©©"
3) Identify photos with "®®"
Input: OpcPackage, which is the file structure of a docx file
You can get a OpcPackage by following:
f = open('Document.docx', 'rb')
package = Package.open(f)
f.close()
Node parent/children structure is based on Headings in Document
Example.docx contains:
Heading1
- word1
- Heading2
-- word2
root.children[0] => Heading1
root.children[0].children[0] => word1
root.children[0].children[1] => Heading2
root.children[0].children[1].children[0] => word2
"""
# reset image directory first
shutil.rmtree('image', ignore_errors=True)
os.mkdir('image')
paragraphs = package.main_document_part.document.paragraphs
root = Node(0, [], None)
cur_parent = root
cur_heading_level = 0
# for loop does not work, https://stackoverflow.com/a/47532461
i = 0
while(i < len(paragraphs)):
p_style = paragraphs[i].style.name.split()
# ©©8 means combine the next 8 lines inside Document into only 1 text node, so only 1 Anki Note is created
if paragraphs[i].text[0:2] == '©©' and paragraphs[i].text[2].isnumeric():
howManyLinesToSkip = int(paragraphs[i].text.split()[0].replace('©©', ''))
group_paragraphs = paragraphs[i:i+howManyLinesToSkip+1]
new_node = Node(cur_parent.level+1, group_paragraphs, cur_parent)
cur_parent.add(new_node)
i += howManyLinesToSkip + 1
continue
# ®®1 means the very next line is a picture, and this picture is going to show on every note created from each line of this heading level in this document
# For example:
# Heading 1
# texttext1
# ®®2
# image1.png
# - Heading 2
# - texttext2
# In this example, 2 Anki Notes is created, texttext1 and texttext2. Each note has image1.png in it.
if paragraphs[i].text[0:2] == '®®' and paragraphs[i].text[2].isnumeric():
imageInfo = [paragraphs[i]]
show_on_children_level = int(paragraphs[i].text[2])
i += 1
image_name = get_image_name(paragraphs[i])
if not image_name:
warnings.warn("Cannot process image : " + imageInfo[0].text)
continue
image_index = get_image_index(package, image_name)
new_node = PhotoNode(cur_parent.level+1, image_name, image_index, show_on_children_level, imageInfo, cur_parent)
cur_parent.add(new_node)
img_binary = package.image_parts._image_parts[image_index].blob
image = Image.open(io.BytesIO(img_binary))
image.save('image/'+image_name)
# normal paragraph, treat as same level as current level, check if this line is not empty
if p_style[0].lower() == 'normal' and paragraphs[i].text.replace(' ','').replace('\n',''):
new_node = Node(cur_parent.level+1, [paragraphs[i]], cur_parent)
cur_parent.add(new_node)
# If the heading line is actually empty, then skip to next one
if not paragraphs[i].text.replace(' ', '').replace('\t', '').replace('\n', ''):
i += 1
continue
# new paragraph has a lower (bigger-numbered) heading, so the parent node must move higher up, closer to the root
if p_style[0].lower() == 'heading' and int(p_style[1]) <= cur_heading_level:
for _ in range(int(p_style[1]), cur_heading_level + 1):
cur_parent = cur_parent.parent
# This handles both a bigger heading and a smaller heading (child node). A new node is created under the current parent
if p_style[0].lower() == 'heading':
new_node = Node(cur_parent.level+1, [paragraphs[i]], cur_parent)
cur_parent.add(new_node)
cur_parent = new_node
cur_heading_level = int(p_style[1])
i += 1
return root
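# Minimal usage sketch, following the pattern already shown in the docstring
# above ('Document.docx' is a placeholder path):
# from docx.package import Package
# with open('Document.docx', 'rb') as f:
#     package = Package.open(f)
# root = convert_paragraphs_to_tree(package)
# print(repr(root))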
``` |
{
"source": "JLJTECH/TutorialTesting",
"score": 4
} |
#### File: TutorialTesting/CodeFights/Candies.py
```python
def candies(n, m):
if m % n == 0:
return int((m / n) * n)
elif n == 1:
return m
else:
return int(((m - 1) / n) * n)
```
#### File: TutorialTesting/CodeFights/SeatsInTheater.py
```python
def seatsInTheater(nCols, nRows, col, row):
return (nCols - (col - 1)) * (nRows - row)
```
#### File: CodeWars/2016/DrinkAbout-8k.py
```python
def people_with_age_drink(age):
if age <= 13:
return "drink toddy"
elif age <= 17:
return "drink coke"
elif age <= 18:
return "drink beer"
elif age <= 20:
return "drink beer"
elif age <= 30:
return "drink whisky"
else:
return "drink whisky"
#Alternate Solution
def people_with_age_drink(age):
if age > 20: return 'drink whisky'
if age > 17: return 'drink beer'
if age > 13: return 'drink coke'
return 'drink toddy'
```
#### File: CodeWars/2016/ListFiltering-7k.py
```python
def filter_list(l):
return [x for x in l if type(x) is not str]
```
#### File: CodeWars/2016/MaxMinValues-8k.py
```python
def min(arr):
return sorted(arr)[0]
def max(arr):
return sorted(arr)[-1]
#Alternate Solution
def min(arr):
low = arr[0]
for i in arr[1:]:
if i < low:
low = i
return low
def max(arr):
high = arr[0]
for i in arr[1:]:
if i > high:
high = i
return high
```
#### File: CodeWars/2016/NumberOfOccurrences-7k.py
```python
def number_of_occurrences(s, xs):
return xs.count(s)
```
#### File: CodeWars/2016/OppositeNumber-8k.py
```python
def opposite(number):
if number > 0:
return -number
else:
return abs(number)
#Alternate Solution
def opposite(number):
return -number
```
#### File: CodeWars/2016/PeopleInBus-8k.py
```python
def number(bus_stops):
a = sum(i[0] for i in bus_stops)
b = sum(i[1] for i in bus_stops)
return a - b
#Code Variation
def number(bus_stops):
return sum([stop[0] - stop[1] for stop in bus_stops])
```
#### File: CodeWars/2016/PlayBanjo-8k.py
```python
def areYouPlayingBanjo(name):
if name[0] == "r" or name[0] == "R":
return name + " plays banjo"
else:
return name + " does not play banjo"
#Alternate solution
def areYouPlayingBanjo(name):
return name + (' plays' if name[0].lower() == 'r' else ' does not play') + " banjo";
```
#### File: CodeWars/2016/RemoveMin-7k.py
```python
def remove_smallest(numbers):
if numbers:
numbers.remove(min(numbers))
return numbers
```
#### File: CodeWars/2016/ReversedArray-8k.py
```python
def digitize(n):
return [int(d) for d in reversed(str(n))]
#Alternate Solution
def digitize(n):
return map(int, str(n)[::-1])
```
#### File: CodeWars/2016/SquareEveryDigit-6k.py
```python
def square_digits(num):
squares = []
for i in str(num):
squares.append(int(i)**2)
return int(''.join(map(str, squares)))
#Alternate solution
def square_digits(num):
ret = ""
for x in str(num):
ret += str(int(x)**2)
return int(ret)
```
#### File: CodeWars/2016/SumTwoLowest-7k.py
```python
def sum_two_smallest_numbers(numbers):
st = sorted(numbers)
return st[0] + st[1]
#Alternate Solution
def sum_two_smallest_numbers(numbers):
return sum(sorted(numbers)[:2])
```
#### File: CodeWars/2016/UnderPressure-8k.py
```python
def doubleInteger(i):
return i * 2
```
#### File: CodeWars/2016/VowelRemover-8k.py
```python
def shortcut( s ):
return ''.join(char for char in s if char not in set('aeiou'))
#Alternate Solution
def shortcut(s):
return s.translate(None, 'aeiou')
```
#### File: CodeWars/2019/AnagramFinder-5k.py
```python
def anagrams(word, words):
analis = []
for item in words:
if (sorted(word) == sorted(item)):
analis.append(item)
return analis
#Alternative implementations
def anagrams(word, words):
return [item for item in words if sorted(item)==sorted(word)]
def anagrams(word, words):
return filter(lambda x: sorted(word) == sorted(x), words)
```
#### File: CodeWars/2019/FindTheOdd-6k.py
```python
def find_it(seq):
cnt = 0
for i in seq:
cnt = cnt ^ i
return cnt
#Alternative solutions
def find_it(seq):
for i in seq:
if seq.count(i)%2!=0:
return i
```
#### File: CodeWars/2019/lostMap-8k.py
```python
def maps(a):
    result = map(lambda x: x + x, a)
    return list(result)
#Additional implementations:
def maps(a):
return [2 * x for x in a]
def maps(a):
return map(lambda x:2*x, a)
```
#### File: CodingBat/Python/List-1 > has23.py
```python
def has23(nums):
return 2 in nums or 3 in nums
```
#### File: CodingBat/Python/String-1 > extra_end.py
```python
def extra_end(str):
return str[-2:] * 3
```
#### File: CodingBat/Python/String-1 > make_abba.py
```python
def make_abba(a, b):
return a + (b * 2) + a
```
#### File: CodingBat/Python/Warmup-1 > not_string.py
```python
def not_string(str):
if str.startswith('not'):
return str
else:
return "not " + str
```
#### File: CodingBat/Python/Warmup-1 > parrot_trouble.py
```python
def parrot_trouble(talking, hour):
return (talking and (hour < 7 or hour > 20))
```
#### File: CodingBat/Python/Warmup-1 > sum_double.py
```python
def sum_double(a, b):
sum = a + b
if a == b:
sum = sum * 2
return sum
```
#### File: TutorialTesting/Edabit/FirstLastIndex.py
```python
def char_index(word, char):
return None if char not in word else [word.index(char), word.rindex(char)]
```
#### File: TutorialTesting/Edabit/LastDigit-Medium.py
```python
def last_dig(a, b, c):
a = list(str(a))
b = list(str(b))
c = list(str(c))
val = list(str(int(a[-1]) * int(b[-1])))
return int(val[-1]) == int(c[-1])
#Alternative Solutions
def last_dig(a, b, c):
return str(a*b)[-1] == str(c)[-1]
def last_dig(a, b, c):
return ((a % 10) * (b % 10) % 10) == (c % 10)
```
#### File: TutorialTesting/Edabit/ListAdjacent.py
```python
def middle_earth(lst):
s = lst.index('Sam')
f = lst.index('Frodo')
if s + 1 == f or s - 1 == f:
return True
else:
return False
#Alternative Solutions
def middle_earth(lst):
return abs(lst.index('Sam') - lst.index('Frodo')) == 1
```
#### File: TutorialTesting/Edabit/ListOfMultiples.py
```python
def list_of_multiples (num, length):
    coll = []
    while length > 0:
        coll.append(num * length)
        length -= 1
    return coll[::-1]
#Alternative solutions
def list_of_multiples (num, length):
return [i*num for i in range(1,length+1)]
```
#### File: TutorialTesting/Edabit/MiddleCharacter.py
```python
def get_middle(word):
if len(word) <= 2:
return word
elif len(word) % 2 == 0:
return word[(len(word) // 2) - 1] + word[(len(word) // 2)]
else:
return word[(len(word) // 2)]
#Alternative Solutions
def get_middle(word):
return word[(len(word)-1)//2:(len(word)+2)//2]
def get_middle(word):
while len(word) > 2:
word = word[1:-1]
return word
```
#### File: TutorialTesting/Edabit/Mi-Km.py
```python
def km_to_miles(kilometers):
return round(kilometers * 0.621371, 5)
```
#### File: TutorialTesting/Edabit/NoDuplicateLetters-Hard.py
```python
def no_duplicate_letters(phrase):
val = [phrase]
nlst = ' '.join(val).split()
st = [len(i) for i in nlst]
ev = [len(set(i)) for i in nlst]
return st == ev
#Alternative solutions
def no_duplicate_letters(phrase):
return all(i.count(j)==1 for i in phrase.split() for j in i)
def no_duplicate_letters(phrase):
return all([len(set(i))==len(i) for i in phrase.split(' ')])
```
#### File: TutorialTesting/Edabit/SmoothSentence.py
```python
def is_smooth(sentence):
sl = sentence.split()
first = [i[0] for i in sl]
last = [i[-1] for i in sl]
return first[1:] == last[:-1]
```
#### File: TutorialTesting/Edabit/SortedSum.py
```python
def sum_two_smallest_nums(lst):
nlst = [i for i in lst if i > 0]
return sorted(nlst)[0] + sorted(nlst)[1]
#Alternative solutions
def sum_two_smallest_nums(lst):
return sum(sorted([x for x in lst if x > 0])[:2])
```
#### File: 30 Days of Code/Day 9/recursion.py
```python
#Recursion problem - Task: Write a factorial function that takes a positive integer as a parameter and prints the result of its factorial.
def factorial(n):
if n <= 1:
return 1
else:
result = n * factorial(n - 1)
return result
print(factorial(int(input())))
```
#### File: hackerrank/Cracking The Coding Interview/CTCI-ArraysLeftRotation.py
```python
def array_left_rotation(a, n, k):
AL = list(a)
V = AL[k:]+AL[:k]
return V
```
#### File: TutorialTesting/Misc/Fibonacci.py
```python
def fib(num):
if num == 0:
return 0
elif num == 1:
return 1
else:
result = fib(num - 1) + fib(num - 2)
return result
numFibValues = int(input("How many Fibonacci values should be found : "))
i = 1
# While i is less then the number of values requested
# continue to find more
while i < numFibValues:
# Call the fib()
fibValue = fib(i)
print(fibValue)
i += 1
print("All Done")
```
#### File: TutorialTesting/Misc/MultiLetterSwitch.py
```python
def tran(dna):
    # str.maketrans replaces the Python 2 string.maketrans helper.
    inv = "ATGC"
    outv = "TACG"
    trantab = str.maketrans(inv, outv)
    print(dna.translate(trantab))
tran("GTAT")
```
#### File: TutorialTesting/Misc/splat.py
```python
def sumAll(*args):
sum = 0
for i in args:
sum += i
return sum
print(sumAll(2,3,4,5))
``` |
{
"source": "jlk9/wavelet_xcorr",
"score": 3
} |
#### File: wavelet_xcorr/dense_code/compute_diagonal_functions.py
```python
import numpy as np
from scipy.signal import correlate
""" Given two signals of the same level from coeffs1 and coeffs2, along with a set number of time lags and
interior right and left entries, this will compute all of the required diagonals for our basic levelx -
levelx component of our xcorr.
Inputs:
coeff1 the wavelet coefficients for the first (longer) signal
coeff2 the wavelet coefficients for the second (shorter) signal, same level as coeff1
right_length the number of steps we need to compute pushing coeff2 forward, including the main diagonal,
for this weight matrix
left_length the number of steps we need to compute pushing coeff2 backward for this weight matrix
offsets the number of time lags we need for this level
NOTE: right_length and left_length are based directly on the lengths of interior_left and interior_right
in the weight matrix used for computing the xcorr between these coefficients.
Returns
left_diags the left diagonals for each timelag
right_diags the right diagonals for each timelag
NOTE: the sizes of left_diags and right_diags are determined by the number of interior entries we need
to compute at each time lag X the number of time lags.
"""
def compute_all_diags(coeff1, coeff2, left_length, right_length, offsets):
left_diags = np.zeros((left_length, offsets))
right_diags = np.zeros((right_length, offsets))
len_coeff2 = len(coeff2)
# First we'll deal with the main diagonals:
#for i in range(offsets):
# right_diags[0,i] = np.inner(coeff1[i:len_coeff2+i], coeff2)
# We'll do this using FFTs:
right_diags[0] = correlate(coeff1[:len_coeff2+offsets-1], coeff2, mode='valid', method='fft')
# Now we'll deal with the first upper diagonals, by filling in from the main diagonal.
# The first upper diagonals at offset 0 do not have a relation to any of our main diagonals, so they
# must be computed separately:
right_diags[1:,0] = np.array([np.inner(coeff1[:len_coeff2-_], coeff2[_:]) for _ in range(1, right_length)])
# We can get the rest of the upper diagonals by slightly changing our main diagonals (and previous uppers)
for i in range(1, right_length):
right_diags[i,1:] = right_diags[i-1,:offsets-1] - coeff2[i-1] * coeff1[:offsets-1]
# Now we'll deal with the lower diagonals, first the last lower diagonals at the final offset:
left_diags[:,offsets-1] = np.array([np.inner(coeff1[offsets+_:len_coeff2+offsets-1], coeff2[:-_-1])
for _ in range(left_length)])
# Here we'll establish the first lower diagonal:
left_diags[0,:-1] = right_diags[0,1:] - coeff2[-1] * coeff1[len_coeff2:len_coeff2+offsets-1]
# And here we'll establish subsequent diagonals:
for i in range(1, left_length):
left_diags[i,:-1] = left_diags[i-1,1:] - coeff2[-i-1] * coeff1[len_coeff2:len_coeff2+offsets-1]
return np.transpose(left_diags), np.transpose(right_diags)
""" Computes the diagonals for the mixed wavelet xcorr in Case 1, where the longer wavelets are coming
from the longer signal. Getting the indices of coeff1 and coeff2 right for this is very intricate -
it's likely the main source of error.
Note: this one is meant specifically for the first case, coeff1 is larger wavelets, coeff2 is smaller.
This computes all the necessary diagonals for the interior of the mixed-wavelet computation.
In this version of the function, we take advantage of the redundancy in values between diagonals at
different offsets. We still calculate the first diagonals as we did previously, but now we use the
values of the first diagonals to fill in subsequent diagonals, based on rules of sliding inner products
between vectors.
Inputs:
coeff1 array of floats, the series of longer wavelets from the longer signal
coeff2 array of floats, the series of shorter wavelets from the shorter signal
scale_diff int, the difference between the scale of coeff1 and coeff2, calculated as 2 ^ (level 1 - level 2)
endpoint_ind the endpoint coordinates for this mixed-wavelet xcorr
offsets int, the number of diagonals we need, based on the strides of these wavelets and our
number of timelags
length_diag int, the number of diagonals we need to compute, based on the number of interior entries
in the corresponding weight matrix
len_coeff1 int, the number of terms from coeff1 we use for 1 diagonal
Returns:
diags the sliding xcorrs we need for interior points, a 2D array of floats of size offsets x
length_diag
"""
def mixed_compute_all_diags(coeff1, coeff2, scale_diff, endpoint_ind, offsets, length_diag, len_coeff1):
# Here we allocate the memory:
diags = np.zeros((length_diag, offsets))
# The coeff2 endpoints are dependent on signal length, so we need to compute them here:
coeff2_ends = endpoint_ind[2,:] + scale_diff * (len_coeff1 - endpoint_ind[1,:] - endpoint_ind[0,:])
main_length = offsets // scale_diff
# We'll get the first diagonals here:
for i in range(scale_diff):
diags[0,i::scale_diff] = correlate(coeff1[endpoint_ind[0,0]:len_coeff1-endpoint_ind[1,0]+main_length-1],
coeff2[endpoint_ind[2,0]-i:coeff2_ends[0]-i:scale_diff],
mode='valid', method='fft')
# Here we'll calculate the first column, since we can't use redundancy rules for it:
diags[1:,0] = [np.inner(coeff1[endpoint_ind[0,i]:len_coeff1-endpoint_ind[1,i]],
coeff2[endpoint_ind[2,i]:coeff2_ends[i]:scale_diff])for i in range(1, length_diag)]
# Here we'll get subsequent rows based on the previous rows:
for i in range(1, length_diag):
# The basic rule is that the next diagonals is equal to the previous diagonal from the previous row:
diags[i,1:] = diags[i-1,:-1]
# TODO, for better accuracy:
# Need to ADD element to front, if endpoint indices coeff1 start went down:
# Need to REMOVE element from back, if endpoint indices coeff1 end went up:
return diags
""" In general, diagonals will go down one and to left because of how the signals slide across each other.
Let's try that, make sure the overall error isn't too extreme, and test the savings:
ASSUMPTION: the endpoint indices of our correlation matrix coeff1 will always either increment or decrement
by 1 only. This affects how we fill in entries from the previous row of diagonals.
Inputs:
coeff1 the series of longer wavelets, from the shorter signal
coeff2 the series of shorter wavelets, from the longer signal
scale_diff the difference between the scale of coeff1 and coeff2, 2 ^ (level 1 - level 2)
endpoint_ind the endpoint coordinates for this mixed-wavelet xcorr
offsets the number of diagonals we need, based on the strides of these wavelets and our
number of timelags
length_diag the number of diagonals we need to compute
len_coeff1 the number of terms from coeff1 we use for 1 diagonal
Returns:
diags the sliding xcorrs we need for interior points
"""
def mixed_compute_all_diags_case2(coeff1, coeff2, scale_diff, endpoint_ind, offsets, length_diag, len_coeff1):
# Here we allocate the memory and get the coeff2 endpoints:
diags = np.zeros((length_diag, offsets))
coeff2_ends = endpoint_ind[2,:] + scale_diff * (len_coeff1 - endpoint_ind[1,:] - endpoint_ind[0,:])
# Each row will have a cyclic pattern for its entries related to endpoint_ind, so it may be best to
# base this off of that.
# Can use the current calculate_nlevel_xcorrs_vec diag fill-in for error checking
for i in range(scale_diff):
# Fix the need for [:len(diags[i,j::scale_diff])]
diags[0,i::scale_diff] = correlate(coeff2[i+endpoint_ind[2,0]:coeff2_ends[0]+offsets-1:scale_diff],
coeff1[endpoint_ind[0,0]:len_coeff1-endpoint_ind[1,0]],
mode='valid', method='fft')[:len(diags[0,i::scale_diff])]
# Since the rightmost entries don't have a main diagonal to base off of, we'll get them here:
diags[1:,-1] = [np.inner(coeff1[endpoint_ind[0,i]:len_coeff1-endpoint_ind[1,i]],
coeff2[offsets-1+endpoint_ind[2,i]:offsets-1+coeff2_ends[i]:scale_diff])
for i in range(1, length_diag)]
# Fill in the correct entries for subsequent diagonals here:
for i in range(1, length_diag):
# This is the basic rule, we need to decide what occurs in addition to this:
diags[i,:-1] = diags[i-1,1:]
# Need to ADD element to front, if endpoint indices coeff1 start went down:
if endpoint_ind[0,i] < endpoint_ind[0,i-1]:
diags[i,:-1] += coeff1[endpoint_ind[0,i]] * coeff2[endpoint_ind[2,i]:endpoint_ind[2,i]+offsets-1]
# Need to REMOVE element from back, if endpoint indices coeff1 went up:
if endpoint_ind[1,i] > endpoint_ind[1,i-1]:
diags[i,:-1] -= coeff1[len_coeff1-endpoint_ind[1,i]] * coeff2[coeff2_ends[i]:coeff2_ends[i]+offsets-1]
return diags
```
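A quick way to sanity-check the main-diagonal values returned by `compute_all_diags` is to compare them against directly computed sliding inner products. The snippet below is an illustrative check, not part of the repository, and it assumes the function above is available in the current session.
```python
import numpy as np

# Tiny illustrative check of compute_all_diags (assumed to be in scope).
coeff1 = np.arange(16, dtype=float)   # coefficients of the longer signal
coeff2 = np.arange(8, dtype=float)    # coefficients of the shorter signal
left, right = compute_all_diags(coeff1, coeff2, left_length=2, right_length=3, offsets=4)

# Column 0 of `right` holds the main-diagonal sliding inner products for lags 0..3.
expected = [np.inner(coeff1[i:i + 8], coeff2) for i in range(4)]
print(np.allclose(right[:, 0], expected))  # True
```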
#### File: wavelet_xcorr/dense_code/compute_vectorized_timelags.py
```python
import numpy as np
""" Here we compute the xcorr components for one levelx X levelx correlation. We need to compute
the diagonals with compute_all_diagonals first.
Inputs:
xcorr_matrices tuple of stacked weight tensors for this level
left_diags diagonals below the main from compute_all_diags
right_diags diagonals above and including the main from compute_all_diags
coeff1_begins 2D array of the beginning portions of the longer signal (coeffs1) in each sliding
window
coeff1_ends 2D array of the end portions of the longer signal (coeffs1) in each sliding
window
coeff2_begin 1D array of the first portion of the shorter coeffs2 signal. Since we're sliding it
along coeffs1, it is constant
coeff2_end 1D array of the end portion of the shorter coeffs2 signal
Returns:
The xcorr results for this level, arranged in a 1D array across timelags from 0 to lagmax.
The number of timelags we're calculating is given by the number of offsets in coeff1 begins
and ends, plus left and right diags. Thus we don't need to explicitly give this function
lagmax as a parameter.
This handles the weighted inner product operations of the starts and ends of the signals
with the beginning and end submatrices, and the interior entries of the signal with the
interior values of the correlation matrix. The sliding inner products between the signal
interiors are precomputed in advance using the compute_all_diagonals function.
The xcorr_matrices tuple used here is a stacked tensor, so it computes a range of timelags
concurrently.
NOTE: this is identical to compute_vectorized_timelags_sparse in the file of the same name
in sparse_code. Eventually that function may be implemented differently to take advantage
of sparsity, hence the separation between it and the dense one here. If you make changes to
one function, you should change both.
"""
def compute_vectorized_timelags(xcorr_matrices, left_diags, right_diags, coeff1_begins,
coeff1_ends, coeff2_begin, coeff2_end):
# Let's get our submatrices and interior:
begin, interior_left, interior_right, end = xcorr_matrices
# Here we'll add up the components of the xcorr. First the beginning submatrix:
# Then the interior, both left and right of the main diagonal:
# Then the end submatrix:
interiors = left_diags @ np.transpose(interior_left) + right_diags @ np.transpose(interior_right)
# TODO: streamline this reshaping:
begins = np.transpose(coeff1_begins @ begin @ coeff2_begin).flatten()
ends = np.transpose(coeff1_ends @ end @ coeff2_end).flatten()
return interiors.flatten() + begins + ends
""" This computes the weight matrix X the diagonals for the mixed case. Eventually we might incorporate the
endpoints here as well for modularity.
Inputs:
xcorr_matrices tuple, the mixed weight matrix for this level xcorr, in a stacked format
diags the diagonals for this mixed xcorr, from either case
Returns:
interiors the interior components of xcorrs at this level.
NOTE: this is identical to mixed_compute_vectorized_timelags_sparse in the file compute_vectorized_timelags_sparse
in sparse_code. Eventually that function may be implemented differently to take advantage
of sparsity, hence the separation between it and the dense one here. If you make changes to
one function, you should change both.
"""
def mixed_compute_vectorized_timelags(xcorr_matrices, diags):
begin, interior, end = xcorr_matrices
interiors = np.transpose(interior @ diags)
return interiors.flatten()
```
#### File: wavelet_xcorr/sparse_code/calculate_nlevel_xcorrs_sparse.py
```python
import numpy as np
from .compute_diagonal_functions_sparse import compute_all_diags_sparse, mixed_compute_all_diags_sparse, mixed_compute_all_diags_case2_sparse
from .compute_vectorized_timelags_sparse import compute_vectorized_timelags_sparse, mixed_compute_vectorized_timelags_sparse
""" Our new vectorized implementation of xcorr calculations. This approach modularizes computation of the sliding
inner products ("diagonals") needed for the interior entries of the weight matrix operations, allowing us to
use redundancy to reduce compute time. It also aims to vectorize weight matrix / coeff vector computations whenever
possible.
TODO: modularize the begin and end computations as well, maybe write extra helper functions to reduce the length of
this one.
TODO: modify code so lags automatically line up with the stride sizes (for now, specify lags to be a multiple of all
the strides in this DWT)
Inputs:
weight_matrices 1D list of weight matrices for each set of wavelets functions in this transformed
dataset, with timelags. Assumed to be in stacked format.
mixed_weight_matrices 2D list of weight matrices for each pair of wavelet functions in this transformed
dataset, with timelags. Assumed to be in stacked format.
mixed_endpoint_indices 2D list of endpoints for calculating the diagonals of mixed wavelet xcorr operations,
corresponding to the mixed_weight_matrices
sparse_coeffs1 list of wavelet coefficients for the first signal, in the same order as for the
weight matrices
sparse_coeffs2 list of wavelet coefficients for the second signal, in the same order as for the
weight matrices (length of signal should be <= coeffs1 signal length)
lags integer, the largest time lag of the cross correlation: how far ahead start of
coeffs2 is from start of coeffs1 (assumed to be positive). Computes from 0
to lags.
Returns:
xcorr the overall cross correlations between the two transformed signals, from 0 to lags
"""
def calculate_nlevel_xcorrs_sparse(weight_matrices, mixed_weight_matrices, mixed_endpoint_indices,
sparse_coeffs1, sparse_coeffs2, lags):
# First we need to get some values necessary for calculating the time lag. Strides lets us know how big one
# shift is at each level. Shifts lets us know how many steps of wavelets we need to move coeffs1 at each
# level:
levels = np.array([len(weight_matrices) - 1] + list(range(len(weight_matrices) - 1, 0, -1)))
strides = np.array([thing[0].shape[0] for thing in weight_matrices])
shifts = lags // strides
scale_diffs = 2 ** (levels[0]- levels)
steps = np.array([weight_matrices[_][0].shape[1] for _ in range(len(weight_matrices))])
len_coeffs2 = np.array([_[2] for _ in sparse_coeffs2])
# These are used for the mixed wavelet terms, where we need quasi-symmetry:
inverse_shifts = -1 * (-lags // strides)
xcorrs = np.zeros((lags))
# Here we generate the endpoint matrices we need. This assumes inverse_shift >= shifts:
coeff1_begins, coeff1_ends = form_coeff1_begin_end(sparse_coeffs1, steps, inverse_shifts + 1, len_coeffs2)
coeff2_begins, coeff2_ends = form_coeff2_begin_end(sparse_coeffs2, steps, scale_diffs, len_coeffs2)
# Here we add the basic, non-mixed terms
for i in range(len(weight_matrices)):
length_left = weight_matrices[i][1].shape[1]
length_right = weight_matrices[i][2].shape[1]
# We need to see if level i in either coeffs1 or coeffs2 is zero:
if (sparse_coeffs1[i][0].shape[0] != 0) and (sparse_coeffs2[i][0].shape[0] != 0):
left_diags, right_diags = compute_all_diags_sparse(sparse_coeffs1[i], sparse_coeffs2[i], length_left,
length_right, shifts[i])
# Here we loop over each stride of xcorrs for this level:
# NOTE: if we change the endpoints for coeffs2, this might need to change
xcorrs += compute_vectorized_timelags_sparse(weight_matrices[i], left_diags, right_diags, coeff1_begins[i][:shifts[i]],
coeff1_ends[i][:shifts[i]], coeff2_begins[i][0], coeff2_ends[i][0])
# Here we proceed to the mixed terms:
# We have to iterate through each mixed matrix, which only has matrices for smaller wavelets:
for j in range(len(mixed_weight_matrices[i])):
# The stride and shift we'll need are based on the smaller level. The other terms help us compute
# the diagonals properly:
smaller_level = i+j+1
stride = strides[smaller_level]
shift = shifts[smaller_level]
length_diag = mixed_weight_matrices[i][j][1].shape[1]
scale_diff = 2**(levels[i]-levels[smaller_level])
# CASE 1: we handle the coeffs1 term x the coeffs2 term for this level
# First, we need to see if level i in coeffs1 or smaller_level in coeffs2 is zero:
if (sparse_coeffs1[i][0].shape[0] != 0) and (sparse_coeffs2[smaller_level][0].shape[0] != 0):
# Here we deal with the interior terms:
diags = mixed_compute_all_diags_sparse(sparse_coeffs1[i], sparse_coeffs2[smaller_level], scale_diff,
mixed_endpoint_indices[i][j], shift, length_diag, len_coeffs2[i])
xcorrs += mixed_compute_vectorized_timelags_sparse(mixed_weight_matrices[i][j], diags)
# He we calculate the endpoint multiplications:
begin_end = [coeff1_begins[i][:shifts[i]] @ mixed_weight_matrices[i][j][0] @ coeff2_begins[smaller_level][_]
+ coeff1_ends[i][:shifts[i]] @ mixed_weight_matrices[i][j][2] @ coeff2_ends[smaller_level][_]
for _ in range(-scale_diff, 0)]
# We reformat the output's shape, and add it to xcorrs:
xcorrs += np.concatenate(begin_end).flatten(order='F')
# CASE 2: where we get the longer wavelet function from coeffs2 instead of coeffs1
# First, we need to see if smaller_level in coeffs1 or i in coeffs2 is zero:
if (sparse_coeffs1[smaller_level][0].shape[0] != 0) and (sparse_coeffs2[i][0].shape[0] != 0):
diags = mixed_compute_all_diags_case2_sparse(sparse_coeffs2[i], sparse_coeffs1[smaller_level], scale_diff,
mixed_endpoint_indices[i][j], inverse_shifts[smaller_level]+1,
length_diag, len_coeffs2[i])
# Here we flip our appropriate weight matrix:
flipped_matrix = (mixed_weight_matrices[i][j][0], np.zeros(mixed_weight_matrices[i][j][1].shape),
mixed_weight_matrices[i][j][2])
flipped_matrix[1][:] = np.flip(mixed_weight_matrices[i][j][1], axis=0)
# The first diagonal entry is only used for timelag 0, so we will need to truncate the front end
# of the resulting xcorrs:
xcorrs += mixed_compute_vectorized_timelags_sparse(flipped_matrix, diags)[stride-1:-1]
# Like for case 1, we add the beginning and end components to the xcorrs:
# NOTE: if we change the endpoints for coeffs2, this might need to change
begin_end = (coeff2_begins[i][0] @ np.flip(mixed_weight_matrices[i][j][0], axis=0) @ coeff1_begins[smaller_level].T
+ coeff2_ends[i][0] @ np.flip(mixed_weight_matrices[i][j][2], axis=0) @ coeff1_ends[smaller_level].T)
xcorrs += begin_end.flatten(order='F')[stride-1:-1]
return xcorrs
""" Helper, forms the beginning and end matrices for the first signal's coefficient vectors.
Since they are all calculated here, we can reuse them as needed for calculating the endpoint
components of xcorrs.
Inputs:
sparse_coeffs1
steps
shifts
len_coeffs2
Returns:
coeff1_begins
coeff1_ends
"""
def form_coeff1_begin_end(sparse_coeffs1, steps, shifts, len_coeffs2):
count = range(len(steps))
coeff1_begin_values = (shifts + steps) - 1
coeff1_end_values = len_coeffs2 - steps
coeff1_values = np.stack((coeff1_begin_values, coeff1_end_values, (coeff1_begin_values + coeff1_end_values))).T
# For the sparse implementation, we need to determine which entries of begin and end are nonzero:
coeff1_endpoints = [np.searchsorted(sparse_coeffs1[_][0], coeff1_values[_], side='left', sorter=None) for _ in count]
coeff1_begins = [np.zeros((shifts[_] + steps[_] - 1)) for _ in count]
coeff1_ends = [np.zeros((shifts[_] + steps[_] - 1)) for _ in count]
for i in count:
coeff1_begins[i][sparse_coeffs1[i][0][:coeff1_endpoints[i][0]]] = sparse_coeffs1[i][1][:coeff1_endpoints[i][0]]
coeff1_ends[i][sparse_coeffs1[i][0][coeff1_endpoints[i][1]:coeff1_endpoints[i][2]] - len_coeffs2[i] + steps[i]] = sparse_coeffs1[i][1][coeff1_endpoints[i][1]:coeff1_endpoints[i][2]]
coeff1_indexes = [np.arange(steps[_]) + np.array([np.arange(shifts[_])]).T for _ in count]
coeff1_begins = [coeff1_begins[_][coeff1_indexes[_]] for _ in count]
coeff1_ends = [coeff1_ends[_][coeff1_indexes[_]] for _ in count]
return coeff1_begins, coeff1_ends
""" Helper, forms the beginning and end matrices for the second signal's coefficient vectors.
Since they are all calculated here, we can reuse them as needed for calculating the endpoint
components of xcorrs.
Inputs:
sparse_coeffs2
steps
scale_diffs
Returns:
coeff2_begins
coeff2_ends
"""
def form_coeff2_begin_end(sparse_coeffs2, steps, scale_diffs, len_coeffs2):
count = range(len(steps))
# Here we determine what entries of the sparse coefficients we need:
coeff2_values = np.stack((steps, len_coeffs2 - scale_diffs - steps)).T
coeff2_endpoints = [np.searchsorted(sparse_coeffs2[_][0], coeff2_values[_], side='left', sorter=None) for _ in count]
coeff2_begins = [np.zeros((scale_diffs[_] + steps[_])) for _ in count]
coeff2_ends = [np.zeros((scale_diffs[_] + steps[_])) for _ in count]
for i in count:
coeff2_begins[i][scale_diffs[i] + sparse_coeffs2[i][0][:coeff2_endpoints[i][0]]] = sparse_coeffs2[i][1][:coeff2_endpoints[i][0]]
coeff2_ends[i][sparse_coeffs2[i][0][coeff2_endpoints[i][1]:] - coeff2_values[i,1]] = sparse_coeffs2[i][1][coeff2_endpoints[i][1]:]
coeff2_indexes = [np.arange(-steps[_], 0) + np.array([np.arange(0, -scale_diffs[_], -1)]).T for _ in count]
coeff2_begins = [coeff2_begins[_][coeff2_indexes[_]] for _ in count]
coeff2_ends = [coeff2_ends[_][coeff2_indexes[_]] for _ in count]
return coeff2_begins, coeff2_ends
```
#### File: wavelet_xcorr/sparse_code/compute_diagonal_functions_sparse.py
```python
import numpy as np
import math
from scipy.signal import correlate
# First, we need a few C related libraries:
from ctypes import c_void_p, c_double, c_int, cdll
from numpy.ctypeslib import ndpointer
# This loads the compiled C code and the function, getting its path relative to this module:
lib = cdll.LoadLibrary(__file__[:-62] + 'bin/diag_helper_sparse.so')
sparse_xcorr_sum_void = lib.sparse_xcorr_sum_void
# Now we need to load our function, which was already compiled with the following command:
# cc -fPIC -shared -o ../../bin/diag_helper_sparse.so ./diag_helper_sparse.c
""" Given two signals of the same level from coeffs1 and coeffs2, along with a set number of time lags and
interior right and left entries, this will compute all of the required diagonals for our basic levelx -
levelx component of our xcorr.
Inputs:
coeff1 the wavelet coefficients for the first (longer) signal
coeff2 the wavelet coefficients for the second (shorter) signal, same level as coeff1
right_length the number of lags we need to compute pushing coeff2 forward, including the main diagonal,
for this weight matrix
left_length the number of lags we need to compute pushing coeff2 backward for this weight matrix
    offsets         the number of time lags we need
Returns
left_diags the left diagonals for each timelag
right_diags the right diagonals for each timelag
"""
def compute_all_diags_sparse(sparse_coeff1, sparse_coeff2, left_length, right_length, offsets):
left_diags = np.zeros((left_length, offsets))
right_diags = np.zeros((right_length, offsets))
len_coeff2 = sparse_coeff2[2]
# First we'll deal with the main diagonals, the first upper diagonals, and the last lower diagonals. These are the sliding
# inner products of the two sets of wavelet coefficients against each other - we use C code to speed up the required for loops:
# Note: might need to truncate the end of sparse_coeff1 here.
diags = sparse_xcorr_calc_C_void((sparse_coeff1[0] + right_length-1, sparse_coeff1[1], sparse_coeff1[2]), sparse_coeff2, offsets + left_length + right_length-1)
# The first few upper diagonals are the first sliding inner products:
right_diags[::-1,0] = diags[:right_length]
# Most of the sliding inner products make up the main diagonals:
right_diags[0] = diags[right_length-1:offsets+right_length-1]
# The last few sliding inner products are the end lower diagonals:
left_diags[:,offsets-1] = diags[-left_length:]
# We can get the rest of the upper diagonals by slightly changing our main diagonals.
# First, we need to determine which entries of the sparse vector must be added to each lower/upper diagonal.
# These entries correspond to the end of what to remove from the uppers, then the begin/end of what to
# remove from the lowers, respectively.
# This determines what entries of coeff1 are needed to modify our upper and lower (right and left) diagonals:
lower_upper_bounds = np.searchsorted(sparse_coeff1[0], [offsets-1, len_coeff2, len_coeff2+offsets-1], side='left', sorter=None)
# This determines what entries of coeff2 modify our right diagonals:
upper_begins = np.searchsorted(sparse_coeff2[0], list(range(right_length-1)), side='left', sorter=None)
# This is the term from coeff1 we subtract from our upper diagonals:
modify_diag = sparse_coeff1[1][:lower_upper_bounds[0]]
# This gives us the indexing of the entries which are decremented by modify_diag:
indexing = sparse_coeff1[0][:lower_upper_bounds[0]] + 1
for i in range(1, right_length):
# First, we know this upper diagonal almost equals the previous offset's previous upper diagonal:
right_diags[i,1:] = right_diags[i-1,:offsets-1]
# If our sparse vector contains the value to be removed, then we need to remove it to get the exact upper diagonal:
if sparse_coeff2[0][upper_begins[i-1]] == i-1:
right_diags[i, indexing] -= sparse_coeff2[1][upper_begins[i-1]] * modify_diag
# Now we'll deal with the lower diagonals, first determining what part of coeff2 to remove:
lower_ends = np.searchsorted(sparse_coeff2[0], list(range(len_coeff2-1, len_coeff2 - left_length-1, -1)), side='left', sorter=None)
# This is the term from coeff1 we subtract from our lower diagonals:
modify_diag = sparse_coeff1[1][lower_upper_bounds[1]:lower_upper_bounds[2]]
# This gives us the indexing of the entries which are decremented by modify_diag:
indexing = sparse_coeff1[0][lower_upper_bounds[1]:lower_upper_bounds[2]] - len_coeff2
# Here we'll establish the first lower subdiagonal:
left_diags[0,:-1] = right_diags[0,1:]
if (lower_ends[0] < len(sparse_coeff2[0])) and (sparse_coeff2[0][lower_ends[0]] == len_coeff2 - 1):
left_diags[0, indexing] -= sparse_coeff2[1][lower_ends[0]] * modify_diag
# And here we'll establish subsequent diagonals:
for i in range(1, left_length):
left_diags[i,:-1] = left_diags[i-1,1:]
if (lower_ends[i] < len(sparse_coeff2[0])) and (sparse_coeff2[0][lower_ends[i]] == len_coeff2 - 1 - i):
left_diags[i, indexing] -= sparse_coeff2[1][lower_ends[i]] * modify_diag
return np.transpose(left_diags), np.transpose(right_diags)
""" Given two signals of the same level from coeffs1 and coeffs2, along with a set number of time lags and
interior right and left entries, this will compute all of the required diagonals for our mixed level1 -
level2 component of our xcorr.
Inputs:
coeff1 the wavelet coefficients for the first (longer) signal
coeff2 the wavelet coefficients for the second (shorter) signal, same level as coeff1
scale_diff
endpoint_ind
offsets
length_diag the number of lags we need to compute pushing coeff2 forward for this weight matrix
len_coeff1 the length of coeff1
Returns
diags the diagonals for each timelag
"""
def mixed_compute_all_diags_sparse(sparse_coeff1, sparse_coeff2, scale_diff, endpoint_ind, offsets, length_diag, len_coeff1):
# We'll get the first column by padding some extra 0s to our timelags and applying redundancy rules:
padding = math.ceil(length_diag / scale_diff) * scale_diff
# Here we allocate the memory:
diags = np.zeros((length_diag, offsets + padding))
# The coeff2 endpoints are dependent on signal length, so we need to compute them here:
coeff2_ends = endpoint_ind[2,:] + scale_diff * (len_coeff1 - endpoint_ind[1,:] - endpoint_ind[0,:])
main_length = (offsets + padding) // scale_diff
# We'll get the first diagonals here:
# IDEA: break sparse_coeff2 up into parts, based on which part goes into which correlate call.
# Then call sparse_xcorr_calc_C on those separate xcorr calculations
# FIRST, call searchsorted out here to get the beginning and end indices of both coeffs for each i (begin will be coeff2[endpoint_ind[2,0]-i, end will be coeff2_ends[0]-i)
coeff1_padded = sparse_coeff1[0] + (padding // scale_diff)
coeff1_endpoints = np.searchsorted(coeff1_padded, [endpoint_ind[0,0], len_coeff1-endpoint_ind[1,0]+main_length-1], side='left', sorter=None)
coeff2_endpoints = np.searchsorted(sparse_coeff2[0], [endpoint_ind[2,0]-scale_diff+1, coeff2_ends[0]], side='left', sorter=None)
# Here, we determine what portions of coeff1 and coeff2 we need to operate on:
coeff1_to_compute = (coeff1_padded[coeff1_endpoints[0]:coeff1_endpoints[1]] - endpoint_ind[0,0], sparse_coeff1[1][coeff1_endpoints[0]:coeff1_endpoints[1]], sparse_coeff1[2])
coeff2_to_compute_indices = sparse_coeff2[0][coeff2_endpoints[0]:coeff2_endpoints[1]]
coeff2_to_compute_values = sparse_coeff2[1][coeff2_endpoints[0]:coeff2_endpoints[1]]
for i in range(scale_diff):
# HERE, use np.where to find which entries of sparse_coeff2[0][begin for this i:end for this i] are divisible by scale_diff
this_scale = ((coeff2_to_compute_indices % scale_diff) == (scale_diff - 1 - i))
# LAST, call sparse_xcorr_calc_C here, with the sparse vectors filtered using work from above. The lagmax should be
# main_length
diags[0,i::scale_diff] = sparse_xcorr_calc_C_void(coeff1_to_compute, (coeff2_to_compute_indices[this_scale] // scale_diff, coeff2_to_compute_values[this_scale]), main_length)
# Here we'll get subsequent rows based on the previous rows. Since we padded the front entries, we now use the redundancy rules to generate the
# first column:
for i in range(1, length_diag):
# The basic rule is that the next diagonals is equal to the previous diagonal from the previous row:
diags[i,1:] = diags[i-1,:-1]
# TODO, for better accuracy:
# Need to ADD element to front, if endpoint indices coeff1 start went down:
# Need to REMOVE element from back, if endpoint indices coeff1 end went up:
return diags[:,padding:]
""" In general, diagonals will go down one and to left because of how the signals slide across each other.
Let's try that, make sure the overall error isn't too extreme, and test the savings:
ASSUMPTION: the endpoint indices of our correlation matrix coeff1 will always either increment or decrement
by 1 only. This affects how we fill in entries from the previous row of diagonals.
Inputs:
coeff1 the series of longer wavelets, from the shorter signal
coeff2 the series of shorter wavelets, from the longer signal
scale_diff the difference between the scale of coeff1 and coeff2, 2 ^ (level 1 - level 2)
endpoint_ind the endpoint coordinates for this mixed-wavelet xcorr
offsets the number of diagonals we need, based on the strides of these wavelets and our
number of timelags
length_diag the number of diagonals we need to compute
len_coeff1 the number of terms from coeff1 we use for 1 diagonal
Returns:
diags the sliding xcorrs we need for interior points
"""
def mixed_compute_all_diags_case2_sparse(sparse_coeff1, sparse_coeff2, scale_diff, endpoint_ind, offsets, length_diag, len_coeff1):
# We'll get the last column by padding some extra 0s to our timelags and applying redundancy rules:
padding = math.ceil(length_diag / scale_diff) * scale_diff
# Here we allocate the memory and get the coeff2 endpoints:
diags = np.zeros((length_diag, offsets + padding))
coeff2_ends = endpoint_ind[2,:] + scale_diff * (len_coeff1 - endpoint_ind[1,:] - endpoint_ind[0,:])
# FIRST, call searchsorted out here to get the beginning and end indices of both coeffs for each i (begin will be coeff2[endpoint_ind[2,0]-i, end will be coeff2_ends[0]-i)
coeff1_endpoints = np.searchsorted(sparse_coeff1[0], [endpoint_ind[0,0], len_coeff1-endpoint_ind[1,0]], side='left', sorter=None)
coeff2_endpoints = np.searchsorted(sparse_coeff2[0], [endpoint_ind[2,0], coeff2_ends[0]+offsets+padding-1], side='left', sorter=None)
# Here, we determine what portions of coeff1 and coeff2 we need to operate on:
coeff1_to_compute = (sparse_coeff1[0][coeff1_endpoints[0]:coeff1_endpoints[1]] - endpoint_ind[0,0], sparse_coeff1[1][coeff1_endpoints[0]:coeff1_endpoints[1]], sparse_coeff1[2])
coeff2_to_compute_indices = sparse_coeff2[0][coeff2_endpoints[0]:coeff2_endpoints[1]] - endpoint_ind[2,0]
coeff2_to_compute_values = sparse_coeff2[1][coeff2_endpoints[0]:coeff2_endpoints[1]]
for i in range(scale_diff):
# HERE, use np.where to find which entries of sparse_coeff2[0][begin for this i:end for this i] are divisible by scale_diff
this_scale = ((coeff2_to_compute_indices % scale_diff) == i)
diags[0,i::scale_diff] = sparse_xcorr_calc_C_void((coeff2_to_compute_indices[this_scale] // scale_diff, coeff2_to_compute_values[this_scale]), coeff1_to_compute,
len(diags[0,i::scale_diff]))
# Fill in the correct entries for subsequent diagonals here. First, we need to determine what coeff 1 entries we need
# to add and remove:
coeff1_add_indices = np.searchsorted(sparse_coeff1[0], endpoint_ind[0,1:], side='left', sorter=None)
coeff1_sub_indices = np.minimum(np.searchsorted(sparse_coeff1[0], len_coeff1 - endpoint_ind[1,1:], side='left', sorter=None), len(sparse_coeff1[0])-1)
# We need to zero out the terms that are not the correct indices, or that repeat:
coeff1_adds = sparse_coeff1[1][coeff1_add_indices] * (sparse_coeff1[0][coeff1_add_indices] == endpoint_ind[0,1:]) * (endpoint_ind[0,1:] != endpoint_ind[0,:-1])
coeff1_subs = sparse_coeff1[1][coeff1_sub_indices] * (sparse_coeff1[0][coeff1_sub_indices] == len_coeff1 - endpoint_ind[1,1:]) * (endpoint_ind[1,1:] != endpoint_ind[1,:-1])
coeff2_adds_start = np.searchsorted(sparse_coeff2[0], endpoint_ind[2,1:], side='left', sorter=None)
coeff2_adds_end = np.searchsorted(sparse_coeff2[0], endpoint_ind[2,1:] + offsets+padding-1, side='left', sorter=None)
coeff2_subs_start = np.searchsorted(sparse_coeff2[0], coeff2_ends[1:], side='left', sorter=None)
coeff2_subs_end = np.searchsorted(sparse_coeff2[0], coeff2_ends[1:] + offsets+padding-1, side='left', sorter=None)
for i in range(length_diag-1):
# This is the basic rule, we need to decide what occurs in addition to this:
diags[i+1,:-1] = diags[i,1:]
# Need to ADD element to front, if endpoint indices coeff1 start went down:
if coeff1_adds[i] != 0:
diags[i+1, sparse_coeff2[0][coeff2_adds_start[i]:coeff2_adds_end[i]] - endpoint_ind[2,i+1]] += coeff1_adds[i] * sparse_coeff2[1][coeff2_adds_start[i]:coeff2_adds_end[i]]
# Need to REMOVE element from back, if endpoint indices coeff1 went up:
if coeff1_subs[i] != 0:
diags[i+1, sparse_coeff2[0][coeff2_subs_start[i]:coeff2_subs_end[i]] - coeff2_ends[i+1]] -= coeff1_subs[i] * sparse_coeff2[1][coeff2_subs_start[i]:coeff2_subs_end[i]]
return diags[:,:-padding]
""" Here we'll create our modified function, with the summation part done in C.
Other things we need this function to do:
1. Compute the first upper diagonals
2. Compute the first lower diagonals
3. Find the necessary entries of coeffs2 to modify values for subsequent diagonals
"""
def sparse_xcorr_calc_C_void(sparse_vector1, sparse_vector2, lagmax):
# get array indices where vector1[0] is between curr_index and curr_index+1000:
indices_left = np.searchsorted(sparse_vector1[0], sparse_vector2[0], side='left', sorter=None)
indices_right = np.searchsorted(sparse_vector1[0], sparse_vector2[0]+lagmax, side='left', sorter=None)
length = len(sparse_vector2[0])
sparse_xcorrs = np.zeros((lagmax))
sparse_xcorr_sum_void(c_void_p(indices_left.ctypes.data), c_void_p(indices_right.ctypes.data),
c_void_p(sparse_vector1[0].ctypes.data), c_void_p(sparse_vector2[0].ctypes.data),
c_void_p(sparse_vector1[1].ctypes.data), c_void_p(sparse_vector2[1].ctypes.data),
c_int(length), c_void_p(sparse_xcorrs.ctypes.data))
return sparse_xcorrs
```
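The compiled helper `sparse_xcorr_sum_void` is not shown here, but from the way it is called above it appears to accumulate sliding inner products of two sparse vectors. A pure-Python reference along the following lines can be used to check results; this is an illustrative sketch based on that reading of the call site, not code from the repository.
```python
import numpy as np

def sparse_xcorr_reference(sparse_vector1, sparse_vector2, lagmax):
    """Slow reference: result[lag] = sum_j v2_values[j] * v1[v2_indices[j] + lag]."""
    indices1 = np.asarray(sparse_vector1[0]).tolist()
    values1 = np.asarray(sparse_vector1[1]).tolist()
    lookup = dict(zip(indices1, values1))
    xcorrs = np.zeros(lagmax)
    for lag in range(lagmax):
        xcorrs[lag] = sum(val * lookup.get(int(idx) + lag, 0.0)
                          for idx, val in zip(sparse_vector2[0], sparse_vector2[1]))
    return xcorrs
```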
#### File: wavelet_xcorr/support_code/thresholding_functions.py
```python
import h5py
import numpy as np
import pywt
""" Given a coefficient vector and a percentile, this truncates all entries which are below
the vector's percentile in magnitude.
Inputs:
coeffs A set of wavelet coefficients derived from a DWT
percentile The percentile of data to be preserved.
Returns:
thresheld_coeffs A set of wavelet coefficients stored in the same format as coeffs, with all entries
smaller in magnitude than the percentile zeroed out.
"""
def threshold_coeffs_one_channel(coeffs, percentile):
coeffs_array, coeffs_slices, coeffs_shapes = pywt.ravel_coeffs(coeffs, axes=None)
thresheld_coeffs_array = pywt.threshold(coeffs_array, np.percentile(np.abs(coeffs_array), percentile),
mode='hard', substitute=0)
thresheld_coeffs = pywt.unravel_coeffs(thresheld_coeffs_array, coeffs_slices,
coeffs_shapes, output_format='wavedecn')
thresheld_coeffs = [thresheld_coeffs[0]] + [_['d'] for _ in thresheld_coeffs[1:]]
return thresheld_coeffs
""" Given a set of coefficient vectors, this converts them into a sparse format.
Input:
coeffs list of coefficient vectors for a signal in a DWT
Returns:
    sparse_coeffs coeffs in a sparse format - every dense array is replaced by a triple of
                  (nonzero indices, nonzero values, original length of dense array)
"""
def make_sparse_coeffs(coeffs):
sparse_coeffs = [(np.nonzero(coeff)[0], coeff[np.nonzero(coeff)[0]], len(coeff)) for coeff in coeffs]
return sparse_coeffs
""" Creates HDF5 files containing wavelet-domain data instead of time-domain data. Stores these in
a specified path. This preserves the file name (with wavelet_domain_ appended to the front) and
the details of the transform, but not the metadata from the original file.
Assumes the time-domain data of interest is in the DAS dataset, and it must be transposed.
    Inputs:
        input_path      directory containing the time-domain hdf5 files
        output_path     directory where the wavelet-domain hdf5 files are written
        files           list of file names to transform
        data_name       name of the hdf5 dataset holding the time-domain data
        transposed      "yes" if the data must be transposed before transforming
        wavelet         wavelet family passed to pywt.wavedec
        level           decomposition level of the DWT
        percentile      thresholding percentile passed to threshold_coeffs_one_channel (0 disables thresholding)
    Returns:
0. Also creates an hdf5 file containing the wavelet-transformed data, with datasets for each level
of the DWT.
"""
def store_wavelet_hdf5(input_path, output_path, files, data_name, transposed, wavelet, level, percentile):
for file in files:
h5_file = h5py.File(input_path + file, 'r')
data = h5_file[data_name][:]
data = data.astype(np.float64)
if transposed == "yes":
data = data.T
h5_file.close()
coeffs = pywt.wavedec(data, wavelet, level=level, mode="periodic")
# Threshold each channel here:
if percentile != 0:
for i in range(0, data.shape[0]):
thresheld_coeffs = threshold_coeffs_one_channel([coeff[i] for coeff in coeffs], percentile)
for j in range(len(coeffs)):
coeffs[j][i] = thresheld_coeffs[j]
result = h5py.File(output_path + "wavelet_domain_" + str(percentile) + "_percentile_" + file, 'w')
result.attrs["wavelet"] = wavelet
result.attrs["level"] = level
result.create_dataset("approximation", data=coeffs[0])
for i in range(1, level+1):
result.create_dataset("detail_" + str(i), data=coeffs[-i])
result.close()
return 0
""" Takes thresholded wavelets from store_wavelet_hdf5 and converts them into
a sparse format.
Inputs:
input_path the location of the dense thresholded wavelet-domain arrays
output_path where to place the sparse array file
files the list of thresholded wavelet-domain files to work with
Returns:
0, but also creates an hdf5 file of sparse wavelet coefficients for each file in input_path. These
files are located in output_path. Each file has three datasets:
1. indices contains the nonzero indices at each row (one 2D array)
2. values contains the nonzero values at each row (one 2D array)
3. level_lengths contains the number of shift factors ("lengths") for each level
4. level_starts shows the starting index for each level, since all levels are stored in one array
We also keep the wavelet and level attributes in our metadata.
"""
def make_wavelet_hdf5_sparse(input_path, output_path, files):
for file in files:
# First we need to extract the dense thresheld coefficients and concatenate them:
dense_file = h5py.File(input_path + file, 'r')
wavelet = dense_file.attrs["wavelet"]
level = dense_file.attrs["level"]
dense_file_all_levels = [dense_file["approximation"][:]] + [dense_file["detail_" + str(_)][:] for _ in range(level, 0, -1)]
all_coeffs = np.concatenate(dense_file_all_levels, axis=1)
level_lengths = np.array([_.shape[1] for _ in dense_file_all_levels])
level_starts = np.array([np.sum(level_lengths[:i]) for i in range(len(level_lengths))])
dense_file.close()
# Now we get the nonzero indices for each row:
indices = []
values = []
for i in range(all_coeffs.shape[0]):
indices.append(np.nonzero(all_coeffs[i])[0])
values.append(all_coeffs[i,indices[i]])
# Here we stack the indices and values into two arrays:
# TODO: zero-pad to avoid issues with uneven lengths:
index_array = np.stack(indices, axis=0)
value_array = np.stack(values, axis=0)
# And here we store this all into an hdf5 file:
result = h5py.File(output_path + "sparse_" + file, 'w')
result.attrs["wavelet"] = wavelet
result.attrs["level"] = level
result.create_dataset("indices", data=index_array)
result.create_dataset("values", data=value_array)
result.create_dataset("level_starts", data=level_starts)
result.create_dataset("level_lengths", data=level_lengths)
result.close()
return 0
""" Takes a sparse wavelet coefficients hdf5 file and breaks it up into a list of the wavelet coefficients,
useful for our cross-correlation algorithm.
Inputs:
input_path where the sparse wavelet coefficient files are located
files the sparse wavelet coefficient file names
Outputs:
list of the wavelet coefficients for each file. Each file is represented by its own list of tuples for the sparse
    coefficients at each channel and level. The list is organized as:
    list of files -> list of channels -> list of levels -> each level is a tuple representing a sparse vector
"""
def break_sparse_hdf5(input_path, files):
all_file_coeffs = []
for file in files:
# The list of lists that make up our coefficients across the whole file:
file_coeffs = []
# We get the necessary information for this file:
sparse_coeffs_file = h5py.File(input_path + file, 'r')
indices = sparse_coeffs_file["indices"][:]
values = sparse_coeffs_file["values"][:]
level_lengths = sparse_coeffs_file["level_lengths"][:]
level_starts = np.concatenate([sparse_coeffs_file["level_starts"][:], [sum(level_lengths)]])
# Now we go through each channel:
for i in range(indices.shape[0]):
# Each channel has a different number of entries per level, so we need to find
# which subsets of this channel correspond to each level:
lv_bds = np.searchsorted(indices[i], level_starts)
file_coeffs.append([(indices[i,lv_bds[_]:lv_bds[_+1]] - level_starts[_],
values[i,lv_bds[_]:lv_bds[_+1]],
level_lengths[_]) for _ in range(len(level_lengths))])
all_file_coeffs.append(file_coeffs)
return all_file_coeffs
``` |
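To make the sparse triple format produced by `make_sparse_coeffs` concrete, the following small example shows what the triples look like for a short test signal. It is illustrative only, mirrors the expression used in `make_sparse_coeffs`, and assumes numpy and PyWavelets are installed.
```python
import numpy as np
import pywt

signal = np.zeros(64)
signal[10] = 1.0  # a single spike keeps most wavelet coefficients at zero
coeffs = pywt.wavedec(signal, "db2", level=3, mode="periodic")
sparse_coeffs = [(np.nonzero(c)[0], c[np.nonzero(c)[0]], len(c)) for c in coeffs]

for indices, values, length in sparse_coeffs:
    # Each triple: (nonzero indices, nonzero values, original dense length).
    print(length, indices, np.round(values, 3))
```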
{
"source": "jlk/aws-tagger",
"score": 2
} |
#### File: jlk/aws-tagger/tag_instance.py
```python
import argparse
import boto3
import sys
# Arguments: instance_id
cli_parser = argparse.ArgumentParser()
cli_parser.add_argument("-R", "--region", help="region to find instance in", default="us-west-2")
cli_parser.add_argument("-I", "--instanceid", help="instance to set tags on", required=True)
cli_parser.add_argument("-V", "--verbose", help="enable verbose logging", action="count")
cli_parser.add_argument("-L", "--lifecycle", help="lifecycle tag value (YYYYMMDD)", default="20191101")
cli_args = cli_parser.parse_args()
my_ec2_tags = [{'Key':'Owner', 'Value':'<NAME>'},
{'Key':'Email', 'Value':'<EMAIL>'},
{'Key':'Department', 'Value':'dept'},
{'Key':'Lifecycle', 'Value':'20202101'},
{'Key':'Jira', 'Value':''}]
# print_tags() - prints out AWS tags from a python dict
def print_tags(tags):
for tag in tags:
print("{}: {}".format(tag["Key"], tag["Value"]))
ec2_resource = boto3.resource('ec2', region_name=cli_args.region)
try:
instance = ec2_resource.Instance(cli_args.instanceid)
instance.tags
except Exception as e:
print("Exception loading instance: %s" % e)
sys.exit(-1)
if cli_args.verbose:
    print("Current tags:")
    print_tags(instance.tags)
    print("")
instance.tags.extend(my_ec2_tags)
if cli_args.verbose:
    print("Updated tags (may have duplicates - AWS will dedupe):")
    print_tags(instance.tags)
    print("")
if cli_args.verbose:
    print("Updating...")
ec2_client = boto3.client('ec2', region_name=cli_args.region)
ec2_client.create_tags(Resources=[cli_args.instanceid], Tags=instance.tags)
if cli_args.verbose:
    print("Done.")
``` |
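The script relies on AWS-side handling of duplicate tag keys when `create_tags` is called, as its own comment notes. If client-side deduplication is preferred, a small helper along these lines could be applied first; this is a sketch, not part of the original script.
```python
def dedupe_tags(tags):
    """Collapse a tag list to one entry per key, keeping the last value seen."""
    merged = {}
    for tag in tags:
        merged[tag["Key"]] = tag["Value"]
    return [{"Key": key, "Value": value} for key, value in merged.items()]

# Example: after instance.tags.extend(my_ec2_tags), call
#   ec2_client.create_tags(Resources=[cli_args.instanceid], Tags=dedupe_tags(instance.tags))
```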
{
"source": "jlkazan/pfr-scraper",
"score": 4
} |
#### File: pfr-scraper/scraping/scraper.py
```python
from typing import List
from bs4 import BeautifulSoup, Comment
import pandas as pd
from urllib.request import urlopen
class Scraper:
"""
General class for scraping data from pro-football-reference.com
    Methods were inspired by the following sources:
https://towardsdatascience.com/scraping-nfl-stats-to-compare-quarterback-efficiencies-4989642e02fe
https://github.com/BenKite/football_data/blob/master/profootballReferenceScrape.py
"""
def __init__(self, url: str):
"""
Initialize a scraper
:param url: the url to scrape (must start with http://)
"""
self.url = url
def find_table_ids(self) -> List[str]:
"""
Function for scraping the url in order to determine the table ids for each table in the url
:return: A list of the table ids
"""
# Open URL and pass the html to BeautifulSoup
html = urlopen(self.url)
stats_page = BeautifulSoup(html, features="html.parser")
# Remove all comments from the html
# Code from: https://stackoverflow.com/questions/23299557/beautifulsoup-4-remove-comment-tag-and-its-content
for element in stats_page(text=lambda text: isinstance(text, Comment)):
element.extract()
# Get all of the divs that may have a table
divs = stats_page.findAll('div', id="content")
divs = divs[0].findAll("div", attrs={"class": "table_container"})
# Iterate through each div looking for tables
table_ids = []
for div in divs:
table_str = str(div.findAll("table"))
table_id = table_str[table_str.find("id=") + 3: table_str.find(">")]
# Remove " characters
table_id = table_id.replace("\"", "")
if len(table_id) > 0:
table_ids.append(table_id)
return table_ids
def scrape(self, table_id: str) -> pd.DataFrame:
"""
Function for scraping the url and turning the data into a pandas DataFrame
:param table_id: A string representing the table id, which can be found with the find_table_ids method
:return: A DataFrame consisting of the scraped table data from pfr
"""
# Open URL and pass the html to BeautifulSoup
html = urlopen(self.url)
stats_page = BeautifulSoup(html, features="html.parser")
tables = stats_page.findAll("table", id=table_id)
# Obtain table headers
table_header = tables[0].findAll("thead")
over_header = table_header[0].findAll("tr", attrs={"class": "over_header"})
column_headers = table_header[0].findAll("tr")[0] if len(over_header) == 0 else table_header[0].findAll("tr")[1]
column_headers = [i.getText() for i in column_headers.findAll("th")]
# Obtain table rows
table_body = tables[0].findAll("tbody")
table_rows = table_body[0].findAll("tr")
# Aggregate data from each row into an array
rows_data = []
for row in table_rows:
row_data = [col.getText() for col in row.findAll("td")]
rows_data.append(row_data)
# Create DataFrame from the array of scraped data
data = pd.DataFrame(rows_data, columns=column_headers[1:])
return data
``` |
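A typical session with this class first discovers the table ids on a page and then scrapes one of them. The snippet below is a usage sketch: the URL is illustrative only, and the `Scraper` class defined above is assumed to be importable.
```python
# Usage sketch for the Scraper class defined above (URL is illustrative).
scraper = Scraper("https://www.pro-football-reference.com/years/2020/passing.htm")
table_ids = scraper.find_table_ids()
print(table_ids)              # table ids found on the page, e.g. ['passing', ...]
stats = scraper.scrape(table_ids[0])
print(stats.head())
```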
{
"source": "jlkeesey/jlkdots",
"score": 3
} |
#### File: durdn/bin/base.py
```python
import os
import sys
import argparse
import logging as log
from pprint import pformat, pprint
def get_options():
maxres = 20
collect_command = 'git ls-files'
find_collect_command = 'find . -type f'
parser = argparse.ArgumentParser(description='command-t like shell command')
parser.add_argument('target', metavar='target', help='the target pattern to search')
parser.add_argument('-c', '--command', help='command to collect a list of files (default: git ls-files)')
parser.add_argument('-f', '--find', action='store_true', help='use find collect command (find . -type f)')
parser.add_argument('-m', '--max', type=int, help='stop at max results')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose: print out debug information')
options = parser.parse_args()
if options.verbose:
log.basicConfig(level=log.DEBUG)
else:
log.basicConfig(level=log.INFO)
if options.find:
collect_command = find_collect_command
elif options.command:
collect_command = options.command
if options.max:
maxres = options.max
return options.target, collect_command, maxres
if __name__ == '__main__':
get_options()
``` |
{
"source": "JLKenyon/multiple-view-geometry",
"score": 4
} |
#### File: multiple-view-geometry/Chapter02/Section02.py
```python
import numpy
# Example 2.3
def example_2_3():
print('''
*******************************************************************************
* Example 2.3
Where do the following lines intersect?
eq1: -1 * x + 0 * y + 1 = 0
eq2: 0 * x + -1 * y + 1 = 0
| i j k | ( 1 )
x = l x l' = |-1 0 1 | = ( 1 )
| 0 -1 1 | ( 1 )
''')
result = numpy.cross([-1, 0, 1], [0, -1, 1])
print(result)
print("\n")
print("result:")
print(result[0:2])
print("The last field in the result should be 1, so this is a real point, not an ideal point")
print("The first two fields should be [1,1], which is the point in R2 where the lines intersect")
print('\n\n')
# Example 2.5
def example_2_5():
print('''
*******************************************************************************
* Example 2.5
Where do the following parallel lines intersect?
eq1: x = 1 => -1 * x + 0 * y + 1 = 0
    eq2: x = 2 => -1 * x + 0 * y + 2 = 0
| i j k | ( 0 )
x = l x l' = |-1 0 1 | = ( 1 )
|-1 0 2 | ( 0 )
''')
result = numpy.cross([-1, 0, 1], [-1, 0, 2])
print("\n")
print("result:")
print(result)
print("The last field in the result should be a 0, so this is an ideal point at infinity, not a real point")
print("This means that the two lines are parallel.")
print("The clever aspect is that the same operation, the cross product, gave us a clearly valid answer, of the same type R^3")
print("Section 2")
example_2_3()
example_2_5()
``` |
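In both examples the intersection point comes back in homogeneous coordinates. When the last component is nonzero, the Euclidean coordinates are recovered by dividing through by it; the helper below is added here for illustration only and is not part of the original file.
```python
import numpy

def to_euclidean(point):
    """Convert a homogeneous point (x, y, w) to R^2 by dividing by w; w must be nonzero."""
    if point[2] == 0:
        raise ValueError("ideal point at infinity has no Euclidean representation")
    return point[:2] / point[2]

print(to_euclidean(numpy.cross([-1, 0, 1], [0, -1, 1])))  # -> [1. 1.]
```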
{
"source": "jlko/STOVE",
"score": 3
} |
#### File: model/envs/envs.py
```python
import argparse
import pickle
import imageio
import numpy as np
import scipy as sc
import multiprocessing as mp
from tqdm import tqdm
from spriteworld import renderers as spriteworld_renderers
from spriteworld.sprite import Sprite
def norm(x):
"""Overloading numpys default behaviour for norm()."""
if len(x.shape) == 1:
_norm = np.linalg.norm(x)
else:
_norm = np.linalg.norm(x, axis=1).reshape(-1, 1)
return _norm
class Task():
"""Defines a task for interactive environments.
For all tasks defined here, actions correspond to direct movements of the
controlled balls. Rewards are defined by the derived classes.
"""
angular = 1. / np.sqrt(2)
action_selection = [
np.array([0., 0.]),
np.array([1., 0.]),
np.array([0., 1.]),
np.array([angular, angular]),
np.array([-1., 0.]),
np.array([0., -1.]),
np.array([-angular, -angular]),
np.array([-angular, angular]),
np.array([angular, -angular])]
def __init__(self, env, num_stacked=4, greyscale=False, action_force=.3):
"""Initialise task.
Args:
env (Environment): Tasks have environments as attribute.
num_stacked (int): Create a frame buffer of num_stacked images.
greyscale (bool): Convert rgb images to 'greyscale'.
action_force (float): Distance moved per applied action.
"""
self.env = env
# make controlled ball quasi-static
self.env.m[0] = 10000
if greyscale:
self.frame_buffer = np.zeros(
(*env.get_obs_shape()[:2], num_stacked))
self.conversion = lambda x: np.sum(
x * [[[0.3, 0.59, 0.11]]], 2, keepdims=True)
else:
sh = env.get_obs_shape()
self.frame_buffer = np.zeros((*sh[:2], sh[2] * num_stacked))
self.conversion = lambda x: x
self.frame_channels = 3 if not greyscale else 1
self.action_force = action_force
def get_action_space(self):
"""Return number of available actions."""
return len(self.action_selection)
def get_framebuffer_shape(self):
"""Return shape of frame buffer."""
return self.frame_buffer.shape
def calculate_reward(self, state, action, env):
"""Abstract method. To be overwritten by derived classes."""
raise NotImplementedError
def resolve_action(self, _action, env=None):
"""Implement the effects of an action. Change this to change action."""
action = self.action_selection[_action]
action = action * self.action_force
return action
def step(self, _action):
"""Propagate env to next step."""
action = self.resolve_action(_action)
img, state, done = self.env.step(action)
r = self.calculate_reward()
return img, state, r, done
def step_frame_buffer(self, _action=None):
"""Step environment with frame buffer."""
action = self.resolve_action(_action)
img, state, done = self.env.step(action)
r = self.calculate_reward()
img = self.conversion(img)
c = self.frame_channels
self.frame_buffer[:, :, :-c] = self.frame_buffer[:, :, c:]
self.frame_buffer[:, :, -c:] = img
return self.frame_buffer, state, r, done
class AvoidanceTask(Task):
"""Derived Task: Avoidance Task."""
def calculate_reward(self,):
"""Negative sparse reward of -1 is given in case of collisions."""
return -self.env.collisions
class MaxDistanceTask(Task):
"""Derived Task: Maximal Distance Task."""
def calculate_reward(self):
"""Continuous reward is given.
Negative reward is given in dependence of the minimal distance of the
controlled ball to any other ball.
"""
scaling = 2
r = 0
for i in range(1, self.env.n):
current_norm = norm(self.env.x[i, 0:2] - self.env.x[0, 0:2])\
- 2 * self.env.r[0]
current_exp = -np.clip(np.exp(-current_norm * scaling), 0, 1)
r = min(r, current_exp)
return r
class MinDistanceTask(Task):
"""Derived Task: Minimal Distance Task."""
def calculate_reward(self, state, action, env):
"""Continuous reward is given.
Controlled ball is incentivised to follow any of the other balls.
Reward is always negative, unless the controlled ball touches any of the
other balls. Negative reward is given for the distance to the nearest
ball to the controlled ball.
"""
# initialize r to very small reward (~ -inf)
r = - ((100 * env.hw) ** 2)
for i in range(1, env.n):
r = max(r,
-(norm(state[i, 0:2] - state[0, 0:2]) - 2 * env.r[0]) ** 2)
return r
class PhysicsEnv:
"""Base class for the physics environments."""
def __init__(self, n=3, r=1., m=1., hw=10, granularity=5, res=32, t=1.,
init_v_factor=None, friction_coefficient=0., seed=None,
sprites=False, use_colors=None):
"""Initialize a physics env with some general parameters.
Args:
n (int): Optional, number of objects in the scene.
r (float)/list(float): Optional, radius of objects in the scene.
m (float)/list(float): Optional, mass of the objects in the scene.
hw (float): Optional, coordinate limits of the environment.
            granularity (int): Optional, number of internal simulation sub-steps
                per time step (eps = 1 / granularity). Does not change the
                speed of the simulation.
res (int): Optional, pixel resolution of the images.
t (float): Optional, dt of the step() method. Speeds up or slows
down the simulation.
            init_v_factor (float): Scaling factor for initial velocity. Used only
in Gravity Environment.
friction_coefficient (float): Friction slows down balls.
seed (int): Set random seed for reproducibility.
sprites (bool): Render selection of sprites using spriteworld
instead of balls.
"""
np.random.seed(seed)
self.n = n
self.r = np.array([[r]] * n) if np.isscalar(r) else r
self.m = np.array([[m]] * n) if np.isscalar(m) else m
self.hw = hw
self.internal_steps = granularity
self.eps = 1 / granularity
self.res = res
self.t = t
self.x = self.init_x()
self.v = self.init_v(init_v_factor)
self.a = np.zeros_like(self.v)
self.fric_coeff = friction_coefficient
self.v_rotation_angle = 2 * np.pi * 0.05
if use_colors is None:
if n < 3:
self.use_colors = False
else:
self.use_colors = True
else:
self.use_colors = use_colors
if sprites:
self.renderer = spriteworld_renderers.PILRenderer(
image_size=(self.res, self.res),
anti_aliasing=10,
)
shapes = ['triangle', 'square', 'circle', 'star_4']
if not np.isscalar(r):
print("Scale elements according to radius of first element.")
# empirical scaling rule, works for r = 1.2 and 2
self.scale = self.r[0] / self.hw / 0.6
self.shapes = np.random.choice(shapes, 3)
self.draw_image = self.draw_sprites
else:
self.draw_image = self.draw_balls
def init_v(self, init_v_factor=None):
"""Randomly initialise velocities."""
v = np.random.normal(size=(self.n, 2))
v = v / np.sqrt((v ** 2).sum()) * .5
if init_v_factor is not None:
v = v * np.random.uniform(1/init_v_factor, init_v_factor)
return v
def init_x(self):
"""Initialize ojbject positions without overlap and in bounds."""
good_config = False
while not good_config:
x = np.random.rand(self.n, 2) * self.hw / 2 + self.hw / 4
good_config = True
for i in range(self.n):
for z in range(2):
if x[i][z] - self.r[i] < 0:
good_config = False
if x[i][z] + self.r[i] > self.hw:
good_config = False
for i in range(self.n):
for j in range(i):
if norm(x[i] - x[j]) < self.r[i] + self.r[j]:
good_config = False
return x
def simulate_physics(self, actions):
"""Calculates physics for a single time step.
What "physics" means is defined by the respective derived classes.
Args:
action (np.Array(float)): A 2D-float giving an x,y force to
enact upon the first object.
Returns:
d_vs (np.Array(float)): Velocity updates for the simulation.
"""
raise NotImplementedError
def step(self, action=None, mass_center_obs=False):
"""Full step for the environment."""
if action is not None:
            # Actions are implemented by hard-setting the first object's velocity.
self.v[0] = action * self.t
actions = True
else:
actions = False
for _ in range(self.internal_steps):
self.x += self.t * self.eps * self.v
if mass_center_obs:
# Do simulation in center of mass system.
c_body = np.sum(self.m * self.x, 0) / np.sum(self.m)
self.x += self.hw / 2 - c_body
self.v -= self.fric_coeff * self.m * self.v * self.t * self.eps
self.v = self.simulate_physics(actions)
img = self.draw_image()
state = np.concatenate([self.x, self.v], axis=1)
done = False
return img, state, done
def get_obs_shape(self):
"""Return image dimensions."""
return (self.res, self.res, 3)
def get_state_shape(self):
"""Get shape of state array."""
state = np.concatenate([self.x, self.v], axis=1)
return state.shape
@staticmethod
def ar(x, y, z):
"""Offset array function."""
return z / 2 + np.arange(x, y, z, dtype='float')
def draw_balls(self):
"""Render balls on canvas."""
if self.n > 6:
raise ValueError(
'Max self.n implemented currently is 6.')
img = np.zeros((self.res, self.res, 3), dtype='float')
[I, J] = np.meshgrid(self.ar(0, 1, 1. / self.res) * self.hw,
self.ar(0, 1, 1. / self.res) * self.hw)
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1],
[1, 1, 0], [1, 0, 1], [0, 1, 1]])
for i in range(self.n):
factor = np.exp(- (((I - self.x[i, 0]) ** 2 +
(J - self.x[i, 1]) ** 2) /
(self.r[i] ** 2)) ** 4)
if self.use_colors:
img[:, :, 0] += colors[i, 0] * factor
img[:, :, 1] += colors[i, 1] * factor
img[:, :, 2] += colors[i, 2] * factor
else:
idx = i % 3
img[:, :, idx] += factor
img[img > 1] = 1
return img
def draw_sprites(self):
"""Render sprites on the current locations."""
s1 = Sprite(self.x[0, 0] / self.hw, 1 - self.x[0, 1] / self.hw,
self.shapes[0],
c0=255, c1=0, c2=0, scale=self.scale)
s2 = Sprite(self.x[1, 0] / self.hw, 1 - self.x[1, 1] / self.hw,
self.shapes[1],
c0=0, c1=255, c2=0, scale=self.scale)
s3 = Sprite(self.x[2, 0] / self.hw, 1 - self.x[2, 1] / self.hw,
self.shapes[2],
c0=0, c1=0, c2=255, scale=self.scale)
sprites = [s1, s2, s3]
img = self.renderer.render(sprites)
return img / 255.
def reset(self, init_v_factor=None):
"""Resets the environment to a new configuration."""
self.v = self.init_v(init_v_factor)
self.a = np.zeros_like(self.v)
self.x = self.init_x()
class BillardsEnv(PhysicsEnv):
"""Billiards or Bouncing Balls environment."""
def __init__(self, n=3, r=1., m=1., hw=10, granularity=5, res=32, t=1.,
init_v_factor=None, friction_coefficient=0., seed=None,
sprites=False, use_colors=None, drift=False):
"""Initialise arguments of parent class."""
super().__init__(n, r, m, hw, granularity, res, t, init_v_factor,
friction_coefficient, seed, sprites, use_colors)
# collisions is updated in step to measure the collisions of the balls
self.collisions = 0
# no collisions between objects!
self.drift = drift
def simulate_physics(self, actions):
# F = ma = m dv/dt ---> dv = a * dt = F/m * dt
v = self.v.copy()
# check for collisions with wall
for i in range(self.n):
for z in range(2):
next_pos = self.x[i, z] + (v[i, z] * self.eps * self.t)
# collision at 0 wall
if next_pos < self.r[i]:
self.x[i, z] = self.r[i]
v[i, z] = - v[i, z]
# collision at hw wall
elif next_pos > (self.hw - self.r[i]):
self.x[i, z] = self.hw - self.r[i]
v[i, z] = - v[i, z]
if self.drift:
return v
# check for collisions with objects
for i in range(self.n):
for j in range(i):
dist = norm((self.x[i] + v[i] * self.t * self.eps)
- (self.x[j] + v[j] * self.t * self.eps))
if dist < (self.r[i] + self.r[j]):
if actions and j == 0:
self.collisions = 1
w = self.x[i] - self.x[j]
w = w / norm(w)
v_i = np.dot(w.transpose(), v[i])
v_j = np.dot(w.transpose(), v[j])
if actions and j == 0:
v_j = 0
new_v_i, new_v_j = self.new_speeds(
self.m[i], self.m[j], v_i, v_j)
v[i] += w * (new_v_i - v_i)
v[j] += w * (new_v_j - v_j)
if actions and j == 0:
v[j] = 0
return v
def new_speeds(self, m1, m2, v1, v2):
"""Implement elastic collision between two objects."""
new_v2 = (2 * m1 * v1 + v2 * (m2 - m1)) / (m1 + m2)
new_v1 = new_v2 + (v2 - v1)
return new_v1, new_v2
def step(self, action=None):
"""Overwrite step functino to ensure collisions are zeroed beforehand."""
self.collisions = 0
return super().step(action)
class GravityEnv(PhysicsEnv):
"""Derived Task: Minimal Distance Task."""
def __init__(self, n=3, r=1., m=1., hw=10, granularity=5, res=32, t=1,
init_v_factor=0.18, friction_coefficient=0, seed=None,
sprites=False, use_colors=False, drift=False):
"""Initialise arguments of parent class."""
super().__init__(
n, r, m, hw, granularity, res, t, init_v_factor,
friction_coefficient, seed, sprites, use_colors)
self.G = 0.5
self.K1 = self.G
self.K2 = 1
def init_x(self):
"""Initialize object positions without overlap and in bounds.
To achieve a stable gravity configuration, default init is overwritten.
Here, objects are initialised with more padding.
"""
good_config = False
counter = 0
while not good_config and counter < 1000:
            x = np.random.rand(self.n, 2) * 0.9 * self.hw / 2 + self.hw / 2
good_config = True
for i in range(self.n):
for j in range(i):
good_config = good_config and norm(
x[i] - x[j]) > self.hw / 3
counter += 1
return x
def init_v(self, factor):
"""Initialize a stable velocity configuration.
Velocities are initialised as orthogonal to the object's position vector
as measured from the center.
"""
x_middle = np.sum(self.x, 0) / self.n
pref = np.random.choice([-1, 1])
full_v = np.zeros((self.n, 2))
for i in range(self.n):
v = - (x_middle - self.x[i])
v = v / norm(v)
# make noise component wise
            full_v[i] = np.array(
                [pref * v[1] * (factor + 0.13 * np.random.randn()),
                 -pref * v[0] * (factor + 0.13 * np.random.randn())])
return full_v
def step(self, action=None):
"""Set actions to false by default."""
return super().step(action, True)
def simulate_physics(self, actions):
"""Simulate gravitational physics.
Additional attractive force towards the center is applied for stability.
Forces are clipped to avoid slingshotting effects.
"""
x_middle = np.array([self.hw/2, self.hw/2])
v = np.zeros_like(self.v)
for i in range(self.n):
F_tot = np.array([0., 0.])
for j in range(self.n):
if i != j:
r = np.linalg.norm(self.x[j] - self.x[i])
F_tot -= self.G * self.m[j] * self.m[i] * (
self.x[i] - self.x[j]) / ((r + 1e-5) ** 3)
r = (x_middle - self.x[i])
F_tot += 0.001 * (r ** 3) / norm(r)
F_tot = np.clip(F_tot, -1, 1)
v[i] = self.v[i] + (F_tot / self.m[i]) * self.t * self.eps
return v
class ActionPolicy:
"""Abstract base class for action policy.
An action policy specifies a series of actions.
"""
def __init__(self, action_space):
"""Initialise action policy.
Args:
action_space (int): Number of available actions.
"""
self.action_space = action_space
def next(self):
raise NotImplementedError("ABC does not implement methods.")
class RandomActionPolicy(ActionPolicy):
"""Random action policy."""
def __init__(self, action_space=9):
"""Initialise random action policy."""
super().__init__(action_space)
def next(self):
"""Next action is given completely independent of history."""
return np.random.randint(self.action_space)
class MonteCarloActionPolicy(ActionPolicy):
"""Monte carlo action policy.
    The first action is chosen randomly. Afterwards, the action is changed
    with probability prob_change at each step.
"""
def __init__(self, action_space=9, prob_change=0.1):
"""Initialise monte carlo action policy.
Args:
prob_change (float): Probability of changing action from t to t+1.
"""
super().__init__(action_space)
self.p = prob_change
self.action_arr = range(self.action_space)
self.current_state = np.random.randint(self.action_space)
def next(self):
"""Get next action given current."""
action_space = self.action_space
current_weights = self.p / (action_space - 1) * np.ones(action_space)
current_weights[self.current_state] = 1 - self.p
# assert current_weights.sum() == 1
self.current_state = np.random.choice(self.action_arr,
p=current_weights)
return self.current_state
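# --- Hedged usage sketch (illustrative only, not part of the original module):
# how the pieces above fit together. A Task wraps a PhysicsEnv, an ActionPolicy
# supplies actions, and Task.step() returns (image, state, reward, done).
def _example_policy_rollout(steps=10):
    """Roll out a short action-conditioned billiards episode."""
    task = AvoidanceTask(BillardsEnv(n=3, r=1., hw=10, res=32), action_force=.6)
    policy = MonteCarloActionPolicy(action_space=task.get_action_space(),
                                    prob_change=0.2)
    rewards = []
    for _ in range(steps):
        img, state, reward, done = task.step(policy.next())
        rewards.append(reward)
    return rewards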
def generate_fitting_run(env_class, run_len=100, run_num=1000, max_tries=10000,
res=50, n=2, r=1., dt=0.01, granularity=10, fc=0.3,
hw=10, m=1., seed=None,
init_v_factor=None, check_overlap=False, sprites=False,
use_colors=None, drift=False):
"""Generate runs for environments.
Integrated error checks. Parameters as passed to environments.
"""
good_counter = 0
bad_counter = 0
good_imgs = []
good_states = []
for _try in tqdm(range(max_tries)):
# init_v is ignored for BillardsEnv
env = env_class(
n=n, r=r, m=m, hw=hw, granularity=granularity, res=res, t=dt,
init_v_factor=init_v_factor, friction_coefficient=fc, seed=seed,
sprites=sprites, use_colors=use_colors, drift=drift)
run_value = 0
all_imgs = np.zeros((run_len, *env.get_obs_shape()))
all_states = np.zeros((run_len, env.n, 4))
run_value = 0
for t in tqdm(range(run_len)):
img, state, _ = env.step()
all_imgs[t] = img
all_states[t] = state
run_value += np.sum(np.logical_and(
state[:, :2] > 0, state[:, :2] < env.hw)) / (env.n * 2)
if check_overlap:
overlap = 0
for i in range(n):
other = list(set(range(n)) - {i, })
# allow small overlaps
overlap += np.any(norm(state[i, :2] - state[other, :2])
< 0.9 * (env.r[i] + env.r[other]))
if overlap > 0:
run_value -= 1
if run_value > (run_len - run_len / 100):
good_imgs.append(all_imgs)
good_states.append(all_states)
good_counter += 1
else:
bad_counter += 1
if good_counter >= run_num:
break
good_imgs = np.stack(good_imgs, 0)
good_states = np.stack(good_states, 0)
print(
        'Generation of {} runs finished, total number of bad runs: {}.'.format(
run_num, bad_counter))
return good_imgs, good_states
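# --- Hedged usage sketch (tiny sizes for illustration; the generators below use
# much larger run_num and run_len): calling generate_fitting_run directly.
def _example_fitting_run():
    """Generate two short billiards runs; returns images and ground-truth states."""
    imgs, states = generate_fitting_run(
        BillardsEnv, run_len=20, run_num=2, max_tries=100,
        res=32, n=3, r=1.2, dt=1, granularity=10, fc=0, hw=10, m=1.)
    # imgs has shape (2, 20, 32, 32, 3); states has shape (2, 20, 3, 4).
    return imgs, states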
def generate_data(save=True, test_gen=False, name='billiards', env=BillardsEnv,
config=None, num_runs=None):
"""Generate data for billiards or gravity environment."""
if num_runs is None or test_gen:
num_runs = [1000, 300] if (save and not test_gen) else [2, 5]
for run_types, run_num in zip(['train', 'test'], num_runs):
# generate runs
X, y = generate_fitting_run(
env, run_len=100, run_num=run_num, max_tries=10000, **config)
# save data
data = dict()
data['X'] = X
data['y'] = y
data.update(config)
data['coord_lim'] = config['hw']
if save:
path = './data/{}_{}.pkl'.format(name, run_types)
f = open(path, "wb")
pickle.dump(data, f, protocol=4)
f.close()
# also generate gif of data
first_seq = (255 * X[:20].reshape(
(-1, config['res'], config['res'], 3))).astype(np.uint8)
imageio.mimsave('./data/{}.gif'.format(name), first_seq, fps=24)
def generate_billiards_w_actions(ChosenTask=AvoidanceTask, save=True,
config=None, test_gen=False):
"""Generate action conditioned billiards data."""
run_len = 100
action_space = 9
action_force = 0.6
num_runs = [1000, 300] if (save and not test_gen) else [2, 10]
for run_types, run_num in zip(['train', 'test'], num_runs):
all_imgs = np.zeros(
(run_num, run_len, config['res'], config['res'], 3))
all_states = np.zeros((run_num, run_len, config['n'], 4))
all_actions = np.zeros((run_num, run_len, 9))
all_rewards = np.zeros((run_num, run_len, 1))
all_dones = np.zeros((run_num, run_len, 1))
# number of sequences
for run in tqdm(range(run_num)):
env = ChosenTask(BillardsEnv(**config),
4, greyscale=False, action_force=action_force)
assert action_space == env.get_action_space()
p = np.random.uniform(0.2, 0.3)
ap = MonteCarloActionPolicy(action_space=action_space,
prob_change=p)
# number of steps per sequence
for t in tqdm(range(run_len)):
action = ap.next()
img, state, reward, done = env.step(action)
all_imgs[run, t] = img
all_states[run, t] = state
tmp = np.zeros(action_space)
tmp[action] = 1
all_actions[run, t - 1] = tmp
all_rewards[run, t] = reward
all_dones[run, t] = done
# save results
data = dict()
data['X'] = all_imgs
data['y'] = all_states
data['action'] = all_actions
data['reward'] = all_rewards
data['done'] = all_dones
# still a bit hacky, need to implement __str__
if ChosenTask is not AvoidanceTask:
raise ValueError
data['type'] = 'AvoidanceTask'
data['action_force'] = action_force
data.update({'action_space': action_space})
data.update(config)
data['coord_lim'] = config['hw']
if save:
path = 'data/avoidance_{}.pkl'.format(run_types)
f = open(path, "wb")
pickle.dump(data, f, protocol=4)
f.close()
# example sequences as gif
res = config['res']
first_seq = (255 * all_imgs[:20].reshape((-1, res, res, 3)))
first_seq = first_seq.astype(np.uint8)
        imageio.mimsave('data/avoidance.gif', first_seq, fps=24)
def parse_wrapper(script_args):
"""DRY wrapper around parse."""
parser = argparse.ArgumentParser()
parser.add_argument('--test-gen', dest='test_gen', action='store_true')
parser.add_argument('--no-save', dest='save', action='store_false')
args = parser.parse_args(script_args)
return args
def multi_billiards(script_args):
"""Create billiards with 6 balls."""
args = parse_wrapper(script_args)
config = {
'res': 50, 'hw': 10, 'n': 6, 'dt': 1, 'm': 1., 'fc': 0,
'granularity': 10, 'r': 1, 'check_overlap': False, 'use_colors': False}
generate_data(
save=args.save, test_gen=args.test_gen, name='multibilliards',
env=BillardsEnv, config=config)
def billiards_energy(script_args):
"""Create billiards with varying total energy."""
args = parse_wrapper(script_args)
config = {
'res': 32, 'hw': 10, 'n': 3, 'dt': 1, 'm': 1., 'fc': 0,
'granularity': 10, 'r': 1.2, 'check_overlap': False,
'init_v_factor': args.init_v}
name = 'billiards_energy_{:.1f}'.format(args.init_v)
generate_data(
save=args.save, test_gen=args.test_gen, name=name,
env=BillardsEnv, config=config)
def drift_runs(script_args):
"""Create billiards with varying total energy."""
args = parse_wrapper(script_args)
config = {
'res': 32, 'hw': 10, 'n': 3, 'dt': 1, 'm': 1., 'fc': 0,
'granularity': 10, 'r': 1.2, 'check_overlap': False, 'drift': True}
name = 'billiards_drift'
generate_data(
save=args.save, test_gen=args.test_gen, name=name,
env=BillardsEnv, config=config)
def billiards_smooth(script_args):
"""Create billiards with varying total energy."""
args = parse_wrapper(script_args)
config = {
'res': 32, 'hw': 10, 'n': 3, 'dt': 1, 'm': 1., 'fc': 0,
'granularity': 10, 'r': 1.2, 'check_overlap': False, 'drift': False,}
name = 'billiards_smooth'
generate_data(
save=args.save, test_gen=args.test_gen, name=name,
env=BillardsEnv, config=config)
def main(script_args):
"""Create standard collection of data sets."""
args = parse_wrapper(script_args)
config = {
'res': 32, 'hw': 10, 'n': 3, 'dt': 1, 'm': 1., 'fc': 0,
'granularity': 10, 'r': 1.2, 'check_overlap': False}
generate_data(
save=args.save, test_gen=args.test_gen, name='billiards',
env=BillardsEnv, config=config)
# config.update({'sprites': True})
# generate_data(
# test_gen=args.test_gen, name='billards_sprites', env=BillardsEnv, config=config)
config = {
'res': 50, 'hw': 30, 'n': 3, 'dt': 1, 'm': 4., 'fc': 0,
'init_v_factor': 0.55, 'granularity': 50, 'r': 2,
'check_overlap': True}
generate_data(
save=args.save, test_gen=args.test_gen, name='gravity',
env=GravityEnv, config=config)
# config.update({'sprites': True})
# generate_data(
# test_gen=args.test_gen, name='gravity_sprites', env=GravityEnv, config=config)
config = {
'res': 32, 'hw': 10, 'n': 3, 't': 1., 'm': 1.,
'granularity': 50, 'r': 1, 'friction_coefficient': 0}
generate_billiards_w_actions(
config=config, save=args.save, test_gen=args.test_gen)
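# --- Hedged entry-point sketch (assumption: the original repository may invoke
# these generators differently, e.g. from a separate script) ---
if __name__ == '__main__':
    # Generate small in-memory test data sets without writing them to disk.
    main(['--test-gen', '--no-save'])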
``` |
{
"source": "JLkp/openslides-backend",
"score": 2
} |
#### File: action/mixins/sequential_numbers_mixin.py
```python
from typing import Any, Dict
from datastore.shared.util import DeletedModelsBehaviour
from ...models.models import Model
from ...services.datastore.interface import DatastoreService
from ...shared.filters import FilterOperator
from ..generics.create import CreateAction
class SequentialNumbersMixin(CreateAction):
datastore: DatastoreService
model: Model
def get_sequential_number(self, meeting_id: int) -> int:
"""
        Creates a sequential number that is unique per meeting and returns it.
"""
filter = FilterOperator("meeting_id", "=", meeting_id)
number = self.datastore.max(
collection=self.model.collection,
filter=filter,
field="sequential_number",
get_deleted_models=DeletedModelsBehaviour.ALL_MODELS,
)
number = 1 if number is None else number + 1
return number
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
instance["sequential_number"] = self.get_sequential_number(
instance["meeting_id"]
)
return instance
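# --- Hedged usage sketch (illustrative only; "ExampleModel"/"ExampleCreate" are
# hypothetical names, not actual actions in this repository) ---
#
# class ExampleCreate(SequentialNumbersMixin):
#     model = ExampleModel()  # a Model whose collection has a sequential_number field
#
# When the action pipeline calls update_instance({"meeting_id": 5, ...}), the
# mixin asks the datastore for the maximum sequential_number in meeting 5
# (including deleted models, so numbers are never reused) and stamps max + 1
# onto the new instance.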
``` |
{
"source": "jlk/qualys-cs-python-client",
"score": 2
} |
#### File: qualys_cs_api/api/sensor_api.py
```python
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from qualys_cs_api.api_client import ApiClient
from qualys_cs_api.exceptions import (
ApiTypeError,
ApiValueError
)
class SensorApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_sensors_using_delete(self, sensor_delete_request, **kwargs): # noqa: E501
"""Delete sensors in your account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_sensors_using_delete(sensor_delete_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SensorDeleteRequest sensor_delete_request: Provide one or more sensor Ids or filters in the format shown under Example Value. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_sensors_using_delete_with_http_info(sensor_delete_request, **kwargs) # noqa: E501
def delete_sensors_using_delete_with_http_info(self, sensor_delete_request, **kwargs): # noqa: E501
"""Delete sensors in your account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_sensors_using_delete_with_http_info(sensor_delete_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param SensorDeleteRequest sensor_delete_request: Provide one or more sensor Ids or filters in the format shown under Example Value. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['sensor_delete_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_sensors_using_delete" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'sensor_delete_request' is set
if self.api_client.client_side_validation and ('sensor_delete_request' not in local_var_params or # noqa: E501
local_var_params['sensor_delete_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sensor_delete_request` when calling `delete_sensors_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'sensor_delete_request' in local_var_params:
body_params = local_var_params['sensor_delete_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1.1/sensors', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_sensor_details_using_get(self, sensor_id, **kwargs): # noqa: E501
"""Show details of a sensor # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sensor_details_using_get(sensor_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sensor_id: Specify the sensor ID of a specific sensor in the user’s scope (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Sensor
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_sensor_details_using_get_with_http_info(sensor_id, **kwargs) # noqa: E501
def get_sensor_details_using_get_with_http_info(self, sensor_id, **kwargs): # noqa: E501
"""Show details of a sensor # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sensor_details_using_get_with_http_info(sensor_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str sensor_id: Specify the sensor ID of a specific sensor in the user’s scope (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Sensor, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['sensor_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sensor_details_using_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'sensor_id' is set
if self.api_client.client_side_validation and ('sensor_id' not in local_var_params or # noqa: E501
local_var_params['sensor_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `sensor_id` when calling `get_sensor_details_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'sensor_id' in local_var_params:
path_params['sensorId'] = local_var_params['sensor_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1.1/sensors/{sensorId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Sensor', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_sensors_list_using_get(self, page_no, page_size, **kwargs): # noqa: E501
"""Show a list of sensors in your account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sensors_list_using_get(page_no, page_size, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int page_no: The page to be returned. (required)
:param int page_size: The number of records per page to be included in the response. (required)
:param str filter: Filter the sensors list by providing a query using Qualys syntax. <a href='/cs/help/search/language.htm' target='_blank'>Click here</a> for help with creating your query.
:param str sort: Sort the results using a Qualys token. For example created:desc. <a href='/cs/help/search_tips/sortable_tokens.htm'>Click here</a> for a listing of tokens.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PivotListResponseSensor
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_sensors_list_using_get_with_http_info(page_no, page_size, **kwargs) # noqa: E501
def get_sensors_list_using_get_with_http_info(self, page_no, page_size, **kwargs): # noqa: E501
"""Show a list of sensors in your account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sensors_list_using_get_with_http_info(page_no, page_size, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int page_no: The page to be returned. (required)
:param int page_size: The number of records per page to be included in the response. (required)
:param str filter: Filter the sensors list by providing a query using Qualys syntax. <a href='/cs/help/search/language.htm' target='_blank'>Click here</a> for help with creating your query.
:param str sort: Sort the results using a Qualys token. For example created:desc. <a href='/cs/help/search_tips/sortable_tokens.htm'>Click here</a> for a listing of tokens.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(PivotListResponseSensor, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['page_no', 'page_size', 'filter', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sensors_list_using_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'page_no' is set
if self.api_client.client_side_validation and ('page_no' not in local_var_params or # noqa: E501
local_var_params['page_no'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `page_no` when calling `get_sensors_list_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if self.api_client.client_side_validation and ('page_size' not in local_var_params or # noqa: E501
local_var_params['page_size'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `page_size` when calling `get_sensors_list_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'page_no' in local_var_params and local_var_params['page_no'] is not None: # noqa: E501
query_params.append(('pageNo', local_var_params['page_no'])) # noqa: E501
if 'page_size' in local_var_params and local_var_params['page_size'] is not None: # noqa: E501
query_params.append(('pageSize', local_var_params['page_size'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1.1/sensors', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PivotListResponseSensor', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
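# --- Hedged usage sketch (illustrative only; the gateway URL and bearer-token
# handling below are assumptions, not part of the generated client) ---
def _example_list_sensors(token):
    """List the first page of sensors, newest first."""
    from qualys_cs_api.configuration import Configuration
    configuration = Configuration()
    configuration.host = "https://gateway.qg1.apps.qualys.com/csapi"  # example gateway
    api_client = ApiClient(configuration=configuration)
    api_client.set_default_header("Authorization", "Bearer {}".format(token))
    api = SensorApi(api_client)
    return api.get_sensors_list_using_get(page_no=1, page_size=50,
                                          sort="created:desc")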
```
#### File: qualys_cs_api/models/container_details.py
```python
import pprint
import re # noqa: F401
import six
from qualys_cs_api.configuration import Configuration
class ContainerDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'arguments': 'list[str]',
'command': 'str',
'container_id': 'str',
'created': 'str',
'customer_uuid': 'str',
'drift': 'Drift',
'environment': 'list[str]',
'host': 'Host',
'hostname': 'str',
'image_id': 'str',
'image_sha': 'str',
'image_uuid': 'str',
'ipv4': 'str',
'ipv6': 'str',
'is_drift': 'bool',
'is_root': 'bool',
'label': 'list[Label]',
'last_scanned': 'str',
'mac_address': 'str',
'name': 'str',
'operating_system': 'str',
'path': 'str',
'port_mapping': 'list[PortMapping]',
'privileged': 'bool',
'sensor_uuid': 'str',
'services': 'list[ServiceDetails]',
'sha': 'str',
'softwares': 'list[Software]',
'source': 'str',
'state': 'str',
'state_changed': 'str',
'users': 'list[str]',
'uuid': 'str',
'vulnerabilities': 'list[ServiceVulnerabilityDetails]'
}
attribute_map = {
'arguments': 'arguments',
'command': 'command',
'container_id': 'containerId',
'created': 'created',
'customer_uuid': 'customerUuid',
'drift': 'drift',
'environment': 'environment',
'host': 'host',
'hostname': 'hostname',
'image_id': 'imageId',
'image_sha': 'imageSha',
'image_uuid': 'imageUuid',
'ipv4': 'ipv4',
'ipv6': 'ipv6',
'is_drift': 'isDrift',
'is_root': 'isRoot',
'label': 'label',
'last_scanned': 'lastScanned',
'mac_address': 'macAddress',
'name': 'name',
'operating_system': 'operatingSystem',
'path': 'path',
'port_mapping': 'portMapping',
'privileged': 'privileged',
'sensor_uuid': 'sensorUuid',
'services': 'services',
'sha': 'sha',
'softwares': 'softwares',
'source': 'source',
'state': 'state',
'state_changed': 'stateChanged',
'users': 'users',
'uuid': 'uuid',
'vulnerabilities': 'vulnerabilities'
}
def __init__(self, arguments=None, command=None, container_id=None, created=None, customer_uuid=None, drift=None, environment=None, host=None, hostname=None, image_id=None, image_sha=None, image_uuid=None, ipv4=None, ipv6=None, is_drift=None, is_root=None, label=None, last_scanned=None, mac_address=None, name=None, operating_system=None, path=None, port_mapping=None, privileged=None, sensor_uuid=None, services=None, sha=None, softwares=None, source=None, state=None, state_changed=None, users=None, uuid=None, vulnerabilities=None, local_vars_configuration=None): # noqa: E501
"""ContainerDetails - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._arguments = None
self._command = None
self._container_id = None
self._created = None
self._customer_uuid = None
self._drift = None
self._environment = None
self._host = None
self._hostname = None
self._image_id = None
self._image_sha = None
self._image_uuid = None
self._ipv4 = None
self._ipv6 = None
self._is_drift = None
self._is_root = None
self._label = None
self._last_scanned = None
self._mac_address = None
self._name = None
self._operating_system = None
self._path = None
self._port_mapping = None
self._privileged = None
self._sensor_uuid = None
self._services = None
self._sha = None
self._softwares = None
self._source = None
self._state = None
self._state_changed = None
self._users = None
self._uuid = None
self._vulnerabilities = None
self.discriminator = None
if arguments is not None:
self.arguments = arguments
if command is not None:
self.command = command
if container_id is not None:
self.container_id = container_id
if created is not None:
self.created = created
if customer_uuid is not None:
self.customer_uuid = customer_uuid
if drift is not None:
self.drift = drift
if environment is not None:
self.environment = environment
if host is not None:
self.host = host
if hostname is not None:
self.hostname = hostname
if image_id is not None:
self.image_id = image_id
if image_sha is not None:
self.image_sha = image_sha
if image_uuid is not None:
self.image_uuid = image_uuid
if ipv4 is not None:
self.ipv4 = ipv4
if ipv6 is not None:
self.ipv6 = ipv6
if is_drift is not None:
self.is_drift = is_drift
if is_root is not None:
self.is_root = is_root
if label is not None:
self.label = label
if last_scanned is not None:
self.last_scanned = last_scanned
if mac_address is not None:
self.mac_address = mac_address
if name is not None:
self.name = name
if operating_system is not None:
self.operating_system = operating_system
if path is not None:
self.path = path
if port_mapping is not None:
self.port_mapping = port_mapping
if privileged is not None:
self.privileged = privileged
if sensor_uuid is not None:
self.sensor_uuid = sensor_uuid
if services is not None:
self.services = services
if sha is not None:
self.sha = sha
if softwares is not None:
self.softwares = softwares
if source is not None:
self.source = source
if state is not None:
self.state = state
if state_changed is not None:
self.state_changed = state_changed
if users is not None:
self.users = users
if uuid is not None:
self.uuid = uuid
if vulnerabilities is not None:
self.vulnerabilities = vulnerabilities
@property
def arguments(self):
"""Gets the arguments of this ContainerDetails. # noqa: E501
:return: The arguments of this ContainerDetails. # noqa: E501
:rtype: list[str]
"""
return self._arguments
@arguments.setter
def arguments(self, arguments):
"""Sets the arguments of this ContainerDetails.
:param arguments: The arguments of this ContainerDetails. # noqa: E501
:type: list[str]
"""
self._arguments = arguments
@property
def command(self):
"""Gets the command of this ContainerDetails. # noqa: E501
:return: The command of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._command
@command.setter
def command(self, command):
"""Sets the command of this ContainerDetails.
:param command: The command of this ContainerDetails. # noqa: E501
:type: str
"""
self._command = command
@property
def container_id(self):
"""Gets the container_id of this ContainerDetails. # noqa: E501
:return: The container_id of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._container_id
@container_id.setter
def container_id(self, container_id):
"""Sets the container_id of this ContainerDetails.
:param container_id: The container_id of this ContainerDetails. # noqa: E501
:type: str
"""
self._container_id = container_id
@property
def created(self):
"""Gets the created of this ContainerDetails. # noqa: E501
:return: The created of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this ContainerDetails.
:param created: The created of this ContainerDetails. # noqa: E501
:type: str
"""
self._created = created
@property
def customer_uuid(self):
"""Gets the customer_uuid of this ContainerDetails. # noqa: E501
:return: The customer_uuid of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._customer_uuid
@customer_uuid.setter
def customer_uuid(self, customer_uuid):
"""Sets the customer_uuid of this ContainerDetails.
:param customer_uuid: The customer_uuid of this ContainerDetails. # noqa: E501
:type: str
"""
self._customer_uuid = customer_uuid
@property
def drift(self):
"""Gets the drift of this ContainerDetails. # noqa: E501
:return: The drift of this ContainerDetails. # noqa: E501
:rtype: Drift
"""
return self._drift
@drift.setter
def drift(self, drift):
"""Sets the drift of this ContainerDetails.
:param drift: The drift of this ContainerDetails. # noqa: E501
:type: Drift
"""
self._drift = drift
@property
def environment(self):
"""Gets the environment of this ContainerDetails. # noqa: E501
:return: The environment of this ContainerDetails. # noqa: E501
:rtype: list[str]
"""
return self._environment
@environment.setter
def environment(self, environment):
"""Sets the environment of this ContainerDetails.
:param environment: The environment of this ContainerDetails. # noqa: E501
:type: list[str]
"""
self._environment = environment
@property
def host(self):
"""Gets the host of this ContainerDetails. # noqa: E501
:return: The host of this ContainerDetails. # noqa: E501
:rtype: Host
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this ContainerDetails.
:param host: The host of this ContainerDetails. # noqa: E501
:type: Host
"""
self._host = host
@property
def hostname(self):
"""Gets the hostname of this ContainerDetails. # noqa: E501
:return: The hostname of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""Sets the hostname of this ContainerDetails.
:param hostname: The hostname of this ContainerDetails. # noqa: E501
:type: str
"""
self._hostname = hostname
@property
def image_id(self):
"""Gets the image_id of this ContainerDetails. # noqa: E501
:return: The image_id of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""Sets the image_id of this ContainerDetails.
:param image_id: The image_id of this ContainerDetails. # noqa: E501
:type: str
"""
self._image_id = image_id
@property
def image_sha(self):
"""Gets the image_sha of this ContainerDetails. # noqa: E501
:return: The image_sha of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._image_sha
@image_sha.setter
def image_sha(self, image_sha):
"""Sets the image_sha of this ContainerDetails.
:param image_sha: The image_sha of this ContainerDetails. # noqa: E501
:type: str
"""
self._image_sha = image_sha
@property
def image_uuid(self):
"""Gets the image_uuid of this ContainerDetails. # noqa: E501
:return: The image_uuid of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._image_uuid
@image_uuid.setter
def image_uuid(self, image_uuid):
"""Sets the image_uuid of this ContainerDetails.
:param image_uuid: The image_uuid of this ContainerDetails. # noqa: E501
:type: str
"""
self._image_uuid = image_uuid
@property
def ipv4(self):
"""Gets the ipv4 of this ContainerDetails. # noqa: E501
:return: The ipv4 of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._ipv4
@ipv4.setter
def ipv4(self, ipv4):
"""Sets the ipv4 of this ContainerDetails.
:param ipv4: The ipv4 of this ContainerDetails. # noqa: E501
:type: str
"""
self._ipv4 = ipv4
@property
def ipv6(self):
"""Gets the ipv6 of this ContainerDetails. # noqa: E501
:return: The ipv6 of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._ipv6
@ipv6.setter
def ipv6(self, ipv6):
"""Sets the ipv6 of this ContainerDetails.
:param ipv6: The ipv6 of this ContainerDetails. # noqa: E501
:type: str
"""
self._ipv6 = ipv6
@property
def is_drift(self):
"""Gets the is_drift of this ContainerDetails. # noqa: E501
:return: The is_drift of this ContainerDetails. # noqa: E501
:rtype: bool
"""
return self._is_drift
@is_drift.setter
def is_drift(self, is_drift):
"""Sets the is_drift of this ContainerDetails.
:param is_drift: The is_drift of this ContainerDetails. # noqa: E501
:type: bool
"""
self._is_drift = is_drift
@property
def is_root(self):
"""Gets the is_root of this ContainerDetails. # noqa: E501
:return: The is_root of this ContainerDetails. # noqa: E501
:rtype: bool
"""
return self._is_root
@is_root.setter
def is_root(self, is_root):
"""Sets the is_root of this ContainerDetails.
:param is_root: The is_root of this ContainerDetails. # noqa: E501
:type: bool
"""
self._is_root = is_root
@property
def label(self):
"""Gets the label of this ContainerDetails. # noqa: E501
:return: The label of this ContainerDetails. # noqa: E501
:rtype: list[Label]
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this ContainerDetails.
:param label: The label of this ContainerDetails. # noqa: E501
:type: list[Label]
"""
self._label = label
@property
def last_scanned(self):
"""Gets the last_scanned of this ContainerDetails. # noqa: E501
:return: The last_scanned of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._last_scanned
@last_scanned.setter
def last_scanned(self, last_scanned):
"""Sets the last_scanned of this ContainerDetails.
:param last_scanned: The last_scanned of this ContainerDetails. # noqa: E501
:type: str
"""
self._last_scanned = last_scanned
@property
def mac_address(self):
"""Gets the mac_address of this ContainerDetails. # noqa: E501
:return: The mac_address of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._mac_address
@mac_address.setter
def mac_address(self, mac_address):
"""Sets the mac_address of this ContainerDetails.
:param mac_address: The mac_address of this ContainerDetails. # noqa: E501
:type: str
"""
self._mac_address = mac_address
@property
def name(self):
"""Gets the name of this ContainerDetails. # noqa: E501
:return: The name of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ContainerDetails.
:param name: The name of this ContainerDetails. # noqa: E501
:type: str
"""
self._name = name
@property
def operating_system(self):
"""Gets the operating_system of this ContainerDetails. # noqa: E501
:return: The operating_system of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._operating_system
@operating_system.setter
def operating_system(self, operating_system):
"""Sets the operating_system of this ContainerDetails.
:param operating_system: The operating_system of this ContainerDetails. # noqa: E501
:type: str
"""
self._operating_system = operating_system
@property
def path(self):
"""Gets the path of this ContainerDetails. # noqa: E501
:return: The path of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this ContainerDetails.
:param path: The path of this ContainerDetails. # noqa: E501
:type: str
"""
self._path = path
@property
def port_mapping(self):
"""Gets the port_mapping of this ContainerDetails. # noqa: E501
:return: The port_mapping of this ContainerDetails. # noqa: E501
:rtype: list[PortMapping]
"""
return self._port_mapping
@port_mapping.setter
def port_mapping(self, port_mapping):
"""Sets the port_mapping of this ContainerDetails.
:param port_mapping: The port_mapping of this ContainerDetails. # noqa: E501
:type: list[PortMapping]
"""
self._port_mapping = port_mapping
@property
def privileged(self):
"""Gets the privileged of this ContainerDetails. # noqa: E501
:return: The privileged of this ContainerDetails. # noqa: E501
:rtype: bool
"""
return self._privileged
@privileged.setter
def privileged(self, privileged):
"""Sets the privileged of this ContainerDetails.
:param privileged: The privileged of this ContainerDetails. # noqa: E501
:type: bool
"""
self._privileged = privileged
@property
def sensor_uuid(self):
"""Gets the sensor_uuid of this ContainerDetails. # noqa: E501
:return: The sensor_uuid of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._sensor_uuid
@sensor_uuid.setter
def sensor_uuid(self, sensor_uuid):
"""Sets the sensor_uuid of this ContainerDetails.
:param sensor_uuid: The sensor_uuid of this ContainerDetails. # noqa: E501
:type: str
"""
self._sensor_uuid = sensor_uuid
@property
def services(self):
"""Gets the services of this ContainerDetails. # noqa: E501
:return: The services of this ContainerDetails. # noqa: E501
:rtype: list[ServiceDetails]
"""
return self._services
@services.setter
def services(self, services):
"""Sets the services of this ContainerDetails.
:param services: The services of this ContainerDetails. # noqa: E501
:type: list[ServiceDetails]
"""
self._services = services
@property
def sha(self):
"""Gets the sha of this ContainerDetails. # noqa: E501
:return: The sha of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._sha
@sha.setter
def sha(self, sha):
"""Sets the sha of this ContainerDetails.
:param sha: The sha of this ContainerDetails. # noqa: E501
:type: str
"""
self._sha = sha
@property
def softwares(self):
"""Gets the softwares of this ContainerDetails. # noqa: E501
:return: The softwares of this ContainerDetails. # noqa: E501
:rtype: list[Software]
"""
return self._softwares
@softwares.setter
def softwares(self, softwares):
"""Sets the softwares of this ContainerDetails.
:param softwares: The softwares of this ContainerDetails. # noqa: E501
:type: list[Software]
"""
self._softwares = softwares
@property
def source(self):
"""Gets the source of this ContainerDetails. # noqa: E501
:return: The source of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this ContainerDetails.
:param source: The source of this ContainerDetails. # noqa: E501
:type: str
"""
self._source = source
@property
def state(self):
"""Gets the state of this ContainerDetails. # noqa: E501
:return: The state of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this ContainerDetails.
:param state: The state of this ContainerDetails. # noqa: E501
:type: str
"""
self._state = state
@property
def state_changed(self):
"""Gets the state_changed of this ContainerDetails. # noqa: E501
:return: The state_changed of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._state_changed
@state_changed.setter
def state_changed(self, state_changed):
"""Sets the state_changed of this ContainerDetails.
:param state_changed: The state_changed of this ContainerDetails. # noqa: E501
:type: str
"""
self._state_changed = state_changed
@property
def users(self):
"""Gets the users of this ContainerDetails. # noqa: E501
:return: The users of this ContainerDetails. # noqa: E501
:rtype: list[str]
"""
return self._users
@users.setter
def users(self, users):
"""Sets the users of this ContainerDetails.
:param users: The users of this ContainerDetails. # noqa: E501
:type: list[str]
"""
self._users = users
@property
def uuid(self):
"""Gets the uuid of this ContainerDetails. # noqa: E501
:return: The uuid of this ContainerDetails. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this ContainerDetails.
:param uuid: The uuid of this ContainerDetails. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def vulnerabilities(self):
"""Gets the vulnerabilities of this ContainerDetails. # noqa: E501
:return: The vulnerabilities of this ContainerDetails. # noqa: E501
:rtype: list[ServiceVulnerabilityDetails]
"""
return self._vulnerabilities
@vulnerabilities.setter
def vulnerabilities(self, vulnerabilities):
"""Sets the vulnerabilities of this ContainerDetails.
:param vulnerabilities: The vulnerabilities of this ContainerDetails. # noqa: E501
:type: list[ServiceVulnerabilityDetails]
"""
self._vulnerabilities = vulnerabilities
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ContainerDetails):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ContainerDetails):
return True
return self.to_dict() != other.to_dict()
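# --- Hedged usage sketch (illustrative values only) ---
def _example_container_details():
    """Build a minimal ContainerDetails and serialize it back to a plain dict."""
    details = ContainerDetails(container_id="abc123", name="nginx", is_root=False)
    return details.to_dict()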
```
#### File: qualys_cs_api/models/drift.py
```python
import pprint
import re # noqa: F401
import six
from qualys_cs_api.configuration import Configuration
class Drift(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'category': 'list[str]',
'reason': 'list[str]',
'software': 'list[DriftSoftware]',
'vulnerability': 'list[DriftVulnerability]'
}
attribute_map = {
'category': 'category',
'reason': 'reason',
'software': 'software',
'vulnerability': 'vulnerability'
}
def __init__(self, category=None, reason=None, software=None, vulnerability=None, local_vars_configuration=None): # noqa: E501
"""Drift - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._category = None
self._reason = None
self._software = None
self._vulnerability = None
self.discriminator = None
if category is not None:
self.category = category
if reason is not None:
self.reason = reason
if software is not None:
self.software = software
if vulnerability is not None:
self.vulnerability = vulnerability
@property
def category(self):
"""Gets the category of this Drift. # noqa: E501
:return: The category of this Drift. # noqa: E501
:rtype: list[str]
"""
return self._category
@category.setter
def category(self, category):
"""Sets the category of this Drift.
:param category: The category of this Drift. # noqa: E501
:type: list[str]
"""
self._category = category
@property
def reason(self):
"""Gets the reason of this Drift. # noqa: E501
:return: The reason of this Drift. # noqa: E501
:rtype: list[str]
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this Drift.
:param reason: The reason of this Drift. # noqa: E501
:type: list[str]
"""
self._reason = reason
@property
def software(self):
"""Gets the software of this Drift. # noqa: E501
:return: The software of this Drift. # noqa: E501
:rtype: list[DriftSoftware]
"""
return self._software
@software.setter
def software(self, software):
"""Sets the software of this Drift.
:param software: The software of this Drift. # noqa: E501
:type: list[DriftSoftware]
"""
self._software = software
@property
def vulnerability(self):
"""Gets the vulnerability of this Drift. # noqa: E501
:return: The vulnerability of this Drift. # noqa: E501
:rtype: list[DriftVulnerability]
"""
return self._vulnerability
@vulnerability.setter
def vulnerability(self, vulnerability):
"""Sets the vulnerability of this Drift.
:param vulnerability: The vulnerability of this Drift. # noqa: E501
:type: list[DriftVulnerability]
"""
self._vulnerability = vulnerability
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Drift):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Drift):
return True
return self.to_dict() != other.to_dict()
```
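The generated models above all share the same surface: keyword arguments that default to None, a `to_dict()` that recursively serializes nested models and lists, and equality defined over `to_dict()`. A minimal usage sketch for `Drift`, assuming the `qualys_cs_api` package is importable (field values below are placeholders):
```python
from qualys_cs_api.models.drift import Drift

# All constructor arguments default to None, so partially filled objects are fine.
drift = Drift(category=["VULNERABILITY"], reason=["New QID found"])

# to_dict() walks openapi_types and serializes lists and nested models recursively.
print(drift.to_dict())
# {'category': ['VULNERABILITY'], 'reason': ['New QID found'], 'software': None, 'vulnerability': None}

# __eq__ compares the dict representations, not object identity.
assert drift == Drift(category=["VULNERABILITY"], reason=["New QID found"])
```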
#### File: qualys_cs_api/models/registry_request.py
```python
import pprint
import re # noqa: F401
import six
from qualys_cs_api.configuration import Configuration
class RegistryRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'aws': 'AWS',
'credential': 'Credential',
'credential_type': 'str',
'docker_hub_org_name': 'str',
'registry_type': 'str',
'registry_uri': 'str',
'registry_uuid': 'str'
}
attribute_map = {
'aws': 'aws',
'credential': 'credential',
'credential_type': 'credentialType',
'docker_hub_org_name': 'dockerHubOrgName',
'registry_type': 'registryType',
'registry_uri': 'registryUri',
'registry_uuid': 'registryUuid'
}
def __init__(self, aws=None, credential=None, credential_type=None, docker_hub_org_name=None, registry_type=None, registry_uri=None, registry_uuid=None, local_vars_configuration=None): # noqa: E501
"""RegistryRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._aws = None
self._credential = None
self._credential_type = None
self._docker_hub_org_name = None
self._registry_type = None
self._registry_uri = None
self._registry_uuid = None
self.discriminator = None
if aws is not None:
self.aws = aws
if credential is not None:
self.credential = credential
if credential_type is not None:
self.credential_type = credential_type
if docker_hub_org_name is not None:
self.docker_hub_org_name = docker_hub_org_name
if registry_type is not None:
self.registry_type = registry_type
if registry_uri is not None:
self.registry_uri = registry_uri
if registry_uuid is not None:
self.registry_uuid = registry_uuid
@property
def aws(self):
"""Gets the aws of this RegistryRequest. # noqa: E501
:return: The aws of this RegistryRequest. # noqa: E501
:rtype: AWS
"""
return self._aws
@aws.setter
def aws(self, aws):
"""Sets the aws of this RegistryRequest.
:param aws: The aws of this RegistryRequest. # noqa: E501
:type: AWS
"""
self._aws = aws
@property
def credential(self):
"""Gets the credential of this RegistryRequest. # noqa: E501
:return: The credential of this RegistryRequest. # noqa: E501
:rtype: Credential
"""
return self._credential
@credential.setter
def credential(self, credential):
"""Sets the credential of this RegistryRequest.
:param credential: The credential of this RegistryRequest. # noqa: E501
:type: Credential
"""
self._credential = credential
@property
def credential_type(self):
"""Gets the credential_type of this RegistryRequest. # noqa: E501
:return: The credential_type of this RegistryRequest. # noqa: E501
:rtype: str
"""
return self._credential_type
@credential_type.setter
def credential_type(self, credential_type):
"""Sets the credential_type of this RegistryRequest.
:param credential_type: The credential_type of this RegistryRequest. # noqa: E501
:type: str
"""
allowed_values = ["None", "Token", "BasicAuth", "DockerHub", "Oauth", "AWS"] # noqa: E501
if self.local_vars_configuration.client_side_validation and credential_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `credential_type` ({0}), must be one of {1}" # noqa: E501
.format(credential_type, allowed_values)
)
self._credential_type = credential_type
@property
def docker_hub_org_name(self):
"""Gets the docker_hub_org_name of this RegistryRequest. # noqa: E501
:return: The docker_hub_org_name of this RegistryRequest. # noqa: E501
:rtype: str
"""
return self._docker_hub_org_name
@docker_hub_org_name.setter
def docker_hub_org_name(self, docker_hub_org_name):
"""Sets the docker_hub_org_name of this RegistryRequest.
:param docker_hub_org_name: The docker_hub_org_name of this RegistryRequest. # noqa: E501
:type: str
"""
self._docker_hub_org_name = docker_hub_org_name
@property
def registry_type(self):
"""Gets the registry_type of this RegistryRequest. # noqa: E501
:return: The registry_type of this RegistryRequest. # noqa: E501
:rtype: str
"""
return self._registry_type
@registry_type.setter
def registry_type(self, registry_type):
"""Sets the registry_type of this RegistryRequest.
:param registry_type: The registry_type of this RegistryRequest. # noqa: E501
:type: str
"""
allowed_values = ["V2_PRIVATE", "V2", "DockerHub", "AWS", "Azure"] # noqa: E501
if self.local_vars_configuration.client_side_validation and registry_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `registry_type` ({0}), must be one of {1}" # noqa: E501
.format(registry_type, allowed_values)
)
self._registry_type = registry_type
@property
def registry_uri(self):
"""Gets the registry_uri of this RegistryRequest. # noqa: E501
:return: The registry_uri of this RegistryRequest. # noqa: E501
:rtype: str
"""
return self._registry_uri
@registry_uri.setter
def registry_uri(self, registry_uri):
"""Sets the registry_uri of this RegistryRequest.
:param registry_uri: The registry_uri of this RegistryRequest. # noqa: E501
:type: str
"""
self._registry_uri = registry_uri
@property
def registry_uuid(self):
"""Gets the registry_uuid of this RegistryRequest. # noqa: E501
:return: The registry_uuid of this RegistryRequest. # noqa: E501
:rtype: str
"""
return self._registry_uuid
@registry_uuid.setter
def registry_uuid(self, registry_uuid):
"""Sets the registry_uuid of this RegistryRequest.
:param registry_uuid: The registry_uuid of this RegistryRequest. # noqa: E501
:type: str
"""
self._registry_uuid = registry_uuid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RegistryRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, RegistryRequest):
return True
return self.to_dict() != other.to_dict()
```
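`RegistryRequest` enforces its enums client-side: the `credential_type` and `registry_type` setters raise `ValueError` for values outside `allowed_values` whenever `local_vars_configuration.client_side_validation` is truthy. A hedged sketch, assuming the `Configuration` attribute is writable (the URI below is a placeholder):
```python
from qualys_cs_api.configuration import Configuration
from qualys_cs_api.models.registry_request import RegistryRequest

cfg = Configuration()
cfg.client_side_validation = True  # assumption: attribute is writable; validation only runs when it is truthy

req = RegistryRequest(registry_type="DockerHub",
                      registry_uri="https://registry-1.docker.io",  # placeholder URI
                      local_vars_configuration=cfg)

try:
    req.credential_type = "Password"  # not in ["None", "Token", "BasicAuth", "DockerHub", "Oauth", "AWS"]
except ValueError as err:
    print(err)  # Invalid value for `credential_type` (Password), must be one of [...]
```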
#### File: qualys_cs_api/models/schedule_response.py
```python
import pprint
import re # noqa: F401
import six
from qualys_cs_api.configuration import Configuration
class ScheduleResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'created': 'str',
'errors': 'list[CMSError]',
'filters': 'list[Filter]',
'job_completion_date': 'str',
'job_listing_completion_date': 'str',
'job_listing_start_date': 'str',
'job_scanning_completion_date': 'str',
'job_scanning_start_date': 'str',
'job_start_date': 'str',
'name': 'str',
'on_demand': 'bool',
'schedule': 'str',
'schedule_uuid': 'str',
'status': 'str',
'updated': 'str'
}
attribute_map = {
'created': 'created',
'errors': 'errors',
'filters': 'filters',
'job_completion_date': 'jobCompletionDate',
'job_listing_completion_date': 'jobListingCompletionDate',
'job_listing_start_date': 'jobListingStartDate',
'job_scanning_completion_date': 'jobScanningCompletionDate',
'job_scanning_start_date': 'jobScanningStartDate',
'job_start_date': 'jobStartDate',
'name': 'name',
'on_demand': 'onDemand',
'schedule': 'schedule',
'schedule_uuid': 'scheduleUuid',
'status': 'status',
'updated': 'updated'
}
def __init__(self, created=None, errors=None, filters=None, job_completion_date=None, job_listing_completion_date=None, job_listing_start_date=None, job_scanning_completion_date=None, job_scanning_start_date=None, job_start_date=None, name=None, on_demand=None, schedule=None, schedule_uuid=None, status=None, updated=None, local_vars_configuration=None): # noqa: E501
"""ScheduleResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._created = None
self._errors = None
self._filters = None
self._job_completion_date = None
self._job_listing_completion_date = None
self._job_listing_start_date = None
self._job_scanning_completion_date = None
self._job_scanning_start_date = None
self._job_start_date = None
self._name = None
self._on_demand = None
self._schedule = None
self._schedule_uuid = None
self._status = None
self._updated = None
self.discriminator = None
if created is not None:
self.created = created
if errors is not None:
self.errors = errors
if filters is not None:
self.filters = filters
if job_completion_date is not None:
self.job_completion_date = job_completion_date
if job_listing_completion_date is not None:
self.job_listing_completion_date = job_listing_completion_date
if job_listing_start_date is not None:
self.job_listing_start_date = job_listing_start_date
if job_scanning_completion_date is not None:
self.job_scanning_completion_date = job_scanning_completion_date
if job_scanning_start_date is not None:
self.job_scanning_start_date = job_scanning_start_date
if job_start_date is not None:
self.job_start_date = job_start_date
if name is not None:
self.name = name
if on_demand is not None:
self.on_demand = on_demand
if schedule is not None:
self.schedule = schedule
if schedule_uuid is not None:
self.schedule_uuid = schedule_uuid
if status is not None:
self.status = status
if updated is not None:
self.updated = updated
@property
def created(self):
"""Gets the created of this ScheduleResponse. # noqa: E501
:return: The created of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this ScheduleResponse.
:param created: The created of this ScheduleResponse. # noqa: E501
:type: str
"""
self._created = created
@property
def errors(self):
"""Gets the errors of this ScheduleResponse. # noqa: E501
:return: The errors of this ScheduleResponse. # noqa: E501
:rtype: list[CMSError]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this ScheduleResponse.
:param errors: The errors of this ScheduleResponse. # noqa: E501
:type: list[CMSError]
"""
self._errors = errors
@property
def filters(self):
"""Gets the filters of this ScheduleResponse. # noqa: E501
:return: The filters of this ScheduleResponse. # noqa: E501
:rtype: list[Filter]
"""
return self._filters
@filters.setter
def filters(self, filters):
"""Sets the filters of this ScheduleResponse.
:param filters: The filters of this ScheduleResponse. # noqa: E501
:type: list[Filter]
"""
self._filters = filters
@property
def job_completion_date(self):
"""Gets the job_completion_date of this ScheduleResponse. # noqa: E501
:return: The job_completion_date of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._job_completion_date
@job_completion_date.setter
def job_completion_date(self, job_completion_date):
"""Sets the job_completion_date of this ScheduleResponse.
:param job_completion_date: The job_completion_date of this ScheduleResponse. # noqa: E501
:type: str
"""
self._job_completion_date = job_completion_date
@property
def job_listing_completion_date(self):
"""Gets the job_listing_completion_date of this ScheduleResponse. # noqa: E501
:return: The job_listing_completion_date of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._job_listing_completion_date
@job_listing_completion_date.setter
def job_listing_completion_date(self, job_listing_completion_date):
"""Sets the job_listing_completion_date of this ScheduleResponse.
:param job_listing_completion_date: The job_listing_completion_date of this ScheduleResponse. # noqa: E501
:type: str
"""
self._job_listing_completion_date = job_listing_completion_date
@property
def job_listing_start_date(self):
"""Gets the job_listing_start_date of this ScheduleResponse. # noqa: E501
:return: The job_listing_start_date of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._job_listing_start_date
@job_listing_start_date.setter
def job_listing_start_date(self, job_listing_start_date):
"""Sets the job_listing_start_date of this ScheduleResponse.
:param job_listing_start_date: The job_listing_start_date of this ScheduleResponse. # noqa: E501
:type: str
"""
self._job_listing_start_date = job_listing_start_date
@property
def job_scanning_completion_date(self):
"""Gets the job_scanning_completion_date of this ScheduleResponse. # noqa: E501
:return: The job_scanning_completion_date of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._job_scanning_completion_date
@job_scanning_completion_date.setter
def job_scanning_completion_date(self, job_scanning_completion_date):
"""Sets the job_scanning_completion_date of this ScheduleResponse.
:param job_scanning_completion_date: The job_scanning_completion_date of this ScheduleResponse. # noqa: E501
:type: str
"""
self._job_scanning_completion_date = job_scanning_completion_date
@property
def job_scanning_start_date(self):
"""Gets the job_scanning_start_date of this ScheduleResponse. # noqa: E501
:return: The job_scanning_start_date of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._job_scanning_start_date
@job_scanning_start_date.setter
def job_scanning_start_date(self, job_scanning_start_date):
"""Sets the job_scanning_start_date of this ScheduleResponse.
:param job_scanning_start_date: The job_scanning_start_date of this ScheduleResponse. # noqa: E501
:type: str
"""
self._job_scanning_start_date = job_scanning_start_date
@property
def job_start_date(self):
"""Gets the job_start_date of this ScheduleResponse. # noqa: E501
:return: The job_start_date of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._job_start_date
@job_start_date.setter
def job_start_date(self, job_start_date):
"""Sets the job_start_date of this ScheduleResponse.
:param job_start_date: The job_start_date of this ScheduleResponse. # noqa: E501
:type: str
"""
self._job_start_date = job_start_date
@property
def name(self):
"""Gets the name of this ScheduleResponse. # noqa: E501
:return: The name of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ScheduleResponse.
:param name: The name of this ScheduleResponse. # noqa: E501
:type: str
"""
self._name = name
@property
def on_demand(self):
"""Gets the on_demand of this ScheduleResponse. # noqa: E501
:return: The on_demand of this ScheduleResponse. # noqa: E501
:rtype: bool
"""
return self._on_demand
@on_demand.setter
def on_demand(self, on_demand):
"""Sets the on_demand of this ScheduleResponse.
:param on_demand: The on_demand of this ScheduleResponse. # noqa: E501
:type: bool
"""
self._on_demand = on_demand
@property
def schedule(self):
"""Gets the schedule of this ScheduleResponse. # noqa: E501
:return: The schedule of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this ScheduleResponse.
:param schedule: The schedule of this ScheduleResponse. # noqa: E501
:type: str
"""
self._schedule = schedule
@property
def schedule_uuid(self):
"""Gets the schedule_uuid of this ScheduleResponse. # noqa: E501
:return: The schedule_uuid of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._schedule_uuid
@schedule_uuid.setter
def schedule_uuid(self, schedule_uuid):
"""Sets the schedule_uuid of this ScheduleResponse.
:param schedule_uuid: The schedule_uuid of this ScheduleResponse. # noqa: E501
:type: str
"""
self._schedule_uuid = schedule_uuid
@property
def status(self):
"""Gets the status of this ScheduleResponse. # noqa: E501
:return: The status of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ScheduleResponse.
:param status: The status of this ScheduleResponse. # noqa: E501
:type: str
"""
allowed_values = ["Created", "Queued", "Paused", "Running", "Completed", "Finished", "Error", "Failed", "BaselineCompleted", "BaselineCreated", "BaselineQueued", "BaselineRunning", "Canceled"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def updated(self):
"""Gets the updated of this ScheduleResponse. # noqa: E501
:return: The updated of this ScheduleResponse. # noqa: E501
:rtype: str
"""
return self._updated
@updated.setter
def updated(self, updated):
"""Sets the updated of this ScheduleResponse.
:param updated: The updated of this ScheduleResponse. # noqa: E501
:type: str
"""
self._updated = updated
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ScheduleResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ScheduleResponse):
return True
return self.to_dict() != other.to_dict()
```
#### File: qualys_cs_api/models/sensor.py
```python
import pprint
import re # noqa: F401
import six
from qualys_cs_api.configuration import Configuration
class Sensor(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'activation_uuid': 'str',
'binary_version': 'str',
'configuration_profile': 'str',
'container_ipv4': 'str',
'container_ipv6': 'str',
'container_mac_address': 'str',
'created': 'str',
'customer_uuid': 'str',
'docker_version': 'str',
'host_uuid': 'str',
'hostname': 'str',
'image_id': 'str',
'image_sha': 'str',
'ipv4': 'str',
'ipv6': 'str',
'label': 'list[Label]',
'last_checked_in': 'str',
'mac_address': 'str',
'name': 'str',
'os': 'str',
'platform': 'str',
'privileged': 'str',
'registry': 'str',
'sensor_id': 'str',
'sensor_type': 'str',
'sensor_version': 'str',
'sha': 'str',
'status': 'str',
'uuid': 'str',
'vuln_sig_version': 'str'
}
attribute_map = {
'activation_uuid': 'activationUuid',
'binary_version': 'binaryVersion',
'configuration_profile': 'configurationProfile',
'container_ipv4': 'containerIpv4',
'container_ipv6': 'containerIpv6',
'container_mac_address': 'containerMacAddress',
'created': 'created',
'customer_uuid': 'customerUuid',
'docker_version': 'dockerVersion',
'host_uuid': 'hostUuid',
'hostname': 'hostname',
'image_id': 'imageId',
'image_sha': 'imageSha',
'ipv4': 'ipv4',
'ipv6': 'ipv6',
'label': 'label',
'last_checked_in': 'lastCheckedIn',
'mac_address': 'macAddress',
'name': 'name',
'os': 'os',
'platform': 'platform',
'privileged': 'privileged',
'registry': 'registry',
'sensor_id': 'sensorId',
'sensor_type': 'sensorType',
'sensor_version': 'sensorVersion',
'sha': 'sha',
'status': 'status',
'uuid': 'uuid',
'vuln_sig_version': 'vulnSigVersion'
}
def __init__(self, activation_uuid=None, binary_version=None, configuration_profile=None, container_ipv4=None, container_ipv6=None, container_mac_address=None, created=None, customer_uuid=None, docker_version=None, host_uuid=None, hostname=None, image_id=None, image_sha=None, ipv4=None, ipv6=None, label=None, last_checked_in=None, mac_address=None, name=None, os=None, platform=None, privileged=None, registry=None, sensor_id=None, sensor_type=None, sensor_version=None, sha=None, status=None, uuid=None, vuln_sig_version=None, local_vars_configuration=None): # noqa: E501
"""Sensor - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._activation_uuid = None
self._binary_version = None
self._configuration_profile = None
self._container_ipv4 = None
self._container_ipv6 = None
self._container_mac_address = None
self._created = None
self._customer_uuid = None
self._docker_version = None
self._host_uuid = None
self._hostname = None
self._image_id = None
self._image_sha = None
self._ipv4 = None
self._ipv6 = None
self._label = None
self._last_checked_in = None
self._mac_address = None
self._name = None
self._os = None
self._platform = None
self._privileged = None
self._registry = None
self._sensor_id = None
self._sensor_type = None
self._sensor_version = None
self._sha = None
self._status = None
self._uuid = None
self._vuln_sig_version = None
self.discriminator = None
if activation_uuid is not None:
self.activation_uuid = activation_uuid
if binary_version is not None:
self.binary_version = binary_version
if configuration_profile is not None:
self.configuration_profile = configuration_profile
if container_ipv4 is not None:
self.container_ipv4 = container_ipv4
if container_ipv6 is not None:
self.container_ipv6 = container_ipv6
if container_mac_address is not None:
self.container_mac_address = container_mac_address
if created is not None:
self.created = created
if customer_uuid is not None:
self.customer_uuid = customer_uuid
if docker_version is not None:
self.docker_version = docker_version
if host_uuid is not None:
self.host_uuid = host_uuid
if hostname is not None:
self.hostname = hostname
if image_id is not None:
self.image_id = image_id
if image_sha is not None:
self.image_sha = image_sha
if ipv4 is not None:
self.ipv4 = ipv4
if ipv6 is not None:
self.ipv6 = ipv6
if label is not None:
self.label = label
if last_checked_in is not None:
self.last_checked_in = last_checked_in
if mac_address is not None:
self.mac_address = mac_address
if name is not None:
self.name = name
if os is not None:
self.os = os
if platform is not None:
self.platform = platform
if privileged is not None:
self.privileged = privileged
if registry is not None:
self.registry = registry
if sensor_id is not None:
self.sensor_id = sensor_id
if sensor_type is not None:
self.sensor_type = sensor_type
if sensor_version is not None:
self.sensor_version = sensor_version
if sha is not None:
self.sha = sha
if status is not None:
self.status = status
if uuid is not None:
self.uuid = uuid
if vuln_sig_version is not None:
self.vuln_sig_version = vuln_sig_version
@property
def activation_uuid(self):
"""Gets the activation_uuid of this Sensor. # noqa: E501
:return: The activation_uuid of this Sensor. # noqa: E501
:rtype: str
"""
return self._activation_uuid
@activation_uuid.setter
def activation_uuid(self, activation_uuid):
"""Sets the activation_uuid of this Sensor.
:param activation_uuid: The activation_uuid of this Sensor. # noqa: E501
:type: str
"""
self._activation_uuid = activation_uuid
@property
def binary_version(self):
"""Gets the binary_version of this Sensor. # noqa: E501
:return: The binary_version of this Sensor. # noqa: E501
:rtype: str
"""
return self._binary_version
@binary_version.setter
def binary_version(self, binary_version):
"""Sets the binary_version of this Sensor.
:param binary_version: The binary_version of this Sensor. # noqa: E501
:type: str
"""
self._binary_version = binary_version
@property
def configuration_profile(self):
"""Gets the configuration_profile of this Sensor. # noqa: E501
:return: The configuration_profile of this Sensor. # noqa: E501
:rtype: str
"""
return self._configuration_profile
@configuration_profile.setter
def configuration_profile(self, configuration_profile):
"""Sets the configuration_profile of this Sensor.
:param configuration_profile: The configuration_profile of this Sensor. # noqa: E501
:type: str
"""
self._configuration_profile = configuration_profile
@property
def container_ipv4(self):
"""Gets the container_ipv4 of this Sensor. # noqa: E501
:return: The container_ipv4 of this Sensor. # noqa: E501
:rtype: str
"""
return self._container_ipv4
@container_ipv4.setter
def container_ipv4(self, container_ipv4):
"""Sets the container_ipv4 of this Sensor.
:param container_ipv4: The container_ipv4 of this Sensor. # noqa: E501
:type: str
"""
self._container_ipv4 = container_ipv4
@property
def container_ipv6(self):
"""Gets the container_ipv6 of this Sensor. # noqa: E501
:return: The container_ipv6 of this Sensor. # noqa: E501
:rtype: str
"""
return self._container_ipv6
@container_ipv6.setter
def container_ipv6(self, container_ipv6):
"""Sets the container_ipv6 of this Sensor.
:param container_ipv6: The container_ipv6 of this Sensor. # noqa: E501
:type: str
"""
self._container_ipv6 = container_ipv6
@property
def container_mac_address(self):
"""Gets the container_mac_address of this Sensor. # noqa: E501
:return: The container_mac_address of this Sensor. # noqa: E501
:rtype: str
"""
return self._container_mac_address
@container_mac_address.setter
def container_mac_address(self, container_mac_address):
"""Sets the container_mac_address of this Sensor.
:param container_mac_address: The container_mac_address of this Sensor. # noqa: E501
:type: str
"""
self._container_mac_address = container_mac_address
@property
def created(self):
"""Gets the created of this Sensor. # noqa: E501
:return: The created of this Sensor. # noqa: E501
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this Sensor.
:param created: The created of this Sensor. # noqa: E501
:type: str
"""
self._created = created
@property
def customer_uuid(self):
"""Gets the customer_uuid of this Sensor. # noqa: E501
:return: The customer_uuid of this Sensor. # noqa: E501
:rtype: str
"""
return self._customer_uuid
@customer_uuid.setter
def customer_uuid(self, customer_uuid):
"""Sets the customer_uuid of this Sensor.
:param customer_uuid: The customer_uuid of this Sensor. # noqa: E501
:type: str
"""
self._customer_uuid = customer_uuid
@property
def docker_version(self):
"""Gets the docker_version of this Sensor. # noqa: E501
:return: The docker_version of this Sensor. # noqa: E501
:rtype: str
"""
return self._docker_version
@docker_version.setter
def docker_version(self, docker_version):
"""Sets the docker_version of this Sensor.
:param docker_version: The docker_version of this Sensor. # noqa: E501
:type: str
"""
self._docker_version = docker_version
@property
def host_uuid(self):
"""Gets the host_uuid of this Sensor. # noqa: E501
:return: The host_uuid of this Sensor. # noqa: E501
:rtype: str
"""
return self._host_uuid
@host_uuid.setter
def host_uuid(self, host_uuid):
"""Sets the host_uuid of this Sensor.
:param host_uuid: The host_uuid of this Sensor. # noqa: E501
:type: str
"""
self._host_uuid = host_uuid
@property
def hostname(self):
"""Gets the hostname of this Sensor. # noqa: E501
:return: The hostname of this Sensor. # noqa: E501
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""Sets the hostname of this Sensor.
:param hostname: The hostname of this Sensor. # noqa: E501
:type: str
"""
self._hostname = hostname
@property
def image_id(self):
"""Gets the image_id of this Sensor. # noqa: E501
:return: The image_id of this Sensor. # noqa: E501
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""Sets the image_id of this Sensor.
:param image_id: The image_id of this Sensor. # noqa: E501
:type: str
"""
self._image_id = image_id
@property
def image_sha(self):
"""Gets the image_sha of this Sensor. # noqa: E501
:return: The image_sha of this Sensor. # noqa: E501
:rtype: str
"""
return self._image_sha
@image_sha.setter
def image_sha(self, image_sha):
"""Sets the image_sha of this Sensor.
:param image_sha: The image_sha of this Sensor. # noqa: E501
:type: str
"""
self._image_sha = image_sha
@property
def ipv4(self):
"""Gets the ipv4 of this Sensor. # noqa: E501
:return: The ipv4 of this Sensor. # noqa: E501
:rtype: str
"""
return self._ipv4
@ipv4.setter
def ipv4(self, ipv4):
"""Sets the ipv4 of this Sensor.
:param ipv4: The ipv4 of this Sensor. # noqa: E501
:type: str
"""
self._ipv4 = ipv4
@property
def ipv6(self):
"""Gets the ipv6 of this Sensor. # noqa: E501
:return: The ipv6 of this Sensor. # noqa: E501
:rtype: str
"""
return self._ipv6
@ipv6.setter
def ipv6(self, ipv6):
"""Sets the ipv6 of this Sensor.
:param ipv6: The ipv6 of this Sensor. # noqa: E501
:type: str
"""
self._ipv6 = ipv6
@property
def label(self):
"""Gets the label of this Sensor. # noqa: E501
:return: The label of this Sensor. # noqa: E501
:rtype: list[Label]
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this Sensor.
:param label: The label of this Sensor. # noqa: E501
:type: list[Label]
"""
self._label = label
@property
def last_checked_in(self):
"""Gets the last_checked_in of this Sensor. # noqa: E501
:return: The last_checked_in of this Sensor. # noqa: E501
:rtype: str
"""
return self._last_checked_in
@last_checked_in.setter
def last_checked_in(self, last_checked_in):
"""Sets the last_checked_in of this Sensor.
:param last_checked_in: The last_checked_in of this Sensor. # noqa: E501
:type: str
"""
self._last_checked_in = last_checked_in
@property
def mac_address(self):
"""Gets the mac_address of this Sensor. # noqa: E501
:return: The mac_address of this Sensor. # noqa: E501
:rtype: str
"""
return self._mac_address
@mac_address.setter
def mac_address(self, mac_address):
"""Sets the mac_address of this Sensor.
:param mac_address: The mac_address of this Sensor. # noqa: E501
:type: str
"""
self._mac_address = mac_address
@property
def name(self):
"""Gets the name of this Sensor. # noqa: E501
:return: The name of this Sensor. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Sensor.
:param name: The name of this Sensor. # noqa: E501
:type: str
"""
self._name = name
@property
def os(self):
"""Gets the os of this Sensor. # noqa: E501
:return: The os of this Sensor. # noqa: E501
:rtype: str
"""
return self._os
@os.setter
def os(self, os):
"""Sets the os of this Sensor.
:param os: The os of this Sensor. # noqa: E501
:type: str
"""
self._os = os
@property
def platform(self):
"""Gets the platform of this Sensor. # noqa: E501
:return: The platform of this Sensor. # noqa: E501
:rtype: str
"""
return self._platform
@platform.setter
def platform(self, platform):
"""Sets the platform of this Sensor.
:param platform: The platform of this Sensor. # noqa: E501
:type: str
"""
self._platform = platform
@property
def privileged(self):
"""Gets the privileged of this Sensor. # noqa: E501
:return: The privileged of this Sensor. # noqa: E501
:rtype: str
"""
return self._privileged
@privileged.setter
def privileged(self, privileged):
"""Sets the privileged of this Sensor.
:param privileged: The privileged of this Sensor. # noqa: E501
:type: str
"""
self._privileged = privileged
@property
def registry(self):
"""Gets the registry of this Sensor. # noqa: E501
:return: The registry of this Sensor. # noqa: E501
:rtype: str
"""
return self._registry
@registry.setter
def registry(self, registry):
"""Sets the registry of this Sensor.
:param registry: The registry of this Sensor. # noqa: E501
:type: str
"""
self._registry = registry
@property
def sensor_id(self):
"""Gets the sensor_id of this Sensor. # noqa: E501
:return: The sensor_id of this Sensor. # noqa: E501
:rtype: str
"""
return self._sensor_id
@sensor_id.setter
def sensor_id(self, sensor_id):
"""Sets the sensor_id of this Sensor.
:param sensor_id: The sensor_id of this Sensor. # noqa: E501
:type: str
"""
self._sensor_id = sensor_id
@property
def sensor_type(self):
"""Gets the sensor_type of this Sensor. # noqa: E501
:return: The sensor_type of this Sensor. # noqa: E501
:rtype: str
"""
return self._sensor_type
@sensor_type.setter
def sensor_type(self, sensor_type):
"""Sets the sensor_type of this Sensor.
:param sensor_type: The sensor_type of this Sensor. # noqa: E501
:type: str
"""
allowed_values = ["REGISTRY", "GENERAL", "CICD"] # noqa: E501
if self.local_vars_configuration.client_side_validation and sensor_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `sensor_type` ({0}), must be one of {1}" # noqa: E501
.format(sensor_type, allowed_values)
)
self._sensor_type = sensor_type
@property
def sensor_version(self):
"""Gets the sensor_version of this Sensor. # noqa: E501
:return: The sensor_version of this Sensor. # noqa: E501
:rtype: str
"""
return self._sensor_version
@sensor_version.setter
def sensor_version(self, sensor_version):
"""Sets the sensor_version of this Sensor.
:param sensor_version: The sensor_version of this Sensor. # noqa: E501
:type: str
"""
self._sensor_version = sensor_version
@property
def sha(self):
"""Gets the sha of this Sensor. # noqa: E501
:return: The sha of this Sensor. # noqa: E501
:rtype: str
"""
return self._sha
@sha.setter
def sha(self, sha):
"""Sets the sha of this Sensor.
:param sha: The sha of this Sensor. # noqa: E501
:type: str
"""
self._sha = sha
@property
def status(self):
"""Gets the status of this Sensor. # noqa: E501
:return: The status of this Sensor. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Sensor.
:param status: The status of this Sensor. # noqa: E501
:type: str
"""
self._status = status
@property
def uuid(self):
"""Gets the uuid of this Sensor. # noqa: E501
:return: The uuid of this Sensor. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this Sensor.
:param uuid: The uuid of this Sensor. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def vuln_sig_version(self):
"""Gets the vuln_sig_version of this Sensor. # noqa: E501
:return: The vuln_sig_version of this Sensor. # noqa: E501
:rtype: str
"""
return self._vuln_sig_version
@vuln_sig_version.setter
def vuln_sig_version(self, vuln_sig_version):
"""Sets the vuln_sig_version of this Sensor.
:param vuln_sig_version: The vuln_sig_version of this Sensor. # noqa: E501
:type: str
"""
self._vuln_sig_version = vuln_sig_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Sensor):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Sensor):
return True
return self.to_dict() != other.to_dict()
```
#### File: qualys-cs-python-client/test/test_schedule_request.py
```python
from __future__ import absolute_import
import unittest
import qualys_cs_api
from qualys_cs_api.models.schedule_request import ScheduleRequest # noqa: E501
from qualys_cs_api.rest import ApiException
class TestScheduleRequest(unittest.TestCase):
"""ScheduleRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testScheduleRequest(self):
"""Test ScheduleRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = qualys_cs_api.models.schedule_request.ScheduleRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
``` |
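The auto-generated test stub above leaves object construction as a FIXME. A minimal way to make it exercise the model, assuming `ScheduleRequest` follows the same pattern as the other generated models (every constructor argument optional, `to_dict()` and `__eq__` defined), is sketched below; it is a placeholder, not the intended test:
```python
import unittest

from qualys_cs_api.models.schedule_request import ScheduleRequest


class TestScheduleRequestSketch(unittest.TestCase):
    def test_default_construction(self):
        # assumption: like the other generated models, every field defaults to None
        model = ScheduleRequest()
        self.assertIsInstance(model.to_dict(), dict)
        self.assertEqual(model, ScheduleRequest())  # __eq__ compares to_dict() output


if __name__ == "__main__":
    unittest.main()
```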
{
"source": "jll123567/freshmen-code",
"score": 3
} |
#### File: class work/Jacob_ledbetter_102215_pygmaedemo/Jacob_ledbetter_102215_pygmaedemo.py
```python
import pygame
import random
import time
#init
pygame.init()
#surface size
display_width=1000
display_height=600
#color def
black= (0,0,0)
white=(255,255,255)
red=(255,0,0)
cyan=(65,255,243)
#more surface info
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('demo')
#clock
clock = pygame.time.Clock()
#the reference images
faceImg = pygame.image.load('face.png')
hrtImg=pygame.image.load('hrt_sml.png')
badImg=pygame.image.load('bad.png')
#running the elements
def bad(x,y):
gameDisplay.blit(badImg,(x,y))
def face(x,y):
gameDisplay.blit(faceImg,(x,y))
def heart(x,y):
gameDisplay.blit(hrtImg,(x,y))
#dif
def dif():
global hrt_count
if hrt_count==10:
bad(badx-100,bady+10)
def score(count):
font = pygame.font.SysFont('./fontp',32)
text= font.render(str(count),True,black)
gameDisplay.blit(text,(0,0))
#start pos
facex=(display_width*0.68)
facey=(display_height*0.4)
hrtx=(display_width*0.34)
hrty=facey
badx=random.randint(20,950)
bady=random.randint(20,550)
#hrtcounter
hrt_count=0
#the change vars
facex_change=0
facey_change=0
badx_chng=5
bady_chng=5
#crash var(old)
crashed = False
#game loop
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
quit()
#border fix
if facex > display_width:
facex -= 150
elif facex < 0:
facex += 150
elif facey > display_height:
facey -= 50
elif facey < 0:
facey += 50
#movement
if event.type == pygame.KEYDOWN:
if event.key==pygame.K_LEFT:
facex_change= -5
elif event.key==pygame.K_RIGHT:
facex_change=5
elif event.key==pygame.K_UP:
facey_change= -5
elif event.key==pygame.K_DOWN:
facey_change= 5
if event.type==pygame.KEYUP:
if event.key==pygame.K_RIGHT or event.key == pygame.K_LEFT:
facex_change=0
elif event.key==pygame.K_UP or event.key==pygame.K_DOWN:
facey_change=0
facex += facex_change
facey += facey_change
#dif change
dif()
#bad movement
if bady > 580:
bady_chng=bady_chng*-1
bady_chng-=1
elif bady < 20:
bady_chng=bady_chng*-1
bady_chng+=1
elif badx > 980:
badx_chng=badx_chng*-1
badx_chng-=1
elif badx < 20:
badx_chng=badx_chng*-1
badx_chng +=1
badx +=badx_chng
bady += bady_chng
#hrt collection
if facex >= hrtx - 50 and facex <= hrtx +50:
if facey >= hrty-50 and facey<=hrty+50:
hrt_count += 1
hrtx=random.randint(0,950)
hrty=random.randint(0,550)
#bad collision detection
if facex >= badx- 50 and facex <= badx +50:
if facey >= bady-50 and facey<=bady+50:
print('you ded')
print('score:',hrt_count)
time.sleep(2)
pygame.quit()
time.sleep(2)
quit()
#background
gameDisplay.fill(cyan)
heart(hrtx,hrty)
face(facex,facey)
bad(badx,bady)
score(hrt_count)
#update
pygame.display.update()
clock.tick(30)
#quit
pygame.quit()
quit()
#complete!!!
```
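The demo's pickup and hit detection compare raw x/y coordinates against hard-coded ±50-pixel windows. Roughly the same test can be expressed with `pygame.Rect`, which makes sprite sizes explicit; a small sketch, where the 50×50 sizes are assumptions matching the windows above:
```python
import pygame


def touches(ax, ay, bx, by, a_size=(50, 50), b_size=(50, 50)):
    """Axis-aligned overlap test, roughly equivalent to the hand-written range checks."""
    return pygame.Rect(ax, ay, *a_size).colliderect(pygame.Rect(bx, by, *b_size))


# usage sketch, replacing the nested if-statements in the game loop:
# if touches(facex, facey, hrtx, hrty):
#     hrt_count += 1
# if touches(facex, facey, badx, bady):
#     print('you ded')
```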
#### File: class work/JacobLedbetter_11,23,15_pdf 6-10/pong.py
```python
import pygame
#-------------Color Pallet------------------------
black = ( 0, 0, 0)
white = (255,255,255)
red = (255, 0, 0)
green = ( 0,255, 0)
blue = ( 0, 0,255)
#-------------Initializations-----------------------
pygame.init()
screensize_x=700
screensize_y=500
screensize=[screensize_x,screensize_y]
screen_color=black
screen = pygame.display.set_mode(screensize)
pygame.display.set_caption("Pong")
font = pygame.font.Font(None, 36)
background = pygame.Surface(screen.get_size())
clock=pygame.time.Clock()
paddle_width=20
paddle_height=80
#--------------Player Sprite-------------------
class Player(pygame.sprite.Sprite):
def __init__(self,x,y):
pygame.sprite.Sprite.__init__(self)
self.width=paddle_width
self.height=paddle_height
self.image=pygame.Surface([self.width,self.height])
self.image.fill(white)
self.rect=self.image.get_rect()
self.rect.x=x
self.rect.y=y
self.speed_x=0
self.speed_y=0
def move(self):
self.rect.x+=self.speed_x
self.rect.y+=self.speed_y
def collide(self):
if self.rect.y<0:
self.rect.y=0
if self.rect.y>screensize_y-self.height:
self.rect.y=screensize_y-self.height
if self.rect.x<0:
self.rect.x=0
if self.rect.x>screensize_x-self.width:
self.rect.x=screensize_x-self.width
#--------------Ball Sprite-------------------
class Ball(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.width=10
self.height=10
self.image=pygame.Surface([self.width,self.height])
self.image.fill(blue)
self.rect=self.image.get_rect()
self.rect.x=screensize_x/2
self.rect.y=screensize_y/2
self.speed_x=-3
self.speed_y=3
def move(self):
self.rect.x+=self.speed_x
self.rect.y+=self.speed_y
def collide(self):
if self.rect.x<0 or self.rect.x>screensize_x-self.width:
self.speed_x=-1*self.speed_x
if self.rect.y<0 or self.rect.y>screensize_y-self.height:
self.speed_y=-1*self.speed_y
def gameover(self):
if self.rect.x<0 or self.rect.x>screensize_x-paddle_width:
self.rect.x=screensize_x/2
return True
else:
return False
#------------Sprite initialization----------------
balls = pygame.sprite.Group()
allsprites = pygame.sprite.RenderPlain()
player2=Player(0,0)
player1=Player(screensize_x-paddle_width,0)
ball=Ball()
balls.add(ball)
allsprites.add(player1,player2,ball)
#-----------Game Initialization------------------
rungame=True
gameover=False
#-----------Main Program Loop---------------------
while rungame:
screen.fill(screen_color)
#----------Events-----------------------------
for event in pygame.event.get():
if event.type == pygame.QUIT:
rungame=False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
player1.speed_y=-4
if event.key == pygame.K_DOWN:
player1.speed_y=4
if event.key == pygame.K_w:
player2.speed_y=-4
if event.key == pygame.K_s:
player2.speed_y=4
if event.key == pygame.K_SPACE:
gameover=False
if event.type == pygame.KEYUP:
if event.key == pygame.K_UP:
player1.speed_y=0
if event.key == pygame.K_DOWN:
player1.speed_y=0
if event.key == pygame.K_w:
player2.speed_y=0
if event.key == pygame.K_s:
player2.speed_y=0
#---------Game Logic-----------------------------
if not gameover:
player1.move()
player2.move()
gameover=ball.gameover()
ball.move()
player1.collide()
player2.collide()
ball.collide()
if gameover:
text=font.render("Game Over: Press Space",True,white)
text_position=text.get_rect(centerx=background.get_width()/2)
text_position.top=250
screen.blit(text,text_position)
if pygame.sprite.spritecollide(player1,balls,False):
ball.speed_x=-1*ball.speed_x
if pygame.sprite.spritecollide(player2,balls,False):
ball.speed_x=-1*ball.speed_x
#------------Update Drawings-------------------------
allsprites.draw(screen)
pygame.display.flip()
clock.tick(60)
pygame.quit()
```
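pong.py checks the ball against each paddle with a separate `pygame.sprite.spritecollide` call. That check only needs the sprites' rects, so its behaviour can be seen in isolation; a self-contained sketch (the class and coordinates are made up for illustration):
```python
import pygame

pygame.init()


class Box(pygame.sprite.Sprite):
    """Minimal sprite: just an image surface and a rect, enough for collision tests."""
    def __init__(self, x, y, w=20, h=20):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([w, h])
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y


ball = Box(100, 100, 10, 10)
paddles = pygame.sprite.Group(Box(95, 95), Box(400, 300))

# dokill=False reports overlapping sprites without removing them from the group,
# which is why the paddles in pong.py survive every bounce.
hits = pygame.sprite.spritecollide(ball, paddles, False)
print(len(hits))  # 1 -> only the paddle at (95, 95) overlaps the ball
pygame.quit()
```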
#### File: JacobLedbetter_12_1_15_gmaeproj/data/enemy test_1.py
```python
import pygame
import random
import time
############## Define Variables/Initialization #######################
#All variable definitions, functions, and class objects go below this line
pygame.init()
iconImg=pygame.image.load('./icon.png')
pygame.display.set_icon(iconImg)
#Define color palette
# r g b
black = ( 0, 0, 0)
white = (255,255,255)
red = (255, 0, 0)
green = ( 0,255, 0)
blue = ( 0, 0,255)
#set display parameters
size_x=800
size_y=700
size=[size_x,size_y]
class requirement:
xyz=0
display=pygame.display.set_mode(size)
pygame.display.set_caption("Bunny slipper crusade")
#image loading
test_1Img_l = pygame.image.load('enemeesprite_l.png')
test_1Img_r = pygame.image.load('./enemeesprite_r.png')
test_2Img_l = pygame.image.load('kittyspringfinal.png')
test_2Img_r = pygame.image.load('kittyspringfinal.png')
test_3Img_l=pygame.image.load('melonlegs_l.png')
test_3Img_r=pygame.image.load('melonlegs_r.png')
projImg=pygame.image.load('./mellonleg_proj.png')
#game vars
jump_add=1
life=2
test_1_x_chng=3
test_1_img=test_1Img_r
test_1_x_0=30
test_1_x_1=120
test_2_x_chng=3
test_2_img=test_2Img_r
test_2_x_0=60
test_2_x_1=150
jump_timer=100
jump_timer_chng=-1
jumping_timer=60
test_2_y=650
test_3_img=test_3Img_r
test_3_x_chng=3
test_3_x_0=200
shoot_timer=3
proj_x=test_3_x_0
projtest_x=200
projtest_x_chng=3
wall=True
timer=2
timer_chng=1
#game objects
def testguy_1(x,y):
global test_1_x_0,test_1_x_chng,test_1_img
if test_1_x_0 >= 773:
test_1_x_chng=-3
test_1_img=test_1Img_l
if test_1_x_0<=0:
test_1_x_chng=3
test_1_img=test_1Img_r
test_1_x_0+=test_1_x_chng
display.blit(test_1_img,(x,y))
def testguy_2(x,y):
global test_2_img,test_2_x_0,test_2_x_chng,jump_timer,jump_timer_chng,jumping_timer,test_2_y
if jump_timer==0:
jump_timer_chng=0
jumping_timer+=-1
test_2_y-=2
if jumping_timer==0:
test_2_y=650
jump_timer=200
jump_timer_chng=-1
jumping_timer=120
if test_2_x_0 >= 773:
test_2_x_chng=-3
test_2_img=test_2Img_l
if test_1_x_0<=0:
test_2_x_chng=3
test_2_img=test_2Img_r
test_2_x_0+=test_2_x_chng
jump_timer+=jump_timer_chng
display.blit(test_2_img,(x,y))
def testguy_3(x,y):
global test_3_x_0,test_3_x_chng,test_3_img,timer
def projtest(y):
global projtest_x,projtest_x_chng,est_2_x_0,wall,timer,timer_chng,test_3_img
if test_3_img==test_3Img_l:
projtest_chng_chng=5
if test_3_img==test_3Img_r:
projtest_chng_chng=-5
if projtest_x<=0:
timer_chng=0
projtest_x=test_3_x_0
projtest_x_chng=0
wall=True
timer=120
timer_chng=1
if projtest_x>=777:
timer_chng=0
projtest_x=test_3_x_0
projtest_x_chng=0
wall=True
timer=120
timer_chng=1
if wall:
projtest_x=test_3_x_0
timer-=timer_chng
if timer==0:
timer=120
wall=False
projtest_x_chng=projtest_chng_chng
projtest_x-=projtest_x_chng
display.blit(projImg,(projtest_x,y))
projtest(655)
if test_3_x_0 >= 773:
test_3_x_chng=-3
test_3_img=test_3Img_l
if test_3_x_0<=0:
test_3_x_chng=3
test_3_img=test_3Img_r
test_3_x_0+=test_3_x_chng
display.blit(test_3_img,(x,y))
#set the clock to manage how fast the screen updates
clock=pygame.time.Clock()
#Setup the loop control
rungame=True
#------------Main Program Loop -----------------
while rungame:
########## Events ######################
for event in pygame.event.get():
if event.type == pygame.QUIT:
rungame=False
display.fill(white)
######### Begin Game Logic ####################
#Start by clearing the old display
######### End Game Logic ######################
testguy_1(test_1_x_0,640)
#testguy_1(test_1_x_1,640)
testguy_2(test_2_x_0,test_2_y)
#testguy_2(test_2_x_1,test_2_y)
testguy_3(test_3_x_0,645)
########## Update screen and clock #############
#update the screen with the new drawing
pygame.display.flip()
#set the clock speed
clock.tick(30)
pygame.quit()
quit()
```
#### File: JacobLedbetter_12_1_15_gmaeproj/data/game code (2).py
```python
import time
############## Import Libraries #######################
import pygame
import random
def go_thing():
x=1
go_thing()
############## Define Variables/Initilization #######################
#All variable definitions, functions, and class objects go below this line
pygame.init()
#corner icon
iconImg=pygame.image.load('./icon.png')
pygame.display.set_icon(iconImg)
#Define color palette
# r g b
black = ( 0, 0, 0)
white = (255,255,255)
red = (255, 0, 0)
green = ( 0,255, 0)
blue = ( 0, 0,255)
grass_green=(15,219,20)
#set display parameters
size_x=800
size_y=700
size=[size_x,size_y]
#program name
display=pygame.display.set_mode(size)
pygame.display.set_caption("Bunny slipper crusade")
#sound load
music = pygame.mixer.Sound("./music.wav")
pew=pygame.mixer.Sound('./pew.wav')
enemy_dead=pygame.mixer.Sound('./enemy dead.wav')
jump=pygame.mixer.Sound('./jump.wav')
kitty_dead=pygame.mixer.Sound('./kitty_dead.wav')
#image loading
bground=pygame.image.load('bg.png')
lifecount=pygame.image.load('heart_new.png')
player_r_Img = pygame.image.load('supersprite_r.png')
player_l_Img = pygame.image.load('supersprite_L.png')
player_j_r_Img=pygame.image.load('superspritejump_r.png')
player_j_l_Img=pygame.image.load('superspritejump_l.png')
player_proj_r_Img=pygame.image.load('sum_r.png')
player_proj_l_Img=pygame.image.load('sum_l.png')
#enemy img load
test_1Img_l = pygame.image.load('enemeesprite_l.png')
test_1Img_r = pygame.image.load('./enemeesprite_r.png')
test_2Img_l = pygame.image.load('kittyspringfinal.png')
test_2Img_r = pygame.image.load('kittyspringfinal.png')
test_3Img_l=pygame.image.load('melonlegs_l.png')
test_3Img_r=pygame.image.load('melonlegs_r.png')
projImg=pygame.image.load('./mellonleg_proj.png')
#play music
music.play(-1)
#player vars
player_x=100
player_y=650
player_x_change=0
player_y_change=0
player_img=player_l_Img
player_proj_chng=0
player_proj_chng_chng=0
player_proj_x=player_x
player_proj_y=player_y
player_proj_wall_colide=True
player_proj_fireing=False
player_proj_dset=False
player_jump_height=0
player_y=650
player_jumptime=60
player_jumping=False
hit=0
player_nohit_time=60
player_hit=False
life=7
#enemy vars
test1_x_0_chng=3
test1_0_img=test_1Img_r
test1_x_0=330
test1_y_0=640
alien_0_dead=False
test_2_x_chng=3
test_2_img=test_2Img_r
test_2_x_0=60
test_2_x_1=150
jump_timer=100
jump_timer_chng=-1
jumping_timer=60
test_2_y_0=650
s_wait=0
test_3_img=test_3Img_r
test_3_x_chng=3
test_3_x_0=200
shoot_timer=3
proj_x=test_3_x_0
projtest_x=200
projtest_x_chng=3
wall=True
timer=2
timer_chng=1
#lvl vars
checkpoint=0
lvl_comp=0
lvl_two_start=False
wait=0
inst=True
lvl_three_start=False
#game objects
#levels
def lvl_one():
global wait,test1_x_0,test1_x_0_chng,test1_0_img,alien_0_dead,lvl_comp,player_img,player_x,player_y,player_y_change,player_x_change,player_x,player_proj_wall_colide,player_jumping,player_jump_height,player_jumptime,player_nohit_time,player_hit,hit,life,player_img,player_proj_chng,player_proj_x,player_proj_y,player_proj_wall_colide,player_proj_img,player_proj_fireing,player_proj_chng_chng,player_proj_dset,checkpoint
def player(x,y):
global player_img,player_x,player_y,player_y_change,player_x_change,player_x,player_proj_wall_colide,player_jumping,player_jump_height,player_jumptime,player_nohit_time,player_hit,hit,life,test1_x_0,test1_y_0,lvl_comp
def proj():
global player_img,player_proj_chng,player_proj_x,player_proj_y,player_proj_wall_colide,player_proj_img,player_proj_fireing,player_proj_chng_chng,player_proj_dset
player_proj_y=player_y
if not player_proj_dset:
if player_img==player_r_Img or player_img==player_j_r_Img:
if player_proj_chng_chng==0:
player_proj_chng_chng=0
else:
player_proj_chng_chng=5
player_proj_img=player_proj_r_Img
if not player_proj_dset:
if player_img==player_l_Img or player_img==player_j_l_Img:
player_proj_img=player_proj_l_Img
if player_proj_chng_chng==0:
player_proj_chng_chng=0
else:
player_proj_chng_chng=-5
if player_proj_x<=0:
player_proj_x=player_x
player_proj_chng_chng=0
player_proj_wall_colide=True
if player_proj_x>=777:
player_proj_x=player_x
player_proj_chng_chng=0
player_proj_wall_colide=True
if player_proj_wall_colide:
player_proj_dset=False
player_proj_x=player_x
player_proj_chng_chng=0
if player_proj_fireing:
player_proj_wall_colide=False
if player_img==player_r_Img or player_img==player_j_r_Img:
player_proj_chng_chng=5
player_proj_img=player_proj_r_Img
if player_img==player_l_Img or player_img==player_j_l_Img:
player_proj_img=player_proj_l_Img
player_proj_chng_chng=-5
player_proj_chng=player_proj_chng_chng
player_proj_x+=player_proj_chng
display.blit(player_proj_img,(player_proj_x,player_proj_y))
proj()
if player_jumping==True:
player_jump_height=-3
player_jumptime-=1
player_y += player_jump_height
if player_jumptime<=0:
player_jumping=False
player_y +=3
player_y=650
player_jumptime=60
if player_y<=500:
player_jumping=False
player_jump_height=0
player_y=650
player_jumptime=60
if player_x+35 >= test1_x_0 and player_x <= test1_x_0+35:
if player_y+50 >= test1_y_0 and player_y<= test1_y_0+60:
if not player_hit:
life-=1
player_hit=True
player_nohit_time=50
if player_hit:
if player_nohit_time>0:
player_nohit_time-=1
else:
player_hit=False
player_nohit_time=50
if life <= 0:
time.sleep(1)
#player vars
player_x=100
player_y=650
player_x_change=0
player_y_change=0
player_img=player_l_Img
player_proj_chng=0
player_proj_chng_chng=0
player_proj_x=player_x
player_proj_y=player_y
player_proj_wall_colide=True
player_proj_fireing=False
player_proj_dset=False
player_jump_height=0
player_y=650
player_jumptime=60
player_jumping=False
hit=0
player_nohit_time=60
player_hit=False
life=7
#enemy vars
test1_x_0_chng=3
test1_0_img=test_1Img_r
test1_x_0=330
test1_y_0=640
alien_0_dead=False
test_2_x_chng=3
test_2_img=test_2Img_r
test_2_x_0=60
test_2_x_1=150
jump_timer=100
jump_timer_chng=-1
jumping_timer=60
test_2_y_0=650
#lvl vars
lvl_comp=0
checkpoint=0
player_x += player_x_change
display.blit(player_img,(x,y))
player(player_x,player_y)
if alien_0_dead==False:
#enemy in level
def testguy1_0(x,y):
global test1_x_0,test1_x_0_chng,test1_0_img,alien_0_dead,player_proj_wall_colide
if test1_x_0 >= 773:
test1_x_0_chng=-3
test1_0_img=test_1Img_l
if test1_x_0<=0:
test1_x_0_chng=3
test1_0_img=test_1Img_r
if player_proj_x >= test1_x_0 and player_proj_x+20 <= test1_x_0+31:
if player_proj_y >= test1_y_0 and player_proj_y+20 <= test1_y_0+60:
enemy_dead.play()
alien_0_dead=True
player_proj_wall_colide=True
test1_x_0+=test1_x_0_chng
display.blit(test1_0_img,(x,y))
testguy1_0(test1_x_0,test1_y_0)
if alien_0_dead==True:
lvl_comp+=1
alien_0_dead=False
test1_x_0=random.randint(10,705)
if lvl_comp>=20:
def win_text():
font = pygame.font.SysFont("Calibri",32, False, False)
text= font.render('Level Complete',True,black)
display.blit(text,(250,400))
win_text()
wait+=1
if wait>=20:
wait=0
time.sleep(2)
checkpoint=1
def lvl_two():
global wait,test_2_x_chng,test_2_img,test_2_x_0,test_2_x_1,jump_timer,jump_timer_chng,jumping_timer,test_2_y_0,alien_0_dead,lvl_comp,player_img,player_x,player_y,player_y_change,player_x_change,player_x,player_proj_wall_colide,player_jumping,player_jump_height,player_jumptime,player_nohit_time,player_hit,hit,life,player_img,player_proj_chng,player_proj_x,player_proj_y,player_proj_wall_colide,player_proj_img,player_proj_fireing,player_proj_chng_chng,player_proj_dset,lvl_two_start
if lvl_two_start==False:
lvl_comp=0
lvl_two_start=True
def player(x,y):
global player_img,player_x,player_y,player_y_change,player_x_change,player_x,player_proj_wall_colide,player_jumping,player_jump_height,player_jumptime,player_nohit_time,player_hit,hit,life,test_2_x_0,test_2_y_0,lvl_comp
def proj():
global player_img,player_proj_chng,player_proj_x,player_proj_y,player_proj_wall_colide,player_proj_img,player_proj_fireing,player_proj_chng_chng,player_proj_dset
player_proj_y=player_y
if not player_proj_dset:
if player_img==player_r_Img or player_img==player_j_r_Img:
if player_proj_chng_chng==0:
player_proj_chng_chng=0
else:
player_proj_chng_chng=5
player_proj_img=player_proj_r_Img
if not player_proj_dset:
if player_img==player_l_Img or player_img==player_j_l_Img:
player_proj_img=player_proj_l_Img
if player_proj_chng_chng==0:
player_proj_chng_chng=0
else:
player_proj_chng_chng=-5
if player_proj_x<=0:
player_proj_x=player_x
player_proj_x_chng_chng=0
player_proj_wall_colide=True
if player_proj_x>=777:
player_proj_x=player_x
projtest_x_chng_chng=0
player_proj_wall_colide=True
if player_proj_wall_colide:
player_proj_dset=False
player_proj_x=player_x
player_proj_chng_chng=0
if player_proj_fireing:
player_proj_wall_colide=False
if player_img==player_r_Img or player_img==player_j_r_Img:
player_proj_chng_chng=5
player_proj_img=player_proj_r_Img
if player_img==player_l_Img or player_img==player_j_l_Img:
player_proj_img=player_proj_l_Img
player_proj_chng_chng=-5
player_proj_chng=player_proj_chng_chng
player_proj_x+=player_proj_chng
display.blit(player_proj_img,(player_proj_x,player_proj_y))
proj()
if player_jumping==True:
player_jump_height=-3
player_jumptime-=1
player_y += player_jump_height
if player_jumptime<=0:
player_jumping=False
player_y +=3
player_y=650
player_jumptime=60
if player_y<=500:
player_jumping=False
            player_jump_height=0
player_y=650
player_jumptime=60
if player_x+35 >= test_2_x_0 and player_x <= test_2_x_0+35:
if player_y+50 >= test_2_y_0 and player_y<= test_2_y_0+60:
if not player_hit:
life-=1
player_hit=True
player_nohit_time=50
if player_hit:
if player_nohit_time>0:
player_nohit_time-=1
else:
player_hit=False
player_nohit_time=50
if life <= 0:
time.sleep(1)
#player vars
player_x=100
player_y=650
player_x_change=0
player_y_change=0
player_img=player_l_Img
player_proj_chng=0
player_proj_chng_chng=0
player_proj_x=player_x
player_proj_y=player_y
player_proj_wall_colide=True
player_proj_fireing=False
player_proj_dset=False
player_jump_height=0
player_y=650
player_jumptime=60
player_jumping=False
hit=0
player_nohit_time=60
player_hit=False
life=7
#enemy vars
test1_x_0_chng=3
test1_0_img=test_1Img_r
test1_x_0=330
test1_y_0=640
alien_0_dead=False
test_2_x_0_chng=3
test_2_x_0=330
test_2_y_0=640
jump_timer=100
jump_timer_chng=-1
jumping_timer=60
test_2_y_0=650
#lvl vars
lvl_comp=0
checkpoint=1
player_x += player_x_change
display.blit(player_img,(x,y))
player(player_x,player_y)
if alien_0_dead==False:
#enemy in level
def testguy_2(x,y):
global s_wait,test_2_img,test_2_x_0,test_2_x_chng,jump_timer,jump_timer_chng,jumping_timer,test_2_y_0,player_proj_x,player_proj_y,alien_0_dead,player_proj_wall_colide
if jump_timer==0:
s_wait+=1
if s_wait>=0:
jump.play()
s_wait=-120
jump_timer_chng=0
jumping_timer-=1
test_2_y_0-=2
if jumping_timer==0:
test_2_y_0=650
b_sound=False
jump_timer=200
jump_timer_chng=-1
jumping_timer=120
if test_2_x_0 >= 773:
test_2_x_chng=-3
test_2_img=test_2Img_l
if test_2_x_0<=0:
test_2_x_chng=3
test_2_img=test_2Img_r
if player_proj_x >= test_2_x_0 and player_proj_x+20 <= test_2_x_0+31:
if player_proj_y >= test_2_y_0 and player_proj_y+20 <= test_2_y_0+60:
kitty_dead.play()
alien_0_dead=True
player_proj_wall_colide=True
test_2_x_0+=test_2_x_chng
jump_timer+=jump_timer_chng
display.blit(test_2_img,(x,y))
testguy_2(test_2_x_0,test_2_y_0)
if alien_0_dead==True:
lvl_comp+=1
alien_0_dead=False
test_2_x_0=random.randint(10,705)
if lvl_comp>=20:
def win_text():
font = pygame.font.SysFont("Calibri",32, False, False)
text= font.render('Level Complete',True,black)
display.blit(text,(250,400))
win_text()
checkpoint=2
wait+=1
if wait>=20:
wait=0
time.sleep(2)
def lvl_three():
global wait,test_3_img,test_3_x_chng,test_3_x_0,shoot_timer,proj_x,projtest_x,projtest_x_chng,wall,timer,timer_chng,alien_0_dead,lvl_comp,player_img,player_x,player_y,player_y_change,player_x_change,player_x,player_proj_wall_colide,player_jumping,player_jump_height,player_jumptime,player_nohit_time,player_hit,hit,life,player_img,player_proj_chng,player_proj_x,player_proj_y,player_proj_wall_colide,player_proj_img,player_proj_fireing,player_proj_chng_chng,player_proj_dset,lvl_three_start
if lvl_three_start==False:
lvl_comp=0
lvl_three_start=True
def player(x,y):
global player_img,player_x,player_y,player_y_change,player_x_change,player_x,player_proj_wall_colide,player_jumping,player_jump_height,player_jumptime,player_nohit_time,player_hit,hit,life,test_2_x_0,test_2_y_0,lvl_comp
def proj():
global player_img,player_proj_chng,player_proj_x,player_proj_y,player_proj_wall_colide,player_proj_img,player_proj_fireing,player_proj_chng_chng,player_proj_dset
player_proj_y=player_y
if not player_proj_dset:
if player_img==player_r_Img or player_img==player_j_r_Img:
if player_proj_chng_chng==0:
player_proj_chng_chng=0
else:
player_proj_chng_chng=5
player_proj_img=player_proj_r_Img
if not player_proj_dset:
if player_img==player_l_Img or player_img==player_j_l_Img:
player_proj_img=player_proj_l_Img
if player_proj_chng_chng==0:
player_proj_chng_chng=0
else:
player_proj_chng_chng=-5
if player_proj_x<=0:
player_proj_x=player_x
player_proj_x_chng_chng=0
player_proj_wall_colide=True
if player_proj_x>=777:
player_proj_x=player_x
projtest_x_chng_chng=0
player_proj_wall_colide=True
if player_proj_wall_colide:
player_proj_dset=False
player_proj_x=player_x
player_proj_chng_chng=0
if player_proj_fireing:
player_proj_wall_colide=False
if player_img==player_r_Img or player_img==player_j_r_Img:
player_proj_chng_chng=5
player_proj_img=player_proj_r_Img
if player_img==player_l_Img or player_img==player_j_l_Img:
player_proj_img=player_proj_l_Img
player_proj_chng_chng=-5
player_proj_chng=player_proj_chng_chng
player_proj_x+=player_proj_chng
display.blit(player_proj_img,(player_proj_x,player_proj_y))
proj()
if player_jumping==True:
player_jump_height=-3
player_jumptime-=1
player_y += player_jump_height
if player_jumptime<=0:
player_jumping=False
player_y +=3
player_y=650
player_jumptime=60
if player_y<=500:
player_jumping=False
            player_jump_height=0
player_y=650
player_jumptime=60
if player_x+35 >= test_3_x_0 and player_x <= test_3_x_0+35:
if player_y+50 >= 645 and player_y<= 705:
if not player_hit:
life-=1
player_hit=True
player_nohit_time=50
if player_x+35 >= projtest_x and player_x <= projtest_x+10:
if player_y+50 >= 645 and player_y<= 665:
if not player_hit:
life-=1
player_hit=True
player_nohit_time=50
if player_hit:
if player_nohit_time>0:
player_nohit_time-=1
else:
player_hit=False
player_nohit_time=50
if life <= 0:
time.sleep(1)
#player vars
player_x=100
player_y=650
player_x_change=0
player_y_change=0
player_img=player_l_Img
player_proj_chng=0
player_proj_chng_chng=0
player_proj_x=player_x
player_proj_y=player_y
player_proj_wall_colide=True
player_proj_fireing=False
player_proj_dset=False
player_jump_height=0
player_y=650
player_jumptime=60
player_jumping=False
hit=0
player_nohit_time=60
player_hit=False
life=7
#enemy vars
test1_x_0_chng=3
test1_0_img=test_1Img_r
test1_x_0=330
test1_y_0=640
alien_0_dead=False
test_2_x_0_chng=3
test_2_x_0=330
test_2_y_0=640
jump_timer=100
jump_timer_chng=-1
jumping_timer=60
test_2_y_0=650
test_3_img=test_3Img_r
            # reset enemy/projectile state (values assumed)
            test_3_x_chng=3
            test_3_x_0=330
            shoot_timer=0
            proj_x=0
            projtest_x=330
            projtest_x_chng=0
            wall=True
            timer=120
            timer_chng=1
#lvl vars
lvl_comp=0
checkpoint=1
player_x += player_x_change
display.blit(player_img,(x,y))
player(player_x,player_y)
if alien_0_dead==False:
#enemy in level
def testguy_3(x,y):
global test_3_x_0,test_3_x_chng,test_3_img,timer,alien_0_dead
def projtest(y):
global projtest_x,projtest_x_chng,est_2_x_0,wall,timer,timer_chng,test_3_img
if test_3_img==test_3Img_l:
projtest_chng_chng=5
if test_3_img==test_3Img_r:
projtest_chng_chng=-5
if projtest_x<=0:
timer_chng=0
projtest_x=test_3_x_0
projtest_x_chng=0
wall=True
timer=120
timer_chng=1
if projtest_x>=777:
timer_chng=0
projtest_x=test_3_x_0
projtest_x_chng=0
wall=True
timer=120
timer_chng=1
if wall:
projtest_x=test_3_x_0
timer-=timer_chng
if timer==0:
timer=120
wall=False
projtest_x_chng=projtest_chng_chng
projtest_x-=projtest_x_chng
display.blit(projImg,(projtest_x,y))
projtest(655)
if test_3_x_0 >= 773:
test_3_x_chng=-3
test_3_img=test_3Img_l
if test_3_x_0<=0:
test_3_x_chng=3
test_3_img=test_3Img_r
if player_proj_x >= test_3_x_0 and player_proj_x+20 <= test_3_x_0+31:
if player_proj_y >= 645 and player_proj_y+20 <= 705:
enemy_dead.play()
alien_0_dead=True
player_proj_wall_colide=True
test_3_x_0+=test_3_x_chng
display.blit(test_3_img,(x,y))
testguy_3(test_3_x_0,645)
if alien_0_dead==True:
lvl_comp+=1
alien_0_dead=False
test_3_x_0=random.randint(10,705)
if lvl_comp>=20:
def win_text():
font = pygame.font.SysFont("Calibri",32, False, False)
text= font.render('Level Complete',True,black)
display.blit(text,(250,400))
win_text()
checkpoint=3
wait+=1
if wait>=20:
wait=0
time.sleep(2)
pygame.quit()
quit()
#life count
def life_count(life):
font = pygame.font.SysFont("Calibri",32, False, False)
text= font.render(str(life),True,black)
display.blit(lifecount,(0,0))
display.blit(text,(20,10))
def enemy_count(comp):
compdisp=str(comp)+'/20'
font = pygame.font.SysFont("Calibri",32, False, False)
text= font.render(compdisp,True,black)
display.blit(text,(720,30))
def instructions():
font = pygame.font.SysFont("Calibri",16, False, False)
text= font.render('''arrows=movement and jump z=fire sumner head''',True,white)
display.blit(text,(200,400))
#set the clock to manage how fast the screen updates
clock=pygame.time.Clock()
#Setup the loop control
rungame=True
#------------Main Program Loop -----------------
while rungame:
########## Events ######################
for event in pygame.event.get():
if event.type == pygame.QUIT:
rungame=False
display.fill(grass_green)
display.blit(bground,(0,0))
if hit>=1:
hit=0
######### Begin Game Logic ####################
if event.type == pygame.KEYDOWN:
if event.key==pygame.K_z:
player_proj_fireing = True
player_proj_dset=True
pew.play()
if event.key==pygame.K_UP:
player_jumping=True
jump.play()
if event.key==pygame.K_x:
hit+=2
if event.key==pygame.K_LEFT:
player_x_change= -4
player_img=player_l_Img
elif event.key==pygame.K_RIGHT:
player_x_change=4
player_img=player_r_Img
if event.type==pygame.KEYUP:
if event.key==pygame.K_UP:
if player_jumping:
player_jumping=True
else:
player_jumping=False
if event.key==pygame.K_z:
player_proj_fireing=False
if event.key==pygame.K_RIGHT or event.key == pygame.K_LEFT:
player_x_change=0
######### End Game Logic ######################
life_count(life)
enemy_count(lvl_comp)
if inst:
wait+=1
instructions()
if wait>=20:
time.sleep(7)
inst=False
if checkpoint==0:
lvl_one()
if checkpoint==1:
lvl_two()
if checkpoint==2:
lvl_three()
########## Update screen and clock #############
#update the screen with the new drawing
pygame.display.flip()
#set the clock speed
clock.tick(24)
pygame.quit()
quit()
```
#### File: JacobLedbetter_12_1_15_gmaeproj/data/game code.py
```python
import time
print('''press p to play
press c for credits
press q to quit''')
command=input()
if command == 'p':
print('''playing...''')
if command=='c':
print('''code by jacob
everything else by adam''')
time.sleep(5)
if command=='q':
quit()
else:
p=1
############## Import Libraries #######################
import pygame
import random
############## Define Variables/Initilization #######################
#All variable definitions, functions, and class objects go below this line
pygame.init()
#corner icon
iconImg=pygame.image.load('./icon.png')
pygame.display.set_icon(iconImg)
#Define color palette
# r g b
black = ( 0, 0, 0)
white = (255,255,255)
red = (255, 0, 0)
green = ( 0,255, 0)
blue = ( 0, 0,255)
grass_green=(15,219,20)
#set display parameters
size_x=800
size_y=700
size=[size_x,size_y]
#program name
display=pygame.display.set_mode(size)
pygame.display.set_caption("Bunny slipper crusade")
#image loading
bground=pygame.image.load('bg.png')
lifecount=pygame.image.load('heart_new.png')
player_r_Img = pygame.image.load('supersprite_r.png')
player_l_Img = pygame.image.load('supersprite_L.png')
player_j_r_Img=pygame.image.load('superspritejump_r.png')
player_j_l_Img=pygame.image.load('superspritejump_l.png')
player_proj_r_Img=pygame.image.load('sum_r.png')
player_proj_l_Img=pygame.image.load('sum_l.png')
#enemy img load
test_1Img_l = pygame.image.load('enemeesprite_l.png')
test_1Img_r = pygame.image.load('./enemeesprite_r.png')
#game vars
player_x=100
player_y=650
player_x_change=0
player_y_change=0
player_img=player_l_Img
player_proj_chng=0
player_proj_chng_chng=0
player_proj_x=player_x
player_proj_y=player_y
player_proj_wall_colide=True
player_proj_fireing=False
player_proj_dset=False
player_jump_height=0
player_y=650
player_jumptime=60
player_jumping=False
hit=0
player_nohit_time=60
player_hit=False
life=7
#enemy vars
test1_x_0_chng=3
test1_0_img=test_1Img_r
test1_x_0=330
test1_y_0=640
alien_0_dead=False
test1_x_1_chng=3
test1_1_img=test_1Img_r
test1_x_1=330
test1_y_1=640
alien_1_dead=False
test1_x_2chng=3
test1_2_img=test_1Img_r
test1_x_2=330
test1_y_2=640
alien_2_dead=False
#enemy object
lvl_comp=0
#game objects
def player(x,y):
global player_img,player_x,player_y,player_y_change,player_x_change,player_x,player_proj_wall_colide,player_jumping,player_jump_height,player_jumptime,player_nohit_time,player_hit,hit,life
def proj():
global player_img,player_proj_chng,player_proj_x,player_proj_y,player_proj_wall_colide,player_proj_img,player_proj_fireing,player_proj_chng_chng,player_proj_dset
player_proj_y=player_y
if not player_proj_dset:
if player_img==player_r_Img or player_img==player_j_r_Img:
if player_proj_chng_chng==0:
player_proj_chng_chng=0
else:
player_proj_chng_chng=5
player_proj_img=player_proj_r_Img
if not player_proj_dset:
if player_img==player_l_Img or player_img==player_j_l_Img:
player_proj_img=player_proj_l_Img
if player_proj_chng_chng==0:
player_proj_chng_chng=0
else:
player_proj_chng_chng=-5
if player_proj_x<=0:
player_proj_x=player_x
player_proj_x_chng_chng=0
player_proj_wall_colide=True
if player_proj_x>=777:
player_proj_x=player_x
projtest_x_chng_chng=0
player_proj_wall_colide=True
if player_proj_wall_colide:
player_proj_dset=False
player_proj_x=player_x
player_proj_chng_chng=0
if player_proj_fireing:
player_proj_wall_colide=False
if player_img==player_r_Img or player_img==player_j_r_Img:
player_proj_chng_chng=5
player_proj_img=player_proj_r_Img
if player_img==player_l_Img or player_img==player_j_l_Img:
player_proj_img=player_proj_l_Img
player_proj_chng_chng=-5
player_proj_chng=player_proj_chng_chng
player_proj_x+=player_proj_chng
display.blit(player_proj_img,(player_proj_x,player_proj_y))
proj()
if player_jumping==True:
player_jump_height=-3
player_jumptime-=1
player_y += player_jump_height
if player_jumptime<=0:
player_jumping=False
player_y +=3
player_y=650
player_jumptime=60
if player_y<=540:
player_jumping=False
        player_jump_height=0
player_y=650
player_jumptime=60
if player_x+35 >= test1_x_0 and player_x <= test1_x_0+35:
if player_y+50 >= test1_y_0 and player_y<= test1_y_0+60:
if not player_hit:
life-=1
player_hit=True
player_nohit_time=50
if player_hit:
if player_nohit_time>0:
player_nohit_time-=1
else:
player_hit=False
player_nohit_time=50
if life <= 0:
pygame.quit()
print('game_over')
time.sleep(7)
quit()
player_x += player_x_change
display.blit(player_img,(x,y))
#levels
def lvl_one():
global test1_x_0,test1_x_0_chng,test1_0_img,alien_0_dead,lvl_comp
if alien_0_dead==False:
#enemy in level
def testguy1_0(x,y):
global test1_x_0,test1_x_0_chng,test1_0_img,alien_0_dead,player_proj_wall_colide
if test1_x_0 >= 773:
test1_x_0_chng=-3
test1_0_img=test_1Img_l
if test1_x_0<=0:
test1_x_0_chng=3
test1_0_img=test_1Img_r
if player_proj_x >= test1_x_0 and player_proj_x+20 <= test1_x_0+31:
if player_proj_y >= test1_y_0 and player_proj_y+20 <= test1_y_0+60:
alien_0_dead=True
player_proj_wall_colide=True
test1_x_0+=test1_x_0_chng
display.blit(test1_0_img,(x,y))
testguy1_0(test1_x_0,test1_y_0)
if alien_0_dead==True:
lvl_comp+=1
alien_0_dead=False
test1_x_0=random.randint(10,705)
if lvl_comp>=25:
pygame.quit()
print('YOU WIN!!!')
time.sleep(7)
quit()
#life count
def life_count(life):
font = pygame.font.SysFont('./fontp',32)
text= font.render(str(life),True,black)
display.blit(lifecount,(0,0))
display.blit(text,(20,10))
def enemy_count(comp):
compdisp=str(comp)+'/25'
font = pygame.font.SysFont('./fontp',32)
text= font.render(compdisp,True,black)
display.blit(text,(740,30))
#set the clock to manage how fast the screen updates
clock=pygame.time.Clock()
#Setup the loop control
rungame=True
#------------Main Program Loop -----------------
while rungame:
########## Events ######################
for event in pygame.event.get():
if event.type == pygame.QUIT:
rungame=False
display.fill(grass_green)
display.blit(bground,(0,0))
if hit>=1:
hit=0
######### Begin Game Logic ####################
if event.type == pygame.KEYDOWN:
if event.key==pygame.K_z:
player_proj_fireing = True
player_proj_dset=True
if event.key==pygame.K_UP:
player_jumping=True
if event.key==pygame.K_x:
hit+=2
if event.key==pygame.K_LEFT:
player_x_change= -4
player_img=player_l_Img
elif event.key==pygame.K_RIGHT:
player_x_change=4
player_img=player_r_Img
if event.type==pygame.KEYUP:
if event.key==pygame.K_UP:
if player_jumping:
player_jumping=True
else:
player_jumping=False
if event.key==pygame.K_z:
player_proj_fireing=False
if event.key==pygame.K_RIGHT or event.key == pygame.K_LEFT:
player_x_change=0
######### End Game Logic ######################
life_count(life)
enemy_count(lvl_comp)
player(player_x,player_y)
lvl_one()
########## Update screen and clock #############
#update the screen with the new drawing
pygame.display.flip()
#set the clock speed
clock.tick(24)
pygame.quit()
quit()
```
#### File: class work/stuff.old/ch7.py
```python
def insult_generator(word,name):
print('''you '''+word+' '+name)
def one(list_of_numbers):
cnt=0
for n in list_of_numbers:
print(n)
n=n%2
if n !=0:
cnt=cnt+1
print('''there are''',cnt,'''odd numbers''')
one([1,2,3,4,5,13,10])
def two(list_of_numbers):
cnt=0
for n in list_of_numbers:
print(n)
n=n%2
if n ==0:
cnt=cnt+1
print('''there are''',cnt,'''even numbers''')
two([1,2,3,4,5,13,10])
def three(list_of_num):
negadd=0
do_nothing=''
for n in list_of_num:
print(n)
if n <0:
negadd=negadd+n
    print('''the sum of all the negative numbers is''',negadd)
three([1,-2,3,-4,-10])
def four(list_of_wrds):
cnt=0
for wrd in list_of_wrds:
print(wrd)
n=len(wrd)
if n == 5:
cnt=cnt+1
    print('''there are/is''',cnt,'''word(s) with length 5''')
four(['hey','you','come','across','as','insulting','sometimes','joe','apple'])
def five(list_of_num):
firsteven=False
numsum=0
for n in list_of_num:
print(n)
if firsteven==False:
if n%2 ==0:
firsteven=True
                print(n,'''is the first even''')
else:
numsum=numsum+n
else:
numsum=numsum+n
print(numsum)
five([1,2,3,4,5,6])
def six(list_of_words):
    sam=False
    wrd=0
    nosam=0
    for w in list_of_words:
        if w in ('SAM', 'sam', 'Sam'):
wrd=wrd+1
sam=True
else:
wrd=wrd+1
nosam=nosam+1
#if sam:
#break
print('''there are''',wrd,'''words''')
six(['i','know','sam','the','man'])
```
#### File: class work/stuff.old/turtlestactoe().py
```python
import turtle
#derfine screen and turtles
wn=turtle.Screen()
x=turtle.Turtle()
o=turtle.Turtle()
o.color('hotpink')
x.color('cyan')
o.shape('turtle')
x.shape('turtle')
board=turtle.Turtle()
wn.bgcolor("yellow")
board.speed(0)
x.speed(0)
o.speed(0)
def create_board():
board.pu()
board.forward(150)
def line_down():
board.pendown()
board.right(90)
board.forward(400)
board.left(180)
board.forward(800)
board.forward(-400)
board.penup()
line_down()
board.pu()
board.left(90)
board.forward(300)
line_down()
board.pu()
board.left(180)
board.forward(150)
def line_left():
board.pd()
board.left(90)
board.forward(800)
board.forward(-2400)
board.forward(1600)
board.right(270)
line_left()
board.pu()
board.forward(300)
line_left()
board.forward(1000)
def circle():
o.pu()
o.right(90)
o.forward(50)
o.left(90)
o.pd()
o.pu()
o.forward(250)
o.pd()
o.pu()
o.left(90)
o.forward(230)
o.right(90)
o.pd()
o.circle(50)
o.pu()
o.right(180)
o.forward(250)
o.right(180)
o.pd()
o.circle(50)
o.right(180)
o.pu()
o.forward(250)
o.right(180)
o.pd()
o.circle(50)
o.pu()
o.right(90)
o.forward(230)
o.pd()
o.left(90)
o.circle(50)
o.pu()
o.right(90)
o.forward(230)
o.left(90)
o.pd()
o.pu()
o.forward(250)
o.pd()
o.circle(50)
o.pu()
o.forward(250)
o.pd()
o.pu()
o.forward(275)
def make_x ():
x.pu()
x.left(90)
x.forward(50)
x.left(90)
x.forward(25)
x.pd()
x.left(180)
def x_right():
for i in range(10):
x.right(90)
x.forward(5)
x.left(90)
x.forward(5)
x_right()
x.pu()
x.left(90)
x.forward(50)
x.left(180)
x.pd()
x_right()
x.pu()
x.left(90)
x.forward(250)
x.left(90)
x.forward(50)
x.right(90)
x.pd()
x_right()
x.pu()
x.left(90)
x.forward(50)
x.right(180)
x.pd()
x_right()
x.pu()
x.forward(200)
x.left(90)
x.pd()
x_right()
x.pu()
x.left(90)
x.forward(50)
x.right(180)
x.pd()
x_right()
x.pu()
x.right(90)
x.forward(500)
x.right(90)
x.forward(50)
x.left(270)
x.pd()
x_right()
x.pu()
x.left(90)
x.forward(50)
x.right(180)
x.pd()
x_right()
create_board()
circle()
make_x()
```
#### File: for funzies/cs/code.py
```python
import time
import pygame
#init
pygame.init()
#700x700
display_width=700
display_height=700
#color def
black=(0,0,0)
white=(255,255,255)
#surface prop
gameDisplay=pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('chicken fighter')
#clock
clock=pygame.time.Clock()
#pics
chickenImg=pygame.image.load('./chicken_2.jpg')
chicken2Img=pygame.image.load('./chicken_1.jpg')
bulletImg=pygame.image.load('./bullet2.jpg')
bullet2Img=pygame.image.load('./bullet.jpg')
#spritwe loading
def chick1(x,y):
gameDisplay.blit(chickenImg,(x,y))
def chick2(x,y):
gameDisplay.blit(chicken2Img,(x,y))
def b1(x,y):
gameDisplay.blit(bulletImg,(x,y))
def b2(x,y):
gameDisplay.blit(bullet2Img,(x,y))
#inital variables
c1_x=525
c1_y=350
c2_x=175
c2_y=350
c1_x_chng=0
c1_y_chng=0
c2_x_chng=0
c2_y_chng=0
b1_x=c1_x
b1_y=c1_y
b2_x=c2_x+30
b2_y=c2_y
b1_chng=5
b1_ext=False
b2_ext=False
b2_chng=5
red_ded=False
blue_ded=False
tie=False
#game loop
while not red_ded or blue_ded or tie:
for event in pygame.event.get():
if event.type == pygame.QUIT:
tie=True
#fill white
gameDisplay.fill(white)
#p1 movement
if event.type == pygame.KEYDOWN:
if event.key==pygame.K_LEFT:
c1_x_chng= -5
elif event.key==pygame.K_RIGHT:
c1_x_chng=5
elif event.key==pygame.K_UP:
c1_y_chng= -5
elif event.key==pygame.K_DOWN:
c1_y_chng= 5
elif event.key==pygame.K_RCTRL:
b1_ext=True
if event.type==pygame.KEYUP:
if event.key==pygame.K_RIGHT or event.key == pygame.K_LEFT:
c1_x_chng=0
elif event.key==pygame.K_UP or event.key==pygame.K_DOWN:
c1_y_chng=0
elif event.key==pygame.K_RCTRL:
if b1_ext:
b1_ext=True
else:
b1_ext=False
c1_x += c1_x_chng
c1_y += c1_y_chng
#p2 movement
if event.type == pygame.KEYDOWN:
if event.key==pygame.K_a:
c2_x_chng= -5
elif event.key==pygame.K_d:
c2_x_chng=5
elif event.key==pygame.K_w:
c2_y_chng= -5
elif event.key==pygame.K_s:
c2_y_chng= 5
elif event.key==pygame.K_e:
b2_ext=True
if event.type==pygame.KEYUP:
if event.key==pygame.K_d or event.key == pygame.K_a:
c2_x_chng=0
elif event.key==pygame.K_w or event.key==pygame.K_s:
c2_y_chng=0
elif event.key==pygame.K_e:
if b2_ext:
b2_ext=True
else:
b2_ext=False
c2_x += c2_x_chng
c2_y += c2_y_chng
#p1 bullet movement
b1_y=c1_y
if b1_ext:
if b1_x > 0:
b1_x-=b1_chng
b1(b1_x,b1_y)
else:
b1_ext=False
b1_x=c1_x
#p2 bullet movement
b2_y=c2_y
if b2_ext:
if b2_x < 700:
b2_x+=b2_chng
b2(b2_x,b2_y)
else:
b2_ext=False
b2_x=c2_x
#p1 bullet to p2 colsion
if b1_x >= c2_x and b1_x <= c2_x+72:
if b1_y >= c2_y and b1_y<=c2_y+63:
print('red wins')
time.sleep(2)
pygame.quit()
#p2 bullet to p1 colsion
if b2_x >= c1_x and b2_x <= c1_x +72:
if b2_y >= c1_y and b2_y<=c1_y+63:
print('blue wins')
time.sleep(2)
pygame.quit()
#sprite display
chick1(c1_x,c1_y)
chick2(c2_x,c2_y)
#update
pygame.display.update()
#clock speed
clock.tick(30)
#game end(broken?)
pygame.QUIT()
quit()
#!!!COMPLETE!!!#
``` |
{
"source": "jll123567/Sysh",
"score": 4
} |
#### File: Sysh/sysModules/Personality.py
```python
class Personality:
"""
Represents a user's decision making.
    The idea is to make all Conditional objects in ``goals`` evaluate True while keeping those in ``limits`` False.
This allows a user to make decisions by mapping functions to changed output.
:param list gl: Conditionals to make true.
:param list lim: Conditionals to keep false.
:param list funct: Functions usable by users.
"""
def __init__(self, gl=None, lim=None, funct=None):
"""Constructor"""
if gl is None:
self.goals = []
else:
self.goals = gl
if lim is None:
self.limits = []
else:
self.limits = lim
if funct is None:
self.functions = []
else:
self.functions = funct
def __str__(self):
return "g:{}, l:{} f:{}".format(self.goals, self.limits, self.functions)
class Conditional:
"""
Put two objects in to have a constantly updating way to compare them.
:param any a: Thing one.
:param str a_atr: The part of a to check.
:param any b: Thing two.
:param str b_atr: The part of b to check.
:param str evalType: How Conditional compares a and b.
Methods
distance(): Calculate how far apart a and b are.
"""
def __init__(self, a, a_atr, b, b_atr, evalType):
"""Constructor"""
self.a = a
self.aAttribute = a_atr
self.b = b
self.bAttribute = b_atr
self.evalType = evalType
def __bool__(self):
"""
Returns true or false depending on a, b, and evalType. Defaults to false if bad evalType.
"""
e = self.evalType
if self.aAttribute is None:
a = self.a
else:
a = self.a.__getattribute__(self.aAttribute)
if self.bAttribute is None:
b = self.b
else:
b = self.b.__getattribute__(self.bAttribute)
if e == '=':
return a == b
elif e == '!':
return a != b
elif e == ">":
return a > b
elif e == "<":
return a < b
elif e == ">=":
return a >= b
elif e == "<=":
return a <= b
elif e == "is":
return a is b
else:
return False
def distance(self):
"""
Calculate how far apart a and b are.
        For numerical values it is the absolute value of the difference.
        For booleans this returns 0 for matching values and 1 otherwise.
        For lists, strings, and tuples (``a`` only), return how many elements of ``a`` have no match in ``b``.
"""
if self.aAttribute is None:
a = self.a
else:
a = self.a.__getattribute__(self.aAttribute)
if self.bAttribute is None:
b = self.b
else:
b = self.b.__getattribute__(self.bAttribute)
if isinstance(a, (int, float)) and isinstance(b, (int, float)):
return abs(a - b)
elif isinstance(a, bool) and isinstance(b, bool):
if a == b:
return 0
else:
return 1
elif isinstance(a, (list, str, tuple)) and isinstance(b, (list, str)):
matches = 0
b = list(b)
for i in a:
for f in b:
if i == f:
matches += 1
b.remove(f)
break
return abs(a.__len__() - matches)
else:
return None
```
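A minimal usage sketch for the `Conditional`/`Personality` classes above; the import path and the `Thing` class are assumed for illustration:

```python
# Hypothetical usage sketch; the import path and Thing class are made up.
from sysModules.Personality import Personality, Conditional

class Thing(object):
    def __init__(self, hp):
        self.hp = hp

a = Thing(10)
b = Thing(3)

# Goal: make a.hp equal b.hp; limit: a.hp must never drop below 0.
goal = Conditional(a, "hp", b, "hp", "=")
limit = Conditional(a, "hp", 0, None, "<")
pers = Personality(gl=[goal], lim=[limit])

print(bool(goal), goal.distance())  # False 7
a.hp = 3
print(bool(goal), goal.distance())  # True 0
```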
#### File: Sysh/sysModules/Switchers.py
```python
class Switcher:
"""
Base class for Switcher type modules.
Use this to create modules for switching between prefab modules.
:param dict elements: The elements that can be swapped between.
Format should be {"unique name of element": element}
:param str current: The name of the currently used element.
"""
def __init__(self, elements=None, current=None):
if elements is None:
self.elements = {}
else:
self.elements = elements
self.current = current
self._previous = None
def setCurrent(self, name):
"""
Set the current element to that of <name>.
Sets previous.
:param str name: The name of the element. Should be a valid index to elements.
"""
self._previous = self.current
self.current = name
def switchWithPrevious(self):
"""Swap the contents of current and previous."""
c = self.current
self.current = self._previous
self._previous = c
def getCurrentFromElements(self):
"""Return an element from elements using current as the index."""
if self.current is not None:
return self.elements[self.current]
else:
return None
class ModelSwitcher(Switcher):
"""
Switcher for model.
Put this module at <your_object>.modelSwitcher.
:param Taskable owner: The owner of this module.
:param dict elements: The elements that can be swapped between.
Format should be {"unique name of element": element}
:param str current: The name of the currently used element.
"""
def __init__(self, owner, elements=None, current=None):
self.owner = owner
if elements is None:
super().__init__()
else:
super().__init__(elements, current)
def setCurrentModel(self, name):
"""
Set the current Model using <name>.
Sets previous.
:param str name: The name of the element. Should be a valid index to elements.
"""
self.setCurrent(name)
self.owner.model = self.getCurrentFromElements()
def switchModelWithPrevious(self):
"""Swap the contents of current and previous and set model."""
self.switchWithPrevious()
self.owner.model = self.getCurrentFromElements()
functionSuiteString = """
def modelSwitcherSetCurrentModel(self, name):
    self.modelSwitcher.setCurrentModel(name)
def modelSwitcherSwitchModelWithPrevious(self):
self.modelSwitcher.switchModelWithPrevious()
""" # Install this with Taskable.installFunctionSuite()
class PersonalitySwitcher(Switcher):
"""
Switcher for personality.
Put this module at <your_object>.personalitySwitcher.
:param Taskable owner: The owner of this module.
:param dict elements: The elements that can be swapped between.
Format should be {"unique name of element": element}
:param str current: The name of the currently used element.
"""
def __init__(self, owner, elements=None, current=None):
self.owner = owner
if elements is None:
super().__init__()
else:
super().__init__(elements, current)
def setCurrentPers(self, name):
"""
Set the current personality using <name>.
Sets previous.
:param str name: The name of the element. Should be a valid index to elements.
"""
self.setCurrent(name)
self.owner.personality = self.getCurrentFromElements()
def switchModelWithPrevious(self):
"""Swap the contents of current and previous and set personality."""
self.switchWithPrevious()
self.owner.personality = self.getCurrentFromElements()
functionSuiteString = """
def personalitySwitcherSetCurrentPers(self, name):
self.personalitySwitcher.setCurrentPers(name)
def personalitySwitcherSwitchModelWithPrevious(self):
self.personalitySwitcher.switchModelWithPrevious()
""" # Install this with Taskable.installFunctionSuite()
```
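A small sketch of wiring `ModelSwitcher` to an owner object; the `Owner` class is invented here, while the real code expects a Taskable with a `model` attribute:

```python
# Hypothetical owner object; import path assumed.
from sysModules.Switchers import ModelSwitcher

class Owner(object):
    def __init__(self):
        self.model = None

owner = Owner()
owner.modelSwitcher = ModelSwitcher(owner, elements={"idle": "idle_model", "run": "run_model"})

owner.modelSwitcher.setCurrentModel("idle")
print(owner.model)  # idle_model
owner.modelSwitcher.setCurrentModel("run")
print(owner.model)  # run_model
owner.modelSwitcher.switchModelWithPrevious()
print(owner.model)  # idle_model
```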
#### File: Sysh/sysObjects/Data.py
```python
from sysObjects.Tagable import Tagable
class Data(Tagable):
"""
Class for storing some arbitrary data with tags.
Inherit from this if you plan to make a more robust object to store your particular type of data.
**Attributes**:
* **tags** (`dict`): Tags.
* **storage** (`any`): Stored data, could be anything.
:param str id: The id of this object.
:param any storage: The data you plan to store.
:param dict tags: Tags. Defaults to
``{"id": id, "dataType": None, "relevancy": [0], "interest": [0], "content": []}``
"""
def __init__(self, id, storage=None, tags=None):
"""Constructor"""
super().__init__(tags)
self.tags['id'] = id
self.tags["dataType"] = "None"
self.tags["relevancy"] = [0] # Make a formal format for this later.
self.tags["interest"] = [0]
self.tags["content"] = [] # A list of strings that describe the content of the data. Not just the type.
self.storage = storage
def __str__(self):
return "{}:{}".format(self.tags["dataType"], self.storage)
def setDataType(self, dataType: str):
"""Set the dataType of this Data to ``dataType``."""
self.tags["dataType"] = dataType
```
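A quick sketch of storing something in `Data` (import path assumed):

```python
# Hypothetical usage; import path assumed.
from sysObjects.Data import Data

d = Data("memory-0001", storage=[1, 2, 3])
d.setDataType("list")
d.tags["content"].append("numbers seen today")
print(d)  # list:[1, 2, 3]
print(d.tags["id"], d.tags["dataType"])
```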
#### File: Sysh/sysObjects/Profiles.py
```python
from random import Random
# TODO: This needs to be entirely redone.
class Profile:
"""
Produce objects randomly.
Overload :py:meth:`factoryProfile` and use it to produce objects using random values.
:param int mainSeed: The seed for the :py:attr:`mainRandom` used to generate seeds for Randoms in produceRandint,
defaults to ``None``.
:param tuple mainState: A state to use for :py:attr:`mainRandom`, defaults to ``None``, not used if set to ``None``.
"""
def __init__(self, mainSeed=None, mainState=None):
if mainSeed is None:
self.mainRandom = Random()
else:
self.mainRandom = Random(mainSeed)
self.mainSeed = mainSeed
if mainState is not None:
self.mainRandom.setstate(mainState)
def produceRandint(self, rRange):
"""
        Produce an object using :py:meth:`factoryProfile` and a :py:class:`Random` seeded with an int drawn from ``rRange``.
        :param tuple rRange: Inclusive (low, high) range of ints from which the seed for the Random passed to :py:meth:`factoryProfile` is drawn.
:return: An object defined by factoryProfile.
:rtype: any
"""
return self.factoryProfile(Random(self.mainRandom.randint(rRange[0], rRange[1])))
def produceRand(self):
"""
Produce an object using :py:meth:`factoryProfile` and a :py:class:`Random`.
:return: An object defined by :py:meth:`factoryProfile`.
:rtype: any
"""
return self.factoryProfile(Random())
def produceSeed(self, seed):
"""
Produce an object using :py:meth:`factoryProfile` and :py:class:`Random` with `seed`.
:param int seed: The seed for the :py:class:`Random` passed to :py:meth:`factoryProfile`.
:return: An object defined by factoryProfile.
:rtype: any
"""
return self.factoryProfile(Random(seed))
def getMainRandomState(self):
"""
Get the state of the :py:attr:`mainRandom`.
:return: The state of :py:attr:`mainRandom`.
:rtype: tuple
"""
return self.mainRandom.getstate()
@staticmethod
def factoryProfile(rand):
"""
Overload this method. Take the :py:class:`Random` and output an object.
:param Random rand: The :py:class:`Random` that you use to create your object.
:return: The object you make.
:rtype: any
"""
return
class GeneticProfile:
"""
Produce objects based on values from their parents.
Overload :py:meth:`factoryProfile` and use it to produce objects using semi-random values.
:param walkBounds: Bounds for how far to randomly walk averaged values from parents.
A single float can be used for all walks or a list of floats can be used to specify bounds for each value.
:type walkBounds: float or list
:param int mainSeed: The seed for the :py:attr:`mainRandom` used to generate seeds for Randoms in
:py:meth:`produceRandint`. If ``None`` is provided then dont provide a seed for the :py:attr:`mainRandom`.
Defaults to ``None``.
:type mainSeed: int or None
:param tuple mainState: A state to use for py:attr:`mainRandom`, defaults to ``None``, not used if set to ``None``.
"""
def __init__(self, walkBounds, mainSeed=None, mainState=None):
if mainSeed is None:
self.mainRandom = Random()
else:
self.mainRandom = Random(mainSeed)
self.mainSeed = mainSeed
if mainState is not None:
self.mainRandom.setstate(mainState)
self.walkBounds = walkBounds
@staticmethod
def getLongest(vals):
"""
Get the length of the longest sublist of ``vals``.
:param list vals: A ``list`` of lists.
:return: The length of the longest sublist of vals.
:rtype: int
"""
longest = 0 # Get length of longest parent.
for parent in vals:
if longest < parent.__len__():
longest = parent.__len__()
return longest
@staticmethod
def convertToSetsList(vals, longest):
"""
        Convert ``vals`` from a list of per-parent value lists to a list of value sets, where the i-th set holds the i-th value from each parent.
        The number of outputted sets is determined by ``longest``.
:param list vals: The list of values to convert the formatting of.
:param int longest: The length of the parent values which have the longest length.
:return: The converted values.
:rtype: list
"""
sets = [] # Go from vals = [<parent>, ...] parent=[<num>, ...]
# to sets = [<numSet>, ...] numSet = [<numFromParent>, ...].
for idx in range(longest):
numSet = []
for parent in vals:
try: # If a list is shorter than longest. When trying to index an out of bound value just skip this
# list.
numSet.append(parent[idx])
except IndexError:
pass
sets.append(numSet)
return sets
@staticmethod
def average(vals):
"""
Take a ``list`` of lists of values and return the average of each list's values.
:param list vals: A ``list`` of lists of values.
:return: A ``list`` of averages.
:rtype: list
"""
for idx in range(vals.__len__()): # Average each numSet
some = 0
for num in vals[idx]:
some += num
vals[idx] = some / vals[idx].__len__()
return vals
def applyRandomWalk(self, val, walkBounds):
"""
        Add a random value between ``-walkBounds`` and ``walkBounds`` to ``val``.
        :param float val: Value to apply the walk to.
        :param float walkBounds: Half-width of the symmetric interval from which the walk is drawn.
:return: Walked value.
:rtype: float
"""
rand = self.mainRandom
return val + rand.triangular((-1 * walkBounds), walkBounds)
def applyRandomWalks(self, vals, walkBounds):
"""
        Apply a random walk to each value in ``vals``. Bounds are defined by ``walkBounds``.
        Where the lengths of ``vals`` and ``walkBounds`` mismatch, no walk is applied, so keep them the same length.
:param list vals: The list of values to apply walks to.
:param float/list walkBounds: The bound for each value. Can use a single float for all values.
:return: List of walked values.
:rtype: list
"""
bounds = []
        if isinstance(walkBounds, (int, float)):
for _ in range(vals.__len__()):
bounds.append(walkBounds)
else:
bounds = walkBounds
for idx in range(vals.__len__()):
try:
vals[idx] = self.applyRandomWalk(vals[idx], bounds[idx])
except IndexError: # If bounds are shorter than averages. Don't apply walk.
pass
return vals
def produceWithParents(self, pVals):
"""
Produce an object using a list of values from multiple parents and the :py:meth:`factoryProfile`.
        This method creates a list of average values from the values of all the parents, then applies a random walk to
        each average whose bounds are defined by :py:attr:`walkBounds`. It then feeds these walked averages into
        :py:meth:`factoryProfile` and returns the averages and the produced object in a tuple.
:param list pVals: List of the input values from each parent.
:return: Tuple of the values fed into :py:meth:`factoryProfile`.
:rtype: tuple
"""
longest = self.getLongest(pVals)
sets = self.convertToSetsList(pVals, longest)
averages = self.average(sets)
averages = self.applyRandomWalks(averages, self.walkBounds)
return averages, self.factoryProfile(averages)
def getMainRandomState(self):
"""
Get the state of the :py:attr:`mainRandom`.
:return: The state as a Tuple
:rtype: tuple
"""
return self.mainRandom.getstate()
@staticmethod
def factoryProfile(vals):
"""
        Overload this method. Take the list of walked average values and output an object.
        :param list vals: List of values produced by :py:meth:`produceWithParents`.
        :return: The object you make.
:rtype: any
"""
return
``` |
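A sketch of how `Profile` is meant to be specialized; the subclass and the dict it produces are invented for illustration, and the import path is assumed:

```python
# Hypothetical subclass for illustration; import path assumed.
from sysObjects.Profiles import Profile

class PointProfile(Profile):
    @staticmethod
    def factoryProfile(rand):
        # Build an object (here just a dict) from the supplied Random.
        return {"x": rand.uniform(-1, 1), "y": rand.uniform(-1, 1)}

prof = PointProfile(mainSeed=42)
print(prof.produceSeed(7))              # reproducible: same seed, same point
print(prof.produceRandint((0, 10**6)))  # seed drawn from mainRandom
print(prof.produceRand())               # fully random
```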
{
"source": "jllam/tides",
"score": 3
} |
#### File: jllam/tides/tides.py
```python
import requests
from bs4 import BeautifulSoup
import re
import time
class TidesError(Exception):
pass
class DownloadError(TidesError):
pass
class TideTimeParseError(TidesError):
pass
class Tides():
def __init__(self):
pass
def get_daylight_low_tides(self, url):
'''
        get low-tide data points that occur during daylight for the given url
'''
out = []
page = self.download_page(url)
tide_data,sunrise,sunset = self.parse_page(page)
for tide_timestamp,data in tide_data:
if sunrise < tide_timestamp and tide_timestamp < sunset:
out.append(data)
else:
print('Data point was not during sun up hours.')
return out
def download_page(self, url):
'''
download page
'''
res = requests.get(url)
if res.ok:
return res.content
else:
raise DownloadError(f"Unable to get {url}")
def parse_page(self, page):
'''
        parse page for tide data and sunrise/sunset times
'''
soup = BeautifulSoup(page, 'html.parser')
tides = self.parse_tides_data(soup)
sunrise,sunset = self.parse_sun_times(soup)
return (tides, sunrise, sunset)
def parse_tides_data(self, soup):
'''
getting tides datapoints
'''
# find table in div class = tide-header-today
today_table = soup.find('div',{'class':'tide-header-today'}).find('table')
# get all rows with 1st td = 'Low Tide'
low_tide_data = []
for row in today_table.findChildren('tr'):
tds = row.findAll('td')
if tds and tds[0].text == 'Low Tide':
                #TODO: issues parsing 00:XXam; need to rework this
try:
tide_str = tds[1].text.split('(')[0].strip()
time_obj = time.strptime(tide_str,"%I:%M %p")
except ValueError as e:
raise TideTimeParseError(f"unable to parse time: {tide_str}")
low_tide_data.append((time_obj, (tds[1].text, tds[2].text)))
return low_tide_data
def parse_sun_times(self, soup):
'''
getting sunrise/sunset times
'''
summary_p = soup.find('p', {'class':'tide-header-summary'}).text
        match = re.search(r'.*Sunrise is at\s*((?:1[0-2]|0?[1-9]):[0-5][0-9][ap]m) and sunset is at\s*((?:1[0-2]|0?[1-9]):[0-5][0-9][ap]m)', summary_p)
sunrise = match.group(1)
sunset = match.group(2)
sunrise = time.strptime(sunrise,"%I:%M%p")
sunset = time.strptime(sunset,"%I:%M%p")
return sunrise,sunset
if __name__ == '__main__':
urls = [
(
'Half Moon Bay, California',
'https://www.tide-forecast.com/locations/Half-Moon-Bay-California/tides/latest'),
(
'Huntington Beach, California',
'https://www.tide-forecast.com/locations/Huntington-Beach/tides/latest'),
(
'Providence, Rhode Island',
'https://www.tide-forecast.com/locations/Providence-Rhode-Island/tides/latest'),
(
'Wrightsville Beach, North Carolina',
'https://www.tide-forecast.com/locations/Wrightsville-Beach-North-Carolina/tides/latest')
]
tides = Tides()
for location, url in urls:
print(location)
try:
out = tides.get_daylight_low_tides(url)
except TideTimeParseError as e:
print(f"skipping: {e}")
continue
print(out)
``` |
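The daylight check above leans on the fact that `time.strptime` fills unspecified date fields with the same defaults, so the resulting `struct_time` values compare by time of day; a quick check of that assumption:

```python
import time

sunrise = time.strptime("6:45am", "%I:%M%p")
low_tide = time.strptime("11:02 am", "%I:%M %p")
sunset = time.strptime("7:58pm", "%I:%M%p")
print(sunrise < low_tide < sunset)  # True
```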
{
"source": "jllanfranchi/i3cols",
"score": 3
} |
#### File: i3cols/i3cols/mctree_phys.py
```python
from __future__ import absolute_import, division, print_function
__author__ = "<NAME> for the IceCube Collaboration"
__all__ = [
"get_null_particle",
"get_best_filter",
"true_filter",
"is_cascade",
"is_neutrino",
"is_nucleus",
"is_track",
"is_muon",
"more_energetic",
"get_most_energetic",
"get_most_energetic_neutrino",
"get_most_energetic_muon",
"get_most_energetic_track",
]
import copy
import numba
import numpy as np
from i3cols import dtypes, enums
@numba.njit(cache=True, error_model="numpy")
def get_null_particle():
"""Get a null particle for use when an invalid / n/a result is desired.
Returns
-------
null_particle : shape () ndarray of dtype I3PARTICLE_T
"""
null_particle = np.empty(shape=1, dtype=dtypes.I3PARTICLE_T)[0]
# TODO: set majorID, minorID to random values?
null_particle["id"]["majorID"] = 0
null_particle["id"]["minorID"] = 0
null_particle["pdg_encoding"] = 0
null_particle["shape"] = enums.ParticleShape.Null
null_particle["pos"]["x"] = np.nan
null_particle["pos"]["y"] = np.nan
null_particle["pos"]["z"] = np.nan
null_particle["dir"]["zenith"] = np.nan
null_particle["dir"]["azimuth"] = np.nan
null_particle["time"] = np.nan
null_particle["energy"] = np.nan
null_particle["length"] = np.nan
null_particle["speed"] = np.nan
null_particle["fit_status"] = enums.FitStatus.NotSet
null_particle["location_type"] = enums.LocationType.Anywhere
return null_particle
@numba.njit(error_model="numpy")
def get_best_filter(particles, filter_function, cmp_function):
"""Get best particle according to `cmp_function`, only looking at particles
for which `filter_function` returns `True`. If no particle meeting these
criteria is found, returns a copy of `NULL_I3PARTICLE`.
See dataclasses/public/dataclasses/physics/I3MCTreeUtils.h
Parameters
----------
    particles : ndarray of dtype I3PARTICLE_T
filter_function : numba Callable(I3PARTICLE_T)
cmp_function : numba Callable(I3PARTICLE_T, I3PARTICLE_T)
Returns
-------
best_particle : shape () ndarray of dtype I3PARTICLE_T
"""
best_particle = get_null_particle()
for particle in particles:
if filter_function(particle) and cmp_function(test=particle, ref=best_particle):
best_particle = particle
return best_particle
@numba.njit(cache=True, error_model="numpy")
def true_filter(test): # pylint: disable=unused-argument
"""Simply return True regardless of the input.
Designed to be used with `get_best_filter` where no filtering is desired;
intended to have same effect as `IsParticle` function defined in
dataclasses/private/dataclasses/physics/I3MCTreePhysicsLibrary.cxx
Parameters
----------
test
Returns
-------
True : bool
"""
return True
@numba.njit(cache=True, error_model="numpy")
def is_cascade(particle):
"""Test if particle is a cascade.
See dataclasses/private/dataclasses/physics/I3Particle.cxx
Parameters
----------
particle : shape () ndarray of dtype I3PARTICLE_T
Returns
-------
is_cascade : bool
"""
return (
particle["shape"]
in (enums.ParticleShape.Cascade, enums.ParticleShape.CascadeSegment,)
or particle["pdg_encoding"]
in (
enums.ParticleType.EPlus,
enums.ParticleType.EMinus,
enums.ParticleType.Brems,
enums.ParticleType.DeltaE,
enums.ParticleType.PairProd,
enums.ParticleType.NuclInt,
enums.ParticleType.Hadrons,
enums.ParticleType.Pi0,
enums.ParticleType.PiPlus,
enums.ParticleType.PiMinus,
)
or (
particle["shape"] != enums.ParticleShape.Primary
and (
is_nucleus(particle)
or particle["pdg_encoding"]
in (
enums.ParticleType.PPlus,
enums.ParticleType.PMinus,
enums.ParticleType.Gamma,
)
)
)
)
@numba.njit(cache=True, error_model="numpy")
def is_neutrino(particle):
"""Test if particle is a neutrino.
See dataclasses/private/dataclasses/physics/I3Particle.cxx
Parameters
----------
particle : shape () ndarray of dtype I3PARTICLE_T
Returns
-------
is_neutrino : bool
"""
return particle["pdg_encoding"] in (
enums.ParticleType.NuE,
enums.ParticleType.NuEBar,
enums.ParticleType.NuMu,
enums.ParticleType.NuMuBar,
enums.ParticleType.NuTau,
enums.ParticleType.NuTauBar,
enums.ParticleType.Nu,
)
@numba.njit(cache=True, error_model="numpy")
def is_nucleus(particle):
"""Test if particle is a nucleus.
See dataclasses/private/dataclasses/physics/I3Particle.cxx
Parameters
----------
particle : shape () ndarray of dtype I3PARTICLE_T
Returns
-------
is_nucleus : bool
"""
return 1000000000 <= abs(particle["pdg_encoding"]) <= 1099999999
@numba.njit(cache=True, error_model="numpy")
def is_track(particle):
"""Test if particle is a track.
See dataclasses/private/dataclasses/physics/I3Particle.cxx
Parameters
----------
particle : shape () ndarray of dtype I3PARTICLE_T
Returns
-------
is_track : bool
"""
return (
particle["shape"]
in (
enums.ParticleShape.InfiniteTrack,
enums.ParticleShape.StartingTrack,
enums.ParticleShape.StoppingTrack,
enums.ParticleShape.ContainedTrack,
)
or particle["pdg_encoding"]
in (
enums.ParticleType.MuPlus,
enums.ParticleType.MuMinus,
enums.ParticleType.TauPlus,
enums.ParticleType.TauMinus,
enums.ParticleType.STauPlus,
enums.ParticleType.STauMinus,
enums.ParticleType.SMPPlus,
enums.ParticleType.SMPMinus,
enums.ParticleType.Monopole,
enums.ParticleType.Qball,
)
or (
particle["shape"] == enums.ParticleShape.Primary
and (
is_nucleus(particle)
or particle["pdg_encoding"]
in (
enums.ParticleType.PPlus,
enums.ParticleType.PMinus,
enums.ParticleType.Gamma,
)
)
)
)
@numba.njit(cache=True, error_model="numpy")
def is_muon(particle):
"""Test if particle is a muon.
See dataclasses/private/dataclasses/physics/I3Particle.cxx
Parameters
----------
particle : shape () ndarray of dtype I3PARTICLE_T
Returns
-------
is_muon : bool
"""
return (
particle["pdg_encoding"] == enums.ParticleType.MuPlus
or particle["pdg_encoding"] == enums.ParticleType.MuMinus
)
@numba.njit(cache=True, error_model="numpy")
def more_energetic(test, ref):
"""Is `test` particle more energetic than `ref` particle?
Not if `test` energy is NaN, always returns False.
Designed to be used with `get_best_filter`.
See function `MoreEnergetic` in
dataclasses/private/dataclasses/physics/I3MCTreePhysicsLibrary.cxx
Parameters
----------
test : I3PARTICLE_T
ref : I3PARTICLE_T
Returns
-------
is_most_energetic : bool
"""
if np.isnan(test["energy"]):
return False
if np.isnan(ref["energy"]):
return True
return test["energy"] > ref["energy"]
# return not np.isnan(test["energy"]) and (
# np.isnan(ref["energy"]) or test["energy"] > ref["energy"]
# )
@numba.njit(error_model="numpy")
def get_most_energetic(particles):
"""Get most energetic particle. If no particle with a non-NaN energy is
found, returns a copy of `NULL_I3PARTICLE`.
Parameters
----------
    particles : ndarray of dtype I3PARTICLE_T
Returns
-------
most_energetic : shape () ndarray of dtype I3PARTICLE_T
"""
return get_best_filter(
particles=particles, filter_function=true_filter, cmp_function=more_energetic,
)
@numba.njit(error_model="numpy")
def get_most_energetic_neutrino(particles):
"""Get most energetic neutrino.
Parameters
----------
particles : ndarray of dtype I3PARTICLE_T
Returns
-------
most_energetic_neutrino : shape () ndarray of dtype I3PARTICLE_T
"""
return get_best_filter(
particles=particles, filter_function=is_neutrino, cmp_function=more_energetic,
)
@numba.njit(error_model="numpy")
def get_most_energetic_muon(particles):
"""Get most energetic muon.
Parameters
----------
particles : ndarray of dtype I3PARTICLE_T
Returns
-------
most_energetic_muon : shape () ndarray of dtype I3PARTICLE_T
"""
return get_best_filter(
particles=particles, filter_function=is_muon, cmp_function=more_energetic,
)
@numba.njit(error_model="numpy")
def get_most_energetic_track(particles):
"""Get most energetic track.
Parameters
----------
particles : ndarray of dtype I3PARTICLE_T
Returns
-------
most_energetic_track : shape () ndarray of dtype I3PARTICLE_T
"""
return get_best_filter(
particles=particles, filter_function=is_track, cmp_function=more_energetic,
)
```
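A rough sketch of calling the selectors above on a small particle array; the energies and PDG codes are made up, and it assumes the package's `dtypes` and `enums` modules are importable as shown:

```python
import numpy as np
from i3cols import dtypes, enums, mctree_phys

# Two made-up particles: a 10 GeV numu and a 25 GeV mu-.
particles = np.empty(2, dtype=dtypes.I3PARTICLE_T)
for i in range(2):
    particles[i] = mctree_phys.get_null_particle()
particles[0]["pdg_encoding"] = enums.ParticleType.NuMu
particles[0]["energy"] = 10.0
particles[1]["pdg_encoding"] = enums.ParticleType.MuMinus
particles[1]["energy"] = 25.0

print(mctree_phys.get_most_energetic_neutrino(particles)["energy"])  # 10.0
print(mctree_phys.get_most_energetic_muon(particles)["energy"])      # 25.0
```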
#### File: i3cols/i3cols/operators.py
```python
from __future__ import absolute_import, division, print_function
__author__ = "<NAME>"
__license__ = """Copyright 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
__all__ = ["split", "apply"]
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
from collections import OrderedDict
import numpy as np
import numba
from six import string_types
from i3cols.cols import load
def split(
path,
on,
categories=None,
inkeys=None,
outkeys=None,
convert_indexes_to_columns=False,
):
"""Split arrays using a function, values in a column, or index"""
if convert_indexes_to_columns:
raise NotImplementedError("can't convert scalar indexes to columns")
if callable(on): # function to operate on `path`
# Memory map only if no keys specified
arrays, _ = load(path, keys=inkeys, mmap=inkeys is None)
# apply(on, arrays
elif isinstance(on, string_types): # index or scalar column path (not ambiguous)
pass
elif isinstance(on, Sequence): # key path
pass
def apply(func, data, out_dtype=None, valid=None, index=None, **kwargs):
"""Apply a function to a scalar or vector column on a row-by-row basis,
returning an array with one element per row.
Parameters
----------
func : callable
If numba-compiled (numba CPUDispatcher) and no kwargs, call in
numba-compiled loop
out_dtype : numpy dtype or None, optional
dtype of output numpy ndarray; if None, `out_dtype` is set to be same
as dtype of `data`
data : numpy ndarray
If `data` is scalar (one entry per row), then the input length is
        num_rows; if the data is vector (any number of entries per row),
then `data` can have any length
valid : None or shape-(num_rows,) numpy ndarray, optional
index : None or shape-(num_rows,) numpy ndarray of dtype retro_types.START_STOP_T
Required for chunking up vector `data` by row
**kwargs
Passed to `func` via ``func(x, **kwargs)``
Returns
-------
out : shape-(num_rows,) numpy ndarray of dtype `out_dtype`
Notes
-----
If `valid` is provided, the output for rows where ``bool(valid)`` is
False will be present but is undefined (the `out` array is initialized via
`np.empty()` and is not filled for these cases).
Also, if `func` is a numba-compiled callable, it will be run from a
numba-compiled loop to minimize looping in Python.
"""
# pylint: disable=no-else-return
# TODO: allow behavior for dynamically figuring out `out_type` (populate a
# list or a numba.typed.List, and convert the returned list to a ndarray)
if out_dtype is None:
out_dtype = data.dtype
if isinstance(func, numba.targets.registry.CPUDispatcher):
if not kwargs:
return apply_numba(
func=func, out_dtype=out_dtype, data=data, valid=valid, index=index,
)
else:
print(
"WARNING: cannot run numba functions within a numba loop"
" since non-empty `**kwargs` were passed; will call in a"
" Python loop instead."
)
# No `valid` array
if valid is None:
if index is None:
out = np.empty(shape=len(data), dtype=out_dtype)
for i, data_ in enumerate(data):
out[i] = func(data_, **kwargs)
return out
else:
out = np.empty(shape=len(index), dtype=out_dtype)
for i, index_ in enumerate(index):
out[i] = func(data[index_["start"] : index_["stop"]], **kwargs)
return out
# Has `valid` array
else:
if index is None:
out = np.empty(shape=len(data), dtype=out_dtype)
out_valid = out[valid]
for i, data_ in enumerate(data[valid]):
out_valid[i] = func(data_, **kwargs)
return out
else:
out = np.empty(shape=len(index), dtype=out_dtype)
out_valid = out[valid]
for i, index_ in enumerate(index[valid]):
out_valid[i] = func(data[index_["start"] : index_["stop"]], **kwargs)
return out
@numba.generated_jit(nopython=True, error_model="numpy")
def apply_numba(func, out_dtype, data, valid, index):
"""Apply a numba-compiled function to a scalar or vector data column on a
row-by-row basis, returning an array with one element per row.
See docs for `apply` for full documentation; but note that `apply_numba`
does not support **kwargs.
"""
# pylint: disable=function-redefined, unused-argument, no-else-return
# No `valid` array
if isinstance(valid, numba.types.NoneType):
if isinstance(index, numba.types.NoneType):
def apply_impl(func, out_dtype, data, valid, index):
out = np.empty(shape=len(data), dtype=out_dtype)
for i, data_ in enumerate(data):
out[i] = func(data_)
return out
return apply_impl
else:
def apply_impl(func, out_dtype, data, valid, index):
out = np.empty(shape=len(index), dtype=out_dtype)
for i, index_ in enumerate(index):
out[i] = func(data[index_["start"] : index_["stop"]])
return out
return apply_impl
# Has `valid` array
else:
if isinstance(index, numba.types.NoneType):
def apply_impl(func, out_dtype, data, valid, index):
out = np.empty(shape=len(data), dtype=out_dtype)
for i, (valid_, data_) in enumerate(zip(valid, data)):
if valid_:
out[i] = func(data_)
return out
return apply_impl
else:
def apply_impl(func, out_dtype, data, valid, index):
out = np.empty(shape=len(index), dtype=out_dtype)
for i, (valid_, index_) in enumerate(zip(valid, index)):
if valid_:
out[i] = func(data[index_["start"] : index_["stop"]])
return out
return apply_impl
@numba.generated_jit(nopython=True, error_model="numpy")
def iter_col(data, valid, index):
"""Consistent return values for a given column whether `data` is scalar or
vector, and whether or not it contains a `valid` array.
This unfortunately can NOT be used within a Numba njit-ed function.
Yields
------
valid : bool
Corresponding value in `valid` array; always True if `valid` is None
values : numpy ndarray
Length-1 ndarray (_not_ a Numpy scalar) corresponding to value in
`data` array if `data` is scalar, or length-N array containing the
corresponding values if `data` is vector
"""
# pylint: disable=function-redefined, unused-argument, no-else-return
# No `valid` array
if isinstance(valid, numba.types.NoneType):
if isinstance(index, numba.types.NoneType):
def gen_impl(data, valid, index):
# out = np.empty(shape=len(data), dtype=data.dtype)
for i in range(len(data)):
yield True, data[i : i + 1]
return gen_impl
else:
def gen_impl(data, valid, index):
# out = np.empty(shape=len(index), dtype=data.dtype)
for index_ in index:
yield True, data[index_["start"] : index_["stop"]]
return gen_impl
# Has `valid` array
else:
if isinstance(index, numba.types.NoneType):
def gen_impl(data, valid, index):
# out = np.empty(shape=len(data), dtype=data.dtype)
for i, valid_ in enumerate(valid):
yield valid_, data[i : i + 1]
return gen_impl
else:
def gen_impl(data, valid, index):
# out = np.empty(shape=len(index), dtype=data.dtype)
for valid_, index_ in zip(valid, index):
yield valid_, data[index_["start"] : index_["stop"]]
return gen_impl
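# Illustrative usage sketch (not part of the original module): iterating a
# scalar column with no `valid` mask and no `index`; the values are made up.
#
#     data = np.array([1.0, 2.0, 3.0])
#     for valid_, vals in iter_col(data, None, None):
#         print(valid_, vals)  # vals is a length-1 ndarray, never a scalar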
# def iter_ncol(arrays):
# kwargs = OrderedDict()
# for key, array_d in arrays.items():
``` |
{
"source": "jllanfranchi/phys597_computational2",
"score": 2
} |
#### File: phys597_computational2/ch07_problem7.30/p7x30.py
```python
from __future__ import division
from __future__ import with_statement
import numpy as np
#from pylab import ion
import matplotlib as mpl
#from matplotlib.path import Path
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
#import numexpr as ne
#from numba import autojit
import os
import sys
import time
import cPickle as pickle
import collections
from collections import deque
from multiprocessing import Process, Queue
from smartFormat import smartFormat
from genericUtils import wstdout, timestamp
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2014 <NAME>"
__credits__ = ["<NAME>"]
__license__ = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
#-- Turn live-interactive plotting on (makes updated plots appear animated)
#ion()
#-- Adjust the font used on the plots
font = {'family' : 'serif', 'weight' : 'normal', 'size' : 8}
mpl.rc('font', **font)
def coordsFromAbsDir(absdir):
    nsteps = len(absdir)
    xincr = np.zeros(nsteps+1, dtype=int)
    yincr = np.ones(nsteps+1, dtype=int)
    xincr[np.argwhere(absdir == 0)+1] = 1
    xincr[np.argwhere(absdir == 2)+1] = -1
    yincr[np.argwhere(absdir == 1)+1] = 1
    yincr[np.argwhere(absdir == 3)+1] = -1
    x = np.cumsum(xincr)
    y = np.cumsum(yincr)
    return x, y
def plotSnakeXY(x, y):
    fig, ax = plt.subplots()
    ax.plot(x, y, 'r-o', linewidth=3, markersize=6)
    ax.plot(x[0], y[0], 'ko', markersize=10)
    ax.axis('image')
    for spine in ax.spines.itervalues():
        spine.set_visible(False)
    ax.set_xlim(min(x)-2, max(x)+2)
    ax.set_ylim(min(y)-2, max(y)+2)
def plotSnakeAbsDir(absdir):
    plotSnakeXY(*coordsFromAbsDir(absdir))
def plotSnakeCoord(coords):
x = []
y = []
for c in coords:
x.append(c[0])
y.append(c[1])
plotSnakeXY(x, y)
def newSnake1(nSteps=10):
    #reldir = (np.random.random(nSteps)*2).astype(int)-1
    reldir = np.random.randint(-1, 2, nSteps)
    absdir = np.mod(1+np.cumsum(reldir), 4)
    x, y = coordsFromAbsDir(absdir)
    return x, y
class Snake:
"""Self-avoiding random walk."""
def __init__(self, nsteps, validDirs=(-1,1), recordAfter=None):
#-- Use a deque as a circular buffer to store the coords
self.coords = deque(maxlen=nsteps+1)
[ self.coords.append((0,y)) for y in range(nsteps+1) ]
self.R2 = [nsteps**2]
#-- This is either -1 (points at most-recently-added element)
# or 0 (points at oldest element)
self.forward = True
self.c1 = -1
self.c2 = -2
self.c_end = 0
self.validDirs = validDirs
self.nValidDirs = len(validDirs)
if recordAfter == None:
self.recordAfter = nsteps
else:
self.recordAfter = recordAfter
self.reptateCount = 0
def plot(self):
if self.forward:
plotSnakeCoord(self.coords)
else:
rc = self.coords
rc.reverse()
plotSnakeCoord(rc)
def meanR2(self):
return np.mean(self.R2)
def reptate(self):
dx = self.coords[self.c1][0]-self.coords[self.c2][0]
if dx == 1:
previousDir = 0
elif dx == -1:
previousDir = 2
elif self.coords[self.c1][1]-self.coords[self.c2][1] == 1:
previousDir = 1
else:
previousDir = 3
proposedDir = (previousDir + \
self.validDirs[np.random.randint(0,self.nValidDirs)]) % 4
if proposedDir == 0:
proposedCoord = (self.coords[self.c1][0]+1,self.coords[self.c1][1])
elif proposedDir == 1:
proposedCoord = (self.coords[self.c1][0],self.coords[self.c1][1]+1)
elif proposedDir == 2:
proposedCoord = (self.coords[self.c1][0]-1,self.coords[self.c1][1])
else:
proposedCoord = (self.coords[self.c1][0],self.coords[self.c1][1]-1)
#-- Exchange head and tail of snake...
if proposedCoord in self.coords:
self.forward = not self.forward
if self.forward:
self.c1 = -1
self.c2 = -2
self.c_end = 0
else:
self.c1 = 0
self.c2 = 1
self.c_end = -1
if self.reptateCount % self.recordAfter == 0:
self.R2.append(self.R2[-1])
self.recordStats = False
        #-- ... or prepend / append new coord
else:
if self.forward:
self.coords.append(proposedCoord)
else:
self.coords.appendleft(proposedCoord)
if self.reptateCount % self.recordAfter == 0:
self.R2.append(self.R2[-1])
self.R2.append((self.coords[self.c1][0]
-self.coords[self.c_end][0])**2+
(self.coords[self.c1][1]
-self.coords[self.c_end][1])**2)
self.recordStats = False
self.reptateCount += 1
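#-- Illustrative usage sketch (not in the original script): build a short
#   snake and reptate it; the numbers below are arbitrary examples.
#
#       snake = Snake(nsteps=20, validDirs=(-1, 0, 1))
#       for _ in xrange(1000):
#           snake.reptate()
#       print snake.meanR2()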
formatDic = {'sigFigs': 5, 'demarc': "", 'threeSpacing': False, 'rightSep':""}
def powerLaw(x, power, multFact, offset):
return multFact*(x**power) + offset
def powerLawLatex(power, multFact=1, offset=0, pcov=None):
offsetStr = smartFormat(offset, alwaysShowSign=True, **formatDic)
if not (offsetStr[0] == "+" or offsetStr[0] == "-"):
offsetStr = "+" + offsetStr
latex = r"$" + smartFormat(multFact, **formatDic) + \
r" \cdot N^{" + smartFormat(power, **formatDic) + r"} " + \
offsetStr + \
r"$"
return latex
def exponential(x, expExponent, multFact=1):
return multFact * np.exp(np.array(x)*expExponent)
def exponentialLatex(expExponent, multFact=1, pcov=None):
latex = r"$" + smartFormat(multFact, **formatDic) + \
r"\cdot e^{" + smartFormat(expExponent, **formatDic) + \
r"\cdot N}$"
return latex
def expPower(x, expExponent, powerLawExp, multFact):
x = np.array(x)
return multFact * np.exp(x*expExponent) * x**powerLawExp
def expPowerLatex(expExponent, powerLawExp, multFact, pcov=None):
latex = r"$" + smartFormat(multFact, **formatDic) + \
r"\cdot e^{" + smartFormat(expExponent, **formatDic) + \
r"\cdot N}\cdot N^{" + smartFormat(powerLawExp, **formatDic) + \
r"}$"
return latex
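#-- Illustrative sketch (not in the original script): the model functions above
#   are meant to be handed to scipy.optimize.curve_fit; a made-up example:
#
#       x = np.arange(5, 100)
#       y = powerLaw(x, power=1.5, multFact=0.8, offset=0.1)
#       popt, pcov = curve_fit(f=powerLaw, xdata=x, ydata=y, p0=(1.0, 1.0, 0.0))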
class SimulationData:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Simulation:
def __init__(self):
self.sd = SimulationData()
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
self.stateFilename = os.path.basename(__file__) + ".pk" #"p7x28_state.pk"
print self.stateFilename
def saveState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'wb') as stateFile:
pickle.dump(self.sd, stateFile, -1)
def loadState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'rb') as stateFile:
self.sd = pickle.load(stateFile)
def runSimulation(self, targetSuccesses=10, stepsRange=(4,50),
plotting=False):
#-- Reset state variables for a new simulation run
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
timeLastSaved = time.time()
self.sd.targetSuccesses = targetSuccesses
self.sd.stepsInChains = range(stepsRange[0],stepsRange[1])
self.sd.allChainFinalCoords = []
self.sd.allMeanChainFinalCoords = []
self.sd.meanChainFinalCoords = []
self.sd.chainSquareLengthAvg = []
self.sd.successRatio = []
self.sd.timingAvg = []
if plotting:
self.fig1 = plt.figure(1)
self.fig1.clf()
            self.ax1 = self.fig1.add_subplot(111)
            line, = self.ax1.plot([], [], 'ko-', lw=2)
            self.ax1.set_xlim(-20, 20)
            self.ax1.set_ylim(-20, 20)
            self.ax1.axis('image')
plt.draw()
for stepsThisChain in self.sd.stepsInChains:
startTime = time.time()
snake = Snake(stepsThisChain, validDirs=(-1,0,1))
#successfulChains = []
#chainSquareLengths = []
#chainFinalCoords = []
#meanChainFinalCoord = []
nSuccesses = 0
trialN = 0
while nSuccesses < self.sd.targetSuccesses:
trialN += 1
#-- Perform as many reptations as chain links, to
# help ensure an independent configuration
[ snake.reptate() for n in xrange(stepsThisChain) ]
nSuccesses += 1
if plotting:
snake.plot()
plt.draw()
time.sleep(0.005)
self.sd.chainSquareLengthAvg.append(snake.meanR2())
self.sd.timingAvg.append( (time.time()-startTime)/nSuccesses )
wstdout("\nstepsThisChain = " + str(stepsThisChain) + "\n")
wstdout(" time/success = " + str(self.sd.timingAvg[-1]) + "\n")
if (time.time() - timeLastSaved) > 60*5:
self.saveState()
timeLastSaved = time.time()
self.sd.allMeanChainFinalCoords = \
np.array(self.sd.allMeanChainFinalCoords)
self.sd.simulationCompleted = True
self.saveState()
def postproc(self):
"""Perform curve fitting to the data"""
#-- Update state
self.sd.postprocCompleted = False
#-- Check that simulation data is present
if not self.sd.simulationCompleted:
raise Exception("No simulation run; cannot perform curve fit!")
#-- Same x data is used for *all* the below curve fits
x = self.sd.stepsInChains
#============================================================
# Fit R_N^2 with const * power-law + const
#============================================================
y = self.sd.chainSquareLengthAvg
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt3, pcov3 = curve_fit(f=powerLaw, xdata=x, ydata=y, sigma=sigma)
self.sd.fit3 = powerLaw(x, *popt3)
self.sd.fit3eqn = powerLawLatex(*popt3)
print popt3, pcov3, "\n"
#============================================================
# Exponential * power-law fit to wall-clock time
#============================================================
y = self.sd.timingAvg
#-- Initial guess
p0 = (0.129, 0, 2.981e-3)
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt4, pcov4 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
p0=p0, )
self.sd.fit4 = expPower(x, *popt4)
self.sd.fit4eqn = expPowerLatex(*popt4)
print popt4, pcov4, "\n"
#-- Update state
self.sd.postprocCompleted = True
def plotResults(self, savePlot=True):
"""Plot the data and the fit curves"""
if not self.sd.simulationCompleted:
raise Exception("No simulation has been run; cannot plot results!")
if not self.sd.postprocCompleted:
self.postproc()
self.fig2 = plt.figure(2, figsize=(7,8), dpi=80)
self.fig2.clf()
self.ax23 = self.fig2.add_subplot(211)
self.ax23.plot(self.sd.stepsInChains, self.sd.chainSquareLengthAvg,
'bo', label="data", markersize=4)
self.ax23.plot(self.sd.stepsInChains, self.sd.fit3,
'r-', label=self.sd.fit3eqn, linewidth=2, alpha=0.75)
self.ax23.set_ylabel(r"$\langle R_N^2\rangle$")
self.ax23.grid(which='major', b=True)
self.ax23.legend(loc="upper left", fancybox=True, shadow=True)
self.ax24 = self.fig2.add_subplot(212)
self.ax24.plot(self.sd.stepsInChains, self.sd.timingAvg,
'bo', label="data", markersize=4)
self.ax24.plot(self.sd.stepsInChains, self.sd.fit4,
'r-', label=self.sd.fit4eqn, linewidth=2, alpha=0.75)
        self.ax24.set_xlabel(r"Number of steps in walk, $N$")
self.ax24.set_ylabel("Wall-clock time per successful chain (s)")
self.ax24.set_yscale('log')
self.ax24.grid(which='major', b=True)
self.ax24.legend(loc="upper left", fancybox=True, shadow=True)
self.fig2.tight_layout()
if savePlot:
self.fig2.savefig(timestamp(t=False) + "_problem7x30_plots.pdf")
self.fig2.savefig(timestamp(t=False) + "_problem7x30_plots.png", dpi=120)
plt.show()
if __name__ == "__main__":
startTime = time.time()
#-- Instantiate the Simulation object
sim = Simulation()
#-- Try to load the sim data from any previous run; if no data saved
# to disk in the default location, run a new simulation
try:
sim.loadState()
except Exception as e:
        print "Error loading saved state: {0}".format(e)
#sim.runSimulation(targetSuccesses=10, stepsRange=(4,101))
sim.runSimulation(targetSuccesses=100, stepsRange=(5,500))
#-- *Always* perform post-processing and plotting (allows easy modification
# of the postprocessing (curve fitting) and plotting routines
# without needing to re-run the simulation, which can take hours)
sim.postproc()
sim.plotResults()
plt.show()
```
#### File: phys597_computational2/landau_ch19_problem19.3.2/p9x3x2_v2.py
```python
from __future__ import division
from __future__ import with_statement
import numpy as np
from pylab import ion
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib import pyplot as plt
from matplotlib import animation
from scipy.optimize import curve_fit
from scipy.weave import inline, converters
import sys
import time
import cPickle as pickle
from JSAnimation import IPython_display, HTMLWriter
from smartFormat import smartFormat
from plotGoodies import plotDefaults
plotDefaults()
# <codecell>
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2014 <NAME>"
__credits__ = ["<NAME>"]
__license__ = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
# <codecell>
#-- Turn live-interactive plotting on (makes updated plots appear animated)
ion()
#-- Adjust the font used on the plots
font = {'family' : 'serif', 'weight' : 'normal', 'size' : 8}
#mpl.rcParams('font', **font)
# <codecell>
class WaxWendroff:
def __init__(self):
self.c_lax_wendroff = """
py::list ret;
double beta2 = beta*beta;
double u_i2;
double u_ip12;
double u_im12;
double this_diff;
double max_ydiff = 0;
int j;
int i = 0;
//u_i2 = u0(i)*u0(i);
//u_ip12 = u0(i+1)*u0(i+1);
for (j=0; j<n_skip; j++) {
for (i=1; i<m-1; i++) {
//u_im12 = u_i2;
//u_i2 = u_ip2;
u_i2 = u0(i)*u0(i);
u_im12 = u0(i-1)*u0(i-1);
u_ip12 = u0(i+1)*u0(i+1);
//-- Lax - Wendroff scheme
u(i) = u0(i)
- 0.25*beta*(u_ip12 - u_im12)
+ 0.125*beta2 * ( (u0(i+1)+u0(i))*(u_ip12-u_i2)
- (u0(i)+u0(i-1))*(u_i2-u_im12) );
this_diff = fabs(u(i)-u(i-1));
if (this_diff > max_ydiff)
max_ydiff = this_diff;
//-- Update "present step" array element with what was just computed as
// the next step" value for this array element
u0(i) = u(i);
}
}
//for (i=1; i<m-1; i++)
// u0(i) = u(i);
//-- Enforce boundary conditions
//u(0) = 0;
//u(m-1) = 0;
ret.append(max_ydiff);
return_val = ret;
"""
self.m = 1000
self.c = 1.0
#dx = 1./m
self.dx = 2*np.pi/self.m
self.dt = self.dx/10
self.epsilon = 1.0
self.beta = self.epsilon*self.dt/self.dx
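        #-- Added note: the C kernel above implements the Lax-Wendroff update
        #   for the inviscid Burgers equation u_t + u u_x = 0 in conservative
        #   form,
        #     u_i^{n+1} = u_i^n
        #                 - (beta/4)   * (u_{i+1}^2 - u_{i-1}^2)
        #                 + (beta^2/8) * [ (u_{i+1}+u_i)*(u_{i+1}^2 - u_i^2)
        #                                  - (u_i+u_{i-1})*(u_i^2 - u_{i-1}^2) ],
        #   with beta = epsilon*dt/dx as set above.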
self.u = np.zeros((self.m+1),float)
self.u0 = np.zeros((self.m+1), float)
self.uf = np.zeros((self.m+1),float)
self.T_final = 100
self.maxN = int(self.T_final/self.dt)
print "dt =", self.dt, ", dx =", self.dx, \
", epsilon =", self.epsilon, ", beta =", self.beta
self.x = np.arange(-(self.m/2)*self.dx,(self.m/2)*self.dx,self.dx)
print len(self.x)
#-- beta = 0.01
#-- epsilon = 0.2
#-- dx = 1e-3
#-- dt = 1e-4
#-- beta = epsilon*dt/dx = 0.02
self.prob = 1
if self.prob == 0:
def finalFun(x, t):
return -np.exp( - 10.*(x - 1.5 - self.c*t)**2 ) \
+ np.exp( - 10.*(x + 1.5 + self.c*t)**2 ) # Exact
elif self.prob == 1:
def finalFun(x, t):
a0 = -1.0
fx = 1 #4*np.pi
return a0/2*np.sin(fx*x-self.c*t)+a0/2*np.sin(fx*x+self.c*t)
self.u0 = finalFun(self.x, 0)
self.u = np.zeros_like(self.u0)
self.fig1 = plt.figure(1, figsize=(5,10), dpi=120)
self.fig1.clf()
self.ax1 = self.fig1.add_subplot(211)
self.ax1.plot(self.x, self.u0, '-',
color=(.6,.6,.6), lw=6, label="initial cond")
self.l_ns, = self.ax1.plot(self.x, self.u, 'o-',
markersize=2,
color='b',
markerfacecolor=(0.8,0,0,.25),
markeredgecolor=(0.8,0,0,.25),
lw=0.5,
label="numerical soln")
self.ax1.legend(loc="best")
self.ax1.set_xlim(-np.pi,np.pi)
self.ax1.set_ylim(-1,1)
self.ax1.set_xlabel(r"Spatial dimension, $x$")
self.ax1.set_title(r"Spatial wave depiction")
self.ax2 = self.fig1.add_subplot(212)
self.l_ms, = self.ax2.plot(0,0, '-o',
color='k',
markerfacecolor='g',
markersize=3,
lw=1.0)
self.ax2.set_xlabel(r"Time index, $j$")
#ax2.set_ylabel(r"Maximum spatial slope")
self.ax2.set_xlim(0, self.maxN)
self.ax2.set_ylim(0,500)
self.ax2.set_title(r"Maximum spatial slope at a given time step")
plt.tight_layout()
#-- Note: Time steps are indexed with j and spatial coordinates with i.
# The previous solution is preserved in u0 for use in computing the
# new solution, which is incrementally stored into the u array.
#
# Once the computation is complete for the new solution, the u array
# is copied into u0 for use in the next time step.
#def init(self):
self.l_ns.set_data(self.x, finalFun(self.x,0))
self.l_ms.set_data(0,0)
self.maxslopelist = []
        self.nskiplist = []
self.allj = []
self.n_skip = 1
self.j = 0
#return self.l_ns, self.l_ms
def animate(self, ii):
print "Iteration number, ii:", ii
        #-- scipy.weave.inline binds *local* names, so alias the instance
        #   attributes to locals before handing them to the C code
        u, u0, beta, m, n_skip = self.u, self.u0, self.beta, self.m, self.n_skip
        out = inline(self.c_lax_wendroff, ['u', 'u0', 'beta', 'm', 'n_skip'],
                     type_converters=converters.blitz)
        self.j += self.n_skip
        self.allj.append(self.j)
        self.slope = out[0]/self.dx
        self.maxslopelist.append(self.slope)
        self.n_skip = min(max(int(5e4/self.slope**2), 10), 1000)
        self.n_skip = 100
        self.nskiplist.append(self.n_skip)
print out[0]/self.dx
self.l_ns.set_ydata(self.u)
self.l_ms.set_xdata(self.allj)
self.l_ms.set_ydata(self.maxslopelist)
self.ax2.set_ylim(0,np.max(self.maxslopelist))
self.ax2.set_xlim(0,self.j)
self.fig1.canvas.draw()
#plt.draw()
#if j >= maxN or slope > 2000:
# break
#return l_ns, l_ms
#fig2 = plt.figure(2)
#fig2.clf()
#ax = fig2.add_subplot(111)
#ax.plot(nskiplist, 'm-', lw=3)
#ax.set_ylabel("n skip")
#plt.tight_layout()
ww = WaxWendroff()
animation.FuncAnimation(ww.fig1, ww.animate, frames=20, blit=True)
# <codecell>
plt.show()
# <codecell>
``` |
{
"source": "jllanfranchi/playground",
"score": 2
} |
#### File: playground/gpu_histogramdd/GPUHist.py
```python
from __future__ import division
import os
import numpy as np
from pycuda import autoinit
from pycuda.compiler import SourceModule
from pycuda.tools import DeviceData
from pisa.core.binning import OneDimBinning, MultiDimBinning
from pisa.utils.profiler import profile
from pisa.utils.log import logging, set_verbosity
__all__ = ['BinningStruct', 'GPUHist',
'CUDA_HIST_MOD',
'EVEN_LIN_SPACING', 'EVEN_LOG_SPACING', 'UNEVEN_SPACING']
EVEN_LIN_SPACING = 0
EVEN_LOG_SPACING = 1
UNEVEN_SPACING = 2
class BinningStruct(object):
def __init__(self, binning, ftype):
binning = MultiDimBinning(binning)
num_dims = np.int32(binning.num_dims)
        bins_per_dim = np.empty(shape=num_dims, dtype=np.int32)
        # For dim_spacing_type: 0 => lin, 1 => log, 2 => arbitrary
        dim_spacing_type = np.empty(shape=num_dims, dtype=np.uint8)
        further_dims_bincounts = np.empty(shape=num_dims, dtype=np.int32)
        bmin = np.empty(shape=num_dims, dtype=ftype)
        bmax = np.empty(shape=num_dims, dtype=ftype)
        bwidth = np.empty(shape=num_dims, dtype=ftype)
units = []
# Sequence of arrays
bin_edges = []
for dim_num, dim in enumerate(binning.dims):
bins_per_dim[dim_num] = len(dim)
units.append(dim.bin_edges.units)
if dim.is_lin:
                dim_spacing_type[dim_num] = EVEN_LIN_SPACING
                bmin[dim_num] = dim.bin_edges[0].magnitude
                bmax[dim_num] = dim.bin_edges[-1].magnitude
                bwidth[dim_num] = (bmax[dim_num] - bmin[dim_num])/len(dim)
                assert bwidth[dim_num] > 0
                bin_edges.append(np.array([np.nan], dtype=ftype))
elif dim.is_log:
dim_spacing_type[dim_num] = EVEN_LOG_SPACING
# Record critical bin values in log space
bmin[dim_num] = np.log(dim.bin_edges[0].magnitude)
bmax[dim_num] = np.log(dim.bin_edges[-1].magnitude)
bwidth[dim_num] = (bmax[dim_num] - bmin[dim_num])/len(dim)
assert bwidth[dim_num] > 0
bin_edges.append(np.array([np.nan], dtype=ftype))
else:
dim_spacing_type[dim_num] = UNEVEN_SPACING
bwidth[dim_num] = np.nan
bin_edges.append(dim.bin_edges.magnitude.astype(ftype))
bmin[dim_num] = dim.bin_edges[0].magnitude
bmax[dim_num] = dim.bin_edges[-1].magnitude
# Total bincounts in subsequent dimensions for indexing
cumulative_bincount = 1
for dim_idx in range(num_dims-1, -1, -1):
            further_dims_bincounts[dim_idx] = cumulative_bincount
cumulative_bincount *= bins_per_dim[dim_idx]
# Record members of struct here (to be passed to device)
self.num_dims = np.int32(num_dims)
self.bins_per_dim = bins_per_dim
self.dim_spacing_type = dim_spacing_type
        self.further_dims_bincounts = further_dims_bincounts
self.bmin = bmin
self.bmax = bmax
self.bwidth = bwidth
self.bin_edges = bin_edges
# Record things useful to keep around on Python side as "private" attrs
self._units = units
self._binning = binning
@property
    def sizeof(self):
        # NOTE: the body was missing in the original source; a placeholder is
        # added here so the class definition at least parses.
        raise NotImplementedError("sizeof was left unimplemented in the original")
    def struct_to_device(self, device_loc):
        # NOTE: the body was missing in the original source; placeholder added.
        raise NotImplementedError("struct_to_device was left unimplemented in the original")
#for (int dim_idx = 0; dim_idx < NUM_DIMS; dim_idx++) {
# if (dim_spacing_type[dim_idx] == 1) {
# bmin[dim_idx] = binning[dim_idx][0];
# bmax[dim_idx] = binning[dim_idx][bins_per_dim[dim_idx]];
# bwidth[dim_idx] = (bmax[dim_idx] - bmin[dim_idx])/bins_per_dim[dim_idx];
# }
# else if (dim_spacing_type[dim_idx] == 2) {
# bmin[dim_idx] = log(binning[dim_idx][0]);
# bmax[dim_idx] = log(binning[dim_idx][bins_per_dim[dim_idx]]);
# bwidth[dim_idx] = log(bin_deltas[dim_idx]);
# bwidth[dim_idx] = (log(bmax[dim_idx]) - log(bmin[dim_idx])) / bins_per_dim[dim_idx];
# }
# else {
# bmin[dim_idx] = 0.0;
# bmax[dim_idx] = 0.0;
# bwidth[dim_idx] = 0.0;
# }
#}
class GPUHist(object):
def __init__(self, binning, data_units=None):
self._get_gpu_info()
self._setup_binning(binning=binning, data_units=data_units)
def _setup_binning(self, binning, data_units=None):
self.binning = MultiDimBinning(binning)
# TODO: do something meaningful with units
        # NOTE: this assignment and loop were left unfinished in the original;
        # a minimal completion (assumption) is to build the binning struct here
        self.bstruct = BinningStruct(self.binning, ftype=np.float64)
def _get_gpu_info(self):
gpu = autoinit.device
self.gpu = gpu
self.device_data = DeviceData()
def _get_cuda_run_params(self, compiled_kernel, num_events):
"""Get "optimal" (or at least dynamic) block & thread parameters for
running the compiled kernel.
Returns
-------
threads_per_block, num_blocks, events_per_thread : int
See http://pythonhosted.org/CudaPyInt/_modules/cudapyint/Solver.html
"""
max_threads = min(
self.device_data.max_registers_per_block/compiled_kernel.num_regs,
self.device_data.max_threads
)
max_warps = max_threads / self.device_data.warp_size
threads_per_block = int(np.ceil(max_warps * self.device_data.warp_size))
# Use only a single multiprocessor (cpu core), so limits the number of
# thread blocks (?)
max_thread_blocks = self.device_data.thread_blocks_per_mp
num_blocks, r = divmod(num_events, threads_per_block)
if r != 0:
num_blocks += 1
num_blocks = min(num_blocks, max_thread_blocks)
events_per_thread = int(np.ceil(num_events / (threads_per_block *
num_blocks)))
return threads_per_block, num_blocks, events_per_thread
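# Illustrative sketch (not in the original): how the launch parameters above
# might be consumed once the module's kernels compile; the kernel name and
# argument list below are hypothetical.
#
#     kernel = CUDA_HIST_MOD.get_function("histogramdd")
#     tpb, nblocks, events_per_thread = gpu_hist._get_cuda_run_params(
#         kernel, num_events=num_events)
#     kernel(..., block=(tpb, 1, 1), grid=(nblocks, 1))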
CUDA_HIST_MOD = SourceModule(
"""//CUDA//
/*
* setup things that will not change across multiple invocations of histogram
* function
*/
__global__ void setup(const int n_dims, const int n_flat_bins, const int num_events)
{
// Following must be supplied by the caller
int bins_per_dim[NUM_DIMS];
int dim_spacing_type[NUM_DIMS];
// Initialize further_dims_bincounts for indexing into the flattened
// histogram. One can imagine indexing into hist via e.g. 3 indices if it's
// 3-dimensional:
// hist[i][j][k]
// with i in [0, I-1], j in [0, J-1], and k in [0, K-1]. This indexes into
// the linear chunk of memory that stores the histogram in row-major order,
// i.e., this translates to linear (flattened) memory address
// hist[i*J*K + j*K + k]
// and analogously for lower/higher dimensions.
int further_dims_bincounts[NUM_DIMS];
int cumulative_bincount = 1;
if (NUM_DIMS > 1) {
for (int dim_idx = NUM_DIMS-1; dim_idx >= 0; dim_idx--) {
            further_dims_bincounts[dim_idx] = cumulative_bincount;
cumulative_bincount *= bins_per_dim[dim_idx];
}
}
// Initialize binning constants needed for fast histogramming of lin/log
// dims
fType bmin[NUM_DIMS];
fType bmax[NUM_DIMS];
fType bwidth[NUM_DIMS];
for (int dim_idx = 0; dim_idx < NUM_DIMS; dim_idx++) {
if (dim_spacing_type[dim_idx] == %(EVEN_LIN_SPACING)d) {
bmin[dim_idx] = binning[dim_idx][0];
bmax[dim_idx] = binning[dim_idx][bins_per_dim[dim_idx]];
bwidth[dim_idx] = (bmax[dim_idx] - bmin[dim_idx])
/ bins_per_dim[dim_idx];
}
else if (dim_spacing_type[dim_idx] == %(EVEN_LOG_SPACING)d) {
bmin[dim_idx] = log(binning[dim_idx][0]);
bmax[dim_idx] = log(binning[dim_idx][bins_per_dim[dim_idx]]);
bwidth[dim_idx] = log(bin_deltas[dim_idx]);
bwidth[dim_idx] = (log(bmax[dim_idx]) - log(bmin[dim_idx]))
/ bins_per_dim[dim_idx];
}
else { // UNEVEN_SPACING
bmin[dim_idx] = 0.0;
bmax[dim_idx] = 0.0;
bwidth[dim_idx] = 0.0;
}
}
}
/*
* Histogram the D-dimensional data store
*/
__global__ void histogramdd(int n_samples, fType *sample, fType *weights)
{
// TODO:
// Is this faster : fType data[NUM_EVENTS][NUM_DIMS+1];
// Or is this faster : fType data[NUM_DIMS+1][NUM_EVENTS];
// Note weights are last array stored to data, hence the length NUM_DIMS+1
fType data[NUM_DIMS+1][NUM_EVENTS];
// Perform the histogramming operation
fType val;
int flat_idx;
int thisdim_bin_idx;
    for (int evt_idx = EVT_START_IDX; evt_idx < EVT_STOP_IDX; evt_idx++) {
        flat_idx = 0;
        int dim_indices[NUM_DIMS];
// TODO: go through dimensions in order of least-expensive-to-compute
// (even linear bin spacing) to most-expensive-to-compute (uneven bin
// spacing)
        for (int dim_idx = 0; dim_idx < NUM_DIMS; dim_idx++) {
if (dim_spacing_type[dim_idx] == %(UNEVEN_SPACING)d) {
// do binary search (see Philipp's code for this)
}
else {
if (dim_spacing_type[dim_idx] == %(EVEN_LIN_SPACING)d)
val = data[dim_idx][evt_idx];
else
val = log(data[dim_idx][evt_idx]);
                thisdim_bin_idx = (int) ((val - bmin[dim_idx]) / bwidth[dim_idx]);
// Move on to the next event if this event doesn't fall within
// any of this dimension's bins
if (thisdim_bin_idx < 0
|| thisdim_bin_idx > bins_per_dim[dim_idx]
|| (thisdim_bin_idx == bins_per_dim[dim_idx]
&& val != bmax))
break;
}
// If we get here, then the event *is* in a bin in this dimension.
if (dim_idx == NUM_DIMS-1)
hist[flat_idx + thisdim_bin_idx] += data[NUM_DIMS][evt_idx];
else
                flat_idx += thisdim_bin_idx * further_dims_bincounts[dim_idx];
}
}
""" % dict(EVEN_LIN_SPACING=EVEN_LIN_SPACING,
EVEN_LOG_SPACING=EVEN_LOG_SPACING,
UNEVEN_SPACING=UNEVEN_SPACING)
)
``` |
{
"source": "jllanfranchi/pygeneric",
"score": 2
} |
#### File: jllanfranchi/pygeneric/comsolData.py
```python
from __future__ import division
from __future__ import with_statement
import numpy as np
import re
import os
from genericUtils import wstdout, wstderr
class ComsolData: #(OrderedDict):
def __init__(self, *arg, **kwarg):
#super(ComsolData, self).__init__(*arg, **kwarg)
self.variables = {}
self.re_modelName = re.compile(r"\% Model[:,]\s*(\S[\S ]*)")
self.re_dimensions = re.compile(r"\% Dimension[:,]\s*(\S*)")
self.re_nodes = re.compile(r"\% Nodes[:,]\s*(\S*)")
#-- The following regex was designed to parse the entire header at
# once, but fails to capture the initial coordinate letters, e.g., R
# and Z or X and Y ...
#self.re_header = re.compile(r"(\S+)(?: \((\S*)\)){0,1}(?: @ (t|freq|Eigenfrequency)=([0-9.e+-]+)){0,1}\s*", re.IGNORECASE)
self.re_header = re.compile(r"([0-9a-zA-Z.()_+-]+)(?: \((\S*)\)){0,1}(?: @ (t|freq|Eigenfrequency)=([0-9.e+-]+)){0,1}(?:,){0,1}\s*", re.IGNORECASE)
#-- This one only works after splitting the header row at commas
#re_header = re.compile(r"(?: @ (t|freq|Eigenfrequency)=([0-9.e+-]+)){0,1}\s*[,]{0,1}", re.IGNORECASE+re.DEBUG)
self.re_units = re.compile(r"\% .*unit[:,]\s*(\S*)")
self.re_data = re.compile(r"([0-9a-zA-Z.()_+-]+)[,]{0,1}[\s]*")
def importSpreadsheetData(self, fname, numLines=-1, header=True):
modelNameLine = 1
comsolVersionLine = 2
dateLine = 3
dimensionLine = 4
nodesLine = 5
numExpressionsLine = 6
descriptionLine = 7
unitsLine = 8
headerLine = 9
firstDataLine = 10
self.varNames = []
lineN = 0
with open(fname, 'r') as f:
while True:
lineN += 1
lineStr = f.readline()
if len(lineStr) == 0:
break
if lineN == dimensionLine:
numDimensions = int(self.re_dimensions.findall(lineStr)[0])
#print "numDimensions", numDimensions
if lineN == nodesLine:
numDataLines = int(self.re_nodes.findall(lineStr)[0])
#print "numDataLines", numDataLines
if lineN == unitsLine:
self.coordUnits = self.re_units.findall(lineStr)[0]
#print "self.coordUnits", self.coordUnits
if lineN == headerLine:
#print lineStr[2:].strip().split(',')
self.varTuples = self.re_header.findall(lineStr[2:])
#print self.varTuples[0]
#print self.varTuples[1]
#print self.varTuples[2]
#print self.varTuples[-1]
for varTuple in self.varTuples:
varName = varTuple[0].replace(".", "_")
units = varTuple[1]
freqTimeType = varTuple[2]
freqTime = varTuple[3]
if len(freqTimeType) > 0:
freqTimeSpecd = True
else:
freqTimeSpecd = False
if not self.variables.has_key(varName):
if freqTimeSpecd:
self.variables[varName] = {'units':units,
'val':[],
'ft':freqTimeType,
freqTimeType:[],
'dim':2}
else:
self.variables[varName] = {'units':units,
'val':[],
'dim':1}
if self.variables[varName]['dim'] == 2:
ft = self.variables[varName]['ft']
self.variables[varName][ft].append(np.float64(freqTime))
self.varNames.append(varName)
if lineN == firstDataLine:
self.uniqueVarNames = list(set(self.varNames))
for varName in self.uniqueVarNames:
self.variables[varName]['val'] = []
if lineN >= firstDataLine:
n = lineN - firstDataLine
for varName in self.uniqueVarNames:
self.variables[varName]['val'].append([])
valStrings = self.re_data.findall(lineStr)
for (varName, valString) in zip(self.varNames, valStrings):
try:
self.variables[varName]['val'][n].append(np.float64(valString))
except:
#print "'" + valString + "'"
self.variables[varName]['val'][n].append(complex(valString.replace("i","j")))
if numLines >= 0 and lineN-firstDataLine+1 >= numLines:
break
#-- Break *single-valued* datum out of list
# -or-
# stuff multi-valued data into numpy arrays
# And in either case, if there's a freq or time associated, make that list an NP array
for varName in self.varNames:
self.variables[varName]['val'] = np.array(self.variables[varName]['val'])
sp = self.variables[varName]['val'].shape
if sp == (1,1):
self.variables[varName]['val'] = self.variables[varName]['val'][0,0]
if self.variables[varName].has_key('ft'):
ft = self.variables[varName]['ft']
self.variables[varName][ft] = np.array(self.variables[varName][ft])
#elif sp[0] == 1:
# self.variables[varName]['val'] = self.variables[varName]['val'][0,:]
#try:
# len(self.variables['R']['val'])
#except:
# pass
#else:
# self.coords = zip(self.variables['R']['val'], self.variables['Z']['val'])
def appendData(self, src):
n = 0
for varName in np.unique(src.varNames):
if varName in ['R', 'Z', 'X', 'Y']:
continue
if self.variables[varName].has_key('ft'):
ft = self.variables[varName]['ft']
self.variables[varName][ft] = np.concatenate(
(self.variables[varName][ft], src.variables[varName][ft]) )
self.variables[varName]['val'] = np.concatenate(
(self.variables[varName]['val'],
src.variables[varName]['val']), axis=1 )
#def writeSpreadsheet(self, fname):
# with open(fname, 'w') as f:
def concatenateData(dataFnames, numLines=-1):
if isinstance(dataFnames, str):
dataFnames = [dataFnames]
n = 0
for fname in dataFnames:
#wstdout( " Loading data from " + fname + "\n" )
if n == 0:
comDat = ComsolData()
comDat.importSpreadsheetData(os.path.join( os.path.expanduser("~"), fname ), numLines=numLines)
if len(dataFnames) == 1:
break
else:
tempComDat = ComsolData()
tempComDat.importSpreadsheetData(os.path.join( os.path.expanduser("~"), fname ), numLines=numLines)
comDat.appendData(tempComDat)
n += 1
return comDat
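#-- Illustrative usage sketch (not in the original module): load and merge two
#   Comsol spreadsheet exports; the paths and the variable key 'w' below are
#   placeholders, not names from the original.
#
#       comDat = concatenateData(["data/run1.txt", "data/run2.txt"])
#       freqs = comDat.variables['w']['freq']
#       vals = comDat.variables['w']['val']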
if __name__ == "__main__":
headerTest = []
#-- Comsol v4.3
headerTest.append("% R Z w (m) @ freq=1 w (m) @ freq=20 w (m) @ freq=40 w (m) @ freq=60 w (m) @ freq=80 w (m) @ freq=100 w (m) @ freq=120 w (m) @ freq=140 w (m) @ freq=160")
#-- Comsol v4.4
headerTest.append("% R,Z,solid.omega (rad/s) @ Eigenfrequency=1,solid.omega (rad/s) @ Eigenfrequency=2,solid.omega (rad/s) @ Eigenfrequency=3,solid.omega (rad/s) @ Eigenfrequency=4,solid.omega (rad/s) @ Eigenfrequency=5,solid.omega (rad/s) @ Eigenfrequency=6,solid.omega (rad/s) @ Eigenfrequency=7,solid.omega (rad/s) @ Eigenfrequency=8,solid.omega (rad/s) @ Eigenfrequency=9,solid.omega (rad/s) @ Eigenfrequency=10,solid.omega (rad/s) @ Eigenfrequency=11,solid.omega (rad/s) @ Eigenfrequency=12")
dataTest = []
#-- Comsol v4.3
dataTest.append("0 -2.0320000000000002E-5 3.163184658904194E-8-5.253257571105478E-12i 3.1631848087487895E-8-5.2532620879589966E-12i 3.16318525942391E-8-5.253275673017112E-12i 3.163186010598492E-8-5.25329831660938E-12i 3.1631870623465245E-8-5.253330021474611E-12i 3.163188414771718E-8-5.253370791450161E-12i 3.163190068007449E-8-5.2534206314766505E-12i 3.1631920222168254E-8-5.2534795475923665E-12i 3.1631942775926096E-8-5.253547546939175E-12i 3.163196834357526E-8-5.2536246377639015E-12i 3.1631996927641414E-8-5.253710829427027E-12i 3.1632028530949193E-8-5.2538061323995324E-12i 3.163206315662417E-8-5.253910558267347E-12i 3.1632100808093434E-8-5.254024119742534E-12i")
#-- Comsol v4.4
dataTest.append("0,-1.0E-6,40217.86220191298-24.1687515218126i,40217.86220191298+24.1687515218126i,112699.22619363452-69.58419851241112i,112699.22619363452+69.58419851241112i,146594.47787933613-94.7889630852653i,146594.47787933613+94.7889630852653i,154652.19752320193-110.38330785740403i,154652.19752320193+110.38330785740403i,180043.40522204724-108.3307881079505i,180043.40522204724+108.3307881079505i,182426.34065921057-206.1517842909712i,182426.34065921057+206.1517842909712i,192912.50407993805-158.3260394512793i,192912.50407993805+158.3260394512793i")
comDat = ComsolData()
for header in headerTest:
hTuple = comDat.re_header.findall(header[1:].strip())
print "hTuple:", hTuple, "\n"
comDat = ComsolData()
for data in dataTest:
dTuple = comDat.re_data.findall(data)
print "dTuple:", dTuple, "\n"
dataFnames = ["gibble/laser_cavity/comsol/resulting_data/" +
"comsol_cav1a_params_02_epo02_epop2_2mil_r5.2_R12.7.txt",
"gibble/laser_cavity/comsol/freqresp_8mil_0.05loss_epo_6-6.8kHz_mirdve.txt"
]
wstdout( "="*79 + "\n" )
comDat = ComsolData()
#print dataFnames
for fname in dataFnames:
print fname
wstdout( " Loading data from " + fname + "\n" )
comDat.importSpreadsheetData(os.path.join( os.path.expanduser("~"), fname ))
dataFnames = [
"gibble/laser_cavity/comsol/fresp_0.8mil_0.05loss_1-6.3_6.7-50_20step_pgaus_mesh5_1.5.txt",
"gibble/laser_cavity/comsol/fresp_0.8mil_0.05loss_6.3-6.7kHz_0.5Hzstep_pgaus_mesh5_1.5.txt",
"gibble/laser_cavity/comsol/fresp_0.8mil_0.05loss_50-100k_20step_pgaus_mesh5_1.5.txt",
"gibble/laser_cavity/comsol/fresp_0.8mil_0.05loss_100-150k_20step_pgaus_mesh5_1.5.txt"
]
comDat = concatenateData(dataFnames)
```
#### File: jllanfranchi/pygeneric/mystats.py
```python
import scipy.stats as stat
import scipy.optimize as opt
import numpy as np
import lmfit
def var_ci(var, n, ci):
df = n - 1
tail_prob = (1.-ci)/2.
chi2_left = stat.chi2.ppf(q=tail_prob, df=df)
chi2_right = stat.chi2.ppf(q=1-tail_prob, df=df)
lb = df * var / chi2_right
ub = df * var / chi2_left
return lb, ub
def sd_ci(sd, n, ci):
vlb, vub = var_ci(var=sd**2, n=n, ci=ci)
return np.sqrt(vlb), np.sqrt(vub)
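# Illustrative usage sketch (not in the original): a 95% confidence interval
# on a sample standard deviation estimated from n measurements; the numbers
# are made up.
#
#     lb, ub = sd_ci(sd=0.12, n=30, ci=0.95)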
def sd_exp_negllh(params, x, sd_meas, n_meas):
# Evaluate the exponential function at the n values
#sd_proposal = alpha*np.exp(beta*(1.0/n_meas))
sd_proposal = params[0]*np.exp(params[1]*x)
    # Assuming a chi-squared PDF, the prob. of seeing the actual data point,
    # sd, is just the value of the chi-sq distribution generated by
    # sd_proposal and the n here
    df = n_meas - 1
llh = np.sum(
stat.chi2.logpdf(x=((sd_meas/sd_proposal)**2)*df, df=df)
)
return -llh
def exp_negllh(params, x, sd_meas, n_meas):
# Evaluate the exponential function at the n values
#sd_proposal = alpha*np.exp(beta*(1.0/n_meas))
sd_proposal = params[0]*np.exp(params[1]*x)
    # NOTE: despite the original comment mentioning a normal PDF, this
    # evaluates the same chi-squared likelihood as sd_exp_negllh: the prob. of
    # the measured sd is taken from the chi-sq distribution implied by
    # sd_proposal and n
    df = n_meas - 1
llh = np.sum(
stat.chi2.logpdf(x=((sd_meas/sd_proposal)**2)*df, df=df)
)
return -llh
def sd_pwr_negllh(params, x, sd_meas, n_meas):
# Evaluate the exponential function at the n values
#sd_proposal = alpha*np.exp(beta*(1.0/n_meas))
x = np.log10(x)
sd_proposal = params[0]*x**(params[1])
    # Assuming a chi-squared PDF, the prob. of seeing the actual data point,
    # sd, is just the value of the chi-sq distribution generated by
    # sd_proposal and the n here
    df = n_meas - 1
llh = np.sum(
stat.chi2.logpdf(x=((sd_meas/sd_proposal)**2)*df, df=df)
)
return -llh
def lmfit_sd_exp_negllh(params, x, sd_meas, n_meas):
    return sd_exp_negllh(params=[params['alpha'].value, params['beta'].value],
                         x=x, sd_meas=sd_meas, n_meas=n_meas)
def sd_maxllh_fit(sd_meas, n_meas, alpha0=1, beta0=-1):
#out = opt.minimize(sd_exp_negllh, x0=[alpha0, beta0], args=(1/n_meas, sd_meas, n_meas), method='CG')
#out = opt.minimize(sd_pwr_negllh, x0=[alpha0, beta0], args=(1/n_meas, sd_meas, n_meas), method='CG')
out = opt.minimize(exp_negllh, x0=[alpha0, beta0], args=(1/n_meas, sd_meas, n_meas), method='CG')
print out
return out
def lmfit_sd_maxllh_fit(sd_meas, n_meas):
params = lmfit.Parameters()
params.add('alpha', value=1, min=0.0)
params.add('beta', value=-1, max=0.0)
out = lmfit.minimize(lmfit_sd_exp_negllh, params, args=(1/n_meas, sd_meas, n_meas))
print out
return out
```
#### File: jllanfranchi/pygeneric/tek3kComms.py
```python
from __future__ import with_statement
from __future__ import division
import sys
import os
import time
import glob
import serial
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import struct
import re
import time
import urllib2
import urllib
import urlparse
from smartFormat import *
from instrumentComms import *
if os.name == 'nt':
    import scanwin32
if os.name == 'posix':
    pass
def wstdout(s):
sys.stdout.write(s)
sys.stdout.flush()
def wstderr(s):
sys.stderr.write(s)
sys.stderr.flush()
#===============================================================================
# Module exception definitions
#===============================================================================
#===============================================================================
# Conversions convenient for module
#===============================================================================
def interpretWFMPREString(s):
sDict = {}
s = s.split(";")
sDict = {
'byteNum' : int(s[0]), # number of bytes per datum
'bitNum' : int(s[1]), # number of bits per datum
'encoding' : s[2], # one of ASC or BIN
'binFmt' : s[3], # one of RI or RP
'byteOrder' : s[4], # one of MSB, LSB
'numPts' : int(s[5]), # number of data points
'WFID' : s[6], # string describing scope params
'ptFmt' : s[7], # one of ENV or Y (?)
'xIncr' : np.float64(s[8]), # x increment (floating point value)
'xPtOffset' : np.float64(s[9]),
'xZero' : np.float64(s[10]),
'xUnit' : s[11].replace('"', ''),
'yMult' : np.float64(s[12]),
'yZero' : np.float64(s[13]),
'yOffset' : np.float64(s[14]),
'yUnit' : s[15].replace('"', '')
}
return sDict
def interpretRawData(bd, **kwargs):
encoding = kwargs['encoding']
binFmt = kwargs['binFmt']
byteNum = kwargs['byteNum']
byteOrder = kwargs['byteOrder']
yOffset = kwargs['yOffset']
numPts = kwargs['numPts']
yZero = kwargs['yZero']
yMult = kwargs['yMult']
xZero = kwargs['xZero']
xIncr = kwargs['xIncr']
xPtOffset = kwargs['xPtOffset']
if encoding == 'BIN':
if byteOrder == "MSB":
fmt0 = ">"
else:
fmt0 = "<"
if (binFmt == "RP" or binFmt == "SRP") and byteNum == 2:
fmt1 = "h"
elif (binFmt == "RI" or binFmt == "SRI") and byteNum == 2:
fmt1 = "H"
elif (binFmt == "RP" or binFmt == "SRP") and byteNum == 1:
fmt1 = "b"
elif (binFmt == "RI" or binFmt == "SRI") and byteNum == 1:
fmt1 = "B"
rawSamples = np.array([ struct.unpack(fmt0+fmt1, bd[n:n+byteNum])[0]
for n in range(0,len(bd),byteNum) ],
dtype=np.float64)
if encoding == 'ASC':
rawSamples = np.array(bd.split(','), dtype=np.float64)
samples = yZero + yMult*(rawSamples-yOffset)
t = xZero + xIncr*(np.arange(0,numPts)-xPtOffset)
return t, samples
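# Illustrative usage sketch (not in the original): converting a WFMPre?
# preamble string and a CURVe? payload (already read from the scope) into
# scaled time/voltage vectors; `preamble` and `raw` are placeholder names.
#
#     meta = interpretWFMPREString(preamble)
#     t, v = interpretRawData(raw, **meta)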
class Tek3kComms(InstrumentComms):
def __init__(self, useSerial=True, useEthernet=True,
ipAddress="0.0.0.0", ethPort=80,
serialPortName=None,
baud=38400, bytesize=8, parity='N', stopbits=1,
timeout=2, xonxoff=False, rtscts=False, dsrdtr=True,
serCmdPrefix="", serCmdTerm='\n', serIdnCmd='*IDN?',
ethCmdPrefix='COMMAND=',ethCmdTerm='\r\ngpibsend=Send\r\n',
ethDataRE=re.compile('.*<TEXTAREA.*>(.*)</TEXTAREA>'),
ethIdnCmd='*IDN?',
argSep=""):
super(Tek3kComms, self).__init__(
useSerial=useSerial, useEthernet=useEthernet,
ipAddress=ipAddress, ethPort=ethPort,
serialPortName=serialPortName,
baud=baud, bytesize=bytesize, parity=parity, stopbits=stopbits,
timeout=timeout, xonxoff=xonxoff, rtscts=rtscts, dsrdtr=dsrdtr,
serCmdPrefix=serCmdPrefix, serCmdTerm=serCmdTerm,
serIdnCmd=serIdnCmd,
ethCmdPrefix=ethCmdPrefix, ethCmdTerm=ethCmdTerm,
ethDataRE=ethDataRE, ethIdnCmd=ethIdnCmd,
argSep=argSep)
def __ethernetQuery(self, command, returnResponse=True, upperCaseOnly=True):
url = 'http://' + self.scopeIP + '/Comm.html' #+ str(self.scopePort)
if upperCaseOnly:
command2 = self.lowerRegex.sub('', command)
else:
command2 = command
#wstderr("\""+ command2 +"\"\n")
httpPostSendStr = self.ETHCMD_PREFIX + command2 + self.ETHCMD_TERM
fullRequest = urllib2.Request(url, httpPostSendStr)
cnxn = urllib2.urlopen(fullRequest)
httpPostReturnStr = cnxn.read()
#wstdout('-'*80 + '\n' + str(httpPostSendStr) + '\n')
#wstdout(str(httpPostReturnStr) + '\n' )
if returnResponse:
response = self.ETHDATA_RE.findall(httpPostReturnStr)[0]
#wstdout(str(response) + '-'*80 + '\n')
return response
        else:
pass
#wstdout('-'*80 + '\n')
def __ethernetBinaryQuery(self, command, returnResponse=True):
"""??? -- Haven't figured out how to do binary data via HTTP post;
possibly if I used a different command altogether (see web
interface, as they have a data download function there)"""
url = 'http://' + self.scopeIP + '/Comm.html' #+ str(self.scopePort)
s = self.ETHCMD_PREFIX + command + self.ETHCMD_TERM
httpPostSendStr = s
fullRequest = urllib2.Request(url, httpPostSendStr)
cnxn = urllib2.urlopen(fullRequest)
httpPostReturnStr = cnxn.read()
#httpPostReturnStr += cnxn.read()
#httpPostReturnStr += cnxn.read()
#wstdout('-'*80 + '\n' + str(httpPostSendStr) + '\n')
#wstdout(str(httpPostReturnStr) + '\n' )
if returnResponse:
response = self.ETHDATA_RE.findall(httpPostReturnStr)[0]
#wstdout(str(response) + '-'*80 + '\n')
return response
else:
pass
#wstdout('-'*80 + '\n')
def getActiveChannels(self):
selected = self.query("SELect?").split(';')
self.config['activeChannels'] = []
if "CH" in selected:
#-- This means verbose mode is on
rxEnabled = re.compile(r"(CH|REF|MATH)([1-4]) [1]",
flags=re.IGNORECASE)
for s in selected:
                r = rxEnabled.findall(s)
if len(r) > 0:
self.config['activeChannels'].append( ''.join(r[0]) )
else:
nCh = 4
nMath = 1
nRef = 4
for n in range(nCh):
if selected[n] == '1':
self.config['activeChannels'].append('CH' + str(n+1))
for n in range(nMath):
if selected[nCh+n] == '1':
self.config['activeChannels'].append('MATH')
for n in range(nRef):
if selected[nCh+nMath+n] == '1':
                    self.config['activeChannels'].append('REF' + str(n+1))
return self.config['activeChannels']
def grabWaveform(self, channel):
self.tell("DATa:SOUrce " + channel)
waveformMetaData = self.query("WFMPre?")
waveform = self.binQuery("CURVe?")
metaDataDict = interpretWFMPREString(waveformMetaData)
xVec, yVec = interpretRawData(waveform, **metaDataDict)
self.data[channel] = {}
self.data[channel]['rawdata'] = waveform
self.data[channel]['x'] = xVec
self.data[channel]['y'] = yVec
#-- Now append ALL the metadata to the channel's dictionary
self.data[channel].update(metaDataDict)
return xVec, yVec, metaDataDict
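    # Illustrative usage sketch (not in the original): grab one channel's
    # trace after constructing the comms object; the IP address and channel
    # name are placeholders.
    #
    #     scope = Tek3kComms(useEthernet=True, ipAddress="192.168.1.10")
    #     t, v, meta = scope.grabWaveform("CH1")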
def plotMultiChan(self, channels=None):
mpl.rcParams['font.size'] = 7
if channels == None:
channels = self.config['activeChannels']
if not (isinstance(channels, list) or isinstance(channels, tuple)):
channels = [channels]
nChannels = len(channels)
fig = plt.figure(figsize=(8,nChannels*2),dpi=80)
#ax = plt.axes(axisbg='k')
#colors = { \
# 'CH1' : (1,1,.1),
# 'CH2' : (.1,1,1),
# 'CH3' : (1,.1,1),
# 'CH4' : (.1,1,.1),
# 'MATH': (1,.05,.05),
# 'MATH1': 'r',
# 'REF1': 'w',
# 'REF2': (.5,.5,.5),
# 'REF3': (.8,.4,.3),
# 'REF4': (.3,.4,.8) }
colors = { \
'CH1' : (.6,.6,0),
'CH2' : (0,0.6,0.6),
'CH3' : (0.8,0.2,0.5),
'CH4' : (0,0.6,0),
'MATH': 'k',
'MATH1': 'k',
'REF1': (.6,.6,.2),
'REF2': (.3,.3,.3),
'REF3': (.6,.4,.3),
'REF4': (.3,.4,.6) }
linestyles = { \
'CH1' : '-',
'CH2' : '--',
'CH3' : '-.',
'CH4' : ':',
'MATH': '-',
'MATH1': 'r',
'REF1': 'w',
'REF2': (.5,.5,.5),
'REF3': (.8,.4,.3),
'REF4': (.3,.4,.8) }
allAxes = []
n = 0
for channel in channels:
try:
x = self.data[channel]['x']
y = self.data[channel]['y']
            except:
                self.retrieveSingleWaveform(channel)
                x = self.data[channel]['x']
                y = self.data[channel]['y']
#print channel, len(x), len(y), x[0], x[-1], y[0], y[-1]
#print x[0:10], y[0:10]
#print x[-11:-1], y[-11:-1]
xUnit = self.data[channel]['xUnit']
yUnit = self.data[channel]['yUnit']
if n == 0:
ax = fig.add_subplot(nChannels,1,n+1, axisbg='w')
else:
ax = fig.add_subplot(nChannels,1,n+1, axisbg='w',
sharex=allAxes[0])
allAxes.append(ax)
ax.plot(x,y, color=colors[channel.upper()], label=channel,
linewidth=0.75)
plt.xlabel(xUnit)
plt.ylabel(yUnit)
ax.hold(True)
n += 1
plt.title(channel)
ax.grid(True, color=(.25,.25,.25))
plt.xlim((min(x), max(x)))
ax.hold(False)
if nChannels == 1:
fig.subplots_adjust(top=0.85, bottom=0.2)
elif nChannels == 2:
fig.subplots_adjust(hspace=0.4, top=0.915)
elif nChannels == 3:
fig.subplots_adjust(hspace=0.425, top=0.925)
elif nChannels == 4:
fig.subplots_adjust(hspace=0.445, top=0.945)
elif nChannels == 5:
fig.subplots_adjust(hspace=0.485, top=0.945, bottom=0.07)
elif nChannels == 6:
fig.subplots_adjust(hspace=0.7, top=0.945, bottom=0.06)
elif nChannels == 7:
fig.subplots_adjust(hspace=0.8, top=0.955, bottom=0.06)
elif nChannels == 8:
fig.subplots_adjust(hspace=0.8, top=0.955, bottom=0.06)
return fig
def plotChannel(self, channel):
try:
x = self.data[channel]['x']
y = self.data[channel]['y']
        except:
            self.retrieveSingleWaveform(channel)
            x = self.data[channel]['x']
            y = self.data[channel]['y']
xUnit = self.data[channel]['xUnit']
yUnit = self.data[channel]['yUnit']
        plt.figure()
        plt.plot(x, y)
        plt.title(channel + " data")
        plt.xlabel(xUnit)
        plt.ylabel(yUnit)
def testPlot(self, numWaveforms=2):
wfRef = ['CH1','CH2','CH3','CH4','MATH','REF1','REF2','REF3','REF4']
waveforms = wfRef[0:numWaveforms]
self.config['activeChannels'] = waveforms
for wf in waveforms:
x = np.arange(-5,-5+.001*10000,.001)
y = np.random.standard_normal(10000) + (np.random.rand()-.5)*10
self.data[wf] = {}
self.data[wf]['x'] = x
self.data[wf]['y'] = y
self.data[wf]['xUnit'] = 's'
self.data[wf]['yUnit'] = 'V'
self.plotMultiChan(waveforms)
if __name__ == "__main__":
tk = Tek3kComms()
tk.testPlot(4)
plt.show()
```
#### File: jllanfranchi/pygeneric/tek3kGrabData.py
```python
from __future__ import with_statement
from __future__ import division
import sys
import os
import time
import numpy as np
import re
import time
import csv
import socket
import argparse
import matplotlib.pyplot as plt
from tek3kComms import Tek3kComms
def wstdout(txt):
sys.stdout.write(txt)
sys.stdout.flush()
def wstderr(txt):
sys.stderr.write(txt)
sys.stderr.flush()
def main(description=""):
scriptStartTime = time.time()
parser = argparse.ArgumentParser(description=
"Simple data grab from TEK 3k scope")
parser.add_argument('--plot', dest='showPlot', action='store_true',
help="display a plot of the data to the user")
parser.add_argument('--dont_save_plot', dest='dontSavePlot',
action='store_true',
help="overrides default behavior which savea a plot " +
"of the data in the data destination dir; note " +
"that you must specify --show_plot in order to " +
"actually see the plots at the time the script is " +
"run.")
args = parser.parse_args()
wstderr("Initializing script & scope...")
homeDir = os.path.expanduser("~")
dataBaseDir = os.path.join(homeDir, "gibble", "data")
dataSubDir = os.path.join(dataBaseDir, time.strftime("%Y"),
time.strftime("%m-%b"),
time.strftime("%d"))
#-- Find existing run sub-directories and label this run accordingly
if os.path.isdir(dataSubDir):
existingRunDirs = [ d for d in os.listdir(dataSubDir) if
os.path.isdir(os.path.join(dataSubDir,d)) ]
else:
existingRunDirs = []
runDirRE = re.compile(r"^run([0-9]+)")
runDirNumsTaken = [0]
for d in existingRunDirs:
rd = runDirRE.findall(d)
if len(rd) > 0:
runDirNumsTaken.append(int(rd[0]))
runDirNum = max(runDirNumsTaken) + 1
runSubDir = "run" + '{:04d}'.format(runDirNum)
dataDir = os.path.join(dataSubDir, runSubDir)
hostName = socket.gethostbyaddr(socket.gethostname())[0]
filePrefix = "scope_"
#commsType = "Ethernet"
ipAddress = "192.168.3.11"
httpPort = 80
try:
#-- Set up with serial port comms
tekScope = Tek3kComms(useSerial=True)
tekScope.simpleSerial()
commsType = "serial"
except:
#-- If that failed, try to set up with Ethernet comms
tekScope = Tek3kComms(useEthernet=True, ipAddress=ipAddress)
commsType = "Ethernet"
completeConfigStr = tekScope.query(
"HEADer ON;VERBose ON;*LRN?;HEADer OFF;VERBose OFF")
#-- Turn off headers
tekScope.tell("HEADer OFF")
if commsType == "serial":
tekScope.tell("RS232:TRANsmit:TERMinator LF;RS232:HARDFlagging OFF")
#tekScope.tell("RS232:HARDFlagging OFF")
#-- TODO: the following is good sometimes, but bad if you want the user
# to be able to force a trigger, for example... and maybe the
# software should force a trigger?
##-- Lock scope
#tekScope.tell("LOCk ALL")
idnStr = tekScope.query("*IDN?")
wstderr(" done.\nWaiting for acquisition sequence to complete...")
try:
#-- Grab original scope acquisition parameters
origAcqMode = tekScope.query("ACQuire:MODe?")
origAcqStopAfter = tekScope.query("ACQuire:STOPAfter?")
origAcqState = tekScope.query("ACQuire:STATE?")
#-- Wait until a full acquisition completes
#tekScope.tell("WAI")
#print "busy:", tekScope.query("BUSY?")
##-- Set acquisition state if necessary
#if "0" not in origAcqState:
# #-- If it was running...
# if origAcqStopAfter[0:3].upper() != "SEQ":
# #-- Set scope to stop after the next sequence completes
# # (useful for really long data sets, but could be undesirable
# # in other situations, so ymmv)
# tekScope.tell("ACQuire:STOPAfter SEQuence")
##if origAcqStopAfter[0:5].upper() != "RUNST":
## tekScope.tell("ACQuire:STOPAfter RUNSTop")
##if "0" not in origAcqState:
## tekScope.tell("ACQuire:STATE 0")
#-- TODO: The following, when "Auto" triggering is used, waits until
# *TWO* sequences come in, which can be bad if time scale is
# set too long. In this case, the program should compute how
# much time a single sequence will take, and wait that much
# time OR just force an acquisition "right now". If the
# scope isn't already triggered, should we have the software
# be able to force trigger, if user wishes so?
#-- Wait for a full acquisition (or series, in case of ENV or AVE)
if "0" not in origAcqState:
nAcqExpected = 1
averaging = False
envelope = False
if origAcqMode[0:3].upper() == "AVE":
averaging = True
numAvg = int(tekScope.query("ACQuire:NUMAVg?"))
nAcqExpected = numAvg
elif origAcqMode[0:3].upper() == "ENV":
envelope = True
numEnv = int(tekScope.query("ACQuire:NUMEnv?"))
nAcqExpected = numEnv
reportedNumAcq = []
while True:
nAcq = int(tekScope.query("ACQuire:NUMACq?"))
if nAcq not in reportedNumAcq:
wstderr("\n Number of acq reported: " + str(nAcq) + " (of "
+ str(nAcqExpected) + ")")
reportedNumAcq.append(nAcq)
if nAcq >= nAcqExpected:
break
wstderr("\n")
tekScope.tell("ACQuire:STATE 0")
#-- Set filename (and hence timestamp) to now
timeNow = time.strftime("%Y-%m-%dT%H%M%S") + \
"{0:+05d}".format(-int(round(time.timezone/3600)*100))
baseFname = filePrefix + timeNow + "_"
#-- Set data fomratting
if commsType == "Ethernet":
#-- must do this until I figure out how to get binary data via HTTP
dataType = "ASCII"
tekScope.tell("DATa:ENCdg ASCII;DATa:WIDth 2")
else:
dataType = "RIBinary"
tekScope.tell("DATa:ENCdg RIBinary;DATa:WIDth 2")
#-- Find out what channels are active
activeChannels = tekScope.getActiveChannels()
wstderr(" done.\n\n")
data = []
for channel in activeChannels:
#-- Retrieve data from scope
wstderr("Grabbing " + channel + " data...")
t0 = time.time()
xVec, yVec, metaDataDict = tekScope.grabWaveform(channel)
t1 = time.time()
data.append(metaDataDict)
qualDataFname = os.path.join(dataDir,
baseFname + channel + "_data.csv")
qualMetaFname = os.path.join(dataDir,
baseFname + channel + "_meta.txt")
qualPlotPDFFname = os.path.join(dataDir,
baseFname + "plots.pdf")
qualPlotPNGFname = os.path.join(dataDir,
baseFname + "plots.png")
wstderr(" saving data to\n\"" + qualDataFname + "\"\n" +
" saving metadata to\n\"" + qualMetaFname + "\"\n" )
#-- Create directory if necessary (and if possible)
if os.path.exists(dataDir):
if not os.path.isdir(dataDir):
raise Exception("Wanted to create directory " + dataDir +
" but this path is a file.")
else:
os.makedirs(dataDir, mode=0770)
with open(qualDataFname, 'w') as f:
c = csv.writer(f)
for n in range(len(xVec)):
c.writerow([xVec[n], yVec[n]])
with open(qualMetaFname, 'w') as f:
f.write("dateTime=" + timeNow + "\r\n")
f.write("description=" + description + "\r\n")
f.write("HOSTNAME=" + hostName + "\r\n")
f.write("dataPath=\"" + qualDataFname + "\"\r\n")
f.write("metaPath=\"" + qualMetaFname + "\"\r\n")
f.write("IDN=" + idnStr + "\r\n")
keys = metaDataDict.keys()
keys.sort()
for key in keys:
f.write(key + "=" + str(metaDataDict[key]) + "\r\n")
f.write("completeConfig=" + completeConfigStr + "\r\n")
wstderr("\n")
#-- Reset to original run state
#if origAcqStopAfter != "RUNST":
# tekScope.tell("ACQuire:STOPAfter " + origAcqStopAfter)
if "0" not in origAcqState:
tekScope.tell("ACQuire:STATE " + origAcqState)
scriptEndTime = time.time()
wstderr("Total script time: " +
str(round((scriptEndTime-scriptStartTime)*10)/10) + " s.\n")
finally:
tekScope.tell("LOCk 0")
if not args.dontSavePlot or args.showPlot:
fig = tekScope.plotMultiChan()
if not args.dontSavePlot:
fig.savefig(qualPlotPDFFname, format='pdf')
fig.savefig(qualPlotPNGFname, format='png')
if args.showPlot:
plt.show()
if __name__ == "__main__":
description = raw_input(
"Type a description of the data or <enter> to continue\n")
main(description)
```
#### File: jllanfranchi/pygeneric/texTable.py
```python
import numpy as np
from smartFormat import smartFormat
def texTable(array, headings=None, tableFormat=None, numFormat=[(1,5)],
precision=[5], nanSub=['---'], inftyThresh=[np.infty]):
if not isinstance(array, np.ndarray):
array = np.array(array)
nRows = np.shape(array)[0]
print nRows
try:
nCols = np.shape(array)[1]
except:
nCols = nRows
nRows = 1
#-- Begin environment and specify centering, etc. for each col
table = r"\begin{tabular}{"
if tableFormat != None:
table += tableFormat
else:
table += "c"*nCols
table += r"}" + "\n"
#-- Add headings if present
if headings != None:
table += r" & ".join(headings)
else:
table += r" & ".join([r"\ " for n in range(nCols)])
#-- Add horizontal line
table += r" \\ \midrule"
#-- Add table entries
for rowN in range(nRows):
table += "\n" + r" & ".join([smartFormat(array[rowN,colN])
for colN in range(nCols)])
table += r"\\ \addlinespace[5pt]"
if headings == None:
table += r" \midrule" + "\n"
else:
table += "\n"
#-- Close out environment
table += r"\end{tabular}" + "\n"
return table
#if len(numFormat) == 1:
# numFormat = numFormat*nCols
#if len(precision) == 1:
# precision = precision*nCols
#if len(nanSub) == 1:
# nanSub = nanSub*nCols
#if len(inftyThresh) == 1:
# inftyThresh = inftyThresh*nCols
#for colNum in range(nCols):
# vals = array[:,colNum]
# allInts = True
# for val in vals:
# allInts = allInts and isInt(val)
# maxEl = np.max(vals)
# minEl = np.min(vals)
# #-- Compute minimum resolution for discerning elements
# v2 = vals.copy()
# v2.sort()
# aDiff = np.abs(np.diff(v2))
# aDiff = aDiff[aDiff>0]
# if len(aDiff) > 0:
# res = np.min(aDiff[aDiff >
# (10**-(precision+np.floor(np.log10(minEl)))])
# else:
# res = 0
#
# #-- Multiplicative dynamic range
# MDR = np.ceil(np.log10(maxEl)-np.log10(minEl))
# #-- Additive dynamic range
# ADR = np.ceil(np.log10(maxEl-minEl))
#
# dynamicRange = np.ceil(np.log10(maxEl)-np.log10(minEl))
# if dynamicRange <= precision[colNum]:
# fixExp = True
# fixedExp = np.floor(np.log10(minEl))
# else:
# fixExp = False
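# Rough shape of the output (illustrative only; numeric entries are rendered by
# smartFormat, so exact formatting may differ):
#   texTable([[1, 2]], headings=['a', 'b']) ->
#     \begin{tabular}{cc}
#     a & b \\ \midrule
#     1 & 2\\ \addlinespace[5pt]
#     \end{tabular}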
if __name__ == "__main__":
header = r"""\documentclass{article}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{booktabs}
\usepackage[letterpaper,landscape]{geometry}
\begin{document}{
\begin{centering}
"""
a = np.array([np.logspace(-4,10,10),np.logspace(-4,10,10)*10])
headings = ['Col ' + str(n) for n in range(np.size(a,1))]
body = texTable(a, headings=headings)
footer = r"""
\end{centering}
}\end{document}"""
print header, body, footer
```
#### File: jllanfranchi/pygeneric/unmerge.py
```python
from __future__ import absolute_import, division, print_function
from argparse import ArgumentParser
from os.path import expanduser, expandvars, isfile, split, splitext
S_BOTH = 0
S_F0_ONLY = 1
S_F1_ONLY = 2
def unmerge(fpath):
"""Take a file that has been marked up by git with merge conflicts and
write the two original (conflicting) source files.
The new files will be named like the original, but suffixed by
"_<commit/branch><ext>" where the extension is taken from the original
filename.
Parameters
----------
fpath : string
Path to the file to "unmerge"
Returns
-------
f0_path : string
Path to the new file created, file 0
f0 : list of strings
Each line of file 0 (including newlines)
f1_path : string
Path to the new file created, file 1
f1 : list of strings
Each line of file 1 (including newlines)
"""
fpath = expanduser(expandvars(fpath))
#srcdir, fpath = split(fpath)
basepath, ext = splitext(fpath)
with open(fpath, 'r') as fhandle:
contents = fhandle.readlines()
f0, f1 = [], []
f0_name, f1_name = None, None
state = S_BOTH
for line_no, line in enumerate(contents):
if line.startswith('<<<<<<< '):
if state != S_BOTH:
raise ValueError('Line {}: got "<<<<<<< " but not in S_BOTH'
.format(line_no))
state = S_F0_ONLY
if f0_name is None:
f0_name = line.lstrip('<<<<<<< ').strip().replace('/', '_')
continue
elif line.startswith('======='):
if state != S_F0_ONLY:
raise ValueError('Line {}: got "=======" but not in S_F0_ONLY'
.format(line_no))
state = S_F1_ONLY
continue
elif line.startswith('>>>>>>> '):
if state != S_F1_ONLY:
                raise ValueError('Line {}: got ">>>>>>> " but not in S_F1_ONLY'
.format(line_no))
state = S_BOTH
if f1_name is None:
f1_name = line.lstrip('>>>>>>> ').strip().replace('/', '_')
continue
if state in (S_BOTH, S_F0_ONLY):
f0.append(line)
if state in (S_BOTH, S_F1_ONLY):
f1.append(line)
if f0_name is None or f1_name is None:
print('Nothing to unmerge.')
return
new_f0_path = basepath + '_' + f0_name + ext
new_f1_path = basepath + '_' + f1_name + ext
for new_fpath, new_contents in zip((new_f0_path, new_f1_path), (f0, f1)):
if isfile(new_fpath):
print('"{}" already exists, not overwriting.'.format(new_fpath))
else:
with open(new_fpath, 'w') as outfile:
outfile.writelines(new_contents)
return new_f0_path, f0, new_f1_path, f1
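# Example (hypothetical file contents): given "settings.py" containing
#   <<<<<<< HEAD
#   DEBUG = True
#   =======
#   DEBUG = False
#   >>>>>>> feature/logging
# unmerge('settings.py') writes "settings_HEAD.py" and
# "settings_feature_logging.py", each holding one side of the conflict.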
def parse_args(descr=__doc__):
"""Parse command line arguments"""
arg_parser = ArgumentParser(description=descr)
arg_parser.add_argument('fpath')
args = arg_parser.parse_args()
return args
def main():
"""Main function if calling as script"""
args = parse_args()
unmerge(args.fpath)
if __name__ == '__main__':
main()
``` |
{
"source": "jllang/DistSys2019Assignment1",
"score": 4
} |
#### File: DistSys2019Assignment1/NetworkedPythonRPS/game.py
```python
class Game:
def __init__(self, id):
# initially player 1's move is false
self.p1Went = False
# initially player 2's move is false
self.p2Went = False
self.ready = False
# current game's id
self.id = id
        # the two players' moves, initially None: [player1's move, player2's move]
        self.moves = [None, None]
        # win counts, initially 0 for both player1 and player2
        self.wins = [0,0]
        # number of ties so far
self.ties = 0
# get the player's move, either player 1's move or player 2's move
def get_player_move(self, p):
return self.moves[p]
# updates the moves list with that certain player's move
def play(self, player, move):
self.moves[player] = move
        # mark that this player has now made a move
if player == 0:
self.p1Went = True
else:
self.p2Went = True
    # check whether both players are connected to the game
def connected(self):
return self.ready
# check if both of the player made a move or not
def bothWent(self):
return self.p1Went and self.p2Went
# deciding the winner
def winner(self):
        # taking only the first letter of the move (e.g. R from Rock)
p1 = self.moves[0].upper()[0]
p2 = self.moves[1].upper()[0]
winner = -1
if p1 == "R" and p2 == "S":
winner = 0
elif p1 == "S" and p2 == "R":
winner = 1
elif p1 == "P" and p2 == "R":
winner = 0
elif p1 == "R" and p2 == "P":
winner = 1
elif p1 == "S" and p2 == "P":
winner = 0
elif p1 == "P" and p2 == "S":
winner = 1
return winner
# after the game we reset the state of the players
def resetWent(self):
self.p1Went = False
self.p2Went = False
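# Minimal usage sketch (hypothetical driver code, not part of this file):
#   g = Game(0)
#   g.play(0, "Rock")
#   g.play(1, "Scissors")
#   if g.bothWent():
#       g.winner()     # -> 0, player 1 wins (Rock beats Scissors)
#       g.resetWent()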
``` |
{
"source": "JLLeitschuh/blueflood",
"score": 2
} |
#### File: ops/rackspace-agent-plugins/bf-rollups-delay.py
```python
'''For each rollup level, lists the number of slots which need to processed by blueflood. For the 5m range, one day is 288 slots.'''
# Licensed to Rackspace under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Rackspace licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."
#
# The following is an example 'criteria' for a Rackspace Monitoring Alarm:
#
# if (metric['metrics_5m_delay'] > 300 ) {
# return new AlarmStatus( WARNING, 'metrics_5m_delay has > 300 slots waiting to be rolled up.' );
# }
#
import pycassa
import sys
import time
import logging
import os
import argparse
from collections import defaultdict
SLOTS = 4032
MILLIS_IN_BASE_SLOT = 300000
GRAN_MAPPINGS = {
'metrics_5m': {'max_slots': 4032, 'milliseconds_in_slot': 300000},
'metrics_20m': {'max_slots': 1008, 'milliseconds_in_slot': 1200000},
'metrics_60m': {'max_slots': 336, 'milliseconds_in_slot': 3600000},
'metrics_240m': {'max_slots': 84, 'milliseconds_in_slot': 14400000},
'metrics_1440m': {'max_slots': 14, 'milliseconds_in_slot': 86400000}
}
def __is_more_available(len_fetched, page_size):
return (len_fetched >= page_size)
def get_metrics_state_for_shard(shard, cf):
page_size = 100 # Pycassa has an implicit max limit of 100
start = ''
states = {}
while True:
batch = cf.get(shard, column_start=start,
column_finish='', column_count=page_size)
keys = batch.keys()
states.update(batch)
if not __is_more_available(len(batch), page_size):
# there are no more columns left
break
start = keys[len(batch) - 1]
return states
def get_metrics_state_for_shards(shards, servers):
pool = pycassa.ConnectionPool('DATA',
server_list=servers)
cf = pycassa.ColumnFamily(pool, 'metrics_state')
metrics_state_for_shards = {}
for shard in shards:
metrics_state_for_shards[shard] = get_metrics_state_for_shard(shard,
cf)
return metrics_state_for_shards
def _millis_to_slot(now_millis):
return int((now_millis % (SLOTS * MILLIS_IN_BASE_SLOT))
/ MILLIS_IN_BASE_SLOT)
def _get_slot_for_time(now_millis, gran):
full_slot = _millis_to_slot(now_millis)
return (GRAN_MAPPINGS[gran]['max_slots'] * full_slot) / SLOTS
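# Worked example of the slot arithmetic above (illustrative, not called by the script):
# for metrics_5m one day is 86400000 / 300000 = 288 base slots, as in the module docstring.
# With now_millis = 1000 * MILLIS_IN_BASE_SLOT:
#   _millis_to_slot(now_millis)                   -> 1000  (1000 % 4032)
#   _get_slot_for_time(now_millis, 'metrics_60m') -> 83    ((336 * 1000) / 4032, integer division)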
def print_stats_for_metrics_state(metrics_state_for_shards, print_res):
delayed_slots = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
now = int(time.time() * 1000)
for shard in metrics_state_for_shards:
states_per_shard = metrics_state_for_shards[shard]
for resolution in GRAN_MAPPINGS.keys():
max_slots = GRAN_MAPPINGS[resolution]['max_slots']
for slot in range(max_slots):
                last_active_key = ','.join([resolution, str(slot), 'A'])
                rolled_up_at_key = ','.join([resolution, str(slot), 'X'])
last_active_timestamp = states_per_shard[last_active_key] if last_active_key in states_per_shard else 0
rolled_up_at_timestamp = states_per_shard[rolled_up_at_key] if rolled_up_at_key in states_per_shard else 0
current_slot = _get_slot_for_time(now, resolution)
if (current_slot > slot
and rolled_up_at_timestamp < last_active_timestamp):
# if slot is not rolled up yet, delay measured in slots
delayed_slots[
resolution][shard][slot] = current_slot - slot
if ( print_res == resolution ):
print "shard: %4s last_active_key: %19s rolled_up_at_key: %19s current_slot: %s slot: %s" % ( shard, last_active_key, rolled_up_at_key, current_slot, slot)
print " last_active_timestamp: %19s rolled_up_at_timestamp: %19s" % (last_active_timestamp, rolled_up_at_timestamp)
print " last_active_timestamp: %19s rolled_up_at_timestamp: %19s" % ( time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime( last_active_timestamp/1000)), time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime(rolled_up_at_timestamp/1000)) )
if ( print_res == resolution ):
if (last_active_key not in states_per_shard):
print "WARNING: %s does not exist in shard %s" % (last_active_key, shard)
if (rolled_up_at_key not in states_per_shard):
print "WARNING: %s does not exist in shard %s" % (rolled_up_at_key, shard)
output = {}
for resolution in GRAN_MAPPINGS.keys():
across_shards_most_delay = []
for shard in delayed_slots[resolution].keys():
max_delay = max(delayed_slots[resolution][shard].values())
# print 'Most delay: %d, Res: %s' % (float(max_delay/(1000*60)),
# resolution)
across_shards_most_delay.append(max_delay)
if (len(across_shards_most_delay)):
output[resolution] = max(across_shards_most_delay)
else:
output[resolution] = 0
for resol, delay in output.items():
print 'metric %s uint32 %u' % ('_'.join([resol, 'delay']), delay)
def main():
parser = argparse.ArgumentParser(description='For each rollup level, lists the number of slots which need to '
'be processed by blueflood. One day is approximately 300 slots.')
parser.add_argument( '-s', '--servers', help='Cassandra server IP addresses, space separated', required=True, nargs="+")
parser.add_argument( '-v', '--verbose', help='Print out the unprocessed slots for each shard, for the given granuality. Default: metrics_5m',
required=False, nargs="?", choices=['metrics_5m', 'metrics_20m', 'metrics_60m', 'metrics_240m', 'metrics_1440m'], const='metrics_5m' )
args = parser.parse_args()
try:
logfile = os.path.expanduser('~') + '/bf-rollup.log'
logging.basicConfig(format='%(asctime)s %(message)s',
filename=logfile, level=logging.DEBUG)
shards = range(128)
logging.debug('getting metrics state for shards')
metrics_state_for_shards = get_metrics_state_for_shards(shards,
args.servers)
print 'status ok bf_health_check'
logging.debug('printing stats for metrics state')
print_stats_for_metrics_state(metrics_state_for_shards,
args.verbose)
except Exception, ex:
logging.exception(ex)
print "status error", ex
raise ex
if __name__ == "__main__":
main()
``` |
{
"source": "JLLeitschuh/bulk-security-pr-generator",
"score": 2
} |
#### File: JLLeitschuh/bulk-security-pr-generator/vulnerability_fix_engine.py
```python
import asyncio
import json
import logging
import os
import re
import shutil
import string
from collections import Counter
from dataclasses import dataclass, asdict, field
from random import random
from typing import List, Optional, Dict, Generator
import aiofiles
import github
import time
import github_util
git_hub = github_util.load_github()
github_util.print_current_rate_limit()
class ShallowUpdateNotAllowedException(Exception):
pass
class CouldNotReadFromRemoteRepositoryException(Exception):
pass
class CLRFReplacementException(Exception):
pass
class PullRequestAlreadyExistsException(Exception):
pass
class ForkAlreadyExistsException(Exception):
pass
class AmbiguousObjectNameHeadException(Exception):
pass
async def subprocess_run(args: List[str], cwd: str) -> Optional[str]:
proc = await asyncio.create_subprocess_exec(
args[0],
*args[1:],
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=cwd
)
stdout, stderr = await proc.communicate()
print(f'[{args!r} exited with {proc.returncode}]')
if stdout:
print(f'[stdout]\n{stdout.decode()}')
if proc.returncode != 0:
if stderr:
msg = stderr.decode()
error_msg = f'[stderr]\n{msg}'
if 'timeout' in msg:
raise TimeoutError(error_msg)
if 'shallow update not allowed' in msg:
raise ShallowUpdateNotAllowedException(error_msg)
if 'Could not read from remote repository' in msg:
raise CouldNotReadFromRemoteRepositoryException(error_msg)
if 'A pull request already exists' in msg:
raise PullRequestAlreadyExistsException(error_msg)
if 'Error creating fork' in msg and 'already exists on github.com' in msg:
raise ForkAlreadyExistsException(error_msg)
if ' Ambiguous object name: \'HEAD\'' in msg:
raise AmbiguousObjectNameHeadException(error_msg)
raise RuntimeError(error_msg)
else:
if stderr:
msg = stderr.decode()
error_msg = f'[stderr]\n{msg}'
if 'warning: CRLF will be replaced by LF' in msg:
raise CLRFReplacementException(stderr)
print(error_msg)
if stdout:
return stdout.decode()
else:
return None
@dataclass
class VulnerabilityFixModule:
branch_name: str
clone_repos_location: str
data_base_dir: str
save_point_location: str
pr_message_file_absolute_path: str
commit_message: str
_cached_vulnerable_projects: List['VulnerableProjectFiles'] = field(default_factory=list)
def clean_previous_run(self):
# Cleanup method to get rid of previous files
        logging.info('Beginning Cleanup')
if os.path.isdir(self.clone_repos_location):
shutil.rmtree(self.clone_repos_location)
os.mkdir(self.clone_repos_location)
if not os.path.isdir(self.save_point_location):
os.mkdir(self.save_point_location)
logging.info('Cleanup Complete')
def _list_all_json_files(self) -> Generator[str, None, None]:
directory = os.fsencode(self.data_base_dir)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.startswith('g__') and filename.endswith('.json'):
yield self.data_base_dir + '/' + filename
def should_accept_project(self, project_name: str) -> bool:
return False
@staticmethod
def _read_repository_and_file_names(json_file_name: str) -> 'VulnerableProjectFiles':
with open(json_file_name) as jsonFile:
data = json.load(jsonFile)
project_name: str = data['project']['name']
# Counter is a Dict[file name, count] representation
files = Counter([obj[0]['file'] for obj in data['data']])
return VulnerableProjectFiles(project_name, files)
def _load_vulnerable_projects(self) -> List['VulnerableProjectFiles']:
vulnerable_projects: List[VulnerableProjectFiles] = []
for json_file in self._list_all_json_files():
vulnerable = self._read_repository_and_file_names(json_file)
if not self.should_accept_project(vulnerable.project_name):
continue
vulnerable.print()
vulnerable_projects.append(vulnerable)
return vulnerable_projects
def get_vulnerable_project_files(self) -> List['VulnerableProjectFiles']:
if len(self._cached_vulnerable_projects) == 0:
self._cached_vulnerable_projects = self._load_vulnerable_projects()
return self._cached_vulnerable_projects
def save_point_file_name(self, project_files: 'VulnerableProjectFiles') -> str:
project_as_file_name = project_files.project_name.replace('/', '__')
return f'{self.save_point_location}/g__{project_as_file_name}.json'
async def do_fix_vulnerable_file(self, project_name: str, file: str, expected_fix_count: int) -> int:
"""
Fixes the vulnerabilities in the file passed.
:param project_name: The name of the project being fixed.
:param file: The file to fix the vulnerabilities in.
:param expected_fix_count: The expected number of vulnerabilities to be fixed.
:return: The actual number of vulnerabilities fixed.
"""
pass
@dataclass(frozen=True)
class VulnerabilityFixReport:
files_fixed: int
vulnerabilities_fixed: int
file_name_fixed: List[str]
@dataclass
class VulnerableProjectFiles:
project_name: str
files: Dict[str, int]
def print(self):
print(self.project_name)
for file in self.files:
print('\t', '/' + file + ': ' + str(self.files[file]))
@dataclass
class VulnerabilityFixerEngine:
fix_module: VulnerabilityFixModule
project_files: VulnerableProjectFiles
def _project_name(self):
return self.project_files.project_name
def project_file_name(self) -> str:
return self.fix_module.clone_repos_location + '/' + self._project_name()
def save_point_file_name(self) -> str:
return self.fix_module.save_point_file_name(self.project_files)
@staticmethod
async def do_resilient_hub_call(args: List[str], cwd: str, lock=None) -> Optional[str]:
"""
Make a call to hub that is resilient to timeout exceptions.
:return: stdout output if successful
"""
async def do_call(wait_time, previous_wait_time=0) -> Optional[str]:
try:
if lock is not None:
async with lock:
# GitHub documentation says to wait 1 second between writes
# https://docs.github.com/en/rest/guides/best-practices-for-integrators#dealing-with-abuse-rate-limits
await asyncio.sleep(1)
return await subprocess_run(args, cwd=cwd)
else:
return await subprocess_run(args, cwd=cwd)
except TimeoutError as e:
if wait_time > 70:
raise TimeoutError(f'Gave up after waiting {previous_wait_time} seconds') from e
# This serves a double purpose as informational and also a 'sane'
# way to slow down this script reasonably
github_util.print_current_rate_limit()
await asyncio.sleep(wait_time)
return await do_call(wait_time * 2 + random(), previous_wait_time=wait_time)
return await do_call(1)
async def do_clone(self):
# Deal with fskobjects https://stackoverflow.com/a/41029655/3708426
await self.do_resilient_hub_call(
[
'hub',
'clone',
self._project_name(),
self._project_name(), # This is the directory to clone into
'--config',
'transfer.fsckobjects=false',
'--config',
'receive.fsckobjects=false',
'--config',
'fetch.fsckobjects=false'
],
cwd=self.fix_module.clone_repos_location
)
async def do_run_in(self, args: List[str]) -> Optional[str]:
assert args[0] != 'hub', 'This method is unsuitable for calling `hub`. Use `do_run_hub_in` instead!'
return await subprocess_run(args, cwd=self.project_file_name())
async def do_run_hub_in(self, args: List[str], lock) -> Optional[str]:
return await self.do_resilient_hub_call(args=args, cwd=self.project_file_name(), lock=lock)
async def do_fix_vulnerable_file(self, file: str, expected_fix_count: int) -> int:
file_being_fixed: str = self.project_file_name() + file
# Sanity check, verify the file still exists, the data may be out of date
if not os.path.exists(file_being_fixed):
logging.warning(
'Fix for `%s` in file `%s` can not be applied as file does not exist!',
self._project_name(),
file
)
return 0
return await self.fix_module.do_fix_vulnerable_file(
self._project_name(),
file_being_fixed,
expected_fix_count
)
def submodule_files(self) -> List[str]:
"""
List all of the git submodule files in this project.
We're not going to be fixing pom files in Git submodules so this allows us to filter them out.
"""
files: List[str] = []
submodule_file_path: str = self.project_file_name() + '/.gitmodules'
if not os.path.isfile(submodule_file_path):
return []
with open(submodule_file_path) as submodule_file:
for line in submodule_file:
if 'path' in line:
files.append('/' + line.split('= ')[1][0:-1])
        return files
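    # e.g. (illustrative): a .gitmodules line "path = third_party/libfoo" makes
    # submodule_files() return ['/third_party/libfoo'], so fixes under that path
    # are skipped in do_fix_vulnerabilities() below.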
async def do_fix_vulnerabilities(self) -> VulnerabilityFixReport:
project_vulnerabilities_fixed = 0
project_files_fixed = 0
submodules = self.submodule_files()
files_fixed: List[str] = []
for file in self.project_files.files:
# Skip submodule files
skip = next((True for submodule in submodules if file.startswith(submodule)), False)
if not skip:
file_vulnerabilities_fixed = await self.do_fix_vulnerable_file(file, self.project_files.files[file])
if file_vulnerabilities_fixed > 0:
project_vulnerabilities_fixed += file_vulnerabilities_fixed
project_files_fixed += 1
files_fixed.append(file)
return VulnerabilityFixReport(
project_files_fixed,
project_vulnerabilities_fixed,
files_fixed
)
async def do_create_branch(self):
await self.do_run_in(['git', 'checkout', '-b', self.fix_module.branch_name])
async def do_stage_changes(self, project_report: VulnerabilityFixReport):
command = ['git', 'add']
# Only run add on the files we've modified
# This hopefully limits CRLF changes
files_trimmed = [file_name.lstrip('/') for file_name in project_report.file_name_fixed]
command.extend(files_trimmed)
await self.do_run_in(command)
async def do_commit_changes(self):
msg = self.fix_module.commit_message
await self.do_run_in(['git', 'commit', '-m', msg])
async def do_fork_repository(self, lock, index: int = 0):
org_name = 'BulkSecurityGeneratorProject'
if index == 0:
use_org_name = org_name
else:
use_org_name = f'{org_name}{index}'
try:
await self.do_run_hub_in(
[
'hub',
'fork',
'--remote-name',
'origin',
'--org',
use_org_name
],
lock
)
except ForkAlreadyExistsException as e:
if index >= 46:
raise e
else:
return await self.do_fork_repository(lock, index + 1)
async def do_push_changes(self, retry_count: int = 5):
try:
# Don't use '--force-with-lease' here, it doesn't work. Trust me.
await self.do_run_in(['git', 'push', 'origin', self.fix_module.branch_name, '--force'])
except ShallowUpdateNotAllowedException:
# A shallow update isn't allowed against this repo (I must have forked it before)
await self.do_run_in(['git', 'fetch', '--unshallow'])
# Now re-run the push
# Don't use '--force-with-lease' here, it doesn't work. Trust me.
await self.do_run_in(['git', 'push', 'origin', self.fix_module.branch_name, '--force'])
except CouldNotReadFromRemoteRepositoryException as e:
logging.warning(f'Could not read from remote repository {5 - retry_count}/5')
if retry_count <= 0:
raise e
else:
# Forking is an async operation, so we may need to wait a bit for it
await asyncio.sleep((5 - retry_count) * 2 + random())
await self.do_push_changes(retry_count - 1)
async def do_create_pull_request(self, lock) -> str:
try:
stdout = await self.do_run_hub_in(
['hub', 'pull-request', '-p', '--file', self.fix_module.pr_message_file_absolute_path],
lock
)
pattern = re.compile(r'(https://.*)')
match = pattern.search(stdout)
return match.group(1)
except PullRequestAlreadyExistsException:
return 'ALREADY_EXISTS'
async def do_create_save_point(self, report: VulnerabilityFixReport, pr_url: str):
json_body = {
'project_name': self.project_files.project_name,
'files': self.project_files.files,
'pull_request': pr_url,
'report': asdict(report)
}
async with aiofiles.open(self.save_point_file_name(), 'w') as json_file_to_write:
await json_file_to_write.write(json.dumps(json_body, indent=4))
async def execute_vulnerability_fixer_engine(engine: VulnerabilityFixerEngine, lock) -> VulnerabilityFixReport:
engine.project_files.print()
await engine.do_clone()
project_report: VulnerabilityFixReport = await engine.do_fix_vulnerabilities()
pr_url = ''
# If the LGTM data is out-of-date, there can be cases where no vulnerabilities are fixed
if project_report.vulnerabilities_fixed != 0:
await engine.do_create_branch()
await engine.do_stage_changes(project_report)
await engine.do_commit_changes()
if not engine.project_files.project_name.lower().startswith('jlleitschuh'):
await engine.do_fork_repository(lock)
await engine.do_push_changes()
pr_url = await engine.do_create_pull_request(lock)
await engine.do_create_save_point(project_report, pr_url)
return project_report
async def execute_vulnerability_fixer_engine_checked(
engine: VulnerabilityFixerEngine,
lock
) -> Optional[VulnerabilityFixReport]:
try:
return await execute_vulnerability_fixer_engine(engine, lock)
except AmbiguousObjectNameHeadException:
# They named their main branch 'HEAD'... Why?! No fix for them.
return None
except BaseException as e:
if 'CancelledError' in e.__class__.__name__:
raise e
logging.error(
f'Failed while processing project `{engine.project_files.project_name}`. Exception type: {type(e)}.\n{e!s}')
raise e
def is_archived_git_hub_repository(project: VulnerableProjectFiles) -> bool:
try:
return git_hub.get_repo(project.project_name).archived
except github.UnknownObjectException:
# The repository was removed, so treat it as the same
return True
class EnginesExecutionException(Exception):
pass
async def _do_execute_engines(engines: List[VulnerabilityFixerEngine]):
github_hub_lock = asyncio.Lock()
waiting_reports = []
try:
for engine in engines:
waiting_reports.append(
execute_vulnerability_fixer_engine_checked(engine, github_hub_lock)
)
projects_fixed = 0
files_fixed = 0
vulnerabilities_fixed = 0
print(f'Processing {len(waiting_reports)} Projects:')
all_reports = await asyncio.gather(*waiting_reports)
for report in all_reports:
if report is None:
continue
if report.vulnerabilities_fixed > 0:
projects_fixed += 1
files_fixed += report.files_fixed
vulnerabilities_fixed += report.vulnerabilities_fixed
print('Done!')
print(f'Fixed {vulnerabilities_fixed} vulnerabilities in {files_fixed} files across {projects_fixed} projects!')
except Exception as e:
raise EnginesExecutionException('Engine execution failed!') from e
async def _do_execute_fix_module(fix_module: VulnerabilityFixModule, starting_letter: str):
fix_module.clean_previous_run()
vulnerable_projects = fix_module.get_vulnerable_project_files()
print()
print(f'Loading Async Project Executions for {len(vulnerable_projects)} Projects:')
engines = []
for vulnerable_project in vulnerable_projects:
if not vulnerable_project.project_name.startswith(starting_letter):
continue
# Check this first, it's going to be faster
if os.path.exists(fix_module.save_point_file_name(vulnerable_project)):
logging.info(f'Skipping project {vulnerable_project.project_name} since save point file already exists')
continue
# Check this second, it's going to be slower
if is_archived_git_hub_repository(vulnerable_project):
logging.info(f'Skipping project {vulnerable_project.project_name} since it is archived')
continue
print(f'Loading Execution for: {vulnerable_project.project_name}')
engine = VulnerabilityFixerEngine(
fix_module=fix_module,
project_files=vulnerable_project
)
engines.append(engine)
# Break the engine list into sub-lists of size 100
size = 100
    engine_lists = [engines[i:i + size] for i in range(0, len(engines), size)]
for engine_list in engine_lists:
await _do_execute_engines(engine_list)
# try:
# await _do_execute_engines(engine_list)
# except EnginesExecutionException as e:
# logging.exception(f'Failed while processing engine group. {str(e)}')
def do_execute_fix_module(fix_module: VulnerabilityFixModule):
start = time.monotonic()
for char in string.ascii_letters + string.digits:
asyncio.run(_do_execute_fix_module(fix_module, starting_letter=char))
end = time.monotonic()
duration_seconds = end - start
print(f'Execution took {duration_seconds} seconds')
github_util.print_current_rate_limit()
``` |
{
"source": "JLLeitschuh/DDF",
"score": 2
} |
#### File: python/ddf/conf.py
```python
from __future__ import unicode_literals
def find_ddf():
import os
if 'DDF_HOME' in os.environ:
return os.path.abspath(os.environ['DDF_HOME'])
path = os.path.abspath(os.path.split(os.path.abspath(__file__))[0] + '/../../')
if all([os.path.exists(os.path.join(path, x)) for x in ['core', 'spark']]):
return path
raise ImportError('Unable to find DDF_HOME. Please define this variable in your environment')
DDF_HOME = find_ddf()
# TODO: find a better way to set this
SCALA_VERSION = '2.10'
```
#### File: python/tests/test_ml.py
```python
from __future__ import unicode_literals
import unittest
import pandas as pd
from py4j.java_gateway import Py4JJavaError
import test_base
from ddf import ml
class TestMl(test_base.BaseTest):
"""
Test ML functions
"""
def testKmeans(self):
model = ml.kmeans(self.mtcars, 2, 5, 10)
self.assertIsInstance(model, ml.KMeansModel)
self.assertIsInstance(model.centers, pd.DataFrame)
self.assertEqual(len(model.centers), 2)
self.assertItemsEqual(model.centers.columns.tolist(), self.mtcars.colnames)
self.assertIsInstance(model.predict(range(0, self.mtcars.ncol)), float)
with self.assertRaises(Py4JJavaError):
model.predict([0, 1, 2])
def testLinearRegression(self):
model = ml.linear_regression_gd(self.mtcars, 0.1, 0.1, 10)
self.assertIsInstance(model, ml.LinearRegressionModel)
self.assertIsInstance(model.weights, pd.DataFrame)
self.assertEqual(len(model.weights), 1)
self.assertEqual(len(model.weights.columns), self.mtcars.ncol)
self.assertIsInstance(model.predict(range(0, self.mtcars.ncol - 1)), float)
with self.assertRaises(Py4JJavaError):
model.predict([0, 1, 2])
def testLogisticRegression(self):
model = ml.logistic_regression_gd(self.mtcars, 0.1, 10)
self.assertIsInstance(model, ml.LogisticRegressionModel)
self.assertIsInstance(model.weights, pd.DataFrame)
self.assertEqual(len(model.weights), 1)
self.assertEqual(len(model.weights.columns), self.mtcars.ncol)
self.assertIsInstance(model.predict(range(0, self.mtcars.ncol - 1)), float)
with self.assertRaises(Py4JJavaError):
model.predict([0, 1, 2])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JLLeitschuh/Hibernate-3-to-4-Liquibase-Changelog-Gen",
"score": 2
} |
#### File: JLLeitschuh/Hibernate-3-to-4-Liquibase-Changelog-Gen/hibernate3to4changelogGen.py
```python
import argparse
import sys
import os
import xml.etree.ElementTree as ET
import StringIO
import xml.dom.minidom
import getpass
XML_NAMESPACE = {
'node': 'http://www.liquibase.org/xml/ns/dbchangelog'
}
def dict_to_sorted_list(dict):
d = dict.items()
d.sort()
return d
def flatten(list_of_lists):
return [val for sublist in list_of_lists for val in sublist]
def xml_line_to_dict(xmlLine):
"""
>>> dict = xml_line_to_dict('<addUniqueConstraint columnNames="uuid, macaddress, vlan" constraintName="attachednetworkdevicejpa_uuid_macaddress_vlan_key" deferrable="false" disabled="false" initiallyDeferred="false" tableName="attachednetworkdevicejpa"/>')
>>> dict_to_sorted_list(dict)
[('columnNames', 'uuid, macaddress, vlan'), ('constraintName', 'attachednetworkdevicejpa_uuid_macaddress_vlan_key'), ('deferrable', 'false'), ('disabled', 'false'), ('initiallyDeferred', 'false'), ('tableName', 'attachednetworkdevicejpa')]
:param xmlLine:
:return:
"""
return ET.fromstring(xmlLine).attrib
def make_file_relative(path):
"""
>>> make_file_relative('com/company/core/db/changelog/db.changelog-1.1.0.xml')
'db.changelog-1.1.0.xml'
>>> make_file_relative('com/company/core/db/changelog/db.changelog-master.xml')
'db.changelog-master.xml'
>>> make_file_relative('db.changelog-master.xml')
'db.changelog-master.xml'
:param path: The path that is defined relative to this directory
:return: The file name with the path made relative to this directory.
"""
head, tail = os.path.split(path)
return tail
def get_inner_imported_files(root):
"""
:param root: The root node of an XML file
:return: A list of all of the files to import relative to this directory
"""
return [make_file_relative(child.attrib['file'])
for child in root.findall("./node:include", XML_NAMESPACE)]
def parse_file_to_xml(file):
    """
    Parses an input file into XML.
    """
return ET.parse(file).getroot()
def to_drop_constraint_version(dict):
return {'constraintName': dict['constraintName'], 'tableName': dict['tableName']}
def adds_to_add_drop_constraints(masterConstraints, newConstraints):
"""
>>> masterConstraints = {'columnNames': "uuid, macaddress, vlan", 'constraintName': "attachednetworkdevicejpa_uuid_macaddress_vlan_key", 'deferrable': "false", 'disabled':"false", 'initiallyDeferred':"false", 'tableName':"attachednetworkdevicejpa"}
>>> newConstraints = {'columnNames': "uuid, macaddress, vlan", 'constraintName': "uk_2o0nn8nq8eoo40bpyyq5k9anh", 'tableName':"attachednetworkdevicejpa"}
>>> drop, add = adds_to_add_drop_constraints(masterConstraints, newConstraints)
>>> dict_to_sorted_list(drop)
[('constraintName', 'attachednetworkdevicejpa_uuid_macaddress_vlan_key'), ('tableName', 'attachednetworkdevicejpa')]
>>> dict_to_sorted_list(add)
[('columnNames', 'uuid, macaddress, vlan'), ('constraintName', 'uk_2o0nn8nq8eoo40bpyyq5k9anh'), ('deferrable', 'false'), ('disabled', 'false'), ('initiallyDeferred', 'false'), ('tableName', 'attachednetworkdevicejpa')]
>>> masterConstraints = xml_line_to_dict('<addUniqueConstraint columnNames="uuid" constraintName="externalgatewayjpa_uuid_key" deferrable="false" disabled="false" initiallyDeferred="false" tableName="externalgatewayjpa"/>')
>>> newConstraints = xml_line_to_dict('<addUniqueConstraint columnNames="uuid" constraintName="uk_2pqcv4b75ribau4in54ppmyuu" tableName="externalgatewayjpa"/>')
>>> drop, add = adds_to_add_drop_constraints(masterConstraints, newConstraints)
>>> dict_to_sorted_list(drop)
[('constraintName', 'externalgatewayjpa_uuid_key'), ('tableName', 'externalgatewayjpa')]
>>> dict_to_sorted_list(add)
[('columnNames', 'uuid'), ('constraintName', 'uk_2pqcv4b75ribau4in54ppmyuu'), ('deferrable', 'false'), ('disabled', 'false'), ('initiallyDeferred', 'false'), ('tableName', 'externalgatewayjpa')]
:param masterConstraints: The add constraint that are from the master changelog
:param newConstraints: The constraint for the same column name in the same table from the base change-set
:return: (drop, add) The tuple to generate the drop then the add lines for the changelog
"""
assert masterConstraints['columnNames'] == newConstraints['columnNames']
assert masterConstraints['tableName'] == newConstraints['tableName']
return to_drop_constraint_version(masterConstraints), \
{'columnNames': masterConstraints['columnNames'], 'constraintName': newConstraints['constraintName'],
'deferrable': "false", 'disabled': "false", 'initiallyDeferred': "false",
'tableName': masterConstraints['tableName']}
def get_all_properties(root):
return [child.attrib for child in root.findall("./node:property", XML_NAMESPACE)]
def get_all_unique_constraint_additions(root):
return [child.attrib for child in root.findall("./node:changeSet/node:addUniqueConstraint", XML_NAMESPACE)]
def get_all_unique_constraint_drops(root):
return [child.attrib for child in root.findall("./node:changeSet/node:dropUniqueConstraint", XML_NAMESPACE)]
def remove_dropped_adds(additions, drops):
"""
>>> addition1 = {'columnNames': 'foreignuuid', 'constraintName': 'ethertypeinternalaffinityelementjpa_foreignuuid_key', 'tableName': 'ethertypeinternalaffinityelementjpa'}
>>> addition2 = {'columnNames': 'uuid', 'constraintName': 'ethertypeinternalaffinityelementjpa_uuid_key', 'tableName': 'ethertypeinternalaffinityelementjpa'}
>>> removal = {'constraintName': 'ethertypeinternalaffinityelementjpa_uuid_key', 'tableName': 'ethertypeinternalaffinityelementjpa'}
>>> len(remove_dropped_adds([addition1, addition2], [removal]))
1
>>> len(remove_dropped_adds([addition1, addition2], []))
2
:param additions:
:param drops:
:return:
"""
return [addition for addition in additions
if to_drop_constraint_version(addition) not in drops]
def merge_master_adds_and_new_adds(master_list, new_list):
return [(master, new)
for master in master_list
for new in new_list
if master['tableName'] == new['tableName']
and master['columnNames'] == new['columnNames']
# no need to add a constraint change that already exists
and master['constraintName'] != new['constraintName']]
def add_and_removes_to_changelog_xml(add_and_removes, properties, change_id):
ns = {'xmlns': "http://www.liquibase.org/xml/ns/dbchangelog", 'xmlns:ext': "http://www.liquibase.org/xml/ns/dbchangelog-ext", 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance", 'xsi:schemaLocation': "http://www.liquibase.org/xml/ns/dbchangelog-ext http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-ext.xsd http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.4.xsd"}
top = ET.Element('databaseChangeLog', ns)
for prop in properties:
ET.SubElement(top, 'property', prop)
change_set = ET.SubElement(top, 'changeSet', {'author': getpass.getuser(), 'id': change_id})
ET.SubElement(change_set, 'comment')\
.text = """
Constraint naming convention was changed between Hibernate 3 and 4.
This XML was generated using the `hibernate3to4changelogGen.py` file.
"""
for (drop, add) in add_and_removes:
ET.SubElement(change_set, 'dropUniqueConstraint', drop)
ET.SubElement(change_set, 'addUniqueConstraint', add)
modify_sql = ET.SubElement(change_set, 'modifySql', {'dbms': 'postgresql'})
ET.SubElement(modify_sql, 'replace', {'replace': "WITH", 'with': "WITHOUT"})
return top
parser = argparse.ArgumentParser(
description='Process db.changelog-master and the current changelog to generate key constraint changes in liquibase')
parser.add_argument('changelog',
metavar='C', type=argparse.FileType('r'),
# default=sys.stdin,
help='The base changelog for this branch (not db.changelog-master.xml)')
parser.add_argument('output',
metavar='O', type=argparse.FileType('w'),
default=sys.stdout,
help='The output file for this changeset (not db.changelog-master.xml)')
parser.add_argument('change_id',
metavar='ID', type=str,
help='The changeset id to be used on this generated change')
parser.add_argument('--test', action='store_true', help='Run all of the tests in the script')
if '--test' in sys.argv:
# This can't be done through argparse because it will require all input flags to be set
import doctest
doctest.testmod(verbose=True)
else:
args = parser.parse_args()
print args.changelog
imported = get_inner_imported_files(parse_file_to_xml('db.changelog-master.xml'))
changelog_xml = parse_file_to_xml(args.changelog)
properties = get_all_properties(changelog_xml)
new_constraints = get_all_unique_constraint_additions(changelog_xml)
adds = flatten([get_all_unique_constraint_additions(parse_file_to_xml(importMe)) for importMe in imported])
drops = flatten([get_all_unique_constraint_drops(parse_file_to_xml(importMe)) for importMe in imported])
filtered = remove_dropped_adds(adds, drops)
import pprint
pp = pprint.PrettyPrinter(indent=2)
all_adds = merge_master_adds_and_new_adds(filtered, new_constraints)
xml_diff = [adds_to_add_drop_constraints(master, new) for (master, new) in all_adds]
pp.pprint(xml_diff)
output_xml = add_and_removes_to_changelog_xml(xml_diff, properties, args.change_id)
tree = ET.ElementTree(output_xml)
output = StringIO.StringIO()
tree.write(output, xml_declaration=True, encoding='UTF-8')
reparsed = xml.dom.minidom.parseString(output.getvalue())
output.close()
args.output.write(reparsed.toprettyxml(indent=" "))
# print [constraint for constraint in constraints if 'uk_' not in constraint['constraintName']]
``` |
{
"source": "JLLeitschuh/homoglyph",
"score": 3
} |
#### File: homoglyph/generator/data_file_parser.py
```python
import os
import os.path
class DataFileLine:
def __init__(self, text):
self.text = text.strip()
self.parts = self.text.split(';', 2)
def has_data(self):
return (not self.text.startswith('#')) and len(self.parts) >= 2
def _get_char_from_code(self, code):
return chr(int(code.strip(), 16))
def get_chars(self):
return self._get_char_from_code(self.parts[0]), self._get_char_from_code(self.parts[1])
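# e.g. (illustrative): a data line of the form "0041;0410" parses as
#   DataFileLine("0041;0410").get_chars() -> ('A', '\u0410')
# i.e. LATIN CAPITAL LETTER A and its Cyrillic homoglyph.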
class DataFileParser:
def __init__(self, file_path):
self.file_path = file_path
def parse(self):
char_pairs = []
with open(self.file_path, encoding='utf-8') as f:
for line_text in f:
line = DataFileLine(line_text)
if line.has_data():
try:
char_pairs.append(line.get_chars())
except:
pass
return char_pairs
class DataFileDir:
def __init__(self, dir_name):
self.dir_name = dir_name
def parse_all(self, char_manager):
for file in os.listdir(self.dir_name):
char_pairs = DataFileParser(os.path.join(self.dir_name, file)).parse()
for pair in char_pairs:
char_manager.add_pair(*pair)
```
#### File: homoglyph/generator/output_js_tests.py
```python
from output_builder import OutputBuilder
class OutputJSTests(OutputBuilder):
def __init__(self, file_name, output_dir, template_dir):
self.file_name = file_name
OutputBuilder.__init__(self, output_dir, template_dir)
def create(self, char_manager, chars):
check_statements = []
for char in chars:
char_homoglyphs = char_manager.get_set_for_char(char)
char_homoglyphs_as_unicode = []
for char_homoglyph in char_homoglyphs:
char_homoglyphs_as_unicode.append('"\\u{' + '{:0>4}'.format(self._hex_code_for_char(char_homoglyph)) + '}"')
check_statements.append(' check("{}", [{}]);'.format(char, ' ,'.join(char_homoglyphs_as_unicode)))
text = self._get_template_text().replace('[[check_statements]]', '\n'.join(check_statements))
self._write_output(text)
``` |
{
"source": "JLLeitschuh/JiraTestResultReporter",
"score": 3
} |
#### File: JiraTestResultReporter/test/testConfigSyntaxChecks.py
```python
from selenium import selenium
from SeleniumTest import SeleniumTest
class testConfigSyntaxChecks(SeleniumTest):
def test_baa_SyntaxWarnings(self):
sel = self.selenium
# open reporterTest config page
sel.click("link=reporterTest")
sel.wait_for_page_to_load("30000")
sel.click("link=Configure")
sel.wait_for_page_to_load("30000")
sel.type("name=_.projectKey", "")
sel.focus("name=_.username")
self.assertTrue(sel.is_text_present("You must provide a project key."))
sel.focus("name=_.projectKey")
sel.type("name=_.projectKey", "aaaa")
sel.focus("name=_.username")
self.assertFalse(sel.is_text_present("You must provide a project key."))
sel.type("name=_.serverAddress", "")
sel.focus("name=_.username")
self.assertTrue(sel.is_text_present("You must provide an URL."))
sel.focus("name=_.serverAddress")
sel.type("name=_.serverAddress", "bbbb")
sel.focus("name=_.username")
self.assertTrue(sel.is_text_present("This is not a valid URL."))
sel.focus("name=_.serverAddress")
sel.type("name=_.serverAddress", "http://www.google.com")
sel.focus("name=_.username")
self.assertFalse(sel.is_text_present("This is not a valid URL."))
``` |
{
"source": "JLLeitschuh/TIPL",
"score": 2
} |
#### File: snippets/Jython/fancyThreshold.py
```python
import os, sys, inspect # standard python libraries
# All the TIPL libraries we might need
import tipl.formats.VirtualAim as VA # image IO
import tipl.tools.XDF as XDF # radial distribution function
import tipl.tools.Resize as Resize # Resize
import tipl.tools.VFilterScale as VFilterScale # Resize
import tipl.tools.Morpho as Morpho # morphological operations
import tipl.tools.ComponentLabel as CL # component labeling
import tipl.tools.EasyContour as EasyContour # component labeling
import tipl.tools.Peel as Peel # peeling
import tipl.tools.GrayAnalysis as GrayAnalysis # shape analysis
import tipl.tools.kVoronoiShrink as KV # voronoi transform
import tipl.tools.Neighbors as Neighbors # neighbors
import tipl.util.D3int as D3int # 3d points
import tipl.util.D3float as D3float # 3d float points
import tipl.util.ArgumentParser as AP
import tipl.formats.MappedImage as MappedImage
import tipl.util.TImgTools as TIT
import tipl.util.TImgTools.ReadTImg as ReadTImg
import tipl.util.TImgTools.WriteTImg as SaveImage
options = AP(sys.argv[1:])
thickFile = options.getOptionPath("thickmap", "", "Input thickness map")
smallThickMin = options.getOptionDouble(
"smallradiusmin", 1.0, "Smallest thickness for small objects"
)
smallThickMax = options.getOptionDouble(
"smallradiusmax", 2.0, "Largest thickness for small objects"
)
mediumThickMin = options.getOptionDouble(
"mediumthickmin", smallThickMax, "Smallest thickness for medium objects"
)
mediumThickMax = options.getOptionDouble(
"mediumthickmax", 10.0, "Largest thickness for medium objects"
)
largeThickMin = options.getOptionDouble(
"largethickmin", mediumThickMax, "Smallest thickness for large objects"
)
largeThickMax = options.getOptionDouble(
"largethickmax", 20.0, "Largest thickness for large objects"
)
smallFile = options.getOptionPath("small", "small.tif", "Small object output image")
mediumFile = options.getOptionPath("medium", "medium.tif", "Medium object output image")
largeFile = options.getOptionPath("large", "large.tif", "Large object output image")
doCache = options.getOptionBoolean(
    "cache", "Cache mapped values (faster but more memory intensive)"
)
doBG = options.getOptionBoolean("bg", "Perform Writes in Background")
doPreload = options.getOptionBoolean("preload", "Preload images")
runAsJob = options.getOptionBoolean("sge:runasjob", "Run Program as a Job")
if runAsJob:
scriptName = options.getOptionPath(
"sge:scriptname",
os.path.abspath(inspect.getfile(inspect.currentframe())),
"Path to Script File",
)
job = SGEJob.runScriptAsJob(scriptName, options, "sge:")
if options.hasOption("?"):
print options.getHelp()
exit()
# check for invalid parameters before starting
options.checkForInvalid()
if runAsJob:
job.submit()
exit()
if doBG:
import tipl.util.TImgTools.WriteBackground as SaveImage # allows background processing
inImg = ReadTImg(thickFile)
if doPreload:
inImg.getShortAim() # preload the image
class keepSizedStructures(MappedImage.StationaryVoxelFunction):
def __init__(self, minVal, maxVal, name):
self.minVal = minVal
self.maxVal = maxVal
self.cname = name
def name(self):
return self.cname + ": keep structures size:" + str((self.minVal, self.maxVal))
def getRange(self):
return MappedImage.typeRange(10) # output type is boolean
def get(self, inval):
return (inval < self.maxVal) & (inval > self.minVal)
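# e.g. (illustrative): keepSizedStructures(1.0, 2.0, "Small").get(1.5) -> True and
# .get(3.0) -> False, so mapping the thickness image through it keeps only voxels
# whose thickness lies strictly inside the given range.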
keepSmall = keepSizedStructures(smallThickMin, smallThickMax, "Small")
keepMedium = keepSizedStructures(mediumThickMin, mediumThickMax, "Medium")
keepLarge = keepSizedStructures(largeThickMin, largeThickMax, "Large")
smallImage = MappedImage(inImg, 2, keepSmall)
mediumImage = MappedImage(inImg, 2, keepMedium)
largeImage = MappedImage(inImg, 2, keepLarge)
if doCache:
smallImage = smallImage.cache(1, 0.0)
mediumImage = mediumImage.cache(1, 0.0)
largeImage = largeImage.cache(1, 0.0)
SaveImage(smallImage, smallFile)
SaveImage(mediumImage, mediumFile)
SaveImage(largeImage, largeFile)
```
#### File: snippets/Python/DBTools.py
```python
import os
import re
import sys  # used below for sys.argv
import math  # math.* functions are registered as DB functions below
import md5
import numpy as np
import socket
import time
# make this work on OS X
# sudo install_name_tool -change libmysqlclient.18.dylib /usr/local/mysql/lib/libmysqlclient.18.dylib /Applications/Sage-4.7.2-OSX-64bit-10.6.app/Contents/Resources/sage/local/lib/python2.6/site-packages/MySQL_python-1.2.3-py2.6-macosx-10.7-x86_64.egg/_mysql.so
def rndSuf(nchars=6):
return strcsv(md5.md5(str(time.time()) + "wow").hexdigest())[0:nchars]
seqNameCount = 0
def seqName(nchars=10):
outStr = "%0" + str(nchars) + "d"
globals()["seqNameCount"] += 1
if globals()["seqNameCount"] >= 10 ** (nchars - 1):
globals()["seqNameCount"] = 0
return outStr % globals()["seqNameCount"]
# Master Project List
projects = {}
defPre = "/DISK63/DATA/SLS/PROJECTS/*"
defSuf = "/doc/csv"
# project['Abbreviated Name'] = ('Project Title',Path Prefix (Drive/Folder), Path Suffix (where csv files are),File Names Need, File names shouldn't have, Is a Local Tomography Measurement,Which Database File)
projects["UJX"] = ("ULTRAJAX", defPre, defSuf, "", "", False, "Lacuna")
projects["JBM"] = ("ULTRAJAX/JBMR", defPre, defSuf, "", "", True, "Lacuna")
projects["UST"] = ("ULTRAJAX/SOST", defPre, defSuf, "", "", True, "Lacuna")
# projects['UST2']=('ULTRAJAX/2DSOST',defPre,defSuf,'','',True,'Lacuna')
projects["VAL"] = ("ULTRAJAX/VALIDATE", defPre, defSuf, "", "SPH", True, "Validate")
projects["MISC"] = ("ULTRAJAX/OTHER", defPre, defSuf, "", "", True, "Other")
projects["BIN"] = (
"BINTEST",
"/DISK72/DATA/SLS/PROJECTS/*",
defSuf,
"",
"",
False,
"Lacuna",
)
projects["PR1"] = (
"ULTRAJAX_PR",
"/DISK72/DATA/SLS/PROJECTS/ULTRAJAX_B1",
defSuf,
"PR",
"F2",
False,
"Lacuna",
)
projects["PR2"] = (
"ULTRAJAX_PR",
"/DISK83/DATA/SLS/PROJECTS/ULTRAJAX_B2",
defSuf,
"PR",
"F2",
False,
"Lacuna",
)
projects["UF3"] = (
"ULTRAJAX_F2",
"/DISK83/DATA/SLS/PROJECTS/ULTRAJAX_B100530",
defSuf,
"",
"",
False,
"Lacuna",
)
projects["UF2"] = (
"ULTRAJAX_F2",
"/DISK83/DATA/SLS/PROJECTS/UJAX_B1009A",
defSuf,
"",
"",
False,
"Lacuna",
)
# projects['DIG']=('DIGFA','/DISK73/DATA/SLS/PROJECTS/DIGFA/KEVIN',defSuf,'','',True,'Lacuna')
# projects['ADIG']=('DIGFA/ALINA','/DISK73/DATA/SLS/PROJECTS/DIGFA/LEVCHUK',defSuf,'','',True,'Lacuna')
# projects['ASTR']=('DIGFA/STRAIN','/DISK73/DATA/SLS/PROJECTS/DIGFA/STRAIN',defSuf,'','',True,'Lacuna')
# Philipp Aging Study
projects["REPK"] = (
"REPRO/KEVIN",
"/DISK83/DATA/SLS/PROJECTS/REPRO/KEVIN",
defSuf,
"",
"",
True,
"Lacuna",
)
projects["REPF"] = (
"REPRO/FELIX",
"/DISK83/DATA/SLS/PROJECTS/REPRO/FELIX",
defSuf,
"",
"",
True,
"Lacuna",
)
projects["REPF2"] = (
"REPRO/FELIX2",
"/DISK83/DATA/SLS/PROJECTS/REPRO/FELIX",
defSuf,
"",
"",
True,
"Lacuna",
)
projects["REPB6"] = (
"REPRO/B6",
"/DISK83/DATA/SLS/PROJECTS/REPRO/B6",
defSuf,
"",
"",
True,
"Lacuna",
)
projects["REPC3"] = (
"REPRO/C3",
"/DISK83/DATA/SLS/PROJECTS/REPRO/C3",
defSuf,
"",
"",
True,
"Lacuna",
)
projects["DEVEL"] = (
"DEVELOP",
"/DISK83/DATA/SLS/PROJECTS/DEVELOP",
defSuf,
"",
"",
True,
"Lacuna",
)
# Floor project
# projects['3XL']=('SINGLE','/DISK82/DATA/UCT/PROJECTS/FML/SINGLE_SLS',defSuf,'','',False,'Lacuna')
# projects['OVX']=('LOADING','/DISK82/DATA/UCT/PROJECTS/FML/OVX_SLS',defSuf,'','',False,'Lacuna')
# Temp Junk Projects
floatlist = lambda d: map(float, list(d))
nprange = lambda x: (max(x) - min(x))
persistentColoringMap = {}
persistentColoringCount = 0
def resetMaps():
globals()["persistentColoringMap"] = {}
globals()["persistentStylingMap"] = {}
globals()["colorList"] = "rgbcmyk"
globals()["styleList"] = ".^x*osh+"
colorList = "rgbcmyk"
def consistentColoringFunction(cName):
colorList = globals()["colorList"]
if not globals()["persistentColoringMap"].has_key(cName):
globals()["persistentColoringMap"][cName] = colorList[
globals()["persistentColoringCount"] % len(colorList)
]
globals()["persistentColoringCount"] += 1
return globals()["persistentColoringMap"][cName]
persistentStylingMap = {}
persistentStylingCount = 0
styleList = ".^x*osh+"
def consistentStylingFunction(cName):
styleList = globals()["styleList"]
if not globals()["persistentStylingMap"].has_key(cName):
globals()["persistentStylingMap"][cName] = styleList[
globals()["persistentStylingCount"] % len(styleList)
]
globals()["persistentStylingCount"] += 1
return globals()["persistentStylingMap"][cName]
## Curve Fitting Models for SQLHistPlot
logParabolaFit = (
lambda xdat, ydat: (
nprange(log10(ydat)) / nprange(log10(xdat)),
mean(log10(xdat)),
max(log10(ydat)),
),
lambda xdat, p: np.power(10, p[2] - p[0] * (log10(xdat) - p[1]) ** 2),
)
def spow(x, y):
try:
return math.pow(x, y)
except:
return 0
# Database Math Functions
def lacdb_AddMathFcn(fcnName, fcnArgs, fcn):
## Create 'Safe' math functions in SQLite
def safeFcn(*args):
try:
return apply(fcn, args)
        except:
            print "LDB_AMF - Problem in : " + str(fcnName) + " invalid input : " + str(args)
            return 0
try:
con.create_function(fcnName, fcnArgs, safeFcn)
except:
con.createscalarfunction(fcnName, safeFcn)
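# Illustrative use (APSW/SQLite backend assumed): after
#   lacdb_AddMathFcn("sq", 1, lambda x: x * x)
# a query such as  SELECT SQ(POS_X) FROM LACUNA  evaluates x*x per row, and
# safeFcn returns 0 instead of raising if a row contains bad input.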
def is_numeric(lit):
"Return value of numeric literal string or ValueError exception"
try:
f = float(lit)
return True
except ValueError:
return False
class lacDB_UNICNT:
def __init__(self):
self.items = {}
def step(self, value):
if value is None:
return 0
self.items[value] = 1
def finalize(self):
oVal = len(self.items.keys())
if oVal is None:
oVal = 0
return oVal
@classmethod
def factory(cls):
return cls(), cls.step, cls.finalize
nullVals = lambda cVar: cur.execute(
"SELECT SAMPLE_AIM_NUMBER,COUNT(*) From Lacuna WHERE "
+ cVar
+ " is not IFNULL("
+ cVar
+ ",-1.1) GROUP BY SAMPLE_AIM_NUMBER"
).fetchall()
def Grid2D(varX="POS_X", varY="POS_Y", stepX=5.0 / 100, stepY=None):
## 2D Grid for Group By Function in SQL
if stepY is None:
stepY = stepX
return (
"(FLOOR(("
+ str(varX)
+ ")/"
+ str(stepX)
+ ") || FLOOR(("
+ str(varY)
+ ")/"
+ str(stepY)
+ "))"
)
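# Example of the SQL fragment produced (with the default arguments above):
#   Grid2D()  ->  "(FLOOR((POS_X)/0.05) || FLOOR((POS_Y)/0.05))"
# which is meant to be dropped into a GROUP BY clause to bin positions on a 2D grid.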
def Grid3D(
varX="POS_X", varY="POS_Y", varZ="POS_Z", stepX=5.0 / 100, stepY=None, stepZ=None
):
## 3D Grid for Group By Function
if stepY is None:
stepY = stepX
if stepZ is None:
stepZ = stepY
return (
"(FLOOR(("
+ str(varX)
+ ")/"
+ str(stepX)
+ ") || FLOOR(("
+ str(varY)
+ ")/"
+ str(stepY)
+ ") || FLOOR(("
+ str(varZ)
+ ")/"
+ str(stepZ)
+ "))"
)
def GridVar(varX="POS_X", stepX=5.0 / 100):
## For Later Plots
return "(" + str(stepX) + "*FLOOR((" + str(varX) + ")/" + str(stepX) + "))"
class CaseFreeDict:
"""Dictionary, that has case-insensitive keys.
Keys are retained in their original form
when queried with .keys() or .items().
Implementation: An internal dictionary maps lowercase
keys to (key,value) pairs. All key lookups are done
against the lowercase keys, but all methods that expose
keys to the user retrieve the original keys."""
def __init__(self, dict=None):
"""Create an empty dictionary, or update from 'dict'."""
self._dict = {}
if dict:
self.update(dict)
def __getitem__(self, key):
"""Retrieve the value associated with 'key' (in any case)."""
k = key.lower()
return self._dict[k][1]
def __setitem__(self, key, value):
"""Associate 'value' with 'key'. If 'key' already exists, but
in different case, it will be replaced."""
k = key.lower()
self._dict[k] = (key, value)
def has_key(self, key):
"""Case insensitive test wether 'key' exists."""
k = key.lower()
return self._dict.has_key(k)
def keys(self):
"""List of keys in their original case."""
return [v[0] for v in self._dict.values()]
def values(self):
"""List of values."""
return [v[1] for v in self._dict.values()]
def items(self):
"""List of (key,value) pairs."""
return self._dict.values()
def get(self, key, default=None):
"""Retrieve value associated with 'key' or return default value
if 'key' doesn't exist."""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
"""If 'key' doesn't exists, associate it with the 'default' value.
Return value associated with 'key'."""
if not self.has_key(key):
self[key] = default
return self[key]
def update(self, dict):
"""Copy (key,value) pairs from 'dict'."""
for k, v in dict.items():
self[k] = v
def __repr__(self):
"""String representation of the dictionary."""
items = ", ".join([("%r: %r" % (k, v)) for k, v in self.items()])
return "{%s}" % items
def __str__(self):
"""String representation of the dictionary."""
return repr(self)
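# Usage sketch (illustrative):
#   d = CaseFreeDict({'Sample_Aim_Number': 3})
#   d['SAMPLE_AIM_NUMBER']   # -> 3 (lookups ignore case)
#   d.keys()                 # -> ['Sample_Aim_Number'] (original case preserved)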
class mysqlCurWrapper:
""" A wrapper for the my sql function to remove case sensitivity (make everything uppercase) and return the cursor when queries are executed -> cur.execute(...).fetchall() now works """
def __init__(self, rCon, regenCall=lambda x: None):
self.__connection__ = rCon
self.__cursor__ = rCon.cursor()
self.regenCall = regenCall
self.verbose = False
def _fixquery(self, qryText, qryData):
qryText = "".join(qryText.upper().split("BEGIN;"))
qryText = "".join(qryText.upper().split("BEGIN ;"))
qryText = "".join(qryText.upper().split("BEGIN"))
qryText = "".join(qryText.upper().split("COMMIT;"))
qryText = "".join(qryText.upper().split("COMMIT ;"))
qryText = "".join(qryText.upper().split("COMMIT"))
qrySplit = qryText.split("?")
oQry = "%s".join(qrySplit)
# print oQry
return oQry
qrySplit.reverse()
qryOut = qrySplit.pop()
qrySplit.reverse()
for (val, qryPostfix) in zip(qryData, qrySplit):
tv = type(val)
if True:
qryOut += "%s" # very boring options
elif tv is type(""):
qryOut += "%s"
elif tv is type(1):
qryOut += "%s"
elif tv is int:
qryOut += "%d"
elif tv is type(1.0):
qryOut += "%f"
elif tv is float:
qryOut += "%f"
else:
qryOut += "%s"
qryOut += qryPostfix
# print qryOut
return qryOut
def prepStatement(self, inStatement, inVals):
return inStatement % self._get_db().literal(inVals)
def execute(self, *args, **keywords):
if self.verbose:
print ("exec:", args, keywords)
nargs = list(args)
nargs[0] = nargs[0].upper()
if len(nargs) > 1:
nargs[0] = self._fixquery(nargs[0], nargs[1])
self.__cursor__.execute(*tuple(nargs), **keywords)
return self
def executemany(self, *args, **keywords):
if self.verbose:
print ("execmany:", args[0], keywords)
nargs = list(args)
nargs[0] = nargs[0].upper()
if len(nargs) > 1:
nargs[0] = self._fixquery(nargs[0], nargs[1][0])
# print nargs
self.__cursor__.executemany(*nargs, **keywords)
return self
def begin(self):
return self.__connection__.begin()
def commit(self):
return self.__connection__.commit()
def refresh(self):
nCur = self.regenCall(0)
self.__connection__ = nCur.__connection__
self.__cursor__ = nCur.__cursor__
def __getattr__(self, what):
# print (what,type(what),'is missing checking mysql')
try:
return getattr(self.__cursor__, what)
except:
return getattr(self.__connection__, what)
def StartDatabase(
dbName="Lacuna",
debugMode=False,
apsw=True,
mysql=True,
dorw=True,
doad=None,
mysqlHost="tomquant.sql.psi.ch",
):
hostComputer = socket.gethostname().upper()
if debugMode:
dbName = dbName + "-Debug"
regenFunc = lambda x: StartDatabase(
dbName,
debugMode=debugMode,
apsw=apsw,
mysql=mysql,
dorw=dorw,
doad=doad,
mysqlHost=mysqlHost,
)
if mysql:
if dorw:
globals()["homeDb"] = (mysqlHost, "tomquant_rw", "8y0jz0", "tomquant")
else:
globals()["homeDb"] = (mysqlHost, "tomquant_ro", "OL5iGe", "tomquant")
if doad is not None:
globals()["homeDb"] = (mysqlHost, "tomquant_ad", doad, "tomquant")
print "Sage is Running on : " + hostComputer + " using database - " + str(
globals()["homeDb"]
)
import MySQLdb as mdb
globals()["con"] = mdb.connect(*globals()["homeDb"])
globals()["cur"] = mysqlCurWrapper(globals()["con"], regenCall=regenFunc)
lacTemp = {}
return globals()["cur"]
else:
cPath = os.path.realpath(os.path.realpath(sys.argv[0]))
cPath = os.path.expandvars("$HOME/nt/u/OpenVMS/JAX/Sage/FTPTools.py")
# DBCompiled should be in the same directory
dbCScript = "/".join(cPath.split("/")[:-1] + ["DBCompiled.pyx"])
print "DBC=" + dbCScript
sage.all.attach(dbCScript)
if hostComputer.find("STERNUM") >= 0: # On Sternum
globals()["homeDb"] = os.path.expandvars(
"/project/sageuser/" + dbName + ".db"
)
if hostComputer.find("X02DA") >= 0: # On Beamline
globals()["homeDb"] = os.path.expandvars("$HOME/Data10/" + dbName + ".db")
elif hostComputer.find("PC7819") >= 0: # On PC7819
globals()["homeDb"] = os.path.expandvars("/home/scratch/" + dbName + ".db")
else: # On Kevins Laptop
globals()["homeDb"] = os.path.expandvars("$HOME/" + dbName + ".db")
print "Sage is Running on : " + hostComputer + " using database - " + globals()[
"homeDb"
]
if apsw:
import apsw
globals()["con"] = apsw.Connection(
globals()["homeDb"],
flags=apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE,
)
globals()["cur"] = globals()["con"].cursor()
print globals()["cur"].execute("PRAGMA cache_size=-400000").fetchall()
print globals()["cur"].execute("PRAGMA temp_store=2").fetchall()
# print globals()['cur'].execute('PRAGMA journal_mode=WAL').fetchall()
print globals()["cur"].execute("PRAGMA journal_mode=DELETE").fetchall()
lacTemp = {}
SetupAPSWDatabase()
globals()["cur"] = globals()["con"].cursor()
return globals()["cur"]
else:
## Initialize Database Code, Functions
try:
from sage.databases.database import SQLDatabase
except:
from sage.databases.sql_db import SQLDatabase
if sage0_version().find("4.6.1") >= 0:
globals()["lacDB"] = SQLDatabase(globals()["homeDb"])
else:
globals()["lacDB"] = SQLDatabase(globals()["homeDb"], read_only=False)
globals()["lacTemp"] = globals()["lacDB"].get_skeleton()["Lacuna"].items()
SetupDatabase(lacDB)
return globals()["cur"]
def SetupAPSWDatabase():
# Database Code
# create cursors and connections
lacdb_AddMathFcn("sqrt", 1, math.sqrt)
lacdb_AddMathFcn("is_numeric", 1, is_numeric)
lacdb_AddMathFcn("sq", 1, lambda x: x * x)
lacdb_AddMathFcn("pow", 2, lambda x, y: spow(x, y))
lacdb_AddMathFcn("acos", 1, lambda x: 180 / pi * math.acos(x))
lacdb_AddMathFcn("asin", 1, lambda x: 180 / pi * math.asin(x))
lacdb_AddMathFcn("atan", 1, lambda x: 180 / pi * math.atan(x))
lacdb_AddMathFcn("atan2", 2, lambda x, y: 180 / pi * math.atan2(x, y))
lacdb_AddMathFcn("cos", 1, lambda x: math.cos(pi / 180.0 * x))
lacdb_AddMathFcn("log10", 1, lambda x: math.log10(x))
lacdb_AddMathFcn("ln", 1, lambda x: math.log(x))
lacdb_AddMathFcn("exp", 1, lambda x: math.exp(x))
lacdb_AddMathFcn("sin", 1, lambda x: math.sin(pi / 180.0 * x))
lacdb_AddMathFcn("tan", 1, lambda x: math.tan(pi / 180.0 * x))
lacdb_AddMathFcn("floor", 1, lambda x: floor(x))
lacdb_AddMathFcn("linspace", 4, clacdb_LINSPACE)
lacdb_AddMathFcn("flinspace", 4, clacdb_FLINSPACE)
lacdb_AddMathFcn("fixang", 1, c_fixang)
APSW_createscalar("zame", 2, c_zame2)
APSW_createscalar("zame", 3, c_zame3)
APSW_createscalar("split", 3, clacdb_SPLIT)
APSW_createscalar("fsplit", 3, clacdb_FSPLIT)
APSW_createscalar("isplit", 3, clacdb_ISPLIT)
APSW_createscalar("RDIST", 6, clacdb_rdist)
APSW_createaggregate("med", 1, clacDB_MED)
APSW_createaggregate("var", 1, clacDB_VAR)
APSW_createaggregate("unicnt", 1, lacDB_UNICNT)
APSW_createaggregate("std", 1, clacDB_STD)
APSW_createaggregate("wavg", 2, clacDB_WAVG)
APSW_createaggregate("wvar", 2, clacDB_WVAR)
APSW_createaggregate("wstd", 2, clacDB_WSTD)
APSW_createaggregate("linfit", 2, clacDB_LINFIT)
APSW_createaggregate("wlinfit", 3, clacDB_WLINFIT)
APSW_createaggregate("corr", 2, clacDB_CORR)
APSW_createaggregate("ttest", 2, clacDB_TTEST)
APSW_createaggregate("texture", 3, clacDB_TEXT)
APSW_createaggregate("wtexture", 4, clacDB_TEXT)
def APSW_createscalar(cName, cCount, cFunc):
con.createscalarfunction(cName, cFunc)
def APSW_createaggregate(cName, cCount, cClass):
con.createaggregatefunction(cName, cClass.factory)
def SetupDatabase(lacDB):
# Database Code
# create cursors and connections
globals()["cur"] = lacDB.get_cursor()
globals()["con"] = lacDB.get_connection()
lacdb_AddMathFcn("sqrt", 1, math.sqrt)
lacdb_AddMathFcn("is_numeric", 1, is_numeric)
lacdb_AddMathFcn("sq", 1, lambda x: x * x)
lacdb_AddMathFcn("pow", 2, lambda x, y: spow(x, y))
lacdb_AddMathFcn("acos", 1, lambda x: 180 / pi * math.acos(x))
lacdb_AddMathFcn("asin", 1, lambda x: 180 / pi * math.asin(x))
lacdb_AddMathFcn("atan", 1, lambda x: 180 / pi * math.atan(x))
lacdb_AddMathFcn("atan2", 2, lambda x, y: 180 / pi * math.atan2(x, y))
lacdb_AddMathFcn("cos", 1, lambda x: math.cos(pi / 180.0 * x))
lacdb_AddMathFcn("log10", 1, lambda x: math.log10(x))
lacdb_AddMathFcn("ln", 1, lambda x: math.log(x))
lacdb_AddMathFcn("exp", 1, lambda x: math.exp(x))
lacdb_AddMathFcn("sin", 1, lambda x: math.sin(pi / 180.0 * x))
lacdb_AddMathFcn("tan", 1, lambda x: math.tan(pi / 180.0 * x))
lacdb_AddMathFcn("floor", 1, lambda x: floor(x))
lacdb_AddMathFcn("linspace", 4, clacdb_LINSPACE)
lacdb_AddMathFcn("flinspace", 4, clacdb_FLINSPACE)
lacdb_AddMathFcn("fixang", 1, c_fixang)
con.create_function("zame", 2, c_zame2)
con.create_function("zame", 3, c_zame3)
con.create_function("split", 3, clacdb_SPLIT)
con.create_function("fsplit", 3, clacdb_FSPLIT)
con.create_function("isplit", 3, clacdb_ISPLIT)
con.create_function("RDIST", 6, clacdb_rdist)
con.create_aggregate("med", 1, clacDB_MED)
con.create_aggregate("var", 1, clacDB_VAR)
con.create_aggregate("unicnt", 1, lacDB_UNICNT)
con.create_aggregate("std", 1, clacDB_STD)
con.create_aggregate("wavg", 2, clacDB_WAVG)
con.create_aggregate("wvar", 2, clacDB_WVAR)
con.create_aggregate("wstd", 2, clacDB_WSTD)
con.create_aggregate("linfit", 2, clacDB_LINFIT)
con.create_aggregate("wlinfit", 3, clacDB_WLINFIT)
con.create_aggregate("corr", 2, clacDB_CORR)
con.create_aggregate("ttest", 2, clacDB_TTEST)
con.create_aggregate("texture", 3, clacDB_TEXT)
con.create_aggregate("wtexture", 4, clacDB_TEXT)
# See if there are any objects from this sample in the database
def getSampleCount(cur, sampleNum, tableName="Lacuna"):
cQry = cur.execute(
"SELECT COUNT(*) FROM " + tableName + " WHERE SAMPLE_AIM_NUMBER = ?",
(int(sampleNum),),
).fetchone()
return cQry[0]
# Lookup the sample number based on path
def getSampleNumFromPath(cur, dataPath, projNum):
cQry = cur.execute(
"SELECT SAMPLE_AIM_NUMBER,SAMPLE_AIM_NAME FROM SAMPLE WHERE PROJECT_NUMBER=? AND DATA_PATH LIKE ?",
(int(projNum), dataPath),
).fetchall()
print cQry
if len(cQry) > 0:
is_match = cQry[0][0]
else:
is_match = -1
if is_match < 0:
print dataPath + " could not be found in project " + str(
projNum
) + " not enough information to create it!"
return is_match
# Sample Metrics Database Interface
def toMetricName(inName):
return inName.upper().strip()
def getMetrics(cur, metricName):
cQry = cur.execute(
"SELECT SAMPLE_AIM_NUMBER,VALUE FROM SAMPLEMETRICS WHERE NAME = ?",
(toMetricName(metricName),),
).fetchall()
return dict(cQry)
def getSampleMetrics(cur, sampleNum):
cQry = cur.execute(
"SELECT NAME,VALUE FROM SAMPLEMETRICS WHERE SAMPLE_AIM_NUMBER = ?", (sampleNum,)
).fetchall()
return dict(cQry)
def getSampleMetricsString(cur, sampleNum):
cQry = cur.execute(
"SELECT NAME,STRINGVALUE FROM SAMPLEMETRICS WHERE SAMPLE_AIM_NUMBER = ?",
(sampleNum,),
).fetchall()
return dict(cQry)
def addSampleMetrics(cur, sampleNum, nameVal, valVal=-1, strVal=""):
try:
cur.execute(
"INSERT INTO SAMPLEMETRICS (SAMPLE_AIM_NUMBER,NAME,VALUE,STRINGVALUE) VALUES (?,?,?,?)",
(sampleNum, toMetricName(nameVal), valVal, strVal),
)
except:
cur.execute(
"UPDATE SAMPLEMETRICS SET VALUE=?,STRINGVALUE=? WHERE SAMPLE_AIM_NUMBER=? AND NAME=?",
(valVal, strVal, sampleNum, toMetricName(nameVal)),
)
# Lookup the current sample number and, if it does not exist, create it
def getSampleNum(cur, sampleName, projNum, tries=0, doInsert=False, dataPath=None):
if dataPath is None:
dataPath = sampleName
if sampleName is None:
return getSampleNumFromPath(cur, dataPath, projNum)
print ("le sample", sampleName)
if tries > 2:
print "getSampleNum has failed, please check the integrity of the database"
return -1
wArgs = ["SAMPLE_AIM_NAME LIKE ?"]
if type(projNum) is type(""):
projNum = getProjNum(cur, projNum, doInsert=doInsert)
if projNum is not None:
wArgs += ["PROJECT_NUMBER=%i" % int(projNum)]
cQry = cur.execute(
"SELECT SAMPLE_AIM_NUMBER,SAMPLE_AIM_NAME FROM SAMPLE WHERE "
+ " AND ".join(wArgs),
(sampleName),
).fetchall()
print cQry
if len(cQry) > 0:
is_match = cQry[0][0]
else:
is_match = 0
if is_match < 1:
if doInsert:
try:
print "Creating Sample : " + sampleName + " // " + dataPath
cur.execute(
"INSERT INTO SAMPLE (PROJECT_NUMBER,SAMPLE_AIM_NAME,DATA_PATH) VALUES (?,?,?)",
(projNum, sampleName, dataPath),
)
cur.commit()
except:
print "Creating Sample " + sampleName + " Failed!"
cur.rollback()
return getSampleNum(
cur,
sampleName,
projNum,
tries + 1,
doInsert=doInsert,
dataPath=dataPath,
)
else:
return -1
else:
return is_match
# Lookup the current project number and, if it does not exist, create it
def getProjNum(cur, projName, tries=0, doInsert=False):
if tries > 2:
print "getProjNum has failed, please check the integrity of the database"
return -1
cQry = cur.execute(
"SELECT PROJECT_NUMBER,PROJECT_NAME FROM PROJECT WHERE PROJECT_NAME LIKE ?",
(projName,),
).fetchall()
print cQry
if len(cQry) > 0:
is_match = cQry[0][0]
else:
is_match = 0
if is_match < 1:
if doInsert:
try:
cur.execute(
"INSERT INTO PROJECT (PROJECT_NAME) VALUES (?)", (projName,)
)
cur.commit()
except:
print "Creating Project Failed!"
cur.rollback()
return getProjNum(cur, projName, tries + 1, doInsert=doInsert)
else:
return -1
else:
return is_match
crossprod = lambda va, vb: (
"((" + va + "_Y)*(" + vb + "_Z)-(" + va + "_Z)*(" + vb + "_Y))",
"((" + va + "_Z)*(" + vb + "_X)-(" + va + "_X)*(" + vb + "_Z))",
"((" + va + "_X)*(" + vb + "_Y)-(" + va + "_Y)*(" + vb + "_X))",
)
saimText = "(SELECT SA.SAMPLE_AIM_NAME FROM SAMPLE SA WHERE SA.SAMPLE_AIM_NUMBER=SAMPLE_AIM_NUMBER AND SA.PROJECT_NUMBER=PROJECT_NUMBER)"
paimText = (
"(SELECT PA.PROJECT_NAME FROM PROJECT PA WHERE PA.PROJECT_NUMBER=PROJECT_NUMBER)"
)
strSecCanAng = "MIN(ACOS(CANAL_GRAD_X*PCA2_X+CANAL_GRAD_Y*PCA2_Y+CANAL_GRAD_Z*PCA2_Z),ACOS(-CANAL_GRAD_X*PCA2_X-CANAL_GRAD_Y*PCA2_Y-CANAL_GRAD_Z*PCA2_Z))"
strSecMaskAng = "MIN(ACOS(MASK_GRAD_X*PCA2_X+MASK_GRAD_Y*PCA2_Y+MASK_GRAD_Z*PCA2_Z),ACOS(-MASK_GRAD_X*PCA2_X-MASK_GRAD_Y*PCA2_Y-MASK_GRAD_Z*PCA2_Z))"
fullFieldNames = {}
fullFieldNames["SAMPLE_AIM_NAME"] = "Sample Name"
fullFieldNames["POS_X*1000"] = "X-Position ($\mu m$)"
fullFieldNames["POS_Y*1000"] = "Y-Position ($\mu m$)"
fullFieldNames["POS_Z*1000"] = "Z-Position ($\mu m$)"
fullFieldNames["CANAL_DISTANCE_MEAN*1000"] = "Canal Distance ($\mu m$)"
fullFieldNames["MASK_DISTANCE_MEAN*1000"] = "Distance to Bone Surface ($\mu m$)"
fullFieldNames["CANAL_ANGLE"] = "Orientation to Nearest Canal"
fullFieldNames[strSecCanAng] = "Secondary Canal Angle"
fullFieldNames[strSecMaskAng] = "Secondary Mask Angle"
fullFieldNames["MASK_ANGLE"] = "Orientation to Bone Surface"
fullFieldNames["(MASK_RADIUS_MAX-POS_RADIUS)*1000"] = "Perium Distance ($\mu m$)"
fullFieldNames["(POS_RADIUS-MASK_RADIUS_MIN)*1000"] = "Marrow Distance ($\mu m$)"
fullFieldNames["OBJ_RADIUS*1000"] = "Radius ($\mu m$)"
fullFieldNames["VOLUME"] = "Volume ($mm^3$)"
fullFieldNames["VOLUME*1000*1000*1000"] = "Volume ($\mu m^3$)"
fullFieldNames["PCA1_PHI"] = "Angle from XY Plane"
fullFieldNames["PCA2_THETA"] = "Secondary Orientation"
fullFieldNames["POW(VOLUME*1000*1000*1000,0.666667)"] = "$\ell$"
fullFieldNames["POW(VOLUME,0.666667)"] = "$\ell$"
fullFieldNames["PROJ_PCA1*1000"] = "Length ($\mu m$)"
fullFieldNames["PROJ_PCA2*1000"] = "Width ($\mu m$)"
fullFieldNames["PROJ_PCA3*1000"] = "Height ($\mu m$)"
fullFieldNames["2*SQRT(5)*PCA1_S*1000"] = "Length ($\mu m$)"
fullFieldNames["2*SQRT(5)*PCA2_S*1000"] = "Width ($\mu m$)"
fullFieldNames["2*SQRT(5)*PCA3_S*1000"] = "Height ($\mu m$)"
fullFieldNames["(VOLUME-VOLUME_LAYER)/VOLUME*100"] = "Roughness (\%)"
fullFieldNames[
"4*3.14/3.0*OBJ_RADIUS*OBJ_RADIUS*OBJ_RADIUS/VOLUME*100"
] = "Sphericity (\%)"
fullFieldNames["(PCA1_S-PCA3_S)/PCA1_S"] = "Anisotropy Factor"
fullFieldNames["(PCA1_S-PCA3_S)/PCA1_S"] = "Anisotropy Factor (%)"
fullFieldNames["PROJ_PCA1/(PROJ_PCA2+PROJ_PCA3)*2"] = "Anisotropy"
fullFieldNames["PROJ_PCA2/PROJ_PCA3"] = "Disc Anisotropy (A23)"
fullFieldNames["2*(PCA2_S-PCA3_S)/(PCA1_S-PCA3_S)-1"] = "Oblateness"
fullFieldNames["PROJ_PCA3/PROJ_PCA1"] = "Anisotropy (A31)"
fullFieldNames["PROJ_PCA1/OBJ_RADIUS"] = "Anisotropy from Radius"
fullFieldNames["DENSITY_VOLUME*1000*1000*1000"] = "Lc.Unit Volume ($\mu m^3$)"
fullFieldNames["DENSITY"] = "Density (1/$mm^3$)"
fullFieldNames["DENSITY/1000"] = "Density (kLac/$mm^3$)"
fullFieldNames["SUM(VOLUME)/SUM(DENSITY_VOLUME)*100"] = "Volume Fraction ($\%$)"
fullFieldNames["THICKNESS*1000"] = "Thickness ($\mu m$)"
fullFieldNames["1/AVG(DENSITY_VOLUME)"] = "Density (1/$mm^3$)"
fullFieldNames["1/AVG(DENSITY_VOLUME)/1000"] = "Density (k/$mm^3$)"
fullFieldNames["POS_RADIUS*1000"] = "Radial Distance ($\mu m$)"
fullFieldNames["MASK_THETA"] = "Radial Angle ($\theta$)"
fullFieldNames["POS_DISTANCE*1000"] = "Center Distance ($\mu m$)"
fullFieldNames["ABS(PCA1_Z)*100"] = "Vertical Orientation (\%)"
fullFieldNames["VAR[PCA1_Z]"] = "Vertical Orientation Wobble"
fullFieldNames["VAR[PCA1_Z]*100"] = "Vertical Orientation Variation (\%)"
fullFieldNames["SHELL_ABSORPTION*1000"] = "Calcification Density (a.u.)"
fullFieldNames["SHELL_ABSORPTION_STD*1000"] = "Calcification Density Variation (a.u.)"
fullFieldNames["VOLUME/VOLUME_BOX*100"] = "Boxiness (\%)"
fullFieldNames["NEAREST_NEIGHBOR_NEIGHBORS"] = "Neighbors"
fullFieldNames["NEAREST_NEIGHBOR_DISTANCE*1000"] = "Nearest Neighbor Distance ($\mu$m)"
fullFieldNames[
"NEAREST_NEIGHBOR_DISTANCE/POW(DENSITY_VOLUME/6.28,0.33333)"
] = "Self-Avoiding Coefficient"
fullFieldNames[
"AVG(NEAREST_NEIGHBOR_DISTANCE)/AVG(POW(DENSITY_VOLUME,0.33333))"
] = "Self-Avoiding Coefficient"
fullFieldNames["TEXTURE(PCA1_X,PCA1_Y,PCA1_Z)"] = "Primary Lacuna Alignment ($\%$)"
fullFieldNames[
"WTEXTURE(PCA1_X,PCA1_Y,PCA1_Z,PROJ_PCA1/PROJ_PCA2>1.5)"
] = "Primary Lacuna Alignment Corrected($\%$)"
fullFieldNames["TEXTURE(PCA2_X,PCA2_Y,PCA2_Z)"] = "Secondary Lacuna Alignment ($\%$)"
fullFieldNames[
"WTEXTURE(PCA2_X,PCA2_Y,PCA2_Z,PROJ_PCA2/PROJ_PCA3>1.5)"
] = "Secondary Lacuna Alignment Corrected ($\%$)"
fullFieldNames[
"1/(36*3.14)*DENSITY_VOLUME_SHELL*DENSITY_VOLUME_SHELL*DENSITY_VOLUME_SHELL/(DENSITY_VOLUME*DENSITY_VOLUME)-1"
] = "Voronoi Sphericity (a.u.)"
# Lacuna Displacement Parameters
strLacDisp = "DISPLACEMENT_MEAN*1000"
strLacNDisp = "100*DISPLACEMENT_MEAN/(2*POW(DENSITY_VOLUME,.333))"
strDispLac = "100*(DISPLACEMENT_MEAN>VOX_SIZE*2.2)"
strNDispLac = "100*(" + strLacNDisp + ">5)"
fullFieldNames[strLacDisp] = "Lacuna Displacement ($\mu m$)"
fullFieldNames[strLacNDisp] = "Normalized Lacuna Displacement ($\%$)"
fullFieldNames[strDispLac] = "Displaced Lacuna ($\%$)"
fullFieldNames[strNDispLac] = "Normalized Displaced Lacuna ($\%$)"
strLacACanAlign = "TEXTURE(" + ",".join(crossprod("PCA1", "CANAL_GRAD")) + ")"
strLacBCanAlign = "TEXTURE(" + ",".join(crossprod("PCA2", "CANAL_GRAD")) + ")"
strLacACanAlignW = (
"WTEXTURE("
+ ",".join(crossprod("PCA1", "CANAL_GRAD"))
+ ",PROJ_PCA1/PROJ_PCA2>1.5)"
)
strLacBCanAlignW = (
"WTEXTURE("
+ ",".join(crossprod("PCA2", "CANAL_GRAD"))
+ ",PROJ_PCA2/PROJ_PCA3>1.5)"
)
fullFieldNames[strLacACanAlign] = "Primary Lacuna Canal Alignment ($\%$)"
fullFieldNames[strLacBCanAlign] = "Secondary Lacuna Canal Alignment ($\%$)"
fullFieldNames[strLacACanAlignW] = "Primary Lacuna Canal Alignment ($\%$)"
fullFieldNames[strLacBCanAlignW] = "Secondary Lacuna Canal Alignment ($\%$)"
prGroups = {
"B6 lit/+ female": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (69,70,71,72,73,74,107,108,109,110,111,112,113,114,115)',
"B6 lit/+ male": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (75,76,77,79,80,81,82,137,138,139,140,141)',
"B6 lit/lit female": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (1,2,3,4,5,6,7,8,9,102,103,104,105,106)',
"B6xC3.B6F1 lit/lit female": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (18,19,20,21,22,23,24,25,26,27,28)',
"C3.B6 lit/lit female": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (34,35,36,37,38,39,40,41,42,43,44,45,116,117,118,119,120,121)',
"B6 lit/lit male": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (10,11,12,13,14,15,16,17,130,131,132,133,134,135,136)',
"C3.B6 lit/lit male": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (46,47,48,49,50,51,52,99,145,146,147,148,149,150,151,152)',
"B6xC3.B6F1 lit/lit male": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (29,30,31,32,33)',
"C3.B6 lit/+ male": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (61,62,63,64,65,66,67,68,153,154,155,156,157,158,159)',
"C3.B6 lit/+ female": 'ISPLIT(SAMPLE_AIM_NAME,"_",3) IN (53,54,55,56,57,58,59,60,122,123,124,125,126,127,128)',
}
progGrpDict = dict(
reduce(
lambda x, y: x + y,
[
[(cDex, cKey) for cDex in eval(cVal.split(" IN ")[-1])]
for (cKey, cVal) in prGroups.items()
],
)
)
def getSampleGroup(csNumber):
sNumber = int(csNumber)
if sNumber >= 10000:
return "B6xC3.B6F2 +/+"
if sNumber <= max(progGrpDict.keys()):
if progGrpDict.has_key(sNumber):
return progGrpDict[sNumber]
else:
print "Error: Sample " + str(sNumber) + " is not in database!!"
return "INVALID"
else:
return "B6xC3.B6F2 lit/lit"
qkName = {}
qkName["Anterior"] = 'SAMPLE_AIM_NUMBER LIKE "%A%"'
qkName["Posterior"] = 'SAMPLE_AIM_NUMBER LIKE "%P%"'
qkName["Wild-type"] = 'SAMPLE_AIM_NUMBER LIKE "%WT%"'
qkName["Knock-out"] = 'SAMPLE_AIM_NUMBER LIKE "%KO%"'
badSampleQuery = "DENSITY<0 OR DENSITY/1000>10000000 OR (NOT (PCA1_X BETWEEN -1 AND 1)) OR (NOT (PCA1_Y BETWEEN -1 AND 1)) OR (NOT (PCA1_Z BETWEEN -1 AND 1))"
def showBadSamples(cProject=None):
    if cProject is None:
        cProject = projectTitle
oList = cur.execute(
"SELECT SAMPLE_AIM_NUMBER,COUNT(*) FROM LACUNA WHERE ("
+ badSampleQuery
+ ') AND Project_Number = "'
+ cProject
+ '" GROUP BY SAMPLE_AIM_NUMBER'
).fetchall()
for cLine in oList:
print cLine
return [cObj[0] for cObj in oList]
def clearBadSamples(samples):
cur.execute(
"DELETE FROM Lacuna WHERE ("
+ badSampleQuery
+ ") AND SAMPLE_AIM_NUMBER IN ("
+ ",".join(['"' + cObj + '"' for cObj in samples])
+ ")"
).fetchall()
lacDB.commit()
logParabolaFit = (
lambda xdat, ydat: (
nprange(log10(ydat)) / nprange(log10(xdat)),
mean(log10(xdat)),
max(log10(ydat)),
),
lambda xdat, p: np.power(10, p[2] - p[0] * (log10(xdat) - p[1]) ** 2),
)
linearFit = (
lambda xdat, ydat: (
nprange(ydat) / nprange(xdat),
mean(ydat) - nprange(ydat) / nprange(xdat) * mean(xdat),
),
lambda xdat, p: p[0] * xdat + p[1],
)
cosFit = (
lambda xdat, ydat: (
(max(ydat) - min(ydat)) / 2,
guessspace(xdat, ydat),
(max(ydat) + min(ydat)) / 2,
),
lambda xdat, p: p[0] * cos((xdat) * 2 * pi / p[1]) + p[2],
)
sinFit = (
lambda xdat, ydat: (
(max(ydat) - min(ydat)) / 2,
guessspace(xdat, ydat),
0,
(max(ydat) + min(ydat)) / 2,
),
lambda xdat, p: abs(p[0]) * sin((xdat - p[2]) * 2 * pi / p[1]) + p[3],
)
fSinFit = (
lambda xdat, ydat: ((max(ydat) - min(ydat)) / 2, 0, (max(ydat) + min(ydat)) / 2),
lambda xdat, p: abs(p[0]) * sin((xdat) * 2 * pi / 43.0 - p[2]) + p[1],
)
expDecayFit = (
lambda xdat, ydat: (max(ydat) - min(ydat), (max(xdat)) / 5, min(ydat), min(xdat)),
lambda xdat, p: p[0] * exp(-abs(xdat - p[3]) / abs(p[1])) + p[2],
)
simpExpDecayFit = (
lambda xdat, ydat: (max(ydat) - min(ydat), min(ydat)),
lambda xdat, p: p[0] * exp(-(xdat)) + p[1],
)
kexpFit = (
lambda xdat, ydat: (
max(ydat) - min(ydat),
ln(abs(ydat[-1] - ydat[0])) / (xdat[-1] - xdat[0]),
min(ydat),
),
lambda xdat, p: p[0] * np.exp(p[1] * xdat) + p[2],
)
def latexFriendlyName(cField):
cField = "".join(cField.split("&"))
cName = ptName(cField)
badChars = ["%", "_", "\\", "#", "[", "]", "{", "}", "~", "<", ">"]
if cName.find("$") < 0:
for bChar in badChars:
cName = " ".join(cName.split(bChar))
return cName
def ptName(cField):
cName = cField
if fullFieldNames.has_key(cField.upper()):
cName = fullFieldNames[cField.upper()]
return cName
def strcsv(x):
return "".join([tx for tx in x if tx.isalnum()])
def strdbname(x):
outStr = ""
for tx in x:
if tx.isalnum():
outStr += tx
else:
outStr += " "
outStr = "_".join(outStr.strip().upper().split(" "))
if outStr == "GROUP":
outStr = "MOUSE_GROUP"
return outStr
def strd(x):
try:
return str(round(x, 2))
except:
try:
if x == "ULTRAJAX_PR":
return "Progenitor"
if x.find("SOST_") > -1:
def fbone(name):
return "".join(name.split("_"))
def rbone(name):
return name[name.find("262_") + 4 :]
return fbone(rbone(x))
elif x.find("UCT_") > -1:
def fbone(name):
return name[: name.find("_LACUN") + 1]
def rbone(name):
return name[name.find("UCT_") + 4 :]
return fbone(rbone(x))
elif x.find("ULTRAJAX_") > -1:
def rbone(name):
return name[name.find("ULTRAJAX_") + 9 :]
return rbone((x))
else:
return str(x)
except:
return str(x)
from fnmatch import fnmatch
dosfilt = lambda lst, fnstr: filter(lambda x: fnmatch(x, fnstr), lst)
def SafeArray2d(lacOut):
lacArr = np.zeros((len(lacOut), len(lacOut[0])), dtype=float)
for ij in range(len(lacOut)):
for ik in range(len(lacOut[0])):
try:
lacArr[ij, ik] = float(lacOut[ij][ik])
except:
print "Invalid : " + str(lacOut[ij][ik])
return lacArr
def appendDict(inItems):
outDict = {}
for (key, value) in inItems:
if outDict.has_key(key):
outDict[key] += [value]
else:
outDict[key] = [value]
return outDict
class kdict(dict):
# Designed for On-the-fly tables from SQL DB Output
def __getitem__(self, giArgs):
if not self.has_key(giArgs):
tSearch = None
if type(giArgs) is type("bob"):
tSearch = self._inexactMatch(giArgs)
if tSearch is None:
super(kdict, self).__setitem__(giArgs, kdict())
else:
giArgs = tSearch
return super(kdict, self).__getitem__(giArgs)
def __setitem__(self, *args):
giArgs = args[0]
if not self.has_key(giArgs):
super(kdict, self).__setitem__(giArgs, kdict())
super(kdict, self).__setitem__(giArgs, args[1])
def _inexactMatch(self, giArgs):
cMatches = dosfilt(self.keys(), giArgs)
if len(cMatches) > 0:
return cMatches[0]
return None
def __list__(self):
print self.values()
return [cEle.__list__() for cEle in self.values()]
def __latex__(self):
return self._html()
def tup(self):
return tuple([cEle.values() for cEle in self.values()])
def _html(self, renderFunc=lambda x: str(x)):
outStr = ""
headerStr = (
"<tr><td>Output</td><td>"
+ "</td><td>".join([str(cObj) for cObj in sorted(self.values()[0].keys())])
+ "</td></tr>"
)
for cName in sorted(self.keys()):
cVal = self[cName]
outStr += (
"<tr><td>"
+ str(cName)
+ "</td><td>"
+ "</td><td>".join(
[renderFunc(self[cName][cObj]) for cObj in sorted(cVal.keys())]
)
+ "</td></tr>"
)
return "<table border=1>" + headerStr + outStr + "</table>"
def html(self, renderFunc=lambda x: str(x)):
html(self._html(renderFunc))
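# kdict usage sketch (illustrative; html() relies on the notebook html() helper used above):
# missing keys auto-create nested kdicts and string keys fall back to fnmatch-style
# wildcard matching against existing keys via _inexactMatch:
#   t = kdict()
#   t['GroupA']['Volume'] = 1.5
#   t['Group?']['Volume']   # -> 1.5, matched by wildcard
#   t.html()                # render the nested dict as an HTML table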
class appendingkDict(dict):
# Designed for On-the-fly tables from SQL DB Output
def __getitem__(self, giArgs):
if not self.has_key(giArgs):
tSearch = None
if type(giArgs) is type("bob"):
tSearch = self._inexactMatch(giArgs)
if tSearch is None:
                super(appendingkDict, self).__setitem__(giArgs, kdict())
else:
giArgs = tSearch
        return super(appendingkDict, self).__getitem__(giArgs)
def __setitem__(self, *args):
giArgs = args[0]
if not self.has_key(giArgs):
            super(appendingkDict, self).__setitem__(giArgs, kdict())
        super(appendingkDict, self).__setitem__(giArgs, args[1])
def _inexactMatch(self, giArgs):
cMatches = dosfilt(self.keys(), giArgs)
if len(cMatches) > 0:
return cMatches[0]
return None
def __list__(self):
print self.values()
return [cEle.__list__() for cEle in self.values()]
def __latex__(self):
return self._html()
def tup(self):
return tuple([cEle.values() for cEle in self.values()])
def _html(self, renderFunc=lambda x: str(x)):
outStr = ""
headerStr = (
"<tr><td>Output</td><td>"
+ "</td><td>".join([str(cObj) for cObj in sorted(self.values()[0].keys())])
+ "</td></tr>"
)
for cName in sorted(self.keys()):
cVal = self[cName]
outStr += (
"<tr><td>"
+ str(cName)
+ "</td><td>"
+ "</td><td>".join(
[renderFunc(self[cName][cObj]) for cObj in sorted(cVal.keys())]
)
+ "</td></tr>"
)
return "<table border=1>" + headerStr + outStr + "</table>"
def html(self, renderFunc=lambda x: str(x)):
html(self._html(renderFunc))
def pTable(arr, maxWid):
def padObj(cObj):
if len(cObj) > maxWid - 1:
return cObj[0 : maxWid - 1]
else:
return " " * (maxWid - len(cObj) - 1) + cObj
return " ".join(["<td>" + padObj(strd(anObj)) + "</td>" for anObj in arr])
def _mitScharf(cobj, colorFunc=""):
outVal = strd(cobj)
if colorFunc != "":
if colorFunc(cobj):
outVal = '<font color="red">' + outVal + "</font>"
return outVal
def latexTable(header, data, colorFunc=""):
"""latexTable(header,data)
"""
outStr = r"\begin{tabular}{|" + "".join(["c|"] * len(header)) + "}"
outStr += r"\hline"
outStr += r" & ".join([strd(sobj) for sobj in header]) + r" \\"
outStr += r"\hline"
outStr += (
r"\\".join([r" & ".join([strd(sobj) for sobj in obj]) for obj in data]) + r"\\"
)
outStr += r"\hline"
outStr += r"\end{tabular}"
return outStr
def htmlTable(header, data, colorFunc=""):
"""htmlTable(header,data)
"""
html(
'<font size=2><table border="1"><tr><td>'
+ "</td><td>".join([latexFriendlyName(sobj) for sobj in header])
+ "</td></tr>"
)
html(
"<tr><td>"
+ "</td></tr><tr><td>".join(
[
"</td><td>".join([_mitScharf(sobj, colorFunc) for sobj in obj])
for obj in data
]
)
+ "</td></tr>"
)
html("</table></font>")
def csvTable(header, data, filename="outtable.csv"):
outStr = ",".join([strcsv(sobj) for sobj in header]) + "\n"
outStr += "\n".join([",".join([strd(sobj) for sobj in obj]) for obj in data]) + "\n"
text_file = open(filename, "w")
text_file.write(outStr)
text_file.close()
def dbExecute(fields, table="Lacuna", wherei="", groupbyi="", sortopt="", records=-1):
"""dbExecute(fields,table='Lacuna',wherei='',groupbyi='',sortopt='',records=-1)
# Implements Corr, Std pseudo-functions
"""
where = ""
if wherei != "":
where = " WHERE " + wherei
groupby = ""
if groupbyi != "":
groupby = " GROUP BY " + groupbyi
lineByLine = False
ksqlCmds = re.compile("(?P<cmd>\w+)?\[(?P<args>[^\]]+)?\]")
newSubs = {}
cbot = fields
kDex = 0
cSearch = ksqlCmds.search(cbot)
while cSearch:
kDex = 0
cTempName = str(kDex)
cTempName = "tempVar_" + "0" * (6 - len(cTempName)) + cTempName
cCmd = cSearch.group("cmd")
cArgs = cSearch.group("args")
if cCmd == "CR":
cArgs = cArgs.split(";")
cArgs = ["(" + cArg + ")" for cArg in cArgs]
if len(cArgs) > 1:
tFields = (
"AVG("
+ cArgs[0]
+ "),AVG("
+ cArgs[0]
+ "*"
+ cArgs[0]
+ "),AVG("
+ cArgs[1]
+ "),AVG("
+ cArgs[1]
+ "*"
+ cArgs[1]
+ ")"
)
rawResult = dbExecute(
tFields, table, wherei, groupbyi, sortopt, records
)
nResult = []
for cRes in rawResult:
tFields = (
"(AVG(("
+ cArgs[0]
+ "-("
+ str(cRes[0])
+ "))*("
+ cArgs[1]
+ "-("
+ str(cRes[2])
+ ")))"
)
cVar1 = sqrt(cRes[1] - cRes[0] * cRes[0])
cVar2 = sqrt(cRes[3] - cRes[2] * cRes[2])
tFields += "/(" + str(cVar1 * cVar2) + "))"
nResult += [tFields]
if len(nResult) > 1:
lineByLine = len(nResult)
newSubs[cTempName] = nResult
else:
cTempName = str(nResult[0])
elif cCmd == "VAR":
tFields = "AVG(" + cArgs + ")"
rawResult = dbExecute(tFields, table, wherei, groupbyi, sortopt, records)
nResult = []
for cRes in rawResult:
tFields = (
"AVG(("
+ cArgs
+ "-("
+ str(cRes[0])
+ "))*("
+ cArgs
+ "-("
+ str(cRes[0])
+ ")))"
)
# cVar1=sqrt(cRes[1]-cRes[0]*cRes[0])
# cVar2=sqrt(cRes[3]-cRes[2]*cRes[2])
# tFields+='/('+str(cVar1*cVar2)+'))'
nResult += [tFields]
if len(nResult) > 1:
lineByLine = len(nResult)
newSubs[cTempName] = nResult
else:
cTempName = str(nResult[0])
elif cCmd == "RAT":
tFields = "AVG(" + cArgs + ")"
rawResult = dbExecute(tFields, table, wherei, groupbyi, sortopt, records)
nResult = []
for cRes in rawResult:
tFields = (
"(AVG(("
+ cArgs
+ "-("
+ str(cRes[0])
+ "))*("
+ cArgs
+ "-("
+ str(cRes[0])
+ ")))"
)
# cVar1=sqrt(cRes[1]-cRes[0]*cRes[0])
# cVar2=sqrt(cRes[3]-cRes[2]*cRes[2])
tFields += "/(AVG(" + cArgs + ")*AVG(" + cArgs + ")))"
nResult += [tFields]
if len(nResult) > 1:
lineByLine = len(nResult)
newSubs[cTempName] = nResult
else:
cTempName = str(nResult[0])
else:
print "Command " + cCmd + " is not yet supported!"
cTempName = "0"
cbot = cbot[: cSearch.start()] + cTempName + cbot[cSearch.end() :]
cSearch = ksqlCmds.search(cbot)
if lineByLine > 1:
results = []
for curDex in range(0, lineByLine):
smField = cbot
for cKey in newSubs.keys():
smField = str(newSubs[cKey][curDex]).join(smField.split(cKey))
sqlString = (
"select " + smField + " from " + table + " " + where + " " + groupby
)
print sqlString
sqlObj = cur.execute(sqlString)
results += [sqlObj.fetchmany(curDex + 1)[curDex]]
return results
else:
sqlString = "select " + cbot + " from " + table + " " + where + " " + groupby
sqlObj = cur.execute(sqlString)
if records > 0:
return sqlObj.fetchmany(records)
else:
return sqlObj.fetchall()
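# Pseudo-function syntax handled above (sketch; the column and project names are illustrative):
#   dbExecute("VAR[VOLUME]", wherei='Project_Number="UJAX_F2"', groupbyi="SAMPLE_AIM_NUMBER")
#   dbExecute("CR[VOLUME;DENSITY]", groupbyi="SAMPLE_AIM_NUMBER")  # per-group correlation
# CMD[...] terms are expanded into plain SQL (a first pass fetches the group means) before
# the final query is executed.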
def statsTable(fields, projectName="", sqlAdd=""):
"""statsTable(fields,projectName='',sqlAdd='')
"""
if (sqlAdd != "") & (projectName != ""):
sqlAdd += " AND "
if projectName != "":
sqlAdd += ' Project_Number="' + projectName + '" '
if sqlAdd != "":
sqlAdd = " WHERE " + sqlAdd + " "
genFields = []
headFields = ["Sample Name", "Lacuna Count", "Canal Count"]
for cField in fields:
genFields += ["AVG(" + cField + "),AVG(" + cField + "*" + cField + ")"]
cName = ptName(cField)
# cName=cField
headFields += ["Avg." + cName]
headFields += ["Std." + cName]
genFields += ["Sample_AIM_Number", "UNICNT(LACUNA_NUMBER)", "UNICNT(CANAL_NUMBER)"]
nTable = (
cur.execute(
"select "
+ ",".join(genFields)
+ " from Lacuna "
+ sqlAdd
+ " group by SAMPLE_AIM_NUMBER"
)
).fetchall()
oData = []
for row in nTable:
cRow = list(row)
canCnt = cRow.pop()
lacCnt = cRow.pop()
sampleName = cRow.pop()
oRow = [sampleName, lacCnt, canCnt]
for cEle in range(0, len(cRow) / 2):
cMean = cRow[2 * cEle]
cSq = cRow[2 * cEle + 1]
oRow += [(cMean)]
try:
cStd = sqrt(cSq - cMean * cMean)
except:
cStd = -1
oRow += [cStd]
oData += [oRow]
htmlTable(headFields, oData)
csvTable(headFields, oData)
def lacunaTable(fields, projectName="", sqlAdd="", maxFetch=10000):
"""lacunaTable(fields,projectName='',sqlAdd='',maxFetch=10000):
"""
if (sqlAdd != "") & (projectName != ""):
sqlAdd += " AND "
if projectName != "":
sqlAdd += ' Project_Number="' + projectName + '" '
if sqlAdd != "":
sqlAdd = " WHERE " + sqlAdd + " "
genFields = []
headFields = ["Sample"]
for cField in fields:
genFields += ["" + cField + ""]
cName = cField
if fullFieldNames.has_key(cField.upper()):
cName = fullFieldNames[cField.upper()]
headFields += [cName]
genFields += ["BoneId"]
nTable = (
cur.execute(
"select "
+ ",".join(genFields)
+ " from Lacuna "
+ sqlAdd
+ " group by BoneId"
)
).fetchmany(maxFetch)
oData = []
for row in nTable:
cRow = list(row)
sampleName = cRow.pop()
oRow = [sampleName]
for cEle in range(0, len(cRow)):
cMean = cRow[cEle]
oRow += [(cMean)]
oData += [oRow]
csvTable(headFields, oData)
def combCanalTable(
params,
boneName="",
sqlAdd="",
canalMax=999,
minRadius=5,
maxRadius=50,
maskRadius=10,
useHTML=True,
useCSV=False,
):
""" combCanalTable(params,boneName='',sqlAdd='',canalMax=999,minRadius=5,maxRadius=50,maskRadius=10,useHTML=True,useCSV=False)
Canal parameters are simply normal names
Lacuna parameters have a & before them
L& means that are normal parameters
AS& means they are to be printed out as avg and variance
"""
header = []
if boneName == "":
sqlAddT = sqlAdd
if len(sqlAdd) > 0:
sqlAddT = " AND " + sqlAddT
sqlString = (
'select SAMPLE_AIM_NUMBER from Lacuna WHERE Project_Number="'
+ str(projectTitle)
+ '" '
+ sqlAddT
+ " group by Sample_AIM_Number"
)
bones = [obj[0] for obj in (cur.execute(sqlString)).fetchall()]
nTable = []
for cBone in bones:
print cBone
(header, curTable) = combCanalTable(
params,
cBone,
sqlAdd,
canalMax,
minRadius,
maxRadius,
maskRadius,
useHTML=False,
useCSV=False,
)
nTable += curTable
boneName = "summary"
else:
if len(sqlAdd) > 0:
sqlAdd = " AND " + sqlAdd
canparms = [cparm for cparm in params if cparm.upper().find("&") < 0]
lacparms = [
cparm.split("&")[1] for cparm in params if cparm.upper().find("L&") >= 0
]
lacstats = [
cparm.split("&")[1] for cparm in params if cparm.upper().find("AS&") >= 0
]
laccounts = [
cparm.split("&")[1] for cparm in params if cparm.upper().find("CT&") >= 0
]
canparms = ["SAMPLE_AIM_NUMBER", "Canal_Number"] + canparms
oTable = cur.execute(
"select "
+ ",".join(canparms)
+ ' from Canal where Project_Number="'
+ str(projectTitle)
+ '" AND SAMPLE_AIM_NUMBER LIKE "'
+ boneName
+ '" group by Canal_Number'
).fetchmany(canalMax)
lacparms = ["COUNT(Lacuna_Number)", "SUM(VOLUME*1000*1000*1000)"] + lacparms
lacstatsparms = ["AVG(" + cparm + "),STD(" + cparm + ")" for cparm in lacstats]
laczusammen = lacparms + lacstatsparms
nTable = []
for cRow in oTable:
sqlString = (
' from Lacuna where Project_Number="'
+ str(projectTitle)
+ '" AND SAMPLE_AIM_NUMBER LIKE "'
+ boneName
+ '" AND Canal_Number='
+ str(cRow[1])
+ " AND Mask_Distance_Mean*1000>"
+ str(maskRadius)
+ " AND Canal_Distance_Mean*1000 BETWEEN "
+ str(minRadius)
+ " AND "
+ str(maxRadius)
+ " "
+ sqlAdd
)
lRow = cur.execute(
"select " + ",".join(laczusammen) + sqlString
).fetchall()[0]
ctRow = []
for curCnt in laccounts:
cval = list(
cur.execute(
"select COUNT(Lacuna_Number),SUM(VOLUME*1000*1000*1000)"
+ sqlString
+ " AND "
+ curCnt
).fetchall()[0]
)
if cval[0] is None:
cval[0] = 0
if cval[1] is None:
cval[1] = 0
try:
ctRow += [float(cval[0]) / lRow[0] * 100.0, cval[1] / lRow[1] * 100]
except:
print (cval[0], lRow[0], cval[1], lRow[1])
ctRow += [-1, -1]
nTable += [cRow + lRow + tuple(ctRow)]
# Assemble the header
for cp in canparms:
if fullFieldNames.has_key(cp.upper()):
header += [fullFieldNames[cp.upper()]]
else:
header += [cp]
for cp in lacparms:
if fullFieldNames.has_key(cp.upper()):
cp = fullFieldNames[cp.upper()]
header += ["Lac." + cp]
for cp in lacstats:
if fullFieldNames.has_key(cp):
cp = fullFieldNames[cp.upper()]
header += ["Lac.Avg." + cp]
header += ["Lac.Std." + cp]
for cp in laccounts:
if fullFieldNames.has_key(cp.upper()):
cp = fullFieldNames[cp.upper()]
header += ["Num%" + cp]
header += ["Vol%" + cp]
if useHTML:
htmlTable(header, nTable)
elif useCSV:
csvTable(header, nTable, strcsv(boneName) + ".csv")
else:
return (header, nTable)
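# Usage sketch of the prefixes documented in the docstring (field names are illustrative):
#   combCanalTable(["CANAL_LENGTH", "AS&VOLUME*1000*1000*1000", "CT&THICKNESS*1000>5"],
#                  boneName="B123", useHTML=True)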
```
#### File: snippets/Sage/dbAddSample.py
```python
import DBTools as dbt
import DBInit as dbi
import os
import numpy
from numpy import *
def getTableColumns(cur,tableName):
rcur=cur.execute('SHOW COLUMNS FROM '+tableName)
tabCols={}
for cVal in rcur:
colDict={}
if cVal[1].lower().find('text')==0: colDict['sql']='TEXT'
if cVal[1].lower().find('int(')==0: colDict['sql']='INTEGER'
if cVal[1].lower().find('double')==0: colDict['sql']='REAL'
if cVal[3].lower().find('pri')==0: colDict['primary_key']=True
if cVal[3].lower().find('uni')==0: colDict['unique']=True
tabCols[cVal[0].upper()]=colDict
return tabCols
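# Returns a mapping of upper-cased column names to their type/flag info, e.g. (illustrative):
#   getTableColumns(cur, 'Lacuna')
#   # -> {'VOLUME': {'sql': 'REAL'}, 'SAMPLE_AIM_NUMBER': {'sql': 'INTEGER'}, ...}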
#file2blob=lambda fname: (zlib.compress(open(fname,'rb').read(),9)) # without rescaling
from PIL import Image
import cStringIO,zlib
def file2blob(cFile,imgSize): # with rescaling
ty=Image.open(cFile)
odata=cStringIO.StringIO()
if imgSize is not None: ty=ty.resize(imgSize)
ty.save(odata,'png')
ovals=odata.getvalue()
print len(ovals)
return zlib.compress(ovals,9)
def dbAddImage(cur,fileName,sampleNum=None,projNum=None,view=None,desc='',imgType=None,imgSize=(100,100),doInsert=False,doReplace=True):
cName=fileName.split('/')[-1]
if imgType is None: imgType=cName.split('.')[-1].upper()
if view is None: view=cName.split('-')[0]
if sampleNum is None:
sampleNum=cName.split('.')[0].split('-')[1]
if type(sampleNum) is type(''):
sampleName=sampleNum
sampleNum=dbt.getSampleNum(cur,sampleName,projNum=projNum,doInsert=doInsert,dataPath=fileName)
print (sampleName,sampleNum)
#if doReplace:
# cur.execute('DELETE FROM IMAGES WHERE SAMPLE_AIM_NUMBER=? AND NAME=? AND VIEW=?', (sampleNum,cName,view))
# cur.commit()
sqlSubStr='INTO IMAGES (SAMPLE_AIM_NUMBER,NAME,DESCRIPTION,TYPE,VIEW,IMAGE) VALUES (%s,%s,%s,%s,%s,%s)'
sqlCmd='INSERT'
if doReplace: sqlCmd='REPLACE'
cur.__cursor__.execute(sqlCmd+' '+sqlSubStr,(sampleNum,cName,desc,imgType,view,file2blob(fileName,imgSize)))
cur.commit()
print 'Upload Image...'
#
from numpy import *
import numpy
def LoadCSVFile(filename):
try:
rawtext=''.join(open(filename).readlines())
except:
print filename+' is garbage'
try:
(outrows,a,b)=parseCSV(rawtext)
if outrows>2:
return (a,b)
else:
            print filename+' is too short!'
except:
print filename+' is junk:'+rawtext[0:20]
def pathToSample(cur,projectTitle,samplename,rawFilename,doInsert=False):
if type(projectTitle) is type(''): cProjNum=dbt.getProjNum(cur,projectTitle,doInsert=True)
else: cProjNum=projectTitle
dataPath=os.path.abspath(rawFilename+'/..')
print 'Searching for Sample : '+str(('SAMPLE:',samplename,'PROJ:',cProjNum,'insert:',doInsert,'datapath:',dataPath))
cSampleNum=dbt.getSampleNum(cur,samplename,cProjNum,doInsert=doInsert,dataPath=dataPath)
print (cSampleNum,'Sample Matched!')
return (cProjNum,cSampleNum)
def lacpa_adddb(cur,ptList,oTable,rawFilename,processName=True,tableName='Lacuna',CanalMode=0,projectTitle='DEBUG',samplename=None):
lacNumOffset=0
if processName: (filename,lacNumOffset)=processInputName(rawFilename,lacFilename)
else: filename=rawFilename
if samplename is None: samplename=filename
ptList=dbt.CaseFreeDict(ptList)
dbLen=len(ptList['SCALE_X'])
if not oTable.has_key('SAMPLE'):
oTable['SAMPLE']=''
print filename+' is missing sample name'
dx=numpy.median(ptList['SCALE_X'])
dy=numpy.median(ptList['SCALE_Y'])
dz=numpy.median(ptList['SCALE_Z'])
dr=sqrt(dx**2+dy**2+dz**2)
lacTemp={}
(cProjNum,cSampleNum)=pathToSample(cur,projectTitle,samplename,rawFilename,doInsert=True)
if cSampleNum<0: return -1
csCount=dbt.getSampleCount(cur,cSampleNum,tableName=tableName)
if csCount>0:
print 'Error:'+samplename+' should be empty ('+str(csCount)+') in table '+tableName
return -1
lacTemp['SAMPLE_AIM_Number']=(cSampleNum,)*dbLen
lacTemp['Project_Number']=(cProjNum,)*dbLen
lacunIds=[lacId+lacNumOffset for lacId in ptList['Lacuna_NUMBER']]
lacTemp[tableName+'_Number']=tuple(lacunIds)
# Variables that scale directly with x,y,z voxel size
lacTemp['VOX_SIZE']=tuple(numpy.abs(ptList['SCALE_X']*1000))
scaleVars=['POS','STD','PROJ']
for cVar in scaleVars:
for cAx in ['X','Y','Z']:
lacTemp[cVar+'_'+cAx]=tuple(ptList[cVar+'_'+cAx]*ptList['SCALE_'+cAx])
    # This doesn't work since I don't save PCA1,2,3
    # Variables that scale with the PCA 1,2,3 voxel size; * denotes PCA1, PCA2, PCA3
pcaScaleVars=['*_S','PROJ_*']
for cAx in ['PCA1','PCA2','PCA3']:
cDr=numpy.sqrt((ptList[cAx+'_X']*dx)**2+(ptList[cAx+'_Y']*dy)**2+(ptList[cAx+'_Z']*dz)**2)
for cVar in pcaScaleVars:
rcVar=cAx.join(cVar.split('*'))
lacTemp[rcVar]=tuple(ptList[rcVar]*cDr)
# Normal Variables
normalVars= ['PCA1_X','PCA1_Y','PCA1_Z','PCA2_X','PCA2_Y','PCA2_Z']
normalVars+=['MASK_GRAD_X','MASK_GRAD_Y','MASK_GRAD_Z','MASK_ANGLE']
if CanalMode==0: normalVars+=['Canal_ANGLE','Canal_GRAD_X','Canal_GRAD_Y','Canal_GRAD_Z']
for cVar in normalVars:
if ptList.has_key(cVar): lacTemp[cVar]=tuple(ptList[cVar])
        elif ((cVar.find('GRAD')>=0) or (cVar.find('ANGLE')>=0)): lacTemp[cVar]=(-1,)*dbLen
else: print 'Missing important column:'+cVar+', what the frick!'
# Variables that require a radial scaling factor
radialVars=['MASK_DISTANCE_MEAN','MASK_DISTANCE_STD'] # 'MASK_DISTANCE_COV'
radialVars+=['OBJ_RADIUS','OBJ_RADIUS_STD']
if CanalMode==0:
if ptList.has_key(cVar): radialVars+=['Canal_DISTANCE_MEAN','Canal_DISTANCE_STD']
for cVar in radialVars:
if ptList.has_key(cVar): lacTemp[cVar]=tuple(numpy.abs(ptList[cVar]*dr))
if ptList.has_key('Canal_Distance'): # on some datasets it does not have the _MEAN suffix
lacTemp['Canal_DISTANCE_MEAN']=tuple(numpy.abs(ptList['Canal_Distance']*dr))
# Variables that require a radial cubed scaling factor
volVars=['VOLUME','VOLUME_BOX']
for cVar in volVars:
lacTemp[cVar]=tuple(numpy.abs(ptList[cVar]*dx*dy*dz))
if ptList.has_key('SHELL_CNT'):
lacTemp['VOLUME_LAYER']=tuple(numpy.abs((ptList['VOLUME']-ptList['SHELL_CNT'])*dx*dy*dz))
# GrayAnalysis Columns
if ptList.has_key('MASK'): # new Lacuna method
lacTemp['MASK_DISTANCE_MEAN']=tuple(numpy.abs(ptList['MASK']*dr))
lacTemp['MASK_DISTANCE_STD']=tuple(numpy.abs(ptList['MASK_STD']*dr))
if ptList.has_key('MASK_WX'):
lacTemp['MASK_GRAD']=tuple(ptList['MASK'])
lacTemp['MASK_DISTANCE_STD']=tuple(ptList['MASK_STD'])
if ptList.has_key('SHELL_ABSORPTION'):
lacTemp['SHELL_ABSORPTION']=tuple(ptList['SHELL_ABSORPTION'])
if ptList.has_key('SHELL_ABSORPTION_STD'):
lacTemp['SHELL_ABSORPTION_STD']=tuple(ptList['SHELL_ABSORPTION_STD'])
else:
lacTemp['SHELL_ABSORPTION']=(-1,)*dbLen
lacTemp['SHELL_ABSORPTION_STD']=(-1,)*dbLen
# Lining Absorption
if ptList.has_key('LINING_ABSORPTION'):
lacTemp['LINING_ABSORPTION']=tuple(ptList['LINING_ABSORPTION'])
if ptList.has_key('LINING_ABSORPTION_STD'):
lacTemp['LINING_ABSORPTION_STD']=tuple(ptList['LINING_ABSORPTION_STD'])
else:
lacTemp['LINING_ABSORPTION']=(-1,)*dbLen
lacTemp['LINING_ABSORPTION_STD']=(-1,)*dbLen
if CanalMode==0:
        # This doesn't work since I don't save PCA1,2,3
        # Variables that scale with the PCA 1,2,3 voxel size; * denotes PCA1, PCA2, PCA3
for cAx in ['PCA1','PCA2','PCA3']:
cDr=numpy.sqrt((ptList[cAx+'_X']*dx)**2+(ptList[cAx+'_Y']*dy)**2+(ptList[cAx+'_Z']*dz)**2)
rcVar='DENSITY_PROJ_'+cAx
if ptList.has_key('DENSITY_VOLUME_PROJ_'+cAx):
lacTemp[rcVar]=tuple(ptList[rcVar]*cDr)
else:
lacTemp[rcVar]=(-1,)*dbLen
if ptList.has_key('Canal_NUMBER'):
lacTemp['Canal_NUMBER']=tuple(ptList['Canal_NUMBER'])
#lacTemp['Canal_NAME']=tuple([projectTitle+'_'+filename+'_CAN_'+str(int(curCan)) for curCan in ptList['Canal_NUMBER']])
elif ptList.has_key('Canal_Region'):
lacTemp['Canal_NUMBER']=tuple(ptList['Canal_Region'])
if ptList.has_key('Canal_NUMBER_STD'):
lacTemp['Canal_NUMBER_STD']=tuple(ptList['Canal_NUMBER_STD'])
elif ptList.has_key('Canal_Region_STD'):
lacTemp['Canal_NUMBER_STD']=tuple(ptList['Canal_Region_STD'])
else:
lacTemp['Canal_NUMBER_STD']=(-1,)*dbLen
# Nearest Neighbors
lacTemp['NEAREST_NEIGHBOR_DISTANCE']=(-1,)*dbLen
lacTemp['NEAREST_NEIGHBOR_ANGLE']=(-1,)*dbLen
if ptList.has_key('NEIGHBORS'):
lacTemp['NEAREST_NEIGHBOR_NEIGHBORS']=tuple(ptList['NEIGHBORS'])
else:
lacTemp['NEAREST_NEIGHBOR_NEIGHBORS']=(-1,)*dbLen
# Mask Params
lacTemp['POS_RADIUS']=(-1,)*dbLen
lacTemp['MASK_RADIUS']=(-1,)*dbLen
lacTemp['MASK_RADIUS_MIN']=(-1,)*dbLen
lacTemp['MASK_RADIUS_MAX']=(-1,)*dbLen
lacTemp['MASK_RADIUS_MEAN']=(-1,)*dbLen
lacTemp['MASK_THETA']=(-1,)*dbLen
if ptList.has_key('THICKNESS'):
lacTemp['THICKNESS']=tuple(ptList['THICKNESS'])
else:
lacTemp['THICKNESS']=(-1,)*dbLen
if ptList.has_key('THICKNESS_STD'):
lacTemp['THICKNESS_STD']=tuple(ptList['THICKNESS_STD'])
else:
lacTemp['THICKNESS_STD']=(-1,)*dbLen
# Lacuna Density / Volume
if ptList.has_key('DENSITY_VOLUME'):
lacTemp['DENSITY_VOLUME']=tuple(numpy.abs(ptList['DENSITY_VOLUME']*dx*dy*dz))
lacTemp['DENSITY']=tuple(numpy.abs(1/(ptList['DENSITY_VOLUME']*dx*dy*dz)))
elif ptList.has_key('DENSITY_VOLUME_CNT'):
lacTemp['DENSITY_VOLUME']=tuple(numpy.abs(ptList['DENSITY_VOLUME_CNT']*dx*dy*dz))
lacTemp['DENSITY']=tuple(numpy.abs(1/(ptList['DENSITY_VOLUME_CNT']*dx*dy*dz)))
elif ptList.has_key('DENSITY_CNT'):
lacTemp['DENSITY_VOLUME']=tuple(numpy.abs(ptList['DENSITY_CNT']*dx*dy*dz))
lacTemp['DENSITY']=tuple(numpy.abs(1/(ptList['DENSITY_CNT']*dx*dy*dz)))
else:
lacTemp['DENSITY_VOLUME']=(-1,)*dbLen
lacTemp['DENSITY']=(-1,)*dbLen
if CanalMode==0:
# Lacuna Territory Shape
lacTemp['DISPLACEMENT_MEAN']=(-1,)*dbLen
if ptList.has_key('NEIGHBOR_AREA'):
lacTemp['DENSITY_VOLUME_SHELL']=tuple(numpy.abs(ptList['NEIGHBOR_AREA']*dx*dy))
elif ptList.has_key('MASK_VOLUME_SHELL_CNT'):
## Old Definition of Shell
lacTemp['DENSITY_VOLUME_SHELL']=tuple(numpy.abs(ptList['MASK_VOLUME_SHELL_CNT']*dx*dy*dz))
else:
lacTemp['DENSITY_VOLUME_SHELL']=(-1,)*dbLen
# Lacuna Territory that is mineralized
if ptList.has_key('BONE_VOLUME_CNT'):
lacTemp['DENSITY_VOLUME_BONE']=tuple(numpy.abs(ptList['BONE_VOLUME_CNT']*dx*dy*dz))
else:
lacTemp['DENSITY_VOLUME_BONE']=(-1,)*dbLen
# Lacuna Territory that is part of the mask (for porosity calculations)
if ptList.has_key('MASK_VOLUME_CNT'):
lacTemp['DENSITY_VOLUME_MASK']=tuple(numpy.abs(ptList['MASK_VOLUME_CNT']*dx*dy*dz))
else:
lacTemp['DENSITY_VOLUME_MASK']=(-1,)*dbLen
# PCA1 is a makeshift holding place for STD until the table is once again updated
terrShapeMap={'DENSITY_VOLUME_C':'DENSITY_','DENSITY_VOLUME_S':'DENSITY_STD_'}
for cKey in terrShapeMap.keys():
missingKeys=False
for cAx in ['X','Y','Z']:
#print cKey+cAx
if ptList.has_key(cKey+cAx):
#print 'isch da'
lacTemp[terrShapeMap[cKey]+cAx]=tuple(ptList[cKey+cAx]*ptList['SCALE_'+cAx])
else:
if cKey=='DENSITY_VOLUME_C': missingKeys=True
else:
lacTemp[terrShapeMap[cKey]+cAx]=(-1,)*dbLen
if not missingKeys:
if cKey=='DENSITY_VOLUME_C':
dispMean=numpy.sqrt(((ptList[cKey+'X']-ptList['POS_X'])*dx)**2+((ptList[cKey+'Y']-ptList['POS_Y'])*dy)**2+((ptList[cKey+'Z']-ptList['POS_Z'])*dz)**2)
lacTemp['DISPLACEMENT_MEAN']=tuple(dispMean)
lacTemp['DISPLACEMENT_X']=tuple((ptList[cKey+'X']-ptList['POS_X'])*dx)
lacTemp['DISPLACEMENT_Y']=tuple((ptList[cKey+'Y']-ptList['POS_Y'])*dy)
lacTemp['DISPLACEMENT_Z']=tuple((ptList[cKey+'Z']-ptList['POS_Z'])*dz)
# Polar Coordinates Hints
# Only really valid for Full Femur, but Lacuna angle can be useful
mR=numpy.sqrt(((ptList['POS_X']-numpy.mean(ptList['POS_X']))*dx)**2+((ptList['POS_Y']-numpy.mean(ptList['POS_Y']))*dy)**2)
for cPCA in [1,2]:
pR=numpy.sqrt(ptList['PCA'+str(cPCA)+'_X']**2+ptList['PCA'+str(cPCA)+'_Y']**2)
pPhi=180/pi*numpy.arctan2(ptList['PCA'+str(cPCA)+'_Z'],pR)
lacTemp['PCA'+str(cPCA)+'_Phi']= tuple(pPhi)
lacTemp['PCA'+str(cPCA)+'_Theta']=tuple(180/pi*numpy.arccos(ptList['PCA'+str(cPCA)+'_X']/pR)) # update
# Junk Angles
lacTemp['POS_THETA']=(-1,)*dbLen
lacTemp['MASK_THETA']=(-1,)*dbLen
lacTemp['POS_DISTANCE']=(-1,)*dbLen
lacTemp['NEAREST_NEIGHBOR_AVG']=(-1,)*dbLen
lacTemp['NEAREST_NEIGHBOR_DISTANCE']=(-1,)*dbLen
lacTemp['NEAREST_NEIGHBOR_DISTANCE']=(-1,)*dbLen
# Normalize PCA
pcastot=dr*numpy.sqrt(ptList['PCA1_S']**2+ptList['PCA2_S']**2+ptList['PCA3_S']**2)
#lacTemp['PCAS_TOTAL']=tuple(pcastot)
#for tz in lacTemp.keys(): print tz+' '+str(len(lacTemp[tz]))
tcols=getTableColumns(cur,tableName)
outKeys=[cKey for cKey in lacTemp.keys() if cKey.upper() in tcols]
missingKeys=[cKey for cKey in lacTemp.keys() if cKey.upper() not in tcols]
if len(missingKeys)>0: print 'Missing Keys from '+tableName+' : '+str(missingKeys)
outArr=[lacTemp[cKey] for cKey in outKeys]
#for cKey in outKeys: print (cKey,len(lacTemp[cKey]))
outMat=numpy.array(outArr).swapaxes(1,0)
invalidRows=numpy.sum(numpy.isnan(outMat),1)
outMat=outMat[numpy.nonzero(invalidRows==0)[0],:]
globals()['Om']=outMat
outMat=[tuple(obj) for obj in outMat]
addRowsToTable(cur,tableName,outMat,entry_order=outKeys)
print filename+' was successfully entered %05d, invalid %03d' % (lacNumOffset,sum(invalidRows))
def addRowsToTable(cur,tableName,outMat,entry_order=[]):
#globals()['test']=(tableName,outMat,entry_order)
# old command
#lacDB.add_rows(tableName,outMat,entry_order=entry_order)
print (len(entry_order),numpy.array(outMat[0]).shape)
cur.execute('BEGIN')
sqlString='INSERT INTO '+tableName+' ('+','.join(entry_order)+') VALUES ('+','.join(['?']*len(entry_order))+');'
cur.executemany(sqlString,outMat)
#print(sqlString)
#for cRow in outMat:
# globals()['cRow']=cRow
# cur.execute(sqlString,cRow)
cur.execute('COMMIT')
def parseCSV(text,filename=''):
def temp_clean(text):
return (''.join(text.split('/'))).upper().strip()
def temp_parse(temp):
ntemp=[]
errCount=0
for val in temp:
if val.strip().upper()=='NAN':
cval='0'
errCount+=1
else:
cval=val
try:
cval=single(cval)
except:
cval=-1
errCount+=1
ntemp+=[cval]
return (ntemp,errCount)
rows=text.split('\n')
# First row is header
head1=rows[0]
newStr=[cEle.strip().split(':') for cEle in head1[head1.find('//')+1:].strip().split(',')]
fileDict={}
for cEle in newStr: fileDict[temp_clean(cEle[0])]=cEle[1].split('/')[-1].strip()
fTime=True
head2=rows[1]
head2=[temp_clean(cEle) for cEle in head2[head2.find('//')+1:].strip().split(',')]
# Check for duplicates in header string (and just use the last entry)
# Generate a dictionary of all header entries
cleanHeader={}
for k in range(len(head2)):
cleanHeader[head2[k]]=k
# create a new null filled header
head2=['NULL']*len(head2)
# use the dictionary to repopulate the head2 entry
for cKey in cleanHeader.keys(): head2[cleanHeader[cKey]]=cKey
outTable={}
for col in head2: outTable[col]=[]
for row in rows[2:]:
temp=row.split(',')
try:
(ntemp,errs)=temp_parse(temp)
if errs<2:
if len(ntemp)==len(head2):
for k in range(0,len(head2)): outTable[head2[k]]+=[ntemp[k]]
except:
#if fTime: print (len(ntemp),len(head2))
fTime=False
temp=[]
for col in head2: outTable[col]=numpy.array(outTable[col])
outrows=len(outTable[head2[0]])
print 'Parsed .. '+str(outrows)+' of '+str(len(rows))
return (outrows,fileDict,outTable)
def InsertMetric(cur,filename,metricName,metricValue,metricString='',samplename=None,projectTitle='DEBUG'):
print 'entre'
(cProjNum,cSampleNum)=pathToSample(cur,projectTitle,samplename,filename,doInsert=False)
if cSampleNum>0:
print ('AddSampleMetric',cSampleNum,metricName,metricValue,metricString)
dbt.addSampleMetrics(cur,cSampleNum,metricName,metricValue,strVal=metricString)
cur.commit()
else:
print ('Sample Numbers <0 Do not exist, Skipped')
def InsertCSV(cur,filename,samplename=None,projectTitle='DEBUG',tableName='Lacuna',CanalMode=0):
(a,b)=LoadCSVFile(filename)
if samplename is None: samplename=filename
lacpa_adddb(cur,b,a,filename,False,projectTitle=projectTitle,samplename=samplename,tableName=tableName,CanalMode=CanalMode)
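# Typical use (sketch; paths and names are illustrative):
#   cur = dbt.StartDatabase()
#   InsertCSV(cur, '/data/B1234/lacun_5.csv', samplename='B1234',
#             projectTitle='UJAX_F2', tableName='Lacuna', CanalMode=0)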
```
#### File: snippets/Sage/histoSub.py
```python
import sys,os
from numpy import *
from subprocess import *
import parseHistogram as ph
import preptext as pt
import prepedge as pe
from optparse import OptionParser
hsubOpt=OptionParser()
hsubOpt.add_option('-p','--project',dest='project',help='Project Title [default: %default]',default='UJAX_F2')
hsubOpt.add_option('-f','--file',dest='filename',help='Histograms to Analyze [default: %default]',default='maskdto.tif.csv')
hsubOpt.add_option('-v','--value',dest='valcol',help='Column number for value column [default: %default]',default=0)
hsubOpt.add_option('-w','--weight',dest='weightcol',help='Column number for weight column [default: %default]',default=1)
hsubOpt.add_option('-t','--texture',dest='textMode',help='Run Texture Analysis [default: %default]',default=False,action='store_true')
hsubOpt.add_option('-e','--prepedge',dest='prepedge',help='Skip PrepEdge Analysis [default: %default]',default=True,action='store_false')
hsubOpt.add_option('-m','--meanname',dest='meanname',help='Mean Parameter Name [default: %default]',default='CT_TH')
(opt,args)=hsubOpt.parse_args()
hsubOpt.add_option('-s','--stdname',dest='stdname',help='STD Parameter Name [default: %default]',default=opt.meanname+'_STD')
(opt,args)=hsubOpt.parse_args()
hsubOpt.print_help()
print (opt.filename,opt.valcol,opt.weightcol)
#sys.exit()
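# Example invocation (sketch; the option values shown are the defaults listed above):
#   python histoSub.py -p UJAX_F2 -f maskdto.tif.csv -v 0 -w 1 -m CT_TH
# Adding -t switches to the texture-analysis branch instead of the plain histogram statistics.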
from glob import glob
from dbAddSample import *
cur=dbt.StartDatabase()
vmsFix=lambda wholeFile: '\\;'.join(wholeFile.split(';'))
def canBeIntFilter(x):
try:
y=int(x)
return True
except:
return False
for rt,drs,files in os.walk(os.getcwd(),topdown=False):
ffiles=filter(lambda x: x.find(opt.filename)>=0,files)
for ifile in ffiles:
cfile=rt+'/'+ifile
print cfile
if opt.textMode:
steps=(5,5,5)
lstep=numpy.array((42,42,42*1.5))
dirV=['DIR_X','DIR_Y','DIR_Z']
isoSteps=False
useLstep=False
useBubbles=False
useRadPos=False
doStrain=-1
trimEdge=1
try:
if opt.prepedge: # Generate prepared edge file first with correct positions
laclist=glob(rt+'/lacun_*.csv')
lacFile=filter(lambda x: canBeIntFilter(x[x.rfind('_')+1:x.rfind('.csv')]),sorted(laclist))[-1] # latest lacuna
print ('Making Edges',cfile,lacFile)
textFile=pe.prepareEdge(cfile,lacFile)
else:
textFile=cfile
tInfo=pt.runTexture(textFile,steps,lstep,dirV,isoSteps,useLstep,useBubbles,useRadPos,doStrain,trimEdge=trimEdge)
print tInfo
tText=tInfo['AvgTexture']
cov=tText.tens()
print (cur,cfile,opt.meanname,tText.aiso,tText.obl)#projectTitle=opt.project)
InsertMetric(cur,cfile,opt.meanname+'_AISO',tText.aiso,projectTitle=opt.project)
InsertMetric(cur,cfile,opt.meanname+'_OBLATENESS',tText.obl,projectTitle=opt.project)
for (cDex,cLabel) in enumerate('XYZ'):
for (rDex,rLabel) in enumerate('XYZ'):
InsertMetric(cur,cfile,opt.meanname+'_'+cLabel+rLabel,cov[cDex,rDex],projectTitle=opt.project)
except:
print 'Texture Failed!'
else:
try:
(a,b)=ph.readHist(cfile)
(meanVal,stdVal)=ph.meanStd(b,valCol=opt.valcol,weightCol=opt.weightcol)
print ('Submitting :',rt,'<Mean,STD>',meanVal,stdVal)
if len(opt.meanname): InsertMetric(cur,cfile,opt.meanname,meanVal,projectTitle=opt.project)
if len(opt.stdname): InsertMetric(cur,cfile,opt.stdname,stdVal,projectTitle=opt.project)
except:
print cfile+' IS INVALID!'
``` |
{
"source": "JLLEW/World-Simulation-",
"score": 3
} |
#### File: JLLEW/World-Simulation-/organizm.py
```python
from komentator import Komentator
from abc import ABC, abstractmethod
class Organizm(ABC):
id = 0
komentator = Komentator()
def __init__(self, swiat, sila, priorytet, ruch, color, x, y):
self.obecnySwiat = swiat
self.id = Organizm.id
self.sila = sila
self.priorytet = priorytet
self.ruch = ruch
self.color = color
self.x = x
self.y = y
self.prevX = x
self.prevY = y
Organizm.id += 1
if ruch:
self.obecnySwiat.dodaj_organizm(self)
else:
self.obecnySwiat.dodaj_do_poczekalni(self)
def rysuj(self):
x = self.x
y = self.y
cell_width = self.obecnySwiat.cell_width
cell_height = self.obecnySwiat.cell_height
posx = x * cell_width
posy = y * cell_height
w_x = x * cell_width + cell_width
w_y = y * cell_height + cell_height
self.obecnySwiat.canvas.create_rectangle(posx, posy, w_x, w_y, fill=self.color)
def getNazwa(self):
return self.__class__.__name__ + " id: " + str(self.id)
@abstractmethod
def akcja(self): pass
@abstractmethod
def kolizja(self, atakujacy): pass
@abstractmethod
def obrona(self, atakujacy): pass
@abstractmethod
def ucieczka(self, atakujacy): pass
```
#### File: World-Simulation-/rosliny/wilczeJagody.py
```python
from rosliny.roslina import Roslina
import random
class WilczeJagody(Roslina):
def __init__(self, swiat, ruch=False, x=-1, y=-1):
self.sila = 99
self.priorytet = 0
self.obecnySwiat = swiat
if x == -1:
super().__init__(swiat, 99, 0, "navy")
else:
super().__init__(swiat, 99, 0, "navy", ruch, x, y)
def rozsiew(self, swiat, x, y):
super().rozsiew(swiat, x, y)
return WilczeJagody(swiat, False, x, y)
def indeks_rozsiewu(self):
return random.randint(0, 99) >= 95
def kolizja(self, atakujacy):
self.obecnySwiat.plansza[self.y][self.x] = None
self.obecnySwiat.usun_organizm(atakujacy)
self.obecnySwiat.usun_organizm(self)
```
#### File: World-Simulation-/zwierzeta/antylopa.py
```python
from zwierzeta.zwierze import Zwierze
import random
class Antylopa(Zwierze):
def __init__(self, swiat, ruch=False , x=-1, y=-1):
self.sila = 4
self.priorytet = 4
self.obecnySwiat = swiat
if x == -1:
super().__init__(swiat, 4, 4, "chocolate")
else:
super().__init__(swiat, 4, 4, "chocolate", ruch, x, y)
def klonowanie(self,swiat, x, y):
return Antylopa(swiat, False, x, y)
def akcja(self):
kierunek = ["gora", "dol", "lewo", "prawo"]
idz = random.choice(kierunek)
self.prevX = self.x
self.prevY = self.y
self.obecnySwiat.plansza[self.y][self.x] = None
if idz is "gora":
self.y -= 2
elif idz is "dol":
self.y += 2
elif idz is "lewo":
self.x -= 2
else:
self.x += 2
while self.x < 0:
self.x = self.prevX
while self.x >= self.obecnySwiat.szerokosc:
self.x = self.prevX
while self.y < 0:
self.y = self.prevY
while self.y >= self.obecnySwiat.wysokosc:
self.y = self.prevY
if self.obecnySwiat.plansza[self.y][self.x] is not None:
self.obecnySwiat.plansza[self.y][self.x].kolizja(self)
else:
self.obecnySwiat.plansza[self.y][self.x] = self
def ucieczka(self, atakujacy):
czyUciekac = random.randint(0, 1) == 1
if czyUciekac:
if self.x + 1 < self.obecnySwiat.szerokosc and self.obecnySwiat.plansza[self.y][self.x + 1] is None:
self.obecnySwiat.plansza[self.y][self.x + 1] = self
self.obecnySwiat.plansza[self.y][self.x] = atakujacy
self.x += 1
return True
elif self.x - 1 >= 0 and self.obecnySwiat.plansza[self.y][self.x - 1] is None:
self.obecnySwiat.plansza[self.y][self.x - 1] = self
self.obecnySwiat.plansza[self.y][self.x] = atakujacy
self.x -= 1
return True
elif self.y + 1 < self.obecnySwiat.wysokosc and self.obecnySwiat.plansza[self.y + 1][self.x] is None:
self.obecnySwiat.plansza[self.y + 1][self.x] = self
self.obecnySwiat.plansza[self.y][self.x] = atakujacy
self.y += 1
return True
elif self.y - 1 >= 0 and self.obecnySwiat.plansza[self.y - 1][self.x] is None:
self.obecnySwiat.plansza[self.y - 1][self.x] = self
self.obecnySwiat.plansza[self.y][self.x] = atakujacy
self.y -= 1
return True
else:
return False
else:
return False
```
#### File: World-Simulation-/zwierzeta/lis.py
```python
from zwierzeta.zwierze import Zwierze
import random
class Lis(Zwierze):
def __init__(self, swiat, ruch=False, x=-1, y=-1):
self.sila = 3
self.priorytet = 7
self.obecnySwiat = swiat
if x == -1:
super().__init__(swiat, 3, 7, "red")
else:
super().__init__(swiat, 3, 7, "red", ruch, x, y)
def klonowanie(self, swiat, x, y):
return Lis(swiat, False, x, y)
def akcja(self):
powtorka = True
ruch = True
lewySilniejszy = False
prawySilniejszy = False
goraSilniejszy = False
dolSilniejszy = False
kierunek = ["gora", "dol", "lewo", "prawo"]
self.obecnySwiat.plansza[self.y][self.x] = None
while ruch:
while powtorka and not (lewySilniejszy and prawySilniejszy and goraSilniejszy and dolSilniejszy):
idz = random.choice(kierunek)
powtorka = False
self.prevY = self.y
self.prevX = self.x
                if idz == "gora":
if not goraSilniejszy:
self.y -= 1
else:
powtorka = True
                elif idz == "dol":
if not dolSilniejszy:
self.y += 1
else:
powtorka = True
                elif idz == "lewo":
if not lewySilniejszy:
self.x -= 1
else:
powtorka = True
else:
if not prawySilniejszy:
self.x += 1
else:
powtorka = True
while self.x < 0:
self.x += 1
while self.x >= self.obecnySwiat.szerokosc:
self.x -= 1
while self.y < 0:
self.y += 1
while self.y >= self.obecnySwiat.wysokosc:
self.y -= 1
if self.obecnySwiat.plansza[self.y][self.x] is None:
self.obecnySwiat.plansza[self.y][self.x] = self
ruch = False
elif self.obecnySwiat.plansza[self.y][self.x].sila <= self.sila:
self.obecnySwiat.plansza[self.y][self.x].kolizja(self)
ruch = False
elif goraSilniejszy and dolSilniejszy and prawySilniejszy and lewySilniejszy:
self.x = self.prevX
self.y = self.prevY
self.obecnySwiat.plansza[self.y][self.x] = self
break
else:
                if idz == "gora":
                    goraSilniejszy = True
                elif idz == "dol":
                    dolSilniejszy = True
                elif idz == "lewo":
lewySilniejszy = True
else:
prawySilniejszy = True
ruch = True
self.x = self.prevX
self.y = self.prevY
```
#### File: World-Simulation-/zwierzeta/zolw.py
```python
from zwierzeta.zwierze import Zwierze
import random
class Zolw(Zwierze):
def __init__(self, swiat,ruch=False, x=-1, y=-1):
self.sila = 2
self.priorytet = 1
self.obecnySwiat = swiat
if x == -1:
super().__init__(swiat, 2, 1, "olive")
else:
super().__init__(swiat, 2, 1, "olive", ruch, x, y)
def klonowanie(self, swiat, x, y):
return Zolw(swiat, False, x, y)
def akcja(self):
move = random.randint(1, 100) <= 25
if move:
super().akcja()
def obrona(self, atakujacy):
if atakujacy.sila < 5:
self.obecnySwiat.plansza[atakujacy.prevY][atakujacy.prevX] = atakujacy
atakujacy.x = atakujacy.prevX
atakujacy.y = atakujacy.prevY
return True
return False
``` |
{
"source": "jllivermont/hotjar-task",
"score": 2
} |
#### File: hotjar-task/survey/notifier.py
```python
import os
import pusher
CHANNEL = "response-updates"
client = pusher.Pusher(
app_id=os.environ.get("PUSHER_APP_ID"),
key=os.environ.get("PUSHER_KEY"),
secret=os.environ.get("PUSHER_SECRET"),
cluster='eu',
ssl=True
) if "IS_PROD" in os.environ else None
def notify(msg_type, payload):
"""Notifies that a SurveyResponse has been created or modified"""
if client is not None:
client.trigger(CHANNEL, msg_type, payload)
```
#### File: tests/test_survey/test_models.py
```python
import pytest
from peewee import IntegrityError
from survey.models import SurveyResponse
def test_create_user_with_all_fields_successful():
return SurveyResponse.create(
name="James",
email="<EMAIL>",
age=35,
about_me="developer",
address="123 Big Street, Waunakee, WI, USA",
gender="male",
favorite_book="War & Peace",
favorite_colors="red,blue,green")
def test_create_user_with_minimum_required_fields_successful():
SurveyResponse.create(
name="Nancy",
email="<EMAIL>",
age=62,
about_me="I love gardening!!!")
def test_create_user_with_minimum_model_fields_successful():
SurveyResponse.create(
name="Pedro",
email="<EMAIL>")
def test_user_with_existing_email_is_not_inserted():
SurveyResponse.create(
name="Rosa",
email="<EMAIL>",
age=62,
about_me="I love birds")
with pytest.raises(IntegrityError):
SurveyResponse.create(
name="Joan",
email="<EMAIL>",
age=63,
about_me="I love gardening, too!!!")
```
#### File: tests/test_survey/test_normalizer.py
```python
from survey.normalizer import normalize
def test_fields():
input_data = {
"name": " <NAME>",
"email": "<EMAIL> ",
"age": "52 ",
"about_me": " I love fish!! !",
"address": "N/A",
"gender": " MALE",
"favorite_book": "Call of the Wild ",
"favorite_colors": " RED, pINK , blue "
}
normalized_data = normalize(input_data)
assert normalized_data["name"] == "<NAME>"
assert normalized_data["email"] == "<EMAIL>"
assert normalized_data["age"] == 52
assert normalized_data["about_me"] == "I love fish!! !"
assert normalized_data["address"] == "N/A"
assert normalized_data["gender"] == "male"
assert normalized_data["favorite_book"] == "Call of the Wild"
assert normalized_data["favorite_colors"] == "red,pink,blue"
``` |
{
"source": "jlllk/hp_manager",
"score": 3
} |
#### File: hp_manager/api/crud.py
```python
from sqlalchemy.orm import Session
from . import models, schemas
def db_write(db: Session, obj):
db.add(obj)
db.commit()
db.refresh(obj)
def get_user(db: Session, user_id: int):
return db.query(models.User).filter(models.User.id == user_id).first()
def update_balance(db: Session,
user_id: int,
update: schemas.Balance):
db_user = db.query(models.User).filter(models.User.id == user_id).first()
if db_user:
if update.reset:
db_user.balance = 0
else:
db_user.balance = update.balance
db.commit()
return db_user
def get_user_by_name(db: Session, name: str):
return db.query(models.User).filter(models.User.name == name).first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.User).offset(skip).limit(limit).all()
def create_user(db: Session, user: schemas.UserBase):
db_user = models.User(name=user.name)
db_write(db, db_user)
return db_user
def get_games(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Game).offset(skip).limit(limit).all()
def create_game(db: Session, game: schemas.GameBase):
db_game = models.Game(title=game.title)
db_write(db, db_game)
return db_game
def create_session(
db: Session,
session: schemas.SessionCreate, user_id: int, game_id: int):
db_session = models.Session(
**session.dict(),
owner_id=user_id,
game_id=game_id,
)
db_write(db, db_session)
``` |
{
"source": "jlln/GeneticKnapsack",
"score": 3
} |
#### File: GeneticAlgorithms/src/KnapsackIndividual.py
```python
import numpy as np
from src.Evolution import Individual
class KnapsackItem:
def __init__(self,weight,value):
self.value = value
self.weight = weight
class KnapsackIndividual(Individual):
def fitness_score(self):
if self.total_weight() > self.max_weight:
scaling_extra = self.total_weight() - (self.max_weight + 1) # penalize being overweight
scaling = self.total_weight() + scaling_extra
else:
scaling = self.total_weight() ** 0.5
return self.total_value() ** 3 / (scaling + 1)
def __init__(self, item_pool, max_weight=100):
self.expected = int(np.round(max_weight / np.mean([x.weight for x in item_pool])))
if self.expected > len(item_pool):
self.expected = len(item_pool)
self.items = np.random.choice(item_pool, size=self.expected, replace=False)
self.item_pool = item_pool
self.max_weight = max_weight
def is_legal(self):
return self.total_weight() < self.max_weight
def total_weight(self):
return np.sum([x.weight for x in self.items])
def total_value(self):
return np.sum([x.value for x in self.items])
def report(self):
return ";".join([str([i.weight,i.value]) for i in self.items])
def mutate(self, prob):
n_items = len(self.items)
cutoff = np.random.random()
if prob < cutoff:
action = np.random.choice(["i","m","d"])
if action == "i" or len(self.items) < 2:
# Insert
if len(self.items) < len(self.item_pool):
self.items = list(self.items) + list(
np.random.choice([x for x in self.item_pool if x not in self.items], size=1))
elif action == "m":
# Modify
mut_index = np.random.randint(0, n_items)
if len([x for x in self.item_pool if x not in self.items]) > 0:
self.items[mut_index] = np.random.choice([x for x in self.item_pool if x not in self.items])
else:
# Delete
mut_element = np.random.choice(self.items)
self.items = [x for x in self.items if x != mut_element]
def breed(self, partner):
combined_genes = list(set(list(self.items) + list(partner.items)))
offspring = KnapsackIndividual(combined_genes, self.max_weight)
return offspring
``` |
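A minimal usage sketch for the classes above (assuming the `Individual` base class in `src.Evolution` adds no constructor requirements beyond what `KnapsackIndividual` overrides): build a random item pool, create two individuals, then mutate and breed them.
```python
import numpy as np

# Illustrative only; KnapsackItem and KnapsackIndividual are the classes defined above.
pool = [KnapsackItem(weight=np.random.randint(1, 20), value=np.random.randint(1, 50))
        for _ in range(30)]

parent_a = KnapsackIndividual(pool, max_weight=100)
parent_b = KnapsackIndividual(pool, max_weight=100)

parent_a.mutate(prob=0.5)         # mutation fires when prob is below a random cutoff
child = parent_a.breed(parent_b)  # offspring samples from the union of both parents' items

print(child.total_weight(), child.total_value(), child.fitness_score())
```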
{
"source": "jlmaccal/FrEIA",
"score": 2
} |
#### File: experiments/colorization_cINN/data.py
```python
import sys
import glob
from os.path import join
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, color
from PIL import Image, ImageEnhance
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn.functional as F
import torchvision.transforms as T
from tqdm import tqdm
import joint_bilateral_filter as jbf
import config as c
offsets = (47.5, 2.4, 7.4)
scales = (25.6, 11.2, 16.8)
def apply_filt(args):
'''multiprocessing wrapper for applying the joint bilateral filter'''
L_i, ab_i = args
return jbf.upsample(L_i[0], ab_i, s_x=6, s_l=0.10)
def norm_lab_to_rgb(L, ab, norm=True, filt=False, bw=False):
    '''given an Nx1xWxH Tensor L and an Nx2xwxh Tensor ab, normalized according to offsets and
scales above, upsample the ab channels and combine with L, and form an RGB image.
norm: If false, assume that L, ab are not normalized and already in the correct range
    filt: Use joint bilateral upsampling to do the upsampling. Slow, but improves image quality.
bw: Simply produce a grayscale RGB, ignoring the ab channels'''
if bw:
filt=False
if filt:
with Pool(12) as p:
ab_up_list = p.map(apply_filt, [(L[i], ab[i]) for i in range(len(L))])
ab = np.stack(ab_up_list, axis=0)
ab = torch.Tensor(ab)
else:
ab = F.interpolate(ab, size=L.shape[2], mode='bilinear')
lab = torch.cat([L, ab], dim=1)
for i in range(1 + 2*norm):
lab[:, i] = lab[:, i] * scales[i] + offsets[i]
lab[:, 0].clamp_(0., 100.)
lab[:, 1:].clamp_(-128, 128)
if bw:
lab[:, 1:].zero_()
lab = lab.cpu().data.numpy()
rgb = [color.lab2rgb(np.transpose(l, (1, 2, 0))).transpose(2, 0, 1) for l in lab]
return np.array(rgb)
class LabColorDataset(Dataset):
def __init__(self, file_list, transform=None):
self.files = file_list
self.transform = transform
self.to_tensor = T.ToTensor()
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
im = Image.open(self.files[idx])
if self.transform:
im = self.transform(im)
im = self.to_tensor(im).numpy()
try:
if im.shape[0] == 1:
im = np.concatenate([im]*3, axis=0)
if im.shape[0] == 4:
im = im[:3]
im = np.transpose(im, (1,2,0))
im = color.rgb2lab(im).transpose((2, 0, 1))
for i in range(3):
im[i] = (im[i] - offsets[i]) / scales[i]
return torch.Tensor(im)
except:
return self.__getitem__(idx+1)
# Data transforms for training and test/validation set
transf = T.Compose([T.RandomHorizontalFlip(),
T.RandomResizedCrop(c.img_dims_orig[0], scale=(0.2, 1.))])
transf_test = T.Compose([T.Resize(c.img_dims_orig[0]),
T.CenterCrop(c.img_dims_orig[0])])
if c.dataset == 'imagenet':
with open('./imagenet/training_images.txt') as f:
train_list = [join('./imagenet', fname[2:]) for fname in f.read().splitlines()]
with open(c.validation_images) as f:
        test_list = [t for t in f.read().splitlines() if t[0] != '#']
test_list = [join('./imagenet', fname) for fname in test_list]
if c.val_start is not None:
test_list = test_list[c.val_start:c.val_stop]
else:
data_dir = '/home/diz/data/coco17'
complete_list = sorted(glob.glob(join(data_dir, '*.jpg')))
train_list = complete_list[64:]
test_list = complete_list[64:]
train_data = LabColorDataset(train_list,transf)
test_data = LabColorDataset(test_list, transf_test)
train_loader = DataLoader(train_data, batch_size=c.batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
test_loader = DataLoader(test_data, batch_size=min(64, len(test_list)), shuffle=c.shuffle_val, num_workers=4, pin_memory=True, drop_last=False)
if __name__ == '__main__':
# Determine mean and standard deviation of RGB channels
# (i.e. set global variables scale and offsets to 1., then use the results as new scale and offset)
for x in test_loader:
x_l, x_ab, _, x_ab_pred = model.prepare_batch(x)
#continue
img_gt = norm_lab_to_rgb(x_l, x_ab)
img_pred = norm_lab_to_rgb(x_l, x_ab_pred)
for i in range(c.batch_size):
plt.figure()
plt.subplot(2,2,1)
plt.imshow(img_gt[i].transpose(1,2,0))
plt.subplot(2,2,2)
plt.scatter(x_ab[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='gt')
plt.scatter(x_ab_pred[i, 0].cpu().numpy().flatten() * scales[1] + offsets[1],
x_ab_pred[i, 1].cpu().numpy().flatten() * scales[2] + offsets[2], label='pred')
plt.legend()
plt.subplot(2,2,3)
plt.imshow(img_pred[i].transpose(1,2,0))
plt.show()
sys.exit()
means = []
stds = []
for i, x in enumerate(train_loader):
print('\r', '%i / %i' % (i, len(train_loader)), end='')
mean = []
std = []
        for ch in range(3):  # channel index; renamed so it does not shadow the batch counter i
            mean.append(x[:, ch].mean().item())
            std.append(x[:, ch].std().item())
means.append(mean)
stds.append(std)
if i >= 1000:
break
means, stds = np.array(means), np.array(stds)
print()
print('Mean ', means.mean(axis=0))
print('Std dev', stds.mean(axis=0))
#[-0.04959071 0.03768991 0.11539354]
#[0.51175581 0.17507738 0.26179135]
```
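For orientation, a small sketch of how `norm_lab_to_rgb` above is typically called (tensor sizes here are arbitrary and chosen only for illustration; the ab channels are at lower resolution and get upsampled to match L):
```python
import torch

L = torch.randn(2, 1, 64, 64)    # normalized lightness channel, full resolution
ab = torch.randn(2, 2, 16, 16)   # normalized color channels, lower resolution

rgb = norm_lab_to_rgb(L, ab)            # upsamples ab, un-normalizes, converts Lab -> RGB
print(rgb.shape)                        # (2, 3, 64, 64)

gray = norm_lab_to_rgb(L, ab, bw=True)  # grayscale variant: ab channels zeroed out
```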
#### File: experiments/colorization_cINN/feature_net.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
__weights_dict = dict()
def load_weights(weight_file):
if weight_file == None:
return
try:
weights_dict = np.load(weight_file).item()
except:
weights_dict = np.load(weight_file, encoding='bytes').item()
return weights_dict
class KitModel(nn.Module):
def __init__(self, weight_file):
super(KitModel, self).__init__()
global __weights_dict
__weights_dict = load_weights(weight_file)
self.bw_conv1_1 = self.__conv(2, name='bw_conv1_1', in_channels=1, out_channels=64, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv1_2 = self.__conv(2, name='conv1_2', in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True, dilation=1, padding=1)
self.conv1_2norm = self.__batch_normalization(2, 'conv1_2norm', num_features=64, eps=9.999999747378752e-06, momentum=0.1)
self.conv2_1 = self.__conv(2, name='conv2_1', in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv2_2 = self.__conv(2, name='conv2_2', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True, dilation=1, padding=1)
self.conv2_2norm = self.__batch_normalization(2, 'conv2_2norm', num_features=128, eps=9.999999747378752e-06, momentum=0.1)
self.conv3_1 = self.__conv(2, name='conv3_1', in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv3_2 = self.__conv(2, name='conv3_2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv3_3 = self.__conv(2, name='conv3_3', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True, dilation=1, padding=1)
self.conv3_3norm = self.__batch_normalization(2, 'conv3_3norm', num_features=256, eps=9.999999747378752e-06, momentum=0.1)
self.conv4_1 = self.__conv(2, name='conv4_1', in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv4_2 = self.__conv(2, name='conv4_2', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv4_3 = self.__conv(2, name='conv4_3', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv4_3norm = self.__batch_normalization(2, 'conv4_3norm', num_features=512, eps=9.999999747378752e-06, momentum=0.1)
self.conv5_1 = self.__conv(2, name='conv5_1', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=2, padding=2)
self.conv5_2 = self.__conv(2, name='conv5_2', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=2, padding=2)
self.conv5_3 = self.__conv(2, name='conv5_3', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=2, padding=2)
self.conv5_3norm = self.__batch_normalization(2, 'conv5_3norm', num_features=512, eps=9.999999747378752e-06, momentum=0.1)
self.conv6_1 = self.__conv(2, name='conv6_1', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=2, padding=2)
self.conv6_2 = self.__conv(2, name='conv6_2', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=2, padding=2)
self.conv6_3 = self.__conv(2, name='conv6_3', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=2, padding=2)
self.conv6_3norm = self.__batch_normalization(2, 'conv6_3norm', num_features=512, eps=9.999999747378752e-06, momentum=0.1)
self.conv7_1 = self.__conv(2, name='conv7_1', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv7_2 = self.__conv(2, name='conv7_2', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv7_3 = self.__conv(2, name='conv7_3', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv7_3norm = self.__batch_normalization(2, 'conv7_3norm', num_features=512, eps=9.999999747378752e-06, momentum=0.1)
self.conv8_1 = self.__conv_transpose(2, name='conv8_1', in_channels=512, out_channels=256, kernel_size=(4, 4), stride=(2, 2), groups=1, bias=True)
self.conv8_2 = self.__conv(2, name='conv8_2', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv8_3 = self.__conv(2, name='conv8_3', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True, dilation=1, padding=1)
self.conv8_313 = self.__conv(2, name='conv8_313', in_channels=256, out_channels=313, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True, dilation=1, padding=0)
self.class8_ab = self.__conv(2, name='class8_ab', in_channels=313, out_channels=2, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True, dilation=1, padding=0)
def features(self, x):
out = self.bw_conv1_1(x)
out = F.relu(out)
out = self.conv1_2(out)
out = F.relu(out)
out = self.conv1_2norm(out)
out = self.conv2_1(out)
out = F.relu(out)
out = self.conv2_2(out)
out = F.relu(out)
out = self.conv2_2norm(out)
out = self.conv3_1(out)
out = F.relu(out)
out = self.conv3_2(out)
out = F.relu(out)
out = self.conv3_3(out)
out = F.relu(out)
out = self.conv3_3norm(out)
out = self.conv4_1(out)
out = F.relu(out)
out = self.conv4_2(out)
out = F.relu(out)
out = self.conv4_3(out)
out = F.relu(out)
out = self.conv4_3norm(out)
out = self.conv5_1(out)
out = F.relu(out)
out = self.conv5_2(out)
out = F.relu(out)
out = self.conv5_3(out)
out = F.relu(out)
out = self.conv5_3norm(out)
out = self.conv6_1(out)
out = F.relu(out)
out = self.conv6_2(out)
out = F.relu(out)
out = self.conv6_3(out)
out = F.relu(out)
out = self.conv6_3norm(out)
out = self.conv7_1(out)
out = F.relu(out)
out = self.conv7_2(out)
out = F.relu(out)
out = self.conv7_3(out)
out = F.relu(out)
out = self.conv7_3norm(out)
out = self.conv8_1(out)
out = F.relu(out)
out = self.conv8_2(out)
out = F.relu(out)
out = self.conv8_3(out)
return out
def forward(self, x):
out = self.features(x)
out = F.relu(out)
out = self.conv8_313(out)
out = 2.606 * out
out = F.softmax(out, dim=1)
out = self.class8_ab(out)
return out
def fwd_from_features(self, f):
out = F.relu(f)
out = self.conv8_313(out)
out = 2.606 * out
out = F.softmax(out, dim=1)
out = self.class8_ab(out)
return out
@staticmethod
def __batch_normalization(dim, name, **kwargs):
if dim == 1: layer = nn.BatchNorm1d(**kwargs)
elif dim == 2: layer = nn.BatchNorm2d(**kwargs)
elif dim == 3: layer = nn.BatchNorm3d(**kwargs)
else: raise NotImplementedError()
try:
if 'scale' in __weights_dict[name]:
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
else:
layer.weight.data.fill_(1)
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
else:
layer.bias.data.fill_(0)
layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
except:
pass
return layer
@staticmethod
def __conv(dim, name, **kwargs):
if dim == 1: layer = nn.Conv1d(**kwargs)
elif dim == 2: layer = nn.Conv2d(**kwargs)
elif dim == 3: layer = nn.Conv3d(**kwargs)
else: raise NotImplementedError()
try:
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
except:
pass
return layer
@staticmethod
def __conv_transpose(dim, name, **kwargs):
if dim == 1: layer = nn.ConvTranspose1d(**kwargs)
elif dim == 2: layer = nn.ConvTranspose2d(**kwargs)
elif dim == 3: layer = nn.ConvTranspose3d(**kwargs)
else: raise NotImplementedError()
try:
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
except:
pass
return layer
```
#### File: experiments/colorization_cINN/train.py
```python
import sys
import torch
import torch.nn
import torch.optim
from torch.nn.functional import avg_pool2d#, interpolate
from torch.autograd import Variable
import numpy as np
import tqdm
import config as c
if c.no_cond_net:
import model_no_cond as model
else:
import model
import data
import viz
if c.load_file:
model.load(c.load_file)
class dummy_loss(object):
def item(self):
return 1.
def sample_outputs(sigma, out_shape):
return [sigma * torch.cuda.FloatTensor(torch.Size((4, o))).normal_() for o in out_shape]
tot_output_size = 2 * c.img_dims[0] * c.img_dims[1]
try:
for i_epoch in range(-c.pre_low_lr, c.n_epochs):
loss_history = []
data_iter = iter(data.train_loader)
if i_epoch < 0:
for param_group in model.optim.param_groups:
param_group['lr'] = c.lr * 2e-2
if i_epoch == 0:
for param_group in model.optim.param_groups:
param_group['lr'] = c.lr
if c.end_to_end and i_epoch <= c.pretrain_epochs:
for param_group in model.feature_optim.param_groups:
param_group['lr'] = 0
if i_epoch == c.pretrain_epochs:
for param_group in model.feature_optim.param_groups:
param_group['lr'] = 1e-4
iterator = tqdm.tqdm(enumerate(data_iter),
total=min(len(data.train_loader), c.n_its_per_epoch),
leave=False,
mininterval=1.,
disable=(not c.progress_bar),
ncols=83)
for i_batch , x in iterator:
zz, jac = model.combined_model(x)
neg_log_likeli = 0.5 * zz - jac
l = torch.mean(neg_log_likeli) / tot_output_size
l.backward()
model.optim_step()
loss_history.append([l.item(), 0.])
if i_batch+1 >= c.n_its_per_epoch:
# somehow the data loader workers don't shut down automatically
try:
data_iter._shutdown_workers()
except:
pass
iterator.close()
break
epoch_losses = np.mean(np.array(loss_history), axis=0)
epoch_losses[1] = np.log10(model.optim.param_groups[0]['lr'])
for i in range(len(epoch_losses)):
epoch_losses[i] = min(epoch_losses[i], c.loss_display_cutoff)
with torch.no_grad():
ims = []
for x in data.test_loader:
x_l, x_ab, cond, ab_pred = model.prepare_batch(x[:4])
for i in range(3):
z = sample_outputs(c.sampling_temperature, model.output_dimensions)
x_ab_sampled = model.combined_model.module.reverse_sample(z, cond)
ims.extend(list(data.norm_lab_to_rgb(x_l, x_ab_sampled)))
break
if i_epoch >= c.pretrain_epochs * 2:
model.weight_scheduler.step(epoch_losses[0])
model.feature_scheduler.step(epoch_losses[0])
viz.show_imgs(*ims)
viz.show_loss(epoch_losses)
if i_epoch > 0 and (i_epoch % c.checkpoint_save_interval) == 0:
model.save(c.filename + '_checkpoint_%.4i' % (i_epoch * (1-c.checkpoint_save_overwrite)))
model.save(c.filename)
except:
if c.checkpoint_on_error:
model.save(c.filename + '_ABORT')
raise
finally:
viz.signal_stop()
```
#### File: experiments/inverse_problems_science/dkfz_eval.py
```python
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch
import dkfz_train
import model
import config as c
model.load('output/dkfz_inn.pt')
print('Trainable parameters:')
print(sum([p.numel() for p in model.params_trainable]))
def concatenate_test_set():
x_all, y_all = [], []
for x,y in c.test_loader:
x_all.append(x)
y_all.append(y)
return torch.cat(x_all, 0), torch.cat(y_all, 0)
x_all, y_all = concatenate_test_set()
def sample_posterior(y_it, N=4096):
outputs = []
for y in y_it:
rev_inputs = torch.cat([torch.randn(N, c.ndim_z + c.ndim_pad_zy),
torch.zeros(N, c.ndim_y)], 1).to(c.device)
if c.ndim_pad_zy:
rev_inputs[:, c.ndim_z:-c.ndim_y] *= c.add_pad_noise
rev_inputs[:, -c.ndim_y:] = y
with torch.no_grad():
x_samples = model.model(rev_inputs, rev=True)
outputs.append(x_samples.data.cpu().numpy())
return outputs
def show_posteriors():
# how many different posteriors to show:
n_plots = 5
# how many dimensions of x to use:
n_x = 3
def hists(x):
results = []
for j in range(n_x):
h, b = np.histogram(x[:, j], bins=100, range=(-2,2), density=True)
h /= np.max(h)
results.append([b[:-1],h])
return results
prior_hists = hists(x_all)
x_gt = x_all[:n_plots]
y_gt = y_all[:n_plots]
posteriors = sample_posterior(y_gt)
confidence = 0.68
q_low = 100. * 0.5 * (1 - confidence)
q_high = 100. * 0.5 * (1 + confidence)
for i in range(n_plots):
hist_i = hists(posteriors[i])
for j in range(n_x):
plt.subplot(n_plots, n_x, n_x*i + j + 1)
plt.step(*(prior_hists[j]), where='post', color='grey')
plt.step(*(hist_i[j]), where='post', color='blue')
x_low, x_high = np.percentile(posteriors[i][:,j], [q_low, q_high])
plt.plot([x_gt[i,j], x_gt[i,j]], [0,1], color='black')
plt.plot([x_low, x_low], [0,1], color='orange')
plt.plot([x_high, x_high], [0,1], color='orange')
plt.tight_layout()
def calibration_error():
# which parameter to look at (0: SO2)
x_ind = 0
# how many different confidences to look at
n_steps = 100
q_values = []
confidences = np.linspace(0., 1., n_steps+1, endpoint=False)[1:]
uncert_intervals = [[] for i in range(n_steps)]
inliers = [[] for i in range(n_steps)]
for conf in confidences:
q_low = 0.5 * (1 - conf)
q_high = 0.5 * (1 + conf)
q_values += [q_low, q_high]
from tqdm import tqdm
for x,y in tqdm(zip(x_all, y_all), total=x_all.shape[0], disable=False):
post = sample_posterior([y])[0][:, x_ind]
x_margins = list(np.quantile(post, q_values))
for i in range(n_steps):
x_low, x_high = x_margins.pop(0), x_margins.pop(0)
uncert_intervals[i].append(x_high - x_low)
inliers[i].append(int(x[x_ind] < x_high and x[x_ind] > x_low))
inliers = np.mean(inliers, axis=1)
uncert_intervals = np.median(uncert_intervals, axis=1)
calib_err = inliers - confidences
print(F'Median calibration error: {np.median(np.abs(calib_err))}')
print(F'Calibration error at 68% confidence: {calib_err[68]}')
print(F'Med. est. uncertainty at 68% conf.: {uncert_intervals[68]}')
plt.subplot(2, 1, 1)
plt.plot(confidences, calib_err)
plt.ylabel('Calibration error')
plt.subplot(2, 1, 2)
plt.plot(confidences, uncert_intervals)
plt.ylabel('Median estimated uncertainty')
plt.xlabel('Confidence')
show_posteriors()
calibration_error()
plt.show()
```
#### File: experiments/mnist_cINN/cond_net.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import config as c
import data as color_data
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=3),
nn.Conv2d(32, 64, kernel_size=3),
nn.MaxPool2d(2),
nn.Conv2d(64, 64, kernel_size=3),
nn.Conv2d(64, 64, kernel_size=3),
nn.MaxPool2d(2),
)
self.linear = nn.Sequential(
nn.Dropout(),
nn.Linear(1024, 512),
nn.Dropout(),
nn.Linear(512, 512),
nn.Dropout(),
nn.Linear(512, c.cond_width),
)
self.fc_final = nn.Linear(c.cond_width, 10)
def forward(self, x):
x = self.conv(x)
x = x.view(c.batch_size, -1)
x = self.linear(x)
x = self.fc_final(x)
return F.log_softmax(x, dim=1)
def features(self, x):
x = self.conv(x)
x = x.view(c.batch_size, -1)
return self.linear(x)
model = Net().cuda()
log_interval = 25
def train():
model.train()
for batch_idx, (color, target, data) in enumerate(color_data.train_loader):
data, target = data.cuda(), target.long().cuda()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(color_data.train_loader.dataset),
100. * batch_idx / len(color_data.train_loader), loss.item()))
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('./mnist_data', train=False, transform=transforms.ToTensor()),
batch_size=c.batch_size, shuffle=True, drop_last=True)
def test():
    model.eval()  # evaluation mode: disable dropout while measuring test accuracy
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
if __name__ == '__main__':
optimizer = optim.SGD(model.parameters(), lr=0.03, momentum=0.5)
for epoch in range(6):
train()
test()
torch.save(model.state_dict(), c.cond_net_file)
else:
model.train()
if c.cond_net_file:
model.load_state_dict(torch.load(c.cond_net_file))
```
#### File: experiments/mnist_cINN/data.py
```python
import os
from os.path import join, isfile, basename
from time import time
from multiprocessing import Process
from tqdm import tqdm
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torchvision.transforms as T
import config as c
import torchvision.datasets
def unnormalize(x):
return x * c.data_std + c.data_mean
if c.colorize:
data_dir = 'color_mnist_data'
ims = (torch.load(join(data_dir, 'color_mnist_images.pt')) - c.data_mean) / c.data_std
labels = torch.load(join(data_dir, 'color_mnist_labels.pt'))
masks = torch.load(join(data_dir, 'color_mnist_masks.pt'))
dataset = torch.utils.data.TensorDataset(ims, labels, masks)
train_loader = DataLoader(dataset, batch_size=c.batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
test_loader = train_loader
else:
data_dir = 'mnist_data'
train_data = torchvision.datasets.MNIST(data_dir, train=True, transform=T.ToTensor(), download=True)
test_data = torchvision.datasets.MNIST(data_dir, train=False, transform=T.ToTensor(), download=True)
train_loader = DataLoader(train_data, batch_size=c.batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
test_loader = DataLoader(test_data, batch_size=c.batch_size, shuffle=False, num_workers=4, pin_memory=True, drop_last=True)
```
#### File: experiments/mnist_cINN/losses.py
```python
import torch
import numpy as np
from torch.autograd import Variable
import config as c
def MMD(x, y):
xx, yy, xy = torch.mm(x,x.t()), torch.mm(y,y.t()), torch.mm(x,y.t())
rx = (xx.diag().unsqueeze(0).expand_as(xx))
ry = (yy.diag().unsqueeze(0).expand_as(yy))
dxx = rx.t() + rx - 2.*xx
dyy = ry.t() + ry - 2.*yy
dxy = rx.t() + ry - 2.*xy
dxx = torch.clamp(dxx, 0., np.inf)
dyy = torch.clamp(dyy, 0., np.inf)
dxy = torch.clamp(dxy, 0., np.inf)
XX, YY, XY = (Variable(torch.zeros(xx.shape).cuda()),
Variable(torch.zeros(xx.shape).cuda()),
Variable(torch.zeros(xx.shape).cuda()))
for cw in c.kernel_widths:
for a in c.kernel_powers:
XX += cw**a * (cw + 0.5 * dxx / a)**-a
YY += cw**a * (cw + 0.5 * dyy / a)**-a
XY += cw**a * (cw + 0.5 * dxy / a)**-a
return torch.mean(XX + YY - 2.*XY)
def moment_match(x, y):
return (torch.mean(x) - torch.mean(y))**2 + (torch.var(x) - torch.var(y))**2
```
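The pairwise-distance matrices above use the standard identity ||x_i - x_j||^2 = <x_i, x_i> + <x_j, x_j> - 2<x_i, x_j>, which is what `rx.t() + rx - 2.*xx` computes. A quick NumPy check of that identity (standalone, no CUDA or config needed):
```python
import numpy as np

x = np.random.randn(5, 3)                     # 5 samples, 3 features
gram = x @ x.T                                # all pairwise inner products
diag = np.diag(gram)[None, :]                 # <x_i, x_i> as a row vector
d_fast = diag.T + diag - 2.0 * gram           # same trick as rx.t() + rx - 2.*xx above
d_slow = ((x[:, None, :] - x[None, :, :]) ** 2).sum(-1)  # explicit squared distances
assert np.allclose(d_fast, d_slow)
```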
#### File: experiments/mnist_minimal_example/eval.py
```python
import torch
import numpy as np
import matplotlib.pyplot as plt
import model
import data
cinn = model.MNIST_cINN(0)
cinn.cuda()
state_dict = {k:v for k,v in torch.load('output/mnist_cinn.pt').items() if 'tmp_var' not in k}
cinn.load_state_dict(state_dict)
cinn.eval()
def show_samples(label):
'''produces and shows cINN samples for a given label (0-9)'''
N_samples = 100
l = torch.cuda.LongTensor(N_samples)
l[:] = label
z = 1.0 * torch.randn(N_samples, model.ndim_total).cuda()
with torch.no_grad():
samples = cinn.reverse_sample(z, l).cpu().numpy()
samples = data.unnormalize(samples)
full_image = np.zeros((28*10, 28*10))
for k in range(N_samples):
i, j = k // 10, k % 10
full_image[28 * i : 28 * (i + 1),
28 * j : 28 * (j + 1)] = samples[k, 0]
full_image = np.clip(full_image, 0, 1)
plt.figure()
plt.title(F'Generated digits for c={label}')
plt.imshow(full_image, vmin=0, vmax=1, cmap='gray')
def val_loss():
    '''prints the final validation loss of the model'''
with torch.no_grad():
z, log_j = cinn(data.val_x, data.val_l)
nll_val = torch.mean(z**2) / 2 - torch.mean(log_j) / model.ndim_total
print('Validation loss:')
print(nll_val.item())
val_loss()
for i in range(10):
show_samples(i)
plt.show()
``` |
{
"source": "jlmaccal/gromacs",
"score": 2
} |
#### File: src/gmxapi/commandline.py
```python
__all__ = ['commandline_operation']
import functools
import os
import pathlib
import shutil
import subprocess
import gmxapi as gmx
from gmxapi import exceptions
from gmxapi import logger as root_logger
from gmxapi.datamodel import NDArray
from gmxapi.operation import OutputCollectionDescription
# Module-level logger
logger = root_logger.getChild('commandline')
logger.info('Importing {}'.format(__name__))
@functools.lru_cache()
def _config() -> dict:
"""Get the GROMACS configuration detected during installation.
If this appears to be a useful function, it may become part of the regular
interface, but it is currently unadvertised.
"""
import json
from importlib.resources import open_text
with open_text('gmxapi', 'gmxconfig.json') as textfile:
config = json.load(textfile)
return config
@functools.lru_cache()
def cli_executable() -> pathlib.Path:
"""Report the installed GROMACS command line executable."""
path = _config().get('gmx_executable', None)
if path is not None:
path = pathlib.Path(os.path.abspath(path))
if path.is_file():
return path
raise exceptions.FeatureNotAvailableError('GROMACS installation unavailable.')
@functools.lru_cache()
def cli_bindir() -> pathlib.Path:
"""Report the installed GROMACS binary directory."""
path = _config().get('gmx_bindir', None)
if path is not None:
path = pathlib.Path(os.path.abspath(path))
if path.is_dir():
return path
raise exceptions.FeatureNotAvailableError('GROMACS installation unavailable.')
# Create an Operation that consumes a list and a boolean to produce a string and an integer.
#
# Wrap the defined function using a decorator that
# * strips the `output` parameter from the signature
# * provides `output` publishing proxy to the inner function and
# * produce a result with attributes for
# * file: mapping of output flags to output filenames
# * stdout: process STDOUT
# * stderr: process STDERR
# * returncode: integer return code of wrapped command
#
# Note that the existence of the 'file' output map is expressed here, but
# the keys of the map are not implicit or set by the wrapped function.
# For the map to be non-empty, it must be defined before the resulting helper
# function is called.
#
# TODO: Operation returns the output object when called with the shorter signature.
#
@gmx.function_wrapper(output={'stdout': str,
'stderr': str,
'returncode': int})
def cli(command: NDArray, shell: bool, output: OutputCollectionDescription, stdin: str = ''):
"""Execute a command line program in a subprocess.
Configure an executable in a subprocess. Executes when run in an execution
Context, as part of a work graph or via gmx.run(). Runs in the current
working directory.
Shell processing is not enabled, but can be considered for a future version.
This means that shell expansions such as environment variables, globbing (`*`),
and other special symbols (like `~` for home directory) are not available.
This allows a simpler and more robust implementation, as well as a better
ability to uniquely identify the effects of a command line operation. If you
think this disallows important use cases, please let us know.
Arguments:
command: a tuple (or list) to be the subprocess arguments, including `executable`
output: mapping of command line flags to output filename arguments
shell: unused (provides forward-compatibility)
stdin (str): String input to send to STDIN (terminal input) of the executable.
Multi-line text sent to *stdin* should be joined into a single string
(e.g. ``'\n'.join(list_of_strings) + '\n'``).
If multiple strings are provided to *stdin*, gmxapi will assume an ensemble,
and will run one operation for each provided string.
Only string input (:py:func:str) to *stdin* is currently supported.
If you have a use case that requires streaming input or binary input,
please open an issue or contact the author(s).
Arguments are iteratively added to the command line with standard Python
iteration, so you should use a tuple or list even if you have only one parameter.
I.e. If you provide a string with `arguments="asdf"` then it will be passed as
`... "a" "s" "d" "f"`. To pass a single string argument, `arguments=("asdf")`
or `arguments=["asdf"]`.
`input` and `output` should be a dictionary with string keys, where the keys
name command line "flags" or options.
Example:
Execute a command named `exe` that takes a flagged option for file name
(stored in a local Python variable `my_filename`) and an `origin` flag
that uses the next three arguments to define a vector.
>>> my_filename = "somefilename"
>>> result = cli(('exe', '--origin', 1.0, 2.0, 3.0, '-f', my_filename), shell=False)
>>> assert hasattr(result, 'file')
>>> assert hasattr(result, 'stdout')
>>> assert hasattr(result, 'stderr')
>>> assert hasattr(result, 'returncode')
Returns:
A data structure with attributes for each of the results `file`, `stdout`, `stderr`, and `returncode`
Result object attributes:
* `file`: the mapping of CLI flags to filename strings resulting from the `output` kwarg
* `stdout`: A string mapping from process STDOUT.
* `stderr`: A string mapping from process STDERR; it will be the
error output (if any) if the process failed.
* `returncode`: return code of the subprocess.
"""
# In the operation implementation, we expect the `shell` parameter to be intercepted by the
# wrapper and set to False.
if shell:
raise exceptions.UsageError("Operation does not support shell processing.")
if stdin == '':
stdin = None
if isinstance(command, (str, bytes)):
command = [command]
command = list([arg for arg in command])
executable = shutil.which(command[0])
if executable is None:
executable = shutil.which(command[0], path=str(cli_bindir()))
if executable is None:
raise exceptions.ValueError('"{}" is not found or not executable.'.format(command[0]))
command[0] = executable
# TODO: (FR9) Can OS input/output filehandles be a responsibility of
# the code providing 'resources'?
stdout = ''
stderr = ''
logger.debug('executing subprocess')
try:
completed_process = subprocess.run(command,
shell=shell,
input=stdin,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='utf-8',
universal_newlines=True
)
returncode = completed_process.returncode
# TODO: Resource management code should manage a safe data object for `output`.
logger.debug('STDOUT:')
        if completed_process.stdout is not None:
for line in completed_process.stdout.split('\n'):
logger.debug(line)
else:
logger.debug('STDOUT is empty')
logger.debug('STDERR:')
if completed_process.stderr is not None:
for line in completed_process.stderr.split('\n'):
logger.debug(line)
else:
logger.debug('STDERR is empty')
stdout = completed_process.stdout
stderr = completed_process.stderr
except subprocess.CalledProcessError as e:
logger.info("commandline operation had non-zero return status"
"when calling {}".format(e.cmd))
stdout = e.stdout
stderr = e.stderr
returncode = e.returncode
# Publish outputs.
output.stdout = stdout
output.stderr = stderr
output.returncode = returncode
# TODO: (FR4) Make this a formal operation to properly handle gmxapi data dependencies.
# The consumer of this operation has an NDArray input. filemap may contain gmxapi data flow
# aspects that we want the framework to handle for us.
def filemap_to_flag_list(filemap: dict = None):
"""Convert a map of command line flags and filenames to a list of command line arguments.
Used to map inputs and outputs of command line tools to and from gmxapi data handles.
User provides mappings of flags and filenames so that gmxapi can construct an
executable command line.
Primary use case is implicit. commandline_operation() instantiates this operation based on
user input, and sends the output to cli()
Arguments:
filemap: key-value map of command line flags and filename arguments
Returns:
list of strings and/or gmxapi data references
"""
result = []
if filemap is not None:
for key, value in filemap.items():
# Note that the value may be a string, a list, an ndarray, or a future
if not isinstance(value, (list, tuple, NDArray)):
if hasattr(value, 'result') and value.dtype == NDArray:
pass
elif hasattr(value, 'result') and value.dtype != NDArray:
# TODO: Fix this ugly hack when we have proper Future slicing and can make NDArray futures.
# FIXME: This should not modify the source object.
# FIXME: Recursion protection (not idempotent): function may be repeatedly wrapped since dtype is
# not updated.
result_function = value.result
value.result = lambda function=result_function: [function()]
else:
value = [value]
result = gmx.join_arrays(front=result, back=gmx.join_arrays(front=gmx.ndarray([key]), back=value))
return result
# TODO: (FR4) Use generating function or decorator that can validate kwargs?
# TODO: (FR4) Outputs need to be fully formed and typed in the object returned
# from the helper (decorated function).
def commandline_operation(executable=None,
arguments=(),
input_files: dict = None,
output_files: dict = None,
stdin: str = None,
**kwargs):
"""Helper function to define a new operation that executes a subprocess in gmxapi data flow.
Define a new Operation for a particular executable and input/output parameter set.
Generate a chain of operations to process the named key word arguments and handle
input/output data dependencies.
Arguments:
executable: name of an executable on the path
arguments: list of positional arguments to insert at ``argv[1]``
input_files: mapping of command-line flags to input file names
output_files: mapping of command-line flags to output file names
stdin (str): String input to send to STDIN (terminal input) of the executable (optional).
Multi-line text sent to *stdin* should be joined into a single string.
E.g.::
commandline_operation(..., stdin='\\n'.join(list_of_strings) + '\\n')
If multiple strings are provided to *stdin*, gmxapi will assume an ensemble,
and will run one operation for each provided string.
Only string input (:py:func:`str`) to *stdin* is currently supported.
If you have a use case that requires streaming input or binary input,
please open an issue or contact the author(s).
Output:
The output node of the resulting operation handle contains
* ``file``: the mapping of CLI flags to filename strings resulting from the ``output_files`` kwarg
* ``stdout``: A string mapping from process STDOUT.
* ``stderr``: A string mapping from process STDERR; it will be the
error output (if any) if the process failed.
* ``returncode``: return code of the subprocess.
"""
# Implementation details: When used in a script, this function returns an
# instance of an operation. However, because of the dynamic specification of
# inputs and outputs, each invocation may have the overhead of defining new
# types to express the data flow topology, regardless of the executable.
# If this overhead is problematic, consider exposing the intermediate step
# at which the Operation is fully specified to facilitate reuse.
##
# 1. Define a new operation with outputs from `cli()` plus `file` from `output_files`
# output_files is essentially passed through, but we need assurance that results
# will not be published until the rest of the operation has run (i.e. the cli() executable.)
# Warning: decorating a local function like this is counter to the notion of Operations
# as portable (importable, serializable/deserializable). The big picture here needs
# some more consideration.
# TODO: (NOW) Distinguish portable Operations from relocatable Futures.
# There is nothing antithetical about objects implementing gmxapi data interfaces
# that are only resolvable by a certain Context as long as that Context can convey
# the results to another Context upon request. Re-instantiating Operations is
# only one way of relocating Futures. In this case, though, the dynamic creation of
# merged_ops doesn't seem right, and commandline_operation should probably be
# a proper Operation.
#
# TODO: (FR4+) Characterize the `file` dictionary key type:
# explicitly sequences rather than maybe-string/maybe-sequence-of-strings
@gmx.function_wrapper(output={'stdout': str,
'stderr': str,
'returncode': int,
'file': dict})
def merged_ops(stdout: str = None,
stderr: str = None,
returncode: int = None,
file: dict = None,
output: OutputCollectionDescription = None):
assert stdout is not None
assert stderr is not None
assert returncode is not None
assert file is not None
assert output is not None
output.returncode = returncode
output.stdout = stdout
output.stderr = stderr
if returncode == 0:
output.file = file
else:
output.file = {}
##
# 2. Prepare data flow.
if input_files is None:
input_files = {}
if output_files is None:
output_files = {}
if isinstance(arguments, (str, bytes)):
arguments = [arguments]
command = gmx.concatenate_lists([[executable],
arguments,
filemap_to_flag_list(input_files),
filemap_to_flag_list(output_files)])
shell = gmx.make_constant(False)
cli_args = {'command': command,
'shell': shell}
cli_args.update(**kwargs)
if stdin is not None:
# FIXME: No ensemble handling.
# FIXME: Type checking instead of blind conversion.
# Maybe `stdin ? isinstance(stdin, str) : '\n'.join(str(line) for line in stdin)`
cli_args['stdin'] = str(stdin)
##
# 3. Merge operations
#
# Note: Without a `label` argument, repeated calls to cli(**cli_args) should
# produce references to the same unique resource. Creating this handle
# separately should not be necessary, but we've got a way to go until we have the
# fingerprinting and Context resource management we need for that.
# TODO: ``label`` kwarg
# TODO: input fingerprinting
cli_result = cli(**cli_args)
merged_result = merged_ops(stdout=cli_result.output.stdout,
stderr=cli_result.output.stderr,
returncode=cli_result.output.returncode,
file=output_files,
**kwargs)
# Return an object with an OutputCollection granting access to outputs of
# cli() and of output_files (as "file")
return merged_result
```
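A usage sketch (assuming, as in the gmxapi package layout, that `commandline_operation` is re-exported at the package top level; `echo` just stands in for any executable on the PATH). Each output attribute is a Future that resolves with `.result()`, as in the test fixtures below:
```python
import gmxapi as gmx

hello = gmx.commandline_operation('echo', arguments=['hello', 'world'])

print(hello.output.returncode.result())  # 0 on success
print(hello.output.stdout.result())      # captured terminal output
print(hello.output.file.result())        # {} here, since no output_files were declared
```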
#### File: src/test/conftest.py
```python
import json
import logging
import os
import pytest
pytest_plugins = ('gmxapi.testsupport',)
try:
from mpi4py import MPI
rank_number = MPI.COMM_WORLD.Get_rank()
comm_size = MPI.COMM_WORLD.Get_size()
except ImportError:
rank_number = 0
comm_size = 1
rank_tag = ''
MPI = None
else:
rank_tag = 'rank{}:'.format(rank_number)
old_factory = logging.getLogRecordFactory()
def record_factory(*args, **kwargs):
record = old_factory(*args, **kwargs)
record.rank_tag = rank_tag
return record
logging.setLogRecordFactory(record_factory)
@pytest.fixture(scope='session')
def spc_water_box_collection(gmxcli, remove_tempdir):
"""Provide a collection of simulation input items for a simple simulation.
Prepare the MD input in a freshly created working directory.
Solvate a 5nm cubic box with spc water. Return a dictionary of the artifacts produced.
"""
import gmxapi as gmx
# TODO: Remove this import when the the spc_water_box fixture is migrated to gmxapi.testsupport
from gmxapi.testsupport import _cleandir
# TODO: (#2896) Fetch MD input from package / library data.
# Example:
# import pkg_resources
# # Note: importing pkg_resources means setuptools is required for running this test.
# # Get or build TPR file from data bundled via setup(package_data=...)
# # Ref https://setuptools.readthedocs.io/en/latest/setuptools.html#including-data-files
# from gmx.data import tprfilename
with _cleandir(remove_tempdir) as tempdir:
testdir = os.path.dirname(__file__)
with open(os.path.join(testdir, 'testdata.json'), 'r') as fh:
testdata = json.load(fh)
# TODO: (#2756) Don't rely on so many automagical behaviors (as described in comments below)
structurefile = os.path.join(tempdir, 'structure.gro')
# We let `gmx solvate` use the default solvent. Otherwise, we would do
# gro_input = testdata['solvent_structure']
# with open(structurefile, 'w') as fh:
# fh.write('\n'.join(gro_input))
# fh.write('\n')
topfile = os.path.join(tempdir, 'topology.top')
top_input = testdata['solvent_topology']
# `gmx solvate` will append a line to the provided file with the molecule count,
# so we strip the last line from the input topology.
with open(topfile, 'w') as fh:
fh.write('\n'.join(top_input[:-1]))
fh.write('\n')
assert os.path.exists(topfile)
solvate = gmx.commandline_operation(gmxcli,
arguments=['solvate', '-box', '5', '5', '5'],
# We use the default solvent instead of specifying one.
# input_files={'-cs': structurefile},
output_files={'-p': topfile,
'-o': structurefile,
}
)
assert os.path.exists(topfile)
if solvate.output.returncode.result() != 0:
logging.debug(solvate.output.stderr.result())
raise RuntimeError('solvate failed in spc_water_box testing fixture.')
# Choose an exactly representable dt of 2^-9 ps (approximately 0.002)
dt = 2. ** -9.
mdp_input = [('integrator', 'md'),
('dt', dt),
('cutoff-scheme', 'Verlet'),
('nsteps', 2),
('nstxout', 1),
('nstvout', 1),
('nstfout', 1),
('tcoupl', 'v-rescale'),
('tc-grps', 'System'),
('tau-t', 1),
('ref-t', 298)]
mdp_input = '\n'.join([' = '.join([str(item) for item in kvpair]) for kvpair in mdp_input])
mdpfile = os.path.join(tempdir, 'md.mdp')
with open(mdpfile, 'w') as fh:
fh.write(mdp_input)
fh.write('\n')
tprfile = os.path.join(tempdir, 'topol.tpr')
# We don't use mdout_mdp, but if we don't specify it to grompp,
# it will be created in the current working directory.
mdout_mdp = os.path.join(tempdir, 'mdout.mdp')
grompp = gmx.commandline_operation(gmxcli, 'grompp',
input_files={
'-f': mdpfile,
'-p': solvate.output.file['-p'],
'-c': solvate.output.file['-o'],
'-po': mdout_mdp,
},
output_files={'-o': tprfile})
tprfilename = grompp.output.file['-o'].result()
if grompp.output.returncode.result() != 0:
logging.debug(grompp.output.stderr.result())
raise RuntimeError('grompp failed in spc_water_box testing fixture.')
# TODO: more inspection of grompp errors...
assert os.path.exists(tprfilename)
collection = {
'tpr_filename': tprfilename,
'mdp_input_filename': mdpfile,
'mdp_output_filename': mdout_mdp,
'topology_filename': solvate.output.file['-p'].result(),
'gro_filename': solvate.output.file['-o'].result(),
'mdp_input_list': mdp_input
}
yield collection
@pytest.fixture(scope='session')
def spc_water_box(spc_water_box_collection):
"""Provide a TPR input file for a simple simulation."""
yield spc_water_box_collection['tpr_filename']
``` |
{
"source": "JLMadsen/AdventOfCode",
"score": 3
} |
#### File: AdventOfCode/2021/day03.py
```python
def nth(arr, n):
return [*zip(*arr)][n]
def criteria(arr, high):
if (ones := arr.count("1")) == (zero := arr.count("0")):
return "1" if high else "0"
return str((ones > zero) * 1) if high else str((ones < zero) * 1)
if __name__ == "__main__":
with open('input/day03.txt') as f:
content = f.read().split('\n')[:-1]
gamma = int("".join([str(max(set(n), key=n.count)) for i in range(len(content[0])) if (n := nth(content, i))]),2)
eps = int("".join([str(min(set(n), key=n.count)) for i in range(len(content[0])) if (n := nth(content, i))]),2)
print(eps * gamma) # 3429254
oxy = ""
arr = [*content]
for i in range(len(content[0])):
if len(arr) == 1:
break
arr = [num for num in arr if num[i] == criteria(nth(arr, i), True)]
oxy = arr[0]
c02 = ""
arr = [*content]
for i in range(len(content[0])):
if len(arr) == 1:
break
arr = [num for num in arr if num[i] == criteria(nth(arr, i), False)]
c02 = arr[0]
c02, oxy = int(c02, 2), int(oxy, 2)
print(c02 * oxy) # 5410338
```
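As a sanity check of the bit-criteria logic, the commonly quoted 12-line example from the puzzle gives gamma = 22 and epsilon = 9 (power 198); a condensed sketch of part one on that sample:
```python
sample = ["00100", "11110", "10110", "10111", "10101", "01111",
          "00111", "11100", "10000", "11001", "00010", "01010"]

gamma = int("".join(max(set(col), key=col.count) for col in zip(*sample)), 2)
eps = int("".join(min(set(col), key=col.count) for col in zip(*sample)), 2)
assert (gamma, eps, gamma * eps) == (22, 9, 198)
```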
#### File: AdventOfCode/2021/day07.py
```python
def calc_fuel(crabs, nthTriangleNumber = False ):
fuel = float('inf')
for pos in range(min(crabs), max(crabs) + 1):
temp_fuel = 0
for crab in crabs:
diff = abs(pos - crab)
temp_fuel += diff if not nthTriangleNumber else diff * (1 + diff) // 2
if temp_fuel < fuel:
fuel = temp_fuel
return fuel
if __name__ == "__main__":
with open('input/day07.txt') as f:
content = f.read().split('\n')[:-1]
crabs = [*map(int, content[0].split(','))]
print(calc_fuel(crabs)) # 349812
print(calc_fuel(crabs, True)) # 99763899
```
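The part-two cost per crab is the triangular number of its distance, which is what `diff * (1 + diff) // 2` computes; for example, a crab three steps away burns 1 + 2 + 3 = 6 fuel:
```python
diff = 3
assert diff * (1 + diff) // 2 == sum(range(1, diff + 1)) == 6
```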
#### File: AdventOfCode/2021/day14.py
```python
from collections import Counter
from collections import defaultdict
def process(template, insertions, steps):
counter = Counter([a + b for a, b in zip(template, template[1:])])
for _ in range(steps):
new_counter = Counter()
for pair in counter:
if pair in insertions.keys():
element = insertions[pair]
new_counter[pair[0] + element] += counter[pair]
new_counter[element + pair[1]] += counter[pair]
counter = new_counter
chars = defaultdict(lambda: 0)
for pair in counter:
chars[pair[0]] += counter[pair]
    chars[template[-1]] += 1
    print(max(chars.values()) - min(chars.values()))
if __name__ == "__main__":
with open('input/day14.txt') as f:
        template, lines = f.read().strip().split('\n\n')
insertions = dict(line.split(' -> ') for line in lines.split('\n'))
process(template, insertions.copy(), 10) # 3009
process(template, insertions.copy(), 40) # 3459822539451
```
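The trick above is that only pair counts matter: an insertion turns one pair into two new pairs, each inheriting the old pair's count, so the string itself never has to be built. A one-step sketch with a made-up rule set:
```python
from collections import Counter

rules = {"AB": "C"}             # hypothetical rule: insert C between A and B
pairs = Counter({"AB": 4})      # the polymer currently contains the pair "AB" four times

new_pairs = Counter()
for pair, n in pairs.items():
    if pair in rules:
        mid = rules[pair]
        new_pairs[pair[0] + mid] += n   # "AC" now appears 4 times
        new_pairs[mid + pair[1]] += n   # "CB" now appears 4 times
    else:
        new_pairs[pair] += n
print(new_pairs)                # Counter({'AC': 4, 'CB': 4})
```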
#### File: AdventOfCode/2021/day20.py
```python
from collections import defaultdict
def adjacent(y, x):
for dy in [-1, 0, 1]:
for dx in [-1, 0, 1]:
yield y+dy, x+dx
def enhance(image, enhancement):
new_image = defaultdict(lambda: 0)
for cell, value in image.items():
for y1, x1, in adjacent(*cell):
if (y1, x1) in new_image:
continue
bin_str = ''
for y2, x2 in adjacent(y1, x1):
                if (y2, x2) in image:
                    bin_str += str(image[(y2, x2)])
                else:
                    bin_str += '0'
            new_image[(y1, x1)] = 1 if enhancement[int(bin_str, 2)] == '#' else 0
return new_image
def print_image(img):
print('print image')
for i in range(-5,40):
for j in range(-5, 40):
print('█' if img[(i,j)] == 1 else ' ', end="")
print()
test = """..#.#..#####.#.#.#.###.##.....###.##.#..###.####..#####..#....#..#..##..###..######.###...####..#..#####..##..#.#####...##.#.#..#.##..#.#......#.###.######.###.####...#.##.##..#..#..#####.....#.#....###..#.##......#.....#..#..#..##..#...##.######.####.####.#.#...#.......#..#.#.#...####.##.#......#..#...##.#.##..#...##.#.##..###.#......#.#.......#.#.#.####.###.##...#.....####.#..#..#.##.#....##..#.####....##...##..#...#......#.#.......#.......##..####..#...#.#.#...##..#.#..###..#####........#..####......#..#
#..#.
#....
##..#
..#..
..###"""
if __name__ == "__main__":
with open('input/day20.txt') as f:
content = f.read().split('\n')[:-1]
#content = test.split('\n')
enhancement = content[0]
image = defaultdict(lambda: 0)
for y, line in enumerate(content[2:]):
for x, value in enumerate(line):
image[(y, x)] = 1 if value == '#' else 0
for step in range(2):
print_image(image)
image = enhance(image, enhancement)
#print_image(image)
print(sum(image.values())) # 5658 < x < 5980
# x != 5933
``` |
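A likely cause of the off answers noted in the comments above: `defaultdict(lambda: 0)` assumes the infinite background stays dark, but when the enhancement string maps index 0 (an all-dark neighbourhood) to `'#'`, the whole background flips on every step. One common fix is to carry the current background value along explicitly; a sketch (function name and structure are illustrative, not drop-in code):
```python
def enhance_with_background(image, enhancement, background):
    """image: dict of known cells; background: 0/1 value of every unknown cell."""
    new_image = {}
    for y, x in {cell for known in image for cell in adjacent(*known)}:
        bits = ''.join(str(image.get(n, background)) for n in adjacent(y, x))
        new_image[(y, x)] = 1 if enhancement[int(bits, 2)] == '#' else 0
    # the infinite background itself is also enhanced
    new_background = 1 if enhancement[0 if background == 0 else 511] == '#' else 0
    return new_image, new_background
```
Starting from `background = 0` and threading the returned value through each step keeps the lit-pixel count finite and correct after every even step.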
{
"source": "JLMadsen/NTNU",
"score": 3
} |
#### File: Kjemi/assignment/Element.py
```python
class Element:
columnLength = 12
def __init__(self, atomic_number_, short_name_, full_name_, empirical_, calculated_, density_, atoms_per_volume_, atomic_mass_):
        # from dataset
self.atomic_number = atomic_number_
self.short_name = short_name_
self.full_name = full_name_
self.empirical = float(empirical_)
self.calculated = float(calculated_)
self.density = float(density_)
self.atoms_per_volume = float(atoms_per_volume_) # number of atoms per volume unit
self.atomic_mass = float(atomic_mass_)
# calculated
self.calcDensity = 0.0
self.densityCalcDifference = 0.0
self.empDensity = 0.0
self.densityEmpDifference = 0.0
self.distance = 0.0
self.zetta = 0.0
def setZetta(self, zetta_):
self.zetta = zetta_
def setDistance(self, distance_):
self.distance = distance_
def setDensity(self, calcDensity_, empDensity_):
self.calcDensity = calcDensity_
self.empDensity = empDensity_
self.densityCalcDifference = abs(self.density - self.calcDensity)
self.densityEmpDifference = abs(self.density - self.empDensity)
def getItems(self):
precision = 4
return [self.atomic_number,
self.full_name,
self.short_name,
self.atomic_mass,
self.density,
round(self.calcDensity,precision),
round(self.densityCalcDifference,precision),
round(self.empDensity,precision),
round(self.densityEmpDifference,precision),
round(self.distance,8),
round(self.zetta,precision)]
if __name__ == '__main__':
import os
os.system('python Assignment.py')
``` |
{
"source": "jlmadurga/django-oscar-telegram-bot",
"score": 2
} |
#### File: oscar_telegrambot/templatetags/telegrambot_tags.py
```python
from django import template
from oscar.core.loading import get_class
register = template.Library()
Selector = get_class('partner.strategy', 'Selector')
@register.assignment_tag
def purchase_info_for_product(product):
# TODO: while request is not used in context
strategy = Selector().strategy()
if product.is_parent:
return strategy.fetch_for_parent(product)
return strategy.fetch_for_product(product)
```
#### File: django-oscar-telegram-bot/tests/test_commands.py
```python
from telegrambot.test import testcases
from telegrambot.models import AuthToken, Chat
from django.core.urlresolvers import reverse
from oscar.core.compat import get_user_model
try:
from unittest import mock
except ImportError:
import mock # noqa
User = get_user_model()
class TestSimpleCommands(testcases.BaseTestBot):
start = {'in': '/start',
'out': {'parse_mode': 'Markdown',
'reply_markup': '',
'text': "Wellcome to Oscar"}
}
help = {'in': '/help',
'out': {'parse_mode': 'Markdown',
'reply_markup': '',
'text': 'You can control'}
}
unknown = {'in': '/unknown_command',
'out': {'parse_mode': 'Markdown',
'reply_markup': '',
'text': 'Unknown command. Use /help'}
}
def test_start(self):
self._test_message_ok(self.start)
def test_help(self):
self._test_message_ok(self.help)
def test_unknonw(self):
self._test_message_ok(self.unknown)
class TestOscarCommands(testcases.BaseTestBot):
fixtures = ['tests/fixtures/catalogue.json', 'tests/fixtures/partner.json', 'tests/fixtures/users.json',
'tests/fixtures/orders.json', 'tests/fixtures/baskets.json', 'tests/fixtures/addresses.json']
categories_list = {'in': '/categories',
'out': {'parse_mode': 'Markdown',
'reply_markup': '/categories books',
'text': 'Books'}
}
categories_detail = {'in': '/categories computers-in-literature',
'out': {'parse_mode': 'Markdown',
'reply_markup': '/products tron',
'text': 'Tron'}
}
products_list = {'in': '/products',
'out': {'parse_mode': 'Markdown',
'reply_markup': '/categories books',
'text': 'select a category'}
}
products_detail = {'in': '/products tron',
'out': {'parse_mode': 'Markdown',
'reply_markup': '',
'text': 'Tron'}
}
orders_list_not_authed = {'in': '/orders',
'out': {'parse_mode': 'Markdown',
'reply_markup': '',
'text': "You need an *authenticated chat*" +
" to perform this action please login" +
" [here](https://example.com/telegrambot/auth/"
}
}
orders_list_authed = {'in': '/orders',
'out': {'parse_mode': 'Markdown',
'reply_markup': '/orders 100001',
'text': "This is the list of the orders:\nOrder 100001- "}
}
orders_detail_not_authed = {'in': '/orders 100001',
'out': {'parse_mode': 'Markdown',
'reply_markup': '',
'text': "You need an *authenticated chat*" +
" to perform this action please login" +
" [here](https://example.com/telegrambot/auth/"
}
}
orders_detail_authed = {'in': '/orders 100001',
'out': {'parse_mode': 'Markdown',
'reply_markup': '',
'text': 'Order *100001*'}
}
def test_categories_list(self):
self._test_message_ok(self.categories_list)
def test_categories_detail(self):
self._test_message_ok(self.categories_detail)
def test_products_list(self):
self._test_message_ok(self.products_list)
def test_products_detail(self):
self._test_message_ok(self.products_detail)
def test_orders_list_not_authed(self):
self._test_message_ok(self.orders_list_not_authed)
def test_orders_list_authed(self):
login = self.client.login(email="<EMAIL>", password=u'<PASSWORD>')
user = User.objects.get(email="<EMAIL>")
token = AuthToken.objects.create(user=user)
chat = Chat.objects.create(**self.update.message.chat.to_dict())
token.chat_api = chat
token.save()
self._test_message_ok(self.orders_list_authed)
def test_orders_list_detail_not_authed(self):
self._test_message_ok(self.orders_detail_not_authed)
def test_orders_detail_authed(self):
login = self.client.login(email="<EMAIL>", password=u'<PASSWORD>')
user = User.objects.get(email="<EMAIL>")
token = AuthToken.objects.create(user=user)
chat = Chat.objects.create(**self.update.message.chat.to_dict())
token.chat_api = chat
token.save()
self._test_message_ok(self.orders_detail_authed)
``` |
{
"source": "jlmadurga/listenclosely",
"score": 2
} |
#### File: listenclosely/listenclosely/app.py
```python
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
import logging
from listenclosely.models import Chat, Agent, Asker, NoAgentFound
from listenclosely.services.exceptions import AuthenticationError, ConnectionError
logger = logging.getLogger(__name__)
class ListenCloselyApp(object):
def __init__(self, message_service_backend_class, agent_strategy_class, time_obsolete_offset):
self.service_backend = message_service_backend_class(self)
self.time_obsolete_offset = time_obsolete_offset
self.strategy = agent_strategy_class()
self.listening = False
def listen(self):
"""
Listen/Connect to message service loop to start receiving messages.
Do not include in constructor, in this way it can be included in tasks
"""
self.listening = True
try:
self.service_backend.listen()
except AuthenticationError:
self.listening = False
raise
else:
self.listening = False
def disconnect(self):
"""
Disconnect and not listen anymore
"""
try:
self.service_backend.disconnect()
except ConnectionError:
raise
else:
self.listening = False
def attend_pendings(self):
"""
Check all chats created with no agent assigned yet.
Schedule a timer timeout to call it.
"""
chats_attended = []
pending_chats = Chat.pending.all()
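        # assign a free agent to each pending chat in turn; stop as soon as the strategy runs out of free agents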
for pending_chat in pending_chats:
free_agent = self.strategy.free_agent()
if free_agent:
pending_chat.attend_pending(free_agent, self)
pending_chat.save()
chats_attended.append(pending_chat)
else:
break
return chats_attended
def terminate_obsolete(self):
"""
Check chats can be considered as obsolete to terminate them
"""
chats_terminated = []
live_chats = Chat.live.all()
for live_chat in live_chats:
if live_chat.is_obsolete(self.time_obsolete_offset):
live_chat.terminate()
live_chat.save()
chats_terminated.append(live_chat)
return chats_terminated
def send_message(self, id_service, content):
message_id = self.service_backend.send_message(id_service, content)
return message_id
def _new_chat_processing(self, message_id_service, contact_id_service, content):
try:
Agent.objects.get(id_service=contact_id_service)
except ObjectDoesNotExist:
asker, _ = Asker.objects.get_or_create(id_service=contact_id_service)
chat = Chat(asker=asker)
chat.save()
try:
chat.handle_message(message_id_service, contact_id_service, content, self)
chat.save()
except NoAgentFound:
# No agent to attend chat
# TODO: automessage to inform asker to wait
logging.info("Chat %s from %s not attended. No free agents" % (chat.id, asker.id_service))
else:
# Agent not attending any chat
            # TODO: automessage to warn the agent
logging.warning("Message %s from %s with no chat ignored" % (message_id_service,
contact_id_service))
def on_message(self, message_id_service, contact_id_service, content):
"""
To use as callback in message service backend
"""
try:
live_chat = Chat.live.get(
Q(agent__id_service=contact_id_service) | Q(asker__id_service=contact_id_service))
except ObjectDoesNotExist:
self._new_chat_processing(message_id_service, contact_id_service, content)
else:
live_chat.handle_message(message_id_service, contact_id_service, content, self)
```
#### File: listenclosely/listenclosely/celery.py
```python
from __future__ import absolute_import
from celery import bootsteps
import logging
from listenclosely.app import ListenCloselyApp
from listenclosely import conf
from django.utils.module_loading import import_string
logger = logging.getLogger(__name__)
class ListenCloselyAppStep(bootsteps.StartStopStep):
def __init__(self, worker, **kwargs):
"""
:param worker: celery worker
"""
worker.app.listenclosely_app = ListenCloselyApp(import_string(conf.LISTENCLOSELY_MESSAGE_SERVICE_BACKEND),
import_string(conf.LISTENCLOSELY_AGENT_STRATEGY),
int(conf.LISTENCLOSELY_QUERY_TIME_OBSOLETE))
logger.info("Listenclosely App initialized")
def stop(self, worker):
logger.info("Stopping Listenclosely App")
if worker.app.listenclosely_app.listening:
worker.app.listenclosely_app.disconnect()
logger.info("Disconnect Listenclosely App")
```
#### File: listenclosely/listenclosely/models.py
```python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_fsm import FSMField, transition
from django.utils.encoding import python_2_unicode_compatible
from listenclosely import managers
from django.utils.timezone import now
import datetime
class NoAgentFound(Exception):
"""
Raised when strategy can not find agent to attend chat
"""
class AbstractContact(models.Model):
id_service = models.CharField(_("Id Service"), unique=True, db_index=True, max_length=128)
created = models.DateTimeField(_("Date created"), auto_now_add=True)
class Meta:
abstract = True
verbose_name = _("Contact")
verbose_name_plural = _("Contacts")
@python_2_unicode_compatible
class Asker(AbstractContact):
"""
    Customer, client, ... the one who asks a question and starts a chat
"""
class Meta:
verbose_name = _("Asker")
verbose_name_plural = _("Askers")
def __str__(self):
return _(u"Asker(id_service: %(id_service)s") % {'id_service': self.id_service}
@python_2_unicode_compatible
class Agent(AbstractContact):
"""
    One who answers chats
"""
OFFLINE, ONLINE, BUSY = "Offline", "Online", "Busy"
STATE_CHOICES = (
(OFFLINE, _("Offline")),
(ONLINE, _("Online")),
(BUSY, _("Busy")))
state = FSMField(default=OFFLINE, choices=STATE_CHOICES)
class Meta:
verbose_name = _("Agent")
verbose_name_plural = _("Agents")
objects = models.Manager()
offline = managers.OfflineAgentManager()
online = managers.OnlineAgentManager()
busy = managers.BusyAgentManager()
def __str__(self):
return _(u"Agent(id_service: %(id_service)s, state:%(state)s") % {'id_service': self.id_service,
'state': self.state}
@transition(field=state, source=OFFLINE, target=ONLINE)
def register(self):
"""
        Agent is registered into the system, so it is now online to answer
"""
@transition(field=state, source=ONLINE, target=OFFLINE)
def unregister(self):
"""
Agent is not online anymore
"""
@transition(field=state, source=ONLINE, target=BUSY)
def attend(self, chat):
"""
Agent is assigned to a chat so it is busy answering
"""
chat.agent = self
@transition(field=state, source=BUSY, target=ONLINE)
def release(self, chat):
"""
Agent finishes chat
"""
@python_2_unicode_compatible
class Chat(models.Model):
asker = models.ForeignKey(Asker, verbose_name=_("Asker"), related_name="chats")
agent = models.ForeignKey(Agent, null=True, blank=True, verbose_name=_("Agent"), related_name="chats")
created = models.DateTimeField(_("Date created"), auto_now_add=True)
last_modified = models.DateTimeField(_("Last modified"), auto_now=True)
PENDING, LIVE, TERMINATED = "Pending", "Live", "Terminated"
STATE_CHOICES = (
(PENDING, _("Pending")),
(LIVE, _("Live")),
(TERMINATED, _("Terminated")))
state = FSMField(default=PENDING, choices=STATE_CHOICES)
class Meta:
verbose_name = _("Chat")
verbose_name_plural = _("Chats")
objects = models.Manager()
pending = managers.PendingChatsManager()
live = managers.LiveChatsManager()
terminated = managers.TerminatedChatsManager()
def __str__(self):
return _(u"Chat(asker: %(asker)s, agent: %(agent)s, state: %(state)s") % {'asker': self.asker,
'agent': self.agent,
'state': self.state}
@transition(field=state, source=[PENDING, LIVE], target=LIVE)
def handle_message(self, message_id_service, contact_id_service, content, listenclosely_app):
message = Message(id_service_in=message_id_service,
chat=self,
content=content,
type=Message.INCOMING if contact_id_service == self.asker.id_service else Message.OUTGOING)
if not self.agent:
# get free online agents
free_agent = listenclosely_app.strategy.free_agent()
if free_agent:
free_agent.attend(self)
free_agent.save()
else:
message.save()
                # TODO: raise specific exception when no free agent to attend. Send auto message
raise NoAgentFound("No agent to attend %s created by %s" % (self.id, contact_id_service))
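        # relay the message to the other side of the chat: incoming (from the asker) goes to the agent, outgoing goes back to the asker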
sent_id = listenclosely_app.service_backend.send_message(message.chat.agent.id_service if message.incoming()
else message.chat.asker.id_service,
content)
if sent_id:
message.id_service_out = sent_id
message.t_sent = now()
message.save()
@transition(field=state, source=PENDING, target=LIVE)
def attend_pending(self, agent, listenclosely_app):
agent.attend(self)
agent.save()
for message in self.messages.all():
sent = listenclosely_app.service_backend.send_message(message.chat.agent.id_service if message.incoming()
else message.chat.asker.id_service,
message.content)
if sent:
message.t_sent = now()
message.save()
@transition(field=state, source=LIVE, target=TERMINATED)
def terminate(self):
"""
Chat is finished and Agent is free
"""
self.agent.release(self)
self.agent.save()
def is_obsolete(self, time_offset):
"""
Check if chat is obsolete
"""
return now() > datetime.timedelta(seconds=time_offset) + self.last_modified
@python_2_unicode_compatible
class Message(models.Model):
id_service_in = models.CharField(_("Id Service In"), unique=True, db_index=True, max_length=128)
id_service_out = models.CharField(_("Id service Out"), null=True, blank=True, max_length=128)
chat = models.ForeignKey(Chat, verbose_name=_("Chat"), related_name="messages")
created = models.DateTimeField(_("Date created"), auto_now_add=True)
t_sent = models.DateTimeField(_("Date sent"), null=True, blank=True)
content = models.TextField(_("Content"))
INCOMING, OUTGOING = "Incoming", "Outgoing"
TYPE_CHOICES = ((INCOMING, _("Incoming")),
(OUTGOING, _("Outgoing")),
)
type = models.CharField(_("Type"), max_length=128, default=INCOMING, choices=TYPE_CHOICES)
class Meta:
verbose_name = _("Message")
verbose_name_plural = _("Messages")
def incoming(self):
return self.type == self.INCOMING
def outgoing(self):
return self.type == self.OUTGOING
def __str__(self):
return _(u"Chat(id_service: %(id_service)s, chat: %(chat)s") % {'id_service': self.id_service_in,
'chat': self.chat}
```
#### File: listenclosely/services/dummy.py
```python
from listenclosely.services.base import BaseMessageServiceBackend
import random
import string
class DummyMessageService(BaseMessageServiceBackend):
def __init__(self, caller, *args, **kwargs):
super(DummyMessageService, self).__init__(caller, *args, **kwargs)
self.incoming_messages = []
self.outgoing_messages = []
def listen(self):
pass
def _message_id(self):
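        # a random 10-character lowercase string stands in for a real service message id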
return ''.join(random.choice(string.ascii_lowercase) for i in range(10))
def send_message(self, id_service, content):
msg_id = self._message_id()
self.outgoing_messages.append((msg_id, id_service, content))
return msg_id
def on_message(self, id_service, content):
        # forward a generated message id so the caller's on_message(message_id, contact_id, content) signature is satisfied
        self.caller.on_message(self._message_id(), id_service, content)
self.incoming_messages.append((id_service, content))
def disconnect(self):
pass
```
#### File: listenclosely/strategies/first_free.py
```python
from listenclosely.strategies.base import BaseAgentStrategy
from listenclosely.models import Agent
class FirstFreeAgentStrategy(BaseAgentStrategy):
"""
Choose first free agent
"""
def free_agent(self):
free_agents = Agent.online.all()
if free_agents:
return free_agents[0]
return None
```
#### File: listenclosely/listenclosely/tasks.py
```python
from __future__ import absolute_import
from celery import Task, shared_task
from functools import wraps
def listening_required(f):
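    # if the app is not yet listening, queue a listen task on the same routing key and retry the wrapped task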
@wraps(f)
def decorated_function(self, *args, **kwargs):
if not self.facade.listening:
listen.apply_async(queue=self.request.delivery_info['routing_key'])
return self.retry()
else:
return f(self, *args, **kwargs)
return decorated_function
class ListenCloselyTask(Task):
abstract = True
default_retry_delay = 0.5
@property
def facade(self):
return self.app.listenclosely_app
@shared_task(base=ListenCloselyTask, bind=True, ignore_result=True)
def listen(self):
if not self.facade.listening:
return self.facade.listen()
else:
return "Already listening"
@shared_task(base=ListenCloselyTask, bind=True)
@listening_required
def disconnect(self):
return self.facade.disconnect()
@shared_task(base=ListenCloselyTask, bind=True)
@listening_required
def send_message(self, number, content):
self.facade.send_message(number, content)
return True
@shared_task(base=ListenCloselyTask, bind=True)
@listening_required
def attend_pendings(self):
self.facade.attend_pendings()
@shared_task(base=ListenCloselyTask, bind=True)
@listening_required
def terminate_obsolete(self):
self.facade.terminate_obsolete()
``` |
{
"source": "jlmadurga/permabots",
"score": 2
} |
#### File: permabots/models/messenger_api.py
```python
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from permabots.models.base import PermabotsModel
@python_2_unicode_compatible
class MessengerMessage(PermabotsModel):
bot = models.ForeignKey('MessengerBot', related_name='messages', verbose_name=_("Messenger Bot"))
sender = models.CharField(_("Sender Id"), max_length=255)
recipient = models.CharField(_("Recipient Id"), max_length=255)
timestamp = models.DateTimeField(_('Timestamp'))
MESSAGE, POSTBACK, DELIVERY = 'message', 'postback', 'delivery'
TYPE_CHOICES = (
(MESSAGE, _('Message')),
(POSTBACK, _('Postback')),
(DELIVERY, _('Delivery')),
)
type = models.CharField(max_length=255, choices=TYPE_CHOICES)
postback = models.CharField(_("PostBack"), null=True, blank=True, max_length=255)
text = models.TextField(null=True, blank=True, verbose_name=_("Text"))
class Meta:
verbose_name = 'Messenger Message'
verbose_name_plural = 'Messenger Messages'
ordering = ['-timestamp', ]
@property
def is_message(self):
return self.type == self.MESSAGE
@property
def is_postback(self):
return self.type == self.POSTBACK
@property
def is_delivery(self):
return self.type == self.DELIVERY
@property
def data(self):
if self.is_message:
return self.text
elif self.is_postback:
return self.postback
else:
return None
def __str__(self):
return "(%s,%s:%s)" % (self.id, self.type, self.data)
def to_dict(self):
message_dict = {'sender': self.sender,
'recipient': self.recipient,
'timestamp': self.timestamp,
}
if self.is_message:
message_dict.update({'text': self.text})
elif self.is_postback:
message_dict.update({'postback': self.postback})
return message_dict
``` |