#!/usr/bin/env python
import argparse
import copy
import datetime
from dataclasses import dataclass, field
import functools
import logging
import math
import os
import pickle
import re
import sys
import time
import threading
from typing import Optional
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.tensorboard import SummaryWriter
import tqdm
from transformers import logging as hf_logging
from transformers.modeling_longformer import LongformerSelfAttention
from transformers import (
PreTrainedModel,
PreTrainedTokenizer,
AutoModelForMaskedLM,
RobertaForMaskedLM,
XLMRobertaForMaskedLM,
AutoTokenizer,
)
from transformers import (
HfArgumentParser,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
set_seed,
)
class color:
"""Help print colors to terminal."""
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def is_roberta_based_model(model_name: str) -> str:
"""Validate if the model to pre-train is of roberta architecture."""
r = re.compile('(.*)roberta(.*)')
matches = r.findall(model_name)
base_name = 'none'
if len(matches) > 0:
base_name = '-'.join(model_name.split('-')[:-1])
return base_name
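# Added illustration (not in the original script): the helper strips the last
# '-'-separated token, so a RoBERTa-family checkpoint name maps to its base
# architecture and anything else maps to 'none'.
assert is_roberta_based_model("xlm-roberta-base") == "xlm-roberta"
assert is_roberta_based_model("roberta-base") == "roberta"
assert is_roberta_based_model("bert-base-cased") == "none"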
##########################################
#
# Arguments
#
##########################################
"""Helper function: Define argparser and args."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name to save the model as.",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
help="The output directory for the trained model.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help="Model type selected in the list from Huggingface ex:"
" `bert, roberta, xlm-roberta, ...`",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model from huggingface.co/models. "
"Only tested on `xlm-roberta-base` and `roberta-base`.",
)
parser.add_argument(
"--logging_dir",
default=None,
type=str,
help="Where logs are stored.",
)
parser.add_argument(
"--model_max_length",
default=4096,
type=int,
choices=[
512,
1024,
2048,
4096,
8192,
16384,
32768,
65536,
131072,
262144,
524288,
1048576,
],
help="The maxiumum position of the model",
)
parser.add_argument(
"--attention_window",
default=512,
type=int,
help="Size of attention window",
)
parser.add_argument(
"--evaluation_strategy",
default="no",
type=str,
help="How evaluation should be logged, 'steps', 'epochs', 'no'.",
)
parser.add_argument(
"--do_train",
action="store_true",
help="Whether to run training."
)
parser.add_argument(
"--do_eval",
action="store_true",
help="Whether to run eval on the dev set."
)
parser.add_argument(
"--per_device_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_device_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of gradient updates to perform before updating the weights",
)
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some."
)
parser.add_argument(
"--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. "
"Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--verbose_logging",
action="store_true",
help="If true, log all information when loading datasets.",
)
parser.add_argument(
"--cache_dir",
default=None,
help="Where do you want to store the pretrained models.",
)
parser.add_argument(
"--lang_id",
default=0,
type=int,
help="language id of input for language-specific xlm models "
"(see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
)
parser.add_argument(
"--logging_steps",
type=int,
default=500,
help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name"
"ending and ending with step number",
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="random seed for initialization"
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex)",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in"
"['O0', 'O1', 'O2', and 'O3'].",
)
parser.add_argument(
"--train_file_path",
type=str,
default="/workspace/data/wikitext-103/wiki.train.raw",
help="File path to language model training file",
)
parser.add_argument(
"--val_file_path",
type=str,
default="/workspace/data/wikitext-103/wiki.valid.raw",
help="File path to language model validation file",
)
parser.add_argument(
"--eval_steps",
type=int,
default=None,
help="Number of evaluation steps",
)
parser.add_argument(
"--prediction_loss_only",
action="store_true",
help="Prediction loss only",
)
args = parser.parse_args()
hf_logging.enable_default_handler()
hf_logging.set_verbosity_info()
hf_logging.enable_explicit_format()
tb_writer = SummaryWriter(log_dir=args.logging_dir)
logger = logging.getLogger("")
logger.setLevel(logging.INFO)
fh = logging.FileHandler(f"{args.logging_dir}.log")
sh = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
"[%(asctime)s], %(levelname)s %(message)s",
datefmt="%a, %d %b %Y %H:%M:%S",
)
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
logger.info("\n --> Starting logger:\n" + "=" * 55 + "\n")
logger.warning(
f"Process rank: {args.local_rank}, \
distributed training: {bool(args.local_rank != -1)}, \
16-bits training: {args.fp16}"
)
##########################################
#
# Replace Huggingface - TextDataset
#
##########################################
# https://github.com/tqdm/tqdm/issues/458
def provide_progress_bar(
function, estimated_time, tstep=0.2, tqdm_kwargs={}, args=[], kwargs={}
):
ret = [None] # Mutable var so the function can store its return value
def myrunner(function, ret, *args, **kwargs):
ret[0] = function(*args, **kwargs)
thread = threading.Thread(
target=myrunner, args=(function, ret) + tuple(args), kwargs=kwargs
)
pbar = tqdm.tqdm(total=estimated_time, **tqdm_kwargs)
thread.start()
while thread.is_alive():
thread.join(timeout=tstep)
pbar.update(tstep)
pbar.close()
return ret[0]
def progress_wrapped(estimated_time, tstep=0.2, tqdm_kwargs={}):
def real_decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
return provide_progress_bar(
function,
estimated_time=estimated_time,
tstep=tstep,
tqdm_kwargs=tqdm_kwargs,
args=args,
kwargs=kwargs,
)
return wrapper
return real_decorator
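# Added usage sketch (commented out, not part of the original script): any
# long-running call can be wrapped so a tqdm bar ticks along an *estimated*
# duration while the real work runs in a background thread.  The bar length
# is a guess, not a measurement.
#
# @progress_wrapped(estimated_time=5)
# def _slow_demo():
#     time.sleep(3)
#     return "done"
#
# _slow_demo()  # shows a ~5 s bar while the call sleeps for 3 s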
class TextDataset(Dataset):
# Ugly HACK on older transformers
# Use same code as Huggingface TextDataset
def __init__(
self,
tokenizer: PreTrainedTokenizer,
file_path: str,
block_size: int,
overwrite_cache=False,
cache_dir: Optional[str] = None,
):
assert os.path.isfile(
file_path), f"Input file path {file_path} not found"
block_size = block_size - \
tokenizer.num_special_tokens_to_add(pair=False)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else directory,
"cached_lm_{}_{}_{}".format(
tokenizer.__class__.__name__,
str(block_size),
filename,
),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
@progress_wrapped(estimated_time=200)
def tokenize_text(text):
return tokenizer.tokenize(text)
@progress_wrapped(estimated_time=300)
def convert_tokens_to_ids(tokenized_text):
return tokenizer.convert_tokens_to_ids(tokenized_text)
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]",
time.time() - start,
)
else:
logger.info(
f"Creating features from dataset file at {directory}\n\n")
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
# For large texts and models, this could take a long time
# Done in two steps, since each part can take between 5-10 min
start = time.time()
text = tokenize_text(text)
logger.info("Tokenizing text [took %.3f s]", time.time() - start)
start = time.time()
tokenized_text = convert_tokens_to_ids(text)
logger.info(
"Converting text to id [took %.3f s]\n", time.time() - start)
start = time.time()
for i in range(
0, len(tokenized_text) - block_size + 1, block_size
): # Truncate in block of block_size
self.examples.append(
tokenizer.build_inputs_with_special_tokens(
tokenized_text[i: i + block_size]
)
)
logger.info(
"Build tokenizer inputs by block_size length [took %.3f s]",
time.time() - start,
)
start = time.time()
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle,
protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
"Saving features into cached file %s [took %.3f s]",
cached_features_file,
time.time() - start,
)
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> torch.Tensor:
return torch.tensor(self.examples[i], dtype=torch.long)
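# Added usage sketch (commented out; the file path and block size are only
# examples): the dataset tokenizes the whole file once, slices it into
# block_size chunks with special tokens added, and pickles the result next to
# the input file so later runs load from the cache.
#
# tok = AutoTokenizer.from_pretrained("roberta-base")
# val_data = TextDataset(tokenizer=tok,
#                        file_path="/workspace/data/wikitext-103/wiki.valid.raw",
#                        block_size=512)
# print(len(val_data), val_data[0].shape)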
###########################################################
#
# Longformer conversion
#
###########################################################
# TODO: Huggingface transformers v. >3.5.1 breaks this
class LongModelSelfAttention(LongformerSelfAttention):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
print()
return super().forward(
hidden_states,
attention_mask=attention_mask,
)
# Load initial model
MODEL: PreTrainedModel
if is_roberta_based_model(args.model_name_or_path) == "xlm-roberta":
MODEL = XLMRobertaForMaskedLM
elif is_roberta_based_model(args.model_name_or_path) == "roberta":
MODEL = RobertaForMaskedLM
else:
raise NotImplementedError("Currently only supports roberta-based architectures.")
class LongModelForMaskedLM(MODEL):
def __init__(self, config):
super().__init__(config)
#print(f"\n{color.YELLOW}Converting models to Longformer is currently only tested for RoBERTa like architectures.{color.END}")
for i, layer in enumerate(self.roberta.encoder.layer):
layer.attention.self = LongModelSelfAttention(config, layer_id=i)
def create_long_model(
save_model_to,
model,
tokenizer,
attention_window,
model_max_length
):
config = model.config
position_embeddings = model.roberta.embeddings.position_embeddings
tokenizer.model_max_length = model_max_length
tokenizer.init_kwargs['model_max_length'] = model_max_length
current_model_max_length, embed_size = position_embeddings.weight.shape
# NOTE: RoBERTa has positions 0,1 reserved
# embedding size is max position + 2
model_max_length += 2
config.max_position_embeddings = model_max_length
assert model_max_length > current_model_max_length, \
"New model max_length must be longer than current max_length"
# BUG for XLM: Need to make all zeros since too large base model
new_pos_embed = position_embeddings.weight.new_zeros(
model_max_length, embed_size
)
k = 2
step = current_model_max_length - 2
while k < model_max_length - 1:
new_pos_embed[k:(
k + step)] = position_embeddings.weight[2:]
k += step
# HACK for Huggingface transformers >=3.4.0 and < 4.0
# https://github.com/huggingface/transformers/issues/6465#issuecomment-719042969
position_embeddings.weight.data = new_pos_embed
model.roberta.embeddings.position_embeddings.num_embeddings = len(
new_pos_embed.data
)
num_model_embeddings = position_embeddings.num_embeddings
model.roberta.embeddings.position_ids = torch.arange(
0, num_model_embeddings
)[None]
# replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`
config.attention_window = [attention_window] * config.num_hidden_layers
for i, layer in enumerate(model.roberta.encoder.layer):
longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
longformer_self_attn.query = layer.attention.self.query
longformer_self_attn.key = layer.attention.self.key
longformer_self_attn.value = layer.attention.self.value
#allenai
longformer_self_attn.query_global = copy.deepcopy(layer.attention.self.query)
longformer_self_attn.key_global = copy.deepcopy(layer.attention.self.key)
longformer_self_attn.value_global = copy.deepcopy(layer.attention.self.value)
#longformer_self_attn.query_global = layer.attention.self.query
#longformer_self_attn.key_global = layer.attention.self.key
#longformer_self_attn.value_global = layer.attention.self.value
layer.attention.self = longformer_self_attn
logger.info(f'saving model to {save_model_to}')
model.save_pretrained(save_model_to)
tokenizer.save_pretrained(save_model_to)
return model, tokenizer
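# Added usage sketch (commented out; checkpoint name and sizes are examples
# only): convert a RoBERTa checkpoint into a model with 4096 positions and
# Longformer self-attention, then save it for later fine-tuning.
#
# base_model = RobertaForMaskedLM.from_pretrained("roberta-base")
# base_tok = AutoTokenizer.from_pretrained("roberta-base", use_fast=True)
# long_model, long_tok = create_long_model(
#     save_model_to="roberta-base-4096",
#     model=base_model,
#     tokenizer=base_tok,
#     attention_window=512,
#     model_max_length=4096,
# )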
#allenai
def copy_proj_layers(model):
for i, layer in enumerate(model.roberta.encoder.layer):
layer.attention.self.query_global = copy.deepcopy(layer.attention.self.query)
layer.attention.self.key_global = copy.deepcopy(layer.attention.self.key)
layer.attention.self.value_global = copy.deepcopy(layer.attention.self.value)
return model
#def copy_proj_layers(model):
# for _, layer in enumerate(model.roberta.encoder.layer):
# layer.attention.self.query_global = layer.attention.self.query
# layer.attention.self.key_global = layer.attention.self.key
# layer.attention.self.value_global = layer.attention.self.value
# return model
def pretrain_and_evaluate(
training_args, data_args, model, tokenizer, eval_only, model_path
):
val_dataset = TextDataset(
tokenizer=tokenizer,
file_path=data_args.val_file_path,
block_size=tokenizer.max_len,
)
if eval_only:
train_dataset = val_dataset
else:
logger.info(
f"Loading and tokenizing training data is usually slow: {data_args.train_file_path}"
)
train_dataset = TextDataset(
tokenizer=tokenizer,
file_path=data_args.train_file_path,
block_size=tokenizer.max_len,
)
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=True, mlm_probability=0.15
)
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=val_dataset,
# deprecated as a keyword argument: moved into args.prediction_loss_only
#prediction_loss_only=True,
)
eval_loss = trainer.evaluate()
eval_loss = eval_loss["eval_loss"]
print(f"Initial eval bpc: {color.GREEN}{eval_loss/math.log(2)}{color.END}")
logger.info(f"Initial eval bpc: {eval_loss/math.log(2)}")
if not eval_only:
trainer.train(model_path=model_path)
trainer.save_model()
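# The driver code that wires the pieces above together is not included in this
# excerpt.  A minimal sketch of the intended flow (commented out; the exact
# TrainingArguments fields are assumptions based on the argparse options):
#
# set_seed(args.seed)
# training_args = TrainingArguments(
#     output_dir=args.output_dir,
#     do_train=args.do_train,
#     do_eval=args.do_eval,
#     per_device_train_batch_size=args.per_device_train_batch_size,
#     learning_rate=args.learning_rate,
#     num_train_epochs=args.num_train_epochs,
#     warmup_steps=args.warmup_steps,
#     logging_dir=args.logging_dir,
#     seed=args.seed,
# )
# tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
# model = MODEL.from_pretrained(args.model_name_or_path)
# model, tokenizer = create_long_model(args.output_dir, model, tokenizer,
#                                      args.attention_window, args.model_max_length)
# pretrain_and_evaluate(training_args, args, model, tokenizer,
#                       eval_only=not args.do_train, model_path=args.output_dir)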
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
assert resp.content_type == CONTENT_TYPE_APP_JSON
assert len(resp.json["jobs"]) == 2
for job in resp.json["jobs"]:
base_uri = sd.jobs_service.path + "/{}".format(job)
path = get_path_kvp(base_uri)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
assert resp.content_type == CONTENT_TYPE_APP_JSON
interval = datetime_after.replace(DATETIME_INTERVAL_OPEN_END_SYMBOL, "")
assert date_parser.parse(resp.json["created"]) >= date_parser.parse(interval)
def test_get_jobs_datetime_interval(self):
"""
Test that only filtered jobs in the time interval are returned when ``datetime`` query parameter is provided.
.. seealso::
- `/req/collections/rc-time-collections-response
<https://github.com/opengeospatial/ogcapi-common/blob/master/collections/requirements/collections/REQ_rc-time-collections-response.adoc>`_
"""
datetime_interval = self.datetime_interval[1] + DATETIME_INTERVAL_CLOSED_SYMBOL + self.datetime_interval[3]
path = get_path_kvp(sd.jobs_service.path, datetime=datetime_interval)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
assert resp.content_type == CONTENT_TYPE_APP_JSON
datetime_after, datetime_before = datetime_interval.split(DATETIME_INTERVAL_CLOSED_SYMBOL)
assert len(resp.json["jobs"]) == 3
for job in resp.json["jobs"]:
base_uri = sd.jobs_service.path + "/{}".format(job)
path = get_path_kvp(base_uri)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
assert resp.content_type == CONTENT_TYPE_APP_JSON
assert date_parser.parse(resp.json["created"]) >= date_parser.parse(datetime_after)
assert date_parser.parse(resp.json["created"]) <= date_parser.parse(datetime_before)
def test_get_jobs_datetime_match(self):
"""
Test that only filtered jobs at a specific time are returned when ``datetime`` query parameter is provided.
.. seealso::
- `/req/collections/rc-time-collections-response
<https://github.com/opengeospatial/ogcapi-common/blob/master/collections/requirements/collections/REQ_rc-time-collections-response.adoc>`_
"""
datetime_match = self.datetime_interval[1]
path = get_path_kvp(sd.jobs_service.path, datetime=datetime_match)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
assert resp.content_type == CONTENT_TYPE_APP_JSON
assert len(resp.json["jobs"]) == 1
for job in resp.json["jobs"]:
base_uri = sd.jobs_service.path + "/{}".format(job)
path = get_path_kvp(base_uri)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
assert resp.content_type == CONTENT_TYPE_APP_JSON
assert date_parser.parse(resp.json["created"]) == date_parser.parse(datetime_match)
def test_get_jobs_datetime_invalid(self):
"""
Test that incorrectly formatted ``datetime`` query parameter value is handled.
.. seealso::
- `/req/collections/rc-time-collections-response
<https://github.com/opengeospatial/ogcapi-common/blob/master/collections/requirements/collections/REQ_rc-time-collections-response.adoc>`_
Value of ``datetime_invalid`` is not formatted according to the RFC-3339 datetime format.
For more details refer to https://datatracker.ietf.org/doc/html/rfc3339#section-5.6.
"""
datetime_invalid = "2022-31-12 23:59:59"
path = get_path_kvp(sd.jobs_service.path, datetime=datetime_invalid)
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code == 400
def test_get_jobs_datetime_interval_invalid(self):
"""
Test that invalid ``datetime`` query parameter value is handled.
.. seealso::
- `/req/collections/rc-time-collections-response
<https://github.com/opengeospatial/ogcapi-common/blob/master/collections/requirements/collections/REQ_rc-time-collections-response.adoc>`_
Value of ``datetime_interval`` represents a datetime interval where the limit dates are inverted:
the minimum is greater than the maximum datetime limit.
"""
datetime_interval = self.datetime_interval[3] + DATETIME_INTERVAL_CLOSED_SYMBOL + self.datetime_interval[1]
path = get_path_kvp(sd.jobs_service.path, datetime=datetime_interval)
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code == 422
def test_get_jobs_datetime_before_invalid(self):
"""
Test that invalid ``datetime`` query parameter value with a range is handled.
.. seealso::
- `/req/collections/rc-time-collections-response
<https://github.com/opengeospatial/ogcapi-common/blob/master/collections/requirements/collections/REQ_rc-time-collections-response.adoc>`_
Value of ``datetime_before`` represents a bad open range datetime interval.
"""
datetime_before = "./" + self.datetime_interval[3]
path = get_path_kvp(sd.jobs_service.path, datetime=datetime_before)
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code == 400
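# Added reference note (not part of the original tests): the ``datetime``
# query values exercised above appear to take one of three forms, built from
# the symbols already imported by this module:
#   single instant : self.datetime_interval[1]
#   closed interval: self.datetime_interval[1] + DATETIME_INTERVAL_CLOSED_SYMBOL + self.datetime_interval[3]
#   open-ended     : self.datetime_interval[1] + DATETIME_INTERVAL_OPEN_END_SYMBOL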
def test_get_jobs_duration_min_only(self):
test = {"minDuration": 35}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
expect_jobs = [self.job_info[i].id for i in [7, 8]]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
test = {"minDuration": 25}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
expect_idx = [6, 7, 8] # although job 10 has duration=25, it is dynamic; by the time this point is reached its delay exceeds 25
expect_jobs = [self.job_info[i].id for i in expect_idx]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
test = {"minDuration": 49}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
expect_jobs = [self.job_info[i].id for i in [8]]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
def test_get_jobs_duration_max_only(self):
test = {"maxDuration": 30}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
# 3, 4 are private, >9 except 11 are dynamic since running (11 only accepted), others fixed duration <30s
expect_idx = [0, 1, 2, 5, 6, 9, 10, 12]
expect_jobs = [self.job_info[i].id for i in expect_idx]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
test = {"maxDuration": 49}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
# same as previous for repeated indices except 7 == 40s now also < max duration, 8 is 50s just below range
expect_idx = [0, 1, 2, 5, 6, 7, 9, 10, 12]
expect_jobs = [self.job_info[i].id for i in expect_idx]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
def test_get_jobs_duration_min_max(self):
# note: avoid range <35s for this test to avoid sudden dynamic duration of 9, 10 becoming within min/max
test = {"minDuration": 35, "maxDuration": 60}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
expect_jobs = [self.job_info[i].id for i in [7, 8]]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
test = {"minDuration": 38, "maxDuration": 42}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
expect_jobs = [self.job_info[i].id for i in [7]]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
test = {"minDuration": 35, "maxDuration": 37}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
assert len(result_jobs) == 0
def test_get_jobs_duration_min_max_invalid(self):
test = {"minDuration": 30, "maxDuration": 20}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code in [400, 422]
test = {"minDuration": -1}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code in [400, 422]
test = {"maxDuration": -20}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code in [400, 422]
test = {"minDuration": -10, "maxDuration": 10}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code in [400, 422]
def test_get_jobs_by_status_single(self):
test = {"status": STATUS_SUCCEEDED}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
expect_jobs = [self.job_info[0].id]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
test = {"status": STATUS_FAILED}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
expect_jobs = [self.job_info[i].id for i in [1, 2, 5, 6, 7, 8]] # 8 total, but only 6 visible
result_jobs = resp.json["jobs"]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
@pytest.mark.xfail(reason="Multiple statuses not supported") # FIXME: support comma-separated list of statuses
def test_get_jobs_by_status_multi(self):
test = {"status": "{},{}".format(STATUS_SUCCEEDED, STATUS_RUNNING)}
path = get_path_kvp(sd.jobs_service.path, **test)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
result_jobs = resp.json["jobs"]
expect_jobs = [self.job_info[i].id for i in [0, 9, 10]]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, test)
def test_get_jobs_by_status_invalid(self):
path = get_path_kvp(sd.jobs_service.path, status="random")
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code == 400
assert resp.json["code"] == "JobInvalidParameter"
assert resp.json["value"]["status"] == "random"
assert "status" in resp.json["cause"]
status = "random,{}".format(STATUS_RUNNING)
path = get_path_kvp(sd.jobs_service.path, status=status)
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code == 400
assert resp.json["code"] == "JobInvalidParameter"
assert resp.json["value"]["status"] == status
assert "status" in resp.json["cause"]
def test_get_job_status_response_process_id(self):
"""
Verify the processID value in the job status response.
"""
body = {
"outputs": [],
"mode": EXECUTE_MODE_ASYNC,
"response": EXECUTE_RESPONSE_DOCUMENT,
}
with contextlib.ExitStack() as stack:
for runner in mocked_process_job_runner():
stack.enter_context(runner)
path = "/processes/{}/jobs".format(self.process_public.identifier)
resp = self.app.post_json(path, params=body, headers=self.json_headers)
assert resp.status_code == 201
assert resp.content_type == CONTENT_TYPE_APP_JSON
assert resp.json["processID"] == "process-public"
def test_get_job_invalid_uuid(self):
"""
.. versionchanged:: 4.6.0
Jobs must explicitly use an :class:`uuid.UUID` object to search.
Any value provided in path parameter that does not correspond to such definition raises a bad request.
"""
# to make sure UUID is applied, use the "same format" (8-4-4-4-12), but with invalid definitions
base_path = sd.job_service.path.format(job_id="thisisnt-some-real-uuid-allerrordata")
for sub_path in ["", "/inputs", "/outputs", "/results", "/logs", "exceptions"]:
path = f"{base_path}{sub_path}"
resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code == 400
assert resp.json["title"] == "NoSuchJob"
assert resp.json["type"].endswith("no-such-job")
assert "UUID" in resp.json["detail"]
@mocked_dismiss_process()
def test_job_dismiss_running_single(self):
"""
Jobs that are in a valid *running* state (or about to run) can be dismissed successfully.
Subsequent calls to the same job dismiss operation must respond with HTTP Gone (410) status.
.. seealso::
OGC specification of dismiss operation: https://docs.ogc.org/DRAFTS/18-062.html#sec_cons_dismiss
"""
job_running = self.job_info[10]
assert job_running.status == STATUS_RUNNING, "Job must be in running state for test"
job_accept = self.job_info[11]
assert job_accept.status == STATUS_ACCEPTED, "Job must be in accepted state for test"
job_started = self.job_info[12]
assert job_started.status == STATUS_STARTED, "Job must be in started state for test"
for job in [job_running, job_accept, job_started]:
path = sd.job_service.path.format(job_id=job.id)
resp = self.app.delete(path, headers=self.json_headers)
assert resp.status_code == 200
assert resp.json["status"] == STATUS_DISMISSED
# jobs are not removed, only dismissed
path = get_path_kvp(sd.jobs_service.path, status=STATUS_DISMISSED, limit=1000)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
assert job.id in resp.json["jobs"], "Job HTTP DELETE should not have deleted it, but only dismissed it."
path = sd.job_service.path.format(job_id=job.id)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200, "Job should still exist even after dismiss"
assert resp.json["status"] == STATUS_DISMISSED
resp = self.app.delete(path, headers=self.json_headers, expect_errors=True)
assert resp.status_code == 410, "Job cannot be dismissed again."
assert job.id in
else:
h = spec.peak_heights[peak_idx] / htot
size = [h, 1. - h]
tit = '% sum of heights'
colors = ['k', 'w']
plt.pie(size, colors=colors,
startangle=90,
radius=0.25, center=(0, 0), frame=False)
ax_pie.axis('equal')
ax_pie.set_title(tit)
if axes3 is None:
return fig, ax
def make_composite_peak(self, peak_idx_list):
"""Make composite peaks for all spectra in the Block"""
for prof in self.profiles:
for spec in prof.spectra:
spec.make_composite_peak(peak_idx_list)
prof.get_peak_info()
def print_spectra_names(self, show_initials=True):
"""Print out fnames of all spectra in the Block."""
if show_initials is True:
if self.initial_profiles is None:
self.setupWB()
if self.initial_profiles is not None:
print('--Initial profiles--')
for prof in self.initial_profiles:
print(prof.profile_name)
spec_list = []
for spectrum in prof.spectra:
spec_list.append(spectrum.fname)
print(spec_list)
print(' ')
else:
print('No initial profiles given')
if self.profiles is not None:
print('--Final profiles--')
for prof in self.profiles:
print(prof.profile_name)
spec_list = []
for spectrum in prof.spectra:
spec_list.append(spectrum.fname)
print(spec_list)
print(' ')
def make_baselines(self,
raw_data=False,
wn_low=3200,
wn_high=3700,
linetype='line',
spline_kind='cubic',
spline_wn_low=3000,
spline_wn_high=4000,
curvature=None,
force_through_wn=None,
polynomial_order=None,
show_fit_values=False,
show_plot=False,
abs_high=None,
abs_low=None,
abs_smear_high=0,
abs_smear_low=0,
store_baseline=True
):
"""
Make spectra baselines for all spectra in the Block.
Keywords are similar to spectrum.make_baseline()
"""
for prof in self.profiles:
prof.make_baselines(raw_data=raw_data, wn_low=wn_low,
wn_high=wn_high, linetype=linetype,
spline_kind=spline_kind,
spline_wn_high=spline_wn_high,
spline_wn_low=spline_wn_low,
curvature=curvature,
force_through_wn=force_through_wn,
polynomial_order=polynomial_order,
show_fit_values=show_fit_values,
show_plot=show_plot, abs_high=abs_high,
abs_low=abs_low,
abs_smear_high=abs_smear_high,
abs_smear_low=abs_smear_low,
store_baseline=store_baseline)
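# Added usage sketch (commented out; `block` stands for any fully constructed
# Block instance): fit a straight-line baseline between 3200 and 3700 cm-1 for
# every spectrum in every profile, then write the baselines to CSV.
#
# block.make_baselines(linetype='line', wn_low=3200, wn_high=3700,
#                      show_plot=False)
# block.save_baselines(baseline_ending='-baseline.CSV')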
def save_baselines(self, initial_too=True,
baseline_ending='-baseline.CSV'):
"""Make and save spectra baselines for all spectra in the Block."""
for prof in self.profiles:
for spectrum in prof.spectra:
spectrum.save_baseline(baseline_ending=baseline_ending)
def plot_peakfits(self, initial_too=False, profile_idx=None, legloc=1):
""" Plot peakfits for all spectra in all profiles in the Block """
if profile_idx is None:
for prof in self.profiles:
prof.plot_peakfits(initial_too, legloc=legloc)
else:
self.profiles[profile_idx].plot_peakfits(initial_too,
legloc=legloc)
def print_max_arearatio(self, peak_idx=None, heights_instead=False):
""" Prints out the maximum whole-block area ratio observed
in any profile of the Block for specified peak_idx"""
if peak_idx is None:
self.setupWB(False, True)
else:
self.get_peakfit()
self.setupWB(True, False)
a = []
for prof in self.profiles:
if peak_idx is None:
maxval = max(prof.wb_areas)
else:
prof.make_wholeblock(peakfit=True)
if heights_instead is False:
maxval = max(prof.peak_wb_areas[peak_idx])
else:
maxval = max(prof.peak_wb_heights[peak_idx])
a.append(maxval)
print('\n', self.name)
print(max(a))
return max(a)
def print_peakfits_ave(self, printall=True, print_max=False,
print_aves=True):
""" Prints out and returns average peak areas, peak heights, and
sum of the average peak areas summed over all profiles"""
areas = []
heights = []
totalareas = []
max_a = []
max_h = []
for prof in self.profiles:
a, h, totala, ma, mh = prof.print_peakfits_ave(printout=printall)
areas.append(a)
heights.append(h)
totalareas.append(totala)
max_a.append(ma)
max_h.append(mh)
if print_max is True:
print(ma)
print(mh)
if print_aves is True:
asum = np.array(np.sum(areas, axis=0))
hsum = np.array(np.sum(heights, axis=0))
tasum = np.sum(totalareas)
print('\n', self.name)
print('peak positions (cm-1)')
print(self.profiles[0].spectra[0].peakpos)
print('\naverage peak areas summed over all profiles (cm-2)')
print(asum)
print('\naverage peak heights summed over all profiles (cm-1)')
print(hsum)
print('\naverage total area summed over all profiles (cm-1)')
print(tasum)
return asum, hsum, tasum
def print_diffusivities(self, peak_idx=None, profile_idx=None,
show_plot=False, top=1.5):
"""Print diffusivities for each profile"""
if show_plot is True:
self.plot_3panels_ave_spectra(peak_idx=peak_idx, top=top)
if peak_idx is None and profile_idx is None:
for prof in self.profiles:
prof.get_diffusivities()
prof.print_diffusivities()
return
if profile_idx is None:
for prof in self.profiles:
k = peak_idx
print('\n', prof.profile_name)
print('peak position and log10(diffusivity in m2/s)')
print('bulk H :', prof.D_area_wb, '+/-', \
prof.D_area_wb_error)
print(prof.peakpos[k], ':', prof.D_peakarea_wb[k],\
'+/-', prof.peak_D_area_wb_error[k])
return
prof = self.profiles[profile_idx]
print('\n', prof.profile_name)
print('peak positions, log10(D in m2/s), D errors')
print('bulk H : ', prof.D_area_wb, '+/-', \
prof.D_area_wb_error)
print(prof.peakpos)
print(prof.D_peakarea_wb)
print(prof.peak_D_area_wb_error)
def save_diffusivities(self, folder=None,
file_ending='-diffusivities.txt'):
"""Save diffusivities for all profiles in Block to files"""
for prof in self.profiles:
prof.save_diffusivities(folder, file_ending)
def get_diffusivities(self, folder=None,
file_ending='-diffusivities.txt'):
"""Gets diffusivities for all profiles in Block from saved files"""
if folder is None:
folder = self.folder
for prof in self.profiles:
prof.get_diffusivities(folder, file_ending)
def diffusion_profiles(self,
init=1.,
fin=0,
wholeblock_data=False,
wholeblock_diffusion=False,
peak_idx=None,
time_seconds=None,
log10D_m2s=[-12., -12., -12.],
erf_or_sum='erf',
points=50,
heights_instead=False,
approximation1D=False):
"""
Calculate diffusion profiles in the Block
Requires:
* time in seconds either explicitly passed here or as an
attribute of the Block object
* list of log10 diffusivities in m2/s
(default=[-12, -12, -12])
* initial value (default init=1)
* final value (default fin=0)
Defaults:
* Calculates 3D non-path-integrated profiles
(wholeblock_diffusion=False) as opposed to path-integrated
whole-block profiles (wholeblock_diffusion=True)
* Assumes the data are not normalized to any initial
value (wholeblock_data=False) as opposed to normalized
(wholeblock_data=True). This matters for determining the
maximum and minimum values
Returns lmfit parameters, x-data, and y-data for 3-dimensional
diffusion in a block.
"""
if self.lengths is None:
self.setupWB(peakfit=False, make_wb_areas=False)
if self.lengths is None:
print('Need to set up self.lengths, which is in microns')
return False
if self.directions is None:
self.setupWB(peakfit=False, make_wb_areas=False)
if self.initial_profiles is None:
self.setupWB(peakfit=False, make_wb_areas=False)
if self.raypaths is None:
self.setupWB(peakfit=False, make_wb_areas=False)
if time_seconds is None:
if self.time_seconds is not None:
time_seconds = self.time_seconds
else:
print('Need time information')
return False, False, False
# Pick which diffusivities to use
if log10D_m2s is not None:
D3 = models.D_checker(log10D_m2s)
elif wholeblock_diffusion is True and peak_idx is None:
D3 = self.D_area_wb
else:
D3 = []
for prof in self.profiles:
D = prof.D_picker(wholeblock_data, heights_instead, peak_idx)
D3.append(D)
if D3 is None or 0.0 in D3:
print('D3:', D3)
print('\nNeed diffusivities.')
print('Input directly as diffusivities_log10D_m2s')
print('or input bulk in profile.D_area_wb')
print('or peak_diffusivities at specified peak_idx\n')
return False
L3 = self.lengths
# need initial and final as unit values, so max=1
# compare values with maximum
if wholeblock_data is True:
maxval = max([init, fin, 1.])
else:
maxval = max([init, fin])
init_unit = init/maxval
fin_unit = fin/maxval
params = models.params_setup3D(L3, D3, time_seconds,
init_unit, fin_unit)
if wholeblock_diffusion is True:
xdiff, ydiff = models.diffusion3Dwb_params(params,
raypaths=self.raypaths,
erf_or_sum=erf_or_sum,
show_plot=False)
else:
v, ydiff, xdiff = models.diffusion3Dnpi_params(params,
points=points,
centered=False)
if wholeblock_data is False:
maxareas = []
for prof in self.profiles:
if len(prof.fnames) > 0:
try:
maxa = np.max(prof.areas)
except AttributeError:
prof.make_areas()
maxa = np.max(prof.areas)
maxareas.append(maxa)
ydiff = np.array(ydiff) * np.max(maxareas)
return params, xdiff, list(ydiff)
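# Added usage sketch (commented out; `block` is a hypothetical Block that
# already has lengths, raypaths and initial profiles set up): compute 3-D
# non-path-integrated diffusion curves for a single diffusivity guess.
#
# params, xdiff, ydiff = block.diffusion_profiles(
#     time_seconds=10. * 3600.,
#     log10D_m2s=[-12., -12., -12.],
#     wholeblock_diffusion=False,
#     points=50)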
def plot_diffusion(self, wholeblock_diffusion=False,
wholeblock_data=True,
peak_idx=None,
time_seconds=None,
log10D_m2s=[-12., -12., -12.],
init=1.,
fin=0,
erf_or_sum='erf',
show_plot=True,
show_data=True,
xaxis='centered',
show_slice=False,
label4legend=[None, None, None],
axes3=None,
points=50,
top_spectra=1.0,
ytop=None,
numformat='{:.1f}',
heights_instead=False,
centered=True,
approximation1D=False,
labelD=True,
show_errorbars=True,
labelDy=None,
labelDx=[None, None, None],
style_data=styles.style_points,
style_diffusion=styles.style_1,
show_line_at_1=True,
):
"""
Plot 3D diffusion profiles for Block.
Applies 3-dimensional diffusion equations in
pynams.diffusion.models and plots non-path-integrated 3-dimensional
diffusion curves (default) or path-integrated whole-block profiles
described in Ferriss et al. 2015 (wholeblock_diffusion=True).
If show_data is True (the default), also plots the data, either
directly as the measured areas (default) or peak heights
(with heights_instead=True) or as the ratio of the
measured area to a best-fit line through the initial areas
(wholeblock_data=True)
Default initial value is 1, and default final value is 0.
Change keywords init and fin to change these values, including
to switch from diffusion out to diffusion in.
If axes3 = a list of 3 axes handles, the data and diffusion curve
are plotted there. Otherwise, the figure handle and a list of
the 3 axes handles are returned.
The diffusivities are automatically labeled on the plot.
To remove the labels, set labelD=False
To change the number of digits, use the numformat keyword.
To change the y position across all axes, use labelDy.
To change the x positions, pass *a list of x-values* into labelDx.
Change the maximum y value with the ytop keyword.
"""
D3 = models.D_checker(log10D_m2s)
hinstead = heights_instead
approx = approximation1D
params, xdiff, ydiff = self.diffusion_profiles(wholeblock_data=wholeblock_data,
peak_idx=peak_idx,
time_seconds=time_seconds,
log10D_m2s=D3,
erf_or_sum=erf_or_sum,
wholeblock_diffusion=wholeblock_diffusion,
points=points,
heights_instead=hinstead,
init=init, fin=fin,
approximation1D=approx)
if params is False:
return False, False
if axes3 is None:
fig, axes3 = self.plot_areas_3panels(peak_idx=peak_idx, ytop=ytop,
wholeblock=wholeblock_data,
heights_instead=heights_instead,
show_line_at_1=False,
label4legend=label4legend,
styles3=[style_data]*3,
centered=centered,
show_errorbars=show_errorbars)
if centered is True:
for idx_len in range(3):
xdiff[idx_len] = xdiff[idx_len] - (self.lengths[idx_len]/2.)
if wholeblock_data is False:
maxareas = []
chemokine secretion', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float891, units_btn, description_btn]
box955 = Box(children=row, layout=box_layout)
name_btn = Button(description='nuclear_NFkB', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float892 = FloatText(value='0.25', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial nuclear NFkB concentration', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float892, units_btn, description_btn]
box956 = Box(children=row, layout=box_layout)
name_btn = Button(description='inactive_NLRP3', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float893 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration of inactive NLRP3', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float893, units_btn, description_btn]
box957 = Box(children=row, layout=box_layout)
name_btn = Button(description='active_NLRP3', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float894 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration of active NLRP3', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float894, units_btn, description_btn]
box958 = Box(children=row, layout=box_layout)
name_btn = Button(description='bound_NLRP3', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float895 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration of inflammasome-bound NLRP3', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float895, units_btn, description_btn]
box959 = Box(children=row, layout=box_layout)
name_btn = Button(description='bound_ASC', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float896 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration of bound ASC', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float896, units_btn, description_btn]
box960 = Box(children=row, layout=box_layout)
name_btn = Button(description='bound_caspase1', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float897 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration of bound caspase1', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float897, units_btn, description_btn]
box961 = Box(children=row, layout=box_layout)
name_btn = Button(description='cleaved_gasderminD', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float898 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration cleaved gasderminD', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float898, units_btn, description_btn]
box962 = Box(children=row, layout=box_layout)
name_btn = Button(description='pro_IL_1b', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float899 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration pro-IL-1b', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float899, units_btn, description_btn]
box963 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_IL_1b', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float900 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration cytoplasmic IL-1b', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float900, units_btn, description_btn]
box964 = Box(children=row, layout=box_layout)
name_btn = Button(description='external_IL_1b', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float901 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration external IL-1b', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float901, units_btn, description_btn]
box965 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_IL_18', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float902 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration cytoplasmic IL-18', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float902, units_btn, description_btn]
box966 = Box(children=row, layout=box_layout)
name_btn = Button(description='external_IL_18', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float903 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration external IL-18', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float903, units_btn, description_btn]
box967 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_volume', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float904 = FloatText(value='2494', step='100', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of volume', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='cytoplasmic cell volume', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float904, units_btn, description_btn]
box968 = Box(children=row, layout=box_layout)
name_btn = Button(description='cell_pyroptosis_flag', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float905 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='bool for pyroptosis', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float905, units_btn, description_btn]
box969 = Box(children=row, layout=box_layout)
name_btn = Button(description='cell_bystander_pyroptosis_flag', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float906 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='bool for bystander pyroptosis', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float906, units_btn, description_btn]
box970 = Box(children=row, layout=box_layout)
name_btn = Button(description='cell_virus_induced_apoptosis_flag', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float907 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='bool for virus-induced apoptosis', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float907, units_btn, description_btn]
box971 = Box(children=row, layout=box_layout)
name_btn = Button(description='internalised_pro_pyroptosis_cytokine', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float908 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='none', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='used internally to track pro-pyroptotic cytokine concentration', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float908, units_btn, description_btn]
box972 = Box(children=row, layout=box_layout)
name_btn = Button(description='interferon_secretion_rate_via_infection', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float909 = FloatText(value='0.05', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='Type-1 interferon secretion rate for infected cells', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float909, units_btn, description_btn]
box973 = Box(children=row, layout=box_layout)
name_btn = Button(description='max_interferon_secretion_rate_via_paracrine', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float910 = FloatText(value='0.5', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='Type-1 interferon secretion rate after activation by Type-1 interferon', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float910, units_btn, description_btn]
box974 = Box(children=row, layout=box_layout)
name_btn = Button(description='interferon_max_response_threshold', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float911 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='Interferon response scales linearly until Int-1 exceeds this threshold', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float911, units_btn, description_btn]
box975 = Box(children=row, layout=box_layout)
name_btn = Button(description='interferon_activation', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float912 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='Current interferon signaling activation state (between 0 and 1)', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float912, units_btn, description_btn]
box976 = Box(children=row, layout=box_layout)
name_btn = Button(description='interferon_max_virus_inhibition', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float913 = FloatText(value='0.9', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='At max interferon activation, max inhibition of viral replication (between 0 and 1)', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float913, units_btn, description_btn]
box977 = Box(children=row, layout=box_layout)
name_btn = Button(description='interferon_viral_RNA_threshold', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float914 = FloatText(value='2', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='infected cell interferon secretion saturates at this viral RNA level', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float914, units_btn, description_btn]
box978 = Box(children=row, layout=box_layout)
name_btn = Button(description='TCell_contact_time', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float915 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='tracks total contact time with CD8 T cells', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float915, units_btn, description_btn]
box979 = Box(children=row, layout=box_layout)
name_btn = Button(description='cell_attachment_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float916 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='the rate at which the cell attaches to cells in contact', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float916, units_btn, description_btn]
box980 = Box(children=row, layout=box_layout)
name_btn = Button(description='cell_attachment_lifetime', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float917 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='the mean duration of a cell-cell attachment', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float917, units_btn, description_btn]
box981 = Box(children=row, layout=box_layout)
name_btn = Button(description='TCell_contact_death_threshold', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float918 = FloatText(value='50', step='1', style=style, layout=widget_layout)
units_btn = Button(description='min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='threshold CD8 T cell contact time to trigger apoptosis', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float918, units_btn, description_btn]
#!/usr/bin/python3
import os
import time
import sys
os.system("clear")
print('''\033[91m
CREATED BY Hironotori
''')
def slowprint(s):
    for c in s + '\n':
        sys.stdout.write(c)
        sys.stdout.flush()
        time.sleep(0.02)  # assumed small delay so the banner prints gradually; otherwise the `time` import is unused
slowprint(''' \033[93m
[1] apt-pkg pip-pip3 [2] apt-pkg python
[3] apt-pkg python2 [4] apt-pkg bash
[5] apt-pkg git [6] apt-pkg perl
[7] apt-pkg nano [8] apt-pkg curl
[9] apt-pkg openssl [10] apt-pkg openssh
[11] apt-pkg wget [12] apt-pkg clang
[13] apt-pkg nmap [14] apt-pkg w3m
[15] apt-pkg ruby [16] apt-pkg dnsutils
[17] apt-pkg coreutils [18] apt-pkg fish.
[19] apt-pkg zip [20] apt-pkg figlet.
[21] apt-pkg cowsay [22] apt-pkg unzip.
[23] apt-pkg vim [24] apt-pkg wcalc.
[25] apt-pkg bmon [26] apt-pkg unrar.
[27] apt-pkg proot [28] apt-pkg golang.
[29] apt-pkg tsu [30] apt-pkg tor.
[31] apt-pkg php
[00] Install everything at once          [0] Exit''')
print (" ")
choice = input("\033[93mSelect an option : ")
if choice == '0' : sys.exit()
if choice == '1' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system("python -m pip install --upgrade pip")
os.system ("pip3 install --upgrade setuptools pip")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '2' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install python -y")
os.system ("pkg upgrade python -y")
os.system ("apt install python -y")
os.system ("apt upgrade python -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '3' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install python2 -y")
os.system ("pkg upgrade python2 -y")
os.system ("apt install python2 -y")
os.system ("apt upgrade python2 -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '4' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install bash")
os.system ("apt install bash")
os.system ("pkg upgrade bash")
os.system ("apt upgrade bash")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '5' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("apt install git -y")
os.system ("pkg install git -y")
os.system ("pkg upgrade git -y")
os.system ("apt upgrade git -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '6' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install perl -y")
os.system ("apt install perl -y")
os.system ("pkg upgrade perl -y")
os.system ("apt upgrade perl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '7' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install nano -y")
os.system ("apt install nano -y")
os.system ("pkg upgrade nano -y")
os.system ("apt upgrade nano -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '8' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install curl -y")
os.system ("apt install curl -y")
os.system ("pkg upgrade curl -y")
os.system ("apt upgrade curl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '9' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install openssl -y")
os.system ("apt install openssl -y")
os.system ("pkg upgrade openssl -y")
os.system ("apt upgrade openssl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '10' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install openssh -y")
os.system ("apt install openssh -y")
os.system ("pkg upgrade openssh -y")
os.system ("apt upgrade openssh -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '11' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install wget -y")
os.system ("apt install wget -y")
os.system ("pkg upgrade wget -y")
os.system ("apt upgrade wget -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '12' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install clang -y")
os.system ("apt install clang -y")
os.system ("pkg upgrade clang -y")
os.system ("apt upgrade clang -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '13' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install nmap -y")
os.system ("apt install nmap -y")
os.system ("pkg upgrade nmap -y")
os.system ("apt upgrade nmap -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '14' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install w3m -y")
os.system ("apt install w3m -y")
os.system ("pkg upgrade w3m -y")
os.system ("apt upgrade w3m -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '15' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install ruby -y")
os.system ("apt install ruby -y")
os.system ("pkg upgrade ruby -y")
os.system ("apt upgrade ruby -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '16' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install dnsutils -y")
os.system ("apt install dnsutils -y")
os.system ("pkg upgrade dnsutils -y")
os.system ("apt upgrade dnsutils -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '17' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install coreutils -y")
os.system ("apt install coreutils -y")
os.system ("pkg upgrade coreutils -y")
os.system ("apt upgrade coreutils -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '18' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install fish -y")
os.system ("apt install fish -y")
os.system ("pkg upgrade fish -y")
os.system ("apt upgrade fish -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '19' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install zip -y")
os.system ("apt install zip -y")
os.system ("pkg upgrade zip -y")
os.system ("apt upgrade zip -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '20' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install figlet -y")
os.system ("apt install figlet -y")
os.system ("pkg upgrade figlet -y")
os.system ("apt upgrade figlet -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '21' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install cowsay -y")
os.system ("apt install cowsay -y")
os.system ("pkg upgrade cowsay -y")
os.system ("apt upgrade cowsay -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '22' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install unzip -y")
os.system ("apt install unzip -y")
os.system ("pkg upgrade unzip -y")
os.system ("apt upgrade unzip -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '23' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install vim -y")
os.system ("apt install vim -y")
os.system ("pkg upgrade vim -y")
os.system ("apt upgrade vim -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '24' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install wcalc -y")
os.system ("apt install wcalc -y")
os.system ("pkg upgrade wcalc -y")
os.system ("apt upgrade wcalc -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '25' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install bmon -y")
os.system ("apt install bmon -y")
os.system ("pkg upgrade bmon -y")
os.system ("apt upgrade bmon -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '26' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install unrar -y")
os.system ("apt install unrar -y")
os.system ("pkg upgrade unrar -y")
os.system ("apt upgrade unrar -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '27' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install proot -y")
os.system ("apt install proot -y")
os.system ("pkg upgrade proot -y")
os.system ("apt upgrade proot -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '28' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install golang -y")
os.system ("apt install golang -y")
os.system ("pkg upgrade golang -y")
os.system ("apt upgrade golang -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '29' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system("pkg install tsu-y")
os.system ("apt install tsu -y")
os.system ("pkg upgrade tsu -y")
os.system ("apt upgrade tsu -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '30' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install tor")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '31' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrate")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install php -y")
os.system ("pkg upgrade php -y")
os.system ("apt install php -y")
os.system ("apt upgrade php -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '00' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
"""
The pipeline module contains the transformations and actions API of PyFunctional
"""
from __future__ import division, absolute_import
from operator import mul, add
import collections
from functools import reduce, wraps, partial
import json
import csv
import sqlite3
import re
import six
from tabulate import tabulate
from functional.execution import ExecutionEngine
from functional.lineage import Lineage
from functional.util import is_iterable, is_primitive, is_namedtuple, is_tabulatable, identity
from functional.io import WRITE_MODE, universal_write_open
from functional import transformations
from functional.execution import ExecutionStrategies
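# Illustrative usage sketch (not part of the original module). The package-level `seq`
# entry point, defined elsewhere in the package, is assumed here; it simply wraps an
# iterable in the Sequence class below.
#
#     from functional import seq
#     seq([1, 2, 3, 4, 5]).drop(1).take(3).to_list()   # -> [2, 3, 4]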
class Sequence(object):
"""
Sequence is a wrapper around any type of sequence which provides access to common
functional transformations and reductions in a data pipeline style
"""
def __init__(self, sequence, transform=None, engine=None, max_repr_items=None):
# pylint: disable=protected-access
"""
Takes a Sequence, list, tuple, or iterable and wraps it in a Sequence object.
If the sequence is already an instance of Sequence, it will in total be wrapped exactly
once. A TypeError is raised if sequence is none of these.
:param sequence: sequence of items to wrap in a Sequence
:param transform: transformation to apply
:param engine: execution engine
:param max_repr_items: maximum number of items to print with repr
:return: sequence wrapped in a Sequence
"""
self.engine = engine or ExecutionEngine()
if isinstance(sequence, Sequence):
self._max_repr_items = max_repr_items or sequence._max_repr_items
self._base_sequence = sequence._base_sequence
self._lineage = Lineage(prior_lineage=sequence._lineage,
engine=engine)
elif isinstance(sequence, (list, tuple)) or is_iterable(sequence):
self._max_repr_items = max_repr_items
self._base_sequence = sequence
self._lineage = Lineage(engine=engine)
else:
raise TypeError("Given sequence must be an iterable value")
if transform is not None:
self._lineage.apply(transform)
def __iter__(self):
"""
Return iterator of sequence.
:return: iterator of sequence
"""
return self._evaluate()
def __eq__(self, other):
"""
Checks for equality with the sequence's equality operator.
:param other: object to compare to
:return: true if the underlying sequence is equal to other
"""
return self.sequence == other
def __ne__(self, other):
"""
Checks for inequality with the sequence's inequality operator.
:param other: object to compare to
:return: true if the underlying sequence is not equal to other
"""
return self.sequence != other
def __hash__(self):
"""
Return the hash of the sequence.
:return: hash of sequence
"""
raise TypeError("unhashable type: Sequence")
def __repr__(self):
"""
Return repr using sequence's repr function.
:return: sequence's repr
"""
items = self.to_list()
if self._max_repr_items is None or len(items) <= self._max_repr_items:
return repr(items)
else:
return repr(items[:self._max_repr_items])[:-1] + ', ...]'
def __str__(self):
"""
Return string using sequence's string function.
:return: sequence's string
"""
return str(self.to_list())
def __bool__(self):
"""
Returns True if size is not zero.
:return: True if size is not zero
"""
return self.size() != 0
def __nonzero__(self):
"""
Returns True if size is not zero.
:return: True if size is not zero
"""
return self.size() != 0
def __getitem__(self, item):
"""
Gets item at given index.
:param item: key to use for getitem
:return: item at index key
"""
self.cache()
return _wrap(self.sequence[item])
def __reversed__(self):
"""
Return reversed sequence using sequence's reverse function
:return: reversed sequence
"""
return self._transform(transformations.reversed_t())
def __contains__(self, item):
"""
Checks if item is in sequence.
:param item: item to check
:return: True if item is in sequence
"""
return self.sequence.__contains__(item)
def __add__(self, other):
"""
Concatenates sequence with other.
:param other: sequence to concatenate
:return: concatenated sequence with other
"""
if isinstance(other, Sequence):
return Sequence(self.sequence + other.sequence)
else:
return Sequence(self.sequence + other)
def _evaluate(self):
"""
Creates and returns an iterator which applies all the transformations in the lineage
:return: iterator over the transformed sequence
"""
return self._lineage.evaluate(self._base_sequence)
def _transform(self, *transforms):
"""
Copies the given Sequence and appends new transformation
:param transform: transform to apply or list of transforms to apply
:return: transformed sequence
"""
sequence = None
for transform in transforms:
if sequence:
sequence = Sequence(sequence, transform=transform)
else:
sequence = Sequence(self, transform=transform)
return sequence
@property
def sequence(self):
"""
Alias for to_list used internally for brevity
:return: result of to_list() on sequence
"""
return self.to_list()
def cache(self, delete_lineage=False):
"""
Caches the result of the Sequence so far. This means that any functions applied on the
pipeline before cache() are evaluated, and the result is stored in the Sequence. This is
primarily used internally and is no more helpful than to_list() externally. delete_lineage
allows for cache() to be used in internal initialization calls without the caller having
knowledge of the internals via the lineage
:param delete_lineage: If set to True, it will cache then erase the lineage
"""
if len(self._lineage) == 0 or self._lineage[-1] == transformations.CACHE_T:
if not isinstance(self._base_sequence, list):
self._base_sequence = list(self._base_sequence)
self._lineage.apply(transformations.CACHE_T)
else:
self._base_sequence = list(self._evaluate())
self._lineage.apply(transformations.CACHE_T)
if delete_lineage:
self._lineage = Lineage(engine=self.engine)
return self
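    # Illustrative sketch (not in the original source): cache() forces the pending lineage to
    # be evaluated exactly once, so later traversals reuse the stored list instead of
    # recomputing every transformation. `expensive_iterable` and `pred` are placeholder names.
    #
    #     s = seq(expensive_iterable).drop_while(pred).cache()
    #     s.head()       # served from the cached, already-evaluated list
    #     s.to_list()    # no re-evaluation of the lineage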
def head(self):
"""
Returns the first element of the sequence.
>>> seq([1, 2, 3]).head()
1
Raises IndexError when the sequence is empty.
>>> seq([]).head()
Traceback (most recent call last):
...
IndexError: list index out of range
:return: first element of sequence
"""
return _wrap(self.take(1)[0])
def first(self):
"""
Returns the first element of the sequence.
>>> seq([1, 2, 3]).first()
1
Raises IndexError when the sequence is empty.
>>> seq([]).first()
Traceback (most recent call last):
...
IndexError: list index out of range
:return: first element of sequence
"""
return self.head()
def head_option(self):
"""
Returns the first element of the sequence or None, if the sequence is empty.
>>> seq([1, 2, 3]).head_option()
1
>>> seq([]).head_option()
None
:return: first element of sequence or None if sequence is empty
"""
if not self.sequence:
return None
return self.head()
def last(self):
"""
Returns the last element of the sequence.
>>> seq([1, 2, 3]).last()
3
Raises IndexError when the sequence is empty.
>>> seq([]).last()
Traceback (most recent call last):
...
IndexError: list index out of range
:return: last element of sequence
"""
return _wrap(self.sequence[-1])
def last_option(self):
"""
Returns the last element of the sequence or None, if the sequence is empty.
>>> seq([1, 2, 3]).last_option()
3
>>> seq([]).last_option()
None
:return: last element of sequence or None if sequence is empty
"""
if not self.sequence:
return None
return self.last()
def init(self):
"""
Returns the sequence, without its last element.
>>> seq([1, 2, 3]).init()
[1, 2]
:return: sequence without last element
"""
return self._transform(transformations.init_t())
def tail(self):
"""
Returns the sequence, without its first element.
>>> seq([1, 2, 3]).tail()
[2, 3]
:return: sequence without first element
"""
return self._transform(transformations.tail_t())
def inits(self):
"""
Returns consecutive inits of the sequence.
>>> seq([1, 2, 3]).inits()
[[1, 2, 3], [1, 2], [1], []]
:return: consecutive init()s on sequence
"""
return self._transform(transformations.inits_t(_wrap))
def tails(self):
"""
Returns consecutive tails of the sequence.
>>> seq([1, 2, 3]).tails()
[[1, 2, 3], [2, 3], [3], []]
:return: consecutive tail()s of the sequence
"""
return self._transform(transformations.tails_t(_wrap))
def cartesian(self, *iterables, **kwargs):
"""
Returns the cartesian product of the passed iterables with the specified number of
repetitions.
The keyword argument `repeat` is read from kwargs and passed on to itertools.product.
>>> seq.range(2).cartesian(range(2))
[(0, 0), (0, 1), (1, 0), (1, 1)]
:param iterables: elements for cartesian product
:param kwargs: the variable `repeat` is read from kwargs
:return: cartesian product
"""
return self._transform(transformations.cartesian_t(iterables, kwargs.get('repeat', 1)))
def drop(self, n):
"""
Drop the first n elements of the sequence.
>>> seq([1, 2, 3, 4, 5]).drop(2)
[3, 4, 5]
:param n: number of elements to drop
:return: sequence without first n elements
"""
if n <= 0:
return self._transform(transformations.drop_t(0))
else:
return self._transform(transformations.drop_t(n))
def drop_right(self, n):
"""
Drops the last n elements of the sequence.
>>> seq([1, 2, 3, 4, 5]).drop_right(2)
[1, 2, 3]
:param n: number of elements to drop
:return: sequence with last n elements dropped
"""
return self._transform(transformations.CACHE_T, transformations.drop_right_t(n))
def drop_while(self, func):
"""
Drops elements in the sequence while func evaluates to True, then returns the rest.
>>> seq([1, 2, 3, 4, 5, 1, 2]).drop_while(lambda x: x < 3)
[3, 4, 5, 1, 2]
:param func: truth returning function
:return: elements including and after func evaluates to False
"""
return self._transform(transformations.drop_while_t(func))
def take(self, n):
"""
Take the first n elements of the sequence.
>>> seq([1, 2, 3, 4]).take(2)
[1, 2]
:param n: number of elements to take
:return: first n elements of sequence
"""
if n <= 0:
return self._transform(transformations.take_t(0))
else:
return self._transform(transformations.take_t(n))
def take_while(self, func):
"""
Take elements in the sequence while func evaluates to True, then drop the rest.
<reponame>vgoliber/airline-hubs
# Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import itertools
from collections import defaultdict
import imageio
import matplotlib
import numpy as np
import networkx as nx
from dimod import DiscreteQuadraticModel
from dwave.system import LeapHybridDQMSampler
try:
import matplotlib.pyplot as plt
except ImportError:
matplotlib.use("agg")
import matplotlib.pyplot as plt
def read_inputs(flow_file, cost_file, verbose=True):
"""Reads in scenario information on passenger flow and route cost.
Args:
- flow_file: CSV file. Number of passengers that desire to travel from city i to city j.
- cost_file: CSV file. Cost for airline to operate leg from city i to city j.
- verbose: Print to command-line for user.
Returns:
- W: Numpy matrix. Represents passenger demand. Normalized with total demand equal to 1.
- C: Numpy matrix. Represents airline leg cost.
- n: Int. Number of cities in play.
"""
if verbose:
print("\nReading in flow/cost info...\n")
W = np.genfromtxt(flow_file, delimiter=',')
W = W/np.sum(np.sum(W))
C = np.genfromtxt(cost_file, delimiter=',')
n = W.shape[0]
return W, C, n
def read_city_info(file_name, verbose=True):
"""Reads in scenario information on airports and lat/long coordinates.
Args:
- file_name: Text file. Includes airport code and lat/long coordinates.
- verbose: Print to command-line for user.
Returns:
- city_names: List of all airport codes.
- city_lats: List of all airport lat coordinates.
- city_longs: List of all airport long coordinates.
All returned lists have airports in the same order, i.e. airport city_names[i]
has latitude city_lats[i] and longitude city_longs[i].
"""
file1 = open(file_name, 'r')
lines = file1.readlines()
city_names = []
city_lats = []
city_longs = []
# Strips the newline character
for line in lines:
info = line.split(",")
city_names.append(info[1])
city_lats.append(float(info[2]))
city_longs.append(float(info[3].strip()))
file1.close()
if verbose:
print("\nProcessed", info[0], "city locations.\n")
return city_names, city_lats, city_longs
def build_graph(dist_mat, city_names, verbose=True):
"""Builds weighted graph based on cities and distances.
Args:
- dist_mat: Numpy matrix providing distance between cities i and j.
- city_names: List of all airport codes.
- verbose: Print to command-line for user.
Returns:
- G: NetworkX weighted graph of cities with distances on edges.
"""
if verbose:
print("\nConstructing map...\n")
G = nx.Graph()
num_cities = len(city_names)
for i in range(num_cities):
for j in range(i+1, num_cities):
G.add_edge(city_names[i], city_names[j], weight=dist_mat[i,j])
return G
def draw_graph(G, city_names, city_lats, city_longs):
"""Visualizes the city graph and saves as file.
Args:
- G: NetworkX weighted graph of cities with distances on edges.
- city_names: List of all airport codes.
- city_lats: List of all airport lat coordinates.
- city_longs: List of all airport long coordinates.
All city info lists have airports in the same order, i.e. airport city_names[i]
has latitude city_lats[i] and longitude city_longs[i].
Returns:
None. Saves visual as 'complete_network.png'.
"""
positions = {}
for i in range(len(city_names)):
positions[city_names[i]] = [-city_longs[i], city_lats[i]]
nx.draw(G, pos=positions, with_labels=True)
plt.savefig('complete_network.png')
plt.close()
def build_dqm(W, C, n, p, a, verbose=True):
"""Builds discrete quadratic model representing the optimization problem.
Args:
- W: Numpy matrix. Represents passenger demand. Normalized with total demand equal to 1.
- C: Numpy matrix. Represents airline leg cost.
- n: Int. Number of cities in play.
- p: Int. Number of hub airports allowed.
- a: Float in [0.0, 1.0]. Discount allowed for hub-hub legs.
- verbose: Print to command-line for user.
Returns:
- dqm: DiscreteQuadraticModel representing the optimization problem.
"""
if verbose:
print("\nBuilding DQM...\n")
# Initialize DQM object.
dqm = DiscreteQuadraticModel()
for i in range(n):
dqm.add_variable(n, label=i)
# Objective: Minimize cost.
for i in range(n):
for j in range(n):
for k in range(n):
dqm.set_linear_case(i, k, dqm.get_linear_case(i,k)+C[i][k]*W[i][j])
dqm.set_linear_case(j, k, dqm.get_linear_case(j,k)+C[j][k]*W[i][j])
for m in range(n):
if i != j:
dqm.set_quadratic_case(i, k, j, m, a*C[k][m]*W[i][j])
# Constraint: Every leg must connect to a hub.
gamma1 = 150
for i in range(n):
for j in range(n):
dqm.set_linear_case(i,j, dqm.get_linear_case(i,j) + 1*gamma1)
if i != j:
dqm.set_quadratic_case(i, j, j, j, dqm.get_quadratic_case(i, j, j, j) - 1*gamma1)
# Constraint: Exactly p hubs required.
gamma2 = 75
for j in range(n):
dqm.set_linear_case(j, j, dqm.get_linear_case(j,j) + (1-2*p)*gamma2)
for k in range(j+1,n):
dqm.set_quadratic_case(j, j, k, k, dqm.get_quadratic_case(j, j, k, k) + 2*gamma2)
return dqm
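# Illustrative sketch (not part of the original example): building the DQM for a tiny
# 3-city instance. The demand/cost matrices, p, and a below are assumptions chosen only
# to show the encoding; they are not taken from the example's data files.
#
#     import numpy as np
#     W = np.full((3, 3), 1.0 / 9)              # uniform, normalized passenger demand
#     C = np.array([[0.0, 2.0, 3.0],
#                   [2.0, 0.0, 1.0],
#                   [3.0, 1.0, 0.0]])           # symmetric leg costs
#     dqm = build_dqm(W, C, n=3, p=1, a=0.25, verbose=False)
#     # One discrete variable per city with n cases each: case j of variable i means
#     # "city i is assigned to hub j", and case i == j marks city i itself as a hub.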
def get_layout_from_sample(ss, city_names, p):
"""Determines the airline route network from a sampleset.
Args:
- ss: Sampleset dictionary. One solution returned from the hybrid solver.
- city_names: List of all airport codes, in order.
- p: Int. Number of hub airports allowed.
Returns:
- hubs: List of airports designated as hubs.
- legs: List of airline city-city route legs that will be operated.
- valid: Boolean designating whether the provided solution satisfies the constraints.
"""
hubs = []
legs = []
valid = True
for key, val in ss.items():
if key == val:
hubs.append(city_names[key])
else:
legs.append((city_names[key],city_names[val]))
if ss[val] != val:
valid = False
if len(hubs) != p:
valid = False
return hubs, legs, valid
def get_cost(ss, a, dist_mat, C, n):
"""Determines the cost of an airline route network from a sampleset.
Args:
- ss: Sampleset dictionary. One solution returned from the hybrid solver.
- a: Float in [0.0, 1.0]. Discount allowed for hub-hub legs.
- dist_mat: Numpy matrix providing distance between cities i and j.
- C: Numpy matrix. Represents airline leg cost.
- n: Int. Number of cities in play.
Returns:
- cost: Cost of provided route network.
"""
cost = 0
for i in range(n):
for j in range(i+1, n):
cost += dist_mat[i][j]*(C[i][ss[i]] + C[j][ss[j]] + a*C[ss[i]][ss[j]])
return cost
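# Illustrative check (not in the original): with cities i, j assigned to hubs h_i = ss[i]
# and h_j = ss[j], the itinerary i -> h_i -> h_j -> j contributes
#     dist_mat[i][j] * (C[i][h_i] + C[j][h_j] + a * C[h_i][h_j])
# to the total, i.e. only the hub-hub leg receives the discount factor a.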
def visualize_results(dist_mat, city_names, hubs, legs, city_lats, city_longs, cost, filenames=None, counter=0, verbose=True):
"""Visualizes a given route layout and saves the file as a .png.
Args:
- dist_mat: Numpy matrix providing distance between cities i and j.
- city_names: List of all airport codes.
- hubs: List of airports designated as hubs.
- legs: List of airline city-city route legs that will be operated.
- city_lats: List of all airport lat coordinates, in order.
- city_longs: List of all airport long coordinates, in order.
- cost: Cost of provided route network.
- filenames: List of image filenames produced so far.
- counter: Counter for image filename.
- verbose: Print results to command-line.
Returns:
- filenames: List of image filenames produced so far with new image filename appended.
"""
if filenames is None:
filenames = []
num_cities = len(city_names)
positions = {city_names[i]: [-city_longs[i], city_lats[i]] for i in range(num_cities)}
hub_cxn = list(itertools.combinations(hubs, 2))
H = nx.Graph()
H.add_nodes_from(city_names)
H.add_edges_from(legs)
d = dict(H.degree)
hub_degrees = {k:d[k]+len(hubs)-1 for k in hubs if k in d}
plt.figure(figsize=(10,5))
ax = plt.gca()
ax.set_title("Cost: {}".format(cost))
nx.draw_networkx_nodes(H, node_size=[v * 10 for v in d.values()], pos=positions, edgecolors='k', ax=ax)
nx.draw_networkx_nodes(hubs, node_size=[v * 100 for v in hub_degrees.values()], pos=positions, node_color='r', edgecolors='k', ax=ax)
nx.draw_networkx_edges(H, pos=positions, edgelist=H.edges(), width=1.0, ax=ax)
nx.draw_networkx_edges(H, pos=positions, edgelist=hub_cxn, width=3.0, ax=ax)
hub_graph = H.subgraph(hubs)
nx.draw_networkx_labels(hub_graph, pos=positions, ax=ax)
filename = str(counter)+'.png'
filenames.append(filename)
plt.savefig(filename)
plt.close()
if verbose:
print("Hubs:", hubs, "\tCost:", cost)
return filenames
if __name__ == '__main__':
passenger_demand, leg_cost, num_cities = read_inputs(flow_file='flow.csv', cost_file='cost.csv')
city_names, city_lats, city_longs = read_city_info('city-data.txt')
p = 3 # number of hubs
a = 0.4 # discount for hub-hub routes
# Uncomment lines below to visualize total network options
# G = build_graph(passenger_demand, city_names)
# draw_graph(G, city_names, city_lats, city_longs)
dqm = build_dqm(passenger_demand, leg_cost, num_cities, p, a)
print("\nRunning hybrid solver...\n")
sampler = LeapHybridDQMSampler()
sampleset = sampler.sample_dqm(dqm, label='Example - DQM Airline Hubs')
print("\nInterpreting solutions...\n")
ss = list(sampleset.data(['sample']))
cost_dict = {index: get_cost(ss[index].sample, a, passenger_demand, leg_cost, num_cities) for index in range(len(ss))}
ordered_samples = dict(sorted(cost_dict.items(), key=lambda item: item[1], reverse=True))
filenames = []
counter = 0
print("\nGenerating images for output GIF...\n")
print("\nFeasible solutions found:")
print("---------------------------\n")
output_string = []
for key, val in ordered_samples.items():
hubs, legs, valid = get_layout_from_sample(ss[key].sample, city_names, p)
if counter > 0:
if prev_val == val:
valid = False
if valid:
# maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Return object with labels on given axis omitted where alternately any
or all of the data are missing
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Drop the columns where all elements are nan:
>>> df.dropna(axis=1, how='all')
A B D
0 NaN 2.0 0
1 3.0 4.0 1
2 NaN NaN 5
Drop the columns where any of the elements is nan
>>> df.dropna(axis=1, how='any')
D
0 0
1 1
2 5
Drop the rows where all of the elements are nan
(there is no row to drop, so df stays the same):
>>> df.dropna(axis=0, how='all')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Keep only the rows with at least 2 non-na values:
>>> df.dropna(thresh=2)
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self._take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
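    # Illustrative sketch (not part of pandas itself): how the `keep` argument changes the
    # returned mask for a column with one duplicated value.
    #
    #     df = pd.DataFrame({'x': [1, 1, 2]})
    #     df.duplicated('x', keep='first').tolist()   # [False, True, False]
    #     df.duplicated('x', keep='last').tolist()    # [True, False, False]
    #     df.duplicated('x', keep=False).tolist()     # [True, True, False]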
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
stacklevel = 2 # Number of stack levels from df.sort_values
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = []
for x in by:
k = self._get_label_or_level_values(x, axis=axis,
stacklevel=stacklevel)
keys.append(k)
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis,
stacklevel=stacklevel)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order).
.. deprecated:: 0.20.0
Use :meth:`DataFrame.sort_index`
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
    a  b    c
3  11  c  3.0
1  10  b  2.0
2   8  d  NaN
<reponame>bopopescu/mysql-dbcompare
#
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the clone server utility which launches a new instance
of an existing server.
"""
import getpass
import os
import subprocess
import tempfile
import time
import shlex
import shutil
from mysql.utilities.common.tools import (check_port_in_use,
estimate_free_space,
get_mysqld_version,
get_tool_path)
from mysql.utilities.common.messages import WARN_OPT_SKIP_INNODB
from mysql.utilities.common.server import Server
from mysql.utilities.exception import UtilError
MAX_DATADIR_SIZE = 200
MAX_SOCKET_PATH_SIZE = 107
# Required free disk space in MB to create the data directory.
REQ_FREE_SPACE = 120
LOW_SPACE_ERRR_MSG = ("The new data directory {directory} has low free space "
"remaining, please free some space and try again. \n"
"mysqlserverclone needs at least {megabytes} MB to run "
"the new server instance.\nUse force option to ignore "
"this Error.")
def clone_server(conn_val, options):
"""Clone an existing server
This method creates a new instance of a running server using a datadir
set to the new_data parameter, with a port set to new_port, server_id
set to new_id and a root password of <PASSWORD>. You can also specify
additional parameters for the mysqld command line as well as turn on
verbosity mode to display more diagnostic information during the clone
process.
The method will build a new base database installation from the .sql
files used to construct a new installation. Once the database is
created, the server will be started.
dest_val[in] a dictionary containing connection information
including:
(user, password, host, port, socket)
options[in] dictionary of options:
new_data[in] An existing path to create the new database and use
as datadir for new instance
(default = None)
new_port[in] Port number for new instance
(default = 3307)
new_id[in] Server_id for new instance
(default = 2)
root_pass[in] Password for <PASSWORD> on new instance (optional)
mysqld_options[in] Additional command line options for mysqld
verbosity[in] Print additional information during operation
(default is 0)
quiet[in] If True, do not print messages.
(default is False)
cmd_file[in] file name to write startup command
start_timeout[in] Number of seconds to wait for server to start
"""
new_data = os.path.abspath(options.get('new_data', None))
new_port = options.get('new_port', '3307')
root_pass = options.get('root_pass', None)
verbosity = options.get('verbosity', 0)
user = options.get('user', 'root')
quiet = options.get('quiet', False)
cmd_file = options.get('cmd_file', None)
start_timeout = int(options.get('start_timeout', 10))
mysqld_options = options.get('mysqld_options', '')
force = options.get('force', False)
if not check_port_in_use('localhost', int(new_port)):
raise UtilError("Port {0} in use. Please choose an "
"available port.".format(new_port))
# Check if the path to the database files is longer than MAX_DATADIR_SIZE characters,
if len(new_data) > MAX_DATADIR_SIZE and not force:
raise UtilError("The --new-data path '{0}' is too long "
"(> {1} characters). Please use a smaller one. "
"You can use the --force option to skip this "
"check".format(new_data, MAX_DATADIR_SIZE))
# Clone running server
if conn_val is not None:
# Try to connect to the MySQL database server.
server1_options = {
'conn_info': conn_val,
'role': "source",
}
server1 = Server(server1_options)
server1.connect()
if not quiet:
print "# Cloning the MySQL server running on %s." % \
conn_val["host"]
basedir = ""
# Get basedir
rows = server1.exec_query("SHOW VARIABLES LIKE 'basedir'")
if not rows:
raise UtilError("Unable to determine basedir of running server.")
basedir = os.path.normpath(rows[0][1])
# Cloning downed or offline server
else:
basedir = os.path.abspath(options.get("basedir", None))
if not quiet:
print "# Cloning the MySQL server located at %s." % basedir
new_data_deleted = False
# If datadir exists, has data, and user said it was Ok, delete it
if os.path.exists(new_data) and options.get("delete", False) and \
os.listdir(new_data):
new_data_deleted = True
shutil.rmtree(new_data, True)
# Create new data directory if it does not exist
if not os.path.exists(new_data):
if not quiet:
print "# Creating new data directory..."
try:
os.mkdir(new_data)
except OSError as err:
raise UtilError("Unable to create directory '{0}', reason: {1}"
"".format(new_data, err.strerror))
# After create the new data directory, check for free space, so the errors
# regarding invalid or inaccessible path had been dismissed already.
# If not force specified verify and stop if there is not enough free space
if not force and os.path.exists(new_data) and \
estimate_free_space(new_data) < REQ_FREE_SPACE:
# Don't leave empty folders, delete new_data if was previously deleted
if os.path.exists(new_data) and new_data_deleted:
shutil.rmtree(new_data, True)
raise UtilError(LOW_SPACE_ERRR_MSG.format(directory=new_data,
megabytes=REQ_FREE_SPACE))
# Check for warning of using --skip-innodb
mysqld_path = get_tool_path(basedir, "mysqld")
version = get_mysqld_version(mysqld_path)
if mysqld_options is not None and ("--skip-innodb" in mysqld_options or
"--innodb" in mysqld_options) and version is not None and \
int(version[0]) >= 5 and int(version[1]) >= 7 and int(version[2]) >= 5:
print("# WARNING: {0}".format(WARN_OPT_SKIP_INNODB))
if not quiet:
print "# Configuring new instance..."
print "# Locating mysql tools..."
mysqladmin_path = get_tool_path(basedir, "mysqladmin")
mysql_basedir = get_tool_path(basedir, "share/english/errmsg.sys",
False, False)
mysql_basedir = basedir
if os.path.exists(os.path.join(basedir, "local/mysql/share/")):
mysql_basedir = os.path.join(mysql_basedir, "local/mysql/")
# for source trees
elif os.path.exists(os.path.join(basedir, "/sql/share/english/")):
mysql_basedir = os.path.join(mysql_basedir, "/sql/")
system_tables = get_tool_path(basedir, "mysql_system_tables.sql", False)
system_tables_data = get_tool_path(basedir, "mysql_system_tables_data.sql",
False)
test_data_timezone = get_tool_path(basedir, "mysql_test_data_timezone.sql",
False)
help_data = get_tool_path(basedir, "fill_help_tables.sql", False)
if verbosity >= 3 and not quiet:
print "# Location of files:"
locations = [
("mysqld", mysqld_path),
("mysqladmin", mysqladmin_path),
("mysql_system_tables.sql", system_tables),
("mysql_system_tables_data.sql", system_tables_data),
("mysql_test_data_timezone.sql", test_data_timezone),
("fill_help_tables.sql", help_data),
]
if cmd_file is not None:
locations.append(("write startup command to", cmd_file))
for location in locations:
print "# % 28s: %s" % location
# Create the new mysql data with mysql_import_db-like process
if not quiet:
print "# Setting up empty database and mysql tables..."
# Get bootstrap SQL statements
sql = list()
sql.append("CREATE DATABASE mysql;")
sql.append("USE mysql;")
innodb_disabled = False
if mysqld_options:
innodb_disabled = '--innodb=OFF' in mysqld_options
for sqlfile in [system_tables, system_tables_data, test_data_timezone,
help_data]:
lines = open(sqlfile, 'r').readlines()
for line in lines:
line = line.strip()
# Don't fail when InnoDB is turned off (Bug#16369955) (Ugly hack)
if (sqlfile == system_tables and
"SET @sql_mode_orig==@@SES" in line and innodb_disabled):
for line in lines:
if 'SET SESSION sql_mode=@@sql' in line:
break
sql.append(line)
# Bootstrap to set up the mysql tables
fnull = open(os.devnull, 'w')
cmd = [
mysqld_path,
"--no-defaults",
"--bootstrap",
"--datadir={0}".format(new_data),
"--basedir={0}".format(os.path.abspath(mysql_basedir)),
]
proc = None
if verbosity >= 1 and not quiet:
proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,
stdout=fnull, stderr=fnull)
proc.communicate('\n'.join(sql))
# Wait for subprocess to finish
res = proc.wait()
# Kill subprocess just in case it didn't finish - Ok if proc doesn't exist
if int(res) != 0:
if os.name == "posix":
try:
os.kill(proc.pid, subprocess.signal.SIGTERM)
except OSError:
raise UtilError("Failed to kill process with pid '{0}'"
"".format(proc.pid))
else:
ret_code = subprocess.call("taskkill /F /T /PID "
"{0}".format(proc.pid), shell=True)
# return code 0 means it was successful and 128 means it tried
# to kill a process that doesn't exist
if ret_code not in (0, 128):
raise UtilError("Failed to kill process with pid '{0}'. "
"Return code {1}".format(proc.pid,
ret_code))
# Drop the bootstrap file
if os.path.isfile("bootstrap.sql"):
os.unlink("bootstrap.sql")
# Start the instance
if not quiet:
print "# Starting new instance of the server..."
# If the user is not the same as the user running the script...
# and this is a Posix system... and we are running as root
if user_change_as_root(options):
subprocess.call(['chown', '-R', user, new_data])
subprocess.call(['chgrp', '-R', user, new_data])
cmd = [mysqld_path, '--no-defaults']
socket_path = os.path.join(new_data, 'mysql.sock')
# If socket path is too long, use mkdtemp to create a tmp dir and
# use it instead to store the socket
if os.name == 'posix' and len(socket_path) > MAX_SOCKET_PATH_SIZE:
socket_path = os.path.join(tempfile.mkdtemp(), 'mysql.sock')
if not quiet:
print("# WARNING: The socket file path '{0}' is too long (>{1}), "
"using '{2}' instead".format(
os.path.join(new_data, 'mysql.sock'),
MAX_SOCKET_PATH_SIZE, socket_path))
cmd.extend([
'--datadir={0}'.format(new_data),
'--tmpdir={0}'.format(new_data),
'--pid-file={0}'.format(os.path.join(new_data, "clone.pid")),
'--port={0}'.format(new_port),
'--server-id={0}'.format(options.get('new_id', 2)),
'--basedir={0}'.format(mysql_basedir),
'--socket={0}'.format(socket_path),
])
if user:
<filename>germany_compliance/germany_compliance/report/datev/datev.py
# Copyright (c) 2022, Frappe Technologies Private Limited and contributors
# For license information, please see license.txt
"""
Provide a report and downloadable CSV according to the German DATEV format.
- Query report showing only the columns that contain data, formatted nicely for
display to the user.
- CSV download functionality `download_datev_csv` that provides a CSV file with
all required columns. Used to import the data into the DATEV Software.
"""
import json
import frappe
from frappe import _
from erpnext.accounts.utils import get_fiscal_year
from germany_compliance.utils.datev.datev_constants import (
AccountNames,
DebtorsCreditors,
Transactions,
)
from germany_compliance.utils.datev.datev_csv import get_datev_csv, zip_and_download
COLUMNS = [
{
"label": "Umsatz (ohne Soll/Haben-Kz)",
"fieldname": "Umsatz (ohne Soll/Haben-Kz)",
"fieldtype": "Currency",
"width": 100
},
{
"label": "Soll/Haben-Kennzeichen",
"fieldname": "Soll/Haben-Kennzeichen",
"fieldtype": "Data",
"width": 100
},
{
"label": "Konto",
"fieldname": "Konto",
"fieldtype": "Data",
"width": 100
},
{
"label": "Gegenkonto (ohne BU-Schlüssel)",
"fieldname": "Gegenkonto (ohne BU-Schlüssel)",
"fieldtype": "Data",
"width": 100
},
{
"label": "BU-Schlüssel",
"fieldname": "BU-Schlüssel",
"fieldtype": "Data",
"width": 100
},
{
"label": "Belegdatum",
"fieldname": "Belegdatum",
"fieldtype": "Date",
"width": 100
},
{
"label": "Belegfeld 1",
"fieldname": "Belegfeld 1",
"fieldtype": "Data",
"width": 150
},
{
"label": "Buchungstext",
"fieldname": "Buchungstext",
"fieldtype": "Text",
"width": 300
},
{
"label": "Beleginfo - Art 1",
"fieldname": "Beleginfo - Art 1",
"fieldtype": "Link",
"options": "DocType",
"width": 100
},
{
"label": "Beleginfo - Inhalt 1",
"fieldname": "Beleginfo - Inhalt 1",
"fieldtype": "Dynamic Link",
"options": "Beleginfo - Art 1",
"width": 150
},
{
"label": "Beleginfo - Art 2",
"fieldname": "Beleginfo - Art 2",
"fieldtype": "Link",
"options": "DocType",
"width": 100
},
{
"label": "Beleginfo - Inhalt 2",
"fieldname": "Beleginfo - Inhalt 2",
"fieldtype": "Dynamic Link",
"options": "Beleginfo - Art 2",
"width": 150
},
{
"label": "Beleginfo - Art 3",
"fieldname": "Beleginfo - Art 3",
"fieldtype": "Link",
"options": "DocType",
"width": 100
},
{
"label": "Beleginfo - Inhalt 3",
"fieldname": "Beleginfo - Inhalt 3",
"fieldtype": "Dynamic Link",
"options": "Beleginfo - Art 3",
"width": 150
},
{
"label": "Beleginfo - Art 4",
"fieldname": "Beleginfo - Art 4",
"fieldtype": "Data",
"width": 100
},
{
"label": "Beleginfo - Inhalt 4",
"fieldname": "Beleginfo - Inhalt 4",
"fieldtype": "Data",
"width": 150
},
{
"label": "Beleginfo - Art 5",
"fieldname": "Beleginfo - Art 5",
"fieldtype": "Data",
"width": 150
},
{
"label": "Beleginfo - Inhalt 5",
"fieldname": "Beleginfo - Inhalt 5",
"fieldtype": "Data",
"width": 100
},
{
"label": "Beleginfo - Art 6",
"fieldname": "Beleginfo - Art 6",
"fieldtype": "Data",
"width": 150
},
{
"label": "Beleginfo - Inhalt 6",
"fieldname": "Beleginfo - Inhalt 6",
"fieldtype": "Date",
"width": 100
},
{
"label": "Fälligkeit",
"fieldname": "Fälligkeit",
"fieldtype": "Date",
"width": 100
}
]
def execute(filters=None):
"""Entry point for frappe."""
data = []
if filters and validate(filters):
fn = 'temporary_against_account_number'
filters[fn] = frappe.get_value('DATEV Settings', filters.get('company'), fn)
data = get_transactions(filters, as_dict=0)
return COLUMNS, data
def validate(filters):
"""Make sure all mandatory filters and settings are present."""
company = filters.get('company')
if not company:
frappe.throw(_('<b>Company</b> is a mandatory filter.'))
from_date = filters.get('from_date')
if not from_date:
frappe.throw(_('<b>From Date</b> is a mandatory filter.'))
to_date = filters.get('to_date')
if not to_date:
frappe.throw(_('<b>To Date</b> is a mandatory filter.'))
validate_fiscal_year(from_date, to_date, company)
if not frappe.db.exists('DATEV Settings', filters.get('company')):
msg = 'Please create DATEV Settings for Company {}'.format(filters.get('company'))
frappe.log_error(msg, title='DATEV Settings missing')
return False
return True
def validate_fiscal_year(from_date, to_date, company):
from_fiscal_year = get_fiscal_year(date=from_date, company=company)
to_fiscal_year = get_fiscal_year(date=to_date, company=company)
if from_fiscal_year != to_fiscal_year:
frappe.throw(_('Dates {} and {} are not in the same fiscal year.').format(from_date, to_date))
def get_transactions(filters, as_dict=1):
def run(params_method, filters):
extra_fields, extra_joins, extra_filters = params_method(filters)
return run_query(filters, extra_fields, extra_joins, extra_filters, as_dict=as_dict)
def sort_by(row):
# "Belegdatum" is in the fifth column when list format is used
return row["Belegdatum" if as_dict else 5]
type_map = {
# specific query methods for some voucher types
"Payment Entry": get_payment_entry_params,
"Sales Invoice": get_sales_invoice_params,
"Purchase Invoice": get_purchase_invoice_params
}
only_voucher_type = filters.get("voucher_type")
transactions = []
for voucher_type, get_voucher_params in type_map.items():
if only_voucher_type and only_voucher_type != voucher_type:
continue
transactions.extend(run(params_method=get_voucher_params, filters=filters))
if not only_voucher_type or only_voucher_type not in type_map:
# generic query method for all other voucher types
filters["exclude_voucher_types"] = type_map.keys()
transactions.extend(run(params_method=get_generic_params, filters=filters))
return sorted(transactions, key=sort_by)
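# Illustrative call (hypothetical filter values): the dispatcher above runs the three
# voucher-specific param methods plus the generic one, then sorts the rows by "Belegdatum".
#
#     rows = get_transactions({"company": "My Company GmbH",
#                              "from_date": "2022-01-01", "to_date": "2022-12-31",
#                              "temporary_against_account_number": "9999"}, as_dict=1)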
def get_payment_entry_params(filters):
extra_fields = """
, 'Zahlungsreferenz' as 'Beleginfo - Art 5'
, pe.reference_no as 'Beleginfo - Inhalt 5'
, 'Buchungstag' as 'Beleginfo - Art 6'
, pe.reference_date as 'Beleginfo - Inhalt 6'
, '' as 'Fälligkeit'
"""
extra_joins = """
LEFT JOIN `tabPayment Entry` pe
ON gl.voucher_no = pe.name
"""
extra_filters = """
AND gl.voucher_type = 'Payment Entry'
"""
return extra_fields, extra_joins, extra_filters
def get_sales_invoice_params(filters):
extra_fields = """
, '' as 'Beleginfo - Art 5'
, '' as 'Beleginfo - Inhalt 5'
, '' as 'Beleginfo - Art 6'
, '' as 'Beleginfo - Inhalt 6'
, si.due_date as 'Fälligkeit'
"""
extra_joins = """
LEFT JOIN `tabSales Invoice` si
ON gl.voucher_no = si.name
"""
extra_filters = """
AND gl.voucher_type = 'Sales Invoice'
"""
return extra_fields, extra_joins, extra_filters
def get_purchase_invoice_params(filters):
extra_fields = """
, 'Lieferanten-Rechnungsnummer' as 'Beleginfo - Art 5'
, pi.bill_no as 'Beleginfo - Inhalt 5'
, 'Lieferanten-Rechnungsdatum' as 'Beleginfo - Art 6'
, pi.bill_date as 'Beleginfo - Inhalt 6'
, pi.due_date as 'Fälligkeit'
"""
extra_joins = """
LEFT JOIN `tabPurchase Invoice` pi
ON gl.voucher_no = pi.name
"""
extra_filters = """
AND gl.voucher_type = 'Purchase Invoice'
"""
return extra_fields, extra_joins, extra_filters
def get_generic_params(filters):
# produce empty fields so all rows will have the same length
extra_fields = """
, '' as 'Beleginfo - Art 5'
, '' as 'Beleginfo - Inhalt 5'
, '' as 'Beleginfo - Art 6'
, '' as 'Beleginfo - Inhalt 6'
, '' as 'Fälligkeit'
"""
extra_joins = ""
if filters.get("exclude_voucher_types"):
# exclude voucher types that are queried by a dedicated method
exclude = "({})".format(', '.join("'{}'".format(key) for key in filters.get("exclude_voucher_types")))
extra_filters = "AND gl.voucher_type NOT IN {}".format(exclude)
# if voucher type filter is set, allow only this type
if filters.get("voucher_type"):
extra_filters += " AND gl.voucher_type = %(voucher_type)s"
return extra_fields, extra_joins, extra_filters
def run_query(filters, extra_fields, extra_joins, extra_filters, as_dict=1):
"""
Get a list of accounting entries.
Select GL Entries joined with Account and Party Account in order to get the
account numbers. Returns a list of accounting entries.
Arguments:
filters -- dict of filters to be passed to the sql query
as_dict -- return as list of dicts [0,1]
"""
query = """
SELECT
/* either debit or credit amount; always positive */
case gl.debit when 0 then gl.credit else gl.debit end as 'Umsatz (ohne Soll/Haben-Kz)',
/* 'H' when credit, 'S' when debit */
case gl.debit when 0 then 'H' else 'S' end as 'Soll/Haben-Kennzeichen',
/* account number or, if empty, party account number */
acc.account_number as 'Konto',
/* against number or, if empty, party against number */
%(temporary_against_account_number)s as 'Gegenkonto (ohne BU-Schlüssel)',
/* disable automatic VAT deduction */
'40' as 'BU-Schlüssel',
gl.posting_date as 'Belegdatum',
gl.voucher_no as 'Belegfeld 1',
REPLACE(LEFT(gl.remarks, 60), '\n', ' ') as 'Buchungstext',
gl.voucher_type as 'Beleginfo - Art 1',
gl.voucher_no as 'Beleginfo - Inhalt 1',
gl.against_voucher_type as 'Beleginfo - Art 2',
gl.against_voucher as 'Beleginfo - Inhalt 2',
gl.party_type as 'Beleginfo - Art 3',
gl.party as 'Beleginfo - Inhalt 3',
case gl.party_type when 'Customer' then 'Debitorennummer' when 'Supplier' then 'Kreditorennummer' else NULL end as 'Beleginfo - Art 4',
par.debtor_creditor_number as 'Beleginfo - Inhalt 4'
{extra_fields}
FROM `tabGL Entry` gl
/* Kontonummer */
LEFT JOIN `tabAccount` acc
ON gl.account = acc.name
LEFT JOIN `tabParty Account` par
ON par.parent = gl.party
AND par.parenttype = gl.party_type
AND par.company = %(company)s
{extra_joins}
WHERE gl.company = %(company)s
AND DATE(gl.posting_date) >= %(from_date)s
AND DATE(gl.posting_date) <= %(to_date)s
{extra_filters}
ORDER BY 'Belegdatum', gl.voucher_no""".format(
extra_fields=extra_fields,
extra_joins=extra_joins,
extra_filters=extra_filters
)
gl_entries = frappe.db.sql(query, filters, as_dict=as_dict)
return gl_entries
def get_customers(filters):
"""
Get a list of Customers.
Arguments:
filters -- dict of filters to be passed to the sql query
"""
return frappe.db.sql("""
SELECT
par.debtor_creditor_number as 'Konto',
CASE cus.customer_type
WHEN 'Company' THEN cus.customer_name
ELSE null
END as 'Name (Adressatentyp Unternehmen)',
CASE cus.customer_type
WHEN 'Individual' THEN TRIM(SUBSTR(cus.customer_name, LOCATE(' ', cus.customer_name)))
ELSE null
END as 'Name (Adressatentyp natürl. Person)',
CASE cus.customer_type
WHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(cus.customer_name, ' ', 1), ' ', -1)
ELSE null
END as 'Vorname (Adressatentyp natürl. Person)',
CASE cus.customer_type
WHEN 'Individual' THEN '1'
WHEN 'Company' THEN '2'
ELSE '0'
END as 'Adressatentyp',
adr.address_line1 as 'Straße',
adr.pincode as 'Postleitzahl',
adr.city as 'Ort',
UPPER(country.code) as 'Land',
adr.address_line2 as 'Adresszusatz',
adr.email_id as 'E-Mail',
adr.phone as 'Telefon',
adr.fax as 'Fax',
cus.website as 'Internet',
cus.tax_id as 'Steuernummer'
FROM `tabCustomer` cus
left join `tabParty Account` par
on par.parent = cus.name
and par.parenttype = 'Customer'
and par.company = %(company)s
left join `tabDynamic Link` dyn_adr
on dyn_adr.link_name = cus.name
and dyn_adr.link_doctype = 'Customer'
and dyn_adr.parenttype = 'Address'
left join `tabAddress` adr
on adr.name = dyn_adr.parent
and adr.is_primary_address = '1'
left join `tabCountry` country
on country.name = adr.country
WHERE adr.is_primary_address = '1'
""", filters, as_dict=1)
def get_suppliers(filters):
"""
Get a list of Suppliers.
Arguments:
filters -- dict of filters to be passed to the sql query
"""
return frappe.db.sql("""
SELECT
par.debtor_creditor_number as 'Konto',
CASE sup.supplier_type
WHEN 'Company' THEN sup.supplier_name
ELSE null
END as 'Name (Adressatentyp Unternehmen)',
CASE sup.supplier_type
WHEN 'Individual' THEN TRIM(SUBSTR(sup.supplier_name, LOCATE(' ', sup.supplier_name)))
ELSE null
END as 'Name (Adressatentyp natürl. Person)',
CASE sup.supplier_type
WHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(sup.supplier_name, ' ', 1), ' ', -1)
ELSE null
END as 'Vorname (Adressatentyp natürl. Person)',
CASE sup.supplier_type
WHEN 'Individual' THEN '1'
WHEN 'Company' THEN '2'
ELSE '0'
END as 'Adressatentyp',
adr.address_line1 as 'Straße',
adr.pincode as 'Postleitzahl',
adr.city as 'Ort',
UPPER(country.code) as 'Land',
adr.address_line2 as 'Adresszusatz',
adr.email_id as 'E-Mail',
adr.phone as 'Telefon',
adr.fax as 'Fax',
sup.website as 'Internet',
sup.tax_id as 'Steuernummer',
case sup.on_hold when 1 then sup.release_date else null end as 'Zahlungssperre bis'
FROM `tabSupplier` sup
left join `tabParty Account` par
on par.parent = sup.name
and par.parenttype = 'Supplier'
and par.company = %(company)s
left join `tabDynamic Link` dyn_adr
on dyn_adr.link_name = sup.name
and dyn_adr.link_doctype = 'Supplier'
and dyn_adr.parenttype = 'Address'
left join `tabAddress` adr
on adr.name = dyn_adr.parent
and adr.is_primary_address = '1'
left join `tabCountry` country
on country.name = adr.country
WHERE adr.is_primary_address = '1'
""", filters, as_dict=1)
def get_account_names(filters):
return frappe.db.sql("""
SELECT
account_number as 'Konto',
LEFT(account_name, 40) as 'Kontenbeschriftung',
'de-DE' as 'Sprach-ID'
FROM `tabAccount`
WHERE company = %(company)s
AND is_group = 0
AND account_number != ''
""", filters, as_dict=1)
@frappe.whitelist()
def download_datev_csv(filters):
"""
Provide accounting entries for download in DATEV format.
Validate the filters, get the data, produce the CSV file and provide it for
download. Can be called like this:
GET /api/method/germany_compliance.germany_compliance.report.datev.datev.download_datev_csv
Arguments / Params:
filters -- dict of filters to be passed to the sql query
"""
if isinstance(filters, str):
filters = json.loads(filters)
validate(filters)
company = filters.get('company')
fiscal_year = get_fiscal_year(date=filters.get('from_date'), company=company)
filters['fiscal_year_start'] = fiscal_year[1]
# set chart of accounts used
coa = frappe.get_value('Company', company, 'chart_of_accounts')
filters['skr'] = '04' if 'SKR04' in coa else ('03' if 'SKR03' in coa else '')
datev_settings = frappe.get_doc('DATEV Settings', company)
filters['account_number_length'] = datev_settings.account_number_length
filters['temporary_against_account_number'] = datev_settings.temporary_against_account_number
transactions = get_transactions(filters)
account_names = get_account_names(filters)
# Called when the mouse is pressed; if the button is clicked (not greyed out and not on cooldown), clicked is set to True and last_click set to 20
def on_click(self, mouse_x, mouse_y):
if self.rect.collidepoint(mouse_x, mouse_y) and self.last_click == 0 and not self.grey:
self.clicked = True
self.last_click = 20
# Called every frame, checks if mouse is inside button but doesn't need to be clicked
def on_hover(self, mouse_x, mouse_y):
# If in button, make border thicker and make background slightly lighter
if self.rect.collidepoint(mouse_x, mouse_y) and not self.grey:
self.border = 2
self.bg_colour = (100, 100, 100)
# If not in button, set border and colour back to normal
else:
self.border = 1
self.bg_colour = light_grey
# Called every frame
def update(self, mouse_x, mouse_y):
# Runs method to check if mouse is inside button
self.on_hover(mouse_x, mouse_y)
# While the click cooldown (last_click) is running, decrement it and reset clicked to False so a click only registers for one frame
if self.last_click != 0:
self.last_click -= 1
self.clicked = False
def update_grey(self):
# If the button is greyed out, make background colour darker
if self.grey:
self.bg_colour = (150, 150, 150)
# Try to make text colour darker
# Uses try statement because Button is parent class of ImageButton
# ImageButton has no text attribute
try:
self.text_colour = darkGrey
self.txt_obj = self.font.render(self.text, 1, self.text_colour)
except AttributeError:
pass
# If not grey, set background colour and text colour to normal
else:
self.bg_colour = light_grey
try:
self.text_colour = black
self.txt_obj = self.font.render(self.text, 1, self.text_colour)
except AttributeError:
pass
def draw(self, screen):
# Draws the background rectangle of the button
pygame.draw.rect(screen, self.bg_colour, (self.x, self.y, self.width, self.height))
# Draws the button text
screen.blit(self.txt_obj, (self.x + 3, self.y + 3))
# Draws the border
pygame.draw.lines(screen, black, True, ((self.x, self.y), (self.x, self.y2), (self.x2, self.y2),
(self.x2, self.y)), self.border)
# Child of the button class but displays an image instead of a text label
# Inherits all methods and attributes from Button which also inherits from Element
class ImageButton(Button):
# Takes in a filepath to an image instead of text label
def __init__(self, x, y, font, filepath):
# Tries to open the image specified by 'filepath' in the /img folder
# The root of the /img folder is the folder where this .py file is
try:
self.image = pygame.image.load("img/" + filepath + ".png")
# Validation: Tell user if image cannot be found
except FileNotFoundError:
print("Could not find file at img/" + filepath + ".png")
# Get width and height of image
size = self.image.get_rect().size
# Dimensions of button is dimensions of image with 10 pixels of padding in each direction
self.width = size[0] + 10
self.height = size[1] + 10
Element.__init__(self, x, y, self.width, self.height, font)
self.border = 1
self.clicked = False
self.last_click = 0
self.grey = False
def draw(self, screen):
# Draw the background
pygame.draw.rect(screen, self.bg_colour, (self.x, self.y, self.width, self.height))
# Draw the image
screen.blit(self.image, (self.x + 5, self.y + 5))
# Draw the borders
pygame.draw.lines(screen, black, True, ((self.x, self.y), (self.x, self.y2), (self.x2, self.y2),
(self.x2, self.y)), self.border)
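# Usage sketch (hypothetical asset name): the constructor resolves the path relative to
# the /img folder next to this file, so ImageButton(20, 20, font, "play") loads
# "img/play.png" and sizes the button to the image plus 10 pixels of padding in each direction.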
# Class for a slider that has a small triangle that moves along a bar when clicked and dragged
# The output is always between 2 limits, given by parameter limits, a tuple of length 2
# Lower limit is limit[0], upper limit is limit[1]
# starting_pos determines how far along the line the pointer starts where 0 is fully left
# 1 is fully right and 0.5 is halfway. Defaults to 0.5
# dec_points - how many decimal points the text should render, defaults to 0 (integers only)
# Inherits all methods and attributes from Element
class Slider(Element):
# Limits = the lowest and highest points given as a tuple
# StartingPos = how far along the bar the pointer is at when program starts
def __init__(self, x, y, width, height, font, limits, starting_pos=0.5, dec_points=0):
Element.__init__(self, x, y, width, height, font)
self.limits = limits
# line_y = The y value at which the line starts
self.line_y = self.y + (self.height * 0.8)
self.starting_pos = starting_pos
self.dec_points = dec_points
# 'pointer' is the raw pixel position of the x co-ord of the middle of the triangular pointer
self.pointer = self.x + (self.width * self.starting_pos)
# Value is the output of the slider
self.value = self.get_pos()
# txt is the text object that renders the value of the slider
self.txt = self.update_txt()
# true when the slider itself is clicked
self.clicked = False
# true when the pointer is clicked
self.tri_clicked = False
# Pygame Rect object for the triangle pointer
self.tri_rect = pygame.Rect(self.pointer - 10, self.y + 2, 20, (self.line_y - 2) - (self.y + 2))
# Called when a slider object is added to a Menu object
# Updates all x and y positions of the triangle and text
def on_menu_add(self):
self.line_y = self.y + (self.height * 0.8)
self.pointer = self.x + (self.width * self.starting_pos)
self.value = self.get_pos()
self.rect = pygame.Rect(self.x, self.y, self.width, self.height)
self.tri_rect = pygame.Rect(self.pointer - 10, self.y + 2, 20, (self.line_y - 2) - (self.y + 2))
self.update_txt()
# Given the raw pointer position relative to the top left corner of the screen
# Gets the value from the slider and returns it
def get_pos(self):
# Gets the proportion of slider to left of pointer
# Eg. if 10% of slider to left of pointer, result is 0.1
pos = (self.pointer - self.x) / self.width
# Multiplies proportion by the difference between the limits
# This gives a proportional value of how far the pointer is from the left limit
pos = pos * (self.limits[1] - self.limits[0])
# Adds the lower limit
pos += self.limits[0]
return pos
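# Worked example of the mapping above (hypothetical numbers): with x=100, width=200,
# limits=(0, 50) and pointer=150, pos = (150 - 100) / 200 = 0.25, then
# 0.25 * (50 - 0) + 0 = 12.5, so the slider reports 12.5.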
# Updates the text object of the value above the pointer
def update_txt(self):
if self.dec_points == 0:
txt = self.font.render(str(round(self.value)), 1, black)
else:
txt = self.font.render(str(round(self.value, self.dec_points)), 1, black)
return txt
def draw(self, screen):
# Draws bottom line
pygame.draw.rect(screen, black, (self.x, self.line_y, self.width, self.y2 - self.line_y))
# Draws triangular pointer 2 pixels above the line
pygame.draw.polygon(screen, black, ((self.pointer, self.line_y - 2), (self.pointer - 10, self.y + 2),
(self.pointer + 10, self.y + 2)))
# Draws value above pointer
self.value = self.get_pos()
self.txt = self.update_txt()
screen.blit(self.txt, (self.pointer + 12, self.y))
# On click: tri_clicked = True if inside the triangular pointer, otherwise clicked = True if inside the slider area
def on_click(self, mouse_x, mouse_y):
if self.tri_rect.collidepoint(mouse_x, mouse_y):
self.tri_clicked = True
elif self.rect.collidepoint(mouse_x, mouse_y):
self.clicked = True
# sets clicked booleans to false when mouse button released
def on_unclick(self):
self.tri_clicked = False
self.clicked = False
# Run every frame. Only needs x co-ord of mouse
# Requires both co-ords but sets y to None as default to allow overriding of method of same name in Element
def update(self, mouse_x, mouse_y=None):
if self.tri_clicked or self.clicked:
# If mouse x co-ord further than upper boundary
if mouse_x > self.x2:
# Pointer = upper boundary
self.pointer = self.x2
# If mouse x co-ord further than lower boundary
elif mouse_x < self.x:
# Pointer = lower boundary
self.pointer = self.x
# Otherwise, mouse x co-ord is between the 2 boundaries
# pointer = mouse x co-ord
else:
self.pointer = mouse_x
self.tri_rect = pygame.Rect(self.pointer - 10, self.y + 2, 20, (self.line_y - 2) - (self.y + 2))
# Class for a text entry box
# Inherits all methods and attributes from Element
class Textbox(Element):
# Static tuple of pygame character codes that have a different key_name than a letter
# Eg. key_name of the g key is g, therefore not included in tuple
special_chars = ("space", "escape", "left ctrl", "right ctrl", "return", "left alt", "right alt", "caps lock",
"numlock", "scroll lock", "tab", "left super", "right super", "menu", "f1", "f2", "f3", "f4", "f5",
"f6", "f7", "f8", "f9", "f10", "f11", "f12", "insert", "home", "delete", "end", "page up",
"page down", "pause")
# Dictionary of the keys that have different characters when shift is pressed with them
# and the corresponding characters
shifts = {"1": "!", "2": "\"", "3": "£", "4": "$", "5": "%", "6": "^", "7": "&", "8": "*", "9": "(", "0": ")",
"-": "_", "=": "+", "#": "~", "[": | |
),
grants as (
select coalesce(
string_agg(format(
E'GRANT %s ON FUNCTION {0} TO %s%s;\n',
privilege_type,
case grantee
when 'PUBLIC' then 'PUBLIC'
else quote_ident(grantee)
end,
case is_grantable
when 'YES' then ' WITH GRANT OPTION'
else ''
end), ''),
'') as text
from privileges
)
select (select text from createfunction) ||
(select text from alterowner) ||
(select text from grants)
'''.format(p_function))
def GetDDLProcedure(self, p_procedure):
return self.v_connection.ExecuteScalar('''
with obj as (
SELECT p.oid,
p.proname AS name,
n.nspname AS namespace,
pg_get_userbyid(p.proowner) AS owner,
'{0}'::text AS sql_identifier
FROM pg_proc p JOIN pg_namespace n ON n.oid=p.pronamespace
WHERE p.prokind = 'p' AND p.oid = '{0}'::text::regprocedure
),
createfunction as (
select substring(body from 1 for length(body)-1) || E';\n\n' as text
from (
select pg_get_functiondef(sql_identifier::regprocedure) as body
from obj
) x
),
alterowner as (
select
'ALTER PROCEDURE '||sql_identifier||
' OWNER TO '||quote_ident(owner)||E';\n\n' as text
from obj
),
privileges as (
select (u_grantor.rolname)::information_schema.sql_identifier as grantor,
(grantee.rolname)::information_schema.sql_identifier as grantee,
(p.privilege_type)::information_schema.character_data as privilege_type,
(case when (pg_has_role(grantee.oid, p.proowner, 'USAGE'::text) or p.is_grantable)
then 'YES'::text
else 'NO'::text
end)::information_schema.yes_or_no AS is_grantable
from (
select p.pronamespace,
p.proowner,
(aclexplode(COALESCE(p.proacl, acldefault('f', p.proowner)))).grantor as grantor,
(aclexplode(COALESCE(p.proacl, acldefault('f', p.proowner)))).grantee as grantee,
(aclexplode(COALESCE(p.proacl, acldefault('f', p.proowner)))).privilege_type as privilege_type,
(aclexplode(COALESCE(p.proacl, acldefault('f', p.proowner)))).is_grantable as is_grantable
from pg_proc p
where p.prokind = 'p'
and p.oid = '{0}'::regprocedure
) p
inner join pg_namespace n
on n.oid = p.pronamespace
inner join pg_roles u_grantor
on u_grantor.oid = p.grantor
inner join (
select r.oid,
r.rolname
from pg_roles r
union all
select (0)::oid AS oid,
'PUBLIC'::name
) grantee
on grantee.oid = p.grantee
),
grants as (
select coalesce(
string_agg(format(
E'GRANT %s ON FUNCTION {0} TO %s%s;\n',
privilege_type,
case grantee
when 'PUBLIC' then 'PUBLIC'
else quote_ident(grantee)
end,
case is_grantable
when 'YES' then ' WITH GRANT OPTION'
else ''
end), ''),
'') as text
from privileges
)
select (select text from createfunction) ||
(select text from alterowner) ||
(select text from grants)
'''.format(p_procedure))
def GetDDLConstraint(self, p_schema, p_table, p_object):
return self.v_connection.ExecuteScalar('''
with cs as (
select
'ALTER TABLE ' || text(regclass(c.conrelid)) ||
' ADD CONSTRAINT ' || quote_ident(c.conname) ||
E'\n ' || pg_get_constraintdef(c.oid, true) as sql
from pg_constraint c
join pg_class t
on t.oid = c.conrelid
join pg_namespace n
on t.relnamespace = n.oid
where quote_ident(n.nspname) = '{0}'
and quote_ident(t.relname) = '{1}'
and quote_ident(c.conname) = '{2}'
)
select coalesce(string_agg(sql,E';\n') || E';\n\n','') as text
from cs
'''.format(p_schema, p_table, p_object))
def GetDDLUserMapping(self, p_server, p_object):
if p_object == 'PUBLIC':
return self.v_connection.ExecuteScalar('''
select format(E'CREATE USER MAPPING FOR PUBLIC\n SERVER %s%s;\n',
quote_ident(s.srvname),
(select (case when s is not null and s <> ''
then format(E'\n OPTIONS (%s)', s)
else ''
end)
from (
select array_to_string(array(
select format('%s %s', a[1], quote_literal(a[2]))
from (
select string_to_array(unnest(u.umoptions), '=') as a
from pg_user_mapping u
inner join pg_foreign_server s
on s.oid = u.umserver
where u.umuser = 0
and quote_ident(s.srvname) = '{0}'
) x
), ', ') as s) x))
from pg_user_mapping u
inner join pg_foreign_server s
on s.oid = u.umserver
where u.umuser = 0
and quote_ident(s.srvname) = '{0}'
'''.format(p_server))
else:
return self.v_connection.ExecuteScalar('''
select format(E'CREATE USER MAPPING FOR %s\n SERVER %s%s;\n',
quote_ident(r.rolname),
quote_ident(s.srvname),
(select (case when s is not null and s <> ''
then format(E'\n OPTIONS (%s)', s)
else ''
end)
from (
select array_to_string(array(
select format('%s %s', a[1], quote_literal(a[2]))
from (
select string_to_array(unnest(u.umoptions), '=') as a
from pg_user_mapping u
inner join pg_foreign_server s
on s.oid = u.umserver
inner join pg_roles r
on r.oid = u.umuser
where quote_ident(s.srvname) = '{0}'
and quote_ident(r.rolname) = '{1}'
) x
), ', ') as s) x))
from pg_user_mapping u
inner join pg_foreign_server s
on s.oid = u.umserver
inner join pg_roles r
on r.oid = u.umuser
where quote_ident(s.srvname) = '{0}'
and quote_ident(r.rolname) = '{1}'
'''.format(p_server, p_object))
def GetDDLForeignServer(self, p_object):
return self.v_connection.ExecuteScalar('''
WITH privileges AS (
SELECT (u_grantor.rolname)::information_schema.sql_identifier AS grantor,
(grantee.rolname)::information_schema.sql_identifier AS grantee,
(current_database())::information_schema.sql_identifier AS srv_catalog,
(c.srvname)::information_schema.sql_identifier AS srv_name,
(c.prtype)::information_schema.character_data AS privilege_type,
(
CASE
WHEN (pg_has_role(grantee.oid, c.srvowner, 'USAGE'::text) OR c.grantable) THEN 'YES'::text
ELSE 'NO'::text
END)::information_schema.yes_or_no AS is_grantable,
(
CASE
WHEN (c.prtype = 'SELECT'::text) THEN 'YES'::text
ELSE 'NO'::text
END)::information_schema.yes_or_no AS with_hierarchy
FROM ( SELECT s.oid,
s.srvname,
s.srvowner,
(aclexplode(COALESCE(s.srvacl, acldefault('r', s.srvowner)))).grantor AS grantor,
(aclexplode(COALESCE(s.srvacl, acldefault('r', s.srvowner)))).grantee AS grantee,
(aclexplode(COALESCE(s.srvacl, acldefault('r', s.srvowner)))).privilege_type AS privilege_type,
(aclexplode(COALESCE(s.srvacl, acldefault('r', s.srvowner)))).is_grantable AS is_grantable
FROM pg_foreign_server s
WHERE s.srvname = '{0}') c(oid, srvname, srvowner, grantor, grantee, prtype, grantable),
pg_roles u_grantor,
( SELECT pg_roles.oid,
pg_roles.rolname
FROM pg_roles
UNION ALL
SELECT (0)::oid AS oid,
'PUBLIC'::name) grantee(oid, rolname)
WHERE (c.grantee = grantee.oid) AND (c.grantor = u_grantor.oid)
AND (pg_has_role(u_grantor.oid, 'USAGE'::text) OR pg_has_role(grantee.oid, 'USAGE'::text) OR (grantee.rolname = 'PUBLIC'::name))
),
grants as (
SELECT
coalesce(
string_agg(format(
E'GRANT %s ON %s TO %s%s;\n',
privilege_type,
'FOREIGN SERVER {0}',
case grantee
when 'PUBLIC' then 'PUBLIC'
else quote_ident(grantee)
end,
case is_grantable
when 'YES' then ' WITH GRANT OPTION'
else ''
end), ''),
'') as text
FROM privileges g
INNER JOIN pg_foreign_server s
ON s.srvname = g.srv_name
INNER JOIN pg_roles r
ON r.oid = s.srvowner
WHERE g.grantee <> r.rolname
)
select format(E'CREATE SERVER %s%s%s\n FOREIGN DATA WRAPPER %s%s;\n\nALTER SERVER %s OWNER TO %s;\n\n%s',
quote_ident(s.srvname),
(case when s.srvtype is not null
then format(E'\n TYPE %s\n', quote_literal(s.srvtype))
else ''
end),
(case when s.srvversion is not null
then format(E'\n VERSION %s\n', quote_literal(s.srvversion))
else ''
end),
w.fdwname,
(case when (select array_to_string(array(
select format('%s %s', a[1], quote_literal(a[2]))
from (
select string_to_array(unnest(s.srvoptions), '=') as a
from pg_foreign_server s
inner join pg_foreign_data_wrapper w
on w.oid = s.srvfdw
inner join pg_roles r
on r.oid = s.srvowner
where quote_ident(s.srvname) = '{0}'
) x
), ', ')) != ''
then format('\n OPTIONS ( %s )',
(select array_to_string(array(
select format('%s %s', a[1], quote_literal(a[2]))
from (
select string_to_array(unnest(s.srvoptions), '=') as a
from pg_foreign_server s
inner join pg_foreign_data_wrapper w
on w.oid = s.srvfdw
inner join pg_roles r
on r.oid = s.srvowner
where quote_ident(s.srvname) = '{0}'
) x
), ', ')))
else ''
end),
quote_ident(s.srvname),
quote_ident(r.rolname),
g.text
)
from pg_foreign_server s
inner join pg_foreign_data_wrapper w
on w.oid = s.srvfdw
inner join pg_roles r
on r.oid = s.srvowner
inner join grants g on 1=1
where quote_ident(s.srvname) = '{0}'
'''.format(p_object))
def GetDDLForeignDataWrapper(self, p_object):
return self.v_connection.ExecuteScalar('''
WITH privileges AS (
SELECT (u_grantor.rolname)::information_schema.sql_identifier AS grantor,
(grantee.rolname)::information_schema.sql_identifier AS grantee,
(current_database())::information_schema.sql_identifier AS fdw_catalog,
(c.fdwname)::information_schema.sql_identifier AS fdw_name,
(c.prtype)::information_schema.character_data AS privilege_type,
(
CASE
WHEN (pg_has_role(grantee.oid, c.fdwowner, 'USAGE'::text) OR c.grantable) THEN 'YES'::text
ELSE 'NO'::text
END)::information_schema.yes_or_no AS is_grantable,
(
CASE
WHEN (c.prtype = 'SELECT'::text) THEN 'YES'::text
ELSE 'NO'::text
END)::information_schema.yes_or_no AS with_hierarchy
FROM ( SELECT w.oid,
w.fdwname,
w.fdwowner,
(aclexplode(COALESCE(w.fdwacl, acldefault('r', w.fdwowner)))).grantor AS grantor,
(aclexplode(COALESCE(w.fdwacl, acldefault('r', w.fdwowner)))).grantee AS grantee,
(aclexplode(COALESCE(w.fdwacl, acldefault('r', w.fdwowner)))).privilege_type AS privilege_type,
(aclexplode(COALESCE(w.fdwacl, acldefault('r', w.fdwowner)))).is_grantable AS is_grantable
FROM pg_foreign_data_wrapper w
WHERE w.fdwname = '{0}') c(oid, fdwname, fdwowner, grantor, grantee, prtype, grantable),
pg_roles u_grantor,
( SELECT pg_roles.oid,
pg_roles.rolname
FROM pg_roles
UNION ALL
SELECT (0)::oid AS oid,
'PUBLIC'::name) grantee(oid, rolname)
WHERE (c.grantee = grantee.oid) AND (c.grantor = u_grantor.oid)
AND (pg_has_role(u_grantor.oid, 'USAGE'::text) OR pg_has_role(grantee.oid, 'USAGE'::text) OR (grantee.rolname = 'PUBLIC'::name))
),
grants as (
SELECT
coalesce(
string_agg(format(
E'GRANT %s ON %s TO %s%s;\n',
privilege_type,
'FOREIGN DATA WRAPPER {0}',
case grantee
when 'PUBLIC' then 'PUBLIC'
else quote_ident(grantee)
end,
case is_grantable
when 'YES' then ' WITH GRANT OPTION'
else ''
end), ''),
'') as text
FROM privileges g
INNER JOIN pg_foreign_data_wrapper w
ON w.fdwname = g.fdw_name
INNER JOIN pg_roles r
ON r.oid = w.fdwowner
WHERE g.grantee <> r.rolname
)
select format(E'CREATE FOREIGN DATA WRAPPER %s%s%s%s;\n\nALTER FOREIGN DATA WRAPPER %s OWNER TO %s;\n\n%s',
w.fdwname,
(case when w.fdwhandler <> 0
then format(E'\n HANDLER %s', quote_literal(h.proname))
else E'\n NO HANDLER'
end),
(case when w.fdwvalidator <> 0
then format(E'\n VALIDATOR %s', quote_literal(v.proname))
else E'\n NO VALIDATOR'
end),
(case when (select array_to_string(array(
select format('%s %s', a[1], quote_literal(a[2]))
from (
select string_to_array(unnest(w.fdwoptions), '=') as a
from pg_foreign_data_wrapper w
inner join pg_roles r
on r.oid = w.fdwowner
where w.fdwname = '{0}'
) x
), ', ')) <> ''::text
then format('\n OPTIONS ( %s )',
(select array_to_string(array(
select format('%s %s', a[1], quote_literal(a[2]))
from (
select string_to_array(unnest(w.fdwoptions), '=') as a
from pg_foreign_data_wrapper w
inner join pg_roles r
on r.oid = w.fdwowner
where w.fdwname = '{0}'
) x
), ', ')))
else ''
end),
w.fdwname,
quote_ident(r.rolname),
g.text
| |
tmp = {}
command = {}
command_list = []
self.queue_dict = {}
for i in args:
tmp[i] = self.command_dict[i]
for k, v in tmp.items():
command_list = command_list + v
for i in command_list:
command = dict(command, **i)
for i in command:
self.queue_dict[i] = Queue.Queue()
# command = [dict(command, **command_list[i]) for i in args][0]
while True:
try:
self.serial_command_queue.get_nowait()
except Queue.Empty:
break
self.serial_command_queue.put_nowait((False, ""))  # standby log analysis: flush any leftover data
for k, v in self.queue_dict.items():
while True:
try:
v.get_nowait()
except Queue.Empty:
break
while True:
try:
self.serial_result_queue.get_nowait()
except Queue.Empty:
break
self.serial_command_queue.put_nowait((True, command))
self.check_flag = 1
def check_serial_result(self):
self.serial_command_queue.put_nowait((False, ""))
while True:
if not self.serial_command_queue.qsize():
break
time.sleep(0.1)
if self.check_flag:
while True:
try:
tmp = self.serial_result_queue.get_nowait()
except Queue.Empty:
break
for k, v in self.queue_dict.items():
if k in tmp:
v.put_nowait(tmp)
self.check_flag = 0
# Check the power button state
def check_button_state(self, *args, t_list=False, wait=False):
"""
Extract all relay states from the message queue, filter them, and return the switch states in chronological order.
:return: [time, 3-level switch state "000"]
example list message: [([2018-01-03 09:47:25:957]_f133u_uart_recv_event: cha_ru [2018-01-03 09:47:25:957]FF 02 00 07 09 FE cha_ru )]
"""
key = "power"
if wait:
while True:
if self.serial_result_queue.qsize():
break
time.sleep(1)
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = bin(int(re.findall("FF .+ (.+?) .+? FE", serial_result)[0], 16))[2:].zfill(4)
now_time = time.mktime(time.strptime(serial_result[1:20], "%Y-%m-%d %X"))
tmp.append([now_time, value[1:]])
result = {}
arg = set(args)
if t_list:
for i in arg:
result[i] = self.get_last_device_state(tmp, i)
else:
for i in arg:
result[i] = [None, [None, None, None]]
for ii in tmp:
if i - 6 < ii[0] < i + 6:
result[i] = ii
if wait:
result = tmp
self.debug.info("btn_list: %s" % result)
self.debug.info("btn_dict: %s" % result)
return result
# Check the cycle timer setting state
def check_set_cycle_timer(self, *args):
"""
Extract all cycle timer setting states from the message queue, filter them, and return them in chronological order.
:return: [time, id, times]
example list message: [([2018-01-03 09:47:25:957]bull_timer_pluse_start : cha_ru [2018-01-03 09:47:25:957]the 2 timer, the timer ID 145991072,keep_ontime 0:1 set time 60s , keep_offtime 0:1 set time 60s, times 255 cha_ru )]
"""
key = "set_cycle_timer"
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = re.findall("ID (.+?),.+times (.+?) cha_ru ", serial_result)[0]
now_time = time.mktime(time.strptime(serial_result[1:20], "%Y-%m-%d %X"))
tmp.append([now_time] + list(value))
result = {}
arg = set(args)
for i in arg:
result[i] = [None, None, None]
for ii in tmp:
if i - 6 < ii[0] < i + 6:
result[i] = ii
self.debug.info("set_cycle_dict: %s" % result)
return result
# Check the cycle timer execution (on) state
def check_launch_cycle_timer_on(self, set_dict, *args):
"""
Extract all cycle timer execution (on) states from the message queue, filter them, and return them in chronological order.
:return: [time, id, times]
example list message: [([2018-01-03 09:47:25:957]bull_timer_pluse_exe Pluse Timer: the 2 timer, the timer ID 145991072,times 255 cha_ru )]
"""
assert isinstance(set_dict, dict), "the first param must dict"
key = "launch_cycle_timer_on"
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = re.findall("ID (.+?),times (.+?) .+ cha_ru ", serial_result)[0]
now_time = time.mktime(time.strptime(serial_result[1:20], "%Y-%m-%d %X"))
tmp.append([now_time] + list(value))
result = {}
arg = set(args)
for i in arg:
result[i] = {}
for ii in tmp:
result[i][ii[1]] = [None, None, None]
for ii in set_dict.values():
result[i][ii[1]] = [None, None, None]
for i in arg:
for ii in tmp:
if i - 6 < ii[0] < i + 6:
result[i][ii[1]] = ii
self.debug.info("launch_cycle_on_dict: %s" % result)
return result
# Check the cycle timer execution (off) state
def check_launch_cycle_timer_off(self, set_dict, *args):
"""
Extract all cycle timer execution (off) states from the message queue, filter them, and return them in chronological order.
:return: [time, id, times]
example list message: [([2018-01-03 09:47:25:957]Pluse Timer: the 2 timer, the timer ID 145991072,times 255 cha_ru )]
"""
assert isinstance(set_dict, dict), "the first param must dict"
key = "launch_cycle_timer_off"
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = re.findall("ID (.+?),times (.+?) .+ cha_ru ", serial_result)[0]
now_time = time.mktime(time.strptime(serial_result[1:20], "%Y-%m-%d %X"))
tmp.append([now_time] + list(value))
result = {}
arg = set(args)
for i in arg:
result[i] = {}
for ii in tmp:
result[i][ii[1]] = [None, None, None]
for ii in set_dict.values():
result[i][ii[1]] = [None, None, None]
for i in arg:
for ii in tmp:
if i - 6 < ii[0] < i + 6:
result[i][ii[1]] = ii
self.debug.info("launch_cycle_off_dict: %s" % result)
return result
# Check the delay timer setting state
def check_set_delay_timer(self, *args):
"""
Extract all delay timer setting states from the message queue, filter them, and return them in chronological order.
:return: [time, id]
example list message: [([2018-01-03 09:47:25:957]bull_joy_countdown_timer_set countdown_timer: id : 177483693, state : 1, version: 18, interval : 60 cha_ru )]
"""
key = "set_delay_timer"
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = re.findall("id : (.+?),", serial_result)
now_time = time.mktime(time.strptime(serial_result[1:20], "%Y-%m-%d %X"))
tmp.append([now_time] + value)
result = {}
arg = set(args)
for i in arg:
result[i] = [None, None]
for ii in tmp:
if i - 6 < ii[0] < i + 6:
result[i] = ii
self.debug.info("set_delay_dict: %s" % result)
return result
# Check the delay timer execution state
def check_launch_delay_timer(self, set_dict, *args):
"""
Extract all delay timer execution states from the message queue, filter them, and return them in chronological order.
:return: [time, id]
example list message: [([2018-01-03 09:47:25:957]Count_Down_Time cha_ru [2018-01-03 09:47:25:957]the 3 timer ,the timer ID 177483693 cha_ru )]
"""
assert isinstance(set_dict, dict), "the first param must dict"
key = "launch_delay_timer"
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = re.findall("ID (.+?) cha_ru ", serial_result)
now_time = time.mktime(time.strptime(serial_result[1:20], "%Y-%m-%d %X"))
tmp.append([now_time] + value)
result = {}
arg = set(args)
for i in arg:
result[i] = {}
for ii in tmp:
result[i][ii[1]] = [None, None]
for ii in set_dict.values():
result[i][ii[1]] = [None, None]
for i in arg:
for ii in tmp:
if i - 6 < ii[0] < i + 6:
result[i][ii[1]] = ii
self.debug.info("launch_delay_dict: %s" % result)
return result
# Check the normal timer setting state
def check_set_normal_timer(self, *args):
"""
Extract all normal timer setting states from the message queue, filter them, and return them in chronological order.
:return: [time, id, set_time, week, 3-level timer switch state "0FF"]
example list message: [([2018-01-03 09:47:25:957]bull_joy_base_timer_set base_timer: id : 201033350, state : 1, version: 1 cha_ru [
2018-01-03 09:47:25:957]2018-1-4 9:58:0 week:0 cha_ru [
2018-01-03 09:47:25:957]BASE TIMER TRANK BUF: cha_ru [
2018-01-03 09:47:25:957]01 FF FF 00 00 00 00 00 00 00 00 00 00 00 00 00 cha_ru )]
"""
key = "set_normal_timer"
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = list(re.findall("id : (.+?),.+](.+?) +week:(.+?) cha_ru .+](.+? .+? .+?) .+",
serial_result)[0])
value[1] = value[1].replace(" ", " ")
value[2] = bin(int(value[2]))[2:].zfill(7)
value[3] = value[3].replace("FF", "").replace("00", "0").replace("01", "1").replace(" ", "")
now_time = time.mktime(time.strptime(serial_result[1:20], "%Y-%m-%d %X"))
tmp.append([now_time] + value)
result = {}
arg = set(args)
for i in arg:
result[i] = [None, None, None, None, None]
for ii in tmp:
if i - 6 < ii[0] < i + 6:
result[i] = ii
self.debug.info("set_normal_dict: %s" % result)
return result
# Check the normal timer execution state
def check_launch_normal_timer(self, set_dict, *args):
"""
:param once: repeat (loop) mode
Extract all normal timer execution states from the message queue, filter them, and return them in chronological order.
:return: [time, id, launch_time, week]
example list message: [([2018-01-03 09:47:25:957]Repeat Timer exe over: the 1 timer,the timer ID 110458020, start time 10:1 , week:4 cha_ru )]
"""
assert isinstance(set_dict, dict), "the first param must dict"
key = "launch_normal_timer"
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = list(re.findall("ID (.+?), start time (.+?) +, week:(.+?) cha_ru ", serial_result)[0])
value[1] = "%s%s%s" % ("0-0-0 ", value[1][2:], ":0")
value[2] = bin(int(value[2]))[2:].zfill(7)
now_time = time.mktime(time.strptime(serial_result[1:20], "%Y-%m-%d %X"))
tmp.append([now_time] + value)
result = {}
arg = set(args)
for i in arg:
result[i] = {}
for ii in tmp:
result[i][ii[1]] = [None, None, None, None]
for ii in set_dict.values():
result[i][ii[1]] = [None, None, None, None]
for i in arg:
for ii in tmp:
if i - 6 < ii[0] < i + 6:
result[i][ii[1]] = ii
self.debug.info("launch_normal_dict: %s" % result)
return result
# Check the normal timer execution state (single run)
def check_launch_normal_timer_once(self, set_dict, *args):
"""
:param once: run once / repeat
Extract all normal timer execution states from the message queue, filter them, and return them in chronological order.
:return: [time, id, launch_time]
example list message: [([2018-01-03 09:47:25:957]Once Timer exe over the 1 timer, the timer ID 150954030, start time 18-1-4 9:58 cha_ru )]
"""
assert isinstance(set_dict, dict), "the first param must dict"
key = "launch_normal_timer_once"
self.check_serial_result()
tmp = []
command = list(self.command_dict[key][0].keys())[0]
tmp_queue = self.queue_dict[command]
print(command, tmp_queue.qsize())
while True:
try:
serial_result = tmp_queue.get_nowait()
self.debug.info(serial_result)
except Queue.Empty:
break
print(sys._getframe().f_code.co_name, serial_result)
value = list(re.findall("ID | |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import numpy as np
from collections import defaultdict
import random
import paddle
from paddlenlp.utils.log import logger
from paddlenlp.datasets import MapDataset
def create_dataloader(dataset,
mode='train',
batch_size=1,
batchify_fn=None,
trans_fn=None):
if trans_fn:
dataset = dataset.map(trans_fn)
shuffle = True if mode == 'train' else False
if mode == 'train':
batch_sampler = paddle.io.DistributedBatchSampler(
dataset, batch_size=batch_size, shuffle=shuffle)
else:
batch_sampler = paddle.io.BatchSampler(
dataset, batch_size=batch_size, shuffle=shuffle)
return paddle.io.DataLoader(
dataset=dataset,
batch_sampler=batch_sampler,
collate_fn=batchify_fn,
return_list=True)
def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
sentence1 = example["sentence1"]
sentence2 = example["sentence2"]
encoded_inputs = tokenizer(
text=sentence1,
text_pair=sentence2,
max_seq_len=max_seq_length,
truncation_strategy="only_first")
src_ids = encoded_inputs["input_ids"]
token_type_ids = encoded_inputs["token_type_ids"]
if not is_test:
label = example["label"]
if is_test:
return src_ids, token_type_ids
else:
return src_ids, token_type_ids, label
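# Sketch of how convert_example is typically wired into create_dataloader above.
# The tokenizer class/checkpoint and batchify_fn are illustrative assumptions
# (batchify_fn would usually be built from paddlenlp.data collators elsewhere):
#
#     from functools import partial
#     from paddlenlp.transformers import ErnieTokenizer
#     tokenizer = ErnieTokenizer.from_pretrained("ernie-1.0")
#     trans_fn = partial(convert_example, tokenizer=tokenizer, max_seq_length=128)
#     train_loader = create_dataloader(train_ds, mode="train", batch_size=32,
#                                      batchify_fn=batchify_fn, trans_fn=trans_fn)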
class DataProcessor(object):
"""Base class for data converters for sequence classification datasets."""
def __init__(self, negative_num=1):
# Number of randomly sampled negative pairs per example for the EFL strategy
self.neg_num = negative_num
def get_train_datasets(self, datasets, task_label_description):
"""See base class."""
return self._create_examples(datasets, "train", task_label_description)
def get_dev_datasets(self, datasets, task_label_description):
"""See base class."""
return self._create_examples(datasets, "dev", task_label_description)
def get_test_datasets(self, datasets, task_label_description):
"""See base class."""
return self._create_examples(datasets, "test", task_label_description)
class IflytekProcessor(DataProcessor):
"""Processor for the IFLYTEK dataset (CLUE version)."""
def _create_examples(self, datasets, phase, task_label_description):
"""Creates examples for the training and dev sets."""
examples = []
if phase == "train":
for example in datasets:
true_label = str(example["label"])
neg_examples = []
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
# Todo: handle imbalanced examples, may hurt model performance
if true_label == label:
new_example["label"] = 1
examples.append(new_example)
else:
new_example["label"] = 0
neg_examples.append(new_example)
neg_examples = random.sample(neg_examples, self.neg_num)
examples.extend(neg_examples)
elif phase == "dev":
for example in datasets:
true_label = str(example["label"])
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
# Get true_label's index at task_label_description for evaluate
true_label_index = list(task_label_description.keys(
)).index(true_label)
new_example["label"] = true_label_index
examples.append(new_example)
elif phase == "test":
for example in datasets:
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
examples.append(new_example)
return MapDataset(examples)
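# Illustration of the conversion above (hypothetical label ids/descriptions): a train
# example with sentence "..." and true label "110" yields one positive pair
# {"sentence1": "...", "sentence2": <description of "110">, "label": 1} plus neg_num
# randomly sampled negative pairs built from other label descriptions with "label": 0.
# In the "dev"/"test" phases every label description is paired with the sentence instead.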
class OcnliProcessor(DataProcessor):
"""Processor for the IFLYTEK dataset (CLUE version)."""
def _create_examples(self, datasets, phase, task_label_description):
"""Creates examples for the training and dev sets."""
examples = []
if phase == "train":
for example in datasets:
true_label = str(example["label"])
neg_examples = []
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence1']
new_example["sentence2"] = label_description + example[
'sentence2']
# Todo: handle imbalanced examples, may hurt model performance
if true_label == label:
new_example["label"] = 1
examples.append(new_example)
else:
new_example["label"] = 0
neg_examples.append(new_example)
neg_examples = random.sample(neg_examples, self.neg_num)
examples.extend(neg_examples)
elif phase == "dev":
for example in datasets:
true_label = example["label"]
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence1']
new_example["sentence2"] = label_description + example[
'sentence2']
# Get true_label's index at task_label_description for evaluate
true_label_index = list(task_label_description.keys(
)).index(true_label)
new_example["label"] = true_label_index
examples.append(new_example)
elif phase == "test":
for example in datasets:
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence1']
new_example["sentence2"] = label_description + example[
'sentence2']
examples.append(new_example)
return MapDataset(examples)
class TnewsProcessor(DataProcessor):
"""Processor for the Tnews dataset (CLUE version)."""
def _create_examples(self, datasets, phase, task_label_description):
"""Creates examples for the training and dev sets."""
examples = []
if phase == "train":
for example in datasets:
true_label = example["label"]
neg_examples = []
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
# Todo: handle imbalanced examples, may hurt model performance
if true_label == label:
new_example["label"] = 1
examples.append(new_example)
else:
new_example["label"] = 0
neg_examples.append(new_example)
neg_examples = random.sample(neg_examples, self.neg_num)
examples.extend(neg_examples)
elif phase == "dev":
for example in datasets:
true_label = str(example["label"])
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
# Get true_label's index at task_label_description for evaluate
true_label_index = list(task_label_description.keys(
)).index(true_label)
new_example["label"] = true_label_index
examples.append(new_example)
elif phase == "test":
for example in datasets:
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
examples.append(new_example)
return MapDataset(examples)
class BustmProcessor(DataProcessor):
"""Processor for the Bustum dataset (CLUE version)."""
def _create_examples(self, datasets, phase, task_label_description):
"""Creates examples for the training and dev sets."""
examples = []
if phase == "train":
for example in datasets:
true_label = example["label"]
neg_examples = []
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence1']
new_example["sentence2"] = label_description + example[
'sentence2']
# Todo: handle imbalanced examples, may hurt model performance
if true_label == label:
new_example["label"] = 1
examples.append(new_example)
else:
new_example["label"] = 0
neg_examples.append(new_example)
neg_examples = random.sample(neg_examples, self.neg_num)
examples.extend(neg_examples)
elif phase == "dev":
for example in datasets:
true_label = example["label"]
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence1']
new_example["sentence2"] = label_description + example[
'sentence2']
# Get true_label's index at task_label_description for evaluate
true_label_index = list(task_label_description.keys(
)).index(true_label)
new_example["label"] = true_label_index
examples.append(new_example)
elif phase == "test":
for example in datasets:
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence1']
new_example["sentence2"] = label_description + example[
'sentence2']
examples.append(new_example)
return MapDataset(examples)
class EprstmtProcessor(DataProcessor):
"""Processor for the Eprstmt dataset (CLUE version)."""
def _create_examples(self, datasets, phase, task_label_description):
"""Creates examples for the training and dev sets."""
examples = []
if phase == "train":
for example in datasets:
true_label = example["label"]
neg_examples = []
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
# Todo: handle imbalanced examples, may hurt model performance
if true_label == label:
new_example["label"] = 1
examples.append(new_example)
else:
new_example["label"] = 0
neg_examples.append(new_example)
neg_examples = random.sample(neg_examples, self.neg_num)
examples.extend(neg_examples)
elif phase == "dev":
for example in datasets:
true_label = str(example["label"])
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
# Get true_label's index at task_label_description for evaluate
true_label_index = list(task_label_description.keys(
)).index(true_label)
new_example["label"] = true_label_index
examples.append(new_example)
elif phase == "test":
for example in datasets:
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['sentence']
new_example["sentence2"] = label_description
examples.append(new_example)
return MapDataset(examples)
class CsldcpProcessor(DataProcessor):
"""Processor for the Csldcp dataset (CLUE version)."""
def _create_examples(self, datasets, phase, task_label_description):
"""Creates examples for the training and dev sets."""
examples = []
if phase == "train":
for example in datasets:
true_label = example["label"]
neg_examples = []
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['content']
new_example["sentence2"] = label_description
# Todo: handle imbalanced examples, may hurt model performance
if true_label == label:
new_example["label"] = 1
examples.append(new_example)
else:
new_example["label"] = 0
neg_examples.append(new_example)
neg_examples = random.sample(neg_examples, self.neg_num)
examples.extend(neg_examples)
elif phase == "dev":
for example in datasets:
true_label = str(example["label"])
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['content']
new_example["sentence2"] = label_description
# Get true_label's index at task_label_description for evaluate
true_label_index = list(task_label_description.keys(
)).index(true_label)
new_example["label"] = true_label_index
examples.append(new_example)
elif phase == "test":
for example in datasets:
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['content']
new_example["sentence2"] = label_description
examples.append(new_example)
return MapDataset(examples)
class CslProcessor(DataProcessor):
"""Processor for the Csl dataset (CLUE version)."""
def _create_examples(self, datasets, phase, task_label_description):
"""Creates examples for the training and dev sets."""
examples = []
if phase == "train":
for example in datasets:
true_label = example["label"]
neg_examples = []
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['abst']
new_example["sentence2"] = label_description + " ".join(
example['keyword'])
# Todo: handle imbalanced examples, may hurt model performance
if true_label == label:
new_example["label"] = 1
examples.append(new_example)
else:
new_example["label"] = 0
neg_examples.append(new_example)
neg_examples = random.sample(neg_examples, self.neg_num)
examples.extend(neg_examples)
elif phase == "dev":
for example in datasets:
true_label = str(example["label"])
for label, label_description in task_label_description.items():
new_example = dict()
new_example["sentence1"] = example['abst']
new_example["sentence2"] = label_description + " ".join(
example['keyword'])
# Get true_label's index at task_label_description for evaluate
true_label_index = list(task_label_description.keys(
)).index(true_label)
new_example["label"] = true_label_index
examples.append(new_example)
elif phase == "test":
for example in datasets:
for label, label_description in task_label_description.items():
new_example = dict()
| |
<filename>test/test_punt.py
#!/usr/bin/env python3
import binascii
import random
import socket
import os
import threading
import struct
import copy
import fcntl
import time
from struct import unpack, unpack_from
try:
import unittest2 as unittest
except ImportError:
import unittest
from util import ppp, ppc
from re import compile
import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.ipsec import ESP
import scapy.layers.inet6 as inet6
from scapy.layers.inet6 import IPv6, ICMPv6DestUnreach
from scapy.contrib.ospf import OSPF_Hdr, OSPFv3_Hello
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_papi import VppEnum
from vpp_ipsec_tun_interface import VppIpsecTunInterface
NUM_PKTS = 67
class serverSocketThread(threading.Thread):
""" Socket server thread"""
def __init__(self, threadID, sockName):
threading.Thread.__init__(self)
self.threadID = threadID
self.sockName = sockName
self.sock = None
self.rx_pkts = []
self.keep_running = True
def rx_packets(self):
# Wait for some packets on socket
while self.keep_running:
try:
data = self.sock.recv(65536)
# punt socket metadata
# packet_desc = data[0:8]
# Ethernet
self.rx_pkts.append(Ether(data[8:]))
except IOError as e:
if e.errno == 11:
# nothing to receive, sleep a little
time.sleep(0.1)
pass
else:
raise
def run(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
os.unlink(self.sockName)
except:
pass
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 65536)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 65536)
fcntl.fcntl(self.sock, fcntl.F_SETFL, os.O_NONBLOCK)
self.sock.bind(self.sockName)
self.rx_packets()
def close(self):
self.sock.close()
self.keep_running = False
return self.rx_pkts
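# Illustrative sketch (not part of the test suite itself): how serverSocketThread is
# typically driven -- start it on a unix socket path, let punted traffic arrive, then
# call close() to collect the captured packets. The socket path below is an assumption
# made only for this example.
def _example_server_socket_usage():
    thread = serverSocketThread(threadID=0, sockName="/tmp/socket_example")
    thread.start()
    time.sleep(0.5)            # punted traffic would arrive on the socket here
    pkts = thread.close()      # stops the rx loop and returns the captured Ether() packets
    thread.join()
    return pkts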
class TestPuntSocket(VppTestCase):
""" Punt Socket """
ports = [1111, 2222, 3333, 4444]
sock_servers = list()
# FIXME: nr_packets > 3 results in failure
# nr_packets = 3 makes the test unstable
nr_packets = 2
@classmethod
def setUpClass(cls):
super(TestPuntSocket, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestPuntSocket, cls).tearDownClass()
@classmethod
def setUpConstants(cls):
cls.extra_vpp_punt_config = [
"punt", "{", "socket", cls.tempdir+"/socket_punt", "}"]
super(TestPuntSocket, cls).setUpConstants()
def setUp(self):
super(TestPuntSocket, self).setUp()
random.seed()
self.create_pg_interfaces(range(2))
for i in self.pg_interfaces:
i.admin_up()
def tearDown(self):
del self.sock_servers[:]
super(TestPuntSocket, self).tearDown()
def socket_client_create(self, sock_name, id=None):
thread = serverSocketThread(id, sock_name)
self.sock_servers.append(thread)
thread.start()
return thread
def socket_client_close(self):
rx_pkts = []
for thread in self.sock_servers:
rx_pkts += thread.close()
thread.join()
return rx_pkts
def verify_port(self, pr, vpr):
self.assertEqual(vpr.punt.type, pr['type'])
self.assertEqual(vpr.punt.punt.l4.port,
pr['punt']['l4']['port'])
self.assertEqual(vpr.punt.punt.l4.protocol,
pr['punt']['l4']['protocol'])
self.assertEqual(vpr.punt.punt.l4.af,
pr['punt']['l4']['af'])
def verify_exception(self, pr, vpr):
self.assertEqual(vpr.punt.type, pr['type'])
self.assertEqual(vpr.punt.punt.exception.id,
pr['punt']['exception']['id'])
def verify_ip_proto(self, pr, vpr):
self.assertEqual(vpr.punt.type, pr['type'])
self.assertEqual(vpr.punt.punt.ip_proto.af,
pr['punt']['ip_proto']['af'])
self.assertEqual(vpr.punt.punt.ip_proto.protocol,
pr['punt']['ip_proto']['protocol'])
def verify_udp_pkts(self, rxs, n_rx, port):
n_match = 0
for rx in rxs:
self.assertTrue(rx.haslayer(UDP))
if rx[UDP].dport == port:
n_match += 1
self.assertEqual(n_match, n_rx)
def set_port(pr, port):
pr['punt']['l4']['port'] = port
return pr
def set_reason(pr, reason):
pr['punt']['exception']['id'] = reason
return pr
def mk_vpp_cfg4():
pt_l4 = VppEnum.vl_api_punt_type_t.PUNT_API_TYPE_L4
af_ip4 = VppEnum.vl_api_address_family_t.ADDRESS_IP4
udp_proto = VppEnum.vl_api_ip_proto_t.IP_API_PROTO_UDP
punt_l4 = {
'type': pt_l4,
'punt': {
'l4': {
'af': af_ip4,
'protocol': udp_proto
}
}
}
return punt_l4
def mk_vpp_cfg6():
pt_l4 = VppEnum.vl_api_punt_type_t.PUNT_API_TYPE_L4
af_ip6 = VppEnum.vl_api_address_family_t.ADDRESS_IP6
udp_proto = VppEnum.vl_api_ip_proto_t.IP_API_PROTO_UDP
punt_l4 = {
'type': pt_l4,
'punt': {
'l4': {
'af': af_ip6,
'protocol': udp_proto
}
}
}
return punt_l4
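# Minimal sketch of how the helpers above compose a punt registration: build the base
# IPv4 L4 config, stamp a UDP port onto it, and deep-copy when the same template is
# reused for several ports (set_port mutates the dict in place). The port numbers are
# arbitrary example values.
def _example_build_punt_cfgs():
    base = mk_vpp_cfg4()
    cfg_a = copy.deepcopy(set_port(base, 1111))
    cfg_b = copy.deepcopy(set_port(base, 2222))
    return cfg_a, cfg_b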
class TestIP4PuntSocket(TestPuntSocket):
""" Punt Socket for IPv4 UDP """
@classmethod
def setUpClass(cls):
super(TestIP4PuntSocket, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestIP4PuntSocket, cls).tearDownClass()
def setUp(self):
super(TestIP4PuntSocket, self).setUp()
for i in self.pg_interfaces:
i.config_ip4()
i.resolve_arp()
def tearDown(self):
super(TestIP4PuntSocket, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
def test_punt_socket_dump(self):
""" Punt socket registration/deregistration"""
pt_l4 = VppEnum.vl_api_punt_type_t.PUNT_API_TYPE_L4
af_ip4 = VppEnum.vl_api_address_family_t.ADDRESS_IP4
udp_proto = VppEnum.vl_api_ip_proto_t.IP_API_PROTO_UDP
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
#
# configure a punt socket
#
punt_l4 = mk_vpp_cfg4()
self.vapi.punt_socket_register(set_port(punt_l4, 1111),
"%s/socket_punt_1111" % self.tempdir)
self.vapi.punt_socket_register(set_port(punt_l4, 2222),
"%s/socket_punt_2222" % self.tempdir)
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 2)
self.verify_port(set_port(punt_l4, 1111), punts[0])
self.verify_port(set_port(punt_l4, 2222), punts[1])
#
# deregister a punt socket
#
self.vapi.punt_socket_deregister(set_port(punt_l4, 1111))
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 1)
#
# configure a punt socket again
#
self.vapi.punt_socket_register(set_port(punt_l4, 1111),
"%s/socket_punt_1111" % self.tempdir)
self.vapi.punt_socket_register(set_port(punt_l4, 3333),
"%s/socket_punt_3333" % self.tempdir)
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 3)
self.logger.info(self.vapi.cli("sh punt sock reg"))
#
# deregister all punt socket
#
self.vapi.punt_socket_deregister(set_port(punt_l4, 1111))
self.vapi.punt_socket_deregister(set_port(punt_l4, 2222))
self.vapi.punt_socket_deregister(set_port(punt_l4, 3333))
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
def test_punt_socket_traffic_single_port_single_socket(self):
""" Punt socket traffic single port single socket"""
port = self.ports[0]
pt_l4 = VppEnum.vl_api_punt_type_t.PUNT_API_TYPE_L4
punt_l4 = set_port(mk_vpp_cfg4(), port)
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=9876, dport=port) /
Raw(b'\xa5' * 100))
pkts = p * self.nr_packets
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
#
# expect ICMP - port unreachable for all packets
#
rx = self.send_and_expect(self.pg0, pkts, self.pg0)
for p in rx:
self.assertEqual(int(p[IP].proto), 1) # ICMP
self.assertEqual(int(p[ICMP].code), 3) # unreachable
#
# configure a punt socket
#
self.socket_client_create("%s/socket_%d" % (self.tempdir, port))
self.vapi.punt_socket_register(punt_l4, "%s/socket_%d" %
(self.tempdir, port))
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 1)
#
# expect punt socket and no packets on pg0
#
self.send_and_assert_no_replies(self.pg0, pkts)
rx = self.socket_client_close()
self.verify_udp_pkts(rx, len(pkts), port)
#
# remove punt socket. expect ICMP - port unreachable for all packets
#
self.vapi.punt_socket_deregister(punt_l4)
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
rx = self.send_and_expect(self.pg0, pkts, self.pg0)
for p in rx:
self.assertEqual(int(p[IP].proto), 1) # ICMP
self.assertEqual(int(p[ICMP].code), 3) # unreachable
def test_punt_socket_traffic_multi_ports_multi_sockets(self):
""" Punt socket traffic multi ports and multi sockets"""
punt_l4 = mk_vpp_cfg4()
# configuration for each UDP port
cfgs = dict()
#
# create stream of packets for each port
#
for port in self.ports:
# choose port from port list
cfgs[port] = {}
pkt = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=9876, dport=port) /
Raw(b'\xa5' * 100))
cfgs[port]['pkts'] = pkt * self.nr_packets
cfgs[port]['port'] = port
cfgs[port]['vpp'] = copy.deepcopy(set_port(punt_l4, port))
# configure punt sockets
cfgs[port]['sock'] = self.socket_client_create(
"%s/socket_%d" % (self.tempdir, port))
self.vapi.punt_socket_register(
cfgs[port]['vpp'],
"%s/socket_%d" % (self.tempdir, port))
#
# send the packets that get punted
#
for cfg in cfgs.values():
self.send_and_assert_no_replies(self.pg0, cfg['pkts'])
#
        # test that we got the expected packets on the expected socket
#
for cfg in cfgs.values():
rx = cfg['sock'].close()
self.verify_udp_pkts(rx, len(cfg['pkts']), cfg['port'])
self.vapi.punt_socket_deregister(cfg['vpp'])
def test_punt_socket_traffic_multi_ports_single_socket(self):
""" Punt socket traffic multi ports and single socket"""
pt_l4 = VppEnum.vl_api_punt_type_t.PUNT_API_TYPE_L4
punt_l4 = mk_vpp_cfg4()
#
# create stream of packets with each port
#
pkts = []
for port in self.ports:
# choose port from port list
pkt = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=9876, dport=port) /
Raw(b'\xa5' * 100))
pkts += pkt * self.nr_packets
#
# configure a punt socket
#
self.socket_client_create("%s/socket_multi" % self.tempdir)
for p in self.ports:
self.vapi.punt_socket_register(set_port(punt_l4, p),
"%s/socket_multi" % self.tempdir)
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), len(self.ports))
#
# expect punt socket and no packets on pg0
#
self.send_and_assert_no_replies(self.pg0, pkts)
self.logger.info(self.vapi.cli("show trace"))
rx = self.socket_client_close()
for p in self.ports:
self.verify_udp_pkts(rx, self.nr_packets, p)
self.vapi.punt_socket_deregister(set_port(punt_l4, p))
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
class TestIP6PuntSocket(TestPuntSocket):
""" Punt Socket for IPv6 UDP """
@classmethod
def setUpClass(cls):
super(TestIP6PuntSocket, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestIP6PuntSocket, cls).tearDownClass()
def setUp(self):
super(TestIP6PuntSocket, self).setUp()
for i in self.pg_interfaces:
i.config_ip6()
i.resolve_ndp()
def tearDown(self):
super(TestIP6PuntSocket, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip6()
i.admin_down()
def test_punt_socket_dump(self):
""" Punt socket registration """
pt_l4 = VppEnum.vl_api_punt_type_t.PUNT_API_TYPE_L4
af_ip6 = VppEnum.vl_api_address_family_t.ADDRESS_IP6
udp_proto = VppEnum.vl_api_ip_proto_t.IP_API_PROTO_UDP
#
# configure a punt socket
#
punt_l4 = {
'type': pt_l4,
'punt': {
'l4': {
'af': af_ip6,
'protocol': udp_proto
}
}
}
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
#
# configure a punt socket
#
self.vapi.punt_socket_register(set_port(punt_l4, 1111),
"%s/socket_1111" % self.tempdir)
self.vapi.punt_socket_register(set_port(punt_l4, 2222),
"%s/socket_2222" % self.tempdir)
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 2)
self.verify_port(set_port(punt_l4, 1111), punts[0])
self.verify_port(set_port(punt_l4, 2222), punts[1])
#
# deregister a punt socket
#
self.vapi.punt_socket_deregister(set_port(punt_l4, 1111))
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 1)
#
# configure a punt socket again
#
self.vapi.punt_socket_register(set_port(punt_l4, 1111),
"%s/socket_1111" % self.tempdir)
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 2)
#
# deregister all punt socket
#
self.vapi.punt_socket_deregister(set_port(punt_l4, 1111))
self.vapi.punt_socket_deregister(set_port(punt_l4, 2222))
self.vapi.punt_socket_deregister(set_port(punt_l4, 3333))
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
def test_punt_socket_traffic_single_port_single_socket(self):
""" Punt socket traffic single port single socket"""
port = self.ports[0]
pt_l4 = VppEnum.vl_api_punt_type_t.PUNT_API_TYPE_L4
af_ip6 = VppEnum.vl_api_address_family_t.ADDRESS_IP6
udp_proto = VppEnum.vl_api_ip_proto_t.IP_API_PROTO_UDP
punt_l4 = {
'type': pt_l4,
'punt': {
'l4': {
'af': af_ip6,
'protocol': udp_proto,
'port': port,
}
}
}
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
inet6.UDP(sport=9876, dport=port) /
Raw(b'\xa5' * 100))
pkts = p * self.nr_packets
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
#
# expect ICMPv6 - destination unreachable for all packets
#
self.vapi.cli("clear trace")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# FIXME - when punt socket deregister is implemented
# rx = self.pg0.get_capture(self.nr_packets)
# for p in rx:
# self.assertEqual(int(p[IPv6].nh), 58) # ICMPv6
# self.assertEqual(int(p[ICMPv6DestUnreach].code),4) # unreachable
#
# configure a punt socket
#
self.socket_client_create("%s/socket_%d" % (self.tempdir, port))
self.vapi.punt_socket_register(punt_l4, "%s/socket_%d" %
(self.tempdir, port))
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 1)
#
# expect punt socket and no packets on pg0
#
self.vapi.cli("clear errors")
self.vapi.cli("clear trace")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.get_capture(0)
self.logger.info(self.vapi.cli("show trace"))
rx = self.socket_client_close()
self.verify_udp_pkts(rx, len(pkts), port)
#
# remove punt socket. expect ICMP - dest. unreachable for all packets
#
self.vapi.punt_socket_deregister(punt_l4)
punts = self.vapi.punt_socket_dump(type=pt_l4)
self.assertEqual(len(punts), 0)
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
        # FIXME - when punt socket deregister is implemented
address
ApiTypes.AccessLevel accessLevel - Level of access (permission) to our API.
DateTime? expires -
string newTokenName - New name of the AccessToken. (default None)
IEnumerable<string> restrictAccessToIPRange - Comma separated list of CIDR notated IP ranges that this token can connect from. (default None)
ApiTypes.AccessTokenType? type - (default None)
"""
parameters = {
'tokenName': tokenName,
'accessLevel': accessLevel.value,
'expires': expires,
'newTokenName': newTokenName,
'restrictAccessToIPRange': ";".join(map(str, restrictAccessToIPRange)),
'type': type.value}
return ApiClient.Request('GET', '/accesstoken/update', parameters)
"""
Methods for managing your account and subaccounts.
"""
class Account:
@staticmethod
def AddDedicatedSupport(supportPlan):
"""
Request premium support for your account
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
ApiTypes.SupportPlan supportPlan -
"""
parameters = {
'supportPlan': supportPlan.value}
return ApiClient.Request('GET', '/account/adddedicatedsupport', parameters)
@staticmethod
def AddSubAccount(email, password, confirmPassword, allow2fa=False, requiresEmailCredits=False, maxContacts=0, enablePrivateIPRequest=True, sendActivation=False, returnUrl=None, sendingPermission=None, enableContactFeatures=None, poolName=None, emailSizeLimit=10, dailySendLimit=None):
"""
Create new subaccount and provide most important data about it.
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string email - Proper email address.
string password - Password
        string confirmPassword - Repeat password
bool allow2fa - True, if you want to allow two-factor authentication. Otherwise, false. (default False)
bool requiresEmailCredits - True, if Account needs credits to send emails. Otherwise, false (default False)
int maxContacts - Maximum number of contacts the Account can have (default 0)
bool enablePrivateIPRequest - True, if Account can request for private IP on its own. Otherwise, false (default True)
bool sendActivation - True, if you want to send activation email to this Account. Otherwise, false (default False)
string returnUrl - URL to navigate to after Account creation (default None)
ApiTypes.SendingPermission? sendingPermission - Sending permission setting for Account (default None)
bool? enableContactFeatures - Private IP required. Name of the custom IP Pool which Sub Account should use to send its emails. Leave empty for the default one or if no Private IPs have been bought (default None)
string poolName - Name of your custom IP Pool to be used in the sending process (default None)
int emailSizeLimit - Maximum size of email including attachments in MB's (default 10)
int? dailySendLimit - Amount of emails Account can send daily (default None)
Returns string
"""
parameters = {
'email': email,
'password': password,
            'confirmPassword': confirmPassword,
'allow2fa': allow2fa,
'requiresEmailCredits': requiresEmailCredits,
'maxContacts': maxContacts,
'enablePrivateIPRequest': enablePrivateIPRequest,
'sendActivation': sendActivation,
'returnUrl': returnUrl,
'sendingPermission': sendingPermission.value,
'enableContactFeatures': enableContactFeatures,
'poolName': poolName,
'emailSizeLimit': emailSizeLimit,
'dailySendLimit': dailySendLimit}
return ApiClient.Request('GET', '/account/addsubaccount', parameters)
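    # Usage sketch (illustrative only, not taken from the Elastic Email docs):
    # AddSubAccount returns the new sub-account's API key as a string. The address,
    # password and the SendingPermission member name below are assumptions made for
    # this example; note that sendingPermission must be an ApiTypes enum member,
    # because the method unconditionally reads its .value attribute.
    #
    #   key = Account.AddSubAccount(
    #       email='subuser@example.com', password='S3cret!1', confirmPassword='S3cret!1',
    #       maxContacts=500, sendingPermission=ApiTypes.SendingPermission.All)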
@staticmethod
def AddSubAccountCredits(credits, notes, subAccountEmail=None, publicAccountID=None):
"""
Add email credits to a sub-account
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
int credits - Amount of credits to add
string notes - Specific notes about the transaction
string subAccountEmail - Email address of Sub-Account (default None)
string publicAccountID - Public key of sub-account to add credits to. Use subAccountEmail or publicAccountID not both. (default None)
"""
parameters = {
'credits': credits,
'notes': notes,
'subAccountEmail': subAccountEmail,
'publicAccountID': publicAccountID}
return ApiClient.Request('GET', '/account/addsubaccountcredits', parameters)
@staticmethod
def AddWebhook(webNotificationUrl, name, notifyOncePerEmail=None, notificationForSent=None, notificationForOpened=None, notificationForClicked=None, notificationForUnsubscribed=None, notificationForAbuseReport=None, notificationForError=None):
"""
Add notifications webhook
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string webNotificationUrl - URL address to receive web notifications to parse and process.
string name - Filename
bool? notifyOncePerEmail - (default None)
bool? notificationForSent - (default None)
bool? notificationForOpened - (default None)
bool? notificationForClicked - (default None)
bool? notificationForUnsubscribed - (default None)
bool? notificationForAbuseReport - (default None)
bool? notificationForError - (default None)
Returns string
"""
parameters = {
'webNotificationUrl': webNotificationUrl,
'name': name,
'notifyOncePerEmail': notifyOncePerEmail,
'notificationForSent': notificationForSent,
'notificationForOpened': notificationForOpened,
'notificationForClicked': notificationForClicked,
'notificationForUnsubscribed': notificationForUnsubscribed,
'notificationForAbuseReport': notificationForAbuseReport,
'notificationForError': notificationForError}
return ApiClient.Request('GET', '/account/addwebhook', parameters)
@staticmethod
def ChangeEmail(newEmail, confirmEmail, sourceUrl="https://elasticemail.com/account/"):
"""
Change your email address. Remember, that your email address is used as login!
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string newEmail - New email address.
string confirmEmail - New email address.
string sourceUrl - URL from which request was sent. (default "https://elasticemail.com/account/")
Returns string
"""
parameters = {
'newEmail': newEmail,
'confirmEmail': confirmEmail,
'sourceUrl': sourceUrl}
return ApiClient.Request('GET', '/account/changeemail', parameters)
@staticmethod
def ChangePassword(newPassword, confirmPassword, expireDashboardSessions=False, currentPassword=None):
"""
Create new password for your account. Password needs to be at least 6 characters long.
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
        string newPassword - New password for Account.
        string confirmPassword - Repeat new password.
bool expireDashboardSessions - (default False)
string currentPassword - Current password. (default None)
"""
parameters = {
            'newPassword': newPassword,
            'confirmPassword': confirmPassword,
            'expireDashboardSessions': expireDashboardSessions,
            'currentPassword': currentPassword}
return ApiClient.Request('GET', '/account/changepassword', parameters)
@staticmethod
def ChangeSubAccountPassword(newPassword, confirmPassword, subAccountEmail, expireDashboardSessions=False):
"""
Create new password for subaccount. Password needs to be at least 6 characters long.
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string newPassword - New password for Account.
string confirmPassword - Repeat new password.
string subAccountEmail - Email address of Sub-Account
bool expireDashboardSessions - (default False)
"""
parameters = {
'newPassword': newPassword,
            'confirmPassword': confirmPassword,
'subAccountEmail': subAccountEmail,
'expireDashboardSessions': expireDashboardSessions}
return ApiClient.Request('GET', '/account/changesubaccountpassword', parameters)
@staticmethod
def DeleteSubAccount(subAccountEmail=None, publicAccountID=None):
"""
Deletes specified Subaccount
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string subAccountEmail - Email address of Sub-Account (default None)
string publicAccountID - Public key of sub-account to delete. Use subAccountEmail or publicAccountID not both. (default None)
"""
parameters = {
'subAccountEmail': subAccountEmail,
'publicAccountID': publicAccountID}
return ApiClient.Request('GET', '/account/deletesubaccount', parameters)
@staticmethod
def DeleteWebhook(webhookID):
"""
Delete notifications webhook
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string webhookID -
"""
parameters = {
'webhookID': webhookID}
return ApiClient.Request('GET', '/account/deletewebhook', parameters)
@staticmethod
def GetSubAccountList(limit=0, offset=0, email=None):
"""
Lists all of your subaccounts
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
int limit - Maximum number of returned items. (default 0)
int offset - How many items should be returned ahead. (default 0)
string email - Proper email address. (default None)
Returns List<ApiTypes.SubAccount>
"""
parameters = {
'limit': limit,
'offset': offset,
'email': email}
return ApiClient.Request('GET', '/account/getsubaccountlist', parameters)
@staticmethod
def Load(parameters=None):
"""
Loads your account. Returns detailed information about your account.
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
Returns ApiTypes.Account
"""
return ApiClient.Request('GET', '/account/load', parameters)
@staticmethod
def LoadAdvancedOptions(parameters=None):
"""
Load advanced options of your account
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
Returns ApiTypes.AdvancedOptions
"""
return ApiClient.Request('GET', '/account/loadadvancedoptions', parameters)
@staticmethod
def LoadEmailCreditsHistory(parameters=None):
"""
Lists email credits history
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
Returns List<ApiTypes.EmailCredits>
"""
return ApiClient.Request('GET', '/account/loademailcreditshistory', parameters)
@staticmethod
def LoadInboundOptions(parameters=None):
"""
Load inbound options of your account
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
Returns ApiTypes.InboundOptions
"""
return ApiClient.Request('GET', '/account/loadinboundoptions', parameters)
@staticmethod
def LoadPaymentHistory(limit, offset, fromDate, toDate):
"""
Lists all payments
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
int limit - Maximum number of returned items.
int offset - How many items should be returned ahead.
DateTime fromDate - Starting date for search in YYYY-MM-DDThh:mm:ss format.
DateTime toDate - Ending date for search in YYYY-MM-DDThh:mm:ss format.
Returns List<ApiTypes.Payment>
"""
parameters = {
'limit': limit,
'offset': offset,
'fromDate': fromDate,
'toDate': toDate}
return ApiClient.Request('GET', '/account/loadpaymenthistory', parameters)
@staticmethod
def LoadPayoutHistory(parameters=None):
"""
Lists all referral payout history
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
Returns List<ApiTypes.Payment>
"""
return ApiClient.Request('GET', '/account/loadpayouthistory', parameters)
@staticmethod
def LoadReferralDetails(parameters=None):
"""
Shows information about your referral details
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
Returns ApiTypes.Referral
"""
return ApiClient.Request('GET', '/account/loadreferraldetails', parameters)
@staticmethod
def LoadReputationHistory(parameters=None):
"""
Shows latest changes in your sending reputation
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
Returns List<ApiTypes.ReputationHistory>
"""
return ApiClient.Request('GET', '/account/loadreputationhistory', parameters)
@staticmethod
def LoadReputationImpact(parameters=None):
"""
Shows detailed information about your actual reputation score
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
Returns ApiTypes.ReputationDetail
"""
return ApiClient.Request('GET', '/account/loadreputationimpact', parameters)
@staticmethod
def LoadSpamCheck(limit=20, offset=0):
"""
Returns detailed spam check.
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
int limit - Maximum number of returned items. (default 20)
int offset - How many items should be returned ahead. (default 0)
Returns List<ApiTypes.SpamCheck>
"""
parameters = {
'limit': limit,
'offset': offset}
return ApiClient.Request('GET', '/account/loadspamcheck', parameters)
@staticmethod
def LoadSubAccountsEmailCreditsHistory(subAccountEmail=None, publicAccountID=None):
"""
Lists email credits history for sub-account
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string subAccountEmail - Email address of Sub-Account (default None)
string publicAccountID - Public key of sub-account to list history for. Use subAccountEmail or publicAccountID not both. (default None)
Returns List<ApiTypes.EmailCredits>
"""
parameters = {
'subAccountEmail': subAccountEmail,
'publicAccountID': publicAccountID}
return ApiClient.Request('GET', '/account/loadsubaccountsemailcreditshistory', parameters)
@staticmethod
def LoadSubAccountSettings(subAccountEmail=None, publicAccountID=None):
"""
Loads settings of subaccount
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string subAccountEmail - Email address of Sub-Account (default None)
string publicAccountID - Public key of sub-account to load settings for. Use subAccountEmail or publicAccountID not both. (default None)
Returns ApiTypes.SubAccountSettings
"""
parameters = {
'subAccountEmail': subAccountEmail,
'publicAccountID': publicAccountID}
return ApiClient.Request('GET', '/account/loadsubaccountsettings', parameters)
@staticmethod
def LoadUsage(EEfrom, to, loadSubaccountsUsage=True):
"""
Shows usage of your account in given time.
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
DateTime from - Starting date for search in YYYY-MM-DDThh:mm:ss format.
DateTime to - Ending date for search in YYYY-MM-DDThh:mm:ss format.
bool loadSubaccountsUsage - (default True)
Returns List<ApiTypes.Usage>
"""
parameters = {
'from': EEfrom,
'to': to,
'loadSubaccountsUsage': loadSubaccountsUsage}
return ApiClient.Request('GET', '/account/loadusage', parameters)
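    # Usage note / sketch: the first argument is spelled EEfrom because "from" is a
    # reserved word in Python; it is mapped back onto the API's "from" field in the
    # parameters dict above. Dates use the YYYY-MM-DDThh:mm:ss format, e.g.:
    #
    #   usage = Account.LoadUsage('2020-01-01T00:00:00', '2020-02-01T00:00:00')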
@staticmethod
def LoadWebhook(limit=0, offset=0):
"""
Load notifications webhooks
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
        int limit - Maximum number of returned items. (default 0)
#!/usr/bin/env python3
import os
import pathlib
import subprocess
from concurrent import futures
class Qiime2Methods(object):
@staticmethod
def make_folder(folder):
"""
Create output folder.
:param folder: string. Output folder path.
:return:
"""
# Will create parent directories if don't exist and will not return error if already exists
pathlib.Path(folder).mkdir(parents=True, exist_ok=True)
@staticmethod
def list_fastq(my_path):
"""
Walk input directory and list all the fastq files. Accepted file extensions are '.fastq', '.fastq.gz',
'.fq' and '.fq.gz'.
:param my_path: string. Input folder path
:return: list of strings. Fastq files in input folder
"""
# Create empty list to hold the file paths
fastq_list = list()
# Walk the input directory recursively and look for fastq files
for root, directories, filenames in os.walk(my_path):
for filename in filenames:
absolute_path = os.path.join(root, filename)
if os.path.isfile(absolute_path) and filename.endswith(('.fastq', '.fastq.gz', '.fq', '.fq.gz')):
fastq_list.append(absolute_path) # Add fastq file path to the list
return fastq_list
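    # Usage sketch (the paths below are illustrative assumptions): collect every fastq
    # under a run folder, then hand the list to one of the *_parallel helpers.
    #
    #   fastqs = Qiime2Methods.list_fastq('/data/run01/raw_reads')
    #   Qiime2Methods.fix_fastq_se_parallel('/opt/qiime2_pipeline', fastqs, cpu=8)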
@staticmethod
def rc_fastq(input_fastq, output_fastq):
"""
        :param input_fastq: string. Path of the input fastq file.
        :param output_fastq: string. Path of the reverse-complemented output fastq file.
:return:
"""
cmd = ['reformat.sh',
'ow=t',
'rcomp=t',
'in={}'.format(input_fastq),
'out={}'.format(output_fastq)]
subprocess.run(cmd)
@staticmethod
    def rc_fastq_parallel(fastq_list, output_folder, cpu, parallel):
        """Run "rc_fastq" in parallel, one file per worker (cpu is kept for interface consistency)."""
        with futures.ThreadPoolExecutor(max_workers=parallel) as executor:
            # rc_fastq takes (input_fastq, output_fastq); the original lambda mistakenly targeted extract_its_se
            args = ((fastq, output_folder + '/' + os.path.basename(fastq)) for fastq in fastq_list)
            for results in executor.map(lambda p: Qiime2Methods.rc_fastq(*p), args):  # (*p) unpacks
                pass
@staticmethod
def extract_its_se(fastq_file, output_folder, log_folder, cpu, taxa, region):
"""
Extract Fungi ITS1 sequence from fastq files. Use ITSxpress program.
:param fastq_file: string. Fastq file path
:param output_folder: string. Path of output folder.
        :param log_folder: string. Path of log folder.
        :param cpu: int. number of CPU to use.
        :param taxa: string. Taxa to pass to ITSxpress (--taxa).
        :param region: string. ITS region to pass to ITSxpress (--region).
:return:
"""
cmd = ['itsxpress',
'--threads', str(cpu),
'--single_end',
'--fastq', fastq_file,
'--region', region,
'--taxa', taxa,
'--cluster_id', str(0.99),
'--outfile', output_folder + '/' + os.path.basename(fastq_file),
               '--log', log_folder + '/' + os.path.basename(fastq_file).split('_')[0] + '.log']
subprocess.run(cmd)
@staticmethod
def extract_its_se_parallel(fastq_list, output_folder, log_folder, cpu, parallel, taxa, region):
"""
Run "extract_its" in parallel using 4 cores per instance.
:param fastq_list: string. A list of fastq file paths.
        :param output_folder: string. Path of output folder
:param cpu: int. Number of cpu to use.
:param parallel: int. Number of samples to process in parallel
:return:
"""
with futures.ThreadPoolExecutor(max_workers=parallel) as executor:
args = ((fastq, output_folder, log_folder, int(cpu/parallel), taxa, region) for fastq in fastq_list)
for results in executor.map(lambda p: Qiime2Methods.extract_its_se(*p), args): # (*p) unpacks
pass
@staticmethod
def extract_its_pe(fastq_r1, fastq_r2, output_folder, log_folder, cpu, taxa, region):
"""
Extract Fungi ITS1 sequence from fastq files. Use ITSxpress program.
:param fastq_r1: string. Fastq file path
:param fastq_r2: string. Fastq file path
:param output_folder: string. Path of output folder.
        :param log_folder: string. Path of log folder.
        :param cpu: int. number of CPU to use.
        :param taxa: string. Taxa to pass to ITSxpress (--taxa).
        :param region: string. ITS region to pass to ITSxpress (--region).
:return:
"""
cmd = ['itsxpress',
'--threads', str(cpu),
'--fastq', fastq_r1,
'--fastq2', fastq_r2,
'--region', region,
'--taxa', taxa,
'--cluster_id', str(0.99),
'--outfile', output_folder + '/' + os.path.basename(fastq_r1),
'--outfile2', output_folder + '/' + os.path.basename(fastq_r2),
               '--log', log_folder + '/' + os.path.basename(fastq_r1).split('_')[0] + '.log']
subprocess.run(cmd)
@staticmethod
def extract_its_pe_parallel(sample_dict, output_folder, log_folder, cpu, parallel, taxa, region):
"""
Run "extract_its" in parallel using 4 cores per instance.
:param sample_dict: string. A dictionary of fastq file paths.
        :param output_folder: string. Path of output folder
:param cpu: int. Number of cpu to use.
:param parallel: int. Number of samples to process in parallel
:return:
"""
with futures.ThreadPoolExecutor(max_workers=int(parallel)) as executor:
args = ((fastq_list[0], fastq_list[1], output_folder, log_folder, int(cpu/parallel), taxa, region)
for sample, fastq_list in sample_dict.items())
for results in executor.map(lambda p: Qiime2Methods.extract_its_pe(*p), args): # (*p) unpacks arguments
pass
@staticmethod
def fix_fastq_se(install_path, fastq_file):
"""
Remove empty entries from a fastq file. Overwrites the input file with the output file.
:param install_path: string. Path to install folder.
:param fastq_file: string. Path of a fastq file, gzipped or not.
:return:
"""
cmd = ['python', '{}/remove_empty_fastq_entries.py'.format(install_path),
'-f', fastq_file]
subprocess.run(cmd)
@staticmethod
def fix_fastq_se_parallel(install_path, fastq_list, cpu):
"""
Run "fix_fastq" in parallel using all the threads, one file per thread
:param install_path: string. Path to install folder.
:param fastq_list: string. list of fastq file paths
:param cpu: int. number of CPU to use
:return:
"""
with futures.ThreadPoolExecutor(max_workers=cpu) as executor:
args = ((install_path, fastq) for fastq in fastq_list)
for results in executor.map(lambda p: Qiime2Methods.fix_fastq_se(*p), args): # (*p) unpacks arguments
pass
@staticmethod
def fix_fastq_pe(install_path, fastq_r1, fastq_r2):
"""
Remove empty entries from a fastq file. Have to keep R1 and R2 synchronized.
Overwrites the input file with the output file.
:param install_path: string. Path to install folder.
:param fastq_r1: string. Path of a fastq R1 file, gzipped or not.
:param fastq_r2: string. Path of a fastq R2 file, gzipped or not.
:return:
"""
cmd = ['python', '{}/remove_empty_fastq_entries.py'.format(install_path),
'-f', fastq_r1,
'-f2', fastq_r2]
subprocess.run(cmd)
@staticmethod
def fix_fastq_pe_parallel(install_path, sample_dict, cpu):
"""
Run "fix_fastq" in parallel using all the threads, one file per thread
:param install_path: string. Path to install folder.
:param sample_dict: string. Dictionary of fastq file paths
:param cpu: int. number of CPU to use
:return:
"""
with futures.ThreadPoolExecutor(max_workers=cpu) as executor:
args = ((install_path, fastq_list[0], fastq_list[1]) for sample, fastq_list in sample_dict.items())
for results in executor.map(lambda p: Qiime2Methods.fix_fastq_pe(*p), args): # (*p) unpacks arguments
pass
@staticmethod
def qiime2_import_fastq_se(fastq_folder, reads_qza):
"""
Import single-end fastq files
https://docs.qiime2.org/2020.8/tutorials/importing/
:param fastq_folder:
:param reads_qza:
:return:
"""
cmd = ['qiime', 'tools', 'import',
'--type', 'SampleData[SequencesWithQuality]',
'--input-format', 'CasavaOneEightSingleLanePerSampleDirFmt', # For demultiplexed single end fastq
'--input-path', fastq_folder,
'--output-path', reads_qza]
subprocess.run(cmd)
@staticmethod
def qiime2_import_fastq_pe(fastq_folder, reads_qza):
"""
Import paired-end fastq files
https://docs.qiime2.org/2020.8/tutorials/importing/
:param fastq_folder:
:param reads_qza:
:return:
"""
cmd = ['qiime', 'tools', 'import',
'--type', 'SampleData[PairedEndSequencesWithQuality]',
'--input-format', 'CasavaOneEightSingleLanePerSampleDirFmt', # For demultiplexed paired-end fastq
'--input-path', fastq_folder,
'--output-path', reads_qza]
subprocess.run(cmd)
@staticmethod
def qiime2_demux_summary(reads_qza, output_qzv):
"""
Make summary of samples
Subsample 10,000 reads by default, only use 1000 instead (faster)
:param reads_qza:
:param output_qzv:
:return:
"""
cmd = ['qiime', 'demux', 'summarize',
'--p-n', str(1000),
'--i-data', reads_qza,
'--o-visualization', output_qzv]
subprocess.run(cmd)
@staticmethod
def qiime2_dada2_denoise_single(reads_qza, repseq_qza, table_qza, stats_qza):
"""
Denoise single-end reads with DADA2
:param reads_qza:
:param repseq_qza:
:param table_qza:
:param stats_qza:
:return:
"""
cmd = ['qiime', 'dada2', 'denoise-single',
'--p-n-threads', str(0),
'--p-trim-left', str(0), # No trimming
'--p-trunc-len', str(0), # No trimming
'--i-demultiplexed-seqs', reads_qza,
'--o-representative-sequences', repseq_qza,
'--o-table', table_qza,
'--o-denoising-stats', stats_qza]
subprocess.run(cmd)
@staticmethod
def qiime2_dada2_denoise_paired(reads_qza, repseq_qza, table_qza, stats_qza):
"""
Denoise paired-end reads with DADA2
:param reads_qza:
:param repseq_qza:
:param table_qza:
:param stats_qza:
:return:
"""
cmd = ['qiime', 'dada2', 'denoise-paired',
'--p-n-threads', str(0),
'--p-trim-left-f', str(0), # No trimming
'--p-trim-left-r', str(0), # No trimming
'--p-trunc-len-f', str(0), # No trimming
'--p-trunc-len-r', str(0), # No trimming
'--i-demultiplexed-seqs', reads_qza,
'--o-representative-sequences', repseq_qza,
'--o-table', table_qza,
'--o-denoising-stats', stats_qza]
subprocess.run(cmd)
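    # Illustrative chaining of the wrappers above for a paired-end run (file names are
    # example assumptions): import the demultiplexed fastq folder, summarize it, then
    # denoise with DADA2.
    #
    #   Qiime2Methods.qiime2_import_fastq_pe('fastq_dir', 'reads.qza')
    #   Qiime2Methods.qiime2_demux_summary('reads.qza', 'reads.qzv')
    #   Qiime2Methods.qiime2_dada2_denoise_paired('reads.qza', 'rep-seqs.qza',
    #                                             'table.qza', 'stats.qza')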
@staticmethod
def qiime2_metadata_tabulate(stats_qza, stats_qzv):
"""
:param stats_qza:
:param stats_qzv:
:return:
"""
cmd = ['qiime', 'metadata', 'tabulate',
'--m-input-file', stats_qza,
'--o-visualization', stats_qzv]
subprocess.run(cmd)
@staticmethod
def qiime2_export(qza, output_folder):
"""
Export biom table
:param qza: string. QIIME2 table
        :param output_folder: string. Output folder
:return:
"""
cmd = ['qiime', 'tools', 'export',
'--input-path', qza,
'--output-path', output_folder] # this is a folder
subprocess.run(cmd)
@staticmethod
def qiime2_sample_summarize(metadata_file, table_qza, table_qzv):
"""
:param metadata_file:
:param table_qza:
:param table_qzv:
:return:
"""
cmd = ['qiime', 'feature-table', 'summarize',
'--m-sample-metadata-file', metadata_file,
'--i-table', table_qza,
'--o-visualization', table_qzv]
subprocess.run(cmd)
@staticmethod
def qiime2_seq_sumamry(repseqs_qza, repseqs_qzv):
"""
:param repseqs_qza:
:param repseqs_qzv:
:return:
"""
cmd = ['qiime', 'feature-table', 'tabulate-seqs',
'--i-data', repseqs_qza,
'--o-visualization', repseqs_qzv]
subprocess.run(cmd)
@staticmethod
def qiime2_phylogeny(repseqs_qza, align_repseqs_qza, masked_align_repseqs_qza, unrooted_tree_qza, rooted_tree_qza):
"""
:param repseqs_qza:
:param align_repseqs_qza:
:param masked_align_repseqs_qza:
:param unrooted_tree_qza:
:param rooted_tree_qza:
:return:
"""
cmd = ['qiime', 'phylogeny', 'align-to-tree-mafft-fasttree',
'--p-n-threads', 'auto',
'--i-sequences', repseqs_qza,
'--o-alignment', align_repseqs_qza,
'--o-masked-alignment', masked_align_repseqs_qza,
'--o-tree', unrooted_tree_qza,
'--o-rooted-tree', rooted_tree_qza]
subprocess.run(cmd)
@staticmethod
def qiime2_core_diversity(cpu, metadata_file, rooted_tree_qza, table_qza, output_folder):
"""
Alpha and Beta analysis
:param cpu:
:param metadata_file:
:param rooted_tree_qza:
:param table_qza:
:param output_folder:
:return:
"""
cmd = ['qiime', 'diversity', 'core-metrics-phylogenetic',
'--p-n-jobs-or-threads', str(cpu),
'--p-sampling-depth', str(1000),
'--i-phylogeny', rooted_tree_qza,
'--i-table', table_qza,
'--m-metadata-file', metadata_file,
               '--output-dir', output_folder + '/core-metrics-results']
        subprocess.run(cmd)
@staticmethod
def qiime2_rarefaction(metadata_file, rooted_tree_qza, table_qza, rare_qzv):
"""
:param metadata_file:
:param rooted_tree_qza:
:param table_qza:
:param rare_qzv:
:return:
"""
cmd = ['qiime', 'diversity', 'alpha-rarefaction',
'--p-max-depth', str(4000),
'--i-phylogeny', rooted_tree_qza,
'--i-table', table_qza,
'--m-metadata-file', metadata_file,
'--o-visualization', rare_qzv]
subprocess.run(cmd)
@staticmethod
def qiime2_classify(qiime2_classifier, repseqs_qza, taxonomy_qza):
"""
Taxonomic analysis
:param qiime2_classifier:
:param repseqs_qza:
:param taxonomy_qza:
:return:
"""
cmd = ['qiime', 'feature-classifier', 'classify-sklearn',
'--p-n-jobs', str(-1),
'--i-classifier', qiime2_classifier,
'--i-reads', repseqs_qza,
'--o-classification', taxonomy_qza]
subprocess.run(cmd)
@staticmethod
def change_taxonomy_file_header(input_taxo):
"""
:param input_taxo:
:return:
"""
tmp = input_taxo + '.tmp'
with open(tmp, 'w') as out_f:
out_f.write('#OTUID\ttaxonomy\tconfidence\n') # write new header
with open(input_taxo, 'r') as in_f:
next(in_f) # skip header
for line in in_f:
out_f.write(line) # dump rest of file
# overwrite original taxonomy file
os.replace(tmp, input_taxo)
@staticmethod
    def biom_add_metadata(input_biom, taxonomy_tsv,
"""
Mostly copy-paste from DINO and timm library:
https://github.com/facebookresearch/dino
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import warnings
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import trunc_normal_, drop_path, to_2tuple
from functools import partial
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
q, k, v = self.qkv(x).reshape(B, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
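# Minimal shape sketch (illustrative only, not used by the model): Attention keeps the
# token count and channel dimension unchanged, so a (batch, tokens, dim) tensor maps to
# the same shape. The sizes below are arbitrary example values.
def _example_attention_shapes():
    attn = Attention(dim=768, num_heads=8, qkv_bias=True)
    x = torch.randn(2, 197, 768)      # e.g. 196 patch tokens + 1 [CLS] token
    out = attn(x)
    assert out.shape == x.shape       # (2, 197, 768)
    return out.shape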
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.window_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches_w, self.num_patches_h = self.window_size
self.num_patches = self.window_size[0] * self.window_size[1]
self.img_size = img_size
self.patch_size = patch_size
self.proj = nn.Conv2d(in_chans, embed_dim,
kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.proj(x)
return x
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(
1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Linear(feature_dim, embed_dim)
def forward(self, x):
x = self.backbone(x)[-1]
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class ViT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self,
model_name='vit_base_patch16_224',
img_size=384,
patch_size=16,
in_chans=3,
embed_dim=1024,
depth=24,
num_heads=16,
num_classes=19,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.1,
attn_drop_rate=0.,
drop_path_rate=0.,
hybrid_backbone=None,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
norm_cfg=None,
pos_embed_interp=False,
random_init=False,
align_corners=False,
use_checkpoint=False,
num_extra_tokens=1,
out_features=None,
**kwargs,
):
super(ViT, self).__init__()
self.model_name = model_name
self.img_size = img_size
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.depth = depth
self.num_heads = num_heads
self.num_classes = num_classes
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.qk_scale = qk_scale
self.drop_rate = drop_rate
self.attn_drop_rate = attn_drop_rate
self.drop_path_rate = drop_path_rate
self.hybrid_backbone = hybrid_backbone
self.norm_layer = norm_layer
self.norm_cfg = norm_cfg
self.pos_embed_interp = pos_embed_interp
self.random_init = random_init
self.align_corners = align_corners
self.use_checkpoint = use_checkpoint
self.num_extra_tokens = num_extra_tokens
self.out_features = out_features
self.out_indices = [int(name[5:]) for name in out_features]
# self.num_stages = self.depth
# self.out_indices = tuple(range(self.num_stages))
if self.hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
self.hybrid_backbone, img_size=self.img_size, in_chans=self.in_chans, embed_dim=self.embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=self.img_size, patch_size=self.patch_size, in_chans=self.in_chans, embed_dim=self.embed_dim)
self.num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
if self.num_extra_tokens == 2:
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(
1, self.num_patches + self.num_extra_tokens, self.embed_dim))
self.pos_drop = nn.Dropout(p=self.drop_rate)
# self.num_extra_tokens = self.pos_embed.shape[-2] - self.num_patches
dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate,
self.depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=self.embed_dim, num_heads=self.num_heads, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias,
qk_scale=self.qk_scale,
drop=self.drop_rate, attn_drop=self.attn_drop_rate, drop_path=dpr[i], norm_layer=self.norm_layer)
for i in range(self.depth)])
# NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here
# self.repr = nn.Linear(embed_dim, representation_size)
# self.repr_act = nn.Tanh()
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
nn.SyncBatchNorm(embed_dim),
nn.GELU(),
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
if self.num_extra_tokens==2:
            trunc_normal_(self.dist_token, std=.02)
self.apply(self._init_weights)
# self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
'''
def init_weights(self):
logger = get_root_logger()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
f'training start from scratch')
else:
assert 'checkpoint' in self.init_cfg, f'Only support ' \
f'specify `Pretrained` in ' \
f'`init_cfg` in ' \
f'{self.__class__.__name__} '
logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}")
load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger)
'''
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def _conv_filter(self, state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
def to_2D(self, x):
n, hw, c = x.shape
h = w = int(math.sqrt(hw))
x = x.transpose(1, 2).reshape(n, c, h, w)
return x
def to_1D(self, x):
n, c, h, w = x.shape
x = x.reshape(n, c, -1).transpose(1, 2)
return x
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - self.num_extra_tokens
N = self.pos_embed.shape[1] - self.num_extra_tokens
if npatch == N and w == h:
return self.pos_embed
class_ORdist_pos_embed = self.pos_embed[:, 0:self.num_extra_tokens]
patch_pos_embed = self.pos_embed[:, self.num_extra_tokens:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size[0]
h0 = h // self.patch_embed.patch_size[1]
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_ORdist_pos_embed, patch_pos_embed), dim=1)
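    # Worked example for interpolate_pos_encoding (illustrative numbers): with
    # patch_size 16 and a checkpoint trained at 224x224, N = 196 positions form a
    # 14x14 grid; feeding a 384x384 image gives w0 = h0 = 384 // 16 = 24, so the
    # 14x14 positional grid is bicubically resized to 24x24 = 576 positions and
    # re-concatenated with the class/distillation token embeddings.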
def prepare_tokens(self, x, mask=None):
B, nc, w, h = x.shape
# patch linear embedding
x = self.patch_embed(x)
# mask image modeling
if mask is not None:
x = self.mask_model(x, mask)
x = x.flatten(2).transpose(1, 2)
# add the [CLS] token to the embed patch tokens
all_tokens = [self.cls_token.expand(B, -1, -1)]
if self.num_extra_tokens == 2:
dist_tokens = self.dist_token.expand(B, -1, -1)
all_tokens.append(dist_tokens)
all_tokens.append(x)
x = torch.cat(all_tokens, dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward_features(self, x):
# print(f"==========shape of x is {x.shape}==========")
B, _, H, W = x.shape
Hp, Wp = H // self.patch_size, W // self.patch_size
x = self.prepare_tokens(x)
features = []
for i, blk in enumerate(self.blocks):
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if i in | |
- 1
dic["id"] = num
if len(items) > 0:
zone_bucket.append(dic)
#LOG.info('zone_bucket----%s'%zone_bucket)
return zone_bucket, num
def _get_storage_group_bucket(self, storage_groups, zones, num):
storage_group_bucket = []
for storage_group in storage_groups:
dic = {}
dic["name"] = storage_group
items = []
weight = 0
for zone in zones:
if zone["storage_group"] == storage_group:
item = {}
item["weight"] = zone["weight"]
item["zone_name"] = zone["name"]
items.append(item)
weight = weight + float(zone["weight"])
dic["weight"] = (weight != 0 and weight or FLAGS.default_weight)
dic["item"] = items
num = num - 1
dic["id"] = num
if len(items) > 0:
storage_group_bucket.append(dic)
return storage_group_bucket, num
def _get_root_bucket(self, storage_groups, num):
root_bucket = []
dic = {}
dic["name"] = "vsm"
items = []
for storage_group in storage_groups:
if storage_group["weight"] != 0:
item = {}
item["weight"] = storage_group["weight"]
item["storage_group_name"] = storage_group["name"]
items.append(item)
dic["item"] = items
num = num - 1
dic["id"] = num
root_bucket.append(dic)
return root_bucket, num
def _write_host_bucket(self, hosts):
for host in hosts:
self._write_to_crushmap("host " + host["name"] + " {\n")
self._write_to_crushmap(" id " + str(host["id"]) + "\n")
self._write_to_crushmap(" alg straw\n hash 0\n")
for item in host["item"]:
self._write_to_crushmap(" item " + item + " weight 1.00\n")
self._write_to_crushmap("}\n\n")
def _write_zone_bucket(self, zones):
for zone in zones:
self._write_to_crushmap("zone " + zone["name"] + " {\n")
self._write_to_crushmap(" id " + str(zone["id"]) + "\n")
self._write_to_crushmap(" alg straw\n hash 0\n")
for item in zone["item"]:
self._write_to_crushmap(" item " + item["host_name"] + \
" weight " + str(item["weight"]) + "\n")
self._write_to_crushmap("}\n\n")
def _write_storage_group_bucket(self, storage_groups):
for storage_group in storage_groups:
self._write_to_crushmap("storage_group " + storage_group["name"] + " {\n")
self._write_to_crushmap(" id " + str(storage_group["id"]) + "\n")
self._write_to_crushmap(" alg straw\n hash 0\n")
for item in storage_group["item"]:
self._write_to_crushmap(" item " + item["zone_name"] + \
" weight " + str(item["weight"]) + "\n")
self._write_to_crushmap("}\n\n")
def _write_root_bucket(self, roots):
for root in roots:
self._write_to_crushmap("root " + root["name"] + " {\n")
self._write_to_crushmap(" id " + str(root["id"]) + "\n")
self._write_to_crushmap(" alg straw\n hash 0\n")
for item in root["item"]:
self._write_to_crushmap(" item " + item["storage_group_name"] + \
" weight " + str(item["weight"]) + "\n")
self._write_to_crushmap("}\n\n")
def _key_for_sort(self, dic):
return dic['rule_id']
def _generate_rule(self, context, zone_tag):
osds = self.conductor_api.osd_state_get_all(context)
storage_groups = [ osd['storage_group']['id'] for osd in osds if osd['storage_group']]
storage_groups = list(set(storage_groups))
        if not storage_groups:
LOG.info("Error in getting storage_groups")
try:
raise exception.GetNoneError
except exception.GetNoneError, e:
LOG.error("%s:%s" %(e.code, e.message))
return False
LOG.info("DEBUG in generate rule begin")
LOG.info("DEBUG storage_groups from conductor %s " % storage_groups)
#sorted_storage_groups = sorted(storage_groups, key=self._key_for_sort)
#LOG.info("DEBUG storage_groups after sorted %s" % sorted_storage_groups)
sting_common = """ type replicated
min_size 0
max_size 10
"""
if zone_tag:
string_choose = """ step chooseleaf firstn 0 type zone
step emit
}
"""
else:
string_choose = """ step chooseleaf firstn 0 type host
step emit
}
"""
for storage_group_id in storage_groups:
storage_group = db.storage_group_get(context,storage_group_id)
storage_group_name = storage_group["name"]
rule_id = storage_group["rule_id"]
string = ""
string = string + "\nrule " + storage_group_name + " {\n"
string = string + " ruleset " + str(rule_id) + "\n"
string = string + sting_common
string = string + " step take " + storage_group_name + "\n"
string = string + string_choose
self._write_to_crushmap(string)
#if storage_group_name.find("value_") == -1:
# string = ""
# string = string + "\nrule " + storage_group_name + " {\n"
# string = string + " ruleset " + str(rule_id) + "\n"
# string = string + sting_common
# string = string + " step take " + storage_group_name + "\n"
# string = string + string_choose
# self._write_to_crushmap(string)
#else:
# string = ""
# string = string + "\nrule " + storage_group_name + " {\n"
# string = string + " ruleset " + str(rule_id) + "\n"
# string = string + " type replicated\n min_size 0\n"
# string = string + " max_size 10\n"
# string = string + " step take " + storage_group_name + "\n"
# if zone_tag:
# string = string + " step chooseleaf firstn 1 type zone\n"
# else:
# string = string + " step chooseleaf firstn 1 type host\n"
# string = string + " step emit\n"
# string = string + " step take " + \
# storage_group_name.replace('value_', '') + "\n"
# if zone_tag:
# string = string + " step chooseleaf firstn -1 type zone\n"
# else:
# string = string + " step chooseleaf firstn -1 type host\n"
# string = string + " step emit\n}\n"
# self._write_to_crushmap(string)
return True
def _gen_rule(self):
string = """\n# rules
rule capacity {
ruleset 0
type replicated
min_size 0
max_size 10
step take capacity
step chooseleaf firstn 0 type host
step emit
}
rule performance {
ruleset 1
type replicated
min_size 0
max_size 10
step take performance
step chooseleaf firstn 0 type host
step emit
}
rule high_performance {
ruleset 2
type replicated
min_size 0
max_size 10
step take high_performance
step chooseleaf firstn 0 type host
step emit
}
rule value_capacity {
ruleset 3
type replicated
min_size 0
max_size 10
step take value_capacity
step chooseleaf firstn 1 type host
step emit
step take capacity
step chooseleaf firstn -1 type host
step emit
}
rule value_performance {
ruleset 4
type replicated
min_size 0
max_size 10
step take value_performance
step chooseleaf firstn 1 type host
step emit
step take performance
step chooseleaf firstn -1 type host
step emit
}
# end crush map
"""
self._write_to_crushmap(string)
class DiamondDriver(object):
"""Create diamond file"""
def __init__(self, execute=utils.execute, *args, **kwargs):
self._diamond_config_path = "/etc/diamond/collectors/"
def change_collector_conf(self,collector,values):
'''
:param collector:
:param values: {'enabled':True,
'interval':15
}
:return:
'''
# try:
# out, err = utils.execute('kill_diamond',
# 'll',
# run_as_root=True)
# except:
# LOG.info("kill_diamond error:%s--%s"%(out,err))
config_file = '%s%s.conf'%(self._diamond_config_path,collector)
keys = values.keys()
content = []
for key in keys:
content.append('%s=%s'%(key,values[key]))
out, err = utils.execute('rm','-rf', config_file, run_as_root=True)
out, err = utils.execute('cp','/etc/vsm/vsm.conf', config_file, run_as_root=True)
for line in content:
out, err = utils.execute('sed','-i','1i\%s'%line, config_file, run_as_root=True)
out, err = utils.execute('sed','-i','%s,$d'%(len(content)+1), config_file, run_as_root=True)
out, err = utils.execute('service', 'diamond', 'restart', run_as_root=True)
return out
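# Illustrative usage (the collector name and values below are hypothetical):
#
#   driver = DiamondDriver()
#   driver.change_collector_conf('CPUCollector', {'enabled': True, 'interval': 15})
#
# This removes /etc/diamond/collectors/CPUCollector.conf, copies /etc/vsm/vsm.conf into
# its place, prepends one "key=value" line per entry of the values dict, deletes the
# remainder of the copied file, and finally restarts the diamond service.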
class ManagerCrushMapDriver(object):
"""Create crushmap file"""
def __init__(self, execute=utils.execute, *args, **kwargs):
self.conductor_api = conductor.API()
self.conductor_rpcapi = conductor_rpcapi.ConductorAPI()
self._crushmap_path = "/var/run/vsm/mg_crushmap"
def _write_to_crushmap(self, string):
utils.execute('chown', '-R', 'vsm:vsm', self._crushmap_path+'_decompiled',
run_as_root=True)
fd = open(self._crushmap_path+'_decompiled', 'a')
fd.write(string)
fd.close()
def get_crushmap(self):
LOG.info("DEBUG Begin to get crushmap")
utils.execute('ceph', 'osd', 'getcrushmap', '-o',
self._crushmap_path,'--keyring',FLAGS.keyring_admin, run_as_root=True)
utils.execute('crushtool', '-d', self._crushmap_path, '-o',
self._crushmap_path+'_decompiled', run_as_root=True)
return True
def set_crushmap(self):
LOG.info("DEBUG Begin to set crushmap")
utils.execute('crushtool', '-c', self._crushmap_path+'_decompiled', '-o',
self._crushmap_path, run_as_root=True)
utils.execute('ceph', 'osd', 'setcrushmap', '-i',
self._crushmap_path, run_as_root=True)
return True
def _generate_one_rule(self,rule_info):
'''
rule_info:{'rule_name':'test-rule',
'rule_id':None,
'type':'replicated',
'min_size':0,
'max_size':10,
'takes':[{'take_id':-12,
'choose_leaf_type':'host',
'choose_num':2,
},
]
}
:return:{'rule_id':3}
'''
crushmap = get_crushmap_json_format()
rule_id = rule_info.get('rule_id',None)
if rule_id is None:
rule_ids =[rule['rule_id'] for rule in crushmap._rules]
rule_ids.sort()
rule_id = rule_ids[-1]+1
types = crushmap._types
types.sort(key=operator.itemgetter('type_id'))
choose_leaf_type_default = types[1]['name']
rule_type = rule_info.get('type','replicated')
min_size = rule_info.get('min_size',0)
max_size = rule_info.get('max_size',10)
rule_name = rule_info.get('rule_name')
takes = rule_info.get('takes')
sting_common = """ type %s
min_size %s
max_size %s
"""%(rule_type,str(min_size),str(max_size))
string = ""
string = string + "\nrule " + rule_name + " {\n"
string = string + " ruleset " + str(rule_id) + "\n"
string = string + sting_common
for take in takes:
take_name = crushmap.get_bucket_by_id(int(take.get('take_id')))['name']
take_choose_leaf_type = take.get('choose_leaf_type',choose_leaf_type_default)
take_choose_num = take.get('choose_num',1)
string_choose = """ step chooseleaf firstn %s type %s
step emit
"""%(str(take_choose_num),take_choose_leaf_type)
string = string + " step take " + take_name + "\n" + string_choose
string = string +" }\n"
LOG.info('---string-----%s---'%string)
self.get_crushmap()
self._write_to_crushmap(string)
self.set_crushmap()
return {'rule_id':rule_id}
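# Illustrative example (hypothetical bucket name and ruleset id): for the rule_info shown
# in the docstring above, assuming the bucket with id -12 is named 'host_group_a' and 3 is
# the next free ruleset id, the method appends roughly the following rule before
# recompiling and setting the crushmap, and returns {'rule_id': 3}:
#
#   rule test-rule {
#       ruleset 3
#       type replicated
#       min_size 0
#       max_size 10
#       step take host_group_a
#       step chooseleaf firstn 2 type host
#       step emit
#   }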
def _modify_takes_of_rule(self,rule_info):
'''
rule_info:{'rule_name':'test-rule',
'rule_id':None,
'type':'replicated',
'min_size':0,
'max_size':10,
'takes':[{'take_id':-12,
'choose_leaf_type':'host',
'choose_num':2,
},
]
}
:return:{'rule_id':3}
'''
crushmap = get_crushmap_json_format()
rule_name = rule_info.get('rule_name')
if crushmap.get_rules_by_name(name = rule_name ) is None:
return self._generate_one_rule(rule_info)
types = crushmap._types
types.sort(key=operator.itemgetter('type_id'))
choose_leaf_type_default = types[1]['name']
# rule_type = rule_info.get('type','')
# min_size = rule_info.get('min_size')
# max_size = rule_info.get('max_size')
takes = rule_info.get('takes')
self.get_crushmap()
fd = open(self._crushmap_path+'_decompiled', 'r')
rule_start_line = None
rule_end_line = None
insert_take_line = None
line_number = -1
lines = fd.readlines()
fd.close()
new_lines = []
LOG.info('rulename=====%s'%rule_name)
for line in lines:
line_number += 1
LOG.info('old lines=====%s----type=%s'%(line,type(line)))
if 'rule %s {'%rule_name in line:
rule_start_line = line_number
if rule_start_line is not None:
if rule_end_line is None and '}' in line:
rule_end_line = line_number
if rule_start_line is not None and rule_end_line is None:
if 'ruleset ' in line:
rule_id = line[0:-1].split(' ')[-1]
if 'step take' in line and insert_take_line is None:
insert_take_line = line_number
<filename>policies.py
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Policies are functions mapping contexts to actions.
Policies are described in
"<NAME>., <NAME>., <NAME>., & <NAME>. (2021, March).
Confident off-policy evaluation and selection through self-normalized importance
weighting. In International Conference on Artificial Intelligence and Statistics
(pp. 640-648). PMLR.".
class SoftmaxDataPolicy is a mock-up policy which can hold either a training
sample or a testing sample (each of which consists of contexts and labels).
When either set of contexts is passed to the policy (get_probs(...))
it returns action probabilities associated with those contexts.
Note that this is a mock-up policy, so only one of the two samples is supported.
class SoftmaxGAPolicy implements a softmax policy with linear parameterized
potential, where parameters are fitted by the gradient ascent maximizing
either importance weighted or self-normalized importance weighted estimator.
"""
import abc
import enum
import math
from typing import Sequence, NamedTuple
from absl import logging
import jax
from jax import numpy as jnp
from jax import scipy as jsc
import numpy as np
import scipy
import sklearn.preprocessing as skl_prep
from offpolicy_selection_eslb.utils import sample_from_simplices_m_times
class Query(NamedTuple):
"""Actions generated by a (randomized) policy when given a set of contexts.
Attributes:
actions: n-times-1 Array -- chosen (sampled) actions
probabilities: n-times-1 Array -- corresponding probabilities
"""
actions: np.ndarray
probabilities: np.ndarray
def log_vhat_importance_weighting(
parameters: np.ndarray,
temperature: float,
contexts: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
b_prob: np.ndarray,
) -> np.ndarray:
"""Returns the log of importance weighted estimator.
Returns the log of importance weighted estimator where each
importance weight is computed w.r.t. the softmax target policy defined
w.r.t. a linear model as defined in the description of a class.
Args:
parameters: Parameters of the linear model of a target policy.
temperature: Positive float controlling the temperature of a Softmax
policy.
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
actions: Actions (integers).
rewards: Rewards (float).
b_prob: Probabilities corresponding to (context, action) pairs
according to the behavior policy.
Returns: The logarithm of importance-weighted estimate.
"""
n, _ = contexts.shape
v = (1.0 / temperature) * contexts.dot(parameters)
pot = (1.0 / temperature) * (contexts *
parameters[:, actions].T).sum(axis=1)
a = jnp.log(rewards / (n * b_prob)) - jsc.special.logsumexp(v, axis=1)
rs = jsc.special.logsumexp(pot + a, axis=0)
return rs
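# Worked form of the computation above: with v = contexts.dot(parameters) / temperature
# (an n-by-K matrix of potentials) and pot[i] the potential of the logged action a_i,
# the target-policy probability is pi(a_i | x_i) = exp(pot[i]) / sum_k exp(v[i, k]), so
#
#   rs = logsumexp_i( pot[i] + log(r_i / (n * b_i)) - logsumexp_k v[i, k] )
#      = log( (1/n) * sum_i (pi(a_i | x_i) / b_i) * r_i ),
#
# i.e. the log of the usual importance-weighted value estimate. The log-space form
# assumes strictly positive rewards.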
def log_vhat_sn_importance_weighting(
parameters: np.ndarray,
temperature: float,
contexts: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
b_prob: np.ndarray,
) -> np.ndarray:
"""Returns a log of self-normalized (SN) importance weighted estimator.
Returns a log of (SN) importance weighted estimator where each
importance weight is computed w.r.t. the softmax target policy defined
w.r.t. a linear model as defined in the description of a class.
Args:
parameters: Parameters of the linear model of a target policy.
temperature: Positive float controlling the temperature of a Softmax
policy.
contexts: Array of contexts (n-times-d, d=data dim., n=sample size).
actions: Actions (integers).
rewards: Rewards (float).
b_prob: Probabilities corresponding to (context, action) pairs
according to the behavior policy.
Returns: The logarithm of SN importance-weighted estimate.
"""
v = (1.0 / temperature) * contexts.dot(parameters)
pot = (1.0 / temperature) * (contexts *
parameters[:, actions].T).sum(axis=1)
a = jnp.log(rewards / b_prob) - jsc.special.logsumexp(v, axis=1)
ln_numer = jsc.special.logsumexp(pot + a, axis=0)
a = -jnp.log(b_prob) - jsc.special.logsumexp(v, axis=1)
ln_denom = jsc.special.logsumexp(pot + a, axis=0)
return ln_numer - ln_denom
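# Worked form: with importance weights w_i = pi(a_i | x_i) / b_i,
# ln_numer = log(sum_i w_i * r_i) and ln_denom = log(sum_i w_i), so the return value is
#
#   log( sum_i w_i * r_i / sum_i w_i ),
#
# the log of the self-normalized importance-weighted estimate (again assuming strictly
# positive rewards so the logarithms are defined).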
class Policy(abc.ABC):
"""A Policy samples actions given contexts.
"""
@abc.abstractmethod
def query(self, contexts: np.ndarray) -> Query:
"""Returns actions and their probs sampled by Policy given the contexts.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
Returns: A Tuple of arrays of actions (int) and corresponding probs (float)
"""
@abc.abstractmethod
def get_probs(self, contexts: np.ndarray) -> np.ndarray:
"""Returns probability distribution over actions for each context.
The softmax policy is defined as a probability vector
exp(alt_bin_labels / temp) / sum(exp(alt_bin_labels / temp))
where temp is a temperature of a policy and
alt_bin_labels is a binary encoding of labels altered by alter_labels(...)
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
Returns: Array of probabilities according to the policy, where K
is the number of actions (size n-times-K).
Raises:
NotImplementedError: when contexts is not training or testing contexts
"""
class TrainedPolicyObjType(enum.Enum):
"""Softmax gradient ascent fitted policy types with Objective function.
TrainedPolicyObjType.IW = importance-weighted estimator.
TrainedPolicyObjType.SNIW = self-normalized importance-weighted.
"""
IW = "IW"
SNIW = "SNIW"
def __str__(self):
return str(self.value)
class SoftmaxDataPolicy(Policy):
"""Memorization policy (using true labels).
This object can hold either a training sample or a testing sample
(each of which consists of contexts and labels).
When either set of contexts is passed to the policy (get_probs(...))
it returns action probabilities associated with those contexts.
Note that this is a mock-up policy, so only one of the two samples is
supported.
Attributes:
action_set: A list of unique integer actions.
train_contexts: A n-times-d array of training contexts (d=data dim., n=sample
size).
train_labels: A n-array of training labels.
test_contexts: A n'-times-d array of testing contexts (d=data dim., n'=sample
size).
test_labels: A n'-array of testing labels.
temperature: A positive float controlling the temp. of a Softmax policy.
faulty_actions: A list of labels where the behavior policy makes mistakes.
rand: Random state of numpy.random.RandomState type.
"""
def __init__(
self,
train_contexts: np.ndarray,
train_labels: np.ndarray,
test_contexts: np.ndarray,
test_labels: np.ndarray,
action_set: Sequence[int],
temperature: float,
faulty_actions: Sequence[int],
):
"""Constructs a Policy.
Args:
train_contexts: Array of training contexts (n-times-d, d=data dim.,
n=sample size).
train_labels: Array of training labels (size n).
test_contexts: Array of testing contexts (n'-times-d, d=data dim.,
n'=sample size).
test_labels: Array of testing labels (size n').
action_set: List of unique integer actions.
temperature: Positive float controlling the temperature of a Softmax
policy.
faulty_actions: List of labels on which the behavior policy makes
mistakes.
"""
self.action_set = action_set
self.train_contexts = train_contexts
self.train_labels = train_labels
self.test_contexts = test_contexts
self.test_labels = test_labels
self.temperature = temperature
self.faulty_actions = set(faulty_actions)
self.reset_noise(0)
def reset_noise(self, seed: int):
"""Resets a random state given a seed.
Args:
seed: Integer seed for random state
"""
self.rand = np.random.RandomState(seed)
def alter_labels(self, labels: np.ndarray):
"""Returns altered labels according to the self.faulty_actions spec.
Labels are altered by shifting each label contained in self.faulty_action
to one forward (or to 0 if we have an overflow).
Args:
labels: Vector of labels (size 1 by n=sample size)
Returns:
A vector of the same size with all entries in self.faulty_actions shifted.
"""
num_actions = len(self.action_set)
fault = np.zeros(len(labels))
for i in range(len(labels)):
if labels[i] in self.faulty_actions:
fault[i] = 1
return (labels + fault) % num_actions # faulty actions get shifted by one
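# Illustrative example (hypothetical values): with action_set = range(10), so
# num_actions = 10, and faulty_actions = {3, 9}, the label vector [1, 3, 9, 5] is
# altered to [1, 4, 0, 5] -- labels 3 and 9 are shifted forward by one (9 wraps
# around to 0), while labels outside faulty_actions are left unchanged.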
def get_probs(self, contexts: np.ndarray):
"""Returns probability distribution over actions for given contexts.
The softmax policy is defined as a probability vector
exp(alt_bin_labels / temp) / sum(exp(alt_bin_labels / temp))
where temp is a temperature of a policy and
alt_bin_labels is a binary encoding of labels altered by alter_labels(...)
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
Returns: Array of probabilities according to the policy, where K
is the number of actions (size n-times-K).
Raises:
NotImplementedError: when contexts is not training or testing contexts
"""
# predictions get altered by internal noise :
if contexts is self.train_contexts:
alt_labels = self.alter_labels(self.train_labels)
elif contexts is self.test_contexts:
alt_labels = self.alter_labels(self.test_labels)
else:
raise NotImplementedError
bin_alt_labels = skl_prep.label_binarize(
alt_labels, classes=self.action_set)
v = np.exp(bin_alt_labels / self.temperature)
v = v / v.sum(axis=1)[:, np.newaxis]
return v
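# Numeric illustration (hypothetical values): with 3 actions, temperature = 0.5 and an
# altered label of 1, the binarized row is [0, 1, 0], so
#   v = [exp(0), exp(2), exp(0)] = [1.0, 7.389, 1.0],
# which normalizes to roughly [0.107, 0.787, 0.107]: most of the probability mass lands
# on the (possibly altered) label and the rest is spread evenly over the other actions.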
def get_probs_by_actions(self, contexts: np.ndarray, actions: np.ndarray):
"""Returns probabilities for each given action in each given context.
Args:
contexts: Array of contexts (n-times-d, d=data dim., n=sample size), which
are either training or testing contexts provided during the
initialization.
actions: Array of actions (integers) for which probabilities are returned.
<reponame>zbraniecki/pontoon<gh_stars>1-10
"""
Models for working with remote translation data stored in a VCS.
"""
import logging
import os
import shutil
from itertools import chain
from datetime import datetime
from django.utils import timezone
from django.utils.functional import cached_property
from pontoon.base import MOZILLA_REPOS
from pontoon.sync.exceptions import ParseError
from pontoon.sync.utils import (
is_hidden,
directory_contains_resources,
is_resource,
is_asymmetric_resource,
locale_directory_path,
locale_to_source_path,
source_to_locale_path,
)
from pontoon.sync.vcs.repositories import get_changed_files
log = logging.getLogger(__name__)
class MissingSourceRepository(Exception):
"""
Raised when project can't find the repository
which contains source files.
"""
class MissingSourceDirectoryError(Exception):
"""Raised when sync can't find the source directory for the locales."""
class MissingLocaleDirectoryError(IOError):
"""Raised when sync can't find the locale directory."""
class VCSProject(object):
"""
Container for project data that is stored on the filesystem and
pulled from a remote VCS.
"""
SOURCE_DIR_SCORES = {
'templates': 3,
'en-US': 2,
'en': 1
}
SOURCE_DIR_NAMES = SOURCE_DIR_SCORES.keys()
def __init__(self, db_project, locales=None, obsolete_entities_paths=None, full_scan=False):
"""
Load resource paths from the given db_project and parse them
for translation data.
:param Project db_project:
Project model instance for the project we're going to be
reading files for.
:param list locales:
List of Locale model instances for the locales that we want
to parse. Defaults to parsing resources for all enabled
locales on the project.
:param list obsolete_entities_paths:
List of paths to remove translations of obsolete entities from
:param bool full_scan:
Scans all resources in repository
"""
self.db_project = db_project
self.locales = locales if locales is not None else db_project.locales.all()
self.obsolete_entities_paths = obsolete_entities_paths or []
self.full_scan = full_scan
self.synced_locales = set()
@cached_property
def changed_files(self):
if self.full_scan:
# All files are marked as changed
return None
if self.locales:
return self.changed_locales_files
else:
return self.changed_source_files[0]
@cached_property
def changed_source_files(self):
"""
Returns a tuple of changed and removed source files in the project:
(changed_files, removed_files)
"""
source_resources_repo = self.db_project.source_repository
if not source_resources_repo:
raise MissingSourceRepository(self.db_project)
source_directory = self.source_directory_path()
if source_resources_repo.last_synced_revisions:
last_revision = source_resources_repo.last_synced_revisions.get('single_locale')
else:
last_revision = None
modified_files, removed_files = get_changed_files(source_resources_repo.type, source_directory, last_revision)
# Unify filesystem and data model file extensions
modified_files = map(source_to_locale_path, modified_files)
removed_files = map(source_to_locale_path, removed_files)
if source_resources_repo.source_repo or not last_revision:
get_path = lambda path: (path, [])
else:
relative_source_path = source_directory[len(source_resources_repo.checkout_path):].lstrip(os.sep)
get_path = lambda path: (path[len(relative_source_path):].lstrip(os.sep), [])
return dict(map(get_path, modified_files)), dict(map(get_path, removed_files))
@cached_property
def changed_locales_files(self):
"""
Map of repositories and files changed within them after the latest update.
"""
files = {}
def find_changed_files(repo, locale=None):
"""
Returns the list of files that changed in the given repository (for the
given locale, if any) since the last synced revision.
"""
if repo.last_synced_revisions:
last_revision = repo.last_synced_revisions.get(locale.code if locale else 'single_locale')
else:
last_revision = None
# We have to filter out paths that are locale files
checkout_path = repo.locale_checkout_path(locale) if locale else repo.checkout_path
return get_changed_files(repo.type, checkout_path, last_revision)[0]
for repo in self.db_project.repositories.exclude(source_repo=True):
if repo.multi_locale:
for locale in self.db_project.locales.all():
for path in find_changed_files(repo, locale):
files.setdefault(path, []).append(locale)
else:
for changed_file in find_changed_files(repo):
path_info = self.get_path_info(changed_file, repo)
if path_info:
path, locale_path, locale = path_info
path = path[len(locale_path):].lstrip(os.sep)
files.setdefault(path, []).append(locale)
return files
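# The mapping built above uses resource paths (relative to the locale directory) as keys
# and the list of locales whose checkouts changed that file as values, e.g. (hypothetical)
# {'browser/browser.po': [<Locale: fr>, <Locale: de>], 'mobile/app.ftl': [<Locale: it>]}.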
def get_path_info(self, path, repo):
"""
Checks if the path is inside one of the locale directories.
Returns a tuple with information on given path or None if can't find any.
Tuple contains:
- path to the given file
- path to the locale directory
- locale code
"""
if is_hidden(path):
return None
try:
locale_path, locale = next((p, l) for p, l in self.locale_directories(repo).items() if path.startswith(p))
except StopIteration:
return None
return path, locale_path, locale
def locale_directories(self, repo):
"""
A map of paths to their respective locales.
"""
locales_paths = {}
for locale in self.db_project.locales.all():
path = locale_directory_path(repo.checkout_path, locale.code)[len(repo.checkout_path):].lstrip(os.sep)
locales_paths[path] = locale
return locales_paths
@cached_property
def resources(self):
"""
Lazy-loaded mapping of relative paths -> VCSResources.
Waiting until first access both avoids unnecessary file reads
and allows tests that don't need to touch the resources to run
with less mocking.
"""
resources = {}
for path in self.relative_resource_paths():
locales = self.db_project.unsynced_locales
if (self.changed_files is not None and
((not self.changed_files or path not in self.changed_files) and
path not in self.obsolete_entities_paths)):
if not locales:
log.debug('Skipping unchanged file: {}'.format(path))
continue
else:
if self.changed_files is None or path in self.obsolete_entities_paths:
locales += self.locales
else:
locales += self.changed_files[path]
locales = set(locales)
map(self.synced_locales.add, locales)
log.debug('Resource file {} for {}'.format(path, ','.join([l.code for l in locales]) or 'source'))
try:
resources[path] = VCSResource(self, path, locales=locales)
except ParseError as err:
log.error('Skipping resource {path} due to ParseError: {err}'.format(
path=path, err=err
))
return resources
@property
def entities(self):
return chain.from_iterable(
resource.entities.values() for resource in self.resources.values()
)
@property
def checkout_path(self):
return self.db_project.checkout_path
def source_directory_path(self):
"""
Path to the directory where source strings are stored.
Paths are identified using a scoring system; more likely
directory names get higher scores, as do directories with
formats that are only used for source strings.
"""
possible_sources = []
for root, dirnames, filenames in os.walk(self.checkout_path):
for dirname in dirnames:
if dirname in self.SOURCE_DIR_NAMES:
score = self.SOURCE_DIR_SCORES[dirname]
# Ensure the matched directory contains resources.
directory_path = os.path.join(root, dirname)
if directory_contains_resources(directory_path):
# Extra points for source resources!
if directory_contains_resources(directory_path, source_only=True):
score += 3
possible_sources.append((directory_path, score))
if possible_sources:
return max(possible_sources, key=lambda s: s[1])[0]
else:
raise MissingSourceDirectoryError('No source directory found for project {0}'.format(self.db_project.slug))
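# Worked example of the scoring (hypothetical layout): a directory named 'templates' that
# contains resources scores 3, plus 3 more if those resources are source-only formats
# (e.g. .pot), for a total of 6; an 'en-US' directory holding only regular resources
# scores 2; an 'en' directory scores 1. The highest-scoring candidate directory wins.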
def relative_resource_paths(self):
"""
List of paths relative to the locale directories returned by
self.locale_directory_path() for each resource in this project.
"""
path = self.source_directory_path()
for absolute_path in self.resources_for_path(path):
# .pot files in the source directory need to be renamed to
# .po files for the locale directories.
if absolute_path.endswith('.pot'):
absolute_path = absolute_path[:-1]
yield os.path.relpath(absolute_path, path)
def resources_for_path(self, path):
"""
List of paths for all supported resources found within the given
path.
"""
for root, dirnames, filenames in os.walk(path):
if is_hidden(root):
continue
# Ignore certain files in Mozilla repositories.
if self.db_project.repository_url in MOZILLA_REPOS:
filenames = [f for f in filenames if not f.endswith('region.properties')]
for filename in filenames:
if is_resource(filename):
yield os.path.join(root, filename)
def create_locale_directory(self, locale, path):
if not self.db_project.has_multi_locale_repositories:
source_directory = self.source_directory_path()
parent_directory = os.path.abspath(os.path.join(source_directory, os.pardir))
locale_directory = os.path.join(parent_directory, locale.code.replace('-', '_'))
# For asymmetric formats, create empty folder
if is_asymmetric_resource(path):
os.makedirs(locale_directory)
# For other formats, copy resources from source directory
else:
shutil.copytree(source_directory, locale_directory)
for root, dirnames, filenames in os.walk(locale_directory):
for filename in filenames:
path = os.path.join(root, filename)
if is_resource(filename):
os.rename(path, source_to_locale_path(path))
else:
os.remove(path)
return locale_directory
else:
raise MissingLocaleDirectoryError(
'Directory for locale `{0}` not found'.format(locale.code)
)
class VCSResource(object):
"""Represents a single resource across multiple locales."""
def __init__(self, vcs_project, path, locales=None):
"""
Load the resource file for each enabled locale and store its
translations in VCSEntity instances.
"""
from pontoon.base.models import Locale
from pontoon.sync import formats # Avoid circular import.
self.vcs_project = vcs_project
self.path = path
self.locales = locales or []
self.files = {}
self.entities = {}
# Create entities using resources from the source directory,
source_resource_path = os.path.join(vcs_project.source_directory_path(), self.path)
source_resource_path = locale_to_source_path(source_resource_path)
source_resource_file = formats.parse(source_resource_path, locale=Locale.objects.get(code='en-US'))
for index, translation in enumerate(source_resource_file.translations):
vcs_entity = VCSEntity(
resource=self,
key=translation.key,
string=translation.source_string,
string_plural=translation.source_string_plural,
comments=translation.comments,
source=translation.source,
order=translation.order or index
)
self.entities[vcs_entity.key] = vcs_entity
# Fill in translations from the locale resources.
for locale in locales:
try:
locale_directory = locale_directory_path(
vcs_project.checkout_path, locale.code
)
except IOError:
locale_directory = self.vcs_project.create_locale_directory(
locale, self.path
)
resource_path = os.path.join(locale_directory, self.path)
log.debug('Parsing resource file: %s', resource_path)
try:
resource_file = formats.parse(resource_path, source_resource_path, locale)
except (IOError, ParseError):
continue # File doesn't exist or is invalid, let's move on
self.files[locale] = resource_file
log.debug('Discovered %s translations.', len(resource_file.translations))
for translation in resource_file.translations:
try:
self.entities[translation.key].translations[locale.code] = translation
except KeyError:
# If the source is missing an entity, we consider it
# deleted and don't add it.
pass
def save(self):
"""
Save changes made to any of the translations in this resource
back to the filesystem for all locales.
"""
for locale, resource_file in self.files.items():
resource_file.save(locale)
class VCSEntity(object):
"""
An Entity is a single string to be translated, and a VCSEntity
stores the translations for an entity from several locales.
"""
def __init__(self, resource, key, string, comments, source, string_plural='',
order=0):
self.resource = resource
self.key = key
self.string = string
self.string_plural = string_plural
self.translations = {}
self.comments = comments
self.source = source
self.order = order
def has_translation_for(self, locale_code):
"""Return True if a translation exists for the given locale."""
return locale_code in self.translations
class VCSTranslation(object):
"""
A single translation of a source string into another language.
"""
+ 0.0219849*m.x301
- 0.00939134*m.x302 + 0.203443*m.x303 == 0)
m.c207 = Constraint(expr= - m.x102 + 0.101389*m.x204 + 0.00922513*m.x205 + 0.0026992*m.x206 + 0.00246784*m.x207
+ 0.00231482*m.x208 - 0.00125222*m.x209 - 0.00392467*m.x210 - 0.0046966*m.x211
- 0.0142117*m.x212 - 0.00150584*m.x213 - 0.00472481*m.x214 - 0.00116843*m.x215
- 0.00080131*m.x216 + 0.0048818*m.x217 - 0.00479483*m.x218 + 0.00726609*m.x219
+ 0.00902802*m.x220 + 0.0114247*m.x221 - 0.00483357*m.x222 + 9.94895E-5*m.x223
+ 0.00286308*m.x224 - 3.33076E-5*m.x225 + 0.00353482*m.x226 - 0.00220136*m.x227
+ 0.00200825*m.x228 - 0.00767398*m.x229 - 0.00197822*m.x230 - 1.10346E-5*m.x231
- 0.00119407*m.x232 + 0.000602811*m.x233 + 0.0132444*m.x234 - 0.00406553*m.x235
+ 0.00422631*m.x236 - 0.00343731*m.x237 + 0.00651535*m.x238 - 0.00313138*m.x239
+ 0.00359841*m.x240 + 0.00452148*m.x241 + 0.00366834*m.x242 + 0.00592328*m.x243
+ 0.00503367*m.x244 + 0.00629207*m.x245 + 0.00300993*m.x246 - 0.00042207*m.x247
+ 0.0126161*m.x248 + 0.000539029*m.x249 + 0.00171572*m.x250 + 0.00871066*m.x251
- 0.000658519*m.x252 + 0.00632557*m.x253 - 1.12481E-5*m.x254 + 0.00423842*m.x255
- 0.000128391*m.x256 + 0.00428623*m.x257 + 0.00221565*m.x258 + 0.00293236*m.x259
+ 0.00642301*m.x260 - 0.00424851*m.x261 + 0.00130686*m.x262 + 0.0135606*m.x263
- 0.000827581*m.x264 + 0.000319199*m.x265 - 0.00408084*m.x266 - 0.0130473*m.x267
- 0.000488486*m.x268 + 0.00109418*m.x269 + 0.00331627*m.x270 + 0.0045216*m.x271
- 0.00260415*m.x272 - 0.000565556*m.x273 + 0.00157409*m.x274 + 0.00434565*m.x275
- 0.00256408*m.x276 + 6.86355E-5*m.x277 + 0.000115271*m.x278 + 0.00407719*m.x279
- 0.000717764*m.x280 + 0.00421776*m.x281 - 0.00185436*m.x282 + 0.0139088*m.x283
+ 0.00169537*m.x284 - 0.00322776*m.x285 + 0.00738761*m.x286 - 0.00338701*m.x287
+ 0.000632136*m.x288 + 0.00125905*m.x289 + 0.00498931*m.x290 - 0.00223203*m.x291
- 0.00161161*m.x292 + 0.000698504*m.x293 + 0.000973632*m.x294 + 0.00322052*m.x295
- 0.000732426*m.x296 + 0.000574642*m.x297 + 0.0043636*m.x298 - 0.00725725*m.x299
+ 0.00589969*m.x300 + 0.00253513*m.x301 + 3.03939E-5*m.x302 + 0.0013209*m.x303 == 0)
m.c208 = Constraint(expr= - m.x103 + 0.00922513*m.x204 + 0.0975221*m.x205 + 0.0302565*m.x206 + 0.00195468*m.x207
+ 0.00763018*m.x208 + 0.0135627*m.x209 + 0.005177*m.x210 - 0.00166221*m.x211
+ 0.0123801*m.x212 + 0.00801131*m.x213 + 0.0131773*m.x214 - 0.000578789*m.x215
- 0.00121871*m.x216 + 0.00571751*m.x217 + 0.00647641*m.x218 + 0.006769*m.x219
+ 0.00782192*m.x220 + 0.00707125*m.x221 + 0.0133473*m.x222 + 0.013462*m.x223
+ 0.00112892*m.x224 + 0.0172224*m.x225 + 0.000134992*m.x226 - 0.000119948*m.x227
+ 0.000402454*m.x228 + 0.0170077*m.x229 + 0.00468054*m.x230 + 0.00168652*m.x231
+ 0.0382947*m.x232 + 0.0032777*m.x233 + 0.00298262*m.x234 + 0.00157746*m.x235
+ 0.0105061*m.x236 - 0.00233804*m.x237 - 0.000430784*m.x238 + 0.00369645*m.x239
+ 0.00893962*m.x240 + 0.00500082*m.x241 + 0.00579376*m.x242 + 0.0033232*m.x243
+ 0.00493974*m.x244 + 1.72372E-5*m.x245 - 0.00634068*m.x246 + 0.00394893*m.x247
+ 0.02189*m.x248 - 0.00142123*m.x249 + 0.000952018*m.x250 - 0.000699597*m.x251
+ 0.00166555*m.x252 + 0.00413697*m.x253 + 0.0028476*m.x254 + 0.0144721*m.x255
+ 0.00231785*m.x256 - 0.00165532*m.x257 + 0.00730008*m.x258 + 0.00370628*m.x259
+ 0.00688037*m.x260 - 0.00522018*m.x261 + 0.00111515*m.x262 + 0.00698037*m.x263
- 0.000451953*m.x264 + 0.000474809*m.x265 + 0.00265625*m.x266 - 0.00353732*m.x267
+ 0.0119217*m.x268 + 0.00462742*m.x269 + 0.00319077*m.x270 + 0.00271929*m.x271
- 0.00543312*m.x272 + 0.00196136*m.x273 + 0.00104823*m.x274 + 0.0124518*m.x275
- 0.00305148*m.x276 + 0.00752299*m.x277 + 0.00484884*m.x278 + 0.00471928*m.x279
+ 0.00180959*m.x280 - 0.00197105*m.x281 - 0.000409026*m.x282 + 0.00669345*m.x283
+ 0.00587367*m.x284 + 0.00304423*m.x285 + 0.00575059*m.x286 + 0.0022757*m.x287
+ 0.00555164*m.x288 + 0.00580589*m.x289 + 0.00361309*m.x290 + 0.00918406*m.x291
+ 0.00175607*m.x292 + 0.0017579*m.x293 - 0.00440575*m.x294 + 0.000882362*m.x295
+ 0.00390306*m.x296 - 0.00345893*m.x297 + 0.00348614*m.x298 + 0.0230969*m.x299
+ 0.00744792*m.x300 + 0.0101415*m.x301 + 0.00190622*m.x302 + 0.00972401*m.x303 == 0)
m.c209 = Constraint(expr= - m.x104 + 0.0026992*m.x204 + 0.0302565*m.x205 + 0.128438*m.x206 + 0.000554371*m.x207
+ 0.00531804*m.x208 + 0.000108892*m.x209 - 0.00393381*m.x210 + 0.00224874*m.x211
- 0.00567132*m.x212 + 0.0331219*m.x213 + 0.00309274*m.x214 + 0.00219016*m.x215
+ 0.00110325*m.x216 + 0.00750053*m.x217 + 0.0031054*m.x218 - 0.0011808*m.x219
+ 0.0018233*m.x220 - 0.00403332*m.x221 + 0.000400161*m.x222 + 0.00326599*m.x223
+ 0.00271781*m.x224 + 0.0379186*m.x225 - 0.000515796*m.x226 - 0.00228588*m.x227
- 0.00297975*m.x228 + 0.0477231*m.x229 + 0.010936*m.x230 + 0.0065182*m.x231 + 0.0358881*m.x232
+ 0.00724543*m.x233 + 0.0141802*m.x234 - 0.00235266*m.x235 + 0.0360049*m.x236
+ 0.00392202*m.x237 + 0.0042099*m.x238 + 0.00555015*m.x239 + 0.00521599*m.x240
- 0.00112076*m.x241 - 0.00218836*m.x242 - 0.000304719*m.x243 + 0.00236864*m.x244
- 0.0019383*m.x245 - 0.00295089*m.x246 - 0.00106603*m.x247 + 2.7433E-5*m.x248
- 0.00324286*m.x249 + 0.0057922*m.x250 + 0.0166603*m.x251 - 0.000651147*m.x252
+ 0.0025737*m.x253 + 0.00277017*m.x254 - 0.000313893*m.x255 - 0.00295718*m.x256
+ 0.00301212*m.x257 - 0.00536586*m.x258 + 0.00927864*m.x259 - 0.00108853*m.x260
+ 0.00791765*m.x261 - 0.00783196*m.x262 + 0.0121474*m.x263 + 0.00268288*m.x264
- 0.0040862*m.x265 + 0.00434811*m.x266 + 0.00169867*m.x267 + 0.010645*m.x268
+ 0.00861052*m.x269 + 0.00468559*m.x270 + 0.00738855*m.x271 - 0.00180418*m.x272
+ 0.00156566*m.x273 + 0.00730297*m.x274 + 0.00553899*m.x275 - 0.00796275*m.x276
+ 0.00502078*m.x277 - 0.001733*m.x278 + 0.000121446*m.x279 + 0.00524254*m.x280
- 0.000120631*m.x281 - 0.00614724*m.x282 + 0.00806025*m.x283 + 0.0109797*m.x284
+ 0.00481845*m.x285 - 0.003182*m.x286 + 0.00729708*m.x287 + 0.00314668*m.x288
+ 0.00093968*m.x289 + 0.0046813*m.x290 + 0.00588727*m.x291 + 0.00856972*m.x292
+ 0.0014989*m.x293 - 0.0012223*m.x294 - 0.00398758*m.x295 + 0.00447174*m.x296
- 0.00894175*m.x297 - 0.0106183*m.x298 + 0.0196262*m.x299 + 0.00569151*m.x300
+ 0.000408826*m.x301 + 0.0107044*m.x302 + 0.00414926*m.x303 == 0)
m.c210 = Constraint(expr= - m.x105 + 0.00246784*m.x204 + 0.00195468*m.x205 + 0.000554371*m.x206 + 0.0527346*m.x207
+ 0.00613524*m.x208 + 0.00304915*m.x209 + 0.00265026*m.x210 - 0.00168787*m.x211
+ 3.0185E-5*m.x212 + 0.00604227*m.x213 + 0.00608141*m.x214 + 0.0122199*m.x215
+ 0.00562216*m.x216 - 0.005368*m.x217 + 0.00381063*m.x218 + 0.00430191*m.x219
+ 0.0103995*m.x220 + 0.00535853*m.x221 + 0.00443352*m.x222 + 0.00287646*m.x223
+ 0.00194314*m.x224 - 0.00200937*m.x225 + 0.00721471*m.x226 + 0.00153181*m.x227
+ 0.00223523*m.x228 - 0.00637192*m.x229 - 0.00268833*m.x230 + 0.00712607*m.x231
+ 0.00579462*m.x232 + 0.00617075*m.x233 + 0.00178322*m.x234 + 0.00561978*m.x235
- 0.000789221*m.x236 + 0.000342323*m.x237 + 0.00409312*m.x238 + 0.0043798*m.x239
+ 0.00830042*m.x240 + 0.0127205*m.x241 + 0.00431059*m.x242 + 0.00271535*m.x243
+ 0.00301995*m.x244 + 0.00485894*m.x245 + 0.0039211*m.x246 + 0.00224879*m.x247
+ 0.00711934*m.x248 + 0.000965714*m.x249 + 0.00724404*m.x250 + 0.00718655*m.x251
+ 0.00872243*m.x252 + 0.00911811*m.x253 + 0.000986175*m.x254 + 0.00380874*m.x255
+ 0.00525484*m.x256 + 0.00057863*m.x257 - 0.00473576*m.x258 + 0.0100543*m.x259
+ 0.00543729*m.x260 + 0.00571189*m.x261 - 0.00276324*m.x262 + 0.00810983*m.x263
+ 0.00436906*m.x264 + 0.00413471*m.x265 - 0.00244187*m.x266 + 0.00545183*m.x267
+ 0.00495238*m.x268 + 0.00411888*m.x269 + 0.00641475*m.x270 + 0.00111178*m.x271
- 0.00121453*m.x272 + 0.00461528*m.x273 + 0.00171472*m.x274 + 0.00388579*m.x275
+ 0.00967986*m.x276 + 0.0016606*m.x277 + 0.00295113*m.x278 + 0.00353955*m.x279
+ 0.00142511*m.x280 + 0.00243873*m.x281 - 0.00105264*m.x282 + 0.00531498*m.x283
+ 0.00188864*m.x284 + 0.00359302*m.x285 + 0.00189728*m.x286 + 0.00111561*m.x287
+ 0.00562859*m.x288 + 0.00605398*m.x289 + 0.00853829*m.x290 + 0.00794939*m.x291
- 0.00108004*m.x292 + 0.00250493*m.x293 + 6.36275E-5*m.x294 + 0.00972923*m.x295
+ 0.0052055*m.x296 + 0.00106842*m.x297 + 0.00268821*m.x298 - 0.00313171*m.x299
+ 0.0047581*m.x300 - 0.00749006*m.x301 + 0.00065076*m.x302 + 0.0033893*m.x303 == 0)
m.c211 = Constraint(expr= - m.x106 + 0.00231482*m.x204 + 0.00763018*m.x205 + 0.00531804*m.x206 + 0.00613524*m.x207
+ 0.0344878*m.x208 + 0.00244207*m.x209 + 0.00307695*m.x210 + 0.000258656*m.x211
+ 0.00124365*m.x212 + 0.00669557*m.x213 + 0.000908845*m.x214 + 0.00431206*m.x215
+ 0.00836718*m.x216 + 0.00732185*m.x217 + 0.00637045*m.x218 + 0.00307733*m.x219
+ 0.00520295*m.x220 - 0.00352601*m.x221 + 0.00445198*m.x222 + 0.00166877*m.x223
+ 0.00283099*m.x224 + 0.0128329*m.x225 + 0.00506005*m.x226 + 0.00243023*m.x227
+ 0.00179608*m.x228 + 0.00210744*m.x229 + 0.00564026*m.x230 + 0.00132385*m.x231
+ 0.00265952*m.x232 + 0.0155615*m.x233 + 3.93351E-5*m.x234 + 0.00818121*m.x235
+ 0.00576868*m.x236 + 0.00589168*m.x237 + 0.00706516*m.x238 + 0.00432744*m.x239
+ 0.0048282*m.x240 + 0.00624337*m.x241 - 0.00066102*m.x242 + 0.00539504*m.x243
+ 0.0051422*m.x244 + 0.0159499*m.x245 + 0.00287456*m.x246 + 0.00538495*m.x247
+ 0.00440483*m.x248 - 0.000705455*m.x249 + 0.00591159*m.x250 + 0.00637504*m.x251
+ 0.00572145*m.x252 + 0.00369485*m.x253 + 0.00550813*m.x254 + 0.00767889*m.x255
+ 0.00218657*m.x256 + 0.00216232*m.x257 + 0.00209557*m.x258 + 0.00511289*m.x259
+ 0.00375052*m.x260 - 0.000396528*m.x261 - 0.00165313*m.x262 + 0.00186935*m.x263
+ 0.00284817*m.x264 + 0.00147042*m.x265 + 0.00264492*m.x266 - 0.000424681*m.x267
+ 0.00721645*m.x268 + 0.00584868*m.x269 - 0.00125879*m.x270 + 0.00123949*m.x271
+ 0.00134707*m.x272 + 0.00328024*m.x273 - 0.00258347*m.x274 + 0.000703909*m.x275
- 0.00197849*m.x276 + 0.00192358*m.x277 + 0.00996959*m.x278 + 0.00858445*m.x279
+ 0.00237901*m.x280 + 0.00880383*m.x281 + 0.00286914*m.x282 + 0.00026152*m.x283
+ 0.010098*m.x284 + 0.00162371*m.x285 + 0.00109136*m.x286 + 0.00784252*m.x287
+ 0.00882019*m.x288 + 0.000598062*m.x289 + 0.0052387*m.x290 + 0.00384785*m.x291
+ 0.00503042*m.x292 + 0.0024631*m.x293 + 0.00278995*m.x294 + 0.00074696*m.x295
+ 0.00338877*m.x296 - 0.000458376*m.x297 + 0.00936944*m.x298 + 0.00321093*m.x299
+ 0.00494576*m.x300 - 0.00489219*m.x301 + 0.00406797*m.x302 + 0.00689176*m.x303 == 0)
m.c212 = Constraint(expr= - m.x107 - 0.00125222*m.x204 + 0.0135627*m.x205 + 0.000108892*m.x206 + 0.00304915*m.x207
+ 0.00244207*m.x208 + 0.328857*m.x209 + 0.00193872*m.x210 + 0.00238388*m.x211
+ 0.00483704*m.x212 - 0.00180415*m.x213 + 0.00505025*m.x214 + 0.00312816*m.x215
+ 0.00975333*m.x216 + 0.00807353*m.x217 - 0.00043798*m.x218 + 0.0119386*m.x219
+ 0.00270366*m.x220 - 0.000150751*m.x221 - 0.00500595*m.x222 + 0.00148923*m.x223
- 0.00153303*m.x224 + 0.000308528*m.x225 + 0.00379536*m.x226 + 0.00253802*m.x227
+ 0.00318827*m.x228 + 0.00415587*m.x229 + 0.0068299*m.x230 + 0.00946803*m.x231
+ 0.0152758*m.x232 + 0.00176378*m.x233 + 0.0034359*m.x234 + 0.00304328*m.x235
- 0.0024897*m.x236 + 0.0154113*m.x237 + 0.00423352*m.x238 + 0.00420821*m.x239
+ 0.00164738*m.x240 + 0.00313153*m.x241 + 0.000192806*m.x242 + 0.00566827*m.x243
- 0.00114024*m.x244 + 0.0104584*m.x245 - 0.00156619*m.x246 + 0.00492319*m.x247
+ 0.00205128*m.x248 + 0.0034346*m.x249 + 0.00340814*m.x250 + 0.00910415*m.x251
+ 0.00203269*m.x252 - 0.000620515*m.x253 + 0.00733867*m.x254 - 0.00376408*m.x255
+ 0.00661222*m.x256 + 0.000748379*m.x257 - 0.000634982*m.x258 + 0.00346952*m.x259
+ 0.00833195*m.x260 - 0.00565329*m.x261 + 0.00331464*m.x262 + 0.00521247*m.x263
+ 0.00319873*m.x264 + 0.00684361*m.x265 + 0.00146666*m.x266 - 0.00449445*m.x267
+ 0.00890815*m.x268 + 0.00402867*m.x269 - 0.00123966*m.x270 + 0.00107871*m.x271
+ 0.00207255*m.x272 + 0.00348354*m.x273 + 0.00923163*m.x274 + 0.000624611*m.x275
+ 0.00416771*m.x276 - 0.00784802*m.x277 + 0.000769801*m.x278 + 0.00337936*m.x279
+ 0.00116986*m.x280 + 0.00315331*m.x281 - 0.00601197*m.x282 - 0.00277281*m.x283
+ 0.00597799*m.x284 + 0.00573361*m.x285 + 0.0187193*m.x286 + 0.00612747*m.x287
+ 0.00350368*m.x288 + 0.000722427*m.x289 - 0.00222972*m.x290 + 0.00618374*m.x291
+ 0.00230392*m.x292 - 8.26039E-6*m.x293 + 0.00470948*m.x294 + 0.00203412*m.x295
+ 0.000810064*m.x296 - 0.00836969*m.x297 + 0.000538622*m.x298 + 0.00916125*m.x299
+ 0.00409619*m.x300 + 0.0632661*m.x301 + 0.00174019*m.x302 + 0.0149843*m.x303 == 0)
m.c213 = Constraint(expr= - m.x108 - 0.00392467*m.x204 + 0.005177*m.x205 - 0.00393381*m.x206 + 0.00265026*m.x207
+ 0.00307695*m.x208 + 0.00193872*m.x209 + 0.0355431*m.x210 - 0.00186104*m.x211
+ 0.00855968*m.x212 - 0.00115433*m.x213 + 0.00384303*m.x214 + 0.000405725*m.x215
+ 0.0081813*m.x216 - 0.000678388*m.x217 + 0.00818477*m.x218 + 0.00310775*m.x219
<filename>kickstart-menu/menu.py
#!/usr/bin/env python
# pylint: disable=too-many-ancestors
"""Menu system."""
import sys
import npyscreen
import classes
import datetime
import re
from kickstart import *
def str_ljust(_string):
"""Add padding to string."""
pad = 20
return str(_string.ljust(pad, ".") + ":")
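# For example, str_ljust("Hostname") returns "Hostname............:" -- the name padded
# with dots to 20 characters and terminated by a colon, which keeps the entry fields of
# the npyscreen title widgets vertically aligned.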
def update_enabled_widget(widget):
"""Update."""
if widget.parent.enabled.value == [0]:
widget.parent.interface.editable = True
widget.parent.display()
else:
widget.parent.interface.editable = False
widget.parent.interface.value = None
widget.parent.display()
def update_bootproto_widget(widget):
"""Update."""
if widget.parent.bootproto.value == [1]:
widget.parent.ipaddress.editable = False
widget.parent.ipaddress.hidden = True
widget.parent.ipaddress.color = 'NO_EDIT'
widget.parent.netmask.editable = False
widget.parent.netmask.hidden = True
widget.parent.netmask.color = 'NO_EDIT'
widget.parent.display()
else:
widget.parent.ipaddress.editable = True
widget.parent.ipaddress.hidden = False
widget.parent.ipaddress.color = 'DEFAULT'
widget.parent.netmask.editable = True
widget.parent.netmask.hidden = False
widget.parent.netmask.color = 'DEFAULT'
widget.parent.display()
# pylint: disable=too-many-instance-attributes
class menuSystem(npyscreen.NPSAppManaged):
""" All Forms registered with an NPSAppManaged instance can access the
controlling application as self.parentApp.
"""
def calculate_menu_height(self):
"""Calculate menu height for wid2et."""
return max(2, len(self.host.interfaces))
# pylint: disable=attribute-defined-outside-init
def onStart(self):
"""Register all forms for application."""
self.begin_at = 25
self.bootproto = ["static", "dhcp"]
self.teaming = ['yes', 'no']
self.host = classes.Host()
self.network_pxe = classes.PXENetwork()
self.network_cluster = classes.ClusterNetwork()
self.network_trust = classes.Network()
self.network_untrust = classes.Network()
self.network_passive = classes.Network()
self.storage_os = classes.Storage(mountpoint="/")
self.storage_fast = classes.Storage(mountpoint="/var/EDCOP/fast")
self.storage_bulk = classes.Storage(mountpoint="/var/EDCOP/bulk")
self.storage_shared = classes.Storage(mountpoint="/var/EDCOP/shared")
self.addForm("MAIN", MainForm)
self.addForm("HOSTNAME", HostEditForm)
self.addForm("NETWORKSELECT", NetworkSelectForm)
self.addForm("NETWORKPXE", PXENetForm)
self.addForm("NETWORKCLUSTER", ClusterNetForm)
self.addForm("NETWORKTRUST", NetworkEditForm,
network=self.network_trust, name="Trust (LAN)")
self.addForm("NETWORKUNTRUST", NetworkEditForm,
network=self.network_untrust, name="Untrust (WAN)")
self.addForm("NETWORKPASSIVE", NetworkEditForm,
network=self.network_passive, name="Passive")
self.addForm("STORAGESELECT", StorageSelectForm)
self.addForm("STORAGEOS", StorageEditForm, storage=self.storage_os, name="EDCOP OS")
self.addForm("STORAGEFAST", StorageEditForm, storage=self.storage_fast, name="Fast")
self.addForm("STORAGEBULK", StorageEditForm, storage=self.storage_bulk, name="Bulk")
self.addForm("STORAGESHARED", StorageEditForm, storage=self.storage_shared, name="Shared")
# pylint: disable=too-many-instance-attributes
class MainMenuWidget(npyscreen.MultiLineAction):
"""Display main menu."""
def __init__(self, *args, **keywords):
"""Init."""
super(MainMenuWidget, self).__init__(*args, **keywords)
self.menu_hostname = "Set Hostname"
self.menu_network = "Configure Network"
self.menu_storage = "Configure Storage"
self.values = [self.menu_hostname,
self.menu_network,
self.menu_storage]
self.max_height = len(self.values) + 1
# pylint: disable=invalid-name
def actionHighlighted(self, act_on_this, key_press):
"""Select form."""
if act_on_this == self.menu_hostname:
self.parent.parentApp.switchForm("HOSTNAME")
if act_on_this == self.menu_network:
self.parent.parentApp.switchForm("NETWORKSELECT")
if act_on_this == self.menu_storage:
self.parent.parentApp.switchForm("STORAGESELECT")
# pylint: disable=too-many-instance-attributes
class NetworkMenuWidget(npyscreen.MultiLineAction):
"""Display main menu for networks """
def __init__(self, *args, **keywords):
""" Initalize form
Note: inline trust, inline untrust, and passive networks are currently disabled
"""
super(NetworkMenuWidget, self).__init__(*args, **keywords)
self.menu_pxe = "PXE Network"
self.menu_cluster = "Cluster Network"
# self.menu_trust = "Inline-Trust (LAN) Network"
# self.menu_untrust = "Inline-UnTrust (WAN) Network"
# self.menu_passive = "Passive Network"
self.values = [self.menu_pxe,
self.menu_cluster]
self.max_height = len(self.values) + 1
# pylint: disable=invalid-name
def actionHighlighted(self, act_on_this, key_press):
"""Select form."""
if act_on_this == self.menu_pxe:
self.parent.parentApp.switchForm("NETWORKPXE")
if act_on_this == self.menu_cluster:
self.parent.parentApp.switchForm("NETWORKCLUSTER")
#if act_on_this == self.menu_trust:
# self.parent.parentApp.switchForm("NETWORKTRUST")
#if act_on_this == self.menu_untrust:
# self.parent.parentApp.switchForm("NETWORKUNTRUST")
#if act_on_this == self.menu_passive:
# self.parent.parentApp.switchForm("NETWORKPASSIVE")
# pylint: disable=too-many-instance-attributes
class StorageMenuWidget(npyscreen.MultiLineAction):
"""Display main menu."""
def __init__(self, *args, **keywords):
"""Init."""
super(StorageMenuWidget, self).__init__(*args, **keywords)
self.menu_os = "EDCOP OS"
self.menu_fast = "Local-Fast"
self.menu_bulk = "Local-Bulk"
self.menu_shared = "Shared"
self.values = [self.menu_os,
self.menu_fast,
self.menu_bulk,
self.menu_shared]
self.max_height = len(self.values) + 1
# pylint: disable=invalid-name
def actionHighlighted(self, act_on_this, key_press):
"""Select form."""
if act_on_this == self.menu_os:
self.parent.parentApp.switchForm("STORAGEOS")
if act_on_this == self.menu_fast:
self.parent.parentApp.switchForm("STORAGEFAST")
if act_on_this == self.menu_bulk:
self.parent.parentApp.switchForm("STORAGEBULK")
if act_on_this == self.menu_shared:
self.parent.parentApp.switchForm("STORAGESHARED")
class MainForm(npyscreen.ActionFormMinimal):
"""Home Screen."""
def create(self):
"""Run at instantiation."""
self.name = "EDCOP"
self.add(MainMenuWidget)
def on_ok(self):
"""Next."""
# Validate all forms have the minimum required data
hostnameComplete = False
clusterNetworkComplete = False
pxeNetworkComplete = False
storageComplete = False
incompleteForms = ""
""" Hostname Validation """
if(KICKSTART_MENU.host.name!=""):
hostnameComplete = True
else:
incompleteForms += "\nHostname"
""" PXE Network Validation """
if((KICKSTART_MENU.network_pxe.ip_address != "") and (KICKSTART_MENU.network_pxe.netmask != "") and (KICKSTART_MENU.network_pxe.interface != None)):
if((KICKSTART_MENU.network_pxe.bootproto == "dhcp") and (KICKSTART_MENU.network_pxe.dhcp_start != "") and (KICKSTART_MENU.network_pxe.dhcp_end != "")):
pxeNetworkComplete = True
elif(KICKSTART_MENU.network_pxe.bootproto == "static"):
pxeNetworkComplete = True
else:
incompleteForms += "\nPXE Network"
""" Cluster Network Valdiation """
if((KICKSTART_MENU.network_cluster.ip_address != "") and (KICKSTART_MENU.network_cluster.netmask != "") and (KICKSTART_MENU.network_cluster.interface != None)):
clusterNetworkComplete = True
else:
incompleteForms += "\nCluster Network"
""" Storage Validation """
if((KICKSTART_MENU.storage_os.mountpoint != "") and (KICKSTART_MENU.storage_os.disk != None)):
storageComplete = True
else:
incompleteForms += "\nStorage (EDCOP OS)"
# Raise an error to the user if they are missing data in any mandatory form
if ((hostnameComplete==True) and (clusterNetworkComplete==True) and (pxeNetworkComplete==True) and (storageComplete==True)):
try:
self.editing = False
self.parentApp.setNextForm(None)
except:
npyscreen.notify_confirm("Something went wrong. Please try again.", title="Error")
else:
formMessage = "There appears to be missing data on the following forms: \n \n \n" + incompleteForms
npyscreen.notify_confirm(formMessage, title="Error")
def exit_application(self):
self.editing = False
self.parentApp.setNextForm(None)
class NetworkSelectForm(npyscreen.ActionFormMinimal):
# Class for the form that has options for PXE, cluster, passive, etc sub-menus
"""Form."""
def create(self):
"""Run at instantiation."""
self.name = "EDCOP > Network"
self.add(NetworkMenuWidget)
def on_ok(self):
"""Next."""
self.parentApp.setNextForm("MAIN")
class StorageSelectForm(npyscreen.ActionFormMinimal):
"""Form."""
def create(self):
"""Run at instantiation."""
self.name = "EDCOP > Storage"
self.add(StorageMenuWidget)
def on_ok(self):
"""Next."""
self.parentApp.setNextForm("MAIN")
class HostEditForm(npyscreen.ActionFormV2):
"""Edit Hostname."""
def create(self):
"""Create method is called by the Form constructor.
It does nothing by default - it is there for you to override in subclasses,
but it is the best place to set up all the widgets on a Form. Expect this
method to be full of self.add(...) method calls, then!
"""
self.name = "Host configuration:"
self.hostname = self.add(npyscreen.TitleText, name="Hostname")
# pylint: disable=invalid-name
def beforeEditing(self):
"""Call before form is edited."""
self.hostname.value = self.parentApp.host.name
# pylint: disable=invalid-name
def afterEditing(self):
"""Call when the form is exited."""
self.parentApp.host.name = self.hostname.value
self.parentApp.switchFormPrevious()
def on_ok(self):
if (self.hostname.value != ""):
try:
self.parentApp.host.name = self.hostname.value
except:
npyscreen.notify_confirm("Something went wrong. Please check your hostname", title="Error")
else:
npyscreen.notify_confirm("You must enter a hostname.", title="Error")
class NetForm(npyscreen.ActionFormV2):
# Base Network Form class.
def create(self):
"""Create method is called by the Form constructor."""
self.begin_at = self.parentApp.begin_at
self.bootproto = self.add(npyscreen.TitleSelectOne,
name=str_ljust("Bootproto"),
begin_entry_at=self.begin_at,
max_height=3,
scroll_exit=True)
self.teaming = self.add(npyscreen.TitleSelectOne,
name=str_ljust("NIC Teaming"),
begin_entry_at=self.begin_at,
max_height=3,
scroll_exit=True)
self.interface = self.add(npyscreen.TitleMultiSelect,
name=str_ljust("Interface"),
begin_entry_at=self.begin_at,
#max_height=self.parentApp.calculate_menu_height,
max_height=8,
scroll_exit=True)
self.ipaddress = self.add(npyscreen.TitleText,
name=str_ljust("IP Address"),
begin_entry_at=self.begin_at)
self.netmask = self.add(npyscreen.TitleText,
name=str_ljust("Netmask"),
begin_entry_at=self.begin_at)
self.dhcp_start = self.add(npyscreen.TitleText,
name=str_ljust("DHCP start"),
begin_entry_at=self.begin_at)
self.dhcp_end = self.add(npyscreen.TitleText,
name=str_ljust("DHCP end"),
begin_entry_at=self.begin_at)
self.dns1 = self.add(npyscreen.TitleText,
name=str_ljust("Primary DNS"),
begin_entry_at=self.begin_at)
self.dns2 = self.add(npyscreen.TitleText,
name=str_ljust("Secondary DNS"),
begin_entry_at=self.begin_at)
self.gateway = self.add(npyscreen.TitleText,
name=str_ljust("Gateway"),
begin_entry_at=self.begin_at)
self.dhcp_start.hidden = True
self.dhcp_end.hidden = True
self.dns1.hidden = True
self.dns2.hidden = True
self.gateway.hidden = True
self.bootproto.values = ['static', 'dhcp']
self.teaming.values = ['yes', 'no']
#self.bootproto.value = 0
self.bootproto.value_changed_callback = update_bootproto_widget
def on_cancel(self):
"""Next."""
self.parentApp.switchFormPrevious()
# pylint: disable=too-many-instance-attributes
class PXENetForm(NetForm):
# PXE Network Form. Extends the NetForm class
# pylint: disable=invalid-name
# pylint: disable=attribute-defined-outside-init
def beforeEditing(self):
"""Call before form is edited."""
self.name = "EDCOP > Network > PXE"
self.network = self.parentApp.network_pxe
# combine interface with current operation state
interfaceState = curOperstate(self.parentApp.host.interfaces)
state = ['']*len(self.parentApp.host.interfaces)
for idx in range(len(self.parentApp.host.interfaces)):
state[idx] = self.parentApp.host.interfaces[idx] + " (" + interfaceState[idx] + ")"
self.interface.values = state
self.ipaddress.value = self.network.ip_address
self.netmask.value = self.network.netmask
self.dhcp_start.value = self.network.dhcp_start
self.dhcp_end.value = self.network.dhcp_end
self.dhcp_start.hidden = False
self.dhcp_end.hidden = False
self.teaming.hidden = True
def on_ok(self):
"""Save network information to object."""
errors = ''
try:
self.network.bootproto = self.parentApp.bootproto[self.bootproto.value[0]]
self.network.interface = self.parentApp.host.interfaces[self.interface.value[0]]
if (validateIP(self.ipaddress.value) == True):
self.network.ip_address = self.ipaddress.value
else:
errors += '\nIP Address'
if (validateNetmask(self.netmask.value) == True):
self.network.netmask = self.netmask.value
else:
errors += '\nNetmask'
self.network.network = networkID(self.network.ip_address, self.network.netmask)
self.network.dhcp_start = self.dhcp_start.value
self.network.dhcp_end = self.dhcp_end.value
# If there are no issues, jump to parent form, otherwise, alert so user can fix
if (errors == ''):
self.parentApp.switchFormPrevious()
else:
formMessage = "There appears to be missing or invalid data on the following fields: \n \n \n" + errors
npyscreen.notify_confirm(formMessage, title="Error")
except IndexError:
npyscreen.notify_confirm("Please select a valid interface", title="Error")
class ClusterNetForm(NetForm):
# Cluster network form. Extends the NetForm class
# pylint: disable=invalid-name
# pylint: disable=attribute-defined-outside-init
def beforeEditing(self):
"""Update values."""
self.name = "EDCOP > Network > Cluster"
self.network = self.parentApp.network_cluster
self.ipaddress.value = self.network.ip_address
# combine interface with current operation state
interfaceState = curOperstate(self.parentApp.host.interfaces)
state = ['']*len(self.parentApp.host.interfaces)
for idx in range(len(self.parentApp.host.interfaces)):
state[idx] = self.parentApp.host.interfaces[idx] + " (" + interfaceState[idx] + ")"
self.interface.values = state
self.ipaddress.value = self.network.ip_address
self.netmask.value = self.network.netmask
self.dns1.value = self.network.dns1
self.dns2.value = self.network.dns2
self.gateway.value = self.network.gateway
self.dns1.hidden = False
self.dns2.hidden = False
self.gateway.hidden = False
self.teaming.value = self.network.teaming
def on_ok(self):
"""Save network information to object."""
errors = ''
try:
interfaceList = []
for index in range(len(self.interface.value)):
interfaceList.append(self.parentApp.host.interfaces[self.interface.value[index]])
self.network.interface = interfaceList
self.network.bootproto = self.parentApp.bootproto[self.bootproto.value[0]]
self.network.teaming = self.parentApp.teaming[self.teaming.value[0]]
if (validateIP(self.ipaddress.value) == True):
self.network.ip_address = self.ipaddress.value
else:
errors += '\nIP Address'
if (validateNetmask(self.netmask.value) == True):
self.network.netmask = self.netmask.value
else:
errors += '\nNetmask'
self.network.network = networkID(self.network.ip_address, self.network.netmask)
<reponame>etiennesky/ece2cmor3
import datetime
import json
import logging
import os
import re
import resource
import threading
import numpy
from ece2cmor3 import cmor_target, cmor_source, cmor_task, cmor_utils, grib_file, cdoapi
# Log object.
log = logging.getLogger(__name__)
gridpoint_files = {}
spectral_files = {}
ini_gridpoint_file = None
ini_spectral_file = None
temp_dir = None
accum_key = "ACCUMFLD"
accum_codes = []
varsfreq = {}
spvar = None
fxvars = []
starttimes = {}
# Initializes the module, looks up previous month files and inspects the first
# day in the input files to set up an administration of the fields.
def update_sp_key(fname):
global spvar
for key in varsfreq:
freq = varsfreq[key]
if key[0] == 154:
if spvar is None or spvar[1] >= freq:
spvar = (154, freq, fname)
if key[0] == 134:
if spvar is None or spvar[1] > freq:
spvar = (134, freq, fname)
def initialize(gpfiles, shfiles, tmpdir, ini_gpfile=None, ini_shfile=None):
global gridpoint_files, spectral_files, ini_gridpoint_file, ini_spectral_file, temp_dir, varsfreq, accum_codes
grib_file.initialize()
gridpoint_files = {d: (get_prev_file(gpfiles[d]), gpfiles[d]) for d in gpfiles.keys()}
spectral_files = {d: (get_prev_file(shfiles[d]), shfiles[d]) for d in shfiles.keys()}
ini_gridpoint_file, ini_spectral_file = ini_gpfile, ini_shfile
temp_dir = tmpdir
accum_codes = load_accum_codes(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources", "grib_codes.json"))
gpdate = sorted(gridpoint_files.keys())[0] if any(gridpoint_files) else None
shdate = sorted(spectral_files.keys())[0] if any(spectral_files) else None
gpfile = gridpoint_files[gpdate][1] if any(gridpoint_files) else None
shfile = spectral_files[shdate][1] if any(spectral_files) else None
if gpfile is not None:
with open(gpfile) as gpf:
varsfreq.update(inspect_day(grib_file.create_grib_file(gpf), grid=cmor_source.ifs_grid.point))
update_sp_key(gpfile)
if shfile is not None:
with open(shfile) as shf:
varsfreq.update(inspect_day(grib_file.create_grib_file(shf), grid=cmor_source.ifs_grid.spec))
update_sp_key(shfile)
if ini_gpfile is not None:
with open(ini_gpfile) as gpf:
fxvars.extend(inspect_hr(grib_file.create_grib_file(gpf), grid=cmor_source.ifs_grid.point))
if ini_shfile is not None:
with open(ini_shfile) as shf:
fxvars.extend(inspect_hr(grib_file.create_grib_file(shf), grid=cmor_source.ifs_grid.spec))
# Function reading the file with grib-codes of accumulated fields
def load_accum_codes(path):
global accum_key
    with open(path) as codes_file:
        data = json.loads(codes_file.read())
    if accum_key in data:
        return list(map(grib_tuple_from_string, data[accum_key]))
    else:
        return []
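# The JSON resource is expected to map the accumulation key to a list of
# "param.table" strings, roughly like the sketch below (illustrative codes only,
# not the contents of the actual shipped resource file):
#   {"ACCUMFLD": ["142.128", "143.128", "228.128"]}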
# Utility to make grib tuple of codes from string
def grib_tuple_from_string(s):
codes = s.split('.')
return int(codes[0]), 128 if len(codes) < 2 else int(codes[1])
# Utility to make grib tuple of codes from string
def grib_tuple_from_int(i):
if i < 256:
return i, 128
    return i % 10 ** 3, i // 10 ** 3  # integer division keeps the table id an int
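# Quick sanity examples for the two helpers above (values are illustrative):
#   grib_tuple_from_string("130.128") -> (130, 128)
#   grib_tuple_from_string("130")     -> (130, 128)  # table defaults to 128
#   grib_tuple_from_int(130)          -> (130, 128)
#   grib_tuple_from_int(228129)       -> (129, 228)  # param = i % 1000, table = i // 1000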
# Inspects a single time point in the initial file
def inspect_hr(gribfile, grid):
result = []
while gribfile.read_next(headers_only=True):
result.append(get_record_key(gribfile, grid) + (grid,))
return result
# Inspects the first 24 hours in the input gridpoint and spectral files.
def inspect_day(gribfile, grid):
inidate, initime = -99, -1
records = {}
while gribfile.read_next(headers_only=True):
date = gribfile.get_field(grib_file.date_key)
        time = gribfile.get_field(grib_file.time_key) // 100
if date == inidate + 1 and time == initime:
break
if inidate < 0:
inidate = date
if initime < 0:
initime = time
key = get_record_key(gribfile, grid) + (grid,)
if key in records:
if time not in records[key]:
records[key].append(time)
else:
records[key] = [time]
gribfile.release()
result = {}
    for key, val in records.items():
hrs = numpy.array(val)
if len(hrs) == 1:
log.warning("Variable %d.%d on level %d of type %d has been detected once in first day "
"of file %s... Assuming daily frequency" % (key[0], key[1], key[3], key[2],
gribfile.file_object.name))
frqs = numpy.array([24])
else:
frqs = numpy.mod(hrs[1:] - hrs[:-1], numpy.repeat(24, len(hrs) - 1))
frq = frqs[0]
if any(frqs != frq):
log.error("Variable %d.%d on level %d of type %d is not output on regular "
"intervals in first day in file %s" % (key[0], key[1], key[3], key[2], gribfile.file_object.name))
else:
result[key] = frq
return result
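# Worked example of the frequency detection above (hypothetical output times):
# a field recorded at hours [0, 6, 12, 18] gives frqs = mod([6, 6, 6], 24), so its
# frequency is 6 h; a field seen only once in the first day is assumed daily (24 h),
# and irregular spacings are reported as an error and dropped from the result.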
# TODO: Merge the 2 functions below into one matching function:
# Creates a key (code + table + level type + level) for a grib message iterator
def get_record_key(gribfile, gridtype):
codevar, codetab = grib_tuple_from_int(gribfile.get_field(grib_file.param_key))
levtype, level = gribfile.get_field(grib_file.levtype_key), gribfile.get_field(grib_file.level_key)
if levtype == grib_file.pressure_level_hPa_code:
level *= 100
levtype = grib_file.pressure_level_Pa_code
if levtype == 112 or levtype == grib_file.depth_level_code:
level = 0
levtype = grib_file.depth_level_code
if codevar in [49, 165, 166]:
level = 10
levtype = grib_file.height_level_code
if codevar in [167, 168, 201, 202]:
level = 2
levtype = grib_file.height_level_code
if codevar == 9:
level = 0
levtype = grib_file.surface_level_code
if levtype == grib_file.pv_level_code: # Mapping pv-levels to surface: we don't support more than one pv-level
level = 0
levtype = grib_file.surface_level_code
# Fix for spectral height level fields in gridpoint file:
if cmor_source.grib_code(codevar) in cmor_source.ifs_source.grib_codes_sh and \
gridtype != cmor_source.ifs_grid.spec and \
levtype == grib_file.hybrid_level_code:
levtype = grib_file.height_level_code
return codevar, codetab, levtype, level
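# Example of the key normalization above, assuming the standard ECMWF table-128
# codes this module targets: the 10 m wind component 165.128 is mapped to
# (165, 128, height_level_code, 10) and 2 m temperature 167.128 to
# (167, 128, height_level_code, 2), regardless of the level encoded in the message.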
# Used to distribute keys created above over cmor tasks
def soft_match_key(varid, tabid, levtype, level, gridtype, keys):
if (varid, tabid, levtype, level, gridtype) in keys:
return varid, tabid, levtype, level, gridtype
# Fix for orog and ps: find them in either GG or SH file
if varid in [134, 129] and tabid == 128 and levtype == grib_file.surface_level_code and level == 0:
matches = [k for k in keys if k[0] == varid and k[1] == tabid and k[2] == grib_file.surface_level_code]
if any(matches):
return matches[0]
matches = [k for k in keys if k[0] == varid and k[1] == tabid and k[2] == grib_file.hybrid_level_code and
k[3] == 0]
if any(matches):
return matches[0]
# Fix for depth levels variables
if levtype == grib_file.depth_level_code:
matches = [k for k in keys if k[0] == varid and k[1] == tabid and k[2] == grib_file.depth_level_code]
if any(matches):
return matches[0]
if levtype == grib_file.hybrid_level_code and level == -1:
matches = [k for k in keys if k[0] == varid and k[1] == tabid and k[2] == grib_file.hybrid_level_code and
k[4] == gridtype]
if any(matches):
return matches[0]
# Fix for spectral fields at height levels being written as model level fields in GG file
if levtype == grib_file.height_level_code and gridtype == cmor_source.ifs_grid.spec:
matches = [k for k in keys if k[:4] == (varid, tabid, grib_file.height_level_code, level)]
if any(matches):
return matches[0]
return None
# Converts cmor-levels to grib levels code
def get_levels(task, code):
global log
# Special cases
if code.tab_id == 128:
gc = code.var_id
if gc in [9, 134]:
return grib_file.surface_level_code, [0]
if gc in [35, 36, 37, 38, 39, 40, 41, 42, 139, 170, 183, 236]:
return grib_file.depth_level_code, [0]
if gc in [49, 165, 166]:
return grib_file.height_level_code, [10]
if gc in [167, 168, 201, 202]:
return grib_file.height_level_code, [2]
# Normal cases
zaxis, levels = cmor_target.get_z_axis(task.target)
if zaxis is None:
return grib_file.surface_level_code, [0]
if zaxis in ["sdepth"]:
return grib_file.depth_level_code, [0]
if zaxis in ["alevel", "alevhalf"]:
return grib_file.hybrid_level_code, [-1]
if zaxis == "air_pressure":
return grib_file.pressure_level_Pa_code, [int(float(level)) for level in levels]
if zaxis in ["height", "altitude"]:
return grib_file.height_level_code, [int(float(level)) for level in levels] # TODO: What about decimal places?
log.error("Could not convert vertical axis type %s to grib vertical coordinate "
"code for %s" % (zaxis, task.target.variable))
return -1, []
# Searches the file system for the previous month file, necessary for the 0-hour
# fields.
def get_prev_file(grb_file):
fname = os.path.basename(grb_file)
exp, year, mon = fname[5:9], int(fname[10:14]), int(fname[14:16])
if mon == 1:
prev_year, prev_mon = year - 1, 12
else:
prev_year, prev_mon = year, mon - 1
output_dir = os.path.abspath(os.path.join(os.path.dirname(grb_file), ".."))
output_files = cmor_utils.find_ifs_output(output_dir, exp)
ini_path = None
for output_path in output_files:
output_name = os.path.basename(output_path)
if output_name == fname[:9] + "+000000":
ini_path = output_path
if output_name[:10] == fname[:10] and int(output_name[10:14]) == prev_year and \
int(output_name[14:]) == prev_mon:
log.info("Found previous month file for %s: %s" % (grb_file, output_path))
return output_path
ece_leg = os.path.split(os.path.dirname(grb_file))[-1]
if re.match(r"^0*\d1$", ece_leg): # First leg
if ini_path is None:
log.error("Previous month file for %s could not be found because the initial state file hasn't been found"
% grb_file)
else:
log.info("Assumed previous month file for %s: %s" % (grb_file, ini_path))
else:
if ini_path is None:
log.error("Previous month file for %s could not be found" % grb_file)
else:
log.error("Assumed previous month file for %s: %s, this is probably not correct!" % (grb_file, ini_path))
return ini_path
# Splits the grib file for the given set of tasks
def mkfname(key):
return '.'.join([str(key[0]), str(key[1]), str(key[2])])
# Construct files for keys and tasks
def cluster_files(valid_tasks, varstasks):
task2files, task2freqs = {}, {}
varsfx = set()
for task in valid_tasks:
task2files[task] = set()
task2freqs[task] = set()
        for key, tsklist in varstasks.items():
if task in tsklist:
task2files[task].add('.'.join([str(key[0]), str(key[1]), str(key[2])]))
if key[3] == -1:
task2freqs[task].update([varsfreq[k] for k in varsfreq.keys() if
(k[0], k[1], k[2]) == (key[0], key[1], key[2])])
else:
if key in varsfreq:
task2freqs[task].add(varsfreq[key])
elif key in fxvars:
varsfx.add(key)
    for task, fnames in task2files.items():
codes | |
"NT": 2552,
"▁vẹn": 2553,
"▁Lịch": 2554,
"chè": 2555,
"▁bụi": 2556,
"I": 2557,
"▁2013": 2558,
"6%": 2559,
"▁hẹp": 2560,
"▁khoáng": 2561,
"▁Champions": 2562,
"▁hung": 2563,
"▁6.": 2564,
"▁Olympic": 2565,
"ly": 2566,
"-3": 2567,
"se": 2568,
"ley": 2569,
"▁Barca": 2570,
"▁trọn": 2571,
"):": 2572,
"▁muộn": 2573,
"ha": 2574,
"▁HA": 2575,
"Y": 2576,
"▁Galaxy": 2577,
"▁Cha": 2578,
"▁chuột": 2579,
"▁rãi": 2580,
"-9": 2581,
"▁rũ": 2582,
"▁Phí": 2583,
"▁bọn": 2584,
"▁2010,": 2585,
"▁bông": 2586,
"▁khâu": 2587,
"GS": 2588,
"▁2009": 2589,
"▁Nghiên": 2590,
"▁Liverpool": 2591,
"net": 2592,
"▁Triệu": 2593,
"▁Úc": 2594,
"▁Đồ": 2595,
"▁Tho": 2596,
"▁đeo": 2597,
"▁Sử": 2598,
"▁01": 2599,
"▁chán": 2600,
"▁mm": 2601,
"▁mỏng": 2602,
"▁vội": 2603,
"ga": 2604,
"re": 2605,
"▁taxi": 2606,
"▁tú": 2607,
"▁Cai": 2608,
"▁Ta": 2609,
"▁mỡ": 2610,
"▁Lam": 2611,
"9%": 2612,
"▁150": 2613,
"3%": 2614,
"huyện": 2615,
"▁50%": 2616,
"Ch": 2617,
"▁nô": 2618,
"▁vitamin": 2619,
"▁Kiến": 2620,
"▁Đạo": 2621,
"▁thuần": 2622,
"▁hắn": 2623,
"ượt": 2624,
"ro": 2625,
"▁David": 2626,
"▁nghiện": 2627,
"▁ngựa": 2628,
"▁lão": 2629,
"▁Căn": 2630,
"ép": 2631,
"▁Nhận": 2632,
"▁go": 2633,
"▁gh": 2634,
"▁bọc": 2635,
"▁nhạy": 2636,
"▁London": 2637,
"▁rỡ": 2638,
"▁khứ": 2639,
"▁ngón": 2640,
"▁show": 2641,
"8%": 2642,
"▁Xem": 2643,
"▁KH": 2644,
"E": 2645,
"Ph": 2646,
"▁xoay": 2647,
"▁El": 2648,
"▁MT": 2649,
"-2": 2650,
"-5": 2651,
"▁Lo": 2652,
"nam": 2653,
"2%": 2654,
"▁MV": 2655,
"▁vươn": 2656,
"▁vang": 2657,
"phe": 2658,
"▁thâm": 2659,
"▁Vin": 2660,
"▁dọn": 2661,
"chi": 2662,
"▁Sư": 2663,
"2.": 2664,
"kho": 2665,
"▁quyến": 2666,
"▁Ly": 2667,
"▁Po": 2668,
"▁Lee": 2669,
"▁Sản": 2670,
"▁Vị": 2671,
"▁đắp": 2672,
"▁bì": 2673,
"▁Dưới": 2674,
"HQ": 2675,
"ov": 2676,
"▁tiễn": 2677,
"▁tha": 2678,
"20": 2679,
"▁Se": 2680,
"GL": 2681,
"▁ưa": 2682,
"▁May": 2683,
"ul": 2684,
"sin": 2685,
"▁nướng": 2686,
"▁mạn": 2687,
"▁MU": 2688,
"▁sáu": 2689,
"▁Ví": 2690,
"▁tháp": 2691,
"▁Cập": 2692,
"▁Mùa": 2693,
"▁ngu": 2694,
"▁thoái": 2695,
"▁album": 2696,
"▁Top": 2697,
"ka": 2698,
"rung": 2699,
"▁top": 2700,
"▁mày": 2701,
"▁e": 2702,
"▁Android": 2703,
"▁nhầm": 2704,
"▁Đơn": 2705,
"▁co": 2706,
"▁Xin": 2707,
"▁Go": 2708,
"ar": 2709,
"▁ngợi": 2710,
"▁Barcelona": 2711,
"▁Chuyện": 2712,
"▁NATO": 2713,
"/2014": 2714,
"▁Giới": 2715,
"CB": 2716,
"▁7.": 2717,
"▁Tô": 2718,
"▁Si": 2719,
"va": 2720,
"NTT": 2721,
"▁HS": 2722,
"▁Vận": 2723,
"▁thô": 2724,
"▁qué": 2725,
"▁hot": 2726,
"em": 2727,
"▁bổng": 2728,
"▁2014,": 2729,
"▁Games": 2730,
"-4": 2731,
"▁đệ": 2732,
"▁Hiệu": 2733,
"▁nu": 2734,
"▁2015.": 2735,
"▁In": 2736,
"▁rè": 2737,
"▁Cứ": 2738,
"-7": 2739,
"aceae": 2740,
"ce": 2741,
"▁Ge": 2742,
"hung": 2743,
"▁600": 2744,
"▁Moscow": 2745,
"▁Van": 2746,
"▁mọc": 2747,
"-6": 2748,
"▁nấm": 2749,
"kh": 2750,
"▁Argentina": 2751,
"&": 2752,
"▁Khách": 2753,
"con": 2754,
"▁bón": 2755,
"rong": 2756,
"▁dĩ": 2757,
"▁khớp": 2758,
"▁huyền": 2759,
"▁nụ": 2760,
"▁Quận": 2761,
"▁chan": 2762,
"▁Premier": 2763,
"▁chô": 2764,
"▁khống": 2765,
"▁Thật": 2766,
"am": 2767,
"▁Mục": 2768,
"▁kem": 2769,
"kg": 2770,
"▁mè": 2771,
"▁ong": 2772,
"▁Windows": 2773,
"li": 2774,
"▁tuệ": 2775,
"▁Mexico": 2776,
"▁khéo": 2777,
"▁xi": 2778,
"NH": 2779,
"QT": 2780,
"▁Cu": 2781,
"▁Mau": 2782,
"▁bỗng": 2783,
"▁Tí": 2784,
"BOT": 2785,
"▁euro": 2786,
"▁Ô": 2787,
"không": 2788,
"▁Kho": 2789,
"▁kí": 2790,
"▁Ga": 2791,
"2016": 2792,
"▁10.000": 2793,
"▁giãn": 2794,
"▁Chia": 2795,
"▁Lạc": 2796,
"▁li": 2797,
"▁nhẫn": 2798,
"NG": 2799,
"▁hang": 2800,
"1%": 2801,
"▁United": 2802,
"▁1,5": 2803,
"NV": 2804,
"▁rực": 2805,
"ca": 2806,
"▁Cam": 2807,
"-10": 2808,
"UB": 2809,
"▁lỏng": 2810,
"▁Daily": 2811,
"▁Viên": 2812,
"▁mí": 2813,
"ói": 2814,
"YT": 2815,
"ham": 2816,
"▁Microsoft": 2817,
"▁nảy": 2818,
"út": 2819,
"Bank": 2820,
"▁Đừng": 2821,
"▁10.": 2822,
"▁long": 2823,
"▁Thêm": 2824,
"-1": 2825,
"▁2.000": 2826,
"▁Câ": 2827,
"▁cò": 2828,
"▁Tôn": 2829,
"▁8.": 2830,
"▁lùi": 2831,
"▁trin": 2832,
"▁PC": 2833,
"▁Lạt": 2834,
"▁mờ": 2835,
"▁Ja": 2836,
"ão": 2837,
"▁voi": 2838,
"▁Yo": 2839,
"▁Can": 2840,
"rì": 2841,
"▁tò": 2842,
"▁dịu": 2843,
"▁CH": 2844,
"▁33": 2845,
"▁im": 2846,
"▁10%": 2847,
"▁Dịch": 2848,
"▁cài": 2849,
"▁đắc": 2850,
"▁He": 2851,
"NL": 2852,
"-8": 2853,
"▁Tên": 2854,
"▁bơm": 2855,
"ed": 2856,
"▁Bé": 2857,
"12": 2858,
"▁thánh": 2859,
"R": 2860,
"▁Rồi": 2861,
"▁website": 2862,
"Tr": 2863,
"▁120": 2864,
"▁37": 2865,
"▁Honda": 2866,
"▁đùa": 2867,
"▁TV": 2868,
"▁Ra": 2869,
"HCM": 2870,
"▁san": 2871,
"▁Tiếng": 2872,
"▁Pa": 2873,
"thu": 2874,
"▁trượt": 2875,
"mi": 2876,
"▁SEA": 2877,
"os": 2878,
"▁Jong": 2879,
"rò": 2880,
"NA": 2881,
"▁Vu": 2882,
"be": 2883,
"▁38": 2884,
"▁thoáng": 2885,
"▁9.": 2886,
"▁dừa": 2887,
"▁ré": 2888,
"w": 2889,
"ri": 2890,
"▁virus": 2891,
"▁Bán": 2892,
"▁Triển": 2893,
"▁đê": 2894,
"èn": 2895,
"vi": 2896,
"5%": 2897,
"▁Xuất": 2898,
"▁Myanmar": 2899,
"11": 2900,
"▁gối": 2901,
"▁20%": 2902,
"▁Manchester": 2903,
"▁Vấn": 2904,
"▁cai": 2905,
"▁Cuba": 2906,
"-11": 2907,
"▁bă": 2908,
"▁Lập": 2909,
"▁Ha": 2910,
"▁nhắm": 2911,
"▁phô": 2912,
"▁bảy": 2913,
"▁800": 2914,
"▁Chuyên": 2915,
"ịch": 2916,
"/2010": 2917,
"▁tím": 2918,
"▁Milan": 2919,
"▁chép": 2920,
"▁Nên": 2921,
"KT": 2922,
"ettel": 2923,
"▁34": 2924,
"▁ngơi": 2925,
"▁nhạt": 2926,
"Ho": 2927,
"▁Lá": 2928,
"▁30%": 2929,
"▁màng": 2930,
"ai": 2931,
"▁Tuyết": 2932,
"▁tẩy": 2933,
"▁Giả": 2934,
"▁Re": 2935,
"DL": 2936,
"▁2008": 2937,
"▁trội": 2938,
"CV": 2939,
"▁kha": 2940,
"▁Trẻ": 2941,
"▁Hay": 2942,
"ko": 2943,
"▁né": 2944,
"sk": 2945,
"ẳ": 2946,
"▁Sang": 2947,
"Ng": 2948,
"5.000": 2949,
"▁cm": 2950,
"%": 2951,
"2017": 2952,
"▁hiển": 2953,
"▁CN": 2954,
"▁Cậu": 2955,
"▁đớn": 2956,
"▁phà": 2957,
"▁Mình": 2958,
"▁2013,": 2959,
"▁ơi": 2960,
"mo": 2961,
'."': 2962,
"▁chăng": 2963,
"▁and": 2964,
"▁Chứng": 2965,
"Chúng": 2966,
"▁Chân": 2967,
"▁Há": 2968,
"▁Phá": 2969,
"▁si": 2970,
"by": 2971,
"▁Euro": 2972,
"45": 2973,
"▁cậy": 2974,
"▁internet": 2975,
"iếc": 2976,
"▁Quyền": 2977,
"▁tour": 2978,
"▁III": 2979,
"go": 2980,
"▁vạn": 2981,
"ry": 2982,
"▁mă": 2983,
"▁Hiển": 2984,
"HT": 2985,
"▁Me": 2986,
"▁hãi": 2987,
"GDP": 2988,
"▁Quả": 2989,
"▁Kỹ": 2990,
"▁khe": 2991,
"/2013": 2992,
"có": 2993,
"▁Thân": 2994,
"▁2009,": 2995,
"me": 2996,
"▁2014.": 2997,
"Ứ": 2998,
"▁Vy": 2999,
"▁Air": 3000,
"ke": 3001,
"▁2012,": 3002,
"▁2-3": 3003,
"▁Bayern": 3004,
"7%": 3005,
"▁thọ": 3006,
"▁2011,": 3007,
"▁700": 3008,
"▁chém": 3009,
"▁gũi": 3010,
"▁thầm": 3011,
"▁Tha": 3012,
"▁Phố": 3013,
"TM": 3014,
"-12": 3015,
"▁Thần": 3016,
"▁Tre": 3017,
"ki": 3018,
"▁HD": 3019,
"▁ti": 3020,
"ín": 3021,
"▁showbiz": 3022,
"▁Alex": 3023,
"▁Toyota": 3024,
"▁phiền": 3025,
"▁48": 3026,
"di": 3027,
"▁PT": 3028,
"▁Kong": 3029,
"▁Be": 3030,
"▁AT": 3031,
"▁mộng": 3032,
"▁Tác": 3033,
"▁3.000": 3034,
"▁tuyết": 3035,
"▁HIV": 3036,
"é": 3037,
"▁lă": 3038,
"hi": 3039,
"▁Hong": 3040,
"▁55": 3041,
"▁AP": 3042,
"▁42": 3043,
"hang": 3044,
"▁lân": 3045,
"bi": 3046,
"land": 3047,
"BS": 3048,
"▁hé": 3049,
"▁APEC": 3050,
"▁mụn": 3051,
"▁châ": 3052,
"HN": 3053,
"der": 3054,
"▁5.000": 3055,
"▁Inter": 3056,
"▁chuộng": 3057,
"▁oan": 3058,
"▁Sức": 3059,
"▁Times": 3060,
"▁điên": 3061,
"▁Sun": 3062,
"▁Afghanistan": 3063,
"MT": 3064,
"▁James": 3065,
"▁Tiêu": 3066,
"▁Chá": 3067,
"▁Cửa": 3068,
"/2011": 3069,
"▁thấm": 3070,
"300": 3071,
"/12/": 3072,
"15": 3073,
"▁dán": 3074,
"▁Sá": 3075,
"▁Ya": 3076,
"▁Han": 3077,
"▁Italy": 3078,
"▁Bó": 3079,
"▁ló": 3080,
"il": 3081,
"▁đụng": 3082,
"▁chá": 3083,
"▁Hướng": 3084,
"CCC": 3085,
"▁Ne": 3086,
"▁ao": 3087,
"▁12.": 3088,
"▁Kiếm": 3089,
"hà": 3090,
"▁Thầy": 3091,
"▁Plus": 3092,
"▁39": 3093,
"▁đúc": 3094,
"▁gay": 3095,
"si": 3096,
"▁men": 3097,
"▁no": 3098,
"▁lát": 3099,
"▁Sen": 3100,
"▁Pro": 3101,
"▁nhị": 3102,
"▁09": 3103,
"CL": 3104,
"▁Ar": 3105,
"▁SH": 3106,
"▁Bây": 3107,
"▁Za": 3108,
"▁11.": 3109,
"▁tí": 3110,
"▁Thánh": 3111,
"▁ran": 3112,
"▁lõi": 3113,
"▁2019": 3114,
"/11/": 3115,
"▁Kon": 3116,
"▁Ben": 3117,
"▁phơi": 3118,
"▁Viễn": 3119,
"▁Ka": 3120,
"600": 3121,
"▁Michael": 3122,
"▁Airlines": 3123,
"▁Cảm": 3124,
"▁mười": 3125,
"J": 3126,
"▁100.000": 3127,
"▁Cố": 3128,
"Anh": 3129,
"▁Saudi": 3130,
"▁đằng": 3131,
"xit": 3132,
"▁Ngu": 3133,
"▁na": 3134,
"▁Miss": 3135,
"▁Mu": 3136,
"/10/": 3137,
"▁Cung": 3138,
"f": 3139,
"▁Sam": 3140,
"▁Đánh": 3141,
'▁("': 3142,
"ie": 3143,
"▁250": 3144,
"▁Loại": 3145,
"as": 3146,
"▁Italia": 3147,
"800": 3148,
"▁Nguyệt": 3149,
"bo": 3150,
"▁Clip": 3151,
"ve": 3152,
"▁Môn": 3153,
"▁Car": 3154,
"▁nha": 3155,
"▁Phủ": 3156,
"▁Chất": 3157,
"▁Twitter": 3158,
"hó": 3159,
"▁02": 3160,
"ine": 3161,
"▁Dùng": 3162,
"én": 3163,
"▁lé": 3164,
"▁Tokyo": 3165,
"▁65": 3166,
"TN": 3167,
"ura": 3168,
"▁Viết": 3169,
"▁Clinton": 3170,
"uyễn": 3171,
"xi": 3172,
"▁Shi": 3173,
"▁Grab": 3174,
"6.000": 3175,
"▁BS": 3176,
"▁Phân": 3177,
"▁California": 3178,
"▁lưỡi": 3179,
"21": 3180,
"Nam": 3181,
"ny": 3182,
"▁trăng": 3183,
"▁Don": 3184,
"▁thằng": 3185,
"▁Pe": 3186,
"▁thạch": 3187,
"In": 3188,
"GB": 3189,
'")': 3190,
"▁West": 3191,
| |
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erfc
import lmfit
import logging
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.analysis.tools import data_manipulation as dm_tools
#################################
# Fitting Functions Library #
#################################
def RandomizedBenchmarkingDecay(numCliff, Amplitude, p, offset):
val = Amplitude * (p ** numCliff) + offset
return val
def DoubleExpDampOscFunc(t, tau_1, tau_2,
freq_1, freq_2,
phase_1, phase_2,
amp_1, amp_2, osc_offset):
cos_1 = amp_1 * (np.cos(2 * np.pi * freq_1 * t + phase_1)) * np.exp(-(t / tau_1))
cos_2 = amp_2 * (np.cos(2 * np.pi * freq_2 * t + phase_2)) * np.exp(-(t / tau_2))
return cos_1 + cos_2 + osc_offset
def double_RandomizedBenchmarkingDecay(numCliff, p, offset,
invert=1):
"""
A variety of the RB-curve that allows fitting both the inverting and
non-inverting exponential.
The amplitude of the decay curve is constrained to start at 0 or 1.
The offset is the common point both curves converge to.
pick invert to be 1 or 0
"""
# Inverting clifford curve
val_inv = (1 - offset) * (p ** numCliff) + offset
# flipping clifford curve
val_flip = -offset * (p ** numCliff) + offset
# Using invert as a boolean but not using if statement to allow for
# arrays to be input in the function
val = (1 - invert) * val_flip + invert * val_inv
return val
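# Minimal sketch of how the two branches behave (illustrative parameter values):
#   ncl = np.arange(0, 400, 50)
#   upper = double_RandomizedBenchmarkingDecay(ncl, p=0.99, offset=0.5, invert=1)  # starts near 1
#   lower = double_RandomizedBenchmarkingDecay(ncl, p=0.99, offset=0.5, invert=0)  # starts near 0
# Both curves converge to `offset` as the number of Cliffords grows.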
def LorentzFunc(f, amplitude, center, sigma):
val = amplitude / np.pi * (sigma / ((f - center) ** 2 + sigma ** 2))
return val
def Lorentzian(f, A, offset, f0, kappa):
val = offset + A / np.pi * (kappa / ((f - f0) ** 2 + kappa ** 2))
return val
def TwinLorentzFunc(f, A_gf_over_2, A, f0_gf_over_2, f0,
kappa_gf_over_2, kappa, background=0):
"""
Twin lorentz with background.
Args:
f (float): frequency sweep points in Hz
A (float): amplitude of the tallest/deepest Lorentzian structure
in the data
A_gf_over_2 (float): amplitude of the other Lorentzian structure in the
data; since this function is used for high power
qubit spectroscopy, this parameter refers to the
Lorentzian structure corresponding to the gf/2
transition
f0 (float): frequency of the tallest/deepest Lorentzian structure
in the data
f0_gf_over_2 (float): frequency of the other Lorentzian structure in the
data; since this function is used for high power
qubit spectroscopy, this parameter refers to the
Lorentzian structure corresponding to the gf/2
transition
kappa (float): kappa (FWHM) of the tallest/deepest Lorentzian structure
in the data
kappa_gf_over_2 (float): kappa (FWHM) of the other Lorentzian structure in
the data; since this function is used for high
power qubit spectroscopy, this parameter refers to
the Lorentzian structure corresponding to the gf/2
transition
background (float): background offset
"""
val = (A_gf_over_2 / np.pi * (kappa_gf_over_2 / ((f - f0_gf_over_2) ** 2 + kappa_gf_over_2 ** 2)) +
A / np.pi * (kappa / ((f - f0) ** 2 + kappa ** 2)) + background)
return val
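# These plain functions are typically wrapped in lmfit models before fitting; a
# minimal sketch assuming measured data `mag` on a frequency axis `freqs`
# (the initial guesses below are placeholders, not calibrated values):
#   model = lmfit.Model(Lorentzian, independent_vars=['f'])
#   params = model.make_params(A=1e6, offset=0.0, f0=6.0e9, kappa=1e6)
#   result = model.fit(mag, params, f=freqs)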
def Qubit_dac_to_freq(dac_voltage, f_max, E_c,
dac_sweet_spot, V_per_phi0=None,
dac_flux_coefficient=None,
asymmetry=0, **kwargs):
'''
The cosine Arc model for uncalibrated flux for asymmetric qubit.
dac_voltage (V)
f_max (Hz): sweet-spot frequency of the qubit
E_c (Hz): charging energy of the qubit
V_per_phi0 (V): volt per phi0 (convert voltage to flux)
dac_sweet_spot (V): voltage at which the sweet-spot is found
    asymmetry (dimensionless asymmetry param) = abs((EJ1-EJ2)/(EJ1+EJ2))
'''
if V_per_phi0 is None and dac_flux_coefficient is None:
raise ValueError('Please specify "V_per_phi0".')
if dac_flux_coefficient is not None:
logging.warning('"dac_flux_coefficient" deprecated. Please use the '
'physically meaningful "V_per_phi0" instead.')
V_per_phi0 = np.pi / dac_flux_coefficient
qubit_freq = (f_max + E_c) * (
asymmetry ** 2 + (1 - asymmetry ** 2) *
np.cos(np.pi / V_per_phi0 *
(dac_voltage - dac_sweet_spot)) ** 2) ** 0.25 - E_c
return qubit_freq
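# Sanity check of the arc model above: at the sweet spot the cosine term equals 1,
# so the expression collapses to (f_max + E_c) - E_c = f_max for any asymmetry.
# With illustrative numbers:
#   Qubit_dac_to_freq(dac_voltage=0.1, f_max=6.1e9, E_c=0.28e9,
#                     dac_sweet_spot=0.1, V_per_phi0=0.5)  # -> 6.1e9 Hz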
def Resonator_dac_to_freq(dac_voltage, f_max_qubit, f_0_res,
E_c, dac_sweet_spot,
coupling, V_per_phi0=None,
dac_flux_coefficient=None,
asymmetry=0, **kwargs):
qubit_freq = Qubit_dac_to_freq(dac_voltage=dac_voltage, f_max=f_max_qubit, E_c=E_c,
dac_sweet_spot=dac_sweet_spot, V_per_phi0=V_per_phi0,
dac_flux_coefficient=dac_flux_coefficient,
asymmetry=asymmetry)
delta_qr = (qubit_freq - f_0_res)
lamb_shift = (coupling ** 2 / delta_qr)
resonator_freq = f_0_res - lamb_shift
return resonator_freq
def Qubit_dac_to_detun(dac_voltage, f_max, E_c, dac_sweet_spot, V_per_phi0,
asymmetry=0):
'''
The cosine Arc model for uncalibrated flux for asymmetric qubit.
dac_voltage (V)
f_max (Hz): sweet-spot frequency of the qubit
E_c (Hz): charging energy of the qubit
V_per_phi0 (V): volt per phi0 (convert voltage to flux)
dac_sweet_spot (V): voltage at which the sweet-spot is found
asymmetry (dimensionless asymmetry param) = abs((EJ1-EJ2)/(EJ1+EJ2))
'''
return f_max - Qubit_dac_to_freq(dac_voltage,
f_max=f_max, E_c=E_c,
dac_sweet_spot=dac_sweet_spot,
V_per_phi0=V_per_phi0,
asymmetry=asymmetry)
def Qubit_freq_to_dac(frequency, f_max, E_c,
dac_sweet_spot, V_per_phi0=None,
dac_flux_coefficient=None, asymmetry=0,
branch='positive'):
'''
The cosine Arc model for uncalibrated flux for asymmetric qubit.
This function implements the inverse of "Qubit_dac_to_freq"
frequency (Hz)
f_max (Hz): sweet-spot frequency of the qubit
E_c (Hz): charging energy of the qubit
V_per_phi0 (V): volt per phi0 (convert voltage to flux)
    asymmetry (dimensionless asymmetry param) = abs((EJ1-EJ2)/(EJ1+EJ2))
dac_sweet_spot (V): voltage at which the sweet-spot is found
branch (enum: 'positive' 'negative')
'''
if V_per_phi0 is None and dac_flux_coefficient is None:
raise ValueError('Please specify "V_per_phi0".')
# asymm_term = (asymmetry**2 + (1-asymmetry**2))
# dac_term = np.arccos(((frequency+E_c)/((f_max+E_c) * asymm_term))**2)
dac_term = np.arccos(np.sqrt(
(((frequency + E_c) / (f_max + E_c)) ** 4 - asymmetry ** 2) /
(1 - asymmetry ** 2)))
if dac_flux_coefficient is not None:
logging.warning('"dac_flux_coefficient" deprecated. Please use the '
'physically meaningful "V_per_phi0" instead.')
V_per_phi0 = np.pi / dac_flux_coefficient
if branch == 'positive':
dac_voltage = dac_term * V_per_phi0 / np.pi + dac_sweet_spot
elif branch == 'negative':
dac_voltage = -dac_term * V_per_phi0 / np.pi + dac_sweet_spot
else:
raise ValueError('branch {} not recognized'.format(branch))
return dac_voltage
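# The two arc-model functions above are designed to be mutual inverses; a hedged
# round-trip sketch (numbers are illustrative only):
#   v = Qubit_freq_to_dac(5.8e9, f_max=6.1e9, E_c=0.28e9, dac_sweet_spot=0.1,
#                         V_per_phi0=0.5, branch='positive')
#   Qubit_dac_to_freq(v, f_max=6.1e9, E_c=0.28e9, dac_sweet_spot=0.1,
#                     V_per_phi0=0.5)  # -> ~5.8e9 Hz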
def Qubit_dac_sensitivity(dac_voltage, f_max: float, E_c: float,
dac_sweet_spot: float, V_per_phi0: float,
asymmetry: float = 0):
'''
Derivative of the qubit detuning vs dac at dac_voltage.
The returned quantity is "dfreq/dPhi (dac_voltage)"
'''
cos_term = np.cos(np.pi / V_per_phi0 * (dac_voltage - dac_sweet_spot))
sin_term = np.sin(np.pi / V_per_phi0 * (dac_voltage - dac_sweet_spot))
return ((f_max + E_c) * (1 - asymmetry ** 2) * np.pi / (2 * V_per_phi0) *
cos_term * sin_term * (asymmetry ** 2 + (1 - asymmetry ** 2) *
cos_term ** 2) ** (-0.75))
def QubitFreqDac(dac_voltage, f_max, E_c,
dac_sweet_spot, dac_flux_coefficient, asymmetry=0):
logging.warning('deprecated, replace QubitFreqDac with Qubit_dac_to_freq')
return Qubit_dac_to_freq(dac_voltage, f_max, E_c,
dac_sweet_spot, dac_flux_coefficient, asymmetry)
def QubitFreqFlux(flux, f_max, E_c,
flux_zero, dac_offset=0):
'The cosine Arc model for calibrated flux.'
calculated_frequency = (f_max + E_c) * np.sqrt(np.abs(
np.cos(np.pi * (flux - dac_offset) / flux_zero))) - E_c
return calculated_frequency
def CosFunc(t, amplitude, frequency, phase, offset):
'''
parameters:
t, time in s
amplitude a.u.
frequency in Hz (f, not omega!)
phase in rad
offset a.u.
'''
return amplitude * np.cos(2 * np.pi * frequency * t + phase) + offset
def ExpDecayFunc(t, tau, amplitude, offset, n):
return amplitude * np.exp(-(t / tau) ** n) + offset
def idle_error_rate_exp_decay(N, N1, N2, A, offset):
"""
exponential decay consisting of two components
"""
return A * np.exp(-N / N1 - (N / N2) ** 2) + offset
def gain_corr_ExpDecayFunc(t, tau, amp, gc):
"""
Specific form of an exponential decay used for flux corrections.
Includes a "gain correction" parameter that is ignored when correcting
the distortions.
"""
y = gc * (1 + amp * np.exp(-t / tau))
return y
def gain_corr_double_ExpDecayFunc(t, tau_A, tau_B, amp_A, amp_B, gc):
"""
Specific form of an exponential decay used for flux corrections.
Includes a "gain correction" parameter that is ignored when correcting
the distortions.
"""
y = gc * (1 + amp_A * np.exp(-t / tau_A) + amp_B * np.exp(-t / tau_B))
return y
def ExpDampOscFunc(t, tau, n, frequency, phase, amplitude,
oscillation_offset, exponential_offset):
return amplitude * np.exp(-(t / tau) ** n) * (np.cos(
2 * np.pi * frequency * t + phase) + oscillation_offset) + exponential_offset
def GaussExpDampOscFunc(t, tau, tau_2, frequency, phase, amplitude,
oscillation_offset, exponential_offset):
return amplitude * np.exp(-(t / tau_2) ** 2 - (t / tau)) * (np.cos(
2 * np.pi * frequency * t + phase) + oscillation_offset) + exponential_offset
def ExpDampDblOscFunc(t, tau, n, freq_1, freq_2, phase_1, phase_2,
amp_1, amp_2,
osc_offset_1, osc_offset_2, exponential_offset):
'''
Exponential decay with double cosine modulation
'''
exp_decay = np.exp(-(t / tau) ** n)
cos_1 = (np.cos(
2 * np.pi * freq_1 * t + phase_1) + osc_offset_1)
cos_2 = (np.cos(
2 * np.pi * freq_2 * t + phase_2) + osc_offset_2)
    return amp_1 * exp_decay * cos_1 + amp_2 * exp_decay * cos_2 + exponential_offset
| |
print("entro al From2")
# Subquerie
print(Qffrom.clist)
print(Qffrom.id)
# WHERE
if isinstance(Qwhere, SWhere):
print("entro al Where")
for col in Qwhere.clist:
if isinstance(col, SWhereCond1):
print("Es where1")
print(col.conds)
# print(col.conds.param.opIzq.valor)
# print(col.conds.param.operador)
# print(col.conds.param.opDer.valor)
elif isinstance(col, SWhereCond2):
print("Es where2")
print(col.conds)
print(col.isnotNull)
elif isinstance(col, SWhereCond3):
print("Es where3")
print(col.conds)
print(col.directiva)
elif isinstance(col, SWhereCond4):
print("Es where4")
print(col.conds)
print(col.ffrom)
elif isinstance(col, SWhereCond5):
print("Es where5")
print(col.c1)
print(col.c2)
print(col.c3)
elif isinstance(col, SWhereCond6):
print("Es where6")
print(col.cols)
elif isinstance(col, SWhereCond7):
print("Es where7")
print(col.efunc)
print(col.qcols)
print(col.anyallsome)
print(col.operador)
elif isinstance(col, SWhereCond8):
print("Es where8")
print(col.qcols)
print(col.efunc)
elif isinstance(col, SWhereCond9):
print("Es where9")
print(col.between)
print(col.efunc)
print(col.efunc2)
else:
print("col")
print(col)
# GROUP BY
if isinstance(Qgroupby, SGroupBy):
print("entro al Group By")
for col in Qgroupby.slist:
if isinstance(col, SExpresion):
print("Agrupado por")
print(col.valor)
else:
print("Agrupado por")
print(col)
# HAVING
if isinstance(Qhaving, SHaving):
print("entro al Having")
print(Qhaving.efunc)
# ORDER BY
if isinstance(Qorderby, sOrderBy):
print("entro al Order By")
for col in Qorderby.slist:
if isinstance(col, SListOrderBy):
if col.ascdesc == False and col.firstlast == False:
print("OrderBy1")
print(col.listorder)
elif col.ascdesc == False and col.firstlast != False:
print("OrderBy2")
print(col.listorder)
print(col.firstlast)
elif col.ascdesc != False and col.firstlast == False:
print("OrderBy3")
print(col.listorder)
print(col.ascdesc)
elif col.ascdesc != False and col.firstlast != False:
print("OrderBy4")
print(col.listorder)
print(col.ascdesc)
print(col.firstlast)
# LIMIT
if isinstance(Qlimit, SLimit):
print("Entro a Limit")
if isinstance(Qlimit.limit, SExpresion):
print(Qlimit.limit.valor)
else:
print(Qlimit.limit)
if isinstance(Qlimit.offset, SExpresion):
print(Qlimit.offset.valor)
else:
print(Qlimit.offset)
print("Operador " + str(nodo.ope))
print("Query no 2")
if isinstance(nodo.query2, SQuery):
Qselect = nodo.query2.select
Qffrom = nodo.query2.ffrom
Qwhere = nodo.query2.where
Qgroupby = nodo.query2.groupby
Qhaving = nodo.query2.having
Qorderby = nodo.query2.orderby
Qlimit = nodo.query2.limit
# SELECT
if isinstance(Qselect, SSelectCols):
print("Entro a Select")
# Distinct
if Qselect.distinct != False:
print("Distinct True")
# Cantidad de columnas
if Qselect.cols == "*":
print("Todas las Columnas")
else:
print("Columnas Específicas")
for col in Qselect.cols:
##LISTAS
if isinstance(col.cols, SExpresion):
print("Expre")
print(col.cols.valor)
# print("Tipo")
# print(col.cols.tipo)
elif isinstance(col.cols, SOperacion):
print("Operación")
if isinstance(col.cols.opIzq, SExpresion):
print(col.cols.opIzq.valor)
print(col.cols.operador)
print(col.cols.opDer.valor)
##FUNCIONES DE AGREGACION
elif isinstance(col.cols, SFuncAgregacion):
print("Funcion Agregación:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("val")
print(col.cols.param.valor)
else:
print("val")
print(col.cols.param)
##FUNCIONES MATH
elif isinstance(col.cols, SFuncMath):
print("Funcion Math:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("param")
print(col.cols.param.valor)
else:
print("param")
print(col.cols.param)
elif isinstance(col.cols, SFuncMath2):
print("Funcion Math2:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param2.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.param2)
elif isinstance(col.cols, SFuncMathSimple):
print("Funcion MathSimple:")
print(col.cols.funcion)
##FUNCIONES TRIG
elif isinstance(col.cols, SFuncTrig):
print("Funcion Trig1:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("param")
print(col.cols.param.valor)
else:
print("param")
print(col.cols.param)
elif isinstance(col.cols, SFuncTrig2):
print("Funcion Trig2:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param2.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.param2)
##FUNCIONES BINARIAS
elif isinstance(col.cols, SFuncBinary):
print("Funcion Binaria1:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("param")
print(col.cols.param.valor)
else:
print("param")
print(col.cols.param)
elif isinstance(col.cols, SFuncBinary2):
print("Funcion Binaria2:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param2.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.param2)
elif isinstance(col.cols, SFuncBinary3):
print("Funcion Binaria3:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param.det)
print(col.cols.param2.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.det)
print(col.cols.param2)
elif isinstance(col.cols, SFuncBinary4):
print("Funcion Binaria4:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param2.valor)
print(col.cols.param3.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.param2)
print(col.cols.param3)
# EXTRACT
elif isinstance(col.cols, SExtract):
print("Funcion Extract:")
if isinstance(col.cols.field, STipoDato):
print(col.cols.field.dato)
print(col.cols.field.tipo)
print(col.cols.field.cantidad)
print(col.cols.timestampstr)
elif isinstance(col.cols, SExtract2):
print("Funcion Extract2:")
if isinstance(col.cols.field, STipoDato):
print(col.cols.field)
print(col.cols.dtype)
if isinstance(col.cols.timestampstr, SExpresion):
print("param")
print(col.cols.timestampstr.valor)
# FUNCIONES DE FECHA
elif isinstance(col.cols, SSelectFunc):
print("Funcion getFecha:")
print(col.cols.id)
elif isinstance(col.cols, SFechaFunc):
print("Funcion Fecha:")
print(col.cols.param)
print(col.cols.param2)
elif isinstance(col.cols, SFechaFunc2):
print("Funcion Fecha2:")
print(col.cols.id)
print(col.cols.param)
print(col.cols.tipo)
print(col.cols.param2)
# CASE
elif isinstance(col.cols, SCase):
print("Funcion Case:")
if isinstance(col.cols.casos, SCaseList):
print(col.cols.casos.param)
print(col.cols.casos.param2)
print(col.cols.casos.clist)
elif isinstance(col.cols, SCaseElse):
print("Funcion CaseElse:")
if isinstance(col.cols.casos, SCaseList):
print(col.cols.casos.param)
print(col.cols.casos.param2)
print(col.cols.casos.clist)
print(col.cols.casoelse)
# OTRAS FUNCIONES
elif isinstance(col, SColumnasSubstr):
print("Funcion Substr:")
print(col.cols)
print(col.cols2)
print(col.cols3)
elif isinstance(col, SColumnasGreatest):
print("Funcion Greatest:")
print(col.cols)
elif isinstance(col.cols, SColumnasLeast):
print("Funcion Least:")
print(col.cols)
else:
print("Otro")
print(col.id)
print(col.cols)
# ALIAS
if col.id != False:
if isinstance(col.id, SExpresion):
print("Alias")
print(col.id.valor)
# FROM
if isinstance(Qffrom, SFrom):
print("entro al From")
for col in Qffrom.clist:
if isinstance(col, SAlias):
if col.alias == False:
print("id")
print(col.id)
else:
print("id/alias")
print(col.id)
print(col.alias)
elif isinstance(Qffrom, SFrom2):
print("entro al From2")
# Subquerie
print(Qffrom.clist)
print(Qffrom.id)
# WHERE
if isinstance(Qwhere, SWhere):
print("entro al Where")
for col in Qwhere.clist:
if isinstance(col, SWhereCond1):
print("Es where1")
print(col.conds)
# print(col.conds.param.opIzq.valor)
# print(col.conds.param.operador)
# print(col.conds.param.opDer.valor)
elif isinstance(col, SWhereCond2):
print("Es where2")
print(col.conds)
print(col.isnotNull)
elif isinstance(col, SWhereCond3):
print("Es where3")
print(col.conds)
print(col.directiva)
elif isinstance(col, SWhereCond4):
print("Es where4")
print(col.conds)
print(col.ffrom)
elif isinstance(col, SWhereCond5):
print("Es where5")
print(col.c1)
print(col.c2)
print(col.c3)
elif isinstance(col, SWhereCond6):
print("Es where6")
print(col.cols)
elif isinstance(col, SWhereCond7):
print("Es where7")
print(col.efunc)
print(col.qcols)
print(col.anyallsome)
print(col.operador)
elif isinstance(col, SWhereCond8):
print("Es where8")
print(col.qcols)
print(col.efunc)
elif isinstance(col, SWhereCond9):
print("Es where9")
print(col.between)
print(col.efunc)
print(col.efunc2)
else:
print("col")
print(col)
# GROUP BY
if isinstance(Qgroupby, SGroupBy):
print("entro al Group By")
for col in Qgroupby.slist:
if isinstance(col, SExpresion):
print("Agrupado por")
print(col.valor)
else:
print("Agrupado por")
print(col)
# HAVING
if isinstance(Qhaving, SHaving):
print("entro al Having")
print(Qhaving.efunc)
# ORDER BY
if isinstance(Qorderby, sOrderBy):
print("entro al Order By")
for col in Qorderby.slist:
if isinstance(col, SListOrderBy):
if col.ascdesc == False and col.firstlast == False:
print("OrderBy1")
print(col.listorder)
elif col.ascdesc == False and col.firstlast != False:
print("OrderBy2")
print(col.listorder)
print(col.firstlast)
elif col.ascdesc != False and col.firstlast == False:
print("OrderBy3")
print(col.listorder)
print(col.ascdesc)
elif col.ascdesc != False and col.firstlast != False:
print("OrderBy4")
print(col.listorder)
print(col.ascdesc)
print(col.firstlast)
# LIMIT
if isinstance(Qlimit, SLimit):
print("Entro a Limit")
if isinstance(Qlimit.limit, SExpresion):
print(Qlimit.limit.valor)
else:
print(Qlimit.limit)
if isinstance(Qlimit.offset, SExpresion):
print(Qlimit.offset.valor)
else:
print(Qlimit.offset)
for i in listaSemanticos:
print(i)
consola += i.descripcion + "\n"
return consola
def deleteBase(nodo, tablaSimbolos):
global consola
print("Delete Table-----------")
if nodo.listaWhere == False:
print("Sin Where")
else:
registros = jBase.extractTable(useActual, nodo.id)
actualizar = []
if registros != None:
tabla = tablaSimbolos.get(useActual).getTabla(nodo.id)
columnas = tabla.columnas
tupla = {"nombreC": [], "tipo": [], "valor": []}
nombres = []
valores = []
tipos = []
primary = []
llaves = []
for k in columnas:
tupla["nombreC"].append(columnas[k].nombre)
tupla["tipo"].append(columnas[k].tipo)
nombres.append(columnas[k].nombre)
tipos.append(columnas[k].tipo)
for r in registros:
for c in r:
tupla["valor"].append(c)
b = Interpreta_Expresion(nodo.listaWhere, tablaSimbolos, tupla)
tupla["valor"].clear()
if b.valor:
actualizar.append(r)
bandera1 = False
primary = tabla.get_pk_index()
for x in range(len(actualizar)):
for t in range(len(actualizar[x])):
for r in range(len(primary)):
if primary[r] == t:
llaves.append(actualizar[x][t])
rs = jBase.delete(useActual, tabla.nombre, llaves)
if rs == 0:
consola += "La columna con PK '%s' ha sido eliminada con éxito" % str(llaves) + "\n"
elif rs == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Error al intentar eliminar la columna con PK '%s', Error en la operación" % (
str(llaves))))
elif rs == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Error al intentar eliminar la columna con PK '%s', La base de datos '%s' no ha sido hallada" % (
str(llaves), useActual)))
elif rs == 3:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Error al intentar eliminar la columna con PK '%s', La tabla '%s' no ha sido hallada" % (
str(llaves), tabla.nombre)))
elif rs == 4:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Error al intentar eliminar la columna con PK '%s', Llave primaria no encontrada" % (
str(llaves))))
llaves.clear()
def crearBase(nodo, tablaSimbolos):
val = nodo.id.valor
global consola
if nodo.replace == False and nodo.exists == False:
if nodo.owner == False and nodo.mode == False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, None, None)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner == False and nodo.mode != False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, None, nodo.mode)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner != False and nodo.mode == False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, nodo.owner.valor, None)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner != False and nodo.mode != False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, nodo.owner.valor, nodo.mode)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.replace != False and nodo.exists == False:
jBase.dropDatabase(val)
if nodo.owner == False and nodo.mode == False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, None, None)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner == False and nodo.mode != False:
| |
<filename>Code/frontend.py
import streamlit as st
import pandas as pd
import hashlib
import mysql.connector as mysql
from streamlit import caching
import os
def make_hashes(password):
return hashlib.sha256(str.encode(password)).hexdigest()
def check_hashes(password,hashed_text):
if make_hashes(password) == hashed_text:
return hashed_text
return False
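# Minimal usage sketch of the two helpers above (the password string is a placeholder):
#   stored = make_hashes("example-password")
#   check_hashes("example-password", stored)  # -> returns the stored hash (truthy)
#   check_hashes("wrong-password", stored)    # -> False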
# Database Details
sportify = {"database": "sportify_db"
,"account": "accounts_db"
,"admin": "administrator_db"
,"borrow": "borrowequip_db"
,"coach": "coach_db"
,"collegeEquip": "collegeequip_db"
,"collegeOrder": "collegeorders_db"
,"event": "events_db"
,"favourite": "favorite_db"
,"friend": "friend_db"
,"issue": "issueequip_db"
,"judge": "judges_db"
,"participate": "participation_db"
,"referee": "referee_db"
,"sports": "sports_db"
,"trains": "trains_db"
,"user": "user_db"
,"userEquip": "userequip_db"
,"userOrder": "userorders_db"
,"vendor": "vendor_db"
,"venue": "venue_db"
,"booking": "venuebooking_db"
}
# DB Management
db = mysql.connect(
host = "127.0.0.1",
user = "root",
passwd = "<PASSWORD>",
# passwd = os.environ.get("DB_PASSWORD"),
database = sportify["database"],
auth_plugin="mysql_native_password"
)
cursor = db.cursor()
# DB Functions
def create_usertable():
cursor.execute('CREATE TABLE IF NOT EXISTS accounts_db(Username TEXT, Account_Password TEXT, User_Type TEXT);')
def add_userdata(username,password,user_type):
cursor.execute('INSERT INTO accounts_db(Username, Account_Password, User_Type) VALUES (%s,%s,%s);',(username,password,user_type))
db.commit()
def login_user(username,password,user_type):
cursor.execute("SELECT * FROM accounts_db WHERE Username= %s AND Account_Password= %s AND User_type= %s;",(username,password,user_type))
data = cursor.fetchall()
return data
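# Typical signup/login flow built on the helpers above (illustrative values only;
# "User" stands in for whichever account type the app defines):
#   create_usertable()
#   add_userdata("jdoe", make_hashes("example-password"), "User")
#   rows = login_user("jdoe", make_hashes("example-password"), "User")
#   bool(rows)  # -> True when the credentials match a stored account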
# All Queries
# Vendor
def VenderQueries(username):
#cursor.execute('CREATE TABLE IF NOT EXISTS vendor_db(Vendor_ID INT NOT NULL PRIMARY KEY AUTO_INCREMENT, Username TEXT, Shop_name VARCHAR(255) NOT NULL, Email VARCHAR(255) NOT NULL, Phone_number CHAR(15) NOT NULL, Equipments_repaired INT DEFAULT 0, Address VARCHAR(255) NOT NULL);')
cursor.execute("SELECT * FROM vendor_db WHERE username= %s",(username,))
profile_created = cursor.fetchall()
if(profile_created):
queries = ["View Profile", "Update Profile", "Check College Orders", "Check User Orders"]
else:
queries = ["Update Profile", "View Profile", "Check College Orders", "Check User Orders"]
query = st.selectbox("Query", queries)
if query == "Update Profile":
st.subheader(query)
shop_name = st.text_input("Shop Name", max_chars=255)
email = st.text_input("Email", max_chars=255)
phone = st.text_input("Phone", max_chars=15)
address = st.text_input("Address", max_chars=255)
if st.button("Update"):
if not profile_created:
cursor.execute('INSERT INTO vendor_db(Username, Shop_name, Email, Phone_number, Address) VALUES (%s,%s,%s,%s,%s)',(username,shop_name,email,phone,address))
db.commit()
st.success("You have successfully created a valid Account")
profile_created = True
else:
cursor.execute('UPDATE vendor_db SET Shop_name = %s, Email = %s, Phone_number = %s, Address = %s WHERE Username = %s',(shop_name,email,phone,address,username))
db.commit()
st.success("You have successfully updated your account")
elif query == "View Profile":
cursor.execute("SELECT * FROM vendor_db WHERE username= %s",(username,))
profile = cursor.fetchall()
st.text("Username: {}".format(profile[0][1]))
st.text("Shop Name: {}".format(profile[0][2]))
st.text("Email: {}".format(profile[0][3]))
st.text("Phone: {}".format(profile[0][4]))
st.text("Address: {}".format(profile[0][6]))
elif query == "Check College Orders":
order_type = st.selectbox("Type of Order", ["Buy Order", "Repair Order"])
if order_type == "Repair Order":
cursor.execute("SELECT Vendor_ID FROM vendor_db WHERE username= %s",(username,))
vendor_id = cursor.fetchall()[0][0]
cursor.execute("SELECT * FROM collegeorders_db WHERE Vendor_ID= {} AND Type= 'Repair'".format(vendor_id,))
buy_order = cursor.fetchall()
buy_order = pd.DataFrame(buy_order, columns=["Order_ID", "Type", "Order_Status", "Equipment_ID", "Vendor_ID"])
st.dataframe(buy_order)
            selected_indices = st.multiselect('Select rows:', buy_order.Equipment_ID)
if st.button("Update Status"):
for index in selected_indices:
cursor.execute('UPDATE collegeorders_db SET Order_Status = "Complete" WHERE Equipment_ID = {}'.format(index))
db.commit()
cursor.execute('UPDATE collegeequip_db SET Ongoing_Repair = 0, Cond = "Good" WHERE Equipment_ID = {}'.format(index))
db.commit()
st.success("Successfully updated the order status")
elif order_type == "Buy Order":
cursor.execute("SELECT Vendor_ID FROM vendor_db WHERE username= %s",(username,))
vendor_id = cursor.fetchall()[0][0]
cursor.execute("SELECT * FROM collegeorders_db WHERE Vendor_ID= {} AND Type= 'Buy'".format(vendor_id,))
buy_order = cursor.fetchall()
buy_order = pd.DataFrame(buy_order, columns=["Order_ID", "Type", "Order_Status", "Equipment_ID", "Vendor_ID"])
st.dataframe(buy_order)
            selected_indices = st.multiselect('Select rows:', buy_order.Equipment_ID)
if st.button("Update Status"):
for index in selected_indices:
cursor.execute('UPDATE collegeorders_db SET Order_Status = "Complete" WHERE Equipment_ID = {}'.format(index))
db.commit()
cursor.execute('UPDATE collegeequip_db SET Ongoing_Repair = 0, Cond = "Good" WHERE Equipment_ID = {}'.format(index))
db.commit()
st.success("Successfully updated the order status")
elif query == "Check User Orders":
order_type = st.selectbox("Type of Order", ["Buy Order", "Repair Order"])
if order_type == "Repair Order":
cursor.execute("SELECT Vendor_ID FROM vendor_db WHERE username= %s",(username,))
vendor_id = cursor.fetchall()[0][0]
cursor.execute("SELECT * FROM userorders_db WHERE Vendor_ID= {} AND Type= 'Repair'".format(vendor_id,))
buy_order = cursor.fetchall()
buy_order = pd.DataFrame(buy_order, columns=["Order_ID", "Type", "Order_Status", "User_ID", "Equipment_ID", "Vendor_ID"])
st.dataframe(buy_order)
selected_indices = st.multiselect('Select rows:', buy_order.Equipment_ID)
if st.button("Update Status"):
for index in selected_indices:
cursor.execute('UPDATE userorders_db SET Order_Status = "Complete" WHERE Equipment_ID = {}'.format(index))
db.commit()
cursor.execute('UPDATE userequip_db SET Ongoing_Repair = 0, Cond = "Good" WHERE Equipment_ID = {}'.format(index))
db.commit()
st.success("Successfully updated the order status")
elif order_type == "Buy Order":
cursor.execute("SELECT Vendor_ID FROM vendor_db WHERE username= %s",(username,))
vendor_id = cursor.fetchall()[0][0]
cursor.execute("SELECT * FROM userorders_db WHERE Vendor_ID= {} AND Type= 'Buy'".format(vendor_id,))
buy_order = cursor.fetchall()
buy_order = pd.DataFrame(buy_order, columns=["Order_ID", "Type", "Order_Status", "User_ID", "Equipment_ID", "Vendor_ID"])
st.dataframe(buy_order)
selected_indices = st.multiselect('Select rows:', buy_order.Equipment_ID)
if st.button("Update Status"):
for index in selected_indices:
cursor.execute('UPDATE userorders_db SET Order_Status = "Complete" WHERE Equipment_ID = {}'.format(index))
db.commit()
cursor.execute('UPDATE userequip_db SET Ongoing_Repair = 0, Cond = "Good" WHERE Equipment_ID = {}'.format(index))
db.commit()
st.success("Successfully updated the order status")
def Coach_Queries(username):
cursor.execute("SELECT * from coach_db WHERE username = %s;", (username,))
coach_profile_created = cursor.fetchall()
if(coach_profile_created):
queries = ["View Profile", "Update Profile", "Change Availability Status", "Check Current Students", "Check Feedbacks"]
else:
queries = ["Update Profile", "View Profile", "Change Availability Status", "Check Current Students", "Check Feedbacks"]
query = st.selectbox("Query", queries)
if query == "Update Profile":
st.subheader(query)
first_name = st.text_input("First Name", max_chars=9)
last_name = st.text_input("Last Name", max_chars=13)
gender = st.text_input("Gender", max_chars=6)
email = st.text_input("Email", max_chars=31)
phone_number = st.text_input("Phone Number", max_chars = 12)
date_of_birth = st.text_input("Date of Birth (YYYY-MM-DD)")
intial_availability_status = "Available"
specialize_sport_id = st.text_input("Specialization Sport ID")
if st.button("Update"):
if not coach_profile_created:
cursor.execute('INSERT INTO coach_db(First_Name, Last_Name, Gender, Email, Phone_number, Date_of_Birth, Availability_Status, Specialization_Sport_ID, username) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s);',(first_name, last_name, gender, email, phone_number, date_of_birth, intial_availability_status, specialize_sport_id, username))
db.commit()
st.success("You have successfully created a valid Account")
coach_profile_created = True
else:
                cursor.execute('UPDATE coach_db SET First_Name = %s, Last_Name = %s, Gender = %s, Email = %s, Phone_number = %s, Date_of_Birth = %s, Specialization_Sport_ID = %s WHERE username = %s;',(first_name, last_name, gender, email, phone_number, date_of_birth, specialize_sport_id, username))
db.commit()
st.success("You have successfully updated your account")
elif query == "View Profile":
cursor.execute("SELECT cdb.username, cdb.First_Name, cdb.Last_Name, cdb.Gender, cdb.Email, cdb.Phone_number, cdb.Date_of_Birth, cdb.Availability_Status,sdb.Name as Sport_Name FROM sports_db sdb NATURAL JOIN (SELECT * FROM coach_db WHERE username = %s) as cdb WHERE sdb.Sport_ID = cdb.Specialization_Sport_ID;",(username,))
coach_profile = cursor.fetchall()
st.text("Username : {}".format(coach_profile[0][0]))
st.text("First Name: {}".format(coach_profile[0][1]))
st.text("Last Name: {}".format(coach_profile[0][2]))
st.text("Gender: {}".format(coach_profile[0][3]))
st.text("Email: {}".format(coach_profile[0][4]))
st.text("Phone_number: {}".format(coach_profile[0][5]))
st.text("Date of Birth : {}".format(coach_profile[0][6]))
st.text("Current Availability Status : {}".format(coach_profile[0][7]))
st.text("Specialized Sport : {}".format(coach_profile[0][8]))
elif query == "Change Availability Status":
st.subheader(query)
cursor.execute('SELECT * FROM coach_db WHERE username = %s;', (username,))
coach_record = cursor.fetchall()
coach_id = coach_record[0][0]
st.text("Your current Availability Status : {}".format(coach_record[0][7]))
choices = ['Available', 'Not Available']
choice_button = st.radio('Set availability status to :', choices)
if(st.button("Confirm Changes")):
if(choice_button == 'Available'):
cursor.execute("UPDATE coach_db SET Availability_Status = 'Available' WHERE Coach_ID = %s;", (coach_id,))
db.commit()
st.success("You availability status has been set to 'Available' :thumbsup:")
else:
cursor.execute("UPDATE coach_db SET Availability_Status = 'Not Available' WHERE Coach_ID = %s;", (coach_id,))
db.commit()
st.success("You availability status has been set to 'Not Available' :thumbsup:")
elif query == "Check Current Students":
st.subheader(query)
cursor.execute("SELECT * FROM coach_db where username = %s;", (username,))
coach_id = cursor.fetchall()[0][0]
cursor.execute("SELECT udb.First_Name, udb.Last_Name, udb.Email, udb.Phone_Number, udb.Date_of_Birth, udb.Gender, udb.Address, trains_db.Start_Date from user_db udb NATURAL JOIN trains_db WHERE trains_db.Coach_ID = %s;", (coach_id,))
current_students = cursor.fetchall()
        current_students = pd.DataFrame(current_students, columns=["First Name", "Last Name", "Email", "Phone_Number", "Date_of_Birth", "Gender", "Address", "Training Start Date"])
st.dataframe(current_students)
elif query == "Check Feedbacks":
st.subheader(query)
cursor.execute("SELECT * FROM coach_db where username = %s;", (username,))
coach_id = cursor.fetchall()[0][0]
cursor.execute("SELECT udb.First_Name, udb.Last_Name, udb.Gender, udb.Phone_Number, udb.Email, udb.Address, fbck.Feedback_Text FROM user_db udb NATURAL JOIN coachfeedback_db fbck where Coach_ID = %s;", (coach_id,))
feedbacks = cursor.fetchall()
feedbacks = pd.DataFrame(feedbacks, columns=["First Name", "Last Name", "Gender", "Phone_Number", "Email", "Address", "Feedback"])
st.dataframe(feedbacks)
def user_queries(username):
cursor.execute("SELECT * FROM user_db WHERE username= %s", (username,))
profile_created = cursor.fetchall()
if(profile_created):
queries = ["View Profile", "Update Profile", "Add An Event", "Choose An Event", "Place An order",
"Take College Inventory", "Borrow From Peers","Hire Coach"]
else:
queries = ["Update Profile","View Profile", "Add An Event", "Choose An Event", "Place An order",
"Take College Inventory", "Borrow From Peers","Hire Coach"]
query = st.selectbox('QUERY', queries)
if(query == "Update Profile"):
st.subheader(query)
first_name = st.text_input("First Name",max_chars=255)
last_name = st.text_input("last Name",max_chars=255)
email = st.text_input("Email",max_chars=255)
phone_number = st.text_input("Phone Number",max_chars=15)
dob = st.text_input('Date of Birth - format(YY_MM_DD)',max_chars=255)
gender = st.text_input("Gender",max_chars=255)
date_of_joining = st.text_input("Date of Joining - format(YY_MM_DD)",max_chars=255)
address = st.text_input("Address",max_chars=255)
if(st.button("Update")):
if not profile_created:
cursor.execute(
'INSERT INTO user_db (username, First_name, Last_name, Email, Phone_Number, Date_of_Birth,Gender,Date_of_Joining,Address) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)',
(username, first_name,last_name,email,phone_number,dob,gender,date_of_joining,address))
db.commit()
st.success("You have successfully created a valid Account")
profile_created = True
else:
cursor.execute(
'UPDATE user_db SET First_name = %s, Last_name = %s, Email = %s, Phone_Number = %s, Date_of_Birth = %s, Gender = %s, Date_of_Joining = %s, Address = %s WHERE Username = %s',
                    (first_name,last_name,email,phone_number,dob,gender,date_of_joining,address,username))
db.commit()
st.success("You have successfully updated your account")
if (query == "View Profile"):
st.subheader(query)
cursor.execute("SELECT * FROM user_db WHERE username= %s", (username,))
profile = cursor.fetchall()
st.text("User ID: {}".format(profile[0][0]))
st.text("First Name: {}".format(profile[0][1]))
st.text("Last Name: {}".format(profile[0][2]))
st.text("Email: {}".format(profile[0][3]))
st.text("Phone Number: {}".format(profile[0][4]))
st.text("Date of Birth: {}".format(profile[0][5]))
st.text("Gender: {}".format(profile[0][6]))
st.text("Date of Joining: {}".format(profile[0][7]))
st.text("Address: {}".format(profile[0][8]))
def check_venue_availability(start_datetime, end_datetime, venue_id):
query_input = (venue_id, start_datetime, start_datetime, end_datetime, end_datetime, start_datetime, end_datetime,)
cursor.execute(
"SELECT COUNT(*) FROM (SELECT * FROM venuebooking_db WHERE Venue_ID = %s AND ( (%s >= Start_DateTime AND %s <= End_DateTime) OR (%s >= Start_DateTime AND %s <= End_DateTime) OR (%s <= Start_DateTime AND %s >= End_DateTime))) as A;",
query_input)
l = cursor.fetchall()
cnt = l[0][0]
if (cnt == 0):
return True
else:
return False
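    # The three OR-ed conditions above flag a clash whenever the requested slot
    # overlaps an existing booking for the venue: either endpoint of the request
    # falls inside a booking, or the request fully encloses one. Zero matching
    # rows means the venue is free. For a booking from 10:00 to 12:00, requests
    # 11:00-13:00, 09:00-11:00 and 09:00-13:00 are all rejected, while
    # 12:30-14:00 is allowed (illustrative times).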
if(query == "Add An Event"):
start_datetime = st.text_input("Start Datetime - format(YYYY-MM-DD 'space' HH:MM:SS)",max_chars=255)
end_datetime = st.text_input("End Datetime - format(YYYY-MM-DD 'space' HH:MM:SS)",max_chars=255)
event_name = st.text_input("Event Name", max_chars=255)
participant_limit = st.text_input("Participation Limit", max_chars=255)
# show all venue available.
cursor.execute("SELECT Venue_ID,Venue_Name,Sport_ID from venue_db")
temp = cursor.fetchall()
temp = pd.DataFrame(temp,columns=["Venue_ID","Venue_Name","Sport_ID"])
st.dataframe(temp)
venue_id = st.multiselect("Select rows: ",temp.Venue_ID)
if(st.button("NEXT1")):
venue_id = str(venue_id[0])
if(not check_venue_availability(start_datetime,end_datetime,venue_id)):
st.success("Sorry Venue not available in the given time slot")
else:
with st.spinner("TAKING DATA"):
cursor.execute("SELECT Sport_ID FROM venue_db WHERE Venue_ID = %s", (venue_id,))
sports_id = cursor.fetchall()[0][0]
cursor.execute("SElECT user_id FROM user_db WHERE username = | |
<filename>thermo/flash.py
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains classes and functions for performing flash calculations.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/thermo/>`_.
.. contents:: :local:
Main Interfaces
===============
Pure Components
---------------
.. autoclass:: FlashPureVLS
:show-inheritance:
:members: __init__
:exclude-members: __init__
Vapor-Liquid Systems
--------------------
.. autoclass:: FlashVL
:show-inheritance:
:members: __init__
:exclude-members: __init__
Vapor and Multiple Liquid Systems
---------------------------------
.. autoclass:: FlashVLN
:show-inheritance:
:members: __init__
:exclude-members: __init__
Base Flash Class
----------------
.. autoclass:: Flash
:show-inheritance:
:members: flash
:exclude-members:
Specific Flash Algorithms
=========================
It is recommended to use the Flash classes, which are designed to have generic
interfaces. The implemented specific flash algorithms may be changed in the
future, but reading their source code may be helpful for instructive purposes.
'''
# sequential_substitution_2P sequential_substitution_NP nonlin_equilibrium_NP nonlin_spec_NP nonlin_2P nonlin_2P_HSGUAbeta dew_bubble_newton_zs TPV_solve_HSGUA_1P
from __future__ import division
__all__ = ['sequential_substitution_2P', 'sequential_substitution_GDEM3_2P',
'dew_bubble_Michelsen_Mollerup', 'bubble_T_Michelsen_Mollerup',
'dew_T_Michelsen_Mollerup', 'bubble_P_Michelsen_Mollerup',
'dew_P_Michelsen_Mollerup',
'minimize_gibbs_2P_transformed', 'sequential_substitution_Mehra_2P',
'nonlin_2P', 'nonlin_n_2P', 'sequential_substitution_NP',
'minimize_gibbs_NP_transformed', 'FlashVL','FlashVLN', 'FlashPureVLS',
'TPV_HSGUA_guesses_1P_methods', 'TPV_solve_HSGUA_guesses_1P',
'sequential_substitution_2P_HSGUAbeta',
'sequential_substitution_2P_sat', 'TP_solve_VF_guesses',
'TPV_double_solve_1P', 'nonlin_2P_HSGUAbeta',
'sequential_substitution_2P_double',
'cm_flash_tol', 'nonlin_2P_newton', 'dew_bubble_newton_zs',
'existence_3P_Michelsen_Mollerup',
'SS_VF_simultaneous', 'stabiliy_iteration_Michelsen',
'assert_stab_success_2P', 'nonlin_equilibrium_NP',
'nonlin_spec_NP',
'TPV_solve_HSGUA_guesses_VL',
'solve_P_VF_IG_K_composition_independent',
'solve_T_VF_IG_K_composition_independent'
]
from fluids.constants import R, R2, R_inv
from fluids.numerics import (UnconvergedError, trunc_exp, newton,
brenth, secant, bisect,
ridder, broyden2,
numpy as np, linspace, assert_close, assert_close1d,
logspace, oscillation_checker, damping_maintain_sign,
oscillation_checking_wrapper, OscillationError,
NoSolutionError, NotBoundedError, jacobian,
best_bounding_bounds, isclose, newton_system,
make_damp_initial, newton_minimize,
root, minimize, fsolve)
from fluids.numerics import py_solve, trunc_log
from chemicals.utils import (exp, log, log10, floor, copysign, normalize,
mixing_simple, property_mass_to_molar, rho_to_Vm, Vm_to_rho)
from chemicals.heat_capacity import (Lastovka_Shaw_T_for_Hm, Dadgostar_Shaw_integral,
Dadgostar_Shaw_integral_over_T, Lastovka_Shaw_integral,
Lastovka_Shaw_integral_over_T)
from chemicals.rachford_rice import (flash_inner_loop, Rachford_Rice_solutionN,
Rachford_Rice_flash_error, Rachford_Rice_solution2, Rachford_Rice_solution_LN2)
from chemicals.phase_change import SMK
from chemicals.volume import COSTALD
from chemicals.flash_basic import flash_wilson, flash_Tb_Tc_Pc, flash_ideal
from chemicals.exceptions import TrivialSolutionError, PhaseCountReducedError, PhaseExistenceImpossible
from chemicals.iapws import iapws95_Psat, iapws95_Tsat, iapws95_rhog_sat, iapws95_rhol_sat, iapws95_Tc, iapws95_Pc, iapws95_MW, iapws95_T
from thermo.utils import has_matplotlib
from thermo.equilibrium import EquilibriumState
from thermo.phases import Phase, gas_phases, liquid_phases, solid_phases, CEOSLiquid, CEOSGas, CoolPropGas, CoolPropLiquid, CoolPropPhase, GibbsExcessLiquid, IdealGas, IAPWS95Liquid, IAPWS95Gas, IAPWS95
from thermo.phases import CPPQ_INPUTS, CPQT_INPUTS, CPrhoT_INPUTS, CPunknown, CPiDmolar
from thermo import phases
from thermo.phase_identification import identify_sort_phases
from thermo.bulk import default_settings
from thermo.eos_mix import VDWMIX, IGMIX
from thermo.property_package import StabilityTester
from thermo.coolprop import CPiP_min
CAS_H2O = '7732-18-5'
def sequential_substitution_2P(T, P, V, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, V_over_F_guess=None,
check_G=False, check_V=False, dZ_allow=0.1):
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
cmps = range(len(zs))
err, err1, err2, err3 = 0.0, 0.0, 0.0, 0.0
G_old = None
V_over_F_old = V_over_F
restrained = 0
restrained_switch_count = 300
# Code for testing phis at zs
l, g = liquid_phase, gas_phase
if liquid_phase.T != T or liquid_phase.P != P:
liquid_phase = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
if gas_phase.T != T or gas_phase.P != P:
gas_phase = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
for iteration in range(maxiter):
# g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
# l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
# l = liquid_phase.to(xs, T=T, P=P, V=V)
# g = gas_phase.to(ys, T=T, P=P, V=V)
# lnphis_g = g.lnphis()
# lnphis_l = l.lnphis()
lnphis_g = gas_phase.lnphis_at_zs(ys)
lnphis_l = liquid_phase.lnphis_at_zs(xs)
limited_Z = False
try:
Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # K_value(phi_l=l, phi_g=g)
except OverflowError:
Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # K_value(phi_l=l, phi_g=g)
V_over_F_old = V_over_F
try:
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
except Exception as e:
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F, check=True)
# K_low, K_high = False, False
# for zi, Ki in zip(zs, Ks):
# if zi != 0.0:
# if Ki > 1.0:
# K_high = True
# else:
# K_low = True
# if K_high and K_low:
# break
# if not (K_high and K_low):
# raise TrivialSolutionError("Converged to trivial condition, all K same phase",
# comp_difference, iteration, err)
# else:
if check_G:
V_over_F_G = min(max(V_over_F_old, 0), 1)
G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G()
print('new G', G, 'old G', G_old)
if G_old is not None:
if G > G_old:
step = .5
while G > G_old and step > 1e-4:
# ys_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(xs, xs_old)])
# xs_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(ys, ys_old)])
# ys_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)])
# xs_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)])
# g = gas_phase.to(ys_working, T=T, P=P, V=V)
# l = liquid_phase.to(xs_working, T=T, P=P, V=V)
# lnphis_g = g.lnphis()
# lnphis_l = l.lnphis()
# try:
# Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
# except OverflowError:
# Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks_old, Ks)]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F)
# V_over_F_G = min(max(V_over_F, 0), 1)
g = gas_phase.to(ys_new, T=T, P=P, V=V)
l = liquid_phase.to(xs_new, T=T, P=P, V=V)
G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G()
print('step', step, G, V_over_F, Ks)
step *= 0.5
# xs, ys = xs_working, ys_working
# print('Gibbs increased', G/G_old)
G_old = G
if check_V and iteration > 2:
big_Z_change = (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow)
if restrained <= restrained_switch_count and big_Z_change:
limited_Z = True
step = .5 #.5
while (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow ) and step > 1e-8:
# Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks, Ks_old)]
# Ks_working = [Ks[i]*(Ks_old[i]/Ks[i])**(1.0 - step) for i in cmps] # step = 0 - all new; step = 1 - all old
# Ks_working = [Ks_old[i]*(exp(lnphis_l[i])/exp(lnphis_g[i])/Ks_old[i])**(1.0 - step) for i in cmps]
ys_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)])
xs_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)])
# V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F)
l = liquid_phase.to(xs_new, T=T, P=P, V=V)
g = gas_phase.to(ys_new, T=T, P=P, V=V)
# lnphis_g = g.lnphis()
# lnphis_l = l.lnphis()
print('step', step, V_over_F, g.Z())
step *= 0.5
xs, ys = xs_new, ys_new
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
restrained += 1
elif restrained > restrained_switch_count and big_Z_change:
restrained = 0
# Check for negative fractions - normalize only if needed
for xi in xs_new:
if xi < 0.0:
xs_new_sum_inv = 1.0/sum(abs(i) for i in xs_new)
for i in cmps:
xs_new[i] = abs(xs_new[i])*xs_new_sum_inv
break
for yi in ys_new:
if yi < 0.0:
ys_new_sum_inv = 1.0/sum(abs(i) for i in ys_new)
for i in cmps:
ys_new[i] = abs(ys_new[i])*ys_new_sum_inv
break
# Calculate the error using the new Ks and old compositions
# Claimed error function in CONVENTIONAL AND RAPID FLASH
# CALCULATIONS FOR THE SOAVE-REDLICH-KWONG AND PENG-ROBINSON EQUATIONS OF STATE
err = 0.0
# Suggested tolerance 1e-15
try:
for Ki, xi, yi in zip(Ks, xs, ys):
# equivalent of fugacity ratio
# Could divide by the old Ks as well.
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
except ZeroDivisionError:
err = 0.0
for Ki, xi, yi in zip(Ks, xs, ys):
try:
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
except ZeroDivisionError:
pass
if err > 0.0 and err in (err1, err2, err3):
raise OscillationError("Converged to cycle in errors, no progress being made")
# Accept the new compositions
xs_old, ys_old, Ks_old = xs, ys, Ks
# if not limited_Z:
# assert xs == l.zs
# assert ys == g.zs
xs, ys = xs_new, ys_new
lnphis_g_old, lnphis_l_old = lnphis_g, lnphis_l
0x/0X, as with integer literals in code. Base 0 means to interpret exactly as a code literal, so that the actual base is 2, 8, 10, or 16, and so that int('010', 0) is not legal, while int('010') is, as well as int('010', 8).
isinstance(object, classinfo)
# Return True if the object argument is an instance of the classinfo argument, or of a (direct, indirect or virtual) subclass thereof. If object is not an object of the given type, the function always returns False. If classinfo is a tuple of type objects (or recursively, other such tuples), return True if object is an instance of any of the types. If classinfo is not a type or tuple of types and such tuples, a TypeError exception is raised.
issubclass(class, classinfo)
# Return True if class is a subclass (direct, indirect or virtual) of classinfo. A class is considered a subclass of itself. classinfo may be a tuple of class objects, in which case every entry in classinfo will be checked. In any other case, a TypeError exception is raised.
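# Added example (illustrative values): isinstance() accepts a tuple of types, and
# issubclass() treats a class as a subclass of itself.
print(isinstance(3, (int, float)))  # True
print(isinstance(True, int))        # True - bool is a subclass of int
print(issubclass(bool, int))        # True
print(issubclass(int, int))         # True - a class is a subclass of itself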
iter(object[, sentinel])
# Return an iterator object. The first argument is interpreted very differently depending on the presence of the second argument. Without a second argument, object must be a collection object which supports the iteration protocol (the __iter__() method), or it must support the sequence protocol (the __getitem__() method with integer arguments starting at 0). If it does not support either of those protocols, TypeError is raised. If the second argument, sentinel, is given, then object must be a callable object. The iterator created in this case will call object with no arguments for each call to its __next__() method; if the value returned is equal to sentinel, StopIteration will be raised, otherwise the value will be returned.
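# Added example (illustrative values): the two-argument form of iter() keeps calling the
# callable until it returns the sentinel, then stops.
from functools import partial
source = iter(range(5))
for value in iter(partial(next, source, -1), -1):  # -1 is the sentinel marking exhaustion
    print(value)  # prints 0, 1, 2, 3, 4 and then stops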
len(s)
# Return the length (the number of items) of an object. The argument may be a sequence (such as a string, bytes, tuple, list, or range) or a collection (such as a dictionary, set, or frozen set).
list([iterable])
# Rather than being a function, list is actually a mutable sequence type, as documented in Lists and Sequence Types — list, tuple, range.
map(function, iterable, ...)
# Return an iterator that applies function to every item of iterable, yielding the results. If additional iterable arguments are passed, function must take that many arguments and is applied to the items from all iterables in parallel. With multiple iterables, the iterator stops when the shortest iterable is exhausted. For cases where the function inputs are already arranged into argument tuples, see itertools.starmap().
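# Added example (illustrative values): map() over one iterable, and over two iterables in
# parallel - iteration stops at the shortest input.
print(list(map(str.upper, ['a', 'b', 'c'])))  # ['A', 'B', 'C']
print(list(map(pow, [2, 3, 4], [10, 2])))     # [1024, 9] - the third base has no exponent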
max()
memoryview()
min()
next()
# Retrieve the next item from the iterator by calling its __next__() method. If default is given, it is returned if the iterator is exhausted, otherwise StopIteration is raised.
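# Added example (illustrative values): next() with and without a default.
it = iter([1, 2])
print(next(it))          # 1
print(next(it))          # 2
print(next(it, 'done'))  # 'done' is returned instead of raising StopIteration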
object()
# Return a new featureless object. object is a base for all classes. It has the methods that are common to all instances of Python classes. This function does not accept any arguments.
oct(x)
# Convert an integer number to an octal string prefixed with “0o”. The result is a valid Python expression. If x is not a Python int object, it has to define an __index__() method that returns an integer.
open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
# Open file and return a corresponding file object. If the file cannot be opened, an OSError is raised. See Reading and Writing Files for more examples of how to use this function.
# file is a path-like object giving the pathname (absolute or relative to the current working directory) of the file to be opened or an integer file descriptor of the file to be wrapped. (If a file descriptor is given, it is closed when the returned I/O object is closed, unless closefd is set to False.)
# mode is an optional string that specifies the mode in which the file is opened. It defaults to 'r' which means open for reading in text mode. Other common values are 'w' for writing (truncating the file if it already exists), 'x' for exclusive creation and 'a' for appending (which on some Unix systems, means that all writes append to the end of the file regardless of the current seek position). In text mode, if encoding is not specified the encoding used is platform dependent: locale.getpreferredencoding(False) is called to get the current locale encoding. (For reading and writing raw bytes use binary mode and leave encoding unspecified.)
ord(c)
# Given a string representing one Unicode character, return an integer representing the Unicode code point of that character. For example, ord('a') returns the integer 97 and ord('€') (Euro sign) returns 8364. This is the inverse of chr().
print()
# Print objects to the text stream file, separated by sep and followed by end. sep, end, file and flush, if present, must be given as keyword arguments.
property(fget=None, fset=None, fdel=None, doc=None)
# Return a property attribute. fget is a function for getting an attribute value. fset is a function for setting an attribute value. fdel is a function for deleting an attribute value. And doc creates a docstring for the attribute.
# @property
# def test(self):
#     return self._test
# @test.getter | @test.setter | @test.deleter (put the new function under that)
# @test.setter
# def test(self, value):
#     self._test = value
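# Added, self-contained sketch of a managed attribute via property (class and attribute
# names are illustrative only):
class Celsius:
    def __init__(self, degrees=0.0):
        self._degrees = degrees
    @property
    def degrees(self):            # getter
        return self._degrees
    @degrees.setter
    def degrees(self, value):     # setter with a simple validation rule
        if value < -273.15:
            raise ValueError('below absolute zero')
        self._degrees = value

c = Celsius()
c.degrees = 21.5
print(c.degrees)  # 21.5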
range(start, stop[, step])
reversed(seq)
# Return a reverse iterator. seq must be an object which has a __reversed__() method or supports the sequence protocol (the __len__() method and the __getitem__() method with integer arguments starting at 0).
round(number[, ndigits])
# Return number rounded to ndigits precision after the decimal point. If ndigits is omitted or is None, it returns the nearest integer to its input.
set([iterable])
# Return a new set object, optionally with elements taken from iterable. set is a built-in class. See set and Set Types — set, frozenset for documentation about this class.
setattr(object, name, value)
# This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, 'foobar', 123) is equivalent to x.foobar = 123.
slice(stop)
slice(start, stop, [step])
# Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used. For example: a[start:stop:step] or a[start:stop, i].
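# Added example (illustrative values): a slice object behaves like the extended indexing
# syntax a[start:stop:step] and exposes its parameters as read-only attributes.
s = slice(1, 7, 2)
data = list(range(10))
print(data[s])                  # [1, 3, 5]
print(data[1:7:2])              # [1, 3, 5] - same result
print(s.start, s.stop, s.step)  # 1 7 2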
sorted(iterable, *, key=None, reverse=False)
# Return a new sorted list from the items in iterable. Has two optional arguments which must be specified as keyword arguments. key specifies a function of one argument that is used to extract a comparison key from each element in iterable (for example, key=str.lower). The default value is None (compare the elements directly). reverse is a boolean value. If set to True, then the list elements are sorted as if each comparison were reversed.
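# Added example (illustrative values): sorted() with a key function and with reverse=True.
print(sorted(['pear', 'Apple', 'banana'], key=str.lower))  # ['Apple', 'banana', 'pear']
print(sorted([3, 1, 2], reverse=True))                     # [3, 2, 1]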
@staticmethod
# @staticmethod
# def f(arg1, arg2, ...): ...
# Transform a method into a static method.
str(object=b'', encoding='utf-8', errors='strict')
# Return a str version of object. See str() for details.
sum(iterable, /, start=0)
# Sums start and the items of an iterable from left to right and returns the total. The iterable’s items are normally numbers, and the start value is not allowed to be a string.
super([type[, object-or-type]])
# Return a proxy object that delegates method calls to a parent or sibling class of type. This is useful for accessing inherited methods that have been overridden in a class.
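# Added example (illustrative class names): super() delegates to the next class in the MRO.
class Base:
    def greet(self):
        return 'base'
class Child(Base):
    def greet(self):
        return 'child -> ' + super().greet()
print(Child().greet())  # child -> base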
tuple([iterable])
# Rather than being a function, tuple is actually an immutable sequence type, as documented in Tuples and Sequence Types — list, tuple, range.
type(object)
type(name, bases, dict, **kwds)
# With one argument, return the type of an object. The return value is a type object and generally the same object as returned by object.__class__.
0, "Inca: Map 29 (SW)", [], False, [], [], [], [], [], [], [], []],
75: [False, [72,99], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SE)", [], False, [], [], [], [], [], [], [], []],
76: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (statue head)", [], False, [], [], [], [], [], [], [], []],
77: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 30 (first area)", [3, 4], False, [], [], [], [], [], [], [], []],
78: [False, [77], 2, [1,5,0,b"\x00"], 0, "Inca: Map 30 (second area)", [], False, [], [], [], [], [], [], [], []],
79: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 31", [], False, [], [], [], [], [], [], [], []],
80: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 32 (entrance)", [], False, [], [], [], [], [], [], [], []],
81: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 32 (behind statue)", [], False, [], [], [], [], [], [], [], []],
82: [False, [83], 2, [1,5,0,b"\x00"], 0, "Inca: Map 33 (entrance)", [], False, [], [], [], [], [], [], [], []],
83: [False, [82], 2, [1,5,0,b"\x00"], 0, "Inca: Map 33 (over ramp)", [], False, [], [], [], [], [], [], [], []], # Need to prevent softlocks here
84: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 34", [], False, [], [], [], [], [], [], [], []],
85: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 35 (entrance)", [], False, [], [], [], [], [], [], [], []],
86: [False, [85], 2, [1,5,0,b"\x00"], 0, "Inca: Map 35 (over ramp)", [], False, [], [], [], [], [], [], [], []],
87: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 36 (main)", [8], False, [], [], [], [], [], [], [], []],
88: [False, [87], 2, [1,5,0,b"\x00"], 0, "Inca: Map 36 (exit opened)", [], False, [], [], [], [], [], [], [], []],
89: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 37 (main area)", [7], False, [], [], [], [], [], [], [], []],
90: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 37 (tile bridge)", [], False, [], [], [], [], [], [], [], []], # Check for potential softlock?
91: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (south section)", [], False, [], [], [], [], [], [], [], []],
92: [False, [91], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (behind statues)", [], False, [], [], [], [], [], [], [], []],
93: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (north section)", [], False, [], [], [], [], [], [], [], []],
94: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 39", [], False, [], [], [], [], [], [], [], []],
95: [False, [96], 2, [1,5,0,b"\x00"], 0, "Inca: Map 40 (entrance)", [], False, [], [], [], [], [], [], [], []],
96: [False, [95], 2, [1,5,0,b"\x00"], 0, "Inca: Map 40 (past tiles)", [], False, [], [], [], [], [], [], [], []],
97: [False, [98,503], 2, [1,5,0,b"\x00"], 0, "Inca: Boss Room", [], True, [], [], [], [], [], [], [], []], # might need to add an exit for this
98: [False, [97], 2, [1,5,0,b"\x00"], 0, "Inca: Behind Boss Room", [], False, [], [], [], [], [], [], [], []],
99: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SE door)", [], False, [], [], [], [], [], [], [], []],
# Gold Ship / Diamond Coast
100: [False, [104], 1, [1,5,0,b"\x00"], 0, "Gold Ship: Deck", [], False, [], [], [], [], [], [], [], []],
101: [False, [], 2, [1,5,0,b"\x00"], 0, "Gold Ship: Interior", [], False, [], [], [], [], [], [], [], []],
102: [False, [11], 1, [2,6,0,b"\x00"], 0, "Diamond Coast: Main Area", [], False, [], [], [], [], [], [], [], []],
103: [False, [], 2, [2,6,0,b"\x00"], 0, "Diamond Coast: House", [], False, [], [], [], [], [], [], [], []],
104: [False, [], 0, [1,5,0,b"\x00"], 0, "Gold Ship: Crow's Nest Passage", [], False, [], [], [], [], [], [], [], []],
# Freejia
110: [False, [11], 1, [2,7,0,b"\x00"], 0, "Freejia: Main Area", [], False, [], [], [], [], [], [], [], []],
111: [False, [1, 110], 1, [2,7,0,b"\x00"], 0, "Freejia: 2-story House Roof", [], False, [], [], [], [], [], [], [], []],
112: [False, [], 1, [2,7,0,b"\x00"], 0, "Freejia: Laborer House Roof", [], False, [], [], [], [], [], [], [], []],
113: [False, [110, 114], 1, [2,7,0,b"\x00"], 0, "Freejia: Labor Trade Roof", [], False, [], [], [], [], [], [], [], []],
114: [False, [110, 112], 1, [2,7,0,b"\x00"], 0, "Freejia: Back Alley", [], False, [], [], [], [], [], [], [], []],
115: [False, [110], 0, [2,7,0,b"\x00"], 0, "Freejia: Slaver", [], False, [], [], [], [], [], [], [], []],
116: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: West House", [], False, [], [], [], [], [], [], [], []],
117: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: 2-story House", [], False, [], [], [], [], [], [], [], []],
118: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Lovers' House", [], False, [], [], [], [], [], [], [], []],
119: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (common area)", [], False, [], [], [], [], [], [], [], []],
120: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (west room)", [], False, [], [], [], [], [], [], [], []],
121: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (east room)", [], False, [], [], [], [], [], [], [], []],
122: [False, [504], 2, [2,7,0,b"\x00"], 0, "Freejia: Laborer House", [], False, [], [], [], [], [], [], [], []],
123: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Messy House", [], False, [], [], [], [], [], [], [], []],
124: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Erik House", [], False, [], [], [], [], [], [], [], []],
125: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Dark Space House", [], False, [], [], [], [], [], [], [], []],
126: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Labor Trade House", [], False, [], [], [], [], [], [], [], []],
127: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Labor Market", [], False, [], [], [], [], [], [], [], []],
# Diamond Mine
130: [False, [131], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (entrance)", [], False, [], [], [], [], [], [], [], []],
131: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (behind barriers)", [], False, [], [], [], [], [], [], [], []],
132: [False, [131], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (false wall)", [], False, [], [], [], [], [], [], [], []],
133: [False, [11], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 62", [], False, [], [], [], [], [], [], [], []],
134: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 63 (main)", [], False, [], [], [], [], [], [], [], []],
135: [False, [134], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 63 (elevator)", [], False, [], [], [], [], [], [], [], []],
136: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 64 (main)", [], False, [], [], [], [], [], [], [], []],
137: [False, [136], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 64 (trapped laborer)", [], False, [], [], [], [], [], [], [], []],
138: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 65 (main)", [], False, [], [], [], [], [], [], [], []],
139: [False, [138], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 65 (behind ramp)", [], False, [], [], [], [], [], [], [], []],
140: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (elevator 1)", [], False, [], [], [], [], [], [], [], []],
141: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (elevator 2)", [], False, [], [], [], [], [], [], [], []],
142: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (Dark Space)", [], False, [], [], [], [], [], [], [], []],
clr = rgb(3)
lw1, lw2 = 6, 1
grey = 0.9 * array([1,1,1])
#
plot( y.t, y.amp, color=grey, linewidth=lw1, label='NR' )
plot( y.t,-y.amp, color=grey, linewidth=lw1 )
plot( y.t, y.plus, color=grey, linewidth=lw1 )
plot( y.t, y.cross, color=grey, linewidth=lw1 )
#
plot( g.t, g.amp, color=0.8*clr[1], linewidth=lw2 )
plot( g.t,-g.amp, color=0.8*clr[1], linewidth=lw2 )
plot( g.t, g.plus, color=clr[2], linewidth=lw2, label=r'Fit $+$' )
plot( g.t, g.cross, color=clr[0], linewidth=lw2, label=r'Fit $\times$' )
#
pylim( g.t, g.amp, symmetric=True )
xlabel(r'$t/M$')
ylabel( y.kind )
title( y.label )
legend( frameon=False )
# ############################################################ %
''' Workflow class for applying fitting over NR cases (scentry objects) '''
# ############################################################ %
class modelrd:
''' Workflow class for applying fitting over NR cases (scentry objects).'''
def __init__(this, # The current object
scentry_iterable=None, # A list of scentry (simulation catalog enrty) objects
T0=None, # The starting time relative to the peak luminosity
T1=None, # Ending time of RD fitting region
workdir=None, # Highest level directory for file IO
show=False, # Toggle for showing plots
noplots=False, # Toggle for creating and saving plots
lmlist=None, # List of spherical multipoles to consider
clean=False, # Toggle for removing intermediate files
keyword=None, # Label for output files
greedy=True, # Toggle for use of greedy fitting
use_peak_strain=True, # Toggle for listing time relative to peak strain
scri=False, # Toggle to extrapolate fitting results to infinity
verbose=True): # Let the people know
# Let the people know (what was input)
if verbose:
print '\n\n%s\n## \t %s \n%s\n\n' % ( 'wwwWv~-'*6, yellow('MODEL RINGDOWN'), 'wwwWv~-'*6 )
print 'Settings\n%s'%('--'*20)
for k in dir():
if (eval(k) is not None) and (eval(k) is not False) and not ('this' in k):
print '## %s = %s' % ( cyan(str(k)), yellow(str(eval(k))) )
print '\n\n'
# Import unseful things
import pickle
from nrutils import gwylm,scsearch
from os.path import expanduser,isfile,isdir
from os import remove as rm
from shutil import rmtree as rmdir
from numpy import array
# Define domain variables that can be understood by the latex
# code, as well as the make_domain code. NOTE that the point of this line
# is to centralize the definition of which variables will be used for modeling
this.model_domain_variables = [ 'eta','chi_s' ]
# NOTE that model quality (particularly overmodeling) is affected by this value
this.fitatol = 1e-2 # 1e-2
# Location of fitting region in M relative to peak strain
this.T0 = 20 if T0 is None else T0
this.T1 = T1
# Toggle to use p_range determined by jf OR the general (-1,1)
# * If true, prange is the sign of jf, else it is (-1,1)
# * This option is applied externally to the qnmfit class via the prange input
# NOTE: setting use_spin_prange to False has been found to reeeally muck things up
this.use_spin_prange = True
# Handle the prange property using the use_spin_prange value
if this.use_spin_prange:
# Setting prange None here means that the sign of
# the final spin will be used in the qnmfit class
this.prange = None
else:
# Both counter and co-rotating QNM will be used
this.prange = [-1,1]
# Default size for figures
this.figsize = 2*array([4,2.8])
# Time step to use for all data
this.dt = 0.35
# Tag for whether or not to use greedy fitting in the qnmfit process
this.greedy = greedy
# Store time listing option
this.use_peak_strain = use_peak_strain
# Toggle for removing junk radiation
# NOTE that removing junk radiation affects the peak strain location and thus the values of the QNM amplitudes. Counterintuitively, cleaning appears to (slightly) negatively affect results.
this.use_cleaned_waveforms = False
# Double check that cleaning of intermediate data really is desired
if clean:
clean = 'y' in raw_input(warning('The %s option has been detected. Do you really wish to remove intermediate data files? [%s] %s '%( red(bold('CLEAN')), bold(yellow('Yes')+'/'+yellow('no')) ,cyan('If yes, note that it may take a long time to regenerate them using this tool.')),output_string=True) ).lower()
# NOTE that if a new scentry_iterable is given, then previous working data will always be cleaned
if not (scentry_iterable is None):
alert( '%sa new scentry_iterable was given, so previous working data will always be cleaned'%(red('Note: ')) )
clean = True
# Internalize clean and verbose inputs
this.verbose,this.clean,this.keyword,this.scri = verbose,clean,keyword,scri
# Handle the input keyword option and format it to be joined with file names
keyword = '' if keyword is None else keyword+'_'
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
#[A]# Setup directories for storing intermediate data #
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
workdir = '~/KOALA/kerr_dev/workflows/modelrd/' if (workdir is None) else workdir
workdir = expanduser( workdir ).replace('//','/')
mkdir(workdir,verbose=True)
this.workdir = workdir
# Make directory to store temporary data
bindir = this.workdir + '/bin/'
bindir.replace('//','/')
# If clean, remove all intermediate data files
if this.clean and isdir(bindir):
import glob, os
map(os.remove, glob.glob("%s/*%s.bin"%(bindir,keyword)))
# If the bindir does not exist, make it, and store it to the current object
mkdir(bindir,verbose=True)
this.bindir = bindir
# Determine if qnmfit data exists; clean if desired
this.qnmfit_data_path = this.bindir + this.keyword + 'qnmfit.bin'
if clean and isfile( this.qnmfit_data_path ):
rm( this.qnmfit_data_path )
# Handle list of spherical modes to use
if lmlist is None:
# lmlist_,lmlist = [ (2,2), (2,1), (3,3), (3,2), (4,4), (4,3), (5,5), (5,4) ],[]
# lmlist_,lmlist = [ (2,2), (2,1) ],[]
lmlist_,lmlist = [ (2,2), (2,1), (3,2), (4,3), (3,3), (4,4), (5,5) ],[]
# Add the m<0 counterparts for consistency checking
if not ( (2,1) in lmlist_ ): lmlist_ += [(2,1)]
for lm in lmlist_:
lmlist.append( (lm[0],lm[1]) )
# lmlist.append( (lm[0],-lm[1]) )
# Sort and set
lmlist = sorted( list( set(lmlist) ) )
# NOTE that we store the list of spherical eigenvalues here to be used later
this.lmlist = lmlist
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
#[B]# Data Collection will proceed in a nested rather than modular fashion to conserve disk space
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
pad = '####'*12+'#'
if this.verbose: print '%s\n# Processing Fit Data from Simulations\t\t#\n%s'%(pad,pad)
# If the core data file does not extist, cull QNM fit data anew
if not isfile( this.qnmfit_data_path ):
#
alert('No scentry_iterable input found. A default list of simulations will now be collected.')
if scentry_iterable is None:
from nrutils import scsearch,jf14067295,Mf14067295
from numpy import isnan,array
# Get all gt simulations of interest
scentry_iterable = scsearch(keyword=('hr','hrq','sq'),nonprecessing=True,verbose=False,unique=True,institute='gt')
scentry_iterable = scsearch(catalog = scentry_iterable, keyword=('bradwr'),nonprecessing=True,verbose=True)
# Add the BAM Runs
bam_runs = scsearch(keyword='silures',nonprecessing=True,verbose=True,unique=True)
# Concat
scentry_iterable = bam_runs + scentry_iterable
# NOTE that the remnant properties from the BAM runs cannot be trusted, so we will use a final mas and spinf fit here
for e in scentry_iterable:
e.mf,e.xf = Mf14067295(e.m1,e.m2,e.X1[-1],e.X2[-1]),jf14067295(e.m1,e.m2,e.X1[-1],e.X2[-1])
e.Sf = e.mf*e.mf*array([0,0,e.xf])
#
scentry_iterable = this.filter_scentry_resolution(scentry_iterable,res_min=160)
# NOTE that the values in scentry_iterable will be stored in a dictionary called qnmfit_by_simulation
this.cull_qnmfit_by_simulation(scentry_iterable)
else:
# Try to load pre-calculated qnmfit objects
this.load_qnmfit_by_simulation()
# NOTE that at this point, this.qnmfit_by_simulation should be set to the contents of this.qnmfit_data_path
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
#[C]# Organize the data into lists that make modeling easier #
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
if this.verbose: print '%s\n# Organizing Fit Data for Modeling\t\t#\n%s'%(pad,pad)
this.organize()
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
#[D]# Model QNM amplitudes over a chosen domain #
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
if this.verbose: print '%s\n# Modeling QNM Complex Amplitudes Over a Chosen Domain\t\t#\n%s'%(pad,pad)
this.qnm_manifold_learn()
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
#[E]# Document the fit results: latex, python #
#%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%###%%#
# this.document()
# Given list of scentry objects, load and fit QNMs, then store(write) using pickle.
def cull_qnmfit_by_simulation(this,scentry_iterable):
'''Given list of scentry objects, load and fit QNMs, then store(write) using pickle.'''
# Import useful things
import pickle
# QNM fit information will be stored to the current object in this dictionary
this.qnmfit_by_simulation = {}
this.qnmfit_at_infinity_by_simulation = {}
# For all scentry objects
alert('Collecting qnmfit data ...')
n = len( scentry_iterable )
for k,e in enumerate(scentry_iterable):
# Let the people know
simname = e.raw_metadata.source_dir[-1].split('/')[-1] if e.raw_metadata.source_dir[-1][-1]!='/' else e.raw_metadata.source_dir[-1].split('/')[-2]
if this.verbose: print '%s\n# Processing %i/%i: %s (%s)\n%s'%('===='*12,k+1,n,cyan(e.label),green(simname),'===='*12)
# Process the current scentry object, but don't save. We will save the list when all scentry objects have been processed.
if not this.scri:
this.process_scentry( e,load_and_save=False )
else:
this.process_scentry_at_infinity( e,load_and_save=False )
# Pickle the qnmfit list
this.save_qnmfit_by_simulation()
# Try
<reponame>ryanpetrello/sdb
from __future__ import print_function
import cmd
import contextlib
import errno
import logging
import os
import pprint
import re
import rlcompleter
import select
import signal
import socket
import sys
import termios
import threading
import traceback
import tty
from multiprocessing import process
from pdb import Pdb
import six
from six.moves.queue import Queue, Empty
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter
__all__ = (
'SDB_HOST', 'SDB_PORT', 'SDB_NOTIFY_HOST', 'SDB_COLORIZE',
'DEFAULT_PORT', 'Sdb', 'debugger', 'set_trace',
)
DEFAULT_PORT = 6899
SDB_HOST = os.environ.get('SDB_HOST') or '127.0.0.1'
SDB_PORT = int(os.environ.get('SDB_PORT') or DEFAULT_PORT)
SDB_NOTIFY_HOST = os.environ.get('SDB_NOTIFY_HOST') or '127.0.0.1'
SDB_CONTEXT_LINES = os.environ.get('SDB_CONTEXT_LINES') or 60
SDB_COLORIZE = bool(int(os.environ.get('SDB_COLORIZE') or 1))
#: Holds the currently active debugger.
_current = [None]
_frame = getattr(sys, '_getframe')
NO_AVAILABLE_PORT = """\
Couldn't find an available port.
Please specify one using the SDB_PORT environment variable.
"""
BANNER = """\
{self.ident}: Ready to connect: telnet {self.host} {self.port}
Type `exit` in session to continue.
{self.ident}: Waiting for client...
"""
SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.'
SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.'
class SocketCompleter(rlcompleter.Completer):
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
matches = []
n = len(text)
for word in self.namespace:
if word[:n] == text and word != "__builtins__":
matches.append(word)
return matches
class Sdb(Pdb):
"""Socket-based debugger."""
me = 'Socket Debugger'
_prev_outs = None
_sock = None
_completer = SocketCompleter()
def __init__(self, host=SDB_HOST, port=SDB_PORT,
notify_host=SDB_NOTIFY_HOST, context_lines=SDB_CONTEXT_LINES,
port_search_limit=100, port_skew=+0, out=sys.stdout,
colorize=SDB_COLORIZE, interactive=False):
self.active = True
self.out = out
self.colorize = colorize
self._prev_handles = sys.stdin, sys.stdout
self.notify_host = notify_host
self.context_lines = int(context_lines)
self._sock, this_port = self.get_avail_port(
host, port, port_search_limit, port_skew,
)
self._sock.setblocking(1)
self._sock.listen(1)
self.host = host
self.port = this_port
self.ident = '{0}:{1}'.format(self.me, this_port)
self.interactive = interactive
if self.interactive is False:
self.say(BANNER.format(self=self))
self._client, address = self._sock.accept()
self._client.setblocking(1)
self.remote_addr = ':'.join(str(v) for v in address)
self.say(SESSION_STARTED.format(self=self))
self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
Pdb.__init__(self, stdin=self._handle, stdout=self._handle)
else:
Pdb.__init__(self, stdin=sys.stdin, stdout=sys.stdout)
self.prompt = ''
def complete(self, text):
ns = {}
ns.update(self.curframe.f_globals.copy())
ns.update(self.curframe.f_locals.copy())
ns.update(__builtins__)
self._completer.namespace = ns
self._completer.use_main_ns = 0
self._completer.complete(text, 0)
return self._completer.matches
def get_avail_port(self, host, port, search_limit=100, skew=+0):
try:
_, skew = process._current_process.name.split('-')
skew = int(skew)
except ValueError:
pass
this_port = None
for i in range(search_limit):
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
this_port = port + skew + i
try:
_sock.bind((host, this_port))
except socket.error as exc:
if exc.errno in [errno.EADDRINUSE, errno.EINVAL]:
continue
raise
else:
if self.notify_host:
socket.socket(socket.AF_INET, socket.SOCK_DGRAM).sendto(
str(this_port).encode('utf-8'),
(self.notify_host, 6899)
)
return _sock, this_port
else:
raise Exception(NO_AVAILABLE_PORT.format(self=self))
def __enter__(self):
return self
def __exit__(self, *exc_info):
self._close_session()
def _close_session(self):
self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
if not self.interactive and self.active:
if self._handle is not None:
self._handle.close()
if self._client is not None:
self._client.close()
if self._sock is not None:
self._sock.close()
self.active = False
self.say(SESSION_ENDED.format(self=self))
def do_continue(self, arg):
self._close_session()
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_quit(self, arg):
self._close_session()
self.set_quit()
return 1
do_q = do_exit = do_quit
def set_quit(self):
# this raises a BdbQuit exception that we're unable to catch.
sys.settrace(None)
def cmdloop(self):
self.do_list(tuple())
return cmd.Cmd.cmdloop(self)
def do_list(self, args):
lines = self.context_lines
context = (lines - 2) / 2
if not args:
first = max(1, self.curframe.f_lineno - context)
last = first + context * 2
args = six.text_type('%s, %s') % (
six.text_type(int(first)),
six.text_type(int(last)),
)
self.lineno = None
with style(self, (
self.curframe.f_code.co_filename, self.curframe.f_lineno - context)
):
Pdb.do_list(self, args)
do_l = do_list
def format_stack_entry(self, *args, **kwargs):
entry = Pdb.format_stack_entry(self, *args, **kwargs)
return '\n'.join(
filter(lambda x: not x.startswith('->'), entry.splitlines())
)
def print_stack_entry(self, *args, **kwargs):
with style(self):
return Pdb.print_stack_entry(self, *args, **kwargs)
def default(self, line):
with style(self):
return Pdb.default(self, line)
def parseline(self, line):
line = line.strip()
match = re.search('^([0-9]+)([a-zA-Z]+.*)', line)
if match:
times, command = match.group(1), match.group(2)
line = command
self.cmdqueue.extend([
command for _ in range(int(times) - 1)
])
if line.startswith('lines '):
try:
self.context_lines = int(line.split(' ')[1])
line = 'l'
except ValueError:
pass
if line == '?':
line = 'dir()'
elif line.endswith('??'):
line = "import inspect; print(''.join(inspect.getsourcelines(%s)[0][:25]))" % line[:-2] # noqa
elif line.endswith('?'):
line = 'dir(%s)' % line[:-1]
return cmd.Cmd.parseline(self, line)
def emptyline(self):
pass
def onecmd(self, line):
line = line.strip()
if line.endswith('<!TAB!>'):
line = line.split('<!TAB!>')[0]
matches = self.complete(line)
if len(matches):
self.stdout.write(' '.join(matches))
self.stdout.flush()
return False
return Pdb.onecmd(self, line)
def displayhook(self, obj):
if obj is not None and not isinstance(obj, list):
return pprint.pprint(obj)
return Pdb.displayhook(self, obj)
def say(self, m):
logging.warning(m)
def _runmodule(self, module_name):
self._wait_for_mainpyfile = True
self._user_requested_quit = False
import runpy
mod_name, mod_spec, code = runpy._get_module_details(module_name)
self.mainpyfile = self.canonic(code.co_filename)
import __main__
__main__.__dict__.update({
"__name__": "__main__",
"__file__": self.mainpyfile,
"__package__": mod_spec.parent,
"__loader__": mod_spec.loader,
"__spec__": mod_spec,
"__builtins__": __builtins__,
})
self.run(code)
def _runscript(self, filename):
self._wait_for_mainpyfile = True
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = False
with open(filename, "rb") as fp:
statement = "exec(compile(%r, %r, 'exec'))" % \
(fp.read(), self.mainpyfile)
import pdb
l = locals()
l['pdb'] = pdb
self.run(statement, locals=l)
def debugger():
"""Return the current debugger instance, or create if none."""
sdb = _current[0]
if sdb is None or not sdb.active:
sdb = _current[0] = Sdb()
return sdb
def set_trace(frame=None):
"""Set break-point at current location, or a specified frame."""
if frame is None:
frame = _frame().f_back
return debugger().set_trace(frame)
def sigtrap(*args, **kw):
signal.signal(
signal.SIGTRAP,
lambda signum, frame: Sdb(*args, **kw).set_trace(frame.f_back)
)
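# Minimal usage sketch (added for illustration; assumes this module is importable as `sdb`
# and that the default port 6899 is free on the host):
#
#     import sdb
#     x = 42
#     sdb.set_trace()      # pauses here and waits for a client to attach
#     print(x)
#
# From another terminal, either run sdb.listen() to auto-attach when the breakpoint fires,
# or connect manually with: telnet 127.0.0.1 6899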
@contextlib.contextmanager
def style(im_self, filepart=None, lexer=None):
lexer = PythonLexer
old_stdout = im_self.stdout
class NoneBuffer(six.StringIO):
def write(self, x):
if x == '':
x = "''"
six.StringIO.write(self, x)
buff = NoneBuffer()
im_self.stdout = buff
yield
value = buff.getvalue()
context = len(value.splitlines())
file_cache = {}
if filepart:
try:
filepath, lineno = filepart
if filepath not in file_cache:
with open(filepath, 'r') as source:
file_cache[filepath] = source.readlines()
value = ''.join(file_cache[filepath][:int(lineno) - 1]) + value
except:
pass
if not value.strip():
value = 'None\n'
if im_self.colorize is True:
formatter = Terminal256Formatter(style='friendly')
value = highlight(value, lexer(), formatter)
# Properly format line numbers when they show up in multi-line strings
strcolor, _ = formatter.style_string['Token.Literal.String']
intcolor, _ = formatter.style_string['Token.Literal.Number.Integer']
value = re.sub(
r'%s([0-9]+)' % re.escape(strcolor),
lambda match: intcolor + match.group(1) + strcolor,
value,
)
# Highlight the "current" line in yellow for visibility
lineno = im_self.curframe.f_lineno
value = re.sub(
'(?<!\()%s%s[^\>]+>[^\[]+\[39m([^\x1b]+)[^m]+m([^\n]+)' % (re.escape(intcolor), lineno), # noqa
lambda match: ''.join([
str(lineno),
' ->',
'\x1b[93m',
match.group(1),
re.sub('\x1b[^m]+m', '', match.group(2)),
'\x1b[0m'
]),
value
)
if filepart:
_, first = filepart
value = '\n'.join(value.splitlines()[-context:]) + '\n'
if value.strip():
old_stdout.write(value)
im_self.stdout = old_stdout
def listen():
queue = Queue()
def _consume(queue):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 6899))
print('listening for sdb notifications on :6899...')
while True:
r, w, x = select.select([sock], [], [])
for i in r:
data = i.recv(1024)
queue.put(data)
worker = threading.Thread(target=_consume, args=(queue,))
worker.setDaemon(True)
worker.start()
orig_tty = termios.tcgetattr(sys.stdin)
try:
tty.setcbreak(sys.stdin.fileno())
while True:
try:
port = queue.get(timeout=1)
queue.task_done()
if port == 'q':
break
port = int(port)
print('opening telnet session at port :%d...' % port)
telnet(port).connect()
print('listening for sdb notifications on :6899...')
except Empty:
pass
except KeyboardInterrupt:
print('got Ctrl-C')
queue.put('q')
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_tty)
class telnet(object):
line_buff = ''
completing = None
history_pos = 0
def __init__(self, port, stdin=sys.stdin, stdout=sys.stdout):
self.port = port
self.stdin = stdin
self.stdout = stdout
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(2)
self.history = []
def connect(self):
try:
self.sock.connect(('0.0.0.0', self.port))
except Exception:
print('unable to connect')
return
print('connected to %s:%d' % ('0.0.0.0', self.port))
while True:
socket_list = [self.stdin, self.sock]
try:
r, w, e = select.select(socket_list, [], [])
for sock in r:
if sock == self.sock:
data = self.sock.recv(4096 * 4)
if not data:
print('connection closed')
return
self.recv(data)
else:
self.send()
except select.error as e:
if e[0] != errno.EINTR:
raise
def recv(self, data):
if self.completing is not None:
self.stdout.write('\x1b[2K\r>>> ')
matches = data.decode('utf-8').split(' ')
first = matches[0]
if len(matches) > 1:
if self.completing:
self.line_buff = self.line_buff.replace(
self.completing, first
)
matches[0] = (
'\033[93m' + first + '\033[0m'
)
self.stdout.write(
'\n'.join(matches) + '\n' + self.line_buff
)
else:
if self.completing:
self.line_buff = self.line_buff.replace(
self.completing, first
)
self.stdout.write(self.line_buff)
else:
self.stdout.write('\n')
self.stdout.write(data.decode('utf-8'))
self.stdout.write('>>> ')
self.stdout.flush()
def send(self):
char = self.stdin.read(1)
if char == '\x1b':
char += self.stdin.read(2)
if char in ('\x1b[A', '\x1b[B'):
if char == '\x1b[A':
# history up
self.history_pos -= 1
if char == '\x1b[B':
# history down
self.history_pos += 1
if self.history_pos < 0:
self.history_pos = -1
self.line_buff = ''
else:
try:
self.line_buff = self.history[self.history_pos]
except IndexError:
self.history_pos = len(self.history)
self.line_buff = ''
self.stdout.write('\x1b[2K\r>>> %s' % self.line_buff)
elif char == '\n':
# return char
self.completing = None
self.history_pos
import os
import errno
import wget
import zipfile
import glob
import librosa
import scipy
import math
from tqdm import tqdm
import torch
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import numpy as np
import scipy.io.wavfile as wavf
class MNIST:
"""
MNIST dataset featuring gray-scale 28x28 images of
hand-written characters belonging to ten different classes.
Dataset implemented with torchvision.datasets.MNIST.
Parameters:
args (dict): Dictionary of (command line) arguments.
Needs to contain batch_size (int) and workers(int).
is_gpu (bool): True if CUDA is enabled.
Sets value of pin_memory in DataLoader.
Attributes:
train_transforms (torchvision.transforms): Composition of transforms
including conversion to Tensor, repeating gray-scale image to
three channel for consistent use with different architectures
and normalization.
val_transforms (torchvision.transforms): Composition of transforms
including conversion to Tensor, repeating gray-scale image to
three channel for consistent use with different architectures
and normalization.
trainset (torch.utils.data.TensorDataset): Training set wrapper.
valset (torch.utils.data.TensorDataset): Validation set wrapper.
train_loader (torch.utils.data.DataLoader): Training set loader with shuffling.
val_loader (torch.utils.data.DataLoader): Validation set loader.
"""
def __init__(self, is_gpu, args):
self.num_classes = 10
self.train_transforms, self.val_transforms = self.__get_transforms(args.patch_size)
self.trainset, self.valset = self.get_dataset()
self.train_loader, self.val_loader = self.get_dataset_loader(args.batch_size, args.workers, is_gpu)
def __get_transforms(self, patch_size):
# optionally scale the images and repeat them to 3 channels to compare with color images
train_transforms = transforms.Compose([
transforms.Resize(size=(patch_size, patch_size)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
])
val_transforms = transforms.Compose([
transforms.Resize(size=(patch_size, patch_size)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
])
return train_transforms, val_transforms
def get_dataset(self):
"""
Uses torchvision.datasets.MNIST to load dataset.
Downloads the dataset if it doesn't exist already.
Returns:
torch.utils.data.TensorDataset: trainset, valset
"""
trainset = datasets.MNIST('datasets/MNIST/train/', train=True, transform=self.train_transforms,
target_transform=None, download=True)
valset = datasets.MNIST('datasets/MNIST/test/', train=False, transform=self.val_transforms,
target_transform=None, download=True)
return trainset, valset
def get_dataset_loader(self, batch_size, workers, is_gpu):
"""
Defines the dataset loader for wrapped dataset
Parameters:
batch_size (int): Defines the batch size in data loader
workers (int): Number of parallel threads to be used by data loader
is_gpu (bool): True if CUDA is enabled so pin_memory is set to True
Returns:
torch.utils.data.DataLoader: train_loader, val_loader
"""
train_loader = torch.utils.data.DataLoader(
self.trainset,
batch_size=batch_size, shuffle=True,
num_workers=workers, pin_memory=is_gpu, sampler=None)
val_loader = torch.utils.data.DataLoader(
self.valset,
batch_size=batch_size, shuffle=False,
num_workers=workers, pin_memory=is_gpu)
return train_loader, val_loader
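# Minimal usage sketch for the wrapper above (added for illustration; the Namespace values
# are assumptions - args only needs the attributes the class reads: batch_size, workers,
# patch_size):
#
#     from argparse import Namespace
#     args = Namespace(batch_size=64, workers=2, patch_size=28)
#     data = MNIST(is_gpu=torch.cuda.is_available(), args=args)
#     images, labels = next(iter(data.train_loader))
#     print(images.shape)  # torch.Size([64, 3, 28, 28]) - gray-scale repeated to 3 channels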
class FashionMNIST:
"""
FashionMNIST dataset featuring gray-scale 28x28 images of
Zalando clothing items belonging to ten different classes.
Dataset implemented with torchvision.datasets.FashionMNIST.
Parameters:
args (dict): Dictionary of (command line) arguments.
Needs to contain batch_size (int) and workers(int).
is_gpu (bool): True if CUDA is enabled.
Sets value of pin_memory in DataLoader.
Attributes:
train_transforms (torchvision.transforms): Composition of transforms
including conversion to Tensor, repeating gray-scale image to
three channel for consistent use with different architectures
and normalization.
val_transforms (torchvision.transforms): Composition of transforms
including conversion to Tensor, repeating gray-scale image to
three channel for consistent use with different architectures
and normalization.
trainset (torch.utils.data.TensorDataset): Training set wrapper.
valset (torch.utils.data.TensorDataset): Validation set wrapper.
train_loader (torch.utils.data.DataLoader): Training set loader with shuffling.
val_loader (torch.utils.data.DataLoader): Validation set loader.
"""
def __init__(self, is_gpu, args):
self.num_classes = 10
self.train_transforms, self.val_transforms = self.__get_transforms(args.patch_size)
self.trainset, self.valset = self.get_dataset()
self.train_loader, self.val_loader = self.get_dataset_loader(args.batch_size, args.workers, is_gpu)
def __get_transforms(self, patch_size):
# optionally scale the images and repeat them to 3 channels to compare with color images.
train_transforms = transforms.Compose([
transforms.Resize(size=(patch_size, patch_size)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
])
val_transforms = transforms.Compose([
transforms.Resize(size=(patch_size, patch_size)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
])
return train_transforms, val_transforms
def get_dataset(self):
"""
Uses torchvision.datasets.FashionMNIST to load dataset.
Downloads the dataset if it doesn't exist already.
Returns:
torch.utils.data.TensorDataset: trainset, valset
"""
trainset = datasets.FashionMNIST('datasets/FashionMNIST/train/', train=True, transform=self.train_transforms,
target_transform=None, download=True)
valset = datasets.FashionMNIST('datasets/FashionMNIST/test/', train=False, transform=self.val_transforms,
target_transform=None, download=True)
return trainset, valset
def get_dataset_loader(self, batch_size, workers, is_gpu):
"""
Defines the dataset loader for wrapped dataset
Parameters:
batch_size (int): Defines the batch size in data loader
workers (int): Number of parallel threads to be used by data loader
is_gpu (bool): True if CUDA is enabled so pin_memory is set to True
Returns:
torch.utils.data.DataLoader: train_loader, val_loader
"""
train_loader = torch.utils.data.DataLoader(
self.trainset,
batch_size=batch_size, shuffle=True,
num_workers=workers, pin_memory=is_gpu, sampler=None)
val_loader = torch.utils.data.DataLoader(
self.valset,
batch_size=batch_size, shuffle=False,
num_workers=workers, pin_memory=is_gpu)
return train_loader, val_loader
class KMNIST:
"""
KMNIST dataset featuring gray-scale 28x28 images of
Japanese Kuzushiji characters belonging to ten different classes.
Dataset implemented with torchvision.datasets.KMNIST.
Parameters:
args (dict): Dictionary of (command line) arguments.
Needs to contain batch_size (int) and workers(int).
is_gpu (bool): True if CUDA is enabled.
Sets value of pin_memory in DataLoader.
Attributes:
train_transforms (torchvision.transforms): Composition of transforms
including conversion to Tensor, repeating gray-scale image to
three channel for consistent use with different architectures
and normalization.
val_transforms (torchvision.transforms): Composition of transforms
including conversion to Tensor, repeating gray-scale image to
three channel for consistent use with different architectures
and normalization.
trainset (torch.utils.data.TensorDataset): Training set wrapper.
valset (torch.utils.data.TensorDataset): Validation set wrapper.
train_loader (torch.utils.data.DataLoader): Training set loader with shuffling.
val_loader (torch.utils.data.DataLoader): Validation set loader.
"""
def __init__(self, is_gpu, args):
self.num_classes = 10
self.train_transforms, self.val_transforms = self.__get_transforms(args.patch_size)
self.trainset, self.valset = self.get_dataset()
self.train_loader, self.val_loader = self.get_dataset_loader(args.batch_size, args.workers, is_gpu)
def __get_transforms(self, patch_size):
# optionally scale the images and repeat to 3 channels to compare to color images.
train_transforms = transforms.Compose([
transforms.Resize(size=(patch_size, patch_size)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
])
val_transforms = transforms.Compose([
transforms.Resize(size=(patch_size, patch_size)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
])
return train_transforms, val_transforms
def get_dataset(self):
"""
Uses torchvision.datasets.KMNIST to load dataset.
Downloads the dataset if it doesn't exist already.
Returns:
torch.utils.data.TensorDataset: trainset, valset
"""
trainset = datasets.KMNIST('datasets/KMNIST/train/', train=True, transform=self.train_transforms,
target_transform=None, download=True)
valset = datasets.KMNIST('datasets/KMNIST/test/', train=False, transform=self.val_transforms,
target_transform=None, download=True)
return trainset, valset
def get_dataset_loader(self, batch_size, workers, is_gpu):
"""
        Defines the train and validation data loaders for the wrapped dataset.
Parameters:
batch_size (int): Defines the batch size in data loader
workers (int): Number of parallel threads to be used by data loader
is_gpu (bool): True if CUDA is enabled so pin_memory is set to True
Returns:
torch.utils.data.DataLoader: train_loader, val_loader
"""
train_loader = torch.utils.data.DataLoader(
self.trainset,
batch_size=batch_size, shuffle=True,
num_workers=workers, pin_memory=is_gpu, sampler=None)
val_loader = torch.utils.data.DataLoader(
self.valset,
batch_size=batch_size, shuffle=False,
num_workers=workers, pin_memory=is_gpu)
return train_loader, val_loader
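# Illustrative usage sketch (not part of the original module): the wrapper
# classes above expect an `args` object exposing batch_size, workers and
# patch_size; the argparse.Namespace below is an assumption for demonstration.
def _example_kmnist_loaders():
    """Builds KMNIST train/val loaders on CPU. Purely illustrative."""
    import argparse
    args = argparse.Namespace(batch_size=64, workers=2, patch_size=28)
    # this will download KMNIST into datasets/KMNIST/ on first use
    wrapper = KMNIST(is_gpu=False, args=args)
    images, labels = next(iter(wrapper.train_loader))
    # images has shape [64, 3, 28, 28] after the repeat-to-three-channels transform
    return images.shape, labels.shape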
class AudioMNIST:
"""
    AudioMNIST dataset featuring gray-scale 227x227 spectrogram images of
ten spoken digits (0-9).
https://github.com/soerenab/AudioMNIST
Interpreting and Explaining Deep Neural Networks for Classification of Audio Signals.
Becker et al. arXiv:abs/1807.03418
Parameters:
args (dict): Dictionary of (command line) arguments.
Needs to contain batch_size (int) and workers(int).
is_gpu (bool): True if CUDA is enabled.
Sets value of pin_memory in DataLoader.
Attributes:
trainset (torch.utils.data.TensorDataset): Training set wrapper.
valset (torch.utils.data.TensorDataset): Validation set wrapper.
train_loader (torch.utils.data.DataLoader): Training set loader with shuffling.
val_loader (torch.utils.data.DataLoader): Validation set loader.
"""
def __init__(self, is_gpu, args):
self.num_classes = 10
self.__path = os.path.expanduser('datasets/AudioMNIST')
self.__download()
self.trainset, self.valset = self.get_dataset(args.patch_size)
self.train_loader, self.val_loader = self.get_dataset_loader(args.batch_size, args.workers, is_gpu)
def __check_exists(self):
"""
Check if dataset has already been downloaded
Returns:
bool: True if downloaded dataset has been found
"""
return os.path.exists(os.path.join(self.__path, 'train_images_tensor.pt')) and \
os.path.exists(os.path.join(self.__path, 'train_labels_tensor.pt')) and \
os.path.exists(os.path.join(self.__path, 'test_images_tensor.pt')) and \
os.path.exists(os.path.join(self.__path, 'test_labels_tensor.pt'))
def __download(self):
"""
Downloads the AudioMNIST dataset from the web if dataset
hasn't already been downloaded and does a spectrogram conversion.
The latter could potentially be refactored into a separate function and conversion parameters (here hard-coded
according to original authors) exposed to the command line parser.
"""
if self.__check_exists():
return
print("Downloading AudioMNIST dataset")
# download files
try:
os.makedirs(self.__path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
if not os.path.exists(os.path.join(self.__path, 'AudioMNIST-master.zip')):
url = 'https://github.com/soerenab/AudioMNIST/archive/master.zip'
wget_data = wget.download(url, out=self.__path)
archive = zipfile.ZipFile(wget_data)
for file in archive.namelist():
if file.startswith('AudioMNIST-master/data/'):
archive.extract(file, self.__path)
print("Download successful")
audio_mnist_src = os.path.join(self.__path, 'AudioMNIST-master/data/')
data = np.array(glob.glob(os.path.join(audio_mnist_src, "**/*.wav")))
train_images = []
train_labels = []
test_images = []
test_labels = []
# first 5-cross-validation set from https://github.com/soerenab/AudioMNIST/blob/master/preprocess_data.py
train_folders = [28, 56, 7, 19, 35, 1, 6, 16, 23, 34, 46, 53, 36, 57, 9, 24, 37, 2,
8, 17, 29, 39, 48, 54, 43, 58, 14, 25, 38, 3, 10, 20, 30, 40, 49, 55,
12, 47, 59, 15, 27, 41, 4, 11, 21, 31, 44, 50]
test_folders = [26, 52, 60, 18, 32, 42, 5, 13, 22, 33, 45, 51]
print("Converting audio to images")
# create train and test folders and save audios as images
for filepath in tqdm(data):
            # the last part of the filename is a repetition counter for each digit,
            # e.g. the first, second, third time the speaker says "zero"
dig, vp, rep = filepath.rstrip(".wav").split("/")[-1].split("_")
# according to https://github.com/soerenab/AudioMNIST/blob/master/preprocess_data.py
            # assumption: scipy.io.wavfile is imported as `wavf` in this module
            fs, data = wavf.read(filepath)
= event.index,
added = event.added,
removed = event.removed )
#---------------------------------------------------------------------------
# Performs an undoable 'append' operation:
#---------------------------------------------------------------------------
def _undoable_append ( self, node, object, data, make_copy = True ):
""" Performs an undoable append operation.
"""
try:
self._begin_undo()
if make_copy:
data = copy.deepcopy( data )
node.append_child( object, data )
finally:
self._end_undo()
#---------------------------------------------------------------------------
# Performs an undoable 'insert' operation:
#---------------------------------------------------------------------------
def _undoable_insert ( self, node, object, index, data, make_copy = True ):
""" Performs an undoable insert operation.
"""
try:
self._begin_undo()
if make_copy:
data = copy.deepcopy( data )
node.insert_child( object, index, data )
finally:
self._end_undo()
#---------------------------------------------------------------------------
# Performs an undoable 'delete' operation:
#---------------------------------------------------------------------------
def _undoable_delete ( self, node, object, index ):
""" Performs an undoable delete operation.
"""
try:
self._begin_undo()
node.delete_child( object, index )
finally:
self._end_undo()
#---------------------------------------------------------------------------
# Gets the id associated with a specified object (if any):
#---------------------------------------------------------------------------
def _get_object_nid ( self, object, name = '' ):
""" Gets the ID associated with a specified object (if any).
"""
info = self._map.get( id( object ) )
if info is None:
return None
for name2, nid in info:
if name == name2:
return nid
else:
return info[0][1]
#---------------------------------------------------------------------------
# Clears the current editor pane (if any):
#---------------------------------------------------------------------------
def _clear_editor ( self ):
""" Clears the current editor pane (if any).
"""
editor = self._editor
if editor._node_ui is not None:
editor.clear()
editor._node_ui.dispose()
editor._node_ui = editor._editor_nid = None
#---------------------------------------------------------------------------
# Gets/Sets the node specific data:
#---------------------------------------------------------------------------
@staticmethod
def _get_node_data(nid):
""" Gets the node specific data. """
return nid._py_data
@staticmethod
def _set_node_data(nid, data):
""" Sets the node specific data. """
nid._py_data = data
#----- User callable methods: --------------------------------------------------
#---------------------------------------------------------------------------
# Gets the object associated with a specified node:
#---------------------------------------------------------------------------
def get_object ( self, nid ):
""" Gets the object associated with a specified node.
"""
return self._get_node_data( nid )[2]
#---------------------------------------------------------------------------
    # Returns the object which is the immediate parent of a specified object
# in the tree:
#---------------------------------------------------------------------------
def get_parent ( self, object, name = '' ):
        Returns the object that is the immediate parent of a specified
object in the tree.
"""
nid = self._get_object_nid( object, name )
if nid is not None:
pnid = nid.parent()
if pnid is not self._tree.getItem( 0 ):
return self.get_object( pnid )
return None
#---------------------------------------------------------------------------
# Returns the node associated with a specified object:
#---------------------------------------------------------------------------
def get_node ( self, object, name = '' ):
""" Returns the node associated with a specified object.
"""
nid = self._get_object_nid( object, name )
if nid is not None:
return self._get_node_data( nid )[1]
return None
#----- Tree event handlers: ----------------------------------------------------
#---------------------------------------------------------------------------
# Handles a tree node being expanded:
#---------------------------------------------------------------------------
def _on_item_expanded(self, nid):
""" Handles a tree node being expanded.
"""
expanded, node, object = self._get_node_data(nid)
# If 'auto_close' requested for this node type, close all of the node's
# siblings:
if node.can_auto_close(object):
parent = nid.parent()
if parent is not None:
for snid in self._nodes_for(parent):
if snid is not nid:
snid.setState(False)
# Expand the node (i.e. populate its children if they are not there
# yet):
self._expand_node(nid)
self._update_icon(nid)
#---------------------------------------------------------------------------
# Handles a tree node being collapsed:
#---------------------------------------------------------------------------
def _on_item_collapsed(self, nid):
""" Handles a tree node being collapsed.
"""
self._update_icon(nid)
#---------------------------------------------------------------------------
# Handles a tree item click:
#---------------------------------------------------------------------------
def _on_item_clicked(self, nid, col):
""" Handles a tree item being clicked.
"""
_, node, object = self._get_node_data(nid)
if node.click(object) is True and self.factory.on_click is not None:
self.ui.evaluate(self.factory.on_click, object)
# Fire the 'click' event with the object as its value:
self.click = object
#---------------------------------------------------------------------------
# Handles a tree item double click:
#---------------------------------------------------------------------------
def _on_item_dclicked(self, nid, col):
""" Handles a tree item being double-clicked.
"""
_, node, object = self._get_node_data(nid)
if node.dclick(object) is True:
if self.factory.on_dclick is not None:
self.ui.evaluate(self.factory.on_dclick, object)
self._veto = True
else:
self._veto = True
# Fire the 'dclick' event with the clicked on object as value:
self.dclick = object
#---------------------------------------------------------------------------
# Handles a tree node being selected:
#---------------------------------------------------------------------------
def _on_tree_sel_changed(self):
""" Handles a tree node being selected.
"""
# Get the new selection:
nids = [ self._tree.getSelectedItem() ]
selected = []
if len(nids) > 0:
for n in nids:
# If there is a real selection, get the associated object:
expanded, node, sel_object = self._get_node_data(n)
selected.append(sel_object)
# Try to inform the node specific handler of the selection, if
# there are multiple selections, we only care about the first
# (or maybe the last makes more sense?)
if n == nids[0]:
nid = n
object = sel_object
not_handled = node.select(sel_object)
else:
nid = None
object = None
not_handled = True
# Set the value of the new selection:
if self.factory.selection_mode == 'single':
self._no_update_selected = True
self.selected = object
self._no_update_selected = False
else:
self._no_update_selected = True
self.selected = selected
self._no_update_selected = False
# If no one has been notified of the selection yet, inform the editor's
# select handler (if any) of the new selection:
if not_handled is True:
self.ui.evaluate(self.factory.on_select, object)
# Check to see if there is an associated node editor pane:
editor = self._editor
if editor is not None:
# If we already had a node editor, destroy it:
# editor.setUpdatesEnabled(False)
self._clear_editor()
# If there is a selected object, create a new editor for it:
if object is not None:
# Try to chain the undo history to the main undo history:
view = node.get_view( object )
if view is None:
view = object.trait_view()
if (self.ui.history is not None) or (view.kind == 'subpanel'):
ui = object.edit_traits( parent = editor,
view = view,
kind = 'subpanel' )
else:
# Otherwise, just set up our own new one:
ui = object.edit_traits( parent = editor,
view = view,
kind = 'panel' )
# Make our UI the parent of the new UI:
ui.parent = self.ui
# Remember the new editor's UI and node info:
editor._node_ui = ui
editor._editor_nid = nid
# Finish setting up the editor:
# ui.control.layout().setMargin(0)
# editor.setWidget( ui.control )
editor.add( ui.control )
# Allow the editor view to show any changes that have occurred:
# editor.setUpdatesEnabled(True)
#---------------------------------------------------------------------------
# Handles the user right clicking on a tree node:
#---------------------------------------------------------------------------
def _on_context_menu(self, pos):
        Handles the user requesting a context menu by right-clicking on a tree node.
"""
nid = self._tree.itemAt(pos)
if nid is None:
return
_, node, object = self._get_node_data(nid)
self._data = (node, object, nid)
self._context = {'object': object,
'editor': self,
'node': node,
'info': self.ui.info,
'handler': self.ui.handler}
# Try to get the parent node of the node clicked on:
pnid = nid.parent()
if pnid is None or pnid is self._tree.invisibleRootItem():
parent_node = parent_object = None
else:
_, parent_node, parent_object = self._get_node_data(pnid)
self._menu_node = node
self._menu_parent_node = parent_node
self._menu_parent_object = parent_object
menu = node.get_menu(object)
if menu is None:
# Use the standard, default menu:
menu = self._standard_menu(node, object)
elif isinstance(menu, Menu):
# Use the menu specified by the node:
group = menu.find_group( NewAction )
if group is not None:
# Only set it the first time:
group.id = ''
actions = self._new_actions( node, object )
if len( actions ) > 0:
group.insert( 0, Menu( name = 'New', *actions ) )
else:
# All other values mean no menu should be displayed:
menu = None
# Only display the menu if a valid menu is defined:
if menu is not None:
qmenu = menu.create_menu( self._tree, self )
qmenu.exec_(self._tree.mapToGlobal(pos))
# Reset all menu related cached values:
self._data = self._context = self._menu_node = \
self._menu_parent_node = self._menu_parent_object = None
#---------------------------------------------------------------------------
# Returns the standard contextual pop-up menu:
#---------------------------------------------------------------------------
def _standard_menu ( self, node, object ):
""" Returns the standard contextual pop-up menu.
"""
actions = [ CutAction, CopyAction, PasteAction, Separator(),
DeleteAction, Separator(), RenameAction ]
menubar = MenuBar(vertical=True)
# See if the 'New' menu section should be added:
items = self._new_actions( node, object )
if len( items ) > 0:
new_menu = MenuBar()
for item in items:
new_menu.addItem( item )
new_menu.addItem( Separator() )
menubar.addItem( MenuItem("New", new_menu) )
for action in actions:
menubar.addItem( action )
return menubar
| |
from django.shortcuts import reverse
from django.core.files.storage import default_storage
from rest_framework.test import APIRequestFactory
from trackstats.models import Metric, Period
import pytest
from pytest import approx
from pathlib import Path
import datetime
import os.path
import zipfile
from io import BytesIO
import yaml
from .utils import FIXTURE_DIR, SurfaceFactory, Topography1DFactory, Topography2DFactory, UserFactory, \
two_topos, one_line_scan, user_three_topographies_three_surfaces_three_tags
from ..models import Topography, Surface, MAX_LENGTH_DATAFILE_FORMAT, user_directory_path
from ..forms import TopographyForm, TopographyWizardUnitsForm, SurfaceForm
from topobank.utils import assert_in_content, \
assert_redirects, assert_no_form_errors, assert_form_error
import SurfaceTopography.IO # for mocking
#######################################################################
# Selections
#######################################################################
@pytest.mark.django_db
def test_empty_surface_selection(client, handle_usage_statistics):
#
# database objects
#
user = UserFactory()
surface = SurfaceFactory(creator=user)
assert surface.topography_set.count() == 0
client.force_login(user)
client.post(reverse('manager:surface-select', kwargs=dict(pk=surface.pk)))
#
# Now the selection should contain one empty surface
#
assert client.session['selection'] == [f'surface-{surface.pk}']
@pytest.mark.django_db
def test_download_selection(client, mocker, handle_usage_statistics):
record_mock = mocker.patch('trackstats.models.StatisticByDateAndObject.objects.record')
user = UserFactory()
surface1 = SurfaceFactory(creator=user)
surface2 = SurfaceFactory(creator=user)
topo1a = Topography1DFactory(surface=surface1)
topo1b = Topography2DFactory(surface=surface1)
topo2a = Topography1DFactory(surface=surface2)
factory = APIRequestFactory()
request = factory.get(reverse('manager:download-selection'))
request.user = user
request.session = {
'selection': [f'topography-{topo1a.id}', f'surface-{surface2.id}']
}
from ..views import download_selection_as_surfaces
response = download_selection_as_surfaces(request)
assert response.status_code == 200
# open zip file and look into meta file, there should be two surfaces and three topographies
with zipfile.ZipFile(BytesIO(response.content)) as zf:
meta_file = zf.open('meta.yml')
meta = yaml.safe_load(meta_file)
assert len(meta['surfaces']) == 2
assert len(meta['surfaces'][0]['topographies']) == 2
assert len(meta['surfaces'][1]['topographies']) == 1
# each downloaded surface is counted once
metric = Metric.objects.SURFACE_DOWNLOAD_COUNT
today = datetime.date.today()
record_mock.assert_any_call(metric=metric, object=surface1, period=Period.DAY,
value=1, date=today)
record_mock.assert_any_call(metric=metric, object=surface2, period=Period.DAY,
value=1, date=today)
#######################################################################
# Topographies
#######################################################################
#
# Different formats are handled by SurfaceTopography
# and should be tested there in general, but
# we add some tests for formats which had problems because
# of the topobank code
#
@pytest.mark.django_db
def test_upload_topography_di(client, handle_usage_statistics):
input_file_path = Path(FIXTURE_DIR + '/example3.di') # maybe use package 'pytest-datafiles' here instead
description = "test description"
category = 'exp'
user = UserFactory()
client.force_login(user)
# first create a surface
response = client.post(reverse('manager:surface-create'),
data={
'name': 'surface1',
'creator': user.id,
'category': category,
}, follow=True)
assert_no_form_errors(response)
assert response.status_code == 200
surface = Surface.objects.get(name='surface1')
#
# open first step of wizard: file upload
#
with open(str(input_file_path), mode='rb') as fp:
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'upload',
'upload-datafile': fp,
'upload-datafile_format': '',
'upload-surface': surface.id,
}, follow=True)
assert response.status_code == 200
assert_no_form_errors(response)
#
# check contents of second page
#
# now we should be on the page with second step
assert_in_content(response, "Step 2 of 3")
# we should have two datasources as options, "ZSensor" and "Height"
assert_in_content(response, '<option value="0">ZSensor</option>')
assert_in_content(response, '<option value="3">Height</option>')
assert response.context['form'].initial['name'] == 'example3.di'
#
# Send data for second page
#
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'metadata',
'metadata-name': 'topo1',
'metadata-measurement_date': '2018-06-21',
'metadata-data_source': 0,
'metadata-description': description,
})
assert response.status_code == 200
assert_no_form_errors(response)
assert_in_content(response, "Step 3 of 3")
#
# Send data for third page
#
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'units',
'units-size_x': '9000',
'units-size_y': '9000',
'units-unit': 'nm',
'units-height_scale': 0.3,
'units-detrend_mode': 'height',
'units-resolution_x': 256,
'units-resolution_y': 256,
'units-instrument_type': Topography.INSTRUMENT_TYPE_UNDEFINED,
'units-fill_undefined_data_mode': Topography.FILL_UNDEFINED_DATA_MODE_NOFILLING,
}, follow=True)
assert response.status_code == 200
# assert reverse('manager:topography-detail', kwargs=dict(pk=1)) == response.url
# export_reponse_as_html(response)
assert 'form' not in response.context, "Errors:" + str(response.context['form'].errors)
surface = Surface.objects.get(name='surface1')
topos = surface.topography_set.all()
assert len(topos) == 1
t = topos[0]
assert t.measurement_date == datetime.date(2018, 6, 21)
assert t.description == description
assert "example3" in t.datafile.name
assert 256 == t.resolution_x
assert 256 == t.resolution_y
assert t.creator == user
assert t.datafile_format == 'di'
@pytest.mark.django_db
def test_upload_topography_npy(client):
user = UserFactory()
surface = SurfaceFactory(creator=user, name="surface1")
description = "Some description"
client.force_login(user)
#
# open first step of wizard: file upload
#
input_file_path = Path(FIXTURE_DIR + '/example-2d.npy') # maybe use package 'pytest-datafiles' here instead
with open(str(input_file_path), mode='rb') as fp:
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'upload',
'upload-datafile': fp,
'upload-datafile_format': '',
'upload-surface': surface.id,
}, follow=True)
assert response.status_code == 200
assert_no_form_errors(response)
#
# now we should be on the page with second step
#
assert_in_content(response, "Step 2 of 3")
assert_in_content(response, '<option value="0">Default</option>')
assert response.context['form'].initial['name'] == 'example-2d.npy'
#
# Send data for second page
#
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'metadata',
'metadata-name': 'topo1',
'metadata-measurement_date': '2020-10-21',
'metadata-data_source': 0,
'metadata-description': description,
}, follow=True)
assert response.status_code == 200
assert_no_form_errors(response)
assert_in_content(response, "Step 3 of 3")
#
# Send data for third page
#
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'units',
'units-size_x': '1',
'units-size_y': '1',
'units-unit': 'nm',
'units-height_scale': 1,
'units-detrend_mode': 'height',
'units-resolution_x': 2,
'units-resolution_y': 2,
'units-instrument_type': Topography.INSTRUMENT_TYPE_UNDEFINED,
'units-fill_undefined_data_mode': Topography.FILL_UNDEFINED_DATA_MODE_NOFILLING,
}, follow=True)
assert response.status_code == 200
assert_no_form_errors(response)
surface = Surface.objects.get(name='surface1')
topos = surface.topography_set.all()
assert len(topos) == 1
t = topos[0]
assert t.measurement_date == datetime.date(2020, 10, 21)
assert t.description == description
assert "example-2d" in t.datafile.name
assert 2 == t.resolution_x
assert 2 == t.resolution_y
assert t.creator == user
assert t.datafile_format == 'npy'
@pytest.mark.parametrize(("input_filename", "exp_datafile_format",
"exp_resolution_x", "exp_resolution_y",
"physical_sizes_to_be_set", "exp_physical_sizes"),
[(FIXTURE_DIR + "/10x10.txt", 'asc', 10, 10, (1, 1), (1, 1)),
(FIXTURE_DIR + "/line_scan_1.asc", 'xyz', 11, None, None, (9.0,)),
(FIXTURE_DIR + "/line_scan_1_minimal_spaces.asc", 'xyz', 11, None, None, (9.0,)),
(FIXTURE_DIR + "/example6.txt", 'asc', 10, None, (1.,), (1.,))])
# Add this for a larger file: ("topobank/manager/fixtures/500x500_random.txt", 500)]) # takes quite long
@pytest.mark.django_db
def test_upload_topography_txt(client, django_user_model, input_filename,
exp_datafile_format,
exp_resolution_x, exp_resolution_y,
physical_sizes_to_be_set, exp_physical_sizes,
handle_usage_statistics):
input_file_path = Path(input_filename)
expected_toponame = input_file_path.name
description = "test description"
username = 'testuser'
password = '<PASSWORD>'
user = django_user_model.objects.create_user(username=username, password=password)
assert client.login(username=username, password=password)
# first create a surface
response = client.post(reverse('manager:surface-create'),
data={
'name': 'surface1',
'creator': user.id,
'category': 'sim'
}, follow=True)
assert_no_form_errors(response)
assert response.status_code == 200
surface = Surface.objects.get(name='surface1')
#
# open first step of wizard: file upload
#
with input_file_path.open(mode='rb') as fp:
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'upload',
'upload-datafile': fp,
'upload-surface': surface.id,
}, follow=True)
assert response.status_code == 200
assert_no_form_errors(response)
#
# check contents of second page
#
# now we should be on the page with second step
assert b"Step 2 of 3" in response.content, "Errors:" + str(response.context['form'].errors)
assert_in_content(response, '<option value="0">Default</option>')
assert response.context['form'].initial['name'] == expected_toponame
#
# Send data for second page
#
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'metadata',
'metadata-name': 'topo1',
'metadata-measurement_date': '2018-06-21',
'metadata-data_source': 0,
'metadata-description': description,
})
assert response.status_code == 200
assert_no_form_errors(response)
assert_in_content(response, "Step 3 of 3")
assert_in_content(response, 'Fill undefined data mode')
#
# Send data for third page
#
if exp_resolution_y is None:
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': "units",
'units-size_editable': physical_sizes_to_be_set is not None,
# would be sent when initialize form
'units-size_x': physical_sizes_to_be_set[0] if physical_sizes_to_be_set else '',
'units-unit': 'nm',
'units-height_scale': 1,
'units-detrend_mode': 'height',
'units-resolution_x': exp_resolution_x,
'units-instrument_type': Topography.INSTRUMENT_TYPE_UNDEFINED,
'units-fill_undefined_data_mode': Topography.FILL_UNDEFINED_DATA_MODE_NOFILLING,
}, follow=True)
else:
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': "units",
'units-size_editable': True, # would be sent when initialize form
'units-unit_editable': True, # would be sent when initialize form
'units-size_x': physical_sizes_to_be_set[0],
'units-size_y': physical_sizes_to_be_set[1],
'units-unit': 'nm',
'units-height_scale': 1,
'units-detrend_mode': 'height',
'units-resolution_x': exp_resolution_x,
'units-resolution_y': exp_resolution_y,
'units-instrument_type': Topography.INSTRUMENT_TYPE_UNDEFINED,
'units-fill_undefined_data_mode': Topography.FILL_UNDEFINED_DATA_MODE_NOFILLING,
}, follow=True)
assert response.status_code == 200
assert_no_form_errors(response)
surface = Surface.objects.get(name='surface1')
topos = surface.topography_set.all()
assert len(topos) == 1
t = topos[0]
assert t.measurement_date == datetime.date(2018, 6, 21)
assert t.description == description
assert input_file_path.stem in t.datafile.name
assert exp_resolution_x == t.resolution_x
assert exp_resolution_y == t.resolution_y
assert t.datafile_format == exp_datafile_format
assert t.instrument_type == Topography.INSTRUMENT_TYPE_UNDEFINED
assert t.instrument_parameters == {}
#
# Also check some properties of the SurfaceTopography.Topography
#
st_topo = t.topography(allow_cache=False, allow_squeezed=False)
assert st_topo.physical_sizes == exp_physical_sizes
@pytest.mark.parametrize("instrument_type,resolution_value,resolution_unit,tip_radius_value,tip_radius_unit",
[
(Topography.INSTRUMENT_TYPE_UNDEFINED, '', '', '', ''), # empty instrument params
(Topography.INSTRUMENT_TYPE_UNDEFINED, 10.0, 'km', 2.0, 'nm'), # also empty params
(Topography.INSTRUMENT_TYPE_MICROSCOPE_BASED, 10.0, 'nm', '', ''),
(Topography.INSTRUMENT_TYPE_MICROSCOPE_BASED, '', 'nm', '', ''), # no value! -> also empty
(Topography.INSTRUMENT_TYPE_CONTACT_BASED, '', '', 1.0, 'mm'),
(Topography.INSTRUMENT_TYPE_CONTACT_BASED, '', '', '', 'mm'), # no value! -> also empty
])
@pytest.mark.django_db
def test_upload_topography_instrument_parameters(client, django_user_model,
instrument_type, resolution_value,
resolution_unit, tip_radius_value, tip_radius_unit,
handle_usage_statistics):
input_file_path = Path(FIXTURE_DIR + "/10x10.txt")
expected_toponame = input_file_path.name
description = "test description"
username = 'testuser'
password = '<PASSWORD>'
instrument_name = "My Profilometer"
user = django_user_model.objects.create_user(username=username, password=password)
assert client.login(username=username, password=password)
# first create a surface
response = client.post(reverse('manager:surface-create'),
data={
'name': 'surface1',
'creator': user.id,
'category': 'sim'
}, follow=True)
assert_no_form_errors(response) # it should be allowed to leave out values within instrument
assert response.status_code == 200
surface = Surface.objects.get(name='surface1')
#
# open first step of wizard: file upload
#
with input_file_path.open(mode='rb') as fp:
response = client.post(reverse('manager:topography-create',
kwargs=dict(surface_id=surface.id)),
data={
'topography_create_wizard-current_step': 'upload',
'upload-datafile': fp,
'upload-surface': surface.id,
}, follow=True)
assert response.status_code == 200
assert_no_form_errors(response)
#
# check contents of second page
#
# now we should be on the page with second step
assert b"Step 2 of 3" in response.content, "Errors:" + str(response.context['form'].errors)
assert_in_content(response, '<option value="0">Default</option>')
assert response.context['form'].initial['name'] == expected_toponame
#
| |
int16/10, int16) broadcast;
setLanded() airecv clsend;
requestLeave() airecv clsend;
};
dclass DistributedLawbotChair : DistributedObject {
setBossCogId(uint32) required broadcast ram;
setIndex(uint8) required broadcast ram;
setState(char) broadcast ram;
showCogJurorFlying() broadcast;
setToonJurorIndex(int8) broadcast ram;
};
dclass DistributedLawnDecor : DistributedNode {
setPlot(int8) required broadcast ram;
setHeading(int16/10) required broadcast ram;
setOwnerIndex(int8) required broadcast ram;
setPosition(int16/10, int16/10, int16/10) required broadcast ram;
plotEntered() airecv clsend;
removeItem() airecv clsend;
setMovie(uint8, uint32) broadcast ram;
movieDone() airecv clsend;
interactionDenied(uint32) broadcast ram;
setBoxDoId(uint32, uint8) broadcast ram;
};
dclass DistributedGardenPlot : DistributedLawnDecor {
plantFlower(uint8, uint8) airecv clsend;
plantGagTree(uint8, uint8) airecv clsend;
plantStatuary(uint8) airecv clsend;
plantToonStatuary(uint8, uint16) airecv clsend;
plantNothing(uint8) airecv clsend;
};
dclass DistributedGardenBox : DistributedLawnDecor {
setTypeIndex(uint8) required broadcast ram;
};
dclass DistributedStatuary : DistributedLawnDecor {
setTypeIndex(uint8) required broadcast ram;
setWaterLevel(int8) required broadcast ram;
setGrowthLevel(int8) required broadcast ram;
};
dclass DistributedToonStatuary : DistributedStatuary {
setOptional(uint16) required broadcast ram;
};
dclass DistributedAnimatedStatuary : DistributedStatuary {
};
dclass DistributedChangingStatuary : DistributedStatuary {
setGrowthLevel(int8) required broadcast ram;
};
dclass DistributedPlantBase : DistributedLawnDecor {
setTypeIndex(uint8) required broadcast ram;
setWaterLevel(int8) required broadcast ram;
setGrowthLevel(int8) required broadcast ram;
waterPlant() airecv clsend;
waterPlantDone() airecv clsend;
};
dclass DistributedFlower : DistributedPlantBase {
setTypeIndex(uint8) required broadcast ram;
setVariety(uint8) required broadcast ram;
};
dclass DistributedGagTree : DistributedPlantBase {
setWilted(int8) required broadcast ram;
requestHarvest() airecv clsend;
setFruiting(bool) required broadcast ram;
};
dclass DistributedTravelGame : DistributedMinigame {
setTimerStartTime(int16) broadcast;
setAvatarChoice(uint16, uint8) airecv clsend;
setAvatarVotes(uint32, uint16) broadcast;
setAvatarChose(uint32) broadcast;
setServerChoices(int16[], uint8[], uint8, uint8) broadcast;
setMinigames(uint8[], uint8[]) broadcast;
setBonuses(uint8[], uint8[]) broadcast;
setBoardIndex(uint8) required broadcast ram;
};
dclass DistributedPairingGame : DistributedMinigame {
setDeckSeed(uint32) required broadcast ram;
setMaxOpenCards(uint8) broadcast ram;
openCardRequest(int16, int16) airecv clsend;
openCardResult(int16, uint32, int16, int8, int16[]) broadcast;
reportDone() airecv clsend;
setEveryoneDone() broadcast;
setSignaling(uint32) clsend broadcast;
};
struct golfData {
int16 frame;
int32/100000 x;
int32/100000 y;
int32/100000 z;
};
struct Coord3 {
int32/100000 x;
int32/100000 y;
int32/100000 z;
};
struct CommonObjectData {
uint8 id;
uint8 type;
int32/100000 x;
int32/100000 y;
int32/100000 z;
int32/100000 q1;
int32/100000 q2;
int32/100000 q3;
int32/100000 q4;
int32/100000 aVX;
int32/100000 aVY;
int32/100000 aVZ;
int32/100000 lVX;
int32/100000 lVY;
int32/100000 lVZ;
};
dclass DistributedPhysicsWorld : DistributedObject {
clientCommonObject(uint8, uint8, Coord3, Coord3, int32/100, int32/100, int32/1000) broadcast ram;
setCommonObjects(CommonObjectData []) broadcast;
upSetCommonObjects(CommonObjectData []) airecv clsend;
};
dclass DistributedGolfHole : DistributedPhysicsWorld {
setHoleId(int8) broadcast ram required;
setTimingCycleLength(uint32/1000) broadcast ram required;
setAvatarReadyHole() airecv clsend;
setGolfCourseDoId(uint32) broadcast ram required;
turnDone() airecv clsend;
ballInHole() airecv clsend;
setAvatarTempTee(uint32, uint8) clsend broadcast;
setTempAimHeading(uint32, int32/1000) clsend broadcast;
setAvatarFinalTee(uint32, uint8) broadcast;
setGolferIds(uint32[]) broadcast ram required;
golfersTurn(uint32) broadcast;
golferChooseTee(uint32) broadcast;
setAvatarTee(uint8) airecv clsend;
postSwing(uint32/1000, int32, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000) airecv clsend;
postSwingState(uint32/1000, int32, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, uint16/100, CommonObjectData []) airecv clsend;
swing(uint32, int32, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000) broadcast;
ballMovie2AI(uint32/1000, uint32, golfData [], golfData [], uint16, uint16, uint16, CommonObjectData []) airecv clsend;
ballMovie2Client(uint32/1000, uint32, golfData [], golfData [], uint16, uint16, uint16, CommonObjectData []) broadcast;
assignRecordSwing(uint32, uint32/1000, int32, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, CommonObjectData []);
setBox(int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000) airecv clsend;
sendBox(int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000, int32/1000) broadcast;
};
dclass DistributedGolfCourse : DistributedObject {
setGolferIds(uint32[]) broadcast ram required;
setCourseId(int8) broadcast ram required;
setAvatarJoined() airecv clsend;
setAvatarReadyCourse() airecv clsend;
setAvatarReadyHole() airecv clsend;
setAvatarExited() airecv clsend;
setCurHoleIndex(int8) broadcast ram required;
setCurHoleDoId(uint32) broadcast ram required;
setDoneReward() airecv clsend;
setReward(uint8[] [], int8[], uint8[] [], uint8[] [], uint8[] [], uint32, uint32/100, uint32/100, uint32/100, uint32/100) broadcast;
setCourseReady(int8, int16[], int8) broadcast;
setHoleStart(int16) broadcast;
setCourseExit() broadcast;
setCourseAbort(uint32) broadcast;
setPlayHole() broadcast;
avExited(uint32) broadcast;
setScores(int16 []) broadcast;
changeDrivePermission(uint32, int8) broadcast;
};
dclass DistributedVineGame : DistributedMinigame {
reachedEndVine(int8) clsend airecv;
setNewVine(uint32, int8, uint32/10000, int8) airecv clsend broadcast;
setNewVineT(uint32, uint32/10000, int8) clsend broadcast;
setJumpingFromVine(uint32, int8, int8, int32/100, int16/100, int16/100, int16) clsend broadcast;
claimTreasure(uint32) airecv clsend;
setTreasureGrabbed(uint32, uint32) broadcast;
setScore(uint32, uint32) broadcast;
allAtEndVine() broadcast;
setFallingFromVine(uint32, int8, int8, int32/100, int16/100, int16/100, int16, int8) clsend broadcast;
setFallingFromMidair(uint32, int8, int32/100, int16/100, int16/100, int16, int8) clsend broadcast;
setVineSections(uint8[]) required broadcast ram;
};
dclass TTAvatarFriendsManager : AvatarFriendsManager {
};
dclass TTPlayerFriendsManager : PlayerFriendsManager {
};
dclass TTSpeedchatRelay : SpeedchatRelay {
};
dclass DistributedGolfKart : DistributedObject {
setState(string, int16) broadcast ram;
fillSlot0(uint32) broadcast ram;
fillSlot1(uint32) broadcast ram;
fillSlot2(uint32) broadcast ram;
fillSlot3(uint32) broadcast ram;
emptySlot0(uint32, int16) broadcast ram;
emptySlot1(uint32, int16) broadcast ram;
emptySlot2(uint32, int16) broadcast ram;
emptySlot3(uint32, int16) broadcast ram;
requestBoard() airecv clsend;
rejectBoard(uint32);
requestExit() airecv clsend;
setMinigameZone(uint32, uint16);
setGolfZone(uint32, uint16);
setGolfCourse(int8) required broadcast ram;
setPosHpr(int16/10, int16/10, int16/10, int16/10, int16/10, int16/10) required broadcast ram;
setColor(int16, int16, int16) required broadcast ram;
};
dclass DistributedTimer : DistributedObject {
setStartTime(int32) broadcast ram required;
};
dclass DistributedPicnicBasket : DistributedObject {
setState(string, uint16, int16) broadcast ram;
fillSlot0(uint32) broadcast ram;
fillSlot1(uint32) broadcast ram;
fillSlot2(uint32) broadcast ram;
fillSlot3(uint32) broadcast ram;
emptySlot0(uint32, int16) broadcast ram;
emptySlot1(uint32, int16) broadcast ram;
emptySlot2(uint32, int16) broadcast ram;
emptySlot3(uint32, int16) broadcast ram;
requestBoard(int16) airecv clsend;
rejectBoard(uint32);
requestExit() airecv clsend;
doneExit() airecv clsend;
setMinigameZone(uint32, uint16);
setPicnicDone();
setPosHpr(int16/10, int16/10, int16/10, int16/10, int16/10, int16/10) required broadcast ram;
setTableNumber(int16) required broadcast ram;
};
dclass DistributedGameTable : DistributedObject {
requestJoin(uint8) airecv clsend;
rejectJoin();
requestExit() airecv clsend;
fillSlot0(uint32) broadcast ram;
fillSlot1(uint32) broadcast ram;
fillSlot2(uint32) broadcast ram;
fillSlot3(uint32) broadcast ram;
fillSlot4(uint32) broadcast ram;
fillSlot5(uint32) broadcast ram;
emptySlot0(uint32, int16) broadcast ram;
emptySlot1(uint32, int16) broadcast ram;
emptySlot2(uint32, int16) broadcast ram;
emptySlot3(uint32, int16) broadcast ram;
emptySlot4(uint32, int16) broadcast ram;
emptySlot5(uint32, int16) broadcast ram;
setPosHpr(int16/10, int16/10, int16/10, int16/10, int16/10, int16/10) required broadcast ram;
announceWinner(uint32) broadcast;
};
dclass DistributedBossbotBoss : DistributedBossCog {
setState(string) broadcast ram;
setBattleDifficulty(uint8) broadcast ram;
requestGetFood(int8, int8, uint32) airecv clsend;
toonGotFood(uint32, int8, int8, uint32) broadcast;
requestServeFood(int8, int8) airecv clsend;
toonServeFood(uint32, int8, int8) broadcast;
hitBoss(uint8) airecv clsend;
hitToon(uint32) airecv clsend;
ballHitBoss(uint8) airecv clsend;
setMaxHp(uint16) broadcast ram;
setBossDamage(uint16, uint8, int16) broadcast ram;
setSpeedDamage(uint16, uint8, int16) broadcast ram;
reachedTable(uint8) airecv clsend;
hitTable(uint8) airecv clsend;
awayFromTable(uint8) airecv clsend;
toonGotHealed(uint32) broadcast;
requestGetToonup(int8, int8, uint32) airecv clsend;
toonGotToonup(uint32, int8, int8, uint32) broadcast;
};
dclass DistributedCogKart : DistributedElevatorExt {
setCountryClubId(uint16) required broadcast ram;
setPosHpr(int16/10, int16/10, int16/10, int16/10, int16/10, int16/10) required broadcast ram;
setCountryClubInteriorZone(uint32);
setCountryClubInteriorZoneForce(uint32);
};
dclass DistributedCountryClub : DistributedObject {
setZoneId(uint32) required broadcast ram;
setBlockedRooms(uint8[]) required broadcast ram;
setCountryClubId(uint16) required broadcast ram;
setLayoutIndex(uint16) required broadcast ram;
setFloorNum(uint8) required broadcast ram;
setRoomDoIds(uint32[]) broadcast ram;
setCountryClubZone(uint32) broadcast ram;
elevatorAlert(uint32) broadcast ram;
};
dclass DistributedCountryClubRoom : DistributedLevel {
setCountryClubId(uint16) required broadcast ram;
setRoomId(uint16) required broadcast ram;
setRoomNum(uint8) required broadcast ram;
setSuits(uint32[], uint32[]) broadcast ram;
setBossConfronted(uint32) broadcast ram;
setDefeated() broadcast ram;
forceOuch(uint8) broadcast;
};
dclass DistributedMoleField : DistributedEntity {
setGameStart(int16, uint8, uint16) broadcast;
setClientTriggered() airecv clsend;
whackedMole(int8, int16) airecv clsend;
whackedBomb(int8, int16, int32) airecv clsend;
updateMole(int8, int8) broadcast;
reportToonHitByBomb(uint32, int8, int32) broadcast;
setScore(int16) required broadcast ram;
damageMe() airecv clsend;
setPityWin() broadcast;
};
dclass DistributedCountryClubBattle : DistributedLevelBattle {
};
dclass DistributedClubElevator : DistributedElevatorFSM {
setFloor(int8) broadcast ram;
setLocked(uint16) required broadcast ram;
setEntering(uint16) required broadcast ram;
kickToonsOut() broadcast;
setLatch(uint32) required broadcast ram;
};
dclass DistributedMaze : DistributedEntity {
setRoomDoId(uint32) required broadcast ram;
setGameStart(int16) broadcast;
setClientTriggered() airecv clsend;
setFinishedMaze() airecv clsend;
setGameOver() broadcast;
toonFinished(uint32, uint8, uint8) broadcast;
damageMe() airecv clsend;
};
dclass DistributedBattleWaiters : DistributedBattleFinal {
};
dclass DistributedFoodBelt : DistributedObject {
setBossCogId(uint32) required broadcast ram;
setIndex(uint8) required broadcast ram;
setState(char) broadcast ram;
};
dclass DistributedBanquetTable : DistributedObject {
setIndex(uint8) required broadcast ram;
setNumDiners(uint8) required broadcast ram;
setBossCogId(uint32) required broadcast ram;
setDinerInfo(uint8[], uint8[], uint8[], char[]) required broadcast ram;
setState(char, uint32, int8) broadcast ram;
setDinerStatus(uint8, uint8) broadcast;
requestControl() airecv clsend;
requestFree(int8) airecv clsend;
setPitcherPos(uint8, uint16%360/100, int16) broadcast clsend;
clearSmoothing(int8) broadcast clsend;
firingWater(int32/100, int32/100, int32/100, int32/100, int32/100, int32/100) broadcast clsend;
waterHitBoss(uint8) broadcast clsend;
};
dclass DistributedBattleDiners : DistributedBattleFinal {
};
dclass DistributedGolfSpot : DistributedObject {
setIndex(uint8) required broadcast ram;
setBossCogId(uint32) required broadcast ram;
setState(char, uint32, int8) broadcast ram;
setGoingToReward() broadcast ram;
requestControl() airecv clsend;
requestFree(int8) airecv clsend;
setGolfSpotPos(uint8, uint16%360/100, int16) broadcast clsend;
clearSmoothing(int8) broadcast clsend;
setSwingInfo(uint8, int16/10, uint8) broadcast clsend;
};
struct TireInput {
int32/100 force;
int32/100 heading;
};
dclass DistributedIceGame : DistributedMinigame {
setForceArrowInfo(uint32, int32/100, int32/100) broadcast clsend;
setAvatarChoice(int32/100, int32/100) airecv clsend;
endingPositions(Coord3 []) airecv clsend;
reportScoringMovieDone() airecv clsend;
claimTreasure(uint8) airecv clsend;
claimPenalty(uint8) airecv clsend;
setTireInputs(TireInput []) broadcast;
setTimerStartTime(int16) broadcast;
setFinalPositions(Coord3 []) broadcast;
setMatchAndRound(int8, int8) broadcast;
setScores(int8, int8, int16[]) broadcast;
setNewState(string) broadcast;
setTreasureGrabbed(uint32, uint32) broadcast;
setPenaltyGrabbed(uint32, uint32) broadcast;
};
dclass DistributedCogThiefGame : DistributedMinigame {
throwingPie(uint32, int32, int32/100, int32/100, int32/100, int32/100) clsend broadcast;
hitBySuit(uint32, int32, int8, int32/100, int32/100, int32/100) clsend broadcast airecv;
pieHitSuit(uint32, int32, int8, int32/100, int32/100, int32/100) clsend broadcast airecv;
cogHitBarrel(int32, int8, int8, int32/100, int32/100, int32/100) clsend airecv;
cogAtReturnPos(int32, int8, int8) clsend airecv;
updateSuitGoal(int32, int32, int8, int8, int64, int32/100, int32/100, int32/100) broadcast;
makeCogCarryBarrel(int32, int32, int8, int8, int32/100, int32/100, int32/100) broadcast;
makeCogDropBarrel(int32, int32, int8, int8, int32/100, int32/100, int32/100) broadcast;
markBarrelStolen(int32, int32, | |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.7.2
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class OrganizationsV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_organization(self, body, **kwargs): # noqa: E501
"""Create organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_organization(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1Organization body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Organization
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_organization_with_http_info(body, **kwargs) # noqa: E501
def create_organization_with_http_info(self, body, **kwargs): # noqa: E501
"""Create organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_organization_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1Organization body: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Organization, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_organization" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_organization`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/create', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Organization', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_organization_member(self, owner, body, **kwargs): # noqa: E501
"""Create organization member # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_organization_member(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1OrganizationMember body: Organization body (required)
:param str email: Optional email.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1OrganizationMember
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_organization_member_with_http_info(owner, body, **kwargs) # noqa: E501
def create_organization_member_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Create organization member # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_organization_member_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1OrganizationMember body: Organization body (required)
:param str email: Optional email.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1OrganizationMember, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body',
'email'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_organization_member" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `create_organization_member`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_organization_member`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'email' in local_var_params and local_var_params['email'] is not None: # noqa: E501
query_params.append(('email', local_var_params['email'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/members', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1OrganizationMember', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_organization(self, owner, **kwargs): # noqa: E501
"""Delete organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_organization(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str usage: Owner usage query param.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_organization_with_http_info(owner, **kwargs) # noqa: E501
def delete_organization_with_http_info(self, owner, **kwargs): # noqa: E501
"""Delete organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_organization_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str usage: Owner usage query param.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: | |
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Tuple
from torch.nn import init
from torchvision import models
from torch.nn.utils import spectral_norm
# Initialization of model
def weights_init_normal(m: nn.Module):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def init_weights(net: nn.Module):
net.apply(weights_init_normal)
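# Illustrative note (assumption: any nn.Module can be passed, e.g. the CBR block
# defined further below): init_weights() applies weights_init_normal recursively
# via nn.Module.apply, so a whole network is (re-)initialized in place, e.g.
#
#     block = CBR(in_ch=3, out_ch=64, kernel=3, stride=1, pad=1)
#     init_weights(block)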
# AdaIN modules
def calc_mean_std(feat: torch.Tensor,
eps=1e-5) -> (torch.Tensor, torch.Tensor):
size = feat.size()
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def adain(content_feat: torch.Tensor,
style_feat: torch.Tensor) -> torch.Tensor:
size = content_feat.size()
style_mean, style_std = calc_mean_std(style_feat)
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
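# Quick sanity check for adain (illustrative sketch, not part of the original
# module): after AdaIN, the per-channel mean/std of the output should roughly
# match the style features' statistics.
#
#   content = torch.randn(2, 8, 16, 16)
#   style = torch.randn(2, 8, 16, 16) * 3.0 + 1.0
#   out = adain(content, style)
#   out_mean, out_std = calc_mean_std(out)
#   sty_mean, sty_std = calc_mean_std(style)
#   assert torch.allclose(out_mean, sty_mean, atol=1e-2)
#   assert torch.allclose(out_std, sty_std, atol=1e-2)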
def adain_linear(content_feat: torch.Tensor,
style_feat: torch.Tensor,
sep_dim: int) -> torch.Tensor:
size = content_feat.size()
style_mean, style_std = style_feat[:, :sep_dim], style_feat[:, sep_dim:]
style_mean = style_mean.unsqueeze(2).unsqueeze(3)
style_std = style_std.unsqueeze(2).unsqueeze(3)
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
class Vgg19(nn.Module):
def __init__(self, requires_grad=False, layer=None):
super(Vgg19, self).__init__()
self.layer = layer
vgg_pretrained_features = models.vgg19(pretrained=True).features
if layer == 'four':
self.slice = nn.Sequential()
for x in range(27):
self.slice.add_module(str(x), vgg_pretrained_features[x])
elif layer == 'five':
self.slice = nn.Sequential()
for x in range(30):
self.slice.add_module(str(x), vgg_pretrained_features[x])
else:
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 36):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.layer == 'four':
h = self.slice(x)
elif self.layer == 'five':
h = self.slice(x)
else:
h_relu1 = self.slice1(x)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h = self.slice5(h_relu4)
return h
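# Illustrative use of Vgg19 as a frozen perceptual-feature extractor (a sketch,
# not original code; `fake_image` and `real_image` are assumed to be image
# batches normalized the way torchvision's pretrained VGG expects):
#
#   vgg = Vgg19(requires_grad=False, layer='four').eval()
#   perceptual_loss = F.l1_loss(vgg(fake_image), vgg(real_image))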
# Basic components of generator and discriminator
class CBR(nn.Module):
def __init__(self,
in_ch: int,
out_ch: int,
kernel: int,
stride: int,
pad: int,
up=False,
norm="in",
activ="lrelu",
sn=False):
super(CBR, self).__init__()
modules = []
modules = self._preprocess(modules, up)
modules = self._conv(modules, in_ch, out_ch, kernel, stride, pad, sn)
modules = self._norm(modules, norm, out_ch)
modules = self._activ(modules, activ)
self.cbr = nn.ModuleList(modules)
@staticmethod
def _preprocess(modules: List, up: bool) -> List:
if up:
modules.append(nn.Upsample(scale_factor=2, mode="bilinear"))
return modules
@staticmethod
def _conv(modules: List,
in_ch: int,
out_ch: int,
kernel: int,
stride: int,
pad: int,
sn: bool) -> List:
if sn:
modules.append(spectral_norm(nn.Conv2d(in_ch, out_ch, kernel, stride, pad)))
else:
modules.append(nn.Conv2d(in_ch, out_ch, kernel, stride, pad))
return modules
@staticmethod
def _norm(modules: List,
norm: str,
out_ch: int) -> List:
if norm == "bn":
modules.append(nn.BatchNorm2d(out_ch))
elif norm == "in":
modules.append(nn.InstanceNorm2d(out_ch))
return modules
@staticmethod
def _activ(modules: List, activ: str) -> List:
if activ == "relu":
modules.append(nn.ReLU())
elif activ == "lrelu":
modules.append(nn.LeakyReLU())
return modules
def forward(self, x: torch.Tensor) -> torch.Tensor:
for layer in self.cbr:
x = layer(x)
return x
class ResBlock(nn.Module):
def __init__(self,
in_ch: int,
out_ch: int):
super(ResBlock, self).__init__()
self.res = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, 1, 1),
nn.InstanceNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, 1, 1),
nn.InstanceNorm2d(out_ch)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.res(x) + x
class AdaINResBlock(nn.Module):
def __init__(self, in_ch: int, out_ch: int):
super(AdaINResBlock, self).__init__()
self.c0 = nn.Conv2d(in_ch, out_ch, 3, 1, 1)
self.c1 = nn.Conv2d(out_ch, out_ch, 3, 1, 1)
self.relu = nn.ReLU(inplace=True)
def forward(self,
x: torch.Tensor,
z: torch.Tensor) -> torch.Tensor:
h = self.c0(x)
h = self.relu(adain(h, z))
h = self.c1(h)
h = self.relu(adain(h, z))
return h + x
class AdaINMLPResBlock(nn.Module):
def __init__(self, in_ch: int, out_ch: int):
super(AdaINMLPResBlock, self).__init__()
self.c0 = nn.Conv2d(in_ch, out_ch, 3, 1, 1)
self.c1 = nn.Conv2d(out_ch, out_ch, 3, 1, 1)
self.relu = nn.ReLU(inplace=True)
def forward(self,
x: torch.Tensor,
z: torch.Tensor) -> torch.Tensor:
h = self.c0(x)
h = self.relu(adain_linear(h, z))
h = self.c1(h)
h = self.relu(adain_linear(h, z))
return h + x
class SACat(nn.Module):
def __init__(self, in_ch: int, out_ch: int):
super(SACat, self).__init__()
self.c0 = nn.Conv2d(in_ch*2, out_ch, 1, 1, 0)
self.c1 = nn.Conv2d(out_ch, out_ch, 1, 1, 0)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self,
x: torch.Tensor,
extractor: torch.Tensor) -> torch.Tensor:
h = self.relu(self.c0(torch.cat([x, extractor], dim=1)))
h = self.sigmoid(self.c1(h))
return h
class SECat(nn.Module):
def __init__(self, in_ch: int, out_ch: int):
super(SECat, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.se = nn.Sequential(
nn.Linear(in_ch*2, out_ch, bias=False),
nn.ReLU(),
nn.Linear(out_ch, in_ch),
nn.Sigmoid()
)
def forward(self,
x: torch.Tensor,
extractor: torch.Tensor) -> torch.Tensor:
batch, ch = x.size(0), x.size(1)
x_pool = self.avg_pool(x).view(batch, ch)
extractor = self.avg_pool(extractor).view(batch, ch)
h = self.se(torch.cat([x_pool, extractor], dim=1)).view(batch, ch, 1, 1)
return h.expand_as(x)
class SACatResBlock(nn.Module):
def __init__(self, in_ch: int, out_ch: int):
super(SACatResBlock, self).__init__()
self.c0 = nn.Conv2d(in_ch, out_ch, 3, 1, 1)
self.bn0 = nn.InstanceNorm2d(out_ch)
self.sa = SACat(out_ch, out_ch)
self.relu = nn.ReLU()
def forward(self,
x: torch.Tensor,
extractor: torch.Tensor) -> torch.Tensor:
h = self.relu(self.bn0(self.c0(x)))
h = h * self.sa(h, extractor)
return h + x
class SECatResBlock(nn.Module):
def __init__(self, in_ch: int, out_ch: int):
super(SECatResBlock, self).__init__()
self.cbr = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, 1, 1),
nn.BatchNorm2d(out_ch),
nn.ReLU()
)
self.se = SECat(out_ch, int(out_ch/16))
def forward(self,
x: torch.Tensor,
                extractor: torch.Tensor) -> torch.Tensor:
        h = self.cbr(x)
        h = h * self.se(h, extractor)
return h + x
# Main components
class ContentEncoder(nn.Module):
def __init__(self,
in_ch=3,
base=64):
super(ContentEncoder, self).__init__()
self.encoder = self._make_encoder(in_ch, base)
self.res = nn.Sequential(
ResBlock(base*8, base*8),
ResBlock(base*8, base*8)
)
@staticmethod
def _make_encoder(in_ch: int, base: int):
modules = []
modules.append(CBR(in_ch, base, 7, 1, 3))
modules.append(CBR(base, base*2, 4, 2, 1))
modules.append(CBR(base*2, base*4, 4, 2, 1))
modules.append(CBR(base*4, base*8, 4, 2, 1))
modules.append(CBR(base*8, base*8, 4, 2, 1))
modules = nn.ModuleList(modules)
return modules
def forward(self,
x: torch.Tensor) -> (torch.Tensor, List[torch.Tensor]):
mid_layer_list = []
for layer in self.encoder:
x = layer(x)
mid_layer_list.append(x)
h = self.res(x)
return h, mid_layer_list
class StyleEncoderVgg(nn.Module):
def __init__(self):
super(StyleEncoderVgg, self).__init__()
self.vgg = Vgg19(requires_grad=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.vgg(x)
class StyleEncoder(nn.Module):
def __init__(self, base=64):
super(StyleEncoder, self).__init__()
self.enc = nn.Sequential(
CBR(3, base, 7, 1, 3),
CBR(base, base*2, 4, 2, 1),
CBR(base*2, base*4, 4, 2, 1),
CBR(base*4, base*8, 4, 2, 1),
CBR(base*8, base*8, 4, 2, 1),
ResBlock(base*8, base*8),
ResBlock(base*8, base*8)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.enc(x)
class StyleEncoderMLP(nn.Module):
def __init__(self, base=64):
super(StyleEncoderMLP, self).__init__()
self.enc = nn.Sequential(
CBR(3, base, 7, 1, 3),
CBR(base, base*2, 4, 2, 1),
CBR(base*2, base*4, 4, 2, 1),
CBR(base*4, base*8, 4, 2, 1),
CBR(base*8, base*8, 4, 2, 1),
ResBlock(base*8, base*8),
ResBlock(base*8, base*8)
)
self.pool = nn.AdaptiveAvgPool2d(1)
self.mlp = nn.Sequential(
nn.Linear(base*8, base*8),
nn.ReLU(),
nn.Linear(base*8, base*8),
nn.ReLU(),
nn.Linear(base*8, base*16),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self.enc(x)
h = self.pool(h).squeeze(3).squeeze(2)
h = self.mlp(h)
return h
class GuideDecoder(nn.Module):
def __init__(self, base=64):
super(GuideDecoder, self).__init__()
self.decoder = self._make_decoder(base)
self.out_layer = nn.Sequential(
nn.Conv2d(base, 3, 3, 1, 1),
nn.Tanh()
)
@staticmethod
def _make_decoder(base: int):
modules = []
modules.append(CBR(base*8, base*4, 3, 1, 1, up=True))
modules.append(CBR(base*4, base*4, 3, 1, 1, up=True))
modules.append(CBR(base*4, base*2, 3, 1, 1, up=True))
modules.append(CBR(base*2, base, 3, 1, 1, up=True))
modules = nn.ModuleList(modules)
return modules
def forward(self,
x: torch.Tensor) -> torch.Tensor:
for layer in self.decoder:
x = layer(x)
return self.out_layer(x)
class Decoder(nn.Module):
def __init__(self, base=64):
super(Decoder, self).__init__()
self.decoder = self._make_decoder(base)
self.out_layer = nn.Sequential(
nn.Conv2d(base*2, 3, 7, 1, 3),
nn.Tanh()
)
@staticmethod
def _make_decoder(base: int):
modules = []
modules.append(CBR(base*16, base*8, 3, 1, 1, up=True))
modules.append(CBR(base*16, base*4, 3, 1, 1, up=True))
modules.append(CBR(base*8, base*2, 3, 1, 1, up=True))
modules.append(CBR(base*4, base*2, 3, 1, 1, up=True))
modules = nn.ModuleList(modules)
return modules
def forward(self,
x: torch.Tensor,
mid_layer_list: torch.Tensor) -> torch.Tensor:
for index, layer in enumerate(self.decoder):
x = layer(torch.cat([x, mid_layer_list[-index-1]], dim=1))
return self.out_layer(x)
class Generator(nn.Module):
def __init__(self,
in_ch=3,
base=64,
num_layers=4,
attn_type="sa",
guide=False):
super(Generator, self).__init__()
self.ce = ContentEncoder(in_ch=in_ch)
self.se = self._make_style_encoder(attn_type)
self.res = self._make_reslayer(attn_type, base, num_layers)
self.dec = Decoder()
self.guide = guide
init_weights(self.ce)
init_weights(self.se)
init_weights(self.res)
init_weights(self.dec)
if guide:
self.g_dec1 = GuideDecoder()
self.g_dec2 = GuideDecoder()
init_weights(self.g_dec1)
init_weights(self.g_dec2)
@staticmethod
def _make_style_encoder(attn_type: str) -> nn.Module:
if attn_type == "linear":
model = StyleEncoderMLP()
else:
model = StyleEncoderVgg()
return model
@staticmethod
def _make_reslayer(attn_type: str, base: int, num_layers: int):
if attn_type == "adain":
modules = [AdaINResBlock(base*8, base*8) for _ in range(num_layers)]
elif attn_type == "linear":
modules = [AdaINMLPResBlock(base*8, base*8) for _ in range(num_layers)]
elif attn_type == "sa":
modules = [SACatResBlock(base*8, base*8) for _ in range(num_layers)]
elif attn_type == "se":
modules = [SECatResBlock(base*8, base*8) for _ in range(num_layers)]
modules = nn.ModuleList(modules)
return modules
def forward(self,
                x: torch.Tensor,
import hashlib
from typing import List
from ecc import (
S256Point,
Signature,
)
from helper import (
hash160,
hash256,
)
from timelock import Locktime, Sequence
def is_number_op_code(op_code: bytes) -> bool:
return op_code in OP_CODE_TO_NUMBER
def number_to_op_code(n: int) -> bytes:
'''Returns the op code number for a particular number'''
if NUMBER_TO_OP_CODE.get(n) is None:
raise ValueError(f'No OP code exists for {n}')
return NUMBER_TO_OP_CODE[n]
def op_code_to_number(op_code: bytes) -> int:
'''Returns the n for a particular OP code'''
if OP_CODE_TO_NUMBER.get(op_code) is None:
raise ValueError(f'Not a number OP code: {op_code.hex()}')
return OP_CODE_TO_NUMBER[op_code]
def encode_minimal_num(n: int) -> bytes:
if -1 <= n <= 16:
return number_to_op_code(n)
else:
return encode_num(n)
def decode_minimal_num(n: bytes) -> int:
if is_number_op_code(n):
return op_code_to_number(n)
else:
return decode_num(n)
def encode_num(num: int) -> bytes:
if num == 0:
return OP_0
abs_num = abs(num)
negative = num < 0
result = bytearray()
while abs_num:
result.append(abs_num & 0xff)
abs_num >>= 8
# if the top bit is set,
# for negative numbers we ensure that the top bit is set
# for positive numbers we ensure that the top bit is not set
if result[-1] & 0x80:
if negative:
result.append(0x80)
else:
result.append(0)
elif negative:
result[-1] |= 0x80
return bytes(result)
def decode_num(element: bytes) -> int:
if element == OP_0:
return 0
# reverse for big endian
big_endian = element[::-1]
# top bit being 1 means it's negative
if big_endian[0] & 0x80:
negative = True
result = big_endian[0] & 0x7f
else:
negative = False
result = big_endian[0]
for c in big_endian[1:]:
result <<= 8
result += c
if negative:
return -result
else:
return result
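# Round-trip sketch for the script-number encoding above (illustrative only):
# numbers are stored little-endian with the sign carried in the top bit of the
# last byte, so a 0x00/0x80 padding byte is appended when the magnitude
# already uses that bit.
#
#   assert encode_num(1) == b'\x01'
#   assert encode_num(-1) == b'\x81'
#   assert encode_num(128) == b'\x80\x00'    # padding byte needed
#   assert encode_num(-128) == b'\x80\x80'
#   for n in (0, 1, -1, 127, 128, -128, 255, 300, -300):
#       assert decode_num(encode_num(n)) == n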
def op_0(stack: List[bytes]) -> bool:
stack.append(encode_num(0))
return True
def op_1negate(stack: List[bytes]) -> bool:
stack.append(encode_num(-1))
return True
def op_1(stack: List[bytes]) -> bool:
stack.append(encode_num(1))
return True
def op_2(stack: List[bytes]) -> bool:
stack.append(encode_num(2))
return True
def op_3(stack: List[bytes]) -> bool:
stack.append(encode_num(3))
return True
def op_4(stack: List[bytes]) -> bool:
stack.append(encode_num(4))
return True
def op_5(stack: List[bytes]) -> bool:
stack.append(encode_num(5))
return True
def op_6(stack: List[bytes]) -> bool:
stack.append(encode_num(6))
return True
def op_7(stack: List[bytes]) -> bool:
stack.append(encode_num(7))
return True
def op_8(stack: List[bytes]) -> bool:
stack.append(encode_num(8))
return True
def op_9(stack: List[bytes]) -> bool:
stack.append(encode_num(9))
return True
def op_10(stack: List[bytes]) -> bool:
stack.append(encode_num(10))
return True
def op_11(stack: List[bytes]) -> bool:
stack.append(encode_num(11))
return True
def op_12(stack: List[bytes]) -> bool:
stack.append(encode_num(12))
return True
def op_13(stack: List[bytes]) -> bool:
stack.append(encode_num(13))
return True
def op_14(stack: List[bytes]) -> bool:
stack.append(encode_num(14))
return True
def op_15(stack: List[bytes]) -> bool:
stack.append(encode_num(15))
return True
def op_16(stack: List[bytes]) -> bool:
stack.append(encode_num(16))
return True
def op_nop(stack: List[bytes]) -> bool:
return True
def op_if(stack: List[bytes], items: List[bytes]) -> bool:
if len(stack) < 1:
return False
# go through and re-make the items array based on the top stack element
true_items = []
false_items = []
current_array = true_items
found = False
num_endifs_needed = 1
while len(items) > 0:
item = items.pop(0)
if item in (OP_IF, OP_NOTIF):
# nested if, we have to go another endif
num_endifs_needed += 1
current_array.append(item)
elif num_endifs_needed == 1 and item == OP_ELSE:
current_array = false_items
elif item == OP_ENDIF:
if num_endifs_needed == 1:
found = True
break
else:
num_endifs_needed -= 1
current_array.append(item)
else:
current_array.append(item)
if not found:
return False
element = stack.pop()
if decode_num(element) == 0:
items[:0] = false_items
else:
items[:0] = true_items
return True
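# Illustrative sketch of op_if branch selection (not part of the original
# module; OP_IF, OP_ELSE and OP_ENDIF are the op-code constants defined
# elsewhere in this file, and the data pushes are placeholder bytes):
#
#   stack = [encode_num(1)]
#   items = [b'\x0a', OP_ELSE, b'\x0b', OP_ENDIF, b'\x0c']   # items after OP_IF
#   op_if(stack, items)
#   # items is rewritten in place to [b'\x0a', b'\x0c'] because the popped
#   # condition was non-zero; with encode_num(0) on the stack it would have
#   # become [b'\x0b', b'\x0c'].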
def op_notif(stack: List[bytes], items: List[bytes]) -> bool:
if len(stack) < 1:
return False
# go through and re-make the items array based on the top stack element
true_items = []
false_items = []
current_array = true_items
found = False
num_endifs_needed = 1
while len(items) > 0:
item = items.pop(0)
if item in (99, 100):
# nested if, we have to go another endif
num_endifs_needed += 1
current_array.append(item)
elif num_endifs_needed == 1 and item == 103:
current_array = false_items
elif item == 104:
if num_endifs_needed == 1:
found = True
break
else:
num_endifs_needed -= 1
current_array.append(item)
else:
current_array.append(item)
if not found:
return False
element = stack.pop()
if decode_num(element) == 0:
items[:0] = true_items
else:
items[:0] = false_items
return True
def op_verify(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
if decode_num(element) == 0:
return False
return True
def op_return(stack: List[bytes]) -> bool:
return False
def op_toaltstack(stack: List[bytes], altstack: List[bytes]) -> bool:
if len(stack) < 1:
return False
altstack.append(stack.pop())
return True
def op_fromaltstack(stack: List[bytes], altstack: List[bytes]) -> bool:
if len(altstack) < 1:
return False
stack.append(altstack.pop())
return True
def op_2drop(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.pop()
stack.pop()
return True
def op_2dup(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.extend(stack[-2:])
return True
def op_3dup(stack: List[bytes]) -> bool:
if len(stack) < 3:
return False
stack.extend(stack[-3:])
return True
def op_2over(stack: List[bytes]) -> bool:
if len(stack) < 4:
return False
stack.extend(stack[-4:-2])
return True
def op_2rot(stack: List[bytes]) -> bool:
if len(stack) < 6:
return False
stack.extend(stack[-6:-4])
return True
def op_2swap(stack: List[bytes]) -> bool:
if len(stack) < 4:
return False
stack[-4:] = stack[-2:] + stack[-4:-2]
return True
def op_ifdup(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
if decode_num(stack[-1]) != 0:
stack.append(stack[-1])
return True
def op_depth(stack: List[bytes]) -> bool:
stack.append(encode_num(len(stack)))
return True
def op_drop(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
stack.pop()
return True
def op_dup(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
stack.append(stack[-1])
return True
def op_nip(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack[-2:] = stack[-1:]
return True
def op_over(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.append(stack[-2])
return True
def op_pick(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
n = decode_num(stack.pop())
if len(stack) < n + 1:
return False
stack.append(stack[-n - 1])
return True
def op_roll(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
n = decode_num(stack.pop())
if len(stack) < n + 1:
return False
if n == 0:
return True
stack.append(stack.pop(-n - 1))
return True
def op_rot(stack: List[bytes]) -> bool:
if len(stack) < 3:
return False
stack.append(stack.pop(-3))
return True
def op_swap(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.append(stack.pop(-2))
return True
def op_tuck(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.insert(-2, stack[-1])
return True
def op_size(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
stack.append(encode_num(len(stack[-1])))
return True
def op_equal(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = stack.pop()
element2 = stack.pop()
if element1 == element2:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_equalverify(stack: List[bytes]) -> bool:
return op_equal(stack) and op_verify(stack)
def op_1add(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = decode_num(stack.pop())
stack.append(encode_num(element + 1))
return True
def op_1sub(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = decode_num(stack.pop())
stack.append(encode_num(element - 1))
return True
def op_negate(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = decode_num(stack.pop())
stack.append(encode_num(-element))
return True
def op_abs(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = decode_num(stack.pop())
if element < 0:
stack.append(encode_num(-element))
else:
stack.append(encode_num(element))
return True
def op_not(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
if decode_num(element) == 0:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_0notequal(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
if decode_num(element) == 0:
stack.append(encode_num(0))
else:
stack.append(encode_num(1))
return True
def op_add(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
stack.append(encode_num(element1 + element2))
return True
def op_sub(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
stack.append(encode_num(element2 - element1))
return True
def op_booland(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 and element2:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_boolor(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 or element2:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_numequal(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 == element2:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_numequalverify(stack: List[bytes]) -> bool:
return op_numequal(stack) and op_verify(stack)
def op_numnotequal(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 == element2:
stack.append(encode_num(0))
else:
stack.append(encode_num(1))
return True
def op_lessthan(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element2 < element1:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_greaterthan(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
    if element2 > element1:
        stack.append(encode_num(1))
    else:
        stack.append(encode_num(0))
    return True
lays esteem",
"And gives thy pen both skill and argument.",
"Rise, resty Muse, my love's sweet face survey,",
"If Time have any wrinkle graven there;",
"If any, be a satire to decay,",
"And make Time's spoils despised every where.",
"Give my love fame faster than Time wastes life,",
"So thou prevent'st his scythe and crooked knife."),
("Sonnet 101",
"O truant Muse what shall be thy amends",
"For thy neglect of truth in beauty dyed?",
"Both truth and beauty on my love depends;",
"So dost thou too, and therein dignified.",
"Make answer Muse: wilt thou not haply say,",
"Truth needs no colour, with his colour fixed;",
"Beauty no pencil, beauty's truth to lay;",
"But best is best, if never intermixed'?",
"Because he needs no praise, wilt thou be dumb?",
"Excuse not silence so, for't lies in thee",
"To make him much outlive a gilded tomb",
"And to be praised of ages yet to be.",
"Then do thy office, Muse; I teach thee how",
"To make him seem, long hence, as he shows now."),
("Sonnet 102",
"My love is strengthened, though more weak in seeming;",
"I love not less, though less the show appear;",
"That love is merchandized, whose rich esteeming,",
"The owner's tongue doth publish every where.",
"Our love was new, and then but in the spring,",
"When I was wont to greet it with my lays;",
"As Philomel in summer's front doth sing,",
"And stops his pipe in growth of riper days:",
"Not that the summer is less pleasant now",
"Than when her mournful hymns did hush the night,",
"But that wild music burthens every bough,",
"And sweets grown common lose their dear delight.",
"Therefore like her, I sometime hold my tongue:",
"Because I would not dull you with my song."),
("Sonnet 103",
"Alack! what poverty my Muse brings forth,",
"That having such a scope to show her pride,",
"The argument all bare is of more worth",
"Than when it hath my added praise beside!",
"O! blame me not, if I no more can write!",
"Look in your glass, and there appears a face",
"That over-goes my blunt invention quite,",
"Dulling my lines, and doing me disgrace.",
"Were it not sinful then, striving to mend,",
"To mar the subject that before was well?",
"For to no other pass my verses tend",
"Than of your graces and your gifts to tell;",
"And more, much more, than in my verse can sit,",
"Your own glass shows you when you look in it."),
("Sonnet 104",
"To me, fair friend, you never can be old,",
"For as you were when first your eye I ey'd,",
"Such seems your beauty still. Three winters cold,",
"Have from the forests shook three summers' pride,",
"Three beauteous springs to yellow autumn turned,",
"In process of the seasons have I seen,",
"Three April perfumes in three hot Junes burned,",
"Since first I saw you fresh, which yet are green.",
"Ah! yet doth beauty like a dial-hand,",
"Steal from his figure, and no pace perceived;",
"So your sweet hue, which methinks still doth stand,",
"Hath motion, and mine eye may be deceived:",
"For fear of which, hear this thou age unbred:",
"Ere you were born was beauty's summer dead."),
("Sonnet 105",
"Let not my love be called idolatry,",
"Nor my beloved as an idol show,",
"Since all alike my songs and praises be",
"To one, of one, still such, and ever so.",
"Kind is my love to-day, to-morrow kind,",
"Still constant in a wondrous excellence;",
"Therefore my verse to constancy confined,",
"One thing expressing, leaves out difference.",
"Fair, kind, and true, is all my argument,",
"Fair, kind, and true, varying to other words;",
"And in this change is my invention spent,",
"Three themes in one, which wondrous scope affords.",
"Fair, kind, and true, have often lived alone,",
"Which three till now, never kept seat in one."),
("Sonnet 106",
"When in the chronicle of wasted time",
"I see descriptions of the fairest wights,",
"And beauty making beautiful old rhyme,",
"In praise of ladies dead and lovely knights,",
"Then, in the blazon of sweet beauty's best,",
"Of hand, of foot, of lip, of eye, of brow,",
"I see their antique pen would have expressed",
"Even such a beauty as you master now.",
"So all their praises are but prophecies",
"Of this our time, all you prefiguring;",
"And for they looked but with divining eyes,",
"They had not skill enough your worth to sing:",
"For we, which now behold these present days,",
"Have eyes to wonder, but lack tongues to praise."),
("Sonnet 107",
"Not mine own fears, nor the prophetic soul",
"Of the wide world dreaming on things to come,",
"Can yet the lease of my true love control,",
"Supposed as forfeit to a confined doom.",
"The mortal moon hath her eclipse endured,",
"And the sad augurs mock their own presage;",
"Incertainties now crown themselves assured,",
"And peace proclaims olives of endless age.",
"Now with the drops of this most balmy time,",
"My love looks fresh, and Death to me subscribes,",
"Since, spite of him, I'll live in this poor rhyme,",
"While he insults o'er dull and speechless tribes:",
"And thou in this shalt find thy monument,",
"When tyrants' crests and tombs of brass are spent."),
("Sonnet 108",
"What's in the brain that ink may character",
"Which hath not figured to thee my true spirit?",
"What's new to speak, what now to register,",
"That may express my love, or thy dear merit?",
"Nothing, sweet boy; but yet, like prayers divine,",
"I must each day say o'er the very same;",
"Counting no old thing old, thou mine, I thine,",
"Even as when first I hallowed thy fair name.",
"So that eternal love in love's fresh case,",
"Weighs not the dust and injury of age,",
"Nor gives to necessary wrinkles place,",
"But makes antiquity for aye his page;",
"Finding the first conceit of love there bred,",
"Where time and outward form would show it dead."),
("Sonnet 109",
"O! never say that I was false of heart,",
"Though absence seemed my flame to qualify,",
"As easy might I from my self depart",
"As from my soul which in thy breast doth lie:",
"That is my home of love: if I have ranged,",
"Like him that travels, I return again;",
"Just to the time, not with the time exchanged,",
"So that myself bring water for my stain.",
"Never believe though in my nature reigned,",
"All frailties that besiege all kinds of blood,",
"That it could so preposterously be stained,",
"To leave for nothing all thy sum of good;",
"For nothing this wide universe I call,",
"Save thou, my rose, in it thou art my all."),
("Sonnet 110",
"Alas! 'tis true, I have gone here and there,",
"And made my self a motley to the view,",
"Gored mine own thoughts, sold cheap what is most dear,",
"Made old offences of affections new;",
"Most true it is, that I have looked on truth",
"Askance and strangely; but, by all above,",
"These blenches gave my heart another youth,",
"And worse essays proved thee my best of love.",
"Now all is done, have what shall have no end:",
"Mine appetite I never more will grind",
"On newer proof, to try an older friend,",
"A god in love, to whom I am confined.",
"Then give me welcome, next my heaven the best,",
"Even to thy pure and most most loving breast."),
("Sonnet 111",
"O! for my sake do you with Fortune chide,",
"The guilty goddess of my harmful deeds,",
"That did not better for my life provide",
"Than public means which public manners breeds.",
"Thence comes it that my name receives a brand,",
"And almost thence my nature is subdued",
"To what it works in, like the dyer's hand:",
"Pity me, then, | |
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Tuple, List
import pytz
from ..api import MdApi, TdApi
from vnpy.event import EventEngine
from vnpy.trader.utility import get_folder_path
from vnpy.trader.constant import (
Exchange,
Product,
Direction,
Status,
OrderType
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
CancelRequest,
OrderRequest,
SubscribeRequest,
TickData,
ContractData,
OrderData,
TradeData,
PositionData,
AccountData
)
# Order status map
STATUS_TAP2VT: Dict[str, Status] = {
"0": Status.SUBMITTING,
"1": Status.SUBMITTING,
"4": Status.NOTTRADED,
"5": Status.PARTTRADED,
"6": Status.ALLTRADED,
"9": Status.CANCELLED,
"A": Status.CANCELLED,
"B": Status.REJECTED,
}
# Direction (long/short) map
DIRECTION_TAP2VT: Dict[str, Direction] = {
"N": Direction.NET,
"B": Direction.LONG,
"S": Direction.SHORT,
}
DIRECTION_VT2TAP: Dict[Direction, str] = {v: k for k, v in DIRECTION_TAP2VT.items()}
# Order type map
ORDERTYPE_TAP2VT: Dict[str, OrderType] = {
"1": OrderType.MARKET,
"2": OrderType.LIMIT
}
ORDERTYPE_VT2TAP = {v: k for k, v in ORDERTYPE_TAP2VT.items()}
# Exchange map
EXCHANGE_TAP2VT: Dict[str, Exchange] = {
"SGX": Exchange.SGX,
"INE": Exchange.INE,
"APEX": Exchange.APEX,
"NYMEX": Exchange.NYMEX,
"LME": Exchange.LME,
"COMEX": Exchange.COMEX,
"CBOT": Exchange.CBOT,
"HKEX": Exchange.HKFE,
"CME": Exchange.CME,
"ZCE": Exchange.CZCE,
"DCE": Exchange.DCE,
"TOCOM": Exchange.TOCOM,
"KRX": Exchange.KRX,
"ICUS": Exchange.ICE,
"ICEU": Exchange.ICE
}
EXCHANGE_VT2TAP: Dict[Exchange, str] = {v: k for k, v in EXCHANGE_TAP2VT.items()}
# Error code map
ERROR_VT2TAP: Dict[str, int] = {
"TAPIERROR_SUCCEED": 0
}
# Log level map
LOGLEVEL_VT2TAP: Dict[str, str] = {
"APILOGLEVEL_NONE": "N",
"APILOGLEVEL_ERROR": "E",
"APILOGLEVEL_WARNING": "W",
"APILOGLEVEL_DEBUG": "D"
}
# Flag map
FLAG_VT2TAP: Dict[str, str] = {
"APIYNFLAG_YES": "Y",
"APIYNFLAG_NO": "N",
"TAPI_CALLPUT_FLAG_CALL": "C",
"TAPI_CALLPUT_FLAG_PUT": "P",
"TAPI_CALLPUT_FLAG_NONE": "N"
}
# Other constants
CHINA_TZ = pytz.timezone("Asia/Shanghai")
# Global cache dicts for contract data
commodity_infos: Dict[str, "CommodityInfo"] = {}
contract_infos: Dict[Tuple[str, "Exchange"], "ContractInfo"] = {}
class TapGateway(BaseGateway):
    """
    VeighNa gateway for the Esunny 9.0 overseas-markets trading API (TAP).
    """
default_name: str = "TAP"
default_setting: Dict[str, Any] = {
"行情账号": "",
"行情密码": "",
"行情服务器": "",
"行情端口": 0,
"行情授权码": "",
"交易账号": "",
"交易密码": "",
"交易服务器": "",
"交易端口": 0,
"交易授权码": "",
"子账号": ""
}
exchanges: List[str] = list(EXCHANGE_VT2TAP.keys())
    def __init__(self, event_engine: EventEngine, gateway_name: str):
        """Constructor"""
super().__init__(event_engine, gateway_name)
self.md_api: "QuoteApi" = QuoteApi(self)
self.td_api: "TradeApi" = TradeApi(self)
    def connect(self, setting: dict) -> None:
        """Connect to the trading interface"""
quote_username: str = setting["行情账号"]
quote_password: str = setting["行情密码"]
quote_host: str = setting["行情服务器"]
quote_port: int = setting["行情端口"]
md_authcode: str = setting["行情授权码"]
trade_username: str = setting["交易账号"]
trade_password: str = setting["交易密码"]
trade_host: str = setting["交易服务器"]
trade_port: int = setting["交易端口"]
td_authcode: str = setting["交易授权码"]
client_id: str = setting["子账号"]
self.md_api.connect(
quote_username,
quote_password,
quote_host,
quote_port,
md_authcode
)
self.td_api.connect(
trade_username,
trade_password,
trade_host,
trade_port,
td_authcode,
client_id
)
    def close(self) -> None:
        """Close the interface"""
        pass
    def subscribe(self, req: SubscribeRequest) -> None:
        """Subscribe to market data"""
        self.md_api.subscribe(req)
    def send_order(self, req: OrderRequest) -> str:
        """Send an order"""
        return self.td_api.send_order(req)
    def cancel_order(self, req: CancelRequest) -> None:
        """Cancel an order"""
        self.td_api.cancel_order(req)
    def query_account(self) -> None:
        """Query account funds"""
        pass
    def query_position(self) -> None:
        """Query positions"""
pass
class QuoteApi(MdApi):
    """Market data API"""
    def __init__(self, gateway: TapGateway) -> None:
        """Constructor"""
super().__init__()
self.gateway: TapGateway = gateway
self.gateway_name: str = gateway.gateway_name
    def onRspLogin(self, error: int, data: dict) -> None:
        """Callback of user login request"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log(f"行情服务器登录失败:{error}")
else:
self.gateway.write_log("行情服务器登录成功")
    def onAPIReady(self) -> None:
        """Callback of API ready notification"""
        pass
    def onDisconnect(self, reason: int) -> None:
        """Callback of server disconnection"""
self.gateway.write_log(f"行情服务器连接断开,原因:{reason}")
def onRspSubscribeQuote(
self,
session: int,
error: int,
last: str,
data: dict
    ) -> None:
        """Callback of market data subscription"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log(f"订阅行情失败:{error}")
else:
self.update_tick(data)
    def onRtnQuote(self, data: dict) -> None:
        """Market data push"""
        self.update_tick(data)
    def update_tick(self, data: dict) -> None:
        """Convert a tick snapshot into a TickData object"""
symbol: str = data["CommodityNo"] + data["ContractNo1"]
exchange: Exchange = EXCHANGE_TAP2VT[data["ExchangeNo"]]
contract_info: ContractInfo = contract_infos.get((symbol, exchange), None)
if not contract_info:
self.gateway.write_log(f"行情合约信息无法匹配:{symbol}和{exchange}")
return
tick: TickData = TickData(
symbol=symbol,
exchange=exchange,
datetime=generate_datetime(data["DateTimeStamp"]),
name=contract_info.name,
volume=data["QTotalQty"],
last_price=data["QLastPrice"],
last_volume=data["QLastQty"],
limit_up=data["QLimitUpPrice"],
limit_down=data["QLimitDownPrice"],
open_price=data["QOpeningPrice"],
high_price=data["QHighPrice"],
low_price=data["QLowPrice"],
pre_close=data["QPreClosingPrice"],
bid_price_1=data["QBidPrice"][0],
bid_price_2=data["QBidPrice"][1],
bid_price_3=data["QBidPrice"][2],
bid_price_4=data["QBidPrice"][3],
bid_price_5=data["QBidPrice"][4],
ask_price_1=data["QAskPrice"][0],
ask_price_2=data["QAskPrice"][1],
ask_price_3=data["QAskPrice"][2],
ask_price_4=data["QAskPrice"][3],
ask_price_5=data["QAskPrice"][4],
bid_volume_1=data["QBidQty"][0],
bid_volume_2=data["QBidQty"][1],
bid_volume_3=data["QBidQty"][2],
bid_volume_4=data["QBidQty"][3],
bid_volume_5=data["QBidQty"][4],
ask_volume_1=data["QAskQty"][0],
ask_volume_2=data["QAskQty"][1],
ask_volume_3=data["QAskQty"][2],
ask_volume_4=data["QAskQty"][3],
ask_volume_5=data["QAskQty"][4],
gateway_name=self.gateway_name,
)
self.gateway.on_tick(tick)
def connect(
self,
username: str,
password: str,
host: str,
port: int,
auth_code: str
    ) -> None:
        """Connect to the server"""
self.init()
        # Basic API settings
path: Path = get_folder_path(self.gateway_name.lower())
self.setTapQuoteAPIDataPath(str(path).encode("GBK"))
self.setTapQuoteAPILogLevel(LOGLEVEL_VT2TAP["APILOGLEVEL_NONE"])
        # Create the API instance
req: dict = {
"AuthCode": auth_code,
"KeyOperationLogPath": str(path).encode("GBK")
}
self.createTapQuoteAPI(req, 0)
        # Set the server address
self.setHostAddress(host, port)
        # Log in
data: dict = {
"UserNo": username,
"Password": password,
"ISModifyPassword": FLAG_VT2TAP["APIYNFLAG_NO"],
"ISDDA": FLAG_VT2TAP["APIYNFLAG_NO"]
}
self.login(data)
    def subscribe(self, req: SubscribeRequest):
        """Subscribe to market data"""
contract_info: ContractInfo = contract_infos.get((req.symbol, req.exchange), None)
if not contract_info:
self.gateway.write_log(
f"找不到匹配的合约:{req.symbol}和{req.exchange.value}")
return
tap_contract: dict = {
"ExchangeNo": EXCHANGE_VT2TAP[req.exchange],
"CommodityType": contract_info.commodity_type,
"CommodityNo": contract_info.commodity_no,
"ContractNo1": contract_info.contract_no,
"CallOrPutFlag1": FLAG_VT2TAP["TAPI_CALLPUT_FLAG_NONE"],
"CallOrPutFlag2": FLAG_VT2TAP["TAPI_CALLPUT_FLAG_NONE"]
}
self.subscribeQuote(tap_contract)
class TradeApi(TdApi):
    """Trade API"""
    def __init__(self, gateway: TapGateway) -> None:
        """Constructor"""
        super().__init__()
        self.gateway: TapGateway = gateway
        self.gateway_name: str = gateway.gateway_name
        self.account_no: str = ""          # used when sending orders
        self.client_id: str = ""           # sub-account; may be left blank if none
        self.cancel_reqs: Dict[str, CancelRequest] = {}        # pending cancel requests for unfilled orders
self.sys_local_map: Dict[str, str] = {}
self.local_sys_map: Dict[str, str] = {}
self.sys_server_map: Dict[str, str] = {}
    def onConnect(self) -> None:
        """Callback of successful server connection"""
        self.gateway.write_log("交易服务器连接成功")
    def onRspLogin(self, error: int, data: dict) -> None:
        """Callback of user login request"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log(f"交易服务器登录失败,错误码:{error}")
else:
self.gateway.write_log("交易服务器登录成功")
    def onAPIReady(self, code: int) -> None:
        """Callback of API ready notification"""
self.qryCommodity()
def onRspQryCommodity(
self,
session: int,
error: int,
last: str,
data: dict,
    ) -> None:
        """Callback of commodity query"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log("查询交易品种信息失败")
return
commodity_info: CommodityInfo = CommodityInfo(
name=data["CommodityEngName"],
size=int(data["ContractSize"]),
pricetick=data["CommodityTickSize"]
)
key: tuple = (data["CommodityNo"], data["CommodityType"])
commodity_infos[key] = commodity_info
if last == "Y":
self.gateway.write_log("查询交易品种信息成功")
req = {}
self.qryContract(req)
def onRspQryContract(
self,
session: int,
error: int,
last: str,
data: dict
    ) -> None:
        """Callback of contract query"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log("查询交易合约信息失败")
return
exchange: Exchange = EXCHANGE_TAP2VT.get(data["ExchangeNo"], None)
key: tuple = (data["CommodityNo"], data["CommodityType"])
commodity_info: CommodityInfo = commodity_infos.get(key, None)
if not data or not exchange or not commodity_info:
return
if data["CommodityType"] == "F":
symbol: str = data["CommodityNo"] + data["ContractNo1"]
if commodity_info.name:
name: str = f"{commodity_info.name} {data['ContractNo1']}"
else:
name = symbol
contract: ContractData = ContractData(
symbol=symbol,
exchange=exchange,
name=name,
product=Product.FUTURES,
size=commodity_info.size,
pricetick=commodity_info.pricetick,
net_position=True,
gateway_name=self.gateway.gateway_name
)
self.gateway.on_contract(contract)
contract_info: ContractInfo = ContractInfo(
name=contract.name,
exchange_no=data["ExchangeNo"],
contract_no=data["ContractNo1"],
commodity_type=data["CommodityType"],
commodity_no=data["CommodityNo"],
)
contract_infos[(contract.symbol, contract.exchange)] = contract_info
if last == "Y":
self.gateway.write_log("查询交易合约信息成功")
self.query_account()
    def onRtnPositionProfit(self, data: dict) -> None:
        """Position profit update push"""
pass
def onRspQryAccount(
self,
session: int,
error: int,
last: str,
data: dict
    ) -> None:
        """Callback of account query"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log("查询账号信息失败")
return
req: dict = {
"AccountNo": data["AccountNo"]
}
self.qryFund(req)
def onRspQryFund(
self,
session: int,
error: int,
last: str,
data: dict
    ) -> None:
        """Callback of account fund query"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log("查询资金信息失败")
return
self.update_account(data)
if last == "Y":
self.gateway.write_log("查询资金信息成功")
self.query_position()
    def onRtnFund(self, data: dict) -> None:
        """Account fund data push"""
self.update_account(data)
def onRspQryPositionSummary(
self,
session: int,
error: int,
last: str,
data: dict
    ) -> None:
        """Callback of position summary query"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log("查询持仓信息失败")
return
if data:
self.update_position(data)
if last == "Y":
self.gateway.write_log("查询持仓信息成功")
self.query_order()
    def onRtnPositionSummary(self, data: dict) -> None:
        """Position summary update push"""
self.update_position(data)
def onRspQryOrder(
self,
session: int,
error: int,
last: str,
data: dict
    ) -> None:
        """Callback of today's order query"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log("查询委托信息失败")
return
if data:
self.update_order(data)
if last == "Y":
self.gateway.write_log("查询委托信息成功")
self.query_trade()
    def onRtnOrder(self, data: dict) -> None:
        """Order update push"""
if data["ErrorCode"] != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log(f"委托下单失败,错误码: {data['ErrorCode']}")
return
self.update_order(data)
def onRspQryFill(
self,
session: int,
error: int,
last: str,
data: dict
    ) -> None:
        """Callback of today's trade query"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log("查询成交信息失败")
return
if data:
self.update_trade(data)
if last == "Y":
self.gateway.write_log("查询成交信息成功")
    def onRtnFill(self, data: dict) -> None:
        """Trade update push"""
self.update_trade(data)
def onRspOrderAction(
self,
session: int,
error: int,
data: dict
    ) -> None:
        """Callback of order cancellation and amendment"""
if error != ERROR_VT2TAP["TAPIERROR_SUCCEED"]:
self.gateway.write_log(f"委托操作失败:{error}")
return
    def update_account(self, data: dict) -> None:
        """Update and push account data"""
self.account_no: str = data["AccountNo"]
account: AccountData = AccountData(
accountid=data["AccountNo"],
balance=data["Balance"],
frozen=data["Balance"] - data["Available"],
gateway_name=self.gateway_name
)
self.gateway.on_account(account)
    def update_position(self, data: dict) -> None:
        """Update and push position data"""
position: PositionData = PositionData(
symbol=data["CommodityNo"] + data["ContractNo"],
exchange=EXCHANGE_TAP2VT.get(data["ExchangeNo"], None),
direction=DIRECTION_TAP2VT[data["MatchSide"]],
volume=data["PositionQty"],
price=data["PositionPrice"],
gateway_name=self.gateway_name
)
self.gateway.on_position(position)
    def update_order(self, data: dict) -> None:
        """Update and push order data"""
        # Filter out orders that are being cancelled or amended
if data["OrderState"] in {"7", "8"}:
return
self.local_sys_map[data["ClientOrderNo"]] = data["OrderNo"]
self.sys_local_map[data["OrderNo"]] = data["ClientOrderNo"]
self.sys_server_map[data["OrderNo"]] = data["ServerFlag"]
order: OrderData = OrderData(
symbol=data["CommodityNo"] + data["ContractNo"],
exchange=EXCHANGE_TAP2VT.get(data["ExchangeNo"], None),
orderid=data["ClientOrderNo"],
type=ORDERTYPE_TAP2VT.get(data["OrderType"], data["OrderType"]),
direction=DIRECTION_TAP2VT[data["OrderSide"]],
price=data["OrderPrice"],
volume=data["OrderQty"],
traded=data["OrderMatchQty"],
status=STATUS_TAP2VT.get(data["OrderState"], Status.SUBMITTING),
datetime=generate_datetime(data["OrderInsertTime"]),
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
        # Send any pending cancel request for this order
if data["ClientOrderNo"] in self.cancel_reqs:
            req: CancelRequest = self.cancel_reqs.pop(data["ClientOrderNo"])
self.cancel_order(req)
    def update_trade(self, data: dict) -> None:
        """Update and push trade data"""
orderid: str = self.sys_local_map[data["OrderNo"]]
trade: TradeData = TradeData(
symbol=data["CommodityNo"] + data["ContractNo"],
exchange=EXCHANGE_TAP2VT.get(data["ExchangeNo"], None),
orderid=orderid,
tradeid=data["MatchNo"],
direction=DIRECTION_TAP2VT[data["MatchSide"]],
price=data["MatchPrice"],
volume=data["MatchQty"],
datetime=generate_datetime(data["MatchDateTime"]),
gateway_name=self.gateway_name
)
self.gateway.on_trade(trade)
def connect(
self,
username: str,
password: str,
host: str,
port: int,
auth_code: str,
client_id: str
    ) -> None:
        """Connect to the server"""
self.client_id = client_id
self.init()
        # Basic API settings
path: Path = get_folder_path(self.gateway_name.lower())
self.setITapTradeAPIDataPath(str(path).encode("GBK"))
self.setITapTradeAPILogLevel(LOGLEVEL_VT2TAP["APILOGLEVEL_NONE"])
        # Create the API instance
req: dict = {
"AuthCode": auth_code,
"KeyOperationLogPath": str(path).encode("GBK")
}
self.createITapTradeAPI(req, 0)
        # Set the server address
self.setHostAddress(host, port)
        # Log in
data: dict = {
"UserNo": username,
"Password": password,
"ISModifyPassword": FLAG_VT2TAP["APIYNFLAG_NO"],
"NoticeIgnoreFlag": "TAPI_NOTICE_IGNORE_POSITIONPROFIT"
}
self.login(data)
    def send_order(self, req: OrderRequest) -> str:
        """Send an order"""
contract_info: ContractInfo = contract_infos.get((req.symbol, req.exchange), None)
if not contract_info:
self.gateway.write_log(f"找不到匹配的合约:{req.symbol}和{req.exchange.value}")
return ""
if req.type not in ORDERTYPE_VT2TAP:
self.gateway.write_log(f"不支持的委托类型: {req.type.value}")
return ""
order_req: dict = {
"AccountNo": self.account_no,
"ExchangeNo": contract_info.exchange_no,
| |
import seaborn as sns
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.ticker import ScalarFormatter
from matplotlib import lines
import pandas as pd
import numpy as np
from pathlib import Path
import os
import sys
import csv
import time
import yaml
cache = {}
exp_base_folder = '/psp/experiments-data/'
distros = {
'Figure3': 'DISP2',
'Figure4_a': 'DISP2',
'Figure4_b': 'SBIM2',
'Figure5_a': 'DISP2',
'Figure5_b': 'SBIM2',
'Figure6': 'TPCC',
'Figure7': 'ROCKSDB'
}
workloads = {
'BIM1': {
'avg_s': 1,
'name': '90.0:0.5 -- 10.0:5.5',
'max_load': 14000000,
'distribution': 'bimodal-90.0:0.5-10.0:5.5',
'SHORT': { 'MEAN': .5, 'RATIO': .9, 'YLIM': 60 },
'LONG': { 'MEAN': 5.5, 'RATIO': .1, 'YLIM': 60 },
'UNKNOWN': { 'MEAN': 1, 'RATIO': 1, 'YLIM': 60 }
},
'BIM2': {
'avg_s': 1,
'max_load': 14000000,
'name': '99.9:0.5-0.1:500.5',
'distribution': 'bimodal-99.9:0.5-0.1:500.5',
'SHORT': { 'MEAN': .5, 'RATIO': .999, 'YLIM': 400 },
'LONG': { 'MEAN': 500.5, 'RATIO': .001, 'YLIM': 1500 },
'UNKNOWN': { 'MEAN': 1, 'RATIO': 1, 'YLIM': 1500 }
},
'SBIM2': {
'avg_s': 2.9975,
'max_load': 4670558,
'name': '99.5:0.5-0.05:500',
'distribution': 'bimodal-99.5:0.5-0.5:500.0',
'SHORT': { 'MEAN': .5, 'RATIO': .995, 'YLIM': 300 },
'LONG': { 'MEAN': 500, 'RATIO': .005, 'YLIM': 3600 },
'UNKNOWN': { 'MEAN': 2.9975, 'RATIO': 1, 'YLIM': 3600 }
},
'DISP1': {
'avg_s': 5.5,
'name': '50.0:1.0 -- 50.0:10.0',
'max_load': 2545454,
'distribution': 'bimodal-50.0:1.0-50.0:10.0',
'SHORT': { 'MEAN': 1, 'RATIO': .5, 'YLIM': 50 },
'LONG': { 'MEAN': 10, 'RATIO': .5, 'YLIM': 300 },
'UNKNOWN': { 'MEAN': 5.5, 'RATIO': 1, 'YLIM': 300
}
},
'DISP2': {
'avg_s': 50.5,
'name': '50.0:1.0 -- 50.0:100.0',
'max_load': 277227,
'distribution': 'bimodal-50.0:1.0-50.0:100.0',
'SHORT': { 'MEAN': 1.0, 'RATIO': .5, 'YLIM': 300 },
'LONG': { 'MEAN': 100.0, 'RATIO': .5, 'YLIM': 300 },
'UNKNOWN': { 'MEAN': 50.5, 'RATIO': 1, 'YLIM': 300 }
},
'DISP3': {
'avg_s': 50.950,
        'name': '95.0:1.0 -- 0.5:100.0',
'max_load': 274779,
'distribution': 'bimodal-95.0:1.0-0.5:100.0',
'SHORT': { 'MEAN': 1.0, 'RATIO': .95, 'YLIM': 300 },
'LONG': { 'MEAN': 100.0, 'RATIO': .5, 'YLIM': 300 },
'UNKNOWN': { 'MEAN': 50.5, 'RATIO': 1, 'YLIM': 300 }
},
'ROCKSDB': {
'avg_s': 526,
'name': 'ROCKSDB',
'max_load': 45000,
'distribution': 'bimodal-50.0:0.0-50.0:0.0',
'GET': { 'MEAN': 2.0, 'RATIO': .5, 'YLIM': 300 },
'SCAN': { 'MEAN': 1050.0, 'RATIO': .5, 'YLIM': 1000 },
'UNKNOWN': { 'MEAN': 526, 'RATIO': 1, 'YLIM': 200 }
},
'TPCC': {
'avg_s': 19,
'name': 'TPC-C',
'max_load': 735000,
'distribution': 'tpcc',
'NewOrder': { 'MEAN': 20, 'RATIO': .44, 'YLIM': 250 },
'Payment': { 'MEAN': 5.7, 'RATIO': .44, 'YLIM': 250 },
'Delivery': { 'MEAN': 88, 'RATIO': .04, 'YLIM': 250 },
'OrderStatus': { 'MEAN': 6, 'RATIO': .04, 'YLIM': 250 },
'StockLevel': { 'MEAN': 100, 'RATIO': .04, 'YLIM': 250 },
'UNKNOWN': { 'MEAN': 19, 'RATIO': 1, 'YLIM': 50 }
}
}
apps = {
'TPCC': ['Payment', 'OrderStatus', 'NewOrder', 'Delivery', 'StockLevel'],
'MB': ['SHORT', 'LONG'],
'REST': ['PAGE', 'REGEX'],
'ROCKSDB': ['GET', 'SCAN'],
}
policies = {
'DFCFS': 'd-FCFS',
'CFCFS': 'c-FCFS',
'shen-DFCFS': 'shen-DFCFS',
'shen-CFCFS': 'shen-CFCFS',
'SJF': 'ARS-FP',
'EDF': 'EDF',
# 'CSCQ-half': 'CSCQ-half',
# 'CSCQ': 'ARS-CS',
# 'EDFNP': 'ARS-EDF',
'cPRESQ': 'cPRESQ',
'cPREMQ': 'cPREMQ',
'DARC': 'DARC'
}
# For final print
pol_names = {
'DARC': 'DARC',
'c-FCFS': 'c-FCFS',
'd-FCFS': 'd-FCFS',
'cPREMQ': 'c-PRE',
'cPRESQ': 'c-PRE',
'ARS-FP': 'FP',
'EDF': 'EDF',
'shen-DFCFS': 'd-FCFS',
'shen-CFCFS': 'c-FCFS'
}
system_pol = {
'DFCFS': 'Perséphone',
'CFCFS': 'Perséphone',
'shen-DFCFS': 'Shenango',
'shen-CFCFS': 'Shenango',
'SJF': 'Perséphone',
'CSCQ-half': 'Perséphone',
'CSCQ': 'Perséphone',
'EDF': 'Perséphone',
'cPRESQ': 'Shinjuku',
'cPREMQ': 'Shinjuku',
'DARC': 'Perséphone'
}
trace_label_to_dtype = {
'client-end-to-end' : ['SENDING', 'COMPLETED'],
'client-receive' : ['READING', 'COMPLETED'],
'client-send' : ['SENDING', 'READING'],
}
CLT_TRACE_ORDER = [
'SENDING',
'READING',
'COMPLETED'
]
def read_profiling_node(exp, app, orders=CLT_TRACE_ORDER, verbose=True):
# First get traces
exp_folder = os.path.join(exp_base_folder, exp, app, '')
filename = os.path.join(exp_folder, 'traces')
if not Path(filename).is_file():
print('{} does not exist. Skipping {} {} traces.'.format(filename, exp, app))
return pd.DataFrame()
if verbose:
print(f"Parsing {filename}")
app_trace_df = pd.read_csv(filename, delimiter='\t')
app_trace_df = app_trace_df[app_trace_df.COMPLETED > 0]
if verbose:
print(f'{app} traces shape: {app_trace_df.shape}')
#Rename QLEN
cols = list(set(orders) & set(app_trace_df.columns)) + ['REQ_ID', 'REQ_TYPE', 'MEAN_NS', 'SCHED_ID']
return app_trace_df[cols]#.set_index('REQ_ID')
def read_exp_traces(exp, verbose=True, clients=None):
if clients is None:
df = read_profiling_node(exp, 'client', verbose=verbose)
else:
clt_dfs = []
for clt in clients:
clt_df = read_profiling_node(
exp, 'client'+str(clt), verbose=verbose
)
clt_df['TIME'] = clt_df['SENDING'] - min(clt_df['SENDING'])
clt_dfs.append(clt_df)
df = pd.concat(clt_dfs)
#TODO check number of rows here
return df
def prepare_traces(exps, data_types=list(trace_label_to_dtype), reset_time=True,
reset_cache=False, seconds=True, pctl=1, req_type=None,
verbose=False, get_schedule_data=False, **kwargs):
if not isinstance(data_types, list):
data_types = [data_types]
setups = {}
for exp in exps:
if (not reset_cache) and exp in cache:
setups[exp]= cache[exp]
continue
# First gather the traces
workload = exp.split('_')[2].split('.')[0]
if verbose:
print(f'================= PREPARING DATA FOR EXP {exp} =================')
main_df = read_exp_traces(exp, verbose=verbose, **kwargs)
if main_df.empty:
print('No data for {}'.format(exp))
continue
setups[exp] = {}
for data_type in data_types:
if verbose:
print(f'PARSING {data_type}')
c0 = trace_label_to_dtype[data_type][0]
c1 = trace_label_to_dtype[data_type][1]
if c0 not in main_df.columns or c1 not in main_df.columns:
print('{} not present in traces'.format(c0))
continue
setups[exp][data_type] = pd.DataFrame({
#'TIME': main_df[c0],
'TIME': main_df.TIME,
'VALUE': main_df[c1] - main_df[c0],
'SLOWDOWN': (main_df[c1] - main_df[c0]) / main_df['MEAN_NS'],
'SCHED_ID': main_df.SCHED_ID,
'REQ_TYPE': main_df.REQ_TYPE,
})
setups[exp][data_type].SCHED_ID = setups[exp][data_type].SCHED_ID.astype('uint64')
setups[exp][data_type].TIME = setups[exp][data_type].TIME.astype('uint64') #FIXME: is this forcing a copy???
setups[exp][data_type].VALUE = setups[exp][data_type].VALUE.astype('uint64')
if req_type is not None:
if setups[exp][data_type][setups[exp][data_type].REQ_TYPE == req_type].empty:
setups[exp] = {}
print('No {} in {} traces'.format(req_type, exp))
continue
setups[exp][data_type] = setups[exp][data_type][setups[exp][data_type].REQ_TYPE == req_type]
if verbose:
print('Filtering {} requests ({} found)'.format(req_type, setups[exp][data_type].shape[0]))
if reset_time:
# This is wrong for multiple clients
setups[exp][data_type].TIME -= min(setups[exp][data_type].TIME)
duration = (max(setups[exp][data_type].TIME) - min(setups[exp][data_type].TIME)) / 1e9
if verbose:
print(f"Experiment spanned {duration} seconds")
if verbose:
print(setups[exp][data_type].VALUE.describe([.5, .75, .9, .99, .9999, .99999, .999999]))
if seconds:
setups[exp][data_type].TIME /= 1e9
if pctl != 1:
setups[exp][data_type] = setups[exp][data_type][setups[exp][data_type].VALUE >= setups[exp][data_type].VALUE.quantile(pctl)]
# Then if needed retrieve other experiment data
if get_schedule_data:
df = setups[exp][data_type]
# Define number of bins (total duration in nanoseconds / 1e8, for 100ms bins)
bins = int((max(df.TIME) - min(df.TIME)) / 1e8)
print(f'Slicing {(max(df.TIME) - min(df.TIME)) / 1e9} seconds of data in {bins} bins')
# Create the bins
t0 = time.time()
df['time_bin'] = pd.cut(x=df.TIME, bins=bins)
print(f'Bins created in {time.time() - t0}')
# Get schedule information
sched_name = exp.split('_')[2] + '.yml'
sched_file = os.path.join(exp_base_folder, exp, sched_name)
with open(sched_file, 'r') as f:
schedule = yaml.load(f, Loader=yaml.FullLoader)
alloc_file = os.path.join(exp_base_folder, exp, 'server', 'windows')
alloc = pd.DataFrame()
if os.path.exists(alloc_file):
with open(alloc_file, 'r') as f:
types = {
'ID': 'uint32', 'START': 'float64', 'END': 'float64',
'GID': 'uint32', 'RES': 'uint32', 'STEAL': 'uint32',
'COUNT': 'uint32', 'UPDATED': 'uint32', 'QLEN': 'uint32'
}
alloc = pd.read_csv(f, delimiter='\t', dtype=types)
if not alloc.empty:
# Add a last datapoint to prolongate the line
alloc.START -= min(alloc.START)
alloc.START /= 1e9
alloc.END -= min(alloc.END)
alloc.END /= 1e9
last_dp1 = pd.DataFrame(alloc[-1:].values, index=[max(alloc.index)+1], columns=alloc.columns).astype(alloc.dtypes.to_dict())
last_dp2 = pd.DataFrame(alloc[-2:-1].values, index=[max(alloc.index)+2], columns=alloc.columns).astype(alloc.dtypes.to_dict())
assert(last_dp1.iloc[0].GID != last_dp2.iloc[0].GID)
last_dp1.START = max(df.TIME) / 1e9
last_dp2.START = max(df.TIME) / 1e9
alloc = alloc.append([last_dp1, last_dp2])
# Get throughput
throughput_df = read_client_tp(exp)
# throughput_df.N /= 1000
throughput_df.TIME -= min(throughput_df.TIME)
setups[exp]['bins'] = df
setups[exp]['tp'] = throughput_df
setups[exp]['schedule'] = schedule
setups[exp]['alloc'] = alloc
cache[exp] = setups[exp]
return setups
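# Illustrative usage sketch for prepare_traces (the experiment name below is
# made up; real names must carry the workload as the third '_'-separated token
# and the traces must exist under exp_base_folder). Passing clients= is needed
# so that read_exp_traces computes the TIME column.
#
#   exps = ['2021-01-01_DARC_DISP2.0']
#   setups = prepare_traces(exps, data_types='client-end-to-end',
#                           clients=[0], reset_time=True, verbose=True)
#   lat = setups[exps[0]]['client-end-to-end']
#   print(lat.VALUE.quantile(.999) / 1e3)   # p99.9 latency in microseconds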
def read_client_tp(exp, clients=[0]):
clt_dfs = []
for client in clients:
filename = os.path.join(exp_base_folder, exp, 'client'+str(client), 'traces_throughput')
clt_dfs.append(pd.read_csv(filename, delimiter="\t"))
return pd.concat(clt_dfs)
def read_exp_names_from_file(filename, basedir=exp_base_folder):
filepath = Path(basedir, filename)
if not filepath.is_file():
print('{} does not exist'.format(filepath))
exps = []
with open(filepath, 'r') as f:
lines = f.readlines()
for line in lines:
exps.append(os.path.basename(line.rstrip()))
return exps
def merge_hists(df):
base_cols = {}
base_cols['MIN'] = min(df['MIN'])
base_cols['MAX'] = max(df['MAX'])
base_cols['COUNT'] = df['COUNT'].sum()
base_cols['TOTAL'] = df['TOTAL'].sum()
# This is stupid
buckets = list(map(str,sorted(list(map(int, df.drop(['MIN', 'MAX', 'COUNT', 'TOTAL'], axis=1).columns)))))
return pd.DataFrame({**base_cols, **dict(df[buckets].sum())}, index=[0])
def compute_pctls(hist):
pctls = {}
pctls['MIN'] = hist['MIN'] / 1000
pctls['MAX'] = hist['MAX'] / 1000
count = int(hist['COUNT'])
pctls['MEAN'] = (int(hist['TOTAL']) / count) / 1000
hist = hist.drop(['MIN', 'MAX', 'COUNT', 'TOTAL'], axis=1).sum()
c = 0
#Assume 1000ns buckets and consider average value.
for bucket in range(hist.shape[0]):
b_count = hist.iloc[bucket]
b_value = (int(hist.index[bucket]) + 500) / 1000
if c < count * .25 and c + b_count >= count * .25:
pctls['p25'] = b_value
if c < count * .5 and c + b_count >= count * .5:
pctls['MEDIAN'] = b_value
if c < count * .75 and c + b_count >= count * .75:
pctls['p75'] = b_value
if c < count * .99 and c + b_count >= count * .99:
pctls['p99'] = b_value
if c < count * .999 and c + b_count >= count * .999:
pctls['p99.9'] = b_value
if c < count * .9999 and c + b_count >= count * .9999:
pctls['p99.99'] = b_value
c += b_count
    return pctls
*args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Adressen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Adressen, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Adressen)
signals.post_delete.connect(remove_permissions, sender=Adressen)
#
# Straßen (streets)
#
class Strassen(models.Model):
uuid = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False)
strasse = models.CharField('Straße', max_length=255, editable=False)
class Meta:
managed = False
codelist = True
db_table = 'basisdaten\".\"strassenliste_datenerfassung'
verbose_name = 'Straße'
verbose_name_plural = 'Straßen'
description = 'Straßen in Mecklenburg-Vorpommern'
list_fields = {
'strasse': 'Straße'
}
        # important: only this way are drop-down entries in forms of
        # child tables listed in sorted order
ordering = ['strasse']
def __str__(self):
return self.strasse
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Strassen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Strassen, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Strassen)
signals.post_delete.connect(remove_permissions, sender=Strassen)
#
# Codelisten
#
# Angebote bei Mobilpunkten
class Angebote_Mobilpunkte(models.Model):
uuid = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False)
angebot = models.CharField(
'Angebot', max_length=255, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
class Meta:
managed = False
codelist = True
db_table = 'codelisten\".\"angebote_mobilpunkte'
verbose_name = 'Angebot bei einem Mobilpunkt'
verbose_name_plural = 'Angebote bei Mobilpunkten'
description = 'Angebote bei Mobilpunkten'
list_fields = {
'angebot': 'Angebot'
}
        # important: only this way are drop-down entries in forms of
        # child tables listed in sorted order
ordering = ['angebot']
def __str__(self):
return self.angebot
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Angebote_Mobilpunkte, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Angebote_Mobilpunkte, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Angebote_Mobilpunkte)
signals.post_delete.connect(remove_permissions, sender=Angebote_Mobilpunkte)
# Angelberechtigungen
class Angelberechtigungen(models.Model):
uuid = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False)
angelberechtigung = models.CharField(
'Angelberechtigung', max_length=255, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
class Meta:
managed = False
codelist = True
db_table = 'codelisten\".\"angelberechtigungen'
verbose_name = 'Angelberechtigung'
verbose_name_plural = 'Angelberechtigungen'
description = 'Angelberechtigungen'
list_fields = {
'angelberechtigung': 'Angelberechtigung'
}
        # important: only this way are drop-down entries in forms of
        # child tables listed in sorted order
ordering = ['angelberechtigung']
def __str__(self):
return self.angelberechtigung
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Angelberechtigungen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Angelberechtigungen, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Angelberechtigungen)
signals.post_delete.connect(remove_permissions, sender=Angelberechtigungen)
# Arten von Baudenkmalen
class Arten_Baudenkmale(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_baudenkmale'
verbose_name = 'Art eines Baudenkmals'
verbose_name_plural = 'Arten von Baudenkmalen'
description = 'Arten von Baudenkmalen'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Baudenkmale, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Baudenkmale, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Baudenkmale)
signals.post_delete.connect(remove_permissions, sender=Arten_Baudenkmale)
# Arten von Durchlässen
class Arten_Durchlaesse(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_durchlaesse'
verbose_name = 'Art eines Durchlasses'
verbose_name_plural = 'Arten von Durchlässen'
description = 'Arten von Durchlässen'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Durchlaesse, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Durchlaesse, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Durchlaesse)
signals.post_delete.connect(remove_permissions, sender=Arten_Durchlaesse)
# Arten von Fair-Trade-Einrichtungen
class Arten_FairTrade(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_fairtrade'
verbose_name = 'Art einer Fair-Trade-Einrichtung'
verbose_name_plural = 'Arten von Fair-Trade-Einrichtungen'
description = 'Arten von Fair-Trade-Einrichtungen'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_FairTrade, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_FairTrade, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_FairTrade)
signals.post_delete.connect(remove_permissions, sender=Arten_FairTrade)
# Arten von Feldsportanlagen
class Arten_Feldsportanlagen(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_feldsportanlagen'
verbose_name = 'Art einer Feldsportanlage'
verbose_name_plural = 'Arten von Feldsportanlagen'
description = 'Arten von Feldsportanlagen'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Feldsportanlagen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Feldsportanlagen, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Feldsportanlagen)
signals.post_delete.connect(remove_permissions, sender=Arten_Feldsportanlagen)
# Arten von Feuerwachen
class Arten_Feuerwachen(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_feuerwachen'
verbose_name = 'Art einer Feuerwache'
verbose_name_plural = 'Arten von Feuerwachen'
description = 'Arten von Feuerwachen'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Feuerwachen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Feuerwachen, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Feuerwachen)
signals.post_delete.connect(remove_permissions, sender=Arten_Feuerwachen)
# Arten von Fließgewässern
class Arten_Fliessgewaesser(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_fliessgewaesser'
verbose_name = 'Art eines Fließgewässers'
verbose_name_plural = 'Arten von Fließgewässern'
description = 'Arten von Fließgewässern'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Fliessgewaesser, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Fliessgewaesser, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Fliessgewaesser)
signals.post_delete.connect(remove_permissions, sender=Arten_Fliessgewaesser)
# Arten von Hundetoiletten
class Arten_Hundetoiletten(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_hundetoiletten'
verbose_name = 'Art einer Hundetoilette'
verbose_name_plural = 'Arten von Hundetoiletten'
description = 'Arten von Hundetoiletten'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Hundetoiletten, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Hundetoiletten, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Hundetoiletten)
signals.post_delete.connect(remove_permissions, sender=Arten_Hundetoiletten)
# Arten von Meldediensten (flächenhaft)
class Arten_Meldedienst_flaechenhaft(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_meldedienst_flaechenhaft'
verbose_name = 'Art eines Meldedienstes (flächenhaft)'
verbose_name_plural = 'Arten von Meldediensten (flächenhaft)'
description = 'Arten von Meldediensten (flächenhaft)'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Meldedienst_flaechenhaft, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Meldedienst_flaechenhaft, self).delete(*args, **kwargs)
signals.post_save.connect(
assign_permissions,
sender=Arten_Meldedienst_flaechenhaft)
signals.post_delete.connect(
remove_permissions,
sender=Arten_Meldedienst_flaechenhaft)
# Arten von Meldediensten (punkthaft)
class Arten_Meldedienst_punkthaft(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_meldedienst_punkthaft'
verbose_name = 'Art eines Meldedienstes (punkthaft)'
verbose_name_plural = 'Arten von Meldediensten (punkthaft)'
description = 'Arten von Meldediensten (punkthaft)'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Meldedienst_punkthaft, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Meldedienst_punkthaft, self).delete(*args, **kwargs)
signals.post_save.connect(
assign_permissions,
sender=Arten_Meldedienst_punkthaft)
signals.post_delete.connect(
remove_permissions,
sender=Arten_Meldedienst_punkthaft)
# Arten von Parkmöglichkeiten
class Arten_Parkmoeglichkeiten(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_parkmoeglichkeiten'
verbose_name = 'Art einer Parkmöglichkeit'
verbose_name_plural = 'Arten von Parkmöglichkeiten'
description = 'Arten von Parkmöglichkeiten'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Parkmoeglichkeiten, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Parkmoeglichkeiten, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Parkmoeglichkeiten)
signals.post_delete.connect(
remove_permissions,
sender=Arten_Parkmoeglichkeiten)
# Arten von Pflegeeinrichtungen
class Arten_Pflegeeinrichtungen(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_pflegeeinrichtungen'
verbose_name = 'Art einer Pflegeeinrichtung'
verbose_name_plural = 'Arten von Pflegeeinrichtungen'
description = 'Arten von Pflegeeinrichtungen'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Pflegeeinrichtungen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Pflegeeinrichtungen, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Pflegeeinrichtungen)
signals.post_delete.connect(
remove_permissions,
sender=Arten_Pflegeeinrichtungen)
# Arten von Pollern
class Arten_Poller(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_poller'
verbose_name = 'Art eines Pollers'
verbose_name_plural = 'Arten von Pollern'
description = 'Arten von Pollern'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Poller, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Poller, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Poller)
signals.post_delete.connect(remove_permissions, sender=Arten_Poller)
# Arten von UVP-Vorprüfungen
class Arten_UVP_Vorpruefungen(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_uvp_vorpruefungen'
verbose_name = 'Art einer UVP-Vorprüfung'
verbose_name_plural = 'Arten von UVP-Vorprüfungen'
description = 'Arten von UVP-Vorprüfungen'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_UVP_Vorpruefungen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_UVP_Vorpruefungen, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_UVP_Vorpruefungen)
signals.post_delete.connect(remove_permissions, sender=Arten_UVP_Vorpruefungen)
# Arten von Wegen
class Arten_Wege(Art):
class Meta(Art.Meta):
db_table = 'codelisten\".\"arten_wege'
        verbose_name = 'Art eines Weges'
verbose_name_plural = 'Arten von Wegen'
description = 'Arten von Wegen'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Wege, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Arten_Wege, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Arten_Wege)
signals.post_delete.connect(remove_permissions, sender=Arten_Wege)
# Auftraggeber von Baustellen
class Auftraggeber_Baustellen(models.Model):
uuid = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False)
auftraggeber = models.CharField(
'Auftraggeber', max_length=255, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
class Meta:
managed = False
codelist = True
db_table = 'codelisten\".\"auftraggeber_baustellen'
verbose_name = 'Auftraggeber einer Baustelle'
verbose_name_plural = 'Auftraggeber von Baustellen'
description = 'Auftraggeber von Baustellen'
list_fields = {
'auftraggeber': 'Auftraggeber'
}
        # important: only this way are drop-down entries in forms of
        # child tables listed in sorted order
ordering = ['auftraggeber']
def __str__(self):
return self.auftraggeber
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Auftraggeber_Baustellen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Auftraggeber_Baustellen, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Auftraggeber_Baustellen)
signals.post_delete.connect(remove_permissions, sender=Auftraggeber_Baustellen)
# Ausführungen innerhalb eines Haltestellenkatasters
class Ausfuehrungen_Haltestellenkataster(models.Model):
uuid = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False)
ausfuehrung = models.CharField(
'Ausführung', max_length=255, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
class Meta:
managed = False
codelist = True
db_table = 'codelisten\".\"ausfuehrungen_haltestellenkataster'
verbose_name = 'Ausführung innerhalb eines Haltestellenkatasters'
verbose_name_plural = 'Ausführungen innerhalb eines Haltestellenkatasters'
description = 'Ausführungen innerhalb eines Haltestellenkatasters'
list_fields = {
'ausfuehrung': 'Ausführung'
}
        # important: only this way are drop-down entries in forms of
        # child tables listed in sorted order
ordering = ['ausfuehrung']
def __str__(self):
return self.ausfuehrung
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Ausfuehrungen_Haltestellenkataster, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Ausfuehrungen_Haltestellenkataster, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions,
sender=Ausfuehrungen_Haltestellenkataster)
signals.post_delete.connect(remove_permissions,
sender=Ausfuehrungen_Haltestellenkataster)
# Befestigungsarten der Aufstellfläche Bus innerhalb eines
# Haltestellenkatasters
class Befestigungsarten_Aufstellflaeche_Bus_Haltestellenkataster(
Befestigungsart):
class Meta(Befestigungsart.Meta):
db_table = 'codelisten\".\"befestigungsarten_aufstellflaeche_bus_haltestellenkataster'
verbose_name = 'Befestigungsart der Aufstellfläche Bus innerhalb eines Haltestellenkatasters'
verbose_name_plural = 'Befestigungsarten der Aufstellfläche Bus innerhalb eines Haltestellenkatasters'
description = 'Befestigungsarten der Aufstellfläche Bus innerhalb eines Haltestellenkatasters'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
        super(Befestigungsarten_Aufstellflaeche_Bus_Haltestellenkataster,
              self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
        super(Befestigungsarten_Aufstellflaeche_Bus_Haltestellenkataster,
              self).delete(*args, **kwargs)
signals.post_save.connect(
assign_permissions,
sender=Befestigungsarten_Aufstellflaeche_Bus_Haltestellenkataster)
signals.post_delete.connect(
remove_permissions,
sender=Befestigungsarten_Aufstellflaeche_Bus_Haltestellenkataster)
# Befestigungsarten der Wartefläche innerhalb eines Haltestellenkatasters
class Befestigungsarten_Warteflaeche_Haltestellenkataster(Befestigungsart):
class Meta(Befestigungsart.Meta):
db_table = 'codelisten\".\"befestigungsarten_warteflaeche_haltestellenkataster'
verbose_name = 'Befestigungsart der Wartefläche innerhalb eines Haltestellenkatasters'
        verbose_name_plural = 'Befestigungsarten der Wartefläche innerhalb eines Haltestellenkatasters'
["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"force": {
"description": "If set to True then both new and running task hyper params can be deleted. Otherwise only the new task ones. Default is False",
"type": "boolean",
},
"hyperparams": {
"description": "List of hyper parameters to delete. In case a parameter with an empty name is passed all the section will be deleted",
"items": {"$ref": "#/definitions/param_key"},
"type": "array",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "hyperparams"],
"type": "object",
}
def __init__(self, task, hyperparams, force=None, **kwargs):
super(DeleteHyperParamsRequest, self).__init__(**kwargs)
self.task = task
self.hyperparams = hyperparams
self.force = force
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("hyperparams")
def hyperparams(self):
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value):
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", (ParamKey, dict), is_array=True)
value = [(ParamKey(**v) if isinstance(v, dict) else v) for v in value]
self._property_hyperparams = value
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
class DeleteHyperParamsResponse(Response):
"""
Response of tasks.delete_hyper_params endpoint.
:param deleted: Indicates if the task was updated successfully
:type deleted: int
"""
_service = "tasks"
_action = "delete_hyper_params"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"deleted": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, deleted=None, **kwargs):
super(DeleteHyperParamsResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property("deleted")
def deleted(self):
return self._property_deleted
@deleted.setter
def deleted(self, value):
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
class DeleteManyRequest(Request):
"""
Delete tasks
:param ids: Entities to move
:type ids: Sequence[str]
:param move_to_trash: Move task to trash instead of deleting it. For internal
use only, tasks in the trash are not visible from the API and cannot be
restored!
:type move_to_trash: bool
:param force: If not true, call fails if the task status is 'in_progress'
:type force: bool
:param return_file_urls: If set to 'true' then return the urls of the files
that were uploaded by the tasks. Default value is 'false'
:type return_file_urls: bool
:param delete_output_models: If set to 'true' then delete output models of the
tasks that are not referenced by other tasks. Default value is 'true'
:type delete_output_models: bool
"""
_service = "tasks"
_action = "delete_many"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"delete_output_models": {
"description": "If set to 'true' then delete output models of the tasks that are not referenced by other tasks. Default value is 'true'",
"type": "boolean",
},
"force": {
"default": False,
"description": "If not true, call fails if the task status is 'in_progress'",
"type": "boolean",
},
"ids": {
"description": "Entities to move",
"items": {"type": "string"},
"type": "array",
},
"move_to_trash": {
"default": False,
"description": "Move task to trash instead of deleting it. For internal use only, tasks in the trash are not visible from the API and cannot be restored!",
"type": "boolean",
},
"return_file_urls": {
"description": "If set to 'true' then return the urls of the files that were uploaded by the tasks. Default value is 'false'",
"type": "boolean",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(
self,
ids,
move_to_trash=False,
force=False,
return_file_urls=None,
delete_output_models=None,
**kwargs
):
super(DeleteManyRequest, self).__init__(**kwargs)
self.ids = ids
self.move_to_trash = move_to_trash
self.force = force
self.return_file_urls = return_file_urls
self.delete_output_models = delete_output_models
@schema_property("ids")
def ids(self):
return self._property_ids
@ids.setter
def ids(self, value):
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("move_to_trash")
def move_to_trash(self):
return self._property_move_to_trash
@move_to_trash.setter
def move_to_trash(self, value):
if value is None:
self._property_move_to_trash = None
return
self.assert_isinstance(value, "move_to_trash", (bool,))
self._property_move_to_trash = value
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("return_file_urls")
def return_file_urls(self):
return self._property_return_file_urls
@return_file_urls.setter
def return_file_urls(self, value):
if value is None:
self._property_return_file_urls = None
return
self.assert_isinstance(value, "return_file_urls", (bool,))
self._property_return_file_urls = value
@schema_property("delete_output_models")
def delete_output_models(self):
return self._property_delete_output_models
@delete_output_models.setter
def delete_output_models(self, value):
if value is None:
self._property_delete_output_models = None
return
self.assert_isinstance(value, "delete_output_models", (bool,))
self._property_delete_output_models = value
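# A construction sketch only; the task id below is a placeholder and the keyword
# arguments simply mirror the schema properties declared for this request.
def _example_delete_many_request():
    return DeleteManyRequest(ids=["<task-id>"], force=True, return_file_urls=True)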
class DeleteManyResponse(Response):
"""
Response of tasks.delete_many endpoint.
:param deleted: Number of tasks deleted
:type deleted: int
:param updated_children: Number of child tasks whose parent property was
updated
:type updated_children: int
:param updated_models: Number of models whose task property was updated
:type updated_models: int
:param deleted_models: Number of deleted output models
:type deleted_models: int
:param urls: The urls of the files that were uploaded by the tasks. Returned if
the 'return_file_urls' was set to 'true'
:type urls: TaskUrls
"""
_service = "tasks"
_action = "delete_many"
_version = "2.13"
_schema = {
"definitions": {
"task_urls": {
"properties": {
"artifact_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"event_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"model_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
},
"failures": {
"item": {
"error": {
"description": "Error info",
"properties": {
"codes": {"item": {"type": "integer"}, "type": "array"},
"data": {"additionalProperties": True, "type": "object"},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {"description": "ID of the failed entity", "type": "string"},
"type": "object",
},
"type": "array",
},
"properties": {
"deleted": {
"description": "Number of tasks deleted",
"type": ["integer", "null"],
},
"deleted_models": {
"description": "Number of deleted output models",
"type": ["integer", "null"],
},
"updated_children": {
"description": "Number of child tasks whose parent property was updated",
"type": ["integer", "null"],
},
"updated_models": {
"description": "Number of models whose task property was updated",
"type": ["integer", "null"],
},
"urls": {
"description": "The urls of the files that were uploaded by the tasks. Returned if the 'return_file_urls' was set to 'true'",
"oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
},
},
}
def __init__(
self,
deleted=None,
updated_children=None,
updated_models=None,
deleted_models=None,
urls=None,
**kwargs
):
super(DeleteManyResponse, self).__init__(**kwargs)
self.deleted = deleted
self.updated_children = updated_children
self.updated_models = updated_models
self.deleted_models = deleted_models
self.urls = urls
@schema_property("deleted")
def deleted(self):
return self._property_deleted
@deleted.setter
def deleted(self, value):
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
@schema_property("updated_children")
def updated_children(self):
return self._property_updated_children
@updated_children.setter
def updated_children(self, value):
if value is None:
self._property_updated_children = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated_children", six.integer_types)
self._property_updated_children = value
@schema_property("updated_models")
def updated_models(self):
return self._property_updated_models
@updated_models.setter
def updated_models(self, value):
if value is None:
self._property_updated_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated_models", six.integer_types)
self._property_updated_models = value
@schema_property("deleted_models")
def deleted_models(self):
return self._property_deleted_models
@deleted_models.setter
def deleted_models(self, value):
if value is None:
self._property_deleted_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted_models", six.integer_types)
self._property_deleted_models = value
@schema_property("urls")
def urls(self):
return self._property_urls
@urls.setter
def urls(self, value):
if value is None:
self._property_urls = None
return
if isinstance(value, dict):
value = TaskUrls.from_dict(value)
else:
self.assert_isinstance(value, "urls", TaskUrls)
self._property_urls = value
class DeleteModelsRequest(Request):
"""
Delete models from task
:param task: ID of the task
:type task: str
:param models: The list of models to delete
:type models: Sequence[dict]
"""
_service = "tasks"
_action = "delete_models"
_version = "2.13"
_schema = {
"definitions": {
"model_type_enum": {"enum": ["input", "output"], "type": "string"}
},
"properties": {
"models": {
"description": "The list of models to delete",
"items": {
"properties": {
"name": {
"description": "The task model name",
"type": "string",
},
"type": {
"$ref": "#/definitions/model_type_enum",
"description": "The task model type",
},
},
"required": ["name", "type"],
"type": "object",
},
"type": "array",
},
"task": {"description": "ID of the task", "type": "string"},
},
"required": ["task", "models"],
"type": "object",
}
def __init__(self, task, models, **kwargs):
super(DeleteModelsRequest, self).__init__(**kwargs)
self.task = task
self.models = models
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("models")
def models(self):
return self._property_models
@models.setter
| |
322
X86_INS_LLDT = 323
X86_INS_LMSW = 324
X86_INS_OR = 325
X86_INS_LOCK = 326
X86_INS_SUB = 327
X86_INS_XOR = 328
X86_INS_LODSB = 329
X86_INS_LODSD = 330
X86_INS_LODSQ = 331
X86_INS_LODSW = 332
X86_INS_LOOP = 333
X86_INS_LOOPE = 334
X86_INS_LOOPNE = 335
X86_INS_RETF = 336
X86_INS_RETFQ = 337
X86_INS_LSL = 338
X86_INS_LSS = 339
X86_INS_LTR = 340
X86_INS_XADD = 341
X86_INS_LZCNT = 342
X86_INS_MASKMOVDQU = 343
X86_INS_MAXPD = 344
X86_INS_MAXPS = 345
X86_INS_MAXSD = 346
X86_INS_MAXSS = 347
X86_INS_MFENCE = 348
X86_INS_MINPD = 349
X86_INS_MINPS = 350
X86_INS_MINSD = 351
X86_INS_MINSS = 352
X86_INS_CVTPD2PI = 353
X86_INS_CVTPI2PD = 354
X86_INS_CVTPI2PS = 355
X86_INS_CVTPS2PI = 356
X86_INS_CVTTPD2PI = 357
X86_INS_CVTTPS2PI = 358
X86_INS_EMMS = 359
X86_INS_MASKMOVQ = 360
X86_INS_MOVD = 361
X86_INS_MOVDQ2Q = 362
X86_INS_MOVNTQ = 363
X86_INS_MOVQ2DQ = 364
X86_INS_MOVQ = 365
X86_INS_PABSB = 366
X86_INS_PABSD = 367
X86_INS_PABSW = 368
X86_INS_PACKSSDW = 369
X86_INS_PACKSSWB = 370
X86_INS_PACKUSWB = 371
X86_INS_PADDB = 372
X86_INS_PADDD = 373
X86_INS_PADDQ = 374
X86_INS_PADDSB = 375
X86_INS_PADDSW = 376
X86_INS_PADDUSB = 377
X86_INS_PADDUSW = 378
X86_INS_PADDW = 379
X86_INS_PALIGNR = 380
X86_INS_PANDN = 381
X86_INS_PAND = 382
X86_INS_PAVGB = 383
X86_INS_PAVGW = 384
X86_INS_PCMPEQB = 385
X86_INS_PCMPEQD = 386
X86_INS_PCMPEQW = 387
X86_INS_PCMPGTB = 388
X86_INS_PCMPGTD = 389
X86_INS_PCMPGTW = 390
X86_INS_PEXTRW = 391
X86_INS_PHADDSW = 392
X86_INS_PHADDW = 393
X86_INS_PHADDD = 394
X86_INS_PHSUBD = 395
X86_INS_PHSUBSW = 396
X86_INS_PHSUBW = 397
X86_INS_PINSRW = 398
X86_INS_PMADDUBSW = 399
X86_INS_PMADDWD = 400
X86_INS_PMAXSW = 401
X86_INS_PMAXUB = 402
X86_INS_PMINSW = 403
X86_INS_PMINUB = 404
X86_INS_PMOVMSKB = 405
X86_INS_PMULHRSW = 406
X86_INS_PMULHUW = 407
X86_INS_PMULHW = 408
X86_INS_PMULLW = 409
X86_INS_PMULUDQ = 410
X86_INS_POR = 411
X86_INS_PSADBW = 412
X86_INS_PSHUFB = 413
X86_INS_PSHUFW = 414
X86_INS_PSIGNB = 415
X86_INS_PSIGND = 416
X86_INS_PSIGNW = 417
X86_INS_PSLLD = 418
X86_INS_PSLLQ = 419
X86_INS_PSLLW = 420
X86_INS_PSRAD = 421
X86_INS_PSRAW = 422
X86_INS_PSRLD = 423
X86_INS_PSRLQ = 424
X86_INS_PSRLW = 425
X86_INS_PSUBB = 426
X86_INS_PSUBD = 427
X86_INS_PSUBQ = 428
X86_INS_PSUBSB = 429
X86_INS_PSUBSW = 430
X86_INS_PSUBUSB = 431
X86_INS_PSUBUSW = 432
X86_INS_PSUBW = 433
X86_INS_PUNPCKHBW = 434
X86_INS_PUNPCKHDQ = 435
X86_INS_PUNPCKHWD = 436
X86_INS_PUNPCKLBW = 437
X86_INS_PUNPCKLDQ = 438
X86_INS_PUNPCKLWD = 439
X86_INS_PXOR = 440
X86_INS_MONITOR = 441
X86_INS_MONTMUL = 442
X86_INS_MOV = 443
X86_INS_MOVABS = 444
X86_INS_MOVBE = 445
X86_INS_MOVDDUP = 446
X86_INS_MOVDQA = 447
X86_INS_MOVDQU = 448
X86_INS_MOVHLPS = 449
X86_INS_MOVHPD = 450
X86_INS_MOVHPS = 451
X86_INS_MOVLHPS = 452
X86_INS_MOVLPD = 453
X86_INS_MOVLPS = 454
X86_INS_MOVMSKPD = 455
X86_INS_MOVMSKPS = 456
X86_INS_MOVNTDQA = 457
X86_INS_MOVNTDQ = 458
X86_INS_MOVNTI = 459
X86_INS_MOVNTPD = 460
X86_INS_MOVNTPS = 461
X86_INS_MOVNTSD = 462
X86_INS_MOVNTSS = 463
X86_INS_MOVSB = 464
X86_INS_MOVSD = 465
X86_INS_MOVSHDUP = 466
X86_INS_MOVSLDUP = 467
X86_INS_MOVSQ = 468
X86_INS_MOVSS = 469
X86_INS_MOVSW = 470
X86_INS_MOVSX = 471
X86_INS_MOVSXD = 472
X86_INS_MOVUPD = 473
X86_INS_MOVUPS = 474
X86_INS_MOVZX = 475
X86_INS_MPSADBW = 476
X86_INS_MUL = 477
X86_INS_MULPD = 478
X86_INS_MULPS = 479
X86_INS_MULSD = 480
X86_INS_MULSS = 481
X86_INS_MULX = 482
X86_INS_FMUL = 483
X86_INS_FIMUL = 484
X86_INS_FMULP = 485
X86_INS_MWAIT = 486
X86_INS_NEG = 487
X86_INS_NOP = 488
X86_INS_NOT = 489
X86_INS_OUT = 490
X86_INS_OUTSB = 491
X86_INS_OUTSD = 492
X86_INS_OUTSW = 493
X86_INS_PACKUSDW = 494
X86_INS_PAUSE = 495
X86_INS_PAVGUSB = 496
X86_INS_PBLENDVB = 497
X86_INS_PBLENDW = 498
X86_INS_PCLMULQDQ = 499
X86_INS_PCMPEQQ = 500
X86_INS_PCMPESTRI = 501
X86_INS_PCMPESTRM = 502
X86_INS_PCMPGTQ = 503
X86_INS_PCMPISTRI = 504
X86_INS_PCMPISTRM = 505
X86_INS_PDEP = 506
X86_INS_PEXT = 507
X86_INS_PEXTRB = 508
X86_INS_PEXTRD = 509
X86_INS_PEXTRQ = 510
X86_INS_PF2ID = 511
X86_INS_PF2IW = 512
X86_INS_PFACC = 513
X86_INS_PFADD = 514
X86_INS_PFCMPEQ = 515
X86_INS_PFCMPGE = 516
X86_INS_PFCMPGT = 517
X86_INS_PFMAX = 518
X86_INS_PFMIN = 519
X86_INS_PFMUL = 520
X86_INS_PFNACC = 521
X86_INS_PFPNACC = 522
X86_INS_PFRCPIT1 = 523
X86_INS_PFRCPIT2 = 524
X86_INS_PFRCP = 525
X86_INS_PFRSQIT1 = 526
X86_INS_PFRSQRT = 527
X86_INS_PFSUBR = 528
X86_INS_PFSUB = 529
X86_INS_PHMINPOSUW = 530
X86_INS_PI2FD = 531
X86_INS_PI2FW = 532
X86_INS_PINSRB = 533
X86_INS_PINSRD = 534
X86_INS_PINSRQ = 535
X86_INS_PMAXSB = 536
X86_INS_PMAXSD = 537
X86_INS_PMAXUD = 538
X86_INS_PMAXUW = 539
X86_INS_PMINSB = 540
X86_INS_PMINSD = 541
X86_INS_PMINUD = 542
X86_INS_PMINUW = 543
X86_INS_PMOVSXBD = 544
X86_INS_PMOVSXBQ = 545
X86_INS_PMOVSXBW = 546
X86_INS_PMOVSXDQ = 547
X86_INS_PMOVSXWD = 548
X86_INS_PMOVSXWQ = 549
X86_INS_PMOVZXBD = 550
X86_INS_PMOVZXBQ = 551
X86_INS_PMOVZXBW = 552
X86_INS_PMOVZXDQ = 553
X86_INS_PMOVZXWD = 554
X86_INS_PMOVZXWQ = 555
X86_INS_PMULDQ = 556
X86_INS_PMULHRW = 557
X86_INS_PMULLD = 558
X86_INS_POP = 559
X86_INS_POPAW = 560
X86_INS_POPAL = 561
X86_INS_POPCNT = 562
X86_INS_POPF = 563
X86_INS_POPFD = 564
X86_INS_POPFQ = 565
X86_INS_PREFETCH = 566
X86_INS_PREFETCHNTA = 567
X86_INS_PREFETCHT0 = 568
X86_INS_PREFETCHT1 = 569
X86_INS_PREFETCHT2 = 570
X86_INS_PREFETCHW = 571
X86_INS_PSHUFD = 572
X86_INS_PSHUFHW = 573
X86_INS_PSHUFLW = 574
X86_INS_PSLLDQ = 575
X86_INS_PSRLDQ = 576
X86_INS_PSWAPD = 577
X86_INS_PTEST = 578
X86_INS_PUNPCKHQDQ = 579
X86_INS_PUNPCKLQDQ = 580
X86_INS_PUSH = 581
X86_INS_PUSHAW = 582
X86_INS_PUSHAL = 583
X86_INS_PUSHF = 584
X86_INS_PUSHFD = 585
X86_INS_PUSHFQ = 586
X86_INS_RCL = 587
X86_INS_RCPPS = 588
X86_INS_RCPSS = 589
X86_INS_RCR = 590
X86_INS_RDFSBASE = 591
X86_INS_RDGSBASE = 592
X86_INS_RDMSR = 593
X86_INS_RDPMC = 594
X86_INS_RDRAND = 595
X86_INS_RDSEED = 596
X86_INS_RDTSC = 597
X86_INS_RDTSCP = 598
X86_INS_REPNE = 599
X86_INS_REP = 600
X86_INS_ROL = 601
X86_INS_ROR = 602
X86_INS_RORX = 603
X86_INS_ROUNDPD = 604
X86_INS_ROUNDPS = 605
X86_INS_ROUNDSD = 606
X86_INS_ROUNDSS = 607
X86_INS_RSM = 608
X86_INS_RSQRTPS = 609
X86_INS_RSQRTSS = 610
X86_INS_SAHF = 611
X86_INS_SAL = 612
X86_INS_SALC = 613
X86_INS_SAR = 614
X86_INS_SARX = 615
X86_INS_SBB = 616
X86_INS_SCASB = 617
X86_INS_SCASD = 618
X86_INS_SCASQ = 619
X86_INS_SCASW = 620
X86_INS_SETAE = 621
X86_INS_SETA = 622
X86_INS_SETBE = 623
X86_INS_SETB = 624
X86_INS_SETE = 625
X86_INS_SETGE = 626
X86_INS_SETG = 627
X86_INS_SETLE = 628
X86_INS_SETL = 629
X86_INS_SETNE = 630
X86_INS_SETNO = 631
X86_INS_SETNP = 632
X86_INS_SETNS = 633
X86_INS_SETO = 634
X86_INS_SETP = 635
X86_INS_SETS = 636
X86_INS_SFENCE = 637
X86_INS_SGDT = 638
X86_INS_SHA1MSG1 = 639
X86_INS_SHA1MSG2 = 640
X86_INS_SHA1NEXTE = 641
X86_INS_SHA1RNDS4 = 642
X86_INS_SHA256MSG1 = 643
X86_INS_SHA256MSG2 = 644
X86_INS_SHA256RNDS2 = 645
X86_INS_SHL = 646
X86_INS_SHLD = 647
X86_INS_SHLX = 648
X86_INS_SHR = 649
X86_INS_SHRD = 650
X86_INS_SHRX = 651
X86_INS_SHUFPD = 652
X86_INS_SHUFPS = 653
X86_INS_SIDT = 654
X86_INS_FSIN = 655
X86_INS_SKINIT = 656
X86_INS_SLDT = 657
X86_INS_SMSW = 658
X86_INS_SQRTPD = 659
X86_INS_SQRTPS = 660
X86_INS_SQRTSD = 661
X86_INS_SQRTSS = 662
X86_INS_FSQRT = 663
X86_INS_STAC = 664
X86_INS_STC = 665
X86_INS_STD = 666
X86_INS_STGI = 667
X86_INS_STI = 668
X86_INS_STMXCSR = 669
X86_INS_STOSB = 670
X86_INS_STOSD = 671
X86_INS_STOSQ = 672
X86_INS_STOSW = 673
X86_INS_STR = 674
X86_INS_FST = 675
X86_INS_FSTP = 676
X86_INS_FSTPNCE = 677
X86_INS_SUBPD = 678
X86_INS_SUBPS = 679
X86_INS_FSUBR = 680
X86_INS_FISUBR = 681
X86_INS_FSUBRP = 682
X86_INS_SUBSD = 683
X86_INS_SUBSS = 684
X86_INS_FSUB = 685
X86_INS_FISUB = 686
X86_INS_FSUBP = 687
X86_INS_SWAPGS = 688
X86_INS_SYSCALL = 689
X86_INS_SYSENTER = 690
X86_INS_SYSEXIT = 691
X86_INS_SYSRET = 692
X86_INS_T1MSKC = 693
X86_INS_TEST = 694
X86_INS_UD2 = 695
X86_INS_FTST = 696
X86_INS_TZCNT = 697
X86_INS_TZMSK = 698
X86_INS_FUCOMPI = 699
X86_INS_FUCOMI = 700
X86_INS_FUCOMPP = 701
X86_INS_FUCOMP = 702
X86_INS_FUCOM = 703
X86_INS_UD2B = 704
X86_INS_UNPCKHPD = 705
X86_INS_UNPCKHPS = 706
X86_INS_UNPCKLPD = 707
X86_INS_UNPCKLPS = 708
X86_INS_VADDPD = 709
X86_INS_VADDPS = 710
X86_INS_VADDSD = 711
X86_INS_VADDSS = 712
X86_INS_VADDSUBPD = 713
X86_INS_VADDSUBPS = 714
X86_INS_VAESDECLAST = 715
X86_INS_VAESDEC = 716
X86_INS_VAESENCLAST = 717
X86_INS_VAESENC = 718
X86_INS_VAESIMC = 719
X86_INS_VAESKEYGENASSIST = 720
X86_INS_VALIGND = 721
X86_INS_VALIGNQ = 722
X86_INS_VANDNPD = 723
X86_INS_VANDNPS = 724
X86_INS_VANDPD = 725
X86_INS_VANDPS = 726
X86_INS_VBLENDMPD = 727
X86_INS_VBLENDMPS = 728
X86_INS_VBLENDPD = 729
X86_INS_VBLENDPS = 730
X86_INS_VBLENDVPD = 731
X86_INS_VBLENDVPS = 732
X86_INS_VBROADCASTF128 = 733
X86_INS_VBROADCASTI128 = 734
X86_INS_VBROADCASTI32X4 = 735
X86_INS_VBROADCASTI64X4 = 736
X86_INS_VBROADCASTSD = 737
X86_INS_VBROADCASTSS = 738
X86_INS_VCMPPD = 739
X86_INS_VCMPPS = 740
X86_INS_VCMPSD = 741
X86_INS_VCMPSS = 742
X86_INS_VCVTDQ2PD = 743
X86_INS_VCVTDQ2PS = 744
X86_INS_VCVTPD2DQX = 745
X86_INS_VCVTPD2DQ = 746
X86_INS_VCVTPD2PSX = 747
X86_INS_VCVTPD2PS = 748
X86_INS_VCVTPD2UDQ = 749
X86_INS_VCVTPH2PS = 750
X86_INS_VCVTPS2DQ = 751
X86_INS_VCVTPS2PD = 752
X86_INS_VCVTPS2PH = 753
X86_INS_VCVTPS2UDQ = 754
X86_INS_VCVTSD2SI = 755
X86_INS_VCVTSD2USI = 756
X86_INS_VCVTSS2SI = 757
X86_INS_VCVTSS2USI = 758
X86_INS_VCVTTPD2DQX = 759
X86_INS_VCVTTPD2DQ = 760
X86_INS_VCVTTPD2UDQ = 761
X86_INS_VCVTTPS2DQ = 762
X86_INS_VCVTTPS2UDQ = 763
X86_INS_VCVTUDQ2PD = 764
X86_INS_VCVTUDQ2PS = 765
X86_INS_VDIVPD = 766
X86_INS_VDIVPS = 767
X86_INS_VDIVSD = 768
X86_INS_VDIVSS = 769
X86_INS_VDPPD = 770
X86_INS_VDPPS = 771
X86_INS_VERR = 772
X86_INS_VERW = 773
X86_INS_VEXTRACTF128 = 774
X86_INS_VEXTRACTF32X4 = 775
X86_INS_VEXTRACTF64X4 = 776
X86_INS_VEXTRACTI128 = 777
X86_INS_VEXTRACTI32X4 = 778
X86_INS_VEXTRACTI64X4 = 779
X86_INS_VEXTRACTPS = 780
X86_INS_VFMADD132PD = 781
X86_INS_VFMADD132PS = 782
X86_INS_VFMADD213PD = 783
X86_INS_VFMADD213PS = 784
X86_INS_VFMADDPD = 785
X86_INS_VFMADD231PD = 786
X86_INS_VFMADDPS = 787
X86_INS_VFMADD231PS = 788
X86_INS_VFMADDSD = 789
X86_INS_VFMADD213SD = 790
X86_INS_VFMADD132SD = 791
X86_INS_VFMADD231SD = 792
X86_INS_VFMADDSS = 793
X86_INS_VFMADD213SS = 794
X86_INS_VFMADD132SS = 795
X86_INS_VFMADD231SS = 796
X86_INS_VFMADDSUB132PD = 797
X86_INS_VFMADDSUB132PS = 798
X86_INS_VFMADDSUB213PD = 799
X86_INS_VFMADDSUB213PS = 800
X86_INS_VFMADDSUBPD = 801
X86_INS_VFMADDSUB231PD = 802
X86_INS_VFMADDSUBPS = 803
X86_INS_VFMADDSUB231PS = 804
X86_INS_VFMSUB132PD = 805
X86_INS_VFMSUB132PS = 806
X86_INS_VFMSUB213PD = 807
X86_INS_VFMSUB213PS = 808
X86_INS_VFMSUBADD132PD = 809
X86_INS_VFMSUBADD132PS = 810
X86_INS_VFMSUBADD213PD = 811
X86_INS_VFMSUBADD213PS = 812
X86_INS_VFMSUBADDPD = 813
X86_INS_VFMSUBADD231PD = 814
X86_INS_VFMSUBADDPS = 815
X86_INS_VFMSUBADD231PS = 816
X86_INS_VFMSUBPD = 817
X86_INS_VFMSUB231PD = 818
X86_INS_VFMSUBPS = 819
X86_INS_VFMSUB231PS = 820
X86_INS_VFMSUBSD = 821
X86_INS_VFMSUB213SD = 822
X86_INS_VFMSUB132SD = 823
X86_INS_VFMSUB231SD = 824
X86_INS_VFMSUBSS = 825
X86_INS_VFMSUB213SS = 826
X86_INS_VFMSUB132SS = 827
X86_INS_VFMSUB231SS = 828
X86_INS_VFNMADD132PD = 829
X86_INS_VFNMADD132PS = 830
X86_INS_VFNMADD213PD = 831
X86_INS_VFNMADD213PS = 832
X86_INS_VFNMADDPD = 833
X86_INS_VFNMADD231PD = 834
X86_INS_VFNMADDPS = 835
X86_INS_VFNMADD231PS = 836
X86_INS_VFNMADDSD = 837
X86_INS_VFNMADD213SD = 838
X86_INS_VFNMADD132SD = 839
X86_INS_VFNMADD231SD = 840
X86_INS_VFNMADDSS = 841
X86_INS_VFNMADD213SS = 842
X86_INS_VFNMADD132SS = 843
X86_INS_VFNMADD231SS = 844
X86_INS_VFNMSUB132PD = 845
X86_INS_VFNMSUB132PS = 846
X86_INS_VFNMSUB213PD = 847
X86_INS_VFNMSUB213PS = 848
X86_INS_VFNMSUBPD = 849
X86_INS_VFNMSUB231PD = 850
X86_INS_VFNMSUBPS = 851
X86_INS_VFNMSUB231PS = 852
X86_INS_VFNMSUBSD = 853
X86_INS_VFNMSUB213SD = 854
X86_INS_VFNMSUB132SD = 855
X86_INS_VFNMSUB231SD = 856
X86_INS_VFNMSUBSS = 857
X86_INS_VFNMSUB213SS = 858
X86_INS_VFNMSUB132SS = 859
X86_INS_VFNMSUB231SS = 860
X86_INS_VFRCZPD = 861
X86_INS_VFRCZPS = 862
X86_INS_VFRCZSD = 863
X86_INS_VFRCZSS = 864
X86_INS_VORPD = 865
X86_INS_VORPS = 866
X86_INS_VXORPD = 867
X86_INS_VXORPS = 868
X86_INS_VGATHERDPD = 869
X86_INS_VGATHERDPS = 870
X86_INS_VGATHERPF0DPD = 871
X86_INS_VGATHERPF0DPS = 872
X86_INS_VGATHERPF0QPD = 873
X86_INS_VGATHERPF0QPS = 874
X86_INS_VGATHERPF1DPD = 875
X86_INS_VGATHERPF1DPS = 876
X86_INS_VGATHERPF1QPD = 877
X86_INS_VGATHERPF1QPS = 878
X86_INS_VGATHERQPD = 879
X86_INS_VGATHERQPS = 880
X86_INS_VHADDPD = 881
X86_INS_VHADDPS = 882
X86_INS_VHSUBPD = 883
X86_INS_VHSUBPS = 884
X86_INS_VINSERTF128 = 885
X86_INS_VINSERTF32X4 = 886
X86_INS_VINSERTF64X4 = 887
X86_INS_VINSERTI128 = 888
X86_INS_VINSERTI32X4 = 889
X86_INS_VINSERTI64X4 = 890
X86_INS_VINSERTPS = 891
X86_INS_VLDDQU = 892
X86_INS_VLDMXCSR = 893
X86_INS_VMASKMOVDQU = 894
X86_INS_VMASKMOVPD = 895
X86_INS_VMASKMOVPS = 896
X86_INS_VMAXPD = 897
X86_INS_VMAXPS = 898
X86_INS_VMAXSD = 899
X86_INS_VMAXSS = 900
X86_INS_VMCALL = 901
X86_INS_VMCLEAR = 902
X86_INS_VMFUNC = 903
X86_INS_VMINPD = 904
X86_INS_VMINPS = 905
X86_INS_VMINSD = 906
X86_INS_VMINSS = 907
X86_INS_VMLAUNCH = 908
X86_INS_VMLOAD = 909
X86_INS_VMMCALL = 910
X86_INS_VMOVQ = 911
X86_INS_VMOVDDUP = 912
X86_INS_VMOVD = 913
X86_INS_VMOVDQA32 = 914
X86_INS_VMOVDQA64 = 915
X86_INS_VMOVDQA = 916
X86_INS_VMOVDQU16 = 917
X86_INS_VMOVDQU32 = 918
X86_INS_VMOVDQU64 = 919
X86_INS_VMOVDQU8 = 920
X86_INS_VMOVDQU = 921
X86_INS_VMOVHLPS = 922
X86_INS_VMOVHPD = 923
X86_INS_VMOVHPS = 924
X86_INS_VMOVLHPS = 925
X86_INS_VMOVLPD = 926
X86_INS_VMOVLPS = 927
X86_INS_VMOVMSKPD = 928
X86_INS_VMOVMSKPS = 929
X86_INS_VMOVNTDQA = 930
X86_INS_VMOVNTDQ = 931
X86_INS_VMOVNTPD = 932
X86_INS_VMOVNTPS = 933
X86_INS_VMOVSD = 934
X86_INS_VMOVSHDUP = 935
X86_INS_VMOVSLDUP = 936
X86_INS_VMOVSS = 937
X86_INS_VMOVUPD = 938
X86_INS_VMOVUPS = 939
X86_INS_VMPSADBW = 940
X86_INS_VMPTRLD = 941
X86_INS_VMPTRST = 942
X86_INS_VMREAD = 943
X86_INS_VMRESUME = 944
X86_INS_VMRUN = 945
X86_INS_VMSAVE = 946
X86_INS_VMULPD = 947
X86_INS_VMULPS = 948
X86_INS_VMULSD = 949
X86_INS_VMULSS = 950
X86_INS_VMWRITE = 951
X86_INS_VMXOFF = 952
X86_INS_VMXON = 953
X86_INS_VPABSB = 954
X86_INS_VPABSD = 955
X86_INS_VPABSQ = 956
X86_INS_VPABSW = 957
X86_INS_VPACKSSDW = 958
X86_INS_VPACKSSWB = 959
X86_INS_VPACKUSDW = 960
X86_INS_VPACKUSWB = 961
X86_INS_VPADDB = 962
how accesses are split before the device is accessed. A device mapping
may swap the bytes of an access based on the <param>swap</param> argument, that
should be one of <tt>none</tt>, <tt>bus</tt>, <tt>bus-trans</tt> and
<tt>trans</tt>. For a description of these, see the documentation of the
<attr>map</attr> attribute in the <class>memory-space</class> class.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1595")
def del_map_cmd(space, object, fn, base):
map = [x for x in space.map if
not (x[1] == object
and (fn == -1 or fn == x[2])
and (base == -1 or base == x[0]))]
if len(map) == len(space.map):
print "No matching mappings in %s." % (space.name)
return
try:
space.map = map
if fn == -1:
func_str = ""
else:
func_str = "%d " % fn
if base == -1:
addr_str = ""
else:
addr_str = "at 0x%x " % base
if fn == -1 and base == -1:
print "Removing all mappings of '%s' from '%s'." % (object.name,
space.name)
else:
print ("Removing mapping %sof '%s' %sfrom '%s'."
% (func_str, object.name, addr_str, space.name))
except Exception, msg:
print "Failed removing mappings for '%s' from '%s': %s" % (object.name,
space.name,
msg)
def mapped_objs_expander(comp, space):
objs = [x[1].name for x in space.map]
return get_completions(comp, objs)
new_command("del-map", del_map_cmd,
[arg(obj_t('object'), 'device', expander = mapped_objs_expander),
arg(int_t, 'function', '?', -1),
arg(int_t, 'base', '?', -1)],
namespace = "memory-space",
type = ["Memory", "Configuration"],
see_also = ['<memory-space>.map', '<memory-space>.add-map'],
short = "remove device map from a memory-space",
doc = """
Remove the mapping of <param>device</param> from a memory-space. If a function
number is given by the <param>function</param> argument, then only mappings
with a matching number are removed. If an <param>address</param> is given,
only mappings with a matching address are removed. If both a
<param>function</param> and an <param>address</param> are specified, then only
mappings with a matching function number, at the specified address, are
removed.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1660")
#
# add-map / del-map for port-space
#
def add_map_cmd(space, object, base, length, fn, offset):
try:
space.map += [[base, object, fn, offset, length]]
except Exception, msg:
print "Failed mapping '%s' in '%s': %s" % (object.name,
space.name, msg)
SIM_command_has_problem()
return
else:
print "Mapped '%s' in '%s' at address 0x%x." % (object.name,
space.name, base)
new_command("add-map", add_map_cmd,
[arg(obj_t('object'), 'device'),
arg(uint64_t, 'base'),
arg(uint64_t, 'length'),
arg(int_t, 'function', '?', 0),
arg(uint64_t, 'offset', '?', 0)],
namespace = "port-space",
type = ["Memory", "Configuration"],
see_also = ['<port-space>.map', '<port-space>.del-map'],
short = "map device in a port-space",
doc = """
Map <param>device</param> into a port-space at address <param>base</param>
and with length <param>length</param>. Different mappings of the same device
may be identified by a device-specific <param>function</param> number. The
mapping may specify an offset into the device's memory space, using the
<param>offset</param> argument.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1694")
def del_map_cmd(space, object, fn):
map = [x for x in space.map if
not (x[1] == object and (fn == -1 or fn == x[2]))]
if len(map) == len(space.map):
print "No matching mappings in %s." % (space.name)
return
try:
space.map = map
if fn == -1:
print "Removing all mappings of '%s' from '%s'." % (object.name,
space.name)
else:
print "Removing mapping %d of '%s' from '%s'." % (fn, object.name,
space.name)
except Exception, msg:
print "Failed removing mappings for '%s' from '%s': %s" % (object.name,
space.name,
msg)
def mapped_objs_expander(comp, space):
objs = [x[1].name for x in space.map]
return get_completions(comp, objs)
new_command("del-map", del_map_cmd,
[arg(obj_t('object'), 'device', expander = mapped_objs_expander),
arg(int_t, 'function', '?', -1)],
namespace = "port-space",
type = ["Memory", "Configuration"],
see_also = ['<port-space>.map', '<port-space>.add-map'],
short = "remove device map from a port-space",
doc = """
Remove the mapping of <param>device</param> from a port-space. If a function
number is given by the <param>function</param> argument, then only mappings
with a matching number are removed.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1736")
#
# -------------------- set --------------------
#
def obj_set_cmd(obj, address, value, size, little_endian, big_endian):
if size < 1 or size > 8:
print "size must be 1-8 bytes."
return
try:
cpu = current_processor()
except:
# do not really need a processor
cpu = None
if little_endian and big_endian:
print "Cannot use both -l and -b."
return
if not little_endian and not big_endian and size > 1:
if not cpu:
print "When no processor exists, -l or -b has to be specified."
SIM_command_has_problem()
return
if cpu.big_endian:
big_endian = 1
else:
little_endian = 1
val_list = []
for i in range(size):
val_list.append(value & 0xff)
value = value >> 8
if big_endian:
val_list.reverse()
try:
ex = obj.iface.memory_space.write(obj, cpu,
address, tuple(val_list), 1)
if ex != Sim_PE_No_Exception:
print "Failed writing memory (exception %d)" % ex
SIM_command_has_problem()
except Exception, msg:
print "Failed writing memory: %s" % msg
SIM_command_has_problem()
def set_cmd(address, value, size, le, be):
obj_set_cmd(current_processor().physical_memory, address, value, size, le, be)
new_command("set", set_cmd,
[arg(int_t,"address"), arg(int_t,"value"), arg(int_t, "size", "?", 4),
arg(flag_t,"-l"), arg(flag_t,"-b")],
type = ["Memory", "Changing Simulated State"],
short = "set physical address to specified value",
see_also = ["get", "x", "pselect"],
doc = """
Set the <i>size</i> bytes of physical memory at location
<i>address</i> to <i>value</i>. The default <i>size</i> is 4 bytes,
but can be anywhere between 1 and 8 (inclusive).
If <i>value</i> is larger than the specified size, behavior is undefined.
The <i>-l</i> and <i>-b</i> flags are used to select little-endian and
big-endian byte order, respectively. If neither is given, the byte
order of the currently selected processor is used.
The non-namespace version of this command operates on the physical memory
associated with the current processor.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1800")
new_command("set", obj_set_cmd,
[arg(int_t,"address"), arg(int_t,"value"), arg(int_t, "size", "?", 4),
arg(flag_t,"-l"), arg(flag_t,"-b")],
short = "set physical address to specified value",
see_also = ["get", "signed"],
namespace = "memory-space",
doc_with = "set", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1821")
#
# -------------------- get --------------------
#
def obj_get_cmd(obj, address, size, little_endian, big_endian):
if size < 1 or size > 8:
print "size must be 1-8 bytes."
return
try:
cpu = current_processor()
except:
# do not really need a processor
cpu = None
if little_endian and big_endian:
print "Cannot use both -l and -b."
return
if not little_endian and not big_endian and size > 1:
if not cpu:
print "When no processor exists, -l or -b has to be specified."
SIM_command_has_problem()
return
if cpu.big_endian:
big_endian = 1
else:
little_endian = 1
try:
bytes = obj.iface.memory_space.read(obj, cpu,
address, size, 1)
except Exception, msg:
print "Failed reading memory: %s" % msg
SIM_command_has_problem()
return 0
# Make sure we have the msb in bytes[0]
if little_endian:
bytes = list(bytes)
bytes.reverse()
ret = 0
for x in bytes:
ret = (ret << 8) | x
return ret
def get_cmd(address, size, le, be):
return obj_get_cmd(current_processor().physical_memory, address, size, le, be)
new_command("get", get_cmd,
[arg(int_t,"address"), arg(int_t, "size", "?", 4),
arg(flag_t,"-l"), arg(flag_t,"-b")],
type = ["Memory", "Inspecting Simulated State"],
short = "get value of physical address",
pri = 1000,
see_also = ["x", "set", "signed"],
doc = """
Get value of physical memory location. The size argument specifies how
many bytes should be read. This defaults to 4, but can be any number
of bytes between 1 and 8 (inclusive).
The <i>-l</i> and <i>-b</i> flags are used to select little-endian and
big-endian byte order, respectively. If neither is given, the byte
order of the currently selected processor is used.
This command operates on the physical memory associated with the
current processor.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1879")
new_command("get", obj_get_cmd,
[arg(int_t,"address"), arg(int_t, "size", "?", 4),
arg(flag_t,"-l"), arg(flag_t,"-b")],
short = "get value of physical address",
namespace = "memory-space",
pri = 1000,
doc_with = "get", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1899")
#
# -------------------- read-reg --------------------
#
def obj_read_reg_cmd(cpu, reg_name):
try:
return local_read_int_register(cpu, reg_name)
except:
return SIM_read_register(cpu, SIM_get_register_number(cpu, reg_name))
def read_reg_cmd(cpu, reg_name):
if not cpu:
(cpu, _) = get_cpu()
return obj_read_reg_cmd(cpu, reg_name)
def read_default_reg_cmd(reg_name):
try:
(cpu, _) = get_cpu()
val = obj_read_reg_cmd(cpu, reg_name)
except:
(exception, value, traceback) = sys.exc_info()
raise CliError, ("'%' command: reading register '" + reg_name
+ "' failed (" + str(value) + ").\n\nIf you meant to "
+ "use a path like %simics%, check that you quote "
+ "the string properly, i.e., \"%simics%/...\"")
return val
def exp_regs(comp):
cpu,_ = get_cpu()
regs = [ SIM_get_register_name(cpu, r) for r in SIM_get_all_registers(cpu) ]
return get_completions(comp, regs)
def register_number_cmd(cpu, reg_name):
return SIM_get_register_number(cpu, reg_name)
new_command("register-number", register_number_cmd,
[arg(str_t, "reg-name", expander = exp_regs)],
type = ["Registers", "Inspecting Simulated State"],
short = "get the number of a processor register",
namespace = "processor",
see_also = ['%', 'read-reg'],
doc = """
Returns the register number for a named processor register.
The register number is used as hap indexing for example.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1942")
new_command("read-reg", read_reg_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
            arg(str_t,
def dataCleaning(marriageNum='Full', classNum=2, dropColumns=True):
'''
This is the data cleaning function for the CE264 final project: Marriage Analysis
Return a pandas data frame and a numpy array: data (X) and observation (y)
The name of each column refers to the JGSS data codebook
For some combined categories in a particular data feature, please refer to the source code
Parameters:
        marriageNum: integer or 'Full' (default); the number of samples to keep from the 'Currently married' class,
                     must be smaller than its actual count
        classNum: 2 (default), 3 or 'Full'; the number of classes in the observation;
                  if 2, class 1 is 'Currently married' and class 2 is everything else
                  if 3, class 1 is 'Currently married', 2 is 'Divorced', and 3 is everything else
                  if 'Full', check the codebook for the meaning of each class
        dropColumns: whether to drop columns that may be correlated with the marriage status (default is True)
'''
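    # Usage sketch (illustrative values): X, y = dataCleaning(marriageNum=1000, classNum=2)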
# parameter check
if classNum not in [2, 3, 'Full']:
raise ValueError('Invalid class number: should be 2, 3, or "Full".')
# import libraries and raw data
import numpy as np
import pandas as pd
rawData = pd.read_stata('34623-0001-Data.dta')
# data selected primarily by genetic algorithm
selectedCol = ['SIZE', 'SEXA', 'TP5UNEMP', 'AGESTPWK', 'SZSJBHWK', 'TPJOBP', 'TPJBDP', 'SZCMTHR', 'XXJOB',
'XJOBDWK', 'SZTTLSTA', 'ST5JOB', 'SSJB1WK', 'SSTPUNEM', 'SSSJBHWK', 'SSTPJOB','SSTPJBDP',
'SSTPJBSE', 'SSXJBSCH', 'SSSZWKYR', 'SSSZSTFA', 'SPAGEX', 'SPLVTG', 'PPLVTG', 'MMLVTG', 'PPAGE',
'MMAGE', 'MMJOB', 'CCNUMTTL', 'CC01SEX', 'CC01AGE', 'CC02LVTG', 'CC02AGE', 'CC02MG', 'CC03MG',
'CC03JOB', 'CC04LVTG', 'CC04MG', 'CC05SEX', 'CC05JOB', 'CC06SEX', 'CC06AGE', 'CC07SEX',
'CC07LVTG', 'CC07AGE', 'CC07JOB', 'CC08MG', 'CC08JOB', 'SZFFOTHR', 'FFH01REL', 'FFH03SEX',
'FFH04REL', 'FFH04SEX', 'FFH05REL', 'FFH05SEX', 'FFH07REL', 'FFH07SEX', 'FFH07AGE', 'SZFFONLY',
'SZFFTTL', 'FFHEAD', 'SZFFOUT', 'FFO01REL', 'FFO01WHY', 'FFO02REL', 'FFO02WHY', 'FFO03REL',
'FFO03WHY', 'FFO05REL', 'FFO05WHY', 'FFO06REL', 'FFO06WHY', 'INCSELF', 'INCSP', 'INCPEN',
'INCUEB', 'INCIRR', 'INCRENT', 'INCMAIN', 'SZINCOMA', 'XNUMSISE', 'XNUMBROY', 'XSSNBROY',
'XSSNSISY', 'PREF15', 'TP5LOC15', 'PPJBXX15', 'PPJBSZ15', 'MMJBTP15', 'XXLSTSCH','SSLSTSCH',
'PPLSTSCH', 'DOLSTSCH', 'XGRADE', 'XSPSCH', 'MARC']
selectedData = rawData[selectedCol]
    # keep only columns that have fewer than 1500 'Not applicable' values
NAfilter = []
for i in selectedCol:
if len(selectedData[selectedData[i] == 'Not applicable']) < 1500:
NAfilter += [i]
selectedData = selectedData[NAfilter]
# sample the marriage class
if marriageNum != 'Full':
marriageClass = selectedData[selectedData['MARC'] == 'Currently married']
dropNum = len(marriageClass) - marriageNum
        if dropNum <= 0:
raise ValueError('The number of sampled "Marriage" class should be smaller than its actual number. Try another value.')
drop_id = np.random.choice(marriageClass.index, dropNum, replace=False)
selectedData = selectedData.drop(drop_id)
# data cleaning...
selectedData['SIZE'].replace('Largest cities', 1, inplace=True)
selectedData['SIZE'].replace('Cities with population of 200000 or more', 2, inplace=True)
selectedData['SIZE'].replace('Cities with population of less than 200000', 3, inplace=True)
selectedData['SIZE'].replace('Town/village', 4, inplace=True)
selectedData['SEXA'].replace('Male', 1, inplace=True)
selectedData['SEXA'].replace('Female', 2, inplace=True)
selectedData['SSJB1WK'].replace('He/she worked last week.', 1, inplace=True)
selectedData['SSJB1WK'].replace('He/she was going to work last week, but did not work.', 2, inplace=True)
selectedData['SSJB1WK'].replace('He/she did not work.', 3, inplace=True)
selectedData['SPLVTG'].replace('Living together', 1, inplace=True)
selectedData['SPLVTG'].replace('Not living together (because of work circumstances)', 2, inplace=True)
selectedData['SPLVTG'].replace('Not living together (for other reasons)', 3, inplace=True)
selectedData['PPLVTG'].replace('Living together', 1, inplace=True)
selectedData['PPLVTG'].replace('Not living together', 2, inplace=True)
selectedData['PPLVTG'].replace('Deceased', 3, inplace=True)
selectedData['MMLVTG'].replace('Living together', 1, inplace=True)
selectedData['MMLVTG'].replace('Not living together', 2, inplace=True)
selectedData['MMLVTG'].replace('Deceased', 3, inplace=True)
selectedData['CC01SEX'].replace('Male', 1, inplace=True)
selectedData['CC01SEX'].replace('Female', 2, inplace=True)
for i in np.unique(selectedData['FFHEAD']):
if i not in ['Respondent himself/herself', 'Husband', 'Wife']:
selectedData['FFHEAD'].replace(i, 4, inplace=True)
selectedData['FFHEAD'].replace('Respondent himself/herself', 1, inplace=True)
selectedData['FFHEAD'].replace('Husband', 2, inplace=True)
selectedData['FFHEAD'].replace('Wife', 3, inplace=True)
selectedData['INCSELF'].replace('Chosen', 0, inplace=True)
selectedData['INCSELF'].replace('Not chosen', 1, inplace=True)
    selectedData['INCSP'].replace('Chosen', 0, inplace=True)  # coded 0/1 like the other income-source columns
    selectedData['INCSP'].replace('Not chosen', 1, inplace=True)
selectedData['INCPEN'].replace('Chosen', 0, inplace=True)
selectedData['INCPEN'].replace('Not chosen', 1, inplace=True)
selectedData['INCUEB'].replace('Chosen', 0, inplace=True)
selectedData['INCUEB'].replace('Not chosen', 1, inplace=True)
selectedData['INCIRR'].replace('Chosen', 0, inplace=True)
selectedData['INCIRR'].replace('Not chosen', 1, inplace=True)
selectedData['INCRENT'].replace('Chosen', 0, inplace=True)
selectedData['INCRENT'].replace('Not chosen', 1, inplace=True)
selectedData.drop('INCMAIN', axis=1, inplace=True)
for i in np.unique(selectedData['SZINCOMA']):
if i == 'None':
selectedData['SZINCOMA'].replace(i, 1, inplace=True)
elif i in ['Less than 700,000 yen', '700,000 yen - 1 million yen']:
selectedData['SZINCOMA'].replace(i, 2, inplace=True)
elif i in ['1 million yen - 1.3 million yen',
'1.3 million yen - 1.5 million yen',
'1.5 million yen - 2.5 million yen']:
selectedData['SZINCOMA'].replace(i, 3, inplace=True)
elif i in ['2.5 million yen - 3.5 million yen',
'3.5 million yen - 4.5 million yen',
'4.5 million yen - 5.5 million yen',]:
selectedData['SZINCOMA'].replace(i, 4, inplace=True)
elif i in ['5.5 million yen - 6.5 million yen',
'6.5 million yen - 7.5 million yen',
'7.5 million yen - 8.5 million yen',
'8.5 million yen - 10 million yen',
'10 million yen - 12 million yen',
'12 million yen - 14 million yen',
'14 million yen - 16 million yen',
'16 million yen - 18.5 million yen',
'18.5 million yen - 23 million yen',]:
selectedData['SZINCOMA'].replace(i, 5, inplace=True)
else:
selectedData['SZINCOMA'].replace(i, 0, inplace=True)
selectedData.drop('PREF15', axis=1, inplace=True)
selectedData['TP5LOC15'].replace('Large city', 1, inplace=True)
selectedData['TP5LOC15'].replace('Small to medium sized city', 2, inplace=True)
selectedData['TP5LOC15'].replace('Town', 3, inplace=True)
selectedData['TP5LOC15'].replace('Village', 4, inplace=True)
for i in np.unique(selectedData['PPJBXX15']):
if i == 'managers in companies/organizations':
selectedData['PPJBXX15'].replace(i, 1, inplace=True)
elif i == 'Not applicable':
selectedData['PPJBXX15'].replace(i, 0, inplace=True)
else:
selectedData['PPJBXX15'].replace(i, 2, inplace=True)
selectedData['PPJBSZ15'].replace('1', 1, inplace=True)
selectedData['PPJBSZ15'].replace('Small company (2-29 employees)', 2, inplace=True)
selectedData['PPJBSZ15'].replace('Medium-sized company (30-299 employees)', 3, inplace=True)
selectedData['PPJBSZ15'].replace('Large company (300-999 employees)', 4, inplace=True)
selectedData['PPJBSZ15'].replace('Major company (1000 or more employees', 5, inplace=True)
selectedData['PPJBSZ15'].replace('Government agency', 6, inplace=True)
selectedData['PPJBSZ15'].replace("Don't know", 0, inplace=True)
for i in np.unique(selectedData['MMJBTP15']):
if i == 'She was not working.':
selectedData['MMJBTP15'].replace(i, 1, inplace=True)
elif i == 'Temporary worker, Daily worker, Part-time temporary worker':
selectedData['MMJBTP15'].replace(i, 2, inplace=True)
elif i in ["Regular employee - don't know about occupation",
'Regular employee - managerial position',
'Regular employee - non-management',
'Regular employee - professional (nurse, teacher, etc.)']:
selectedData['MMJBTP15'].replace(i, 3, inplace=True)
elif i in ["Don't know", 'No answer']:
selectedData['MMJBTP15'].replace(i, 0, inplace=True)
else:
selectedData['MMJBTP15'].replace(i, 4, inplace=True)
for i in np.unique(selectedData['XXLSTSCH']):
if i in ['Ordinary elementary school in the old system',
'Higher elementary school in the old system']:
selectedData['XXLSTSCH'].replace(i, 1, inplace=True)
elif i in ["Junior high school/Girls' high school in the old system",
'Vocational school/Commerce school in the old system',
'Normal school in the old system',
'Higher school or vocational school in the old system',
'Junior high school',
'High school']:
selectedData['XXLSTSCH'].replace(i, 2, inplace=True)
elif i in ['No answer', "Don't know"]:
selectedData['XXLSTSCH'].replace(i, 0, inplace=True)
else:
selectedData['XXLSTSCH'].replace(i, 3, inplace=True)
for i in np.unique(selectedData['SSLSTSCH']):
if i in ['Ordinary elementary school in the old system',
'Higher elementary school in the old system']:
selectedData['SSLSTSCH'].replace(i, 1, inplace=True)
elif i in ["Junior high school/Girls' high school in the old system",
'Vocational school/Commerce school in the old system',
'Normal school in the old system',
'Higher school or vocational school in the old system',
'Junior high school',
'High school']:
selectedData['SSLSTSCH'].replace(i, 2, inplace=True)
elif i in ['No answer', "Don't know", 'Never-married/Divorced']:
selectedData['SSLSTSCH'].replace(i, 0, inplace=True)
else:
selectedData['SSLSTSCH'].replace(i, 3, inplace=True)
for i in np.unique(selectedData['PPLSTSCH']):
if i in ['Ordinary elementary school in the old system',
'Higher elementary school in the old system']:
selectedData['PPLSTSCH'].replace(i, 1, inplace=True)
elif i in ["Junior high school/Girls' high school in the old system",
'Vocational school/Commerce school in the old system',
'Normal school in the old system',
'Higher school or vocational school in the old system',
'Junior high school',
'High school']:
selectedData['PPLSTSCH'].replace(i, 2, inplace=True)
elif i in ['No answer', "Don't know"]:
selectedData['PPLSTSCH'].replace(i, 0, inplace=True)
else:
selectedData['PPLSTSCH'].replace(i, 3, inplace=True)
selectedData['DOLSTSCH'].replace('Graduated', 1, inplace=True)
selectedData['DOLSTSCH'].replace('Quit', 2, inplace=True)
selectedData['DOLSTSCH'].replace('Still a student', 3, inplace=True)
selectedData['XSPSCH'].replace('Yes', 1, inplace=True)
selectedData['XSPSCH'].replace('No', 2, inplace=True)
# drop those terms correlated to marriage status
selectedData.drop('SSJB1WK', axis=1, inplace=True)
selectedData.drop('SPAGEX', axis=1, inplace=True)
selectedData.drop('SPLVTG', axis=1, inplace=True)
selectedData.drop('INCSP', axis=1, inplace=True)
selectedData.drop('SSLSTSCH', axis=1, inplace=True)
selectedData.drop('XSSNBROY', axis=1, inplace=True)
selectedData.drop('XSSNSISY', axis=1, inplace=True)
if dropColumns == True:
selectedData.drop('CCNUMTTL', axis=1, inplace=True)
selectedData.drop('CC01SEX', axis=1, inplace=True)
selectedData.drop('CC01AGE', axis=1, inplace=True)
selectedData.drop('SZFFOTHR', axis=1, inplace=True)
selectedData.drop('SZFFONLY', axis=1, inplace=True)
selectedData.drop('SZFFTTL', axis=1, inplace=True)
selectedData.drop('FFHEAD', axis=1, inplace=True)
selectedData.replace('No answer', 0, inplace=True)
selectedData.replace('Not applicable', 0, inplace=True)
# observation cleaning
observation = np.array(selectedData['MARC'])
cleanedObservation = np.zeros(len(observation))
if classNum == 2:
for i in range(len(observation)):
if observation[i] == 'Currently married':
cleanedObservation[i] = 1
else:
cleanedObservation[i] = 2
elif classNum == 3:
for i in range(len(observation)):
if observation[i] == 'Currently married':
cleanedObservation[i] = 1
elif observation[i] == 'Divorced':
cleanedObservation[i] = 2
else:
cleanedObservation[i] = 3
else:
for i in range(len(observation)):
if observation[i] == 'Currently married':
cleanedObservation[i] = 1
elif observation[i] == 'Divorced':
cleanedObservation[i] = 2
elif observation[i] == | |
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id_1, 1, self.author_id_1,
self.add_translation_change_dict, 'test description')
# Assert that there is one translation suggestion with the given
# exploration id found.
self.assertEqual(
len(
suggestion_services
.get_translation_suggestion_ids_with_exp_ids(
[self.target_id_1])), 1)
def test_get_translation_suggestion_ids_with_exp_ids_with_multiple_exps(
self):
# Create the translation suggestion associated with exploration id
# target_id_2.
with self.swap(
exp_fetchers, 'get_exploration_by_id',
self.mock_get_exploration_by_id):
with self.swap(
exp_domain.Exploration, 'get_content_html',
self.MockExploration.get_content_html):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id_2, 1, self.author_id_1,
self.add_translation_change_dict, 'test description')
# Create the translation suggestion associated with exploration id
# target_id_3.
with self.swap(
exp_fetchers, 'get_exploration_by_id',
self.mock_get_exploration_by_id):
with self.swap(
exp_domain.Exploration, 'get_content_html',
self.MockExploration.get_content_html):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id_3, 1, self.author_id_1,
self.add_translation_change_dict, 'test description')
# Assert that there are two translation suggestions with the given
# exploration ids found.
self.assertEqual(
len(
suggestion_services
.get_translation_suggestion_ids_with_exp_ids(
[self.target_id_2, self.target_id_3])), 2)
def test_get_translation_suggestion_ids_with_exp_ids_with_invalid_exp(
self):
# Assert that there are no translation suggestions with an invalid
# exploration id found.
self.assertEqual(
len(
suggestion_services
.get_translation_suggestion_ids_with_exp_ids(
['invalid_exp_id'])), 0)
def test_get_translation_suggestion_ids_with_exp_ids_with_empty_exp_list(
self):
# Assert that there are no translation suggestions found when we
# use an empty exp_ids list.
self.assertEqual(
len(
suggestion_services
.get_translation_suggestion_ids_with_exp_ids([])), 0)
def test_get_translation_suggestions_waiting_longest_for_review_per_lang(
self):
suggestion_1 = self._create_translation_suggestion_with_language_code(
'hi')
suggestion_2 = self._create_translation_suggestion_with_language_code(
'hi')
suggestion_3 = self._create_translation_suggestion_with_language_code(
'hi')
suggestions = (
suggestion_services
.get_translation_suggestions_waiting_longest_for_review(
'hi'))
# Assert that the suggestions are in the order that they were created.
self.assertEqual(len(suggestions), 3)
self.assertEqual(
suggestions[0].suggestion_id, suggestion_1.suggestion_id)
self.assertEqual(
suggestions[1].suggestion_id, suggestion_2.suggestion_id)
self.assertEqual(
suggestions[2].suggestion_id, suggestion_3.suggestion_id)
for i in python_utils.RANGE(len(suggestions) - 1):
self.assertLess(
suggestions[i].last_updated, suggestions[i + 1].last_updated)
def test_get_translation_suggestions_waiting_longest_for_review_wrong_lang(
self):
suggestions = (
suggestion_services
.get_translation_suggestions_waiting_longest_for_review(
'wrong_language_code'))
self.assertEqual(len(suggestions), 0)
def test_get_question_suggestions_waiting_longest_for_review_keeps_order(
self):
"""This test makes sure that if a suggestion is rejected and is then
resubmitted, we count the time that the suggestion has been waiting for
review from when it was resubmitted, not from when it was first
submitted.
"""
suggestion_1 = self._create_question_suggestion_with_skill_id('skill1')
suggestion_2 = self._create_question_suggestion_with_skill_id('skill2')
# Verify that both suggestions are returned and in the right order.
suggestions = (
suggestion_services
.get_question_suggestions_waiting_longest_for_review()
)
self.assertEqual(len(suggestions), 2)
self.assertEqual(
suggestions[0].suggestion_id, suggestion_1.suggestion_id)
self.assertEqual(
suggestions[1].suggestion_id, suggestion_2.suggestion_id)
self.assertLess(
suggestions[0].last_updated, suggestions[1].last_updated)
# Reject the suggestion that was created first since it is the one that
# has been waiting the longest for review.
suggestion_services.reject_suggestion(
suggestion_1.suggestion_id, self.reviewer_id_1, 'Reject message')
# Verify that only the suggestion that was created second is returned.
suggestions = (
suggestion_services
.get_question_suggestions_waiting_longest_for_review()
)
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].suggestion_id, suggestion_2.suggestion_id)
# Change the question_dict of the question suggestion that got rejected
# so we can resubmit the suggestion for review.
resubmit_question_change = suggestion_1.change
resubmit_question_change.question_dict['linked_skill_ids'] = ['skill1']
# Resubmit the rejected question suggestion.
suggestion_services.resubmit_rejected_suggestion(
suggestion_1.suggestion_id, 'resubmit summary message',
self.author_id_1, resubmit_question_change
)
# Verify that both suggestions are returned again and the suggestion
# that was created second is now the first suggestion in the returned
# list, since it has been waiting longer (due to it not being updated).
suggestions = (
suggestion_services
.get_question_suggestions_waiting_longest_for_review()
)
self.assertEqual(len(suggestions), 2)
self.assertEqual(
suggestions[0].suggestion_id, suggestion_2.suggestion_id)
self.assertEqual(
suggestions[1].suggestion_id, suggestion_1.suggestion_id)
self.assertLess(
suggestions[0].last_updated, suggestions[1].last_updated)
def test_get_question_suggestions_waiting_longest_for_review(self):
suggestion_1 = self._create_question_suggestion_with_skill_id('skill1')
suggestion_2 = self._create_question_suggestion_with_skill_id('skill2')
suggestion_3 = self._create_question_suggestion_with_skill_id('skill3')
suggestions = (
suggestion_services
.get_question_suggestions_waiting_longest_for_review()
)
# Assert that the suggestions are in the order that they were created.
self.assertEqual(len(suggestions), 3)
self.assertEqual(
suggestions[0].suggestion_id, suggestion_1.suggestion_id)
self.assertEqual(
suggestions[1].suggestion_id, suggestion_2.suggestion_id)
self.assertEqual(
suggestions[2].suggestion_id, suggestion_3.suggestion_id)
for i in python_utils.RANGE(len(suggestions) - 1):
self.assertLess(
suggestions[i].last_updated, suggestions[i + 1].last_updated)
def test_query_suggestions_that_can_be_reviewed_by_user(self):
# User proficiency models for user1.
user_models.UserContributionProficiencyModel.create(
'user1', 'category1', 15)
user_models.UserContributionProficiencyModel.create(
'user1', 'category2', 15)
user_models.UserContributionProficiencyModel.create(
'user1', 'category3', 5)
# User proficiency models for user2.
user_models.UserContributionProficiencyModel.create(
'user2', 'category1', 5)
user_models.UserContributionProficiencyModel.create(
'user2', 'category2', 5)
user_models.UserContributionProficiencyModel.create(
'user2', 'category3', 5)
suggestion_models.GeneralSuggestionModel.create(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
'exp1', 1, suggestion_models.STATUS_IN_REVIEW, 'author_3',
'reviewer_2', self.change, 'category1',
'exploration.exp1.thread_1', None)
suggestion_models.GeneralSuggestionModel.create(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION, 'exp1', 1,
suggestion_models.STATUS_IN_REVIEW, 'author_3',
'reviewer_2', self.change, 'category2',
'exploration.exp1.thread_2', None)
suggestion_models.GeneralSuggestionModel.create(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION, 'exp1', 1,
suggestion_models.STATUS_IN_REVIEW, 'author_3',
'reviewer_2', self.change, 'category3',
'exploration.exp1.thread_3', None)
# This suggestion does not count as a suggestion that can be reviewed
# by a user because it has already been rejected.
suggestion_models.GeneralSuggestionModel.create(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION, 'exp1', 1,
suggestion_models.STATUS_REJECTED, 'author_3',
'reviewer_2', self.change, 'category1',
'exploration.exp1.thread_4', None)
suggestion_models.GeneralSuggestionModel.create(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION, 'exp1', 1,
suggestion_models.STATUS_IN_REVIEW, 'author_3',
'reviewer_2', self.change, 'category2',
'exploration.exp1.thread_5', None)
self.assertEqual(len(
suggestion_services
.get_all_suggestions_that_can_be_reviewed_by_user('user1')), 3)
self.assertEqual(len(
suggestion_services
.get_all_suggestions_that_can_be_reviewed_by_user('user2')), 0)
class SuggestionIntegrationTests(test_utils.GenericTestBase):
EXP_ID = 'exp1'
TOPIC_ID = 'topic1'
STORY_ID = 'story1'
TRANSLATION_LANGUAGE_CODE = 'en'
AUTHOR_EMAIL = '<EMAIL>'
score_category = (
suggestion_models.SCORE_TYPE_CONTENT +
suggestion_models.SCORE_CATEGORY_DELIMITER + 'Algebra')
THREAD_ID = 'exploration.exp1.thread_1'
COMMIT_MESSAGE = 'commit message'
def mock_generate_new_thread_id(self, unused_entity_type, unused_entity_id):
return self.THREAD_ID
def setUp(self):
super(SuggestionIntegrationTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.AUTHOR_EMAIL, 'author')
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.reviewer_id = self.editor_id
self.editor = user_services.UserActionsInfo(self.editor_id)
# Login and create exploration and suggestions.
self.login(self.EDITOR_EMAIL)
# Create exploration.
exploration = (
self.save_new_linear_exp_with_state_names_and_interactions(
self.EXP_ID, self.editor_id, ['State 1', 'State 2'],
['TextInput'], category='Algebra'))
self.old_content = state_domain.SubtitledHtml(
'content', '<p>old content</p>').to_dict()
recorded_voiceovers_dict = {
'voiceovers_mapping': {
'content': {
self.TRANSLATION_LANGUAGE_CODE: {
'filename': 'filename3.mp3',
'file_size_bytes': 3000,
'needs_update': False,
'duration_secs': 42.43
}
},
'default_outcome': {},
'ca_placeholder_0': {}
}
}
self.old_recorded_voiceovers = (
state_domain.RecordedVoiceovers.from_dict(recorded_voiceovers_dict))
# Create content in State A with a single audio subtitle.
exploration.states['State 1'].update_content(
state_domain.SubtitledHtml.from_dict(self.old_content))
exploration.states['State 1'].update_recorded_voiceovers(
self.old_recorded_voiceovers)
exp_services._save_exploration(self.editor_id, exploration, '', []) # pylint: disable=protected-access
rights_manager.publish_exploration(self.editor, self.EXP_ID)
rights_manager.assign_role_for_exploration(
self.editor, self.EXP_ID, self.owner_id,
rights_domain.ROLE_EDITOR)
self.new_content = state_domain.SubtitledHtml(
'content', '<p>new content</p>').to_dict()
self.change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 1',
'new_value': self.new_content
}
self.target_version_at_submission = exploration.version
# Set up for testing translation suggestions. Translation suggestions
# correspond to a given topic, story and exploration.
self.save_new_topic(self.TOPIC_ID, self.owner_id)
self.save_new_story(
self.STORY_ID, self.owner_id, self.TOPIC_ID, title='A story',
description='Description', notes='Notes')
# Adds the story to the topic.
topic_services.add_canonical_story(
self.owner_id, self.TOPIC_ID, self.STORY_ID)
# Adds the exploration to the story.
story_change_list_to_add_an_exp = [
story_domain.StoryChange({
'cmd': 'add_story_node',
'node_id': 'node_1',
'title': 'Node1',
}), story_domain.StoryChange({
'cmd': 'update_story_node_property',
'property_name': 'exploration_id',
'node_id': 'node_1',
'old_value': None,
'new_value': self.EXP_ID
})
]
story_services.update_story(
self.owner_id, self.STORY_ID,
story_change_list_to_add_an_exp, 'Added exploration.')
def create_translation_suggestion_associated_with_exp(
self, exp_id, author_id):
"""Creates a translation suggestion that is associated with an
exploration with id exp_id. The author of the created suggestion is
author_id.
"""
# Gets the html content in the exploration to be translated.
exploration = exp_fetchers.get_exploration_by_id(exp_id)
content_html = exploration.states['State 1'].content.html
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': 'State 1',
'content_id': 'content',
'language_code': 'hi',
'content_html': content_html,
'translation_html': '<p>This is translated html.</p>'
}
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
exp_id, 1, author_id, add_translation_change_dict,
'test description')
def assert_created_suggestion_is_valid(self, target_id, author_id):
"""Assert that the created suggestion is in review and that only one
suggestion with the given target_id and author_id exists.
"""
suggestions = suggestion_services.query_suggestions(
[('author_id', author_id), ('target_id', target_id)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_IN_REVIEW)
def test_create_and_accept_suggestion(self):
with self.swap(
feedback_models.GeneralFeedbackThreadModel,
'generate_new_thread_id', self.mock_generate_new_thread_id):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.EXP_ID, self.target_version_at_submission,
self.author_id, self.change, 'test description')
suggestion_id = self.THREAD_ID
suggestion_services.accept_suggestion(
suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE, None)
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(
exploration.states['State 1'].content.html,
'<p>new content</p>')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
self.assertEqual(suggestion.status, suggestion_models.STATUS_ACCEPTED)
def test_create_and_reject_suggestion(self):
with self.swap(
feedback_models.GeneralFeedbackThreadModel,
'generate_new_thread_id', self.mock_generate_new_thread_id):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.EXP_ID, self.target_version_at_submission,
self.author_id, self.change, 'test description')
suggestion_id = self.THREAD_ID
suggestion_services.reject_suggestion(
suggestion_id, self.reviewer_id, 'Reject message')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
thread_messages = feedback_services.get_messages(self.THREAD_ID)
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(
last_message.text, 'Reject message')
self.assertEqual(
exploration.states['State 1'].content.html,
'<p>old content</p>')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
self.assertEqual(suggestion.status, suggestion_models.STATUS_REJECTED)
def test_create_and_accept_suggestion_with_message(self):
with self.swap(
feedback_models.GeneralFeedbackThreadModel,
'generate_new_thread_id', self.mock_generate_new_thread_id):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.EXP_ID, self.target_version_at_submission,
self.author_id, self.change, 'test description')
suggestion_id = self.THREAD_ID
suggestion_services.accept_suggestion(
suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
'Accept message')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
thread_messages = feedback_services.get_messages(self.THREAD_ID)
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(
last_message.text, 'Accept message')
self.assertEqual(
exploration.states['State 1'].content.html,
'<p>new content</p>')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
self.assertEqual(suggestion.status, suggestion_models.STATUS_ACCEPTED)
def test_delete_skill_rejects_question_suggestion(self):
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion_change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': skill_id,
'skill_difficulty': 0.3
}
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
suggestion_models.TARGET_TYPE_SKILL, skill_id, 1,
self.author_id, suggestion_change, 'test description')
self.assert_created_suggestion_is_valid(skill_id, self.author_id)
skill_services.delete_skill(self.author_id, skill_id)
# Suggestion should be rejected after corresponding skill is deleted.
suggestions = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', skill_id)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_REJECTED)
def test_delete_topic_rejects_translation_suggestion(self):
self.create_translation_suggestion_associated_with_exp(
self.EXP_ID, self.author_id)
self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id)
topic_services.delete_topic(self.author_id, self.TOPIC_ID)
# Suggestion should be rejected after the topic is deleted.
suggestions = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_REJECTED)
def test_delete_story_rejects_translation_suggestion(self):
self.create_translation_suggestion_associated_with_exp(
self.EXP_ID, self.author_id)
self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id)
story_services.delete_story(self.author_id, self.STORY_ID)
# Suggestion should be rejected after the story is deleted.
suggestions = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_REJECTED)
def test_remove_exp_from_story_rejects_translation_suggestion(self):
self.create_translation_suggestion_associated_with_exp(
self.EXP_ID, self.author_id)
self.assert_created_suggestion_is_valid(self.EXP_ID, self.author_id)
# Removes the exploration from the story.
story_services.update_story(
self.owner_id, self.STORY_ID, [story_domain.StoryChange({
'cmd': 'update_story_node_property',
| |
+ "=" + str(value) + ' ' + state, "vm-alter")[2]
def appdrsalter(self, cloud_name, identifier, attribute, value):
return self.passive.alter_object({}, cloud_name + ' ' + identifier + ' ' + attribute + "=" + str(value), "aidrs-alter")[2]
def vmcalter(self, cloud_name, identifier, attribute, value) :
return self.passive.alter_object({}, cloud_name + ' ' + identifier + ' ' + attribute + "=" + str(value), "vmc-alter")[2]
def hostalter(self, cloud_name, identifier, attribute, value):
return self.passive.alter_object({}, cloud_name + ' ' + identifier + ' ' + attribute + "=" + str(value), "host-alter")[2]
def vmcrsalter(self, cloud_name, identifier, attribute, value):
return self.passive.alter_object({}, cloud_name + ' ' + identifier + ' ' + attribute + "=" + str(value), "vmcrs-alter")[2]
def firsalter(self, cloud_name, identifier, attribute, value):
return self.passive.alter_object({}, cloud_name + ' ' + identifier + ' ' + attribute + "=" + str(value), "firs-alter")[2]
def vmcattach(self, cloud_name, identifier, temp_attr_list = "empty=empty", async = False) :
if async and str(async).count("async") :
if identifier == "all" :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + temp_attr_list + (' ' + async), "vmc-attachall")[2]
else :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + temp_attr_list + (' ' + async), "vmc-attach")[2]
else :
if identifier == "all" :
return self.active.vmcattachall({}, cloud_name + ' ' + identifier + ' ' + temp_attr_list, "vmc-attachall")[2]
else :
return self.active.objattach({}, cloud_name + ' ' + identifier + ' ' + temp_attr_list, "vmc-attach")[2]
def vmcrsattach(self, cloud_name, identifier, scope = '', max_simultaneous_cap_reqs = '', max_total_cap_reqs = '', ivmcat = '', min_cap_age = '', temp_attr_list = "empty=empty", async = False):
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + scope + ' ' + max_simultaneous_cap_reqs + ' ' + max_total_cap_reqs + ' ' + ivmcat + ' ' + min_cap_age + ' ' + temp_attr_list + (' ' + async), "vmcrs-attach")[2]
else :
return self.active.objattach({}, cloud_name + ' ' + identifier + ' ' + scope + ' ' + max_simultaneous_cap_reqs + ' ' + max_total_cap_reqs + ' ' + ivmcat + ' ' + min_cap_age + ' ' + temp_attr_list, "vmcrs-attach")[2]
def firsattach(self, cloud_name, identifier, scope = '', max_simultaenous_faults = '', max_total_faults = '', ifat = '', min_fault_age = '', ftl = '', temp_attr_list = "empty=empty", async = False):
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + scope + ' ' + max_simultaenous_faults + ' ' + max_total_faults + ' ' + ifat + ' ' + min_fault_age + ' ' + ftl + ' ' + temp_attr_list + (' ' + async), "firs-attach")[2]
else :
return self.active.objattach({}, cloud_name + ' ' + identifier + ' ' + scope + ' ' + max_simultaenous_faults + ' ' + max_total_faults + ' ' + ifat + ' ' + min_fault_age + ' ' + ftl + ' ' + temp_attr_list, "firs-attach")[2]
def appattach(self, cloud_name, gtype, load_level = "default", load_duration = "default", lifetime = "none", aidrs = "none", pause_step = "none", temp_attr_list = "empty=empty", async = False):
parameters = cloud_name + ' ' + gtype + ' ' + str(load_level) + ' ' + str(load_duration) + ' ' + str(lifetime) + ' ' + aidrs + ' ' + pause_step + ' ' + temp_attr_list
if async :
async=str(async)
async=async.replace("async",'')
async=async.replace('=','')
if str(async.split(':')[0]).isdigit() :
_res = self.active.background_execute(parameters + (" async=" + str(async)), "ai-attach")[2]
else :
_res = self.active.background_execute(parameters + (" async"), "ai-attach")[2]
# This is hacky, but in order for an asynchronous attach to appear, introduce a delay between when the attach starts
# and when a user can safely issue `applist pending`, in order for the pending object to actually show up.
# We need a better fix for this later to ensure that the pending object is registered before the API command returns.
sleep(10)
return _res
else :
return self.active.objattach({}, parameters, "ai-attach")[2]
def appinit(self, cloud_name, gtype, load_level = "default", load_duration = "default", lifetime = "none", aidrs = "none", pause_step = "prepare_provision_complete"):
return self.appattach(cloud_name, gtype, str(load_level), str(load_duration), str(lifetime), aidrs, pause_step)
def apprun(self, cloud_name, uuid) :
return self.apprunstate(cloud_name, uuid, "attached", "run")
def appdrsattach(self, cloud_name, pattern, temp_attr_list = "empty=empty", async = False):
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + pattern + ' ' + temp_attr_list + (' ' + async), "aidrs-attach")[2]
else :
return self.active.objattach({}, cloud_name + ' ' + pattern + ' ' + temp_attr_list, "aidrs-attach")[2]
def vmattach(self, cloud_name, role, vm_location = "auto", meta_tags = "empty", size = "default", pause_step = "none", temp_attr_list = "empty=empty", async = False):
parameters = cloud_name + ' ' + role + ' ' + vm_location + ' ' + meta_tags + ' ' + size + ' ' + pause_step + ' ' + temp_attr_list
if async :
async=str(async)
async=async.replace("async",'')
async=async.replace('=','')
if str(async.split(':')[0]).isdigit() :
return self.active.background_execute(parameters + (" async=" + str(async)), "vm-attach")[2]
else :
return self.active.background_execute(parameters + (" async"), "vm-attach")[2]
else :
return self.active.objattach({}, parameters, "vm-attach")[2]
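# Illustrative usage sketch (the cloud name and VM role below are hypothetical placeholders;
# `api` stands for an instance of this API class):
#   api.vmattach("MYCLOUD", "tinyvm")                      # blocking attach
#   api.vmattach("MYCLOUD", "tinyvm", async = "async")     # returns immediately, attach continues in the background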
def vminit(self, cloud_name, role, vmc_pool = "auto", size = "default", pause_step = "prepare_provision_complete"):
return self.vmattach(cloud_name, role, vmc_pool, size, pause_step)
def vmrun(self, cloud_name, uuid):
return self.vmrunstate(cloud_name, uuid, "attached", "run")
def vmdetach(self, cloud_name, identifier, force = False, async = False):
force = str(force).lower() if force else "false"
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + force + (' ' + async), "vm-detach")[2]
else :
return self.active.objdetach({}, cloud_name + ' ' + identifier + ' ' + force, "vm-detach")[2]
def vmcdetach(self, cloud_name, identifier, force = False, async = False):
force = str(force).lower() if force else "false"
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + force + (' ' + async), "vmc-detach")[2]
else :
return self.active.objdetach({}, cloud_name + ' ' + identifier + ' ' + force, "vmc-detach")[2]
def vmccleanup(self, cloud_name, identifier) :
return self.active.vmccleanup({}, cloud_name + ' ' + identifier, "vmc-cleanup")[2]
def imgdelete(self, cloud_name, identifier, vmc, force = False) :
force = str(force).lower() if force else "false"
return self.active.imgdelete({}, cloud_name + ' ' + identifier + ' ' + vmc + ' ' + force, "img-delete")[2]
def vmcrsdetach(self, cloud_name, identifier, force = False, async = False):
force = str(force).lower() if force else "false"
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + force + (' ' + async), "vmcrs-detach")[2]
else :
return self.active.objdetach({}, cloud_name + ' ' + identifier + ' ' + force, "vmcrs-detach")[2]
def firsdetach(self, cloud_name, identifier, force = False, async = False):
force = str(force).lower() if force else "false"
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + force + (' ' + async), "firs-detach")[2]
else :
return self.active.objdetach({}, cloud_name + ' ' + identifier + ' ' + force, "firs-detach")[2]
def appdetach(self, cloud_name, identifier, force = False, async = False):
force = str(force).lower() if force else "false"
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + force + (' ' + async), "ai-detach")[2]
else :
return self.active.objdetach({}, cloud_name + ' ' + identifier + ' ' + force, "ai-detach")[2]
def appdrsdetach(self, cloud_name, identifier, force = False, async = False):
force = str(force).lower() if force else "false"
if async and str(async).count("async") :
return self.active.background_execute(cloud_name + ' ' + identifier + ' ' + force + (' ' + async), "aidrs-detach")[2]
else :
return self.active.objdetach({}, cloud_name + ' ' + identifier + ' ' + force, "aidrs-detach")[2]
def monlist(self, cloud_name, object_type):
return self.passive.monitoring_list(cloud_name + ' ' + object_type, "mon-list")[2]
def monextract(self, cloud_name, object_type, metric_type, expid = "current"):
if str(object_type).lower() == "all" :
return self.passive.monitoring_extractall(cloud_name + ' ' + object_type + ' ' + metric_type + ' ' + expid, "mon-extract")[2]
else | |
<reponame>seanandrews/DSHARP_CPDs<filename>CSD_modeling/geom_tests/reduction_utils.py
"""
Functions useful for data reduction
"""
import os
import numpy as np
import matplotlib.pyplot as plt
def LSRKvel_to_chan(msfile, field, spw, restfreq, LSRKvelocity):
"""
Identifies the channel(s) corresponding to input LSRK velocities.
Useful for choosing which channels to split out or flag if a line is expected to be present
Parameters
==========
msfile: Name of measurement set (string)
spw: Spectral window number (int)
obsid: Observation ID corresponding to the selected spectral window
restfreq: Rest frequency in Hz (float)
LSRKvelocity: input velocity in LSRK frame in km/s (float or array of floats)
Returns
=======
Channel number most closely corresponding to input LSRK velocity
"""
cc = 299792458. #speed of light in m/s
tb.open(msfile)
spw_col = tb.getcol('DATA_DESC_ID')
obs_col = tb.getcol('OBSERVATION_ID')
tb.close()
obsid = np.unique(obs_col[np.where(spw_col==spw)])
tb.open(msfile+'/SPECTRAL_WINDOW')
chanfreqs = tb.getcol('CHAN_FREQ', startrow = spw, nrow = 1)
tb.close()
tb.open(msfile+'/FIELD')
fieldnames = tb.getcol('NAME')
tb.close()
tb.open(msfile+'/OBSERVATION')
obstime = np.squeeze(tb.getcol('TIME_RANGE', startrow = obsid, nrow = 1))[0]
tb.close()
nchan = len(chanfreqs)
ms.open(msfile)
lsrkfreqs = ms.cvelfreqs(spwids = [spw], fieldids = np.where(fieldnames==field)[0][0], mode = 'channel', nchan = nchan, obstime = str(obstime)+'s', start = 0, outframe = 'LSRK')
chanvelocities = (restfreq-lsrkfreqs)/restfreq*cc/1.e3 #converted to LSRK velocities in km/s
ms.close()
if type(LSRKvelocity)==np.ndarray:
outchans = np.zeros_like(LSRKvelocity)
for i in range(len(LSRKvelocity)):
outchans[i] = np.argmin(np.abs(chanvelocities - LSRKvelocity[i]))
return outchans
else:
return np.argmin(np.abs(chanvelocities - LSRKvelocity))
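# Illustrative example (a sketch only; the measurement set, field name, SPW index and rest
# frequency below are hypothetical, and the call assumes a CASA session where the tb/ms tools
# are available):
#   chan = LSRKvel_to_chan('target_SB1.ms', 'MyDisk', 8, 2.30538e11, 5.0)
#   chans = LSRKvel_to_chan('target_SB1.ms', 'MyDisk', 8, 2.30538e11, np.array([-5.0, 20.0]))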
def get_flagchannels(ms_dict, output_prefix, velocity_range = np.array([-20,20])):
"""
Identify channels to flag based on provided velocity range of the line emission
Parameters
==========
ms_dict: Dictionary of information about measurement set
output_prefix: Prefix for all output file names (string)
velocity_range: Velocity range (in km/s) over which line emission has been identified, in the format np.array([min_velocity, max_velocity])
Returns
=======
String of channels to be flagged, in a format that can be passed to the spw parameter in CASA's flagdata task.
"""
flagchannels_string = ''
for j,spw in enumerate(ms_dict['line_spws']):
chans = LSRKvel_to_chan(ms_dict['vis'], ms_dict['field'], spw, ms_dict['line_freqs'][j] , velocity_range)
if j==0:
flagchannels_string+='%d:%d~%d' % (spw, np.min([chans[0], chans[1]]), np.max([chans[0], chans[1]]))
else:
flagchannels_string+=', %d:%d~%d' % (spw, np.min([chans[0], chans[1]]), np.max([chans[0], chans[1]]))
print "# Flagchannels input string for %s: \'%s\'" % (ms_dict['name'], flagchannels_string)
return flagchannels_string
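# Illustrative example (a sketch only; the ms_dict values are hypothetical placeholders that
# follow the keys this function expects):
#   SB1 = {'vis': 'target_SB1.ms', 'name': 'SB1', 'field': 'MyDisk',
#          'line_spws': np.array([0]), 'line_freqs': np.array([2.30538e11])}
#   flagchannels_string = get_flagchannels(SB1, 'MyDisk', velocity_range=np.array([-5.0, 20.0]))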
def avg_cont(ms_dict, output_prefix, flagchannels = '', maxchanwidth = 125, datacolumn = 'data', contspws = None, width_array = None):
"""
Produce spectrally averaged continuum measurement sets
Parameters
==========
ms_dict: Dictionary of information about measurement set
output_prefix: Prefix for all output file names (string)
flagchannels: Argument to be passed for flagchannels parameter in flagdata task
maxchanwidth: Maximum width of channel (MHz). This is the value recommended by ALMA for Band 6 to avoid bandwidth smearing
datacolumn: Column to pull from for continuum averaging (usually will be 'data', but may sometimes be 'corrected' if there was flux rescaling applied)
contspws: Argument to be passed to CASA for the spw parameter in split. If not set, all SPWs will be selected by default. (string)
width_array: Argument to be passed to CASA for the width parameter in split. If not set, all SPWs will be selected by default. (array)
"""
msfile = ms_dict['vis']
tb.open(msfile+'/SPECTRAL_WINDOW')
total_bw = tb.getcol('TOTAL_BANDWIDTH')
num_chan = tb.getcol('NUM_CHAN')
tb.close()
if width_array is None and contspws is None:
width_array = (num_chan/np.ceil(total_bw/(1.e6*maxchanwidth))).astype('int').tolist() #array of number of channels to average to form an output channel (to be passed to mstransform)
contspws = '%d~%d' % (0, len(total_bw)-1)#by default select all SPWs
elif (width_array is not None and contspws is None) or (width_array is None and contspws is not None):
print "If either contspws or width_array is set to a value, the other parameter has to be manually set as well"
return
if ms_dict['name']=='LB1':
timebin = '6s'
else:
timebin = '0s' #default in CASA
#start of CASA commands
if len(flagchannels)==0:
outputvis = output_prefix+'_'+ms_dict['name']+'_initcont.ms'
os.system('rm -rf '+outputvis)
split(vis=msfile,
field = ms_dict['field'],
spw = contspws,
outputvis = outputvis,
width = width_array,
timebin = timebin,
datacolumn=datacolumn,
intent = 'OBSERVE_TARGET#ON_SOURCE',
keepflags = False)
else:
if os.path.isdir(msfile+'.flagversions/flags.before_cont_flags'):
flagmanager(vis = msfile, mode = 'delete', versionname = 'before_cont_flags') # clear out old versions of the flags
flagmanager(vis = msfile, mode = 'save', versionname = 'before_cont_flags', comment = 'Flag states before spectral lines are flagged') #save flag state before flagging spectral lines
flagdata(vis=msfile, mode='manual', spw=flagchannels, flagbackup=False, field = ms_dict['field']) #flag spectral lines
outputvis = output_prefix+'_'+ms_dict['name']+'_initcont.ms'
os.system('rm -rf '+outputvis)
split(vis=msfile,
field = ms_dict['field'],
spw = contspws,
outputvis = outputvis,
width = width_array,
timebin = timebin,
datacolumn=datacolumn,
intent = 'OBSERVE_TARGET#ON_SOURCE',
keepflags = False)
flagmanager(vis = msfile, mode = 'restore', versionname = 'before_cont_flags') #restore flagged spectral line channels
print "#Averaged continuum dataset saved to %s" % outputvis
def tclean_wrapper(vis, imagename, scales, smallscalebias = 0.6, mask = '', threshold = '0.2mJy', imsize = None, cellsize = None, interactive = False, robust = 0.5, gain = 0.3, niter = 50000, cycleniter = 300, uvtaper = [], savemodel = 'none'):
"""
Wrapper for tclean with keywords set to values desired for the Large Program imaging
See the CASA 5.1.1 documentation for tclean to get the definitions of all the parameters
"""
if imsize is None:
if 'LB' in vis or 'combined' in vis:
imsize = 3000
elif 'SB' in vis:
imsize = 900
else:
print "Error: need to set imsize manually"
if cellsize is None:
if 'LB' in vis or 'combined' in vis:
cellsize = '.003arcsec'
elif 'SB' in vis:
cellsize = '.03arcsec'
else:
print "Error: need to set cellsize manually"
for ext in ['.image', '.mask', '.model', '.pb', '.psf', '.residual', '.sumwt']:
os.system('rm -rf '+ imagename + ext)
tclean(vis= vis,
imagename = imagename,
specmode = 'mfs',
deconvolver = 'multiscale',
scales = scales,
weighting='briggs',
robust = robust,
gain = gain,
imsize = imsize,
cell = cellsize,
smallscalebias = smallscalebias, #set to CASA's default of 0.6 unless manually changed
niter = niter, #we want to end on the threshold
interactive = interactive,
threshold = threshold,
cycleniter = cycleniter,
cyclefactor = 1,
uvtaper = uvtaper,
mask = mask,
savemodel = savemodel,
nterms = 1)
#this step is a workaround a bug in tclean that doesn't always save the model during multiscale clean. See the "Known Issues" section for CASA 5.1.1 on NRAO's website
if savemodel=='modelcolumn':
print ""
print "Running tclean a second time to save the model..."
tclean(vis= vis,
imagename = imagename,
specmode = 'mfs',
deconvolver = 'multiscale',
scales = scales,
weighting='briggs',
robust = robust,
gain = gain,
imsize = imsize,
cell = cellsize,
smallscalebias = smallscalebias, #set to CASA's default of 0.6 unless manually changed
niter = 0,
interactive = False,
threshold = threshold,
cycleniter = cycleniter,
cyclefactor = 1,
uvtaper = uvtaper,
mask = '',
savemodel = savemodel,
calcres = False,
calcpsf = False,
nterms = 1)
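# Illustrative example (a sketch only; the visibility file, mask region and threshold are
# hypothetical placeholders):
#   tclean_wrapper(vis='MyDisk_SB1_initcont.ms', imagename='MyDisk_SB1_initcont',
#                  scales=[0, 5, 30, 100], mask='circle[[450pix, 450pix], 2.0arcsec]',
#                  threshold='0.15mJy', savemodel='modelcolumn')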
def image_each_obs(ms_dict, prefix, scales, smallscalebias = 0.6, mask = '', threshold = '0.2mJy', imsize = None, cellsize = None, interactive = False, robust = 0.5, gain = 0.3, niter = 50000, cycleniter = 300):
"""
Wrapper for tclean that will loop through all the observations in a measurement set and image them individual
Parameters
==========
ms_dict: Dictionary of information about measurement set
prefix: Prefix for all output file names (string)
See the CASA 5.1.1 documentation for tclean to get the definitions of all other parameters
"""
msfile = prefix+'_'+ms_dict['name']+'_initcont.ms'
tb.open(msfile+'/OBSERVATION')
num_observations = (tb.getcol('TIME_RANGE')).shape[1] #picked an arbitrary column to count the number of observations
tb.close()
if imsize is None:
if ms_dict['name']=='LB1':
imsize = 3000
else:
imsize = 900
if cellsize is None:
if ms_dict['name']=='LB1':
cellsize = '.003arcsec'
else:
            cellsize = '.03arcsec'
#start of CASA commands
for i in range(num_observations):
observation = '%d' % i
imagename = prefix+'_'+ms_dict['name']+'_initcont_exec%s' % observation
for ext in ['.image', '.mask', '.model', '.pb', '.psf', '.residual', '.sumwt']:
os.system('rm -rf '+ imagename + ext)
tclean(vis= msfile,
imagename = imagename,
observation = observation,
specmode = 'mfs',
deconvolver = 'multiscale',
scales = scales,
weighting='briggs',
robust = robust,
gain = gain,
imsize = imsize,
cell | |
from datetime import timedelta
from sys import maxint
import unittest
from webob import Request
from sqlalchemy import null
import pecan
from draughtcraft import model
from draughtcraft.lib.units import InvalidUnitException
from draughtcraft.tests import TestModel
class TestRecipeAddition(unittest.TestCase):
def test_fermentable_ingredient(self):
addition = model.RecipeAddition()
fermentable = model.Fermentable()
addition.fermentable = fermentable
assert addition.ingredient == fermentable
def test_hop_ingredient(self):
addition = model.RecipeAddition()
hop = model.Hop()
addition.hop = hop
assert addition.ingredient == hop
def test_yeast_ingredient(self):
addition = model.RecipeAddition()
yeast = model.Yeast()
addition.yeast = yeast
assert addition.ingredient == yeast
def test_printable_amount(self):
addition = model.RecipeAddition(
amount=5,
unit='POUND'
)
assert addition.printable_amount == '5 lb'
def test_international_printable_pound_conversion(self):
pecan.core.state.request = Request.blank('/')
pecan.request.context = {'metric': True}
r = model.Recipe()
addition = model.RecipeAddition(
amount=5,
unit='POUND',
recipe=r
)
assert addition.printable_amount == '2.268 kg'
def test_international_printable_ounce_conversion(self):
pecan.core.state.request = Request.blank('/')
pecan.request.context = {'metric': True}
r = model.Recipe()
addition = model.RecipeAddition(
amount=5,
unit='OUNCE',
recipe=r
)
assert addition.printable_amount == '141.748 g'
def test_international_printable_gallon_conversion(self):
pecan.core.state.request = Request.blank('/')
pecan.request.context = {'metric': True}
r = model.Recipe()
addition = model.RecipeAddition(
amount=5,
unit='GALLON',
recipe=r
)
assert addition.printable_amount == '18.927 L'
def test_printable_hop_amount(self):
addition = model.HopAddition(
amount=0.0625, # 1 oz
unit='POUND'
)
assert addition.printable_amount == '1 oz'
addition = model.HopAddition(
amount=0,
unit='POUND'
)
assert addition.printable_amount == '0 oz'
def test_printable_metric_hop_amount(self):
pecan.core.state.request = Request.blank('/')
pecan.request.context = {'metric': True}
r = model.Recipe()
addition = model.HopAddition(
amount=0.0625, # 1 oz
unit='POUND',
recipe=r
)
assert addition.printable_amount == '28.35 g'
addition = model.HopAddition(
amount=0,
unit='POUND',
recipe=r
)
assert addition.printable_amount == '0 g'
def test_percentage(self):
recipe = model.Recipe()
a1 = model.RecipeAddition(
use='MASH',
amount=6,
unit='POUND',
fermentable=model.Fermentable()
)
a2 = model.RecipeAddition(
use='MASH',
amount=2,
unit='POUND',
fermentable=model.Fermentable()
)
a3 = model.RecipeAddition(
use='BOIL',
amount=.046875, # .75 oz
unit='POUND',
hop=model.Hop()
)
a4 = model.RecipeAddition(
use='BOIL',
amount=.015625, # .25 oz
unit='POUND',
hop=model.Hop()
)
recipe.additions = [a1, a2, a3, a4]
assert a1.percentage == .75
assert a2.percentage == .25
assert a3.percentage == .75
assert a4.percentage == .25
def test_zero_percentage(self):
recipe = model.Recipe()
a1 = model.RecipeAddition(
use='MASH',
amount=0,
unit='POUND',
fermentable=model.Fermentable()
)
a2 = model.RecipeAddition(
use='MASH',
amount=0,
unit='POUND',
fermentable=model.Fermentable()
)
recipe.additions = [a1, a2]
assert a1.percentage == 0
assert a2.percentage == 0
def test_pounds(self):
a = model.RecipeAddition(amount=1, unit='POUND')
assert a.pounds == 1
a = model.RecipeAddition(amount=16, unit='OUNCE')
assert a.pounds == 1
a = model.RecipeAddition(amount=2, unit='TEASPOON')
try:
assert a.pounds
except InvalidUnitException:
pass
else:
raise AssertionError('Teaspoons cannot be converted to pounds.')
def test_minutes(self):
a = model.RecipeAddition()
assert a.minutes == 0
a.duration = timedelta(seconds=120)
assert a.minutes == 2
def test_sortable_minutes(self):
a = model.RecipeAddition(use='FIRST WORT')
assert a.sortable_minutes == maxint
a = model.RecipeAddition(use='POST BOIL')
assert a.sortable_minutes == -1
a = model.RecipeAddition(use='FLAME-OUT')
assert a.sortable_minutes == -1
a = model.RecipeAddition(
use='BOIL', duration=timedelta(seconds=3600))
assert a.sortable_minutes == 60
def test_eta(self):
r = model.Recipe(boil_minutes=60)
a = model.HopAddition(recipe=r, duration=timedelta(seconds=3600))
assert a.eta == '0m'
a = model.HopAddition(recipe=r, duration=timedelta(seconds=1800))
assert a.eta == '30m'
a = model.HopAddition(recipe=r, duration=timedelta(seconds=0))
assert a.eta == '60m'
class TestRecipe(unittest.TestCase):
def test_recipe_components(self):
recipe = model.Recipe()
recipe.additions = [
model.RecipeAddition(
use='MASH',
fermentable=model.Fermentable()
),
model.RecipeAddition(
use='MASH',
hop=model.Hop()
),
model.RecipeAddition(
use='FIRST WORT',
hop=model.Hop()
),
model.RecipeAddition(
use='BOIL',
hop=model.Hop()
),
model.RecipeAddition(
use='POST-BOIL',
hop=model.Hop()
),
model.RecipeAddition(
use='FLAME OUT',
hop=model.Hop()
),
model.RecipeAddition(
use='PRIMARY',
yeast=model.Yeast()
),
model.RecipeAddition(
use='SECONDARY',
yeast=model.Yeast()
)
]
assert len(recipe.mash[model.Fermentable]) == 1
assert len(recipe.mash[model.Hop]) == 1
assert len(recipe.boil[model.Hop]) == 4
assert len(recipe.fermentation[model.Yeast]) == 2
def test_metric_true(self):
"""
When request.context['metric'] == True,
model.Recipe.metric == True
"""
pecan.core.state.request = Request.blank('/')
pecan.request.context = {'metric': True}
recipe = model.Recipe()
assert recipe.metric is True
def test_metric_false(self):
"""
When request.context['metric'] == False,
model.Recipe.metric == False
"""
pecan.core.state.request = Request.blank('/')
pecan.request.context = {'metric': False}
recipe = model.Recipe()
assert recipe.metric is False
def test_recipe_international_volume(self):
recipe = model.Recipe(gallons=5)
recipe.liters = 10
assert recipe.gallons == 2.6417205199999998
assert recipe.liters == 10
# 0, 0.25, 0.50, ... 99.50, 99.75, 100.00
for i in [x * 0.25 for x in range(0, 401)]:
recipe.liters = i
assert recipe.liters == i
def test_ingredient_partition(self):
recipe = model.Recipe()
recipe.additions = [
model.RecipeAddition(
use='MASH',
fermentable=model.Fermentable()
),
model.RecipeAddition(
use='MASH',
hop=model.Hop()
)
]
partitions = recipe._partition(recipe.additions)
assert len(partitions[model.Fermentable]) == 1
assert len(partitions[model.Hop]) == 1
recipe.additions = [
model.RecipeAddition(
use='FIRST WORT',
hop=model.Hop()
),
model.RecipeAddition(
use='BOIL',
hop=model.Hop()
),
model.RecipeAddition(
use='POST-BOIL',
hop=model.Hop()
),
model.RecipeAddition(
use='FLAME OUT',
hop=model.Hop()
)
]
partitions = recipe._partition(recipe.additions)
assert len(partitions[model.Hop]) == 4
recipe.additions = [
model.RecipeAddition(
use='PRIMARY',
yeast=model.Yeast()
),
model.RecipeAddition(
use='SECONDARY',
yeast=model.Yeast()
)
]
partitions = recipe._partition(recipe.additions)
assert len(partitions[model.Yeast]) == 2
def test_ingredient_percent(self):
recipe = model.Recipe()
a1 = model.RecipeAddition(
use='MASH',
amount=6,
unit='POUND',
fermentable=model.Fermentable()
)
a2 = model.RecipeAddition(
use='MASH',
amount=2,
unit='POUND',
fermentable=model.Fermentable()
)
a3 = model.RecipeAddition(
use='MASH',
amount=.046875, # .75 oz
unit='POUND',
hop=model.Hop()
)
a4 = model.RecipeAddition(
use='MASH',
amount=.015625, # .25 oz
unit='POUND',
hop=model.Hop()
)
percent = recipe._percent({
'Fermentable': [a1, a2],
'Hop': [a3, a4]
})
assert percent[a1] == .75
assert percent[a2] == .25
assert percent[a3] == .75
assert percent[a4] == .25
def test_recipe_contains(self):
recipe = model.Recipe()
f1 = model.Fermentable()
h1 = model.Hop()
h2 = model.Hop()
h3 = model.Hop()
h4 = model.Hop()
h5 = model.Hop()
y1 = model.Yeast()
y2 = model.Yeast()
recipe.additions = [
model.RecipeAddition(
use='MASH',
fermentable=f1
),
model.RecipeAddition(
use='MASH',
hop=h1
),
model.RecipeAddition(
use='FIRST WORT',
hop=h2
),
model.RecipeAddition(
use='BOIL',
hop=h3
),
model.RecipeAddition(
use='POST-BOIL',
hop=h4
),
model.RecipeAddition(
use='FLAME OUT',
hop=h5
),
model.RecipeAddition(
use='PRIMARY',
yeast=y1
),
model.RecipeAddition(
use='SECONDARY',
yeast=y2
)
]
assert recipe.contains(f1, 'mash')
assert recipe.contains(f1, 'boil') is False
assert recipe.contains(f1, 'fermentation') is False
assert recipe.contains(h1, 'mash')
assert recipe.contains(h1, 'boil') is False
assert recipe.contains(h1, 'fermentation') is False
assert recipe.contains(h2, 'mash') is False
assert recipe.contains(h2, 'boil')
assert recipe.contains(h2, 'fermentation') is False
assert recipe.contains(h3, 'mash') is False
assert recipe.contains(h3, 'boil')
assert recipe.contains(h3, 'fermentation') is False
assert recipe.contains(h4, 'mash') is False
assert recipe.contains(h4, 'boil')
assert recipe.contains(h4, 'fermentation') is False
assert recipe.contains(h5, 'mash') is False
assert recipe.contains(h5, 'boil')
assert recipe.contains(h5, 'fermentation') is False
assert recipe.contains(y1, 'mash') is False
assert recipe.contains(y1, 'boil') is False
assert recipe.contains(y1, 'fermentation')
assert recipe.contains(y2, 'mash') is False
assert recipe.contains(y2, 'boil') is False
assert recipe.contains(y2, 'fermentation')
assert recipe.contains(f1, 'invalid') is False
assert recipe.contains(h1, 'invalid') is False
assert recipe.contains(h2, 'invalid') is False
assert recipe.contains(h3, 'invalid') is False
assert recipe.contains(h4, 'invalid') is False
assert recipe.contains(h5, 'invalid') is False
assert recipe.contains(y1, 'invalid') is False
assert recipe.contains(y2, 'invalid') is False
def test_next_fermentation_step(self):
recipe = model.Recipe()
recipe.fermentation_steps.append(
model.FermentationStep(
step='PRIMARY',
days=7,
fahrenheit=50
)
)
assert recipe.next_fermentation_step == 'SECONDARY'
recipe.fermentation_steps.append(
model.FermentationStep(
step='SECONDARY',
days=14,
fahrenheit=35
)
)
assert recipe.next_fermentation_step == 'TERTIARY'
recipe.fermentation_steps.append(
model.FermentationStep(
step='TERTIARY',
days=31,
fahrenheit=35
)
)
assert recipe.next_fermentation_step is None
def test_efficiency(self):
assert model.Recipe().efficiency == .75
user = model.User()
model.UserSetting(
user=user,
name='brewhouse_efficiency',
value=.80
)
assert model.Recipe(author=user).efficiency == .80
def test_url(self):
recipe = model.Recipe(
id=1,
name=u'Rocky Mountain River IPA'
)
assert recipe.url() == '/recipes/1/rocky-mountain-river-ipa/'
assert recipe.url(
False) == '/recipes/1/rocky-mountain-river-ipa/builder'
def test_url_is_hex(self):
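        # Recipe ids are rendered as lowercase hex in the URL, e.g. id 15
        # should map to '/recipes/f/rocky-mountain-river-ipa/'.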
for i in range(128):
recipe = model.Recipe(
id=i,
name=u'Rocky Mountain River IPA'
)
assert recipe.url(
) == '/recipes/%s/rocky-mountain-river-ipa/' % ('%x' % i)
assert recipe.url(False) == \
'/recipes/%s/rocky-mountain-river-ipa/builder' % ('%x' % i)
def test_printable_type(self):
assert model.Recipe(
type=u'MASH'
).printable_type == 'All Grain'
assert model.Recipe(
type=u'EXTRACT'
).printable_type == 'Extract'
assert model.Recipe(
type=u'EXTRACTSTEEP'
).printable_type == 'Extract w/ Steeped Grains'
assert model.Recipe(
type=u'MINIMASH'
).printable_type == 'Mini-Mash'
class TestFermentationStep(unittest.TestCase):
def test_fermentation_step(self):
recipe = model.Recipe()
recipe.fermentation_steps.extend([
model.FermentationStep(
step='PRIMARY',
days=7,
fahrenheit=50
),
model.FermentationStep(
step='SECONDARY',
days=14,
fahrenheit=35
),
model.FermentationStep(
step='TERTIARY',
days=31,
fahrenheit=35
)
])
steps = recipe.fermentation_steps
assert steps[0].step == 'PRIMARY'
assert steps[0].days == 7
assert steps[0].fahrenheit == 50
assert steps[0].recipe == recipe
assert steps[1].step == 'SECONDARY'
assert steps[1].days == 14
assert steps[1].fahrenheit == 35
assert steps[1].recipe == recipe
assert steps[2].step == 'TERTIARY'
assert steps[2].days == 31
assert steps[2].fahrenheit == 35
assert steps[2].recipe == recipe
    def test_celsius(self):
recipe = model.Recipe()
recipe.fermentation_steps.extend([
model.FermentationStep(
step='PRIMARY',
days=7,
fahrenheit=50
)
])
steps = recipe.fermentation_steps
s = steps[0]
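        # Standard conversion C = (F - 32) * 5/9: 50F -> 10C, 68F -> 20C,
        # 32F -> 0C, which is what the assertions below exercise.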
assert s.celsius == 10
s.celsius = 20
assert s.fahrenheit == 68
s.fahrenheit = 32
assert s.celsius == 0
        s.celsius = 0
assert s.fahrenheit == 32
for i in range(0, 40):
s.celsius = i
assert s.celsius == i
class TestRecipeCopy(TestModel):
def test_simple_copy(self):
model.Recipe(
type='MASH',
name='Rocky Mountain River IPA',
gallons=5,
boil_minutes=60,
notes=u'This is my favorite recipe.'
)
model.commit()
recipe = model.Recipe.query.first()
recipe.duplicate()
model.commit()
assert model.Recipe.query.count() == 2
assert model.RecipeSlug.query.count() == 2
r1, r2 = model.Recipe.get(1), model.Recipe.get(2)
assert r1.type == r2.type == 'MASH'
assert r1.name == r2.name == 'Rocky Mountain River IPA'
assert r1.gallons == r2.gallons == 5
assert r1.boil_minutes == r2.boil_minutes == 60
        assert r1.notes == r2.notes == u'This is my favorite recipe.'
#!/usr/bin/python
#
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for generate_gyp.py.
It's tough to test the lower-level GetSourceFiles() and GetObjectFiles()
functions, so this focuses on the higher-level functions assuming those two
functions are working as intended (i.e., producing lists of files).
"""
import generate_gyp as gg
from generate_gyp import SourceSet, SourceListCondition
import string
import unittest
class ModuleUnittest(unittest.TestCase):
def testGetObjectToSourceMapping(self):
srcs = [
'a.c',
'b.asm',
'c.cc',
]
expected = {
'a.o': 'a.c',
'b.o': 'b.asm',
'c.o': 'c.cc',
}
self.assertEqual(expected, gg.GetObjectToSourceMapping(srcs))
def testGetSourceFileSet(self):
objs_to_srcs = {
'a.o': 'a.c',
'b.o': 'b.asm',
'c.o': 'c.cc',
}
objs = [
'a.o',
'c.o',
]
expected = set(['a.c', 'c.cc'])
self.assertEqual(expected, gg.GetSourceFileSet(objs_to_srcs, objs))
def testGetSourceFileSet_NotFound(self):
objs_to_srcs = {
'a.o': 'a.c',
'b.o': 'b.asm',
'c.o': 'c.cc',
}
objs = [
'd.o',
]
self.assertRaises(KeyError, gg.GetSourceFileSet, objs_to_srcs, objs)
class SourceSetUnittest(unittest.TestCase):
def testEquals(self):
a = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '3')]))
b = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '3')]))
c = SourceSet(set(['c', 'd']), set([SourceListCondition('1', '2', '3')]))
d = SourceSet(set(['a', 'b']), set([SourceListCondition('0', '2', '3')]))
e = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '0', '3')]))
f = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '0')]))
self.assertEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(a, d)
self.assertNotEqual(a, e)
self.assertNotEqual(a, f)
def testIntersect_Exact(self):
a = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '3')]))
b = SourceSet(set(['a', 'b']), set([SourceListCondition('3', '4', '6')]))
c = a.Intersect(b)
self.assertEqual(c.sources, set(['a', 'b']))
self.assertEqual(c.conditions, set([SourceListCondition('1', '2', '3'),
SourceListCondition('3', '4', '6')]))
self.assertFalse(c.IsEmpty())
def testIntersect_Disjoint(self):
a = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '3')]))
b = SourceSet(set(['c', 'd']), set([SourceListCondition('3', '4', '6')]))
c = a.Intersect(b)
self.assertEqual(c.sources, set())
self.assertEqual(c.conditions, set([SourceListCondition('1', '2', '3'),
SourceListCondition('3', '4', '6')]))
self.assertTrue(c.IsEmpty())
def testIntersect_Overlap(self):
a = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '3')]))
b = SourceSet(set(['b', 'c']), set([SourceListCondition('3', '4', '6')]))
c = a.Intersect(b)
self.assertEqual(c.sources, set(['b']))
self.assertEqual(c.conditions, set([SourceListCondition('1', '2', '3'),
SourceListCondition('3', '4', '6')]))
self.assertFalse(c.IsEmpty())
def testDifference_Exact(self):
a = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '3')]))
b = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '3')]))
c = a.Difference(b)
self.assertEqual(c.sources, set())
self.assertEqual(c.conditions, set([SourceListCondition('1', '2', '3')]))
self.assertTrue(c.IsEmpty())
def testDifference_Disjoint(self):
a = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '3')]))
b = SourceSet(set(['c', 'd']), set([SourceListCondition('3', '4', '6')]))
c = a.Difference(b)
self.assertEqual(c.sources, set(['a', 'b']))
self.assertEqual(c.conditions, set())
self.assertTrue(c.IsEmpty())
def testDifference_Overlap(self):
a = SourceSet(set(['a', 'b']), set([SourceListCondition('1', '2', '5')]))
b = SourceSet(set(['b', 'c', 'd']),
set([SourceListCondition('1', '2', '5'),
SourceListCondition('3', '4', '6')]))
c = a.Difference(b)
self.assertEqual(c.sources, set(['a']))
self.assertEqual(c.conditions, set([SourceListCondition('1', '2', '5')]))
self.assertFalse(c.IsEmpty())
def testGenerateGypStanza(self):
# ia32 should just be ia32. Win should appear as an OS restriction.
a = SourceSet(set(['a', 'b']),
set([SourceListCondition('ia32', 'Chromium', 'win')]))
a_stanza = a.GenerateGypStanza()
string.index(a_stanza, 'target_arch == "ia32"')
string.index(a_stanza, 'OS == "win"')
# x64 should just be x64. Linux should appear as an OS restriction.
b = SourceSet(set(['a', 'b']),
set([SourceListCondition('x64', 'Chromium', 'linux')]))
b_stanza = b.GenerateGypStanza()
string.index(b_stanza, 'target_arch == "x64"')
string.index(b_stanza, 'OS == "linux"')
# arm should just be arm.
c = SourceSet(set(['a', 'b']),
set([SourceListCondition('arm', 'Chromium', 'linux')]))
c_stanza = c.GenerateGypStanza()
string.index(c_stanza, 'target_arch == "arm"')
# arm-neon should be arm and flip the arm_neon switch.
d = SourceSet(set(['a', 'b']),
set([SourceListCondition('arm-neon', 'Chromium', 'linux')]))
d_stanza = d.GenerateGypStanza()
string.index(d_stanza, 'target_arch == "arm" and arm_neon == 1')
# Multiple conditions
e = SourceSet(set(['a', 'b']),
set([SourceListCondition('arm', 'Chrome', 'win'),
SourceListCondition('x64', 'Chromium', 'linux')]))
e_stanza = e.GenerateGypStanza()
string.index(e_stanza, ('OS == "win" and target_arch == "arm"'
' and ffmpeg_branding == "Chrome"'))
string.index(e_stanza, ('OS == "linux" and target_arch == "x64"'
' and ffmpeg_branding == "Chromium"'))
def testGenerateGnStanza(self):
# ia32 should be x86. Win should appear as an OS restriction.
a = SourceSet(set(['a', 'b']),
set([SourceListCondition('ia32', 'Chromium', 'win')]))
a_stanza = a.GenerateGnStanza()
string.index(a_stanza, 'current_cpu == "x86"')
string.index(a_stanza, 'is_win')
# x64 should just be x64. Linux should appear as an OS restriction.
b = SourceSet(set(['a', 'b']),
set([SourceListCondition('x64', 'Chromium', 'linux')]))
b_stanza = b.GenerateGnStanza()
string.index(b_stanza, 'current_cpu == "x64"')
string.index(b_stanza, 'is_linux')
# arm should just be arm.
c = SourceSet(set(['a', 'b']),
set([SourceListCondition('arm', 'Chromium', 'linux')]))
c_stanza = c.GenerateGnStanza()
string.index(c_stanza, 'current_cpu == "arm"')
# arm-neon should be arm and flip the arm_neon switch.
d = SourceSet(set(['a', 'b']),
set([SourceListCondition('arm-neon', 'Chromium', 'linux')]))
d_stanza = d.GenerateGnStanza()
string.index(d_stanza, 'current_cpu == "arm" && arm_use_neon')
# Multiple conditions
e = SourceSet(set(['a', 'b']),
set([SourceListCondition('arm', 'Chrome', 'win'),
SourceListCondition('x64', 'Chromium', 'linux')]))
e_stanza = e.GenerateGnStanza()
string.index(e_stanza, ('is_win && current_cpu == "arm"'
' && ffmpeg_branding == "Chrome"'))
string.index(e_stanza, ('is_linux && current_cpu == "x64"'
' && ffmpeg_branding == "Chromium"'))
def testComplexSourceListConditions(self):
# Create 2 sets with intersecting source 'a', but setup such that 'a'
# is only valid for combinations (x86 && windows) || (x64 && linux). The
# generated gyp stanza should then not allow for inclusion of the 'a' file
# for combinations like x86 && linux.
a = SourceSet(set(['a']), set([SourceListCondition('x86', 'c', 'win')]))
b = SourceSet(set(['a']), set([SourceListCondition('x64', 'c', 'linux')]))
disjoint_sets = gg.CreatePairwiseDisjointSets([a, b])
# This condition is bad because x86 && linux would pass. Admittedly a very
    # fragile way to test this, but evaluating gn stanzas is hard, and it at
# least serves to document the motivation for the associated changes to
# our generate_gyp.py
bad_condition = ('(current_cpu == "x86" || current_cpu == "x64")'
' && (ffmpeg_branding == "c")'
' && (is_win || is_linux)')
# Expect only a single set since the two original sets have the same source
# list.
self.assertEqual(1, len(disjoint_sets))
stanza = disjoint_sets[0].GenerateGnStanza()
self.assertEqual(string.find(stanza, bad_condition), -1)
def assertEqualSets(self, expected, actual):
# Do pairwise checks for easier debugging.
for a in actual:
self.assertTrue(a in expected, msg='Unexpected set: %s' % a)
for e in expected:
self.assertTrue(e in actual, msg='Did not find expected set: %s' % e)
def testCreatePairwiseDisjointSets_Pair(self):
a = SourceSet(set(['common', 'intel']),
set([SourceListCondition('ia32', 'Chromium', 'win')]))
b = SourceSet(set(['common', 'intel', 'chrome']),
set([SourceListCondition('ia32', 'Chrome', 'win')]))
expected = []
expected.append(
SourceSet(set(['common', 'intel']),
set([SourceListCondition('ia32', 'Chromium', 'win'),
SourceListCondition('ia32', 'Chrome', 'win')])))
expected.append(
SourceSet(set(['chrome']),
set([SourceListCondition('ia32', 'Chrome', 'win')])))
sets = gg.CreatePairwiseDisjointSets([a, b])
self.assertEqualSets(expected, sets)
def testCreatePairwiseDisjointSets_Triplet(self):
a = SourceSet(set(['common', 'intel']),
set([SourceListCondition('ia32', 'Chromium', 'win')]))
b = SourceSet(set(['common', 'intel', 'chrome']),
set([SourceListCondition('x64', 'Chrome', 'win')]))
c = SourceSet(set(['common', 'arm']),
set([SourceListCondition('arm', 'Chromium', 'win')]))
expected = []
expected.append(
SourceSet(set(['common']),
set([SourceListCondition('ia32', 'Chromium', 'win'),
SourceListCondition('x64', 'Chrome', 'win'),
SourceListCondition('arm', 'Chromium', 'win')])))
expected.append(
SourceSet(set(['intel']),
set([SourceListCondition('ia32', 'Chromium', 'win'),
SourceListCondition('x64', 'Chrome', 'win')])))
expected.append(
SourceSet(set(['chrome']),
set([SourceListCondition('x64', 'Chrome', 'win')])))
expected.append(
SourceSet(set(['arm']),
set([SourceListCondition('arm', 'Chromium', 'win')])))
sets = gg.CreatePairwiseDisjointSets([a, b, c])
self.assertEqualSets(expected, sets)
def testCreatePairwiseDisjointSets_Multiple(self):
a = SourceSet(set(['common', 'intel']),
set([SourceListCondition('ia32', 'Chromium', 'linux')]))
b = SourceSet(set(['common', 'intel', 'chrome']),
set([SourceListCondition('ia32', 'Chrome', 'linux')]))
c = SourceSet(set(['common', 'intel']),
set([SourceListCondition('x64', 'Chromium', 'linux')]))
d = SourceSet(set(['common', 'intel', 'chrome']),
set([SourceListCondition('x64', 'Chrome', 'linux')]))
e = SourceSet(set(['common', 'arm']),
set([SourceListCondition('arm', 'Chromium', 'linux')]))
f = SourceSet(set(['common', 'arm-neon', 'chrome', 'chromeos']),
set([SourceListCondition('arm-neon', 'ChromeOS', 'linux')]))
expected = []
expected.append(SourceSet(set(['common']), set([
SourceListCondition('ia32', 'Chromium', 'linux'),
SourceListCondition('ia32', 'Chrome', 'linux'),
SourceListCondition('x64', 'Chromium', 'linux'),
SourceListCondition('x64', 'Chrome', 'linux'),
SourceListCondition('arm', 'Chromium', 'linux'),
SourceListCondition('arm-neon', 'ChromeOS', 'linux')])))
expected.append(SourceSet(set(['intel']), set([
SourceListCondition('ia32', 'Chromium', 'linux'),
SourceListCondition('ia32', 'Chrome', 'linux'),
SourceListCondition('x64', 'Chromium', 'linux'),
SourceListCondition('x64', 'Chrome', 'linux')])))
expected.append(SourceSet(set(['arm']), set([
SourceListCondition('arm', 'Chromium', 'linux')])))
expected.append(SourceSet(set(['chrome']), set([
SourceListCondition('ia32', 'Chrome', 'linux'),
SourceListCondition('x64', 'Chrome', 'linux'),
SourceListCondition('arm-neon', 'ChromeOS', 'linux')])))
expected.append(SourceSet(set(['arm-neon', 'chromeos']), set([
SourceListCondition('arm-neon', 'ChromeOS', 'linux')])))
sets = gg.CreatePairwiseDisjointSets([a, b, c, d, e, f])
self.assertEqualSets(expected, sets)
def testReduceConditions(self):
# Set conditions span all of the supported architectures for linux.
a = SourceSet(set(['foo.c']),
set([SourceListCondition('ia32', 'Chromium', 'linux'),
SourceListCondition('x64', 'Chromium', 'linux'),
SourceListCondition('arm', 'Chromium', 'linux'),
SourceListCondition('arm64', 'Chromium', 'linux'),
SourceListCondition('arm-neon', 'Chromium', 'linux'),
SourceListCondition('mipsel', 'Chromium', 'linux')]))
gg.ReduceConditionalLogic(a)
    # Conditions should reduce to a single condition with a wild-card for
    # architecture.
expected = set([SourceListCondition('*', 'Chromium', 'linux')])
self.assertEqualSets(expected, a.conditions)
# Set conditions span all of the supported architectures for windows.
b = SourceSet(set(['foo.c']),
set([SourceListCondition('ia32', 'Chromium', 'win'),
SourceListCondition('x64', 'Chromium', 'win')]))
gg.ReduceConditionalLogic(b)
    # Conditions should reduce to a single condition with a wild-card for
    # architecture.
expected = set([SourceListCondition('*', 'Chromium', 'win')])
self.assertEqualSets(expected, b.conditions)
# Set conditions span all supported architectures and brandings for windows.
b = SourceSet(set(['foo.c']),
set([SourceListCondition('ia32', 'Chromium', 'win'),
SourceListCondition('x64', 'Chromium', 'win'),
SourceListCondition('ia32', 'Chrome', 'win'),
SourceListCondition('x64', 'Chrome', 'win')]))
gg.ReduceConditionalLogic(b)
expected = set([SourceListCondition('*', '*', 'win')])
self.assertEqualSets(expected, b.conditions)
# Set conditions span all supported platforms.
c = SourceSet(set(['foo.c']),
set([SourceListCondition('x64', 'Chromium', 'win'),
SourceListCondition('x64', 'Chromium', 'mac'),
SourceListCondition('x64', 'Chromium', 'linux'),
SourceListCondition('x64', 'Chromium', 'android')]))
gg.ReduceConditionalLogic(c)
expected = set([SourceListCondition('x64', 'Chromium', '*')])
self.assertEqualSets(expected, c.conditions)
def testReduceConditions_fullSpan(self):
# Build SourceSet with conditions spanning every combination of attributes.
ss = SourceSet(set(['foo.c']), set())
for arch in gg.SUPPORT_MATRIX[gg.Attr.ARCHITECTURE]:
for target in gg.SUPPORT_MATRIX[gg.Attr.TARGET]:
for platform in gg.SUPPORT_MATRIX[gg.Attr.PLATFORM]:
ss.conditions.add(SourceListCondition(arch, target, platform))
gg.ReduceConditionalLogic(ss)
expected = set([SourceListCondition('*', '*', '*')])
self.assertEqualSets(expected, ss.conditions)
def testGenerateGypStanzaWildCard(self):
a = SourceSet(set(['foo.c']),
set([SourceListCondition('x64', 'Chromium', '*')]))
gyp_stanza = a.GenerateGypStanza()
gn_stanza = a.GenerateGnStanza()
for stanza in | |
set name]')
if arg[1] != '=' or arg[2] != '{' or arg[-1] != '}':
raise ValueError('[Set specification must be in format "set_name = {element1, element2, ...}"]')
set_elements_string = arg[3:-1]
list_dict[arg[0]] = parse_list_elements(set_elements_string)
set_dict[arg[0]] = parse_set_elements(set_elements_string)
return [set_dict, list_dict]
def parse_list_elements(string):
"""Takes a string of format 1,2,3,{1,2} and returns the LIST!!! of those things
If an element contains {} or <> takes it as a set (LIST) or a tuple"""
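    # Rough illustration, assuming separate_arguments (defined elsewhere)
    # splits a parenthesised string on its top-level commas:
    #   parse_list_elements("1,2,{3,4}") -> ['1', '2', ['3', '4']]
    #   parse_list_elements("<1,2>,3")   -> [('1', '2'), '3']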
string = string.replace('{', 'set(')
string = string.replace('}', ')')
string = string.replace('<', 'tuple(')
string = string.replace('>', ')')
args = separate_arguments(f'({string})')
for x in range(len(args)):
# Empty set
if args[x] == 'set()':
args[x] = list()
# Empty tuple
if args[x] == 'tuple()':
args[x] = tuple()
# Set
elif args[x][:4] == 'set(' and args[x][-1] == ')':
args[x] = parse_list_elements(args[x][4:-1])
# Tuple
elif args[x][:6] == 'tuple(' and args[x][-1] == ')':
args[x] = tuple(parse_list_elements(args[x][6:-1]))
return args
def parse_set_elements(string):
"""Takes a string of format 1,2,3,{1,2} and returns the frozenset of those things
If an element contains {} or <> takes it as a frozenset or a tuple"""
string = string.replace('{', 'set(')
string = string.replace('}', ')')
string = string.replace('<', 'tuple(')
string = string.replace('>', ')')
args = separate_arguments(f'({string})')
for x in range(len(args)):
# Empty set
if args[x] == 'set()':
args[x] = frozenset()
# Empty tuple
if args[x] == 'tuple()':
args[x] = tuple()
# Set
elif args[x][:4] == 'set(' and args[x][-1] == ')':
args[x] = parse_set_elements(args[x][4:-1])
# Tuple
elif args[x][:6] == 'tuple(' and args[x][-1] == ')':
args[x] = tuple(parse_set_elements(args[x][6:-1]))
args = frozenset(args)
return args
def unparse_set_solution(solution):
if type(solution) == frozenset:
parsed_args = [unparse_set_solution(x) for x in solution]
parsed_args.sort()
string = '{' + str(parsed_args)[1:-1] + '}'
string = string.replace("'", "").replace('"', '')
return string
elif type(solution) == tuple:
parsed_args = [unparse_set_solution(x) for x in solution]
string = '<' + str(parsed_args)[1:-1] + '>'
string = string.replace("'", "").replace('"', '')
return string
else:
return solution
def parse_user_solution(string):
if string == 'set()':
return frozenset()
elif string == 'tuple()':
return tuple()
elif string[:4] == 'set(' and string[-1] == ')':
args = separate_arguments(string[3:])
args = [parse_user_solution(x) for x in args]
return frozenset(args)
elif string[:6] == 'tuple(' and string[-1] == ')':
args = separate_arguments(string[5:])
args = [parse_user_solution(x) for x in args]
return tuple(args)
else:
return string.replace(" ", "")
# ----------------------------------------------------------------------------------------------------------------------
# PREDICATE LOGIC
def parse_predicate_formula(string, logic, predicate_arity_dict=None):
    """Takes a string and transforms it into a formula of the format defined above
¡¡¡This function prepares the formula, the next one parses it!!!
Returns the parsed formula and the predicate arity dict!"""
# An empty string returns an error
if not string:
ValueError("An empty string is not a well-formed propositional formula")
# Delete the ifs
string = string.replace('if ', '')
# Parse constants (including quantifiers)
for con in logic.parsed_constants:
string = string.replace(con, logic.parsed_constants[con])
string = string.replace("falsum", "⊥")
string = string.replace("Falsum", "⊥")
# Remove spaces
string = string.replace(" ", "")
# Trick so that binaries do not have to contain external parentheses
try:
formula_and_dict = parse_predicate_formula2('(' + string + ')', logic, None, predicate_arity_dict)
return formula_and_dict
except ValueError:
pass
formula_and_dict = parse_predicate_formula2(string, logic, None, predicate_arity_dict)
return formula_and_dict
def parse_predicate_formula2(string, logic, bound_variables=None, predicate_arity_dict=None):
"""Parses a FOL string into a list of lists
Returns the parsed formula and the predicate arity dict"""
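    # Illustrative sketch of the return shape, assuming 'P'/'Q' are in
    # FOL_predicates, 'a'/'b' in FOL_individual_constants, 'x' in
    # FOL_variables, and that the logic uses '∧' and '∀' (substitute the
    # actual connective/quantifier symbols of the logic object):
    #   parse_predicate_formula2('Pa', logic)      -> [['Pa'], {'P': 1}]
    #   parse_predicate_formula2('(Pa∧Qb)', logic) -> [[['Pa'], '∧', ['Qb']], {'P': 1, 'Q': 1}]
    #   parse_predicate_formula2('∀xPx', logic)    -> [['∀', 'x', ['Px']], {'P': 1}]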
if bound_variables is None:
bound_variables = list()
if predicate_arity_dict is None:
predicate_arity_dict = dict()
# Falsum
if string == '⊥':
return [[string], predicate_arity_dict]
# Atomics
if string[0] in FOL_predicates:
arity = 0
for char in string[1:]:
if char in FOL_individual_constants:
arity += 1
elif char in FOL_variables:
if char in bound_variables:
arity += 1
else:
raise ValueError(f"Variable {char} is free in {string}")
else:
raise ValueError(f"Invalid formula")
if string[0] not in predicate_arity_dict:
predicate_arity_dict[string[0]] = arity
else:
if predicate_arity_dict[string[0]] != arity:
raise ValueError("Inconsistent assignment of predicate arity")
return [[string], predicate_arity_dict]
# Checks if unary:
if string[0] in logic.constants(1):
parse_inside = parse_predicate_formula2(string[1:], logic, bound_variables, predicate_arity_dict)
formula_inside = parse_inside[0]
predicate_arity_dict = parse_inside[1]
return [[string[0], formula_inside], predicate_arity_dict]
# Checks if binary (starts and ends with parentheses)
elif string[0] == '(' and string[-1] == ')':
# Searches for a constant that has 1 more left parenthesis open than right
num_parentheses_left = 0
num_parentheses_right = 0
for x in range(len(string)):
if string[x] == '(':
num_parentheses_left += 1
elif string[x] == ')':
num_parentheses_right += 1
elif string[x] in logic.constants(2) and num_parentheses_left == num_parentheses_right + 1:
result1 = parse_predicate_formula2(string[1:x], logic, bound_variables, predicate_arity_dict)
formula1 = result1[0]
predicate_arity_dict = result1[1]
result2 = parse_predicate_formula2(string[x + 1:-1], logic, bound_variables, predicate_arity_dict)
formula2 = result2[0]
predicate_arity_dict = result2[1]
return [[formula1, string[x], formula2], predicate_arity_dict]
# If the string starts and ends with parentheses, but did not return at this point, raise an error
raise ValueError(string + " is not a well-formed propositional formula")
# Checks if quantifier
elif string[0] in logic.quantifiers and string[1] in FOL_variables:
bound_variables.append(string[1])
parse_inside = parse_predicate_formula2(string[2:], logic, bound_variables, predicate_arity_dict)
formula_inside = parse_inside[0]
predicate_arity_dict = parse_inside[1]
del bound_variables[-1]
return [[string[0], string[1], formula_inside], predicate_arity_dict]
# If we did not enter any of the above, then the string is not a formula, and we just return an error
else:
raise ValueError("Invalid formula")
def unparse_predicate_formula(formula, logic):
form1 = deepcopy(formula)
form1 = unparse_predicate_parentheses(form1, logic)
form1 = unparse_predicate_rest(form1, logic)
return form1
def unparse_predicate_parentheses(formula, logic):
# If atomic
if len(formula) == 1:
return formula[0]
# If unary connective
elif len(formula) == 2:
formula[1] = unparse_predicate_parentheses(formula[1], logic)
# If the next one is atomic (i.e. [¬, p])
if type(formula[1]) == str:
return formula
# If the next one is unary (i.e. [¬, [¬, phi]] )
elif len(formula[1]) == 2:
formula = [formula[0], formula[1][0], formula[1][1]] # Turns it into [¬, ¬, phi]
return formula
# If the next one has length 3
elif len(formula[1]) == 3:
# If a unary is in the middle [¬ [¬, ¬, phi]]
if formula[1][1] in logic.constants(1):
formula[1].insert(0, formula[0])
formula = formula[1]
return formula
# If that does not happen, the next is a binary and should be left as is
else:
return formula
        # If no conditional was entered before, then length > 3, eg [¬, [¬, ¬, ¬, phi]]
else:
formula[1].insert(0, formula[0])
formula = formula[1]
return formula
# If the formula is binary
elif len(formula) == 3 and formula[1] in logic.constants(2):
formula[0] = unparse_predicate_parentheses(formula[0], logic)
formula[2] = unparse_predicate_parentheses(formula[2], logic)
return formula
# If quantified
elif len(formula) == 3 and formula[0] in logic.quantifiers:
formula[2] = unparse_predicate_parentheses(formula[2], logic)
return formula
def unparse_predicate_rest(formula, logic):
# If atomic (eg 'Px') or falsum
if type(formula) == str:
return formula
# If binary
elif len(formula) == 3 and formula[1] in logic.constants(2):
formula0 = unparse_predicate_rest(formula[0], logic)
formula2 = unparse_predicate_rest(formula[2], logic)
unparsed_formula = f"({formula0} {formula[1]} {formula2})"
return unparsed_formula
# If quantified
elif len(formula) == 3 and formula[0] in logic.quantifiers:
formula_inside = unparse_predicate_rest(formula[2], logic)
unparsed_formula = f"{formula[0]}{formula[1]} {formula_inside}"
return unparsed_formula
# If not atomic or binary or quantified, it is some chain of unaries [¬, ¬, .., phi]
else:
last_formula = unparse_predicate_rest(formula[-1], logic)
formula = formula[:-1] # Remove unparsed Phi
formula.append(last_formula) # Add parsed Phi
unparsed_formula = str(formula)
unparsed_formula = unparsed_formula.replace("'", "")
unparsed_formula = unparsed_formula.replace(",", "")
for ucon in logic.constants(1):
# Remove space after unary connectives
while ucon + " " in unparsed_formula:
unparsed_formula = unparsed_formula.replace(ucon + " ", ucon)
return unparsed_formula[1:-1] # This is to remove the []
def parse_own_model(string, unparsed_formula, logic_name):
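    # Hypothetical sketch of the expected input, reconstructed from the
    # checks below (the exact 'I(...)' syntax is validated further down):
    #   M = <D, I>
    #   D = {a, b, c}
    #   I(a) = ...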
string_list = string.split('\n')
if string_list[0].replace(" ", "") != "M=<D,I>":
raise ValueError("First line must be 'M = <D, I>'")
if len(string_list) == 1:
raise ValueError("Please enter a domain")
if string_list[1].replace(" ", "")[:3] != "D={" or string_list[1][-1] != '}':
raise ValueError("Second line must have form 'D = {...}'")
if string_list[1].replace(" ", "") == "D={}":
raise ValueError("Domain cannot be empty")
domain_string = string_list[1].replace(" ", "")[3:-1]
if '{' in domain_string or '}' in domain_string or '<' in domain_string or '>' in domain_string:
raise ValueError("Domain cannot contain sets or tuples")
if len(string_list) == 2:
raise ValueError("You must enter interpretations for the individual constants and predicates")
model = dict()
model['Domain'] = domain_string.split(",")
for line in string_list[2:]:
line2 = line.replace(" ", "")
        if line2[:2] != 'I(' or line2[3] != ')':
raise ValueError(f"Error in | |
0.5*m.b90*m.b95 + 0.5*m.b90*m.b97 + 0.5*m.b90*m.b98 + 0.5*m.b90*m.b99 + 0.5*m.b90*
m.b103 + 0.5*m.b90*m.b104 + 0.5*m.b90*m.b107 + 0.5*m.b90*m.b109 + m.b90*m.b111 + m.b90*m.b114 +
m.b90*m.b115 + m.b90*m.b118 + 0.5*m.b90*m.b119 + 0.5*m.b90*m.b282 + 0.5*m.b90*m.b290 + 0.5*m.b90*
m.b292 + 0.5*m.b90*m.b294 + 0.5*m.b90*m.b305 + m.b90*m.x845 + 0.5*m.b91*m.b123 + 0.5*m.b91*m.b278
+ 0.5*m.b92*m.b101 + 0.5*m.b92*m.b102 + 0.5*m.b92*m.b105 + m.b92*m.b106 + m.b92*m.b108 + m.b92*
m.b112 + m.b92*m.b116 + 0.5*m.b92*m.b117 + 0.5*m.b92*m.b122 + 0.5*m.b92*m.b261 + 0.5*m.b92*m.b273
+ 0.5*m.b92*m.b277 + 0.5*m.b92*m.b293 + 0.5*m.b92*m.b296 + 0.5*m.b92*m.b318 + 0.5*m.b92*m.b381
+ 0.5*m.b92*m.b397 + 0.5*m.b92*m.b471 + 0.5*m.b92*m.b491 + 0.5*m.b92*m.b547 + 0.5*m.b92*m.b559
+ 0.5*m.b92*m.b591 + 0.5*m.b92*m.b602 + 0.5*m.b92*m.b661 + 0.5*m.b92*m.b671 + 0.5*m.b92*m.b672
+ 0.5*m.b92*m.b678 + 0.5*m.b92*m.b714 + 0.5*m.b92*m.b761 + 0.5*m.b92*m.b765 + 0.5*m.b92*m.b790
+ 0.5*m.b92*m.b798 + 0.5*m.b92*m.b804 + 0.5*m.b92*m.b809 + 0.5*m.b92*m.b811 + 0.5*m.b92*m.b816
+ 0.5*m.b92*m.b823 + 0.5*m.b92*m.b826 + 0.5*m.b93*m.b96 + 0.5*m.b93*m.b100 + 0.5*m.b93*m.b113 +
m.b93*m.b120 + m.b93*m.b121 + 0.5*m.b93*m.b123 + 0.5*m.b93*m.b255 + 0.5*m.b93*m.b274 + 0.5*m.b93*
m.b280 + 0.5*m.b93*m.b287 + 0.5*m.b93*m.b301 + 0.5*m.b93*m.b302 + 0.5*m.b93*m.b369 + m.b93*m.x837
+ 0.5*m.b94*m.b95 + 0.5*m.b94*m.b97 + 0.5*m.b94*m.b98 + 0.5*m.b94*m.b99 + 0.5*m.b94*m.b100 + 0.5
*m.b94*m.b103 + 0.5*m.b94*m.b104 + 0.5*m.b94*m.b107 + 0.5*m.b94*m.b109 + 0.5*m.b94*m.b110 + 0.5*
m.b94*m.b111 + 0.5*m.b94*m.b113 + 0.5*m.b94*m.b114 + 0.5*m.b94*m.b115 + 0.5*m.b94*m.b118 + 0.5*
m.b94*m.b119 + 0.5*m.b94*m.b282 + 0.5*m.b94*m.b290 + 0.5*m.b94*m.b292 + 0.5*m.b94*m.b294 + 0.5*
m.b94*m.b305 + 0.5*m.b95*m.b97 + 0.5*m.b95*m.b98 + 0.5*m.b95*m.b99 + 0.5*m.b95*m.b102 + 0.5*m.b95
*m.b103 + 0.5*m.b95*m.b104 + 0.5*m.b95*m.b107 + m.b95*m.b109 + 0.5*m.b95*m.b111 + 0.5*m.b95*
m.b114 + 0.5*m.b95*m.b115 + 0.5*m.b95*m.b118 + 0.5*m.b95*m.b119 + 0.5*m.b95*m.b282 + 0.5*m.b95*
m.b290 + 0.5*m.b95*m.b292 + 0.5*m.b95*m.b294 + 0.5*m.b95*m.b305 + 0.5*m.b96*m.b100 + 0.5*m.b96*
m.b113 + 0.5*m.b96*m.b120 + 0.5*m.b96*m.b121 + 0.5*m.b96*m.b123 + m.b96*m.b255 + 0.5*m.b96*m.b274
+ 0.5*m.b96*m.b280 + 0.5*m.b96*m.b287 + 0.5*m.b96*m.b301 + 0.5*m.b96*m.b302 + m.b96*m.b369 + 0.5
*m.b97*m.b98 + 0.5*m.b97*m.b99 + 0.5*m.b97*m.b103 + 0.5*m.b97*m.b104 + m.b97*m.b107 + 0.5*m.b97*
m.b109 + 0.5*m.b97*m.b111 + 0.5*m.b97*m.b114 + 0.5*m.b97*m.b115 + 0.5*m.b97*m.b118 + 0.5*m.b97*
m.b119 + 0.5*m.b97*m.b282 + 0.5*m.b97*m.b290 + 0.5*m.b97*m.b292 + 0.5*m.b97*m.b294 + 0.5*m.b97*
m.b305 + m.b97*m.x844 + m.b98*m.b99 + m.b98*m.b103 + 0.5*m.b98*m.b104 + 0.5*m.b98*m.b107 + 0.5*
m.b98*m.b109 + 0.5*m.b98*m.b111 + 0.5*m.b98*m.b114 + 0.5*m.b98*m.b115 + 0.5*m.b98*m.b118 + m.b98*
m.b119 + 0.5*m.b98*m.b282 + 0.5*m.b98*m.b290 + 0.5*m.b98*m.b292 + 0.5*m.b98*m.b294 + 0.5*m.b98*
m.b305 + m.b98*m.x839 + m.b99*m.b103 + 0.5*m.b99*m.b104 + 0.5*m.b99*m.b107 + 0.5*m.b99*m.b109 +
0.5*m.b99*m.b111 + 0.5*m.b99*m.b114 + 0.5*m.b99*m.b115 + 0.5*m.b99*m.b118 + m.b99*m.b119 + 0.5*
m.b99*m.b282 + 0.5*m.b99*m.b290 + 0.5*m.b99*m.b292 + 0.5*m.b99*m.b294 + 0.5*m.b99*m.b305 + m.b99*
m.x839 + 0.5*m.b100*m.b110 + m.b100*m.b113 + 0.5*m.b100*m.b120 + 0.5*m.b100*m.b121 + 0.5*m.b100*
m.b123 + 0.5*m.b100*m.b255 + 0.5*m.b100*m.b274 + 0.5*m.b100*m.b280 + 0.5*m.b100*m.b287 + 0.5*
m.b100*m.b301 + 0.5*m.b100*m.b302 + 0.5*m.b100*m.b369 + m.b101*m.b105 + 0.5*m.b101*m.b106 + 0.5*
m.b101*m.b108 + 0.5*m.b101*m.b112 + 0.5*m.b101*m.b116 + 0.5*m.b101*m.b117 + 0.5*m.b101*m.b122 +
0.5*m.b101*m.b261 + 0.5*m.b101*m.b273 + 0.5*m.b101*m.b277 + 0.5*m.b101*m.b293 + 0.5*m.b101*m.b296
+ 0.5*m.b101*m.b318 + 0.5*m.b101*m.b381 + 0.5*m.b101*m.b397 + 0.5*m.b101*m.b471 + 0.5*m.b101*
m.b491 + 0.5*m.b101*m.b547 + 0.5*m.b101*m.b559 + 0.5*m.b101*m.b591 + 0.5*m.b101*m.b602 + 0.5*
m.b101*m.b661 + 0.5*m.b101*m.b671 + 0.5*m.b101*m.b672 + 0.5*m.b101*m.b678 + 0.5*m.b101*m.b714 +
0.5*m.b101*m.b761 + 0.5*m.b101*m.b765 + 0.5*m.b101*m.b790 + 0.5*m.b101*m.b798 + 0.5*m.b101*m.b804
+ 0.5*m.b101*m.b809 + 0.5*m.b101*m.b811 + 0.5*m.b101*m.b816 + 0.5*m.b101*m.b823 + 0.5*m.b101*
m.b826 + 0.5*m.b102*m.b106 + 0.5*m.b102*m.b108 + 0.5*m.b102*m.b109 + 0.5*m.b102*m.b112 + 0.5*
m.b102*m.b116 + 0.5*m.b103*m.b104 + 0.5*m.b103*m.b107 + 0.5*m.b103*m.b109 + 0.5*m.b103*m.b111 +
0.5*m.b103*m.b114 + 0.5*m.b103*m.b115 + 0.5*m.b103*m.b118 + m.b103*m.b119 + 0.5*m.b103*m.b282 +
0.5*m.b103*m.b290 + 0.5*m.b103*m.b292 + 0.5*m.b103*m.b294 + 0.5*m.b103*m.b305 + m.b103*m.x839 +
0.5*m.b104*m.b107 + 0.5*m.b104*m.b109 + 0.5*m.b104*m.b111 + 0.5*m.b104*m.b114 + 0.5*m.b104*m.b115
+ 0.5*m.b104*m.b118 + 0.5*m.b104*m.b119 + 0.5*m.b104*m.b282 + 0.5*m.b104*m.b290 + 0.5*m.b104*
m.b292 + 0.5*m.b104*m.b294 + 0.5*m.b104*m.b305 + m.b104*m.x840 + 0.5*m.b105*m.b106 + 0.5*m.b105*
m.b108 + 0.5*m.b105*m.b112 + 0.5*m.b105*m.b116 + 0.5*m.b105*m.b117 + 0.5*m.b105*m.b122 + 0.5*
m.b105*m.b261 + 0.5*m.b105*m.b273 + 0.5*m.b105*m.b277 + 0.5*m.b105*m.b293 + 0.5*m.b105*m.b296 +
0.5*m.b105*m.b318 + 0.5*m.b105*m.b381 + 0.5*m.b105*m.b397 + 0.5*m.b105*m.b471 + 0.5*m.b105*m.b491
+ 0.5*m.b105*m.b547 + 0.5*m.b105*m.b559 + 0.5*m.b105*m.b591 + 0.5*m.b105*m.b602 + 0.5*m.b105*
m.b661 + 0.5*m.b105*m.b671 + 0.5*m.b105*m.b672 + 0.5*m.b105*m.b678 + 0.5*m.b105*m.b714 + 0.5*
m.b105*m.b761 + 0.5*m.b105*m.b765 + 0.5*m.b105*m.b790 + 0.5*m.b105*m.b798 + 0.5*m.b105*m.b804 +
0.5*m.b105*m.b809 + 0.5*m.b105*m.b811 + 0.5*m.b105*m.b816 + 0.5*m.b105*m.b823 + 0.5*m.b105*m.b826
+ m.b106*m.b108 + m.b106*m.b112 + m.b106*m.b116 + 0.5*m.b106*m.b117 + 0.5*m.b106*m.b122 + 0.5*
m.b106*m.b261 + 0.5*m.b106*m.b273 + 0.5*m.b106*m.b277 + 0.5*m.b106*m.b293 + 0.5*m.b106*m.b296 +
0.5*m.b106*m.b318 + 0.5*m.b106*m.b381 + 0.5*m.b106*m.b397 + 0.5*m.b106*m.b471 + 0.5*m.b106*m.b491
+ 0.5*m.b106*m.b547 + 0.5*m.b106*m.b559 + 0.5*m.b106*m.b591 + 0.5*m.b106*m.b602 + 0.5*m.b106*
m.b661 + 0.5*m.b106*m.b671 + 0.5*m.b106*m.b672 + 0.5*m.b106*m.b678 + 0.5*m.b106*m.b714 + 0.5*
m.b106*m.b761 + 0.5*m.b106*m.b765 + 0.5*m.b106*m.b790 + 0.5*m.b106*m.b798 + 0.5*m.b106*m.b804 +
0.5*m.b106*m.b809 + 0.5*m.b106*m.b811 + 0.5*m.b106*m.b816 + 0.5*m.b106*m.b823 + 0.5*m.b106*m.b826
+ 0.5*m.b107*m.b109 + 0.5*m.b107*m.b111 + 0.5*m.b107*m.b114 + 0.5*m.b107*m.b115 + 0.5*m.b107*
m.b118 + 0.5*m.b107*m.b119 + 0.5*m.b107*m.b282 + 0.5*m.b107*m.b290 + 0.5*m.b107*m.b292 + 0.5*
m.b107*m.b294 + 0.5*m.b107*m.b305 + m.b107*m.x844 + m.b108*m.b112 + m.b108*m.b116 + 0.5*m.b108*
m.b117 + 0.5*m.b108*m.b122 + 0.5*m.b108*m.b261 + 0.5*m.b108*m.b273 + 0.5*m.b108*m.b277 + 0.5*
m.b108*m.b293 + 0.5*m.b108*m.b296 + 0.5*m.b108*m.b318 + 0.5*m.b108*m.b381 + 0.5*m.b108*m.b397 +
0.5*m.b108*m.b471 + 0.5*m.b108*m.b491 + 0.5*m.b108*m.b547 + 0.5*m.b108*m.b559 + 0.5*m.b108*m.b591
+ 0.5*m.b108*m.b602 + 0.5*m.b108*m.b661 + 0.5*m.b108*m.b671 + 0.5*m.b108*m.b672 + 0.5*m.b108*
m.b678 + 0.5*m.b108*m.b714 + 0.5*m.b108*m.b761 + 0.5*m.b108*m.b765 + 0.5*m.b108*m.b790 + 0.5*
m.b108*m.b798 + 0.5*m.b108*m.b804 + 0.5*m.b108*m.b809 + 0.5*m.b108*m.b811 + 0.5*m.b108*m.b816 +
0.5*m.b108*m.b823 + 0.5*m.b108*m.b826 + 0.5*m.b109*m.b111 + 0.5*m.b109*m.b114 + 0.5*m.b109*m.b115
+ 0.5*m.b109*m.b118 + 0.5*m.b109*m.b119 + 0.5*m.b109*m.b282 + 0.5*m.b109*m.b290 + 0.5*m.b109*
m.b292 + 0.5*m.b109*m.b294 + 0.5*m.b109*m.b305 + 0.5*m.b110*m.b113 + m.b110*m.x838 + m.b111*
m.b114 + m.b111*m.b115 + m.b111*m.b118 + 0.5*m.b111*m.b119 + 0.5*m.b111*m.b282 + 0.5*m.b111*
m.b290 + 0.5*m.b111*m.b292 + 0.5*m.b111*m.b294 + 0.5*m.b111*m.b305 + m.b111*m.x845 + m.b112*
m.b116 + 0.5*m.b112*m.b117 + 0.5*m.b112*m.b122 + 0.5*m.b112*m.b261 + 0.5*m.b112*m.b273 + 0.5*
m.b112*m.b277 + 0.5*m.b112*m.b293 + 0.5*m.b112*m.b296 + 0.5*m.b112*m.b318 + 0.5*m.b112*m.b381 +
0.5*m.b112*m.b397 + 0.5*m.b112*m.b471 + 0.5*m.b112*m.b491 + 0.5*m.b112*m.b547 + 0.5*m.b112*m.b559
+ 0.5*m.b112*m.b591 + 0.5*m.b112*m.b602 + 0.5*m.b112*m.b661 + 0.5*m.b112*m.b671 + 0.5*m.b112*
m.b672 + 0.5*m.b112*m.b678 + 0.5*m.b112*m.b714 + 0.5*m.b112*m.b761 + 0.5*m.b112*m.b765 + 0.5*
m.b112*m.b790 + 0.5*m.b112*m.b798 + 0.5*m.b112*m.b804 + 0.5*m.b112*m.b809 + 0.5*m.b112*m.b811 +
0.5*m.b112*m.b816 + 0.5*m.b112*m.b823 + 0.5*m.b112*m.b826 + 0.5*m.b113*m.b120 + 0.5*m.b113*m.b121
+ 0.5*m.b113*m.b123 + 0.5*m.b113*m.b255 + 0.5*m.b113*m.b274 + 0.5*m.b113*m.b280 + 0.5*m.b113*
m.b287 + 0.5*m.b113*m.b301 + 0.5*m.b113*m.b302 + 0.5*m.b113*m.b369 + m.b114*m.b115 + m.b114*
m.b118 + 0.5*m.b114*m.b119 + 0.5*m.b114*m.b282 + 0.5*m.b114*m.b290 + 0.5*m.b114*m.b292 + 0.5*
m.b114*m.b294 + 0.5*m.b114*m.b305 + m.b114*m.x845 + m.b115*m.b118 + 0.5*m.b115*m.b119 + 0.5*
m.b115*m.b282 + 0.5*m.b115*m.b290 + 0.5*m.b115*m.b292 + 0.5*m.b115*m.b294 + 0.5*m.b115*m.b305 +
m.b115*m.x845 + 0.5*m.b116*m.b117 + 0.5*m.b116*m.b122 + 0.5*m.b116*m.b261 + 0.5*m.b116*m.b273 +
0.5*m.b116*m.b277 + 0.5*m.b116*m.b293 + 0.5*m.b116*m.b296 + 0.5*m.b116*m.b318 + 0.5*m.b116*m.b381
+ 0.5*m.b116*m.b397 + 0.5*m.b116*m.b471 + 0.5*m.b116*m.b491 + 0.5*m.b116*m.b547 + 0.5*m.b116*
m.b559 + 0.5*m.b116*m.b591 + 0.5*m.b116*m.b602 + 0.5*m.b116*m.b661 + 0.5*m.b116*m.b671 + 0.5*
m.b116*m.b672 + 0.5*m.b116*m.b678 + 0.5*m.b116*m.b714 + 0.5*m.b116*m.b761 + 0.5*m.b116*m.b765 +
0.5*m.b116*m.b790 + 0.5*m.b116*m.b798 + 0.5*m.b116*m.b804 + 0.5*m.b116*m.b809 + 0.5*m.b116*m.b811
+ 0.5*m.b116*m.b816 + 0.5*m.b116*m.b823 + 0.5*m.b116*m.b826 + 0.5*m.b117*m.b122 + 0.5*m.b117*
m.b261 + 0.5*m.b117*m.b273 + 0.5*m.b117*m.b277 + 0.5*m.b117*m.b293 + 0.5*m.b117*m.b296 + 0.5*
m.b117*m.b318 + 0.5*m.b117*m.b381 + 0.5*m.b117*m.b397 + 0.5*m.b117*m.b471 + 0.5*m.b117*m.b491 +
0.5*m.b117*m.b547 + 0.5*m.b117*m.b559 + 0.5*m.b117*m.b591 + 0.5*m.b117*m.b602 + 0.5*m.b117*m.b661
+ 0.5*m.b117*m.b671 + 0.5*m.b117*m.b672 + 0.5*m.b117*m.b678 + 0.5*m.b117*m.b714 + 0.5*m.b117*
m.b761 + 0.5*m.b117*m.b765 + 0.5*m.b117*m.b790 + 0.5*m.b117*m.b798 + 0.5*m.b117*m.b804 + 0.5*
m.b117*m.b809 + 0.5*m.b117*m.b811 + 0.5*m.b117*m.b816 + 0.5*m.b117*m.b823 + 0.5*m.b117*m.b826 +
0.5*m.b118*m.b119 + 0.5*m.b118*m.b282 + 0.5*m.b118*m.b290 + 0.5*m.b118*m.b292 + 0.5*m.b118*m.b294
+ 0.5*m.b118*m.b305 + m.b118*m.x845 + 0.5*m.b119*m.b282 + 0.5*m.b119*m.b290 + 0.5*m.b119*m.b292
+ 0.5*m.b119*m.b294 + 0.5*m.b119*m.b305 + m.b119*m.x839 + m.b120*m.b121 + 0.5*m.b120*m.b123 +
0.5*m.b120*m.b255 + 0.5*m.b120*m.b274 + 0.5*m.b120*m.b280 + 0.5*m.b120*m.b287 + 0.5*m.b120*m.b301
+ 0.5*m.b120*m.b302 + 0.5*m.b120*m.b369 + m.b120*m.x837 + 0.5*m.b121*m.b123 + 0.5*m.b121*m.b255
+ 0.5*m.b121*m.b274 + 0.5*m.b121*m.b280 + 0.5*m.b121*m.b287 + 0.5*m.b121*m.b301 + 0.5*m.b121*
m.b302 + 0.5*m.b121*m.b369 + m.b121*m.x837 + 0.5*m.b122*m.b261 + 0.5*m.b122*m.b273 + 0.5*m.b122*
m.b277 + 0.5*m.b122*m.b293 + 0.5*m.b122*m.b296 + 0.5*m.b122*m.b318 + 0.5*m.b122*m.b381 + 0.5*
m.b122*m.b397 + 0.5*m.b122*m.b471 + 0.5*m.b122*m.b491 + 0.5*m.b122*m.b547 + 0.5*m.b122*m.b559 +
0.5*m.b122*m.b591 + 0.5*m.b122*m.b602 + 0.5*m.b122*m.b661 + 0.5*m.b122*m.b671 + 0.5*m.b122*m.b672
+ 0.5*m.b122*m.b678 + 0.5*m.b122*m.b714 + 0.5*m.b122*m.b761 + 0.5*m.b122*m.b765 + 0.5*m.b122*
m.b790 + 0.5*m.b122*m.b798 + 0.5*m.b122*m.b804 + 0.5*m.b122*m.b809 + 0.5*m.b122*m.b811 + 0.5*
m.b122*m.b816 + 0.5*m.b122*m.b823 + 0.5*m.b122*m.b826 + 0.5*m.b123*m.b255 + 0.5*m.b123*m.b274 +
0.5*m.b123*m.b278 + 0.5*m.b123*m.b280 + 0.5*m.b123*m.b287 + 0.5*m.b123*m.b301 + 0.5*m.b123*m.b302
+ 0.5*m.b123*m.b369 + 0.5*m.b124*m.b129 + 0.5*m.b124*m.b130 + 0.5*m.b124*m.b134 + m.b124*m.b136
+ 0.5*m.b124*m.b138 + 0.5*m.b124*m.b149 + 0.5*m.b124*m.b151 + 0.5*m.b124*m.b162 + 0.5*m.b124*
m.b169 + 0.5*m.b124*m.b174 + 0.5*m.b124*m.b184 + 0.5*m.b125*m.b135 + 0.5*m.b125*m.b153 + m.b125*
m.b156 + m.b125*m.b157 + 0.5*m.b125*m.b167 + m.b125*m.b168 + 0.5*m.b125*m.b171 + | |
True)].copy()
if eod_nogeo.empty == False:
#eod_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
eod_nogeo["gps_accuracy_m"] = np.where(eod_nogeo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
eod_nogeo["effort_distance_m"] = 8047 # eBird best practices allows distance up to 5 mi length.
eod_nogeo["radius_m"] = eod_nogeo["effort_distance_m"] + eod_nogeo["gps_accuracy_m"] + eod_nogeo["detection_distance_m"]
# Concat df's if necessary
if filter_set['has_coordinate_uncertainty'] == True:
df_unfiltered2 = georef
to_concat = []
for x in [gbif_nogeo, georef, eod_nogeo, ebd_geo]:
if x.empty == False:
to_concat.append(x)
if len(to_concat) > 1:
df_unfiltered2 = pd.concat(to_concat)
if len(to_concat) == 1:
df_unfiltered2 = to_concat[0]
# Where coordinate precision is poor, overwrite the radius to be the precision.
df_unfiltered2["radius_m"] = np.where(df_unfiltered2["nominal_xy_precision"] > df_unfiltered2["radius_m"], df_unfiltered2["nominal_xy_precision"], df_unfiltered2["radius_m"])
#df_unfiltered2["radius_m"] = np.where(df_unfiltered2["coordinatePrecision"] > df_unfiltered2["radius_m"], df_unfiltered2["coordinatePrecision"], df_unfiltered2["radius_m"])
# Test to make sure that no records were lost in the previous steps
if len(df_unfiltered2) != len(df_unfiltered):
print("AN ERROR OCCURRED !!!!!!!!!!!!!")
else:
print("Prepared records and calculated radii:" + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< FILTER
timestamp = datetime.now()
# Some filters to be prepped for use
for x in ['bases_omit', 'collection_codes_omit', 'datasets_omit',
'institutions_omit', 'issues_omit', 'sampling_protocols_omit']:
        if filter_set[x] is None:
filter_set[x] = []
df_filter2 = (df_unfiltered2[df_unfiltered2['radius_m'] <= filter_set['max_coordinate_uncertainty']]
[lambda x: x['collectionCode'].isin(filter_set['collection_codes_omit']) == False]
[lambda x: x['institutionID'].isin(filter_set['institutions_omit']) == False]
[lambda x: x['basisOfRecord'].isin(filter_set['bases_omit']) == False]
[lambda x: x['samplingProtocol'].isin(filter_set['sampling_protocols_omit']) == False]
[lambda x: x['datasetName'].isin(filter_set['datasets_omit']) == False]
[lambda x: x['occurrenceStatus'] != "ABSENT"]
)
# Case where user demands records had coordinate uncertainty
if filter_set['has_coordinate_uncertainty'] == True:
df_filter2 = df_filter2[df_filter2["coordinateUncertaintyInMeters"] > 0]
''' ISSUES are more complex because multiple issues can be listed per record
Method used is complex, but hopefully faster than simple iteration over all records
'''
df_filter2.fillna(value={'issues': ""}, inplace=True)
    # The format of issues entries differs by method; change json format to
    # email format
if filter_set['get_dwca'] == True:
df_filter2['issues'] = [x.replace(', ', ';').replace('[', '').replace(']', '').replace("'", "")
for x in df_filter2['issues']]
unique_issue = list(df_filter2['issues'].unique())
violations = [x for x in unique_issue if len(set(str(x).split(";")) & set(filter_set['issues_omit'])) != 0] # entries that contain violations
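    # For instance (hypothetical values): an issues entry of
    # "COORDINATE_ROUNDED;ZERO_COORDINATE" with issues_omit containing
    # "ZERO_COORDINATE" has a non-empty intersection, so every record carrying
    # that exact issues string is excluded below.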
df_filter3 = df_filter2[df_filter2['issues'].isin(violations) == False] # Records without entries that are violations.
print("Performed filtering: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< REMOVE SPACE-TIME DUPLICATES
# Prep some columns by changing data type
df_filter3 = (df_filter3
.astype({'decimalLatitude': 'str',
'decimalLongitude': 'str'})
.reset_index(drop=True))
if filter_set["duplicate_coord_date_OK"] == False:
df_filterZ = drop_duplicates_latlongdate(df_filter3)
if filter_set["duplicate_coord_date_OK"] == True:
df_filterZ = df_filter3.copy()
print("DUPLICATES ON LATITUDE, LONGITUDE, DATE-TIME INCLUDED")
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SPATIAL FILTERING
# Spatial filtering happens in the get functions (ebird and gbif), not here
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE VALUES AGAIN
timestamp = datetime.now()
# Store value summary in a data frame
if df_filterZ.empty == False:
retained = summarize_values(dataframe=df_filterZ, step='retained')
if df_filterZ.empty == True:
retained = acquired.copy().drop(["acquired"], axis=1)
retained["retained"] = 0
# Concat acquired and retained data frames
summary_df = pd.merge(retained, acquired, on=['attribute', 'value'],
how='inner')
# Calculate a difference column
summary_df['removed'] = summary_df['acquired'] - summary_df['retained']
summary_df = summary_df[['attribute', 'value', 'acquired', 'removed',
'retained']]
# Summarize sources
if df_filterZ.empty == False:
source_df2 = df_filterZ[['institutionID', 'collectionCode',
'datasetName', 'record_id']]
source_summary2 = (source_df2
.groupby(by=['institutionID', 'collectionCode',
'datasetName'])
.size()
.reset_index(name='retained'))
if df_filterZ.empty == True:
print(source_summary1)
source_summary2 = source_summary1.copy().drop(["acquired"], axis=1)
source_summary2["retained"] = 0
# Concat acquired and retained source summary data frames
source_summaries = pd.merge(source_summary1, source_summary2,
on=['institutionID', 'collectionCode',
'datasetName'],
how='inner')
# Calculate a difference column
source_summaries['removed'] = source_summaries['acquired'] - source_summaries['retained']
source_summaries = source_summaries[['institutionID', 'collectionCode',
'datasetName', 'acquired', 'removed',
'retained']]
# Save the summaries in the output database
summary_df.to_sql(name='attribute_value_counts', con=conn,
if_exists='replace')
source_summaries.to_sql(name='sources', con=conn,
if_exists='replace')
print("Saved summary of filtering results: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SAVE
# Reformat data to strings and insert into db.
df_filterZ.replace("nan",
pd.NA).applymap(str).to_sql(name='occurrence_records',
con=conn,
if_exists='replace')
conn.close()
return None
def nominal_precisions(longitude, latitude, produce):
'''
Calculates the nominal precisions based on WGS84 coordinates.
Method is based on information from wikipedia page on latitude and posts at
https://gis.stackexchange.com/questions/8650/measuring-accuracy-of-latitude-and-longitude
https://wiki.openstreetmap.org/wiki/Precision_of_coordinates
Parameters
----------
latitude : decimal degrees (EPSG:4326) latitude as string.
longitude : decimal degrees (EPSG:4326) longitude as string.
produce : 'longitude', 'latitude', or 'both'
Returns
-------
x : uncertainty in longitude (meters) as float.
    y : uncertainty in latitude (meters) as float.
EXAMPLE
-------
x, y = nominal_precisions("-93.455", "26.3455", produce="both")
'''
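    # Worked example of the lookups below (values approximate): for longitude
    # "-93.455" (3 decimal places) at latitude 26.3455,
    # x = 111321 * cos(26.3455 * pi / 180) / 1000 ~= 99.8 m, and for latitude
    # "26.3455" (4 decimal places), y = 11.1 m.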
lat = latitude.split(".")
long = longitude.split(".")
# Longitude - decimal gets moved based on digits.
digitsX = {1: 10, 2: 100, 3: 1000, 4: 10000, 5: 100000}
x = (111321 * np.cos(float(latitude) * np.pi/180))/digitsX[len(long[1])]
# Latitude lookup
digitsY = {1: 11112.0, 2: 1111.2, 3: 111.1, 4: 11.1, 5: 1.1}
y = digitsY[len(lat[1])]
if produce == "both":
return x, y
if produce == "longitude":
return x
if produce == "latitude":
return y
def drop_duplicates_latlongdate(df):
'''
Function to find and remove duplicate occurrence records within the
wildlife wrangler workflow. When duplicates exist, the record with the
higher decimal precision is kept, and if precision values are equal, then
the record with the smallest radius_m is retained. Accounts for existence
of records with a mix of decimal precision in latitude and longitude
values. The process is a little complex. The first data frame is cleaned
up by dropping duplicates based on which record has smaller buffer radius.
Before doing that, records with unequal decimal precision in the latitude
and longitude fields and those fields are rounded to the coarser
precision present. An input data frame likely contains records with equal
decimal precision in latitude and longitude fields, but that is lower than
the rest (i.e. latitude and longitude have 3 places right of the decimal
whereas most records have 4). Duplication may occur between lower and
higher precision records at the lower precision. Therefore, duplication
must be assessed at each of the lower precision levels present. The
strategy for that is to, at each precision level, split the main data
frame in two: one with records having the precision level of the
investigation and another with records greater than the precision level.
The "greater than" data frame records' latitude and longitude values are
then rounded to the precision level. Records are identified from the
"equals precision" data frame that have their latitude, longitude, and date
    values represented in the "greater than" df, and such records' IDs are
collected in a list of records to be removed from the input/main data
frame. This process is iterated over all precision levels present in the
data.
Parameters
----------
df : input pandas data frame.
Returns
-------
df2 : a data frame equal to df but without duplicates. Use to drop records
from the occurrences table.
'''
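    # Sketch of the idea on two hypothetical records sharing an eventDate:
    # ("26.3451", "-93.4551") and ("26.345", "-93.455"). At precision level 3
    # the higher-precision record rounds to the same coordinates, so the
    # 3-decimal record is flagged as a duplicate and removed.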
startduptime = datetime.now()
# Record df length before removing duplicates
initial_length = len(df)
"""
############ RECTIFY UNEQUAL LAT-LONG PRECISION
First, trim decimal length in cases where decimal length differs between
latitude and longitude values, result is equal latitude and longitude
length. Record the trimmed decimal precision in a temp column for use
    later as a record of the "verbatim" precision.
"""
df['dup_latPlaces'] = [len(x.split(".")[1]) for x in df['decimalLatitude']]
df['dup_lonPlaces'] = [len(x.split(".")[1]) for x in df['decimalLongitude']]
df['dup_OGprec'] = df['dup_latPlaces']
prec_unequal = df[df['dup_latPlaces'] != df['dup_lonPlaces']]
for i in prec_unequal.index:
x = prec_unequal.loc[i]
if x['dup_latPlaces'] < x['dup_lonPlaces']:
trim_len = int(x['dup_latPlaces'])
else:
trim_len = int(x['dup_lonPlaces'])
df.loc[i, 'decimalLatitude'] = x['decimalLatitude'][:trim_len + 3]
df.loc[i, 'decimalLongitude'] = x['decimalLongitude'][:trim_len + 4]
# Record the resulting precision for reference later
df.loc[i, 'dup_OGprec'] = trim_len
df.drop(['dup_latPlaces', 'dup_lonPlaces'], axis=1, inplace=True)
"""
######## INITIAL DROP OF DUPLICATES
Initial drop of duplicates on 'latitude', 'longitude', 'eventDate',
keeping the first (lowest radius_m)
Sort so that the lowest radius_m is first
"""
df = (df
.sort_values(by=['decimalLatitude', 'decimalLongitude', 'eventDate',
'radius_m'],
ascending=True, kind='mergesort', na_position='last')
.drop_duplicates(subset=['decimalLatitude', 'decimalLongitude',
'eventDate'],
keep='first'))
"""
######### FIND IMPRECISE DUPLICATES
Get a list of "verbatim" precisions that are present in the data to loop
through. Next, iterate through this list collecting id's of records that
need to be removed from the main df.
"""
# Get list of unique precisions. Order is important: descending.
precisions = list(set(df['dup_OGprec']))
precisions.sort(reverse=True)
# The highest precisions listed at this point has been done: drop it.
precisions = precisions[1:]
# List for collecting records that are duplicates
duplis = []
# The precision-specific duplicate testing happens repeatedly, so make it a
# function.
def | |
i in sm.xrange(len(images))])
assert len(caught_warnings) >= 1
assert "deprecated" in str(caught_warnings[-1].message)
def test_reduce_to_nonempty():
kpsois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(4, 4, 3)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=0)],
shape=(4, 4, 3)),
ia.KeypointsOnImage([], shape=(4, 4, 3)),
ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(4, 4, 3)),
ia.KeypointsOnImage([], shape=(4, 4, 3))
]
kpsois_reduced, ids = iaa.reduce_to_nonempty(kpsois)
assert kpsois_reduced == [kpsois[0], kpsois[1], kpsois[3]]
assert ids == [0, 1, 3]
kpsois = [
ia.KeypointsOnImage([], shape=(4, 4, 3)),
ia.KeypointsOnImage([], shape=(4, 4, 3))
]
kpsois_reduced, ids = iaa.reduce_to_nonempty(kpsois)
assert kpsois_reduced == []
assert ids == []
kpsois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(4, 4, 3))
]
kpsois_reduced, ids = iaa.reduce_to_nonempty(kpsois)
assert kpsois_reduced == [kpsois[0]]
assert ids == [0]
kpsois = []
kpsois_reduced, ids = iaa.reduce_to_nonempty(kpsois)
assert kpsois_reduced == []
assert ids == []
def test_invert_reduce_to_nonempty():
kpsois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(4, 4, 3)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1),
ia.Keypoint(x=1, y=0)], shape=(4, 4, 3)),
ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(4, 4, 3)),
]
kpsois_recovered = iaa.invert_reduce_to_nonempty(
kpsois, [0, 1, 2], ["foo1", "foo2", "foo3"])
assert kpsois_recovered == ["foo1", "foo2", "foo3"]
kpsois_recovered = iaa.invert_reduce_to_nonempty(kpsois, [1], ["foo1"])
assert np.all([
isinstance(kpsoi, ia.KeypointsOnImage)
for kpsoi
in kpsois]) # assert original list not changed
assert kpsois_recovered == [kpsois[0], "foo1", kpsois[2]]
kpsois_recovered = iaa.invert_reduce_to_nonempty(kpsois, [], [])
assert kpsois_recovered == [kpsois[0], kpsois[1], kpsois[2]]
kpsois_recovered = iaa.invert_reduce_to_nonempty([], [], [])
assert kpsois_recovered == []
class _DummyAugmenter(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def get_parameters(self):
return []
class _DummyAugmenterBBs(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_bounding_boxes(self, bounding_boxes_on_images, random_state,
parents, hooks):
return [bbsoi.shift(x=1)
for bbsoi
in bounding_boxes_on_images]
def get_parameters(self):
return []
# TODO remove _augment_heatmaps() and _augment_keypoints() here once they are
# no longer abstract methods but default to noop
class _DummyAugmenterCallsParent(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return super(_DummyAugmenterCallsParent, self)\
._augment_images(images, random_state, parents, hooks)
def get_parameters(self):
return super(_DummyAugmenterCallsParent, self)\
.get_parameters()
def _same_rs(rs1, rs2):
return rs1.equals(rs2)
# TODO the tests in here do not check everything, but instead only the cases
# that were not yet indirectly tested via other tests
class TestAugmenter(unittest.TestCase):
def setUp(self):
reseed()
def test___init___global_rng(self):
aug = _DummyAugmenter()
assert not aug.deterministic
assert aug.random_state.is_global_rng()
def test___init___deterministic(self):
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
aug = _DummyAugmenter(deterministic=True)
assert aug.deterministic
assert not aug.random_state.is_global_rng()
assert len(caught_warnings) == 1
assert (
"is deprecated"
in str(caught_warnings[-1].message))
# old name for parameter `seed`
def test___init___random_state_is_rng(self):
rs = iarandom.RNG(123)
aug = _DummyAugmenter(seed=rs)
assert aug.random_state.generator is rs.generator
# old name for parameter `seed`
def test___init___random_state_is_seed(self):
aug = _DummyAugmenter(seed=123)
assert aug.random_state.equals(iarandom.RNG(123))
def test___init___seed_is_random_state(self):
rs = iarandom.RNG(123)
aug = _DummyAugmenter(seed=rs)
assert aug.random_state.generator is rs.generator
def test___init___seed_is_seed(self):
aug = _DummyAugmenter(seed=123)
assert aug.random_state.equals(iarandom.RNG(123))
def test_augment_images_called_probably_with_single_image(self):
aug = _DummyAugmenter()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
_ = aug.augment_images(np.zeros((16, 32, 3), dtype=np.uint8))
assert len(caught_warnings) == 1
assert (
"indicates that you provided a single image with shape (H, W, C)"
in str(caught_warnings[-1].message)
)
def test_augment_images_array_in_list_out(self):
self._test_augment_images_array_in_list_out_varying_channels(
[3] * 20)
def test_augment_images_array_in_list_out_single_channel(self):
self._test_augment_images_array_in_list_out_varying_channels(
[1] * 20)
def test_augment_images_array_in_list_out_no_channels(self):
self._test_augment_images_array_in_list_out_varying_channels(
[None] * 20)
def test_augment_images_array_in_list_out_varying_channels(self):
self._test_augment_images_array_in_list_out_varying_channels(
["random"] * 20)
@classmethod
def _test_augment_images_array_in_list_out_varying_channels(cls,
nb_channels):
assert len(nb_channels) == 20
aug = iaa.Crop(((1, 8), (1, 8), (1, 8), (1, 8)), keep_size=False)
seen = [0, 0]
for nb_channels_i in nb_channels:
if nb_channels_i == "random":
channels = np.random.choice([None, 1, 3, 4, 9], size=(16,))
elif nb_channels_i is None:
channels = np.random.choice([None], size=(16,))
else:
channels = np.random.choice([nb_channels_i], size=(16,))
images = [np.zeros((64, 64), dtype=np.uint8)
if c is None
else np.zeros((64, 64, c), dtype=np.uint8)
for c in channels]
if nb_channels_i != "random":
images = np.array(images)
observed = aug.augment_images(images)
if ia.is_np_array(observed):
seen[0] += 1
else:
seen[1] += 1
for image, c in zip(observed, channels):
if c is None:
assert image.ndim == 2
else:
assert image.ndim == 3
assert image.shape[2] == c
assert 48 <= image.shape[0] <= 62
assert 48 <= image.shape[1] <= 62
assert seen[0] <= 3
assert seen[1] >= 17
def test_augment_images_with_2d_inputs(self):
base_img1 = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 1, 1, 1]], dtype=np.uint8)
base_img2 = np.array([[0, 0, 1, 1],
[0, 1, 1, 1],
[0, 1, 0, 0]], dtype=np.uint8)
base_img1_flipped = np.array([[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0]], dtype=np.uint8)
base_img2_flipped = np.array([[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 0]], dtype=np.uint8)
images = np.array([base_img1, base_img2])
images_flipped = np.array([base_img1_flipped, base_img2_flipped])
images_list = [base_img1, base_img2]
images_flipped_list = [base_img1_flipped, base_img2_flipped]
images_list2d3d = [base_img1, base_img2[:, :, np.newaxis]]
images_flipped_list2d3d = [
base_img1_flipped,
base_img2_flipped[:, :, np.newaxis]]
aug = iaa.Fliplr(1.0)
noaug = iaa.Fliplr(0.0)
# one numpy array as input
observed = aug.augment_images(images)
assert np.array_equal(observed, images_flipped)
observed = noaug.augment_images(images)
assert np.array_equal(observed, images)
# list of 2d images
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_flipped_list)
observed = noaug.augment_images(images_list)
assert array_equal_lists(observed, images_list)
# list of images, one 2d and one 3d
observed = aug.augment_images(images_list2d3d)
assert array_equal_lists(observed, images_flipped_list2d3d)
observed = noaug.augment_images(images_list2d3d)
assert array_equal_lists(observed, images_list2d3d)
def test_augment_keypoints_single_instance(self):
kpsoi = ia.KeypointsOnImage([ia.Keypoint(10, 10)], shape=(32, 32, 3))
aug = iaa.Affine(translate_px={"x": 1})
kpsoi_aug = aug.augment_keypoints(kpsoi)
assert len(kpsoi_aug.keypoints) == 1
assert kpsoi_aug.keypoints[0].x == 11
def test_augment_keypoints_single_instance_rot90(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=5),
ia.Keypoint(x=3, y=3)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 10, 3))
aug = iaa.Rot90(1, keep_size=False)
kpsoi_aug = aug.augment_keypoints(kpsoi)
# set offset to -1 if Rot90 uses int-based coordinate transformation
kp_offset = 0
assert np.allclose(kpsoi_aug.keypoints[0].x, 5 - 2 + kp_offset)
assert np.allclose(kpsoi_aug.keypoints[0].y, 1)
assert np.allclose(kpsoi_aug.keypoints[1].x, 5 - 5 + kp_offset)
assert np.allclose(kpsoi_aug.keypoints[1].y, 2)
assert np.allclose(kpsoi_aug.keypoints[2].x, 5 - 3 + kp_offset)
assert np.allclose(kpsoi_aug.keypoints[2].y, 3)
def test_augment_keypoints_many_instances_rot90(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=5),
ia.Keypoint(x=3, y=3)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 10, 3))
aug = iaa.Rot90(1, keep_size=False)
kpsoi_aug = aug.augment_keypoints([kpsoi, kpsoi, kpsoi])
# set offset to -1 if Rot90 uses int-based coordinate transformation
kp_offset = 0
for i in range(3):
assert np.allclose(kpsoi_aug[i].keypoints[0].x, 5 - 2 + kp_offset)
assert np.allclose(kpsoi_aug[i].keypoints[0].y, 1)
assert np.allclose(kpsoi_aug[i].keypoints[1].x, 5 - 5 + kp_offset)
assert np.allclose(kpsoi_aug[i].keypoints[1].y, 2)
assert np.allclose(kpsoi_aug[i].keypoints[2].x, 5 - 3 + kp_offset)
assert np.allclose(kpsoi_aug[i].keypoints[2].y, 3)
def test_augment_keypoints_empty_instance(self):
# test empty KeypointsOnImage objects
kpsoi = ia.KeypointsOnImage([], shape=(32, 32, 3))
aug = iaa.Affine(translate_px={"x": 1})
kpsoi_aug = aug.augment_keypoints([kpsoi])
assert len(kpsoi_aug) == 1
assert len(kpsoi_aug[0].keypoints) == 0
def test_augment_keypoints_mixed_filled_and_empty_instances(self):
kpsoi1 = ia.KeypointsOnImage([], shape=(32, 32, 3))
kpsoi2 = ia.KeypointsOnImage([ia.Keypoint(10, 10)], shape=(32, 32, 3))
aug = iaa.Affine(translate_px={"x": 1})
kpsoi_aug = aug.augment_keypoints([kpsoi1, kpsoi2])
assert len(kpsoi_aug) == 2
assert len(kpsoi_aug[0].keypoints) == 0
assert len(kpsoi_aug[1].keypoints) == 1
assert kpsoi_aug[1].keypoints[0].x == 11
def test_augment_keypoints_aligned_despite_empty_instance(self):
# Test if augmenting lists of KeypointsOnImage is still aligned with
# image augmentation when one KeypointsOnImage instance is empty
# (no keypoints)
kpsoi_lst = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=1, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([], shape=(1, 8)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=1, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10))
]
image = np.zeros((1, 10), dtype=np.uint8)
image[0, 0] = 255
images = np.tile(image[np.newaxis, :, :], (len(kpsoi_lst), 1, 1))
aug = iaa.Affine(translate_px={"x": (0, 8)}, order=0, mode="constant",
cval=0)
for i in sm.xrange(10):
for is_list in [False, True]:
with self.subTest(i=i, is_list=is_list):
aug_det = aug.to_deterministic()
if is_list:
images_aug = aug_det.augment_images(list(images))
else:
images_aug = aug_det.augment_images(images)
kpsoi_lst_aug = aug_det.augment_keypoints(kpsoi_lst)
if is_list:
images_aug = np.array(images_aug, dtype=np.uint8)
translations_imgs = np.argmax(images_aug[:, 0, :], axis=1)
translations_kps = [
kpsoi.keypoints[0].x
if len(kpsoi.keypoints) > 0
else None
for kpsoi
in kpsoi_lst_aug]
assert len([kpresult
for kpresult
in translations_kps
if kpresult is None]) == 1
assert translations_kps[5] is None
translations_imgs = np.concatenate(
[translations_imgs[0:5], translations_imgs[6:]])
translations_kps = np.array(
translations_kps[0:5] + translations_kps[6:],
dtype=translations_imgs.dtype)
translations_kps[2] -= 1
translations_kps[8-1] -= 1
assert np.array_equal(translations_imgs, translations_kps)
def test_augment_keypoints_aligned_despite_nongeometric_image_ops(self):
# Verify for keypoints that adding augmentations that only
# affect images doesn't lead to misalignments between image
# and keypoint transformations
augs = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.AdditiveGaussianNoise(scale=(0.01, 0.1)),
iaa.Affine(translate_px={"x": (-10, 10), "y": (-10, 10)},
order=0, mode="constant", cval=0),
iaa.AddElementwise((0, 1)),
iaa.Flipud(0.5)
], random_order=True)
kps = [ia.Keypoint(x=15.5, y=12.5), ia.Keypoint(x=23.5, y=20.5),
ia.Keypoint(x=61.5, y=36.5), ia.Keypoint(x=47.5, y=32.5)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 80, 4))
image = kpsoi.to_keypoint_image(size=1)
images = np.tile(image[np.newaxis, ...], (20, 1, 1, 1))
for _ in sm.xrange(50):
images_aug, kpsois_aug = augs(images=images,
keypoints=[kpsoi]*len(images))
for image_aug, kpsoi_aug in zip(images_aug, kpsois_aug):
kpsoi_recovered = ia.KeypointsOnImage.from_keypoint_image(
image_aug, nb_channels=4, threshold=100
)
for kp, kp_image in zip(kpsoi_aug.keypoints,
kpsoi_recovered.keypoints):
distance = np.sqrt((kp.x - kp_image.x)**2
+ (kp.y - kp_image.y)**2)
assert distance <= 1
def test_augment_bounding_boxes(self):
aug = _DummyAugmenterBBs()
bb = ia.BoundingBox(x1=1, y1=4, x2=2, y2=5)
bbs =
<gh_stars>0
#!/usr/bin/python -tt
"""This implements the OpenShift-specific logic for validating Yum
repositories
"""
import sys
from yumvalidator import repo_db
from yumvalidator.check_sources import CheckSources
from itertools import chain
from yum import Errors
import logging
OSE_PRIORITY = 10
RHEL_PRIORITY = 20
JBOSS_PRIORITY = 30
OTHER_PRIORITY = 40
UNKNOWN, RHSM, RHN = ('unknown', 'rhsm', 'rhn')
SUBS_NAME = {'unknown': '', 'rhsm': 'Red Hat Subscription Manager',
'rhn': 'RHN Classic or RHN Satellite'}
VALID_SUBS = SUBS_NAME.keys()[1:]
ATTACH_ENTITLEMENTS_URL = 'https://access.redhat.com/site/articles/522923'
VALID_OO_VERSIONS = ['1.2', '2.0']
VALID_ROLES = ['node', 'broker', 'client', 'node-eap']
def flatten_uniq(llist):
"""Flatten nested iterables and filter result for uniqueness
"""
return list(set(chain.from_iterable(llist)))
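# Illustrative behaviour (editor's note): one level of nesting is collapsed
# and duplicates are dropped; ordering is not preserved because the values
# pass through a set.
#
#     flatten_uniq([['base', 'node'], ['node', 'broker']])
#     # -> some ordering of ['base', 'node', 'broker']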
class UnrecoverableYumError(Exception):
"""The Yum API is giving up, and so should we"""
pass
class OpenShiftYumValidator(object):
"""This class encapsulates OpenShift-specific yum validator logic
"""
pri_header = False
pri_resolve_header = False
problem = False
resolved_repos = {}
committed_resolved_repos = {}
def __init__(self, opts, opt_parser):
self.opts = opts
self.opt_parser = opt_parser
self._setup_logger()
self.oscs = CheckSources()
if not self.opts.subscription:
self.opts.subscription = UNKNOWN
else:
self.opts.subscription = self.opts.subscription.lower()
if self.opts.repo_config:
self.rdb = repo_db.RepoDB(file(self.opts.repo_config),
user_repos_only=self.opts.user_repos_only)
else:
self.rdb = repo_db.RepoDB()
def _setup_logger(self):
self.opts.loglevel = logging.INFO
# TODO: log to file if specified, with requested severity
self.logger = logging.getLogger()
self.logger.setLevel(self.opts.loglevel)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(self.opts.loglevel)
ch.setFormatter(logging.Formatter("%(message)s"))
self.logger.addHandler(ch)
# if self.opts.logfile:
# self.logger.addHandler(logfilehandler)
def required_repos(self):
"""Return a list of RepoTuples that match the specified role,
subscription type and oo-version
"""
# Include the base RHEL repo in the required repos
roles = self.opts.role + ['base']
sub = self.opts.subscription
o_ver = self.opts.oo_version
return flatten_uniq([self.rdb.find_repos(subscription=sub,
role=role,
product_version=o_ver)
for role in roles])
def required_repoids(self):
"""Return a list of repoids as Strings that match the specified role,
subscription type and oo-version
"""
return [repo.repoid for repo in self.required_repos()]
def enabled_blessed_repos(self):
"""Return a list of RepoTuples from self.rdb that match repoids of
enabled repositories
"""
enabled = self.oscs.enabled_repoids()
return [repo for repo in self.rdb.find_repos_by_repoid(enabled)
if repo.subscription == self.opts.subscription
and repo.product_version == self.opts.oo_version]
def blessed_repoids(self, **kwargs):
"""Return a list of just repoids for the results of blessed_repos
called with the provided arguments
"""
return [repo.repoid for repo in self.blessed_repos(**kwargs)]
def blessed_repos(self, enabled = False, required = False, product = None):
"""Return a list of RepoTuples from self.rdb that match the provided
criteria
Keyword arguments:
enabled -- if True, constrain results to those matching the
repoids of currently enabled repositories
Default: False
required -- if True, constrain search to the list provided by
required_repos
Default: False
product -- if provided, constrain results to RepoTuples with a
product field that matches the string provided
Default: None
"""
kwargs = {'subscription': self.opts.subscription,
'product_version': self.opts.oo_version}
if product:
kwargs['product'] = product
if enabled:
if required:
return [repo for repo in self.required_repos()
if repo.repoid in self.oscs.enabled_repoids()
and (not product or repo.product == product)]
return [repo for repo in self.rdb.find_repos(**kwargs)
if repo.repoid in self.oscs.enabled_repoids()]
if required:
return [repo for repo in self.required_repos()
if not product or repo.product == product]
return self.rdb.find_repos(**kwargs)
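# Illustrative calls (editor's sketch; assumes self.rdb has been populated
# and the subscription / oo_version options are already set):
#
#     self.blessed_repos(enabled=True)                   # enabled repos for this sub/version
#     self.blessed_repos(required=True, product='ose')   # required OSE repos
#     self.blessed_repoids(enabled=True, required=True)  # just the repoid strings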
def _sub(self, subscription):
self.opts.subscription = subscription
self.logger.info('Detected OpenShift Enterprise repository '
'subscription managed by %s.' %
SUBS_NAME[self.opts.subscription])
def _oo_ver(self, version):
self.opts.oo_version = version
self.logger.info('Detected installed OpenShift Enterprise '
'version %s' % self.opts.oo_version)
def _sub_ver(self, subscription, version = None):
if self.opts.subscription == UNKNOWN and not self.opts.oo_version:
self._sub(subscription)
if version:
self._oo_ver(version)
return True
# We still haven't gotten a version guess - fail to force
# user to specify version
return False
if self.opts.subscription == UNKNOWN and self.opts.oo_version:
if not version or version == self.opts.oo_version:
self._sub(subscription)
return True
if self.opts.subscription != UNKNOWN and not self.opts.oo_version:
if subscription == self.opts.subscription and version:
self._oo_ver(version)
return True
if self.opts.subscription != UNKNOWN and self.opts.oo_version:
if (subscription == self.opts.subscription and
(not version or version == self.opts.oo_version)):
return True
return False
def guess_ose_version_and_subscription(self):
"""Attempt to determine the product version and subscription
management tool in use if one or both arguments aren't
provided by the user.
The guess first checks which enabled blessed OSE repositories provide
the installed key packages, then which blessed OSE repositories are
merely enabled (checking 2.0 before 1.2 in both passes). If only the
subscription type remains unknown, it is inferred from whether the
available repositories are managed by RHSM or by RHN.
"""
if self.opts.subscription != UNKNOWN and self.opts.oo_version:
# Short-circuit guess if user specifies sub and ver
return True
matches = self.rdb.find_repos_by_repoid(self.oscs.all_repoids())
rhsm_ose_2_0 = [repo for repo in matches if
repo in self.rdb.find_repos(subscription = 'rhsm',
product_version = '2.0',
product = 'ose')]
rhn_ose_2_0 = [repo for repo in matches if
repo in self.rdb.find_repos(subscription = 'rhn',
product_version = '2.0',
product = 'ose')]
rhsm_ose_1_2 = [repo for repo in matches if
repo in self.rdb.find_repos(subscription = 'rhsm',
product_version = '1.2',
product = 'ose')]
rhn_ose_1_2 = [repo for repo in matches if
repo in self.rdb.find_repos(subscription = 'rhn',
product_version = '1.2',
product = 'ose')]
rhsm_2_0_avail = [repo for repo in rhsm_ose_2_0 if repo.repoid in
self.oscs.enabled_repoids()]
rhn_2_0_avail = [repo for repo in rhn_ose_2_0 if repo.repoid in
self.oscs.enabled_repoids()]
rhsm_1_2_avail = [repo for repo in rhsm_ose_1_2 if repo.repoid in
self.oscs.enabled_repoids()]
rhn_1_2_avail = [repo for repo in rhn_ose_1_2 if repo.repoid in
self.oscs.enabled_repoids()]
rhsm_2_0_pkgs = filter(None,
[self.oscs.verify_package(repo.key_pkg,
source=repo.repoid)
for repo in rhsm_2_0_avail])
rhn_2_0_pkgs = filter(None,
[self.oscs.verify_package(repo.key_pkg,
source=repo.repoid)
for repo in rhn_2_0_avail])
rhsm_1_2_pkgs = filter(None,
[self.oscs.verify_package(repo.key_pkg,
source=repo.repoid)
for repo in rhsm_1_2_avail])
rhn_1_2_pkgs = filter(None,
[self.oscs.verify_package(repo.key_pkg,
source=repo.repoid)
for repo in rhn_1_2_avail])
# This if ladder detects the subscription type and version
# based on available OSE repos and which repos provide
# installed packages. Maybe there's a better way?
if ((rhsm_2_0_pkgs and self._sub_ver('rhsm', '2.0')) or
(rhn_2_0_pkgs and self._sub_ver('rhn', '2.0')) or
(rhsm_1_2_pkgs and self._sub_ver('rhsm', '1.2')) or
(rhn_1_2_pkgs and self._sub_ver('rhn', '1.2')) or
(rhsm_2_0_avail and self._sub_ver('rhsm', '2.0')) or
(rhn_2_0_avail and self._sub_ver('rhn', '2.0')) or
(rhsm_1_2_avail and self._sub_ver('rhsm', '1.2')) or
(rhn_1_2_avail and self._sub_ver('rhn', '1.2')) ):
return True
# This section detects just the subscription type if the
# version has been specified or couldn't be determined by the
# preceding logic.
for fxn_rcheck, sub in [(self.oscs.repo_is_rhsm, 'rhsm'),
(self.oscs.repo_is_rhn, 'rhn')]:
if self.opts.subscription == UNKNOWN:
for repoid in self.oscs.all_repoids():
if fxn_rcheck(repoid) and self._sub_ver(sub):
return True
else:
# No need to check for a value the user has provided
break
return False
def check_version_conflict(self):
"""Determine if repositories for multiple versions of OpenShift have
been wrongly enabled, and advise or fix accordingly.
"""
matches = self.rdb.find_repos_by_repoid(self.oscs.enabled_repoids())
conflicts = [repo for repo in matches if
(not hasattr(repo.product_version, '__iter__') and
not (repo.product_version == self.opts.oo_version) or
not (repo.subscription == self.opts.subscription))]
if conflicts:
self.problem = True
if self.opts.fix:
for repo in conflicts:
if self.oscs.disable_repo(repo.repoid):
self.logger.warning('Disabled repository %s' %
repo.repoid)
else:
rhsm_conflicts = [repo.repoid for repo in conflicts if
self.oscs.repo_is_rhsm(repo.repoid)]
rhn_conflicts = [repo.repoid for repo in conflicts if
self.oscs.repo_is_rhn(repo.repoid)]
other_conflicts = [repo.repoid for repo in conflicts if
not (repo.repoid in rhsm_conflicts or
repo.repoid in rhn_conflicts)]
if rhsm_conflicts:
self.logger.error('The following OpenShift Enterprise '
'repositories conflict with the '
'detected or specified product version.')
self.logger.error('To prevent package conflicts, disable '
'these repositories by running these '
'commands:')
for repoid in rhsm_conflicts:
self.logger.error(' # subscription-manager repos '
'--disable=%s' % repoid)
if rhn_conflicts:
self.logger.error('The following RHN Classic or RHN '
'Satellite-managed OpenShift Enterprise '
'repositories conflict with the '
'detected or specified product version.')
self.logger.error('To prevent package conflicts, disable '
'these repositories by making the '
'following modifications to '
'/etc/yum/pluginconf.d/rhnplugin.conf')
for repoid in rhn_conflicts:
self.logger.error(' Set enabled=0 in the [%s] '
'section' % repoid)
if other_conflicts:
self.logger.error('The following Yum repositories conflict '
'with the detected or specified product '
'version.')
self.logger.error('Disable these repositories by running '
'these commands:')
for repoid in other_conflicts:
self.logger.error(' # yum-config-manager '
'--disable %s' % repoid)
return False
return True
def verify_yum_plugin_priorities(self):
"""Determine if the required yum plugin package yum-plugin-priorities
is installed. No action should be taken if the package can't
be found (advise only)
"""
self.logger.info('Checking if yum-plugin-priorities is installed')
try:
if not self.oscs.verify_package('yum-plugin-priorities'):
self.problem = True
if self.oscs.package_available('yum-plugin-priorities'):
self.logger.error('Required package yum-plugin-priorities '
'is not installed. Install the package '
'with the following command:')
self.logger.error('# yum install yum-plugin-priorities')
else:
self.logger.error('Required package yum-plugin-priorities '
'is not available.')
return False
except Errors.RepoError as exc:
raise UnrecoverableYumError(exc)
return True
def _get_pri(self, repoid):
return self.resolved_repos.get(repoid, self.oscs.repo_priority(repoid))
def _limit_pri(self, repolist, minpri=False):
"""Determine the highest or lowest priority for the provided repos,
depending on minpri value
"""
res = -1
c_fxn, p_limit = max, 0
if minpri:
c_fxn, p_limit = min, 99
res = c_fxn(chain((self._get_pri(repoid) for
repoid in repolist), [p_limit]))
return res
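# Illustrative behaviour (editor's note): with minpri left False the helper
# returns the largest priority value among the given repos (or 0 if the list
# is empty); with minpri=True it returns the smallest priority value (or 99
# if the list is empty).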
def _set_pri(self, repoid, priority):
self.problem = True
if not self.pri_header:
self.pri_header = True
self.logger.info('Resolving repository/channel/subscription '
'priority
<reponame>s0nskar/sunpy
# Author: <NAME> <<EMAIL>>
#
# This module was developed with funding provided by
# the Google Summer of Code (2013).
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import os
from sqlalchemy.orm import make_transient
from sqlalchemy.exc import InvalidRequestError
from sunpy.extern import six
from sunpy.extern.six.moves import range
__all__ = [
'EmptyCommandStackError', 'NoSuchEntryError', 'NonRemovableTagError',
'DatabaseOperation', 'AddEntry', 'RemoveEntry', 'EditEntry',
'CommandManager']
class EmptyCommandStackError(Exception):
"""This exception is raised if it is attempted to pop from a command stack
even though it is empty.
"""
class NoSuchEntryError(Exception):
"""This exception is raised if it is attempted to remove an entry even
though it does not exist in the database.
"""
def __init__(self, database_entry):
self.database_entry = database_entry
def __str__(self): # pragma: no cover
return (
'the database entry {0!r} cannot be removed because it '
'is not stored in the database'.format(self.database_entry))
class NonRemovableTagError(Exception):
"""This exception is raised if it is attempted to remove a tag from a
database entry even though it is not saved in this entry.
"""
def __init__(self, database_entry, tag):
self.database_entry = database_entry
self.tag = tag
def __str__(self): # pragma: no cover
errmsg = 'the tag {0} cannot be removed from the database entry {1!r}'
return errmsg.format(self.database_entry, self.tag)
@six.add_metaclass(ABCMeta)
class DatabaseOperation(object):
"""This is the abstract main class for all database operations. To
implement a new operation, inherit from this class and override the methods
__call__ and undo. Both these methods get no parameters (except for self of
course). The undo method is expected to do the exact opposite of the
__call__ method, so that calling __call__ *and* undo multiple times in a
row must not have any side-effects. This is not checked in any way, though.
"""
@abstractmethod
def __call__(self):
return # pragma: no cover
@abstractmethod
def undo(self):
return # pragma: no cover
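# Illustrative subclass (editor's sketch, not part of the original module):
# a minimal operation that satisfies the DatabaseOperation contract, where
# undo() exactly reverses __call__(); `entry` and its `name` attribute are
# hypothetical placeholders.
#
#     class RenameEntry(DatabaseOperation):
#         def __init__(self, entry, new_name):
#             self.entry, self.new_name, self.old_name = entry, new_name, entry.name
#         def __call__(self):
#             self.entry.name = self.new_name
#         def undo(self):
#             self.entry.name = self.old_name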
class CompositeOperation(DatabaseOperation):
def __init__(self, operations=None):
if operations is None:
self._operations = []
else:
self._operations = operations
@property
def operations(self):
return self._operations
def add(self, operation):
self._operations.append(operation)
def remove(self, operation):
self._operations.remove(operation)
def __call__(self):
for operation in self._operations:
# FIXME: What follows is the worst hack of my life. Enjoy.
# Without it, the test test_clear_database would fail.
f = open(os.devnull, 'w'); f.write(repr(operation)); f.flush()
operation()
def undo(self):
for operation in self._operations:
operation.undo()
def __len__(self):
return len(self._operations)
class AddEntry(DatabaseOperation):
"""Add a new database entry to this session. It is not checked whether an
equivalent entry is already saved in the session; this has to be checked by
the caller. The ``undo`` method removes the entry from the session again.
"""
def __init__(self, session, database_entry):
self.session = session
self.database_entry = database_entry
def __call__(self):
try:
self.session.add(self.database_entry)
except InvalidRequestError:
# database entry cannot be added because it was removed from the
# database -> use make_transient to send this object back to
# the transient state
make_transient(self.database_entry)
self.session.add(self.database_entry)
def undo(self):
try:
self.session.delete(self.database_entry)
except InvalidRequestError:
# database entry cannot be removed because the last call was not
# followed by a commit -> use make_transient to revert putting the
# entry into the pending state
make_transient(self.database_entry)
def __repr__(self):
return '<{0}(session {1!r}, entry id {2})>'.format(
self.__class__.__name__, self.session, self.database_entry.id)
class RemoveEntry(DatabaseOperation):
"""Remove the given database entry from the session. If it cannot be
removed, because it is not stored in the session,
:exc:`sunpy.database.NoSuchEntryError` is raised. The ``undo`` method puts
the database entry back into the session object.
"""
def __init__(self, session, entry):
self.session = session
self.entry = entry
def __call__(self):
try:
self.session.delete(self.entry)
except InvalidRequestError:
# self.database_entry cannot be removed because it's not stored in
# the database
raise NoSuchEntryError(self.entry)
def undo(self):
make_transient(self.entry)
self.session.add(self.entry)
def __repr__(self):
return '<{0}(session {1!r}, entry {2!r})>'.format(
self.__class__.__name__, self.session, self.entry)
class EditEntry(DatabaseOperation):
"""Change the properties of the database entry. The given keyword arguments
are used to set the attributes of the entry. The keys represent the
attribute name and the values represent the new value of this attribute.
Example: ``EditEntry(entry, foo='bar')`` will set the attribute ``foo`` of
``entry`` to the value ``'bar'``.
"""
def __init__(self, database_entry, **kwargs):
self.database_entry = database_entry
if not kwargs:
raise ValueError("at least one keyword argument must be given")
self.kwargs = kwargs
self.prev_values = {}
def __call__(self):
for k, v in six.iteritems(self.kwargs):
# save those values in the dict prev_values that will be changed
# so that they can be recovered
self.prev_values[k] = getattr(self.database_entry, k)
setattr(self.database_entry, k, v)
def undo(self):
for k, v in six.iteritems(self.prev_values):
setattr(self.database_entry, k, v)
def __repr__(self):
return '<EditEntry(kwargs {0!r}, entry id {1})>'.format(
self.kwargs, self.database_entry.id)
class AddTag(DatabaseOperation):
def __init__(self, session, database_entry, tag):
self.session = session
self.database_entry = database_entry
self.tag = tag
def __call__(self):
try:
self.database_entry.tags.append(self.tag)
except InvalidRequestError:
# self.tag cannot be added because it was just removed
# -> put it back to transient state
make_transient(self.tag)
self.database_entry.tags.append(self.tag)
def undo(self):
self.database_entry.tags.remove(self.tag)
if not self.tag.data:
# remove the tag from the database as well if it was the last tag
# assigned to an entry
try:
RemoveEntry(self.session, self.tag)()
except NoSuchEntryError:
# entry cannot be removed because tag is only connected to
# entries which are not saved in the database
# -> can be safely ignored
pass
def __repr__(self):
return "<AddTag(tag '{0}', session {1!r}, entry id {2})>".format(
self.tag, self.session, self.database_entry.id)
class RemoveTag(DatabaseOperation):
"""Remove the tag from the given database entry. If the tag cannot be
removed from the database entry because it is not assigned to the entry,
:exc:`sunpy.database.NonRemovableTagError` is raised. The ``undo`` method
puts the removed tag back into the tag list of the database entry.
"""
def __init__(self, session, database_entry, tag):
self.session = session
self.database_entry = database_entry
self.tag = tag
def __call__(self):
try:
self.database_entry.tags.remove(self.tag)
except ValueError:
# tag not saved in entry
raise NonRemovableTagError(self.database_entry, self.tag)
else:
if not self.tag.data:
# remove the tag from the database as well if it was the last tag
# assigned to an entry
try:
RemoveEntry(self.session, self.tag)()
except NoSuchEntryError:
# entry cannot be removed because tag is only connected to
# entries which are not saved in the database
# -> can be safely ignored
pass
def undo(self):
try:
self.database_entry.tags.append(self.tag)
except InvalidRequestError:
# self.tag cannot be added because it was just removed
# -> put it back to transient state
try:
make_transient(self.tag)
self.database_entry.tags.append(self.tag)
except InvalidRequestError:
# self.database_entry has been removed
# -> put it back to transient state
make_transient(self.database_entry)
self.database_entry.tags.append(self.tag)
def __repr__(self):
return "<RemoveTag(tag '{0}', session {1!r}, entry id {2})>".format(
self.tag, self.session, self.database_entry.id)
class CommandManager(object):
"""The CommandManager saves all executed and reverted commands to act as an
undo-redo-manager. All executed commands are saved in the list attribute
``undo_commands`` and all undone commands are saved in the list attribute
``redo_commands``. It is not recommended to alter these stacks directly;
instead, use the methods ``push_undo_command``, ``pop_undo_command``,
``push_redo_command``, and ``pop_redo_command``, respectively.
"""
def __init__(self):
self.undo_commands = []
self.redo_commands = []
def clear_histories(self):
"""Clears all entries from the undo and redo history. If one or
both of the histories are already empty, no exception is raised.
"""
del self.undo_commands[:]
del self.redo_commands[:]
def push_undo_command(self, command):
"""Push the given command to the undo command stack."""
self.undo_commands.append(command)
def pop_undo_command(self):
"""Remove the last command from the undo command stack and return it.
If the command stack is empty,
:exc:`sunpy.database.commands.EmptyCommandStackError` is raised.
"""
try:
last_undo_command = self.undo_commands.pop()
except IndexError:
raise EmptyCommandStackError()
return last_undo_command
def push_redo_command(self, command):
"""Push the given command to the redo command stack."""
self.redo_commands.append(command)
def pop_redo_command(self):
"""Remove the last command from the redo command stack and return it.
If the command stack is empty,
:exc:`sunpy.database.commands.EmptyCommandStackError` is raised.
"""
try:
last_redo_command = self.redo_commands.pop()
except IndexError:
raise EmptyCommandStackError()
return last_redo_command
def do(self, command):
"""Execute the given command (a subclass of DatabaseOperation).
Exceptions raised from the command are not caught. The passed argument
may also be an iterable of commands. In this case, every command of the
iterable is executed and only one entry is saved in the undo history.
"""
command()
self.push_undo_command(command)
# clear the redo stack when a new command was executed
self.redo_commands[:] = []
def undo(self, n=1):
"""Undo the last n commands. The default is to undo only the last
command. If there is no command that can be undone because n is too big
or
import time
from collections import namedtuple
from enum import IntEnum
from os.path import join
from ovos_utils import resolve_ovos_resource_file, resolve_resource_file
from ovos_utils.log import LOG
from ovos_utils.messagebus import wait_for_reply, get_mycroft_bus, Message
from ovos_utils.system import is_installed, has_screen, is_process_running
def can_display():
return has_screen()
def is_gui_installed():
return is_installed("mycroft-gui-app") or \
is_installed("mycroft-embedded-shell") or \
is_installed("plasmashell")
def is_gui_running():
return is_process_running("mycroft-gui-app") or \
is_process_running("mycroft-embedded-shell") or \
is_process_running("plasmashell")
def is_gui_connected(bus=None):
# bus api for https://github.com/MycroftAI/mycroft-core/pull/2682
# send "gui.status.request"
# receive "gui.status.request.response"
response = wait_for_reply("gui.status.request",
"gui.status.request.response", bus=bus)
if response:
return response.data["connected"]
return False
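# Illustrative handshake (editor's sketch based on the handler above): the
# helper emits "gui.status.request" on the bus and expects a
# "gui.status.request.response" reply whose data carries {"connected": bool}.
#
#     if is_gui_connected(bus):
#         ...  # a GUI client is attached to the message bus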
def can_use_local_gui():
if can_display() and is_gui_installed() and is_gui_running():
return True
return False
def can_use_gui(bus=None, local=False):
if local:
return can_use_local_gui()
return can_use_local_gui() or is_gui_connected(bus)
class GUIPlaybackStatus(IntEnum):
STOPPED = 0
PLAYING = 1
PAUSED = 2
UNDEFINED = 3
class GUITracker:
""" Replicates GUI API from mycroft-core,
does not interact with GUI but exactly mimics status"""
Namespace = namedtuple('Namespace', ['name', 'pages'])
RESERVED_KEYS = ['__from', '__idle']
IDLE_MESSAGE = "mycroft.mark2.collect_idle" # TODO this will change
def __init__(self, bus=None,
host='0.0.0.0', port=8181, route='/core', ssl=False):
self.bus = bus or get_mycroft_bus(host, port, route, ssl)
self._active_skill = None
self._is_idle = False
self.idle_ts = 0
# This datastore holds the data associated with the GUI provider. Data
# is stored in Namespaces, so you can have:
# self.datastore["namespace"]["name"] = value
# Typically the namespace is a meaningless identifier, but there is a
# special "SYSTEM" namespace.
self._datastore = {}
# self.loaded is a list, each element consists of a namespace named
# tuple.
# The namespace namedtuple has the properties "name" and "pages"
# The name contains the namespace name as a string and pages is a
# mutable list of loaded pages.
#
# [Namespace name, [List of loaded qml pages]]
# [
# ["SKILL_NAME", ["page1.qml, "page2.qml", ... , "pageN.qml"]
# [...]
# ]
self._loaded = [] # list of lists in order.
# Listen for new GUI clients to announce themselves on the main bus
self._active_namespaces = []
# GUI handlers
self.bus.on("gui.value.set", self._on_gui_set_value)
self.bus.on("gui.page.show", self._on_gui_show_page)
self.bus.on("gui.page.delete", self._on_gui_delete_page)
self.bus.on("gui.clear.namespace", self._on_gui_delete_namespace)
# Idle screen handlers TODO message cleanup...
self._idle_screens = {}
self.bus.on("mycroft.device.show.idle", self._on_show_idle) # legacy
self.bus.on(self.IDLE_MESSAGE, self._on_show_idle)
self.bus.on("mycroft.mark2.register_idle", self._on_register_idle)
self.bus.emit(Message("mycroft.mark2.collect_idle"))
@staticmethod
def is_gui_installed():
return is_gui_installed()
@staticmethod
def is_gui_running():
return is_gui_running()
def is_gui_connected(self):
return is_gui_connected(self.bus)
@staticmethod
def can_display():
return can_display()
def is_displaying(self):
return self.active_skill is not None
def is_idle(self):
return self._is_idle
@property
def active_skill(self):
return self._active_skill
@property
def gui_values(self):
return self._datastore
@property
def idle_screens(self):
return self._idle_screens
@property
def active_namespaces(self):
return self._active_namespaces
@property
def gui_pages(self):
return self._loaded
# GUI event handlers
# user can/should subclass this
def on_idle(self, namespace):
pass
def on_active(self, namespace):
pass
def on_new_page(self, namespace, page, index):
pass
def on_delete_page(self, namespace, index):
pass
def on_gui_value(self, namespace, key, value):
pass
def on_new_namespace(self, namespace):
pass
def on_move_namespace(self, namespace, from_index, to_index):
pass
def on_remove_namespace(self, namespace, index):
pass
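# Illustrative subclass (editor's sketch; the on_* methods above are the
# intended extension points, the bodies here are just examples):
#
#     class MyTracker(GUITracker):
#         def on_new_page(self, namespace, page, index):
#             print(f"{namespace} loaded {page} at position {index}")
#         def on_idle(self, namespace):
#             print(f"{namespace} went idle")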
######################################################################
# GUI client API
# TODO see how much of this can be removed
@staticmethod
def _get_page_data(message):
""" Extract page related data from a message.
Args:
message: messagebus message object
Returns:
tuple (page, namespace, index)
Raises:
ValueError if value is missing.
"""
data = message.data
# Note: 'page' can be either a string or a list of strings
if 'page' not in data:
raise ValueError("Page missing in data")
if 'index' in data:
index = data['index']
else:
index = 0
page = data.get("page", "")
namespace = data.get("__from", "")
return page, namespace, index
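# Illustrative input/output (editor's note): for a message whose data is
# {"page": ["page1.qml"], "index": 0, "__from": "skill-xyz"} this helper
# returns (["page1.qml"], "skill-xyz", 0); a missing "page" key raises
# ValueError and a missing "index" defaults to 0.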
def _set(self, namespace, name, value):
""" Perform the send of the values to the connected GUIs. """
if namespace not in self._datastore:
self._datastore[namespace] = {}
if self._datastore[namespace].get(name) != value:
self._datastore[namespace][name] = value
def __find_namespace(self, namespace):
for i, skill in enumerate(self._loaded):
if skill[0] == namespace:
return i
return None
def __insert_pages(self, namespace, pages):
""" Insert pages into the namespace
Args:
namespace (str): Namespace to add to
pages (list): Pages (str) to insert
"""
LOG.debug("Inserting new pages")
# Insert the pages into the local representation as well.
updated = self.Namespace(self._loaded[0].name,
self._loaded[0].pages + pages)
self._loaded[0] = updated
def __remove_page(self, namespace, pos):
""" Delete page.
Args:
namespace (str): Namespace to remove from
pos (int): Page position to remove
"""
LOG.debug("Deleting {} from {}".format(pos, namespace))
self.on_delete_page(namespace, pos)
# Remove the page from the local representation as well.
self._loaded[0].pages.pop(pos)
def __insert_new_namespace(self, namespace, pages):
""" Insert new namespace and pages.
This first sends a message adding a new namespace at the
highest priority (position 0 in the namespace stack)
Args:
namespace (str): The skill namespace to create
pages (str): Pages to insert (name matches QML)
"""
LOG.debug("Inserting new namespace")
self.on_new_namespace(namespace)
# Make sure the local copy is updated
self._loaded.insert(0, self.Namespace(namespace, pages))
if time.time() - self.idle_ts > 1:
# we can't know if this page is idle or not, but when it is we
# receive an idle event within the same second
self._is_idle = False
self.on_active(namespace)
else:
self.on_idle(namespace)
def __move_namespace(self, from_pos, to_pos):
""" Move an existing namespace to a new position in the stack.
Args:
from_pos (int): Position in the stack to move from
to_pos (int): Position to move to
"""
LOG.debug("Activating existing namespace")
# Move the local representation of the skill from current
# position to position 0.
namespace = self._loaded[from_pos].name
self.on_move_namespace(namespace, from_pos, to_pos)
self._loaded.insert(to_pos, self._loaded.pop(from_pos))
def _show(self, namespace, page, index):
""" Show a page and load it as needed.
Args:
page (str or list): page(s) to show
namespace (str): skill namespace
index (int): ??? TODO: Unused in code ???
TODO: - Update sync to match.
- Separate into multiple functions/methods
"""
LOG.debug("GUIConnection activating: " + namespace)
self._active_skill = namespace
pages = page if isinstance(page, list) else [page]
# find namespace among loaded namespaces
try:
index = self.__find_namespace(namespace)
if index is None:
# This namespace doesn't exist, insert them first so they're
# shown.
self.__insert_new_namespace(namespace, pages)
return
else: # Namespace exists
if index > 0:
# Namespace is inactive, activate it by moving it to
# position 0
self.__move_namespace(index, 0)
# Find if any new pages needs to be inserted
new_pages = [p for p in pages if
p not in self._loaded[0].pages]
if new_pages:
self.__insert_pages(namespace, new_pages)
except Exception as e:
LOG.exception(repr(e))
######################################################################
# Internal GUI events
def _on_gui_set_value(self, message):
data = message.data
namespace = data.get("__from", "")
# Pass these values on to the GUI renderers
for key in data:
if key not in self.RESERVED_KEYS:
try:
self._set(namespace, key, data[key])
self.on_gui_value(namespace, key, data[key])
except Exception as e:
LOG.exception(repr(e))
def _on_gui_delete_page(self, message):
""" Bus handler for removing pages. """
page, namespace, _ = self._get_page_data(message)
try:
self._remove_pages(namespace, page)
except Exception as e:
LOG.exception(repr(e))
def _on_gui_delete_namespace(self, message):
""" Bus handler for removing namespace. """
try:
namespace = message.data['__from']
self._remove_namespace(namespace)
except Exception as e:
LOG.exception(repr(e))
def _on_gui_show_page(self, message):
try:
page, namespace, index = self._get_page_data(message)
# Pass the request to the GUI(s) to pull up a page template
self._show(namespace, page, index)
self.on_new_page(namespace, page, index)
except Exception as e:
LOG.exception(repr(e))
def _remove_namespace(self, namespace):
""" Remove namespace.
Args:
namespace (str): namespace to remove
"""
index = self.__find_namespace(namespace)
if index is None:
return
else:
LOG.debug("Removing namespace {} at {}".format(namespace, index))
self.on_remove_namespace(namespace, index)
# Remove namespace from loaded namespaces
self._loaded.pop(index)
def _remove_pages(self, namespace, pages):
""" Remove the listed pages from the provided namespace.
Args:
namespace (str): The namespace to modify
pages (list): List of page names (str) to delete
"""
try:
index = self.__find_namespace(namespace)
if index is None:
return
else:
# Drop any pages from the removal list that don't exist in the namespace
pages = [p for p in pages if p in self._loaded[index].pages]
# Make sure to remove pages from the back
indexes = [self._loaded[index].pages.index(p) for p in pages]
indexes = sorted(indexes)
indexes.reverse()
for page_index in indexes:
self.__remove_page(namespace, page_index)
except Exception as e:
LOG.exception(repr(e))
def _on_register_idle(self, message):
"""Handler for catching incoming idle screens."""
if "name" in message.data and "id" in message.data:
screen = message.data["name"]
if screen not in self._idle_screens:
self.bus.on("{}.idle".format(message.data["id"]),
self._on_show_idle)
self._idle_screens[screen] = message.data["id"]
LOG.info("Registered {}".format(message.data["name"]))
else:
LOG.error("Malformed idle screen registration received")
def _on_show_idle(self, message):
self.idle_ts = time.time()
self._is_idle = True
class _GUIDict(dict):
""" this is an helper dictionay subclass, it ensures that value changed
in it are propagated to the GUI service real time"""
def __init__(self, | |
<filename>pytorch_blade/torch_blade/onnx_backends/backend_testbed.py
# Copyright 2022 The BladeDISC Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from collections import defaultdict
import onnx
import torch
from torch_blade import pass_manager, tools
from torch_blade.config import Config
from torch_blade.logging import logger
from torch_blade.tools import onnx_lower_guard
class OnnxBackendChecker:
def __init__(self, subgraph, onnx_backend_test_func, backend_name):
self._graph = subgraph
self._onnx_backend_test_func = onnx_backend_test_func
self._backend_name = backend_name
def _check_concrete_shape(self, graph):
def is_tensor(val):
return isinstance(val.type(), torch._C.TensorType)
all_concrete_inputs = all(
tools.is_concrete_shape_tensor_type(inp) for inp in graph.inputs()
)
all_tensor_outputs = all(
is_tensor(out) for node in graph.nodes() for out in node.outputs()
)
return all_concrete_inputs and all_tensor_outputs
def __call__(self):
try:
graph = self._graph
# use manual rules to filter the graph
if not onnx_lower_guard.check_graph_with_rules(graph):
return False
graph, _ = pass_manager._jit_pass_lower_to_onnx(graph)
if not self._check_concrete_shape(graph):
return False
proto = pass_manager._export_onnx(graph, {})
onnx_model = onnx.load_from_string(proto)
# pylint: disable=maybe-no-member
if len(onnx_model.graph.node) == 0:
node_kinds = [n.kind() for n in self._graph.nodes()]
logger.warning(f"The subgraph exported from {str(node_kinds)} is empty")
# TODO: Currently we believe that empty onnx export
# means the corresponding TorchScript graph has no
# meanings in a typical inference backend
# (e.g. contiguous, dropout, detach). If we see
# counterexamples in the future, we will switch the
# default to execute such graphs with fallback and use
# a whitelist to let ops like dropout, contiguous and
# detach through.
return True
supported = self._onnx_backend_test_func(proto)
if not supported:
node_kinds = [n.kind() for n in graph.nodes()]
logger.warning(
f"{str(node_kinds)} export to onnx success, but is not supported by the backend: {self._backend_name}"
)
return supported
except Exception as error:
logger.debug(error)
return False
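# Illustrative use of the checker (editor's sketch; `my_backend_supports` and
# the backend name are placeholders, not part of the original sources):
#
#     def my_backend_supports(onnx_proto: bytes) -> bool:
#         ...  # ask the target backend whether it accepts this ONNX proto
#
#     checker = OnnxBackendChecker(subgraph, my_backend_supports, "my-backend")
#     if checker():
#         ...  # the subgraph lowers to ONNX and the backend accepts it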
class OnnxBackendTestBed:
"""
For each node in the original graph, test whether it can be converted to the backend.
"""
def __init__(
self, graph, ignore_device, onnx_backend_test_func, backend_name, q_info=None
):
# the original graph must not be modified
self._orig_graph = graph
self._onnx_backend_test_func = onnx_backend_test_func
self._backend_name = backend_name
self._ignore_device = ignore_device
self._unsupported_node_from_orig_graph = set()
self._current_segment = torch._C.Graph()
self._current_segment_size = 0
# TODO: Add some comments
self._max_segment_size = 1
self._segment_list = []
# store whether a view-kind node should be considered as an inplace node.
self._seen_view_kinds_node = dict()
# used when cloning nodes from the original graph into a segment (value map)
self._orig2segment_value_map = dict()
cfg = Config.get_current_context_or_new()
self._black_list = set(
[
"prim::If",
"prim::Loop", # add here currently not supported
"prim::TupleConstruct",
"prim::DictConstruct", # onnx not support
"prim::CallMethod", # onnx & hie not support
"aten::empty_like", # hie not supported
"aten::sort", # raise exception when converting to TopK
]
+ [op for op in cfg.customize_op_black_list if "@" not in op]
)
node_kind_list = defaultdict(list)
for node in graph.node_list():
node_kind_list[node.kind()].append(node)
customize_node_black_list = [
op for op in cfg.customize_op_black_list if "@" in op
]
self._black_node_list = []
for node_name in customize_node_black_list:
try:
op_kind, node_idx = node_name.split("@")
node_idx = int(node_idx)
node = node_kind_list[op_kind][node_idx]
self._black_node_list.append(node)
except Exception as error:
logger.warning(error)
self._shape_white_list = []
if cfg.enable_onnx_shape_white_list:
# Because many shape operations act on scalars, which would be placed on the CPU device,
# _ignore_device is also enabled if enable_onnx_shape_white_list is set
self._ignore_device = True
# TODO(gty): try to find the shape computation subgraph automatically
self._shape_white_list = [
"aten::view",
"aten::size",
"aten::reshape",
"aten::mul",
"aten::floor_divide",
"aten::floordiv",
"aten::Int",
"prim::NumToTensor",
]
logger.warning(
"Enable _ignore_device because of enable_onnx_shape_white_list"
)
self._white_list = set(self._shape_white_list + cfg.customize_op_white_list)
self._view_kinds = set(
[
"aten::select",
"aten::view",
"aten::slice",
"aten::expand",
"aten::expand_as",
]
)
self._fp16_excluded_list = q_info.fp16_excluded_list if q_info else []
def _is_inplace_kinds(self, node):
"""
Check whether a view-kind node should be considered as an inplace node.
Let A be a node whose kind is in self._view_kinds; then:
A -> A -> aten::add  | A should not be considered an inplace node
A -> A -> aten::add_ | A should be considered an inplace node
Given a view-kind node, we 'build' a tree with the following steps:
1. mark its inputs as the root_values
2. iterate through all child nodes who are the consumers of the root_values
3. For each child node:
a. If it is not of view-kind or its outputs have no consumers, add it to the tree
as a leaf.
b. If it is of view-kind, add it to the tree as a tree node and mark its outputs
as the root_values, then go back to step 2.
If there is no inplace node in this tree, then all the view-kind nodes in this tree
should not be considered as inplace nodes, otherwise they should.
"""
if node.kind() not in self._view_kinds:
return False
if node in self._seen_view_kinds_node:
return self._seen_view_kinds_node[node]
def _check_tensor(val):
is_tensor = val.type().isSubtypeOf(torch._C.TensorType.get())
is_non_const = val.node().kind() != "prim::Constant"
return is_tensor and is_non_const
def _check_all_user(val):
for u in val.uses():
new_node = u.user
if new_node.kind() in self._view_kinds:
seen_node.append(new_node)
for oup in new_node.output_list():
if _check_tensor(oup):
_check_all_user(oup)
else:
if new_node.kind().endswith("_"):
nonlocal result
result = True
result = False
seen_node = []
# start from the input tensor to build the tree
for inp in node.input_list():
if _check_tensor(inp):
_check_all_user(inp)
# one node may be added twice (e.g. aten::expand_as)
seen_node = set(seen_node)
for n in seen_node:
assert n not in self._seen_view_kinds_node
self._seen_view_kinds_node[n] = result
return result
def _is_inplace(self, node):
return node.kind().endswith("_") or self._is_inplace_kinds(node)
@functools.lru_cache(maxsize=None)
def _is_inplace_safe(self, node):
"""This method is stateful, it cache safe inplace ops flags"""
# In many models inplace ops formed up a pipeline on the
# features map. We would like to find out the simplest case,
# that an inplace op is the last consumer of its input values.
# In this case, we could replace the inplace ops with it's
# corresponding out-of-place version safely.
#
# See the following graph:
# o ---. .-> w_
# \ /
# o --> o_ --> o_ --> x_ --> y_ ------> v_ --> z_
# t t t t f f f | inplace_safe?
#
# Some inplace ops can be replaced with their corresponding out-of-place versions:
# o ---. .-> w_
# \ /
# o --> o --> o --> x ---> y -------> v_ --> z_
# t t t t f f f | inplace_safe?
#
# We cache the inplace ops that are safe while the OnnxBackendTestBed is being built,
# so that they can skip the black-list verification
if node.kind() == "prim::Param":
# We should never change inplace modifications on graph inputs,
# because this side-effect may be used outside the graph scope.
return False
if not self._is_inplace(node):
return True
non_const_inputs = set(
inp for inp in node.inputs() if inp.node().kind() != "prim::Constant"
)
def is_last_user(val):
for u in val.uses():
if u.user is node:
continue
if u.user.isAfter(node):
return False
return True
all_is_last_user = all(is_last_user(inp) for inp in non_const_inputs)
all_inplace_inps_safe = all(
self._is_inplace_safe(inp.node()) for inp in non_const_inputs
)
is_safe = all_is_last_user and all_inplace_inps_safe
return is_safe
def _hit_black_list(self, node):
is_inplace_safe = self._is_inplace_safe(node)
is_hit = (node.kind() in self._black_list) or not is_inplace_safe
is_hit = (node in self._black_node_list) or is_hit
return is_hit
def get_unsupported(self):
return self._unsupported_node_from_orig_graph
def _clear(self):
self._segment_list.append(self._current_segment)
self._current_segment = torch._C.Graph()
self._current_segment_size = 0
self._orig2segment_value_map.clear()
def _add_graph_input_if_need(self, old_node):
# all prim::Constant inputs of node is fused into subgraph if need
for inp in old_node.inputs():
if inp in self._orig2segment_value_map:
continue
inp_node_kind = inp.node().kind()
is_const = inp_node_kind == "prim::Constant"
is_listconstruct = inp_node_kind == "prim::ListConstruct"
if is_const or is_listconstruct:
# prim::Constant, prim::ListConstruct
self._appendNode(inp.node())
continue
inp_ = self._current_segment.addInput()
self._orig2segment_value_map[inp] = inp_
inp_.copyMetadata(inp)
def _add_unsupported(self, node):
logger.debug("Found unsupported: %s" % (node.kind()))
self._unsupported_node_from_orig_graph.add(node)
self._clear()
def appendNode(self, node):
if self._max_segment_size <= self._current_segment_size:
# to save conversion time, create a new segment & store the old one
self._clear()
if self._fp16_excluded_list:
for oup in node.output_list():
if oup.debugName() in self._fp16_excluded_list:
self._add_unsupported(node)
return False
| |
import queue
import subprocess
from time import sleep
from enum import Enum
from classes.MediaLibrary import MediaLibrary
from classes.MediaPlayerInfo import MediaPlayerInfo, CurrentTrackInfo, TrackInfo
import json
import musicbrainzngs as m
import libdiscid
class MediaPlayer:
"""
Contains logic for controlling mpv and getting information about the inserted CD.
"""
class DiskType(Enum):
AUDIO_CD = 'audio_cd'
MP3_CD = 'mp3_cd'
class BranchType(Enum):
FOLDERS = 'folders'
ARTISTS = 'artists'
ALBUMS = 'albums'
def __init__(self, config):
self._config = config
self.MPV_COMMAND = ["mpv", "--quiet", "--vo=null",
"--no-audio-display",
"--cache=1024", "--loop",
"--input-ipc-server=" + self._config['MPV_SOCKET_PATH']]
self._cd = CD()
self._mpv = None
self._current_disk_type = None
self._media_library = None
self._current_track_list = None
self._current_media_library_branch_type_index = None
self._info_events = None
self._current_track = 0
self._volume = 95
def get_current_info(self, status=True, cur_track_info=True, volume=True, track_list=False, library=False):
info = MediaPlayerInfo()
if self.is_running:
if status:
status_res = self._run_command('get_property', 'pause')
info.status = 'paused' if status_res else 'playing'
if cur_track_info:
info.cur_track_info = CurrentTrackInfo()
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
chapter_res = self._run_command('get_property', 'chapter')
self._current_track = chapter_res
info.cur_track_info.track_number = chapter_res
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
playlist_pos_res = self._run_command('get_property', 'playlist-pos')
self._current_track = playlist_pos_res
info.cur_track_info.track_number = playlist_pos_res
if self._current_track is not None:
time_res = self._run_command('get_property', 'time-pos')
if time_res is not None:
time_millis = time_res * 1000
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
for track in self._current_track_list[0:self._current_track]:
time_millis -= track.total_time
info.cur_track_info.cur_time = time_millis
if volume:
vol = self._run_command('get_property', 'volume')
if vol is not None:
self._volume = vol
info.volume = vol
if track_list and self._current_track_list is not None:
info.track_list = list(map(lambda x: x.as_dict(), self._current_track_list))
if library and self._media_library is not None:
info.library = self._media_library
else:
info.volume = self._volume
info.status = 'waitingForCD'
return info
def poll_info(self):
try:
info_event = self._info_events.get_nowait()
return info_event
except queue.Empty:
return None
def try_play_cd(self):
"""
Tries to play a CD from the CD drive (or media files from a USB drive), if present.
Sets the current media library branch type and index attribute and puts info into the info queue.
:return: None
"""
self._info_events = queue.Queue()
if not self.is_running:
cd_type = self._check_for_cd()
if cd_type is None:
return
if cd_type == MediaPlayer.DiskType.AUDIO_CD:
# check for audio CD
print('playing audio CD')
self._mpv = subprocess.Popen(self.MPV_COMMAND + [
'cdda://', '--volume=' + self._config['DEFAULT_VOLUME']
], bufsize=1)
elif cd_type == MediaPlayer.DiskType.MP3_CD:
# check for MP3 CD
print('playing MP3 CD')
self._mpv = subprocess.Popen(self.MPV_COMMAND + ['--volume=' + self._config['DEFAULT_VOLUME']] +
list(map(lambda file: file.full_path,
self._media_library.media_folders[0].media_files)),
bufsize=1)
self._current_media_library_branch_type_index = (MediaPlayer.BranchType.FOLDERS, 0)
info = self.get_current_info(True, True, True, True, True)
# info = self.get_current_info(True, False, True, True, True)
# fill cur_track_info with zeros, because it may not be initialized yet (mpv loading)
info.cur_track_info = CurrentTrackInfo()
info.cur_track_info.cur_time = 0
info.cur_track_info.track_number = 0
self._info_events.put(info)
def _check_for_cd(self):
self._current_disk_type = None
self._current_track_list = []
self._cd.load_cd_info()
df = []
if CD.is_cd_inserted():
if self._cd.numtracks > 1:
# CD that isn't audio CD has 1 track
self._current_disk_type = MediaPlayer.DiskType.AUDIO_CD
try:
artist = self._cd._cd_info['disc']['release-list'][0]['artist-credit-phrase']
album = self._cd._cd_info['disc']['release-list'][0]['title']
self._current_track_list = list(map(
lambda x, y: TrackInfo(y, artist, album, x['recording']['title']),
self._cd._cd_info['disc']['release-list'][0]['medium-list'][0]['track-list'],
self._cd.track_lengths))
except:
self._current_track_list = list(map(lambda x: TrackInfo(x), self._cd.track_lengths))
else:
df = subprocess.getoutput('df | grep ' + self._config['CD_DEVICE']).split()
else:
df = subprocess.getoutput('df | grep ' + self._config['USB_DEVICE']).split()
if len(df) > 0:
mount_point = ' '.join(df[5:])
self._media_library = MediaLibrary()
self._media_library.init(mount_point)
if self._media_library.media_file_count > 0:
self._current_disk_type = MediaPlayer.DiskType.MP3_CD
self._current_track_list = list(map(
lambda media_info: TrackInfo(media_info.total_time, media_info.artist, media_info.album,
media_info.title),
self._media_library.media_folders[0].media_files))
# print(self._media_library.as_dict())
return self._current_disk_type
def _run_command(self, *command):
command_dict = {
"command": command
}
command_json = json.dumps(command_dict) + '\n'
socat = subprocess.Popen(['socat', '-', self._config['MPV_SOCKET_PATH']], stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
socat_output = socat.communicate(command_json.encode('utf-8'))
if socat_output[0] is not None and \
len(socat_output[0]) != 0 and \
socat_output[1] is None:
try:
data = json.loads(socat_output[0].decode())
return data['data']
except:
return None
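# Illustrative IPC exchange (editor's note): a call such as
# self._run_command('get_property', 'volume') writes the JSON line
#     {"command": ["get_property", "volume"]}
# to the mpv socket via socat and returns the "data" field of mpv's JSON
# reply, or None if the reply cannot be parsed.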
def _put_info_with_delay(self, full=False):
if full:
sleep(0.2)
self._info_events.put(self.get_current_info(True, True, True, True, True))
sleep(1)
self._info_events.put(self.get_current_info(True, True, True, True, True))
else:
sleep(0.2)
self._info_events.put(self.get_current_info())
sleep(1)
self._info_events.put(self.get_current_info())
def next_track(self):
last_track = len(self._current_track_list) - 1
if self._current_track != last_track:
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('add', 'chapter', '1')
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('add', 'playlist-pos', '1')
else:
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('set', 'chapter', '0')
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('set', 'playlist-pos', '0')
self._put_info_with_delay()
def prev_track(self):
if self._current_track != 0:
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('add', 'chapter', '-1')
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('add', 'playlist-pos', '-1')
else:
last_track = len(self._current_track_list) - 1
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('set', 'chapter', str(last_track))
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('set', 'playlist-pos', str(last_track))
self._put_info_with_delay()
def next_branch(self):
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self.next_track()
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
type_index = self._current_media_library_branch_type_index
folder_index = None
artist_index = None
album_index = None
if type_index[0] == MediaPlayer.BranchType.FOLDERS:
folder_index = (type_index[1] + 1) % len(self._media_library.media_folders)
elif type_index[0] == MediaPlayer.BranchType.ALBUMS:
artist_index = (type_index[1] + 1) % len(self._media_library.artists)
album_index = type_index[2] + 1
if album_index >= len(self._media_library.artists[artist_index].albums):
album_index = 0
elif type_index[0] == MediaPlayer.BranchType.ARTISTS:
artist_index = (type_index[1] + 1) % len(self._media_library.artists)
self.play_file(type_index[0], (folder_index, artist_index, album_index, 0))
def prev_branch(self):
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self.prev_track()
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
type_index = self._current_media_library_branch_type_index
folder_index = None
artist_index = None
album_index = None
if type_index[0] == MediaPlayer.BranchType.FOLDERS:
folder_index = type_index[1] - 1
folder_index = folder_index if folder_index != -1 else len(self._media_library.media_folders) - 1
elif type_index[0] == MediaPlayer.BranchType.ALBUMS:
album_index = type_index[2] - 1
if album_index == -1:
artist_index = type_index[1] - 1
artist_index = artist_index if artist_index != -1 else len(self._media_library.artists) - 1
album_index = len(self._media_library.artists[artist_index].albums) - 1
elif type_index[0] == MediaPlayer.BranchType.ARTISTS:
artist_index = type_index[1] - 1
artist_index = artist_index if artist_index != -1 else len(self._media_library.artists) - 1
self.play_file(type_index[0], (folder_index, artist_index, album_index, 0))
def play_track(self, track_number):
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('set', 'chapter', str(track_number))
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('set', 'playlist-pos', str(track_number))
self._put_info_with_delay()
def play_file(self, media_library_type, indexes):
# indexes = (folder_index, artist_index, album_index, file_index)
if self._current_disk_type == MediaPlayer.DiskType.MP3_CD and \
media_library_type is not None and \
indexes is not None:
files = None
if media_library_type == MediaPlayer.BranchType.FOLDERS:
self._current_media_library_branch_type_index = (MediaPlayer.BranchType.FOLDERS,
indexes[0])
files = self._media_library.media_folders[indexes[0]].media_files
elif media_library_type == MediaPlayer.BranchType.ALBUMS:
self._current_media_library_branch_type_index = (MediaPlayer.BranchType.ALBUMS,
indexes[1],
indexes[2])
files = self._media_library.artists[indexes[1]].albums[indexes[2]].songs
elif media_library_type == MediaPlayer.BranchType.ARTISTS:
self._current_media_library_branch_type_index = (MediaPlayer.BranchType.ARTISTS,
indexes[1])
files = self._media_library.artists[indexes[1]].songs
file_index = indexes[3]
if files is not None:
ordered_files = files[file_index:] + files[0:file_index]
self._current_track_list = list(map(
lambda media_info: TrackInfo(media_info.total_time, media_info.artist, media_info.album,
media_info.title),
ordered_files))
self._run_command('playlist-clear')
self._run_command('loadfile', files[file_index].full_path)
for file in ordered_files[1:]:
self._run_command('loadfile', file.full_path, 'append')
self._put_info_with_delay(True)
def volume_up(self):
self._volume = (self._volume + 5) % 101
self._run_command('set', 'volume', str(self._volume))
self._info_events.put(self.get_current_info(False, False, True, False, False))
def volume_down(self):
volume = self._volume - 5
volume = volume if volume >= 0 else 0
self._volume = volume
self._run_command('set', 'volume', str(self._volume))
self._info_events.put(self.get_current_info(False, False, True, False, False))
def play_pause(self):
pause = self._run_command('get_property', 'pause')
if pause:
self._run_command('set', 'pause', 'no')
else:
self._run_command('set', 'pause', 'yes')
self._info_events.put(self.get_current_info())
def stop(self):
try:
self._mpv.kill()
except:
print("Nothing is playing.")
subprocess.call(['umount', '/dev/' + self._config['USB_DEVICE']])
self._current_disk_type = None
self._current_track = 0
self._current_track_list = None
self._current_media_library_branch_type_index = None
self._media_library = None
self.eject()
def eject(self):
subprocess.Popen(['eject', self._config['CD_DEVICE']])
def seek(self, seek_percent):
time_millis = self._current_track_list[self._current_track].total_time * seek_percent / 100
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
for track in self._current_track_list[:self._current_track]:
time_millis += track.total_time
self._run_command('set', 'time-pos', str(time_millis / 1000))
# (time_millis / 1000) * (seek_percent / 100)
self._put_info_with_delay()
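# Worked example for seek() on an audio CD (hypothetical track times, in milliseconds):
# with total_time values [200000, 180000] and the current track being index 1,
# seek_percent=50 gives time_millis = 180000 * 50 / 100 = 90000, plus the 200000 ms of
# track 0, so mpv receives time-pos = 290000 / 1000 = 290 seconds on the combined stream.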
@property
def is_running(self):
return self._mpv is not None and self._mpv.poll() is None
@property
def current_track_list(self):
return self._current_track_list
class CD:
"""
Represents CD drive and disc inside.
"""
def __init__(self):
self._numtracks = 0
self._track_lengths = []
self._cd_info = None
def load_cd_info(self):
# JH - added code to query musicbrainz for disk info, build track list and times from that info
# instead of the cd-discid output, if available.
track_offsets = []
m.set_useragent('raspberry-pi-cdplayer', '0.2', 'https://github.com/JoeHartley3/raspberry-pi-cdplayer')
try:
this_disc = libdiscid.read('/dev/cdrom')
except:
print('DiskID could not read /dev/cdrom')
self._numtracks = 0
self._track_lengths = []
self._cd_info = None
return
try:
# A CD stub is an anonymously submitted track list that contains a disc ID, barcode, comment field, and
# basic metadata like a release title and track names. ( https://wiki.musicbrainz.org/CD_Stub )
# By using cdstubs=False here, we force a ResponseError rather than try and parse the stub. Remove the
# argument to enable cdstubs.
self._cd_info = m.get_releases_by_discid(this_disc.id, includes=["recordings", "artists"], cdstubs=False)
except m.ResponseError:
print("Disk not found or database unavailable")
discid = subprocess.getstatusoutput('cd-discid --musicbrainz')
if discid[0] == 0:
output_split = discid[1].split()
self._numtracks = int(output_split[0])
track_offsets = list(map(lambda i: int(i), output_split[1:]))
if self._cd_info is not None:
if self._cd_info.get("disc"):
self._numtracks = self._cd_info['disc']['offset-count']
track_offsets = self._cd_info['disc']['offset-list']
# Append the total time to the track_offsets
track_offsets.append(int(self._cd_info['disc']['sectors']))
elif self._cd_info.get("cdstub"):
pass
else:
# We should | |
<reponame>maparent/leo-editor
#@+leo-ver=5-thin
#@+node:ekr.20031218072017.3320: * @file leoNodes.py
'''Leo's fundamental data classes.'''
use_zodb = False
#@+<< imports >>
#@+node:ekr.20060904165452.1: ** << imports >> (leoNodes)
import leo.core.leoGlobals as g
import leo.core.signal_manager as sig
# if g.app and g.app.use_psyco:
# # g.pr("enabled psyco classes",__file__)
# try: from psyco.classes import *
# except ImportError: pass
import copy
import time
import re
import itertools
if use_zodb:
# It may be important to import ZODB first.
try:
import ZODB
import ZODB.FileStorage
except ImportError:
ZODB = None
else:
ZODB = None
#@-<< imports >>
#@+others
#@+node:ekr.20031218072017.1991: ** class NodeIndices
class NodeIndices(object):
'''A class managing global node indices (gnx's).'''
#@+others
#@+node:ekr.20031218072017.1992: *3* ni.__init__
def __init__(self, id_):
'''Ctor for NodeIndices class.'''
self.defaultId = id_
self.lastIndex = 0
self.stack = []
# A stack of open commanders.
self.timeString = ''
# Set by setTimeStamp.
self.userId = id_
# Assign the initial timestamp.
self.setTimeStamp()
#@+node:ekr.20150321161305.8: *3* ni.check_gnx
def check_gnx(self, c, gnx, v):
'''Check that no vnode exists with the given gnx in fc.gnxDict.'''
fc = c.fileCommands
if fc is None:
g.internalError('getNewIndex: fc is None! c: %s' % c)
else:
v2 = fc.gnxDict.get(gnx)
if v2 and v2 != v:
g.internalError(
'getNewIndex: gnx clash %s: v: %s v2: %s' % (gnx, v, v2))
#@+node:ekr.20150302061758.14: *3* ni.compute_last_index
def compute_last_index(self, c):
'''Scan the entire leo outline to compute ni.last_index.'''
ni = self
# Partial, experimental, fix for #658.
# Do not change self.lastIndex here!
# self.lastIndex = 0
for v in c.all_unique_nodes():
gnx = v.fileIndex
if gnx:
id_, t, n = self.scanGnx(gnx)
if t == ni.timeString and n is not None:
try:
n = int(n)
self.lastIndex = max(self.lastIndex, n)
except Exception:
g.es_exception()
self.lastIndex += 1
#@+node:ekr.20031218072017.1994: *3* ni.get/setDefaultId
# These are used by the FileCommands read/write code.
def getDefaultId(self):
"""Return the id to be used by default in all gnx's"""
return self.defaultId
def setDefaultId(self, theId):
"""Set the id to be used by default in all gnx's"""
self.defaultId = theId
#@+node:ekr.20031218072017.1995: *3* ni.getNewIndex
def getNewIndex(self, v, cached=False):
'''
Create a new gnx for v or an empty string if the hold flag is set.
**Important**: the method must allocate a new gnx even if v.fileIndex exists.
'''
if v is None:
g.internalError('getNewIndex: v is None')
return ''
c = v.context
fc = c.fileCommands
t_s = self.update()
# Updates self.lastTime and self.lastIndex.
gnx = g.toUnicode("%s.%s.%d" % (self.userId, t_s, self.lastIndex))
v.fileIndex = gnx
self.check_gnx(c, gnx, v)
fc.gnxDict[gnx] = v
return gnx
#@+node:ekr.20150322134954.1: *3* ni.new_vnode_helper
def new_vnode_helper(self, c, gnx, v):
'''Handle all gnx-related tasks for VNode.__init__.'''
ni = self
if gnx:
v.fileIndex = gnx
ni.check_gnx(c, gnx, v)
c.fileCommands.gnxDict[gnx] = v
else:
v.fileIndex = ni.getNewIndex(v)
#@+node:ekr.20031218072017.1997: *3* ni.scanGnx
def scanGnx(self, s, i=0):
"""Create a gnx from its string representation."""
if not g.isString(s):
g.error("scanGnx: unexpected index type:", type(s), '', s)
return None, None, None
s = s.strip()
theId, t, n = None, None, None
i, theId = g.skip_to_char(s, i, '.')
if g.match(s, i, '.'):
i, t = g.skip_to_char(s, i + 1, '.')
if g.match(s, i, '.'):
i, n = g.skip_to_char(s, i + 1, '.')
# Use self.defaultId for missing id entries.
if not theId:
theId = self.defaultId
return theId, t, n
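# Illustrative example (not part of the original source): a gnx such as
# "ekr.20031218072017.1991" is split at the dots by scanGnx into
# theId='ekr', t='20031218072017', n='1991'; a two-part gnx like "ekr.20031218072017"
# yields n=None, and if the id part is empty, self.defaultId is used instead.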
#@+node:ekr.20031218072017.1998: *3* ni.setTimeStamp
def setTimestamp(self):
"""Set the timestamp string to be used by getNewIndex until further notice"""
self.timeString = time.strftime(
"%Y%m%d%H%M%S", # Help comparisons; avoid y2k problems.
time.localtime())
setTimeStamp = setTimestamp
#@+node:ekr.20141015035853.18304: *3* ni.tupleToString
def tupleToString(self, aTuple):
'''
Convert a gnx tuple returned by scanGnx
to its string representation.
'''
theId, t, n = aTuple
# This logic must match the existing logic so that
# previously written gnx's can be found.
if n in (None, 0, '',):
s = "%s.%s" % (theId, t)
else:
s = "%s.%s.%s" % (theId, t, n)
return g.toUnicode(s)
#@+node:ekr.20150321161305.13: *3* ni.update
def update(self):
'''Update self.timeString and self.lastIndex'''
t_s = time.strftime("%Y%m%d%H%M%S", time.localtime())
if self.timeString == t_s:
self.lastIndex += 1
else:
self.lastIndex = 1
self.timeString = t_s
return t_s
#@+node:ekr.20141023110422.4: *3* ni.updateLastIndex
def updateLastIndex(self, gnx):
'''Update ni.lastIndex if the gnx affects it.'''
id_, t, n = self.scanGnx(gnx)
if not id_ or (n != 0 and not n):
return # the gnx is not well formed or n in ('',None)
if id_ == self.userId and t == self.timeString:
try:
n = int(n)
if n > self.lastIndex:
self.lastIndex = n
g.trace(gnx, '-->', n)
except Exception:
g.trace('can not happen', repr(n))
#@-others
#@+node:ekr.20031218072017.889: ** class Position
#@+<< about the position class >>
#@+node:ekr.20031218072017.890: *3* << about the position class >>
#@@killcolor
#@+at
#
# A position marks the spot in a tree traversal. A position p consists of a VNode
# p.v, a child index p._childIndex, and a stack of tuples (v,childIndex), one for
# each ancestor of the spot in the tree traversal. A position p has a unique set of
# parents.
#
# The p.moveToX methods may return a null (invalid) position p with p.v = None.
#
# The tests "if p" or "if not p" are the _only_ correct way to test whether a
# position p is valid. In particular, tests like "if p is None" or "if p is not
# None" will not work properly.
#@-<< about the position class >>
# Positions should *never* be saved by the ZOBD.
class Position(object):
#@+others
#@+node:ekr.20040228094013: *3* p.ctor & other special methods...
#@+node:ekr.20080416161551.190: *4* p.__init__
def __init__(self, v, childIndex=0, stack=None):
'''Create a new position with the given childIndex and parent stack.'''
# To support ZODB the code must set v._p_changed = 1
# whenever any mutable VNode object changes.
self._childIndex = childIndex
self.v = v
# New in Leo 4.5: stack entries are tuples (v,childIndex).
if stack:
self.stack = stack[:] # Creating a copy here is safest and best.
else:
self.stack = []
g.app.positions += 1
# self.txtOffset = None # see self.textOffset()
#@+node:ekr.20080920052058.3: *4* p.__eq__ & __ne__
def __eq__(self, p2):
"""Return True if two positions are equivalent."""
p1 = self
# Don't use g.trace: it might call p.__eq__ or p.__ne__.
if not isinstance(p2, Position):
return False
if p2 is None or p2.v is None:
return p1.v is None
elif isinstance(p2, self.__class__):
return (p1.v == p2.v and
p1._childIndex == p2._childIndex and
p1.stack == p2.stack)
else:
# Do this only after testing for None.
return NotImplemented
def __ne__(self, p2):
"""Return True if two postions are not equivalent."""
return not self.__eq__(p2) # For possible use in Python 2.x.
#@+node:ekr.20091210082012.6230: *4* p.__ge__ & __le__& __lt__
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __lt__(self, other):
return not self.__eq__(other) and not self.__gt__(other)
#@+node:ekr.20091210082012.6233: *4* p.__gt__
def __gt__(self, other):
'''Return True if self appears after other in outline order.'''
stack1, stack2 = self.stack, other.stack
n1, n2 = len(stack1), len(stack2); n = min(n1, n2)
# Compare the common part of the stacks.
for item1, item2 in zip(stack1, stack2):
v1, x1 = item1; v2, x2 = item2
if x1 > x2: return True
elif x1 < x2: return False
# Finish the comparison.
if n1 == n2:
x1, x2 = self._childIndex, other._childIndex
return x1 > x2
elif n1 < n2:
x1 = self._childIndex; v2, x2 = other.stack[n]
return x1 > x2
else: # n1 > n2
# 2011/07/28: Bug fix suggested by SegundoBob.
x1 = other._childIndex; v2, x2 = self.stack[n]
return x2 >= x1
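# Illustrative comparison (hypothetical stacks): if p1.stack == [(v1, 0)] and
# p2.stack == [(v1, 0), (v2, 2)], the common part is equal and n1 < n2, so p1 > p2
# exactly when p1._childIndex > 2, i.e. p1 is a later child of the common ancestor
# than the branch that leads to p2.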
#@+node:ekr.20040117170612: *4* p.__getattr__ (no longer used)
# No longer used. All code must now be aware of the one-node world.
# def __getattr__ (self,attr):
# """Convert references to p.t into references to p.v."""
# if attr=="t":
# return self.v
# else:
# # New in 4.3: _silently_ raise the attribute error.
# # This allows plugin code to use hasattr(p,attr) !
# if 0:
# print("unknown position attribute: %s" % attr)
# import traceback ; traceback.print_stack()
# raise AttributeError(attr)
#@+node:ekr.20040117173448: *4* p.__nonzero__ & __bool__
#@+at
# Tests such as 'if p' or 'if not p' are the _only_ correct ways to test
# whether a position p is valid. In particular, tests like 'if p is
# None' or 'if p is not None' will not work properly.
#@@c
if g.isPython3:
def __bool__(self):
"""Return True if a position | |
host. For other
cases, this argument has no effect.
""")
add_docstr_all('cos',
r"""
cos() -> Tensor
See :func:`torch.cos`
""")
add_docstr_all('cos_',
r"""
cos_() -> Tensor
In-place version of :meth:`~Tensor.cos`
""")
add_docstr_all('cosh',
r"""
cosh() -> Tensor
See :func:`torch.cosh`
""")
add_docstr_all('cosh_',
r"""
cosh_() -> Tensor
In-place version of :meth:`~Tensor.cosh`
""")
add_docstr_all('cpu',
r"""
cpu() -> Tensor
Returns a copy of this object in CPU memory.
If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.
""")
add_docstr_all('cross',
r"""
cross(other, dim=-1) -> Tensor
See :func:`torch.cross`
""")
add_docstr_all('cuda',
r"""
cuda(device=None, non_blocking=False) -> Tensor
Returns a copy of this object in CUDA memory.
If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
device (:class:`torch.device`): The destination GPU device.
Defaults to the current CUDA device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host.
Otherwise, the argument has no effect. Default: ``False``.
""")
add_docstr_all('cumprod',
r"""
cumprod(dim, dtype=None) -> Tensor
See :func:`torch.cumprod`
""")
add_docstr_all('cumsum',
r"""
cumsum(dim, dtype=None) -> Tensor
See :func:`torch.cumsum`
""")
add_docstr_all('data_ptr',
r"""
data_ptr() -> int
Returns the address of the first element of :attr:`self` tensor.
""")
add_docstr_all('dense_dim',
r"""
dense_dim() -> int
If :attr:`self` is a sparse COO tensor (i.e., with ``torch.sparse_coo`` layout),
this returns the number of dense dimensions. Otherwise, this throws an
error.
See also :meth:`Tensor.sparse_dim`.
""")
add_docstr_all('diag',
r"""
diag(diagonal=0) -> Tensor
See :func:`torch.diag`
""")
add_docstr_all('diag_embed',
r"""
diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
See :func:`torch.diag_embed`
""")
add_docstr_all('diagflat',
r"""
diagflat(diagonal=0) -> Tensor
See :func:`torch.diagflat`
""")
add_docstr_all('diagonal',
r"""
diagonal(offset=0, dim1=0, dim2=1) -> Tensor
See :func:`torch.diagonal`
""")
add_docstr_all('digamma',
r"""
digamma() -> Tensor
See :func:`torch.digamma`
""")
add_docstr_all('digamma_',
r"""
digamma_() -> Tensor
In-place version of :meth:`~Tensor.digamma`
""")
add_docstr_all('dim',
r"""
dim() -> int
Returns the number of dimensions of :attr:`self` tensor.
""")
add_docstr_all('dist',
r"""
dist(other, p=2) -> Tensor
See :func:`torch.dist`
""")
add_docstr_all('div',
r"""
div(value) -> Tensor
See :func:`torch.div`
""")
add_docstr_all('div_',
r"""
div_(value) -> Tensor
In-place version of :meth:`~Tensor.div`
""")
add_docstr_all('dot',
r"""
dot(tensor2) -> Tensor
See :func:`torch.dot`
""")
add_docstr_all('eig',
r"""
eig(eigenvectors=False) -> (Tensor, Tensor)
See :func:`torch.eig`
""")
add_docstr_all('element_size',
r"""
element_size() -> int
Returns the size in bytes of an individual element.
Example::
>>> torch.tensor([]).element_size()
4
>>> torch.tensor([], dtype=torch.uint8).element_size()
1
""")
add_docstr_all('eq',
r"""
eq(other) -> Tensor
See :func:`torch.eq`
""")
add_docstr_all('eq_',
r"""
eq_(other) -> Tensor
In-place version of :meth:`~Tensor.eq`
""")
add_docstr_all('equal',
r"""
equal(other) -> bool
See :func:`torch.equal`
""")
add_docstr_all('erf',
r"""
erf() -> Tensor
See :func:`torch.erf`
""")
add_docstr_all('erf_',
r"""
erf_() -> Tensor
In-place version of :meth:`~Tensor.erf`
""")
add_docstr_all('erfc',
r"""
erfc() -> Tensor
See :func:`torch.erfc`
""")
add_docstr_all('erfc_',
r"""
erfc_() -> Tensor
In-place version of :meth:`~Tensor.erfc`
""")
add_docstr_all('erfinv',
r"""
erfinv() -> Tensor
See :func:`torch.erfinv`
""")
add_docstr_all('erfinv_',
r"""
erfinv_() -> Tensor
In-place version of :meth:`~Tensor.erfinv`
""")
add_docstr_all('exp',
r"""
exp() -> Tensor
See :func:`torch.exp`
""")
add_docstr_all('exp_',
r"""
exp_() -> Tensor
In-place version of :meth:`~Tensor.exp`
""")
add_docstr_all('expm1',
r"""
expm1() -> Tensor
See :func:`torch.expm1`
""")
add_docstr_all('expm1_',
r"""
expm1_() -> Tensor
In-place version of :meth:`~Tensor.expm1`
""")
add_docstr_all('exponential_',
r"""
exponential_(lambd=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the exponential distribution:
.. math::
f(x) = \lambda e^{-\lambda x}
""")
add_docstr_all('fill_',
r"""
fill_(value) -> Tensor
Fills :attr:`self` tensor with the specified value.
""")
add_docstr_all('floor',
r"""
floor() -> Tensor
See :func:`torch.floor`
""")
add_docstr_all('flip',
r"""
flip(dims) -> Tensor
See :func:`torch.flip`
""")
add_docstr_all('roll',
r"""
roll(shifts, dims) -> Tensor
See :func:`torch.roll`
""")
add_docstr_all('floor_',
r"""
floor_() -> Tensor
In-place version of :meth:`~Tensor.floor`
""")
add_docstr_all('fmod',
r"""
fmod(divisor) -> Tensor
See :func:`torch.fmod`
""")
add_docstr_all('fmod_',
r"""
fmod_(divisor) -> Tensor
In-place version of :meth:`~Tensor.fmod`
""")
add_docstr_all('frac',
r"""
frac() -> Tensor
See :func:`torch.frac`
""")
add_docstr_all('frac_',
r"""
frac_() -> Tensor
In-place version of :meth:`~Tensor.frac`
""")
add_docstr_all('flatten',
r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
see :func:`torch.flatten`
""")
add_docstr_all('gather',
r"""
gather(dim, index) -> Tensor
See :func:`torch.gather`
""")
add_docstr_all('ge',
r"""
ge(other) -> Tensor
See :func:`torch.ge`
""")
add_docstr_all('ge_',
r"""
ge_(other) -> Tensor
In-place version of :meth:`~Tensor.ge`
""")
add_docstr_all('gels',
r"""
gels(A) -> Tensor
See :func:`torch.gels`
""")
add_docstr_all('geometric_',
r"""
geometric_(p, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the geometric distribution:
.. math::
f(X=k) = (1 - p)^{k - 1} p
""")
add_docstr_all('geqrf',
r"""
geqrf() -> (Tensor, Tensor)
See :func:`torch.geqrf`
""")
add_docstr_all('ger',
r"""
ger(vec2) -> Tensor
See :func:`torch.ger`
""")
add_docstr_all('gesv',
r"""
gesv(A) -> Tensor, Tensor
See :func:`torch.gesv`
""")
add_docstr_all('indices',
r"""
indices() -> Tensor
If :attr:`self` is a sparse COO tensor (i.e., with ``torch.sparse_coo`` layout),
this returns a view of the contained indices tensor. Otherwise, this throws an
error.
See also :meth:`Tensor.values`.
.. note::
This method can only be called on a coalesced sparse tensor. See
:meth:`Tensor.coalesce` for details.
""")
add_docstr_all('get_device',
r"""
get_device() -> Device ordinal (Integer)
For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
For CPU tensors, an error is thrown.
Example::
>>> x = torch.randn(3, 4, 5, device='cuda:0')
>>> x.get_device()
0
>>> x.cpu().get_device() # RuntimeError: get_device is not implemented for type torch.FloatTensor
""")
add_docstr_all('values',
r"""
values() -> Tensor
If :attr:`self` is a sparse COO tensor (i.e., with ``torch.sparse_coo`` layout),
this returns a view of the contained values tensor. Otherwise, this throws an
error.
See also :meth:`Tensor.indices`.
.. note::
This method can only be called on a coalesced sparse tensor. See
:meth:`Tensor.coalesce` for details.
""")
add_docstr_all('gt',
r"""
gt(other) -> Tensor
See :func:`torch.gt`
""")
add_docstr_all('gt_',
r"""
gt_(other) -> Tensor
In-place version of :meth:`~Tensor.gt`
""")
add_docstr_all('hardshrink',
r"""
hardshrink(lambd=0.5) -> Tensor
See :func:`torch.nn.functional.hardshrink`
""")
add_docstr_all('histc',
r"""
histc(bins=100, min=0, max=0) -> Tensor
See :func:`torch.histc`
""")
add_docstr_all('index_add_',
r"""
index_add_(dim, index, tensor) -> Tensor
Accumulate the elements of :attr:`tensor` into the :attr:`self` tensor by adding
to the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is added to the
``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
.. include:: cuda_deterministic.rst
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`tensor` to select from
tensor (Tensor): the tensor containing values to add
Example::
>>> x = torch.ones(5, 3)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2])
>>> x.index_add_(0, index, t)
tensor([[ 2., 3., 4.],
[ 1., 1., 1.],
[ 8., 9., 10.],
[ 1., 1., 1.],
[ 5., 6., 7.]])
""")
add_docstr_all('index_copy_',
r"""
index_copy_(dim, index, tensor) -> Tensor
Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`tensor` to select from
tensor (Tensor): the tensor containing values to copy
Example::
>>> x = torch.zeros(5, 3)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2])
>>> x.index_copy_(0, index, t)
tensor([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 7., 8., 9.],
[ 0., 0., 0.],
[ 4., 5., 6.]])
""")
add_docstr_all('index_fill_',
r"""
index_fill_(dim, index, val) -> Tensor
Fills the elements of the :attr:`self` tensor with value :attr:`val` by
selecting the indices in the order given in :attr:`index`.
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`self` tensor to fill in
val (float): the value to fill with
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 2])
>>> x.index_fill_(1, index, -1)
tensor([[-1., 2., -1.],
[-1., 5., -1.],
[-1., 8., -1.]])
""")
add_docstr_all('index_put_',
r"""
index_put_(indices, value, accumulate=False) -> Tensor
Puts values from the tensor :attr:`value` into the tensor :attr:`self` using
the indices specified in :attr:`indices` (which is a tuple of Tensors). The
expression ``tensor.index_put_(indices, value)`` is equivalent to
``tensor[indices] = value``. Returns :attr:`self`.
If :attr:`accumulate` is ``True``, the elements in :attr:`tensor` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
contain duplicate elements.
Args:
indices (tuple of LongTensor): tensors used to index into `self`.
value (Tensor): tensor of same dtype as `self`.
accumulate (bool): whether to accumulate into self
""")
add_docstr_all('index_select',
r"""
index_select(dim, index) -> Tensor
See :func:`torch.index_select`
""")
add_docstr_all('sparse_mask',
r"""
sparse_mask(input, mask) -> Tensor
Returns a new SparseTensor with values from Tensor :attr:`input` filtered
by indices of :attr:`mask` and values are ignored. :attr:`input` and :attr:`mask`
must have the same shape.
Args:
input (Tensor): an input Tensor
mask (SparseTensor): a SparseTensor which we filter :attr:`input` based on its indices
Example::
>>> nnz = 5
>>> dims = [5, 5, 2, 2]
>>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
>>> V = torch.randn(nnz, dims[2], dims[3])
>>> size = torch.Size(dims)
>>> S = torch.sparse_coo_tensor(I, V, size).coalesce()
>>> D = torch.randn(dims)
>>> D.sparse_mask(S)
tensor(indices=tensor([[0, 0, 0, 2],
[0, 1, 4, 3]]),
values=tensor([[[ 1.6550, 0.2397],
[-0.1611, -0.0779]],
[[ 0.2326, -1.0558],
[ 1.4711, 1.9678]],
[[-0.5138, -0.0411],
[ 1.9417, 0.5158]],
[[ 0.0793, 0.0036],
[-0.2569, -0.1055]]]),
size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
""")
add_docstr_all('inverse',
r"""
inverse() -> Tensor
See :func:`torch.inverse`
""")
add_docstr_all('is_contiguous',
r"""
is_contiguous() -> bool
Returns True if :attr:`self` tensor is contiguous in memory in C order.
""")
add_docstr_all('is_floating_point',
r"""
is_floating_point() -> bool
Returns True if the data type of :attr:`self` is a floating point data type.
""")
add_docstr_all('is_set_to',
r"""
is_set_to(tensor) -> bool
Returns True if this object refers to the same ``THTensor`` object from the
Torch C API as the given tensor.
""")
add_docstr_all('item', r"""
item() -> number
Returns the value of this tensor as a standard Python number. This only works
for tensors with one | |
<filename>sdk/python/pulumi_google_native/jobs/v3/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'ApplicationInfoArgs',
'CompensationEntryArgs',
'CompensationInfoArgs',
'CompensationRangeArgs',
'MoneyArgs',
'ProcessingOptionsArgs',
]
@pulumi.input_type
class ApplicationInfoArgs:
def __init__(__self__, *,
emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instruction: Optional[pulumi.Input[str]] = None,
uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Application related details of a job posting.
:param pulumi.Input[Sequence[pulumi.Input[str]]] emails: Optional but at least one of uris, emails or instruction must be specified. Use this field to specify email address(es) to which resumes or applications can be sent. The maximum number of allowed characters for each entry is 255.
:param pulumi.Input[str] instruction: Optional but at least one of uris, emails or instruction must be specified. Use this field to provide instructions, such as "Mail your application to ...", that a candidate can follow to apply for the job. This field accepts and sanitizes HTML input, and also accepts bold, italic, ordered list, and unordered list markup tags. The maximum number of allowed characters is 3,000.
:param pulumi.Input[Sequence[pulumi.Input[str]]] uris: Optional but at least one of uris, emails or instruction must be specified. Use this URI field to direct an applicant to a website, for example to link to an online application form. The maximum number of allowed characters for each entry is 2,000.
"""
if emails is not None:
pulumi.set(__self__, "emails", emails)
if instruction is not None:
pulumi.set(__self__, "instruction", instruction)
if uris is not None:
pulumi.set(__self__, "uris", uris)
@property
@pulumi.getter
def emails(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Optional but at least one of uris, emails or instruction must be specified. Use this field to specify email address(es) to which resumes or applications can be sent. The maximum number of allowed characters for each entry is 255.
"""
return pulumi.get(self, "emails")
@emails.setter
def emails(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "emails", value)
@property
@pulumi.getter
def instruction(self) -> Optional[pulumi.Input[str]]:
"""
Optional but at least one of uris, emails or instruction must be specified. Use this field to provide instructions, such as "Mail your application to ...", that a candidate can follow to apply for the job. This field accepts and sanitizes HTML input, and also accepts bold, italic, ordered list, and unordered list markup tags. The maximum number of allowed characters is 3,000.
"""
return pulumi.get(self, "instruction")
@instruction.setter
def instruction(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instruction", value)
@property
@pulumi.getter
def uris(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Optional but at least one of uris, emails or instruction must be specified. Use this URI field to direct an applicant to a website, for example to link to an online application form. The maximum number of allowed characters for each entry is 2,000.
"""
return pulumi.get(self, "uris")
@uris.setter
def uris(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "uris", value)
@pulumi.input_type
class CompensationEntryArgs:
def __init__(__self__, *,
amount: Optional[pulumi.Input['MoneyArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
expected_units_per_year: Optional[pulumi.Input[float]] = None,
range: Optional[pulumi.Input['CompensationRangeArgs']] = None,
type: Optional[pulumi.Input['CompensationEntryType']] = None,
unit: Optional[pulumi.Input['CompensationEntryUnit']] = None):
"""
A compensation entry that represents one component of compensation, such as base pay, bonus, or other compensation type. Annualization: One compensation entry can be annualized if - it contains valid amount or range. - and its expected_units_per_year is set or can be derived. Its annualized range is determined as (amount or range) times expected_units_per_year.
:param pulumi.Input['MoneyArgs'] amount: Optional. Compensation amount.
:param pulumi.Input[str] description: Optional. Compensation description. For example, could indicate equity terms or provide additional context to an estimated bonus.
:param pulumi.Input[float] expected_units_per_year: Optional. Expected number of units paid each year. If not specified, when Job.employment_types is FULLTIME, a default value is inferred based on unit. Default values: - HOURLY: 2080 - DAILY: 260 - WEEKLY: 52 - MONTHLY: 12 - ANNUAL: 1
:param pulumi.Input['CompensationRangeArgs'] range: Optional. Compensation range.
:param pulumi.Input['CompensationEntryType'] type: Optional. Compensation type. Default is CompensationUnit.COMPENSATION_TYPE_UNSPECIFIED.
:param pulumi.Input['CompensationEntryUnit'] unit: Optional. Frequency of the specified amount. Default is CompensationUnit.COMPENSATION_UNIT_UNSPECIFIED.
"""
if amount is not None:
pulumi.set(__self__, "amount", amount)
if description is not None:
pulumi.set(__self__, "description", description)
if expected_units_per_year is not None:
pulumi.set(__self__, "expected_units_per_year", expected_units_per_year)
if range is not None:
pulumi.set(__self__, "range", range)
if type is not None:
pulumi.set(__self__, "type", type)
if unit is not None:
pulumi.set(__self__, "unit", unit)
@property
@pulumi.getter
def amount(self) -> Optional[pulumi.Input['MoneyArgs']]:
"""
Optional. Compensation amount.
"""
return pulumi.get(self, "amount")
@amount.setter
def amount(self, value: Optional[pulumi.Input['MoneyArgs']]):
pulumi.set(self, "amount", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Compensation description. For example, could indicate equity terms or provide additional context to an estimated bonus.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="expectedUnitsPerYear")
def expected_units_per_year(self) -> Optional[pulumi.Input[float]]:
"""
Optional. Expected number of units paid each year. If not specified, when Job.employment_types is FULLTIME, a default value is inferred based on unit. Default values: - HOURLY: 2080 - DAILY: 260 - WEEKLY: 52 - MONTHLY: 12 - ANNUAL: 1
"""
return pulumi.get(self, "expected_units_per_year")
@expected_units_per_year.setter
def expected_units_per_year(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "expected_units_per_year", value)
@property
@pulumi.getter
def range(self) -> Optional[pulumi.Input['CompensationRangeArgs']]:
"""
Optional. Compensation range.
"""
return pulumi.get(self, "range")
@range.setter
def range(self, value: Optional[pulumi.Input['CompensationRangeArgs']]):
pulumi.set(self, "range", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['CompensationEntryType']]:
"""
Optional. Compensation type. Default is CompensationUnit.COMPENSATION_TYPE_UNSPECIFIED.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['CompensationEntryType']]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def unit(self) -> Optional[pulumi.Input['CompensationEntryUnit']]:
"""
Optional. Frequency of the specified amount. Default is CompensationUnit.COMPENSATION_UNIT_UNSPECIFIED.
"""
return pulumi.get(self, "unit")
@unit.setter
def unit(self, value: Optional[pulumi.Input['CompensationEntryUnit']]):
pulumi.set(self, "unit", value)
@pulumi.input_type
class CompensationInfoArgs:
def __init__(__self__, *,
entries: Optional[pulumi.Input[Sequence[pulumi.Input['CompensationEntryArgs']]]] = None):
"""
Job compensation details.
:param pulumi.Input[Sequence[pulumi.Input['CompensationEntryArgs']]] entries: Optional. Job compensation information. At most one entry can be of type CompensationInfo.CompensationType.BASE, which is referred to as the **base compensation entry** for the job.
"""
if entries is not None:
pulumi.set(__self__, "entries", entries)
@property
@pulumi.getter
def entries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CompensationEntryArgs']]]]:
"""
Optional. Job compensation information. At most one entry can be of type CompensationInfo.CompensationType.BASE, which is referred to as the **base compensation entry** for the job.
"""
return pulumi.get(self, "entries")
@entries.setter
def entries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CompensationEntryArgs']]]]):
pulumi.set(self, "entries", value)
@pulumi.input_type
class CompensationRangeArgs:
def __init__(__self__, *,
max_compensation: Optional[pulumi.Input['MoneyArgs']] = None,
min_compensation: Optional[pulumi.Input['MoneyArgs']] = None):
"""
Compensation range.
:param pulumi.Input['MoneyArgs'] max_compensation: Optional. The maximum amount of compensation. If left empty, the value is set to a maximal compensation value and the currency code is set to match the currency code of min_compensation.
:param pulumi.Input['MoneyArgs'] min_compensation: Optional. The minimum amount of compensation. If left empty, the value is set to zero and the currency code is set to match the currency code of max_compensation.
"""
if max_compensation is not None:
pulumi.set(__self__, "max_compensation", max_compensation)
if min_compensation is not None:
pulumi.set(__self__, "min_compensation", min_compensation)
@property
@pulumi.getter(name="maxCompensation")
def max_compensation(self) -> Optional[pulumi.Input['MoneyArgs']]:
"""
Optional. The maximum amount of compensation. If left empty, the value is set to a maximal compensation value and the currency code is set to match the currency code of min_compensation.
"""
return pulumi.get(self, "max_compensation")
@max_compensation.setter
def max_compensation(self, value: Optional[pulumi.Input['MoneyArgs']]):
pulumi.set(self, "max_compensation", value)
@property
@pulumi.getter(name="minCompensation")
def min_compensation(self) -> Optional[pulumi.Input['MoneyArgs']]:
"""
Optional. The minimum amount of compensation. If left empty, the value is set to zero and the currency code is set to match the currency code of max_compensation.
"""
return pulumi.get(self, "min_compensation")
@min_compensation.setter
def min_compensation(self, value: Optional[pulumi.Input['MoneyArgs']]):
pulumi.set(self, "min_compensation", value)
@pulumi.input_type
class MoneyArgs:
def __init__(__self__, *,
currency_code: Optional[pulumi.Input[str]] = None,
nanos: Optional[pulumi.Input[int]] = None,
units: Optional[pulumi.Input[str]] = None):
"""
Represents an amount of money with its currency type.
:param pulumi.Input[str] currency_code: The three-letter currency code defined in ISO 4217.
:param pulumi.Input[int] nanos: Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
:param pulumi.Input[str] units: The whole units of the amount. For example if `currencyCode` is | |
return self._build(sparse, aligned)
def _build_pattern(self, residues):
"""
Build and return a sparse regular rexpression for C{residues}.
"""
fragments = []
for rn, r in enumerate(residues):
res_name = self._encode(r)
if rn == 0:
# First residue, start a new fragment:
fragments.append([res_name])
elif r.insertion_code: # and not residues[rn - 1].insertion_code:
# If residue i has an insertion code, initiate a new fragment:
fragments.append([res_name])
elif r.sequence_number - residues[rn - 1].sequence_number in (0, 1, -1):
# If the seq numbers of residues [i-1, i] are consecutive, extend the last fragment:
fragments[-1].append(res_name)
else:
# They are not consecutive, so we better start a new fragment:
fragments.append([res_name])
for i, frag in enumerate(fragments):
fragments[i] = ''.join(frag)
if len(fragments) > FastResidueMapper.MAX_FRAGMENTS:
# Wow, that's a lot of fragments. Better use a different mapper
raise ResidueMappingError("Can't map chain with large number of fragments")
blocks = FastResidueMapper.DELIMITER.join(fragments)
pattern = FastResidueMapper.PATTERN.format(blocks)
return pattern
def _encode(self, r):
"""
Return a unique single-letter representation of C{r.type}.
"""
if not r.is_modified:
return str(r.type)
else:
return self._register_label(r.label)
def _encode_sequence(self, s):
return ''.join(map(self._encode, s.residues))
def _register_label(self, label):
"""
Assign a new unicode character to C{label} and cache it.
@return: cached single-letter representation of label.
@rtype: unicode char
"""
if label not in self._cache:
if set(label).intersection(FastResidueMapper.FORBIDDEN_CHARS):
raise ResidueMappingError("Invalid residue label")
self._charcode += 1
code = self._charcode
self._cache[label] = csb.io.unichr(code)
return self._cache[label]
class RobustResidueMapper(AbstractResidueMapper):
"""
Exhaustive residue mapper, which uses Needleman-Wunsch global alignment.
Much slower (quadratic), but fail-proof even with incompatible sequences
(can insert gaps in both the C{sparse} and the C{reference} sequence).
@param match: score for a match
@type match: float
@param mismatch: score for a mismatch (by default mismatches are heavily
penalized, while gaps are allowed)
@type mismatch: float
@param gap: gap penalty
@type gap: float
"""
class GlobalAligner(alignment.GlobalAlignmentAlgorithm):
def _sequence(self, s):
return [r.label for r in s.residues]
def __init__(self, match=1, mismatch=-10, gap=0):
scoring = alignment.IdentityMatrix(match=match, mismatch=mismatch)
aligner = RobustResidueMapper.GlobalAligner(scoring=scoring, gap=gap)
self._aligner = aligner
def map(self, sparse, reference):
aligned = []
ali = self._aligner.align(sparse, reference)
if ali.is_empty:
raise ResidueMappingError("Global alignment failed")
for mapped, residue in zip(ali.query, ali.subject):
if residue.type == reference.alphabet.GAP:
continue
elif mapped.type == sparse.alphabet.GAP:
aligned.append(self.create_gap(sparse.alphabet))
else:
aligned.append(mapped)
return self._build(sparse, aligned)
class CombinedResidueMapper(AbstractResidueMapper):
"""
The best of both worlds: attempts to map the residues using
L{FastResidueMapper}, but upon failure secures success by switching to
L{RobustResidueMapper}.
"""
FAST = FastResidueMapper()
ROBUST = RobustResidueMapper()
def map(self, sparse, reference):
try:
return CombinedResidueMapper.FAST.map(sparse, reference)
except ResidueMappingError:
return CombinedResidueMapper.ROBUST.map(sparse, reference)
class FileBuilder(object):
"""
Base abstract class for all structure file formatters.
Defines a common step-wise interface according to the Builder pattern.
@param output: output stream (this is where the product is constructed)
@type output: stream
"""
__metaclass__ = ABCMeta
def __init__(self, output):
if not hasattr(output, 'write'):
raise TypeError(output)
def isnull(this, that, null=None):
if this is null:
return that
else:
return this
self._out = output
self._isnull = isnull
@property
def output(self):
"""
Destination stream
@rtype: stream
"""
return self._out
@property
def isnull(self):
"""
ISNULL(X, Y) function
@rtype: callable
"""
return self._isnull
def write(self, text):
"""
Write a chunk of text
"""
self._out.write(text)
def writeline(self, text):
"""
Write a chunk of text and append a new line terminator
"""
self._out.write(text)
self._out.write('\n')
@abstractmethod
def add_header(self, master_structure):
pass
@abstractmethod
def add_structure(self, structure):
pass
def finalize(self):
pass
class PDBFileBuilder(FileBuilder):
"""
PDB file format builder.
"""
def writeline(self, text):
self.write('{0:80}\n'.format(text))
def add_header(self, master):
"""
Write the HEADER of the file using C{master}
@type master: L{Structure}
"""
isnull = self.isnull
header = 'HEADER {0:40}{1:%d-%b-%y} {2:4}'
self.writeline(header.format('.', datetime.datetime.now(), master.accession.upper()))
molecules = { }
for chain_id in master.chains:
chain = master.chains[chain_id]
if chain.molecule_id not in molecules:
molecules[chain.molecule_id] = [ ]
molecules[chain.molecule_id].append(chain_id)
k = 0
for mol_id in sorted(molecules):
chains = molecules[mol_id]
first_chain = master.chains[ chains[0] ]
self.writeline('COMPND {0:3} MOL_ID: {1};'.format(k + 1, isnull(mol_id, '0')))
self.writeline('COMPND {0:3} MOLECULE: {1};'.format(k + 2, isnull(first_chain.name, '')))
self.writeline('COMPND {0:3} CHAIN: {1};'.format(k + 3, ', '.join(chains)))
k += 3
for chain_id in master.chains:
chain = master.chains[chain_id]
res = [ r.label for r in chain.residues ]
rn = 0
for j in range(0, chain.length, 13):
rn += 1
residues = [ '{0:>3}'.format(r) for r in res[j : j + 13] ]
self.writeline('SEQRES {0:>3} {1} {2:>4} {3}'.format(
rn, chain.id, chain.length, ' '.join(residues) ))
def add_structure(self, structure):
"""
Append a new model to the file
@type structure: L{Structure}
"""
isnull = self.isnull
for chain_id in structure.chains:
chain = structure.chains[chain_id]
for residue in chain.residues:
atoms = [ ]
for an in residue.atoms:
atom = residue.atoms[an]
if isinstance(atom, csb.bio.structure.DisorderedAtom):
for dis_atom in atom: atoms.append(dis_atom)
else:
atoms.append(atom)
atoms.sort()
for atom in atoms:
alt = atom.alternate
if alt is True:
alt = 'A'
elif alt is False:
alt = ' '
if atom.element:
element = repr(atom.element)
else:
element = ' '
self.writeline('ATOM {0:>5} {1:>4}{2}{3:>3} {4}{5:>4}{6} {7:>8.3f}{8:>8.3f}{9:>8.3f}{10:>6.2f}{11:>6.2f}{12:>12}{13:2}'.format(
atom.serial_number, atom._full_name, isnull(alt, ' '),
residue.label, chain.id,
isnull(residue.sequence_number, residue.rank), isnull(residue.insertion_code, ' '),
atom.vector[0], atom.vector[1], atom.vector[2], isnull(atom.occupancy, 0.0), isnull(atom.bfactor, 0.0),
element, isnull(atom.charge, ' ') ))
self.writeline('TER')
def finalize(self):
"""
Add the END marker
"""
self.writeline('END')
self._out.flush()
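# A minimal usage sketch of the builder interface above (assumed driver code, not part
# of the original module); `structure` stands for a parsed csb.bio.structure.Structure:
#   with open('out.pdb', 'w') as stream:
#       builder = PDBFileBuilder(stream)
#       builder.add_header(structure)
#       builder.add_structure(structure)
#       builder.finalize()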
class PDBEnsembleFileBuilder(PDBFileBuilder):
"""
Supports serialization of NMR ensembles.
Functions as a simple decorator, which wraps C{add_structure} with
MODEL/ENDMDL records.
"""
def add_structure(self, structure):
model_id = self.isnull(structure.model_id, 1)
self.writeline('MODEL {0:>4}'.format(model_id))
super(PDBEnsembleFileBuilder, self).add_structure(structure)
self.writeline('ENDMDL')
class StructureProvider(object):
"""
Base class for all PDB data source providers.
Concrete classes need to implement the C{find} method, which abstracts the
retrieval of a PDB structure file by a structure identifier. This is a hook
method called internally by C{get}, but subclasses can safely override both
C{find} and {get} to in order to achieve completely custom behavior.
"""
__metaclass__ = ABCMeta
def __getitem__(self, id):
return self.get(id)
@abstractmethod
def find(self, id):
"""
Attempt to discover a PDB file, given a specific PDB C{id}.
@param id: structure identifier (e.g. 1x80)
@type id: str
@return: path and file name on success, None otherwise
@rtype: str or None
"""
pass
def get(self, id, model=None):
"""
Discover, parse and return the PDB structure, corresponding to the
specified C{id}.
@param id: structure identifier (e.g. 1x80)
@type id: str
@param model: optional model identifier
@type model: str
@rtype: L{csb.bio.Structure}
@raise StructureNotFoundError: when C{id} could not be found
"""
pdb = self.find(id)
if pdb is None:
raise StructureNotFoundError(id)
else:
return StructureParser(pdb).parse_structure(model=model)
class FileSystemStructureProvider(StructureProvider):
"""
Simple file system based PDB data source. Scans a list of local directories
using pre-defined file name templates.
@param paths: a list of paths
@type paths: iterable or str
"""
def __init__(self, paths=None):
self._templates = ['pdb{id}.ent', 'pdb{id}.pdb', '{id}.pdb', '{id}.ent']
self._paths = csb.core.OrderedDict()
if paths is not None:
if isinstance(paths, csb.core.string):
paths = [paths]
for path in paths:
self.add(path)
@property
def paths(self):
"""
Current search paths
@rtype: tuple
"""
return tuple(self._paths)
@property
def templates(self):
"""
Current file name match templates
@rtype: tuple
"""
return tuple(self._templates)
def add(self, path):
"""
Register a new local C{path}.
@param path: directory name
@type path: str
@raise IOError: if C{path} is not a valid directory
"""
if os.path.isdir(path):
self._paths[path] = path
else:
raise IOError(path)
def add_template(self, template):
"""
Register a custom file name name C{template}. The template must contain
an E{lb}idE{rb} macro, e.g. pdbE{lb}idE{rb}.ent
@param template: pattern
@type template: str
"""
if '{id}' not in template:
raise ValueError('Template does not contain an "{id}" macro')
if template not in self._templates:
self._templates.append(template)
def remove(self, path):
"""
Unregister an existing local C{path}.
@param path: directory name
@type path: str
| |
find an empty space, quitting.")
# Choose the farthest empty space for maximum drag
chosen_space = spaces[np.argmax(spaces.sum(axis=1))]
# An arbitrary position in the top left region
drag_location = (200, 200)
# Drag
self.input_handler.click(
*chosen_space, *chosen_space, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(drag_location[0], drag_location[1], speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
# Are we there yet?
# i.e., have we reached Ambidexterity, which in that case is at
# roughly (646, 296) in absolute 1440p screen px position
ambidexterity_position = self._find_icon(
assumed_ambidexterity_position, "AmbidexterityCluster.png"
)
# Ambidexterity is located (-560, 850) from socket 21
# Thus, this plus any (scaled) offset found by the template matcher is
# our tree position
self.ingame_pos = [
SOCKETS[21][0]
- 560
+ ambidexterity_position[0] / (X_SCALE * self.px_multiplier),
SOCKETS[21][1]
+ 850
+ ambidexterity_position[1] / (Y_SCALE * self.px_multiplier),
]
def _find_empty_space(self, quadrant):
# Finds empty spaces that can be used to drag the screen
# Used to recenter the screen
# The quadrant argument is an int in [0, 1, 2, 3], corresponding to
# [top-right, top-left, bottom-left, bottom-right]
quadrant_translation = {0: [0.5, 0], 1: [0, 0], 2: [0, 0.5], 3: [0.5, 0.5]}
fractional_lt = quadrant_translation[quadrant]
lt = [
int(fractional_lt[0] * self.resolution[0]),
int(fractional_lt[1] * self.resolution[1]),
]
rb = [int(lt[0] + self.resolution[0] / 2),
int(lt[1] + self.resolution[1] / 2)]
searched_area = grab_screen(tuple(lt + rb))
searched_area = cv2.cvtColor(searched_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros_like(searched_area)
centered_coordinates = self._match_image(searched_area, "FreeSpace.png")
locations[tuple(centered_coordinates)] = 1
rel_space_pos_yx = np.argwhere(locations == 1)
rel_space_pos = rel_space_pos_yx.T[::-1].T
if len(rel_space_pos) == 0:
self.log.warning("Could not find any free spaces in tree!")
return None
screen_space_pos = rel_space_pos + lt
# remove positions that are close to edges as these trigger scroll
screen_space_pos = screen_space_pos[(screen_space_pos[:, 0] > 100) &
(screen_space_pos[:, 1] > 100) &
(screen_space_pos[:, 0] < self.resolution[0] - 100) &
(screen_space_pos[:, 1] < self.resolution[1] - 100)]
return screen_space_pos
def _find_icon(self, assumed_position, icon_name):
# Finds the ambidexerity cluster icon in the region it sits in
# if we are at the bottom-right corner of the tree
# The exact location is used to refine our knowledge of our position
abs_assumed_position = (
assumed_position[0] * self.resolution[0],
assumed_position[1] * self.resolution[1],
)
margin_side = int(0.05 * self.resolution[0])
lt = [
int(abs_assumed_position[0] - margin_side / 2),
int(abs_assumed_position[1] - margin_side / 2),
]
rb = [
int(abs_assumed_position[0] + margin_side / 2),
int(abs_assumed_position[1] + margin_side / 2),
]
searched_area = grab_screen(tuple(lt + rb))
searched_area = cv2.cvtColor(searched_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros((margin_side, margin_side))
centered_coordinates = self._match_image(searched_area, icon_name)
locations[tuple(centered_coordinates)] = 1
rel_icon_pos_yx = np.argwhere(locations == 1)
rel_icon_pos = rel_icon_pos_yx.T[::-1].T
if len(rel_icon_pos) == 0:
return None
icon_offset = [
int(rel_icon_pos[0][0] - margin_side / 2 + abs_assumed_position[0]),
int(rel_icon_pos[0][1] - margin_side / 2 + abs_assumed_position[1]),
]
return icon_offset
def _click_socket(self, socket_pos, insert=True):
self.log.debug("Clicking socket")
xy = socket_pos
lt = [xy[0] - 5 * self.px_multiplier, xy[1] - 5 * self.px_multiplier]
rb = [xy[0] + 5 * self.px_multiplier, xy[1] + 5 * self.px_multiplier]
if insert:
self.input_handler.click(*lt, *rb, button="left", raw=True)
else:
self.input_handler.click(*lt, *rb, button="right", raw=True)
self.input_handler.rnd_sleep(min=200, mean=300)
def _tree_pos_to_xy(self, pos, offset=False):
if offset:
return [
pos[0] * X_SCALE * self.px_multiplier,
pos[1] * Y_SCALE * self.px_multiplier,
]
uncentered_xy = [
(pos[0] - self.ingame_pos[0]) * X_SCALE * self.px_multiplier,
(pos[1] - self.ingame_pos[1]) * Y_SCALE * self.px_multiplier,
]
xy = [
int(uncentered_xy[0] + self.origin_pos[0]),
int(uncentered_xy[1] + self.origin_pos[1]),
]
return xy
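# Worked example for _tree_pos_to_xy (hypothetical numbers): with ingame_pos=(1000, 2000),
# origin_pos=(1280, 720) and X_SCALE * px_multiplier == Y_SCALE * px_multiplier == 0.5,
# the tree position (1100, 1900) maps to ((1100-1000)*0.5 + 1280, (1900-2000)*0.5 + 720)
# = (1330, 670) in screen pixels.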
def _add_xy_offset_to_tree_pos(self, offset):
tree_pos = [
self.ingame_pos[0] + offset[0] / (X_SCALE * self.px_multiplier),
self.ingame_pos[1] + offset[1] / (Y_SCALE * self.px_multiplier),
]
return tree_pos
def _analyze_nodes(self, socket_id):
self.log.info("Analyzing nodes for socket id %s" % socket_id)
nodes = []
node_locations, socket_pos = self._find_nodes(socket_id)
self.log.debug(
"Found %s nodes for socket id %s" % (len(node_locations), socket_id)
)
self._click_socket(socket_pos)
for location in node_locations:
if not self._run():
return
node_stats = self._get_node_data(location)
node = {
"location": self._socket_offset_pos(socket_pos, location),
"stats": node_stats,
}
nodes.append(node)
self._click_socket(socket_pos, insert=False)
return nodes
def _socket_offset_pos(self, socket_pos, node_location):
circle_radius = CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier
return [
(node_location[0] - socket_pos[0]) / circle_radius,
(node_location[1] - socket_pos[1]) / circle_radius,
]
def _filter_ocr_lines(self, nodes_lines, max_dist=4):
filtered_nodes = []
for node in nodes_lines:
names = []
mods = []
for line in node["stats"]:
filtered_line = self._filter_nonalpha(line)
if len(filtered_line) < 4 or filtered_line == "Unallocated":
continue
if filtered_line in self.passive_names:
names.append(self.passive_names[filtered_line])
elif filtered_line in self.passive_mods:
filtered_mod, value = filter_mod(line, regex=self.nonalpha_re)
new_mod = re.sub(
self.find_mod_value_re,
str(value),
self.passive_mods[filtered_line],
count=1,
)
mods.append(new_mod)
else:
# Sometimes the OCR might return strange results. If so,
# as a last resort, check levenshtein distance to closest
# node. This shouldn't happen often.
best_distance = 99999999999
best_match = None
for possible_mod in self.passive_nodes:
d = distance(filtered_line, possible_mod)
if d < best_distance:
best_distance = d
best_match = possible_mod
if best_distance > max_dist:
continue
if best_match in self.passive_names:
names.append(self.passive_names[best_match])
elif best_match in self.passive_mods:
filtered_mod, value = filter_mod(line, regex=self.nonalpha_re)
new_mod = re.sub(
self.find_mod_value_re,
str(value),
self.passive_mods[best_match],
count=1,
)
mods.append(new_mod)
if mods:
filtered_nodes.append(
{"location": node["location"], "name": names, "mods": mods}
)
return filtered_nodes
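# The comment in _filter_ocr_lines above describes a Levenshtein-distance fallback
# for noisy OCR output. Below is a minimal, self-contained sketch of that idea; it
# is illustrative only and uses a pure-Python edit distance instead of whatever
# `distance` function the surrounding code imports.
def _closest_known_string(candidate, known_strings, max_dist=4):
    """Return the known string closest to `candidate`, or None if none is close enough."""
    def edit_distance(a, b):
        # classic dynamic-programming Levenshtein distance
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, 1):
            cur = [i]
            for j, cb in enumerate(b, 1):
                cur.append(min(prev[j] + 1,                # deletion
                               cur[j - 1] + 1,             # insertion
                               prev[j - 1] + (ca != cb)))  # substitution
            prev = cur
        return prev[-1]
    best_match, best_distance = None, max_dist + 1
    for known in known_strings:
        d = edit_distance(candidate, known)
        if d < best_distance:
            best_match, best_distance = known, d
    return best_match if best_distance <= max_dist else None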
def _find_nodes(self, socket_id):
self.input_handler.click(0.5, 0.07, 0.51, 0.083, button=None)
socket_pos = self._tree_pos_to_xy(SOCKETS[socket_id])
socket_offset = self._find_socket(socket_pos)
if socket_offset is None:
found_socket = False
socket_offset = [0, 0]
else:
found_socket = True
self.log.debug("Jewel socket offset correction: %s" % socket_offset)
socket_pos[0] += socket_offset[0]
socket_pos[1] += socket_offset[1]
        # Add some margin so that we don't accidentally cut any nodes off
margin = 20 * self.px_multiplier
x1 = int(socket_pos[0] - CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier - margin)
y1 = int(socket_pos[1] - CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier - margin)
x2 = int(x1 + 2 * CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier + 2 * margin)
y2 = int(y1 + 2 * CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier + 2 * margin)
nodes = self._get_node_locations_from_screen((x1, y1, x2, y2))
nodes = self._filter_nodes(nodes, socket_pos)
return nodes, socket_pos
def _find_socket(self, socket_pos, side_len=100):
lt = [int(socket_pos[0] - side_len / 2), int(socket_pos[1] - side_len / 2)]
rb = [lt[0] + side_len, lt[1] + side_len]
socket_area = grab_screen(tuple(lt + rb))
socket_area = cv2.cvtColor(socket_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros((side_len, side_len))
for template_name in [
"Jewel.png",
"JewelSocketed.png",
"LargeJewel.png",
"LargeJewelSocketed.png",
]:
centered_coordinates = self._match_image(socket_area, template_name)
locations[tuple(centered_coordinates)] = 1
rel_node_pos_yx = np.argwhere(locations == 1)
rel_node_pos = rel_node_pos_yx.T[::-1].T
if len(rel_node_pos) == 0:
self.log.warning("Could not find any jewel socket for compensating offset!")
return None
socket_offset = [
int(rel_node_pos[0][0] - side_len / 2),
int(rel_node_pos[0][1] - side_len / 2),
]
return socket_offset
def _filter_nodes(self, nodes, socket_pos, duplicate_min_dist=10):
        # filter duplicate nodes (a standalone sketch of the pairwise-distance trick used here follows this method)
kept_node_indices = [len(nodes) - 1]
z = np.array([[complex(c[0], c[1]) for c in nodes]])
dist_matrix = abs(z.T - z)
for node_idx in range(len(nodes) - 1):
if np.min(dist_matrix[node_idx + 1 :, node_idx]) >= duplicate_min_dist:
kept_node_indices.append(node_idx)
nodes = np.array(nodes)
nodes = nodes[kept_node_indices, :]
# filter nodes outside jewel socket radius
distances_to_socket = np.sqrt(np.sum((nodes - socket_pos) ** 2, axis=1))
nodes = nodes[
distances_to_socket <= CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier
]
return nodes
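# _filter_nodes above removes near-duplicate detections by packing (x, y) points
# into complex numbers, so that abs(z.T - z) yields the full pairwise Euclidean
# distance matrix in one vectorised step. A standalone sketch of that trick, with
# made-up points and a 10 px threshold, is given here for reference.
import numpy as np

def _pairwise_duplicate_filter_sketch():
    points = [(10, 10), (11, 10), (50, 60)]      # the second point nearly duplicates the first
    z = np.array([[complex(x, y) for x, y in points]])
    pairwise_dist = abs(z.T - z)                  # entry [i, j] = distance between points i and j
    keep = [len(points) - 1]                      # always keep the last point
    for i in range(len(points) - 1):
        if np.min(pairwise_dist[i + 1:, i]) >= 10:   # keep only points with no later neighbour within 10 px
            keep.append(i)
    return keep  # -> [2, 1]; point 0 is dropped because point 1 is only 1 px away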
def _get_node_locations_from_screen(self, box):
jewel_area_bgr = grab_screen(box)
jewel_area_gray = cv2.cvtColor(jewel_area_bgr, cv2.COLOR_BGR2GRAY)
locations = np.zeros((box[2] - box[0], box[3] - box[1]))
for template_name in [
"Notable.png",
"NotableAllocated.png",
"Skill.png",
"SkillAllocated.png",
]:
centered_coordinates = self._match_image(jewel_area_gray, template_name)
locations[tuple(centered_coordinates)] = 1
rel_node_pos_yx = np.argwhere(locations == 1)
rel_node_pos = rel_node_pos_yx.T[::-1].T
abs_node_pos = rel_node_pos + [box[0], box[1]]
return abs_node_pos
def _match_image(self, screen, template_name):
template = self.templates_and_masks[template_name]["image"]
mask = self.templates_and_masks[template_name]["mask"]
res = cv2.matchTemplate(screen, template, cv2.TM_CCORR_NORMED, mask=mask)
coordinates = np.where(
res >= TEMPLATES[template_name][self.resolution_prefix + "threshold"]
)
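        # np.where returns a pair of arrays (row indices, column indices) for every
        # position whose match score clears the per-template threshold; adding half
        # the template size below shifts those top-left corners to the icon centres
        # before they are marked in the caller's location grid.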
icon_size = (
int(TEMPLATES[template_name][self.resolution_prefix + "size"][0]),
int(TEMPLATES[template_name][self.resolution_prefix + "size"][1]),
)
icon_center_offset = [int(icon_size[0] / 2), int(icon_size[1] / 2)]
centered_coordinates = [
coordinates[0] + icon_center_offset[0],
coordinates[1] + icon_center_offset[1],
]
return centered_coordinates
def _get_node_data(self, location):
self.log.debug("Getting node stats at location %s" % location)
lt = [
location[0] - 7 * self.px_multiplier,
location[1] - 7 * self.px_multiplier,
]
rb = [
location[0] + 7 * self.px_multiplier,
location[1] + 7 * self.px_multiplier,
]
self.input_handler.click(
*lt,
*rb,
button=None,
raw=True,
speed_factor=self.config["node_search_speed_factor"]
)
textbox_lt = location + [TXT_BOX["x"], TXT_BOX["y"]]
textbox_rb = textbox_lt + [
int(TXT_BOX["w"] * self.px_multiplier),
int(TXT_BOX["h"] * self.px_multiplier),
]
jewel_area_bgr = grab_screen(tuple(np.concatenate([textbox_lt, textbox_rb])))
return jewel_area_bgr
def _setup(self, item_location, copy=False):
item_desc = None
item_name = None
self.input_handler.click_hotkey("p")
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
self.input_handler.click_hotkey("i")
if copy:
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
item = self.input_handler.inventory_copy(
*item_location, OWN_INVENTORY_ORIGIN, speed_factor=2
)
item_desc = item.split("\n")[9].strip()
item_name = item.split("\n")[1].strip()
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
self.input_handler.inventory_click(*item_location, OWN_INVENTORY_ORIGIN)
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
self.input_handler.click_hotkey("i")
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
return item_name, item_desc
def generate_good_strings(self, files):
(im2.shape[1]-im1.shape[1])/2
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
mask = np.zeros(im1.shape[:2], dtype=np.uint8)
#TODO - maybe find something better than median as the threshold
# x0, x1, y0, y1 = []
# mask[y0:y1, x0:x1] = 0
# print('BG'+str(rectangle))
# cv2.GC_BGD, cv2.GC_FGD, cv2.GC_PR_BGD, cv2.GC_PR_FGD, or
#prob. backgnd - entire image
h,w = im1.shape[0:2]
x0, x1, y0, y1 = [0,w,0,h]
mask[y0:y1, x0:x1] = cv2.GC_PR_BGD
# print('PBG x0 {} x1 {} y0 {} y1 {} '.format(x0,x1,y0,y1))
#prob. fgnd - center rectangle
bb_percent_w = 0.5 #percent of image center to use as bb
bb_percent_h = 0.8 #percent of image center to use as bb
w = int(im1.shape[1]*bb_percent_w)
h = int(im1.shape[0]*bb_percent_h)
x = int((im1.shape[1]-w)/2)
y = int((im1.shape[0]-h)/2)
x0, x1, y0, y1 = [x,x+w,y,y+h]
mask[y0:y1, x0:x1] = cv2.GC_PR_FGD
print('PFG x0 {} x1 {} y0 {} y1 {} '.format(x0,x1,y0,y1))
#prob. fgnd - center rectangle
bb_percent = 0.1 #percent of image center to use as bb
w = int(im1.shape[1]*bb_percent)
h = int(im1.shape[0]*bb_percent)
x = int((im1.shape[1]-w)/2)
y = int((im1.shape[0]-h)/2)
x0, x1, y0, y1 = [x,x+w,y,y+h]
mask[y0:y1, x0:x1] = cv2.GC_FGD
# print('FG x0 {} x1 {} y0 {} y1 {} '.format(x0,x1,y0,y1))
try:
#TODO - try more than 1 grabcut call in itr
itr = 2
cv2.grabCut(im1, mask, None, bgdmodel, fgdmodel, itr, cv2.GC_INIT_WITH_MASK) #im, mask, rect, bgmodel, fgmoel, iterations
except:
print('grabcut exception')
return None
mask2 = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255, 0).astype(np.uint8) #return all fg and prob. fg
# mask = background_removal.get_fg_mask(im1,bounding_box=bb)
# print('got mask shape {} uniques {} '.format(mask.shape,np.unique(mask)))
# cv2.imshow('mask_b4gc',mask)
# cv2.imshow('mask_aftergc',mask2)
# cv2.waitKey(0)
overlaid = overlay(mask2, im1,im2)
return overlaid
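# The routine above seeds GrabCut by hand: probable background over the whole
# frame, probable foreground in a large centred rectangle and definite foreground
# in a small centred rectangle, then runs cv2.grabCut in mask-initialised mode.
# A minimal, self-contained sketch of that pattern is shown below; the rectangle
# proportions are placeholders rather than the values used above.
import cv2
import numpy as np

def grabcut_center_sketch(img, iterations=2):
    h, w = img.shape[:2]
    mask = np.full((h, w), cv2.GC_PR_BGD, dtype=np.uint8)                 # probable background everywhere
    mask[h // 10: 9 * h // 10, w // 4: 3 * w // 4] = cv2.GC_PR_FGD        # probable foreground, large centre box
    mask[2 * h // 5: 3 * h // 5, 2 * w // 5: 3 * w // 5] = cv2.GC_FGD     # definite foreground, small centre box
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)
    cv2.grabCut(img, mask, None, bgdmodel, fgdmodel, iterations, cv2.GC_INIT_WITH_MASK)
    # keep definite and probable foreground, as the routine above does
    return np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 255, 0).astype(np.uint8)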
def overlay(im1_mask,im1, bgnd_img,position=None,rotation=0,scale=1,save=True,visual_output=True):
bgnd_img = Utils.get_cv2_img_array(bgnd_img)
w,h = im1.shape[0:2]
if im1_mask.shape[0]>bgnd_img.shape[0] or im1_mask.shape[1]>bgnd_img.shape[1]:
print('overlay larger than image im1 {} im2 {}'.format(im1_mask.shape,bgnd_img.shape))
return
if position == None:
im2,contours,hierarchy = cv2.findContours(im1_mask, 1, 2)
# cv2.imshow('mask1',im1_mask)
# cv2.waitKey(0)
cnt = contours[0]
M = cv2.moments(cnt)
# print('contour moments:'+str(M))
        # From these moments you can extract useful data such as area and centroid.
        # The centroid is given by Cx = M10/M00 and Cy = M01/M00, computed as follows:
try:
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
print('cx {} cy {}'.format(cx,cy))
except:
print('prob division by zero, m00={}'.format(M['m00']))
cx = im1_mask.shape[0]/2
cy = im1_mask.shape[1]/2
# cv2.circle(im1_mask,(cx,cy),20,(255,100,50),thickness=5)
# cv2.rectangle(img_arr,(bbox[0],bbox[1]),(bbox[0]+bbox[2],bbox[1]+bbox[3]),color=(255,255,0),thickness=2)
# cv2.imshow('mask1',im1_mask)
# cv2.waitKey(0)
dx = im1_mask.shape[0]/2-cx
dy = im1_mask.shape[1]/2-cy
position = (dx,dy)
print('cx {} cy {} dx {} dy {}'.format(cx,cy,dx,dy))
print('shifting by {}'.format(position))
translation_matrix = np.float32([ [1,0,position[1]], [0,1,position[0]]] )
im1_mask = cv2.warpAffine(im1_mask, translation_matrix, (w, h)) # cv2.INTER_LINEAR, cv2.BORDER_CONSTANT, 255)
im1 = cv2.warpAffine(im1, translation_matrix, (w, h)) #cv2.INTER_LINEAR, cv2.BORDER_CONSTANT, 255)
if scale != 1:
print('im1_mask {} im1 {} before resize'.format(im1_mask.shape,im1.shape))
h,w = im1.shape[0:2]
dsize = (int(w*scale),int(h*scale))
im1_mask = cv2.resize(im1_mask,dsize)
im1 = cv2.resize(im1,dsize)
print('im1_mask {} im1 {} after resize'.format(im1_mask.shape,im1.shape))
if scale>1: #crop extra
extra = (dsize[0]-h,dsize[1]-w)
starty=extra[0]/2
endy = extra[0]/2+h
startx=extra[1]/2
endx = extra[1]/2+w
print('sy {} endy {} sx {} edx {}'.format(starty,endy,startx,endx))
im1 = im1[starty:endy,startx:endx,:]
im1_mask=im1_mask[starty:endy,startx:endx]
print('im1_mask {} im1 {} after crop'.format(im1_mask.shape,im1.shape))
else: #add missing
extra = (h-dsize[0],w-dsize[1])
print('extra {} h {} w {} dsize {} e0 {} e1 {}'.format(extra,h,w,dsize,extra[0],extra[1]))
starty=extra[0]/2
endy = extra[0]/2+dsize[0]
startx=extra[1]/2
endx = extra[1]/2+dsize[1]
print('sy {} endy {} sx {} edx {}'.format(starty,endy,startx,endx))
im1_dest = np.zeros((h,w,3))
im1_mask_dest = np.zeros((h,w))
im1_dest[starty:endy,startx:endx,:]= im1
im1_mask_dest[starty:endy,startx:endx]=im1_mask
print('im1_mask {} im1 {} after padding'.format(im1_mask.shape,im1.shape))
if rotation != 0:
center = (w/2,h/2)
r = cv2.getRotationMatrix2D(center,rotation,scale=1)
im1_mask = cv2.warpAffine(im1_mask, r, (w, h)) # cv2.INTER_LINEAR, cv2.BORDER_CONSTANT, 255)
im1 = cv2.warpAffine(im1, r, (w, h)) #cv2.INTER_LINEAR, cv2.BORDER_CONSTANT, 255)
mask_y = (bgnd_img.shape[0]-im1_mask.shape[0])/2
mask_x = (bgnd_img.shape[1]-im1_mask.shape[1])/2
final_canvas = np.zeros_like(bgnd_img)
mask_height = im1_mask.shape[0]
mask_width = im1_mask.shape[1]
mask_on_canvas = np.zeros_like(bgnd_img)
mask_on_canvas[mask_y:mask_y+mask_height,mask_x:mask_x+mask_width,0] = im1[:,:,0]
mask_on_canvas[mask_y:mask_y+mask_height,mask_x:mask_x+mask_width,1] = im1[:,:,1]
mask_on_canvas[mask_y:mask_y+mask_height,mask_x:mask_x+mask_width,2] = im1[:,:,2]
print('im1 {} bgndd {} final canvas {} maskh {} maskw {}'.format(im1_mask.shape,bgnd_img.shape,final_canvas.shape,mask_height,mask_width))
final_canvas[mask_y:mask_y+mask_height,mask_x:mask_x+mask_width,0] = im1_mask
final_canvas[mask_y:mask_y+mask_height,mask_x:mask_x+mask_width,1] = im1_mask
final_canvas[mask_y:mask_y+mask_height,mask_x:mask_x+mask_width,2] = im1_mask
masked_1 = np.where(final_canvas!=0,mask_on_canvas,bgnd_img)
if visual_output:
# cv2.imshow('mask1',im1_mask)
# cv2.imshow('mask_on_canvas',mask_on_canvas)
# cv2.imshow('final',final_canvas)
# cv2.imshow('bgnd',bgnd_img)
cv2.imshow('masked_1',masked_1)
print('use arrow keys to translate:awds rotate:er scale:o-,p+ (q)uit, return to save')
k = cv2.waitKey(0)
#shift mask interactively
print('pressed value:'+str(k))
shift = 5 #pixels to translate each time
if k == 37 or k == ord('a'): #left
return(overlay(im1_mask,im1,bgnd_img,position=(0,-shift)))
elif k == 38 or k == ord('w'): #up
return(overlay(im1_mask,im1,bgnd_img,position=(-shift,0)))
elif k == 39 or k == ord('d'): #right
return(overlay(im1_mask,im1,bgnd_img,position=(0,+shift)))
elif k == 40 or k == ord('s'): #down
return(overlay(im1_mask,im1,bgnd_img,position=(shift,0)))
elif k == ord('+') or k==ord('p'): #enlargen
return(overlay(im1_mask,im1,bgnd_img,scale=1.05))
elif k == ord('-') or k==ord('o'): #smallen
return(overlay(im1_mask,im1,bgnd_img,scale=.95))
elif k == ord('e'): #rot-
return(overlay(im1_mask,im1,bgnd_img,rotation=-shift))
elif k == ord('r'): #rot+
return(overlay(im1_mask,im1,bgnd_img,rotation=shift))
elif k == ord('q'): #quit
return
return masked_1
# overlaid = np.where(mask_3channels>0,im1,im2)
def get_fg_mask(image, bounding_box=None):
rect = (0, 0, image.shape[1]-1, image.shape[0]-1)
    bgdmodel = np.zeros((1, 65), np.float64)  # grabCut requires 1x65 float64 buffers for its internal GMM models
fgdmodel = np.zeros((1, 65), np.float64)
# bounding box was sent from a human - grabcut with bounding box mask
if Utils.legal_bounding_box(bounding_box):
if Utils.all_inclusive_bounding_box(image, bounding_box): # bb is nearly the whole image
mask = np.zeros(image.shape[:2], dtype=np.uint8)
cv2.grabCut(image, mask, rect, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_RECT)
else:
mask = bb_mask(image, bounding_box)
cv2.grabCut(image, mask, rect, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_MASK)
# grabcut on the whole image, with/without face
else:
faces_dict = find_face_cascade(image)
# if len(faces_dict['faces']) > 0: # grabcut with mask
# try:
# rectangles = body_estimation(image, faces_dict['faces'][0])
# mask = create_mask_for_gc(rectangles, image)
# except:
# mask = create_mask_for_gc(image)
#
# else: # grabcut with arbitrary rect
mask = create_arbitrary(image)
cv2.grabCut(image, mask, rect, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_RECT)
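    # In OpenCV, cv2.GC_FGD == 1 and cv2.GC_PR_FGD == 3, so the test below keeps
    # every pixel labelled as definite or probable foreground.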
mask2 = np.where((mask == 1) + (mask == 3), 255, 0).astype(np.uint8)
return mask2
def smallify_and_implant(arr_url_or_file,reduction_percent=30,background_image=None,bb=None,fade_in=True):
'''
WIP - finish this to augment yolo stuff - and call it from augment_images , checking size of largest object
and smallifying accordingly. so we have to keep track of bb's too and return those smallified in same way
:param arr_url_or_file:
:param reduction_percent:
:param background_image:
:return:
'''
img_arr = Utils.get_cv2_img_array(arr_url_or_file)
orig_h,orig_w = img_arr.shape[0:2]
if background_image is not None:
new_arr = resize_keep_aspect(background_image,output_size=(orig_h,orig_w))
else:
new_arr = np.zeros_like(img_arr)
    # reduction_percent is a percentage; cv2.resize also expects an integer (width, height) tuple
    dsize = (int(orig_w * (1 - reduction_percent / 100.0)),
             int(orig_h * (1 - reduction_percent / 100.0)))
reduced = cv2.resize(img_arr,dsize)
x_wiggleroom = orig_w - dsize[0]
y_wiggleroom = orig_h - dsize[1]
def dominant_colors(img_arr,n_components=2):
'''
:param img_arr: this will generally be a subimage (orig image cropped to a bb)
:return:
'''
dom_color = None
if img_arr is None:
print('got non arr in dominant_colors')
return None
hsv = cv2.cvtColor(img_arr, cv2.COLOR_BGR2HSV)
if hsv is None:
print('some prob with hsv')
return None
try:
avg_hue = np.mean(hsv[:,:,0])
avg_sat = np.mean(hsv[:,:,1])
avg_val = np.mean(hsv[:,:,2])
stdev_hue = np.std(hsv[:,:,0])
stdev_sat = np.std(hsv[:,:,1])
stdev_val = np.std(hsv[:,:,2])
#take care of large std for red (which wraps around from 180 to 0
if stdev_hue>60:
print('correcting hue modulo, orig mean {} std {}'.format(avg_hue,stdev_hue))
hue=hsv[:,:,0]
mask=hue>90
hue=hue-mask*180
avg_hue = np.mean(hue)
stdev_hue = np.std(hue)
print('corrected hue modulo, new mean {} std {}'.format(avg_hue,stdev_hue))
except:
print('problem calculating sat or val')
print('avg hue {} std {} avg sat {} std {} avg val {} std {}'.format(avg_hue,stdev_hue,avg_sat,stdev_sat,avg_val,stdev_val))
min_sat_for_color = 0.3*255 #102
min_val_for_color=0.3*255 #76
max_std_for_color=70
max_val_for_black=0.35*255 #89
min_val_for_white=0.8*255 #204
max_sat_for_white=0.15*255 #38
max_sat_for_gray=0.1*255
max_val_for_gray=0.8*255
min_val_for_gray=0.3*255
if avg_sat > min_sat_for_color and avg_val > min_val_for_color and stdev_hue<max_std_for_color: #color in visible range
# print('got visible color')
colors = ['red','orange','yellow','green','aqua','blue','purple','pink','red']
# range_edges=[20,45,70,140,180,260,290,291,340] #for range 0-360
range_edges=[13,22,35,75,90,130,145,170,180]
i=0
while(avg_hue>range_edges[i]):
i=i+1
# i=i-1
# print('range edge '+str(i)+' color '+colors[i])
dom_color = colors[i]
elif avg_val < max_val_for_black:
# print('got black')
dom_color = 'black'
elif avg_val>min_val_for_white and avg_sat<max_sat_for_white:
# print('got white')
dom_color = 'white'
elif avg_val<max_val_for_gray and avg_val>min_val_for_gray and avg_sat<max_sat_for_gray:
dom_color='gray'
# grab the image channels, initialize the tuple of colors,
# the figure and the flattened feature vector
debug=False
if(debug):
chans = cv2.split(hsv)
colors = ("b", "g", "r")
plt.figure()
plt.title("'Flattened' Color Histogram")
plt.xlabel("Bins")
plt.ylabel("# of Pixels")
features = []
# loop over the image channels
for (chan, color) in zip(chans, colors):
# create a histogram for the current channel and
# concatenate the resulting histograms for each
# channel
hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
features.extend(hist)
# plot the histogram
plt.plot(hist, color = color)
plt.xlim([0, 256])
blu_patch = mpatches.Patch(color='blue', label='Hue')
# plt.legend(handles=[blu_patch])
grn_patch = mpatches.Patch(color='green', label='Sat')
# plt.legend(handles=[grn_patch])
red_patch = mpatches.Patch(color='red', label='Val')
plt.legend(handles=[red_patch,blu_patch,grn_patch])
# here we are simply showing the dimensionality of the
# flattened color histogram 256 bins for each channel
# x 3 channels = 768 total values -- in practice, we would
            # normally not use
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ClaimResponse) on 2019-07-29.
# 2019, SMART Health IT.
import sys
from dataclasses import dataclass
from typing import ClassVar, Optional, List
from .fhirabstractbase import empty_list
from .address import Address
from .attachment import Attachment
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .identifier import Identifier
from .money import Money
from .period import Period
from .quantity import Quantity
@dataclass
class ClaimResponseAddItemDetailSubDetail(BackboneElement):
""" Insurer added line items.
The third-tier service adjudications for payor added services.
"""
resource_type: ClassVar[str] = "ClaimResponseAddItemDetailSubDetail"
productOrService: CodeableConcept = None
modifier: Optional[List[CodeableConcept]] = empty_list()
quantity: Optional[Quantity] = None
unitPrice: Optional[Money] = None
factor: Optional[float] = None
net: Optional[Money] = None
noteNumber: Optional[List[int]] = empty_list()
adjudication: List["ClaimResponseItemAdjudication"] = empty_list()
def elementProperties(self):
js = super(ClaimResponseAddItemDetailSubDetail, self).elementProperties()
js.extend([
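            # Each tuple below appears to follow the fhirclient convention:
            # (attribute name, JSON field name, type, is_list, "of_many" choice group, required)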
("productOrService", "productOrService", CodeableConcept, False, None, True),
("modifier", "modifier", CodeableConcept, True, None, False),
("quantity", "quantity", Quantity, False, None, False),
("unitPrice", "unitPrice", Money, False, None, False),
("factor", "factor", float, False, None, False),
("net", "net", Money, False, None, False),
("noteNumber", "noteNumber", int, True, None, False),
("adjudication", "adjudication", ClaimResponseItemAdjudication, True, None, True),
])
return js
@dataclass
class ClaimResponseAddItemDetail(BackboneElement):
""" Insurer added line details.
The second-tier service adjudications for payor added services.
"""
resource_type: ClassVar[str] = "ClaimResponseAddItemDetail"
productOrService: CodeableConcept = None
modifier: Optional[List[CodeableConcept]] = empty_list()
quantity: Optional[Quantity] = None
unitPrice: Optional[Money] = None
factor: Optional[float] = None
net: Optional[Money] = None
noteNumber: Optional[List[int]] = empty_list()
adjudication: List["ClaimResponseItemAdjudication"] = empty_list()
subDetail: Optional[List[ClaimResponseAddItemDetailSubDetail]] = empty_list()
def elementProperties(self):
js = super(ClaimResponseAddItemDetail, self).elementProperties()
js.extend([
("productOrService", "productOrService", CodeableConcept, False, None, True),
("modifier", "modifier", CodeableConcept, True, None, False),
("quantity", "quantity", Quantity, False, None, False),
("unitPrice", "unitPrice", Money, False, None, False),
("factor", "factor", float, False, None, False),
("net", "net", Money, False, None, False),
("noteNumber", "noteNumber", int, True, None, False),
("adjudication", "adjudication", ClaimResponseItemAdjudication, True, None, True),
("subDetail", "subDetail", ClaimResponseAddItemDetailSubDetail, True, None, False),
])
return js
@dataclass
class ClaimResponseItemDetailSubDetail(BackboneElement):
""" Adjudication for claim sub-details.
A sub-detail adjudication of a simple product or service.
"""
resource_type: ClassVar[str] = "ClaimResponseItemDetailSubDetail"
subDetailSequence: int = None
noteNumber: Optional[List[int]] = empty_list()
adjudication: Optional[List["ClaimResponseItemAdjudication"]] = empty_list()
def elementProperties(self):
js = super(ClaimResponseItemDetailSubDetail, self).elementProperties()
js.extend([
("subDetailSequence", "subDetailSequence", int, False, None, True),
("noteNumber", "noteNumber", int, True, None, False),
("adjudication", "adjudication", ClaimResponseItemAdjudication, True, None, False),
])
return js
@dataclass
class ClaimResponseItemAdjudication(BackboneElement):
""" Adjudication details.
If this item is a group then the values here are a summary of the
adjudication of the detail items. If this item is a simple product or
service then this is the result of the adjudication of this item.
"""
resource_type: ClassVar[str] = "ClaimResponseItemAdjudication"
category: CodeableConcept = None
reason: Optional[CodeableConcept] = None
amount: Optional[Money] = None
value: Optional[float] = None
def elementProperties(self):
js = super(ClaimResponseItemAdjudication, self).elementProperties()
js.extend([
("category", "category", CodeableConcept, False, None, True),
("reason", "reason", CodeableConcept, False, None, False),
("amount", "amount", Money, False, None, False),
("value", "value", float, False, None, False),
])
return js
@dataclass
class ClaimResponseItemDetail(BackboneElement):
""" Adjudication for claim details.
A claim detail. Either a simple (a product or service) or a 'group' of sub-
details which are simple items.
"""
resource_type: ClassVar[str] = "ClaimResponseItemDetail"
detailSequence: int = None
noteNumber: Optional[List[int]] = empty_list()
adjudication: List[ClaimResponseItemAdjudication] = empty_list()
subDetail: Optional[List[ClaimResponseItemDetailSubDetail]] = empty_list()
def elementProperties(self):
js = super(ClaimResponseItemDetail, self).elementProperties()
js.extend([
("detailSequence", "detailSequence", int, False, None, True),
("noteNumber", "noteNumber", int, True, None, False),
("adjudication", "adjudication", ClaimResponseItemAdjudication, True, None, True),
("subDetail", "subDetail", ClaimResponseItemDetailSubDetail, True, None, False),
])
return js
@dataclass
class ClaimResponseItem(BackboneElement):
""" Adjudication for claim line items.
A claim line. Either a simple (a product or service) or a 'group' of
details which can also be a simple items or groups of sub-details.
"""
resource_type: ClassVar[str] = "ClaimResponseItem"
itemSequence: int = None
noteNumber: Optional[List[int]] = empty_list()
adjudication: List[ClaimResponseItemAdjudication] = empty_list()
detail: Optional[List[ClaimResponseItemDetail]] = empty_list()
def elementProperties(self):
js = super(ClaimResponseItem, self).elementProperties()
js.extend([
("itemSequence", "itemSequence", int, False, None, True),
("noteNumber", "noteNumber", int, True, None, False),
("adjudication", "adjudication", ClaimResponseItemAdjudication, True, None, True),
("detail", "detail", ClaimResponseItemDetail, True, None, False),
])
return js
@dataclass
class ClaimResponseAddItem(BackboneElement):
""" Insurer added line items.
The first-tier service adjudications for payor added product or service
lines.
"""
resource_type: ClassVar[str] = "ClaimResponseAddItem"
itemSequence: Optional[List[int]] = empty_list()
detailSequence: Optional[List[int]] = empty_list()
subdetailSequence: Optional[List[int]] = empty_list()
provider: Optional[List[FHIRReference]] = empty_list()
productOrService: CodeableConcept = None
modifier: Optional[List[CodeableConcept]] = empty_list()
programCode: Optional[List[CodeableConcept]] = empty_list()
servicedDate: Optional[FHIRDate] = None
servicedPeriod: Optional[Period] = None
locationCodeableConcept: Optional[CodeableConcept] = None
locationAddress: Optional[Address] = None
locationReference: Optional[FHIRReference] = None
quantity: Optional[Quantity] = None
unitPrice: Optional[Money] = None
factor: Optional[float] = None
net: Optional[Money] = None
bodySite: Optional[CodeableConcept] = None
subSite: Optional[List[CodeableConcept]] = empty_list()
noteNumber: Optional[List[int]] = empty_list()
adjudication: List[ClaimResponseItemAdjudication] = empty_list()
detail: Optional[List[ClaimResponseAddItemDetail]] = empty_list()
def elementProperties(self):
js = super(ClaimResponseAddItem, self).elementProperties()
js.extend([
("itemSequence", "itemSequence", int, True, None, False),
("detailSequence", "detailSequence", int, True, None, False),
("subdetailSequence", "subdetailSequence", int, True, None, False),
("provider", "provider", FHIRReference, True, None, False),
("productOrService", "productOrService", CodeableConcept, False, None, True),
("modifier", "modifier", CodeableConcept, True, None, False),
("programCode", "programCode", CodeableConcept, True, None, False),
("servicedDate", "servicedDate", FHIRDate, False, "serviced", False),
("servicedPeriod", "servicedPeriod", Period, False, "serviced", False),
("locationCodeableConcept", "locationCodeableConcept", CodeableConcept, False, "location", False),
("locationAddress", "locationAddress", Address, False, "location", False),
("locationReference", "locationReference", FHIRReference, False, "location", False),
("quantity", "quantity", Quantity, False, None, False),
("unitPrice", "unitPrice", Money, False, None, False),
("factor", "factor", float, False, None, False),
("net", "net", Money, False, None, False),
("bodySite", "bodySite", CodeableConcept, False, None, False),
("subSite", "subSite", CodeableConcept, True, None, False),
("noteNumber", "noteNumber", int, True, None, False),
("adjudication", "adjudication", ClaimResponseItemAdjudication, True, None, True),
("detail", "detail", ClaimResponseAddItemDetail, True, None, False),
])
return js
@dataclass
class ClaimResponseTotal(BackboneElement):
""" Adjudication totals.
Categorized monetary totals for the adjudication.
"""
resource_type: ClassVar[str] = "ClaimResponseTotal"
category: CodeableConcept = None
amount: Money = None
def elementProperties(self):
js = super(ClaimResponseTotal, self).elementProperties()
js.extend([
("category", "category", CodeableConcept, False, None, True),
("amount", "amount", Money, False, None, True),
])
return js
@dataclass
class ClaimResponsePayment(BackboneElement):
""" Payment Details.
Payment details for the adjudication of the claim.
"""
resource_type: ClassVar[str] = "ClaimResponsePayment"
type: CodeableConcept = None
adjustment: Optional[Money] = None
adjustmentReason: Optional[CodeableConcept] = None
date: Optional[FHIRDate] = None
amount: Money = None
identifier: Optional[Identifier] = None
def elementProperties(self):
js = super(ClaimResponsePayment, self).elementProperties()
js.extend([
("type", "type", CodeableConcept, False, None, True),
("adjustment", "adjustment", Money, False, None, False),
("adjustmentReason", "adjustmentReason", CodeableConcept, False, None, False),
("date", "date", FHIRDate, False, None, False),
("amount", "amount", Money, False, None, True),
("identifier", "identifier", Identifier, False, None, False),
])
return js
@dataclass
class ClaimResponseProcessNote(BackboneElement):
""" Note concerning adjudication.
A note that describes or explains adjudication results in a human readable
form.
"""
resource_type: ClassVar[str] = "ClaimResponseProcessNote"
number: Optional[int] = None
type: Optional[str] = None
text: str = None
language: Optional[CodeableConcept] = None
def elementProperties(self):
js = super(ClaimResponseProcessNote, self).elementProperties()
js.extend([
("number", "number", int, False, None, False),
("type", "type", str, False, None, False),
("text", "text", str, False, None, True),
("language", "language", CodeableConcept, False, None, False),
])
return js
@dataclass
class ClaimResponseInsurance(BackboneElement):
""" Patient insurance information.
Financial instruments for reimbursement for the health care products and
services specified on the claim.
"""
resource_type: ClassVar[str] = "ClaimResponseInsurance"
sequence: int = None
focal: bool = None
coverage: FHIRReference = None
businessArrangement: Optional[str] = None
claimResponse: Optional[FHIRReference] = None
def elementProperties(self):
js = super(ClaimResponseInsurance, self).elementProperties()
js.extend([
("sequence", "sequence", int, False, None, True),
("focal", "focal", bool, False, None, True),
("coverage", "coverage", FHIRReference, False, None, True),
("businessArrangement", "businessArrangement", str, False, None, False),
("claimResponse", "claimResponse", FHIRReference, False, None, False),
])
return js
@dataclass
class ClaimResponseError(BackboneElement):
""" Processing errors.
Errors encountered during the processing of the adjudication.
"""
resource_type: ClassVar[str] = "ClaimResponseError"
itemSequence: Optional[int] = None
detailSequence: Optional[int] = None
subDetailSequence: Optional[int] = None
code: CodeableConcept = None
def elementProperties(self):
js = super(ClaimResponseError, self).elementProperties()
js.extend([
("itemSequence", "itemSequence", int, False, None, False),
("detailSequence", "detailSequence", int, False, None, False),
("subDetailSequence", "subDetailSequence", int, False, None, False),
("code", "code", CodeableConcept, False, None, True),
])
return js
@dataclass
class ClaimResponse(DomainResource):
""" Response to a claim predetermination or preauthorization.
This resource provides the adjudication details from the processing of a
Claim resource.
"""
resource_type: ClassVar[str] = "ClaimResponse"
identifier: Optional[List[Identifier]] = empty_list()
status: str = None
type: CodeableConcept = None
subType: Optional[CodeableConcept] = None
use: str = None
patient: FHIRReference = None
created: FHIRDate = None
insurer: FHIRReference = None
requestor: Optional[FHIRReference] = None
import tensorflow as tf
from tf_utils import ops as _ops
def norm(value, name_or_scope=None):
with tf.variable_scope(name_or_scope,
default_name='norm',
values=[value]):
# import ipdb
# ipdb.set_trace()
# value.get_shape().assert_has_rank(1)
return tf.sqrt(tf.reduce_sum(tf.square(value)))
def gradient_sign_by_prod(grad_cur,
grad_prev,
activation,
barrier=1,
name_or_scope=None,
is_debug=False):
"""Returns the scaled gradients to update the memory.
grad_sign = grad_cur * grad_prev
    We magnify the learning rate if the sign agrees (a short numeric sketch follows this function):
positive * positive -> > 0
positive * negative -> < 0
negative * negative -> > 0
Args:
grad_cur: A tensor of arbitrary shape.
grad_prev: A tensor of arbitrary shape, same as grad_cur
activation: centers * size(grad_cur)
Returns:
A tensor of size(grad_cur).
"""
with tf.variable_scope(name_or_scope,
default_name='gradient_sign_ref',
values=[grad_cur, grad_prev, activation]):
grad_cur.get_shape().assert_is_compatible_with(grad_prev.get_shape())
grad_cur = tf.reshape(grad_cur, [-1])
grad_prev = tf.reshape(grad_prev, [-1])
assert (activation.get_shape().as_list()[0] ==
grad_cur.get_shape().as_list()[0])
barrier_max = tf.constant(barrier, dtype=tf.float32)
barrier_min = -barrier_max
grad_change_raw = tf.maximum(
barrier_min,
tf.minimum(
barrier_max,
tf.multiply(grad_cur, grad_prev)))
grad_change_raw_tiled = tf.tile(
tf.expand_dims(grad_change_raw, axis=1),
[1, activation.get_shape().as_list()[1]])
# everything is multiplied by its activation
grad_change_activated = tf.reduce_mean(
tf.multiply(activation, grad_change_raw_tiled), axis=0)
# previously we basically always set it to 1 and -1 for the
# numerical precision of a Gaussian.
# grad_change = _ops.safe_div(grad_change_activated,
# activation_sum)
# We have to divide by the number of models which can be active.
# actually this is dividing by the size of the gradient
        # - which makes more sense
# grad_change = tf.div(grad_change_activated,
# activation.get_shape().as_list()[0])
return grad_change_activated
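# A small NumPy illustration (not part of the module) of the sign-agreement idea
# documented in gradient_sign_by_prod: when consecutive gradients for a weight
# agree in sign their product is positive, pushing the per-centre update up; when
# they disagree the product is negative and the update is pushed down.
import numpy as np

grad_prev_example = np.array([0.4, -0.2, 0.1])
grad_cur_example = np.array([0.5, -0.3, -0.2])
agreement = np.clip(grad_cur_example * grad_prev_example, -1.0, 1.0)
# agreement -> [0.2, 0.06, -0.02]: the first two entries agree in sign, the last
# one flips sign, mirroring the barrier-clipped product used in the code above.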
def gradient_sign(grad_cur,
grad_prev,
activation,
barrier=1,
name_or_scope=None,
is_debug=False):
"""Returns the scaled gradients to update the memory.
grad_sign = sign(grad_cur * grad_prev)
Args:
grad_cur: A tensor of arbitrary shape.
grad_prev: A tensor of arbitrary shape, same as grad_cur
activation: centers * size(grad_cur)
Returns:
A tensor of size(grad_cur).
"""
with tf.variable_scope(name_or_scope,
default_name='gradient_sign_ref',
values=[grad_cur, grad_prev, activation]):
grad_cur.get_shape().assert_is_compatible_with(grad_prev.get_shape())
grad_cur = tf.reshape(grad_cur, [-1])
grad_prev = tf.reshape(grad_prev, [-1])
assert (activation.get_shape().as_list()[0] ==
grad_cur.get_shape().as_list()[0])
grad_change_raw = tf.sign(tf.multiply(grad_cur, grad_prev))
grad_change_raw_tiled = tf.tile(
tf.expand_dims(grad_change_raw, axis=1),
[1, activation.get_shape().as_list()[1]])
# everything is multiplied by its activation
grad_change_activated = tf.reduce_sum(
tf.multiply(activation, grad_change_raw_tiled), axis=0)
activation_sum = tf.reduce_sum(activation, axis=0)
grad_change = _ops.safe_div(grad_change_activated,
activation_sum)
return grad_change
def activation_rbf_2d(data, center, sigma, active, name_or_scope=None,
is_debug=False):
"""Return an activation tensor for the active centers.
We return the rbf function for one dimensional data.
exp(-0.5 / sigma * (data - center[:active])**2)
Args:
data (2): A tensor concerning the current data value.
center (max_centers): A variable containing the center values.
sigma (1): A variable containing the sigma scaling.
active (0): A variable to indicate the number of active centers.
Returns:
A tensor of shape [active].
"""
with tf.variable_scope(name_or_scope,
                           default_name='activation_rbf_2d',
values=[data, center, sigma, active]):
data.get_shape().assert_has_rank(2)
center.get_shape().assert_has_rank(2)
sigma.get_shape().assert_has_rank(1)
active.get_shape().assert_has_rank(0)
data_tiled = tf.tile(data, [active, 1])
center_sliced = tf.slice(center,
begin=[0, 0], size=[active, -1])
sigma_sliced = tf.slice(sigma, begin=[0], size=[active])
sigma_scaled = tf.divide(tf.constant(-0.5), sigma_sliced)
center_diff = tf.subtract(data_tiled, center_sliced)
center_diff_square = tf.reduce_sum(tf.square(center_diff), axis=1)
def _do_print():
sigma_ = sigma_scaled
sigma_ = _ops.print_debug(sigma_,
[active],
"mth_rbf",
is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_, [sigma_[active - 1]],
"rbf_sigma", is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_, [data_tiled[active - 1]],
"rbf_x", is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_, [center_sliced[active - 1]],
"rbf_c", is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_, [center_diff[active - 1]],
"rbf_x_minus_c", is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_,
[center_diff_square[active - 1]],
"rbf_centerdiff", is_debug=is_debug)
return sigma_
def _dont_print():
return sigma_scaled
sigma_scaled = tf.cond(tf.equal(active, 0), _dont_print, _do_print)
return tf.exp(tf.multiply(sigma_scaled, center_diff_square))
def multi_activation_rbf(data,
center,
sigma,
name_or_scope=None,
is_debug=False):
"""Return an activation tensor for the active centers.
We return the rbf function for one dimensional data.
exp(-0.5 / sigma * (data - center[:active])**2)
Args:
data (1): A tensor concerning the current data value.
center (max_centers): A variable containing the center values.
sigma: A scalar constant containing for the sigma scaling.
Returns:
A tensor of shape [active].
"""
with tf.variable_scope(name_or_scope,
default_name='multi_activation_rbf',
values=[data, center, ]):
data.get_shape().assert_has_rank(1)
center.get_shape().assert_has_rank(1)
data_tiled = tf.tile(tf.expand_dims(data, axis=1),
[1, center.get_shape().as_list()[0]],
name='tile_data_to_center')
center_diff = tf.subtract(data_tiled, center, name='d_minus_c')
center_diff_square = tf.square(center_diff, name='d_minus_c_squared')
sigma_scaled = tf.divide(tf.constant(-0.5), sigma)
return tf.exp(tf.multiply(
sigma_scaled, center_diff_square, name='sigma_times_mu'))
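# A quick NumPy check (illustrative only) of the RBF activation used above:
# exp(-0.5 / sigma * (data - center) ** 2) equals 1.0 when the data point sits
# exactly on a centre and decays towards 0 as the distance grows.
import numpy as np

example_centers = np.array([0.0, 1.0, 4.0])
example_activation = np.exp(-0.5 / 2.0 * (1.0 - example_centers) ** 2)
# example_activation -> [0.7788, 1.0, 0.1054]: the centre at 1.0 is fully active,
# the centre at 4.0 barely responds.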
def activation_rbf_1d(data,
center,
sigma,
active,
name_or_scope=None,
is_debug=False):
"""Return an activation tensor for the active centers.
We return the rbf function for one dimensional data.
exp(-0.5 / sigma * (data - center[:active])**2)
Args:
data (1): A tensor concerning the current data value.
center (max_centers): A variable containing the center values.
sigma (1): A variable containing the sigma scaling.
active (0): A variable to indicate the number of active centers.
Returns:
A tensor of shape [active].
"""
with tf.variable_scope(name_or_scope,
default_name='activation_rbf_1d',
values=[data, center, sigma, active]):
data.get_shape().assert_has_rank(1)
center.get_shape().assert_has_rank(1)
sigma.get_shape().assert_has_rank(1)
active.get_shape().assert_has_rank(0)
data_tiled = tf.tile(data, [active])
center_sliced = tf.slice(center,
begin=[0], size=[active])
sigma_sliced = tf.slice(sigma, begin=[0], size=[active])
sigma_scaled = tf.divide(tf.constant(-0.5), sigma_sliced)
center_diff = tf.subtract(data_tiled, center_sliced)
center_diff_square = tf.square(center_diff)
def _do_print():
sigma_ = sigma_scaled
sigma_ = _ops.print_debug(sigma_,
[active],
"mth_rbf",
is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_, [sigma_[active - 1]],
"rbf_sigma", is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_, [data_tiled[active - 1]],
"rbf_x", is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_, [center_sliced[active - 1]],
"rbf_c", is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_, [center_diff[active - 1]],
"rbf_x_minus_c", is_debug=is_debug)
sigma_ = _ops.print_debug(sigma_,
[center_diff_square[active - 1]],
"rbf_centerdiff", is_debug=is_debug)
return sigma_
def _dont_print():
return sigma_scaled
sigma_scaled = tf.cond(tf.equal(active, 0), _dont_print, _do_print)
return tf.exp(tf.multiply(sigma_scaled, center_diff_square))
def activation_update_freq(activation,
active,
threshold,
activation_used,
activation_count,
name_or_scope=None):
"""Update the variables _used and _count with the latest activations.
If an activation is above a threshold we increment the
    activation_used count and always the activation_count.
    We only do this for the parts of the activations which are already
active meaning we have set the centers.
Args:
activation (active): A tensor with the activations.
active (0): A variable with the number of active centers.
threshold (0): A tensor for >= threshold.
activation_used (max_centers): A variable containing the used count.
        activation_count (max_centers): A variable containing the total count.
Returns:
The assigned activation_used and activation_count.
"""
with tf.variable_scope(name_or_scope,
default_name='activation_update_active',
values=[activation,
active,
threshold,
activation_used,
activation_count]):
activation.get_shape().assert_has_rank(1)
active.get_shape().assert_has_rank(0)
activation_used.get_shape().assert_has_rank(1)
activation_count.get_shape().assert_has_rank(1)
# We use the fact that true = 1.0 and false 0.0
update_used_sliced = tf.to_int32(
tf.greater_equal(activation, threshold))
elements_unused = tf.shape(activation_used)[0] - active
update_used = tf.concat(
0, [update_used_sliced, tf.zeros([elements_unused],
dtype=tf.int32)])
activation_used = activation_used.assign_add(update_used)
update_count = tf.concat(
0, [tf.ones([active], dtype=tf.int32),
tf.zeros([elements_unused], dtype=tf.int32)])
activation_count = activation_count.assign_add(update_count)
return activation_used, activation_count
def multi_compute_eta(activation,
value,
name_or_scope=None,
is_debug=False):
"""Return the new scaling for the learning rate.
Args:
activation (active): A tensor with the activations.
value (max_centers): A variable containing the value for the center.
active (0): A variable with the number of active centers.
Returns:
A tensor of shape [active, 1].
"""
with tf.variable_scope(name_or_scope,
default_name='compute_eta',
values=[activation, value]):
assert (activation.get_shape().as_list()[1] ==
value.get_shape().as_list()[0])
activation_total = tf.reduce_sum(activation, axis=1)
value_tiled = tf.tile(
tf.expand_dims(value, axis=0),
[activation.get_shape().as_list()[0], 1])
activated_value = tf.reduce_sum(
tf.multiply(value_tiled, activation), axis=1)
return _ops.safe_div(activated_value, activation_total)
def compute_eta(learning_rate,
activation,
value,
active,
name_or_scope=None,
is_debug=False):
"""Return the new scaling for the learning rate.
Args:
activation (active): A tensor with the activations.
value (max_centers): A variable containing the value for the center.
active (0): A variable with the number of active centers.
Returns:
A tensor of shape [active, 1].
"""
with tf.variable_scope(name_or_scope,
default_name='compute_eta',
values=[activation, value, active]):
activation.get_shape().assert_has_rank(1)
active.get_shape().assert_has_rank(0)
def _empty():
return tf.constant(learning_rate)
def _eta():
active_ = _ops.print_debug(
active, [active], 'compute_eta_active', is_debug=is_debug)
value_sliced = tf.slice(value, begin=[0], size=[active_])
activation_ = _ops.print_debug(
activation,
[activation[active_ - 1], value_sliced],
'compute_eta_activation_value',
is_debug=is_debug)
eta = tf.multiply(value_sliced, activation_)
activation_total = tf.reduce_sum(activation)
return tf.cond(
tf.equal(activation_total, 0),
_empty,
lambda: _ops.safe_div(tf.reduce_sum(eta), activation_total))
return tf.cond(tf.equal(active, 0), _empty, _eta)
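# compute_eta above is an activation-weighted average of the stored per-centre
# values, falling back to the plain learning rate when nothing is active. A
# hand-worked example of the weighting (all numbers made up):
#     activations = [0.5, 0.25], values = [0.1, 0.3]
#     eta = (0.5 * 0.1 + 0.25 * 0.3) / (0.5 + 0.25) = 0.125 / 0.75 ~= 0.1667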
def update_memory(activation,
activation_used,
activation_count,
grad_cur,
grad_prev,
grad_data,
value,
center,
sigma,
active,
threshold_activation,
update_beta,
sigma_init,
name_or_scope=None,
is_debug=False):
"""Update the memory by either creating a new center or updating a one.
"""
with tf.variable_scope(name_or_scope,
default_name='update_memory',
values=[activation, grad_cur, grad_prev, value,
center, sigma, active, threshold_activation,
update_beta, sigma_init]):
activation.get_shape().assert_has_rank(1)
activation_used.get_shape().assert_has_rank(1)
activation_count.get_shape().assert_has_rank(1)
grad_prev = _ops.print_debug(grad_prev,
[update_beta, grad_cur, grad_prev],
"update_memory_beta_grad_cur_grad_prev",
is_debug=is_debug)
tf.summary.scalar('update_memory_active', active)
# tf.summary.scalar('update_memory_grad_cur', grad_cur)
# Franzi: apparently the gradients have rank 0
# grad_cur.get_shape().assert_has_rank(0)
# grad_prev.get_shape().assert_has_rank(0)
# center.get_shape().assert_has_rank(1)
sigma.get_shape().assert_has_rank(1)
active.get_shape().assert_has_rank(0)
barrier_max = tf.constant(1, dtype=tf.float32)
barrier_min = -barrier_max
grad_dotproduct = tf.reduce_sum(tf.multiply(grad_prev, grad_cur))
grad_data_shape = grad_data.get_shape().as_list()
sign_f = tf.minimum(barrier_max, tf.maximum(
barrier_min, grad_dotproduct))
sign_f = _ops.print_debug(
sign_f, [sign_f, grad_dotproduct], 'sign_f, grad_dotproduct',
is_debug=is_debug)
tf.summary.scalar('sign_f', sign_f)
def _add_center_empty():
with tf.variable_scope('_add_center_free'):
index = active
active_update = tf.add(active, tf.constant(1, dtype=tf.int32))
activation_used_update = tf.constant([1], dtype=tf.int32)
activation_count_update = tf.constant([1], dtype=tf.int32)
# we should use the value of the closest neighbor
value_update = tf.reshape(update_beta, [1])
center_update = grad_data
sigma_update = tf.reshape(sigma_init, [1])
return (
index,
active_update,
activation_used_update,
activation_count_update,
value_update,
center_update,
sigma_update)
def _add_center_free():
with tf.variable_scope('_add_center_free'):
index = active
# pick the index with largest activation
index_closest = tf.to_int32(tf.argmax(activation, axis=0))
                active_update =
# envi/archs/arm/disasm.py
import sys
import struct
import traceback
import envi
import envi.bits as e_bits
from envi.archs.arm.const import *
from envi.archs.arm.regs import *
# FIXME: codeflow needs to identify the following pattern as a call with fallthrough
# (currently identifying the xref and making the fallthrough into a function):
# mov lr, pc
# sub pc, <blah>
# Possible future extensions:
# * VectorPointFloat subsystem (coproc 10+11)
# * Debug subsystem (coproc 14)
# * other 'default' coprocs we can handle and add value?
# FIXME this seems to be universal...
def addrToName(mcanv, va):
sym = mcanv.syms.getSymByAddr(va)
if sym is not None:
return repr(sym)
return "0x%.8x" % va
# The keys in this table are made of the
# concat of bits 27-21 and 7-4 (only when
# ienc == mul!
iencmul_codes = {
# Basic multiplication opcodes
0b000000001001: ("mul", INS_MUL, (0, 4, 2), 0),
0b000000011001: ("mul", INS_MUL, (0, 4, 2), IF_PSR_S),
0b000000101001: ("mla", INS_MLA, (0, 4, 2, 1), 0),
0b000000111001: ("mla", INS_MLA, (0, 4, 2, 1), IF_PSR_S),
0b000001101001: ("mls", INS_MLS, (0, 4, 2, 1), 0),
0b000001001001: ("umaal", INS_UMAAL,(1, 0, 4, 2), 0),
0b000010001001: ("umull", INS_UMULL, (1, 0, 4, 2), 0),
0b000010011001: ("umull", INS_UMULL, (1, 0, 4, 2), IF_PSR_S),
0b000010101001: ("umlal", INS_UMLAL, (1, 0, 4, 2), 0),
0b000010111001: ("umlal", INS_UMLAL, (1, 0, 4, 2), IF_PSR_S),
0b000011001001: ("smull", INS_SMULL, (1, 0, 4, 2), 0),
0b000011011001: ("smull", INS_SMULL, (1, 0, 4, 2), IF_PSR_S),
0b000011101001: ("smlal", INS_SMLAL, (1, 0, 4, 2), 0),
0b000011111001: ("smlal", INS_SMLAL, (1, 0, 4, 2), IF_PSR_S),
# multiplys with <x><y>
# "B
0b000100001000: ("smlabb", INS_SMLABB, (0, 4, 2, 1), 0),
0b000100001010: ("smlatb", INS_SMLATB, (0, 4, 2, 1), 0),
0b000100001100: ("smlabt", INS_SMLABT, (0, 4, 2, 1), 0),
0b000100001110: ("smlatt", INS_SMLATT, (0, 4, 2, 1), 0),
0b000100101010: ("smulwb", INS_SMULWB, (0, 4, 2), 0),
0b000100101110: ("smulwt", INS_SMULWT, (0, 4, 2), 0),
0b000100101000: ("smlawb", INS_SMLAWB, (0, 4, 2), 0),
0b000100101100: ("smlawt", INS_SMLAWT, (0, 4, 2), 0),
0b000101001000: ("smlalbb",INS_SMLALBB, (1, 0, 4, 2), 0),
0b000101001010: ("smlaltb",INS_SMLALTB, (1, 0, 4, 2), 0),
0b000101001100: ("smlalbt",INS_SMLALBT, (1, 0, 4, 2), 0),
0b000101001110: ("smlaltt",INS_SMLALTT, (1, 0, 4, 2), 0),
0b000101101000: ("smulbb", INS_SMULBB, (0, 4, 2), 0),
0b000101101010: ("smultb", INS_SMULTB, (0, 4, 2), 0),
0b000101101100: ("smulbt", INS_SMULBT, (0, 4, 2), 0),
0b000101101110: ("smultt", INS_SMULTT, (0, 4, 2), 0),
# type 2 multiplys
0b011100000001: ("smuad", INS_SMUAD, (0, 4, 2), 0),
0b011100000011: ("smuadx", INS_SMUADX, (0, 4, 2), 0),
0b011100000101: ("smusd", INS_SMUSD, (0, 4, 2), 0),
0b011100000111: ("smusdx", INS_SMUSDX, (0, 4, 2), 0),
0b011100000001: ("smlad", INS_SMLAD, (0, 4, 2, 1), 0),
0b011100000011: ("smladx", INS_SMLADX, (0, 4, 2, 1), 0),
0b011100000101: ("smlsd", INS_SMLSD, (0, 4, 2, 1), 0),
0b011100000111: ("smlsdx", INS_SMLSDX, (0, 4, 2, 1), 0),
0b011101000001: ("smlald", INS_SMLALD, (1, 0, 4, 2), 0),
0b011101000011: ("smlaldx",INS_SMLALDX, (1, 0, 4, 2), 0),
0b011101000101: ("smlsld", INS_SMLSLD, (1, 0, 4, 2), 0),
0b011101000111: ("smlsldx",INS_SMLSLDX, (1, 0, 4, 2), 0),
0b011101010001: ("smmla", INS_SMMLA, (0, 4, 2, 1), 0),
0b011101010011: ("smmlar", INS_SMMLAR, (0, 4, 2, 1), 0),
0b011101011101: ("smmls", INS_SMMLS, (0, 4, 2, 1), 0),
0b011101011111: ("smmlsr", INS_SMMLSR, (0, 4, 2, 1), 0),
#note for next two must check that Ra = 1111 otherwise is smmla
#hard coding values until find better solution
#0b011101010001: ("smmul", (0,4,2), 0),
#0b011101010011: ("smmulr", (0,4,2), 0),
}
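# A hedged sketch (not taken from this module) of how a 12-bit key such as the
# ones above might be assembled from a raw opcode before the table lookup.
# The header comment above mentions bits 27-21 and 7-4, but the table also
# separates "mul" from "muls", which differ only in the S bit (bit 20), so this
# sketch concatenates bits 27-20 with bits 7-4.
def _mul_table_key(opval):
    return (((opval >> 20) & 0xff) << 4) | ((opval >> 4) & 0xf)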
def sh_lsl(num, shval, size=4, emu=None):
return (num&e_bits.u_maxes[size]) << shval
def sh_lsr(num, shval, size=4, emu=None):
return (num&e_bits.u_maxes[size]) >> shval
def sh_asr(num, shval, size=4, emu=None):
return num >> shval
def sh_ror(num, shval, size=4, emu=None):
return ((num >> shval) | (num << ((8*size)-shval))) & e_bits.u_maxes[size]
def sh_rrx(num, shval, size=4, emu=None):
# shval should always be 0
newC = num & 1
if emu is not None:
oldC = emu.getFlag(PSR_C_bit)
if emu.getMeta('forrealz', False):
emu.setFlag(PSR_C_bit, newC)
else:
# total hack! should we just bomb here without an emu?
oldC = 0
half1 = (num&e_bits.u_maxes[size]) >> 1
half2 = oldC<<(31)
retval = (half1 | half2 | (oldC << (32-shval))) & e_bits.u_maxes[size]
return retval
shifters = (
sh_lsl,
sh_lsr,
sh_asr,
sh_ror,
sh_rrx,
)
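# The tuple above is indexed by the shift-type constants from the const module
# (S_LSL, S_LSR, S_ASR, S_ROR, S_RRX), so an immediate barrel shift is applied
# roughly as: shifted = shifters[shtype](value, shift_amount, size=4).
# For example, shifters[0](0x1, 4) == 0x10 (a logical shift left by four).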
####################################################################
# Mnemonic tables for opcode based mnemonic lookup
# Dataprocessing mnemonics
dp_mnem = (
("and", INS_AND),
("eor", INS_EOR),
("sub", INS_SUB),
("rsb", INS_RSB),
("add", INS_ADD),
("adc", INS_ADC),
("sbc", INS_SBC),
("rsc", INS_RSC),
("tst", INS_TST),
("teq", INS_TEQ),
("cmp", INS_CMP),
("cmn", INS_CMN),
("orr", INS_ORR),
("mov", INS_MOV),
("bic", INS_BIC),
("mvn", INS_MVN),
("adr", INS_ADR) # added
)
dp_shift_mnem = (
("lsl", INS_LSL),
("lsr", INS_LSR),
("asr", INS_ASR),
("ror", INS_ROR),
("rrx", INS_RRX),
)
dp_noRn = (INS_MOV,INS_MVN)
dp_noRd = (INS_TST,INS_TEQ,INS_CMP,INS_CMN)
dp_silS = dp_noRd
# IF_PSR_S_SIL is silent s for tst, teq, cmp cmn
DP_PSR_S = [IF_PSR_S for x in range(17)]
for x in dp_silS:
DP_PSR_S[x] |= IF_PSR_S_SIL
# somehow this list has vanished into the ether. add seems like the right one here.
dp_ADR = (INS_SUB, INS_ADD,)
# FIXME: !!! Don't make SBZ and SBO's part of the list of opers !!!
# first parm SBZ: mov,mvn
# second parm SBZ: tst,teq,cmp,cmn,
def dpbase(opval):
"""
Parse and return opcode,sflag,Rn,Rd for a standard
dataprocessing instruction.
"""
ocode = (opval >> 21) & 0xf
sflag = (opval >> 20) & 0x1
Rn = (opval >> 16) & 0xf
Rd = (opval >> 12) & 0xf
#print "DPBASE:",ocode,sflag,Rn,Rd
return ocode,sflag,Rn,Rd
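# A worked example of the field split performed by dpbase (illustrative only):
# the word 0xE0811002 encodes "add r1, r1, r2", and
#     dpbase(0xE0811002) -> (4, 0, 1, 1)
# i.e. opcode 4 ("add" in dp_mnem), S flag clear, Rn = r1, Rd = r1.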
####################################################################
# Parser functions for each of the instruction encodings
def p_dp_imm_shift(opval, va):
ocode,sflag,Rn,Rd = dpbase(opval)
Rm = opval & 0xf
shtype = (opval >> 5) & 0x3
shval = (opval >> 7) & 0x1f # effectively, rot*2
if (shtype==S_ROR) & (shval ==0): # is it an rrx?
shtype = S_RRX
mnem, opcode = dp_mnem[ocode]
iflags = 0
if ocode in dp_noRn:
#is it a mov? Only if shval is a 0, type is lsl, and ocode = 13
if (ocode == INS_MOV) and ((shval != 0) or (shtype != S_LSL)):
mnem, opcode = dp_shift_mnem[shtype]
if shtype != S_RRX: #if not rrx
if shtype in (S_ASR, S_LSR) and shval == 0:
shval = 32
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
ArmImmOper(shval, va=va),
)
else:
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rm, va=va),
)
else:
olist = (
ArmRegOper(Rd, va=va),
ArmRegShiftImmOper(Rm, shtype, shval, va),
)
# case: mov pc, lr
if Rd == REG_PC:
if Rm == REG_LR:
iflags |= envi.IF_RET | envi.IF_NOFALL
else:
iflags |= envi.IF_BRANCH
elif ocode in dp_noRd:
olist = (
ArmRegOper(Rn, va=va),
ArmRegShiftImmOper(Rm, shtype, shval, va),
)
else:
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegShiftImmOper(Rm, shtype, shval, va),
)
if sflag > 0:
iflags |= DP_PSR_S[ocode]
return (opcode, mnem, olist, iflags, 0)
# specialized mnemonics for p_misc
qop_mnem = (('qadd', INS_QADD),('qsub', INS_QSUB),('qdadd', INS_QDADD),('qdsub', INS_QDSUB)) # used in misc1
smla_mnem = (('smlabb', INS_SMLABB),('smlatb', INS_SMLATB),('smlabt', INS_SMLATB),('smlatt', INS_SMLATB),)
smlal_mnem = (('smlalbb', INS_SMLALBB),('smlaltb', INS_SMLALTB),('smlalbt', INS_SMLALTB),('smlaltt', INS_SMLALTB),)
smul_mnem = (('smulbb', INS_SMULBB),('smultb', INS_SMULTB),('smulbt', INS_SMULTB),('smultt', INS_SMULTB),)
smlaw_mnem = (('smlawb', INS_SMLAWB),('smlawt', INS_SMLAWT),)
smulw_mnem = (('smulwb', INS_SMULWB),('smulwt', INS_SMULWT),)
def p_misc(opval, va):
    # 0x0f900000 = 0x01000000 or 0x01000010 (misc and misc1 are both parsed at the same time; see footnote [2] on dp instructions in the Atmel AT91SAM7 docs)
#Including SBO and SBZ - rearranged for most exclusive to least
#updated reference names to match v7 reference ie Rm Rn Rd Rs m n etc
#if opval & 0x0ff000f0 == 0x01200020:
if opval & 0x0FFFFFF0 == 0x012FFF20:
opcode = INS_BXJ
mnem = 'bxj'
Rm = opval & 0xf
olist = ( ArmRegOper(Rm, va=va), )
#elif opval & 0x0fb002f0 == 0x01200000:
elif opval & 0x0DB0F000 == 0x0120F000:
opcode = INS_MSR
mnem = 'msr' # register. immediate has it's own parser in the 001 section
r = (opval>>22) & 1
Rn = (opval) & 0xf
mask = (opval>>16) & 0xf
olist = (
ArmPgmStatRegOper(r, mask),
ArmRegOper(Rn, va=va),
)
#smla
#Mask and value are OK
elif opval & 0x0FF00090 == 0x01000080:
mn = (opval>>5)&3
mnem, opcode = smla_mnem[mn]
Rd = (opval>>16) & 0xf
Ra = (opval>>12) & 0xf
Rm = (opval>>8) & 0xf
Rn = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Ra, va=va),
)
#smlaw
#mask and value are OK
elif opval & 0x0ff000b0 == 0x01200080:
m = (opval>>6)&1
mnem, opcode = smlaw_mnem[m]
Rd = (opval>>16) & 0xf
Ra = (opval>>12) & 0xf
Rm = (opval>>8) & 0xf
Rn = opval & 0xf
olist = (
ArmRegOper(Rd, va=va),
ArmRegOper(Rn, va=va),
ArmRegOper(Rm, va=va),
ArmRegOper(Ra, va=va),
)
#smulw
#mask and value are ok
    elif opval & 0x0ff000b0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This module provides scrapers for a variety of feeds and web pages. """
import os
import re
import time
import json
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from helpers import kill_firefox, fix_double_quotes
### Bs4 based scrapers ###
class WikiPerson:
"""Information about a person entity gleaned from Wikipedia """
def __init__(self, name_or_url):
if re.search(r"^http", name_or_url):
self.url = name_or_url
self.name = re.sub(r"_", " ", self.url.split(r"\/")[-1])
else:
self.name = name_or_url
self.url = "https://wikipedia.org/wiki/{}".format(
re.sub(r"\s+", "_", name_or_url)
)
request = requests.get(self.url)
self.found = False
self.canonical_name = None
self.bio = None
if request.status_code == 200:
soup = BeautifulSoup(request.text, "html.parser")
self.canonical_name = soup.find("h1").text
for element in soup.findAll("p"):
bold = [b.text for b in element.findAll("b")]
if bold:
self.found = True
self.bio = element
if self.canonical_name not in bold:
self.canonical_name = bold[0]
break
del soup
@property
def full_name(self):
"""Person's full name """
if self.found:
return self.bio.find("b").text
return None
@property
def gender(self):
"""Person's gender (if discoverable) """
if re.search(r"\s[Ss]he\s|\s[Hh]er\s", self.bio.text):
return "Female"
if re.search(r"\s+[Hh]e\s|\s[Hh]is\s", self.bio.text):
return "Male"
return "Unspecified"
def __repr__(self):
return "<WikiPerson {}>".format(self.full_name)
class WikiOrg:
"""Information about an orgaization entity gleaned from Wikipedia """
# pylint: disable=too-many-instance-attributes
# Eight is reasonable in this case.
# pylint: disable=too-few-public-methods
# Wiki lookups are like french eggs: one is 'un oeuf'
def __init__(self, name_or_url):
"""Scrape available org info from wikipedia """
self.determiner = False
if re.search(r"^http", name_or_url):
self.url = name_or_url
self.name = re.sub(r"_", " ", self.url.split(r"\/")[-1])
else:
if re.search(r"^the\s+", name_or_url, flags=re.IGNORECASE):
self.determiner = True
self.name = re.sub(r"^the\s+", "", name_or_url, flags=re.IGNORECASE)
self.url = "https://wikipedia.org/wiki/{}".format(
re.sub(r"\s+", "_", self.name)
)
self.canonical_name = None
self.abbr = None
self.found = False
self.description = None
request = requests.get(self.url)
if request.status_code == 200:
soup = BeautifulSoup(request.text, "html.parser")
self.canonical_name = soup.find("h1").text
for element in soup.findAll("p"):
self.bold = [b.text for b in element.findAll("b")]
if self.canonical_name and self.canonical_name in self.bold:
self.found = True
self.description = element
try:
if re.search(r"^[A-Z\.]+", self.bold[1]):
self.abbr = self.bold[1]
except IndexError:
pass
break
del soup
def __repr__(self):
return "<WikiOrg {}>".format(self.canonical_name)
class WikiGPE:
"""Information about an geopolitical entity gleaned from Wikipedia """
# pylint: disable=too-few-public-methods
# Wiki lookups are like french eggs: one is 'un oeuf'
def __init__(self, name_or_url):
"""Scrape available geo-political entity info fromm wikipedia """
self.determiner = False
if re.search(r"^http", name_or_url):
self.url = name_or_url
            self.name = re.sub(r"_", " ", self.url.split("/")[-1])
else:
if re.search(r"^the\s+", name_or_url, flags=re.IGNORECASE):
self.determiner = True
self.name = re.sub(r"^the\s+", "", name_or_url, flags=re.IGNORECASE)
self.url = "https://wikipedia.org/wiki/{}".format(
re.sub(r"\s+", "_", self.name)
)
self.canonical_name = None
self.abbr = None
self.found = False
self.description = None
request = requests.get(self.url)
if request.status_code == 200:
soup = BeautifulSoup(request.text, "html.parser")
self.canonical_name = soup.find("h1").text
            for element in soup.findAll("p"):
bold = [b.text for b in element.findAll("b")]
if self.canonical_name and self.canonical_name in bold:
self.found = True
self.description = element
try:
if re.search(r"^[A-Z\.]+", bold[1]):
self.abbr = bold[1]
except IndexError:
pass
break
del soup
def __repr__(self):
return "<WikiGPE {}>".format(self.canonical_name)
class APArticle:
""" AP Article contents fetched and scraped from the specified url."""
def __init__(self, url):
"""Fetch and scrape news article
ARGS:
url (required)
"""
self.url = url
self._title = None
self._byline = None
self._timestamp = None
self._content = None
request = requests.get(url)
if request.status_code == 200:
print("Article page loaded from {}".format(self.url))
by_pat = re.compile(r"bylines")
time_pat = re.compile(r"timestamp", flags=re.IGNORECASE)
story_pat = re.compile(
r"^.*?storyHTML\"\:\"\\+u003cp>(.*)\}?", flags=re.MULTILINE
)
soup = BeautifulSoup(request.text, "html.parser")
self._title = soup.find("title").text
for span in (s for s in soup.find_all("span") if "class" in s.attrs):
for class_name in span.attrs["class"]:
if by_pat.search(class_name):
self._byline = span.text
if time_pat.search(class_name):
self._timestamp = span.attrs["data-source"]
print("Title: {}".format(self._title))
print("Byline: {}".format(self._byline))
story_html = re.sub(r"\\+u003c", "<", story_pat.search(request.text)[1])
story_html = re.sub(r"\\+", "", story_html)
soup = BeautifulSoup(story_html, "html.parser")
paragraphs = [fix_double_quotes(p.text) for p in soup.find_all("p")]
end = sorted([p for p in paragraphs if re.match(r"^_+$", p)], key=len)[0]
self._content = {
"html": story_html,
"text": "\n".join(paragraphs[: paragraphs.index(end)]),
}
@property
def title(self):
"""Article title (headline) """
return self._title
@property
def byline(self):
"""Article byline """
return self._byline
@property
def timestamp(self):
"""Artice timestamp """
return self._timestamp
@property
def content(self):
"""Dict containing the article's text and html """
return self._content
def __repr__(self):
return "<APArticle object: title={}, timestamp={}, url={}>".format(
self.title, self.timestamp, self.url
)
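# Hedged usage sketch (added for illustration): the URL below is a placeholder; any
# live apnews.com story URL should work as long as the site's markup still matches
# the patterns scraped above.
def _demo_ap_article():
    """Fetch one AP story and show its metadata plus the first few lines."""
    article = APArticle("https://apnews.com/article/example-story-id")  # placeholder URL
    print(article.title, article.byline, article.timestamp)
    print(article.content["text"][:300])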
### Selenium based scrapers ###
class HeavyScraper:
"""A resource intensive, selemium-based Soup-Nazi countermeasure
(Base class for scrapers requiring gekodriver instead of Beautiful Soup)
"""
# pylint: disable=too-few-public-methods
# These scrapers are meant to be instantiated once and discarded
def __init__(self, url=None):
"""ARGS: url ; DEFAULT: None """
self.url = url
options = Options()
options.headless = True
options.add_argument("--window-size=1920,1200")
options.add_argument("--incognito")
self.driver = webdriver.Firefox(options=options)
self.driver.implicitly_wait(3)
def __repr__(self):
return "<HeavyScraper object: url={}>".format(self.url)
class Trends(HeavyScraper):
"""Top Google Search terms scraped from Google Trends"""
url = "https://trends.google.com/trends/trendingsearches/daily?geo=US"
def __init__(self):
""" Fetch search terms and immediately close the marionette driver"""
super().__init__(self.url)
self.driver.get(self.url)
self._trends = [
(
topic.text.split("\n")[1],
topic.text.split("\n")[2],
topic.text.split("\n")[6],
)
for topic in self.driver.find_elements_by_class_name("feed-item")
]
self.driver.close()
self.driver.quit()
kill_firefox()
del self.driver
@property
def trends(self):
"""List of tuples containing data scraped fro feet-items """
return self._trends
@property
def ngrams(self):
"""Trending colocations fro google searches """
return [n[0] for n in self._trends]
def __repr__(self):
return "<Trends object: url={}>".format(self.url)
class APHeadlines(HeavyScraper):
""" Scrape AP News Topics and optionally retrieve headlines by topic """
# pylint: disable=too-few-public-methods
# These scrapers are meant to be instantiated once and discarded
topic_list = []
url = "https://apnews.com/"
def __init__(self, topic_id=0):
"""Fetch topics and immediatly close the marionette driver.
If the topic_id arg is supplied, headlines filed under that
topic are also retrieved before closing the marionette driver.
"""
super().__init__(self.url)
self.driver.get(self.url)
self.headlines = []
self.ap_nav = self.driver.find_elements_by_class_name("nav-action")
print("Got AP Nav")
time.sleep(3)
self.ap_nav[1].click()
time.sleep(3)
self.topic_nav = self.driver.find_element_by_class_name(
"TopicsDropdown"
).find_elements_by_tag_name("li")
# create_topic_list
for index, element in enumerate(self.topic_nav):
if index > 0:
self.topic_list.append((index, element.text))
if topic_id > 0:
topic = self.topic_nav[topic_id]
time.sleep(3)
if not topic.find_element_by_tag_name("a").is_displayed():
self.ap_nav[1].click()
time.sleep(1)
print(
"Navigating to {}".format(
topic.find_element_by_tag_name("a").get_attribute("href")
)
)
topic.find_element_by_tag_name("a").click()
time.sleep(3)
self.url = self.driver.current_url
print("{} is loaded; retrieving headlines ...".format(self.url))
stories = self.driver.find_elements_by_class_name("FeedCard")
for story in stories:
try:
# pylint: disable=broad-except
# These are triggered by ads and countermeasures
# no need to handle; note them and move on
if story.location_once_scrolled_into_view:
txt = story.text
href = story.find_element_by_tag_name("a").get_attribute("href")
self.headlines.append((self.driver.title, href, txt))
except Exception as err:
print(f"Failed to load headline:\n{err}")
self.driver.close()
self.driver.quit()
kill_firefox()
def __repr__(self):
return "<APHeadlines object: url={}>".format(self.url)
class Aggregator:
""" Collect News Headlines and Stories """
def __init__(self):
""" Delcare private vars and retrieve the topic list """
self._topics = []
self._headlines = []
self._stories = []
if os.path.isfile("topics.json"):
self.restore_ap_topics()
else:
self.refresh_ap_topics()
def refresh_ap_topics(self):
""" Collects the list of AP News topics and caches it """
headlines = APHeadlines()
self._topics = headlines.topic_list
self.cache_ap_topics()
def cache_ap_topics(self):
""" Dumps self._.topics too json file """
with open("topics.json", "w+") as outfile:
json.dump(self._topics, outfile)
def restore_ap_topics(self):
""" Reads previously cached topics back into self._topics """
with open("topics.json", "r") as infile:
self._topics = json.load(infile)
def collect_ap_headlines(self):
""" Collects AP Headlines by topic in self._hadlines.
Retruns self._headlines
"""
self._headlines = []
for topic in self._topics:
try:
# pylint: disable=broad-except
# These are triggered by ads and countermeasures
# no need to handle; note them and move on
top = APHeadlines(topic[0])
self._headlines.extend(top.headlines)
except Exception as ex:
print(ex)
kill_firefox()
time.sleep(3)
continue
self.cache_headlines()
return self._headlines
def cache_headlines(self):
""" Dumps self._headlines to json file """
if os.path.exists("headlines.json"):
os.rename("headlines.json", "headlines.bak")
with open("headlines.json", "w+") as outfile:
json.dump(self._headlines, outfile)
def restore_headlines(self):
""" Reads previously cached headlines back into self._headlines """
try:
with open("headlines.json", "r") as infile:
self._headlines = json.load(infile)
except IOError as err:
print("Can't read from 'headlines.json': {}".format(err))
def fetch_ap_article(self, url):
""" Fetches a new APArticle and appends its content to stories
ARGS: url
"""
if re.search(r"apnews", url):
try:
# pylint: disable=broad-except
# These are triggered by ads and countermeasures
# no need to handle; note them and move on
article = APArticle(url)
self._stories.append(article)
except Exception as ex:
kill_firefox()
time.sleep(3)
print("Unable to retrieve article", ex)
@property
def topics(self):
"""List of topics """
return self._topics
@property
def headlines(self):
"""List of headlines """
return self._headlines
@property
def stories(self):
"""List of stories """
return self._stories
def __repr__(self):
return "<Aggregator object - properties: {}>".format(
"'topics', 'headlines', 'stories'"
)
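# Hedged usage sketch (added for illustration): network- and Selenium-dependent.
# It simply chains the public methods above; picking the first AP url found is an
# arbitrary choice for the example.
def _demo_aggregator():
    """Collect headlines, then fetch and print the first AP story found."""
    agg = Aggregator()
    agg.collect_ap_headlines()
    ap_urls = [href for _, href, _ in agg.headlines if re.search(r"apnews", href)]
    if ap_urls:
        agg.fetch_ap_article(ap_urls[0])
        print(agg.stories[0].title)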
if __name__ == "__main__":
import unittest
# from tests import TestSeleniumScrapers
    # from tests import
if 42 - 42: iII111i % i1IIi + Ii1I
if 74 - 74: O0 * I11i * OoOoOO00 / Ii1I / iIii1I11I1II1 * I1IiiI
if 59 - 59: I1IiiI - OoOoOO00 * ooOoO0o / O0
if 54 - 54: Oo0Ooo % iIii1I11I1II1 * Oo0Ooo
i1I1i1Iiiiiii = { }
for Ooooo00 in lisp_db_list :
for iIII in Ooooo00 . rloc_set :
lisp_update_local_rloc ( iIII )
if ( iIII . rloc . is_null ( ) ) : continue
if ( iIII . interface == None ) : continue
if 80 - 80: I1ii11iIi11i - I1ii11iIi11i
IiiIIi1 = iIII . rloc . print_address_no_iid ( )
if ( IiiIIi1 in i1I1i1Iiiiiii ) : continue
i1I1i1Iiiiiii [ IiiIIi1 ] = iIII . interface
if 26 - 26: I1ii11iIi11i - I1IiiI * I1Ii111 % iIii1I11I1II1
if 77 - 77: o0oOOo0O0Ooo + I1Ii111 . OOooOOo . i1IIi . I1IiiI
if ( i1I1i1Iiiiiii == { } ) :
lprint ( 'Suppress Info-Request, no "interface = <device>" RLOC ' + "found in any database-mappings" )
if 100 - 100: ooOoO0o . i11iIiiIii + Ii1I - OOooOOo - i11iIiiIii - OoooooooOO
return
if 42 - 42: OoOoOO00 . I1IiiI / OoOoOO00 / I1ii11iIi11i . OoO0O00
if 67 - 67: Ii1I - O0 . OoooooooOO . I1Ii111 . o0oOOo0O0Ooo
if 73 - 73: I11i - oO0o . I1Ii111 + oO0o
if 48 - 48: IiII . IiII * o0oOOo0O0Ooo * II111iiii % ooOoO0o
if 40 - 40: I1ii11iIi11i
if 76 - 76: Oo0Ooo - I11i
for IiiIIi1 in i1I1i1Iiiiiii :
II1i = i1I1i1Iiiiiii [ IiiIIi1 ]
OO0o = red ( IiiIIi1 , False )
lprint ( "Build Info-Request for private address {} ({})" . format ( OO0o ,
II1i ) )
OoO0o0OOOO = II1i if len ( i1I1i1Iiiiiii ) > 1 else None
for dest in I1i1Ii111 :
lisp_send_info_request ( lisp_sockets , dest , port , OoO0o0OOOO )
if 82 - 82: OoO0O00 % oO0o . I11i / O0 - I1Ii111
if 39 - 39: I1IiiI
if 8 - 8: IiII * i1IIi * i1IIi * O0
if 69 - 69: Oo0Ooo
if 48 - 48: iII111i
if 11 - 11: i11iIiiIii * OoOoOO00 . OoO0O00
if ( Oo0oOoOO0o != [ ] ) :
for O0o00000o0O in lisp_map_resolvers_list . values ( ) :
O0o00000o0O . resolve_dns_name ( )
if 47 - 47: Oo0Ooo % I1Ii111 + ooOoO0o
if 89 - 89: iII111i
return
if 29 - 29: I1ii11iIi11i . ooOoO0o * II111iiii / iII111i . OoooooooOO - OoOoOO00
if 99 - 99: IiII % O0 - I1Ii111 * OoO0O00
if 77 - 77: OoooooooOO - I11i / I1IiiI % OoOoOO00 - OOooOOo
if 37 - 37: ooOoO0o
if 22 - 22: I1ii11iIi11i + II111iiii / OoooooooOO % o0oOOo0O0Ooo * OoOoOO00 . Oo0Ooo
if 26 - 26: OoO0O00 % oO0o * Ii1I % OoooooooOO - oO0o
if 46 - 46: I1IiiI + OoO0O00 - O0 * O0
if 75 - 75: OOooOOo + iIii1I11I1II1 * OOooOOo
def lisp_valid_address_format ( kw , value ) :
if ( kw != "address" ) : return ( True )
if 82 - 82: iII111i - I1Ii111 - OoOoOO00
if 96 - 96: Oo0Ooo . Oo0Ooo % o0oOOo0O0Ooo - I1IiiI * iIii1I11I1II1
if 29 - 29: i1IIi / Ii1I / oO0o * iII111i
if 44 - 44: O0
if 95 - 95: OOooOOo + OOooOOo - OoOoOO00
if ( value [ 0 ] == "'" and value [ - 1 ] == "'" ) : return ( True )
if 83 - 83: II111iiii * ooOoO0o - O0 - i11iIiiIii
if 62 - 62: I1IiiI + II111iiii * iIii1I11I1II1 % iII111i + IiII / ooOoO0o
if 14 - 14: iIii1I11I1II1 * I1ii11iIi11i + OOooOOo + O0
if 79 - 79: II111iiii - iII111i
if ( value . find ( "." ) != - 1 ) :
IiiIIi1 = value . split ( "." )
if ( len ( IiiIIi1 ) != 4 ) : return ( False )
if 89 - 89: O0 - OoO0O00
for IIIII in IiiIIi1 :
if ( IIIII . isdigit ( ) == False ) : return ( False )
if ( int ( IIIII ) > 255 ) : return ( False )
if 17 - 17: Ii1I * i11iIiiIii - I1IiiI
return ( True )
if 27 - 27: IiII . iII111i * I1ii11iIi11i
if 49 - 49: oO0o % iII111i
if 42 - 42: iII111i
if 74 - 74: Oo0Ooo / Ii1I / iIii1I11I1II1 + o0oOOo0O0Ooo
if 17 - 17: OOooOOo
if ( value . find ( "-" ) != - 1 ) :
IiiIIi1 = value . split ( "-" )
for IiIIi1IiiIiI in [ "N" , "S" , "W" , "E" ] :
if ( IiIIi1IiiIiI in IiiIIi1 ) :
if ( len ( IiiIIi1 ) < 8 ) : return ( False )
return ( True )
if 75 - 75: Ii1I / i1IIi % I1ii11iIi11i . Ii1I
if 46 - 46: II111iiii * OoO0O00
if 77 - 77: ooOoO0o * I11i
if 85 - 85: OoO0O00 * I1Ii111 - OoooooooOO / iIii1I11I1II1 - i1IIi + Ii1I
if 76 - 76: iII111i * OoooooooOO
if 49 - 49: II111iiii - OOooOOo + II111iiii + OoOoOO00
if 51 - 51: i11iIiiIii
if ( value . find ( "-" ) != - 1 ) :
IiiIIi1 = value . split ( "-" )
if ( len ( IiiIIi1 ) != 3 ) : return ( False )
if 39 - 39: o0oOOo0O0Ooo % I1Ii111 % i1IIi - II111iiii + i11iIiiIii
for OO0O0oOO in IiiIIi1 :
try : int ( OO0O0oOO , 16 )
except : return ( False )
if 22 - 22: Oo0Ooo / OOooOOo - iIii1I11I1II1 / ooOoO0o
return ( True )
if 7 - 7: ooOoO0o . OoooooooOO . iII111i * II111iiii . II111iiii / OOooOOo
if 46 - 46: Ii1I - Oo0Ooo / i1IIi % IiII - I1ii11iIi11i + OOooOOo
if 42 - 42: i1IIi - IiII % OOooOOo % iIii1I11I1II1
if 71 - 71: OoO0O00
if 72 - 72: II111iiii + o0oOOo0O0Ooo / i1IIi * Oo0Ooo / i1IIi
if ( value . find ( ":" ) != - 1 ) :
IiiIIi1 = value . split ( ":" )
if ( len ( IiiIIi1 ) < 2 ) : return ( False )
if 52 - 52: I1Ii111 % OoO0O00 . I1Ii111 * I1ii11iIi11i * OoOoOO00 + i1IIi
o0oo = False
OO = 0
for OO0O0oOO in IiiIIi1 :
OO += 1
if ( OO0O0oOO == "" ) :
if ( o0oo ) :
if ( len ( IiiIIi1 ) == OO ) : break
if ( OO > 2 ) : return ( False )
if 84 - 84: iIii1I11I1II1
o0oo = True
continue
if 44 - 44: O0
try : int ( OO0O0oOO , 16 )
except : return ( False )
if 67 - 67: Oo0Ooo % I11i / I1Ii111 . Oo0Ooo % II111iiii . I1ii11iIi11i
return ( True )
if 86 - 86: OoooooooOO
if 19 - 19: OOooOOo - OOooOOo / iIii1I11I1II1 / I1ii11iIi11i - I1ii11iIi11i / iIii1I11I1II1
if 42 - 42: iIii1I11I1II1 / OOooOOo - O0 * OoooooooOO / i1IIi
if 33 - 33: OOooOOo . o0oOOo0O0Ooo % OoO0O00 - I1Ii111 . OoooooooOO
if 96 - 96: II111iiii % I11i / Ii1I - i11iIiiIii
if ( value [ 0 ] == "+" ) :
IiiIIi1 = value [ 1 : : ]
for oo0Oo in IiiIIi1 :
if ( oo0Oo . isdigit ( ) == False ) : return ( False )
if 70 - 70: OoO0O00 * II111iiii / I11i + I11i
return ( True )
if 23 - 23: I1IiiI
return ( False )
if 51 - 51: i11iIiiIii / ooOoO0o - OoooooooOO + OoOoOO00
<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AttributeToRoleIdentityMapperArgs', 'AttributeToRoleIdentityMapper']
@pulumi.input_type
class AttributeToRoleIdentityMapperArgs:
def __init__(__self__, *,
identity_provider_alias: pulumi.Input[str],
realm: pulumi.Input[str],
role: pulumi.Input[str],
attribute_friendly_name: Optional[pulumi.Input[str]] = None,
attribute_name: Optional[pulumi.Input[str]] = None,
attribute_value: Optional[pulumi.Input[str]] = None,
claim_name: Optional[pulumi.Input[str]] = None,
claim_value: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an AttributeToRoleIdentityMapper resource.
:param pulumi.Input[str] identity_provider_alias: IDP Alias
:param pulumi.Input[str] realm: Realm Name
:param pulumi.Input[str] role: Role Name
:param pulumi.Input[str] attribute_friendly_name: Attribute Friendly Name
:param pulumi.Input[str] attribute_name: Attribute Name
:param pulumi.Input[str] attribute_value: Attribute Value
:param pulumi.Input[str] claim_name: OIDC Claim Name
:param pulumi.Input[str] claim_value: OIDC Claim Value
:param pulumi.Input[str] name: IDP Mapper Name
"""
pulumi.set(__self__, "identity_provider_alias", identity_provider_alias)
pulumi.set(__self__, "realm", realm)
pulumi.set(__self__, "role", role)
if attribute_friendly_name is not None:
pulumi.set(__self__, "attribute_friendly_name", attribute_friendly_name)
if attribute_name is not None:
pulumi.set(__self__, "attribute_name", attribute_name)
if attribute_value is not None:
pulumi.set(__self__, "attribute_value", attribute_value)
if claim_name is not None:
pulumi.set(__self__, "claim_name", claim_name)
if claim_value is not None:
pulumi.set(__self__, "claim_value", claim_value)
if extra_config is not None:
pulumi.set(__self__, "extra_config", extra_config)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="identityProviderAlias")
def identity_provider_alias(self) -> pulumi.Input[str]:
"""
IDP Alias
"""
return pulumi.get(self, "identity_provider_alias")
@identity_provider_alias.setter
def identity_provider_alias(self, value: pulumi.Input[str]):
pulumi.set(self, "identity_provider_alias", value)
@property
@pulumi.getter
def realm(self) -> pulumi.Input[str]:
"""
Realm Name
"""
return pulumi.get(self, "realm")
@realm.setter
def realm(self, value: pulumi.Input[str]):
pulumi.set(self, "realm", value)
@property
@pulumi.getter
def role(self) -> pulumi.Input[str]:
"""
Role Name
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: pulumi.Input[str]):
pulumi.set(self, "role", value)
@property
@pulumi.getter(name="attributeFriendlyName")
def attribute_friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Attribute Friendly Name
"""
return pulumi.get(self, "attribute_friendly_name")
@attribute_friendly_name.setter
def attribute_friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_friendly_name", value)
@property
@pulumi.getter(name="attributeName")
def attribute_name(self) -> Optional[pulumi.Input[str]]:
"""
Attribute Name
"""
return pulumi.get(self, "attribute_name")
@attribute_name.setter
def attribute_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_name", value)
@property
@pulumi.getter(name="attributeValue")
def attribute_value(self) -> Optional[pulumi.Input[str]]:
"""
Attribute Value
"""
return pulumi.get(self, "attribute_value")
@attribute_value.setter
def attribute_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_value", value)
@property
@pulumi.getter(name="claimName")
def claim_name(self) -> Optional[pulumi.Input[str]]:
"""
OIDC Claim Name
"""
return pulumi.get(self, "claim_name")
@claim_name.setter
def claim_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "claim_name", value)
@property
@pulumi.getter(name="claimValue")
def claim_value(self) -> Optional[pulumi.Input[str]]:
"""
OIDC Claim Value
"""
return pulumi.get(self, "claim_value")
@claim_value.setter
def claim_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "claim_value", value)
@property
@pulumi.getter(name="extraConfig")
def extra_config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "extra_config")
@extra_config.setter
def extra_config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "extra_config", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
IDP Mapper Name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _AttributeToRoleIdentityMapperState:
def __init__(__self__, *,
attribute_friendly_name: Optional[pulumi.Input[str]] = None,
attribute_name: Optional[pulumi.Input[str]] = None,
attribute_value: Optional[pulumi.Input[str]] = None,
claim_name: Optional[pulumi.Input[str]] = None,
claim_value: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
identity_provider_alias: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
realm: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering AttributeToRoleIdentityMapper resources.
:param pulumi.Input[str] attribute_friendly_name: Attribute Friendly Name
:param pulumi.Input[str] attribute_name: Attribute Name
:param pulumi.Input[str] attribute_value: Attribute Value
:param pulumi.Input[str] claim_name: OIDC Claim Name
:param pulumi.Input[str] claim_value: OIDC Claim Value
:param pulumi.Input[str] identity_provider_alias: IDP Alias
:param pulumi.Input[str] name: IDP Mapper Name
:param pulumi.Input[str] realm: Realm Name
:param pulumi.Input[str] role: Role Name
"""
if attribute_friendly_name is not None:
pulumi.set(__self__, "attribute_friendly_name", attribute_friendly_name)
if attribute_name is not None:
pulumi.set(__self__, "attribute_name", attribute_name)
if attribute_value is not None:
pulumi.set(__self__, "attribute_value", attribute_value)
if claim_name is not None:
pulumi.set(__self__, "claim_name", claim_name)
if claim_value is not None:
pulumi.set(__self__, "claim_value", claim_value)
if extra_config is not None:
pulumi.set(__self__, "extra_config", extra_config)
if identity_provider_alias is not None:
pulumi.set(__self__, "identity_provider_alias", identity_provider_alias)
if name is not None:
pulumi.set(__self__, "name", name)
if realm is not None:
pulumi.set(__self__, "realm", realm)
if role is not None:
pulumi.set(__self__, "role", role)
@property
@pulumi.getter(name="attributeFriendlyName")
def attribute_friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Attribute Friendly Name
"""
return pulumi.get(self, "attribute_friendly_name")
@attribute_friendly_name.setter
def attribute_friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_friendly_name", value)
@property
@pulumi.getter(name="attributeName")
def attribute_name(self) -> Optional[pulumi.Input[str]]:
"""
Attribute Name
"""
return pulumi.get(self, "attribute_name")
@attribute_name.setter
def attribute_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_name", value)
@property
@pulumi.getter(name="attributeValue")
def attribute_value(self) -> Optional[pulumi.Input[str]]:
"""
Attribute Value
"""
return pulumi.get(self, "attribute_value")
@attribute_value.setter
def attribute_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attribute_value", value)
@property
@pulumi.getter(name="claimName")
def claim_name(self) -> Optional[pulumi.Input[str]]:
"""
OIDC Claim Name
"""
return pulumi.get(self, "claim_name")
@claim_name.setter
def claim_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "claim_name", value)
@property
@pulumi.getter(name="claimValue")
def claim_value(self) -> Optional[pulumi.Input[str]]:
"""
OIDC Claim Value
"""
return pulumi.get(self, "claim_value")
@claim_value.setter
def claim_value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "claim_value", value)
@property
@pulumi.getter(name="extraConfig")
def extra_config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "extra_config")
@extra_config.setter
def extra_config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "extra_config", value)
@property
@pulumi.getter(name="identityProviderAlias")
def identity_provider_alias(self) -> Optional[pulumi.Input[str]]:
"""
IDP Alias
"""
return pulumi.get(self, "identity_provider_alias")
@identity_provider_alias.setter
def identity_provider_alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_provider_alias", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
IDP Mapper Name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def realm(self) -> Optional[pulumi.Input[str]]:
"""
Realm Name
"""
return pulumi.get(self, "realm")
@realm.setter
def realm(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "realm", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role Name
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
class AttributeToRoleIdentityMapper(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attribute_friendly_name: Optional[pulumi.Input[str]] = None,
attribute_name: Optional[pulumi.Input[str]] = None,
attribute_value: Optional[pulumi.Input[str]] = None,
claim_name: Optional[pulumi.Input[str]] = None,
claim_value: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
identity_provider_alias: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
realm: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Create an AttributeToRoleIdentityMapper resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] attribute_friendly_name: Attribute Friendly Name
:param pulumi.Input[str] attribute_name: Attribute Name
:param pulumi.Input[str] attribute_value: Attribute Value
:param pulumi.Input[str] claim_name: OIDC Claim Name
:param pulumi.Input[str] claim_value: OIDC Claim Value
:param pulumi.Input[str] identity_provider_alias: IDP Alias
:param pulumi.Input[str] name: IDP Mapper Name
:param pulumi.Input[str] realm: Realm Name
:param pulumi.Input[str] role: Role Name
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AttributeToRoleIdentityMapperArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Create an AttributeToRoleIdentityMapper resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param AttributeToRoleIdentityMapperArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AttributeToRoleIdentityMapperArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attribute_friendly_name: Optional[pulumi.Input[str]] = None,
attribute_name: Optional[pulumi.Input[str]] = None,
attribute_value: Optional[pulumi.Input[str]] = None,
claim_name: Optional[pulumi.Input[str]] = None,
claim_value: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
identity_provider_alias: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
realm: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AttributeToRoleIdentityMapperArgs.__new__(AttributeToRoleIdentityMapperArgs)
__props__.__dict__["attribute_friendly_name"] = attribute_friendly_name
__props__.__dict__["attribute_name"] = attribute_name
__props__.__dict__["attribute_value"] = attribute_value
__props__.__dict__["claim_name"] = claim_name
__props__.__dict__["claim_value"] = claim_value
__props__.__dict__["extra_config"] = extra_config
if identity_provider_alias is None and not opts.urn:
raise TypeError("Missing required property 'identity_provider_alias'")
__props__.__dict__["identity_provider_alias"] = identity_provider_alias
__props__.__dict__["name"] = name
if realm is None and not opts.urn:
raise TypeError("Missing required property 'realm'")
__props__.__dict__["realm"] = realm
if role is None and not opts.urn:
raise TypeError("Missing required property 'role'")
__props__.__dict__["role"] = role
super(AttributeToRoleIdentityMapper, __self__).__init__(
'keycloak:index/attributeToRoleIdentityMapper:AttributeToRoleIdentityMapper',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
attribute_friendly_name: Optional[pulumi.Input[str]] = None,
attribute_name: Optional[pulumi.Input[str]] = None,
attribute_value: Optional[pulumi.Input[str]] = None,
claim_name: Optional[pulumi.Input[str]] = None,
claim_value: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
identity_provider_alias: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
realm: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None) -> 'AttributeToRoleIdentityMapper':
"""
Get an existing AttributeToRoleIdentityMapper resource's state with the given name, id, and optional extra
properties | |
},
params_map={
'all': [
'forgot_password_request',
],
'required': [
'forgot_password_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'forgot_password_request':
(ForgotPasswordRequest,),
},
'attribute_map': {
},
'location_map': {
'forgot_password_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.get_auth_capabilities_endpoint = _Endpoint(
settings={
'response_type': (AuthCapabilities,),
'auth': [],
'endpoint_path': '/auth/capabilities',
'operation_id': 'get_auth_capabilities',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_credentials_endpoint = _Endpoint(
settings={
'response_type': (Credentials,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/users/{userId}/credentials/{accessKeyId}',
'operation_id': 'get_credentials',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'user_id',
'access_key_id',
],
'required': [
'user_id',
'access_key_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'user_id':
(str,),
'access_key_id':
(str,),
},
'attribute_map': {
'user_id': 'userId',
'access_key_id': 'accessKeyId',
},
'location_map': {
'user_id': 'path',
'access_key_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_current_user_endpoint = _Endpoint(
settings={
'response_type': (CurrentUser,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/user',
'operation_id': 'get_current_user',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_group_endpoint = _Endpoint(
settings={
'response_type': (Group,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/groups/{groupId}',
'operation_id': 'get_group',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'group_id',
],
'required': [
'group_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'group_id':
(str,),
},
'attribute_map': {
'group_id': 'groupId',
},
'location_map': {
'group_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_policy_endpoint = _Endpoint(
settings={
'response_type': (Policy,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/policies/{policyId}',
'operation_id': 'get_policy',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'policy_id',
],
'required': [
'policy_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'policy_id':
(str,),
},
'attribute_map': {
'policy_id': 'policyId',
},
'location_map': {
'policy_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_user_endpoint = _Endpoint(
settings={
'response_type': (User,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/users/{userId}',
'operation_id': 'get_user',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'user_id',
],
'required': [
'user_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'user_id':
(str,),
},
'attribute_map': {
'user_id': 'userId',
},
'location_map': {
'user_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_group_members_endpoint = _Endpoint(
settings={
'response_type': (UserList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/groups/{groupId}/members',
'operation_id': 'list_group_members',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'group_id',
'prefix',
'after',
'amount',
],
'required': [
'group_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'group_id':
(str,),
'prefix':
(str,),
'after':
(str,),
'amount':
(int,),
},
'attribute_map': {
'group_id': 'groupId',
'prefix': 'prefix',
'after': 'after',
'amount': 'amount',
},
'location_map': {
'group_id': 'path',
'prefix': 'query',
'after': 'query',
'amount': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_group_policies_endpoint = _Endpoint(
settings={
'response_type': (PolicyList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/groups/{groupId}/policies',
'operation_id': 'list_group_policies',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'group_id',
'prefix',
'after',
'amount',
],
'required': [
'group_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'group_id':
(str,),
'prefix':
(str,),
'after':
(str,),
'amount':
(int,),
},
'attribute_map': {
'group_id': 'groupId',
'prefix': 'prefix',
'after': 'after',
'amount': 'amount',
},
'location_map': {
'group_id': 'path',
'prefix': 'query',
'after': 'query',
'amount': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_groups_endpoint = _Endpoint(
settings={
'response_type': (GroupList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/groups',
'operation_id': 'list_groups',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'prefix',
'after',
'amount',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'prefix':
(str,),
'after':
(str,),
'amount':
(int,),
},
'attribute_map': {
'prefix': 'prefix',
'after': 'after',
'amount': 'amount',
},
'location_map': {
'prefix': 'query',
'after': 'query',
'amount': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_policies_endpoint = _Endpoint(
settings={
'response_type': (PolicyList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/policies',
'operation_id': 'list_policies',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'prefix',
'after',
'amount',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'prefix':
(str,),
'after':
(str,),
'amount':
(int,),
},
'attribute_map': {
'prefix': 'prefix',
'after': 'after',
'amount': 'amount',
},
'location_map': {
'prefix': 'query',
'after': 'query',
'amount': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_user_credentials_endpoint = _Endpoint(
settings={
'response_type': (CredentialsList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/users/{userId}/credentials',
'operation_id': 'list_user_credentials',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'user_id',
'prefix',
'after',
'amount',
],
'required': [
'user_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'user_id':
(str,),
'prefix':
(str,),
'after':
(str,),
'amount':
(int,),
},
'attribute_map': {
'user_id': 'userId',
'prefix': 'prefix',
'after': 'after',
'amount': 'amount',
},
'location_map': {
'user_id': 'path',
'prefix': 'query',
'after': 'query',
'amount': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_user_groups_endpoint = _Endpoint(
settings={
'response_type': (GroupList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/users/{userId}/groups',
'operation_id': 'list_user_groups',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'user_id',
'prefix',
'after',
'amount',
],
'required': [
'user_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'user_id':
(str,),
'prefix':
(str,),
'after':
(str,),
'amount':
(int,),
},
'attribute_map': {
'user_id': 'userId',
'prefix': 'prefix',
'after': 'after',
'amount': 'amount',
},
'location_map': {
'user_id': 'path',
'prefix': 'query',
'after': 'query',
'amount': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_user_policies_endpoint = _Endpoint(
settings={
'response_type': (PolicyList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/users/{userId}/policies',
'operation_id': 'list_user_policies',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'user_id',
'prefix',
'after',
'amount',
'effective',
],
'required': [
'user_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'user_id':
(str,),
'prefix':
(str,),
'after':
(str,),
'amount':
(int,),
'effective':
(bool,),
},
'attribute_map': {
'user_id': 'userId',
'prefix': 'prefix',
'after': 'after',
'amount': 'amount',
'effective': 'effective',
},
'location_map': {
'user_id': 'path',
'prefix': 'query',
'after': 'query',
'amount': 'query',
'effective': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.list_users_endpoint = _Endpoint(
settings={
'response_type': (UserList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token',
'oidc_auth'
],
'endpoint_path': '/auth/users',
'operation_id': 'list_users',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'prefix',
'after',
'amount',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): | |
import unittest
import pathlib
import cpptypeinfo
from cpptypeinfo.usertype import (Field, Struct, Pointer, Param, Function)
HERE = pathlib.Path(__file__).absolute().parent
IMGUI_H = HERE.parent / 'libs/imgui/imgui.h'
parser = cpptypeinfo.TypeParser()
EXPECTS = {
'ImDrawChannel':
parser.parse('struct ImDrawChannel'),
'ImDrawCmd':
parser.parse('struct ImDrawCmd'),
'ImDrawData':
parser.parse('struct ImDrawData'),
'ImDrawList':
parser.parse('struct ImDrawList'),
'ImDrawListSharedData':
parser.parse('struct ImDrawListSharedData'),
'ImDrawListSplitter':
parser.parse('struct ImDrawListSplitter'),
'ImDrawVert':
parser.parse('struct ImDrawVert'),
'ImFont':
parser.parse('struct ImFont'),
'ImFontAtlas':
parser.parse('struct ImFontAtlas'),
'ImFontConfig':
parser.parse('struct ImFontConfig'),
'ImFontGlyph':
parser.parse('struct ImFontGlyph'),
'ImFontGlyphRangesBuilder':
parser.parse('struct ImFontGlyphRangesBuilder'),
'ImColor':
parser.parse('struct ImColor'),
'ImGuiContext':
parser.parse('struct ImGuiContext'),
'ImGuiIO':
parser.parse('struct ImGuiIO'),
'ImGuiInputTextCallbackData':
parser.parse('struct ImGuiInputTextCallbackData'),
'ImGuiListClipper':
parser.parse('struct ImGuiListClipper'),
'ImGuiOnceUponAFrame':
parser.parse('struct ImGuiOnceUponAFrame'),
'ImGuiPayload':
parser.parse('struct ImGuiPayload'),
'ImGuiSizeCallbackData':
parser.parse('struct ImGuiSizeCallbackData'),
'ImGuiStorage':
parser.parse('struct ImGuiStorage'),
'ImGuiStyle':
parser.parse('struct ImGuiStyle'),
'ImGuiTextBuffer':
parser.parse('struct ImGuiTextBuffer'),
'ImGuiTextFilter':
parser.parse('struct ImGuiTextFilter'),
'ImTextureID':
parser.typedef('ImTextureID', Pointer(cpptypeinfo.Void())),
'ImGuiID':
parser.typedef('ImGuiID', cpptypeinfo.UInt32()),
'ImWchar':
parser.typedef('ImWchar', cpptypeinfo.UInt16()),
'ImGuiCol':
parser.typedef('ImGuiCol', cpptypeinfo.Int32()),
'ImGuiCond':
parser.typedef('ImGuiCond', cpptypeinfo.Int32()),
'ImGuiDataType':
parser.typedef('ImGuiDataType', cpptypeinfo.Int32()),
'ImGuiDir':
parser.typedef('ImGuiDir', cpptypeinfo.Int32()),
'ImGuiKey':
parser.typedef('ImGuiKey', cpptypeinfo.Int32()),
'ImGuiNavInput':
parser.typedef('ImGuiNavInput', cpptypeinfo.Int32()),
'ImGuiMouseCursor':
parser.typedef('ImGuiMouseCursor', cpptypeinfo.Int32()),
'ImGuiStyleVar':
parser.typedef('ImGuiStyleVar', cpptypeinfo.Int32()),
'ImDrawCornerFlags':
parser.typedef('ImDrawCornerFlags', cpptypeinfo.Int32()),
'ImDrawListFlags':
parser.typedef('ImDrawListFlags', cpptypeinfo.Int32()),
'ImFontAtlasFlags':
parser.typedef('ImFontAtlasFlags', cpptypeinfo.Int32()),
'ImGuiBackendFlags':
parser.typedef('ImGuiBackendFlags', cpptypeinfo.Int32()),
'ImGuiColorEditFlags':
parser.typedef('ImGuiColorEditFlags', cpptypeinfo.Int32()),
'ImGuiConfigFlags':
parser.typedef('ImGuiConfigFlags', cpptypeinfo.Int32()),
'ImGuiComboFlags':
parser.typedef('ImGuiComboFlags', cpptypeinfo.Int32()),
'ImGuiDragDropFlags':
parser.typedef('ImGuiDragDropFlags', cpptypeinfo.Int32()),
'ImGuiFocusedFlags':
parser.typedef('ImGuiFocusedFlags', cpptypeinfo.Int32()),
'ImGuiHoveredFlags':
parser.typedef('ImGuiHoveredFlags', cpptypeinfo.Int32()),
'ImGuiInputTextFlags':
parser.typedef('ImGuiInputTextFlags', cpptypeinfo.Int32()),
'ImGuiSelectableFlags':
parser.typedef('ImGuiSelectableFlags', cpptypeinfo.Int32()),
'ImGuiTabBarFlags':
parser.typedef('ImGuiTabBarFlags', cpptypeinfo.Int32()),
'ImGuiTabItemFlags':
parser.typedef('ImGuiTabItemFlags', cpptypeinfo.Int32()),
'ImGuiTreeNodeFlags':
parser.typedef('ImGuiTreeNodeFlags', cpptypeinfo.Int32()),
'ImGuiWindowFlags':
parser.typedef('ImGuiWindowFlags', cpptypeinfo.Int32()),
'ImGuiInputTextCallback':
parser.typedef(
'ImGuiInputTextCallback',
Function(cpptypeinfo.Int32(), [
Param(Pointer(parser.parse('struct ImGuiInputTextCallbackData')))
])),
'ImGuiSizeCallback':
parser.typedef(
'ImGuiSizeCallback',
Function(
cpptypeinfo.Void(),
[Param(Pointer(parser.parse('struct ImGuiSizeCallbackData')))])),
'ImS8':
parser.typedef('ImS8', cpptypeinfo.Int8()),
'ImU8':
parser.typedef('ImU8', cpptypeinfo.UInt8()),
'ImS16':
parser.typedef('ImS16', cpptypeinfo.Int16()),
'ImU16':
parser.typedef('ImU16', cpptypeinfo.UInt16()),
'ImS32':
parser.typedef('ImS32', cpptypeinfo.Int32()),
'ImU32':
parser.typedef('ImU32', cpptypeinfo.UInt32()),
'ImS64':
parser.typedef('ImS64', cpptypeinfo.Int64()),
'ImU64':
parser.typedef('ImU64', cpptypeinfo.UInt64()),
'ImVec2':
parser.struct(
'ImVec2',
[Field(cpptypeinfo.Float(), 'x'),
Field(cpptypeinfo.Float(), 'y')]),
'ImVec4':
parser.struct('ImVec4', [
Field(cpptypeinfo.Float(), 'x'),
Field(cpptypeinfo.Float(), 'y'),
Field(cpptypeinfo.Float(), 'z'),
Field(cpptypeinfo.Float(), 'w')
]),
'CreateContext':
Function(Pointer(parser.parse('struct ImGuiContext')), [
Param(Pointer(parser.parse('struct ImFontAtlas')), 'shared_font_atlas',
'NULL')
]),
'DestroyContext':
Function(
cpptypeinfo.Void(),
[Param(Pointer(parser.parse('struct ImGuiContext')), 'ctx', 'NULL')]),
'GetCurrentContext':
Function(Pointer(parser.parse('struct ImGuiContext')), []),
'SetCurrentContext':
Function(cpptypeinfo.Void(),
[Param(Pointer(parser.parse('struct ImGuiContext')), 'ctx')]),
'DebugCheckVersionAndDataLayout':
Function(cpptypeinfo.Bool(), [
Param(parser.parse('const char*'), 'version_str'),
Param(cpptypeinfo.UInt64(), 'sz_io'),
Param(cpptypeinfo.UInt64(), 'sz_style'),
Param(cpptypeinfo.UInt64(), 'sz_vec2'),
Param(cpptypeinfo.UInt64(), 'sz_vec4'),
Param(cpptypeinfo.UInt64(), 'sz_drawvert'),
Param(cpptypeinfo.UInt64(), 'sz_drawidx'),
]),
# ImGuiIO & GetIO ( )
'GetIO':
Function(parser.parse('ImGuiIO &'), []),
# ImGuiStyle & GetStyle ( )
'GetStyle':
Function(parser.parse('ImGuiStyle &'), []),
# void NewFrame ( )
'NewFrame':
Function(cpptypeinfo.Void(), []),
'EndFrame':
Function(cpptypeinfo.Void(), []),
'Render':
Function(cpptypeinfo.Void(), []),
# ImDrawData * GetDrawData ( )
'GetDrawData':
Function(parser.parse('ImDrawData *'), []),
# void ShowDemoWindow ( bool * p_open = NULL )
'ShowDemoWindow':
Function(cpptypeinfo.Void(),
[Param(parser.parse('bool *'), 'p_open', 'NULL')]),
# void ShowAboutWindow ( bool * p_open = NULL )
'ShowAboutWindow':
Function(cpptypeinfo.Void(),
[Param(parser.parse('bool *'), 'p_open', 'NULL')]),
# void ShowMetricsWindow ( bool * p_open = NULL )
'ShowMetricsWindow':
Function(cpptypeinfo.Void(),
[Param(parser.parse('bool *'), 'p_open', 'NULL')]),
# void ShowStyleEditor ( ImGuiStyle * ref = NULL )
'ShowStyleEditor':
Function(cpptypeinfo.Void(),
[Param(parser.parse('ImGuiStyle *'), 'ref', 'NULL')]),
# bool ShowStyleSelector ( const char * label )
'ShowStyleSelector':
Function(cpptypeinfo.Bool(),
[Param(parser.parse('const char*'), 'label')]),
# void ShowFontSelector ( const char * label )
'ShowFontSelector':
Function(cpptypeinfo.Void(),
[Param(parser.parse('const char*'), 'label')]),
# void ShowUserGuide ( )
'ShowUserGuide':
Function(cpptypeinfo.Void(), []),
# const char * GetVersion ( )
'GetVersion':
Function(parser.parse('const char*'), []),
# void StyleColorsDark ( ImGuiStyle * dst = NULL )
'StyleColorsDark':
Function(cpptypeinfo.Void(),
[Param(parser.parse('ImGuiStyle *'), 'dst', 'NULL')]),
# void StyleColorsClassic ( ImGuiStyle * dst = NULL )
'StyleColorsClassic':
Function(cpptypeinfo.Void(),
[Param(parser.parse('ImGuiStyle *'), 'dst', 'NULL')]),
# void StyleColorsLight ( ImGuiStyle * dst = NULL )
'StyleColorsLight':
Function(cpptypeinfo.Void(),
[Param(parser.parse('ImGuiStyle *'), 'dst', 'NULL')]),
# bool Begin ( const char * name , bool * p_open = NULL , ImGuiWindowFlags flags = 0 )
'Begin':
Function(cpptypeinfo.Bool(), [
Param(parser.parse('const char *'), 'name'),
Param(parser.parse('bool *'), 'p_open', 'NULL'),
Param(parser.parse('ImGuiWindowFlags'), 'flags', '0')
]),
'End':
Function(cpptypeinfo.Void(), []),
# bool BeginChild ( const char * str_id , const ImVec2 & size = ImVec2 ( 0 , 0 ) , bool border = false , ImGuiWindowFlags flags = 0 )
# bool BeginChild(ImGuiID id, const ImVec2& size = ImVec2(0,0), bool border = false, ImGuiWindowFlags flags = 0);
# function overloading
'BeginChild': [
Function(cpptypeinfo.Bool(), [
Param(parser.parse('const char *'), 'str_id'),
            Param(parser.parse('const ImVec2 &'), 'size', 'ImVec2(0,0)'),
            Param(cpptypeinfo.Bool(), 'border', 'false'),
Param(parser.parse('ImGuiWindowFlags'), 'flags', '0')
])
],
'__dummy__0':
None,
'EndChild':
Function(cpptypeinfo.Void(), []),
# bool IsWindowAppearing ( )
'IsWindowAppearing':
Function(cpptypeinfo.Bool(), []),
# bool IsWindowCollapsed ( )
'IsWindowCollapsed':
Function(cpptypeinfo.Bool(), []),
# bool IsWindowFocused ( ImGuiFocusedFlags flags = 0 )
'IsWindowFocused':
Function(cpptypeinfo.Bool(),
[Param(parser.parse('ImGuiFocusedFlags'), 'flags', '0')]),
# bool IsWindowHovered ( ImGuiHoveredFlags flags = 0 )
'IsWindowHovered':
Function(cpptypeinfo.Bool(),
[Param(parser.parse('ImGuiHoveredFlags'), 'flags', '0')]),
# ImDrawList * GetWindowDrawList ( )
'GetWindowDrawList':
Function(parser.parse('ImDrawList*'), []),
# ImVec2 GetWindowPos ( )
'GetWindowPos':
Function(parser.parse('ImVec2'), []),
# ImVec2 GetWindowSize ( )
'GetWindowSize':
Function(parser.parse('ImVec2'), []),
# float GetWindowWidth ( )
'GetWindowWidth':
Function(cpptypeinfo.Float(), []),
'GetWindowHeight':
Function(cpptypeinfo.Float(), []),
# void SetNextWindowPos ( const ImVec2 & pos , ImGuiCond cond = 0 , const ImVec2 & pivot = ImVec2 ( 0 , 0 ) )
'SetNextWindowPos':
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2&'), 'pos'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
Param(parser.parse('const ImVec2 &'), 'pivot', 'ImVec2(0,0)'),
]),
# void SetNextWindowSize ( const ImVec2 & size , ImGuiCond cond = 0 )
'SetNextWindowSize':
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'size'),
Param(parser.parse('ImGuiCond'), 'cond', '0')
]),
# void SetNextWindowSizeConstraints ( const ImVec2 & size_min , const ImVec2 & size_max , ImGuiSizeCallback custom_callback = NULL , void * custom_callback_data = NULL )
'SetNextWindowSizeConstraints':
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'size_min'),
Param(parser.parse('const ImVec2 &'), 'size_max'),
Param(parser.parse('ImGuiSizeCallback'), 'custom_callback', 'NULL'),
Param(parser.parse('void *'), 'custom_callback_data', 'NULL')
]),
# void SetNextWindowContentSize ( const ImVec2 & size )
'SetNextWindowContentSize':
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'size'),
]),
# void SetNextWindowCollapsed ( bool collapsed , ImGuiCond cond = 0 )
'SetNextWindowCollapsed':
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Bool(), 'collapsed'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
]),
# void SetNextWindowFocus ( )
'SetNextWindowFocus':
Function(cpptypeinfo.Void(), []),
# void SetNextWindowBgAlpha ( float alpha )
'SetNextWindowBgAlpha':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Float(), 'alpha')]),
# void SetWindowPos ( const ImVec2 & pos , ImGuiCond cond = 0 )
# void SetWindowPos(const char* name, const ImVec2& pos, ImGuiCond cond = 0);
# function overloading
'SetWindowPos': [
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'pos'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
])
],
'__dummy__1':
None,
# void SetWindowSize ( const ImVec2 & size , ImGuiCond cond = 0 )
# void SetWindowSize(const char* name, const ImVec2& size, ImGuiCond cond = 0);
# function overloading
'SetWindowSize': [
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'size'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
])
],
'__dummy__2':
None,
# void SetWindowCollapsed ( bool collapsed , ImGuiCond cond = 0 )
# IMGUI_API void SetWindowCollapsed(const char* name, bool collapsed, ImGuiCond cond = 0); // set named window collapsed state
'SetWindowCollapsed': [
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Bool(), 'collapsed'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
])
],
'__dummy__3':
None,
# void SetWindowFocus ( )
# IMGUI_API void SetWindowFocus(const char* name);
'SetWindowFocus': [Function(cpptypeinfo.Void(), [])],
'__dummy__4':
None,
# void SetWindowFontScale ( float scale )
'SetWindowFontScale':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Float(), 'scale')]),
# ImVec2 GetContentRegionMax ( )
'GetContentRegionMax':
Function(parser.parse('ImVec2'), []),
# ImVec2 GetContentRegionAvail ( )
'GetContentRegionAvail':
Function(parser.parse('ImVec2'), []),
# ImVec2 GetWindowContentRegionMin ( )
'GetWindowContentRegionMin':
Function(parser.parse('ImVec2'), []),
# ImVec2 GetWindowContentRegionMax ( )
'GetWindowContentRegionMax':
Function(parser.parse('ImVec2'), []),
# float GetWindowContentRegionWidth ( )
'GetWindowContentRegionWidth':
Function(cpptypeinfo.Float(), []),
# float GetScrollX ( )
'GetScrollX':
Function(cpptypeinfo.Float(), []),
'GetScrollY':
Function(cpptypeinfo.Float(), []),
'GetScrollMaxX':
Function(cpptypeinfo.Float(), []),
'GetScrollMaxY':
Function(cpptypeinfo.Float(), []),
# void SetScrollX ( float scroll_x )
'SetScrollX':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Float(), 'scroll_x')]),
'SetScrollY':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Float(), 'scroll_y')]),
# void SetScrollHereX ( float center_x_ratio = 0.5f )
'SetScrollHereX':
Function(cpptypeinfo.Void(),
[Param(cpptypeinfo.Float(), 'center_x_ratio', '0.5f')]),
'SetScrollHereY':
Function(cpptypeinfo.Void(),
[Param(cpptypeinfo.Float(), 'center_y_ratio', '0.5f')]),
# void SetScrollFromPosX ( float local_x , float center_x_ratio = 0.5f )
'SetScrollFromPosX':
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Float(), 'local_x'),
Param(cpptypeinfo.Float(), 'center_x_ratio', '0.5f')
]),
'SetScrollFromPosY':
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Float(), 'local_y'),
Param(cpptypeinfo.Float(), 'center_y_ratio', '0.5f')
]),
# void PushFont ( ImFont * font )
'PushFont':
Function(cpptypeinfo.Void(), [Param(parser.parse('ImFont*'), 'font')]),
# void PopFont ( )
'PopFont':
Function(cpptypeinfo.Void(), []),
# void PushStyleColor ( ImGuiCol idx , ImU32 col )
# void PushStyleColor ( ImGuiCol idx , ImU32 col )
'PushStyleColor': [
Function(cpptypeinfo.Void(), [
Param(parser.parse('ImGuiCol'), 'idx'),
Param(parser.parse('ImU32'), 'col')
])
],
'__dummy__5':
None,
# void PopStyleColor ( int count = 1 )
'PopStyleColor':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Int32(), 'count', '1')]),
# void PushStyleVar ( ImGuiStyleVar idx , float val )
# void PushStyleVar(ImGuiStyleVar idx, const ImVec2& val);
'PushStyleVar': [
Function(cpptypeinfo.Void(), [
            Param(parser.parse('ImGuiStyleVar'), 'idx'),
Param(cpptypeinfo.Float(), 'val')
])
],
'__dummy__6':
None,
# :void PopStyleVar ( int count = 1 )
'PopStyleVar':
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Int32(), 'count', '1'),
]),
# const ImVec4 & GetStyleColorVec4 ( ImGuiCol idx )
'GetStyleColorVec4':
Function(parser.parse('const ImVec4 &'),
[Param(parser.parse('ImGuiCol'), 'idx')]),
# ImFont * GetFont ( )
'GetFont':
Function(parser.parse('ImFont*'), []),
'GetFontSize': [],
'GetFontTexUvWhitePixel': [],
# 3 overloading
'GetColorU32': [],
'__dummy__7':
None,
'__dummy__8':
None,
'PushItemWidth': [],
'PopItemWidth': [],
'SetNextItemWidth': [],
'CalcItemWidth': [],
'PushTextWrapPos': [],
'PopTextWrapPos': [],
| |
<reponame>sychen6192/Computer-Vision<filename>SFM/SFM.py
#!/usr/bin/env python
# coding: utf-8
# In[282]:
import cv2
import matplotlib.pyplot as plt
import numpy as np
import random
from tqdm.notebook import tqdm
from math import sqrt
import mpl_toolkits.mplot3d.axes3d as p3
plt.rcParams['figure.figsize'] = [15, 15]
# In[283]:
# Read an image and convert it to grayscale
def read_image(path):
img = cv2.imread(path)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_gray= cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return img_gray, img, img_rgb
# In[284]:
def sift(img):
siftDetector= cv2.xfeatures2d.SIFT_create()
kp, des = siftDetector.detectAndCompute(img, None)
return kp, des
# In[285]:
def plot_matches(matches, total_img):
match_img = total_img.copy()
offset = total_img.shape[1]/2
fig, ax = plt.subplots()
ax.set_aspect('equal')
ax.imshow(np.array(match_img).astype('uint8')) # RGB is integer type
ax.plot(matches[:, 0], matches[:, 1], 'xr')
ax.plot(matches[:, 2] + offset, matches[:, 3], 'xr')
ax.plot([matches[:, 0], matches[:, 2] + offset], [matches[:, 1], matches[:, 3]],
'r', linewidth=0.5)
plt.show()
# In[286]:
def plot_sift(gray, rgb, kp):
tmp = rgb.copy()
img = cv2.drawKeypoints(gray, kp, tmp, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
return img
# In[287]:
def matcher(kp1, des1, img1, kp2, des2, img2, threshold):
# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1,des2, k=2)
# Apply ratio test
good = []
for m,n in matches:
if m.distance < threshold*n.distance:
good.append([m])
matches = []
for pair in good:
matches.append(list(kp1[pair[0].queryIdx].pt + kp2[pair[0].trainIdx].pt))
matches = np.array(matches)
return matches
# In[288]:
def normalization(points):
    # De-mean so the points are centred at the origin.
mean = np.mean(points, axis=0)
# Rescale.
std_x = np.std(points[:, 0])
std_y = np.std(points[:, 1])
# Matrix for transforming points to do normalization.
transform = np.array([[sqrt(2)/std_x, 0, -sqrt(2)/std_x*mean[0]],
[0, sqrt(2)/std_y, -sqrt(2)/std_y*mean[1]],
[0, 0, 1]])
points = np.concatenate((points, np.ones((points.shape[0], 1))), axis=1)
normalized = np.dot(transform, points.T).T
return normalized[:, 0:2], transform
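# Sanity-check sketch (assumption: not part of the original script): after the
# normalisation above, points should be centred at the origin with a standard
# deviation of roughly sqrt(2) along each axis.
def check_normalization(points):
    normed, _ = normalization(points)
    return np.mean(normed, axis=0), np.std(normed, axis=0)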
# In[289]:
def fundamental_matrix(pairs):
    all_p1 = matches[:, 0:2] # All points of image1 in matching pairs.
    all_p2 = matches[:, 2:4] # All points of image2 in matching pairs.
    # NOTE: F is fitted to the full global `matches` here, not to the sampled `pairs` argument.
# Normalization
all_p1, T1 = normalization(all_p1)
all_p2, T2 = normalization(all_p2)
# Solving F
A_rows = [] # Every row in A is a sublist of A_row.
for i in range(all_p1.shape[0]):
p1 = all_p1[i]
p2 = all_p2[i]
row = [p2[0]*p1[0], p2[0]*p1[1], p2[0],
p2[1]*p1[0], p2[1]*p1[1], p2[1], p1[0], p1[1], 1]
A_rows.append(row)
A = np.array(A_rows)
U, s, V = np.linalg.svd(A)
F = V[-1].reshape(3, 3)
F = F / F[2, 2]
# Enforce rank-2 constraint.
U, s, Vh = np.linalg.svd(F)
s_prime = np.diag(s)
s_prime[-1] = 0
F = np.dot(U, np.dot(s_prime, Vh))
# Denormalization
F = np.dot(np.dot(T2.T, F), T1)
return F
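# Sanity-check sketch (assumption: not part of the original script): a good
# fundamental matrix should make the algebraic epipolar constraint x2^T F x1
# close to zero for inlier correspondences.
def check_epipolar_constraint(F, matches, tol=1e-2):
    """Return the fraction of matches with |x2^T F x1| below tol (pixel coordinates)."""
    ones = np.ones((matches.shape[0], 1))
    p1 = np.concatenate((matches[:, 0:2], ones), axis=1)
    p2 = np.concatenate((matches[:, 2:4], ones), axis=1)
    residuals = np.abs(np.sum(p2 * np.dot(F, p1.T).T, axis=1))
    return float(np.mean(residuals < tol))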
# In[290]:
def get_errors(matches, F):
# Compute average geometric distances between epipolar line and its
# corresponding point in both images
ones = np.ones((matches.shape[0], 1))
all_p1 = np.concatenate((matches[:, 0:2], ones), axis=1)
all_p2 = np.concatenate((matches[:, 2:4], ones), axis=1)
# Epipolar lines.
F_p1 = np.dot(F, all_p1.T).T # F*p1, dims [#points, 3].
F_p2 = np.dot(F.T, all_p2.T).T # (F^T)*p2, dims [#points, 3].
# Geometric distances.
p1_line2 = np.sum(all_p1 * F_p2, axis=1)[:, np.newaxis]
p2_line1 = np.sum(all_p2 * F_p1, axis=1)[:, np.newaxis]
d1 = np.absolute(p1_line2) / np.linalg.norm(F_p2, axis=1)[:, np.newaxis]
d2 = np.absolute(p2_line1) / np.linalg.norm(F_p1, axis=1)[:, np.newaxis]
return (d1 + d2) / 2
# In[291]:
def random_pairs(matches, k=4):
idx = random.sample(range(len(matches)), k)
pairs = [matches[i] for i in idx ]
return np.array(pairs)
# In[292]:
def ransac(matches, threshold, iters):
print("running ransac ...")
num_best_inliers = 0
for i in tqdm(range(iters)):
pairs = random_pairs(matches)
F = fundamental_matrix(pairs)
errors = get_errors(matches, F)
idx = np.where(errors < threshold)[0]
inliers = matches[idx]
num_inliers = len(inliers)
if num_inliers > num_best_inliers:
best_inliers = inliers.copy()
num_best_inliers = num_inliers
best_F = F.copy()
print("inliers/matches: {}/{}".format(num_best_inliers, len(matches)))
return best_inliers, best_F
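# Illustrative helper (assumption: not part of the original script): a common
# rule of thumb for choosing the RANSAC iteration count, where p is the desired
# confidence, w the expected inlier ratio and s the minimal sample size.
def ransac_iterations(p=0.99, w=0.5, s=8):
    return int(np.ceil(np.log(1 - p) / np.log(1 - w ** s)))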
# In[293]:
def triangulate(P1, P2, matches):
    # np.linalg.svd returns V already transposed (Vh), so the camera centre (the null vector of P) is the last row, i.e. V.T[:, -1].
U, s, V = np.linalg.svd(P1)
center1 = V.T[:, -1]
center1 = center1/center1[-1]
U, s, V = np.linalg.svd(P2)
center2 = V.T[:, -1]
center2 = center2/center2[-1]
    # Convert to homogeneous coordinates.
ones = np.ones((matches.shape[0], 1))
points1 = np.concatenate((matches[:, 0:2], ones), axis=1)
points2 = np.concatenate((matches[:, 2:4], ones), axis=1)
# Reconstruct 3D points.
X_3d = np.zeros((matches.shape[0], 4))
for i in range(matches.shape[0]):
x1_cross_P1 = np.array([[0, -points1[i,2], points1[i,1]],
[points1[i,2], 0, -points1[i,0]],
[-points1[i,1], points1[i,0], 0]])
x2_cross_P2 = np.array([[0, -points2[i,2], points2[i,1]],
[points2[i,2], 0, -points2[i,0]],
[-points2[i,1], points2[i,0], 0]])
x_cross_P = np.concatenate((x1_cross_P1.dot(P1), x2_cross_P2.dot(P2)),
axis=0)
        # Normalise via a temporary variable; assigning into X_3d directly was observed to overflow to inf.
U, s, V = np.linalg.svd(x_cross_P)
temp = V.T[:, -1]
temp = temp / temp[-1]
X_3d[i] = temp
return center1, center2, X_3d
# In[294]:
def reconstruct(K1, K2, F):
E = np.dot(np.dot(K2.T, F), K1)
U, s, Vh = np.linalg.svd(E)
W = np.array([0, -1, 0, 1, 0, 0, 0, 0, 1]).reshape(3, 3)
R1 = np.dot(np.dot(U, W), Vh)
R2 = np.dot(np.dot(U, W.T), Vh)
T1 = (U[:, 2]).reshape(3, 1)
T2 = -T1
P1 = np.concatenate((R1, T1), axis=1)
P2 = np.concatenate((R1, T2), axis=1)
P3 = np.concatenate((R2, T1), axis=1)
P4 = np.concatenate((R2, T2), axis=1)
return K2.dot(P1), K2.dot(P2), K2.dot(P3), K2.dot(P4)
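# Pose-disambiguation sketch (assumption: not part of the original script).
# reconstruct() returns four candidate projection matrices for the second
# camera; the usual automatic way to pick one is a cheirality check that keeps
# the candidate placing the most triangulated points in front of both cameras
# (positive depth), instead of eyeballing the four plots as done further below.
def pick_valid_pose(P0, candidates, matches):
    best_P, best_count = None, -1
    for P in candidates:
        _, _, X = triangulate(P0, P, matches)
        depth1 = X[:, 2]                # depth in the first (reference) camera
        depth2 = np.dot(P[2, :], X.T)   # depth in the second camera (third row of P)
        count = int(np.sum((depth1 > 0) & (depth2 > 0)))
        if count > best_count:
            best_P, best_count = P, count
    return best_P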
# In[295]:
def plot_epipolar(matches, F, image):
# Display second image with epipolar lines reprojected
# from the first image.
# first, fit fundamental matrix to the matches
N = len(matches)
M = np.c_[matches[:,0:2], np.ones((N,1))].transpose()
L1 = np.matmul(F, M).transpose() # transform points from
# the first image to get epipolar lines in the second image
# find points on epipolar lines L closest to matches(:,3:4)
l = np.sqrt(L1[:,0]**2 + L1[:,1]**2)
L = np.divide(L1,np.kron(np.ones((3,1)),l).transpose())# rescale the line
pt_line_dist = np.multiply(L, np.c_[matches[:,2:4], np.ones((N,1))]).sum(axis = 1)
closest_pt = matches[:,2:4] - np.multiply(L[:,0:2],np.kron(np.ones((2,1)), pt_line_dist).transpose())
# find endpoints of segment on epipolar line (for display purposes)
pt1 = closest_pt - np.c_[L[:,1], -L[:,0]]*10# offset from the closest point is 10 pixels
pt2 = closest_pt + np.c_[L[:,1], -L[:,0]]*10
# display points and segments of corresponding epipolar lines
fig, ax = plt.subplots()
ax.set_aspect('equal')
ax.imshow(np.array(image).astype('uint8'))
ax.plot(matches[:,2],matches[:,3], 'or', markersize=2)
ax.plot([matches[:,2], closest_pt[:,0]],[matches[:,3], closest_pt[:,1]], 'r')
ax.plot([pt1[:,0], pt2[:,0]],[pt1[:,1], pt2[:,1]], 'g', linewidth=1)
plt.axis('off')
plt.show()
# In[296]:
def plot_3d(center1, center2, X_3d):
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.scatter(X_3d[:,0], X_3d[:,1], X_3d[:,2], c='b', marker='o', alpha=0.6)
ax.scatter(center1[0], center1[1], center1[2], c='r', marker='+', s=200)
ax.scatter(center2[0], center2[1], center2[2], c='g', marker='+', s=200)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
# In[297]:
is_box = 1
is_statue = 0
is_house = 0
is_library =0
# In[298]:
# box
if is_box:
left_gray, left_origin, left_rgb = read_image('1.jpeg')
right_gray, right_origin, right_rgb = read_image('2.jpeg')
# In[299]:
if is_statue:
left_gray, left_origin, left_rgb = read_image('3.jpg')
right_gray, right_origin, right_rgb = read_image('4.jpg')
# In[300]:
if is_house:
left_gray, left_origin, left_rgb = read_image('house1.jpg')
right_gray, right_origin, right_rgb = read_image('house2.jpg')
# In[301]:
if is_library:
left_gray, left_origin, left_rgb = read_image('library1.jpg')
right_gray, right_origin, right_rgb = read_image('library2.jpg')
# In[302]:
# SIFT only works on grayscale images
kp_left, des_left = sift(left_gray)
kp_right, des_right = sift(right_gray)
# In[303]:
kp_left_img = plot_sift(left_gray, left_rgb, kp_left)
kp_right_img = plot_sift(right_gray, right_rgb, kp_right)
total_kp = np.concatenate((kp_left_img, kp_right_img), axis=1)
plt.imshow(total_kp)
# In[304]:
matches = matcher(kp_left, des_left, left_rgb, kp_right, des_right, right_rgb, 0.5)
# In[305]:
total_img = np.concatenate((left_rgb, right_rgb), axis=1)
plot_matches(matches, total_img) # Good matches
# In[306]:
inliers, F = ransac(matches, 0.5, 2000)
# In[307]:
plot_matches(inliers, total_img) # show inliers matches
# In[308]:
plot_epipolar(matches, F, right_rgb)
# In[318]:
if is_box:
K1 = np.array([1.4219, 0.0005, 0.5092, 0, 1.4219, 0.3802, 0, 0, 0.0010]).reshape(3,3)
K1 = K1 * 1000
# the first camera matrix
P0 = np.array([1, 0 ,0 ,0, 0, 1, 0, 0, 0, 0, 1, 0]).reshape(3, 4)
# possible camera matrix for the second one
P1, P2, P3, P4 = reconstruct(K1, K1, F)
# In[319]:
# case 1
if is_box:
center1, center2, X_3D = triangulate(P0, P1, matches)
plot_3d(center1, center2, X_3D)
# In[320]:
# case 2
if is_box:
center1, center2, X_3D = triangulate(P0, P2, matches)
plot_3d(center1, center2, X_3D)
# In[321]:
# case 3
if is_box:
center1, center2, X_3D = triangulate(P0, P3, matches)
plot_3d(center1, center2, X_3D)
# In[322]:
# case 4
if is_box:
center1, center2, X_3D = triangulate(P0, P4, matches)
plot_3d(center1, center2, X_3D)
# In[323]:
if is_statue:
K1 = np.array([5426.566895, 0.678017, 330.096680,
0.000000, 5423.133301, 648.950012,
0.000000, 0.000000, 1.000000
]).reshape(3,3)
E1 = np.array([0.140626, 0.989027, -0.045273, -1.71427019,
0.475766, -0.107607, -0.872965, 2.36271724,
-0.868258, 0.101223, -0.485678, 78.73528449]).reshape(3, 4)
K2 = np.array([5426.566895, 0.678017, 387.430023,
0.000000, 5423.133301, 620.616699,
0.000000, 0.000000, 1.000000
]).reshape(3,3)
E2 = np.array([0.336455, 0.940689, -0.043627, 0.44275193,
0.446741, -0.200225, -0.871970, 3.03985054,
-0.828988, 0.273889, -0.487611, 77.67276126]).reshape(3, 4)
camera1 = np.dot(K1, E1)
camera2 = np.dot(K2, E2)
center1, center2, X_3D = triangulate(camera1, camera2, matches)
plot_3d(center1, center2, X_3D)
# In[324]:
'''
reference:
https://cmsc426.github.io/sfm/
http://www.cs.cmu.edu/~16385/s17/Slides/12.4_8Point_Algorithm.pdf
http://www.cs.cmu.edu/~16385/s17/Slides/12.5_Reconstruction.pdf
'''
# In[325]:
if is_house:
camera1 = np.array([ 1.6108033e+001, 1.3704159e+001 ,-6.7351564e+001 ,-1.8838024e+002,
8.2886212e-001 ,-6.1257005e+001 ,-2.7985739e+001 ,-7.4190016e+000,
1.6739784e-001 ,-4.5720139e-002 ,-8.4811075e-002 ,5.6548906e-001
]).reshape(3, 4)
camera2 = np.array([ 1.0571624e+001 , 4.0812730e+000 ,-2.2538413e+001, -5.9593366e+001,
3.1827253e-001 ,-2.1616617e+001, -9.8820962e+000, -2.7146868e+000,
6.1142503e-002, -2.0656640e-002,-2.0701037e-002 , 2.5211789e-001]).reshape(3, 4)
center1, center2, X_3D = triangulate(camera1, camera2, matches)
plot_3d(center1, center2, X_3D)
# In[326]:
if is_library:
    camera1 = np.array([ -4.5250208e+001,
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 17:35:28 2015
@author: oligschlager
"""
import pandas as pd
import seaborn as sns
import numpy as np
sns.set_style('white')
##############################################################################
#################### Creative Achievement Questionnaire ######################
##############################################################################
def run_CAQ(df, out_dir=None):
zero = ['CAQ_14','CAQ_22','CAQ_31','CAQ_40','CAQ_49','CAQ_58','CAQ_66','CAQ_76','CAQ_86','CAQ_95']
one = ['CAQ_15','CAQ_23','CAQ_32','CAQ_41','CAQ_50','CAQ_59','CAQ_67','CAQ_77','CAQ_87','CAQ_96']
two = ['CAQ_16','CAQ_24','CAQ_33','CAQ_42','CAQ_51','CAQ_60','CAQ_68','CAQ_78','CAQ_88','CAQ_97']
three = ['CAQ_17','CAQ_25','CAQ_34','CAQ_43','CAQ_52','CAQ_61','CAQ_69','CAQ_79','CAQ_89','CAQ_98']
four = ['CAQ_18','CAQ_26','CAQ_35','CAQ_44','CAQ_53','CAQ_62','CAQ_70','CAQ_80','CAQ_90','CAQ_99']
five = ['CAQ_19', 'CAQ_27', 'CAQ_36', 'CAQ_45', 'CAQ_54', 'CAQ_63', 'CAQ_71', 'CAQ_81', 'CAQ_91', 'CAQ_100']
six = ['CAQ_20', 'CAQ_28', 'CAQ_37', 'CAQ_46', 'CAQ_55', 'CAQ_64', 'CAQ_72', 'CAQ_83', 'CAQ_92', 'CAQ_101']
seven = ['CAQ_29', 'CAQ_38', 'CAQ_47', 'CAQ_56', 'CAQ_65', 'CAQ_74', 'CAQ_85', 'CAQ_93', 'CAQ_102']
df[one] = df[one] * 1
df[two] = df[two] * 2
df[three] = df[three] * 3
df[four] = df[four] * 4
df[five] = df[five] * 5
df[six] = df[six] * 6
    df[seven] = df[seven] * 7
#only the questions for patents, scientific achievements and movies have scores and are used!
for col in ['CAQ_73', 'CAQ_94', 'CAQ_84']:
for i in df.index:
try:
df[col].iloc[i] = int(df[col].iloc[i]) * 7
except:
df[col].iloc[i] = 0
df['seven_sum'] = df[['CAQ_73', 'CAQ_94', 'CAQ_84']].sum(axis=1)
df['CAQ_score'] = np.log(df[one + two + three + four + five + six + ['seven_sum']].sum(axis=1))
cols_export = ['ids'] + ['CAQ_score']
df[cols_export].to_csv('%s/CAQ.csv' % out_dir, decimal='.', index=False)
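# Worked example (assumption: illustrative only, not part of the original
# scoring): because each item block is weighted by its rank, a respondent
# endorsing one weight-2 item and one weight-5 item gets a raw sum of 7 and a
# CAQ_score of log(7) ~= 1.95.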
##############################################################################
#################### Meta Cognition Questionnaire 30 #########################
##############################################################################
def run_MCQ30(df, out_dir):
df['MCQ_lack_of_cogn_conf_mean'] = df[['MCQ_1', 'MCQ_6', 'MCQ_11',
'MCQ_16', 'MCQ_21', 'MCQ_26']].mean(axis=1)
df['MCQ_pos_bel_about_worry_mean'] = df[['MCQ_2', 'MCQ_7', 'MCQ_12',
'MCQ_17','MCQ_22','MCQ_27']].mean(axis=1)
df['MCQ_cogn_self-consc_mean'] = df[['MCQ_3', 'MCQ_8', 'MCQ_13',
'MCQ_18','MCQ_23','MCQ_28']].mean(axis=1)
df['MCQ_neg_bel_about_uncontr_danger_mean'] = df[['MCQ_4', 'MCQ_9', 'MCQ_14',
'MCQ_19', 'MCQ_24', 'MCQ_29']].mean(axis=1)
df['MCQ_need_contr_thoughts_mean'] = df[['MCQ_5','MCQ_10', 'MCQ_15',
'MCQ_20', 'MCQ_25', 'MCQ_30']].mean(axis=1)
cols_export = ['ids'] + ['MCQ_lack_of_cogn_conf_mean',
'MCQ_pos_bel_about_worry_mean',
'MCQ_cogn_self-consc_mean',
'MCQ_neg_bel_about_uncontr_danger_mean',
'MCQ_need_contr_thoughts_mean']
df[cols_export].to_csv('%s/MCQ30.csv' % out_dir, decimal='.', index=False)
##############################################################################
#################### Body Consciousness Questionnaire ########################
##############################################################################
def run_BCQ(df, out_dir):
df['BCQ_private_body_mean'] = df[['BCQ_3', 'BCQ_4','BCQ_5',
'BCQ_8', 'BCQ_12',]].mean(axis=1)
df['BCQ_public_body_mean'] = df[['BCQ_1', 'BCQ_7', 'BCQ_10',
'BCQ_11', 'BCQ_13','BCQ_15']].mean(axis=1)
df['BCQ_body_competence_mean'] = df[['BCQ_2', 'BCQ_6',
'BCQ_9', 'BCQ_14']].mean(axis=1)
cols_export = ['ids'] + ['BCQ_private_body_mean',
'BCQ_public_body_mean',
'BCQ_body_competence_mean']
df[cols_export].to_csv('%s/BCQ.csv' % out_dir, decimal='.', index=False)
##############################################################################
################### Five Facet Mindfulness Questionnaire #####################
##############################################################################
def run_FFMQ(df, out_dir):
#items to be recoded
items_recoded = ['FFMQ_12',
'FFMQ_16',
'FFMQ_22',
'FFMQ_5',
'FFMQ_8',
'FFMQ_13',
'FFMQ_18',
'FFMQ_23',
'FFMQ_28',
'FFMQ_34',
'FFMQ_38',
'FFMQ_3',
'FFMQ_10',
'FFMQ_14',
'FFMQ_17',
'FFMQ_25',
'FFMQ_30',
'FFMQ_35',
'FFMQ_39']
#recode items
recoder = {1:5, 2:4, 3:3, 4:2, 5:1 }
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
df['FFMQ_observe_sum'] = df[['FFMQ_1', 'FFMQ_6','FFMQ_11', 'FFMQ_15',
'FFMQ_20','FFMQ_26','FFMQ_31', 'FFMQ_36',]].sum(axis=1)
df['FFMQ_describe_sum'] = df[['FFMQ_2', 'FFMQ_7', 'FFMQ_12', 'FFMQ_16',
'FFMQ_22', 'FFMQ_27', 'FFMQ_32', 'FFMQ_37']].sum(axis=1)
df['FFMQ_act_awareness_sum'] = df[['FFMQ_5', 'FFMQ_8','FFMQ_13', 'FFMQ_18',
'FFMQ_23', 'FFMQ_28', 'FFMQ_34', 'FFMQ_38']].sum(axis=1)
df['FFMQ_nonjudge_sum'] = df[['FFMQ_3', 'FFMQ_10', 'FFMQ_14', 'FFMQ_17',
'FFMQ_25', 'FFMQ_30','FFMQ_35', 'FFMQ_39']].sum(axis=1)
df['FFMQ_nonreact_sum'] = df[['FFMQ_4', 'FFMQ_9', 'FFMQ_19', 'FFMQ_21',
'FFMQ_24', 'FFMQ_29', 'FFMQ_33']].sum(axis=1)
cols_export = ['ids'] + ['FFMQ_observe_sum',
'FFMQ_describe_sum',
'FFMQ_act_awareness_sum',
'FFMQ_nonjudge_sum',
'FFMQ_nonreact_sum']
df[cols_export].to_csv('%s/FFMQ.csv' % out_dir, decimal='.', index=False)
##############################################################################
#################### Abbreviated Math Anxiety Scale ##########################
##############################################################################
def run_AMAS(df, out_dir):
#Calculate total score as the sum of Item 1-9.
cols = ['AMAS_1',
'AMAS_2',
'AMAS_3',
'AMAS_4',
'AMAS_5',
'AMAS_6',
'AMAS_7',
'AMAS_8',
'AMAS_9']
df['AMAS_sum'] = df[cols].sum(axis=1)
cols_export = ['ids'] + ['AMAS_sum']
df[cols_export].to_csv('%s/AMAS.csv' % out_dir, decimal='.', index=False)
##############################################################################
########################## self control scale ################################
##############################################################################
def run_SelfCtrl(df, out_dir):
#items to be recoded
items_recoded = ['SCS_2',
'SCS_3',
'SCS_4',
'SCS_5',
'SCS_6',
'SCS_7',
'SCS_8',
'SCS_10',
'SCS_11' ]
#recode items
recoder = {1:5, 2:4, 3:3, 4:2, 5:1 }
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
#Calculate total score as the sum of Item 1-13.
cols = ['SCS_1',
'SCS_2',
'SCS_3',
'SCS_4',
'SCS_5',
'SCS_6',
'SCS_7',
'SCS_8',
'SCS_9',
'SCS_10',
'SCS_11',
'SCS_12',
'SCS_13']
df['SCS_SelfCtrl_sum'] = df[cols].sum(axis=1)
cols_export = ['ids'] + ['SCS_SelfCtrl_sum']
df[cols_export].to_csv('%s/SCS.csv' % out_dir, decimal='.', index=False)
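# Helper sketch (assumption: not part of the original pipeline): the recoder
# dictionaries used throughout this file all implement reverse scoring of
# Likert items; the same thing can be written once as a small function, shown
# here only for illustration.
def reverse_code(series, scale_max, scale_min=1):
    """Reverse-score a Likert item, e.g. on a 1-5 scale 1 becomes 5 and 5 becomes 1."""
    return (scale_max + scale_min) - series.astype('float64')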
##############################################################################
################ Internet Addiction test #####################################
##############################################################################
#note: Item 3 not included due to different scale format
def run_IAT(df, out_dir):
#Calculate total score as the sum of Item 1-19.
cols = ['IAT_1',
'IAT_2',
'IAT_3',
'IAT_4',
'IAT_5',
'IAT_6',
'IAT_7',
'IAT_8',
'IAT_9',
'IAT_10',
'IAT_11',
'IAT_12',
'IAT_13',
'IAT_14',
'IAT_15',
'IAT_16',
'IAT_17',
'IAT_18',
'IAT_19',
'IAT_20']
#recode items
recoder = {1:1, 2:2, 3:3, 4:4, 5:5, 6:0}
for i in cols:
df[i] = df[i].map(recoder).astype('float64')
df['IAT_sum'] = df[cols].sum(axis=1)
cols_export = ['ids'] + ["IAT_sum"]
df[cols_export].to_csv('%s/IAT.csv' % out_dir, decimal='.', index=False)
##############################################################################
########################### Arten innerer Sprache ############################
#################### varieties of inner speech (VIS) #########################
##############################################################################
def run_VIS(df, out_dir=None):
#items to be recoded
items_recoded = ['VIS_7',
'VIS_15']
#recode items
recoder = {1:6, 2:5, 3:4, 4:3, 5:2, 6:1}
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
    #Calculate subscales (Dialogic, Condensed, Other People, Evaluative/Motivational) - sum scores
#dialogic inner speech
df['VIS_dialog_sum'] = df[['VIS_2',
'VIS_6',
'VIS_10',
'VIS_13']].sum(axis=1)
#condensed inner speech
df['VIS_condensed_sum'] = df[['VIS_1',
'VIS_7',
'VIS_8',
'VIS_14',
'VIS_15']].sum(axis=1)
#other people in inner speech
df['VIS_other_sum'] = df[['VIS_3',
'VIS_4',
'VIS_5',
'VIS_12',
'VIS_16']].sum(axis=1)
#evaluative/motivational inner speech
df['VIS_eval_sum'] = df[['VIS_9',
'VIS_11',
'VIS_17',
'VIS_18']].sum(axis=1)
cols_export = ['ids'] + ['VIS_dialog_sum', 'VIS_condensed_sum', 'VIS_other_sum', 'VIS_eval_sum']
df[cols_export].to_csv('%s/VISQ.csv' % out_dir, decimal='.', index=False)
##############################################################################
############# Spontaneous and Deliberate Mind Wandering ######################
##############################################################################
def run_MW_SD(df, out_dir):
df['S-D-MW_delib_mean'] = df[["S-D-MW_1",
"S-D-MW_2",
"S-D-MW_3",
"S-D-MW_4"]].mean(axis=1).round(3)
df['S-D-MW_spont_mean'] = df[["S-D-MW_5",
"S-D-MW_6",
"S-D-MW_7",
"S-D-MW_8"]].mean(axis=1).round(3)
cols_export = ['ids'] + ['S-D-MW_delib_mean', 'S-D-MW_spont_mean']
df[cols_export].to_csv('%s/S-D-MW.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################# short dark triad ##############################
##############################################################################
def run_SDT(df, out_dir):
#items to be recoded
items_recoded = ['SD3_11',
'SD3_15',
'SD3_17',
'SD3_20',
'SD3_25']
#recode items
recoder = {1:5, 2:4, 3:3, 4:2, 5:1 }
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
#Calculate total score as the sum of Item 1-9 for Machiavellism.
df['SD3_Mach_sum'] = df[['SD3_1',
'SD3_2',
'SD3_3',
'SD3_4',
'SD3_5',
'SD3_6',
'SD3_7',
'SD3_8',
'SD3_9']].sum(axis=1)
#Calculate total score as the sum of Item 1-9 for Narcissism.
df['SD3_Narc_sum'] = df[['SD3_10',
'SD3_11',
'SD3_12',
'SD3_13',
'SD3_14',
'SD3_15',
'SD3_16',
'SD3_17',
'SD3_18']].sum(axis=1)
#Calculate total score as the sum of Item 1-9 for Psychopathy.
df['SD3_Psycho_sum'] = df[['SD3_19',
'SD3_20',
'SD3_21',
'SD3_22',
'SD3_23',
'SD3_24',
'SD3_25',
'SD3_26',
'SD3_27']].sum(axis=1)
cols_export = ['ids'] + ['SD3_Mach_sum', 'SD3_Narc_sum', 'SD3_Psycho_sum']
df[cols_export].to_csv('%s/SD3.csv' % out_dir, decimal='.', index=False)
##############################################################################
################################ SDS #########################################
##############################################################################
# social desirability
def run_SDS(df, out_dir):
#items to be recoded
cols = ['SDS_1',
'SDS_2',
'SDS_3',
'SDS_4',
'SDS_5',
'SDS_6',
'SDS_7',
'SDS_8',
'SDS_9',
'SDS_10',
'SDS_11',
'SDS_12',
'SDS_13',
'SDS_14',
'SDS_15',
'SDS_16',
'SDS_17']
#recode items
recoder = {1:1, 2:0}
for i in cols:
df[i] = df[i].map(recoder).astype('float64')
items_reversed = ['SDS_1',
'SDS_4',
'SDS_6',
'SDS_7',
'SDS_11',
'SDS_15',
'SDS_17']
#recode items
recoder = {1:0, 0:1}
for i in items_reversed:
df[i] = df[i].map(recoder).astype('float64')
#Calculate total score as the sum of Item 1-17.
df['SDS_sum'] = df[cols].sum(axis=1)
cols_export = ['ids'] + ['SDS_sum']
df[cols_export].to_csv('%s/SDS.csv' % out_dir, decimal='.', index=False)
##############################################################################
##################### UPPSP - impulsivity ####################################
##############################################################################
def run_UPPSP(df, out_dir):
#items that need to be recoded
items_recoded = ['UPPS_2','UPPS_3','UPPS_5',
'UPPS_7','UPPS_8','UPPS_9',
'UPPS_10','UPPS_12','UPPS_13',
'UPPS_15','UPPS_17','UPPS_18',
'UPPS_20','UPPS_22','UPPS_23',
'UPPS_25','UPPS_26','UPPS_29',
'UPPS_30','UPPS_31','UPPS_34',
'UPPS_35','UPPS_36','UPPS_39',
'UPPS_40','UPPS_41','UPPS_44',
'UPPS_45','UPPS_46','UPPS_47',
'UPPS_49','UPPS_50','UPPS_51',
'UPPS_52','UPPS_54','UPPS_56',
'UPPS_57','UPPS_58','UPPS_59']
#recode items
recoder = {1:4, 2:3, 3:2, 4:1}
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
#calculate subscales (averages)
#Negative Urgency
df['UPPS_Mean_NegUrg'] = df[['UPPS_2',
'UPPS_7',
'UPPS_12',
'UPPS_17',
'UPPS_22',
'UPPS_29',
'UPPS_34',
'UPPS_39',
'UPPS_44',
'UPPS_50',
'UPPS_53',
'UPPS_58']].mean(axis=1).round(3)
#lack of premeditation
df['UPPS_Mean_Premed'] = df[['UPPS_1',
'UPPS_6',
'UPPS_11',
'UPPS_16',
'UPPS_21',
'UPPS_28',
'UPPS_33',
'UPPS_38',
'UPPS_43',
'UPPS_48',
'UPPS_55']].mean(axis=1).round(3)
#lack of perseverance
df['UPPS_Mean_Persev'] = df[['UPPS_4',
'UPPS_9',
'UPPS_14',
'UPPS_19',
'UPPS_24',
'UPPS_27',
'UPPS_32',
'UPPS_37',
'UPPS_42',
'UPPS_47']].mean(axis=1).round(3)
#sensation seeking
df['UPPS_Mean_SS'] = df[['UPPS_3',
'UPPS_8',
'UPPS_13',
'UPPS_18',
'UPPS_23',
'UPPS_26',
'UPPS_31',
'UPPS_36',
'UPPS_41',
'UPPS_46',
'UPPS_51',
'UPPS_56']].mean(axis=1).round(3)
#Positive Urgency
df['UPPS_Mean_PosUrg'] = df[['UPPS_5',
'UPPS_10',
'UPPS_15',
'UPPS_20',
'UPPS_25',
'UPPS_30',
'UPPS_35',
'UPPS_40',
'UPPS_45',
'UPPS_49',
'UPPS_52',
'UPPS_54',
'UPPS_57',
'UPPS_59']].mean(axis=1).round(3)
cols_export = ['ids'] + ['UPPS_Mean_NegUrg', 'UPPS_Mean_Premed', 'UPPS_Mean_Persev', 'UPPS_Mean_SS','UPPS_Mean_PosUrg']
df[cols_export].to_csv('%s/UPPS-P.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################## TPS-D #########################################
################ Tuckmann Procrastination Scale (TPS_D)#######################
##############################################################################
def run_TPS(df, out_dir):
#items to be recoded
items_recoded = ['TPS_7',
'TPS_12',
'TPS_14',
'TPS_16']
#recode items
recoder = {1:5, 2:4, 3:3, 4:2, 5:1 }
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
#Calculate total score as the sum of Item 1-16.
cols = ['TPS_1',
'TPS_2',
'TPS_3',
'TPS_4',
'TPS_5',
'TPS_6',
'TPS_7',
'TPS_8',
'TPS_9',
'TPS_10',
'TPS_11',
'TPS_12',
'TPS_13',
'TPS_14',
'TPS_15',
'TPS_16']
df['TPS_D_sum'] = df[cols].sum(axis=1)
cols_export = ['ids'] + ['TPS_D_sum']
df[cols_export].to_csv('%s/TPS.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################ ASR 18-59 #######################################
##############################################################################
def run_ASR(df, out_dir):
######################## adaptive functioning #################################
##### friends #####
    df['ASR_summary_adaptiveFunctioning_friends_sum'] = df[['ASR_I_A',
'ASR_I_B',
'ASR_I_C',
'ASR_I_D']].sum(axis=1)
##### spouse / partner #####
recoded = ['ASR_II_B', 'ASR_II_E', 'ASR_II_F', 'ASR_II_H']
for item in recoded:
df[item] = -df[item]
df['ASR_summary_adaptiveFunctioning_spouse_sum'] = df[['ASR_II_A',
'ASR_II_B',
'ASR_II_C',
'ASR_II_D',
'ASR_II_E',
'ASR_II_F',
'ASR_II_G',
'ASR_II_H']].sum(axis=1)
##### family #####
# also in literature: 'ASR_summary_adaptiveFunctioning_family_mean'
items = ['ASR_III_A', 'ASR_III_B', 'ASR_III_C',
'ASR_III_D', 'ASR_III_E_1', 'ASR_III_E_2',
'ASR_III_E_3', 'ASR_III_E_4', 'ASR_III_F']
    df['ASR_summary_adaptiveFunctioning_family_sum'] = pd.Series(np.nan, index=df.index)
    for sub in range(len(df)):
        score = 0
        for i in items:
            try:
                if int(df[i].iloc[sub]) in [0, 1, 2, 3]:
                    score += 1
            except (ValueError, TypeError):
                pass
        df.loc[df.index[sub], 'ASR_summary_adaptiveFunctioning_family_sum'] = float(score)
##### job #####
#satisfied_job = df['ASR_IV_E'] is not scored
recoded = ['ASR_IV_B', 'ASR_IV_D', 'ASR_IV_F',
'ASR_IV_G', 'ASR_IV_H', 'ASR_IV_I']
for item in recoded:
df[item] = -df[item]
df['ASR_summary_adaptiveFunctioning_job_sum'] = df[['ASR_IV_A',
'ASR_IV_B',
'ASR_IV_C',
'ASR_IV_D',
'ASR_IV_F',
'ASR_IV_G',
'ASR_IV_H',
'ASR_IV_I']].sum(axis=1)
##### education #####
    # careful with older ages
    # though we're using raw total scores, note that normed scores are only available for ages 18-29
recoded = ['ASR_V_C', 'ASR_V_E']
for item in recoded:
df[item] = -df[item]
df['ASR_summary_adaptiveFunctioning_education_sum'] = df[['ASR_V_A',
'ASR_V_B',
'ASR_V_C',
'ASR_V_D',
#!/usr/local/sci/bin/python2.7
#*****************************
#
# merge _day and _night netCDF files
#
#
#************************************************************************
'''
Author: <NAME>
Created: March 2016
Last update: 12 April 2016
Location: /project/hadobs2/hadisdh/marine/PROGS/Build
-----------------------
CODE PURPOSE AND OUTPUT
-----------------------
Merge outputs from _day and _night to create _both. An alternative approach to the _all files
For uncertainty this assumes correlation of r=1 for SLR, SCN, HGT and C and no correlation (r=0) for R, M and TOT
-----------------------
LIST OF MODULES
-----------------------
utils.py
-----------------------
DATA
-----------------------
Input data stored in:
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2noQC/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSERAclimNBC/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim1NBC/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2NBC/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BCtotal/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BChgt/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BCinstr/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BCtotalship/
-----------------------
HOW TO RUN THE CODE
-----------------------
python2.7 merge_day_night.py --suffix relax --clims --months --start_year YYYY --end_year YYYY --start_month MM --end_month MM (OPTIONAL: one of --doQC1it, --doQC2it, --doQC3it, --doBCtotal, --doBCinstr, --doBChgt, --doNOWHOLE + --ShipOnly)
Run for uncertainty (with BCtotal and ShipOnly)
python2.7 merge_day_night.py --suffix relax --months --start_year YYYY --end_year YYYY --start_month MM --end_month MM --doBCtotal --doUSCN --ShipOnly
(--doUHGT, --doUR, --doUC, --doUM, --doUTOT, --doUSLR)
python2.7 merge_day_night.py --help
will show all options
--clims - run for the climatologies
--months - run for the monthly files (will need years and months)
-----------------------
OUTPUT
-----------------------
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2noQC/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSERAclimNBC/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim1NBC/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2NBC/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BCtotal/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BChgt/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BCinstr/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BCtotalship/
/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/GRIDSOBSclim2BCtotalshipNOWHOLE/
-----------------------
VERSION/RELEASE NOTES
-----------------------
Version 4 (11 May 2020) <NAME>
---------
Enhancements
This now works with --doNOWHOLE which runs a BCtotal version with all of the rounding flagged data removed (run with --ShipOnly)
Changes
Bug fixes
Version 3 (9 Oct 2018) <NAME>
---------
Enhancements
This now works with the uncertainty fields which are only present for --doBCtotal --ShipOnly
Changes
Bug fixes
Version 2 (26 Sep 2016) <NAME>
---------
Enhancements
This can now work with the iterative approach which requires doQCit1, doQCit2 and doQCit3 to set the correct filepaths
It can also work with bias corrected grids which requires --doBCtotal, --doBChgt or --doBCscn
It can also work with --ShipOnly
Look for:
# KATE modified
...
# end
Changes
This hard wires the MEAN in places where I think that is sensible, despite settings.doMedian being set to True.
Look for # KATE MEDIAN WATCH
ACTUALLY - A TEST OF np.mean AND np.median ON A 2-ELEMENT ARRAY GIVES THE SAME ANSWER!!!!
Bug fixes
set_up_merge had issues with start_year = START_YEAR. I commented out the four time elements as these are all defined in the call
to function and do not need to be redefined here
The output latitudes were one box too high (92.5 to -82.5) so I switched the + for a - to solve this
Version 1 (release date)
---------
Enhancements
Changes
Bug fixes
-----------------------
OTHER INFORMATION
-----------------------
'''
import os
import datetime as dt
import numpy as np
import sys
import argparse
import matplotlib
matplotlib.use('Agg')
import calendar
import netCDF4 as ncdf
import pdb
import utils
import set_paths_and_vars
defaults = set_paths_and_vars.set()
#************************************************************************
def do_merge(fileroot, mdi, suffix = "relax", clims = False, doMedian = False, TimeFreq = 'M',
# UNC NEW
doUSLR = False, doUSCN = False, doUHGT = False, doUR = False, doUM = False, doUC = False, doUTOT = False):
'''
Merge the _day and _night files
Do a np.ma.mean or median for the data and a sum for the n_obs and n_grids
Output with a _both suffix
:param str fileroot: root for filenames
:param flt mdi: missing data indicator
:param str suffix: "relax" or "strict" criteria
:param bool clims: if climatologies then don't try and process anomalies.
:param bool doMedian: switch to enforce use of median over means
:param str TimeFreq: note to say which time resolution we're working with to write out - default M = monthly
# UNC NEW
:param bool doUSLR: do solar adjustment uncertainties
:param bool doUSCN: do instrument adjustment uncertainties
:param bool doUHGT: do height adjustment uncertainties
:param bool doUR: do rounding uncertainties
:param bool doUM: do measurement uncertainties
:param bool doUC: do climatology uncertainties
:param bool doUTOT: do total uncertainties
'''
# UNC NEW
    # If there is an uncertainty run set then set uSource to the name of the uncertainty
if doUSLR:
uSource = 'uSLR'
elif doUSCN:
uSource = 'uSCN'
elif doUHGT:
uSource = 'uHGT'
elif doUR:
uSource = 'uR'
elif doUM:
uSource = 'uM'
elif doUC:
uSource = 'uC'
elif doUTOT:
uSource = 'uTOT'
OBS_ORDER = utils.make_MetVars(mdi, multiplier = False)
if clims:
# KW make OBS_ORDER only the actual variables - remove anomalies
NEWOBS_ORDER = []
for v, var in enumerate(OBS_ORDER):
if "anomalies" not in var.name:
NEWOBS_ORDER.append(var)
del OBS_ORDER
OBS_ORDER = np.copy(NEWOBS_ORDER)
del NEWOBS_ORDER
# spin through both periods
for p, period in enumerate(["day", "night"]):
print period
# go through the variables
for v, var in enumerate(OBS_ORDER):
print " {}".format(var.name)
ncdf_file = ncdf.Dataset("{}_{}_{}.nc".format(fileroot, period, suffix),'r', format='NETCDF4')
if v == 0 and p == 0:
if doUSLR | doUSCN | doUHGT | doUR | doUM | doUC | doUTOT:
shape = list(ncdf_file.variables[var.name+"_"+uSource][:].shape)
else:
shape = list(ncdf_file.variables[var.name][:].shape)
shape.insert(0, len(OBS_ORDER)+2) # add all the variables
shape.insert(0, 2) # insert extra dimension to allow day + night
all_data = np.ma.zeros(shape)
if doUSLR | doUSCN | doUHGT | doUR | doUM | doUC | doUTOT:
all_data[p, v] = ncdf_file.variables[var.name+"_"+uSource][:]
else:
all_data[p, v] = ncdf_file.variables[var.name][:]
# get lats/lons of box centres
lat_centres = ncdf_file.variables["latitude"]
# KATE modified - this results in lats that go from 92.5 to -82.5 so I've switched the + for a -
latitudes = lat_centres - (lat_centres[1] - lat_centres[0])/2.
#latitudes = lat_centres + (lat_centres[1] - lat_centres[0])/2.
# end
lon_centres = ncdf_file.variables["longitude"]
longitudes = lon_centres + (lon_centres[1] - lon_centres[0])/2.
# get times - make a dummy object and then populate attributes
times = utils.TimeVar("time", "time since 1/{}/{} in hours".format(1, 1973), "hours", "time")
times.long_name = ncdf_file.variables["time"].long_name
times.standard_name = ncdf_file.variables["time"].standard_name
times.long_name = ncdf_file.variables["time"].long_name
times.units = ncdf_file.variables["time"].units
times.data = ncdf_file.variables["time"][:]
else:
if doUSLR | doUSCN | doUHGT | doUR | doUM | doUC | doUTOT:
all_data[p, v] = ncdf_file.variables[var.name+"_"+uSource][:]
else:
all_data[p, v] = ncdf_file.variables[var.name][:]
# and get n_obs and n_grids
all_data[p, -2] = ncdf_file.variables["n_grids"][:]
all_data[p, -1] = ncdf_file.variables["n_obs"][:]
# invert latitudes
latitudes = latitudes[::-1]
all_data = all_data[:,:,:,::-1,:]
# got all the info, now merge
# If this is an uncertainty field then combine in quadrature with or without correlations
if doMedian: # THIS IS A BIG PILE OF RUBBISH FOR UNCERTAINTY SO DON'T DO IT
# UNC NEW
# Assumed correlating at r=1
if doUSLR | doUSCN | doUHGT | doUC:
merged_data = utils.bn_median(all_data[:, :len(OBS_ORDER)], axis = 0) / np.sqrt(np.ma.count(all_data[:, :len(OBS_ORDER)], axis = 0))
# Assumed no correlation r=0
elif doUR | doUM | doUTOT:
merged_data = utils.bn_median(all_data[:, :len(OBS_ORDER)], axis = 0) / np.sqrt(np.ma.count(all_data[:, :len(OBS_ORDER)], axis = 0))
else:
merged_data = utils.bn_median(all_data[:, :len(OBS_ORDER)], axis = 0)
else:
# Assumed correlating at r=1
if doUSLR | doUSCN | doUHGT | doUC:
# <NAME> thinks that this should be /N rather than /SQRT(N) which will make uncertainties smaller so I'm trying it
# merged_data = np.sqrt(np.ma.power(np.ma.sum(all_data[:, :len(OBS_ORDER)], axis = 0),2.)) / np.sqrt(np.ma.count(all_data[:, :len(OBS_ORDER)], axis = 0))
merged_data = np.sqrt(np.ma.power(np.ma.sum(all_data[:, :len(OBS_ORDER)], axis = 0),2.)) / np.ma.count(all_data[:, :len(OBS_ORDER)], axis = 0)
# print('Doing correlated mean combo:',merged_data)
# pdb.set_trace()
# Assumed no correlation r=0
elif doUR | doUM | doUTOT:
# <NAME> thinks that this should be /N rather than /SQRT(N) which will make uncertainties smaller so I'm trying it
# merged_data = np.sqrt(np.ma.sum(np.ma.power(all_data[:, :len(OBS_ORDER)],2.), axis = 0)) / np.sqrt(np.ma.count(all_data[:, :len(OBS_ORDER)], axis = 0))
merged_data = np.sqrt(np.ma.sum(np.ma.power(all_data[:, :len(OBS_ORDER)],2.), axis = 0)) / np.ma.count(all_data[:, :len(OBS_ORDER)], axis = 0)
# print('Doing uncorrelated mean combo:',merged_data)
# pdb.set_trace()
else:
merged_data = np.ma.mean(all_data[:, :len(OBS_ORDER)], axis = 0)
# print('Doing flat mean combo:',merged_data)
# pdb.set_trace()
# and process the grids and observations (split off here so have incorporated latitude inversion)
n_grids = np.ma.sum(all_data[:, -2], axis = 0)
n_obs = np.ma.sum(all_data[:, -1], axis = 0)
n_obs.fill_value = -1
n_grids.fill_value = -1
# write the output file
# UNC NEW
if doUSLR | doUSCN | doUHGT | doUR | doUM | doUC | doUTOT:
utils.netcdf_write_unc(uSource, "{}_{}_{}.nc".format(fileroot, "both", suffix), merged_data, n_grids, n_obs, OBS_ORDER, latitudes, longitudes, times, frequency = TimeFreq, \
doUSLR = doUSLR, doUSCN = doUSCN, doUHGT = doUHGT, doUR = doUR, doUM = doUM, doUC = doUC, doUTOT = doUTOT)
else:
utils.netcdf_write("{}_{}_{}.nc".format(fileroot, "both", suffix), merged_data, n_grids, n_obs, OBS_ORDER, latitudes, longitudes, times, frequency = TimeFreq)
# test distribution of obs with grid boxes
outfile = file("{}_{}_{}.txt".format(fileroot.split("/")[-1], "both", suffix), "w")
utils.boxes_with_n_obs(outfile, n_obs, merged_data[0], "")
return # do_merge
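#************************************************************************
# Illustrative sketch (assumption: not part of the original build code) of the
# two combinations used for uncertainty fields in do_merge(): components
# assumed fully correlated (r=1) are averaged linearly, while uncorrelated
# components (r=0) are added in quadrature before dividing by the number of
# fields being merged (day and night, so 2).
def combine_uncertainty_pair(u_day, u_night, correlated=True):
    if correlated:
        return (u_day + u_night) / 2.
    return np.sqrt(u_day**2 + u_night**2) / 2.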
#************************************************************************
def get_fileroot(settings, climatology = False, pentads = False, months = [], do3hr = True, time = [], daily = True, stdev = False,
# UNC NEW
doUSLR = False, doUSCN = False, doUHGT = False, doUR = False, doUM = False, doUC = False, doUTOT = False):
'''
    Get the filename
<filename>ptpip/ptpip.py
import uuid
import time
import socket
import struct
class PtpIpConnection(object):
"""docstring for PtpIP"""
def __init__(self):
super(PtpIpConnection, self).__init__()
self.session = None
self.session_events = None
self.session_id = None
self.cmd_queue = []
self.event_queue = []
self.object_queue = []
def open(self, host='192.168.1.1', port=15740):
# Open both session, first one for for commands, second for events
self.session = self.connect(host=host, port=port)
self.send_recieve_ptpip_packet(PtpIpInitCmdReq(), self.session)
self.session_events = self.connect(host=host, port=port)
self.send_recieve_ptpip_packet(PtpIpEventReq(), self.session_events)
# 0x1002 OpenSession
ptip_cmd = PtpIpCmdRequest(cmd=0x1002, param1=struct.unpack('L', self.session_id)[0])
self.send_recieve_ptpip_packet(ptip_cmd, self.session)
def communication_thread(self):
while True:
if len(self.cmd_queue) == 0:
                # Do a ping and expect a pong (the same packet type) in reply to keep the connection alive.
                # Couldn't get any reply to a proper PtpIpPing packet, so the device status
                # is queried instead.
ptpip_packet_reply = self.send_recieve_ptpip_packet(PtpIpCmdRequest(cmd=0x90C8),
self.session)
if isinstance(ptpip_packet_reply, PtpIpCmdResponse):
time.sleep(1)
continue
else:
# get the next command from command the queue
ptip_cmd = self.cmd_queue.pop()
ptpip_packet_reply = self.send_recieve_ptpip_packet(ptip_cmd, self.session)
                if (ptpip_packet_reply.ptp_response_code == 0x2001 or \
                        ptpip_packet_reply.ptp_response_code == 0x2019):
                    print("Cmd sent successfully")
                else:
                    print(f"cmd reply is: {ptpip_packet_reply.ptp_response_code}")
# wait 1 second before new packets are processed/send to the camera
time.sleep(1)
pass
def send_ptpip_cmd(self, ptpip_packet):
self.cmd_queue.append(ptpip_packet)
def connect(self, host='192.168.1.1', port=15740):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
s.connect((host, port))
except socket.error as message:
if s:
s.close()
print(f"Could not open socket: {message}")
return s
def send_recieve_ptpip_packet(self, ptpip_packet, session):
if isinstance(ptpip_packet, PtpIpInitCmdReq):
self.send_data(ptpip_packet.data(), session)
# set the session id of the object if the reply is of type PtpIpInitCmdAck
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpInitCmdAck):
self.session_id = ptpip_packet_reply.session_id
elif isinstance(ptpip_packet, PtpIpEventReq):
self.send_ptpip_event_req(ptpip_packet, session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
elif isinstance(ptpip_packet, PtpIpCmdRequest) and ptpip_packet.ptp_cmd == 0x90C7:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpStartDataPacket):
data_length = struct.unpack('I', ptpip_packet_reply.length)[0]
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
data = ptpip_packet_reply.data
while isinstance(ptpip_packet_reply, PtpIpDataPacket):
data = data + ptpip_packet_reply.data
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if data_length == len(data):
events = PtpIpEventFactory(data).get_events()
for event in events:
self.event_queue.append(event)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
elif isinstance(ptpip_packet, PtpIpCmdRequest) and ptpip_packet.ptp_cmd == 0x1009:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpStartDataPacket):
data_length = struct.unpack('I', ptpip_packet_reply.length)[0]
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
data = ptpip_packet_reply.data
while isinstance(ptpip_packet_reply, PtpIpDataPacket):
data = data + ptpip_packet_reply.data
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if data_length == len(data):
self.object_queue.append(PtpIpDataObject(ptpip_packet.param1, data))
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
else:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
return ptpip_packet_reply
def send_ptpip_event_req(self, ptpip_packet, session):
        # add the session id of the object itself if it is not specified in the packet
if ptpip_packet.session_id is None:
ptpip_packet.session_id = self.session_id
self.send_data(ptpip_packet.data(), session)
def send_data(self, data, session):
session.send(struct.pack('I', len(data) + 4) + data)
def recieve_data(self, session):
data = session.recv(4)
(data_length,) = struct.unpack('I', data)
print(f"Packet length: {data_length}")
while (data_length) > len(data):
data += session.recv(data_length - len(data))
return data[4:]
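# Framing sketch (assumption: illustrative only, not part of the original
# module): every PTP/IP packet on the wire is a 4-byte total-length prefix
# followed by the payload, which is what send_data() and recieve_data() above
# implement.
def frame_packet(payload):
    """Prepend the PTP/IP length header to a raw packet payload."""
    return struct.pack('I', len(payload) + 4) + payload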
class PtpIpPacket(object):
"""docstring for PtpIpCmd"""
def __init__(self):
super(PtpIpPacket, self).__init__()
def factory(self, data=None):
if data is None:
self.cmdtype = None
else:
print(f"Cmd Type: {struct.unpack('I', data[0:4])[0]}")
self.cmdtype = struct.unpack('I', data[0:4])[0]
if self.cmdtype == 1:
return PtpIpInitCmdReq(data[4:])
elif self.cmdtype == 2:
return PtpIpInitCmdAck(data[4:])
elif self.cmdtype == 3:
return PtpIpEventReq(data[4:])
elif self.cmdtype == 4:
return PtpIpEventAck(data[4:])
elif self.cmdtype == 5:
return PtpIpInitFail(data[4:])
elif self.cmdtype == 6:
return PtpIpCmdRequest(data[4:])
elif self.cmdtype == 7:
return PtpIpCmdResponse(data[4:])
elif self.cmdtype == 9:
return PtpIpStartDataPacket(data[4:])
elif self.cmdtype == 10:
return PtpIpDataPacket(data[4:])
elif self.cmdtype == 12:
return PtpIpEndDataPacket(data[4:])
elif self.cmdtype == 13:
return PtpIpPing(data[4:])
def data(self):
pass
class PtpIpInitCmdReq(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitCmdReq, self).__init__()
self.cmdtype = struct.pack('I', 0x01)
self.version = struct.pack('>I', 0x0100)
if data is None:
guid = uuid.uuid4()
self.guid = guid.bytes
self.hostname = socket.gethostname() + '\x00'
self.hostname = self.hostname.encode('utf-16-le')
else:
self.guid = data[0:16]
            self.hostname = data[16:]
def data(self):
return self.cmdtype + self.guid + self.hostname + self.version
class PtpIpInitCmdAck(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitCmdAck, self).__init__()
self.cmdtype = struct.pack('I', 0x02)
if data is not None:
self.session_id = data[0:4]
self.guid = data[4:20]
self.hostname = data[20:]
class PtpIpEventReq(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None, session_id=None):
super(PtpIpEventReq, self).__init__()
self.cmdtype = struct.pack('I', 0x03)
self.session_id = None
if data is not None:
self.session_id = data[0:4]
elif session_id is not None:
self.session_id = session_id
def data(self):
if self.session_id:
return self.cmdtype + self.session_id
return self.cmdtype
class PtpIpEventAck(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpEventAck, self).__init__()
self.cmdtype = struct.pack('I', 0x04)
class PtpIpInitFail(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitFail, self).__init__()
self.cmdtype = struct.pack('I', 0x05)
class PtpIpCmdRequest(PtpIpPacket):
"""
Operation Code Description
0x1001 GetDeviceInfo
0x1002 OpenSession
0x1003 CloseSession
0x1004 GetStorageIDs
0x1005 GetStorageInfo
0x1006 GetNumObjects
0x1007 GetObjectHandles
0x1008 GetObjectInfo
0x1009 GetObject
0x100A GetThumb
0x100B DeleteObject
0x100C SendObjectInfo
0x100D SendObject
0x100E InitiateCapture
0x100F FormatStore
0x1014 GetDevicePropDesc
0x1015 GetDevicePropValue
0x1016 SetDevicePropValue
0x101B GetPartialObject
0x90C0 InitiateCaptureRecInSdram
0x90C1 AfDrive
0x90C2 ChangeCameraMode
0x90C3 DeleteImagesInSdram
0x90C4 GetLargeThumb
0x90C7 GetEvent
0x90C8 DeviceReady
0x90C9 SetPreWbData
0x90CA GetVendorPropCodes
0x90CB AfAndCaptureRecInSdram
0x90CC GetPicCtrlData
0x90CD SetPicCtrlData
0x90CE DeleteCustomPicCtrl
0x90CF GetPicCtrlCapability
0x9201 StartLiveView
0x9202 EndLiveView
0x9203 GetLiveViewImage
0x9204 MfDrive
0x9205 ChangeAfArea
0x9206 AfDriveCancel
0x9207 InitiateCaptureRecInMedia
0x9209 GetVendorStorageIDs
0x920A StartMovieRecInCard
0x920B EndMovieRec
0x920C TerminateCapture
0x9400 GetPartialObjectHighSpeed
0x9407 SetTransferListLock
0x9408 GetTransferList
0x9409 NotifyFileAcquisitionStart
0x940A NotifyFileAcquisitionEnd
0x940B GetSpecificSizeObject
0x9801 GetObjectPropsSupported
0x9802 GetObjectPropDesc
0x9803 GetObjectPropValue
0x9805 GetObjectPropList
"""
def __init__(self, data=None, cmd=None, param1=None, param2=None, param3=None, param4=None,
param5=None):
super(PtpIpCmdRequest, self).__init__()
self.cmdtype = struct.pack('I', 0x06)
        self.unknown = struct.pack('I', 0x01)
self.ptp_cmd = cmd
self.param1 = param1
self.param2 = param2
self.param3 = param3
self.param4 = param4
self.param5 = param5
        # Todo: generate the transaction ID
self.transaction_id = struct.pack('I', 0x06)
        self.args = b''
if self.param1 is not None:
self.args = self.args + struct.pack('L', self.param1)
if self.param2 is not None:
self.args = self.args + struct.pack('L', self.param2)
if self.param3 is not None:
self.args = self.args + struct.pack('L', self.param3)
if self.param4 is not None:
self.args = self.args + struct.pack('L', self.param4)
if self.param5 is not None:
self.args = self.args + struct.pack('L', self.param5)
def data(self):
        return self.cmdtype + self.unknown + struct.pack('H', self.ptp_cmd) + \
self.transaction_id + self.args
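# Usage sketch (assumption: illustrative only, not part of the original
# module): an OpenSession request is a command request with operation code
# 0x1002 carrying the session id as its first parameter, mirroring what
# PtpIpConnection.open() sends above.
def build_open_session_request(session_id=1):
    """Return the raw bytes of an OpenSession (0x1002) command request."""
    return PtpIpCmdRequest(cmd=0x1002, param1=session_id).data()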
class PtpIpCmdResponse(PtpIpPacket):
"""
ResponseCode Description
0x2000 Undefined
0x2001 OK
0x2002 General Error
0x2003 Session Not Open
0x2004 Invalid TransactionID
0x2005 Operation Not Supported
0x2006 Parameter Not Supported
0x2007 Incomplete Transfer
0x2008 Invalid StorageID
0x2009 Invalid ObjectHandle
0x200A DeviceProp Not Supported
0x200B Invalid ObjectFormatCode
0x200C Store Full
0x200D Object WriteProtected
0x200E Store Read-Only
0x200F Access Denied
0x2010 No Thumbnail Present
0x2011 SelfTest Failed
0x2012 Partial Deletion
0x2013 Store Not Available
0x2014 Specification By Format Unsupported
0x2015 No Valid ObjectInfo
0x2016 Invalid Code Format
0x2017 Unknown Vendor Code
0x2018 Capture Already Terminated
0x2019 Device Busy
0x201A Invalid ParentObject
0x201B Invalid DeviceProp Format
0x201C Invalid DeviceProp Value
0x201D Invalid Parameter
0x201E Session Already Open
0x201F Transaction Cancelled
0x2020 Specification of Destination Unsupported
"""
def __init__(self, data=None):
super(PtpIpCmdResponse, self).__init__()
self.cmdtype = struct.pack('I', 0x07)
if data is not None:
self.ptp_response_code = struct.unpack('H', data[0:2])[0]
self.transaction_id = data[2:6]
self.args = data[6:]
class PtpIpStartDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x09)
super(PtpIpStartDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
self.length = data[4:8]
class PtpIpDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x10)
super(PtpIpDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
self.data = data[4:]
class PtpIpCancelTransaction(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x11)
super(PtpIpCancelTransaction, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
class PtpIpEndDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x12)
super(PtpIpEndDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
print(f"transaction_id: {struct.unpack('I', self.transaction_id)[0]}")
self.data = data[4:]
class PtpIpPing(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x13)
super(PtpIpPing, self).__init__()
if data is not None:
self.data = ''
def data(self):
return self.cmdtype
class PtpIpEvent(object):
"""
EventCode Description
0x4001 CancelTransaction
0x4002 ObjectAdded
0x4003 ObjectRemoved
0x4004 StoreAdded
0x4005 StoreRemoved
0x4006 DevicePropChanged
0x4007 ObjectInfoChanged
0x4008 DeviceInfoChanged
0x4009 RequestObjectTransfer
0x400A StoreFull
0x400C StorageInfoChanged
0x400D CaptureComplete
0xC101 ObjectAddedInSdram
0xC102 CaptureCompleteRecInSdram
0xC105 RecordingInterrupted
"""
def __init__(self, event_code, event_parameter):
super(PtpIpEvent, self).__init__()
self.event_code = int(event_code)
self.event_parameter = int(event_parameter)
class PtpIpEventFactory(object):
"""
This is a factory to produce an array of PtpIpEvent objects if it got passd a data reply
from a GetEvent request 0x90C7
"""
def __init__(self, data):
super(PtpIpEventFactory, self).__init__()
# create an empty array for the PtpIpEvent object which will be replied
self.events = []
# get the amount of events passed from the data passed to the factory
amount_of_events = struct.unpack('H', data[0:2])[0]
        # set a counter and an offset of 2 as the first two bytes are already
import os
import sys
import pytest
import tempfile
import time
import random
from collections import defaultdict
import queue
import ray
from ray._private.test_utils import SignalActor
from ray.util.multiprocessing import Pool, TimeoutError
def teardown_function(function):
# Delete environment variable if set.
if "RAY_ADDRESS" in os.environ:
del os.environ["RAY_ADDRESS"]
@pytest.fixture
def pool():
pool = Pool(processes=1)
yield pool
pool.terminate()
pool.join()
ray.shutdown()
@pytest.fixture
def pool_4_processes():
pool = Pool(processes=4)
yield pool
pool.terminate()
pool.join()
ray.shutdown()
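# Minimal usage sketch (assumption: illustrative only, not one of the original
# tests): the fixtures above wrap ray.util.multiprocessing.Pool, which mirrors
# the standard multiprocessing.Pool API of map/apply/terminate/join.
def _example_square(x):
    return x * x


def _example_pool_usage():
    pool = Pool(processes=2)
    try:
        return pool.map(_example_square, range(4))  # -> [0, 1, 4, 9]
    finally:
        pool.terminate()
        pool.join()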
def test_ray_init(shutdown_only):
def getpid(args):
return os.getpid()
def check_pool_size(pool, size):
args = [tuple() for _ in range(size)]
assert len(set(pool.map(getpid, args))) == size
# Check that starting a pool starts ray if not initialized.
pool = Pool(processes=2)
assert ray.is_initialized()
assert int(ray.cluster_resources()["CPU"]) == 2
check_pool_size(pool, 2)
pool.terminate()
pool.join()
ray.shutdown()
# Check that starting a pool doesn't affect ray if there is a local
# ray cluster running.
ray.init(num_cpus=3)
assert ray.is_initialized()
pool = Pool(processes=2)
assert int(ray.cluster_resources()["CPU"]) == 3
check_pool_size(pool, 2)
pool.terminate()
pool.join()
ray.shutdown()
# Check that trying to start a pool on an existing ray cluster throws an
# error if there aren't enough CPUs for the number of processes.
ray.init(num_cpus=1)
assert ray.is_initialized()
with pytest.raises(ValueError):
Pool(processes=2)
assert int(ray.cluster_resources()["CPU"]) == 1
ray.shutdown()
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 1,
"num_nodes": 1,
"do_init": False,
}],
indirect=True)
def test_connect_to_ray(ray_start_cluster):
def getpid(args):
return os.getpid()
def check_pool_size(pool, size):
args = [tuple() for _ in range(size)]
assert len(set(pool.map(getpid, args))) == size
address = ray_start_cluster.address
# Use different numbers of CPUs to distinguish between starting a local
# ray cluster and connecting to an existing one.
start_cpus = 1 # Set in fixture.
init_cpus = 2
# Check that starting a pool still starts ray if RAY_ADDRESS not set.
pool = Pool(processes=init_cpus)
assert ray.is_initialized()
assert int(ray.cluster_resources()["CPU"]) == init_cpus
check_pool_size(pool, init_cpus)
pool.terminate()
pool.join()
ray.shutdown()
# Check that starting a pool connects to a running ray cluster if
# ray_address is passed in.
pool = Pool(ray_address=address)
assert ray.is_initialized()
assert int(ray.cluster_resources()["CPU"]) == start_cpus
check_pool_size(pool, start_cpus)
pool.terminate()
pool.join()
ray.shutdown()
# Set RAY_ADDRESS, so pools should connect to the running ray cluster.
os.environ["RAY_ADDRESS"] = address
# Check that starting a pool connects to a running ray cluster if
# RAY_ADDRESS is set.
pool = Pool()
assert ray.is_initialized()
assert int(ray.cluster_resources()["CPU"]) == start_cpus
check_pool_size(pool, start_cpus)
pool.terminate()
pool.join()
ray.shutdown()
# Check that trying to start a pool on an existing ray cluster throws an
# error if there aren't enough CPUs for the number of processes.
with pytest.raises(Exception):
Pool(processes=start_cpus + 1)
assert int(ray.cluster_resources()["CPU"]) == start_cpus
ray.shutdown()
def test_initializer(shutdown_only):
def init(dirname):
with open(os.path.join(dirname, str(os.getpid())), "w") as f:
print("hello", file=f)
with tempfile.TemporaryDirectory() as dirname:
num_processes = 4
pool = Pool(
processes=num_processes, initializer=init, initargs=(dirname, ))
assert len(os.listdir(dirname)) == 4
pool.terminate()
pool.join()
def test_close(pool_4_processes):
def f(signal):
ray.get(signal.wait.remote())
return "hello"
signal = SignalActor.remote()
result = pool_4_processes.map_async(f, [signal for _ in range(4)])
assert not result.ready()
pool_4_processes.close()
assert not result.ready()
# Signal the head of line tasks to finish.
ray.get(signal.send.remote())
pool_4_processes.join()
# close() shouldn't interrupt pending tasks, so check that they succeeded.
result.wait(timeout=10)
assert result.ready()
assert result.successful()
assert result.get() == ["hello"] * 4
def test_terminate(pool_4_processes):
def f(signal):
return ray.get(signal.wait.remote())
signal = SignalActor.remote()
result = pool_4_processes.map_async(f, [signal for _ in range(4)])
assert not result.ready()
pool_4_processes.terminate()
# terminate() should interrupt pending tasks, so check that join() returns
# even though the tasks should be blocked forever.
pool_4_processes.join()
result.wait(timeout=10)
assert result.ready()
assert not result.successful()
with pytest.raises(ray.exceptions.RayError):
result.get()
def test_apply(pool):
def f(arg1, arg2, kwarg1=None, kwarg2=None):
assert arg1 == 1
assert arg2 == 2
assert kwarg1 is None
assert kwarg2 == 3
return 1
assert pool.apply(f, (1, 2), {"kwarg2": 3}) == 1
with pytest.raises(AssertionError):
pool.apply(f, (
2,
2,
), {"kwarg2": 3})
with pytest.raises(Exception):
pool.apply(f, (1, ))
with pytest.raises(Exception):
pool.apply(f, (1, 2), {"kwarg1": 3})
def test_apply_async(pool):
def f(arg1, arg2, kwarg1=None, kwarg2=None):
assert arg1 == 1
assert arg2 == 2
assert kwarg1 is None
assert kwarg2 == 3
return 1
assert pool.apply_async(f, (1, 2), {"kwarg2": 3}).get() == 1
with pytest.raises(AssertionError):
pool.apply_async(f, (
2,
2,
), {
"kwarg2": 3
}).get()
with pytest.raises(Exception):
pool.apply_async(f, (1, )).get()
with pytest.raises(Exception):
pool.apply_async(f, (1, 2), {"kwarg1": 3}).get()
# Won't return until the input ObjectRef is fulfilled.
def ten_over(args):
signal, val = args
ray.get(signal.wait.remote())
return 10 / val
signal = SignalActor.remote()
result = pool.apply_async(ten_over, ([signal, 10], ))
result.wait(timeout=0.01)
assert not result.ready()
with pytest.raises(TimeoutError):
result.get(timeout=0.01)
# Fulfill the ObjectRef.
ray.get(signal.send.remote())
result.wait(timeout=10)
assert result.ready()
assert result.successful()
assert result.get() == 1
signal = SignalActor.remote()
result = pool.apply_async(ten_over, ([signal, 0], ))
with pytest.raises(ValueError, match="not ready"):
result.successful()
# Fulfill the ObjectRef with 0, causing the task to fail (divide by zero).
ray.get(signal.send.remote())
result.wait(timeout=10)
assert result.ready()
assert not result.successful()
with pytest.raises(ZeroDivisionError):
result.get()
def test_map(pool_4_processes):
def f(index):
return index, os.getpid()
results = pool_4_processes.map(f, range(1000))
assert len(results) == 1000
pid_counts = defaultdict(int)
for i, (index, pid) in enumerate(results):
assert i == index
pid_counts[pid] += 1
# Check that the functions are spread somewhat evenly.
for count in pid_counts.values():
assert count > 100
def bad_func(args):
raise Exception("test_map failure")
with pytest.raises(Exception, match="test_map failure"):
pool_4_processes.map(bad_func, range(100))
def test_map_async(pool_4_processes):
def f(args):
index, signal = args
ray.get(signal.wait.remote())
return index, os.getpid()
signal = SignalActor.remote()
async_result = pool_4_processes.map_async(
f, [(i, signal) for i in range(1000)])
assert not async_result.ready()
with pytest.raises(TimeoutError):
async_result.get(timeout=0.01)
async_result.wait(timeout=0.01)
# Send the signal to finish the tasks.
ray.get(signal.send.remote())
async_result.wait(timeout=10)
assert async_result.ready()
assert async_result.successful()
results = async_result.get()
assert len(results) == 1000
pid_counts = defaultdict(int)
for i, (index, pid) in enumerate(results):
assert i == index
pid_counts[pid] += 1
# Check that the functions are spread somewhat evenly.
for count in pid_counts.values():
assert count > 100
def bad_func(index):
if index == 50:
raise Exception("test_map_async failure")
async_result = pool_4_processes.map_async(bad_func, range(100))
async_result.wait(10)
assert async_result.ready()
assert not async_result.successful()
with pytest.raises(Exception, match="test_map_async failure"):
async_result.get()
def test_starmap(pool):
def f(*args):
return args
args = [tuple(range(i)) for i in range(100)]
assert pool.starmap(f, args) == args
assert pool.starmap(lambda x, y: x + y, zip([1, 2], [3, 4])) == [4, 6]
def test_callbacks(pool_4_processes):
def f(args):
time.sleep(0.1 * random.random())
index = args[0]
err_indices = args[1]
if index in err_indices:
raise Exception("intentional failure")
return index
callback_queue = queue.Queue()
def callback(result):
callback_queue.put(result)
def error_callback(error):
callback_queue.put(error)
# Will not error, check that callback is called.
result = pool_4_processes.apply_async(f, ((0, [1]), ), callback=callback)
assert callback_queue.get() == 0
result.get()
# Will error, check that error_callback is called.
result = pool_4_processes.apply_async(
f, ((0, [0]), ), error_callback=error_callback)
assert isinstance(callback_queue.get(), Exception)
with pytest.raises(Exception, match="intentional failure"):
result.get()
# Test callbacks for map_async.
error_indices = [2, 50, 98]
result = pool_4_processes.map_async(
f, [(index, error_indices) for index in range(100)],
callback=callback,
error_callback=error_callback)
callback_results = []
while len(callback_results) < 100:
callback_results.append(callback_queue.get())
assert result.ready()
assert not result.successful()
# Check that callbacks were called on every result, error or not.
assert len(callback_results) == 100
# Check that callbacks were processed in the order that the tasks finished.
# NOTE: this could be flaky if the calls happened to finish in order due
# to the random sleeps, but it's very unlikely.
assert not all(i in error_indices or i == result
for i, result in enumerate(callback_results))
# Check that the correct callbacks were called on errors/successes.
assert all(index not in callback_results for index in error_indices)
assert [isinstance(result, Exception)
for result in callback_results].count(True) == len(error_indices)
def test_imap(pool_4_processes):
def f(args):
time.sleep(0.1 * random.random())
index = args[0]
err_indices = args[1]
if index in err_indices:
raise Exception("intentional failure")
return index
error_indices = [2, 50, 98]
result_iter = pool_4_processes.imap(
f, [(index, error_indices) for index in range(100)], chunksize=11)
for i in range(100):
result = result_iter.next()
if i in error_indices:
assert isinstance(result, Exception)
else:
assert result == i
with pytest.raises(StopIteration):
result_iter.next()
def test_imap_unordered(pool_4_processes):
def f(args):
time.sleep(0.1 * random.random())
index = args[0]
err_indices = args[1]
if index in err_indices:
raise Exception("intentional failure")
return index
error_indices = [2, 50, 98]
in_order = []
num_errors = 0
result_iter = pool_4_processes.imap_unordered(
f, [(index, error_indices) for index in range(100)], chunksize=11)
for i in range(100):
result = result_iter.next()
if isinstance(result, Exception):
in_order.append(True)
num_errors += 1
else:
in_order.append(result == i)
# Check that the results didn't come back all in order.
# NOTE: this could be flaky if the calls happened to finish in order due
# to the random sleeps, but it's very unlikely.
assert not all(in_order)
assert num_errors == len(error_indices)
with pytest.raises(StopIteration):
result_iter.next()
def test_imap_timeout(pool_4_processes):
def f(args):
index, wait_index, signal | |
"-" + vm3_macvlan_mac_addr + \
"-" + "0.0.0.0/32", "Mac route is absent in EVPN table. "
# checking bridge table
peer = self.agent_inspect[vm2_node_ip].get_vna_layer2_route(
vm2_vrf_id, mac=vm3_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
assert peer == "EVPN", "Peer is not EVPN."
# checking if route for macvlan_ip3 is present in vm2 agent inet table
inspect_h = self.agent_inspect[vm2_node_ip]
route = inspect_h.get_vna_route(
vrf_id=vm2_vrf_id,
ip=self.vm3_macvlan_ip.split("/")[0])
assert route, ('No route seen in inet table for %s' %
(self.vm3_macvlan_ip.split("/")[0]))
# checking if route for macvlan_ip3 is present in vm2 vrouter inet
# table
route = inspect_h.get_vrouter_route_table(vm2_vrf_id,
prefix=self.vm3_macvlan_ip.split('/')[0],
prefix_len='128',
get_nh_details=True,
v6=True)
assert route, ('No route seen in vrouter for %s' %
(self.vm3_macvlan_ip))
nh_id = self.inputs.run_cmd_on_server(
vm2_node_ip,
"contrail-tools rt --dump %s --family inet6 | grep %s | awk '{print $5}' " %
(vm2_vrf_id,
route[0]['prefix'] +
"/" +
route[0]['prefix_len']))
nh_type = self.inputs.run_cmd_on_server(
vm2_node_ip,
"contrail-tools nh --get %s | grep Type | awk {'print $2'}" %
nh_id).split(":")[1]
assert nh_type == "Tunnel", "Nh type is not Tunnel."
# checking stitched MAC addr
stitched_mac_cmd = 'contrail-tools rt --get %s --vrf %d --family inet6 | awk \'{print $6}\'| grep \':\'' % (
self.vm3_macvlan_ip, int(vm2_vrf_id))
output = self.inputs.run_cmd_on_server(
vm2_node_ip, stitched_mac_cmd).split("(")[0]
assert EUI(output, dialect=mac_unix_expanded) == EUI(
vm3_macvlan_mac_addr, dialect=mac_unix_expanded), "Stitched mac address is invalid."
cmd = ['ip link delete macvlan1']
self.vm2_fixture.run_cmd_on_vm(cmd, as_sudo=True)
self.vm3_fixture.run_cmd_on_vm(cmd, as_sudo=True)
return True
# end test_intra_vn_inter_compute_l2l3_pkt_mode
@test.attr(type=['sanity'])
@preposttest_wrapper
def test_intra_vn_intra_compute_l2l3(self):
'''
Description: Learn MAC_IPv6 bindings on VM interface in the same VN and same compute with forwarding mode L2/L3.
Test steps:
1. Create macvlan intf on vm1 and vm4.
Pass criteria:
1. Ping from vm to macvlan intf should go through fine.
2. MAC/IP and MAC/0-IP route should be present in evpn table
3. derived bridge route with peer as EVPN for MAC2
4. L3VPN route for IP2 in agent.
5. On vrouter: Verify stitched mac addr is available
6. On vrouter: Verify POD IP is added to inet table, Encap data replaced with MAC2 in nh
Maintainer : <EMAIL>
'''
cmds_vm1 = ['ip link add macvlan1 link eth0 type macvlan',
'ip link set dev macvlan1 up',
'ip -6 addr add %s dev macvlan1 scope global' % (self.vm1_macvlan_ip.split('/')[0]+'/64'),
'ifup --force eth0']
cmds_vm4 = ['ip link add macvlan1 link eth0 type macvlan',
'ip link set dev macvlan1 up',
'ip -6 addr add %s dev macvlan1 scope global' % (self.vm4_macvlan_ip.split('/')[0]+'/64'),
'ifup --force eth0']
self.vm1_fixture.run_cmd_on_vm(cmds_vm1, as_sudo=True)
self.vm4_fixture.run_cmd_on_vm(cmds_vm4, as_sudo=True)
mac_cmd = ['ifconfig macvlan1 | grep HWaddr | awk \'{ print $5 }\'']
vm1_macvlan_mac_addr = list(
self.vm1_fixture.run_cmd_on_vm(mac_cmd).values())[0]
vm4_macvlan_mac_addr = list(
self.vm4_fixture.run_cmd_on_vm(mac_cmd).values())[0]
# from vm1 to mac4 intf
assert self.vm1_fixture.ping_to_ip(self.vm4_macvlan_ip.split('/')[0])
# ping from macvlan1 intf on vm1 to macvlan intf on vm4
assert self.vm1_fixture.ping_to_ip(
self.vm4_macvlan_ip.split('/')[0], intf="macvlan1")
# ping from macvlan1 intf on vm4 to macvlan intf on vm1
assert self.vm4_fixture.ping_to_ip(
self.vm1_macvlan_ip.split('/')[0], intf="macvlan1")
# checking evpn table
vm1_node_ip = self.vm1_fixture.vm_node_ip
vm1_vrf_id = self.get_vrf_id(self.vn1_fixture, self.vm1_fixture)
evpn_route = self.agent_inspect[vm1_node_ip].get_vna_evpn_route(
vm1_vrf_id,
vxlanid=self.vn1_vxlan_id,
mac=vm4_macvlan_mac_addr,
ip=self.vm4_macvlan_ip)['mac']
assert evpn_route == str(self.vn1_vxlan_id) + "-" + vm4_macvlan_mac_addr + \
"-" + self.vm4_macvlan_ip, "Mac route is absent in EVPN table. "
# 0 ip should also be present
evpn_route = self.agent_inspect[vm1_node_ip].get_vna_evpn_route(
vm1_vrf_id,
vxlanid=self.vn1_vxlan_id,
mac=vm4_macvlan_mac_addr,
ip="0.0.0.0/32")['mac']
assert evpn_route == str(self.vn1_vxlan_id) + "-" + vm4_macvlan_mac_addr + \
"-" + "0.0.0.0/32", "Mac route is absent in EVPN table. "
# checking bridge table
peer = self.agent_inspect[vm1_node_ip].get_vna_layer2_route(
vm1_vrf_id, mac=vm4_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
assert peer == "EVPN", "Peer is not EVPN."
# checking if vm4_macvlan_ip is present in vm1 agent inet table
inspect_h = self.agent_inspect[vm1_node_ip]
route = inspect_h.get_vna_route(
vrf_id=vm1_vrf_id,
ip=self.vm4_macvlan_ip.split("/")[0])
assert route, ('No route seen in agent inet table for %s' %
(self.vm4_macvlan_ip.split("/")[0]))
# checking if vm4_macvlan_ip is present in vm1 vrouter inet table
route = inspect_h.get_vrouter_route_table(vm1_vrf_id,
prefix=self.vm4_macvlan_ip.split('/')[0],
prefix_len='128',
get_nh_details=True,
v6=True)
assert route, ('No route seen in vrouter for %s' %
(self.vm4_macvlan_ip))
nh_id = self.inputs.run_cmd_on_server(
vm1_node_ip,
"contrail-tools rt --dump %s --family inet6 | grep %s | awk '{print $5}' " %
(vm1_vrf_id,
route[0]['prefix'] +
"/" +
route[0]['prefix_len']))
nh_type = self.inputs.run_cmd_on_server(
vm1_node_ip,
"contrail-tools nh --get %s | grep Type | awk {'print $2'}" %
nh_id).split(":")[1]
assert nh_type == "Encap", "Nh type is not Encap."
encap_data = self.inputs.run_cmd_on_server(
vm1_node_ip, r"contrail-tools nh --get %s | grep Encap\ Data" % nh_id).split(":")[1][1:18]
assert vm4_macvlan_mac_addr.replace(
":", " ") == encap_data, "Mac of macvlan intf on vm4 is not present in encap data."
# checking stitched MAC addr
stitched_mac_cmd = 'contrail-tools rt --get %s --vrf %d --family inet6 | awk \'{print $6}\'| grep \':\'' % (
self.vm4_macvlan_ip, int(vm1_vrf_id))
output = self.inputs.run_cmd_on_server(
vm1_node_ip, stitched_mac_cmd).split("(")[0]
assert EUI(output, dialect=mac_unix_expanded) == EUI(
vm4_macvlan_mac_addr, dialect=mac_unix_expanded), "Stitched mac address is invalid."
cmd = ['ip link delete macvlan1']
self.vm1_fixture.run_cmd_on_vm(cmd, as_sudo=True)
self.vm4_fixture.run_cmd_on_vm(cmd, as_sudo=True)
return True
# end test_intra_vn_intra_compute_l2l3
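    # Illustrative note (hypothetical values, not from the original test): the agent's EVPN
    # table keys routes as "<vxlan-id>-<mac>-<ip/prefixlen>", which is what the string
    # concatenations asserted above reconstruct, e.g.:
    #
    #   "5-02:de:ad:be:ef:01-fd00:10:1:1::4/128"   # MAC/IP binding
    #   "5-02:de:ad:be:ef:01-0.0.0.0/32"           # MAC with the zero IP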
@preposttest_wrapper
def test_intra_vn_inter_compute_l2l3(self):
'''
Description: Learn MAC_IPv6 bindings on VM interface in the same VN and different compute with forwarding mode L2/L3.
Test steps:
1. Create macvlan intf on vm2 and vm3.
Pass criteria:
1. Ping from vm to macvlan intf should go through fine.
2. MAC/IP and MAC/0-IP route should be present in evpn table
3. derived bridge route with peer as EVPN for MAC2
4. L3VPN route for IP2 in agent.
5. On vrouter: Verify stitched mac addr is available
6. On vrouter: Verify POD IP is added to inet table, Encap data replaced with MAC2 in nh, nh type is Tunnel
Maintainer : <EMAIL>
'''
cmds_vm2 = ['ip link add macvlan1 link eth0 type macvlan',
'ip link set dev macvlan1 up',
'ip -6 addr add %s dev macvlan1 scope global' % (self.vm2_macvlan_ip.split('/')[0]+'/64'),
'ifup --force eth0']
cmds_vm3 = ['ip link add macvlan1 link eth0 type macvlan',
'ip link set dev macvlan1 up',
'ip -6 addr add %s dev macvlan1 scope global' % (self.vm3_macvlan_ip.split('/')[0]+'/64'),
'ifup --force eth0']
self.vm2_fixture.run_cmd_on_vm(cmds_vm2, as_sudo=True)
self.vm3_fixture.run_cmd_on_vm(cmds_vm3, as_sudo=True)
mac_cmd = ['ifconfig macvlan1 | grep HWaddr | awk \'{ print $5 }\'']
vm2_macvlan_mac_addr = list(
self.vm2_fixture.run_cmd_on_vm(mac_cmd).values())[0]
vm3_macvlan_mac_addr = list(
self.vm3_fixture.run_cmd_on_vm(mac_cmd).values())[0]
# from vm2 to mac3 intf
assert self.vm2_fixture.ping_to_ip(self.vm3_macvlan_ip.split('/')[0])
# ping from macvlan1 intf on vm2 to macvlan intf on vm3
assert self.vm2_fixture.ping_to_ip(
self.vm3_macvlan_ip.split('/')[0], intf="macvlan1")
# ping from macvlan1 intf on vm3 to macvlan intf on vm2
assert self.vm3_fixture.ping_to_ip(
self.vm2_macvlan_ip.split('/')[0], intf="macvlan1")
# checking evpn table
vm2_node_ip = self.vm2_fixture.vm_node_ip
vm2_vrf_id = self.get_vrf_id(self.vn2_fixture, self.vm2_fixture)
evpn_route = self.agent_inspect[vm2_node_ip].get_vna_evpn_route(
vm2_vrf_id,
vxlanid=self.vn2_vxlan_id,
mac=vm3_macvlan_mac_addr,
ip=self.vm3_macvlan_ip)['mac']
assert evpn_route == str(self.vn2_vxlan_id) + "-" + vm3_macvlan_mac_addr + \
"-" + self.vm3_macvlan_ip, "Mac route for macvlan1 is absent in EVPN table. "
# 0 ip should also be present
evpn_route = self.agent_inspect[vm2_node_ip].get_vna_evpn_route(
vm2_vrf_id,
vxlanid=self.vn2_vxlan_id,
mac=vm3_macvlan_mac_addr,
ip="0.0.0.0/32")['mac']
assert evpn_route == str(self.vn2_vxlan_id) + "-" + vm3_macvlan_mac_addr + \
"-" + "0.0.0.0/32", "Mac route is absent in EVPN table. "
# checking bridge table
peer = self.agent_inspect[vm2_node_ip].get_vna_layer2_route(
vm2_vrf_id, mac=vm3_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
assert peer == "EVPN", "Peer is not EVPN."
# checking if route for macvlan_ip3 is present in vm2 agent inet table
inspect_h = self.agent_inspect[vm2_node_ip]
route = inspect_h.get_vna_route(
vrf_id=vm2_vrf_id,
ip=self.vm3_macvlan_ip.split("/")[0])
assert route, ('No route seen in inet table for %s' %
(self.vm3_macvlan_ip.split("/")[0]))
# checking if route for macvlan_ip3 is present in vm2 vrouter inet
# table
route = inspect_h.get_vrouter_route_table(vm2_vrf_id,
prefix=self.vm3_macvlan_ip.split('/')[0],
prefix_len='128',
get_nh_details=True,
v6=True)
assert route, ('No route seen in vrouter for %s' %
(self.vm3_macvlan_ip))
nh_id = self.inputs.run_cmd_on_server(
vm2_node_ip,
"contrail-tools rt --dump %s --family inet6 | grep %s | awk '{print $5}' " %
(vm2_vrf_id,
route[0]['prefix'] +
"/" +
route[0]['prefix_len']))
nh_type = self.inputs.run_cmd_on_server(
vm2_node_ip,
"contrail-tools nh --get %s | grep Type | awk {'print $2'}" %
nh_id).split(":")[1]
assert nh_type == "Tunnel", "Nh type is not Tunnel."
# checking stitched MAC addr
stitched_mac_cmd = 'contrail-tools rt --get %s --vrf %d --family inet6 | awk \'{print $6}\'| grep \':\'' % (
self.vm3_macvlan_ip, int(vm2_vrf_id))
output = self.inputs.run_cmd_on_server(
vm2_node_ip, stitched_mac_cmd).split("(")[0]
assert EUI(output, dialect=mac_unix_expanded) == EUI(
vm3_macvlan_mac_addr, dialect=mac_unix_expanded), "Stitched mac address is invalid."
cmd = ['ip link delete macvlan1']
self.vm2_fixture.run_cmd_on_vm(cmd, as_sudo=True)
self.vm3_fixture.run_cmd_on_vm(cmd, as_sudo=True)
return True
# end test_intra_vn_inter_compute_l2l3
@preposttest_wrapper
def test_intra_vn_intra_compute_vlan_pkt_mode_l2l3(self):
'''
        Description: Learn MAC_IPv6 bindings on a VLAN sub-interface in the same VN and same compute node, with forwarding mode L2/L3 and policy disabled (packet mode).
Test steps:
1. Disable policy on vm1 and vm4
2. Create macvlan intf on vlan intf on vm1 and vm4. Intf | |
"Paper Hearts": 0xCC4466,
"Paper Lamb": 0xF2EBE1,
"Paper Lantern": 0xF2E0C4,
"Paper Plane": 0xF1ECE0,
"Paper Sack": 0xB4A07A,
"Paper Tiger": 0xFDF1AF,
"Paper White": 0xEEF0F3,
"Paperboy's Lawn": 0x249148,
"Paperwhite": 0xF6EFDF,
"Papier Blanc": 0xEFEADC,
"<NAME>": 0x8590AE,
"Pappardelle Noodle": 0xF9EBCC,
"Paprika": 0x7C2D37,
"Papyrus": 0x999911,
"Papyrus Map": 0xC0AC92,
"Papyrus Paper": 0xF5EDD6,
"Par Four": 0x507069,
"Par Four Green": 0x3F8F45,
"Parachute": 0xBEB755,
"Parachute Purple": 0x392852,
"Parachute Silk": 0xFFE2B5,
"Parachuting": 0x00589B,
"Paradise": 0xDEF1EA,
"Paradise Bird": 0xFF8C55,
"Paradise City": 0x5F7475,
"Paradise Found": 0x83988C,
"Paradise Grape": 0x746565,
"Paradise Green": 0xB2E79F,
"Paradise Island": 0x5AA7A0,
"Paradise Landscape": 0x009494,
"Paradise of Greenery": 0x398749,
"Paradise Palms": 0x006622,
"Paradise Pink": 0xE4445E,
"Paradise Sky": 0x66C6D0,
"Paradiso": 0x488084,
"Parador Inn": 0xA99A8A,
"Parador Stone": 0x908D86,
"Parakeet": 0x78AE48,
"Parakeet Blue": 0x7EB6FF,
"Parakeet Green": 0x1AA36D,
"Parakeet Pete": 0xCBD3C6,
"Paramount": 0x5B6161,
"Parasailing": 0x00736C,
"Parasite Brown": 0x914B13,
"Parasol": 0xE9DFDE,
"<NAME>": 0x824A53,
"Parchment": 0xFEFCAF,
"Parchment Paper": 0xF0E7D8,
"Parchment White": 0xF9EAE5,
"Parfait": 0xC8A6A1,
"Parfait d'Amour": 0x734F96,
"Parfait Pink": 0xE9C3CF,
"Paris": 0x91A7BC,
"Paris Blue": 0xB7DDED,
"Paris Creek": 0x888873,
"Paris Daisy": 0xFBEB50,
"Paris Green": 0x50C87C,
"Paris M": 0x312760,
"Paris Paving": 0x737274,
"Paris Pink": 0xDA6D91,
"Paris White": 0xBFCDC0,
"Parisian Blue": 0x4F7CA4,
"Parisian Cafè": 0xA49085,
"Parisian Cashmere": 0xD1C7B8,
"Parisian Green": 0x6B9C42,
"Parisian Night": 0x323441,
"Parisian Patina": 0x7D9B89,
"Parisian Violet": 0x787093,
"Park Avenue": 0x465048,
"Park Bench": 0x537F6C,
"Park Green Flat": 0x88C9A6,
"Park Picnic": 0x428F46,
"Parkview": 0x46483E,
"Parkwater": 0x477BBD,
"Parlor Rose": 0xBAA1B2,
"Parlour Blue": 0x465F7E,
"Parlour Red": 0xA12D5D,
"Parma Grey": 0x806E85,
"Parma Mauve": 0x5F5680,
"Parma Plum Red": 0x5E3958,
"Parma Violet": 0x55455A,
"Parmentier": 0x887CAB,
"Parmesan": 0xFFFFDD,
"Parrot Green": 0x8DB051,
"Parrot Pink": 0xD998A0,
"Parrot Tulip": 0xEEBFD5,
"Parsley": 0x305D35,
"Parsley Green": 0x5A9F4D,
"Parsley Sprig": 0x3D7049,
"Parsnip": 0xD6C69A,
"Partial Pink": 0xFFEDF8,
"Particle Cannon": 0xDEF3E6,
"Particle Ioniser Red": 0xCB3215,
"Particular Mint": 0xD0D2C5,
"Partly Cloudy": 0x9DBBCD,
"Partridge": 0x844C44,
"Partridge Grey": 0x919098,
"Partridge Knoll": 0xA9875B,
"Party Hat": 0xCAC1E2,
"Party Pig": 0xEE99FF,
"Party Time": 0xD0252F,
"Partytime": 0xE3A9C4,
"Pasadena Rose": 0xA84A49,
"Paseo Verde": 0x929178,
"Pasha Brown": 0xC3B7A4,
"Paspalum Grass": 0xB9BD97,
"Pass Time Blue": 0x5D98B3,
"Passion Flower": 0x6D5698,
"Passion Fruit": 0x907895,
"Passion Fruit Punch": 0xE8AA9D,
"Passion Plum": 0x9C5F77,
"Passion Potion": 0xE398AF,
"Passion Razz": 0x59355E,
"Passionate Blue": 0x1F3465,
"Passionate Blueberry": 0x334159,
"Passionate Pause": 0xEDEFCB,
"Passionate Pink": 0xDD00CC,
"Passionate Plum": 0x753A58,
"Passionate Purple": 0x882299,
"Passionfruit Mauve": 0x513E49,
"Passive": 0xCBCCC9,
"Passive Pink": 0xDBA29E,
"Passive Royal": 0x795365,
"Pasta": 0xF7DFAF,
"Pasta Rasta": 0xEEC474,
"Pastel Blue": 0xA2BFFE,
"Pastel Brown": 0x836953,
"Pastel China": 0xF0E4E0,
"Pastel Day": 0xDFD8E1,
"Pastel Green": 0x77DD77,
"Pastel Grey": 0xCFCFC4,
"Pastel Grey Green": 0xBCCBB9,
"Pastel Jade": 0xD2F0E0,
"Pastel Lavender": 0xD8A1C4,
"Pastel Lilac": 0xBDB0D0,
"Pastel Magenta": 0xF49AC2,
"Pastel Mint": 0xCEF0CC,
"Pastel Mint Green": 0xADD0B3,
"Pastel Orange": 0xFF964F,
"Pastel Parchment": 0xE5D9D3,
"Pastel Pea": 0xBEE7A5,
"Pastel Peach": 0xF1CAAD,
"Pastel Pink": 0xDEA5A4,
"Pastel Purple": 0xB39EB5,
"Pastel Red": 0xFF6961,
"Pastel Rose Tan": 0xE9D1BF,
"Pastel Sand": 0xD5C6B4,
"Pastel Smirk": 0xDEECE1,
"Pastel Turquoise": 0x99C5C4,
"Pastel Violet": 0xCB99C9,
"Pastel Yellow": 0xFDFD96,
"Pastoral": 0xEDFAD9,
"Pastry": 0xF8DEB8,
"Pastry Dough": 0xFAEDD5,
"Pastry Shell": 0xBD8C66,
"Pasture Green": 0x506351,
"Patch of Land": 0x225511,
"Patches": 0x8A7D6B,
"Patchwork Pink": 0xC4A89E,
"Patchwork Plum": 0x7E696A,
"Paternoster": 0xC7C7C6,
"Path to the Sky": 0xC4EEE8,
"Pathway": 0xDBD6D2,
"Patience": 0xE6DDD6,
"Patient White": 0xEDE2DE,
"Patina": 0x639283,
"Patina Creek": 0xB6C4BD,
"Patina Green": 0xB9EAB3,
"Patina Violet": 0x695A67,
"Patio Green": 0x3F5A50,
"Patio Stone": 0x6B655B,
"Patriarch": 0x800070,
"Patrice": 0x8CD9A1,
"Patrician Purple": 0x6C4E79,
"Patrinia Flowers": 0xD9B611,
"Patrinia Scabiosaefolia": 0xF2F2B0,
"Patriot Blue": 0x363756,
"Pattens Blue": 0xD3E5EF,
"Pattipan": 0xBCC6B1,
"Paua": 0x2A2551,
"Paua Shell": 0x245056,
"Pauley": 0x629191,
"Pauper": 0x343445,
"Paved Path": 0x828782,
"Pavement": 0x524D50,
"Pavestone": 0xC9C4BA,
"Pavilion": 0xBEBF84,
"Pavilion Beige": 0xC5B6A4,
"Pavilion Peach": 0xDF9C45,
"Pavillion": 0xEDE4D4,
"Paving Stone": 0xA8A498,
"Paving Stones": 0xCBCCC4,
"Pavlova": 0xBAAB87,
"Paw Paw": 0xFBD49C,
"Paw Print": 0x827A6D,
"Pawn Broker": 0x473430,
"Pax": 0xC8C6DA,
"Payne's Grey": 0x536878,
"PCB Green": 0x002D04,
"Pea": 0xA4BF20,
"Pea Aubergine Green": 0x7C9865,
"Pea Case": 0x709D3D,
"Pea Green": 0x8EAB12,
"Pea Soup": 0x929901,
"Pea Soup Green": 0x94A617,
"Peabody": 0x3F7074,
"Peace": 0xA2B2BD,
"Peace N Quiet": 0xCACFE0,
"Peace of Mind": 0xC1875F,
"Peace River": 0xA8BFCC,
"Peace Yellow": 0xEECF9E,
"Peaceable Kingdom": 0xDDCCAC,
"Peaceful Blue": 0x9AB6C0,
"Peaceful Glade": 0x878E83,
"Peaceful Night": 0xD6E7E3,
"Peaceful Pastures": 0x94D8AC,
"Peaceful Peach": 0xFFDDCD,
"Peaceful Purple": 0x660088,
"Peaceful Rain": 0xF1FBF1,
"Peaceful River": 0x47A0D2,
"Peach": 0xFFB07C,
"Peach A La Mode": 0xEFC9AA,
"Peach Amber": 0xFB9F93,
"Peach Ash": 0xEFC4BB,
"Peach Beauty": 0xE7C3AB,
"Peach Beige": 0xD3A297,
"Peach Bellini": 0xFEDCAD,
"Peach Bloom": 0xD99B7C,
"Peach Blossom": 0xDE8286,
"Peach Blossom Red": 0xEECFBF,
"Peach Blush": 0xE4CCC6,
"Peach Breeze": 0xFFECE5,
"Peach Brick": 0xE5CCBD,
"Peach Bud": 0xFDB2AB,
"Peach Buff": 0xCC99BB,
"Peach Burst": 0xF39998,
"Peach Butter": 0xFFAC3A,
"Peach Caramel": 0xC5733D,
"Peach Cider": 0xFFD9AA,
"Peach Cloud": 0xFCE2D8,
"Peach Cobbler": 0xFFB181,
"Peach Crayon": 0xFFCBA7,
"Peach Cream": 0xFFF0DB,
"Peach Crème Brûlée": 0xFFE19D,
"Peach Damask": 0xF6C4A6,
"Peach Darling": 0xEFCDB4,
"Peach Dip": 0xF4DEBF,
"Peach Dust": 0xF0D8CC,
"Peach Echo": 0xF7786B,
"Peach Everlasting": 0xF4E2D4,
"Peach Fade": 0xFCE9D6,
"Peach Fizz": 0xFFA883,
"Peach Flower": 0xE198B4,
"Peach Fury": 0xF88435,
"Peach Fuzz": 0xFFC7B9,
"Peach Glow": 0xFFDCAC,
"Peach Juice": 0xFFCFAB,
"Peach Latte": 0xE7C19F,
"Peach Macaron": 0xC67464,
"Peach Melba": 0xFBBDAF,
"Peach Mimosa": 0xF4A28C,
"Peach Nectar": 0xFFB59B,
"Peach Nirvana": 0xEDB48F,
"Peach Nougat": 0xE6AF91,
"Peach of Mind": 0xFFE2B4,
"Peach Orange": 0xFFCC99,
"Peach Parfait": 0xF8BFA8,
"Peach Patch": 0xF3D5A1,
"Peach Pearl": 0xFFB2A5,
"Peach Pink": 0xFF9A8A,
"Peach Poppy": 0xDDAAAA,
"Peach Powder": 0xE2BDB3,
"Peach Preserve": 0xD29487,
"Peach Puff": 0xFFDAB9,
"Peach Puree": 0xEFCFBA,
"Peach Quartz": 0xF5B895,
"Peach Red": 0xF9CDC4,
"Peach Rose": 0xF6E3D5,
"Peach Sachet": 0xF6D9C9,
"Peach Schnapps": 0xFFDCD6,
"Peach Shortcake": 0xF3DFD4,
"Peach Smoothie": 0xFFE5BD,
"Peach Souffle": 0xECBCB2,
"Peach Surprise": 0xF3E3D1,
"Peach Temptation": 0xF2C5B2,
"Peach Tile": 0xEFA498,
"Peach Tone": 0xF2E3DC,
"Peach Umbrella": 0xF9E8CE,
"Peach Whip": 0xDBBEB7,
"Peach Yellow": 0xFADFAD,
"Peachade": 0xFADFC7,
"Peaches of Immortality": 0xD98586,
"Peaches'n'Cream": 0xEEC9A6,
"Peachskin": 0xDFB8B6,
"Peachtree": 0xF3DDCD,
"Peachy Bon-Bon": 0xFFD2B9,
"Peachy Confection": 0xD4A88D,
"Peachy Ethereal": 0xFDE0DC,
"Peachy Feeling": 0xED8666,
"Peachy Keen": 0xFFDEDA,
"Peachy Maroney": 0xE8956B,
"Peachy Milk": 0xF3E0D8,
"Peachy Pico": 0xFFCCAA,
"Peachy Pinky": 0xFF775E,
"Peachy Sand": 0xFFDCB7,
"Peachy Scene": 0xDD7755,
"Peachy Skin": 0xF0CFA0,
"Peacoat": 0x2B2E43,
"Peacock Blue": 0x016795,
"Peacock Feather": 0x12939A,
"Peacock Green": 0x006A50,
"Peacock Plume": 0x206D71,
"Peacock Pride": 0x006663,
"Peacock Purple": 0x513843,
"Peacock Silk": 0x6DA893,
"Peacock Tail": 0x01636D,
"Peahen": 0x719E8A,
"Peak Point": 0x768083,
"Peak Season": 0xFFDFC9,
"Peanut": 0x7A4434,
"Peanut Brittle": 0xA6893A,
"Peanut Butter": 0xBE893F,
"Peanut Butter Chicken": 0xFFB75F,
"Peanut Butter Crust": 0xC8A38A,
"Peanut Butter Jelly": 0xCE4A2D,
"Peapod": 0x82B185,
"Peapod Green": 0x8E916D,
"Pear": 0xD1E231,
"Pear Cactus": 0x91AF88,
"Pear Perfume": 0xCCDD99,
"Pear Sorbet": 0xF3EAC3,
"Pear Spritz": 0xCBF85F,
"Pearl": 0xEAE0C8,
"Pearl Aqua": 0x88D8C0,
"Pearl Ash": 0xD0C9C3,
"Pearl Bay": 0x7FC6CC,
"Pearl Blue": 0x79B4C9,
"Pearl Blush": 0xF4CEC5,
"Pearl Brite": 0xE6E6E3,
"Pearl Bush": 0xDED1C6,
"Pearl City": 0xDCE4E9,
"Pearl Drops": 0xF0EBE4,
"Pearl Dust": 0xEFE5D9,
"Pearl Gray": 0xCBCEC5,
"Pearl Grey": 0xB0B7BE,
"Pearl Lusta": 0xEAE1C8,
"Pearl Necklace": 0xFCF7EB,
"Pearl Onion": 0xEEEFE1,
"Pearl Oyster": 0xDDD6CB,
"Pearl Pebble": 0xDED7DA,
"Pearl Rose": 0xDFD3D4,
"Pearl Sugar": 0xF4F1EB,
"Pearl Violet": 0xE6E0E3,
"Pearl White": 0xF3F2ED,
"Pearl Yellow": 0xF1E3BC,
"Pearled Couscous": 0xF2E9D5,
"Pearled Ivory": 0xF0DFCC,
"Pearls & Lace": 0xDCD0CB,
"Pearls and Lace": 0xEEE7DC,
"Pearly Flesh": 0xF4E3DF,
"Pearly Purple": 0xB768A2,
"Pearly Putty": 0xDBD3BD,
"Pearly Star": 0xE4E4DA,
"Pearly Swirly": 0xEEE9D8,
"Pearly White": 0xFEEFD3,
"Peas in a Pod": 0x7B9459,
"Peas In A Pod": 0xA9D689,
"Peas Please": 0x8C7F3C,
"Peaslake": 0x8CAA95,
"Peat": 0x766D52,
"Peat Brown": 0x5A3D29,
"Peat Red Brown": 0x6C5755,
"Peat Swamp Forest": 0x988C75,
"Peaty Brown": 0x552211,
"Pebble": 0x9D9880,
"Pebble Beach": 0x7F8285,
"Pebble Cream": 0xF3E1CA,
"Pebble Path": 0xD5BC94,
"Pebble Soft Blue White": 0xD3D7DC,
"Pebble Stone": 0xE0D9DA,
"Pebble Walk": 0xAFB2A7,
"Pebblebrook": 0xD8D0BC,
"Pebbled Courtyard": 0xDECAB9,
"Pebbled Path": 0xA0968D,
"Pebbled Shore": 0xDBD5CA,
"Pebbles": 0xDED8DC,
"Pecan": 0xB17D64,
"Pecan Brown": 0xA36E51,
"Pecan Sandie": 0xF4DECB,
"Pecan Veneer": 0xE09F78,
"Peche": 0xFDDCB7,
"Pecos Spice": 0xE1A080,
"Pedestrian Green": 0x00BB22,
"Pedestrian Lemon": 0xFFFF22,
"Pedestrian Red": 0xCC1122,
"Pedigree": 0x31646E,
"Pediment": 0xD3CCC4,
"Peek a Blue": 0xC5E1E1,
"Peekaboo": 0xE6DEE6,
"Peeled Asparagus": 0x87A96B,
"Peeps": 0xFFCF38,
"Peevish Red": 0xFF2266,
"Pegasus": 0xE8E9E4,
"Pegeen Peony": 0xEA9FB4,
"Pekin Chicken": 0xF5D2AC,
"Pelagic": 0x355D83,
"Pelati": 0xFF3333,
"Pelican": 0xC1BCAC,
"Pelican Bay": 0x9EACB1,
"Pelican Bill": 0xD7C0C7,
"Pelican Feather": 0xE8C3C2,
"Pelican Pecker": 0xFB9A30,
"Pelican Pink": 0xE2A695,
"Pelican Tan": 0xC8A481,
"Pelorus": 0x2599B2,
"Pencil Eraser": 0xDBB7BB,
"Pencil Lead": 0x5C6274,
"Pencil Point": 0x595D61,
"Pencil Sketch": 0x999D9E,
"Pendula Garden": 0x7B8267,
"Penelope": 0xE3E3EB,
"Penelope Pink": 0x9D6984,
"Peninsula": 0x37799C,
"Penna": 0xB9C8E0,
"Pennywise": 0xA2583A,
"Pensive": 0xC2C1CB,
"Pensive Pink": 0xEAB6AD,
"Pentagon": 0x96CCD1,
"Pentalon": 0xDBB2BC,
"Penthouse View": 0xCABFB3,
"Penzance": 0x627E75,
"Peony": 0xED9CA8,
"Peony Blush": 0xD8C1BE,
"Peony Mauve": 0x9F86B7,
"Peony Pink": 0xE38C7F,
"Peony Prize": 0xFADDD4,
"People's Choice": 0xB6A8D0,
"Pepper Grass": 0x7C9D47,
"Pepper Green": 0x007D60,
"Pepper Jelly": 0xCC2244,
"Pepper Mill": 0x777568,
"Pepper Spice": 0x8E7059,
"Pepper | |
<reponame>dbanys/glide-text2im<filename>glide_text2im/clip/encoders.py
import math
from collections import OrderedDict
from typing import List, Optional, Tuple, cast
import attr
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .attention import (
AttentionInfo,
DenseAttentionMask,
DenseCausalAttentionMask,
make_full_layout,
to_attention_info,
)
from .utils import Affine, LayerNorm, zero_key_bias_grad
# Constants used in the original CLIP implementation.
image_channel_means = [122.77093945, 116.74601272, 104.09373519]
image_channel_stds = [68.50053285, 66.63215831, 70.32316309]
@attr.s(repr=False)
class TextEmbedding(nn.Module):
n_vocab: int = attr.ib()
n_context: int = attr.ib()
n_state: int = attr.ib()
device: torch.device = attr.ib(default=torch.device("cuda"))
def __attrs_post_init__(self) -> None:
super().__init__()
w_voc = torch.empty((self.n_vocab, self.n_state), dtype=torch.float32, device=self.device)
w_pos = torch.empty((self.n_context, self.n_state), dtype=torch.float32, device=self.device)
with torch.no_grad():
w_voc.normal_(std=0.02)
w_pos.normal_(std=0.01)
self.w_voc = nn.Parameter(w_voc)
self.w_pos = nn.Parameter(w_pos)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if len(x.shape) != 2:
raise ValueError()
return F.embedding(x, self.w_voc) + self.w_pos[None, :, :]
@attr.s(repr=False)
class ImageEmbedding(nn.Module):
image_size: int = attr.ib()
patch_size: int = attr.ib()
n_state: int = attr.ib()
n_timestep: int = attr.ib(default=0)
device: torch.device = attr.ib(default=torch.device("cuda"))
def __attrs_post_init__(self) -> None:
super().__init__()
if self.image_size % self.patch_size != 0:
raise ValueError()
n_patch = self.image_size // self.patch_size
patch_proj = torch.empty(
(self.n_state, 3) + 2 * (self.patch_size,), dtype=torch.float32, device=self.device
)
w_pos = torch.empty(
(1 + n_patch ** 2, self.n_state), dtype=torch.float32, device=self.device
)
with torch.no_grad():
if self.n_timestep == 0:
pred_state = torch.empty((self.n_state,), dtype=torch.float32, device=self.device)
pred_state.normal_(std=1 / np.sqrt(self.n_state))
self.pred_state = nn.Parameter(pred_state)
else:
w_t = torch.empty(
(self.n_timestep, self.n_state), dtype=torch.float32, device=self.device
)
w_t.normal_(std=1 / np.sqrt(self.n_state))
self.w_t = nn.Parameter(w_t)
patch_proj.normal_(std=np.sqrt(2 / (self.n_state * self.patch_size ** 2)))
w_pos.normal_(std=1 / np.sqrt(self.n_state))
self.patch_proj = nn.Parameter(patch_proj)
self.w_pos = nn.Parameter(w_pos)
self.channel_means = torch.tensor(
image_channel_means, dtype=torch.float32, device=self.device
)[None, :, None, None]
self.channel_stds = torch.tensor(
image_channel_stds, dtype=torch.float32, device=self.device
)[None, :, None, None]
self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
def forward(self, x: torch.Tensor, t: Optional[torch.Tensor] = None) -> torch.Tensor:
if len(x.shape) != 4:
raise ValueError("input should be 4d")
if x.shape[1] != 3:
raise ValueError("input should have 3 channels")
if not (x.shape[2] == self.image_size and x.shape[3] == self.image_size):
raise ValueError(f"input is not {self.image_size} x {self.image_size}")
if (self.n_timestep == 0 and t is not None) or (self.n_timestep != 0 and t is None):
raise ValueError()
if self.n_timestep != 0:
assert t is not None
if len(t.shape) != 1:
raise ValueError()
if t.shape[0] != x.shape[0]:
raise ValueError()
x = (x - self.channel_means) / self.channel_stds
x = F.conv2d(x, self.patch_proj, stride=self.patch_size)
x = x.reshape(x.shape[0], self.n_state, (self.image_size // self.patch_size) ** 2).permute(
0, 2, 1
)
sot = (
self.pred_state[None, None].expand(x.shape[0], -1, -1)
if self.n_timestep == 0
else F.embedding(cast(torch.Tensor, t), self.w_t)[:, None]
)
x = torch.cat((sot, x), dim=1) + self.w_pos[None]
return self.ln(x)
@attr.s(repr=False)
class AttentionResblock(nn.Module):
n_state: int = attr.ib()
n_resblocks: int = attr.ib()
attn_fn: AttentionInfo = attr.ib()
device: torch.device = attr.ib(default=torch.device("cuda"))
def __attrs_post_init__(self) -> None:
super().__init__()
self.n_head_state = self.n_state // self.attn_fn.n_heads
self.qk_scale = 1 / np.sqrt(self.n_head_state)
self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
self.f_q = Affine(
self.n_state,
self.n_state,
std=1 / math.sqrt(self.n_state),
use_bias=True,
bias_filter_fn=zero_key_bias_grad,
device=self.device,
)
self.f_k = Affine(
self.n_state,
self.n_state,
std=1 / math.sqrt(self.n_state),
use_bias=False,
bias_filter_fn=zero_key_bias_grad,
device=self.device,
)
self.f_v = Affine(
self.n_state,
self.n_state,
std=1 / math.sqrt(self.n_state),
use_bias=True,
bias_filter_fn=zero_key_bias_grad,
device=self.device,
)
self.f_c = Affine(
self.n_state,
self.n_state,
use_bias=True,
std=1 / np.sqrt(self.n_state * self.n_resblocks ** 2),
device=self.device,
) # XXX
def forward(self, m: torch.Tensor) -> torch.Tensor:
n_context = m.shape[1]
n_query_pad = self.attn_fn.ctx_blks_q * self.attn_fn.block_size - n_context
n_key_pad = self.attn_fn.ctx_blks_k * self.attn_fn.block_size - n_context
assert n_query_pad >= 0
assert n_key_pad >= 0
r = m
r = self.ln(r)
q, k, v = self.f_q(r), self.f_k(r), self.f_v(r)
if n_query_pad != 0:
q = F.pad(q, (0, 0, 0, n_query_pad))
if n_key_pad != 0:
k = F.pad(k, (0, 0, 0, n_key_pad))
v = F.pad(v, (0, 0, 0, n_key_pad))
q = q.view([q.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3))
k = k.view([k.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3))
v = v.view([v.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3))
w = torch.einsum(
"bhcd,bhkd->bhck", q * math.sqrt(self.qk_scale), k * math.sqrt(self.qk_scale)
)
if hasattr(self.attn_fn, "pytorch_attn_bias"):
bias = self.attn_fn.pytorch_attn_bias
assert len(bias.shape) in {2, 3}
if len(bias.shape) == 2:
w = torch.softmax(w + self.attn_fn.pytorch_attn_bias[None, None], dim=-1)
elif len(bias.shape) == 3:
w = torch.softmax(w + self.attn_fn.pytorch_attn_bias[None], dim=-1)
else:
w = torch.softmax(w, dim=-1)
r = torch.einsum("bhck,bhkd->bhcd", w, v)
r = r.permute((0, 2, 1, 3)).reshape((r.shape[0], -1, self.n_state))
if n_query_pad != 0:
r = r[:, :-n_query_pad]
assert r.shape[1] == n_context
r = self.f_c(r)
return m + r
@attr.s(repr=False)
class FullyConnectedResblock(nn.Module):
"""
Not imported from other files because we retain Alec's original inits.
"""
n_state: int = attr.ib()
n_resblocks: int = attr.ib()
device: torch.device = attr.ib(default=torch.device("cuda"))
def __attrs_post_init__(self) -> None:
super().__init__()
self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
self.f_1 = Affine(
self.n_state,
4 * self.n_state,
use_bias=True,
std=np.sqrt(2 / (4 * self.n_state)),
device=self.device,
)
self.f_2 = Affine(
4 * self.n_state,
self.n_state,
use_bias=True,
std=1 / np.sqrt(self.n_state * self.n_resblocks ** 2),
device=self.device,
) # XXX
def forward(self, m: torch.Tensor) -> torch.Tensor:
r = m
r = self.ln(r)
r = self.f_2(F.gelu(self.f_1(r)))
return m + r
@attr.s(repr=False)
class TransformerBlock(nn.Module):
n_state: int = attr.ib()
n_resblocks: int = attr.ib()
attn_fn: AttentionInfo = attr.ib()
device: torch.device = attr.ib(default=torch.device("cuda"))
def __attrs_post_init__(self) -> None:
super().__init__()
self.f_attn = AttentionResblock(
self.n_state,
self.n_resblocks,
self.attn_fn,
self.device,
)
self.f_mlp = FullyConnectedResblock(self.n_state, self.n_resblocks, self.device)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.f_mlp(self.f_attn(x))
@attr.s(repr=False)
class TextFeatureExtractor(nn.Module):
n_state: int = attr.ib()
n_embd: int = attr.ib()
device: torch.device = attr.ib(default=torch.device("cuda"))
def __attrs_post_init__(self) -> None:
super().__init__()
self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
self.f = Affine(self.n_state, self.n_embd, use_bias=False, device=self.device)
def forward(
self, text: torch.Tensor, text_len: torch.Tensor, return_probe_features: bool = False
) -> torch.Tensor:
if len(text.shape) != 3:
raise ValueError("expected text to be 3d")
if len(text_len.shape) != 1:
raise ValueError("expected text length to be 1d")
if text.shape[0] != text_len.shape[0]:
raise ValueError("text and text_len have inconsistent batch dimensions")
index = (text_len - 1)[:, None, None].expand(-1, 1, text.shape[2])
x = torch.gather(text, dim=1, index=index)
assert list(x.shape) == [text.shape[0], 1, text.shape[2]]
if return_probe_features:
return x[:, 0]
x = self.ln(x)
return self.f(x[:, 0])
@attr.s(repr=False)
class ImageFeatureExtractor(nn.Module):
n_state: int = attr.ib()
n_embd: int = attr.ib()
device: torch.device = attr.ib(default=torch.device("cuda"))
def __attrs_post_init__(self) -> None:
super().__init__()
self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
self.f = Affine(self.n_state, self.n_embd, use_bias=False, device=self.device)
def forward(self, x: torch.Tensor, return_probe_features: bool = False) -> torch.Tensor:
if return_probe_features:
return x[:, 0]
x = self.ln(x[:, :1])
return self.f(x[:, 0])
@attr.s(repr=False)
class TextEncoder(nn.Module):
n_bpe_vocab: int = attr.ib()
max_text_len: int = attr.ib()
n_embd: int = attr.ib()
n_head: int = attr.ib()
n_xf_blocks: int = attr.ib()
n_head_state: int = attr.ib(default=64)
device: torch.device = attr.ib(default=torch.device("cuda"))
block_size: int = attr.ib(init=False, default=32)
def __attrs_post_init__(self) -> None:
super().__init__()
self.n_state = self.n_head * self.n_head_state
n_rounded_context = self.block_size * int(math.ceil(self.max_text_len / self.block_size))
n_pad = n_rounded_context - self.max_text_len
args = (
n_rounded_context,
n_rounded_context,
self.block_size,
self.n_head,
False,
n_pad,
n_pad,
)
mask = DenseCausalAttentionMask(*args)
attn_fn = to_attention_info(mask)
m = 1 - make_full_layout(mask).astype(np.float32)
m[m == 1] = -1e10
attn_fn.pytorch_attn_bias = torch.from_numpy(m).to(self.device)
blocks: List[Tuple[str, nn.Module]] = [
(
"input",
TextEmbedding(
self.n_bpe_vocab, self.max_text_len, self.n_state, device=self.device
),
)
]
for i in range(self.n_xf_blocks):
blocks.append(
(
f"block_{i}",
TransformerBlock(self.n_state, 2 * self.n_xf_blocks, attn_fn, self.device),
)
)
blocks.append(
("output", TextFeatureExtractor(self.n_state, self.n_embd, device=self.device))
)
self.blocks = nn.ModuleDict(OrderedDict(blocks))
def forward(
self,
text: torch.Tensor,
text_len: torch.Tensor,
return_probe_features: bool = False,
) -> torch.Tensor:
n_batch = text.shape[0]
h = self.blocks["input"](text)
for i in range(self.n_xf_blocks):
h = self.blocks[f"block_{i}"](h)
h = self.blocks["output"](h, text_len, return_probe_features=return_probe_features)
assert list(h.shape) == [
n_batch,
self.n_embd if not return_probe_features else self.n_state,
]
return h
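# Hypothetical usage sketch (not part of the original file): encode a batch of
# BPE-tokenized captions with the encoder defined above. All sizes are illustrative.
#
#   enc = TextEncoder(n_bpe_vocab=50000, max_text_len=77, n_embd=512,
#                     n_head=8, n_xf_blocks=12, device=torch.device("cpu"))
#   tokens = torch.zeros((2, 77), dtype=torch.long)   # (batch, max_text_len) BPE ids
#   lengths = torch.tensor([5, 9])                    # number of real tokens per caption
#   feats = enc(tokens, lengths)                      # -> (2, 512) caption embeddings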
@attr.s(repr=False)
class ImageEncoder(nn.Module):
image_size: int = attr.ib()
patch_size: int = attr.ib()
n_embd: int = attr.ib()
n_head: int = attr.ib()
n_xf_blocks: int = attr.ib()
n_head_state: int = attr.ib(default=64)
n_timestep: int = attr.ib(default=0)
device: torch.device = attr.ib(default=torch.device("cuda"))
block_size: int = attr.ib(init=False, default=32)
def __attrs_post_init__(self) -> None:
super().__init__()
self.n_state = self.n_head * self.n_head_state
self.n_context = 1 + (self.image_size // self.patch_size) ** 2
n_rounded_context = self.block_size * int(math.ceil(self.n_context / self.block_size))
n_pad = n_rounded_context - self.n_context
args = (
n_rounded_context,
n_rounded_context,
self.block_size,
self.n_head,
False,
n_pad,
n_pad,
)
mask = DenseAttentionMask(*args)
attn_fn = to_attention_info(mask)
m = 1 - make_full_layout(mask).astype(np.float32)
m[m == 1] = -1e10
attn_fn.pytorch_attn_bias = torch.from_numpy(m).to(self.device)
blocks: List[Tuple[str, nn.Module]] = [
(
"input",
ImageEmbedding(
self.image_size,
self.patch_size,
self.n_state,
n_timestep=self.n_timestep,
device=self.device,
),
)
]
for i in range(self.n_xf_blocks):
blocks.append(
(
f"block_{i}",
TransformerBlock(self.n_state, 2 * self.n_xf_blocks, attn_fn, self.device),
)
)
blocks.append(("output", ImageFeatureExtractor(self.n_state, self.n_embd, self.device)))
self.blocks = nn.ModuleDict(OrderedDict(blocks))
def forward(
self,
image: torch.Tensor,
timesteps: Optional[torch.Tensor] = None,
return_probe_features: bool = False,
) -> torch.Tensor:
n_batch = image.shape[0]
h = self.blocks["input"](image, t=timesteps)
for i in range(self.n_xf_blocks):
            h = self.blocks[f"block_{i}"](h)

        h = self.blocks["output"](h, return_probe_features=return_probe_features)

        assert list(h.shape) == [
            n_batch,
            self.n_embd if not return_probe_features else self.n_state,
        ]
        return h
inverse_transformer.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convertwarp
--ref=standard
--premat=example_func2highres.mat
--warp1=highres2standard_warp
--out=example_func2standard_warp
"""
warputils = fsl.ConvertWarp()
warputils.inputs.reference = os.path.abspath(standard_brain)
warputils.inputs.premat = os.path.abspath(os.path.join(output_dir,
"example_func2highres.mat"))
warputils.inputs.warp1 = os.path.abspath(os.path.join(output_dir,
"highres2standard_warp.nii.gz"))
warputils.inputs.out_file = os.path.abspath(os.path.join(output_dir,
"example_func2standard_warp.nii.gz"))
warputils.cmdline
warputils.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/applywarp
--ref=standard
--in=example_func
--out=example_func2standard
--warp=example_func2standard_warp
"""
aw = fsl.ApplyWarp()
aw.inputs.ref_file = os.path.abspath(standard_brain)
aw.inputs.in_file = os.path.abspath(func_ref)
aw.inputs.out_file = os.path.abspath(os.path.join(output_dir,
"example_func2standard.nii.gz"))
aw.inputs.field_file = os.path.abspath(os.path.join(output_dir,
"example_func2standard_warp.nii.gz"))
aw.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-inverse -omat standard2example_func.mat example_func2standard.mat
"""
inverse_transformer = fsl.ConvertXFM()
inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir,
"example_func2standard.mat"))
inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir,
"standard2example_func.mat"))
inverse_transformer.inputs.invert_xfm = True
inverse_transformer.cmdline
inverse_transformer.run()
######################
###### plotting ######
example_func2highres = os.path.abspath(os.path.join(output_dir,
'example_func2highres'))
example_func2standard = os.path.abspath(os.path.join(output_dir,
"example_func2standard"))
highres2standard = os.path.abspath(os.path.join(output_dir,
'highres2standard'))
highres = os.path.abspath(anat_brain)
standard = os.path.abspath(standard_brain)
plot_example_func2highres = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2highres} {highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres} {example_func2highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2highres}1.png - {example_func2highres}2.png {example_func2highres}.png;
/bin/rm -f sl?.png {example_func2highres}2.png
/bin/rm {example_func2highres}1.png
""".replace("\n"," ")
plot_highres2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {highres2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {highres2standard}1.png - {highres2standard}2.png {highres2standard}.png;
/bin/rm -f sl?.png {highres2standard}2.png
/bin/rm {highres2standard}1.png
""".replace("\n"," ")
plot_example_func2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {example_func2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2standard}1.png - {example_func2standard}2.png {example_func2standard}.png;
/bin/rm -f sl?.png {example_func2standard}2.png
""".replace("\n"," ")
for cmdline in [plot_example_func2highres,plot_example_func2standard,plot_highres2standard]:
os.system(cmdline)
def create_simple_struc2BOLD(roi,
roi_name,
preprocessed_functional_dir,
output_dir):
from nipype.interfaces import fsl
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as util
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
simple_workflow = pe.Workflow(name = 'struc2BOLD')
inputnode = pe.Node(interface = util.IdentityInterface(
fields = ['flt_in_file',
'flt_in_matrix',
'flt_reference',
'mask']),
name = 'inputspec')
outputnode = pe.Node(interface = util.IdentityInterface(
fields = ['BODL_mask']),
name = 'outputspec')
"""
flirt
-in /export/home/dsoto/dsoto/fmri/$s/sess2/label/$i
-ref /export/home/dsoto/dsoto/fmri/$s/sess2/run1_prepro1.feat/example_func.nii.gz
-applyxfm
-init /export/home/dsoto/dsoto/fmri/$s/sess2/run1_prepro1.feat/reg/highres2example_func.mat
-out /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
"""
flirt_convert = pe.MapNode(
interface = fsl.FLIRT(apply_xfm = True),
iterfield = ['in_file',
'reference',
'in_matrix_file'],
name = 'flirt_convert')
simple_workflow.connect(inputnode, 'flt_in_file',
flirt_convert, 'in_file')
simple_workflow.connect(inputnode, 'flt_reference',
flirt_convert, 'reference')
simple_workflow.connect(inputnode, 'flt_in_matrix',
flirt_convert, 'in_matrix_file')
"""
fslmaths /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i} -mul 2
-thr `fslstats /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i} -p 99.6`
-bin /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
"""
def getthreshop(thresh):
return ['-mul 2 -thr %.10f -bin' % (val) for val in thresh]
getthreshold = pe.MapNode(
interface = fsl.ImageStats(op_string='-p 99.6'),
iterfield = ['in_file','mask_file'],
name = 'getthreshold')
simple_workflow.connect(flirt_convert, 'out_file',
getthreshold, 'in_file')
simple_workflow.connect(inputnode, 'mask',
getthreshold, 'mask_file')
threshold = pe.MapNode(
interface = fsl.ImageMaths(
suffix = '_thresh',
op_string = '-mul 2 -bin'),
iterfield = ['in_file','op_string'],
name = 'thresholding')
simple_workflow.connect(flirt_convert, 'out_file',
threshold, 'in_file')
simple_workflow.connect(getthreshold, ('out_stat',getthreshop),
threshold, 'op_string')
# simple_workflow.connect(threshold,'out_file',outputnode,'BOLD_mask')
bound_by_mask = pe.MapNode(
interface = fsl.ImageMaths(
suffix = '_mask',
op_string = '-mas'),
iterfield = ['in_file','in_file2'],
name = 'bound_by_mask')
simple_workflow.connect(threshold, 'out_file',
bound_by_mask, 'in_file')
simple_workflow.connect(inputnode, 'mask',
bound_by_mask, 'in_file2')
simple_workflow.connect(bound_by_mask, 'out_file',
outputnode, 'BOLD_mask')
# setup inputspecs
simple_workflow.inputs.inputspec.flt_in_file = roi
simple_workflow.inputs.inputspec.flt_in_matrix = os.path.abspath(os.path.join(preprocessed_functional_dir,
'reg',
'highres2example_func.mat'))
simple_workflow.inputs.inputspec.flt_reference = os.path.abspath(os.path.join(preprocessed_functional_dir,
'func',
'example_func.nii.gz'))
simple_workflow.inputs.inputspec.mask = os.path.abspath(os.path.join(preprocessed_functional_dir,
'func',
'mask.nii.gz'))
simple_workflow.inputs.bound_by_mask.out_file = os.path.abspath(os.path.join(output_dir,
roi_name.replace('_fsl.nii.gz',
'_BOLD.nii.gz')))
return simple_workflow
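# Hypothetical usage sketch (not part of the original module): build the struc2BOLD
# workflow for one ROI and run it. The paths below are placeholders.
#
#   wf = create_simple_struc2BOLD(roi="rois/V1_fsl.nii.gz",
#                                 roi_name="V1_fsl.nii.gz",
#                                 preprocessed_functional_dir="sub-01/func_prepro.feat",
#                                 output_dir="sub-01/rois_in_BOLD")
#   wf.run()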
def registration_plotting(output_dir,
anat_brain,
standard_brain):
######################
###### plotting ######
try:
example_func2highres = os.path.abspath(os.path.join(output_dir,
'example_func2highres'))
example_func2standard = os.path.abspath(os.path.join(output_dir,
'example_func2standard_warp'))
highres2standard = os.path.abspath(os.path.join(output_dir,
'highres2standard'))
highres = os.path.abspath(anat_brain)
standard = os.path.abspath(standard_brain)
plot_example_func2highres = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2highres} {highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres} {example_func2highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2highres}1.png - {example_func2highres}2.png {example_func2highres}.png;
/bin/rm -f sl?.png {example_func2highres}2.png
/bin/rm {example_func2highres}1.png
""".replace("\n"," ")
plot_highres2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {highres2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {highres2standard}1.png - {highres2standard}2.png {highres2standard}.png;
/bin/rm -f sl?.png {highres2standard}2.png
/bin/rm {highres2standard}1.png
""".replace("\n"," ")
plot_example_func2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {example_func2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + | |
<gh_stars>1000+
#
# Copyright (c) 2020 BlackBerry Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PE Tree runtime abstraction layer"""
# Standard imports
import os
import tempfile
import threading
import struct
# Config parser imports
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
# pefile
import pefile
# Qt imports
from PyQt5 import QtCore, Qt, QtGui, QtWidgets
# Capstone imports
try:
import capstone
HAVE_CAPSTONE = True
except ImportError:
HAVE_CAPSTONE = False
# PE Tree imports
import pe_tree.info
# pylint: disable=unused-argument
class RuntimeSignals(QtCore.QObject):
"""Allows worker threads to invoke runtime methods on the UI thread.
Warning:
This class must be instantiated from the UI thread!
"""
def __init__(self, runtime, opaque=None):
super(RuntimeSignals, self).__init__()
self.opaque = opaque if opaque != None else {}
self.runtime = runtime
def invoke_method(self, method, *args):
"""Invoke runtime method on the UI thread"""
# Ensure only 1 thread at a time can access runtime.ret
self.runtime.lock.acquire()
self.runtime.opaque = self.opaque
# Invoke the runtime method in the UI thread
QtCore.QMetaObject.invokeMethod(self.runtime, method, Qt.Qt.BlockingQueuedConnection, *args)
# Get the method result
ret = self.runtime.ret
self.runtime.lock.release()
return ret
def get_temp_dir(self):
return self.invoke_method("get_temp_dir")
def ask_file(self, filename, caption, filter="All Files (*)", save=False):
return self.invoke_method("ask_file", Qt.Q_ARG(str, filename), Qt.Q_ARG(str, caption), Qt.Q_ARG(str, filter), Qt.Q_ARG(bool, save))
def read_pe(self, image_base, size=0):
return self.invoke_method("read_pe", Qt.Q_ARG(object, image_base), Qt.Q_ARG(object, size))
def get_bytes(self, start, size):
return self.invoke_method("get_bytes", Qt.Q_ARG(object, start), Qt.Q_ARG(object, size))
def get_byte(self, offset):
return self.invoke_method("get_byte", Qt.Q_ARG(object, offset))
def get_word(self, offset):
return self.invoke_method("get_word", Qt.Q_ARG(object, offset))
def get_dword(self, offset):
return self.invoke_method("get_dword", Qt.Q_ARG(object, offset))
def get_qword(self, offset):
return self.invoke_method("get_qword", Qt.Q_ARG(object, offset))
def get_name(self, offset):
return self.invoke_method("get_name", Qt.Q_ARG(object, offset))
def get_segment_name(self, offset):
return self.invoke_method("get_segment_name", Qt.Q_ARG(object, offset))
def is_writable(self, offset):
return self.invoke_method("is_writable", Qt.Q_ARG(object, offset))
def get_label(self, offset):
return self.invoke_method("get_label", Qt.Q_ARG(object, offset))
def jumpto(self, item, offset):
return self.invoke_method("jumpto", Qt.Q_ARG(object, offset))
def log(self, output):
return self.invoke_method("log", Qt.Q_ARG(str, output))
def make_string(self, offset, size):
return self.invoke_method("make_string", Qt.Q_ARG(object, offset), Qt.Q_ARG(object, size))
def make_comment(self, offset, comment):
return self.invoke_method("make_comment", Qt.Q_ARG(object, offset), Qt.Q_ARG(str, str(comment)))
def make_segment(self, offset, size, class_name="DATA", name="pe_map", data=None):
return self.invoke_method("make_segment", Qt.Q_ARG(object, offset), Qt.Q_ARG(object, size), Qt.Q_ARG(str, class_name), Qt.Q_ARG(str, name), Qt.Q_ARG(bytes, data))
def resolve_address(self, offset):
return self.invoke_method("resolve_address", Qt.Q_ARG(object, offset))
def make_qword(self, offset):
return self.invoke_method("make_qword", Qt.Q_ARG(object, offset))
def make_dword(self, offset):
return self.invoke_method("make_dword", Qt.Q_ARG(object, offset))
def make_word(self, offset):
return self.invoke_method("make_word", Qt.Q_ARG(object, offset))
def make_byte(self, offset, size=1):
return self.invoke_method("make_byte", Qt.Q_ARG(object, offset))
def make_name(self, offset, name, flags=0):
return self.invoke_method("make_name", Qt.Q_ARG(object, offset), Qt.Q_ARG(str, name), Qt.Q_ARG(int, flags))
def find_iat_ptrs(self, pe, image_base, size, get_word):
return self.invoke_method("find_iat_ptrs", Qt.Q_ARG(object, pe), Qt.Q_ARG(object, image_base), Qt.Q_ARG(object, size), Qt.Q_ARG(object, get_word))
def find_pe(self, cursor=False):
return self.invoke_method("find_pe", Qt.Q_ARG(object, cursor))
def init_capstone(self, pe):
return self.invoke_method("init_capstone", Qt.Q_ARG(object, pe))
def get_config_option(self, section, option, fallback):
return self.invoke_method("get_config_option", Qt.Q_ARG(str, section), Qt.Q_ARG(str, option), Qt.Q_ARG(object, fallback))
class Runtime(QtCore.QObject):
"""Base runtime class"""
def __init__(self, widget, args):
super(Runtime, self).__init__()
self.widget = widget
self.ret = None
self.lock = threading.Lock()
self.config_lock = threading.RLock()
self.signals = RuntimeSignals(self)
self.opaque = {}
self.args = args
self.read_config()
self.save_config()
@QtCore.pyqtSlot()
def get_temp_dir(self):
"""Get temporary directory path
Returns:
str: Temporary directory path
"""
self.ret = tempfile.gettempdir()
return self.ret
@QtCore.pyqtSlot()
def get_script_dir(self):
"""Get script directory
Returns:
str: Script directory path
"""
self.ret = os.path.dirname(os.path.realpath(pe_tree.info.__file__))
return self.ret
def show_widget(self):
"""Display the widget"""
self.widget.show()
self.ret = True
return self.ret
@QtCore.pyqtSlot(str, str, str, bool)
def ask_file(self, filename, caption, filter="All Files (*)", save=False):
"""Ask user to select a filename via open/save dialog
Args:
filename (str): Preferred filename
caption (str): Save/open dialog caption
filter (str): File extension filter
save (bool): Present the save dialog if True, otherwise open
Returns:
str: Filename if successful, otherwise None
"""
dialog = QtWidgets.QFileDialog()
options = QtWidgets.QFileDialog.Options()
if not save:
# Open file dialog
filename, _ = dialog.getOpenFileName(self.widget, caption, filename, filter, options=options)
else:
# Save file dialog
if filename[0] == ".":
# Remove leading dot from section names
filename = filename[1:]
filename, _ = dialog.getSaveFileName(self.widget, caption, filename, filter, options=options)
if filename:
self.ret = filename
else:
self.ret = ""
return self.ret
@QtCore.pyqtSlot(object, object)
def read_pe(self, image_base, size=0):
"""Read PE image from memory
Args:
image_base (int): Address of PE file in-memory
size (int, optional): Size of PE file in-memory
Returns:
bytearray: Data of PE image if successful, otherwise an empty bytearray
"""
self.ret = b""
try:
# Read the module's PE headers to determine the image size
pe = pefile.PE(data=self.get_bytes(image_base, 0x1000), fast_load=True)
# Read the remainder of the PE image
pe = pefile.PE(data=self.get_bytes(image_base, max(pe.OPTIONAL_HEADER.SizeOfImage, pe.sections[-1].PointerToRawData + pe.sections[-1].SizeOfRawData)), fast_load=True)
# Fix up section pointers/sizes
for section in pe.sections:
section.PointerToRawData = section.VirtualAddress
section.SizeOfRawData = section.Misc_VirtualSize + (pe.OPTIONAL_HEADER.SectionAlignment - (section.Misc_VirtualSize % pe.OPTIONAL_HEADER.SectionAlignment))
# Get PE data
self.ret = pe.write()
except:
pass
return self.ret
@QtCore.pyqtSlot(int, int)
def get_bytes(self, start, size):
"""Read a sequence of bytes from memory
Args:
start (int): Start address
            size (int): Number of bytes to read
        Returns:
            bytes: Array of bytes if successful, otherwise None
"""
self.ret = None
return self.ret
@QtCore.pyqtSlot(int)
def get_byte(self, offset):
"""Read 8-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Byte value
"""
self.ret = self.get_bytes(offset, 1)
return self.ret
@QtCore.pyqtSlot(int)
def get_word(self, offset):
"""Read 16-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Word value
"""
self.ret = struct.unpack("<H", self.get_bytes(offset, 2))[0]
return self.ret
@QtCore.pyqtSlot(int)
def get_dword(self, offset):
"""Read 32-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Dword value
"""
self.ret = struct.unpack("<I", self.get_bytes(offset, 4))[0]
return self.ret
@QtCore.pyqtSlot(int)
def get_qword(self, offset):
"""Read 64-bits from memory
Args:
offset (int): Offset to read from
Returns:
int: Qword value
"""
self.ret = struct.unpack("<Q", self.get_bytes(offset, 8))[0]
return self.ret
@QtCore.pyqtSlot(int)
def get_name(self, offset):
"""Get symbol name for the given address
Args:
offset (int): Address to get name for
Returns:
str: Name of symbol if successful, otherwise an empty string
"""
self.ret = ""
return self.ret
@QtCore.pyqtSlot(int)
def get_segment_name(self, offset):
"""Get segment/module name for the given address
Args:
offset (int): Address to get name for
Returns:
str: Name of segment/module if successful, otherwise an empty string
"""
self.ret = ""
return self.ret
@QtCore.pyqtSlot(int)
def is_writable(self, offset):
"""Determine if the memory address is write-able
Args:
offset (int): Address to check for write permissions
Returns:
bool: True if the memory address resides in writable page of memory, otherwise False
"""
self.ret = False
return self.ret
@QtCore.pyqtSlot(int)
def get_label(self, offset):
"""Get the disassembly label for the given address
Args:
offset (int): Address to get label for
Returns:
str: Label name if successful, otherwise an empty string
"""
self.ret = ""
return self.ret
@QtCore.pyqtSlot(object, int)
def jumpto(self, item, offset):
"""User double-clicked an item in the tree, by default disassemble using capstone
Args:
item (pe_tree.tree): Item that was double-clicked by the user
offset (int): Address to jump to
"""
try:
if item.tree.disasm:
for i in item.tree.disasm.disasm(item.get_data(size=0x100), offset):
item.tree.form.runtime.log("0x{:x}:\t{}\t{}".format(i.address, i.mnemonic, i.op_str))
except ValueError:
pass
self.ret = True
return self.ret
@QtCore.pyqtSlot(str)
def log(self, output):
"""Print to output"""
output_view = self.pe_tree_form.output_stack.currentWidget()
if output_view:
self.pe_tree_form.output_stack.setVisible(True)
output_view.setVisible(True)
output_view.append(output)
output_view.moveCursor(QtGui.QTextCursor.End)
self.ret = True
return self.ret
@QtCore.pyqtSlot(int, int)
def make_string(self, offset, size):
"""Convert the data at the given offset to an ASCII string
Args:
offset (int): Address to convert to string
size (int): Length of the string in bytes
"""
self.ret = None
return self.ret
@QtCore.pyqtSlot(int, str)
def make_comment(self, offset, comment):
"""Add a comment to the disassembly
Args:
offset (int): Address to comment
comment (str): Comment string
"""
self.ret = None
return self.ret
@QtCore.pyqtSlot(int, int, str, str, bytes)
def make_segment(self, offset, size, class_name="DATA", name="pe_map", data=None):
"""Add a segment in the IDB
Args:
offset (int): Base address of the new segment
size (int): Size of the new segment in bytes
class_name (str): "CODE" or "DATA" (default)
name (str): Name of the segment, default is "pe_map"
data (bytes): Data to populate the segment with (optional)
"""
self.ret = None
return self.ret
@QtCore.pyqtSlot(int)
def resolve_address(self, offset):
"""Get | |
<filename>docs/decode_sphinx_inventory.py<gh_stars>0
#! /usr/bin/python
#! coding: utf-8
# decode_sphinx_inventory, mb, 2013, 2014-07-26, 2015-12-17
# ÄÖÜäöüß
# ==================================================
# The following __doc__ string is displayed on --info
# --------------------------------------------------
"""\
decode_sphinx_inventory - Decode an objects.inv and create some output.
Sphinx (http://sphinx-doc.org) offers symbolic linking to distant
documentation projects. This happens by means of an objects inventory
which is usually kept in a specially encoded file 'objects.inv'
in the root folder of the distant documentation project.
This module fetches an inventory and converts it into something readable.
Required parameter:
uri
Path to a Sphinx documentation project in the web like
http://python.org/ The file 'objects.inv' is expected to
exist there.
Optional parameters:
-f, --format
Output is utf-8 encoded.
'html' (default): A nicely formatted html document is created.
        'csv': Comma separated values with \t as separator.
'json': Json encoded data.
'ref': Format of Sphinx ':ref:' textrole
-O, --outfilename
The file is created or overwritten and contains the output.
--abbreviation
A short name that is used in the Intersphinx mapping to
reference the specific documentation project. Default is 'abbrev'
or the typically used name for common TYPO3 projects.
Use 'None' to show no abbreviation at all.
inventory_uri
        Path to a file 'objects.inv'. By default it is expected to
        exist in uri.
Examples:
python decode_sphinx_inventory.py https://docs.typo3.org/typo3cms/TyposcriptReference/
python decode_sphinx_inventory.py https://docs.typo3.org/typo3cms/TyposcriptReference/ -O result.html
python decode_sphinx_inventory.py https://docs.typo3.org/typo3cms/TyposcriptReference/ -O result.html --abbreviation=tsref
python decode_sphinx_inventory.py https://docs.typo3.org/typo3cms/TyposcriptReference/ -O result.csv -f csv
python decode_sphinx_inventory.py https://docs.typo3.org/typo3cms/TyposcriptReference/ -O result.json -f json
  python decode_sphinx_inventory.py https://docs.djangoproject.com/en/dev/ https://docs.djangoproject.com/en/dev/_objects/ -O result.html --abbreviation=django
"""
from __future__ import print_function
import codecs
import json
import sys
import urllib
from posixpath import join
try:
from sphinx.ext.intersphinx import read_inventory_v2
except ImportError:
print(
'This module uses Sphinx modules. See https://sphinx.readthedocs.io/\n'
'To install Sphinx do something like:\n'
' $ pip install sphinx\n'
' or $ easy_install sphinx\n'
'\n'
'Run with "sudo" if required.'
)
sys.exit(1)
try:
from mako.template import Template
except ImportError:
print(
'This module uses Mako templating. See http://docs.makotemplates.org/\n'
'To install Mako do something like:\n'
' $ pip install Mako\n'
' or $ easy_install Mako\n'
' or $ git clone https://github.com/zzzeek/mako\n'
' $ cd mako\n'
' $ sudo python setup.py install\n'
'\n'
'Run with "sudo" if required.'
)
sys.exit(1)
__version_info__ = (0, 2, 0)
__version__ = '.'.join(map(str, __version_info__))
__history__ = ""
__copyright__ = """\
Copyright (c) since 2014, <NAME> <<EMAIL>>
All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appears in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.
THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE!
"""
# map known projects to preferred abbreviation
uri2abbrev = {
'://docs.typo3.org/typo3cms/CodingGuidelinesReference': 't3cgl',
'://docs.typo3.org/typo3cms/CoreApiReference': 't3coreapi',
'://docs.typo3.org/typo3cms/EditorsTutorial': 't3editors',
'://docs.typo3.org/typo3cms/ExtbaseFluidBook': 't3extbasebook',
'://docs.typo3.org/typo3cms/ExtbaseGuide': 't3extbase',
'://docs.typo3.org/typo3cms/FileAbstractionLayerReference': 't3fal',
'://docs.typo3.org/typo3cms/FrontendLocalizationGuide': 't3l10n',
'://docs.typo3.org/typo3cms/GettingStartedTutorial': 't3start',
'://docs.typo3.org/typo3cms/InsideTypo3Reference': 't3inside',
'://docs.typo3.org/typo3cms/InstallationGuide': 't3install',
'://docs.typo3.org/typo3cms/SecurityGuide': 't3security',
'://docs.typo3.org/typo3cms/SkinningReference': 't3skinning',
'://docs.typo3.org/typo3cms/TCAReference': 't3tca',
'://docs.typo3.org/typo3cms/TemplatingTutorial': 't3templating',
'://docs.typo3.org/typo3cms/TSconfigReference': 't3tsconfig',
'://docs.typo3.org/typo3cms/Typo3ServicesReference': 't3services',
'://docs.typo3.org/typo3cms/TyposcriptIn45MinutesTutorial': 't3ts45',
'://docs.typo3.org/typo3cms/TyposcriptReference': 't3tsref',
'://docs.typo3.org/typo3cms/TyposcriptSyntaxReference': 't3tssyntax',
# what abbreviations should we use instead of 'api' in the following cases?
'://typo3.org/api/typo3cms': 't3api', # current stable
'://api.typo3.org/typo3cms/master/html': 't3api', # master
'://api.typo3.org/typo3cms/67/html': 't3api76',
'://api.typo3.org/typo3cms/62/html': 't3api62',
'://api.typo3.org/typo3cms/61/html': 't3api61',
'://api.typo3.org/typo3cms/60/html': 't3api60',
'://api.typo3.org/typo3cms/47/html': 't3api47',
'://api.typo3.org/typo3cms/45/html': 't3api45',
# may exist in future as well
'://typo3.org/api/flow': 'api',
'://api.typo3.org/flow/11': 'api',
'://api.typo3.org/flow/master': 'api',
}
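# For example, "https://docs.typo3.org/typo3cms/TyposcriptReference/" is looked
# up by dropping the URI scheme and any trailing slash, i.e.
# uri2abbrev["://docs.typo3.org/typo3cms/TyposcriptReference"] -> "t3tsref"
# (see Main.__init__ below for the actual lookup).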
# if module argparse is not available
class Namespace(object):
"""Simple object for storing attributes."""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
# a mako template
htmlTemplate = u"""\
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>${pagetitle}</title>
<style type="text/css">
body { font-family: sans-serif; font-size: 12px; }
.b { font-weight: bold }
li { margin-bottom: 8px }
.mono { font-family: monospace }
.mono, pre { font-size: 12px }
</style>
</head>
<body>
<h3>Known link targets for ${uri|h}</h3>
% if maxTargets > 1:
% for page, pagedict in pages.items():
<div class="page">
<p class="b">${page|h}</p>
<ul>
% for label, v in pagedict.items():
## <li><a href="${v[0]|h}" title="${v[1]|h}">:ref:`${abbrev|h}${label|h}`</a></li>
\t<li>:ref:`${abbrev|h}${label|h}`<br><a href="${v[0]|h}">${v[1]|h}</a></li>
% endfor
</ul>
</div>
% endfor
% else:
<ul>
% for label, v in sorted(items.items()):
\t<li>:ref:`${abbrev|h}${label|h}`<br><a href="${v[2]|h}">${v[3]|h}</a></li>
% endfor
</ul>
% endif
% if abbrev:
<h3>About '${abbrev|h}'</h3>
<p>
In this listing
the abbreviation <span class="mono">${abbrev|h}</span> in the <span class="mono">:ref:`${abbrev|h}:...`</span>
textrole<br>
serves as a pointer to '${uri|h}'.<br>
This requires the following setting in your Settings.yml file:<br>
</p>
<pre>
config.py:
intersphinx_mapping:
${abbrev|h}:
- ${uri|h}
- null
</pre>
<p>
You may as well choose any other unique abbreviation instead of <span class="mono">${abbrev|h}</span>.
</p>
% endif
<p>End.</p>
</body>
</html>
"""
class Main:
def __init__(self, args):
self.args = args
self.uri = self.args.uri.strip('/') + '/'
if self.uri.startswith('https:'):
k = self.uri[5:]
elif self.uri.startswith('http:'):
k = self.uri[4:]
else:
k = self.uri
self.abbrev = self.args.abbrev
if self.abbrev == 'None':
self.abbrev = ''
elif self.abbrev == 'abbrev':
self.abbrev = uri2abbrev.get(k.rstrip('/'), 'abbrev')
self.inventory_uri = self.args.inventory_uri
if not self.args.inventory_uri:
self.inventory_uri = self.uri + 'objects.inv'
self.lenuri = len(self.uri)
self.inventory = {}
self.inventory_items = {}
self.pages = {}
# to find out if there are pages with more than one target:
self.maxTargets = 0
def getInventory(self):
f = urllib.urlopen(self.inventory_uri)
f.readline() # burn a line
self.inventory = read_inventory_v2(f, self.uri, join)
f.close()
self.inventory_items = self.inventory.get('std:label', {})
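    # The 'std:label' part of a Sphinx v2 inventory maps each label to a
    # 4-tuple, roughly {label: (project_name, version, target_uri, link_text)};
    # that is why v[2] (URI) and v[3] (link text) are read below. This layout is
    # assumed from sphinx.ext.intersphinx; only those two fields matter here.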
def organiseByPages(self):
self.maxTargets = 0
for label, v in self.inventory_items.items():
page = v[2][self.lenuri:]
p = page.find('#')
if p > -1:
page = page[0:p]
pagelinks = self.pages.get(page, {})
target = v[2]
linktext = v[3]
pagelinks[label] = (target, linktext)
self.pages[page] = pagelinks
self.maxTargets = max(self.maxTargets, len(pagelinks))
def renderHtml(self):
kwds = {}
kwds['pages'] = self.pages
kwds['items'] = self.inventory_items
kwds['uri'] = self.uri
if self.abbrev:
kwds['abbrev'] = self.abbrev + ':'
else:
kwds['abbrev'] = ''
kwds['pagetitle'] = 'Link targets'
kwds['maxTargets'] = self.maxTargets
self.renderResult = Template(htmlTemplate).render(**kwds)
def renderJson(self):
if self.args.outfilename:
f2 = codecs.open(self.args.outfilename, 'w', 'utf-8')
json.dump(self.inventory, f2, sort_keys=True, indent=4, ensure_ascii=False)
f2.close()
def renderCsv(self):
if self.args.outfilename:
f2 = codecs.open(self.args.outfilename, 'w', 'utf-8')
f2.write('label\tlinktext\turl\n')
for k in sorted(self.inventory_items):
v = self.inventory_items[k]
f2.write(u'%s\t%s\t%s\n' % (k.replace('\t', '\\t'), v[
3].replace('\t', '\\t'), v[2].replace('\t', '\\t')))
f2.close()
def renderRef(self):
if self.abbrev:
abbrev = self.abbrev + ':'
else:
abbrev = ''
if self.args.outfilename:
f2 = codecs.open(self.args.outfilename, 'w', 'utf-8')
for k in sorted(self.inventory_items):
v = self.inventory_items[k]
p = v[3].lower().find(k)
if p > -1:
k = v[3][p:p + len(k)]
f2.write(u':ref:`%s%s`\n' % (abbrev, k.replace('\\', '\\\\'),))
f2.close()
def work(self):
self.getInventory()
if self.args.outfilename:
if self.args.format == 'csv':
self.renderCsv()
if self.args.format == 'json':
self.renderJson()
if self.args.format == 'html':
self.organiseByPages()
self.renderHtml()
f2path = self.args.outfilename
f2 = codecs.open(f2path, 'w', 'utf-8')
f2.write(self.renderResult)
f2.close()
if self.args.format == 'ref':
self.renderRef()
else:
print(len(self.inventory_items), 'targets found. Specify outfile for details.')
retCode = 0
msg = ''
return retCode, msg
def get_argparse_args():
"""Get commandline args using module 'argparse'. Python >= 2.7 required."""
class License(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print(__copyright__)
parser.exit()
class History(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print(__history__)
parser.exit()
class Info(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print()
print(__doc__)
parser.exit()
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0], add_help=False)
parser.add_argument('--help', '-h', action='help', default=argparse.SUPPRESS,
help='show this help message and exit')
parser.add_argument('--version', action='version',
version=__version__, help='show version and exit')
parser.add_argument('--license', help='show license and exit', nargs=0, action=License)
# parser.add_argument('--history', help='show history and exit', nargs=0, action=History)
parser.add_argument('--info', help='show more information about this module',
nargs=0, action=Info)
parser.add_argument('-O', '--outfile-name',
help="write utf-8 output to this file", dest='outfilename', default=None)
parser.add_argument('--abbreviation', help="abbreviation for the Intersphinx mapping. Default: abbrev", dest='abbrev', default='abbrev')
parser.add_argument('-f', '--format', help="format of the produced output. Always utf-8. Default: html)",
dest='format', choices=['html', 'json', 'csv', 'ref'], default='html')
# parser.add_argument('--logdir', help="Existing directory where logs will be written. Defaults to tempdir/t3pdb/logs which will be created.", dest='logdir', default=None)
parser.add_argument('uri', help='path to a Sphinx documentation project.')
parser.add_argument('inventory_uri', help='path to \'objects.inv\' of a Sphinx documentation project.', default=None)
return parser.parse_args()
if __name__ == "__main__":
argparse_available = False
try:
import argparse
argparse_available = True
except ImportError:
pass
if not argparse_available:
try:
import local_argparse as argparse
argparse_available = True
except ImportError:
pass
if argparse_available:
args = get_argparse_args()
else:
args = Namespace()
# you may hardcode parameters here:
if 'hardcode parameters here':
args.uri = ''
    if not args.uri:
msg = ("\nNote:\n"
" '%(prog)s'\n"
" needs module 'argparse' (Python >= 2.7) to handle commandline\n"
" parameters. It seems that 'argparse' is not available. Provide\n"
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
from py4j.compat import long
from pyflink.common.configuration import Configuration
from pyflink.java_gateway import get_gateway
from pyflink.table.sql_dialect import SqlDialect
__all__ = ['TableConfig']
from pyflink.util.java_utils import add_jars_to_context_class_loader
class TableConfig(object):
"""
Configuration for the current :class:`TableEnvironment` session to adjust Table & SQL API
programs.
This class is a pure API class that abstracts configuration from various sources. Currently,
configuration can be set in any of the following layers (in the given order):
- flink-conf.yaml
- CLI parameters
- :class:`~pyflink.datastream.StreamExecutionEnvironment` when bridging to DataStream API
- :func:`~EnvironmentSettings.Builder.with_configuration`
- :func:`~TableConfig.set`
The latter two represent the application-specific part of the configuration. They initialize
and directly modify :func:`~TableConfig.get_configuration`. Other layers represent the
configuration of the execution context and are immutable.
The getter :func:`~TableConfig.get` gives read-only access to the full configuration. However,
application-specific configuration has precedence. Configuration of outer layers is used for
defaults and fallbacks. The setter :func:`~TableConfig.set` will only affect
application-specific configuration.
For common or important configuration options, this class provides getters and setters methods
with detailed inline documentation.
For more advanced configuration, users can directly access the underlying key-value map via
:func:`~pyflink.table.TableConfig.get_configuration`.
Example:
::
>>> table_config = t_env.get_config()
>>> config = Configuration()
>>> config.set_string("parallelism.default", "128") \\
... .set_string("pipeline.auto-watermark-interval", "800ms") \\
... .set_string("execution.checkpointing.interval", "30s")
>>> table_config.add_configuration(config)
.. note::
        Because options are read at different points in time when performing operations, it is
recommended to set configuration options early after instantiating a table environment.
"""
def __init__(self, j_table_config=None):
gateway = get_gateway()
if j_table_config is None:
self._j_table_config = gateway.jvm.TableConfig.getDefault()
else:
self._j_table_config = j_table_config
def get(self, key: str, default_value: str) -> str:
"""
Returns the value associated with the given key as a string.
:param key: The key pointing to the associated value.
:param default_value: The default value which is returned in case there is no value
associated with the given key.
:return: The (default) value associated with the given key.
"""
if self.get_configuration().contains_key(key):
return self.get_configuration().get_string(key, default_value)
else:
return self._j_table_config.getRootConfiguration().getString(key, default_value)
def set(self, key: str, value: str) -> 'TableConfig':
"""
Sets a string-based value for the given string-based key.
The value will be parsed by the framework on access.
"""
self._j_table_config.set(key, value)
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
if key in [jars_key, classpaths_key]:
add_jars_to_context_class_loader(value.split(";"))
return self
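    # Illustrative sketch: any string-based option can be set this way, e.g.
    #   table_config.set("parallelism.default", "4")
    #   table_config.set("pipeline.jars", "file:///path/to/connector.jar")
    # (the jar path is a placeholder). The second call also triggers the
    # class-loader handling above because the key matches PipelineOptions.JARS.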
def get_local_timezone(self) -> str:
"""
Returns the local timezone id for timestamp with local time zone, either an abbreviation
such as "PST", a full name such as "America/Los_Angeles", or a custom timezone_id such
as "GMT-08:00".
"""
return self._j_table_config.getLocalTimeZone().getId()
def set_local_timezone(self, timezone_id: str):
"""
Sets the local timezone id for timestamp with local time zone.
:param timezone_id: The timezone id, either an abbreviation such as "PST", a full name
such as "America/Los_Angeles", or a custom timezone_id such as
"GMT-08:00".
"""
if timezone_id is not None and isinstance(timezone_id, str):
j_timezone = get_gateway().jvm.java.time.ZoneId.of(timezone_id)
self._j_table_config.setLocalTimeZone(j_timezone)
else:
raise Exception("TableConfig.timezone should be a string!")
def get_max_generated_code_length(self) -> int:
"""
The current threshold where generated code will be split into sub-function calls. Java has
a maximum method length of 64 KB. This setting allows for finer granularity if necessary.
Default is 64000.
"""
return self._j_table_config.getMaxGeneratedCodeLength()
def set_max_generated_code_length(self, max_generated_code_length: int):
"""
        Sets the current threshold where generated code will be split into sub-function calls.
Java has a maximum method length of 64 KB. This setting allows for finer granularity if
necessary. Default is 64000.
"""
if max_generated_code_length is not None and isinstance(max_generated_code_length, int):
self._j_table_config.setMaxGeneratedCodeLength(max_generated_code_length)
else:
raise Exception("TableConfig.max_generated_code_length should be a int value!")
def set_idle_state_retention_time(self,
min_time: datetime.timedelta,
max_time: datetime.timedelta):
"""
Specifies a minimum and a maximum time interval for how long idle state, i.e., state which
was not updated, will be retained.
State will never be cleared until it was idle for less than the minimum time and will never
be kept if it was idle for more than the maximum time.
When new data arrives for previously cleaned-up state, the new data will be handled as if it
was the first data. This can result in previous results being overwritten.
Set to 0 (zero) to never clean-up the state.
Example:
::
>>> table_config = TableConfig() \\
... .set_idle_state_retention_time(datetime.timedelta(days=1),
... datetime.timedelta(days=3))
.. note::
Cleaning up state requires additional bookkeeping which becomes less expensive for
larger differences of minTime and maxTime. The difference between minTime and maxTime
must be at least 5 minutes.
Method set_idle_state_retention_time is deprecated now. The suggested way to set idle
state retention time is :func:`~pyflink.table.TableConfig.set_idle_state_retention`
Currently, setting max_time will not work and the max_time is directly derived from the
min_time as 1.5 x min_time.
:param min_time: The minimum time interval for which idle state is retained. Set to
0 (zero) to never clean-up the state.
:param max_time: The maximum time interval for which idle state is retained. Must be at
least 5 minutes greater than minTime. Set to
0 (zero) to never clean-up the state.
"""
j_time_class = get_gateway().jvm.org.apache.flink.api.common.time.Time
j_min_time = j_time_class.milliseconds(long(round(min_time.total_seconds() * 1000)))
j_max_time = j_time_class.milliseconds(long(round(max_time.total_seconds() * 1000)))
self._j_table_config.setIdleStateRetentionTime(j_min_time, j_max_time)
def set_idle_state_retention(self, duration: datetime.timedelta):
"""
Specifies a retention time interval for how long idle state, i.e., state which
was not updated, will be retained.
State will never be cleared until it was idle for less than the duration and will never
be kept if it was idle for more than the 1.5 x duration.
When new data arrives for previously cleaned-up state, the new data will be handled as if it
was the first data. This can result in previous results being overwritten.
Set to 0 (zero) to never clean-up the state.
Example:
::
>>> table_config.set_idle_state_retention(datetime.timedelta(days=1))
.. note::
Cleaning up state requires additional bookkeeping which becomes less expensive for
larger differences of minTime and maxTime. The difference between minTime and maxTime
must be at least 5 minutes.
:param duration: The retention time interval for which idle state is retained. Set to
0 (zero) to never clean-up the state.
"""
j_duration_class = get_gateway().jvm.java.time.Duration
j_duration = j_duration_class.ofMillis(long(round(duration.total_seconds() * 1000)))
self._j_table_config.setIdleStateRetention(j_duration)
def get_min_idle_state_retention_time(self) -> int:
"""
State might be cleared and removed if it was not updated for the defined period of time.
.. note::
Currently the concept of min/max idle state retention has been deprecated and only
idle state retention time is supported. The min idle state retention is regarded as
idle state retention and the max idle state retention is derived from idle state
retention as 1.5 x idle state retention.
:return: The minimum time until state which was not updated will be retained.
"""
return self._j_table_config.getMinIdleStateRetentionTime()
def get_max_idle_state_retention_time(self) -> int:
"""
State will be cleared and removed if it was not updated for the defined period of time.
.. note::
Currently the concept of min/max idle state retention has been deprecated and only
idle state retention time is supported. The min idle state retention is regarded as
idle state retention and the max idle state retention is derived from idle state
retention as 1.5 x idle state retention.
:return: The maximum time until state which was not updated will be retained.
"""
return self._j_table_config.getMaxIdleStateRetentionTime()
def get_idle_state_retention(self) -> datetime.timedelta:
"""
:return: The duration until state which was not updated will be retained.
"""
return datetime.timedelta(
milliseconds=self._j_table_config.getIdleStateRetention().toMillis())
def get_configuration(self) -> Configuration:
"""
Gives direct access to the underlying key-value map for advanced configuration.
:return: Entire key-value configuration.
"""
return Configuration(j_configuration=self._j_table_config.getConfiguration())
    def add_configuration(self,
    total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
    best = total - best
    print >> stderr, 'Case #%d' % T
    print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return s
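# Readability note: the functions in this file are generated variants of the
# same Code Jam solution. Each reads one case, builds A from the (p, q, r, s)
# recurrence, computes prefix sums, then runs a two-pointer sweep that, for each
# start index a, keeps extending b while getsum() does not increase, tracking
# the smallest window value seen; the printed result is (total - best) / total.
# The helpers line() and getsum() are defined elsewhere in the original file.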
def func_525021bbfb694e2d81ea77e6dfdfb893(T, infile):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
    best = total - best
    print >> stderr, 'Case #%d' % T
    print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return q
def func_3085b093b0094ae3babe3700b49f3241(T, infile):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
    best = total - best
    print >> stderr, 'Case #%d' % T
    print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return N
def func_529d5d0ce6d24c7b89be80d23f00dc9d(T, infile):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
    best = total - best
    print >> stderr, 'Case #%d' % T
    print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return total
def func_d3f7b486a618491a9a2c5ce79bc867c9():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
return T
def func_0784b0652171461a8249f952f2b48000():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
return infile
def func_379b3ac414f3412d82a2a3d21559845e(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return i
def func_a15be2b4249741d2a57c8a1a96e038dd(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return p
def func_ddaa4aa4e21a46f1a02effc47cf59045(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return r
def func_f2ee59dcc42c4d53b8a37093f7e40f19(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return A
def func_4e6cc241c7314974b4d28fd300c27ef7(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return totalsum
def func_dc36827cd4164af3965269e69805f105(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return N
def func_311acdaf55a14825b30544b6f62de751(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
        print >> stderr, 'Case #%d' % T
<reponame>DebeshJha/tensorflow-1
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
General utilities
~~~~~~~~~~~~~~~~~
"""
import collections
from enum import Enum
import os
import time
import numpy as np
from tensorflow.compiler.plugin.poplar.driver.config_pb2 import IpuOptions
from tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent
from tensorflow.compiler.plugin.poplar.driver import config_pb2
from tensorflow.compiler.plugin.poplar.ops import gen_ipu_ops
# pylint: disable=unused-import
# These imports are only here to make it easier for the Tensorflow Wheel users
# to use these functions:
# ```
# from tensorflow.python import ipu
# ...
# ipu.utils.export_variables_from_live_session(...)
# ```
from tensorflow.compiler.plugin.poplar.tools.tensorflow_weights_extractor import (
export_variables_from_live_session, export_variables_from_live_model,
import_data_in_live_session, import_data_in_live_model)
# pylint: enable=unused-import
from tensorflow.compat.v1 import executing_eagerly
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import values
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import deprecation
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import dataset_extractor
class SelectionOrder(Enum):
"""Depending on the communication pattern of the model, the order in
which the IPUs are selected and mapped to shards can impact the performance.
For example, given a model which executes on multiple IPUs:
.. code-block:: python
def sharded_graph(pa, pb, pc, pd):
with ipu.scopes.ipu_shard(0):
o1 = pa + pb
with ipu.scopes.ipu_shard(1):
o2 = o1 + pc
with ipu.scopes.ipu_shard(2):
o3 = o2 + pd
return o3
and a typical machine with 8 Graphcore C2 cards:
.. code-block:: none
_______ _______
| | | |
| 14 |=============| 15 |
|_______| |_______|
|| ||
_______ _______
| | | |
| 12 |=============| 13 |
|_______| |_______|
|| ||
_______ _______
| | | |
| 10 |=============| 11 |
|_______| |_______|
|| ||
_______ _______
| | | |
| 8 |=============| 9 |
|_______| |_______|
|| ||
_______ _______
| | | |
| 6 |=============| 7 |
|_______| |_______|
|| ||
_______ _______
| | | |
| 4 |=============| 5 |
|_______| |_______|
|| ||
_______ _______
| | | |
| 2 |=============| 3 |
|_______| |_______|
|| ||
_______ _______
| | | |
| 0 |=============| 1 |
|_______| |_______|
(where each numbered square represents an IPU with the given device ID and the
== and || connections represent IPUs being directly connected via IPU-Links)
we can see that the `ipu_shard(0)` directly communicates with `ipu_shard(1)`
and that `ipu_shard(1)` directly communicates with `ipu_shard(2)`.
If the shards 0, 1, 2 were mapped to IPUs 0, 1, 2 in that order, then the
communication between shards 1 and 2 would not have a direct connection via an
IPU-Link and would have to perform a "hop" via an IPU.
If the shards 0, 1, 2 were mapped to IPUs 0, 1, 3 in that order, then the
communication between shards 1 and 2 would have a direct connection via an
IPU-Link which will reduce the communication cost.
This Enum class is used to control the order in which the IPUs are selected.
Currently, the following IPU selection orderings are supported:
* `AUTO`: automatically try and select the best selection given the network.
* `ZIGZAG`: follow the natural ordering of IPUs. In the above example, the
IPUs would be selected in the following order:
`0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15`.
* `SNAKE`: select IPUs such that each consecutive shard is directly
connected via IPU-Links to the shard before and after. In the above example,
the IPUs would be selected in the following order:
`0, 1, 3, 2, 4, 5, 7, 6, 8, 9, 11, 10, 12, 13, 15, 14`.
* `HOOF`: select IPUs such that each consecutive shard is directly
connected via IPU-Links to the shard before and after and the last and first
shard are on the same C2 cards. In the above example, the IPUs would be
selected in the following order:
`0, 2, 4, 6, 8, 10, 12, 14, 15, 13, 11, 9, 7, 5, 3, 1`.
The `SNAKE` and `HOOF` IPU selection orders are particularly beneficial for
pipelined models.
"""
AUTO = config_pb2.IpuSelectionOrder.Value("AUTO")
ZIGZAG = config_pb2.IpuSelectionOrder.Value("ZIGZAG")
SNAKE = config_pb2.IpuSelectionOrder.Value("SNAKE")
HOOF = config_pb2.IpuSelectionOrder.Value("HOOF")
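# Illustrative sketch (assumed usage): the selection order is normally chosen
# when building the IPU configuration, e.g.
#   cfg = create_ipu_config(selection_order=SelectionOrder.SNAKE)
#   configure_ipu_system(cfg)
# Both functions are defined later in this module; SNAKE is just an example.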
class ExecutionProfileType(Enum):
"""The execution profile type indicates the desired information in the
execution profile.
* `NO_PROFILE` indicates that there should be no execution profiling.
* `DEVICE_PROFILE` indicates that the execution profile should contain only
device wide events.
* `IPU_PROFILE` indicates that the profile should contain IPU level
execution events.
* `TILE_PROFILE` indicates that the profile should contain Tile level
execution events.
"""
NO_PROFILE = config_pb2.IpuExecutionProfileType.Value("NO_PROFILE")
DEVICE_PROFILE = config_pb2.IpuExecutionProfileType.Value("DEVICE_PROFILE")
IPU_PROFILE = config_pb2.IpuExecutionProfileType.Value("IPU_PROFILE")
TILE_PROFILE = config_pb2.IpuExecutionProfileType.Value("TILE_PROFILE")
class DeviceConnectionType(Enum):
"""Enumeration to describe the mechanism used to attach to the Poplar
device.
* `ALWAYS` indicates that the system will attach when configuring the
device.
* `ON_DEMAND` will defer connection to when the IPU is needed.
* `NEVER` will never try to attach to a device. Used when compiling offline.
"""
ALWAYS = config_pb2.IpuDeviceConnectionType.Value("ALWAYS")
ON_DEMAND = config_pb2.IpuDeviceConnectionType.Value("ON_DEMAND")
NEVER = config_pb2.IpuDeviceConnectionType.Value("NEVER")
def configure_ipu_system(config, device="cpu"):
"""Configure an IPU system. Passing an IpuOptions protobuf created by the
``create_ipu_config`` function.
Args:
config: An IpuOptions configuration protobuf
device: The CPU device which is local to the IPU hardware
Returns:
None
"""
if not isinstance(config, config_pb2.IpuOptions):
raise Exception("`config` must be an IpuOptions instance")
g = ops.Graph()
with g.as_default():
with ops.device(device):
cfg_op = gen_ipu_ops.ipu_configure_hardware(config.SerializeToString())
with session_lib.Session(graph=g) as sess:
sess.run(cfg_op)
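# Illustrative sketch (assumed usage): a typical flow builds an IpuOptions
# protobuf first and applies it before any IPU graphs are constructed, e.g.
#   cfg = create_ipu_config(profiling=False)
#   configure_ipu_system(cfg, device="cpu")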
def get_ipu_config(session=None):
"""Get the configuration of an IPU system.
Args:
session: An optional session on which to execute.
Returns:
A list of IpuOption instances, one for each PoplarExecutor.
"""
configurations = None
# Get the serialized output.
if executing_eagerly():
assert not session, "No session is required for eager execution."
configurations = gen_ipu_ops.ipu_get_configuration().numpy()
else:
s = session if session else session_lib.Session()
configurations = s.run(gen_ipu_ops.ipu_get_configuration())
# Deserialize and determine if a valid config exists,
  # i.e. user has successfully called ipu_configure_hardware.
deserialized = []
valid = False
for conf in configurations:
# Deserialize.
opt = IpuOptions()
opt.ParseFromString(conf)
deserialized.append(opt)
valid |= len(opt.device_config) > 0
if not valid:
raise RuntimeError("No IPU devices configured.")
return deserialized
def get_num_of_ipus_in_device(ipu_device, device="cpu"):
"""Get the number of physical IPUs
Args:
    ipu_device: The IPU device for which to get the number of devices.
device: The CPU device which is local to the IPU hardware.
Returns:
A number of physical IPUs configured for a particular TF device.
"""
g = ops.Graph()
with g.as_default():
with ops.device(device):
cfg_op = gen_ipu_ops.ipu_get_num_devices(ipu_device)
with session_lib.Session(graph=g) as sess:
return sess.run(cfg_op)
def running_on_ipu_model():
""" Check if XLA is configured to run on the ipu model.
Returns:
True if XLA is configured to run on the ipu model.
False if XLA is configured to run on real hardware.
"""
return "--use_ipu_model" in os.environ.get("TF_POPLAR_FLAGS", "")
@deprecation.deprecated_args(None, "Use set_optimization_options() instead.",
"max_cross_replica_sum_buffer_size",
"max_inter_ipu_copies_buffer_size")
def create_ipu_config(profiling=False,
enable_ipu_events=False,
use_poplar_text_report=False,
use_poplar_cbor_report=False,
profile_execution=None,
enable_poplar_serialized_graph=False,
report_every_nth_execution=0,
max_report_size=0x10000000,
report_directory="",
scheduler_selection="",
always_rearrange_copies_on_the_host=False,
merge_infeed_io_copies=False,
disable_graph_convolution_caching=False,
disable_graph_outlining=False,
retain_control_dependencies=False,
max_cross_replica_sum_buffer_size=0,
max_inter_ipu_copies_buffer_size=0,
max_scheduler_lookahead_depth=5,
max_scheduler_search_space_size=64,
prefetch_data_streams=True,
selection_order=None,
enable_experimental_remote_buffer_embedding=False):
"""Create an empty IPU session configuration structure.
Args:
profiling: Enable compilation reports, and IPU trace events.
enable_ipu_events: Enable IPU trace events without poplar reports.
use_poplar_text_report: Enable the Poplar textual report summary.
use_poplar_cbor_report: Enable the Poplar CBOR reports.
profile_execution: Include Poplar execution profiles in the execution
events. Can only be enabled if `profiling` is also enabled. If set, can be
`True`, 'False`, or a member of the `ExecutionProfileType` enumeration.
A `True` value indicates `ExecutionProfileType.DEVICE_PROFILE`.
enable_poplar_serialized_graph: Create the Poplar serialized graph and
include in the IPU compilation trace events.
report_every_nth_execution: Only produce an execution report on every Nth
execution. 0 = One report only.
max_report_size: The maximum size of Poplar profiles to include in the
profile events.
report_directory: When set, reports will be written to files in this
directory, instead of being written into the events. The events will
      contain the full
# Imports assumed for the widgets used below (Python 3 tkinter).
from tkinter import *
from tkinter import messagebox

class LifeApp(Frame):
#initiates the class's initial properties
def __init__(self,master,test_name):
super(LifeApp, self).__init__(master)
self.master.protocol("WM_DELETE_WINDOW", self.confirmclosure)
self.master.iconbitmap('aclogo.ico')
self.test_name = test_name
self.attempts = []
self.resultlab = []
self.pack()
if self.test_name == "Money":
self.MoneyApp()
elif self.test_name == "Strangers":
self.StrangerApp()
elif self.test_name == "Cooking":
self.CookApp()
elif self.test_name == "Shopping":
self.ShopApp()
elif self.test_name == "Self-Care":
self.SelfCareApp()
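    # Illustrative sketch (assumed usage): the window is driven by a Tk root,
    # roughly:
    #   root = Tk()
    #   app = LifeApp(root, "Money")
    #   root.mainloop()
    #   summary = app.file()   # text summary of the responses
    # How the surrounding assessment actually launches this test is not shown.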
#confirms whether the user wants to send this data through and end the test
def confirm(self):
confirmation = messagebox.askquestion("Confirm responses","Are you sure you want to send this data through?",icon="question")
if confirmation == "yes":
self.end()
else:
None
#adapts the data in resultlab into a string to be added to the results at the end of the whole assessment, in the form of a text file
def file(self):
self.endreturn = ""
for i in self.resultlab:
self.endreturn += i
return self.endreturn
#confirms whether the program will be closed when the top-right close button is clicked
def confirmclosure(self):
if messagebox.askokcancel("Quitting the program","Are you sure you want to close this test? Data entered will NEITHER be saved NOR be considered in the final assessment results!"):
self.master.destroy()
#retrieves the answer given to the first question
def store1(self):
self.dr1store = self.dr1.get()
#retrieves the answer given to the second question
def store2(self):
self.dr2store = self.dr2.get()
#retrieves the answer given to the third question
def store3(self):
self.dr3store = self.dr3.get()
#retrieves the answer given to the fourth question
def store4(self):
self.dr4store = self.dr4.get()
#retrieves the answer given to the fifth question
def store5(self):
self.dr5store = self.dr5.get()
#retrieves the answer given to the sixth question
def store6(self):
self.dr6store = self.dr6.get()
#appends the values in the respective answer variables to resultlab and then closes the window
def end(self):
if self.test_name == "Money":
self.resultlab.append("Topic selected: Money\n")
self.resultlab.append("Question 1: How do you manage your money?\nYou responded: {}\n".format(self.ent1.get()))
self.resultlab.append("Question 2: Do you have your own bank account?\nYou responded: {}\n".format(self.dr1store))
self.resultlab.append("Question 3: Do you have any savings?\nYou responded: {}\n".format(self.dr2store))
self.resultlab.append("Question 4: How do you work out how much to save (and/or spend)?\nYou responded: {}\n".format(self.ent4.get()))
self.resultlab.append("Question 5: How do you use your money?\nYou responded: {}\n".format(self.ent5.get()))
elif self.test_name == "Strangers":
self.resultlab.append("Topic selected: Strangers\n")
self.resultlab.append("Question 1: Would you get in a car with someone you don't know (even an Uber or other cab)?\nYou responded: {}\n".format(self.dr1store))
self.resultlab.append("Question 2: Would you accept money and/or deeds for/from someone you don't know?\nYou responded: {}\n".format(self.dr2store))
self.resultlab.append("Question 3: Would you talk to someone you don't know?\nYou responded: {}\n".format(self.dr3store))
self.resultlab.append("Question 4: Would you give your phone number to someone you don't know (and/or take theirs)?\nYou responded: {}\n".format(self.dr4store))
elif self.test_name == "Cooking":
self.resultlab.append("Topic selected: Cooking\n")
self.resultlab.append("Question 1: Can you cook a meal for yourself?\nYou responded: {}\n".format(self.dr1store))
self.resultlab.append("Question 2: Can you cook for others?\nYou responded: {}\n".format(self.dr2store))
self.resultlab.append("Question 3: Can you cook with others?\nYou responded: {}\n".format(self.dr3store))
self.resultlab.append("Question 4: Can you follow a recipe\nYou responded: {}\n".format(self.dr4store))
self.resultlab.append("Question 5: Do you know how to handle a stove/hob?\nYou responded: {}\n".format(self.dr5store))
elif self.test_name == "Shopping":
self.resultlab.append("Topic selected: Shopping\n")
self.resultlab.append("Question 1: Can you spend wisely?\nYou responded: {}\n".format(self.dr1store))
self.resultlab.append("Question 2: Can you travel on your own to the shops?\nYou responded: {}\n".format(self.dr2store))
self.resultlab.append("Question 3: Can you navigate around a shopping street, mall or district?\nYou responded: {}\n".format(self.dr3store))
self.resultlab.append("Question 4: Can you create and follow a shopping list?\nYou responded: {}\n".format(self.dr4store))
self.resultlab.append("Question 5: Can you use a trolley?\nYou responded: {}\n".format(self.dr5store))
elif self.test_name == "Self-Care":
self.resultlab.append("Topic selected: Self-Care\n")
self.resultlab.append("Question 1: Can you bath/shower yourself?\nYou responded: {}\n".format(self.dr1store))
self.resultlab.append("Question 2: Can you travel independently (to most local places, for example)?\nYou responded: {}\n".format(self.dr2store))
self.resultlab.append("Question 3: Can you plan your day and/or life effectively?\nYou responded: {}\n".format(self.dr3store))
self.resultlab.append("Question 4: Do you get enough sleep?\nYou responded: {}\n".format(self.dr4store))
self.resultlab.append("Question 5: Do you do enough exercise?\nYou responded: {}\n".format(self.dr5store))
self.resultlab.append("Qusetion 6: Do you have a social life?\nYou responded: {}\n".format(self.dr6store))
self.master.destroy()
#creates the Money questionnaire
def MoneyApp(self):
self.lab1 = Label(self,text="Topic for this test: Money")
self.lab1.pack()
self.q1 = Label(self,text="How do you manage your money?")
self.q1.pack()
self.ent1 = Entry(self,width=35)
self.ent1.pack()
self.q2 = Label(self,text="Do you have your own bank account?")
self.q2.pack()
self.dr1 = StringVar(self)
self.dr1.set("")
Radiobutton(self,text="Yes",variable=self.dr1,value="Yes",command=self.store1).pack()
Radiobutton(self,text="No",variable=self.dr1,value="No",command=self.store1).pack()
self.q3 = Label(self,text="Do you have any savings?")
self.q3.pack()
self.dr2 = StringVar(self)
self.dr2.set("")
Radiobutton(self,text="Yes",variable=self.dr2,value="Yes",command=self.store2).pack()
Radiobutton(self,text="No",variable=self.dr2,value="No",command=self.store2).pack()
self.q4 = Label(self,text="How do you work out how much to save (and/or spend)?")
self.q4.pack()
self.ent4 = Entry(self,width=35)
self.ent4.pack()
self.q5 = Label(self,text="How do you use your money?")
self.q5.pack()
self.ent5 = Entry(self,width=35)
self.ent5.pack()
self.exitbotun = Button(self,text="Enter",command=self.confirm)
self.exitbotun.pack()
#creates the Strangers questionnaire
def StrangerApp(self):
self.lab1 = Label(self,text="Topic for this test: Strangers")
self.lab1.pack()
self.q1 = Label(self,text="Would you get in a car with someone you don't know (even an Uber or other cab)?")
self.q1.pack()
self.dr1 = StringVar(self)
self.dr1.set("")
Radiobutton(self,text="Yes",variable=self.dr1,value="Yes",command=self.store1).pack()
Radiobutton(self,text="No",variable=self.dr1,value="No",command=self.store1).pack()
self.q2 = Label(self,text="Would you accept money and/or deeds for/from someone you don't know?")
self.q2.pack()
self.dr2 = StringVar(self)
self.dr2.set("")
Radiobutton(self,text="Yes",variable=self.dr2,value="Yes",command=self.store2).pack()
Radiobutton(self,text="No",variable=self.dr2,value="No",command=self.store2).pack()
self.q3 = Label(self,text="Would you talk to someone you don't know?")
self.q3.pack()
self.dr3 = StringVar(self)
self.dr3.set("")
Radiobutton(self,text="Yes",variable=self.dr3,value="Yes",command=self.store3).pack()
Radiobutton(self,text="No",variable=self.dr3,value="No",command=self.store3).pack()
self.q4 = Label(self,text="Would you give your phone number to someone you don't know (and/or take theirs)?")
self.q4.pack()
self.dr4 = StringVar(self)
self.dr4.set("")
Radiobutton(self,text="Yes",variable=self.dr4,value="Yes",command=self.store4).pack()
Radiobutton(self,text="No",variable=self.dr4,value="No",command=self.store4).pack()
self.exitbotun = Button(self,text="Enter",command=self.confirm)
self.exitbotun.pack()
#creates the Cooking questionnaire
def CookApp(self):
self.lab1 = Label(self,text="Topic for this test: Cooking")
self.lab1.pack()
self.q1 = Label(self,text="Can you cook a meal for yourself?")
self.q1.pack()
self.dr1 = StringVar(self)
self.dr1.set("")
Radiobutton(self,text="Yes",variable=self.dr1,value="Yes",command=self.store1).pack()
Radiobutton(self,text="No",variable=self.dr1,value="No",command=self.store1).pack()
self.q2 = Label(self,text="Can you cook for others?")
self.q2.pack()
self.dr2 = StringVar(self)
self.dr2.set("")
Radiobutton(self,text="Yes",variable=self.dr2,value="Yes",command=self.store2).pack()
Radiobutton(self,text="No",variable=self.dr2,value="No",command=self.store2).pack()
self.q3 = Label(self,text="Can you cook with others?")
self.q3.pack()
self.dr3 = StringVar(self)
self.dr3.set("")
Radiobutton(self,text="Yes",variable=self.dr3,value="Yes",command=self.store3).pack()
Radiobutton(self,text="No",variable=self.dr3,value="No",command=self.store3).pack()
self.q4 = Label(self,text="Can you follow a recipe?")
self.q4.pack()
self.dr4 = StringVar(self)
self.dr4.set("")
Radiobutton(self,text="Yes",variable=self.dr4,value="Yes",command=self.store4).pack()
Radiobutton(self,text="No",variable=self.dr4,value="No",command=self.store4).pack()
self.q5 = Label(self,text="Do you know how to handle a stove/hob?")
self.q5.pack()
self.dr5 = StringVar(self)
self.dr5.set("")
Radiobutton(self,text="Yes",variable=self.dr5,value="Yes",command=self.store5).pack()
Radiobutton(self,text="No",variable=self.dr5,value="No",command=self.store5).pack()
self.exitbotun = Button(self,text="Enter",command=self.confirm)
self.exitbotun.pack()
#creates the Shopping questionnaire
def ShopApp(self):
self.lab1 = Label(self,text="Topic for this test: Shopping")
self.lab1.pack()
self.q1 = Label(self,text="Can you spend wisely?")
self.q1.pack()
self.dr1 = StringVar(self)
self.dr1.set("")
Radiobutton(self,text="Yes",variable=self.dr1,value="Yes",command=self.store1).pack()
Radiobutton(self,text="No",variable=self.dr1,value="No",command=self.store1).pack()
self.q2 = Label(self,text="Can you travel on your own to the shops?")
self.q2.pack()
self.dr2 = StringVar(self)
self.dr2.set("")
Radiobutton(self,text="Yes",variable=self.dr2,value="Yes",command=self.store2).pack()
Radiobutton(self,text="No",variable=self.dr2,value="No",command=self.store2).pack()
self.q3 = Label(self,text="Can you navigate around a shopping street, mall or district?")
self.q3.pack()
self.dr3 = StringVar(self)
self.dr3.set("")
Radiobutton(self,text="Yes",variable=self.dr3,value="Yes",command=self.store3).pack()
Radiobutton(self,text="No",variable=self.dr3,value="No",command=self.store3).pack()
self.q4 = Label(self,text="Can you create and follow a shopping list?")
self.q4.pack()
self.dr4 = StringVar(self)
self.dr4.set("")
Radiobutton(self,text="Yes",variable=self.dr4,value="Yes",command=self.store4).pack()
Radiobutton(self,text="No",variable=self.dr4,value="No",command=self.store4).pack()
self.q5 = Label(self,text="Can you use a trolley?")
self.q5.pack()
self.dr5 = StringVar(self)
self.dr5.set("")
Radiobutton(self,text="Yes",variable=self.dr5,value="Yes",command=self.store5).pack()
Radiobutton(self,text="No",variable=self.dr5,value="No",command=self.store5).pack()
self.exitbotun = Button(self,text="Enter",command=self.confirm)
self.exitbotun.pack()
#creates the Self-Care questionnaire
def SelfCareApp(self):
self.lab1 = Label(self,text="Topic for this test: Self-Care")
self.lab1.pack()
self.q1 = Label(self,text="Can you bath/shower yourself?")
self.q1.pack()
self.dr1 = StringVar(self)
self.dr1.set("")
Radiobutton(self,text="Yes",variable=self.dr1,value="Yes",command=self.store1).pack()
Radiobutton(self,text="No",variable=self.dr1,value="No",command=self.store1).pack()
self.q2 = Label(self,text="Can you travel independently (to most local places, for example)?")
self.q2.pack()
self.dr2 = StringVar(self)
self.dr2.set("")
Radiobutton(self,text="Yes",variable=self.dr2,value="Yes",command=self.store2).pack()
Radiobutton(self,text="No",variable=self.dr2,value="No",command=self.store2).pack()
self.q3 = Label(self,text="Can you plan your day and/or life effectively?")
self.q3.pack()
self.dr3 = StringVar(self)
self.dr3.set("")
Radiobutton(self,text="Yes",variable=self.dr3,value="Yes",command=self.store3).pack()
Radiobutton(self,text="No",variable=self.dr3,value="No",command=self.store3).pack()
self.q4 = Label(self,text="Do you get enough sleep?")
self.q4.pack()
self.dr4 = StringVar(self)
self.dr4.set("")
Radiobutton(self,text="Yes",variable=self.dr4,value="Yes",command=self.store4).pack()
Radiobutton(self,text="No",variable=self.dr4,value="No",command=self.store4).pack()
self.q5 = Label(self,text="Do you do enough exercise?")
self.q5.pack()
self.dr5 = StringVar(self)
self.dr5.set("")
Radiobutton(self,text="Yes",variable=self.dr5,value="Yes",command=self.store5).pack()
Radiobutton(self,text="No",variable=self.dr5,value="No",command=self.store5).pack()
self.q6 = Label(self,text="Do you have a social life?")
self.q6.pack()
self.dr6 = StringVar(self)
self.dr6.set("")
Radiobutton(self,text="Yes",variable=self.dr6,value="Yes",command=self.store6).pack()
Radiobutton(self,text="No",variable=self.dr6,value="No",command=self.store6).pack()
self.exitbotun = Button(self,text="Enter",command=self.confirm)
self.exitbotun.pack()
#the life skills topic selection class
class select_test(Frame):
    #initialises the class's initial properties
def __init__(self,master):
super(select_test,self).__init__(master)
self.testchoice = ("Money","Strangers","Cooking","Shopping","Self-Care")
self.master.protocol("WM_DELETE_WINDOW",self.confirmclosure)
self.master.iconbitmap('aclogo.ico')
self.pack()
self.choico()
#confirms whether the program will be closed when the top-right close button is clicked
def confirmclosure(self):
if messagebox.askokcancel("Quitting the program","Are you sure you want to close this test? Data entered will NEITHER be saved NOR be considered in the final assessment results!"):
self.master.destroy()
#allows the supervisor to select from 5 topic-specific questionnaires
def choico(self):
self.choice = StringVar()
self.choice.set("")
Label(self,text="Select a test from the 5 options below").pack()
Label(self,text="").pack()
for i in range(len(self.testchoice)):
Radiobutton(self,text=self.testchoice[i],indicatoron=0,width=20,padx=20,variable=self.choice,value=self.testchoice[i]).pack()
Label(self,text="").pack()
self.starto = Button(self,text="Perform Test",command=self.perform)
self.starto.pack()
#gets the choice selected and closes the window
def perform(self):
self.selection = self.choice.get()
self.master.destroy()
#returns the selected choice
def save(self):
return self.selection
#function for the confidence questionnaire
def Mantra():
root = Tk()
root.title("Confidence Questionnaire")
root.geometry("640x480")
sign = QuestionApp(root, "Mantras.txt")
root.mainloop()
box = sign.file()
return box
#function for the motivation questionnaire
def Motivation():
root = Tk()
root.title("Motivation Questionnaire")
root.geometry("640x480")
sign = MotivApp(root)
root.mainloop()
box = sign.file()
return box
#function for the self-awareness questionnaire
def SelfAware():
root = Tk()
root.title("Self Awareness Questionnaire")
root.geometry("800x600")
sign = SelfAwareApp(root)
root.mainloop()
box = sign.file()
return box
#function
# -*- coding: utf-8 -*-
""" Sahana Eden Request Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3RequestModel",
"S3RequestItemModel",
"S3RequestSkillModel",
"S3RequestSummaryModel",
"S3RequestRecurringModel",
"S3CommitModel",
"S3CommitItemModel",
"S3CommitPersonModel",
"req_item_onaccept",
"req_update_status",
"req_rheader",
"req_match",
"req_site_virtualfields",
"req_add_from_template",
]
import datetime
from gluon import *
from gluon.storage import Storage
from ..s3 import *
REQ_STATUS_NONE = 0
REQ_STATUS_PARTIAL = 1
REQ_STATUS_COMPLETE = 2
T = current.T
req_status_opts = { REQ_STATUS_NONE: SPAN(T("None"),
_class = "req_status_none"),
REQ_STATUS_PARTIAL: SPAN(T("Partial"),
_class = "req_status_partial"),
REQ_STATUS_COMPLETE: SPAN(T("Complete"),
_class = "req_status_complete")
}
rn_label = T("%(REQ)s Number") % dict(REQ=current.deployment_settings.get_req_shortname())
# =============================================================================
class S3RequestModel(S3Model):
"""
"""
names = ["req_req",
"req_req_id",
"req_req_ref",
"req_hide_quantities",
"req_create_form_mods",
"req_prep",
"req_tabs",
"req_priority_opts",
]
def model(self):
T = current.T
db = current.db
auth = current.auth
session = current.session
settings = current.deployment_settings
human_resource_id = self.hrm_human_resource_id
messages = current.messages
NONE = messages.NONE
UNKNOWN_OPT = messages.UNKNOWN_OPT
s3_string_represent = lambda str: str if str else NONE
add_component = self.add_component
crud_strings = current.response.s3.crud_strings
set_method = self.set_method
# Multiple Item/Skill Types per Request?
multiple_req_items = settings.get_req_multiple_req_items()
req_status = S3ReusableField("req_status", "integer",
label = T("Request Status"),
requires = IS_NULL_OR(IS_IN_SET(req_status_opts,
zero = None)),
represent = lambda opt: \
req_status_opts.get(opt, UNKNOWN_OPT),
default = REQ_STATUS_NONE,
writable = settings.get_req_status_writable(),
)
req_ref = S3ReusableField("req_ref", "string",
label = rn_label,
writable = False,
represent = self.req_ref_represent,
)
req_priority_opts = {
3:T("High"),
2:T("Medium"),
1:T("Low")
}
req_types_deployed = settings.get_req_req_type()
req_type_opts = {}
if settings.has_module("inv") and "Stock" in req_types_deployed:
# Number hardcoded in controller
req_type_opts[1] = settings.get_req_type_inv_label()
#if settings.has_module("asset") and "Asset" in req_types_deployed:
# req_type_opts[2] = T("Assets")
if settings.has_module("hrm") and "People" in req_types_deployed:
req_type_opts[3] = settings.get_req_type_hrm_label()
#if settings.has_module("cr") and "Shelter" in req_types_deployed:
# req_type_opts[4] = T("Shelter")
if "Summary" in req_types_deployed:
req_type_opts[8] = T("Summary")
if "Other" in req_types_deployed:
req_type_opts[9] = T("Other")
use_commit = settings.get_req_use_commit()
req_ask_security = settings.get_req_ask_security()
req_ask_transport = settings.get_req_ask_transport()
# ---------------------------------------------------------------------
# Requests
tablename = "req_req"
table = self.define_table(tablename,
self.super_link("doc_id", "doc_entity"),
self.event_event_id(
default=session.s3.event,
readable = False,
writable = False,
ondelete="SET NULL"),
Field("type", "integer",
requires = IS_IN_SET(req_type_opts, zero=None),
represent = lambda opt: \
req_type_opts.get(opt, UNKNOWN_OPT),
label = T("Request Type")),
req_ref(),
s3_datetime(label = T("Date Requested"),
default="now",
past=8760, # Hours, so 1 year
future=0,
#represent="date",
#widget="date",
),
Field("priority", "integer",
default = 2,
label = T("Priority"),
#@ToDo: Colour code the priority text - red, orange, green
represent = lambda opt: \
req_priority_opts.get(opt, UNKNOWN_OPT),
#represent = self.req_priority_represent,
requires = IS_NULL_OR(
IS_IN_SET(req_priority_opts))
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
self.super_link("site_id", "org_site",
label = T("Requested For Facility"),
default = auth.user.site_id if auth.is_logged_in() else None,
readable = True,
writable = True,
empty = False,
#required = True,
instance_types = auth.org_site_types,
updateable = True,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3SiteAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Requested By Facility"),
# T("Enter some characters to bring up a list of possible matches"))),
represent = self.org_site_represent
),
#Field("location",
# label = T("Neighborhood")),
# Donations: What will the Items be used for?; People: Task Details
s3_comments("purpose",
label=T("Purpose"),
represent = self.req_purpose_represent,
comment=""),
Field("is_template", "boolean",
label = T("Recurring Request?"),
default = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Recurring Request?"),
T("If this is a request template to be added repeatedly then the schedule can be set on the next page."))),
),
s3_datetime("date_required",
label = T("Date Needed By"),
past=0,
future=8760, # Hours, so 1 year
represent="date",
widget="date",
),
s3_datetime("date_required_until",
label = T("Date Required Until"),
past=0,
future=8760, # Hours, so 1 year
readable = False,
writable = False
),
human_resource_id("requester_id",
label = T("Requester"),
empty = settings.get_req_requester_optional(),
#writable = False,
#comment = None,
default = auth.s3_logged_in_human_resource()),
human_resource_id("assigned_to_id", # This field should be in req_commit, but that complicates the UI
readable = False,
writable = False,
label = T("Assigned To")),
human_resource_id("approved_by_id",
label = T("Approved By"),
readable = False,
writable = False,
),
human_resource_id("request_for_id",
label = T("Requested For"),
readable = False,
writable = False,
#default = auth.s3_logged_in_human_resource()
),
Field("transport_req", "boolean",
readable = req_ask_transport,
writable = req_ask_transport,
label = T("Transportation Required")),
Field("security_req", "boolean",
readable = req_ask_security,
writable = req_ask_security,
label = T("Security Required")),
s3_datetime("date_recv",
label = T("Date Received"), # Could be T("Date Delivered") - make deployment_setting
past=8760, # Hours, so 1 year
future=0,
readable = False,
writable = False,
),
human_resource_id("recv_by_id",
label = T("Received By"),
# @ToDo: Set this in Update forms? Dedicated 'Receive' button?
# (Definitely not in Create forms)
#default = auth.s3_logged_in_human_resource()
),
req_status("commit_status",
readable = not use_commit,
writable = not use_commit,
label = T("Commit. Status")),
req_status("transit_status",
label = T("Transit Status")),
req_status("fulfil_status",
label = T("Fulfil. Status")),
Field("cancel", "boolean",
label = T("Cancel"),
default = False),
s3_comments(comment=""),
*s3_meta_fields())
if len(req_type_opts) == 1:
k, v = req_type_opts.popitem()
field = table.type
field.default = k
field.requires = k
field.writable = False
field.readable = False
if not settings.get_req_use_req_number():
table.req_ref.readable = False
table.req_ref.writable = False
# CRUD strings
ADD_REQUEST = T("Make Request")
crud_strings[tablename] = Storage(
title_create = ADD_REQUEST,
title_display = T("Request Details"),
title_list = T("Requests"),
title_map=T("Map of Requests"),
title_report = T("Requests Report"),
title_search = T("Search Requests"),
title_update = T("Edit Request"),
subtitle_create = ADD_REQUEST,
label_list_button = T("List Requests"),
label_create_button = ADD_REQUEST,
label_delete_button = T("Delete Request"),
msg_record_created = T("Request Added"),
msg_record_modified = T("Request Updated"),
msg_record_deleted = T("Request Canceled"),
msg_list_empty = T("No Requests"))
# Search method
req_req_search = (
S3SearchOptionsWidget(
name="req_search_fulfil_status",
label=T("Status"),
field="fulfil_status",
options = req_status_opts,
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_type",
label=T("Type"),
field="type",
options = req_type_opts,
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_priority",
label=T("Priority"),
field="priority",
options = req_priority_opts,
cols = 3,
),
#S3SearchOptionsWidget(
# name="req_search_L1",
# field="site_id$location_id$L1",
# location_level="L1",
# cols = 3,
#),
#S3SearchOptionsWidget(
# name="req_search_L2",
# field="site_id$location_id$L2",
# location_level="L2",
# cols = 3,
#),
S3SearchOptionsWidget(
name="req_search_L3",
field="site_id$location_id$L3",
location_level="L3",
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_L4",
field="site_id$location_id$L4",
location_level="L4",
cols = 3,
),
)
report_fields = ["priority",
"site_id$organisation_id",
#"site_id$location_id$L1",
#"site_id$location_id$L2",
"site_id$location_id$L3",
"site_id$location_id$L4",
]
# @ToDo: id gets stripped in _select_field
fact_fields = report_fields + ["id"]
# Reusable Field
req_id = S3ReusableField("req_id", table, sortby="date",
requires = IS_ONE_OF(db,
"req_req.id",
lambda id, row:
self.req_represent(id, row,
show_link=False),
orderby="req_req.date",
sort=True),
represent = self.req_represent,
label = T("Request"),
ondelete = "CASCADE")
list_fields = ["id",
"site_id"
#"event_id",
]
if len(settings.get_req_req_type()) > 1:
list_fields.append("type")
if settings.get_req_use_req_number():
list_fields.append("req_ref")
list_fields.append("priority")
if use_commit:
list_fields.append("commit_status")
list_fields.append("transit_status")
list_fields.append("fulfil_status")
list_fields.append("date_required")
self.configure(tablename,
onaccept = self.req_onaccept,
deduplicate = self.req_req_duplicate,
search_method = S3Search(advanced=req_req_search),
report_options = Storage(
search=req_req_search,
rows=report_fields,
cols=report_fields,
facts=fact_fields,
methods=["count", "list", "sum"],
defaults=Storage(rows="site_id$location_id$L4",
cols="priority",
fact="id",
aggregate="count")
),
list_fields = list_fields
)
# Custom Methods
set_method("req", "req",
method = "check",
action=self.req_check)
# Print Forms
set_method("req", "req",
method="form",
action=self.req_form)
# Components
# Documents as a component of Requests
add_component("req_document",
req_req="req_id")
# Request Items as component of Requests
add_component("req_req_item",
req_req=dict(joinby="req_id",
multiple=multiple_req_items))
# Request Skills as component of Requests
add_component("req_req_skill",
req_req=dict(joinby="req_id",
multiple=multiple_req_items))
# Commitment as a component of Requests
add_component("req_commit",
req_req="req_id")
# Request Jobs as a component of Requests
add_component(S3Task.TASK_TABLENAME,
req_req=dict(name="job",
joinby="req_id",
link="req_job",
key="scheduler_task_id",
actuate="replace"))
# ---------------------------------------------------------------------
# Pass variables back to global scope (s3.*)
#
return Storage(
req_create_form_mods = self.req_create_form_mods,
req_hide_quantities = self.req_hide_quantities,
req_prep = self.req_prep,
req_priority_opts = req_priority_opts,
req_priority_represent = self.req_priority_represent,
req_req_id = req_id,
req_req_ref = req_ref,
req_status_opts = req_status_opts,
req_type_opts = req_type_opts,
req_tabs = self.req_tabs,
)
# -------------------------------------------------------------------------
def defaults(self):
"""
            Safe defaults for model-global names in case the module is disabled
# Copyright (C) 2007-2010 by <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
# Shamelessly borrowed from http://launchpad.net/flufl.lock
"""Portable, NFS-safe file locking with timeouts for POSIX systems.
This code implements an NFS-safe file-based locking algorithm influenced by
the GNU/Linux open(2) manpage, under the description of the O_EXCL option:
[...] O_EXCL is broken on NFS file systems, programs which rely on it
for performing locking tasks will contain a race condition. The
solution for performing atomic file locking using a lockfile is to
create a unique file on the same fs (e.g., incorporating hostname and
pid), use link(2) to make a link to the lockfile. If link() returns
0, the lock is successful. Otherwise, use stat(2) on the unique file
to check if its link count has increased to 2, in which case the lock
is also successful.
The assumption made here is that there will be no 'outside interference',
e.g. no agent external to this code will ever link() to the specific lock
files used.
Lock objects support lock-breaking so that you can't wedge a process forever.
This is especially helpful in a web environment, but may not be appropriate
for all applications.
Locks have a 'lifetime', which is the maximum length of time the process
expects to retain the lock. It is important to pick a good number here
because other processes will not break an existing lock until the expected
lifetime has expired. Too long and other processes will hang; too short and
you'll end up trampling on existing process locks -- and possibly corrupting
data. In a distributed (NFS) environment, you also need to make sure that
your clocks are properly synchronized.
"""
__metaclass__ = type
__all__ = [
'LockError',
'AlreadyLockedError',
'NotLockedError',
'Lock',
]
import os
import sys
import time
import errno
import random
import socket
import logging
import datetime
DEFAULT_LOCK_LIFETIME = datetime.timedelta( seconds = 15 )
# Allow a bit of clock skew.
CLOCK_SLOP = datetime.timedelta( seconds = 10 )
try:
MAXINT = sys.maxint
except AttributeError:
# Python 3.
MAXINT = sys.maxsize
# Details separator; also used in calculating the claim file path. Lock files
# should not include this character.
SEP = '|'
log = logging.getLogger( 'flufl.lock' )
# Install a null handler to avoid warnings when applications don't set their
# own flufl.lock logger. See http://docs.python.org/library/logging.html
try:
from logging import NullHandler
except ImportError:
# Python < 2.7.
class NullHandler( logging.Handler ):
def emit( self, record ):
pass
logging.getLogger( 'flufl.lock' ).addHandler( NullHandler() )
# Exceptions that can be raised by this module
class LockError( Exception ):
"""Base class for all exceptions in this module."""
class AlreadyLockedError( LockError ):
"""An attempt is made to lock an already locked object."""
class NotLockedError( LockError ):
"""An attempt is made to unlock an object that isn't locked."""
class TimeOutError( LockError ):
"""The timeout interval elapsed before the lock succeeded."""
class Lock:
"""A portable way to lock resources by way of the file system."""
def __init__( self, lockfile, lifetime = None ):
"""Create the resource lock using the given file name and lifetime.
Each process laying claim to this resource lock will create their own
temporary lock file based on the path specified. An optional lifetime
is the length of time that the process expects to hold the lock.
:param lockfile: The full path to the lock file.
:param lifetime: The expected maximum lifetime of the lock, as a
timedelta. Defaults to 15 seconds.
"""
if lifetime is None:
lifetime = DEFAULT_LOCK_LIFETIME
self._lockfile = lockfile
self._lifetime = lifetime
# Calculate a hard link file name that will be used to lay claim to
# the lock. We need to watch out for two Lock objects in the same
# process pointing to the same lock file. Without this, if you lock
# lf1 and do not lock lf2, lf2.locked() will still return True.
self._claimfile = SEP.join( (
self._lockfile,
socket.getfqdn(),
str( os.getpid() ),
str( random.randint( 0, MAXINT ) ),
) )
# For transferring ownership across a fork.
self._owned = True
def __repr__( self ):
return '<%s %s [%s: %s] pid=%s at %#xx>' % (
self.__class__.__name__,
self._lockfile,
( 'locked' if self.is_locked else 'unlocked' ),
self._lifetime, os.getpid(), id( self ) )
@property
def details( self ):
"""Details as read from the lock file.
:return: A 3-tuple of hostname, process id, file name.
:rtype: (str, int, str)
:raises NotLockedError: if the lock is not acquired.
"""
try:
with open( self._lockfile ) as fp:
filename = fp.read().strip()
except IOError as error:
if error.errno == errno.ENOENT:
raise NotLockedError( 'Details are unavailable' )
raise
# Rearrange for signature.
try:
lockfile, hostname, pid, random = filename.split( SEP )
except ValueError:
raise NotLockedError( 'Details are unavailable' )
return hostname, int( pid ), lockfile
@property
def lifetime( self ):
return self._lifetime
@lifetime.setter
def lifetime( self, lifetime ):
self._lifetime = lifetime
def refresh( self, lifetime = None, unconditionally = False ):
"""Refreshes the lifetime of a locked file.
Use this if you realize that you need to keep a resource locked longer
than you thought.
:param lifetime: If given, this sets the lock's new lifetime. This
must be a datetime.timedelta.
:param unconditionally: When False (the default), a `NotLockedError`
is raised if an unlocked lock is refreshed.
:raises NotLockedError: if the lock is not set, unless optional
`unconditionally` flag is set to True.
"""
if lifetime is not None:
self._lifetime = lifetime
# Do we have the lock? As a side effect, this refreshes the lock!
if not self.is_locked and not unconditionally:
raise NotLockedError( '%s: %s' % ( repr( self ), self._read() ) )
def lock( self, timeout = None ):
"""Acquire the lock.
This blocks until the lock is acquired unless optional timeout is not
None, in which case a `TimeOutError` is raised when the timeout
expires without lock acquisition.
:param timeout: A datetime.timedelta indicating approximately how long
the lock acquisition attempt should be made. None (the default)
means keep trying forever.
:raises AlreadyLockedError: if the lock is already acquired.
:raises TimeOutError: if `timeout` is not None and the indicated time
interval expires without a lock acquisition.
"""
if timeout is not None:
timeout_time = datetime.datetime.now() + timeout
# Make sure the claim file exists, and that its contents are current.
self._write()
# XXX This next call can fail with an EPERM. I have no idea why, but
# I'm nervous about wrapping this in a try/except. It seems to be a
# very rare occurrence, only happens from cron, and has only(?) been
# observed on Solaris 2.6.
self._touch()
log.debug( 'laying claim: %s', self._lockfile )
# For quieting the logging output
loopcount = -1
while True:
loopcount += 1
# Create the hard link and test for exactly 2 links to the file.
try:
os.link( self._claimfile, self._lockfile )
                # If we got here, we know we got the lock, and never
# had it before, so we're done. Just touch it again for the
# fun of it.
log.debug( 'got the lock: %s', self._lockfile )
self._touch()
break
except OSError as error:
# The link failed for some reason, possibly because someone
# else already has the lock (i.e. we got an EEXIST), or for
# some other bizarre reason.
if error.errno == errno.ENOENT:
# XXX in some Linux environments, it is possible to get an
# ENOENT, which is truly strange, because this means that
# self._claimfile didn't exist at the time of the
| |
<filename>mitmproxy-mock/moxy.py<gh_stars>1-10
#
# moxy.py: A mitmproxy script for mocking/modifying server responses.
#
# The mock configuration is loaded from a JSON file, e.g.:
#
# mitmdump -s moxy.py --set mock=example.json -m reverse:https://foo.com/
#
# See config/example.json and README.md for examples and documentation.
#
# Authors:
# * <NAME> (design and initial version), https://github.com/arkku
#
# Copyright © 2020 Wolt Enterprises
#
import json
import os
import random
import re
from collections import OrderedDict
from typing import Optional, Tuple, Union
from mitmproxy import http
from mitmproxy import ctx
from mitmproxy import net
def host_matches(host: str, allow) -> bool:
"""
Returns whether `host` matches `allow`.
`allow` may be a string pattern, or a list of such patterns, in which
case returns True if `host` matches any pattern in `allow`.
- If the pattern begins with a dot, `host` must end with the suffix
following the dot
    - If the pattern ends with a dot, `host` must start with the pattern
    - If the pattern begins with a tilde, the rest of the pattern is treated as
a regular expression that must be found in `host`
- Otherwise `host` must equal the pattern
"""
if isinstance(allow, str):
if allow.startswith("."):
return host.endswith(allow[1:])
elif allow.endswith("."):
return host.startswith(allow)
elif allow.startswith("~"):
return bool(compiled_re_for(allow[1:]).search(host))
else:
return host == allow
elif isinstance(allow, dict):
return bool(allow.get(host, False))
elif allow is None:
return True
else:
for allowed_host in allow:
if host_matches(host, allowed_host):
return True
return False
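# Illustrative examples of the matching rules above (all host names and
# patterns are made up for demonstration):
#
#   host_matches("api.example.com", ".example.com")      # True: suffix match
#   host_matches("example.com", "example.")               # True: prefix match
#   host_matches("example.com", "~^ex.mple")              # True: regex match
#   host_matches("other.org", ["example.com", ".net"])    # False: no pattern matches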
def compiled_re_for(re_str: str):
"""
Returns a compiled regular expression object for the string `re_str`.
The compiled regular expressions are cached in memory.
"""
global re_cache
result = re_cache.get(re_str)
if result is None:
result = re.compile(re_str, re.X)
re_cache[re_str] = result
return result
def matches_value_or_list(value, allow) -> bool:
"""
Returns whether `value` matches `allow`.
`allow` may either be of the same type as `value`, or a list of such items,
in which case returns True if `value` matches any element of `allow`. In
case of strings, value may have a tilde prefix (`~`) in which case its
suffix is treated as a regular expression.
"""
if type(value) is type(allow):
if isinstance(allow, str) and allow.startswith("~"):
return (value == allow) or bool(compiled_re_for(allow[1:]).search(value))
else:
return value == allow
elif isinstance(allow, dict):
return allow.get(value, False)
elif isinstance(allow, str):
return allow == str(value)
else:
for allowed in allow:
if matches_value_or_list(value, allowed):
return True
return False
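# Illustrative examples (values are made up):
#
#   matches_value_or_list("GET", ["GET", "POST"])    # True: value found in list
#   matches_value_or_list("404", "~^4\d\d$")         # True: regex match on strings
#   matches_value_or_list(200, "200")                # True: non-strings compared via str()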
def request_matches_config(request: http.HTTPRequest, config: dict) -> bool:
"""
Returns whether `request` is matched by `config`. This checks the following:
- `host` (some patterns supported, see `host_matches`)
- `scheme` (exact match or list)
    - `method` (exact match or list)
- `path` (exact match or list, normally matched already before coming here)
- `query` (keys are exact, values either exact or list)
    - `request` (the content of the request body)
    - `require` (dictionary from variable names to required values)
"""
if not config:
return False
host_whitelist = config.get("host", mock_config.get("host"))
if not host_matches(str(request.host), host_whitelist):
return False
required_scheme = config.get("scheme", mock_config.get("scheme"))
if required_scheme and not matches_value_or_list(request.scheme, required_scheme):
return False
required_method = config.get("method")
if required_method and not matches_value_or_list(request.method, required_method):
return False
required_path = config.get("path")
if required_path and not matches_value_or_list(request.path, required_path):
return False
required_query = config.get("query")
if required_query:
query = request.query
for key in required_query:
value = required_query[key]
if not ((key in query) and matches_value_or_list(query[key], value)):
return False
required_content = config.get("request")
if required_content and not content_matches(request.text, required_content):
return False
required_state = config.get("require")
if required_state:
if isinstance(required_state, dict):
for variable, required_value in required_state.items():
value = mock_state.get(variable, "")
if not matches_value_or_list(value, required_value):
return False
else:
variable = config.get("variable", request.path.split("?")[0])
if not matches_value_or_list(mock_state.get(variable, ""), required_state):
return False
return True
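# An illustrative `config` entry combining several of the criteria above (all
# values are invented for demonstration, not taken from any real mock file):
#
#   {
#       "host": ".example.com",
#       "method": ["GET", "POST"],
#       "path": "~^/v1/items",
#       "query": { "lang": ["en", "fi"] },
#       "require": { "logged_in": "yes" }
#   }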
def is_subset(subset, superset) -> bool:
"""
Returns whether `subset` is indeed a subset of `superset`. That is, all
items contained in `subset` must be found exactly in `superset`. Any strings
in subset may be prefixed with a tilde (`~`) in which case the suffix is
interpreted as a regular expression.
"""
try:
if isinstance(subset, dict):
return all(key in superset and is_subset(subset[key], superset[key]) for key in subset)
elif isinstance(subset, list):
return all(any(is_subset(subitem, superitem) for superitem in superset) for subitem in subset)
elif isinstance(subset, str):
if subset == "~":
return True
elif subset.startswith("~"):
allow_re = compiled_re_for(subset[1:])
return bool(allow_re.search(str(superset)))
else:
return str(superset) == subset
else:
return subset == superset
except Exception as error:
ctx.log.debug("is_subset incompatible types: {}: {} {}".format(error, subset, superset))
return False
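# Illustrative examples (data is made up):
#
#   is_subset({"id": 1}, {"id": 1, "name": "x"})                     # True
#   is_subset({"name": "~^foo"}, {"name": "foobar"})                 # True: regex value
#   is_subset([{"id": 2}], [{"id": 1}, {"id": 2, "extra": True}])    # True: every subitem matches some superitem
#   is_subset({"id": 2}, {"id": 3})                                  # False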
def content_matches(content_str: Optional[str], allow: Union[str,list,dict], content_object: Optional[Union[dict,list]] = None) -> bool:
"""
Returns whether `content` matches the `allow` criteria.
`allow` may be of the following types:
- a string prefixed with a tilde (`~`), in which case the suffix is
interpreted as a regular expression and matched against `content`
- any other string, which needs to be a substring of `content`
- a dictionary, in which case `content` is interpreted as a JSON object
which must be a superset `allow` (see `is_subset`)
- a list of any of the above, which must all match
"""
if isinstance(allow, str) or isinstance(allow, dict):
allow = [ allow ]
for allowed in allow:
try:
if isinstance(allowed, str):
if content_str is None:
content_str = content_as_str(content_object) or str(content_object)
if allowed.startswith("~"):
allow_re = compiled_re_for(allowed[1:])
if not allow_re.search(content_str):
return False
elif not allowed in content_str:
return False
elif isinstance(allowed, dict):
if content_object is None:
content_object = content_as_object(content_str) or {}
if not is_subset(allowed, content_object):
return False
elif not content_matches(content_str, allowed, content_object):
return False
except Exception as error:
ctx.log.info("Error: {}: matching {}".format(error, allowed))
return False
return True
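# Illustrative examples (the body text is made up; the dict form relies on
# content_as_object(), which is assumed here to parse the body as JSON):
#
#   content_matches('{"status": "ok", "items": [1, 2]}', {"status": "ok"})    # True
#   content_matches('hello world', "~w.rld")                                   # True: regex
#   content_matches('hello world', ["hello", "~world$"])                       # True: all entries must match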
def response_matches_config(response: Optional[http.HTTPResponse], config: dict) -> bool:
"""
Returns whether `response` is matched by `config`. This checks the following:
- `status` (the HTTP status code)
- `error` (true iff the HTTP status >= 400)
- `content` (a string or a list of strings where _all_ must match)
For content matching, each string can either be a regular expression denoted
by a tilde prefix (`~`), otherwise a substring that must be found exactly.
"""
if not response:
return False
required_status = config.get("status")
if required_status and not matches_value_or_list(response.status_code, required_status):
return False
required_error_state = config.get("error")
if isinstance(required_error_state, bool) and required_error_state != (response.status_code >= 400):
return False
required_content = config.get("content")
if required_content and not content_matches(response.text, required_content):
return False
return True
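# An illustrative response-side `config` fragment (values invented):
#
#   { "status": [200, 201], "error": False, "content": "~ok" }
#
# matches any successful 200/201 response whose body contains "ok".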
def merge_content(merge, content):
"""
Merges `merge` into `content` recursively for dictionaries and lists.
"""
if isinstance(merge, str) and (merge.startswith(".") and (merge.endswith(".json") or merge.endswith(".js"))):
try:
with open(merge) as merge_file:
merge = json.load(merge_file)
except:
pass
if isinstance(merge, dict):
if isinstance(content, dict):
for key in merge:
content[key] = merge_content(merge[key], content.get(key))
elif isinstance(content, list) and ("where" in merge):
where = merge["where"]
match_condition = not bool(merge.get("negated", False))
match_move = merge.get("move")
index, end_index = 0, len(content)
while index < end_index:
element = content[index]
if bool(is_subset(where, element)) == match_condition:
new_element = element
if "replace" in merge:
new_element = merge_content(merge["replace"], None)
if "merge" in merge:
new_element = merge_content(merge["merge"], new_element or {})
elif merge.get("delete"):
new_element = None
if new_element is None:
del content[index]
end_index -= 1
elif match_move:
del content[index]
if match_move == "head" or match_move == "first":
content.insert(0, new_element)
index += 1
else:
content.append(new_element)
end_index -= 1
else:
content[index] = new_element
index += 1
if not merge.get("forall", True):
break
else:
index += 1
else:
content = merge
elif isinstance(merge, list):
if isinstance(content, list):
content = content + merge
elif content is None:
content = merge
else:
content = [ content ] + merge
else:
content = merge
return content
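# Illustrative example of the list-editing form above (the "where"/"merge"
# keys); all data is invented for demonstration:
#
#   merge   = { "where": {"id": 2}, "merge": {"name": "patched"} }
#   content = [ {"id": 1}, {"id": 2, "name": "original"} ]
#   merge_content(merge, content)
#   # -> [ {"id": 1}, {"id": 2, "name": "patched"} ]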
def delete_content(delete, content):
"""
Returns `content` after recursively deleting `delete` from it.
Any matching dictionary keys are deleted if their value is empty or matches
the corresponding value in `content`. For lists, if `delete` has a non-empty
list, its elements are compared to the corresponding list in `content`
according to `is_subset`, and any matches are deleted from `content`.
"""
if isinstance(delete, dict):
for key in delete:
value = delete[key]
if isinstance(value, dict):
if value:
content_value = content.get(key)
if isinstance(content_value, dict):
new_content = delete_content(value, content_value)
content[key] = new_content
else:
content.pop(key, None)
elif isinstance(value, list):
if value:
content_value = content.get(key)
if isinstance(content_value, list):
content[key] = delete_content(value, content_value)
else:
content.pop(key, None)
else:
if (not value) or content.get(key) == value:
content.pop(key, None)
elif isinstance(delete, list):
        if delete and
"""
Gibbs sampler for non-negative matrix tri-factorisation.
Optimised to draw all columns in parallel.
We expect the following arguments:
- R, the matrix
- M, the mask matrix indicating observed values (1) and unobserved ones (0)
- K, the number of row clusters
- L, the number of column clusters
- priors = { 'alpha' = alpha_R, 'beta' = beta_R, 'lambdaF' = [[lambdaFik]], 'lambdaS' = [[lambdaSkl]], 'lambdaG' = [[lambdaGjl]] },
a dictionary defining the priors over tau, F, S, G.
Initialisation can be done by running the initialise() function, with arguments init_S and init_FG:
- 'random' -> draw initial values randomly from priors Exp, Gamma
- 'exp' -> use the expectation of the priors Exp, Gamma
- 'kmeans' -> initialise F and G by K-means clustering of R (init_FG only)
Alternatively, you can define your own initial values for F, S, G, and tau.
Usage of class:
    BNMF = bnmtf_gibbs_optimised(R,M,K,L,priors)
    BNMF.initialise(init_S,init_FG)
BNMF.run(iterations)
Or:
    BNMF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMF.train(init,iterations)
This returns a tuple (Fs,Ss,Gs,taus) of lists of F, S, G, tau values - of size <iterations>.
The expectation can be computed by specifying a burn-in and thinning rate, and using:
BNMF.approx_expectation(burn_in,thinning)
We can test the performance of our model on a test dataset, specifying our test set with a mask M.
performance = BNMF.predict(M_pred,burn_in,thinning)
This gives a dictionary of performances,
performance = { 'MSE', 'R^2', 'Rp' }
The performances of all iterations are stored in BNMF.all_performances, which
is a dictionary from 'MSE', 'R^2', or 'Rp' to a list of performances.
Finally, we can return the goodness of fit of the data using the quality(metric) function:
- metric = 'loglikelihood' -> return p(D|theta)
= 'BIC' -> return Bayesian Information Criterion
           = 'AIC'         -> return Akaike Information Criterion
= 'MSE' -> return Mean Square Error
(we want to maximise these values)
"""
from kmeans.kmeans import KMeans
from distributions.exponential import exponential_draw
from distributions.gamma import gamma_draw
from distributions.truncated_normal import TN_draw
from distributions.truncated_normal_vector import TN_vector_draw
import numpy, itertools, math, time
class bnmtf_gibbs_optimised:
def __init__(self,R,M,K,L,priors):
self.R = numpy.array(R,dtype=float)
self.M = numpy.array(M,dtype=float)
self.K = K
self.L = L
assert len(self.R.shape) == 2, "Input matrix R is not a two-dimensional array, " \
"but instead %s-dimensional." % len(self.R.shape)
assert self.R.shape == self.M.shape, "Input matrix R is not of the same size as " \
"the indicator matrix M: %s and %s respectively." % (self.R.shape,self.M.shape)
(self.I,self.J) = self.R.shape
self.size_Omega = self.M.sum()
self.check_empty_rows_columns()
self.alpha, self.beta, self.lambdaF, self.lambdaS, self.lambdaG = \
float(priors['alpha']), float(priors['beta']), numpy.array(priors['lambdaF']), numpy.array(priors['lambdaS']), numpy.array(priors['lambdaG'])
# If lambdaF, lambdaS, or lambdaG are an integer rather than a numpy array, we make it into one using that value
if self.lambdaF.shape == ():
self.lambdaF = self.lambdaF * numpy.ones((self.I,self.K))
if self.lambdaS.shape == ():
self.lambdaS = self.lambdaS * numpy.ones((self.K,self.L))
if self.lambdaG.shape == ():
self.lambdaG = self.lambdaG * numpy.ones((self.J,self.L))
assert self.lambdaF.shape == (self.I,self.K), "Prior matrix lambdaF has the wrong shape: %s instead of (%s, %s)." % (self.lambdaF.shape,self.I,self.K)
assert self.lambdaS.shape == (self.K,self.L), "Prior matrix lambdaS has the wrong shape: %s instead of (%s, %s)." % (self.lambdaS.shape,self.K,self.L)
assert self.lambdaG.shape == (self.J,self.L), "Prior matrix lambdaG has the wrong shape: %s instead of (%s, %s)." % (self.lambdaG.shape,self.J,self.L)
# Raise an exception if an entire row or column is empty
def check_empty_rows_columns(self):
sums_columns = self.M.sum(axis=0)
sums_rows = self.M.sum(axis=1)
# Assert none of the rows or columns are entirely unknown values
for i,c in enumerate(sums_rows):
assert c != 0, "Fully unobserved row in R, row %s." % i
for j,c in enumerate(sums_columns):
assert c != 0, "Fully unobserved column in R, column %s." % j
# Initialise and run the sampler
def train(self,init,iterations):
        self.initialise(init_S=init,init_FG=init)
return self.run(iterations)
    # Initialise F, S, G, and tau. If init='random', draw values from an Exp and Gamma distribution. If init='exp', set it to the expectation values.
def initialise(self,init_S='random',init_FG='random'):
assert init_S in ['random','exp'], "Unknown initialisation option for S: %s. Should be 'random' or 'exp'." % init_S
        assert init_FG in ['random','exp','kmeans'], "Unknown initialisation option for F and G: %s. Should be 'random', 'exp', or 'kmeans'." % init_FG
self.S = 1./self.lambdaS
if init_S == 'random':
for k,l in itertools.product(xrange(0,self.K),xrange(0,self.L)):
self.S[k,l] = exponential_draw(self.lambdaS[k,l])
self.F, self.G = 1./self.lambdaF, 1./self.lambdaG
if init_FG == 'random':
for i,k in itertools.product(xrange(0,self.I),xrange(0,self.K)):
self.F[i,k] = exponential_draw(self.lambdaF[i,k])
for j,l in itertools.product(xrange(0,self.J),xrange(0,self.L)):
self.G[j,l] = exponential_draw(self.lambdaG[j,l])
elif init_FG == 'kmeans':
print "Initialising F using KMeans."
kmeans_F = KMeans(self.R,self.M,self.K)
kmeans_F.initialise()
kmeans_F.cluster()
self.F = kmeans_F.clustering_results + 0.2
print "Initialising G using KMeans."
kmeans_G = KMeans(self.R.T,self.M.T,self.L)
kmeans_G.initialise()
kmeans_G.cluster()
self.G = kmeans_G.clustering_results + 0.2
self.tau = self.alpha_s() / self.beta_s()
# Run the Gibbs sampler
def run(self,iterations):
self.all_F = numpy.zeros((iterations,self.I,self.K))
self.all_S = numpy.zeros((iterations,self.K,self.L))
self.all_G = numpy.zeros((iterations,self.J,self.L))
self.all_tau = numpy.zeros(iterations)
self.all_times = [] # to plot performance against time
metrics = ['MSE','R^2','Rp']
self.all_performances = {} # for plotting convergence of metrics
for metric in metrics:
self.all_performances[metric] = []
time_start = time.time()
for it in range(0,iterations):
for k in range(0,self.K):
tauFk = self.tauF(k)
muFk = self.muF(tauFk,k)
self.F[:,k] = TN_vector_draw(muFk,tauFk)
for k,l in itertools.product(xrange(0,self.K),xrange(0,self.L)):
tauSkl = self.tauS(k,l)
muSkl = self.muS(tauSkl,k,l)
self.S[k,l] = TN_draw(muSkl,tauSkl)
for l in range(0,self.L):
tauGl = self.tauG(l)
muGl = self.muG(tauGl,l)
self.G[:,l] = TN_vector_draw(muGl,tauGl)
self.tau = gamma_draw(self.alpha_s(),self.beta_s())
self.all_F[it], self.all_S[it], self.all_G[it], self.all_tau[it] = numpy.copy(self.F), numpy.copy(self.S), numpy.copy(self.G), self.tau
perf = self.predict_while_running()
for metric in metrics:
self.all_performances[metric].append(perf[metric])
print "Iteration %s. MSE: %s. R^2: %s. Rp: %s." % (it+1,perf['MSE'],perf['R^2'],perf['Rp'])
time_iteration = time.time()
self.all_times.append(time_iteration-time_start)
return (self.all_F, self.all_S, self.all_G, self.all_tau)
# Compute the dot product of three matrices
def triple_dot(self,M1,M2,M3):
return numpy.dot(M1,numpy.dot(M2,M3))
# Compute the parameters for the distributions we sample from
def alpha_s(self):
return self.alpha + self.size_Omega/2.0
def beta_s(self):
return self.beta + 0.5*(self.M*(self.R-self.triple_dot(self.F,self.S,self.G.T))**2).sum()
def tauF(self,k):
return self.tau * ( self.M * numpy.dot(self.S[k],self.G.T)**2 ).sum(axis=1)
def muF(self,tauFk,k):
return 1./tauFk * (-self.lambdaF[:,k] + self.tau*(self.M * ( (self.R-self.triple_dot(self.F,self.S,self.G.T)+numpy.outer(self.F[:,k],numpy.dot(self.S[k],self.G.T)))*numpy.dot(self.S[k],self.G.T) )).sum(axis=1))
def tauS(self,k,l):
return self.tau * ( self.M * numpy.outer(self.F[:,k]**2,self.G[:,l]**2) ).sum()
def muS(self,tauSkl,k,l):
return 1./tauSkl * (-self.lambdaS[k,l] + self.tau*(self.M * ( (self.R-self.triple_dot(self.F,self.S,self.G.T)+self.S[k,l]*numpy.outer(self.F[:,k],self.G[:,l]))*numpy.outer(self.F[:,k],self.G[:,l]) )).sum())
def tauG(self,l):
return self.tau * ( self.M.T * numpy.dot(self.F,self.S[:,l])**2 ).T.sum(axis=0)
def muG(self,tauGl,l):
return 1./tauGl * (-self.lambdaG[:,l] + self.tau*(self.M * ( (self.R-self.triple_dot(self.F,self.S,self.G.T)+numpy.outer(numpy.dot(self.F,self.S[:,l]),self.G[:,l])).T * numpy.dot(self.F,self.S[:,l]) ).T).sum(axis=0))
    # Return the average value for F, S, G, tau - i.e. our approximation to the expectations.
# Throw away the first <burn_in> samples, and then use every <thinning>th after.
def approx_expectation(self,burn_in,thinning):
indices = range(burn_in,len(self.all_F),thinning)
exp_F = numpy.array([self.all_F[i] for i in indices]).sum(axis=0) / float(len(indices))
exp_S = numpy.array([self.all_S[i] for i in indices]).sum(axis=0) / float(len(indices))
exp_G = numpy.array([self.all_G[i] for i in indices]).sum(axis=0) / float(len(indices))
exp_tau = sum([self.all_tau[i] for i in indices]) / float(len(indices))
return (exp_F, exp_S, exp_G, exp_tau)
    # Compute the expectation of F, S and G, and use it to predict missing values
def predict(self,M_pred,burn_in,thinning):
(exp_F,exp_S,exp_G,_) = self.approx_expectation(burn_in,thinning)
R_pred = self.triple_dot(exp_F,exp_S,exp_G.T)
MSE = self.compute_MSE(M_pred,self.R,R_pred)
R2 = self.compute_R2(M_pred,self.R,R_pred)
Rp = self.compute_Rp(M_pred,self.R,R_pred)
return {'MSE':MSE,'R^2':R2,'Rp':Rp}
def predict_while_running(self):
R_pred = self.triple_dot(self.F,self.S,self.G.T)
MSE = self.compute_MSE(self.M,self.R,R_pred)
R2 = self.compute_R2(self.M,self.R,R_pred)
Rp = self.compute_Rp(self.M,self.R,R_pred)
return {'MSE':MSE,'R^2':R2,'Rp':Rp}
# Functions for computing MSE, R^2 (coefficient of determination), Rp (Pearson correlation)
def compute_MSE(self,M,R,R_pred):
return (M * (R-R_pred)**2).sum() / float(M.sum())
def compute_R2(self,M,R,R_pred):
mean = (M*R).sum() / float(M.sum())
SS_total = float((M*(R-mean)**2).sum())
SS_res = float((M*(R-R_pred)**2).sum())
return 1. - SS_res / SS_total if SS_total != 0. else numpy.inf
def compute_Rp(self,M,R,R_pred):
mean_real = (M*R).sum() / float(M.sum())
mean_pred = (M*R_pred).sum() / float(M.sum())
covariance = (M*(R-mean_real)*(R_pred-mean_pred)).sum()
variance_real = (M*(R-mean_real)**2).sum()
variance_pred = (M*(R_pred-mean_pred)**2).sum()
return covariance / float(math.sqrt(variance_real)*math.sqrt(variance_pred))
# Functions for model selection, measuring the goodness of fit vs model complexity
def quality(self,metric,burn_in,thinning):
assert metric in ['loglikelihood','BIC','AIC','MSE','ELBO'], 'Unrecognised metric for model quality: %s.' % metric
(expF,expS,expG,exptau) = self.approx_expectation(burn_in,thinning)
log_likelihood = self.log_likelihood(expF,expS,expG,exptau)
if metric == 'loglikelihood':
return log_likelihood
elif metric == 'BIC':
# -2*loglikelihood + (no. free parameters * log(no data points))
return - 2 * log_likelihood + (self.I*self.K+self.K*self.L+self.J*self.L) * math.log(self.size_Omega)
elif metric == 'AIC':
# -2*loglikelihood + 2*no. free parameters
return - 2 * log_likelihood + 2 * (self.I*self.K+self.K*self.L+self.J*self.L)
elif metric == 'MSE':
R_pred = self.triple_dot(expF,expS,expG.T)
return self.compute_MSE(self.M,self.R,R_pred)
elif metric == 'ELBO':
return 0.
def log_likelihood(self,expF,expS,expG,exptau):
        # Return the likelihood of the data given the trained model's parameters
coupon_oid: The coupon oid to retrieve. (required)
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_coupon_with_http_info(coupon_oid, **kwargs) # noqa: E501
else:
(data) = self.get_coupon_with_http_info(coupon_oid, **kwargs) # noqa: E501
return data
def get_coupon_with_http_info(self, coupon_oid, **kwargs): # noqa: E501
"""Retrieve a coupon # noqa: E501
Retrieves a single coupon using the specified coupon profile oid. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_coupon_with_http_info(coupon_oid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int coupon_oid: The coupon oid to retrieve. (required)
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon_oid', 'expand'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coupon" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon_oid' is set
if ('coupon_oid' not in params or
params['coupon_oid'] is None):
raise ValueError("Missing the required parameter `coupon_oid` when calling `get_coupon`") # noqa: E501
collection_formats = {}
path_params = {}
if 'coupon_oid' in params:
path_params['coupon_oid'] = params['coupon_oid'] # noqa: E501
query_params = []
if 'expand' in params:
query_params.append(('_expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/coupon/coupons/{coupon_oid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
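    # Illustrative call of the wrapper above (the oid and expansion value are
    # made up; construction of the surrounding API client object follows the
    # usual swagger-codegen pattern and is not shown in this file):
    #
    #   coupon_response = coupon_api.get_coupon(123456, expand="items")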
def get_coupon_by_merchant_code(self, merchant_code, **kwargs): # noqa: E501
"""Retrieve a coupon by merchant code # noqa: E501
Retrieves a single coupon using the specified merchant code. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_coupon_by_merchant_code(merchant_code, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str merchant_code: The coupon merchant code to retrieve. (required)
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_coupon_by_merchant_code_with_http_info(merchant_code, **kwargs) # noqa: E501
else:
(data) = self.get_coupon_by_merchant_code_with_http_info(merchant_code, **kwargs) # noqa: E501
return data
def get_coupon_by_merchant_code_with_http_info(self, merchant_code, **kwargs): # noqa: E501
"""Retrieve a coupon by merchant code # noqa: E501
Retrieves a single coupon using the specified merchant code. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_coupon_by_merchant_code_with_http_info(merchant_code, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str merchant_code: The coupon merchant code to retrieve. (required)
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['merchant_code', 'expand'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coupon_by_merchant_code" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'merchant_code' is set
if ('merchant_code' not in params or
params['merchant_code'] is None):
raise ValueError("Missing the required parameter `merchant_code` when calling `get_coupon_by_merchant_code`") # noqa: E501
collection_formats = {}
path_params = {}
if 'merchant_code' in params:
path_params['merchant_code'] = params['merchant_code'] # noqa: E501
query_params = []
if 'expand' in params:
query_params.append(('_expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/coupon/coupons/merchant_code/{merchant_code}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_coupons(self, **kwargs): # noqa: E501
"""Retrieve coupons # noqa: E501
Retrieves coupons for this account. If no parameters are specified, all coupons will be returned. You will need to make multiple API calls in order to retrieve the entire result set since this API performs result set pagination. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_coupons(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str merchant_code: Merchant code
:param str description: Description
:param str coupon_type: Coupon type
:param str start_date_begin: Start date begin
:param str start_date_end: Start date end
:param str expiration_date_begin: Expiration date begin
:param str expiration_date_end: Expiration date end
:param int affiliate_oid: Affiliate oid
:param bool exclude_expired: Exclude expired
:param int limit: The maximum number of records to return on this one API call. (Max 200)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the coupons. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_coupons_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_coupons_with_http_info(**kwargs) # noqa: E501
return data
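    # Example usage (sketch, not part of the generated client): page through the
    # full coupon list using the limit/offset pagination described above. The
    # `coupon_api` instance name and the `.coupons` attribute on CouponsResponse
    # are assumptions for illustration.
    #
    #     coupons = []
    #     offset = 0
    #     limit = 200  # documented maximum page size
    #     while True:
    #         page = coupon_api.get_coupons(limit=limit, offset=offset)
    #         chunk = page.coupons or []
    #         coupons.extend(chunk)
    #         if len(chunk) < limit:
    #             break
    #         offset += limit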
def get_coupons_with_http_info(self, **kwargs): # noqa: E501
"""Retrieve coupons # noqa: E501
Retrieves coupons for this account. If no parameters are specified, all coupons will be returned. You will need to make multiple API calls in order to retrieve the entire result set since this API performs result set pagination. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_coupons_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str merchant_code: Merchant code
:param str description: Description
:param str coupon_type: Coupon type
:param str start_date_begin: Start date begin
:param str start_date_end: Start date end
:param str expiration_date_begin: Expiration date begin
:param str expiration_date_end: Expiration date end
:param int affiliate_oid: Affiliate oid
:param bool exclude_expired: Exclude expired
:param int limit: The maximum number of records to return on this one API call. (Max 200)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the coupons. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['merchant_code', 'description', 'coupon_type', 'start_date_begin', 'start_date_end', 'expiration_date_begin', 'expiration_date_end', 'affiliate_oid', 'exclude_expired', 'limit', 'offset', 'sort', 'expand'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coupons" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'merchant_code' in params:
query_params.append(('merchant_code', params['merchant_code'])) # noqa: E501
if 'description' in params:
query_params.append(('description', params['description'])) # noqa: E501
if 'coupon_type' in params:
query_params.append(('coupon_type', params['coupon_type'])) # noqa: E501
if 'start_date_begin' in params:
query_params.append(('start_date_begin', params['start_date_begin'])) # noqa: E501
if 'start_date_end' in params:
query_params.append(('start_date_end', params['start_date_end'])) # noqa: E501
if 'expiration_date_begin' in params:
query_params.append(('expiration_date_begin', params['expiration_date_begin'])) # noqa: E501
if 'expiration_date_end' in params:
query_params.append(('expiration_date_end', params['expiration_date_end'])) # noqa: E501
if 'affiliate_oid' in params:
query_params.append(('affiliate_oid', params['affiliate_oid'])) # noqa: E501
        if 'exclude_expired' in params:
            query_params.append(('exclude_expired', params['exclude_expired']))  # noqa: E501
<filename>c10.py
from tkinter import *
conduitType = ["Heavy duty rigid UPVC conduit", "Corflo conduit",
"Medium duty corrugated", "Medium duty rigid UPVC conduit"]
CableType = ["-", "1", "1.5", "2.5", "4" , "6" ,"10" ,"16", "25", "35", "50", "70" , "95" ,"120" ,"150","185","240","300",
"400","500","630"]
class Application(Frame):
def __init__(self, master):
""" Initialise the Frame. """
super(Application, self).__init__(master)
self.UserIn = IntVar()
self.grid()
self.create_widgets()
def create_widgets(self):
self.conduitLbl = Label (self, text = "Type of Conduit", height=2, width=20)#Label
self.conduitLbl.grid(row=0, column = 0)
        self.conduit = StringVar(master) ### OPTION MENU FOR CONDUIT TYPE
self.conduit.set("Heavy duty rigid UPVC conduit") # default value
self.conduitOptions = OptionMenu(master, self.conduit, *conduitType)
self.conduitOptions.config(width=28)
self.conduitOptions.grid(row=0, column=1)
self.PVCLabel = Label (master, text = "Cable Type", height=2, width=20)#Label
self.PVCLabel.grid(row=1, column = 0)
self.cable = StringVar(master)
self.cable.set("-") # default value
self.PVCom = OptionMenu(master, self.cable, *CableType, )
self.PVCom.config(width=15)
self.PVCom.grid(row=1, column=1)
self.circuitLbl = Label (master, text = "Number of Circuits:", height=1, width=20) #Label
self.circuitLbl.grid(row=2, column = 0)
        self.getCircuit = Entry(master) ######## ENTRY BOX
self.getCircuit.grid(row=2, column=1)
self.btn = Button(master, text="Calculate", bg="light grey", command=self.onButtonClick)
self.btn.grid(row = 3,column=1)
self.conduitTypeResult = Label (master, text = "Conduit Type: ", height=1, width=40) #Label
self.conduitTypeResult.grid(row=0, column =2)
self.PVCResult = Label (master, text = "Cable Type: ", height=2, width=25) #Label
self.PVCResult.grid(row=1, column =2)
self.circuitNo = Label (master, text = "Number of Circuits: ", height=2, width=25) #Label
self.circuitNo.grid(row=2, column =2)
self.conduitResult = Label (master, text = "-", height=2, width=40, font='Helvetica 9 bold') #Label
self.conduitResult.grid(row=3, column =2)
self.disclaimerText = Label (master, text = """DISCLAIMER\n Please refer to Table C10 (can be viewed by clicking Open Table button)
to confirm the results before practically applying the Number Of Conduits. Each output has not been tested thus
caution should be taken when using this program.\n
REFERENCE: AS/NZ 3000:2018 Electrical Installations (known as the Australian/New Zealand Wiring Rules)"""
,font='Helvetica 9 bold') #Label
self.disclaimerText.grid(row=6, rowspan=2, column=0, columnspan=3, sticky=W)
self.close = Button(master, text="Close", bg="light grey", command=master.destroy)
self.close.grid(row = 4,column=0)
self.canvas = Canvas(master, width=99, height=29)
self.canvas.grid(row=4, column=2)
self.logo = PhotoImage(file='C:\\Users\\Aditya.Verma\\Documents\\GitHub\\Table-c10---max-single-core-sheathed-cables\\Lucid Logo.PNG')
self.canvas.create_image(0, 0, image = self.logo, anchor = NW)
self.canvas.logo = self.logo
def openImage(): ### opens table
control = Toplevel()
canvas = Canvas(control, width=1172, height=704)
canvas.pack(expand = YES, fill = BOTH)
png1 = PhotoImage(file='C:\\Users\\Aditya.Verma\\Documents\\GitHub\\Table-c10---max-single-core-sheathed-cables\\Capture.PNG')
canvas.create_image(0, 0, image = png1, anchor = NW)
canvas.png1 = png1
self.openImage = Button(master, text="Open Table", bg="light grey", command=openImage)#image open button
self.openImage.grid(row=4, column = 1)
def reset():
            self.conduitTypeResult.configure(text="-")
            self.PVCResult.configure(text="-")
self.conduit.set("Heavy duty rigid UPVC conduit")
self.cable.set("-")
self.circuitNo.configure(text="-")
self.conduitResult.configure(text="-", bg='gray85', borderwidth=2, relief='flat')
self.tableview = Button(master, text="Reset", bg="light grey", command=reset)
self.tableview.grid(row = 3,column=0)
        # the calculate button is only enabled once a cable type has been selected
        if self.cable.get() == '-':
            self.btn.config(state=DISABLED)
        else:
            self.btn.config(state=NORMAL)
def onButtonClick(self):
#get values
def getConduitType(self): #type of conduit
self.x = self.conduit.get()
return self.x
def getCable(self):
self.x = self.cable.get()
return self.x
def getCircuitState(self):
self.x = self.getCircuit.get()
return int(self.x)
if not self.getCircuit.get():
self.conduitResult.configure(text="Error: Missing Values", bg='orange' )
self.conduitTypeResult.configure(text="Conduit Type: " + self.conduit.get(), font='Helvetica 9 bold')
self.PVCResult.configure(text="CableType: " + self.cable.get(),font='Helvetica 9 bold' )
self.circuitNo.configure(text="Number of Circuits: "+ self.getCircuit.get(), font='Helvetica 9 bold')
def circuitNo(self):
if (getConduitType(self)=="Heavy duty rigid UPVC conduit"):
if(getCable(self)=="1" and getCircuitState(self) <= int("5")):
return "20"
if(getCable(self)=="1" and getCircuitState(self)<= int("9")):
return "25"
if(getCable(self)=="1" and getCircuitState(self)<= int("16")):
return "32"
if(getCable(self)=="1" and getCircuitState(self)<= int("26")):
return "40"
if(getCable(self)=="1" and getCircuitState(self)<= int("43")):
return "50"
if(getCable(self)=="1" and getCircuitState(self)<= int("71")):
return "63"
if(getCable(self)=="1" and getCircuitState(self) >= int("100")):
return "80(NZ), 80(AUS), 100(NZ), 100(AUS), 125 or 150"
if ((getCable(self)=="25" or getCable(self)=="35" or getCable(self)=="50" )
and getCircuitState(self)<= int("0")):
return '20'
if ((getCable(self)=="70" or getCable(self)=="95") and getCircuitState(self)<= int("0")):
return "20 or 25"
if ((getCable(self)=="120" or getCable(self)=="150") and getCircuitState(self)<= int("0")):
return "20, 25 or 32"
if ((getCable(self)=="185" or
getCable(self)=="240" or getCable(self)=="300") and getCircuitState(self)<= int("0")):
return "20, 25, 32 or 40"
if ((getCable(self)=="400" or getCable(self)=="500") and getCircuitState(self)<= int("0")):
return "20, 25, 32, 40 or 50"
if ((getCable(self)=="630") and getCircuitState(self)<= int("0")):
return "20, 25, 32, 40, 50 or 63"
if ((getCable(self)=="25" or getCable(self)== "35")
and getCircuitState(self)<= int("1")):
return '25 or 32'
if ((getCable(self)=="50") and getCircuitState(self)<= int("1")):
return "25, 32 or 40"
if ((getCable(self)=="70") and getCircuitState(self)<= int("1")):
return "25, 32, 40 or 50"
if ((getCable(self)=="95") and getCircuitState(self)<= int("1")):
return "32 or 40"
if ((getCable(self)=="120" or getCable(self)=="150") and getCircuitState(self)<= int("1")):
return "40 or 50"
if ((getCable(self)=="185" or
getCable(self)=="240") and getCircuitState(self)<= int("1")):
return "50 or 63"
if ((getCable(self)=="300") and getCircuitState(self)<= int("1")):
return "50, 63 or 80(NZ)"
if ((getCable(self)=="400" or getCable(self)=="500") and getCircuitState(self)<= int("1")):
return "63 or 80(NZ) or 80(AUS)"
if ((getCable(self)=="630") and getCircuitState(self)<= int("1")):
return "80(NZ), 80(AUS) or 100(NZ)"
if ((getCable(self)=="35") and getCircuitState(self)<= int(int("2"))):
return "40"
if ((getCable(self)=="70") and getCircuitState(self)<= int(int("2"))):
return "50"
if ((getCable(self)=="120") and getCircuitState(self)<= int("2")):
return "63"
if ((getCable(self)=="150") and getCircuitState(self)<= int("2")):
return "150"
if ((getCable(self)=="240") and getCircuitState(self)<= int("2")):
return "80(NZ)"
if ((getCable(self)=="300") and getCircuitState(self)<= int("2")):
return "80(AUS)"
if ((getCable(self)=="630") and getCircuitState(self)<= int("2")):
return "100(AUS)"
if ((getCable(self)=="25") and getCircuitState(self)<= int("3")):
return "40"
if ((getCable(self)=="50") and getCircuitState(self)<= int("3")):
return "50"
if ((getCable(self)=="95") and getCircuitState(self)<= int("3")):
return "63"
if ((getCable(self)=="185") and getCircuitState(self)<= int("3")):
return "80(NZ)"
if ((getCable(self)=="240") and getCircuitState(self)<= int("3")):
return "80(AUS)"
if ((getCable(self)=="400" or getCable(self)=="500") and getCircuitState(self)<= int("3")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="630") and getCircuitState(self)<= int("3")):
return "125"
if ((getCable(self)=="25") and getCircuitState(self)<= int("4")):
return "50"
if ((getCable(self)=="50") and getCircuitState(self)<= int("4")):
return "63"
if ((getCable(self)=="150") and getCircuitState(self)<= int("4")):
return "80(NZ)"
if ((getCable(self)=="185") and getCircuitState(self)<= int("4")):
return "80(AUS)"
if ((getCable(self)=="300") and getCircuitState(self)<= int("4")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="500") and getCircuitState(self)<= int("4")):
return "125"
if ((getCable(self)=="630") and getCircuitState(self)<= int("4")):
return "150"
if ((getCable(self)=="25") and getCircuitState(self)<= int("5")):
return "50"
if ((getCable(self)=="120") and getCircuitState(self)<= int("5")):
return "80(NZ)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("5")):
return "80(AUS)"
if ((getCable(self)=="240") and getCircuitState(self)<= int("5")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="400") and getCircuitState(self)<= int("5")):
return "125"
if ((getCable(self)=="50") and getCircuitState(self)<= int("6")):
return "63"
if ((getCable(self)=="95") and getCircuitState(self)<= int("6")):
return "80(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("6")):
return "80(AUS)"
if ((getCable(self)=="185") and getCircuitState(self)<= int("6")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="300") and getCircuitState(self)<= int("6")):
return "125"
if ((getCable(self)=="500") and getCircuitState(self)<= int("6")):
return "150"
#7
if ((getCable(self)=="35") and getCircuitState(self)<= int("7")):
return "63"
if ((getCable(self)=="95") and getCircuitState(self)<= int("7")):
return "80(AUS)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("7")):
return "100(NZ)"
if ((getCable(self)=="400") and getCircuitState(self)<= int("7")):
return "150"
#8
if ((getCable(self)=="70") and getCircuitState(self)<= int("8")):
return "80(NZ)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("8")):
return "100(AUS)"
if ((getCable(self)=="240") and getCircuitState(self)<= int("8")):
return "125"
if ((getCable(self)=="300") and getCircuitState(self)<= int("8")):
return "150"
#9
if ((getCable(self)=="25") and getCircuitState(self)<= int("9")):
return "63"
if ((getCable(self)=="70") and getCircuitState(self)<= int("9")):
return "80(AUS)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("9")):
return "100(NZ)"
if ((getCable(self)=="50") and getCircuitState(self)<= int("10")):
return "80(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("10")):
return "100(AUS)"
if ((getCable(self)=="185") and getCircuitState(self)<= int("10")):
return "125"
if ((getCable(self)=="240") and getCircuitState(self)<= int("10")):
return "150"
if ((getCable(self)=="95") and getCircuitState(self)<= int("11")):
return "100(NZ)"
if ((getCable(self)=="50") and getCircuitState(self)<= int("12")):
return "80(AUS)"
if ((getCable(self)=="95") and getCircuitState(self)<= int("12")):
return "100(AUS)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("12")):
return "125"
if ((getCable(self)=="35") and getCircuitState(self)<= int("15")):
return "80(AUS)"
if ((getCable(self)=="70") and getCircuitState(self)<= int("15")):
return "100(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("15")):
return "125"
if ((getCable(self)=="25") and getCircuitState(self)<= int("16")):
return "80(NZ)"
if ((getCable(self)=="70") and getCircuitState(self)<= int("16")):
return "100(AUS)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("16")):
return "150"
if ((getCable(self)=="95") and getCircuitState(self)<= int("18")):
return "125"
if ((getCable(self)=="25") and getCircuitState(self)<= int("19")):
return "80(AUS"
if ((getCable(self)=="50") and getCircuitState(self)<= int("19")):
return "100(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("20")):
return "150"
if ((getCable(self)=="50") and getCircuitState(self)<= int("21")):
return "100(AUS)"
if ((getCable(self)=="35") and getCircuitState(self)<= int("24")):
return "100(NZ)"
if ((getCable(self)=="70") and getCircuitState(self)<= int("24")):
return "125"
if ((getCable(self)=="95") and getCircuitState(self)<= int("24")):
return "150"
if ((getCable(self)=="35") and getCircuitState(self)<= int("26")):
return "100(AUS)"
if ((getCable(self)=="25") and getCircuitState(self)<= int("29")):
return "100(NZ)"
if ((getCable(self)=="50") and getCircuitState(self)<= int("31")):
return "125"
if ((getCable(self)=="70") and getCircuitState(self)<= int("31")):
return "150"
if ((getCable(self)=="35") and getCircuitState(self)<= int("39")):
return "125"
if ((getCable(self)=="50") and getCircuitState(self)<= int("41")):
return "150"
playthrough, fill_locations, itempool):
# We should make sure that we don't count event items, shop items,
# token items, or dungeon items as a major item. itempool at this
# point should only be able to have tokens of those restrictions
# since the rest are already placed.
major_items = [item for item in itempool if item.majoritem]
minor_items = [item for item in itempool if not item.majoritem]
dungeons = [dungeon for world in worlds for dungeon in world.dungeons]
double_dungeons = []
for dungeon in dungeons:
# we will count spirit temple twice so that it gets 2 items to match vanilla
if dungeon.name == 'Spirit Temple':
double_dungeons.append(dungeon)
dungeons.extend(double_dungeons)
random.shuffle(dungeons)
random.shuffle(itempool)
base_playthrough = playthrough.copy()
base_playthrough.collect_all(minor_items)
base_playthrough.collect_locations()
all_dungeon_locations = []
    # iterate over all the dungeons in a random order, placing the item there
for dungeon in dungeons:
dungeon_locations = [location for region in dungeon.regions for location in region.locations if location in fill_locations]
# cache this list to flag afterwards
all_dungeon_locations.extend(dungeon_locations)
# place 1 item into the dungeon
fill_restrictive(window, worlds, base_playthrough, dungeon_locations, major_items, 1)
# update the location and item pool, removing any placed items and filled locations
# the fact that you can remove items from a list you're iterating over is python magic
for item in itempool:
if item.location != None:
fill_locations.remove(item.location)
itempool.remove(item)
# flag locations to not place further major items. it's important we do it on the
# locations instead of the dungeon because some locations are not in the dungeon
for location in all_dungeon_locations:
location.minor_only = True
logger.info("Unique dungeon items placed")
# Places items restricting placement to the recipient player's own world
def fill_ownworld_restrictive(window, worlds, playthrough, locations, ownpool, itempool, description="Unknown", attempts=15):
# get the locations for each world
# look for preplaced items
placed_prizes = [loc.item.name for loc in locations if loc.item is not None]
unplaced_prizes = [item for item in ownpool if item.name not in placed_prizes]
empty_locations = [loc for loc in locations if loc.item is None]
prizepool_dict = {world.id: [item for item in unplaced_prizes if item.world.id == world.id] for world in worlds}
prize_locs_dict = {world.id: [loc for loc in empty_locations if loc.world.id == world.id] for world in worlds}
    # Shop items sent in to this method are tied to their own world.
# Therefore, let's do this one world at a time. We do this to help
# increase the chances of successfully placing songs
for world in worlds:
# List of states with all items
unplaced_prizes = [item for item in unplaced_prizes if item not in prizepool_dict[world.id]]
base_playthrough = playthrough.copy()
base_playthrough.collect_all(itempool + unplaced_prizes)
world_attempts = attempts
while world_attempts:
world_attempts -= 1
try:
prizepool = list(prizepool_dict[world.id])
prize_locs = list(prize_locs_dict[world.id])
random.shuffle(prizepool)
fill_restrictive(window, worlds, base_playthrough, prize_locs, prizepool)
logger.info("Placed %s items for world %s.", description, (world.id+1))
except FillError as e:
logger.info("Failed to place %s items for world %s. Will retry %s more times.", description, (world.id+1), world_attempts)
for location in prize_locs_dict[world.id]:
location.item = None
if location.disabled == DisableType.DISABLED:
location.disabled = DisableType.PENDING
logger.info('\t%s' % str(e))
continue
break
else:
raise FillError('Unable to place %s items in world %d' % (description, (world.id+1)))
# Places items in the itempool into locations.
# worlds is a list of worlds and is redundant with the worlds in base_playthrough
# base_playthrough is the playthrough state prior to placing items in the item pool
# items and locations have pointers to the world that they belong to
#
# The algorithm places items in the world in reverse.
# This means we first assume we have every item in the item pool and
# remove an item and try to place it somewhere that is still reachable
# This method helps distribution of items locked behind many requirements
#
# count is the number of items to place. If count is negative, then it will place
# every item. Raises an error if specified count of items are not placed.
#
# This function will modify the location and itempool arguments. placed items and
# filled locations will be removed. If this returns and error, then the state of
# those two lists cannot be guaranteed.
def fill_restrictive(window, worlds, base_playthrough, locations, itempool, count=-1):
unplaced_items = []
# don't run over this playthrough, just keep it as an item collection
items_playthrough = base_playthrough.copy()
items_playthrough.collect_all(itempool)
# loop until there are no items or locations
while itempool and locations:
# if remaining count is 0, return. Negative means unbounded.
if count == 0:
break
# get an item and remove it from the itempool
item_to_place = itempool.pop()
if item_to_place.majoritem:
l2cations = [l for l in locations if not l.minor_only]
else:
l2cations = locations
random.shuffle(l2cations)
# generate the max playthrough with every remaining item
# this will allow us to place this item in a reachable location
items_playthrough.uncollect(item_to_place)
max_playthrough = items_playthrough.copy()
max_playthrough.collect_locations()
# perform_access_check checks location reachability
perform_access_check = True
if worlds[0].check_beatable_only:
            # if any world can no longer be beaten with the remaining items
# then we must check for reachability no matter what.
# This way the reachability test is monotonic. If we were to later
# stop checking, then we could place an item needed in one world
# in an unreachable place in another world.
# scan_for_items would cause an unnecessary copy+collect
perform_access_check = not max_playthrough.can_beat_game(scan_for_items=False)
# find a location that the item can be placed. It must be a valid location
# in the world we are placing it (possibly checking for reachability)
spot_to_fill = None
for location in l2cations:
if location.can_fill(max_playthrough.state_list[location.world.id], item_to_place, perform_access_check):
# for multiworld, make it so that the location is also reachable
# in the world the item is for. This is to prevent early restrictions
# in one world being placed late in another world. If this is not
# done then one player may be waiting a long time for other players.
if location.world.id != item_to_place.world.id:
try:
source_location = item_to_place.world.get_location(location.name)
if not source_location.can_fill(max_playthrough.state_list[item_to_place.world.id], item_to_place, perform_access_check):
# location wasn't reachable in item's world, so skip it
continue
except KeyError:
# This location doesn't exist in the other world, let's look elsewhere.
# Check access to whatever parent region exists in the other world.
can_reach = True
parent_region = location.parent_region
while parent_region:
try:
source_region = item_to_place.world.get_region(parent_region.name)
can_reach = max_playthrough.can_reach(source_region)
break
except KeyError:
parent_region = parent_region.entrances[0].parent_region
if not can_reach:
continue
if location.disabled == DisableType.PENDING:
if not max_playthrough.can_beat_game(False):
continue
location.disabled = DisableType.DISABLED
# location is reachable (and reachable in item's world), so place item here
spot_to_fill = location
break
# if we failed to find a suitable location
if spot_to_fill is None:
# if we specify a count, then we only want to place a subset, so a miss might be ok
if count > 0:
# don't decrement count, we didn't place anything
unplaced_items.append(item_to_place)
items_playthrough.collect(item_to_place)
continue
else:
# we expect all items to be placed
raise FillError('Game unbeatable: No more spots to place %s [World %d] from %d locations (%d total); %d other items left to place, plus %d skipped' % (item_to_place, item_to_place.world.id + 1, len(l2cations), len(locations), len(itempool), len(unplaced_items)))
# Place the item in the world and continue
spot_to_fill.world.push_item(spot_to_fill, item_to_place)
locations.remove(spot_to_fill)
window.fillcount += 1
window.update_progress(5 + ((window.fillcount / window.locationcount) * 30))
# decrement count
count -= 1
# assert that the specified number of items were placed
if count > 0:
        raise FillError('Could not place the specified number of items. %d remaining to be placed.' % count)
if count < 0 and len(itempool) > 0:
        raise FillError('Could not place all the items. %d remaining to be placed.' % len(itempool))
# re-add unplaced items that were skipped
itempool.extend(unplaced_items)
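# Example usage (sketch): the callers above invoke fill_restrictive roughly as
# follows; `window`, `worlds`, `playthrough`, `locations` and `pool` are assumed
# to be prepared by the caller as described in the comments above.
#
#     fill_restrictive(window, worlds, playthrough, locations, pool)      # place every item
#     fill_restrictive(window, worlds, playthrough, locations, pool, 1)   # place exactly one item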
# This places items in the itempool into the locations
# It does not check for reachability, only that the item is
# allowed in the location
def fill_restrictive_fast(window, worlds, locations, itempool):
while itempool and locations:
item_to_place = itempool.pop()
random.shuffle(locations)
# get location that allows this item
spot_to_fill = None
for location in locations:
if location.can_fill_fast(item_to_place):
spot_to_fill = location
break
        # if we failed to find a suitable location
and self._content is None:
length = self.environ.get('CONTENT_LENGTH', '')
if length.isdigit():
length = int(length)
else:
length = 0
if length <= self.MAX_CONTENT:
input = self.environ['wsgi.input']
f = io.BytesIO()
while length:
part = input.read(length)
if not part:
break
f.write(part)
length -= len(part)
self._content = f.getvalue()
else:
raise BadRequest("Too much data")
return self._content
def get_form(self):
"""Returns a FieldStorage object parsed from the content.
The query string is excluded before the form is parsed as this
only covers parameters submitted in the content of the request.
To search the query string you will need to examine the
dictionary returned by :meth:`get_query` too.
This method can be called multiple times, the form is only
actually read from the input the first time. Subsequent calls
return the same FieldStorage object.
        This method cannot be used on the same context as
        :meth:`get_content`; whichever is called first takes
        precedence. Calls to get_form after get_content return None.
Warning: get_form will only parse the form from the content if
the request method was POST!"""
if self._form is None and self._content is None:
post_environ = self.environ.copy()
post_environ['QUERY_STRING'] = ''
self._form = cgi.FieldStorage(
fp=post_environ['wsgi.input'], environ=post_environ,
keep_blank_values=True)
return self._form
def get_form_string(self, name, max_length=0x10000):
"""Returns the value of a string parameter from the form.
name
The name of the parameter
max_length (optional, defaults to 64KB)
Due to an issue in the implementation of FieldStorage it
isn't actually possible to definitively tell the difference
between a file upload and an ordinary input field. HTML5
clarifies the situation to say that ordinary fields don't
have a content type but FieldStorage assumes 'text/plain' in
this case and sets the file and type attribute of the field
anyway.
To prevent obtuse clients sending large files disguised as
ordinary form fields, tricking your application into loading
them into memory, this method checks the size of any file
attribute (if present) against max_length before returning
the field's value.
If the parameter is missing from the form then an empty string
is returned."""
form = self.get_form()
if name in form:
result = form[name]
if isinstance(result, list):
return ','.join([x.value for x in result])
else:
if result.file:
# could be an ordinary field in multipart/form-data
fpos = result.file.tell()
result.file.seek(0, io.SEEK_END)
fsize = result.file.tell()
result.file.seek(fpos)
if fsize > max_length:
raise BadRequest
# result.value could be bytes or (text) str
value = result.value
if isinstance(value, bytes):
charset = 'ascii'
if result.type_options is not None:
charset = result.type_options.get('charset',
'ascii')
return value.decode(charset)
else:
return value
return result.value
return ''
def get_form_long(self, name):
"""Returns the value of a (long) integer parameter from the form.
name
The name of the parameter
        If the parameter is missing from the form, or is present but is
        not a valid integer, then :class:`BadRequest` is raised."""
value = self.get_form_string(name, 256)
try:
return long2(value)
except ValueError as err:
logging.debug("get_form_long: %s", str(err))
raise BadRequest
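    # Example usage (sketch): reading form fields inside a request handler.
    # `context` is an assumed WSGIContext instance and the field names are
    # illustrative only.
    #
    #     name = context.get_form_string('name')   # '' if the field is absent
    #     age = context.get_form_long('age')        # BadRequest if not a valid integer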
def get_cookies(self):
"""Returns a dictionary of cookies from the request
If no cookies were passed an empty dictionary is returned.
For details of how multi-valued cookies are handled see:
:meth:`pyslet.http.cookie.CookieParser.request_cookie_string`."""
if self._cookies is None:
cookie_values = self.environ.get('HTTP_COOKIE', None)
if cookie_values is not None:
p = cookie.CookieParser(cookie_values)
self._cookies = p.require_cookie_string()
for name in self._cookies:
value = self._cookies[name]
if isinstance(value, set):
# join the items into a single string
value = list(value)
value.sort()
self._cookies[name] = b','.join(value)
else:
self._cookies = {}
return self._cookies
class DispatchNode(object):
"""An opaque class used for dispatching requests."""
def __init__(self):
self._handler = None
self._wildcard = None
self._nodes = {}
class WSGIApp(DispatchNode):
"""An object to help support WSGI-based applications.
Instances are designed to be callable by the WSGI middle-ware, on
creation each instance is assigned a random identifier which is used
to provide comparison and hash implementations. We go to this
trouble so that derived classes can use techniques like the
functools lru_cache decorator in future versions."""
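    # Example usage (sketch): because instances are WSGI callables they can be
    # served directly, for instance with the standard library's wsgiref server.
    # `MyApp` is an assumed subclass; a real application would normally be
    # configured (settings file, setup) before the instance is created.
    #
    #     from wsgiref.simple_server import make_server
    #
    #     with make_server('', 8080, MyApp()) as httpd:
    #         httpd.serve_forever()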
#: the context class to use for this application, must be (derived
#: from) :class:`WSGIContext`
ContextClass = WSGIContext
#: The path to the directory for :attr:`static_files`. Defaults to
#: None. An :class:`pyslet.vfs.OSFilePath` instance.
static_files = None
private_files = None
"""Private data diretory
An :class:`pyslet.vfs.OSFilePath` instance.
The directory used for storing private data. The directory is
partitioned into sub-directories based on the lower-cased class name
of the object that owns the data. For example, if private_files is
set to '/var/www/data' and you derive a class called 'MyApp' from
WSGIApp you can assume that it is safe to store and retrieve private
data files from '/var/www/data/myapp'.
private_files defaults to None for safety. The current WSGIApp
implementation does not depend on any private data."""
settings_file = None
"""The path to the settings file. Defaults to None.
An :class:`pyslet.vfs.OSFilePath` instance.
The format of the settings file is a json dictionary. The
dictionary's keys are class names that define a scope for
class-specific settings. The key 'WSGIApp' is reserved for settings
defined by this class. The defined settings are:
level (None)
If specified, used to set the root logging level, a value
between 0 (NOTSET) and 50 (CRITICAL). For more information see
python's logging module.
port (8080)
The port number used by :meth:`run_server`
canonical_root ("http://localhost" or "http://localhost:<port>")
The canonical URL scheme, host (and port if required) for the
application. This value is passed to the context and used by
:meth:`WSGIContext.get_url` and similar methods in preference to
        the SERVER_NAME and SERVER_PORT to construct absolute URLs
returned or recorded by the application. Note that the Host
header is always ignored to prevent related `security attacks`__.
.. __:
http://www.skeletonscribe.net/2013/05/practical-http-host-header-attacks.html
        If no value is given then the default is calculated taking into
consideration the port setting.
interactive (False)
Sets the behaviour of :meth:`run_server`, if specified the main
thread prompts the user with a command line interface allowing
you to interact with the running server. When False, run_server
will run forever and can only be killed by an application
request that sets :attr:`stop` to True or by an external signal
that kills the process.
static (None)
A URL to the static files (not a local file path). This will
normally be an absolute path or a relative path. Relative paths
are relative to the settings file in which the setting is
defined. As URL syntax is used you must use the '/' as a path
separator and add proper URL-escaping. On Windows, UNC paths
can be specified by putting the host name in the authority
section of the URL.
private (None)
A URL to the private files. Interpreted as per the 'static'
setting above."""
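    # Example settings file (sketch): a minimal JSON document using the options
    # described above; all values shown are illustrative only.
    #
    #     {
    #         "WSGIApp": {
    #             "level": 20,
    #             "port": 8080,
    #             "canonical_root": "http://localhost:8080",
    #             "interactive": false,
    #             "static": "static",
    #             "private": "data"
    #         }
    #     }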
#: the class settings loaded from :attr:`settings_file` by
#: :meth:`setup`
settings = None
#: the base URI of this class, set from the path to the settings
#: file itself and is used to locate data files on the server. This
#: is a :class:`pyslet.rfc2396.FileURL` instance. Not to be confused
#: with the base URI of resources exposed by the application this
#: class implements!
base = None
#: the base URI of this class' private files. This is set from the
#: :attr:`private_files` member and is a
#: :class:`pyslet.rfc2396.FileURL` instance
private_base = None
content_type = {
'ico': params.MediaType('image', 'vnd.microsoft.icon'),
}
"""The mime type mapping table.
This table is used before falling back on Python's built-in
guess_type function from the mimetypes module. Add your own custom
mappings here.
    It maps file extensions (without the dot) onto
:class:`~pyslet.http.params.MediaType` instances."""
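    # Example (sketch): a derived application can extend the table with its own
    # mappings; the 'svg' entry below is illustrative and not part of the defaults.
    #
    #     class MyApp(WSGIApp):
    #         content_type = dict(WSGIApp.content_type)
    #         content_type['svg'] = params.MediaType('image', 'svg+xml')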
#: the maximum chunk size to read into memory when returning a
#: (static) file. Defaults to 64K.
MAX_CHUNK = 0x10000
#: the integer millisecond time (since the epoch) corresponding to
#: 01 January 1970 00:00:00 UTC the JavaScript time origin.
js_origin = int(
iso.TimePoint(
date=iso.Date(century=19, year=70, month=1, day=1),
time=iso.Time(hour=0, minute=0, second=0, zdirection=0)
).get_unixtime() * 1000)
#: a threading.RLock instance that can be used to lock the class
#: when dealing with data that might be shared amongst threads.
clslock = threading.RLock()
_nextid = 1
@classmethod
| |
    # compute the temporal extent of the element, hopefully from the cache
element_interval = interval_cache.get(element)
if element_interval is None:
element_interval = ISD._make_absolute(
element.get_begin(),
element.get_end(),
parent_computed_begin,
parent_computed_end
)
interval_cache[element] = element_interval
begin_time, end_time = element_interval
# update the activity cache if the element was not present
if is_active is None:
if (
(begin_time is not None and begin_time > absolute_offset) or
(end_time is not None and end_time <= absolute_offset)
) :
activity_cache[element] = False
return None
activity_cache[element] = True
# associated region is that associated with the element, or inherited otherwise
associated_region = element.get_region() if element.get_region() is not None else inherited_region
    # prune the element if either:
    # * the element has children and its associated region is specified but is not the selected region
    # * the element has no children and its associated region is not the selected region
if (
not isinstance(element, model.Region) and
associated_region is not selected_region and
(not element.has_children() or associated_region is not None)
):
return None
# create an ISD element
doc = element.get_doc()
if isinstance(element, model.Region):
isd_element = ISD.Region(element.get_id(), isd)
else:
isd_element = element.__class__(isd)
isd_element.set_id(element.get_id())
if not isinstance(element, (model.Br, model.Text)):
isd_element.set_lang(element.get_lang())
isd_element.set_space(element.get_space())
# keep track of specified style properties
styles_to_be_computed: typing.Set[typing.Type[model.StyleProperty]] = set()
# copy text nodes
if isinstance(element, model.Text):
isd_element.set_text(element.get_text())
# apply animation
for anim_step in element.iter_animation_steps():
anim_begin_time, anim_end_time = ISD._make_absolute(
anim_step.begin,
anim_step.end,
begin_time,
end_time
)
if anim_begin_time is not None and anim_begin_time > absolute_offset:
continue
if anim_end_time is not None and anim_end_time <= absolute_offset:
continue
styles_to_be_computed.add(anim_step.style_property)
isd_element.set_style(anim_step.style_property, anim_step.value)
# copy specified styles
for spec_style_prop in element.iter_styles():
if isd_element.has_style(spec_style_prop):
# skip if the style has already been set
continue
styles_to_be_computed.add(spec_style_prop)
isd_element.set_style(spec_style_prop, element.get_style(spec_style_prop))
# inherited styling
if not isinstance(element, (model.Br, model.Text, model.Region)):
for inherited_style_prop in parent.iter_styles():
StyleProcessors.BY_STYLE_PROP[inherited_style_prop].inherit(parent, isd_element)
# initial value styling
if not isinstance(element, (model.Br, model.Text)):
for initial_style in styles.StyleProperties.ALL:
if isd_element.has_style(initial_style):
continue
if doc.has_initial_value(initial_style):
initial_value = doc.get_initial_value(initial_style)
elif initial_style is not styles.StyleProperties.Position:
# the initial value of the Position style property is set to Origin as part of style computation
initial_value = initial_style.make_initial_value()
else:
initial_value = None
styles_to_be_computed.add(initial_style)
isd_element.set_style(initial_style, initial_value)
# compute style properties
ISD._compute_styles(styles_to_be_computed, parent, isd_element)
    # prune the element if display is "none"
if isd_element.get_style(styles.StyleProperties.Display) is styles.DisplayType.none:
return None
# process children of the element
isd_element_children = []
if isinstance(element, model.Region):
if doc.get_body() is not None:
isd_body_element = ISD._process_element(
interval_cache,
activity_cache,
isd,
absolute_offset,
selected_region,
associated_region,
isd_element,
None,
None,
doc.get_body()
)
if isd_body_element is not None:
isd_element_children.append(isd_body_element)
else:
for child_element in element:
isd_element_child = ISD._process_element(
interval_cache,
activity_cache,
isd,
absolute_offset,
selected_region,
associated_region,
isd_element,
begin_time,
end_time,
child_element
)
if isd_element_child is not None:
isd_element_children.append(isd_element_child)
if len(isd_element_children) > 0:
isd_element.push_children(isd_element_children)
if isinstance(isd_element, (model.P, model.Rt, model.Rtc)):
text_node_list = []
_construct_text_list(isd_element, text_node_list)
_process_lwsp(text_node_list)
_prune_empty_spans(isd_element)
# remove styles that are not applicable
for style_prop in list(isd_element.iter_styles()):
if not isd_element.is_style_applicable(style_prop):
isd_element.set_style(style_prop, None)
# prune or keep the element
    if isinstance(isd_element, (model.Br, model.Text, model.Rb, model.Rbc)):
return isd_element
if isd_element.has_children():
return isd_element
if (
isinstance(isd_element, ISD.Region) and
isd_element.get_style(styles.StyleProperties.ShowBackground) is styles.ShowBackgroundType.always
):
return isd_element
return None
def _prune_empty_spans(element: model.ContentElement):
children = list(element)
for child in children:
_prune_empty_spans(child)
if isinstance(child, model.Text) and not child.get_text():
element.remove_child(child)
elif isinstance(child, model.Span) and not child:
element.remove_child(child)
def _construct_text_list(element: model.ContentElement, text_node_list: typing.List[typing.Union[model.Text, model.Br]]):
'''Constructs a list of all text and br elements in dfs order, excluding rt, rtc and rp elements'''
for child in element:
if isinstance(child, model.Br) or (isinstance(child, model.Text) and child.get_text()):
text_node_list.append(child)
elif not isinstance(child, (model.Rt, model.Rtc, model.Rp)):
_construct_text_list(child, text_node_list)
def _process_lwsp(text_node_list: typing.List[typing.Union[model.Text, model.Br]]):
'''Processes LWSP according to the space property'''
def _is_prev_char_lwsp(prev_element):
if isinstance(prev_element, model.Br):
return True
prev_text = prev_element.get_text()
return len(prev_text) > 0 and prev_text[-1] in ("\t", "\r", "\n", " ")
def _is_next_char_lwsp(next_element):
if isinstance(next_element, model.Br):
return True
next_text = next_element.get_text()
return len(next_text) > 0 and next_text[0] in ("\r", "\n")
elist = list(text_node_list)
# first pass: collapse spaces and remove leading LWSPs
i = 0
while i < len(elist):
node = elist[i]
if isinstance(node, model.Br) or node.parent().get_space() is model.WhiteSpaceHandling.PRESERVE:
i += 1
continue
trimmed_text = re.sub(r"[\t\r\n ]+", " ", node.get_text())
if len(trimmed_text) > 0 and trimmed_text[0] == " ":
if i == 0 or _is_prev_char_lwsp(elist[i - 1]):
trimmed_text = trimmed_text[1:]
node.set_text(trimmed_text)
if len(trimmed_text) == 0:
del elist[i]
else:
i += 1
# second pass: remove trailing LWSPs
for i, node in enumerate(elist):
if isinstance(node, model.Br) or node.parent().get_space() is model.WhiteSpaceHandling.PRESERVE:
i += 1
continue
node_text = node.get_text()
if node_text[-1] == " ":
if i == (len(elist) - 1) or _is_next_char_lwsp(elist[i + 1]):
node.set_text(node_text[:-1])
def _compute_length(
source_length: styles.LengthType,
pct_ref: typing.Optional[styles.LengthType],
em_ref: typing.Optional[styles.LengthType],
c_ref: typing.Optional[styles.LengthType],
px_ref: typing.Optional[styles.LengthType]
):
if source_length.units is styles.LengthType.Units.pct:
if pct_ref is None:
raise ValueError("Percent length computed without pct reference")
return styles.LengthType(
value=source_length.value * pct_ref.value / 100,
units=pct_ref.units
)
if source_length.units is styles.LengthType.Units.em:
if em_ref is None:
raise ValueError("Em length computed without em reference")
return styles.LengthType(
value=source_length.value * em_ref.value,
units=em_ref.units
)
if source_length.units is styles.LengthType.Units.c:
if c_ref is None:
raise ValueError("C length computed without c reference")
return styles.LengthType(
value=source_length.value * c_ref.value,
units=c_ref.units
)
if source_length.units is styles.LengthType.Units.px:
if px_ref is None:
raise ValueError("px length computed without px reference")
return styles.LengthType(
value=source_length.value * px_ref.value,
units=px_ref.units
)
return source_length
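# Worked example (sketch): a 50% length resolved against a percentage reference
# of 100rh yields 50rh; the values below are illustrative only.
#
#     _compute_length(
#         styles.LengthType(value=50, units=styles.LengthType.Units.pct),
#         styles.LengthType(value=100, units=styles.LengthType.Units.rh),  # pct_ref
#         None,  # em_ref
#         None,  # c_ref
#         None,  # px_ref
#     )
#     # -> LengthType(value=50.0, units=LengthType.Units.rh)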
def _make_rh_length(value: numbers.Number) -> styles.LengthType:
'''Creates a length expressed in `rh` units
'''
return styles.LengthType(
value=value,
units=styles.LengthType.Units.rh
)
def _make_rw_length(value: numbers.Number) -> styles.LengthType:
'''Creates a length expressed in `rw` units
'''
return styles.LengthType(
value=value,
units=styles.LengthType.Units.rw
)
def _get_writing_mode(isd_element: model.ContentElement) -> styles.WritingModeType:
while not isinstance(isd_element, ISD.Region):
isd_element = isd_element.get_parent()
return isd_element.get_style(styles.StyleProperties.WritingMode)
class StyleProcessor:
'''Processes style properties during the style resolution process.
Class variables:
* `style_prop`: reference to the style property that the processor handles
'''
style_prop: typing.Type[styles.StyleProperty] = None
@classmethod
def compute(cls, parent: model.ContentElement, element: model.ContentElement):
'''Reads the (specified) style property value and computes it
'''
@classmethod
def inherit(cls, parent: model.ContentElement, element: model.ContentElement):
'''Inherit the style property from the parent to the element
'''
if cls.style_prop.is_inherited and not element.has_style(cls.style_prop):
element.set_style(cls.style_prop, parent.get_style(cls.style_prop))
class StyleProcessors:
'''Processes style properties during the style resolution process
Class variables:
* `BY_STYLE_PROP`: maps a style property to a processor
'''
# pylint: disable=missing-class-docstring
class BackgroundColor(StyleProcessor):
style_prop = styles.StyleProperties.BackgroundColor
class Color(StyleProcessor):
style_prop = styles.StyleProperties.Color
class Direction(StyleProcessor):
style_prop = styles.StyleProperties.Direction
class Disparity(StyleProcessor):
style_prop = styles.StyleProperties.Disparity
class Display(StyleProcessor):
style_prop = styles.StyleProperties.Display
class DisplayAlign(StyleProcessor):
style_prop = styles.StyleProperties.DisplayAlign
class Extent(StyleProcessor):
style_prop = styles.StyleProperties.Extent
@classmethod
def compute(cls, parent: model.ContentElement, element: model.ContentElement):
style_value: styles.ExtentType = element.get_style(cls.style_prop)
# height
height = _compute_length(
style_value.height,
_make_rh_length(100),
element.get_style(styles.StyleProperties.FontSize),
_make_rh_length(100 / element.get_doc().get_cell_resolution().rows),
_make_rh_length(100 / element.get_doc().get_px_resolution().height)
)
# width
width = _compute_length(
style_value.width,
_make_rw_length(100),
element.get_style(styles.StyleProperties.FontSize),
_make_rw_length(100 / element.get_doc().get_cell_resolution().columns),
_make_rw_length(100 / element.get_doc().get_px_resolution().width)
)
element.set_style(
cls.style_prop,
styles.ExtentType(
height=height,
width=width
)
)
class FillLineGap(StyleProcessor):
style_prop = styles.StyleProperties.FillLineGap
class FontFamily(StyleProcessor):
style_prop = styles.StyleProperties.FontFamily
class FontSize(StyleProcessor):
style_prop = styles.StyleProperties.FontSize
@classmethod
def inherit(cls, parent: model.ContentElement, element: model.ContentElement):
if element.has_style(cls.style_prop):
return
parent_value: styles.LengthType = parent.get_style(cls.style_prop)
if (
isinstance(element, model.Rtc) or
(isinstance(element, model.Rt) and not isinstance(parent, model.Rtc))
):
style_value = styles.LengthType(
value=parent_value.value/2,
units=parent_value.units
)
else:
style_value = parent_value
element.set_style(cls.style_prop, style_value)
@classmethod
def compute(cls, parent: model.ContentElement, element: model.ContentElement):
style_value = element.get_style(cls.style_prop)
parent_value = parent.get_style(cls.style_prop) if parent is not None else None
pct_ref = parent_value if parent_value is not None \
else _make_rh_length(100/element.get_doc().get_cell_resolution().rows)
element.set_style(
cls.style_prop,
_compute_length(
style_value,
pct_ref,
pct_ref,
_make_rh_length(100 / element.get_doc().get_cell_resolution().rows),
_make_rh_length(100 / element.get_doc().get_px_resolution().height)
)
)
class FontStyle(StyleProcessor):
style_prop = styles.StyleProperties.FontStyle
class FontWeight(StyleProcessor):
style_prop = styles.StyleProperties.FontWeight
class LineHeight(StyleProcessor):
style_prop = styles.StyleProperties.LineHeight
@classmethod
def compute(cls, parent: model.ContentElement, element: model.ContentElement):
value = element.get_style(cls.style_prop)
if value is styles.SpecialValues.normal:
computed_value = value
else:
computed_value = _compute_length(
value,
element.get_style(styles.StyleProperties.FontSize),
element.get_style(styles.StyleProperties.FontSize),
_make_rh_length(100 / element.get_doc().get_cell_resolution().rows),
_make_rh_length(100 / element.get_doc().get_px_resolution().height)
)
element.set_style(cls.style_prop, computed_value)
class LinePadding(StyleProcessor):
style_prop = styles.StyleProperties.LinePadding
@classmethod
def compute(cls, parent: model.ContentElement, element: model.ContentElement):
element.set_style(
cls.style_prop,
_compute_length(
element.get_style(cls.style_prop),
element.get_style(styles.StyleProperties.FontSize),
element.get_style(styles.StyleProperties.FontSize),
_make_rh_length(100 / element.get_doc().get_cell_resolution().rows),
_make_rh_length(100 / element.get_doc().get_px_resolution().height)
)
)
class LuminanceGain(StyleProcessor):
style_prop = styles.StyleProperties.LuminanceGain
class MultiRowAlign(StyleProcessor):
style_prop = styles.StyleProperties.MultiRowAlign
class Opacity(StyleProcessor):
style_prop = styles.StyleProperties.Opacity
class Origin(StyleProcessor):
style_prop = styles.StyleProperties.Origin
@classmethod
def compute(cls, parent: model.ContentElement, element: model.ContentElement):
style_value: styles.CoordinateType = element.get_style(cls.style_prop)
# height
y = _compute_length(
style_value.y,
_make_rh_length(100),
None,
_make_rh_length(100 / element.get_doc().get_cell_resolution().rows),
_make_rh_length(100 / element.get_doc().get_px_resolution().height)
)
# width
x = _compute_length(
style_value.x,
_make_rw_length(100),
None,
_make_rw_length(100 / element.get_doc().get_cell_resolution().columns),
_make_rw_length(100 / element.get_doc().get_px_resolution().width)
      )
      element.set_style(
        cls.style_prop,
        styles.CoordinateType(
          x=x,
          y=y
        )
      )