import re
from datetime import datetime
from typing import List
from . import Command
from ...config import get_config_value
from ...plugins.actions import ActionPlugin
from ...utils.cyclic_list import CyclicList
from ...utils.time_utils import is_it_late, is_today_a_workday, next_workday, now, next_workday_string, DATE_FORMAT
OPERATIONS_CHANNEL_TYPE = "operations"
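# format_gender returns the German article ("die" or "der") matching the configured gender list for the given name.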
def format_gender(name):
return "die" if name in get_config_value('woman', []) else "der"
class OperationsActionPlugin(ActionPlugin):
def __init__(self, opsbot):
super().__init__(opsbot)
self._members = CyclicList([], dynamic=True)
self._members.load_state(self.read_variable("members", dict()))
self._vacations = self.read_variable("vacations", list())
self._user_override = self.read_variable("override", None)
self._sayings_responsible_today = CyclicList(self.read_config_value('quotes'))
self._sayings_responsible_today.load_state(self.read_variable("sayings_responsible_today", dict()))
self._override_user = self.read_config_value('override_user')
self._how_to_link = self.read_config_value('how_to_link')
self.add_scheduled_job(self.daily_next, 'cron', id='daily_next', day_of_week='mon-fri', hour=8, minute=0)
self.add_scheduled_job(self.daily_preview, 'cron', id='daily_preview', day_of_week='mon-fri', hour=17, minute=0)
self._operator_text_today = self.read_config_value('operator_text_today')
self._operator_text_tomorrow = self.read_config_value('operator_text_tomorrow')
def get_commands(self) -> List[Command]:
return [
Command(r"((register)|(add))", self.register, "register @user: Person in die Rotation mit aufnehmen"),
Command(r"((unregister)|(remove))", self.unregister, "unregister @user: Person aus der Rotation entfernen"),
Command(r"((next)|(weiter))", self.next, "next / weiter [@user]: Rotation weiterschalten, wenn Person angegeben auf diese Person"),
Command(r"((heute)|(current)|(today)|(who)|(wer))", self.print_current, "heute / today / wer: Gibt aus wer heute Betriebsverantwortlicher ist"),
Command(r"((morgen)|(tomorrow))", self.print_tomorrow, "morgen / tomorrow: Gibt aus wer am naechsten Werktag Betriebsverantwortlicher sein wird"),
Command(r"(shibboleet)|(shiboleet)|(shibolet)", self.override, "shibboleet: Selbsterklärend ;-)"),
Command(r".*[uU]rlaub.*", self.add_vacation,
"'Urlaub am dd.mm.yyyy', 'Urlaub von dd.mm.yyyy bis dd.mm.yyyy' oder 'Urlaub dd.mm.yyyy - dd.mm.yyyy' [@user]: Trägt Urlaub ein, optional für eine andere Person"),
]
@staticmethod
def required_configs():
return ['quotes']
def register(self, activity, mentions):
if not mentions:
mentions = [activity.from_property.name]
for mention in mentions:
if self._members.add_element(mention):
self.send_reply("<at>%(name)s</at> Willkommen in der Rotation" % dict(name=mention), reply_to=activity, mentions=[mention])
else:
self.send_reply("<at>%(name)s</at> Dich kenne ich schon" % dict(name=mention), reply_to=activity, mentions=[mention])
self.save_user_config()
def unregister(self, activity, mentions):
if not mentions:
mention = activity.from_property.name
self.send_reply("<at>%(name)s</at> Feigling. So einfach kommst du mir nicht aus" % dict(name=mention), reply_to=activity, mentions=[mention])
return
for mention in mentions:
self._members.remove_element(mention)
self.send_reply("<at>%(name)s</at> Du bist frei" % dict(name=mention), reply_to=activity, mentions=[mention])
self.save_user_config()
def override(self, activity, mentions):
"""Command @OpsBot shiboleet"""
if not self._override_user or activity.from_property.name != self._override_user:
self.send_reply("Du hast hier nichts zu sagen!", reply_to=activity)
return
if is_it_late():
self._user_override = "tomorrow"
self.send_reply("Aye, aye. <at>%s</at> Du bist vom Haken." % self._members.peek(), reply_to=activity, mentions=[self._members.peek()])
else:
self._user_override = "today"
self.send_reply("Captain auf der Brücke! <at>%s</at> Du bist vom Haken." % self._members.get(), reply_to=activity, mentions=[self._members.get()])
self.save_user_config()
def next(self, activity, mentions):
"""Command @OpsBot weiter|next"""
its_late = is_it_late()
if mentions:
member = self._members.goto(mentions[0])
if not member:
self.send_reply("Dieser User ist mir nicht bekannt", reply_to=activity)
return
if its_late:
self._members.previous()
else:
if its_late:
self._members.next()
self.select_next_tomorrow()
else:
self.select_next_today()
if its_late:
self.inform_tomorrow(reply_to=activity)
else:
self.inform_today(reply_to=activity)
self.save_user_config()
def current(self):
if self._user_override == "today" and self._override_user:
return self._override_user
return self._members.get()
def _operator_text(self):
if self._how_to_link:
return f'<a href="{self._how_to_link}">Betriebsverantwortliche</a>'
else:
return "Betriebsverantwortliche"
def print_current(self, activity, mentions):
member = self.current()
if self._operator_text_today:
msg = f'Hey <at>{member}</at>, {self._operator_text_today}'.replace('{gender}', format_gender(member))
else:
msg = f'Hey <at>{member}</at> Du bist heute {format_gender(member)} {self._operator_text()}.'
self.send_reply(msg, reply_to=activity, mentions=[member])
def daily_next(self):
"""Called each morning by scheduler to announce for the day"""
if not is_today_a_workday():
return
if self._user_override == "tomorrow":
self._user_override = "today"
self.select_next_today()
self.inform_today()
self.save_user_config()
def daily_preview(self):
"""Called each evening by scheduler to announce for the next day"""
if not is_today_a_workday():
return
if self._user_override == "today":
self._user_override = None
self.select_next_tomorrow()
self.inform_tomorrow()
self.save_user_config()
    def print_tomorrow(self, activity, mentions):
self.inform_tomorrow(activity)
def inform_today(self, reply_to=None):
if self._user_override == "today" and self._override_user:
member = self._override_user
else:
member = self._members.get()
if self._operator_text_today:
msg = f'Hey <at>{member}</at>, {self._operator_text_today} {self._sayings_responsible_today.next()}'.replace('{gender}', format_gender(member))
else:
msg = f'Hey <at>{member}</at> Du bist heute {format_gender(member)} {self._operator_text()}. {self._sayings_responsible_today.next()}'
if reply_to:
self.send_reply(msg, mentions=[member], reply_to=reply_to)
else:
self.send_message(msg, mentions=[member], channel_type=OPERATIONS_CHANNEL_TYPE)
def inform_tomorrow(self, reply_to=None):
if self._user_override == "tomorrow" and self._override_user:
member = self._override_user
else:
member = self._members.peek()
tomorrow = next_workday_string()
if self._operator_text_tomorrow:
msg = f'Hey <at>{member}</at>, {self._operator_text_tomorrow}'.replace('{gender}', format_gender(member)).replace('{tomorrow}', tomorrow)
else:
msg = f'Hey <at>{member}</at> Du wirst {tomorrow} {format_gender(member)} {self._operator_text()} sein.'
if reply_to:
self.send_reply(msg, mentions=[member], reply_to=reply_to)
else:
self.send_message(msg, mentions=[member], channel_type=OPERATIONS_CHANNEL_TYPE)
def add_vacation(self, activity, mentions):
if mentions:
name = mentions[0]
else:
name = activity.from_property.name
text = activity.text
try:
r_1 = re.compile(r".*[uU]rlaub am (\d{1,2}\.\d{1,2}\.\d{4}).*")
r_2 = re.compile(r".*[uU]rlaub vo[nm] (\d{1,2}\.\d{1,2}\.\d{4}) bis (\d{1,2}\.\d{1,2}\.\d{4}).*")
r_3 = re.compile(r".*[uU]rlaub (\d{1,2}\.\d{1,2}\.\d{4}) - (\d{1,2}\.\d{1,2}\.\d{4}).*")
if r_1.match(text):
from_string = r_1.match(text).groups()[0]
to_string = from_string
elif r_2.match(text):
groups = r_2.match(text).groups()
from_string = groups[0]
to_string = groups[1]
elif r_3.match(text):
groups = r_3.match(text).groups()
from_string = groups[0]
to_string = groups[1]
else:
self.send_reply(
"Ich habe dich nicht verstanden. Ich verstehe die folgenden Formate: 'Urlaub am dd.mm.yyyy', 'Urlaub von dd.mm.yyyy bis dd.mm.yyyy' oder 'Urlaub dd.mm.yyyy - dd.mm.yyyy'",
reply_to=activity, mentions=[name])
return
self._vacations.append((name, from_string, to_string))
self.send_reply("Alles klar.", reply_to=activity)
self.save_user_config()
except Exception as ex:
self.logger.info(ex)
self.send_reply(
"Ich habe dich nicht verstanden. Ich verstehe die folgenden Formate: 'Urlaub am dd.mm.yyyy', 'Urlaub von dd.mm.yyyy bis dd.mm.yyyy' oder 'Urlaub dd.mm.yyyy - dd.mm.yyyy'",
reply_to=activity, mentions=[])
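    # Advance the rotation to the next member who is not on vacation today,
    # trying at most one full cycle through the member list.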
def select_next_today(self):
date_obj = now().date()
n = 0
max_n = self._members.size()
ok = False
while not ok and n < max_n:
ok = True
member = self._members.next()
for vacation in self._vacations:
if vacation[0] != member:
continue
date_from = datetime.strptime(vacation[1], DATE_FORMAT).date()
date_to = datetime.strptime(vacation[2], DATE_FORMAT).date()
if date_from <= date_obj <= date_to:
ok = False
n += 1
return member
def select_next_tomorrow(self):
date_obj = next_workday()
n = 1
max_n = self._members.size()
ok = False
member = self._members.peek()
while not ok and n < max_n:
ok = True
member = self._members.peek()
for vacation in self._vacations:
if vacation[0] != member:
continue
date_from = datetime.strptime(vacation[1], DATE_FORMAT).date()
date_to = datetime.strptime(vacation[2], DATE_FORMAT).date()
if date_from <= date_obj <= date_to:
ok = False
continue
n += 1
if not ok:
member = self._members.next()
return member
def save_user_config(self):
self.save_variable("sayings_responsible_today", self._sayings_responsible_today.get_state())
self.save_variable("members", self._members.get_state())
self.save_variable("vacations", self._vacations)
self.save_variable("override", self._user_override)
|
import os
from shutil import copyfile
base_folder = "Data_raw/house_test/"
to_folder = "Data_raw/house_single_test/"
fw_train = open(to_folder+"train.txt", "w+")
fw_test = open(to_folder+"test.txt", "w+")
total_count = 0
for name in os.listdir(base_folder):
idx_count = 0
if not name.startswith(".") and not name.endswith("txt"):
for label in os.listdir(base_folder+name):
if label.startswith("label"):
idx_count += 1
for i in range(idx_count):
if not os.path.exists(to_folder+name+"/labels"):
os.makedirs(to_folder+name+"/labels")
if not os.path.exists(to_folder+name+"/JPEGImages"):
os.makedirs(to_folder+name+"/JPEGImages")
for txt in os.listdir(base_folder+name+"/labels"+str(i)):
if txt.endswith("txt"):
flag = True
for j in range(idx_count):
if i != j and os.path.isfile(base_folder+name+"/labels"+str(j)+"/"+txt):
flag = False
break
if (flag):
total_count+=1
copyfile(base_folder+name+"/labels"+str(i)+"/"+txt, to_folder+name+"/labels/"+txt)
# copyfile(base_folder+name+"/outputcamerafile", to_folder+name+"/outputcamerafile")
# copyfile(base_folder+name+"/house.json", to_folder+name+"/house.json")
# copyfile(base_folder+name+"/house.mtl", to_folder+name+"/house.mtl")
# copyfile(base_folder+name+"/house.obj", to_folder+name+"/house.obj")
copyfile(base_folder+name+"/JPEGImages/"+txt.replace("txt", "jpg"), to_folder+name+"/JPEGImages/"+txt.replace("txt", "jpg"))
fw_train.write("../"+to_folder+name+"/JPEGImages/"+txt.replace("txt", "jpg")+"\n")
fw_test.write("../"+to_folder+name+"/JPEGImages/"+txt.replace("txt", "jpg")+"\n")
fw_train.close()
fw_test.close()
print(total_count)
|
import visualpriors
import torch
from config.config import device
def mid_level_representations(input_image_tensor, representation_names):
"""
:param input_image_tensor: (batch_size, 3, 256, 256)
:param representation_names: list
    :return: concatenated image tensor to pass into FCN (batch_size, 8*len(representation_names), 16, 16)
"""
representations = []
for name in representation_names:
        # (batch_size, 3, 256, 256) -> (batch_size, 8, 16, 16)
representations.append(visualpriors.representation_transform(input_image_tensor, name, device=device))
return torch.cat(representations, dim=1)
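# Minimal usage sketch (not part of the original module; assumes the visualpriors package
# and the device configured in config.config). It only illustrates the expected shapes when
# stacking two mid-level representations (8 channels each, concatenated along dim=1).
if __name__ == "__main__":
    dummy_batch = torch.zeros(2, 3, 256, 256, device=device)
    features = mid_level_representations(dummy_batch, ["normal", "depth_euclidean"])
    print(features.shape)  # expected: torch.Size([2, 16, 16, 16])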
|
"""Main module."""
import csv
import logging
from io import StringIO
from .client import CellarTrackerClient
from .enum import CellarTrackerFormat, CellarTrackerTable
_LOGGER = logging.getLogger(__name__)
class CellarTracker(object):
"""
CellarTracker is the class handling the CellarTracker data export.
"""
    def __init__(self, username: str, password: str):
self.client = CellarTrackerClient(username, password)
def get_list(self):
"""Get list."""
return self._get_data(table=CellarTrackerTable.List)
def get_inventory(self):
"""Get inventory."""
return self._get_data(table=CellarTrackerTable.Inventory)
def get_notes(self):
"""Get notes."""
return self._get_data(table=CellarTrackerTable.Notes)
def get_private_notes(self):
"""Get private notes data."""
return self._get_data(table=CellarTrackerTable.PrivateNotes)
def get_purchase(self):
"""Get purchase data."""
return self._get_data(table=CellarTrackerTable.Purchase)
def get_pending(self):
"""Get pending."""
return self._get_data(table=CellarTrackerTable.Pending)
def get_consumed(self):
"""Get consumed."""
return self._get_data(table=CellarTrackerTable.Consumed)
def get_availability(self):
"""Get availability."""
return self._get_data(table=CellarTrackerTable.Availability)
def get_tag(self):
"""Get tag."""
return self._get_data(table=CellarTrackerTable.Tag)
def get_pro_review(self):
"""Get pro review."""
return self._get_data(table=CellarTrackerTable.ProReview)
def get_bottles(self):
"""Get bottles."""
return self._get_data(table=CellarTrackerTable.Bottles)
def get_food_tag(self):
"""Get food tag."""
return self._get_data(table=CellarTrackerTable.FoodTag)
def _get_data(self, table: CellarTrackerTable):
"""Get data."""
return _parse_data(
self.client.get(table=table, format=CellarTrackerFormat.tab)
)
def _parse_data(data: str):
reader = csv.DictReader(StringIO(data), dialect="excel-tab")
results = []
for row in reader:
result = {}
for key, value in row.items():
result[key] = value
results.append(result)
return results
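# Minimal usage sketch (not part of the original module; the credentials are placeholders).
# Each getter downloads one CellarTracker export table and returns it as a list of row dicts.
if __name__ == "__main__":
    tracker = CellarTracker(username="your_username", password="your_password")
    for row in tracker.get_inventory():
        print(row)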
|
import json
import pandas as pd
import argparse
import script_util as su
import scipy.stats as st
def compute_scores(summary_df, fc, bc):
alpha = 0.05
summary_df["wald_reject"] = (summary_df["wald_p"].astype(float) < alpha).astype(int)
summary_df["cmh_reject"] = (summary_df["cmh_p"].astype(float) < alpha).astype(int)
summary_df["wald_2s"] = summary_df["wald"].astype(float).map(lambda x: x**2)
summary_df["cmh_2s"] = summary_df["cmh"].astype(float).map(lambda x: x**2)
# MLE estimates
N_A = summary_df["final_A0"] + summary_df["final_A1"]
N_B = summary_df["final_B0"] + summary_df["final_B1"]
#summary_df["pA_mle"] = summary_df["final_A1"] / N_A
#summary_df["pB_mle"] = summary_df["final_B1"] / N_B
#summary_df["pA_mle_bias"] = summary_df["pA_mle"] - summary_df["pA"]
#summary_df["pB_mle_bias"] = summary_df["pB_mle"] - summary_df["pB"]
#summary_df["effect"] = summary_df["pA_mle"] - summary_df["pB_mle"]
#summary_df["true_effect"] = summary_df["pA"] - summary_df["pB"]
#summary_df["effect_bias"] = summary_df["effect"] - summary_df["true_effect"]
summary_df["effect_estimate"] = summary_df["effect_estimate"].astype(float)
summary_df["effect_bias"] = summary_df["effect_estimate"].astype(float) - (summary_df["pA"] - summary_df["pB"])
# Total number of patients
summary_df["pat"] = N_A + N_B
summary_df["nA-nB"] = N_A - N_B
summary_df["nA-nB_norm"] = summary_df["nA-nB"]/summary_df["pat"]
# Excess failures (recall: by construction, pA >= pB)
summary_df["A_fraction"] = N_A / summary_df["pat"]
summary_df["excess_failures"] = (summary_df["pA"] - summary_df["pB"])*(N_B-N_A)/summary_df["pat"]
summary_df["failures"] = summary_df["final_A0"] + summary_df["final_B0"]
# Interim analyses; early stopping
summary_df["obf_stopping_point"] = (summary_df["interim_n_patients"] / summary_df["pat"])
summary_df["obf_stopped_early"] = (summary_df["interim_n_patients"] < summary_df["pat"]).astype(float)
summary_df["obf_reject"] = (summary_df["interim_n_patients"] != summary_df["pat"] | summary_df["cmh_reject"])
#beta = 0.2
#chisq_beta = qchisq(1.0-beta)
# Total utility
#summary_df["utility_wald"] = summary_df["wald"] \
summary_df["utility_wald"] = summary_df["wald_2s"]/summary_df["pat"]\
- summary_df["excess_failures"]*fc \
- summary_df["blocks"]*bc
#summary_df["utility_cmh"] = summary_df["cmh"] \
summary_df["utility_cmh"] = summary_df["cmh_2s"]/summary_df["pat"]\
- summary_df["excess_failures"]*fc \
- summary_df["blocks"]*bc
return summary_df
def aggregate_quantities(summary_df, gp_columns):
var_cols = ["cmh_reject", "excess_failures", "utility_cmh", "nA-nB_norm"]
q_cols = ["nA-nB"]
ci_cols = ["nA-nB", "cmh_reject", "effect_bias", "nA-nB_norm"]
ci_conf = 0.90
agg_gp = summary_df.groupby(gp_columns)
agg_df = agg_gp.mean()
for col in var_cols:
agg_df[col+"_var"] = agg_gp[col].var()
for col in q_cols:
agg_df[col+"_10"] = agg_gp[col].quantile(0.1)
agg_df[col+"_90"] = agg_gp[col].quantile(0.9)
agg_df[col+"_05"] = agg_gp[col].quantile(0.05)
agg_df[col+"_95"] = agg_gp[col].quantile(0.95)
for col in ci_cols:
agg_df[col+"_n"] = agg_gp[col].count()
agg_df[col+"_sem"] = agg_gp[col].sem()
agg_df[col+"_h"] = agg_df[col+"_sem"].values * st.t.ppf(0.5*(1.0 + ci_conf), agg_df[col+"_n"].values)
return agg_df
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--summaries", required=True,
help="one or more JSON files containing simulation summaries",
nargs="+")
parser.add_argument("--output_tsv", required=True, help="path to output TSV file")
parser.add_argument("--fc", required=True, type=float, help="Failure cost")
parser.add_argument("--bc", required=True, type=float, help="Block cost")
parser.add_argument("--gp_columns", nargs="+", default=["pA", "pB", "pat"])
args = parser.parse_args()
summary_table = su.tabulate_jsons(args.summaries)
scored_table = compute_scores(summary_table, args.fc, args.bc)
agg_table = aggregate_quantities(scored_table, args.gp_columns)
agg_table.to_csv(args.output_tsv, sep="\t", index=True)
|
from saien import login_manager
from saien.models import User
from flask_login import current_user
@login_manager.user_loader
def load_user(user_id):
    if user_id is not None:
        return User.query.get(user_id)
return None
@login_manager.unauthorized_handler
def unauthorized():
return "NO PERMISSION"
|
import os
from argparse import ArgumentParser, Namespace
from pprint import pformat
from urllib.parse import urlparse
import numpy as np
import pandas as pd
import torch
from attr import asdict
from allrank.click_models.click_utils import click_on_slates
from allrank.config import Config
from allrank.data.dataset_loading import load_libsvm_dataset_role
from allrank.data.dataset_saving import write_to_libsvm_without_masked
from allrank.inference.inference_utils import rank_slates, metrics_on_clicked_slates
from allrank.models.model import make_model
from allrank.models.model_utils import get_torch_device, CustomDataParallel, load_state_dict_from_file
from allrank.utils.args_utils import split_as_strings
from allrank.utils.command_executor import execute_command
from allrank.utils.config_utils import instantiate_from_recursive_name_args
from allrank.utils.file_utils import create_output_dirs, PathsContainer, copy_local_to_gs
from allrank.utils.ltr_logging import init_logger
from allrank.utils.python_utils import all_equal
def parse_args() -> Namespace:
parser = ArgumentParser("allRank rank and apply click model")
parser.add_argument("--job-dir", help="Base output path for all experiments", required=True)
parser.add_argument("--run-id", help="Name of this run to be recorded (must be unique within output dir)",
required=True)
parser.add_argument("--config-file-name", required=True, type=str, help="Name of json file with model config")
parser.add_argument("--input-model-path", required=True, type=str, help="Path to the model to read weights")
parser.add_argument("--roles", required=True, type=split_as_strings,
help="List of comma-separated dataset roles to load and process")
return parser.parse_args()
def run():
# reproducibility
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
np.random.seed(42)
args = parse_args()
paths = PathsContainer.from_args(args.job_dir, args.run_id, args.config_file_name)
os.makedirs(paths.base_output_path, exist_ok=True)
create_output_dirs(paths.output_dir)
logger = init_logger(paths.output_dir)
logger.info("will save data in {output_dir}".format(output_dir=paths.base_output_path))
# read config
config = Config.from_json(paths.config_path)
logger.info("Config:\n {}".format(pformat(vars(config), width=1)))
output_config_path = os.path.join(paths.output_dir, "used_config.json")
execute_command("cp {} {}".format(paths.config_path, output_config_path))
datasets = {role: load_libsvm_dataset_role(role, config.data.path, config.data.slate_length) for role in args.roles}
n_features = [ds.shape[-1] for ds in datasets.values()]
assert all_equal(n_features), f"Last dimensions of datasets must match but got {n_features}"
# gpu support
dev = get_torch_device()
logger.info("Will use device {}".format(dev.type))
# instantiate model
model = make_model(n_features=n_features[0], **asdict(config.model, recurse=False))
model.load_state_dict(load_state_dict_from_file(args.input_model_path, dev))
logger.info(f"loaded model weights from {args.input_model_path}")
if torch.cuda.device_count() > 1:
model = CustomDataParallel(model)
logger.info("Model training will be distributed to {} GPUs.".format(torch.cuda.device_count()))
model.to(dev)
assert config.click_model is not None, "click_model must be defined in config for this run"
click_model = instantiate_from_recursive_name_args(name_args=config.click_model)
ranked_slates = rank_slates(datasets, model, config)
clicked_slates = {role: click_on_slates(slates, click_model, include_empty=False) for role, slates in ranked_slates.items()}
# save clickthrough datasets
for role, slates in clicked_slates.items():
write_to_libsvm_without_masked(os.path.join(paths.output_dir, f"{role}.txt"), *slates) # type: ignore
# calculate metrics
metered_slates = {role: metrics_on_clicked_slates(slates) for role, slates in clicked_slates.items()} # type: ignore
for role, metrics in metered_slates.items():
metrics_df = pd.DataFrame(metrics)
logger.info(f"{role} metrics summary:")
logger.info(metrics_df.mean())
metrics_df.to_csv(os.path.join(paths.output_dir, f"{role}_metrics.csv"), index=False)
pd.DataFrame(metrics_df.mean()).T.to_csv(os.path.join(paths.output_dir, f"{role}_metrics_mean.csv"), index=False)
if urlparse(args.job_dir).scheme == "gs":
copy_local_to_gs(paths.local_base_output_path, args.job_dir)
if __name__ == "__main__":
run()
|
# Copyright 2018 D-Wave Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http: // www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
try:
import matplotlib.pyplot as plt
except ImportError:
# Not required for demo
pass
from qboost import QBoostClassifier, qboost_lambda_sweep
from datasets import make_blob_data, get_handwritten_digits_data
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Run QBoost example",
epilog="Information about additional options that are specific to the data set can be obtained using either 'demo.py blobs -h' or 'demo.py digits -h'.")
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--cross-validation', action='store_true',
help='use cross-validation to estimate the value of the regularization parameter')
parser.add_argument('--lam', default=0.01, type=float,
help='regularization parameter (default: %(default)s)')
# Note: required=True could be useful here, but not available
# until Python 3.7
subparsers = parser.add_subparsers(
title='dataset', description='dataset to use', dest='dataset')
sp_blobs = subparsers.add_parser('blobs', help='blobs data set')
sp_blobs.add_argument('--num-samples', type=int, default=2000,
help='number of samples (default: %(default)s)')
sp_blobs.add_argument('--num-features', type=int, default=10,
help='number of features (default: %(default)s)')
sp_blobs.add_argument('--num-informative', type=int, default=2,
help='number of informative features (default: %(default)s)')
sp_digits = subparsers.add_parser(
'digits', help='handwritten digits data set')
sp_digits.add_argument('--digit1', type=int, default=0, choices=range(10),
help='first digit to include (default: %(default)s)')
sp_digits.add_argument('--digit2', type=int, default=1, choices=range(10),
help='second digit to include (default: %(default)s)')
sp_digits.add_argument('--plot-digits', action='store_true',
help='plot a random sample of each digit')
args = parser.parse_args()
if args.dataset == 'blobs':
n_samples = args.num_samples
n_features = args.num_features
n_informative = args.num_informative
X, y = make_blob_data(
n_samples=n_samples, n_features=n_features, n_informative=n_informative)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4)
if args.cross_validation:
# See Boyda et al. (2017), Eq. (17) regarding normalization
normalized_lambdas = np.linspace(0.0, 0.5, 10)
lambdas = normalized_lambdas / n_features
print('Performing cross-validation using {} values of lambda, this may take several minutes...'.format(len(lambdas)))
qboost, lam = qboost_lambda_sweep(
X_train, y_train, lambdas, verbose=args.verbose)
else:
qboost = QBoostClassifier(X_train, y_train, args.lam)
if args.verbose:
qboost.report_baseline(X_test, y_test)
print('Informative features:', list(range(n_informative)))
print('Selected features:', qboost.get_selected_features())
print('Score on test set: {:.3f}'.format(qboost.score(X_test, y_test)))
elif args.dataset == 'digits':
if args.digit1 == args.digit2:
raise ValueError("must use two different digits")
X, y = get_handwritten_digits_data(args.digit1, args.digit2)
n_features = np.size(X, 1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4)
print('Number of features:', np.size(X, 1))
print('Number of training samples:', len(X_train))
print('Number of test samples:', len(X_test))
if args.cross_validation:
# See Boyda et al. (2017), Eq. (17) regarding normalization
normalized_lambdas = np.linspace(0.0, 1.75, 10)
lambdas = normalized_lambdas / n_features
            print('Performing cross-validation using {} values of lambda, this may take several minutes...'.format(len(lambdas)))
qboost, lam = qboost_lambda_sweep(
X_train, y_train, lambdas, verbose=args.verbose)
else:
qboost = QBoostClassifier(X_train, y_train, args.lam)
if args.verbose:
qboost.report_baseline(X_test, y_test)
print('Number of selected features:',
len(qboost.get_selected_features()))
print('Score on test set: {:.3f}'.format(qboost.score(X_test, y_test)))
if args.plot_digits:
digits = load_digits()
images1 = [image for image, target in zip(
digits.images, digits.target) if target == args.digit1]
images2 = [image for image, target in zip(
digits.images, digits.target) if target == args.digit2]
f, axes = plt.subplots(1, 2)
# Select a random image from each set to show:
i1 = np.random.choice(len(images1))
i2 = np.random.choice(len(images2))
for ax, image in zip(axes, (images1[i1], images2[i2])):
ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
elif not args.dataset:
parser.print_help()
|
import hashlib
def hashData(data: str) -> str:
hashobj = hashlib.sha1(data.encode())
digest = hashobj.hexdigest()
return digest
d = hashData("This is my testing string")
print(d)
# hashlib.sha256(data.encode())
|
class NumberingException(Exception):
pass
|
#############################################################################
#
#
# CSS Style webGenFramework module to BFA c7
#
#
#############################################################################
""" This is a CSS style module for a simple HTML/JS/CSS generator/framework with the purpose of maintaining the
webclient of bfa.
Dependencies:
css <- style
note:: Author(s): Mitch last-check: 07.07.2021 """
from bfassist.webgen.framework.css import CSS_Rule
# noinspection PyUnusedLocal
def __preload__(forClient: bool = True):
pass
# noinspection PyUnusedLocal
def __postload__(forClient: bool = True):
pass
class CSS_Style:
""" Representation of CSS styling for a single instance.
:param target: The target specification for the style to be applied to.
:param rules: Set of style rules to be applied.
note:: Author(s): Mitch """
def __init__(self, target: str, rules: set):
self.target = target
        if all(isinstance(rule, CSS_Rule) for rule in rules):
self.rules = rules
else:
raise TypeError("All contents of rules must be CSS rules.")
def toString(self):
""" Converts a style to a string.
:return: Style as string.
note:: Author(s): Mitch """
css = self.target + " {\n"
for rule in self.rules:
css += rule.toString()
css += "}\n"
return css
|
import sys
import os
from os.path import join as pjoin
import json
import shutil
from os import makedirs
from textwrap import dedent
from nose.tools import eq_, ok_
from .utils import (temp_working_dir, temp_working_dir_fixture, assert_raises,
cat)
from ..fileutils import touch
from .. import build_tools
from nose import SkipTest
def test_execute_files_dsl():
def assertions(dirname):
with file(pjoin(dirname, 'bin', 'hit')) as f:
x = f.read().strip()
assert x == ("sys.path.insert(0, sys.path.join('%s', 'lib'))" % dirname)
assert os.stat(pjoin(dirname, 'bin', 'hit')).st_mode & 0100
with file(pjoin(dirname, 'doc.json')) as f:
assert json.load(f) == {"foo": "bar"}
with temp_working_dir() as d:
doc = [
{
"target": "$ARTIFACT/bin/hit",
"executable": True,
"expandvars": True,
"text": [
"sys.path.insert(0, sys.path.join('$ARTIFACT', 'lib'))"
]
},
{
"target": "$ARTIFACT/doc.json",
"object": {"foo": "bar"}
}
]
# relative paths
build_tools.execute_files_dsl(doc, dict(ARTIFACT='A'))
assertions('A')
# error on collisions for both types
with assert_raises(OSError):
build_tools.execute_files_dsl([doc[0]], dict(ARTIFACT='A'))
with assert_raises(OSError):
build_tools.execute_files_dsl([doc[1]], dict(ARTIFACT='A'))
# absolute paths
with temp_working_dir() as another_dir:
build_tools.execute_files_dsl(doc, dict(ARTIFACT=pjoin(d, 'B')))
assertions(pjoin(d, 'B'))
# test with a plain file and relative target
doc = [{"target": "foo/bar/plainfile", "text": ["$ARTIFACT"]}]
build_tools.execute_files_dsl(doc, dict(ARTIFACT='ERROR_IF_USED'))
with file(pjoin('foo', 'bar', 'plainfile')) as f:
assert f.read() == '$ARTIFACT'
assert not (os.stat(pjoin('foo', 'bar', 'plainfile')).st_mode & 0100)
# test with a file in root directory
doc = [{"target": "plainfile", "text": ["bar"]}]
build_tools.execute_files_dsl(doc, {})
with file(pjoin('plainfile')) as f:
assert f.read() == 'bar'
class MockBuildStore:
def __init__(self, is_in):
self.is_in = is_in
def is_path_in_build_store(self, d):
return self.is_in
@temp_working_dir_fixture
def test_python_shebang(d):
if 'linux' not in sys.platform:
raise SkipTest('Linux only')
from subprocess import Popen, PIPE
makedirs(pjoin(d, 'my-python', 'bin'))
makedirs(pjoin(d, 'profile', 'bin'))
abs_interpreter = pjoin(d, 'my-python', 'bin', 'python')
script_file = pjoin(d, 'profile', 'bin', 'myscript')
os.symlink(sys.executable, abs_interpreter)
os.symlink(abs_interpreter, pjoin(d, 'profile', 'bin', 'python'))
script = dedent('''\
#! %s
# This is a comment
# """
u"""A test script
"""
import sys
print sys.executable
print ':'.join(sys.argv)
''') % abs_interpreter
# Should be no difference when path is not in build store
new_script = ''.join(build_tools.make_relative_multiline_shebang(MockBuildStore(False),
script_file,
script.splitlines(True)))
assert new_script == script
# Path is in build store...
new_script = ''.join(build_tools.make_relative_multiline_shebang(MockBuildStore(True),
script_file,
script.splitlines(True)))
assert new_script != script
with open(script_file, 'w') as f:
f.write(new_script)
os.chmod(script_file, 0o755)
os.symlink('profile/bin/myscript', 'scriptlink')
def runit(entry_point):
p = Popen([entry_point, "a1 a2", "b "], stdout=PIPE)
out, _ = p.communicate()
outlines = out.splitlines()
assert p.wait() == 0
eq_("%s:a1 a2:b " % entry_point, outlines[1])
return outlines[0]
for relative in ['./scriptlink', 'profile/bin/myscript']:
for entry_point in [relative, os.path.realpath(relative)]:
touch(pjoin(d, 'profile', 'artifact.json'))
#print cat(entry_point)
intp = runit(entry_point)
eq_("%s/profile/bin/python" % d, intp)
os.unlink(pjoin(d, 'profile', 'artifact.json'))
#intp = runit(entry_point)
#assert "%s/my-python/bin/python" % d == intp
|
from django.contrib import admin
# Register your models here.
from .models import Notion, NotionLike
class NotionLikeAdmin(admin.TabularInline):
model = NotionLike
class NotionAdmin(admin.ModelAdmin):
list_display = ['__str__', 'user']
search_fields = ['user__username', 'user__email']
class Meta:
model = Notion
admin.site.register(Notion, NotionAdmin)
|
from datetime import datetime
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_string('DATA_PATH', "dataset/DATA_FILE_PATH", "")
flags.DEFINE_string('LABEL_PATH', "dataset/LABEL_FILE_PATH", "")
flags.DEFINE_string('DICT_PATH', "dictionary/DICT_FILE_PATH", "")
flags.DEFINE_integer('VOCAB_SIZE', 20000, '')
flags.DEFINE_integer('BATCH_SIZE', 32, '')
flags.DEFINE_integer('SEQ_LEN', 60, '')
flags.DEFINE_integer('LABELED_NUM', 500, '')
flags.DEFINE_integer('LABEL_CLASS', 2, '')
flags.DEFINE_integer('EPOCH', 40, '')
flags.DEFINE_integer('BATCHES_PER_EPOCH', 3000, '')
flags.DEFINE_string('VAE_NAME', 'Simple_VAE', '')
flags.DEFINE_string('ENCODER_NAME', 'Encoder_vae', '')
flags.DEFINE_string('DECODER_NAME', 'Decoder_vae', '')
flags.DEFINE_float('ENCODER_DROPOUT_KEEP', 0.7, '')
flags.DEFINE_float('DECODER_DROPOUT_KEEP', 0.9, '')
flags.DEFINE_float('DECODER_DROPWORD_KEEP', 0.6, '')
flags.DEFINE_float('LEARNING_RATE', 0.001, '')
flags.DEFINE_integer('LR_DECAY_START', 30, '')
flags.DEFINE_float('MAX_GRAD', 5.0, '')
flags.DEFINE_integer('EMBED_SIZE', 512, '')
flags.DEFINE_integer('LATENT_VARIABLE_SIZE', 32, '')
flags.DEFINE_integer('RNN_NUM', 1, '')
flags.DEFINE_integer('RNN_SIZE', 1024, '')
flags.DEFINE_boolean('DECODER_BATCHNORM', True, '')
flags.DEFINE_integer('DECODER_CNN_INTERNAL_CHANNEL', 512, '')
flags.DEFINE_integer('DECODER_CNN_EXTERNAL_CHANNEL', 1024, '')
flags.DEFINE_integer('DECODER_CNN_FILTER_SIZE', 3, '')
decoder_cnn_dilation = [1, 2, 4]
flags.DEFINE_integer('DECODER_CNN_LAYER_NUM', len(decoder_cnn_dilation), '')
flags.DEFINE_integer('DECODER_CNN_DILATION', decoder_cnn_dilation, '')
flags.DEFINE_integer('DECODER_CNN_PAD', [2, 4, 8], '')
flags.DEFINE_float('INIT_KLD_WEIGHT', 0.01, '')
flags.DEFINE_integer('KLD_ANNEAL_START', 0, '')
flags.DEFINE_integer('KLD_ANNEAL_END', 40 * 1000, '')
flags.DEFINE_string('LOG_DIR', "log/log" + datetime.now().strftime("%y%m%d-%H%M"), "")
FLAGS = flags.FLAGS
|
# PyTrace: Prototyping iterative raytracing
from math import sqrt
import time
class vec3:
'3d vector class'
def __init__(self, x = 0, y = 0, z = 0):
self.x = x
self.y = y
self.z = z
    # setting new coordinates of the vector
def set(self, x, y, z):
self.x = x
self.y = y
self.z = z
# representation for printing it out
def __str__(self):
return 'vec3 @ ({0}, {1}, {2})'.format(self.x, self.y, self.z)
# vec operations:
def __add__(self, other):
return vec3(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
return vec3(self.x - other.x, self.y - other.y, self.z - other.z)
def __mul__(self, num):
return vec3(self.x * num, self.y * num, self.z * num)
def __truediv__(self, num):
return vec3(self.x / num, self.y / num, self.z / num)
def dot(self, other):
return (self.x * other.x + self.y * other.y + self.z * other.z)
def length(self):
return sqrt(self.x * self.x + self.y * self.y + self.z * self.z)
# does not affect parameter
def normalized(vec):
norm = sqrt(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z)
if norm == 0:
norm = 1
return vec3(vec.x / norm, vec.y / norm, vec.z / norm)
def sq_norm(vec):
norm = sqrt(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z)
return norm
def dist(vec1, vec2):
v = vec2 - vec1
return sqrt(v.x * v.x + v.y * v.y + v.z * v.z)
"""
v1 = vec3(1,0,0)
v2 = vec3(1,2,2)
v3 = vec3(1,0,1)
v4 = (((v1 + v2 - v3) / 5))
print(normalized(v4))
"""
class Ray:
def __init__(self, start, end):
self.start = start
self.end = end
self.sdir = normalized(end - start)
def dir(self):
return normalized(self.end - self.start)
def org(self):
return self.start
def __str__(self):
return "Vector: {0} -> {1}".format(self.start, self.end)
def length(self):
return dist(self.end, self.start)
def f(p):
if dist(p, vec3(0,150,0)) < 50:
return 0
else:
return 10
def Sphere(x, center, radius):
return dist(x, center) - radius
def s1(x):
return Sphere(x, vec3(W//2 + 200, 400, H//2), 300)
def s2(x):
return Sphere(x, vec3(W//2 - 330, 400, H//2 + 330), 300)
def plane(p):
return p.x - 3
def clamp(clr):
if clr.x > 255:
clr.x = 255
if clr.x < 0:
clr.x = 0
if clr.y > 255:
clr.y = 255
if clr.y < 0:
clr.y = 0
if clr.z > 255:
clr.z = 255
if clr.z < 0:
clr.z = 0
return clr
H, W = 300, 300
MAX_RAY_LENGTH = 900
camera = vec3(W//2, -500, H//2)
light = vec3(W//2, 0, H//2) + vec3(10, 0, 10)
object_functions = [s1, s2, plane]
img = [[0 for w in range (0, W)] for h in range(0, H)]
console = [['.' for w in range (0, W)] for h in range(0, H)]
out = open('out.ppm', 'w')
out.write('P3\n{0} {1} 255\n'.format(W, H))
def DistanceEval(f, p): # DistEval(func, point)
return f(p)
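# EstNormal estimates the surface normal at point z of the implicit surface obj via central
# differences of its distance field along each axis (i.e. the normalized gradient of the SDF).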
def EstNormal(z, obj, eps):
z1 = z + vec3(eps, 0, 0)
z2 = z - vec3(eps, 0, 0)
z3 = z + vec3(0, eps, 0)
z4 = z - vec3(0, eps, 0)
z5 = z + vec3(0, 0, eps)
z6 = z - vec3(0, 0, eps)
dx = DistanceEval(obj, z1) - DistanceEval(obj, z2)
dy = DistanceEval(obj, z3) - DistanceEval(obj, z4)
dz = DistanceEval(obj, z5) - DistanceEval(obj, z6)
return normalized(vec3(dx,dy,dz) / (2.0 * eps))
def RayIntersectLinear(ray, step = 40):
i = 0
while i <= MAX_RAY_LENGTH:
dot = ray.org() + ray.sdir * i * step
for f in object_functions:
if f(dot) <= 0:
if step > 2:
i -= 1
step /= 2
else:
return [dist(ray.org(), dot), dot]
i += 1
return False
'''
def RayIntersect(ray):
step = 1
for i in range(0, MAX_RAY_LENGTH):
dot = ray.org() + ray.sdir * i * step
for f in object_functions:
if f(dot) <= 0:
return [dist(ray.org(), dot), dot]
return False
'''
# If I'm right, this should be a distance-aided (sphere tracing) implementation
def RayIntersect(ray):
dot = ray.org()
dist_min = object_functions[0](dot)
min_dist = 4
max_len = MAX_RAY_LENGTH
while dist_min >= min_dist and dist(ray.org(), dot) <= max_len:
dist_min = object_functions[0](dot)
for f in object_functions[1:]:
dst = f(dot)
if dst < dist_min:
dist_min = dst
dot += ray.sdir * dist_min
#print("Out of cycle!\n")
return RayIntersectLinear(Ray(dot, dot + ray.sdir))
def RayTrace(ray):
color = vec3(0,0,0)
hit = RayIntersect(ray) # either a [dist, hit_point] or False.
if not hit:
return color
hit_point = hit[1]
L = light - hit_point
f = object_functions[0]
for func in object_functions[1:]:
if func(hit_point) < f(hit_point):
f = func
N = EstNormal(hit_point, f, 0.001)
dL = normalized(N).dot(normalized(L))
color = clamp(vec3(255,255,255) * dL)
#shading:
#for ls in scene.lights:
#if visible(hit_point, ls):
#color += shade(hit_point, hit.normal)
return color
def RTC():
for h in range(0, H):
percent = h*100/H
if percent % 10 == 0: print(percent, '% complete\n') # % complete
for w in range(0, W):
ray = Ray(camera, vec3(h, 20, w))
color = RayTrace(ray)
img[h][w] = color
out.write('{0} {1} {2}\n'.format(int(color.x), int(color.y), int(color.z)))
t1 = time.perf_counter()
RTC()
t2 = time.perf_counter()
print(t2 - t1)
#for row in console:
#print(''.join(row))
'''
camera_pos = vec3(0,-50,0)
camera_dir = vec3(0,1,0)
d = 50
screen_center = camera_pos + normalized(camera_dir) * d
W, H = 100, 100
img = [[' ' for w in range (0, H)] for h in range(0, W)]
out = open('out.ppm', 'w')
out.write('P3\n{0} {1} 255\n'.format(W, H))
out.close()
for row in img:
print(''.join(row))
'''
|
import wx
import re
from Properties import *
from DynamicDialog import *
class ChoiceWidget:
def __init__(self, property):
self.property = property
self.isChecked = False
def CreateWidget(self, parentFrame):
box = wx.BoxSizer(wx.HORIZONTAL)
checkId = wx.NewId()
control = wx.CheckBox(parentFrame, checkId, self.property.Name, style=wx.ALIGN_RIGHT)
self.isChecked = self.property.GetValue()
control.SetValue(self.isChecked)
box.Add(control, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
parentFrame.Bind(wx.EVT_CHECKBOX, self.BoxChecked, id=checkId)
self.control = control
control.Pipename = self.property.Name
return box
def BoxChecked(self, event):
self.isChecked = event.IsChecked()
self.SetFromWidget()
def SetFromWidget(self):
self.property.SetValue(self.isChecked)
class IntWidget:
def __init__(self, property):
self.property = property
self.numericDigit = re.compile(r"[+-.eE]|[0-9]")
self.numericString = re.compile(r"[+-]?\d+")
def CreateWidget(self, parentFrame):
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(parentFrame, -1, self.property.Name)
box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
control = wx.TextCtrl(parentFrame, -1, str(self.property.GetValue()))
box.Add(control, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
#parentFrame.Bind(wx.EVT_TEXT, self.TextEvent, control)
#control.Bind(wx.EVT_SET_FOCUS, self.TextEvent)
control.Bind(wx.EVT_KILL_FOCUS, self.ApplyEvent)
control.Bind(wx.EVT_CHAR, self.CharEvent)
self.control = control
#control.Bind(wx.EVT_WINDOW_DESTROY, self.TextEvent)
return box
def CharEvent(self, event):
event.Skip()
#if self.numericDigit.match(chr(event.GetKeyCode())):
# event.Skip()
def ApplyEvent(self, event):
control = event.GetEventObject()
self.SetFromWidget()
def SetFromWidget(self):
val = self.control.GetValue()
if self.numericString.match(val):
self.property.SetValue(int(val))
class FloatWidget:
def __init__(self, property):
self.property = property
self.numericDigit = re.compile(r"[+-.eE]|[0-9]")
self.numericString = re.compile(r"[+-]?(\d+[.]?\d*|.\d+)([eE][+-]\d+)?")
def CreateWidget(self, parentFrame):
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(parentFrame, -1, self.property.Name)
box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
control = wx.TextCtrl(parentFrame, -1, str(self.property.GetValue()))
box.Add(control, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
#parentFrame.Bind(wx.EVT_TEXT, self.TextEvent, control)
#control.Bind(wx.EVT_SET_FOCUS, self.TextEvent)
control.Bind(wx.EVT_KILL_FOCUS, self.ApplyEvent)
control.Bind(wx.EVT_CHAR, self.CharEvent)
self.control = control
#control.Bind(wx.EVT_WINDOW_DESTROY, self.TextEvent)
return box
def CharEvent(self, event):
event.Skip()
#if self.numericDigit.match(chr(event.GetKeyCode())):
# event.Skip()
def ApplyEvent(self, event):
control = event.GetEventObject()
self.SetFromWidget()
def SetFromWidget(self):
val = self.control.GetValue()
if self.numericString.match(val):
self.property.SetValue(float(val))
class ColorWidget:
def __init__(self, property):
self.property = property
self.color = None
def CreateWidget(self, parentFrame):
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(parentFrame, -1, self.property.Name)
box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
colorId = wx.NewId()
control = wx.Button(parentFrame, colorId, "set color")
box.Add(control, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
parentFrame.Bind(wx.EVT_BUTTON, self.OnButton, control, id=colorId)
self.control = control
self.parentFrame = parentFrame
return box
def OnButton(self, event):
dlg = wx.ColourDialog(self.parentFrame)
dlg.GetColourData().SetChooseFull(True)
c = self.property.GetValue()
charColor = map(lambda x: int(round(255*x)), c)
dlg.GetColourData().SetColour(charColor)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetColourData()
self.color = data.GetColour().Get()
dlg.Destroy()
self.SetFromWidget();
def SetFromWidget(self):
if self.color:
c = self.color
self.property.SetValue((float(c[0])/255,float(c[1])/255, float(c[2])/255))
class PropertyWxMediator:
def __init__(self, properties):
self.Properties = properties
self.Widgets = []
self.PropertyToWidget = { FloatProperty : FloatWidget, IntProperty : IntWidget,
ColorProperty : ColorWidget, ChoiceProperty : ChoiceWidget
}
def WidgetForItem(self, parent, addItem):
property = self.Properties[addItem]
box = None
if self.PropertyToWidget.has_key(property.__class__):
widgetDecorator = self.PropertyToWidget[property.__class__](property)
box = widgetDecorator.CreateWidget(parent)
self.Widgets.append(widgetDecorator)
return box
def size(self):
return len(self.Properties)
def Apply(self, event):
for widget in self.Widgets:
widget.SetFromWidget()
# Testing below this line.
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = wx.Frame(None, -1, "Test Window", size=wx.Size(400,400))
frame.Show(True)
props = [ TestFloat(), TestColor(), TestChoice() ]
dialog = DynamicDialog(PropertyWxMediator(props), frame, -1, "test dialog")
dialog.Show()
return True
if __name__ == "__main__":
app = App(0)
app.MainLoop()
|
import socket
def Main():
host = "67.222.1.194"
port = 80
mysocket = socket.socket()
mysocket.connect((host, port))
message = input("->")
while message != 'q':
mysocket.send(message.encode())
data = mysocket.recv(2000).decode()
print('This is your balance: ' + data)
message = input("->")
mysocket.close()
if __name__ =='__main__':
Main()
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import os
from .. import Backend
if os.environ.get('SYM_STRICT_TESTING', '0')[:1].lower() in ('1', 't'):
AVAILABLE_BACKENDS = list(Backend.backends)
else:
AVAILABLE_BACKENDS = []
for k in Backend.backends:
try:
__import__(k)
except ImportError:
pass
else:
AVAILABLE_BACKENDS.append(k)
|
"""WizardKit: repairs module init"""
# vim: sts=2 sw=2 ts=2
import platform
if platform.system() == 'Windows':
from . import win
|
import torch
import argparse
def parameter_setup():
parser = argparse.ArgumentParser()
# Training Parameters
parser.add_argument("-p",
"--pretrain",
help="Turn on training model from scratch",
action="store_true",
default=False)
parser.add_argument("-s",
"--save",
help="Turn on saving the generator and discriminator",
action="store_true",
default=False)
parser.add_argument(
"--pre_G",
help="setting location for pre-trained Generator",
#default="./netG_10_epoch_state_dict")
default='./celeba_pretrained_generator')
parser.add_argument(
"--pre_D",
help="setting location for pre-trained Discriminator",
#default="./netD_10_epoch_state_dict")
default='./celeba_pretrained_discriminator')
parser.add_argument(
"--data_root",
help="setting location for training data")
#default="./data/AF_Mini")
parser.add_argument("--batch_size",
type=int,
help="setting batch_size",
default=4)
parser.add_argument("--num_shots",
type=int,
help="number of fine-tuning examples",
default=-1)
parser.add_argument(
"--img_freq",
type=int,
help="setting frequency (every n iteration) of saving images",
default=50)
parser.add_argument(
"--score_freq",
type=int,
help="setting frequency (every n iteration) of generating scores (default -1 = last iteration only)",
default=-1)
parser.add_argument("--image_size",
type=int,
help="setting image_size",
default=64)
parser.add_argument("--workers",
type=int,
help="setting workers for data load",
default=2)
parser.add_argument("--num_epochs",
type=int,
help="setting number of epochs",
default=100)
parser.add_argument(
"--D_lr",
type=float,
help="Setting learning rate for discriminator",
#default=0.0002)
default=0.0006)
parser.add_argument("--D_update_rate",
type=int,
help="setting the discriminator update rate",
default=1)
parser.add_argument(
"--D_beta1",
type=float,
help="Setting learning rate for discriminator Adam optimizer beta 1",
default=0.5)
parser.add_argument(
"--D_beta2",
type=float,
help="Setting learning rate for discriminator Adam optimizer beta 2",
default=0.999)
parser.add_argument("--G_lr",
type=float,
help="Setting learning rate for generator",
default=0.0002)
parser.add_argument(
"--G_beta1",
type=float,
help="Setting learning rate for generator Adam optimizer beta 1",
default=0.5)
parser.add_argument(
"--G_beta2",
type=float,
help="Setting learning rate for generator Adam optimizer beta 2",
default=0.999)
parser.add_argument("--ngpu",
type=int,
help="Number of GPU available",
default=torch.cuda.device_count())
# EWC Parameters
parser.add_argument(
"--ewc_data_root",
help="setting location for pre-trained data root",
#default="./data/AF_Mini")
default="./data/CelebA/")
parser.add_argument("--G_ewc_lambda",
type=float,
help="Setting ewc penalty lambda coefficient ",
default=600)
parser.add_argument("--D_ewc_lambda",
type=float,
help="Setting ewc penalty lambda coefficient ",
default=0)
#GAN Hack parameters
parser.add_argument(
"--instance_noise_sigma",
type=float,
help="Setting instant noise std dev inital value (annealed to 0)",
default=0)
parser.add_argument(
"--label_smoothing_p",
type=float,
help="Setting one sided label smoothing probability of wrong label",
default=0)
args = parser.parse_args()
train_dict = dict()
for ele in args._get_kwargs():
train_dict[ele[0]] = ele[1]
train_dict["device"] = torch.device("cuda:0" if (
torch.cuda.is_available() and train_dict['ngpu'] > 0) else "cpu")
# training setup for EWC
ewc_dict = {
"G_ewc_lambda": train_dict['G_ewc_lambda'],
"D_ewc_lambda": train_dict['D_ewc_lambda'],
"ewc_data_root": train_dict['ewc_data_root']
}
return train_dict, ewc_dict
|
from pathlib import Path
import pytest
from hyperstyle.src.python.review.application_config import LanguageVersion
from analysis.src.python.evaluation.common.pandas_util import (
equal_df, filter_df_by_language, get_solutions_df_by_file_path,
)
from analysis.test.python.evaluation import PANDAS_UTIL_DIR_PATH
from analysis.src.python.evaluation.common.file_util import get_name_from_path
from analysis.src.python.evaluation.common.args_util import get_in_and_out_list
RESOURCES_PATH = PANDAS_UTIL_DIR_PATH / 'filter_by_language'
IN_FILE_TO_LANGUAGES = {
'in_1.csv': set(LanguageVersion),
'in_2.csv': set(),
'in_3.csv': [LanguageVersion.PYTHON_3],
'in_4.csv': [LanguageVersion.PYTHON_3, LanguageVersion.PYTHON_3],
'in_5.csv': [LanguageVersion.PYTHON_3, LanguageVersion.JAVA_11],
}
IN_AND_OUT_FILES = get_in_and_out_list(RESOURCES_PATH)
@pytest.mark.parametrize(('in_file', 'out_file'), IN_AND_OUT_FILES)
def test(in_file: Path, out_file: Path):
in_df = get_solutions_df_by_file_path(in_file)
out_df = get_solutions_df_by_file_path(out_file)
filtered_df = filter_df_by_language(in_df, IN_FILE_TO_LANGUAGES[get_name_from_path(str(in_file))])
assert equal_df(out_df, filtered_df)
|
import torch
import os
import glob
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
from torchvision import transforms
from monoscene.data.utils.helpers import (
vox2pix,
compute_local_frustums,
compute_CP_mega_matrix,
)
class KittiDataset(Dataset):
def __init__(
self,
split,
root,
preprocess_root,
project_scale=2,
frustum_size=4,
color_jitter=None,
fliplr=0.0,
):
super().__init__()
self.root = root
self.label_root = os.path.join(preprocess_root, "labels")
self.n_classes = 20
splits = {
"train": ["00", "01", "02", "03", "04", "05", "06", "07", "09", "10"],
"val": ["08"],
"test": ["11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21"],
}
self.split = split
self.sequences = splits[split]
self.frustum_size = frustum_size
self.project_scale = project_scale
self.output_scale = int(self.project_scale / 2)
self.scene_size = (51.2, 51.2, 6.4)
self.vox_origin = np.array([0, -25.6, -2])
self.fliplr = fliplr
self.voxel_size = 0.2 # 0.2m
self.img_W = 1220
self.img_H = 370
self.color_jitter = (
transforms.ColorJitter(*color_jitter) if color_jitter else None
)
self.scans = []
for sequence in self.sequences:
calib = self.read_calib(
os.path.join(self.root, "dataset", "sequences", sequence, "calib.txt")
)
P = calib["P2"]
T_velo_2_cam = calib["Tr"]
proj_matrix = P @ T_velo_2_cam
glob_path = os.path.join(
self.root, "dataset", "sequences", sequence, "voxels", "*.bin"
)
for voxel_path in glob.glob(glob_path):
self.scans.append(
{
"sequence": sequence,
"P": P,
"T_velo_2_cam": T_velo_2_cam,
"proj_matrix": proj_matrix,
"voxel_path": voxel_path,
}
)
self.normalize_rgb = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
def __getitem__(self, index):
scan = self.scans[index]
voxel_path = scan["voxel_path"]
sequence = scan["sequence"]
P = scan["P"]
T_velo_2_cam = scan["T_velo_2_cam"]
proj_matrix = scan["proj_matrix"]
filename = os.path.basename(voxel_path)
frame_id = os.path.splitext(filename)[0]
rgb_path = os.path.join(
self.root, "dataset", "sequences", sequence, "image_2", frame_id + ".png"
)
data = {
"frame_id": frame_id,
"sequence": sequence,
"P": P,
"T_velo_2_cam": T_velo_2_cam,
"proj_matrix": proj_matrix,
}
scale_3ds = [self.output_scale, self.project_scale]
data["scale_3ds"] = scale_3ds
cam_k = P[0:3, 0:3]
data["cam_k"] = cam_k
for scale_3d in scale_3ds:
# compute the 3D-2D mapping
projected_pix, fov_mask, pix_z = vox2pix(
T_velo_2_cam,
cam_k,
self.vox_origin,
self.voxel_size * scale_3d,
self.img_W,
self.img_H,
self.scene_size,
)
data["projected_pix_{}".format(scale_3d)] = projected_pix
data["pix_z_{}".format(scale_3d)] = pix_z
data["fov_mask_{}".format(scale_3d)] = fov_mask
target_1_path = os.path.join(self.label_root, sequence, frame_id + "_1_1.npy")
target = np.load(target_1_path)
data["target"] = target
target_8_path = os.path.join(self.label_root, sequence, frame_id + "_1_8.npy")
target_1_8 = np.load(target_8_path)
CP_mega_matrix = compute_CP_mega_matrix(target_1_8)
data["CP_mega_matrix"] = CP_mega_matrix
        # Compute the masks, each indicating the voxels of a local frustum
if self.split != "test":
projected_pix_output = data["projected_pix_{}".format(self.output_scale)]
pix_z_output = data[
"pix_z_{}".format(self.output_scale)
]
frustums_masks, frustums_class_dists = compute_local_frustums(
projected_pix_output,
pix_z_output,
target,
self.img_W,
self.img_H,
dataset="kitti",
n_classes=20,
size=self.frustum_size,
)
else:
frustums_masks = None
frustums_class_dists = None
data["frustums_masks"] = frustums_masks
data["frustums_class_dists"] = frustums_class_dists
img = Image.open(rgb_path).convert("RGB")
# Image augmentation
if self.color_jitter is not None:
img = self.color_jitter(img)
# PIL to numpy
img = np.array(img, dtype=np.float32, copy=False) / 255.0
img = img[:370, :1220, :] # crop image
# Fliplr the image
if np.random.rand() < self.fliplr:
img = np.ascontiguousarray(np.fliplr(img))
for scale in scale_3ds:
key = "projected_pix_" + str(scale)
data[key][:, 0] = img.shape[1] - 1 - data[key][:, 0]
data["img"] = self.normalize_rgb(img)
return data
def __len__(self):
return len(self.scans)
@staticmethod
def read_calib(calib_path):
"""
        Adapted from https://github.com/utiasSTARS/pykitti/blob/d3e1bb81676e831886726cc5ed79ce1f049aef2c/pykitti/utils.py#L68
:param calib_path: Path to a calibration text file.
:return: dict with calibration matrices.
"""
calib_all = {}
with open(calib_path, "r") as f:
for line in f.readlines():
if line == "\n":
break
key, value = line.split(":", 1)
calib_all[key] = np.array([float(x) for x in value.split()])
# reshape matrices
calib_out = {}
# 3x4 projection matrix for left camera
calib_out["P2"] = calib_all["P2"].reshape(3, 4)
calib_out["Tr"] = np.identity(4) # 4x4 matrix
calib_out["Tr"][:3, :4] = calib_all["Tr"].reshape(3, 4)
return calib_out
|
#
# PySNMP MIB module HP-PROCURVE-420-PRIVATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-PROCURVE-420-PRIVATE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:36:10 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
mgmt, Counter64, Unsigned32, Counter32, ModuleIdentity, MibIdentifier, Bits, Gauge32, IpAddress, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, ObjectIdentity, enterprises, TimeTicks, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "mgmt", "Counter64", "Unsigned32", "Counter32", "ModuleIdentity", "MibIdentifier", "Bits", "Gauge32", "IpAddress", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "ObjectIdentity", "enterprises", "TimeTicks", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class PhysAddress(OctetString):
pass
class Guage32(Counter32):
pass
class MacAddress(OctetString):
pass
class DisplayString(OctetString):
pass
class TruthValue(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(2, 1))
namedValues = NamedValues(("false", 2), ("true", 1))
hP = MibIdentifier((1, 3, 6, 1, 4, 1, 11))
wireless = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2))
enterprise = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3))
accessPoint = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7))
proCurve = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11))
hPProCuve420 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37))
enterpriseApSys = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 1))
enterpriseApLineMgnt = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 2))
enterpriseApPortMgnt = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3))
enterpriseApFileTransferMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4))
enterpriseApResetMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 5))
enterpriseApIpMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 6))
enterpriseAPdot11 = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7))
swHardwareVer = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swHardwareVer.setStatus('mandatory')
if mibBuilder.loadTexts: swHardwareVer.setDescription('Hardware version of the main board.')
swBootRomVer = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swBootRomVer.setStatus('mandatory')
if mibBuilder.loadTexts: swBootRomVer.setDescription('Boot ROM code version of the main board.')
swOpCodeVer = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swOpCodeVer.setStatus('mandatory')
if mibBuilder.loadTexts: swOpCodeVer.setDescription('Operation code version of the main board.')
swCountryCode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swCountryCode.setStatus('mandatory')
if mibBuilder.loadTexts: swCountryCode.setDescription('Country code of the AP. AL-ALBANIA, DZ-ALGERIA, AR-ARGENTINA, AM-ARMENIA, AU-AUSTRALIA, AT-AUSTRIA, AZ-AZERBAIJAN, BH-BAHRAIN, BY-BELARUS, BE-BELGIUM, BZ-BELIZE, BO-BOLVIA, BR-BRAZIL, BN-BRUNEI_DARUSSALAM, BG-BULGARIA, CA-CANADA, CL-CHILE, CN-CHINA, CO-COLOMBIA, CR-COSTA_RICA, HR-CROATIA, CY-CYPRUS, CZ-CZECH_REPUBLIC, DK-DENMARK, DO-DOMINICAN_REPUBLIC, EC-ECUADOR, EG-EGYPT, EE-ESTONIA, FI-FINLAND, FR-FRANCE, GE-GEORGIA, DE-GERMANY, GR-GREECE, GT-GUATEMALA, HK-HONG_KONG, HU-HUNGARY, IS-ICELAND, IN-INDIA, ID-INDONESIA, IR-IRAN, IE-IRELAND, IL-ISRAEL, IT-ITALY, JP-JAPAN, JO-JORDAN, KZ-KAZAKHSTAN, KP-NORTH_KOREA, KR-KOREA_REPUBLIC, KW-KUWAIT, LV-LATVIA, LB-LEBANON, LI-LIECHTENSTEIN, LT-LITHUANIA, LU-LUXEMBOURG, MO-MACAU, MK-MACEDONIA, MY-MALAYSIA, MX-MEXICO, MC-MONACO, MA-MOROCCO, NA-NORTH_AMERICA, NL-NETHERLANDS, NZ-NEW_ZEALAND, NO-NORWAY, OM-OMAN, PK-PAKISTAN, PA-PANAMA, PE-PERU, PH-PHILIPPINES, PL-POLAND, PT-PORTUGAL, PR-PUERTO_RICO, QA-QATAR, RO-ROMANIA, RU-RUSSIA, SA-SAUDI_ARABIA, SG-SINGAPORE, SK-SLOVAK_REPUBLIC, SI-SLOVENIA, ZA-SOUTH_AFRICA, ES-SPAIN, SE-SWEDEN, CH-SWITZERLAND, SY-SYRIA, TW-TAIWAN, TH-THAILAND, TR-TURKEY, UA-UKRAINE, AE-UNITED_ARAB_EMIRATES, GB-UNITED_KINGDOM, US-UNITED_STATES, UY-URUGUAY, VE-VENEZUELA, VN-VIETNAM')
lineTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 2, 1), )
if mibBuilder.loadTexts: lineTable.setStatus('mandatory')
if mibBuilder.loadTexts: lineTable.setDescription('Table of descriptive and status information about configuration of each RS-232 line in this system')
lineEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 2, 1, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "lineIndex"))
if mibBuilder.loadTexts: lineEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lineEntry.setDescription('An entry in the table, containing information about configuration in one RS232 line of the Access Point.')
lineIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 2, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: lineIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lineIndex.setDescription('This is defined as RS-232 index.')
lineDataBits = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lineDataBits.setStatus('mandatory')
if mibBuilder.loadTexts: lineDataBits.setDescription('This is defined as number of data bits for the RS232 interface.')
lineParity = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(99, 1, 2))).clone(namedValues=NamedValues(("none", 99), ("odd", 1), ("even", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lineParity.setStatus('mandatory')
if mibBuilder.loadTexts: lineParity.setDescription('This is defined as parity of the RS232 interface.')
lineSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lineSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lineSpeed.setDescription('This is defined as the speed of the RS-232 interface.')
lineStopBits = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lineStopBits.setStatus('mandatory')
if mibBuilder.loadTexts: lineStopBits.setDescription('This is defined as the number of stop bits for the RS-232 interface.')
portTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1), )
if mibBuilder.loadTexts: portTable.setStatus('mandatory')
if mibBuilder.loadTexts: portTable.setDescription("Table of descriptive and status information about configuration of each switch port (including expansion slot) in this system. This table also contains information about each trunk (similar to Cisco's EtherChannel).")
portEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "portIndex"))
if mibBuilder.loadTexts: portEntry.setStatus('mandatory')
if mibBuilder.loadTexts: portEntry.setDescription('An entry in the table, containing information about configuration in one switch port of the switch.')
portIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: portIndex.setStatus('mandatory')
if mibBuilder.loadTexts: portIndex.setDescription('This is defined as ifIndex in the IF-MIB.')
portName = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portName.setStatus('mandatory')
if mibBuilder.loadTexts: portName.setDescription('Indicates the port name. This is same as ifAlias in the IF-MIB (RFC2863 or later).')
portType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("other", 1), ("hundredBaseTX", 2), ("hundredBaseFX", 3), ("thousandBaseSX", 4), ("thousandBaseLX", 5), ("thousandBaseT", 6), ("thousandBaseGBIC", 7), ("thousandBaseMiniGBIC", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portType.setStatus('mandatory')
if mibBuilder.loadTexts: portType.setDescription('Indicates the port type.')
portSpeedDpxCfg = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("auto", 1), ("halfDuplex10", 2), ("fullDuplex10", 3), ("halfDuplex100", 4), ("fullDuplex100", 5), ("halfDuplex1000", 6), ("fullDuplex1000", 7))).clone('halfDuplex10')).setMaxAccess("readonly")
if mibBuilder.loadTexts: portSpeedDpxCfg.setStatus('mandatory')
if mibBuilder.loadTexts: portSpeedDpxCfg.setDescription('Set the port speed and duplex mode as follows: halfDuplex10(2) - 10Mbps and half duplex mode fullDuplex10(3) - 10Mbps and full duplex mode halfDuplex100(4) - 100Mbps and half duplex mode fullDuplex100(5) - 100Mbps and full duplex mode halfDuplex1000(6) - 1000Mbps and half duplex mode fullDuplex1000(7) - 1000Mbps and full duplex mode hundredBaseTX port can be set as halfDuplex10(2) fullDuplex10(3) halfDuplex100(4) fullDuplex100(5) hundredBaseFX port can be set as halfDuplex100(4) fullDuplex100(5) thousandBaseSX port can be set as halfDuplex1000(6) fullDuplex1000(7) The actual operating speed and duplex of the port is given by portSpeedDpxStatus.')
portFlowCtrlCfg = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("backPressure", 3), ("dot3xFlowControl", 4))).clone('enabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFlowCtrlCfg.setStatus('mandatory')
if mibBuilder.loadTexts: portFlowCtrlCfg.setDescription('(1) Flow control mechanism is enabled. If the port type is hundredBaseTX or thousandBaseSX: When the port is operating in halfDuplex mode, the port uses backPressure flow control mechanism. When the port is operating in fullDuplex mode, the port uses IEEE 802.3x flow control mechanism. If the port type is hundredBaseFX: When the port is operating in halfDuplex mode, the port uses backPressure flow control mechanism. When the port is operating in fullDuplex mode, Flow control mechanism will not function. (2) Flow control mechanism is disabled. (3) Flow control mechanism is backPressure. when the port is in fullDuplex mode.This flow control mechanism will not function. (4) Flow control mechanism is IEEE 802.3x flow control. when the port is in halfDuplex mode.This flow control mechanism will not function. hundredBaseTX and thousandBaseSX port can be set as: enabled(1), disabled(2), backPressure(3), dot3xFlowControl(4). hundredBaseFX port can be set as: enabled(1), disabled(2), backPressure(3). The actual flow control mechanism is used given by portFlowCtrlStatus.')
portCapabilities = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(99, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))).clone(namedValues=NamedValues(("portCap10half", 99), ("portCap10full", 1), ("portCap100half", 2), ("portCap100full", 3), ("portCap1000half", 4), ("portCap1000full", 5), ("reserved6", 6), ("reserved7", 7), ("reserved8", 8), ("reserved9", 9), ("reserved10", 10), ("reserved11", 11), ("reserved12", 12), ("reserved13", 13), ("portCapSym", 14), ("portCapFlowCtrl", 15)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portCapabilities.setStatus('mandatory')
if mibBuilder.loadTexts: portCapabilities.setDescription('Port capabilities.')
portAutonegotiation = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portAutonegotiation.setStatus('mandatory')
if mibBuilder.loadTexts: portAutonegotiation.setDescription('Whether autonegotiation is enabled.')
portSpeedDpxStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("error", 1), ("halfDuplex10", 2), ("fullDuplex10", 3), ("halfDuplex100", 4), ("fullDuplex100", 5), ("halfDuplex1000", 6), ("fullDuplex1000", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portSpeedDpxStatus.setStatus('mandatory')
if mibBuilder.loadTexts: portSpeedDpxStatus.setDescription('The operating speed and duplex mode of the switched port. If this index is a trunk, the speed is the speed of its individual members. If this index is a trunk and the result is inconsistent among its member ports, this value is error(1).')
portFlowCtrlStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 3, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("error", 1), ("backPressure", 2), ("dot3xFlowControl", 3), ("none", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFlowCtrlStatus.setStatus('mandatory')
if mibBuilder.loadTexts: portFlowCtrlStatus.setDescription('(2) BackPressure flow control mechanism is used. (3) IEEE 802.3 flow control mechanism is used. (4) Flow control mechanism is disabled. If this index is a trunk and the result is inconsistent among its member ports, this value is error(1).')
transferStart = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("go", 1), ("nogo", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: transferStart.setStatus('mandatory')
if mibBuilder.loadTexts: transferStart.setDescription('Set to go(1) to start a transfer.')
transferType = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ftp", 1), ("tftp", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: transferType.setStatus('mandatory')
if mibBuilder.loadTexts: transferType.setDescription('Type of file to transfer.')
fileType = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("opcode", 1), ("config", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: fileType.setStatus('mandatory')
if mibBuilder.loadTexts: fileType.setDescription('Type of file to transfer.')
srcFile = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: srcFile.setStatus('mandatory')
if mibBuilder.loadTexts: srcFile.setDescription('The source file name for TFTP transfer when a transfer is next requested via this MIB. This value is set to the zero length string when no file name has been specified.')
destFile = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: destFile.setStatus('mandatory')
if mibBuilder.loadTexts: destFile.setDescription('The destination file name for TFTP transfer when a transfer is next requested via this MIB. This value is set to the zero length string when no file name has been specified.')
fileServer = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: fileServer.setStatus('mandatory')
if mibBuilder.loadTexts: fileServer.setDescription("The IP address of the TFTP server for transfer when a download is next requested via this MIB. This value is set to `0.0.0.0' when no IP address has been specified.")
userName = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: userName.setStatus('mandatory')
if mibBuilder.loadTexts: userName.setDescription('The username specified for an FTP Transfer.')
password = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 4, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: password.setStatus('mandatory')
if mibBuilder.loadTexts: password.setDescription('The password specified for an FTP Transfer.')
restartOpCodeFile = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 5, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: restartOpCodeFile.setStatus('mandatory')
if mibBuilder.loadTexts: restartOpCodeFile.setDescription('Name of op-code file for start-up.')
restartControl = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 5, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("running", 1), ("warmBoot", 2), ("coldBoot", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: restartControl.setStatus('mandatory')
if mibBuilder.loadTexts: restartControl.setDescription('Setting this object to warmBoot(2) causes the device to restart the application software with current configuration parameters saved in non-volatile memory. Setting this object to coldBoot(3) causes the device to reinitialize configuration parameters in non-volatile memory to default values and restart the application software. When the device is running normally, this variable has a value of running(1).')
netConfigIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 6, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netConfigIPAddress.setStatus('mandatory')
if mibBuilder.loadTexts: netConfigIPAddress.setDescription('The IP address of this Net interface. The default value for this object is 0.0.0.0. If either the netConfigIPAddress or netConfigSubnetMask are 0.0.0.0, then when the device boots, it may use DHCP to try to figure out what these values should be. If DHCP fails, before the device can talk on the network, this value must be configured (e.g., through a terminal attached to the device).')
netConfigSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 6, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netConfigSubnetMask.setStatus('mandatory')
if mibBuilder.loadTexts: netConfigSubnetMask.setDescription('The subnet mask of this Net interface. The default value for this object is 0.0.0.0. If either the netConfigIPAddress or netConfigSubnetMask are 0.0.0.0, then when the device boots, it may use DHCP to try to figure out what these values should be. If DHCP fails, before the device can talk on the network, this value must be configured (e.g., through a terminal attached to the device).')
netDefaultGateway = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 6, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netDefaultGateway.setStatus('mandatory')
if mibBuilder.loadTexts: netDefaultGateway.setDescription('The IP Address of the default gateway. If this value is undefined or unknown, it shall have the value 0.0.0.0.')
ipHttpState = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 6, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipHttpState.setStatus('mandatory')
if mibBuilder.loadTexts: ipHttpState.setDescription('Whether HTTP is enabled.')
ipHttpPort = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 6, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipHttpPort.setStatus('mandatory')
if mibBuilder.loadTexts: ipHttpPort.setDescription('The port number for HTTP.')
hpdot11StationConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 1), )
if mibBuilder.loadTexts: hpdot11StationConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11StationConfigTable.setDescription('Table of descriptive and status information about configuration of each radio of the AP.')
hpdot11StationConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 1, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "hpdot11portIndex"))
if mibBuilder.loadTexts: hpdot11StationConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11StationConfigEntry.setDescription('An entry in the table, containing information about configuration in one radio of the AP.')
hpdot11portIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: hpdot11portIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11portIndex.setDescription('Radio index of the AP.')
hpdot11DesiredSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11DesiredSSID.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11DesiredSSID.setDescription('This attribute reflects the Service Set ID used in the DesiredSSID parameter of the most recent MLME_Scan.request. This value may be modified by an external management entity and used by the local SME to make decisions about the Scanning process.')
hpdot11BeaconPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11BeaconPeriod.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11BeaconPeriod.setDescription('This attribute shall specify the number of ms that a station shall use for scheduling Beacon transmissions. This value is transmitted in Beacon and Probe Response frames.')
hpdot11DTIMPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11DTIMPeriod.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11DTIMPeriod.setDescription('This attribute shall specify the number of beacon intervals that shall elapse between transmission of Beacons frames containing a TIM element whose DTIM Count field is 0. This value is transmitted in the DTIM Period field of Beacon frames.')
hpdot11OperationalRateSet = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 108))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11OperationalRateSet.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11OperationalRateSet.setDescription('This attribute shall specify the set of data rates at which the station may transmit data. Each octet contains a value representing a rate. Each rate shall be within the range from 2 to 127, corresponding to data rates in increments of 500 kb/s from 1 Mbit/s to 63.5 Mbit/s, and shall be supported (as indicated in the supported rates table) for receiving data. This value is reported in transmitted Beacon, Probe Request, Probe Response, Association Request, Association Response, Reassociation Request, and Reassociation Response frames, and is used to determine whether a BSS with which the station desires to synchronize is suitable. It is also used when starting a BSS, as specified in 10.3.')
hpdot11AuthenticationAlgorithm = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("openSystem", 1), ("sharedKey", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11AuthenticationAlgorithm.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11AuthenticationAlgorithm.setDescription('This attribute shall be a set of all the authentication algorithms supported by the STAs. The following are the default values and the associated algorithm. Value = 1: Open System Value = 2: Shared Key')
hpdot11PrivacyTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 2), )
if mibBuilder.loadTexts: hpdot11PrivacyTable.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11PrivacyTable.setDescription('Table of descriptive and status information about configuration of each radio of the AP.')
hpdot11PrivacyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 2, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "hpdot11PrivacyportIndex"))
if mibBuilder.loadTexts: hpdot11PrivacyEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11PrivacyEntry.setDescription('An entry in the table, containing information about configuration in one radio of the AP.')
hpdot11PrivacyportIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: hpdot11PrivacyportIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11PrivacyportIndex.setDescription('Radio index of the AP.')
hpdot11PrivacyInvoked = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11PrivacyInvoked.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11PrivacyInvoked.setDescription('When this attribute is true, it shall indicate that the IEEE 802.11 WEP mechanism is used for transmitting frames of type Data. The default value of this attribute shall be false.')
hpdot11WEPDefaultKeyID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11WEPDefaultKeyID.setReference('ISO/IEC 8802-11:1999, 8.3.2')
if mibBuilder.loadTexts: hpdot11WEPDefaultKeyID.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11WEPDefaultKeyID.setDescription('This attribute shall indicate the use of the first, second, or third element of the WEPDefaultKeys array when set to values of zero, one, or two(the fourth are reserved for dynamic key). The default value of this attribute shall be 0.')
hpdot11WEPKeyMappingLength = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11WEPKeyMappingLength.setReference('ISO/IEC 8802-11:1999, 8.3.2')
if mibBuilder.loadTexts: hpdot11WEPKeyMappingLength.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11WEPKeyMappingLength.setDescription('The maximum number of tuples that dot11WEPKeyMappings can hold.')
hpdot11mac = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 3))
hpdot11OperationTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 3, 1), )
if mibBuilder.loadTexts: hpdot11OperationTable.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11OperationTable.setDescription('Table of descriptive and status information about configuration of each radio of the AP.')
hpdot11OperationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 3, 1, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "hpdot11OperationIndex"))
if mibBuilder.loadTexts: hpdot11OperationEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11OperationEntry.setDescription('An entry in the table, containing information about configuration in one radio of the AP.')
hpdot11OperationIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 3, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: hpdot11OperationIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11OperationIndex.setDescription('Radio index of the AP.')
hpdot11RTSThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2347))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11RTSThreshold.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11RTSThreshold.setDescription('This attribute shall indicate the number of octets in an MPDU, below which an RTS/CTS handshake shall not be performed. An RTS/CTS handshake shall be performed at the beginning of any frame exchange sequence where the MPDU is of type Data or Management, the MPDU has an individual address in the Address1 field, and the length of the MPDU is greater than this threshold. (For additional details, refer to Table 21 in 9.7.) Setting this attribute to be larger than the maximum MSDU size shall have the effect of turning off the RTS/CTS handshake for frames of Data or Management type transmitted by this STA. Setting this attribute to zero shall have the effect of turning on the RTS/CTS handshake for all frames of Data or Management type transmitted by this STA. The default value of this attribute shall be 2347.')
hpdot11FragmentationThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(256, 2346))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11FragmentationThreshold.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11FragmentationThreshold.setDescription('This attribute shall specify the mandatory maximum size, in octets, of the MPDU that may be delivered to the PHY. An MSDU shall be broken into fragments if its size exceeds the value of this attribute after adding MAC headers and trailers. An MSDU or MMPDU shall be fragmented when the resulting frame has an individual address in the Address1 field, and the length of the frame is larger than this threshold. The default value for this attribute shall be the lesser of 2346 or the aMPDUMaxLength of the attached PHY and shall never exceed the lesser of 2346 or the aMPDUMaxLength of the attached PHY. The value of this attribute shall never be less than 256.')
hpdot11phy = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 4))
hpdot11PhyOperationTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 4, 1), )
if mibBuilder.loadTexts: hpdot11PhyOperationTable.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11PhyOperationTable.setDescription('Table of descriptive and status information about configuration of each radio of the AP.')
hpdot11PhyOperationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 4, 1, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "hpdot11Index"))
if mibBuilder.loadTexts: hpdot11PhyOperationEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11PhyOperationEntry.setDescription('An entry in the table, containing information about configuration in one radio of the AP.')
hpdot11Index = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 4, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: hpdot11Index.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11Index.setDescription('Radio index of the AP.')
hpdot11CurrentChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 4, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11CurrentChannel.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11CurrentChannel.setDescription('The current operating frequency channel of the network')
hpdot11TurboModeEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(99, 1, 2))).clone(namedValues=NamedValues(("none", 99), ("on", 1), ("off", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpdot11TurboModeEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11TurboModeEnabled.setDescription('This attribute, when true, shall indicate that the proprietary turbo mode option is enabled. The default value of this attribute shall be false.')
hpdot11PreambleLength = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("short", 1), ("long", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11PreambleLength.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11PreambleLength.setDescription('This attribute determines whether or not a short or a long preamble is used to delineate 802.11 frames.')
hpdot11AuthenticationEntry = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 5))
hpdot118021xSupport = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 5, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot118021xSupport.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot118021xSupport.setDescription('This attribute, when true(1), indicates that the Enterprise Access Point supports the 802.1x authentication algorithm.')
hpdot118021xRequired = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 5, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot118021xRequired.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot118021xRequired.setDescription('This attribute, when true(1), indicates that the Enterprise Access Point requires successful 802.1x authentication for any clients accessing the network.')
hpdot11AuthenticationServerTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 6), )
if mibBuilder.loadTexts: hpdot11AuthenticationServerTable.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11AuthenticationServerTable.setDescription('Table of descriptive and status information about configuration of each authentication server.')
hpdot11AuthenticationServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 6, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "hpdot11serverIndex"))
if mibBuilder.loadTexts: hpdot11AuthenticationServerEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11AuthenticationServerEntry.setDescription('An entry in the table, containing information about configuration in one radio of the AP.')
hpdot11serverIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 6, 1, 1), Integer32())
if mibBuilder.loadTexts: hpdot11serverIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11serverIndex.setDescription('Radio index of the AP.')
hpdot11AuthenticationServer = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 6, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11AuthenticationServer.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11AuthenticationServer.setDescription('This value indicates the IP address of the authentication server.')
hpdot11AuthenticationPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1024, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11AuthenticationPort.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11AuthenticationPort.setDescription('This value indicates the UDP port used by the primary authentication server.')
hpdot11AuthenticationKey = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 6, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11AuthenticationKey.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11AuthenticationKey.setDescription('This value indicates the shared key used by the authentication server.')
hpdot11AuthenticationRetransmit = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 6, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11AuthenticationRetransmit.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11AuthenticationRetransmit.setDescription('This value indicates the retransmit timer length used by the authentication server.')
hpdot11AuthenticationTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 6, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11AuthenticationTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11AuthenticationTimeout.setDescription('This value indicates the timeout value (sec) used by the authentication server.')
hpdot11FilterTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 7), )
if mibBuilder.loadTexts: hpdot11FilterTable.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11FilterTable.setDescription('Table of status information about each configured MAC Address Filtering Entry.')
hpdot11FilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 7, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "hpdot11FilterIndex"))
if mibBuilder.loadTexts: hpdot11FilterEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11FilterEntry.setDescription('An entry in the table, containing information about configuration in one radio of the AP.')
hpdot11FilterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 7, 1, 1), Integer32())
if mibBuilder.loadTexts: hpdot11FilterIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11FilterIndex.setDescription('Filter index.')
hpdot11FilterAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 7, 1, 2), PhysAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11FilterAddress.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11FilterAddress.setDescription('This value indicates the MAC address of the filter entry.')
hpdot11FilterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(30, 31))).clone(namedValues=NamedValues(("allowed", 30), ("denied", 31)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11FilterStatus.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11FilterStatus.setDescription('This value indicates the status of the filter entry. If allowed, the client is allowed access to the network. If denied, no frames will be forwarded to the network from the client.')
hpdot11smt = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 8))
hpdot11WEPDefaultKeys11g = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 8, 1))
hpdot11WEPDefaultKeys11gTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 8, 1, 1), )
if mibBuilder.loadTexts: hpdot11WEPDefaultKeys11gTable.setReference('IEEE Std 802.11-1997, 8.3.2')
if mibBuilder.loadTexts: hpdot11WEPDefaultKeys11gTable.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11WEPDefaultKeys11gTable.setDescription('Conceptual table for WEP default keys. This table shall contain the four WEP default secret key values corresponding to the four possible KeyID values. The WEP default secret keys are logically WRITE-ONLY. Attempts to read the entries in this table shall return unsuccessful status and values of null or zero. The default value of each WEP default key shall be null.')
hpdot11WEPDefaultKeys11gEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 8, 1, 1, 1), ).setIndexNames((0, "HP-PROCURVE-420-PRIVATE-MIB", "dot11WEPDefaultKey11gLength"))
if mibBuilder.loadTexts: hpdot11WEPDefaultKeys11gEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11WEPDefaultKeys11gEntry.setDescription('An Entry (conceptual row) in the WEP Default Keys Table. ifIndex - Each 802.11 interface is represented by an ifEntry. Interface tables in this MIB module are indexed by ifIndex.')
hpdot11WEPDefaultKey11gLength = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 8, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(64, 128, 152))).clone(namedValues=NamedValues(("sixtyFour", 64), ("oneHundredTwentyEight", 128), ("oneHundredFiftyTwo", 152))).clone('oneHundredTwentyEight')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11WEPDefaultKey11gLength.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11WEPDefaultKey11gLength.setDescription('A 40(64)-bits [5 octets WEP], 104(128)-bits [13 octets] or 128(152)-bits [16 octets]')
hpdot11WEPDefaultKey11gIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 8, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4)))
if mibBuilder.loadTexts: hpdot11WEPDefaultKey11gIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11WEPDefaultKey11gIndex.setDescription('The auxiliary variable used to identify instances of the columnar objects in the WEP Default Keys Table. The value of this variable is equal to the WEPDefaultKeyID + 1')
hpdot11WEPDefaultKey11gValue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 3, 7, 11, 37, 7, 8, 1, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpdot11WEPDefaultKey11gValue.setStatus('mandatory')
if mibBuilder.loadTexts: hpdot11WEPDefaultKey11gValue.setDescription('A 40(64)-bits [5 octets WEP] or 104(128)-bits [13 octets] default secret key value.')
mibBuilder.exportSymbols("HP-PROCURVE-420-PRIVATE-MIB", hpdot11Index=hpdot11Index, ipHttpState=ipHttpState, transferStart=transferStart, portCapabilities=portCapabilities, password=password, hpdot11WEPKeyMappingLength=hpdot11WEPKeyMappingLength, lineDataBits=lineDataBits, hpdot11WEPDefaultKeys11g=hpdot11WEPDefaultKeys11g, hpdot11FilterStatus=hpdot11FilterStatus, lineTable=lineTable, hpdot11smt=hpdot11smt, swOpCodeVer=swOpCodeVer, fileType=fileType, enterpriseApPortMgnt=enterpriseApPortMgnt, hpdot11WEPDefaultKeyID=hpdot11WEPDefaultKeyID, proCurve=proCurve, lineIndex=lineIndex, enterprise=enterprise, enterpriseAPdot11=enterpriseAPdot11, hpdot118021xRequired=hpdot118021xRequired, restartControl=restartControl, hpdot11serverIndex=hpdot11serverIndex, hpdot11WEPDefaultKey11gIndex=hpdot11WEPDefaultKey11gIndex, enterpriseApLineMgnt=enterpriseApLineMgnt, hpdot11AuthenticationServerEntry=hpdot11AuthenticationServerEntry, hPProCuve420=hPProCuve420, netDefaultGateway=netDefaultGateway, PhysAddress=PhysAddress, hpdot11OperationTable=hpdot11OperationTable, hpdot11RTSThreshold=hpdot11RTSThreshold, srcFile=srcFile, Guage32=Guage32, transferType=transferType, fileServer=fileServer, hpdot11DesiredSSID=hpdot11DesiredSSID, portAutonegotiation=portAutonegotiation, hpdot11AuthenticationServerTable=hpdot11AuthenticationServerTable, ipHttpPort=ipHttpPort, hpdot11phy=hpdot11phy, netConfigSubnetMask=netConfigSubnetMask, hpdot11PhyOperationEntry=hpdot11PhyOperationEntry, hpdot11StationConfigTable=hpdot11StationConfigTable, portFlowCtrlStatus=portFlowCtrlStatus, userName=userName, hpdot11BeaconPeriod=hpdot11BeaconPeriod, lineStopBits=lineStopBits, MacAddress=MacAddress, hpdot11FilterIndex=hpdot11FilterIndex, hpdot11AuthenticationKey=hpdot11AuthenticationKey, hpdot11AuthenticationServer=hpdot11AuthenticationServer, hpdot11WEPDefaultKey11gLength=hpdot11WEPDefaultKey11gLength, lineEntry=lineEntry, DisplayString=DisplayString, portType=portType, hpdot11PrivacyportIndex=hpdot11PrivacyportIndex, enterpriseApResetMgt=enterpriseApResetMgt, hpdot11OperationEntry=hpdot11OperationEntry, accessPoint=accessPoint, portEntry=portEntry, portIndex=portIndex, portName=portName, wireless=wireless, portFlowCtrlCfg=portFlowCtrlCfg, portSpeedDpxStatus=portSpeedDpxStatus, hpdot11PrivacyTable=hpdot11PrivacyTable, hpdot11WEPDefaultKeys11gTable=hpdot11WEPDefaultKeys11gTable, enterpriseApFileTransferMgt=enterpriseApFileTransferMgt, hpdot11AuthenticationRetransmit=hpdot11AuthenticationRetransmit, enterpriseApSys=enterpriseApSys, hpdot11FilterTable=hpdot11FilterTable, hpdot11FragmentationThreshold=hpdot11FragmentationThreshold, netConfigIPAddress=netConfigIPAddress, restartOpCodeFile=restartOpCodeFile, hpdot11AuthenticationPort=hpdot11AuthenticationPort, enterpriseApIpMgt=enterpriseApIpMgt, hpdot11AuthenticationAlgorithm=hpdot11AuthenticationAlgorithm, hpdot11PrivacyEntry=hpdot11PrivacyEntry, hpdot11FilterAddress=hpdot11FilterAddress, hpdot11OperationalRateSet=hpdot11OperationalRateSet, hpdot11OperationIndex=hpdot11OperationIndex, hpdot11WEPDefaultKeys11gEntry=hpdot11WEPDefaultKeys11gEntry, lineParity=lineParity, TruthValue=TruthValue, portTable=portTable, hpdot11PreambleLength=hpdot11PreambleLength, swBootRomVer=swBootRomVer, hpdot11FilterEntry=hpdot11FilterEntry, hpdot11portIndex=hpdot11portIndex, hP=hP, hpdot11StationConfigEntry=hpdot11StationConfigEntry, hpdot11DTIMPeriod=hpdot11DTIMPeriod, hpdot11AuthenticationEntry=hpdot11AuthenticationEntry, hpdot11PrivacyInvoked=hpdot11PrivacyInvoked, swCountryCode=swCountryCode, 
hpdot11WEPDefaultKey11gValue=hpdot11WEPDefaultKey11gValue, hpdot118021xSupport=hpdot118021xSupport, hpdot11TurboModeEnabled=hpdot11TurboModeEnabled, lineSpeed=lineSpeed, swHardwareVer=swHardwareVer, hpdot11mac=hpdot11mac, hpdot11PhyOperationTable=hpdot11PhyOperationTable, portSpeedDpxCfg=portSpeedDpxCfg, hpdot11CurrentChannel=hpdot11CurrentChannel, hpdot11AuthenticationTimeout=hpdot11AuthenticationTimeout, destFile=destFile)
|
#!/usr/bin/env python
""" Usage:
python split_sequences_by_domain.py input_taxa_file input_fasta_to_split output_16S_fasta output_18S_fasta
"""
from sys import argv
from cogent.parse.fasta import MinimalFastaParser
input_taxa = open(argv[1], "U")
input_fasta = open(argv[2], "U")
output_bact_arch = open(argv[3], "w")
output_euks = open(argv[4], "w")
euk_string = "Eukaryota"
bact_string = "Bacteria"
arch_string = "Archaea"
# Should be able to handle different forms of taxonomy mapping file by searching
# for Eukaryota at the beginning of the taxonomy strings
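# Illustrative mapping line (hypothetical IDs; the second whitespace-separated
# field is the taxonomy string that gets checked below):
#   seq0001    Eukaryota;Opisthokonta;Fungi    0.98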
euk_ids = []
for line in input_taxa:
curr_taxa = line.split()[1][0:15]
if euk_string in curr_taxa:
euk_ids.append(line.split()[0])
# Need to test to make sure shows up in Bacteria or Archaea
else:
if bact_string not in curr_taxa and arch_string not in curr_taxa:
raise ValueError("Eukaryota, Bacteria, and Archaea not found "+\
"in taxa string %s" % curr_taxa)
euk_ids = set(euk_ids)
for label,seq in MinimalFastaParser(input_fasta):
if label in euk_ids:
output_euks.write(">%s\n%s\n" % (label, seq))
else:
output_bact_arch.write(">%s\n%s\n" % (label, seq))
|
import numpy as np
import pandas as pd
import time, datetime
#df_train = pd.read_csv('train_data/train_task_1_2.csv')
answer_sorted = pd.read_csv('answer_metadata_sorted_1_2.csv')
answer_sorted = np.array(answer_sorted)
answer_dict = {}
count = 0
quiz_dict = {}
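# (added note) Column positions below are taken from how the array is indexed:
# item[0] is the AnswerId, item[2] is the quiz id, and item[4] is the per-answer
# timestamp value that is later attached to each row as 'timestamp'.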
for item in answer_sorted:
count+=1
if str(item[2]) == 'nan':
print('nan')
continue
if str(item[0]) == 'nan':
print(count)
continue
answer_dict[int(item[0])] = float(item[4])
quiz_dict[int(item[0])] = int(item[2])
train_all = pd.read_csv('../public_data/train_data/submission_task_1_2.csv')
train_all['timestamp'] = train_all['AnswerId'].apply(lambda x:answer_dict[x])
train_all['quizid'] = train_all['AnswerId'].apply(lambda x:quiz_dict[x])
#train_new = train_all.drop(['AnswerId'], axis = 1)
train_all.to_csv('submission_task_1_2_new.csv',index=False)
train_all = pd.read_csv('../public_data/train_data/train_task_1_2.csv')
train_all['timestamp'] = train_all['AnswerId'].apply(lambda x:answer_dict[x])
train_all['quizid'] = train_all['AnswerId'].apply(lambda x:quiz_dict[x])
#train_new = train_all.drop(['AnswerId'], axis = 1)
train_all.to_csv('train_task_1_2_new.csv',index=False) |
# -*- coding: utf-8 -*-
"""Predicting the price of house.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Xz1nrYtWKSFwecExkEnfzDlWIcdT2CK0
<h1>Predicting the price of house</h1>
"""
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
df = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Dataset/House_Price.csv")
df.head()
df.tail()
df.shape
df.describe()
df.info()
"""<h1>Exploratory Analysis</h1>"""
sns.jointplot(x = "crime_rate", y="price", data=df)
sns.jointplot(x = "n_hot_rooms", y="price", data=df)
sns.jointplot(x="air_qual", y="price", data=df)
sns.jointplot(x="rainfall", y="price", data=df)
sns.countplot(x="bus_ter", data=df)
sns.countplot(x="airport", data=df)
sns.countplot(x="waterbody", data=df)
corr = df.corr()
ax = sns.heatmap(
corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
);
"""<h3>Couple of Observations</h3>
<ol>
<li>Missing values in "n_hos_beds"</li>
<li>Only "Yes" responses in "bus_ter"</li>
<li>Skewness exists in the missing values</li>
<li>Darker colors represent a greater magnitude</li>
</ol>
<h1>Handling missing values</h1>
"""
df.info()
df.n_hos_beds = df.n_hos_beds.fillna(df.n_hos_beds.mean())
df.info()
"""<h1>Variable Manipulation</h1>"""
df['avg_dist'] = (df.dist1 + df.dist2 + df.dist3 + df.dist4)/4
del df['dist1']
del df['dist2']
del df['dist3']
del df['dist4']
df.head()
df.corr()
plt.scatter(df.air_qual, df.parks)
plt.xlabel("Parks")
plt.ylabel("Air Quality")
plt.show()
plt.scatter(df.room_num, df.price)
plt.xlabel("Room number")
plt.ylabel("Price")
plt.show()
"""<h1>Linear Regression</h1>"""
import statsmodels.api as sn
X = sn.add_constant(df['room_num'])
lm = sn.OLS(df['price'], X).fit()
lm.summary()
from sklearn.linear_model import LinearRegression
y = df['price']
X = df[['room_num']]
lm2 = LinearRegression()
lm2.fit(X,y)
print(lm2.intercept_, lm2.coef_)
sns.jointplot(x= df['room_num'], y = df['price'], data=df, kind='reg')
"""<h1>Predicting Linear Regression</h1>
<p>The Equation is:</p>
<p>9.0997 * [room_num] - 34.6592</p>
"""
print("What would be the price if the number of rooms are 6.756?")
print(round((9.0997)*(6.756)-(34.6592),2))
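# Quick cross-check (added; not part of the original notebook): the fitted
# sklearn model lm2 should give roughly the same answer as the equation above.
print(lm2.predict(pd.DataFrame({"room_num": [6.756]})))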
"""<h1>Conclusion</h1>
<p>Hence, this was our evaluation and prediction of the price. More to come in the future.</p>
<small>Kowsar Rahman</small>
""" |
from pyspark.sql.functions import (
count, first, grouping,
mean, stddev, covar_pop,
variance, coalesce,
sum as spark_sum,
min as spark_min,
max as spark_max
)
__all__ = [
"SparkMethods",
"ClassType",
"ClassName",
"InstanceError",
"FormatRead",
"PathWrite",
"FormatWrite",
"OpheliaMLException",
"OpheliaMLMinerException",
"OpheliaUtilitiesException",
"OpheliaReadFileException",
"OpheliaSparkSessionException",
"OpheliaSparkWrapperException",
"OpheliaWriteFileException",
"OpheliaFunctionsException"
]
def SparkMethods():
return {
'sum': spark_sum,
'min': spark_min,
'max': spark_max,
'mean': mean,
'stddev': stddev,
'var': variance,
'first': first,
'count': count,
'coalesce': coalesce,
'covar_pop': covar_pop,
'grouping': grouping
}
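# Usage sketch (illustrative; `df` and the column names are assumptions, not part
# of this module):
#   agg_col = SparkMethods()["sum"]("amount")   # -> pyspark Column: sum(amount)
#   df.groupBy("client_id").agg(agg_col)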
def ClassType(dtype):
return dtype.__class__
def ClassName(dtype):
return dtype.__class__.__name__
def InstanceError(obj, t):
if not isinstance(obj, t):
raise TypeError("Unsupported Type {}".format(ClassName(obj)))
return None
class FormatRead:
def __init__(self):
self.parquet = "parquet"
self.excel = "excel"
self.csv = "csv"
self.json = "json"
self.all = [self.parquet, self.excel, self.csv, self.json]
class FormatWrite:
def __init__(self):
self.parquet = "parquet"
self.excel = "excel"
self.csv = "csv"
self.json = "json"
self.all = [self.parquet, self.excel, self.csv, self.json]
class PathWrite:
def __init__(self):
self.root = "data"
self.dir = "ophelia"
self.out = "out"
self.engine = "engine"
self.model = "model"
def WritePath(self, opt, project):
return f"{self.root}/{self.dir}/{self.out}/{opt}/{project}/"
class OpheliaMLException(Exception):
"""
Ophelia ML Exception
"""
pass
class OpheliaMLMinerException(Exception):
"""
Ophelia ML Miner Exception
"""
pass
class OpheliaReadFileException(Exception):
"""
Ophelia Read File Exception
"""
pass
class OpheliaSparkSessionException(Exception):
"""
Ophelia Spark Session Exception
"""
pass
class OpheliaUtilitiesException(Exception):
"""
Ophelia Utilities Exception
"""
pass
class OpheliaSparkWrapperException(Exception):
"""
Ophelia Spark Wrapper Exception
"""
pass
class OpheliaWriteFileException(Exception):
"""
Ophelia Write File Exception
"""
pass
class OpheliaFunctionsException(Exception):
"""
Ophelia Functions Exception
"""
pass
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ..autograd_cryptensor import AutogradCrypTensor
from .module import Module
class _Loss(Module):
"""
Base criterion class that mimics Pytorch's Loss.
"""
def __init__(self, reduction="mean", skip_forward=False):
super(_Loss, self).__init__()
if reduction != "mean":
raise NotImplementedError("reduction %s not supported" % reduction)
self.reduction = reduction
self.skip_forward = skip_forward
def forward(self, x, y):
raise NotImplementedError("forward not implemented")
def __call__(self, x, y):
return self.forward(x, y)
class MSELoss(_Loss):
r"""
Creates a criterion that measures the mean squared error (squared L2 norm) between
each element in the prediction :math:`x` and target :math:`y`.
The loss can be described as:
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = (x_n - y_n)^2,
where :math:`N` is the batch size, :math:`x` and :math:`y` are tensors of
arbitrary shapes with a total of :math:`n` elements each.
"""
def forward(self, x, y):
assert all(
isinstance(val, AutogradCrypTensor) for val in [y, x]
), "inputs must be AutogradCrypTensors"
assert x.size() == y.size(), "input and target must have the same size"
return (x - y).square().mean()
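# Usage sketch (illustrative; `pred` and `target` stand for AutogradCrypTensors
# of identical shape, e.g. encrypted predictions and labels):
#   criterion = MSELoss()
#   loss = criterion(pred, target)  # __call__ dispatches to forward above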
class L1Loss(_Loss):
r"""
Creates a criterion that measures the mean absolute error between each element in
the prediction :math:`x` and target :math:`y`.
The loss can be described as:
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = \left | x_n - y_n \right |,
where :math:`N` is the batch size, :math:`x` and :math:`y` are tensors of
arbitrary shapes with a total of :math:`n` elements each.
"""
def forward(self, x, y):
assert all(
isinstance(val, AutogradCrypTensor) for val in [y, x]
), "inputs must be AutogradCrypTensors"
assert x.size() == y.size(), "input and target must have the same size"
return (x - y).abs().mean()
class BCELoss(_Loss):
r"""
Creates a criterion that measures the Binary Cross Entropy
between the prediction :math:`x` and the target :math:`y`.
The loss can be described as:
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = - \left [ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right ],
where :math:`N` is the batch size, :math:`x` and :math:`y` are tensors of
arbitrary shapes with a total of :math:`n` elements each.
This is used for measuring the error of a reconstruction in for example
an auto-encoder. Note that the targets :math:`y` should be numbers
between 0 and 1.
"""
def forward(self, x, y):
assert all(
isinstance(val, AutogradCrypTensor) for val in [y, x]
), "inputs must be AutogradCrypTensors"
assert x.size() == y.size(), "input and target must have the same size"
return x.binary_cross_entropy(y, skip_forward=self.skip_forward)
class CrossEntropyLoss(_Loss):
r"""
Creates a criterion that measures cross-entropy loss between the
prediction :math:`x` and the target :math:`y`. It is useful when
training a classification problem with `C` classes.
The prediction `x` is expected to contain raw, unnormalized scores for each class.
The prediction `x` has to be a Tensor of size either :math:`(N, C)` or
:math:`(N, C, d_1, d_2, ..., d_K)`, where :math:`N` is the size of the minibatch,
and with :math:`K \geq 1` for the `K`-dimensional case (described later).
This criterion expects a class index in the range :math:`[0, C-1]` as the
target `y` for each value of a 1D tensor of size `N`.
The loss can be described as:
.. math::
\text{loss}(x, class) = -\log \left(
\frac{\exp(x[class])}{\sum_j \exp(x[j])} \right )
= -x[class] + \log \left (\sum_j \exp(x[j]) \right)
The losses are averaged across observations for each batch
Can also be used for higher dimension inputs, such as 2D images, by providing
an input of size :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`,
where :math:`K` is the number of dimensions, and a target of appropriate shape.
"""
def forward(self, x, y):
assert all(
isinstance(val, AutogradCrypTensor) for val in [y, x]
), "inputs must be AutogradCrypTensors"
assert x.size() == y.size(), "input and target must have the same size"
return x.cross_entropy(y, skip_forward=self.skip_forward)
class BCEWithLogitsLoss(_Loss):
"""
This loss combines a Sigmoid layer and the BCELoss in one single class.
The loss can be described as:
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = - \left [ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right ],
This is used for measuring the error of a reconstruction in for example an
auto-encoder. Note that the targets t[i] should be numbers between 0 and 1.
"""
def forward(self, x, y):
assert all(
isinstance(val, AutogradCrypTensor) for val in [y, x]
), "inputs must be AutogradCrypTensors"
assert x.size() == y.size(), "input and target must have the same size"
return x.binary_cross_entropy_with_logits(y, skip_forward=self.skip_forward)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the fake file system implementation."""
import unittest
from dfvfs.path import fake_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import fake_file_system
from tests import test_lib as shared_test_lib
class FakeFileSystemTest(shared_test_lib.BaseTestCase):
"""Tests for the fake file system."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
self._fake_path_spec = fake_path_spec.FakePathSpec(location='/')
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testOpenAndClose(self):
"""Test the open and close functionality."""
file_system = fake_file_system.FakeFileSystem(self._resolver_context)
self.assertIsNotNone(file_system)
file_system.Open(self._fake_path_spec)
file_system.Close()
def testFileEntryExistsByPathSpec(self):
"""Test the file entry exists by path specification functionality."""
file_system = fake_file_system.FakeFileSystem(self._resolver_context)
self.assertIsNotNone(file_system)
file_system.AddFileEntry(
'/test_data/testdir_fake/file1.txt', file_data=b'FILE1')
file_system.Open(self._fake_path_spec)
path_spec = fake_path_spec.FakePathSpec(
location='/test_data/testdir_fake/file1.txt')
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = fake_path_spec.FakePathSpec(
location='/test_data/testdir_fake/file6.txt')
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))
file_system.Close()
def testGetFileEntryByPathSpec(self):
"""Tests the GetFileEntryByPathSpec function."""
file_system = fake_file_system.FakeFileSystem(self._resolver_context)
self.assertIsNotNone(file_system)
file_system.AddFileEntry(
'/test_data/testdir_fake/file1.txt', file_data=b'FILE1')
file_system.Open(self._fake_path_spec)
path_spec = fake_path_spec.FakePathSpec(
location='/test_data/testdir_fake/file1.txt')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'file1.txt')
path_spec = fake_path_spec.FakePathSpec(
location='/test_data/testdir_fake/file6.txt')
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)
file_system.Close()
def testGetRootFileEntry(self):
"""Test the get root file entry functionality."""
file_system = fake_file_system.FakeFileSystem(self._resolver_context)
self.assertIsNotNone(file_system)
file_system.Open(self._fake_path_spec)
file_entry = file_system.GetRootFileEntry()
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, '')
file_system.Close()
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python3
#=========================== begin_copyright_notice ============================
#
# Copyright (c) 2020-2021 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
#============================ end_copyright_notice =============================
"""
Usage: cisa_gen_intrinsics.py <input_file> <output_path>
This script reads the intrinsics description from the JSON file specified by the <input_file> argument
and generates two files, GenXIntrinsicInfoTable.inc and GenXIntrinsicsBuildMap.inc, into the
path specified by the <output_path> argument.
The JSON file must contain the following mandatory fields: INTRINSICS, OPCODE_GEN and ARGUMENTS_GEN.
*** Field INTRINSICS
Contains descriptions of all intrinsics. Each intrinsic is described in the following format:
intrinsic_name : {
opc: VISA opcode corresponding to the intrinsic
gen_opc: optional field, it aims to distinguish generators of complex opcodes which may
contain sub-opcode field
OPTIONS: list of intrinsic options. Currently only the 'disable' value is supported, which
         means that the intrinsic will be skipped entirely.
<ARGUMENT>: see description below
}
Each argument is given in [key: list] format, where key is the name of the argument and list
is a command for the generator.
The first field of a generator command is the generator name; it tells how to generate code for
fetching the argument value. Each argument generator is described in the ARGUMENTS_GEN map.
For example:
"Surface": ["GENERAL", "UNSIGNED", 10],
Here GENERAL is the generator name, which determines (via "ARGUMENTS_GEN") what code
to generate for getting the argument value.
Generated code:
auto Surface = CreateOperand(II::ArgInfo(UNSIGNED | 10));
or for GenXIntrinsicInfoTable.inc:
GENERAL | UNSIGNED | 10,
To add a new intrinsic you need to add a new description to the INTRINSICS map. If it uses an
opcode that is absent from OPCODE_GEN you also need to add an item for the new opcode there.
For example, let's add a new intrinsic with a new opcode and one new argument generator (NEW_PREDICATION):
"INTRINSICS":
"genx_new": {
"opc": "ISA_NEW",
"exec_size": ["EXECSIZE_FROM_ARG", 1],
"pred": ["NEW_PREDICATION", 1],
"DataOrder": ["BYTE", 5],
"Surface": ["GENERAL", "UNSIGNED", 10],
"DstData": ["RAW", 0],
"Src1Data": ["NULLRAW"]
},
"OPCODE_GEN":
ISA_NEW: "CISA_CALL(Kernel->AppendNew(exec_size, pred, DataOrder, Src1Data, DstData, Surface));"
"ARGUMENTS_GEN":
"NEW_PREDICATION": "CreateNewPredication(II::ArgInfo({args}))",
We also need to add a new function or lambda named CreateNewPredication to GenXCisaBuilder.cpp
*** Field ARGUMENTS_GEN
It is needed only to generate CISA building code (GenXIntrinsicsBuildMap.inc)
Pattern keys that can be used inside a generator:
args - string with the arguments that are passed to the ArgInfo constructor.
value1 - first value in the argument list, needed for the LITERAL generator
dst - name of the variable to which the argument value will be assigned
*** Field OPCODE_GEN
It is needed only to generate CISA building code (GenXIntrinsicsBuildMap.inc)
The final part of the generated code for a single intrinsic is a call to the Finalizer function that
builds the instruction itself. So, each item of this map simply maps an opcode to its build function.
An opcode may not be a real VISA opcode; for example, ISA_VA_SKL_PLUS has different functions to build
instructions with different signatures, depending on its sub-opcode. Thus there may be compound
opcodes for such cases.
"""
import sys
import re
import json
from collections import OrderedDict
HEADER = '''/******************************************************************************
* AUTOGENERATED FILE, DO NOT EDIT!
* Generated by GenXUtilBuild project
*/
'''
def open_and_delete_comments(dscr_filename):
with open(dscr_filename, "r") as jsonfile:
data = jsonfile.readlines()
jsonwithoutcomments = filter(lambda line: "//" not in line, data)
stringjson = "".join(jsonwithoutcomments)
return stringjson
def generate(dscr_filename, out_path):
special_keys = ('gen_opc', 'OPTIONS')
descr = json.loads(open_and_delete_comments(dscr_filename), object_pairs_hook=OrderedDict)
opcode_gen = descr['OPCODE_GEN']
arguments_gen = descr['ARGUMENTS_GEN']
intrinsics = descr['INTRINSICS']
# Convert list to function call string
# Example: [ Func, arg1, arg2] to Func(arg1, arg2)
def gen2str(value):
if isinstance(value, list):
args = []
for v in value[1:]:
args.append(gen2str(v))
return "{}({})".format(value[0], ', '.join(args))
return str(value)
# Recursively search regex in lists
def gen_search(value, regex):
if isinstance(value, list):
for v in value:
if gen_search(v, regex):
return True
return False
return bool(re.search(regex, value))
def isstrinst(opc_gen):
isalt = True
if sys.version_info[0] >= 3:
isalt = isinstance(opc_gen, bytes)
else:
isalt = isinstance(opc_gen, unicode)
return bool(isinstance(opc_gen, str) or isalt)
with open(out_path + '/GenXIntrinsicInfoTable.inc', 'w') as file:
file.write(HEADER)
for name, intr in intrinsics.items():
if 'OPTIONS' in intr and 'disable' in intr['OPTIONS']:
continue
if name == "fma":
file.write('Intrinsic::{},\n'.format(name))
else:
file.write('GenXIntrinsic::{},\n'.format(name))
for key, value in intr.items():
if key in special_keys:
continue
elif key == 'opc':
file.write('LITERAL | {},\n'.format(value))
elif isinstance(value, list):
file.write('{},\n'.format(' | '.join([str(x) for x in value
if (x != 'RAW_OPERANDS' and ('BUILD_ONLY::') not in str(x) )])))
else:
# skip other
pass
file.write('END,\n\n')
def analyseForBuildMap(x):
if isstrinst(x) and 'BUILD_ONLY::' not in str(x):
return 'II::' + x
elif 'BUILD_ONLY::' in str(x):
return str(x).rsplit('BUILD_ONLY::',1)[1]
else:
return str(x)
with open(out_path + '/GenXIntrinsicsBuildMap.inc', 'w') as file:
file.write(HEADER)
file.write('switch(IntrinID) {\n\n')
for name, intr in intrinsics.items():
gen_opc = intr.get('gen_opc')
if not gen_opc:
gen_opc = intr['opc']
opc_gen = opcode_gen.get(gen_opc)
if not opc_gen:
print(intr)
raise RuntimeError("Instruction generator not found")
if isstrinst(opc_gen):
opc_gen = [opc_gen]
assert isinstance(opc_gen, list)
if 'OPTIONS' in intr and 'disable' in intr['OPTIONS']:
continue
if name == "fma":
file.write(' case llvm::Intrinsic::' + name + ': {\n')
else:
file.write(' case llvm::GenXIntrinsic::' + name + ': {\n')
for key, value in intr.items():
if key in special_keys:
continue
# no_assign means that there is no variable that needs to be assigned
no_assign = key in ('twoaddr', 'nobarrier')
# skip items that do not exist in the generator string
if not no_assign and not gen_search(opc_gen, r'\b%s\b'%key):
continue
if key == 'opc':
replace = value
elif isinstance(value, list):
replace = arguments_gen.get(value[0])
if not replace:
print(value)
raise RuntimeError('Key not found!')
if not replace:
continue
context = { 'value1': value[1] if len(value) > 1 else None, 'dst': key,
'args': '{}'.format(' | ').join(
[analyseForBuildMap(x) for x in value if x != 'RAW_OPERANDS']) }
if isinstance(replace, list):
replace = [x.format(**context) for x in replace]
else:
replace = replace.format(**context)
else:
replace = value
assert replace, 'Unknown token'
if isinstance(replace, list):
for replace_item in replace:
file.write(' ' + replace_item + ';\n')
else:
assign = '' if no_assign else 'auto ' + key + ' = '
file.write(' ' + assign + replace + ';\n')
for g in opc_gen:
file.write(' ' + gen2str(g) + ';\n')
file.write(' } break;\n\n')
file.write(''' default:
CI->print(errs());
errs() << '\\n';
report_fatal_error("Unsupported intrinsic!");
break;
}''')
def main():
if len(sys.argv) > 1 and sys.argv[1] == '--help':
print(__doc__)
sys.exit(0)
assert len(sys.argv) > 2, "Missing arguments! Usage: cisa_gen_intrinsics.py <INPUT_FILE> <OUTPUT_PATH>"
generate(sys.argv[1], sys.argv[2])
if __name__ == '__main__':
main()
|
# This is an additional feature for LCD in Ardupy on Wio Terminal
class LCD_print:
def __init__(self, lcd, font_size, bg_color=None, fg_color=None):
self.prints = []
self.pl = 10 * font_size  # Each line is 10 pixels tall per unit of font size
self.lcd = lcd
if bg_color is None:
self.bg_color = lcd.color.BLACK
lcd.fillScreen(lcd.color.BLACK)
else:
self.bg_color = bg_color
lcd.fillScreen(bg_color)
lcd.setTextSize(font_size)
if fg_color is None:
lcd.setTextColor(lcd.color.GREEN)
else:
lcd.setTextColor(fg_color)
self.line = 1
self.line_limit = round(24/font_size)
def println(self, text, l=None):
if l is None:
if self.line <= 0:
self.lcd.drawString(str(text), 0, 0)
self.line = 2
self.prints.append(text)
elif self.line > self.line_limit:
self.prints.pop(0)
self.prints.append(text)
self.lcd.fillScreen(self.bg_color)
for index, _ in enumerate(self.prints):
self.println(_, index)
self.line += 1
else:
self.lcd.drawString(str(text), 0, self.pl*(self.line-1))
self.line += 1
self.prints.append(text)
else:
self.lcd.drawString(str(text), 0, self.pl*l)
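# Minimal usage sketch (assumption: an ArduPy firmware for the Wio Terminal
# that exposes the LCD driver as `from machine import LCD`; adjust the import
# to match your build):
#
#   from machine import LCD
#   lcd = LCD()
#   console = LCD_print(lcd, font_size=2)   # green on black by default
#   console.println("Hello")                # scrolls once the screen is full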
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# https://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import botocore.exceptions
# All exceptions should subclass from Boto3Error in this module.
class Boto3Error(Exception):
"""Base class for all Boto3 errors."""
class ResourceLoadException(Boto3Error):
pass
# NOTE: This doesn't appear to be used anywhere.
# It's probably safe to remove this.
class NoVersionFound(Boto3Error):
pass
# We're subclassing from botocore.exceptions.DataNotFoundError
# to keep backwards compatibility with anyone that was catching
# this low level Botocore error before this exception was
# introduced in boto3.
# Same thing for ResourceNotExistsError below.
class UnknownAPIVersionError(Boto3Error,
botocore.exceptions.DataNotFoundError):
def __init__(self, service_name, bad_api_version,
available_api_versions):
msg = (
"The '%s' resource does not an API version of: %s\n"
"Valid API versions are: %s"
% (service_name, bad_api_version, available_api_versions)
)
# Not using super because we don't want the DataNotFoundError
# to be called, it has a different __init__ signature.
Boto3Error.__init__(self, msg)
class ResourceNotExistsError(Boto3Error,
botocore.exceptions.DataNotFoundError):
"""Raised when you attempt to create a resource that does not exist."""
def __init__(self, service_name, available_services, has_low_level_client):
msg = (
"The '%s' resource does not exist.\n"
"The available resources are:\n"
" - %s\n" % (service_name, '\n - '.join(available_services))
)
if has_low_level_client:
msg += (
"\nConsider using a boto3.client('%s') instead "
"of a resource for '%s'" % (service_name, service_name))
# Not using super because we don't want the DataNotFoundError
# to be called, it has a different __init__ signature.
Boto3Error.__init__(self, msg)
class RetriesExceededError(Boto3Error):
def __init__(self, last_exception, msg='Max Retries Exceeded'):
super(RetriesExceededError, self).__init__(msg)
self.last_exception = last_exception
class S3TransferFailedError(Boto3Error):
pass
class S3UploadFailedError(Boto3Error):
pass
class DynamoDBOperationNotSupportedError(Boto3Error):
"""Raised for operations that are not supported for an operand."""
def __init__(self, operation, value):
msg = (
'%s operation cannot be applied to value %s of type %s directly. '
'Must use AttributeBase object methods (i.e. Attr().eq()) to '
'generate ConditionBase instances first.' %
(operation, value, type(value)))
Exception.__init__(self, msg)
# FIXME: Backward compatibility
DynanmoDBOperationNotSupportedError = DynamoDBOperationNotSupportedError
class DynamoDBNeedsConditionError(Boto3Error):
"""Raised when input is not a condition"""
def __init__(self, value):
msg = (
'Expecting a ConditionBase object. Got %s of type %s. '
'Use AttributeBase object methods (i.e. Attr().eq()) to '
'generate ConditionBase instances.' % (value, type(value)))
Exception.__init__(self, msg)
class DynamoDBNeedsKeyConditionError(Boto3Error):
pass
class PythonDeprecationWarning(Warning):
"""
The Python version being used is scheduled to become unsupported
in a future release. See the warning for specifics.
"""
pass
|
#
# voice-skill-sdk
#
# (C) 2020, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#
#
# Type hints processing
#
import inspect
import logging
from typing import AbstractSet, Any, Callable, Dict, List, Optional, Tuple, Type, Union
from . import intents
from . import entities
from . import responses
from functools import wraps, reduce, partial
logger = logging.getLogger(__name__)
AnyType = Type[Any]
def _is_subclass(cls: Any, classinfo: Union[AnyType, Tuple[AnyType, ...]]) -> bool:
""" Permissive issubclass: does not throw TypeError
:param cls:
:param classinfo: a class or tuple of class objects
:return:
"""
return isinstance(cls, type) and issubclass(cls, classinfo)
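# For example, _is_subclass(5, int) returns False, whereas issubclass(5, int) would raise TypeError.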
def _is_subtype(cls: Any, classinfo) -> bool:
""" Return true if class is a subscripted subclass of classinfo.
:param cls:
:param classinfo: a class or tuple of class objects
:return:
"""
return bool(getattr(cls, '__origin__', None) and _is_subclass(cls.__origin__, classinfo))
def _is_attribute_v2(annotation) -> bool:
""" Return true if annotation contains AttributeV2
:param annotation:
:return:
"""
if isinstance(annotation, (list, tuple)):
return _is_attribute_v2(next(iter(annotation), None))
if _is_subtype(annotation, (List, Tuple)):
args = getattr(annotation, '__args__', None) or annotation
return _is_attribute_v2(next(iter(args), None))
return _is_subtype(annotation, entities.AttributeV2) or _is_subclass(annotation, entities.AttributeV2)
def list_functor(annotation):
""" Convert to List of type values """
to_type = next(iter(annotation), None)
if _is_subtype(to_type, entities.AttributeV2):
return partial(map, attr_v2_functor(to_type.__args__)), list
return partial(map, entities.converter(to_type)), list
def attr_v2_functor(annotation):
""" Convert to AttributeV2 with type value """
to_type = next(iter(annotation), None)
return partial(entities.AttributeV2, mapping=entities.converter(to_type))
def _converters(annotation) -> Tuple:
""" Construct converter functions """
if isinstance(annotation, (list, tuple)):
converter = list_functor(annotation)
elif _is_subtype(annotation, (List, Tuple)):
converter = list_functor(annotation.__args__)
elif _is_subtype(annotation, entities.AttributeV2):
converter = entities.get_entity, attr_v2_functor(annotation.__args__)
elif _is_subclass(annotation, intents.Context):
converter = (lambda c: c,) # No-op
else:
# Get a single attribute and convert to type
converter = entities.get_entity, entities.converter(annotation)
return converter
def get_converters(func_name: str, parameters: AbstractSet[Tuple[str, inspect.Parameter]],
reduce_func: Callable) -> Dict[str, partial]:
""" Helper: Constructs converter functions
a dict of {"name": (f1, f2, ...)} where f1, f2, ... will be applied to handler arguments
:param func_name: function name (used just to throw the exception)
:param parameters: function parameters
:param reduce_func: final reduce function
:return:
"""
converters = {}
for name, param in list(parameters):
if param.annotation == inspect.Parameter.empty:
raise ValueError(f"Function {func_name} - parameter '{name}' has no type hint defined")
converters[name] = partial(reduce_func, _converters(param.annotation))
return converters
def apply(value, func: Callable[[str], Any]) -> Optional[Any]:
""" Apply callable to value, returning EntityValueException if conversion error occurs
:param value:
:param func:
:return:
"""
if value is None:
return None
try:
return func(value)
except Exception as ex: # NOSONAR
logger.error('Exception converting %s with %s: %s ', repr(value), repr(func), repr(ex))
return intents.EntityValueException(ex, value=value, func=func)
def get_inner(f: Callable[..., Any]) -> Callable[..., Any]:
""" Skip inner @intent_handler decorators """
try:
return get_inner(getattr(f, '__wrapped__')) if getattr(f, '__intent_handler__') else f
except AttributeError:
return f
def _call(func: Callable, context, *args, **kwargs):
""" Helper to call inner function """
if context is not None:
args = context, *args
logger.debug('Direct call: "%s" args=%s kwargs=%s', func.__name__, args, kwargs)
return func(*args, **kwargs)
def intent_handler(func: Callable[..., Any] = None,
silent: bool = True,
error_handler: Callable[[str, Exception], Any] = None):
"""
Generic intent handler decorator:
Will check the handler parameters and supply entity values according to the type hints.
An intent handler function is supposed to have the following signature:
handler(context: skill_sdk.intents.Context, entity_one: typing.Any, entity_two: typing.Any, *)
to receive a date in a handler function, use:
@intent_handler
handler(context: skill_sdk.intents.Context, date: datetime.date)
to receive an array of integer values, use:
@intent_handler
handler(context: skill_sdk.intents.Context, int_list: [int])
To suppress conversion errors (should you want to handle them yourself), set `silent` to `True`.
The decorator will then return the exception as a value:
@intent_handler(silent=True)
:param func: decorated function (can be `None` if decorator used without call)
:param silent: if `True`, an exception that occurs during conversion will not be raised but returned as the value
:param error_handler: if set, will be called instead of the decorated function when a conversion error occurs
:return:
"""
if isinstance(func, bool):
silent, func = func, None
def handler_decorator(_func: Callable[..., Any]) -> Callable[..., Any]:
""" The entry point to the decorator """
_reduce = partial(reduce, apply)
inner = get_inner(_func)
signature = inspect.signature(inner)
parameters = signature.parameters.items()
converters = get_converters(inner.__name__, parameters, _reduce)
@wraps(inner)
def wrapper(context=None, *args, **kwargs) -> responses.Response:
""" The entry point to intent handler """
# If we're called without context as first argument, this is a direct call:
# we do not parse the context and simply pass arguments to the decorated function
if not isinstance(context, intents.Context):
return _call(inner, context, *args, **kwargs)
# Otherwise proceed with skill invocation context
kw = _parse_context(context, parameters)
logger.debug('Collected arguments: %s', repr(kw))
ba = signature.bind(**kw)
arguments = {name: converters[name](value) for name, value in ba.arguments.items()}
# Pre-check: if not silent mode, raise EntityValueException or call `error_handler` if set
error = next(iter((name, ex) for name, ex in arguments.items()
if isinstance(ex, intents.EntityValueException)), None)
if error and not silent:
raise error[1]
if error and error_handler:
logger.debug('Exception during conversion, calling error_handler: %s', repr(error_handler))
return error_handler(*error)
logger.debug('Converted arguments to: %s', repr(arguments))
ba.arguments.update(arguments)
return inner(*ba.args, **ba.kwargs)
setattr(wrapper, '__intent_handler__', True)
return wrapper
return handler_decorator(func) if func else handler_decorator
def _parse_context(context: intents.Context, parameters: AbstractSet[Tuple[str, inspect.Parameter]]) -> Dict[str, Any]:
""" This function parses attributes from the invocation context
:param context:
:param parameters:
:return:
"""
result = {
# use "context" as argument, if annotated as Context
name: context if _is_subclass(param.annotation, intents.Context)
# look up attributesV2, if annotated as AttributeV2
else context.attributesV2.get(name) if _is_attribute_v2(param.annotation) and context.attributesV2
# look up the standard attributes
else context.attributes.get(name) if context.attributes
# get from pre-parsed keyword arguments
else None
for name, param in parameters
}
return result
|
"""Contains utilities that extend the Django framework."""
from django.db import models
class PycontModel(models.Model):
"""Base model for the entire application, adding created_at and last_updated_at columns."""
class Meta:
"""Make it abstract."""
abstract = True
created_at = models.DateTimeField(auto_now_add=True)
last_updated_at = models.DateTimeField(auto_now=True)
|
# The RenderPasses and UI code are kept in this file to make it executable in Blender without the need to install it as an addon.
import bpy
import os
import sys
import random
from mathutils import Vector
class RenderPasses:
COMBINED = 'Combined'
ALPHA = 'Alpha'
DEPTH = 'Depth'
MIST = 'Mist'
NORMAL = 'Normal'
SCREEN_SPACE_NORMAL = 'Screen Space Normal'
MOTION_VECTOR = 'Motion Vector'
OBJECT_ID = 'Object ID'
MATERIAL_ID = 'Material ID'
UV = 'UV'
SHADOW = 'Shadow'
AMBIENT_OCCLUSION = 'Ambient Occlusion'
EMISSION = 'Emission'
ENVIRONMENT = 'Environment'
DIFFUSE_DIRECT = 'Diffuse Direct'
DIFFUSE_INDIRECT = 'Diffuse Indirect'
DIFFUSE_COLOR = 'Diffuse Color'
GLOSSY_DIRECT = 'Glossy Direct'
GLOSSY_INDIRECT = 'Glossy Indirect'
GLOSSY_COLOR = 'Glossy Color'
TRANSMISSION_DIRECT = 'Transmission Direct'
TRANSMISSION_INDIRECT = 'Transmission Indirect'
TRANSMISSION_COLOR = 'Transmission Color'
SUBSURFACE_DIRECT = 'Subsurface Direct'
SUBSURFACE_INDIRECT = 'Subsurface Indirect'
SUBSURFACE_COLOR = 'Subsurface Color'
VOLUME_DIRECT = 'Volume Direct'
VOLUME_INDIRECT = 'Volume Indirect'
class DeepDenoiserRender:
@staticmethod
def prepare_image_settings():
render = bpy.context.scene.render
image_settings = render.image_settings
image_settings.file_format = 'OPEN_EXR'
image_settings.color_mode = 'RGBA'
image_settings.color_depth = '32'
image_settings.exr_codec = 'ZIP'
render.use_border = False
render.use_crop_to_border = False
render.use_file_extension = True
render.use_stamp = False
# Save buffers have to be disabled due to a bug:
# if they are enabled, the volumetric passes are not saved.
render.use_save_buffers = False
@staticmethod
def prepare_cycles():
bpy.context.scene.render.engine = 'CYCLES'
scene = bpy.context.scene
cycles = scene.cycles
# No branched path tracing for now.
cycles.progressive = 'PATH'
cycles_render_layer = scene.render.layers.active.cycles
cycles_render_layer.use_denoising = False
@staticmethod
def prepare_passes():
render_layer = bpy.context.scene.render.layers[0]
cycles_render_layer = render_layer.cycles
render_layer.use_pass_diffuse_direct = True
render_layer.use_pass_diffuse_indirect = True
render_layer.use_pass_diffuse_color = True
render_layer.use_pass_glossy_direct = True
render_layer.use_pass_glossy_indirect = True
render_layer.use_pass_glossy_color = True
render_layer.use_pass_transmission_direct = True
render_layer.use_pass_transmission_indirect = True
render_layer.use_pass_transmission_color = True
render_layer.use_pass_subsurface_direct = True
render_layer.use_pass_subsurface_indirect = True
render_layer.use_pass_subsurface_color = True
cycles_render_layer.use_pass_volume_direct = True
cycles_render_layer.use_pass_volume_indirect = True
render_layer.use_pass_combined = True
render_layer.use_pass_z = True
render_layer.use_pass_mist = True
render_layer.use_pass_normal = True
render_layer.use_pass_vector = True
render_layer.use_pass_object_index = True
render_layer.use_pass_material_index = True
render_layer.use_pass_uv = True
render_layer.use_pass_emit = True
render_layer.use_pass_environment = True
render_layer.use_pass_shadow = True
render_layer.use_pass_ambient_occlusion = True
@staticmethod
def prepare_compositor(target_folder):
bpy.context.scene.render.use_compositing = True
bpy.context.scene.use_nodes = True
node_tree = bpy.context.scene.node_tree
for node in node_tree.nodes:
node_tree.nodes.remove(node)
input_node = node_tree.nodes.new('CompositorNodeRLayers')
output_node = node_tree.nodes.new('CompositorNodeOutputFile')
output_node.layer_slots.clear()
output_node.location = (300, 0)
samples_per_pixel = bpy.context.scene.cycles.samples
relative_frame_number = bpy.context.scene.frame_current
seed = bpy.context.scene.cycles.seed
path = os.path.join(
target_folder, DeepDenoiserRender.blend_filename() + '_' +
str(samples_per_pixel) + '_' + str(relative_frame_number) + '_' + str(seed))
path = bpy.path.abspath(path)
path = os.path.realpath(path)
output_node.base_path = path
links = node_tree.links
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Image', output_node, RenderPasses.COMBINED)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Alpha', output_node, RenderPasses.ALPHA)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Depth', output_node, RenderPasses.DEPTH)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Normal', output_node, RenderPasses.NORMAL)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'UV', output_node, RenderPasses.UV)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Vector', output_node, RenderPasses.MOTION_VECTOR)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Shadow', output_node, RenderPasses.SHADOW)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'AO', output_node, RenderPasses.AMBIENT_OCCLUSION)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'IndexOB', output_node, RenderPasses.OBJECT_ID)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'IndexMA', output_node, RenderPasses.MATERIAL_ID)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Mist', output_node, RenderPasses.MIST)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Emit', output_node, RenderPasses.EMISSION)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'Env', output_node, RenderPasses.ENVIRONMENT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'DiffDir', output_node, RenderPasses.DIFFUSE_DIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'DiffInd', output_node, RenderPasses.DIFFUSE_INDIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'DiffCol', output_node, RenderPasses.DIFFUSE_COLOR)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'GlossDir', output_node, RenderPasses.GLOSSY_DIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'GlossInd', output_node, RenderPasses.GLOSSY_INDIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'GlossCol', output_node, RenderPasses.GLOSSY_COLOR)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'TransDir', output_node, RenderPasses.TRANSMISSION_DIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'TransInd', output_node, RenderPasses.TRANSMISSION_INDIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'TransCol', output_node, RenderPasses.TRANSMISSION_COLOR)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'SubsurfaceDir', output_node, RenderPasses.SUBSURFACE_DIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'SubsurfaceInd', output_node, RenderPasses.SUBSURFACE_INDIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'SubsurfaceCol', output_node, RenderPasses.SUBSURFACE_COLOR)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'VolumeDir', output_node, RenderPasses.VOLUME_DIRECT)
DeepDenoiserRender.connect_pass_to_new_file_output(
links, input_node, 'VolumeInd', output_node, RenderPasses.VOLUME_INDIRECT)
viewer_node = node_tree.nodes.new('CompositorNodeViewer')
viewer_node.location = (300, 150)
links.new(input_node.outputs[RenderPasses.NORMAL], viewer_node.inputs[0])
@staticmethod
def connect_pass_to_new_file_output(links, input_node, pass_name, output_node, short_output_name):
output_name = DeepDenoiserRender.extended_name(short_output_name)
output_slot = output_node.layer_slots.new(output_name)
links.new(input_node.outputs[pass_name], output_slot)
@staticmethod
def blend_filename():
blend_filename = bpy.path.basename(bpy.context.blend_data.filepath)
result = os.path.splitext(blend_filename)[0]
result = result.replace('_', ' ')
return result
@staticmethod
def extended_name(name):
samples_per_pixel = bpy.context.scene.cycles.samples
relative_frame_number = bpy.context.scene.frame_current
seed = bpy.context.scene.cycles.seed
result = (
DeepDenoiserRender.blend_filename() + '_' +
str(samples_per_pixel) + '_' + str(relative_frame_number) + '_' + str(seed) + '_' +
name + '_')
return result
@staticmethod
def is_render_layer_valid():
result = False
if (
len(bpy.context.scene.render.layers) == 1 and
bpy.context.scene.render.layers[0].use):
result = True
return result
@staticmethod
def render(target_folder):
DeepDenoiserRender.prepare_passes()
DeepDenoiserRender.prepare_image_settings()
DeepDenoiserRender.prepare_cycles()
DeepDenoiserRender.prepare_compositor(target_folder)
bpy.ops.render.render()
# UI
class DeepDenoiserRenderJobPropertyGroup(bpy.types.PropertyGroup):
use_render_job = bpy.props.BoolProperty(name='use_render_job')
class DeepDenoiserRenderPropertyGroup(bpy.types.PropertyGroup):
target_folder = bpy.props.StringProperty(
name='target_folder', description='Base directory for the rendered results', default='//OpenEXR/', maxlen=1024, subtype='DIR_PATH')
render_jobs_initialized = bpy.props.BoolProperty(
name='render_jobs_initialized', description='Were the render jobs initialized. Only false when the script was never initialized',
default=False)
class RENDER_JOB_prepare(bpy.types.Operator):
bl_idname = 'deep_blender_render.prepare'
bl_label = "Prepare Settings"
bl_description = "Prepare all the Blender settings to experiment with the same settings that are used when rendering"
def execute(self, context):
target_folder = context.scene.deep_denoiser_generator_property_group.target_folder
DeepDenoiserRender.prepare_passes()
DeepDenoiserRender.prepare_image_settings()
DeepDenoiserRender.prepare_cycles()
DeepDenoiserRender.prepare_compositor(target_folder)
return{'FINISHED'}
class RENDER_JOB_render(bpy.types.Operator):
bl_idname = 'deep_blender_render.render'
bl_label = "Render main frame noiseless"
bl_description = "Render main frame noiseless"
def execute(self, context):
target_folder = context.scene.deep_denoiser_generator_property_group.target_folder
samples_per_pixel = bpy.context.scene.cycles.samples
DeepDenoiserRender.render(target_folder)
return{'FINISHED'}
class DeepDenoiserRenderPanel(bpy.types.Panel):
bl_label = "DeepDenoiser Render"
bl_idname = "CATEGORY_PT_DeepDenoiserRender"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = "DeepBlender"
def draw(self, context):
scene = context.scene
layout = self.layout
render = scene.render
column = layout.column()
column.operator('deep_blender_render.prepare')
column = layout.column()
column.prop(scene.deep_denoiser_generator_property_group, 'target_folder', text='Folder')
is_render_layer_valid = DeepDenoiserRender.is_render_layer_valid()
if not is_render_layer_valid:
box = layout.box()
inner_column = box.column()
inner_column.label(text="Render Layer Error", icon='ERROR')
inner_column.label(text="The scene is only allowed to have one render layer and that one has to be active!")
column = layout.column()
if not is_render_layer_valid:
column.enabled = False
column.label(text="Render:")
column.operator('deep_blender_render.render', text='Render', icon='RENDER_STILL')
classes = [
DeepDenoiserRenderJobPropertyGroup, DeepDenoiserRenderPropertyGroup, DeepDenoiserRenderPanel,
RENDER_JOB_prepare, RENDER_JOB_render]
def register():
for i in classes:
bpy.utils.register_class(i)
# Store properties in the scene
bpy.types.Scene.deep_denoiser_generator_property_group = bpy.props.PointerProperty(type=DeepDenoiserRenderPropertyGroup)
bpy.types.Scene.render_jobs = bpy.props.CollectionProperty(type=DeepDenoiserRenderJobPropertyGroup)
def unregister():
for i in classes:
bpy.utils.unregister_class(i)
if __name__ == "__main__":
register() |
import numpy as np
import functions.kernel.conv_kernel as convKernel
import tensorflow as tf
def kernel_bspline_r4():
kernel_r4 = convKernel.convDownsampleKernel('bspline', 3, 15, normalizeKernel=1)
kernel_r4 = np.expand_dims(kernel_r4, -1)
kernel_r4 = np.expand_dims(kernel_r4, -1)
kernel_r4 = tf.constant(kernel_r4)
return kernel_r4
def kernel_bspline_r2():
kernel_r2 = convKernel.convDownsampleKernel('bspline', 3, 7, normalizeKernel=1)
kernel_r2 = np.expand_dims(kernel_r2, -1)
kernel_r2 = np.expand_dims(kernel_r2, -1)
kernel_r2 = tf.constant(kernel_r2)
return kernel_r2
|
def render_graph_GBufferRT():
loadRenderPassLibrary("GBuffer.dll")
tracer = RenderGraph("RtGbuffer")
tracer.addPass(RenderPass("GBufferRT"), "GBufferRT")
tracer.markOutput("GBufferRT.posW")
tracer.markOutput("GBufferRT.normW")
tracer.markOutput("GBufferRT.bitangentW")
tracer.markOutput("GBufferRT.texC")
tracer.markOutput("GBufferRT.diffuseOpacity")
tracer.markOutput("GBufferRT.specRough")
tracer.markOutput("GBufferRT.emissive")
tracer.markOutput("GBufferRT.matlExtra")
return tracer
GBufferRT = render_graph_GBufferRT()
try: m.addGraph(GBufferRT)
except NameError: pass
import warnings
from typing import Any, Callable, Dict, List
import numpy as np
from gym.spaces import Box, Dict
from multiworld.core.multitask_env import MultitaskEnv
from rlkit import pythonplusplus as ppp
from rlkit.core.distribution import DictDistribution
from rlkit.envs.contextual import ContextualRewardFn
from rlkit.envs.contextual.contextual_env import (
ContextualDiagnosticsFn,
Path,
Context,
Diagnostics,
)
from rlkit.envs.images import Renderer
from rlkit.core.loss import LossFunction
from rlkit.torch import pytorch_util as ptu
import torch
from torch import optim, nn
import collections
from collections import OrderedDict
from rlkit.torch.torch_rl_algorithm import TorchTrainer
from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.torch.irl.irl_trainer import IRLTrainer
Observation = Dict
Goal = Any
GoalConditionedDiagnosticsFn = Callable[
[List[Path], List[Goal]],
Diagnostics,
]
class GAILTrainer(IRLTrainer):
def compute_loss(self, batch, epoch=-1, test=False):
prefix = "test/" if test else "train/"
positives = self.positives.random_batch(self.batch_size)["observations"]
P, feature_size = positives.shape
positives = ptu.from_numpy(positives)
negatives = batch['observations']
N, feature_size = negatives.shape
X = torch.cat((positives, negatives))
Y = np.zeros((P + N, 1))
Y[:P, 0] = 1
# Y[P:, 0] = 0
# X = ptu.from_numpy(X)
Y = ptu.from_numpy(Y)
y_pred = self.GAIL_discriminator_logits(X)
loss = self.loss_fn(y_pred, Y)
y_pred_class = (y_pred > 0).float()
self.update_with_classification_stats(y_pred_class, Y, prefix)
self.eval_statistics.update(create_stats_ordered_dict(
"y_pred_positives",
ptu.get_numpy(y_pred[:P]),
))
self.eval_statistics.update(create_stats_ordered_dict(
"y_pred_negatives",
ptu.get_numpy(y_pred[P:]),
))
self.eval_statistics['epoch'] = epoch
self.eval_statistics[prefix + "losses"].append(loss.item())
return loss
def GAIL_discriminator_logits(self, observations):
log_p = self.score_fn(observations)
return log_p
|
import hydra
from omegaconf import DictConfig
from src.toxic.data.data_module import DataModule
from src.toxic.modelling.model import Model
from src.toxic.training import Trainer
from src.toxic.utils.random import set_seed
@hydra.main(config_path='conf', config_name='config')
def train(config: DictConfig):
set_seed(config.trainer.seed)
data = DataModule(config.data)
model = Model(config)
trainer = Trainer(config, data, model)
trainer.train()
if __name__ == '__main__':
train()
|
import os
HERE = os.path.abspath(os.path.dirname(__file__))
TEMPLATES_DIR = os.path.join(HERE, "templates") |
from easydict import EasyDict as edict
cfg = edict()
cfg.nid = 1000
cfg.arch = "osnet_ain" # "osnet" or "res50-fc512"
cfg.loadmodel = "trackers/weights/osnet_ain_x1_0_msmt17_256x128_amsgrad_ep50_lr0.0015_coslr_b64_fb10_softmax_labsmth_flip_jitter.pth"
cfg.frame_rate = 30
cfg.track_buffer = 240
cfg.conf_thres = 0.5
cfg.nms_thres = 0.4
cfg.iou_thres = 0.5
|
import re
from collections import Counter, OrderedDict
from poetics.patterning import assign_letters_to_dict
########################################################################################################################
# Tokenizing
########################################################################################################################
# Tokenizes text into words.
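# e.g. tokenize("Don't stop - now!") -> ["don't", "stop", "now"]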
def tokenize(line):
tokenized = []
# Substitutions for various punctuation.
subs = {'-': ' ', '‘': "'", '’': "'"}
for chars, substitution in subs.items():
line = line.replace(chars, substitution)
# Loops through expressions/replacements in expressions. Currently deals with getting rid of unneeded apostrophes
# while maintaining contractions.
# Final expression removes all non-apostrophe, digit, or word characters left over
expressions = {'\'[^\w]': ' ', '[^\w]\'': ' ', "\'\Z": ' ', "\A\'": ' ', '[^\d\w\']': ' '}
for expression, replacement in expressions.items():
line = re.sub(expression, replacement, line)
# Split into individual words
tokenized.extend([word.lower() for word in line.strip().split()])
return tokenized
# Generator for getting indexes of tokens in lines.
def get_line_indexes(token_list):
start_index = 0
for index, token in enumerate(token_list):
if index == len(token_list) - 1:
if token == '\n':
yield(start_index, index)
else:
yield(start_index, index + 1)
elif token == '\n':
yield(start_index, index)
start_index = index + 1
# Generator for getting indexes of tokens in sentences.
def get_sentence_indexes(token_list):
terminators = ['.', '!', '?']
start_index = 0
for index, token in enumerate(token_list):
if index == len(token_list) - 1:
if index > start_index:
yield(start_index, index)
elif index == start_index and token == '\n':
start_index += 1
elif token in terminators:
end_index = index
for token2 in token_list[index:]:
match = re.match("[^\w\'\s]+", token2)
if match:
end_index += 1
else:
break
if index > start_index:
yield(start_index, end_index)
start_index = end_index + 1
# Generator for getting indexes of tokens in stanzas.
def get_stanza_indexes(token_list):
start_index = 0
for index, token in enumerate(token_list):
if index == len(token_list) - 1:
if token == '\n':
yield(start_index, index)
else:
yield(start_index, index + 1)
elif index == start_index and token == '\n':
start_index += 1
elif token == '\n':
if token_list[index + 1] == '\n':
if index > start_index:
yield (start_index, index)
start_index = index + 1
# Tokenizes text and gets indexes of tokens belonging to lines, sentences, and stanzas.
def full_tokenize(text):
# TODO: add exceptions for 'cause, 'em,, 'll, 'nuff, doin', goin', nothin', nothin', ol', somethin'
# List of abbreviations that should be considered a single token.
abbre = ('a\.m\.|p\.m\.|'
'[vV][sS]\.|'
'[eE]\.[gG]\.|[iI]\.[eE]\.|'
'Mt\.|'
'Mont\.|'
'Bros\.|'
'[cC]o\.|'
'Corp\.|'
'Inc\.|'
'Ltd\.|'
'Md\.|'
'Dr\.|'
'Ph\.|'
'Rep\.|'
'Rev\.|'
'Sen\.|'
'St\.|'
'Messrs\.|'
'Jr\.|'
'Mr\.|'
'Mrs\.|'
'Ms\.|')
# Pattern that splits tokens.
pattern = ('(' + abbre +
'[A-Z](?=\.)|' # Any capital letter followed by a period.
'[\'](?=[^\w])|' # ' followed by a non-word character.
'(?<=[^\w])\'|' # ' preceeded by a non-word character.
'\'\Z|\A\'|' # ' at the start or end of the text.
'[^\w\']|' # Anything that isn't a word character or an apostrophe.
'\s+' # Any number of consecutive spaces.
')')
tokens = [segment for segment in re.split(pattern, text) if segment]
line_indexes = [index for index in get_line_indexes(tokens)]
sentence_indexes = [index for index in get_sentence_indexes(tokens)]
stanza_indexes = [index for index in get_stanza_indexes(tokens)]
return tokens, line_indexes, sentence_indexes, stanza_indexes
########################################################################################################################
# Conversion
########################################################################################################################
# Converts parts of speech tags from tagger to those used by wordnet. Returns None if not relevant
def convert_pos(pos):
relevant_letters = ['a', 'n', 'r', 'v']
pos = pos.replace("J", "A").lower()
if pos[0] in relevant_letters:
return pos[0]
else:
return None
# Title cases titles/names. Does not handle subtitles presently.
# Future: handle roman numerals.
def title_case(text):
decap = ['a', 'an', 'and', 'as', 'at', 'but', 'by', 'en', 'for', 'if', 'in', 'of', 'on', 'or', 'the', 'to', 'v',
'v.', 'via', 'vs', 'vs.']
words = re.split('[\t ]', text.title())
for index, word in enumerate(words[1:]):
if word.lower() in decap:
words[index + 1] = word.lower()
for index, word in enumerate(words):
match = re.match('\w+\'\w+', word)
if match:
out_word = [word[0].upper()]
out_word.extend([i.lower() for i in word[1:]])
words[index] = ''.join(out_word)
return ' '.join(words)
########################################################################################################################
# Grouping
########################################################################################################################
# Gets groups from a sound set.
def get_sound_set_groups(sound_list, tokenized_text, max_feature_distance):
# Make a dictionary of sound appearances that are {Sound: [indexes of appearances]}.
sound_set = index_unique_strings(sound_list, 2)
output_dict = {}
# Turn the sets of indexes into groups with a maximum distance between each consecutive member.
for key, indexes in sound_set.items():
groups = [num for num in get_distance_groups(indexes, max_feature_distance, 3)]
# Test to make sure that those index groups correspond to at least two unique words.
for index, group in enumerate(groups):
words = [tokenized_text[index2].lower() for index2 in group]
if len(set(words)) < 2:
del(groups[index])
# If any groups remain, add them to an output dictionary.
if groups:
output_dict[key] = groups
return output_dict
# Yields max-length groups of integers, of min_length or longer, where each consecutive integer in a group is no more
# than distance apart.
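# e.g. get_distance_groups([1, 2, 3, 10, 11], distance=2, min_length=2) yields [1, 2, 3] and then [10, 11].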
def get_distance_groups(num_list, distance, min_length=1):
group = []
for index, num in enumerate(num_list):
# If group is currently empty or the last number in group is within distance of num, append num to group.
if not group or num - group[-1] <= distance:
group.append(num)
# If we're not on the last number in the list, then we move to the next one.
# This check is necessary because otherwise the last group would never be yielded.
if index < len(num_list) - 1:
continue
# If we aren't within distance and group isn't empty (and so we never hit continue) then we yield group if
# group is at least the minimum number of items long.
if len(group) >= min_length:
yield group
# Then we take the current num, and put it in group alone for the next iteration.
group = [num]
########################################################################################################################
# Indexing
########################################################################################################################
# Takes a list of strings and returns a dict of unique items and indexes of appearances.
# Handles lists of lists of strings or lists of strings (e.g. [['a', 'b'],['c', 'd']] or ['a', 'b']).
def index_unique_strings(input_list, min_count=None):
min_count = min_count or 1
out = {}
if isinstance(input_list[0], list):
count = Counter([string for item in input_list for string in item if string])
for key in [key for key, value in count.items() if value >= min_count]:
# Creates a list of the top-level indexes of the occurrences of the present key.
indexes = [index for index, item in enumerate(input_list) for string in item if key == string]
# Adds the key and its indexes to output.
out[key] = indexes
return out
else:
count = Counter([string for string in input_list if string])
for key in [key for key, value in count.items() if value >= min_count]:
# Creates a list of the top-level indexes of the occurrences of the present key.
indexes = [index for index, string in enumerate(input_list) if key == string]
# Adds the key and its indexes to output.
out[key] = indexes
return out
# Takes a list of features and turns it into a scheme.
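# e.g. feats_to_scheme(['ay', 'oh', 'ay', 'oh']) presumably yields 'ABAB', assuming
# assign_letters_to_dict assigns letters to unique features in order of appearance.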
def feats_to_scheme(features, lower=False, allow_blanks=False, max_unique=None):
# If blanks aren't allowed, return None if any are present.
if not allow_blanks:
blanks = [feature for feature in features if not feature]
if len(blanks) > 0:
return None
# Create an ordered dict of unique features that assigns each unique feature a letter.
ordered_features = assign_letters_to_dict(OrderedDict.fromkeys([feature for feature in features
if feature and not feature == ' ']), lower)
# Return none if the number of features surpasses max unique.
if max_unique:
if len(ordered_features) > max_unique:
return None
# Return joined scheme
return ''.join([' ' if not feature or feature == ' ' else ordered_features[feature] for feature in features])
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from networkapi.admin_permission import AdminPermission
from networkapi.auth import has_perm
from networkapi.equipamento.models import EquipamentoError
from networkapi.equipamento.models import TipoEquipamento
from networkapi.equipamento.models import TipoEquipamentoDuplicateNameError
from networkapi.exception import InvalidValueError
from networkapi.infrastructure.xml_utils import dumps_networkapi
from networkapi.infrastructure.xml_utils import loads
from networkapi.infrastructure.xml_utils import XMLError
from networkapi.rest import RestResource
from networkapi.rest import UserNotAuthorizedError
from networkapi.util import is_valid_regex
from networkapi.util import is_valid_string_maxsize
from networkapi.util import is_valid_string_minsize
class EquipmentTypeAddResource(RestResource):
log = logging.getLogger('EquipmentTypeAddResource')
def handle_post(self, request, user, *args, **kwargs):
"""Treat requests POST to insert a Equipment Type.
URL: equipmenttype/
"""
try:
self.log.info('Add Equipment Script')
# User permission
if not has_perm(user, AdminPermission.EQUIPMENT_MANAGEMENT, AdminPermission.WRITE_OPERATION):
return self.not_authorized()
# Business Validations
# Load XML data
xml_map, attrs_map = loads(request.body)
# XML data format
networkapi_map = xml_map.get('networkapi')
if networkapi_map is None:
msg = u'There is no value to the networkapi tag of XML request.'
self.log.error(msg)
return self.response_error(3, msg)
equipment_type_map = networkapi_map.get('equipment_type')
if equipment_type_map is None:
msg = u'There is no value to the equipment_type tag of XML request.'
self.log.error(msg)
return self.response_error(3, msg)
# Get XML data
name = equipment_type_map.get('name')
# Valid Name
if not is_valid_string_minsize(name, 3) or not is_valid_string_maxsize(name, 100) or not is_valid_regex(name, '^[A-Za-z0-9 -]+$'):
self.log.error(u'Parameter name is invalid. Value: %s', name)
raise InvalidValueError(None, 'name', name)
# Business Rules
equipment_type = TipoEquipamento()
# save Equipment Type
equipment_type.insert_new(user, name)
etype_dict = dict()
etype_dict['id'] = equipment_type.id
return self.response(dumps_networkapi({'equipment_type': etype_dict}))
except InvalidValueError, e:
return self.response_error(269, e.param, e.value)
except TipoEquipamentoDuplicateNameError, e:
return self.response_error(312, name)
except UserNotAuthorizedError:
return self.not_authorized()
except EquipamentoError, e:
return self.response_error(1)
except XMLError, e:
self.log.exception(e)
return self.response_error(1)
|
import copy
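# Solves a 0/1 knapsack in which value equals weight: choose a subset of bars whose
# total length is as large as possible without exceeding m, using two rolling DP rows
# (dp[idx][j] = best total length achievable with capacity j after considering bar i).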
def main():
n, m = map(int, input().split())
bars = sorted(map(int, input().split()))
dp = [[0] * (m + 1) for _ in range(2)]
idx = 0
idx_prev = 1
for i in range(n):
bar = bars[i]
dp[idx] = copy.copy(dp[idx_prev])
for j in range(bar, m + 1):
if dp[idx_prev][j] > (bar + dp[idx_prev][j - bar]):
dp[idx][j] = dp[idx_prev][j]
else:
dp[idx][j] = bar + dp[idx_prev][j - bar]
idx, idx_prev = idx_prev, idx
print(dp[idx_prev][m])
if __name__ == '__main__':
main()
|
#
# PySNMP MIB module TPLINK-PROXYARP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TPLINK-PROXYARP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:25:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, Gauge32, ObjectIdentity, TimeTicks, Counter32, iso, Counter64, Unsigned32, IpAddress, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, NotificationType, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Gauge32", "ObjectIdentity", "TimeTicks", "Counter32", "iso", "Counter64", "Unsigned32", "IpAddress", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "NotificationType", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tplinkMgmt, = mibBuilder.importSymbols("TPLINK-MIB", "tplinkMgmt")
tplinkProxyArpMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 11863, 6, 37))
tplinkProxyArpMIB.setRevisions(('2012-12-13 09:30',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: tplinkProxyArpMIB.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: tplinkProxyArpMIB.setLastUpdated('201212130930Z')
if mibBuilder.loadTexts: tplinkProxyArpMIB.setOrganization('TPLINK')
if mibBuilder.loadTexts: tplinkProxyArpMIB.setContactInfo('www.tplink.com.cn')
if mibBuilder.loadTexts: tplinkProxyArpMIB.setDescription('Private MIB for proxy arp configuration.')
tplinkProxyArpMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1))
tplinkProxyArpNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 11863, 6, 37, 2))
tpProxyArpConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1))
tpProxyArpTable = MibTable((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1, 1), )
if mibBuilder.loadTexts: tpProxyArpTable.setStatus('current')
if mibBuilder.loadTexts: tpProxyArpTable.setDescription('Proxy Arp.')
tpProxyArpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1, 1, 1), ).setIndexNames((0, "TPLINK-PROXYARP-MIB", "tpProxyArpVlanId"))
if mibBuilder.loadTexts: tpProxyArpEntry.setStatus('current')
if mibBuilder.loadTexts: tpProxyArpEntry.setDescription('An entry contains of the information of port configure.')
tpProxyArpVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tpProxyArpVlanId.setStatus('current')
if mibBuilder.loadTexts: tpProxyArpVlanId.setDescription('The vlan id of the proxy arp ip address')
tpProxyArpIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1, 1, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tpProxyArpIpAddr.setStatus('current')
if mibBuilder.loadTexts: tpProxyArpIpAddr.setDescription('Displays the proxy arp ip address.')
tpProxyArpIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1, 1, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tpProxyArpIpMask.setStatus('current')
if mibBuilder.loadTexts: tpProxyArpIpMask.setDescription('Displays the proxy arp ip address mask')
tpProxyArpInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tpProxyArpInterfaceName.setStatus('current')
if mibBuilder.loadTexts: tpProxyArpInterfaceName.setDescription('Displays the interface name.')
tpProxyArpEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 11863, 6, 37, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpProxyArpEnable.setStatus('current')
if mibBuilder.loadTexts: tpProxyArpEnable.setDescription('Select Enable/Disable proxy ARP for this entry. 0. Disable 1. Enable')
mibBuilder.exportSymbols("TPLINK-PROXYARP-MIB", tplinkProxyArpMIBObjects=tplinkProxyArpMIBObjects, tplinkProxyArpNotifications=tplinkProxyArpNotifications, tpProxyArpVlanId=tpProxyArpVlanId, tpProxyArpEntry=tpProxyArpEntry, PYSNMP_MODULE_ID=tplinkProxyArpMIB, tplinkProxyArpMIB=tplinkProxyArpMIB, tpProxyArpConfig=tpProxyArpConfig, tpProxyArpIpAddr=tpProxyArpIpAddr, tpProxyArpTable=tpProxyArpTable, tpProxyArpInterfaceName=tpProxyArpInterfaceName, tpProxyArpEnable=tpProxyArpEnable, tpProxyArpIpMask=tpProxyArpIpMask)
|
# 3rd party
from nose.plugins.attrib import attr
# Agent
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
@attr(requires='gearman')
class GearmanTestCase(AgentCheckTest):
CHECK_NAME = "gearmand"
def test_metrics(self):
tags = ['first_tag', 'second_tag']
service_checks_tags = ['server:127.0.0.1', 'port:4730']
config = {
'instances': [{
'tags': tags
}]
}
tags += service_checks_tags
self.run_check(config)
self.assertMetric('gearman.unique_tasks', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.running', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.queued', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.workers', value=0.0, tags=tags, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.OK,
tags=service_checks_tags, count=1)
self.coverage_report()
def test_service_checks(self):
config = {
'instances': [
{'host': '127.0.0.1', 'port': 4730},
{'host': '127.0.0.1', 'port': 4731}]
}
self.assertRaises(Exception, self.run_check, config)
service_checks_tags_ok = ['server:127.0.0.1', 'port:4730']
service_checks_tags_not_ok = ['server:127.0.0.1', 'port:4731']
tags = service_checks_tags_ok
self.assertMetric('gearman.unique_tasks', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.running', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.queued', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.workers', value=0.0, tags=tags, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.OK,
tags=service_checks_tags_ok, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.CRITICAL,
tags=service_checks_tags_not_ok, count=1)
self.coverage_report()
|
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
# Server that adds an object to the database
import rospy
from object_recognition_core.db import models
from object_recognition_core.db.tools import args_to_db_params
import object_recognition_core.db.tools as dbtools
import couchdb
from ork_interface.srv import *
DEFAULT_DB_ROOT = 'http://localhost:5984'
def handles_object_add(req):
    print("Object Name: " + req.name)
    obj = models.Object(object_name=req.name, description=req.description)
    couch = couchdb.Server(DEFAULT_DB_ROOT)
    db = dbtools.init_object_databases(couch)
    objects = db
    existing = models.Object.by_object_name(objects, key=obj.object_name)
    store_new = True
if len(existing) > 0:
print('It appears that there are %d object(s) with the same name.' % len(existing))
for x in existing:
print(x)
print('Use the object id above? [y,n]')
use_it = raw_input('')
if 'y' in use_it.lower():
store_new = False
obj = x
break
else:
store_new = True
if store_new:
obj.store(objects)
        print('Stored new object with id: ' + str(obj.id))
newObj_ID = obj.id
return objAddResponse(newObj_ID)
def object_add_server():
    # Initialize the node that publishes the service
    rospy.init_node('object_add_server')
    # Declare an 'object_add' service of type objAdd, handled by the handles_object_add callback
    s = rospy.Service('object_add', objAdd, handles_object_add)
    print("Ready to add an object.")
rospy.spin()
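# --- Hedged usage sketch (added for illustration; not part of the original node) ---
# A client node could call the 'object_add' service roughly like this. The request
# field order (name, description) is assumed from the handler above; the response
# message layout is not shown in this file, so the raw response is returned as-is.
def example_object_add_client(name, description):
    rospy.wait_for_service('object_add')
    object_add = rospy.ServiceProxy('object_add', objAdd)
    return object_add(name, description)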
if __name__ == "__main__":
object_add_server()
|
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from scipy.stats import norm
def PICP(posterior, data, CI):
final = pd.read_csv(posterior)
final.set_index('Unnamed: 0', inplace=True)
observed = pd.read_csv(data)
upper_ci = (100-(100-CI)/2)/100
lower_ci = ((100-CI)/2)/100
upper = final.quantile(upper_ci, axis=1)
lower = final.quantile(lower_ci, axis=1)
included = 0
    for i in range(len(observed)):
        if (observed.value[i] >= lower.iloc[i]) and (observed.value[i] <= upper.iloc[i]):
            included += 1
PP = included/len(observed)*100
return PP
def PICP2(posterior, data, CI):
data_in = pd.read_csv(data)
data_out = pd.read_csv(posterior)
lower = (50-CI/2)/100
upper = (50+CI/2)/100
data_set = data_in.copy()
data_set['lower_quant'] = data_out.quantile(lower, axis=1)
data_set['upper_quant'] = data_out.quantile(upper, axis=1)
data_set['overlap'] = norm.cdf(data_set['upper_quant'],loc=data_set['value'],scale=data_set['noise']) - norm.cdf(data_set['lower_quant'],loc=data_set['value'],scale=data_set['noise'])
score = data_set['overlap'].sum()/len(data_set)*100
return score |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Sequence, Optional, Dict
import cirq
import sympy
_setup_code = ('import cirq\n'
'import numpy as np\n'
'import sympy\n'
'import openfermioncirq as ofc\n'
'import openfermion as of\n')
def assert_equivalent_repr(
value: Any,
*,
setup_code: str = _setup_code) -> None:
"""Checks that eval(repr(v)) == v.
Args:
value: A value whose repr should be evaluatable python
code that produces an equivalent value.
setup_code: Code that must be executed before the repr can be evaluated.
Ideally this should just be a series of 'import' lines.
"""
cirq.testing.assert_equivalent_repr(value, setup_code=setup_code)
def assert_implements_consistent_protocols(
val: Any,
*,
exponents: Sequence[Any] = (
0, 1, -1, 0.5, 0.25, -0.5, 0.1, sympy.Symbol('s')),
qubit_count: Optional[int] = None,
ignoring_global_phase: bool=False,
setup_code: str = _setup_code,
global_vals: Optional[Dict[str, Any]] = None,
local_vals: Optional[Dict[str, Any]] = None
) -> None:
"""Checks that a value is internally consistent and has a good __repr__."""
cirq.testing.assert_implements_consistent_protocols(
val,
exponents=exponents,
qubit_count=qubit_count,
ignoring_global_phase=ignoring_global_phase,
setup_code=setup_code,
global_vals=global_vals,
local_vals=local_vals)
|
from models.worker.macro_cnn_worker import MacroCNN, MacroCNNlight
__all__ = ['MacroCNN', 'MacroCNNlight']
def get_worker(args, architecture=None) :
    if args.task_type == 'vision' :
        if args.worker_type == 'macro' :
            if args.controller == 'enas' :
                worker = MacroCNN(args, architecture)
            elif args.controller == 'enas_light' :
                worker = MacroCNNlight(args, architecture)
            else :
                raise NotImplementedError
        else :
            raise NotImplementedError
    elif args.task_type == 'text' :
        raise NotImplementedError
    else :
        raise NotImplementedError
    return worker |
#Import Libraries
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
#----------------------------------------------------
#load breast cancer data
X, y = load_breast_cancer(return_X_y = True)
#----------------------------------------------------
#Splitting data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=44, shuffle =True)
#----------------------------------------------------
#Applying LogisticRegression Model
'''
#linear_model.LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1,
#                                class_weight=None, random_state=None, solver='warn', max_iter=100,
#                                multi_class='warn', verbose=0, warm_start=False, n_jobs=None)
'''
LogisticRegressionModel = LogisticRegression(penalty='l1',solver='liblinear',C=1.0, max_iter=100, random_state=33)
LogisticRegressionModel.fit(X_train, y_train)
#Calculating Details
print('LogisticRegressionModel Train Score is : ' , LogisticRegressionModel.score(X_train, y_train))
print('LogisticRegressionModel Test Score is : ' , LogisticRegressionModel.score(X_test, y_test))
print('LogisticRegressionModel Classes are : ' , LogisticRegressionModel.classes_)
print('LogisticRegressionModel No. of iterations is : ' , LogisticRegressionModel.n_iter_)
print('----------------------------------------------------')
#Calculating Prediction
y_pred = LogisticRegressionModel.predict(X_test)
y_pred_prob = LogisticRegressionModel.predict_proba(X_test)
print('Predicted Value for LogisticRegressionModel is : ' , y_pred[:10])
print('Prediction Probabilities Value for LogisticRegressionModel is : ' , y_pred_prob[:10])
#----------------------------------------------------
#Calculating Confusion Matrix
CM = confusion_matrix(y_test, y_pred)
print('Confusion Matrix is : \n', CM)
# drawing confusion matrix
sns.heatmap(CM, center = True)
plt.show()
#----------------------------------------------------
#Calculating Accuracy Score : with normalize=False this returns the count of correctly
#classified samples; set normalize=True for the ratio ((TP + TN) / float(TP + TN + FP + FN))
AccScore = accuracy_score(y_test, y_pred, normalize=False)
print('Accuracy Score is : ', AccScore)
#----------------------------------------------------
#Calculating F1 Score : 2 * (precision * recall) / (precision + recall)
# f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
F1Score = f1_score(y_test, y_pred, average='micro') #it can be : binary,macro,weighted,samples
print('F1 Score is : ', F1Score)
#----------------------------------------------------
#Calculating Recall Score : (Sensitivity) (TP / float(TP + FN))
# recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
RecallScore = recall_score(y_test, y_pred, average='micro') #it can be : binary,macro,weighted,samples
print('Recall Score is : ', RecallScore)
#----------------------------------------------------
#Calculating Precision Score : (Positive Predictive Value) (TP / float(TP + FP))
# precision_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
PrecisionScore = precision_score(y_test, y_pred, average='micro') #it can be : binary,macro,weighted,samples
print('Precision Score is : ', PrecisionScore)
#----------------------------------------------------
#Calculating Precision recall Score :
#metrics.precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None, pos_label=1, average=
#                                        None, warn_for=('precision', 'recall', 'f-score'), sample_weight=None)
PrecisionRecallScore = precision_recall_fscore_support(y_test, y_pred, average='micro') #it can be : binary,macro,weighted,samples
print('Precision Recall Score is : ', PrecisionRecallScore)
#----------------------------------------------------
#Calculating Precision recall Curve :
# precision_recall_curve(y_true, probas_pred, pos_label=None, sample_weight=None)
PrecisionValue, RecallValue, ThresholdsValue = precision_recall_curve(y_test,y_pred)
print('Precision Value is : ', PrecisionValue)
print('Recall Value is : ', RecallValue)
print('Thresholds Value is : ', ThresholdsValue)
#----------------------------------------------------
#Calculating classification Report :
#classification_report(y_true, y_pred, labels=None, target_names=None,sample_weight=None, digits=2, output_dict=False)
ClassificationReport = classification_report(y_test,y_pred)
print('Classification Report is : ', ClassificationReport )
#----------------------------------------------------
#Calculating Area Under the Curve :
fprValue2, tprValue2, thresholdsValue2 = roc_curve(y_test,y_pred)
AUCValue = auc(fprValue2, tprValue2)
print('AUC Value : ', AUCValue)
#----------------------------------------------------
#Calculating Receiver Operating Characteristic :
#roc_curve(y_true, y_score, pos_label=None, sample_weight=None,drop_intermediate=True)
fprValue, tprValue, thresholdsValue = roc_curve(y_test,y_pred)
print('fpr Value : ', fprValue)
print('tpr Value : ', tprValue)
print('thresholds Value : ', thresholdsValue)
#----------------------------------------------------
#Calculating ROC AUC Score:
#roc_auc_score(y_true, y_score, average='macro', sample_weight=None, max_fpr=None)
ROCAUCScore = roc_auc_score(y_test,y_pred, average='micro') #it can be : macro,weighted,samples
print('ROCAUC Score : ', ROCAUCScore)
#----------------------------------------------------
#Calculating Zero One Loss:
#zero_one_loss(y_true, y_pred, normalize = True, sample_weight = None)
ZeroOneLossValue = zero_one_loss(y_test,y_pred,normalize=False)
print('Zero One Loss Value : ', ZeroOneLossValue ) |
# Exercise 1 - Build a structure that asks the user which day of the week it is. If the day is Sunday or
# Saturday, print "Hoje é dia de descanso" (rest day); otherwise print "Você precisa trabalhar!" (you need to work)
"""
dia = str(input("Qual dia da semana é hoje? ")).lower()
if dia == "sabado" or dia == "domingo":
print('Hoje é dia de descanso')
else:
print('Voce precisa trabalhar!')"""
# Exercise 2 - Create a list of 5 fruits and check whether the fruit 'Morango' (strawberry) is in the list
cont3 = 0
lista1 = ['Abacaxi', 'Laranja', 'Pera', 'maçã', 'goiaba']
lista2 = ['Abacaxi', 'Laranja', 'Pera', 'maçã', 'morango']
for i in lista2:
if i.lower() == "morango":
cont3 += 1
if cont3 == 0:
print('Não tem morango!')
else:
print('Existe morango na lista')
# Exercise 3 - Create a tuple with 4 elements, multiply each element by 2 and store the results in a list
tupla = (1,2,3,4)
lista3 = []
for i in tupla:
lista3.append(i*2)
print(lista3)
# Exercise 4 - Create a sequence of even numbers between 100 and 150 and print them
for c in range(100,151,2):
if c == 150:
print(c)
else:
print(c, end=', ')
print()
# Exercise 5 - Create a variable called temperatura and assign it the value 40. While temperatura is greater than 35,
# print the temperatures
temperatura = 40
while temperatura > 35:
print(temperatura, end=', ')
temperatura -= 1
print()
# Exercise 6 - Create a variable contador = 0. While contador is less than 100, print the values,
# but stop the program when the value 23 is reached
contador = 0
while contador < 100:
print(contador, end=', ')
contador += 1
if contador == 23:
break
print()
# Exercise 7 - Create an empty list and a variable with value 4. While the variable is less than or equal to 20,
# append only the even values to the list, then print the list
lista7 = []
var7 = 4
while var7 <= 20:
if var7 % 2 == 0:
lista7.append(var7)
var7 +=1
else:
var7 +=1
print(lista7)
# Exercise 8 - Turn the result of this range call into a list: range(5, 45, 2)
nums = range(5, 45, 2)
print(list(nums))
# Exercise 9 - Fix the errors in the code below and run the program. Hint: there are 3 errors.
temperatura = float(input('Qual a temperatura? '))
if temperatura > 30:
print('Vista roupas leves.')
else:
print('Busque seus casacos.')
# Exercise 10 - Write a program that counts how many times the letter "r" appears in the sentence below. Use a placeholder in your print statement
frase = "É melhor, muito melhor, contentar-se com a realidade; se ela não é tão brilhante como os sonhos, tem pelo menos a vantagem de existir."
contador = 0
for letra in frase:
    if letra == 'r':
        contador += 1
print(f'Foram contados {contador} letras "r"')
|
import asab
class MinimailerService(asab.Service):
def __init__(self, app, service_name="minimailer.MinimailerService"):
super().__init__(app, service_name)
# Key-value registry of mailers
# where key is a user-defined mailer id
		# and the value is the registered mailer object
self.MailerRegistry = {}
	def register_mailer(self, mailer):
		"""Registers a mailer"""
		if mailer.Id in self.MailerRegistry:
			raise RuntimeError("Mailer with ID '{}' already registered.".format(mailer.Id))
		self.MailerRegistry[mailer.Id] = mailer
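# --- Hedged usage sketch (added for illustration; DummyMailer is made up, only the
# 'Id' attribute is assumed from register_mailer above) ---
#
#   class DummyMailer:
#       Id = "default"
#
#   svc = MinimailerService(app)          # 'app' is an asab.Application instance
#   svc.register_mailer(DummyMailer())
#   assert "default" in svc.MailerRegistry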
|
from elasticsearch import Elasticsearch, TransportError, RequestsHttpConnection
from elasticsearch.helpers import streaming_bulk
import certifi
import time
import getopt
import sys
import csv
csv.field_size_limit(sys.maxsize)
help = """
This python utility helps with uploading CSV files of any size to Elasticsearch. This has been tested up to ~2GB. Currently, all fields in the document are indexed and treated as text. In the future, controlling how the fields are indexed from the command line would be a handy feature.
Example on localhost
python csv-to-elasticsearch.py -i my-index -c localhost:9200,localhost:9201 -f my.csv
Example over https
python csv-to-elasticsearch.py -i my-index -c https://sub.domain.ca/es -f my.csv
Required fields
-i (--index) The index to write documents to.
-c (--connect) The Elasticsearch host string.
Ex. '-c localhost:9200,localhost:9201,localhost:9202'
-f (--file) The CSV file.
Optional fields
-h (--help) Print this helpful text field.
-u (--user) The user to connect to Elasticsearch as.
-p (--password) The user's password.
-x (--certificate) The certificate to use to connect to the cluster.
"""
def parse_reports(file):
with open(file, 'r') as file_std:
csv_reader = csv.reader(file_std)
for values in csv_reader:
if csv_reader.line_num == 1:
headers = values
else:
yield dict(zip(headers, values))
def create_index(client, index):
try:
client.indices.create(index=index)
except TransportError as e:
# ignore already existing index
if e.error == 'resource_already_exists_exception':
pass
else:
raise
def parse_hosts(hosts):
"""
Parses a comma delimited string of hosts.
Ex. localhost:9200,localhost:9201,localhost:9202
    :param str hosts: The hosts string.
    :return: A list of hosts for the Elasticsearch client.
"""
return hosts.split(',')
def parse_args(args):
"""
Parses the command line arguments. See 'help' for details.
"""
short_opts = 'hi:c:f:u:p:x:'
long_opts = [
'help',
'index=',
'connect=',
'file=',
'user=',
'password=',
'certificate='
]
index, hosts, csv_file, user, password, cert = None, None, None, None, None, None
try:
opts, args = getopt.getopt(args, short_opts, long_opts)
except getopt.GetoptError:
print(help)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(help)
sys.exit()
elif opt in ('-i', '--index'):
index = arg
elif opt in ('-c', '--connect'):
hosts = arg
elif opt in ('-f', '--file'):
csv_file = arg
elif opt in ('-u', '--user'):
user = arg
elif opt in ('-p', '--password'):
password = arg
elif opt in ('-x', '--certificate'):
cert = arg
else:
print('Unknown flag: {}'.format(opt))
sys.exit(2)
if not index:
print('-i is required')
print(help)
sys.exit(2)
if not hosts:
print('-c is required')
print(help)
sys.exit(2)
if not csv_file:
print('-f is required')
print(help)
sys.exit(2)
return index, hosts, csv_file, user, password, cert
def main(index, hosts, csv_file, user, password, cert):
hosts = parse_hosts(hosts)
options = {}
if user and password:
options['http_auth'] = (user, password)
if cert:
options['verify_certs'] = True
options['ca_certs'] = cert
es = Elasticsearch(hosts, **options)
create_index(es, index)
for ok, result in streaming_bulk(es, parse_reports(csv_file), index=index, max_retries=5):
action, result = result.popitem()
doc_id = '/%s/doc/%s' % (index, result['_id'])
if not ok:
print('Failed to %s document %s: %r' % (action, doc_id, result))
else:
print(doc_id)
if __name__ == '__main__':
start = time.perf_counter()
index, hosts, csv_file, user, password, cert = parse_args(sys.argv[1:])
main(index, hosts, csv_file, user, password, cert)
elapsed = time.perf_counter() - start
print(f'Program completed in {elapsed:0.5f} seconds.')
|
from .this_file import function_name, other_function
from ...top.middle.lower.filename import Class1, Class2
import pytest
def test_alive():
""" Does our test file even run
"""
pass
def test_function_name_exists():
""" can we see the basic function
"""
assert function_name
@pytest.fixture
def empty_thing():
return Class1()
def test_function_name_results_are_a_certain_way(empty_thing):
    """ docstring on what we are checking
    """
    expected = 44
    actual = empty_thing.method(44)
assert expected == actual
|
"""Test module for primary command line interface."""
import sys
import unittest
from unittest.mock import patch
from io import StringIO
from pylint_fail_under.__main__ import main
class _MockPylintLinter: # pylint: disable=too-few-public-methods
def __init__(self, score):
self.stats = {"global_note": score}
class _MockPylintResults: # pylint: disable=too-few-public-methods
def __init__(self, score):
self.linter = _MockPylintLinter(score)
class TestCmdLineInterface(unittest.TestCase):
"""Test case class for primary command line interface."""
def test_pass(self): # pylint: disable=no-self-use
"""Test success/passing when Pylint score exceeds argument-specified minimum."""
sys_args = [None, "--fail_under", "8.5", "--disable", "duplicate-code", "my_pkg"]
with patch("sys.argv", sys_args):
with patch("pylint_fail_under.__main__.Run",
return_value=_MockPylintResults(9.3)) as pl_mock:
exit_code = main()
pl_mock.assert_called_once_with(
args=["--disable", "duplicate-code", "my_pkg"], do_exit=False)
assert exit_code == 0
def test_fail(self): # pylint: disable=no-self-use
"""Test failure when Pylint score is less than argument-specified minimum."""
sys_args = [None, "my_pkg", "--fail_under", "9.25"]
with patch("sys.argv", sys_args):
with patch("pylint_fail_under.__main__.Run",
return_value=_MockPylintResults(7.73)) as pl_mock:
exit_code = main()
pl_mock.assert_called_once_with(args=["my_pkg"], do_exit=False)
assert exit_code == 1
def test_default(self): # pylint: disable=no-self-use
"""Verify that a score of 10 is required if not specified as an argument."""
sys_args = [None, "my_pkg"]
with patch("sys.argv", sys_args):
with patch("pylint_fail_under.__main__.Run",
return_value=_MockPylintResults(10.0)):
exit_code = main()
assert exit_code == 0
with patch("pylint_fail_under.__main__.Run",
return_value=_MockPylintResults(9.9)) as pl_mock:
exit_code = main()
assert exit_code == 1
pl_mock.assert_called_once_with(
args=["my_pkg"], do_exit=False)
def test_no_score(self): # pylint: disable=no-self-use
"""Verify failure when no Pylint score is returned (i.e. fatal error)."""
sys_args = [None, "--disable", "trailing-whitespace", "bad_pkg", "--fail_under", "9.5"]
with patch("sys.argv", sys_args):
mock_pl_results = _MockPylintResults(9.3)
del mock_pl_results.linter.stats["global_note"]
with patch("pylint_fail_under.__main__.Run",
return_value=mock_pl_results) as pl_mock:
exit_code = main()
pl_mock.assert_called_once_with(
args=["--disable", "trailing-whitespace", "bad_pkg"], do_exit=False)
assert exit_code == 2
def test_help(self): # pylint: disable=no-self-use
"""Verify printing usage information to the console if no arguments specified."""
sys_args = [None]
with patch("sys.argv", sys_args):
_stdout = sys.stdout
sys.stdout = StringIO()
exit_code = main()
_printed = sys.stdout.getvalue()
sys.stdout = _stdout
assert "usage: pylint-fail-under [--fail_under SCORE] [Pylint Command Line Arguments]" in _printed
assert exit_code == 0
if __name__ == "__main__": # pragma: no cover
unittest.main()
|
"""
P(ssh = down | state = ok) = 0
P(ssh = up | state = ok) = 1
"""
""" Determine the state based on the indicators """
def analyze(status):
if not status['SSH'] and status['REASON'] == 'Not responding' and status['POWER'] == 'on':
return 'NODE_KILLED_IPMI_ON'
if not status['SSH'] and status['REASON'] == 'Not responding' and status['POWER'] == 'off':
return 'NODE_KILLED_IPMI_OFF'
if not status['SSH']:
return 'UNKNOWN'
# All of these are when SSH is working
if status['REASON'] == 'Not responding':
if len(status['USER_PROCESSES']):
return 'SLURM_FAILED_USER_PROCESSES_ALIVE'
else:
return 'SLURM_FAILED_NO_USER_PROCESSES'
if status['REASON'] == 'Node unexpectedly rebooted':
return 'NODE_WORKING'
if status['REASON'] == 'batch job complete failure' and status['OVERALL']:
return 'NODE_WORKING'
return 'UNKNOWN'
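# Hedged usage sketch (added for illustration; the status keys mirror the ones read
# above, the concrete values are made up):
if __name__ == '__main__':
    example_status = {
        'SSH': False,
        'REASON': 'Not responding',
        'POWER': 'on',
        'USER_PROCESSES': [],
        'OVERALL': False,
    }
    print(analyze(example_status))  # -> NODE_KILLED_IPMI_ON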
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import sqlite3
import sys
def process_options():
arg_parser = argparse.ArgumentParser(
description='Import contacts CSV into the specified database')
arg_parser.add_argument('--server', default='data')
args = arg_parser.parse_args()
return args
def database_connection(name):
db = sqlite3.connect(f'{name}.db', isolation_level='DEFERRED')
cursor = db.cursor()
# Check if the table is already present
cursor.execute('''
SELECT name FROM sqlite_master WHERE type='table' AND
name='employees'
''')
if cursor.fetchone() is None:
cursor.execute('''
CREATE TABLE employees
(name, address, country, phone, employee_no)
''')
return db
def import_data(database):
reader = csv.reader(sys.stdin)
cursor = database.cursor()
try:
for row in reader:
cursor.execute('''
INSERT INTO employees VALUES
(?, ?, ?, ?, ?)
''', row)
database.commit()
print("Import successful")
    except Exception as err:
        database.rollback()
        print("Import error: {}".format(err), file=sys.stderr)
def main():
options = process_options()
database = database_connection(options.server)
import_data(database)
return 0
if __name__ == "__main__":
sys.exit(main())
|
"""
Created By : Ubaidullah Effendi-Emjedi
LinkedIn :
Icon By : https://www.flaticon.com/authors/freepik
"""
import sys
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QAction, qApp, QFileDialog
from checksum import *
from pyqt_creator import *
class checksum_window(QMainWindow):
"""
Checksum Window Class.
"""
""" ORIENTATION """
X_POSITION = 200
Y_POSITION = 200
WIDTH = 640
HEIGHT = 480
""" TITLE """
TITLE = "Simple Checksum"
""" JSON FILE REFERENCE """
json_file = ""
""" HASH FUNCTION """
    __hash_type = hashlib.blake2b
""" COMMON FILE SIZE RANGES """
FILE_SIZES = {"32mb": 32.0, "64mb": 64.0, "128mb": 128.0, "256mb": 256.0, "512mb": 512.0, "1024mb": 1024.0,
"2048mb": 2048.0, "4096mb": 4096.0}
custom_file_size = 0
checksum_string = ""
checksum_data = []
""" CONSTRUCTOR """
def __init__(self):
super(checksum_window, self).__init__()
self.main_window_setup()
self.menu_bar()
self.main_window_layout_setup()
self.ui()
""" FUNCTIONS """
def main_window_setup(self):
"""
Adjust Main Window Parameters such as Geometry and Title etc
:return:
"""
self.setGeometry(self.X_POSITION, self.Y_POSITION, self.WIDTH, self.HEIGHT)
self.setWindowTitle(self.TITLE)
self.setWindowIcon(QIcon("checksum.ico"))
def menu_bar(self):
"""
Create a Menu Bar with Tabs and Actions
:return:
"""
# Create MenuBar
self.bar = self.menuBar()
# Create Status Bar
self.status_bar = self.statusBar()
self.status_bar.showMessage("Ready")
# Create Root Menus
file_menu = self.bar.addMenu("File")
self.hash_menu = self.bar.addMenu("Hash Type")
# Create Actions for Menus
open_action = QAction("Open", self)
open_action.setShortcut("Ctrl+O")
open_action.setStatusTip("Open a Checksum File")
save_action = QAction("Save", self)
save_action.setShortcut("Ctrl+S")
save_action.setStatusTip("Save Calculated Checksum File")
quit_action = QAction('Quit', self)
quit_action.setShortcut("Ctrl+Q")
quit_action.setStatusTip("Quit Application.")
self.blake2_hash_action = QAction("Blake2", self)
self.blake2_hash_action.setCheckable(True)
self.blake2_hash_action.setChecked(True)
self.blake2_hash_action.setStatusTip("Use Blake2 Hashing Algorithm.")
self.sha3_512_hash_action = QAction("SHA3_512", self)
self.sha3_512_hash_action.setCheckable(True)
self.sha3_512_hash_action.setStatusTip("Use SHA3_512 Hashing Algorithm.")
# Add Actions
file_menu.addAction(open_action)
file_menu.addAction(save_action)
file_menu.addAction(quit_action)
self.hash_menu.addAction(self.blake2_hash_action)
self.hash_menu.addAction(self.sha3_512_hash_action)
# Events
open_action.triggered.connect(self.open_file_dialog)
save_action.triggered.connect(lambda: self.save_file_dialog())
quit_action.triggered.connect(lambda: (qApp.quit(), print("Close Application!")))
self.blake2_hash_action.toggled.connect(lambda: self.__blake2_hash_action())
self.sha3_512_hash_action.toggled.connect(lambda: self.__sha3_512_hash_action())
def main_window_layout_setup(self):
"""
Main Window Layout Setup
:return:
"""
# Create a Central Widget.
self.central_widget = create_widget()
self.setCentralWidget(self.central_widget)
# Create UI Button Group
self.ui_group = QButtonGroup(self)
# Create Form Layout.
self.form_layout = QFormLayout()
def ui(self):
"""
Create UI Widgets and Layouts Functions.
Functions create Widgets and Layouts that are added to the Vertical Layout.
:return:
"""
# CUSTOM GUI
file_size_hlayout = self.choose_file_sizes_to_ignore()
self.log_file = self.checksum_log_file()
self.hash_button = self.calculate_hash_button()
self.compare_button = self.compare_hash_button()
# Create Vertical Layout
vertical_layout = QVBoxLayout(self.central_widget)
# Add Form and Horizontal Layout to Vertical Layout
vertical_layout.addLayout(self.form_layout)
vertical_layout.addLayout(file_size_hlayout)
# Add Widgets to Vertical Layout.
vertical_layout.addWidget(self.log_file)
vertical_layout.addWidget(self.hash_button)
vertical_layout.addWidget(self.compare_button)
# Set MainWindow Layout to Vertical Layout
self.setLayout(vertical_layout)
def choose_file_sizes_to_ignore(self):
"""
Choose a file Size to Ignore.
        Any files greater than the selected or custom file size in megabytes will be ignored.
:return: Horizontal Layout.
"""
# Create Check Box
self.skip_file_checkbox = create_check_box(
tooltip = "Skip Files that are bigger than the chosen or given size in megabytes.")
self.skip_file_checkbox.setStatusTip(
"Skip and Do not Calculate the Checksum of Files Greater than the Given or Provided Size in Megabytes.")
# Create Combo Box
self.combo_box = QComboBox()
self.combo_box.addItems(
["32mb", "64mb", "128mb", "256mb", "512mb", "1024mb", "2048mb", "4096mb", "Custom Size(MB)"])
self.combo_box.hide()
# Create Line Edit
self.file_size_line_edit = create_line_edit(hint = "Size in MB, i.e 128")
self.file_size_line_edit.hide()
# Set Skip File Checkbox Action
self.skip_file_checkbox.stateChanged.connect(
lambda: self.choose_file_size_active(self.skip_file_checkbox.isChecked()))
# Activate Line Edit
self.combo_box.activated[str].connect(
lambda: self.set_file_size_line_edit_active(self.combo_box.currentText()))
# Create Horizontal Layout.
horizontal_layout = create_horizontal_layout(self.skip_file_checkbox, self.combo_box,
self.file_size_line_edit)
horizontal_layout.addStretch()
# Add Horizontal Layout to Form Layout.
self.form_layout.addRow("Skip Files", horizontal_layout)
return horizontal_layout
def checksum_log_file(self):
"""
Create Checksum Log File
:return:
"""
# Create Line Edit Widget
log_file = create_text_edit()
log_file.setToolTip("Checksum Log Text")
log_file.setStatusTip(
"This is where the new Calculated Checksum Data or Opened Checksum File Data is Displayed.")
# Set to ReadOnly True
log_file.setReadOnly(True)
return log_file
def calculate_hash_button(self):
"""
Create Calculate Hash Button
:return:
"""
button = create_button(text = "Calculate Checksum of Files")
button.clicked.connect(lambda: self.calculate_checksum_data())
button.setStatusTip(
"Calculate the Checksum of all the files in the Directory where this Program is Placed. "
"Files in sub-directories are included.")
return button
def calculate_checksum_data(self):
"""
Calculate Checksum Data
:return:
"""
paths = get_path_to_all_files(
ignore_files = [os.path.basename(__file__), sys.argv[0], os.path.basename(sys.argv[0]), "checksum.py"])
print(f"Hash Type: {self.__hash_type}\n")
if self.skip_file_checkbox.isChecked():
# Get Custom File Size
self.custom_file_size = self.get_file_size(self.combo_box.currentText())
if self.blake2_hash_action.isChecked():
self.checksum_data = [(path, size_cap_checksum_blake2(path,
size_cap_in_mb = self.custom_file_size))
for path in paths]
else:
self.checksum_data = [
(
path,
size_cap_checksum(path, hash_type = self.__hash_type(), size_cap_in_mb = self.custom_file_size))
for path in paths]
else:
if self.blake2_hash_action.isChecked():
self.checksum_data = [(path, checksum_blake2(path)) for path in paths]
else:
self.checksum_data = [(path, checksum(path, hash_type = self.__hash_type())) for path in paths]
self.set_log_file_text(stringify_checksum_data_array(self.checksum_data))
def compare_hash_button(self):
"""
Create Compare Hash Button
:return: Button
"""
button = create_button(text = "Compare Checksum with Checksum File")
button.clicked.connect(lambda: self.compare_new_checksum_with_checksum_file())
button.setStatusTip(
"Compare the Checksum Data that was Calculated (as seen in the Log), "
"with previously saved Checksum data, saved in a Checksum Json File.")
return button
def compare_new_checksum_with_checksum_file(self):
"""
Compare Checksum with Saved Checksum Json
:return:
"""
if self.log_file.toPlainText() == "":
self.message = QMessageBox()
self.message.setWindowTitle("Warning")
self.message.setText("No Checksum Calculated!")
self.message.show()
else:
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
path, _ = QFileDialog.getOpenFileName(self, "Open Checksum File", "",
"Json (*.json)", options = options)
# If the Path is not Empty String
if path:
# Open the File
self.json_file = open(path, mode = "r")
# Store Checksum data
self.checksum_string = self.json_file.read()
self.compare_checksums(self.checksum_data, json.loads(self.checksum_string))
    def compare_checksums(self, checksum_array = None, checksum_dict = None):
        """
        Compare the new Computed Hash Values with the Existing Backup to check if any files were altered or newly found.
        :param checksum_array:
        :param checksum_dict:
        :return:
        """
        # Avoid mutable default arguments.
        checksum_array = checksum_array if checksum_array is not None else []
        checksum_dict = checksum_dict if checksum_dict is not None else {}
        checksum_string = ""
file_name = ""
for file_path, hash_value in checksum_array:
if file_path in checksum_dict.keys():
file_name = os.path.basename(file_path)
if hash_value.lower() == checksum_dict[file_path].lower():
checksum_string += f"[Match]\n{file_path}\n{file_name} : {hash_value} \n\n"
self.log_file.setText(checksum_string)
print(f"\nChecksum :\n{checksum_string}\n")
else:
checksum_string += f"[This is an altered File!]\n{file_path}\n{file_name} : {hash_value} \n\n"
print(f"\nChecksum :\n{checksum_string}\n")
self.log_file.setText(checksum_string)
else:
file_name = os.path.basename(file_path)
checksum_string += f"[This is a new File!]\n{file_path}\n{file_name} : {hash_value} \n\n"
print(f"\nChecksum :\n{checksum_string}\n")
self.log_file.setText(checksum_string)
return checksum_string
def open_file_dialog(self):
"""
Select and Open a Checksum File.
:return:
"""
# Create Dialog Options
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
# Save Path
path, _ = QFileDialog.getOpenFileName(self, "Open Checksum File", "",
"All Files (*);;Json (*.json)", options = options)
# If the Path is not Empty String
if path:
# Open the File
self.json_file = open(path, mode = "r")
# Store Checksum data
self.checksum_string = self.json_file.read()
self.checksum_data = checksum_data_dict_to_array(json.loads(self.checksum_string))
self.set_log_file_text(text = stringify_checksum_data_array(self.checksum_data))
print(f"\nChecksum Data :\n{self.checksum_data}")
def save_file_dialog(self):
"""
Save Checksum Data.
:return:
"""
# Create QFile Dialog Options
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
# Store the Path
path, _ = QFileDialog.getSaveFileName(self, "Save Checksum Data", "",
"All Files (*);;Json Files (*.json)", options = options)
# Save the Checksum Data if log_file is not an Empty String.
        if path and self.log_file.toPlainText() != "":
write_checksum_to_json(self.checksum_data, path = path)
else:
print("No Checksum Data to Save")
def set_log_file_text(self, text = ""):
"""
Set Log File Text
:param text: Checksum Data as String Format
:return:
"""
self.log_file.setFontPointSize(10)
self.log_file.setText(text)
def __change_hash_type(self, is_checked = False, hash_type = hashlib.blake2b):
"""
Change Hash Algorithm
:param is_checked: Checkbox is Checked Value
:param hash_type: Hash Algorithm to Use.
:return:
"""
if is_checked:
self.__hash_type = hash_type
def __blake2_hash_action(self):
"""
Blake2 Hash Action Event.
:return:
"""
if self.blake2_hash_action.isChecked():
self.sha3_512_hash_action.setChecked(False)
self.__hash_type = hashlib.blake2b
else:
self.sha3_512_hash_action.setChecked(True)
self.__hash_type = hashlib.sha3_512
def __sha3_512_hash_action(self):
"""
SHA3 512 Hash Action Event.
:return:
"""
if self.sha3_512_hash_action.isChecked():
self.blake2_hash_action.setChecked(False)
self.__hash_type = hashlib.sha3_512
else:
self.blake2_hash_action.setChecked(True)
self.__hash_type = hashlib.blake2b
def choose_file_size_active(self, is_checked = False):
"""
Activate File Size Combo Box if Skip File Size is Checked.
:param is_checked: Checkbox is Checked Value.
:return:
"""
if is_checked:
self.combo_box.show()
else:
self.file_size_line_edit.hide()
self.reset_combo_box(0)
def get_file_size(self, current_value = ""):
"""
Get the Correct File Size from FILE_SIZE Dictionary or User Input
:param current_value:
:return:
"""
try:
if current_value != "Custom Size(MB)":
return self.FILE_SIZES[current_value]
else:
return float(self.file_size_line_edit.text())
        except (ValueError, ArithmeticError) as error:
            print(f"Invalid file size input: {error}")
def set_file_size_line_edit_active(self, text = ""):
"""
Enter Custom File Size
:param text:
:return:
"""
if text == "Custom Size(MB)":
self.file_size_line_edit.show()
else:
self.file_size_line_edit.hide()
def reset_combo_box(self, index = -1):
"""
Reset Combo Box Options
:param
index: Option Index
:return:
"""
self.combo_box.hide()
self.combo_box.setCurrentIndex(index)
def window():
application = QApplication(sys.argv)
win = checksum_window()
win.show()
sys.exit(application.exec_())
if __name__ == "__main__":
window()
|
from freemium.tests import *
from freemium.tests import Session, metadata
class TestElixir(TestModel):
def setUp(self):
TestModel.setUp(self)
def test_metadata(self):
assert 'A collection of Tables and their associated schema constructs.' in metadata.__doc__
def test_session(self):
        assert Session.connection().dialect.name == 'sqlite'
|
import math
class Solution:
    def gcdOfStrings(self, str1: str, str2: str) -> str:
        # A common divisor string exists iff str1 + str2 == str2 + str1;
        # its largest possible length is gcd(len(str1), len(str2)).
        n = math.gcd(len(str1), len(str2))
        if str1 + str2 == str2 + str1:
            return str1[:n]
        return ''
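if __name__ == "__main__":
    # Illustration (added, not part of the original solution): gcd(6, 4) == 2 and
    # "ABABAB" + "ABAB" == "ABAB" + "ABABAB", so the common divisor is the prefix "AB".
    assert Solution().gcdOfStrings("ABABAB", "ABAB") == "AB"
    assert Solution().gcdOfStrings("LEET", "CODE") == ""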
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from azure.ai.ml.constants import AutoMLConstants
from azure.ai.ml._schema.automl.image_vertical.image_vertical import ImageVerticalSchema
from azure.ai.ml._schema.core.fields import StringTransformedEnum
from marshmallow import fields, post_load
from azure.ai.ml._schema import NestedField
from azure.ai.ml._schema.automl.image_vertical.image_model_distribution_settings import (
ImageModelDistributionSettingsObjectDetectionSchema,
)
from azure.ai.ml._schema.automl.image_vertical.image_model_settings import ImageModelSettingsObjectDetectionSchema
from azure.ai.ml._restclient.v2022_02_01_preview.models import (
InstanceSegmentationPrimaryMetrics,
ObjectDetectionPrimaryMetrics,
TaskType,
)
from azure.ai.ml._utils.utils import camel_to_snake
class ImageObjectDetectionBaseSchema(ImageVerticalSchema):
image_model = NestedField(ImageModelSettingsObjectDetectionSchema())
search_space = fields.List(NestedField(ImageModelDistributionSettingsObjectDetectionSchema()))
class ImageObjectDetectionSchema(ImageObjectDetectionBaseSchema):
task_type = StringTransformedEnum(
allowed_values=TaskType.IMAGE_OBJECT_DETECTION,
casing_transform=camel_to_snake,
data_key=AutoMLConstants.TASK_TYPE_YAML,
required=True,
)
primary_metric = StringTransformedEnum(
allowed_values=ObjectDetectionPrimaryMetrics.MEAN_AVERAGE_PRECISION,
casing_transform=camel_to_snake,
load_default=camel_to_snake(ObjectDetectionPrimaryMetrics.MEAN_AVERAGE_PRECISION),
)
@post_load
def make(self, data, **kwargs) -> "ImageObjectDetectionJob":
from azure.ai.ml.entities._job.automl.image import ImageObjectDetectionJob
data.pop("task_type")
loaded_data = data
search_space_val = data.pop("search_space")
search_space = ImageObjectDetectionJob._get_search_space_from_str(search_space_val)
data_settings = {
"training_data": loaded_data.pop("training_data"),
"target_column_name": loaded_data.pop("target_column_name"),
"validation_data": loaded_data.pop("validation_data", None),
"validation_data_size": loaded_data.pop("validation_data_size", None),
}
job = ImageObjectDetectionJob(search_space=search_space, **loaded_data)
job.set_data(**data_settings)
return job
class ImageInstanceSegmentationSchema(ImageObjectDetectionBaseSchema):
task_type = StringTransformedEnum(
allowed_values=TaskType.IMAGE_INSTANCE_SEGMENTATION,
casing_transform=camel_to_snake,
data_key=AutoMLConstants.TASK_TYPE_YAML,
required=True,
)
primary_metric = StringTransformedEnum(
allowed_values=[InstanceSegmentationPrimaryMetrics.MEAN_AVERAGE_PRECISION],
casing_transform=camel_to_snake,
load_default=camel_to_snake(InstanceSegmentationPrimaryMetrics.MEAN_AVERAGE_PRECISION),
)
@post_load
def make(self, data, **kwargs) -> "ImageInstanceSegmentationJob":
from azure.ai.ml.entities._job.automl.image import ImageInstanceSegmentationJob
data.pop("task_type")
loaded_data = data
search_space_val = data.pop("search_space")
search_space = ImageInstanceSegmentationJob._get_search_space_from_str(search_space_val)
data_settings = {
"training_data": loaded_data.pop("training_data"),
"target_column_name": loaded_data.pop("target_column_name"),
"validation_data": loaded_data.pop("validation_data", None),
"validation_data_size": loaded_data.pop("validation_data_size", None),
}
job = ImageInstanceSegmentationJob(search_space=search_space, **loaded_data)
job.set_data(**data_settings)
return job
|
from flask_appbuilder import Model
from sqlalchemy import Table, Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
from . import appbuilder, db
"""
You can use the extra Flask-AppBuilder fields and Mixin's
AuditMixin will add automatic timestamp of created and modified by who
"""
class Tournament(db.Model):
"""
A tournament is the top-level object
"""
id = db.Column(db.Integer, primary_key=True)
schedule_id = db.Column(db.Integer, db.ForeignKey('schedule.id'))
playoff_id = db.Column(db.Integer, db.ForeignKey('game.id'))
winner_id = db.Column(db.Integer, db.ForeignKey('player.id'))
title = db.Column(db.String(80), unique=True, nullable=False)
def __repr__(self):
return "<Tournament {}>".format(self.title)
class Player(db.Model):
"""
    The player class doesn't reference any other tables. It is linked
to other tables through join tables. But you can't jump from
player to anywhere else.
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=True)
affiliation = db.Column(db.String(120), unique=False, nullable=True)
level = db.Column(db.Integer, default=1)
vip = db.Column(db.Boolean, default=False)
def __repr__(self):
return "<Player {}>".format(self.name)
class Game(db.Model):
"""
The game class is the heart of the system. Many other
tables refer to it.
"""
id = db.Column(db.Integer, primary_key=True)
result = db.Column(db.Integer, default=0)
player_one_id = db.Column(db.Integer, db.ForeignKey('player.id'))
player_one_draw_id = db.Column(db.Integer, db.ForeignKey('draw.id'))
player_two_id = db.Column(db.Integer, db.ForeignKey('player.id'))
player_two_draw_id = db.Column(db.Integer, db.ForeignKey('draw.id'))
color_code = db.Column(db.Integer, default=0)
bye = db.Column(db.Boolean, default=False)
round_id = db.Column(db.Integer, db.ForeignKey('round.id'))
def __repr__(self):
return "<Game: {} vs {} Result: {} >".format(self.player_one_id, self.player_two_id, self.result)
class Schedule(db.Model):
"""
A schedule is just a collection of rounds
who have games
"""
tournament_id = db.Column(Integer, ForeignKey('tournament.id'))
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(80), unique=True, nullable=False)
rounds = db.relationship('Round', backref='schedule')
def __repr__(self):
return "<Schedule {}>".format(self.title)
class Round(db.Model):
"""
A Round is just a collection of games in a tournament
and it's kept simple
"""
id = db.Column(db.Integer, primary_key=True)
round_number = db.Column(db.Integer, default=1)
schedule_id = db.Column(db.Integer, db.ForeignKey('schedule.id'))
games = relationship("Game", backref='schedule')
def __repr__(self):
return "<Round: {} For Schedule {}>".format(self.round_number, self.schedule_id)
class Draw(db.Model):
"""
A draw is just a shortcut table that has
the player's games for a particular tournament
"""
id = db.Column(db.Integer, primary_key=True)
tournament_id = db.Column(db.Integer, db.ForeignKey('tournament.id'))
player_id = db.Column(db.Integer, db.ForeignKey('player.id'))
db.create_all()
|
#!/usr/bin/env python3
# Usage: python3 main.py
# Author: otonroad
# Date: Dec. 2020
# Version: 1.0
# License: MIT
# this is the main file of the project
# Importing the libraries
from translate import Translator
import os
import requests
from selenium import webdriver
from time import sleep
def get_html_snapshot(name):
    # this sends a request to the website and saves the html code to <name>.html
    # to see if the website is up and running
    try:
        filename = name + ".html"
        url = "https://www.google.com/"
        data = requests.get(url).text
        with open(filename, "w") as file:
            file.write(data)
        return filename
    except Exception:
        return None
def get_html_snapshot_selenium(name):
    # open a Google search for the word in a browser, accept the cookie
    # dialog and save a screenshot of the results page
    d = webdriver.Chrome()
    d.get("https://www.google.com/search?q=" + name)
    sleep(0.5)
    d.find_element_by_id("L2AGLb").click()
    filename = "screenshot.png"
    d.get_screenshot_as_file(filename)
    d.quit()
def getFile():
    translator = Translator(from_lang="es", to_lang="en")
    with open("spanishwords.txt", "r") as f:
        for line in f:
            word = line.strip()
            if not word:
                continue
            print(word)
            get_html_snapshot_selenium(word)
            print("word meaning: " + translator.translate(word))
try:
while True:
getFile()
except KeyboardInterrupt:
print("stopped ........") |
import django_filters
from graphene_django.filter import GlobalIDMultipleChoiceFilter
from ...shipping.models import ShippingZone
from ..channel.types import Channel
from ..core.types import FilterInputObjectType
from ..utils import resolve_global_ids_to_primary_keys
from ..utils.filters import filter_fields_containing_value
def filter_channels(qs, _, values):
if values:
_, channels_ids = resolve_global_ids_to_primary_keys(values, Channel)
qs = qs.filter(channels__id__in=channels_ids)
return qs
class ShippingZoneFilter(django_filters.FilterSet):
search = django_filters.CharFilter(method=filter_fields_containing_value("name"))
channels = GlobalIDMultipleChoiceFilter(method=filter_channels)
class Meta:
model = ShippingZone
fields = ["search"]
class ShippingZoneFilterInput(FilterInputObjectType):
class Meta:
filterset_class = ShippingZoneFilter
|
# Importing the required modules
from termcolor import colored
from .get import get
# The function to get your own balance
def balance(currency_module) -> None:
"""
    This function is used to get your own current balance, i.e., the number of tokens that you hold.
"""
details = get(currency_module)
print(colored(
f"The balance of current user is {currency_module.format_units(currency_module.balance(), details['decimals']) + ' ' + details['symbol']}.", 'green'))
# The function to get the balance of a specific address
def balance_of(currency_module, address) -> None:
"""
    This function is used to get someone else's current balance, i.e., the number of tokens that they hold.
"""
details = get(currency_module)
print(colored(
f"The balance of {address} is {currency_module.format_units(currency_module.balance_of(address), details['decimals']) + ' ' + details['symbol']}.", 'green'))
|
# coding: utf-8
"""
Hydrogen Integration API
The Hydrogen Integration API # noqa: E501
OpenAPI spec version: 1.3.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from integration_api.configuration import Configuration
class FinancialStatement(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'accounting_method': 'str',
'business_id': 'str',
'client_id': 'str',
'create_date': 'datetime',
'currency_code': 'str',
'id': 'str',
'metadata': 'dict(str, str)',
'period_length': 'str',
'period_month': 'int',
'period_quarter': 'int',
'period_type': 'str',
'period_year': 'int',
'secondary_id': 'str',
'statement_date': 'date',
'statement_type': 'str',
'stats': 'list[Stat]',
'update_date': 'datetime'
}
attribute_map = {
'accounting_method': 'accounting_method',
'business_id': 'business_id',
'client_id': 'client_id',
'create_date': 'create_date',
'currency_code': 'currency_code',
'id': 'id',
'metadata': 'metadata',
'period_length': 'period_length',
'period_month': 'period_month',
'period_quarter': 'period_quarter',
'period_type': 'period_type',
'period_year': 'period_year',
'secondary_id': 'secondary_id',
'statement_date': 'statement_date',
'statement_type': 'statement_type',
'stats': 'stats',
'update_date': 'update_date'
}
def __init__(self, accounting_method=None, business_id=None, client_id=None, create_date=None, currency_code=None, id=None, metadata=None, period_length=None, period_month=None, period_quarter=None, period_type=None, period_year=None, secondary_id=None, statement_date=None, statement_type=None, stats=None, update_date=None, _configuration=None): # noqa: E501
"""FinancialStatement - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._accounting_method = None
self._business_id = None
self._client_id = None
self._create_date = None
self._currency_code = None
self._id = None
self._metadata = None
self._period_length = None
self._period_month = None
self._period_quarter = None
self._period_type = None
self._period_year = None
self._secondary_id = None
self._statement_date = None
self._statement_type = None
self._stats = None
self._update_date = None
self.discriminator = None
self.accounting_method = accounting_method
if business_id is not None:
self.business_id = business_id
if client_id is not None:
self.client_id = client_id
if create_date is not None:
self.create_date = create_date
self.currency_code = currency_code
if id is not None:
self.id = id
if metadata is not None:
self.metadata = metadata
if period_length is not None:
self.period_length = period_length
if period_month is not None:
self.period_month = period_month
if period_quarter is not None:
self.period_quarter = period_quarter
if period_type is not None:
self.period_type = period_type
if period_year is not None:
self.period_year = period_year
if secondary_id is not None:
self.secondary_id = secondary_id
self.statement_date = statement_date
self.statement_type = statement_type
if stats is not None:
self.stats = stats
if update_date is not None:
self.update_date = update_date
@property
def accounting_method(self):
"""Gets the accounting_method of this FinancialStatement. # noqa: E501
accounting_method # noqa: E501
:return: The accounting_method of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._accounting_method
@accounting_method.setter
def accounting_method(self, accounting_method):
"""Sets the accounting_method of this FinancialStatement.
accounting_method # noqa: E501
:param accounting_method: The accounting_method of this FinancialStatement. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and accounting_method is None:
raise ValueError("Invalid value for `accounting_method`, must not be `None`") # noqa: E501
self._accounting_method = accounting_method
@property
def business_id(self):
"""Gets the business_id of this FinancialStatement. # noqa: E501
businessId # noqa: E501
:return: The business_id of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._business_id
@business_id.setter
def business_id(self, business_id):
"""Sets the business_id of this FinancialStatement.
businessId # noqa: E501
:param business_id: The business_id of this FinancialStatement. # noqa: E501
:type: str
"""
self._business_id = business_id
@property
def client_id(self):
"""Gets the client_id of this FinancialStatement. # noqa: E501
clientId # noqa: E501
:return: The client_id of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._client_id
@client_id.setter
def client_id(self, client_id):
"""Sets the client_id of this FinancialStatement.
clientId # noqa: E501
:param client_id: The client_id of this FinancialStatement. # noqa: E501
:type: str
"""
self._client_id = client_id
@property
def create_date(self):
"""Gets the create_date of this FinancialStatement. # noqa: E501
:return: The create_date of this FinancialStatement. # noqa: E501
:rtype: datetime
"""
return self._create_date
@create_date.setter
def create_date(self, create_date):
"""Sets the create_date of this FinancialStatement.
:param create_date: The create_date of this FinancialStatement. # noqa: E501
:type: datetime
"""
self._create_date = create_date
@property
def currency_code(self):
"""Gets the currency_code of this FinancialStatement. # noqa: E501
currencyCode # noqa: E501
:return: The currency_code of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this FinancialStatement.
currencyCode # noqa: E501
:param currency_code: The currency_code of this FinancialStatement. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and currency_code is None:
raise ValueError("Invalid value for `currency_code`, must not be `None`") # noqa: E501
self._currency_code = currency_code
@property
def id(self):
"""Gets the id of this FinancialStatement. # noqa: E501
:return: The id of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this FinancialStatement.
:param id: The id of this FinancialStatement. # noqa: E501
:type: str
"""
self._id = id
@property
def metadata(self):
"""Gets the metadata of this FinancialStatement. # noqa: E501
:return: The metadata of this FinancialStatement. # noqa: E501
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this FinancialStatement.
:param metadata: The metadata of this FinancialStatement. # noqa: E501
:type: dict(str, str)
"""
self._metadata = metadata
@property
def period_length(self):
"""Gets the period_length of this FinancialStatement. # noqa: E501
periodLength # noqa: E501
:return: The period_length of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._period_length
@period_length.setter
def period_length(self, period_length):
"""Sets the period_length of this FinancialStatement.
periodLength # noqa: E501
:param period_length: The period_length of this FinancialStatement. # noqa: E501
:type: str
"""
self._period_length = period_length
@property
def period_month(self):
"""Gets the period_month of this FinancialStatement. # noqa: E501
periodMonth # noqa: E501
:return: The period_month of this FinancialStatement. # noqa: E501
:rtype: int
"""
return self._period_month
@period_month.setter
def period_month(self, period_month):
"""Sets the period_month of this FinancialStatement.
periodMonth # noqa: E501
:param period_month: The period_month of this FinancialStatement. # noqa: E501
:type: int
"""
self._period_month = period_month
@property
def period_quarter(self):
"""Gets the period_quarter of this FinancialStatement. # noqa: E501
periodQuarter # noqa: E501
:return: The period_quarter of this FinancialStatement. # noqa: E501
:rtype: int
"""
return self._period_quarter
@period_quarter.setter
def period_quarter(self, period_quarter):
"""Sets the period_quarter of this FinancialStatement.
periodQuarter # noqa: E501
:param period_quarter: The period_quarter of this FinancialStatement. # noqa: E501
:type: int
"""
self._period_quarter = period_quarter
@property
def period_type(self):
"""Gets the period_type of this FinancialStatement. # noqa: E501
periodType # noqa: E501
:return: The period_type of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._period_type
@period_type.setter
def period_type(self, period_type):
"""Sets the period_type of this FinancialStatement.
periodType # noqa: E501
:param period_type: The period_type of this FinancialStatement. # noqa: E501
:type: str
"""
self._period_type = period_type
@property
def period_year(self):
"""Gets the period_year of this FinancialStatement. # noqa: E501
periodYear # noqa: E501
:return: The period_year of this FinancialStatement. # noqa: E501
:rtype: int
"""
return self._period_year
@period_year.setter
def period_year(self, period_year):
"""Sets the period_year of this FinancialStatement.
periodYear # noqa: E501
:param period_year: The period_year of this FinancialStatement. # noqa: E501
:type: int
"""
self._period_year = period_year
@property
def secondary_id(self):
"""Gets the secondary_id of this FinancialStatement. # noqa: E501
:return: The secondary_id of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._secondary_id
@secondary_id.setter
def secondary_id(self, secondary_id):
"""Sets the secondary_id of this FinancialStatement.
:param secondary_id: The secondary_id of this FinancialStatement. # noqa: E501
:type: str
"""
self._secondary_id = secondary_id
@property
def statement_date(self):
"""Gets the statement_date of this FinancialStatement. # noqa: E501
statementDate # noqa: E501
:return: The statement_date of this FinancialStatement. # noqa: E501
:rtype: date
"""
return self._statement_date
@statement_date.setter
def statement_date(self, statement_date):
"""Sets the statement_date of this FinancialStatement.
statementDate # noqa: E501
:param statement_date: The statement_date of this FinancialStatement. # noqa: E501
:type: date
"""
if self._configuration.client_side_validation and statement_date is None:
raise ValueError("Invalid value for `statement_date`, must not be `None`") # noqa: E501
self._statement_date = statement_date
@property
def statement_type(self):
"""Gets the statement_type of this FinancialStatement. # noqa: E501
statement_type # noqa: E501
:return: The statement_type of this FinancialStatement. # noqa: E501
:rtype: str
"""
return self._statement_type
@statement_type.setter
def statement_type(self, statement_type):
"""Sets the statement_type of this FinancialStatement.
statement_type # noqa: E501
:param statement_type: The statement_type of this FinancialStatement. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and statement_type is None:
raise ValueError("Invalid value for `statement_type`, must not be `None`") # noqa: E501
self._statement_type = statement_type
@property
def stats(self):
"""Gets the stats of this FinancialStatement. # noqa: E501
stats # noqa: E501
:return: The stats of this FinancialStatement. # noqa: E501
:rtype: list[Stat]
"""
return self._stats
@stats.setter
def stats(self, stats):
"""Sets the stats of this FinancialStatement.
stats # noqa: E501
:param stats: The stats of this FinancialStatement. # noqa: E501
:type: list[Stat]
"""
self._stats = stats
@property
def update_date(self):
"""Gets the update_date of this FinancialStatement. # noqa: E501
:return: The update_date of this FinancialStatement. # noqa: E501
:rtype: datetime
"""
return self._update_date
@update_date.setter
def update_date(self, update_date):
"""Sets the update_date of this FinancialStatement.
:param update_date: The update_date of this FinancialStatement. # noqa: E501
:type: datetime
"""
self._update_date = update_date
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FinancialStatement, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FinancialStatement):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FinancialStatement):
return True
return self.to_dict() != other.to_dict()
|
import os
import sys
import glob
import cc3d
import pandas as pd
import numpy as np
import itertools
sys.path.insert(0, os.path.abspath('../'))
from core.metrics import numpy_soft_dice_coeff
from core.generator import get_onehot_labelmap, get_unique_patient_finger_list
from contextlib import redirect_stdout
from cnn.convert import get_cc_map
def eval_settings(prediction_type):
if prediction_type == 'hnh':
labels = (0, 4)
hnh = True
rcnn = False
header = ["Healthy", "Non-healthy"]
elif prediction_type == 'fm':
labels = (0, 3)
hnh = False
rcnn = False
header = ["Background", "Erosion"]
elif prediction_type in ['tuber', 'resnet', 'vggnet']:
labels = (0, 2, 3,)
hnh = False
rcnn = True
header = ["Background", "Cyst", "Erosion"]
else:
labels = (0, 1, 2, 3)
hnh = False
rcnn = False
header = ["Background", "Bone", "Cyst", "Erosion"]
return labels, header, hnh, rcnn
def eval_dice(labels, truth, truth_onehot, prediction_onehot):
# Find dice_scores
dice = numpy_soft_dice_coeff(truth_onehot, prediction_onehot)
# Ensure that empty classes are not counted towards scores
ignored_classes = np.setxor1d(np.unique(truth), labels)
for label in ignored_classes:
if (len(labels) == 1) and (label in labels):
dice[1] = None
else:
dice[np.where(labels == label)] = None
return dice
def eval_quantification(quant_scores, y_true, y_pred, axis=(-3, -2, -1)):
# Calc positives
true_positive = np.sum(y_true * y_pred, axis=axis)
false_negative = np.sum(y_true, axis=axis) - true_positive
# Find inverse
y_true_inv = np.logical_not(y_true).astype(np.float64)
y_pred_inv = np.logical_not(y_pred).astype(np.float64)
# Calc negatives
true_negative = np.sum(y_true_inv * y_pred_inv, axis=axis)
false_positive = np.sum(y_true_inv, axis=axis) - true_negative
# Update quant_scores
quant_scores[0] += true_positive
quant_scores[1] += false_negative
quant_scores[2] += true_negative
quant_scores[3] += false_positive
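# A minimal worked sketch of the accumulation above (the toy masks are
# illustrative only, not part of the evaluation pipeline):
#
#     quant_scores = [0, 0, 0, 0]                     # tp, fn, tn, fp
#     y_true = np.array([[1., 1., 0., 0.]])
#     y_pred = np.array([[1., 0., 1., 0.]])
#     eval_quantification(quant_scores, y_true, y_pred, axis=(-1,))
#     # each entry is now a per-class array equal to [1.]:
#     # one TP (index 0), one FN (index 1), one TN (index 3), one FP (index 2)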
def eval_detection(y_true, y_pred):
"""
lav en liste over alle CC indexes
for hver true-CC, se om nogen af pred-CC har samme indexes
TP hvis ja
FN hvis nej
FP = for hver pred-CC, se om der er ingen af true-CC der har samme indexes
"""
n_skips = detection_n_skips(y_true.shape[0])
if n_skips == -1:
print("ERROR! UNKNOWN NUMBER OF SKIPS, DETECTION SCORE NOT EVALUATED.")
        return 0, 0, 0, []  # keep the arity consistent with the normal return below
# For detection
d_true_positive = np.zeros(y_true.shape[0] - n_skips) # we do not test cc on background
d_false_negative = np.zeros(y_true.shape[0] - n_skips)
d_false_positive = np.zeros(y_true.shape[0] - n_skips)
# For detection segmentation
detect_dice_scores = [[] for _ in range(y_true.shape[0] - n_skips)]
# s_true_positive = np.zeros(y_true.shape[0] - n_skips)
# s_true_negative = np.zeros(y_true.shape[0] - n_skips)
# s_false_negative = np.zeros(y_true.shape[0] - n_skips)
# s_false_positive = np.zeros(y_true.shape[0] - n_skips)
for i in range(n_skips, y_true.shape[0]):
true_cc = get_cc_map(y_true[i])
pred_cc = get_cc_map(y_pred[i])
# find TP and FN
for tlabel, tcc in cc3d.each(true_cc, binary=True, in_place=True):
intersect = pred_cc[tcc > 0]
if np.count_nonzero(intersect):
d_true_positive[i - n_skips] += 1
## Find detected segmentation accuracy ##
intersecting_regions = np.zeros_like(tcc)
# Find all regions that overlaps with the truth
for plabel, pcc in cc3d.each(pred_cc, binary=True, in_place=True):
tmp_intersect = pcc[tcc > 0]
if np.count_nonzero(tmp_intersect):
intersecting_regions += pcc
# Calculate detected dice score
# print(np.count_nonzero(intersecting_regions))
tmp_quant_scores = [0, 0, 0, 0]
eval_quantification(tmp_quant_scores, tcc, intersecting_regions)
s_true_positive = tmp_quant_scores[0]
s_false_negative = tmp_quant_scores[1]
s_false_positive = tmp_quant_scores[3]
dice = (2 * s_true_positive) / (2 * s_true_positive + s_false_positive + s_false_negative)
detect_dice_scores[i - n_skips].append(dice)
else:
d_false_negative[i - n_skips] += 1
# find FP
for label, cc in cc3d.each(pred_cc, binary=True, in_place=True):
intersect = true_cc[cc > 0]
if np.count_nonzero(intersect) == 0:
d_false_positive[i - n_skips] += 1
# # Update detection_scores
# detection_scores[0] += true_positive
# detection_scores[1] += false_negative
# detection_scores[2] += false_positive
return d_true_positive, d_false_negative, d_false_positive, detect_dice_scores
def eval_reliability(detection_dict, subject_ids):
    # Find the number of classes that have been detected:
n_classes = len(next(iter(detection_dict.values()))[0])
# Make list of fingers with more than one scan
unique_list = get_unique_patient_finger_list(None, subject_ids)
consecutive_list = [x for x in unique_list if len(x) > 1]
# Find erosion count increase for every pair
increase_list = list() # [0] = first, [1] = second, [2] = increase
for finger_scans in consecutive_list:
for i in range(1, len(finger_scans)):
first_subject = subject_ids[finger_scans[i - 1]]
second_subject = subject_ids[finger_scans[i]]
increment_tp = detection_dict[first_subject][0] - detection_dict[second_subject][0]
increment_fp = detection_dict[first_subject][1] - detection_dict[second_subject][1]
increment_tot = increment_tp # + increment_fp
increase_list.append([first_subject, second_subject, increment_tot])
# Sort in positive and negative
increase_list = np.array(increase_list)
zero_or_positive = list()
negative = list()
n_positive = list()
n_negative = list()
var_positive = list()
var_negative = list()
for i in range(n_classes):
zero_or_positive.append(increase_list[np.stack(increase_list[:, 2])[:, i] >= 0])
negative.append(increase_list[np.stack(increase_list[:, 2])[:, i] < 0])
# Count N_positive and N_negative
n_positive.append(len(zero_or_positive[i]))
n_negative.append(len(negative[i]))
# Compute variance of N_positive and N_negative
try:
var_positive.append(np.var(np.stack(zero_or_positive[i][:, 2])[:, i]))
except IndexError:
var_positive.append(0.0)
try:
var_negative.append(np.var(np.stack(negative[i][:, 2])[:, i]))
except IndexError:
var_negative.append(0.0)
return (n_positive, n_negative, var_positive, var_negative)
def detection_n_skips(n_labels):
if n_labels == 4: # skip background and bone
n_skips = 2
elif n_labels == 3: # skip background
n_skips = 1
elif n_labels == 2: # skip background
n_skips = 1
else:
n_skips = -1
return n_skips
def save_dice(dice_coeffs, header, subject_ids, output_path, prediction_type):
# Expand header
new_header = header.copy()
for i in range(len(header)):
new_header[i] = 'DICE_' + new_header[i]
# Save dice coefficients
    dice_dataframe = pd.DataFrame.from_records(dice_coeffs, columns=new_header, index=subject_ids)
dice_score_path = os.path.join(output_path, prediction_type + "_dice_scores.csv")
dice_dataframe.to_csv(dice_score_path, float_format='%.4f', na_rep='nan')
# Save dice summary
summary_path = os.path.join(output_path, prediction_type + '_dice_summary.txt')
pd.options.display.float_format = '{:,.4f}'.format
with open(summary_path, 'w') as f:
with redirect_stdout(f):
# Print out median and max values of all classes.
print("Ncols: {}".format(dice_dataframe.shape[0]))
print("Max scores:")
max_score = dice_dataframe.max(axis=0).rename('max_score')
max_index = dice_dataframe.idxmax(axis=0).rename('max_index')
print(pd.concat([max_score, max_index], axis=1))
print()
print("Median scores:")
median_score = dice_dataframe.median(axis=0).rename('median_score')
print(median_score)
print()
print("Average of individual scores:")
average_score = dice_dataframe.mean(axis=0).rename('average_score')
print(average_score)
print()
print("Count of non-NAN values:")
print(dice_dataframe.count())
print()
print("Count of non-zero values:")
print(dice_dataframe.fillna(0).astype(bool).sum(axis=0))
def save_detection(detection_scores, header, output_path, prediction_type):
n_skips = detection_n_skips(len(header))
if n_skips == -1:
print("ERROR! UNKNOWN NUMBER OF SKIPS, DETECTION SCORE NOT EVALUATED.")
return detection_scores
# Change to names we understand
true_positive = detection_scores[0]
false_negative = detection_scores[1]
false_positive = detection_scores[2]
# Calculate stats
sensitivity = true_positive / (true_positive + false_negative)
ppv = true_positive / (true_positive + false_positive)
# Expand header
n_stats = 5
new_header = [''] * (len(header) - n_skips) * n_stats # we do not save stats for background
for i in range(len(header) - n_skips):
new_header[i + 0 * (len(header) - n_skips)] = 'TP_' + header[i + n_skips]
new_header[i + 1 * (len(header) - n_skips)] = 'FN_' + header[i + n_skips]
new_header[i + 2 * (len(header) - n_skips)] = 'FP_' + header[i + n_skips]
new_header[i + 3 * (len(header) - n_skips)] = 'TPR_' + header[i + n_skips]
new_header[i + 4 * (len(header) - n_skips)] = 'PPV_' + header[i + n_skips]
# Save values
dataframe = pd.DataFrame.from_records(
np.concatenate([true_positive, false_negative, false_positive, sensitivity, ppv]).reshape([len(new_header), 1]),
index=new_header)
dataframe_path = os.path.join(output_path, prediction_type + "_detection_scores.csv")
dataframe.to_csv(dataframe_path, float_format='%.4f', na_rep='nan', header=False)
def save_quantification(quant_scores, header, output_path, prediction_type):
# Change to names we understand
true_positive = quant_scores[0]
false_negative = quant_scores[1]
true_negative = quant_scores[2]
false_positive = quant_scores[3]
# Calculate stats
sensitivity = true_positive / (true_positive + false_negative)
specificity = true_negative / (true_negative + false_positive)
ppv = true_positive / (true_positive + false_positive)
dice = (2 * true_positive) / (2 * true_positive + false_positive + false_negative)
# Expand header
n_stats = 4
new_header = [''] * len(header) * n_stats
for i in range(len(header)):
new_header[i] = 'TPR_' + header[i]
new_header[i + 1 * len(header)] = 'TNR_' + header[i]
new_header[i + 2 * len(header)] = 'PPV_' + header[i]
new_header[i + 3 * len(header)] = 'DICE_' + header[i]
# Save values
dataframe = pd.DataFrame.from_records(
np.concatenate([sensitivity, specificity, ppv, dice]).reshape([len(new_header), 1]),
index=new_header)
dataframe_path = os.path.join(output_path, prediction_type + "_quantification_scores.csv")
dataframe.to_csv(dataframe_path, float_format='%.4f', na_rep='nan', header=False)
def save_reliability(reliability_stats, header, output_path, prediction_type):
n_skips = detection_n_skips(len(header))
if n_skips == -1:
print("ERROR! UNKNOWN NUMBER OF SKIPS, RELIABILITY SCORE NOT EVALUATED.")
return reliability_stats
# Change to names we understand
n_positive = np.array(reliability_stats[0])
n_negative = np.array(reliability_stats[1])
var_positive = np.array(reliability_stats[2])
var_negative = np.array(reliability_stats[3])
# Calculate stats
pos_ratio = n_positive / (n_positive + n_negative)
# Expand header
n_stats = 5
new_header = [''] * (len(header) - n_skips) * n_stats # we do not save stats for background
for i in range(len(header) - n_skips):
new_header[i + 0 * (len(header) - n_skips)] = 'n_positive' + '_' + header[i + n_skips]
new_header[i + 1 * (len(header) - n_skips)] = 'n_negative' + '_' + header[i + n_skips]
new_header[i + 2 * (len(header) - n_skips)] = 'var_positive' + '_' + header[i + n_skips]
new_header[i + 3 * (len(header) - n_skips)] = 'var_negative' + '_' + header[i + n_skips]
new_header[i + 4 * (len(header) - n_skips)] = 'pos_ratio' + '_' + header[i + n_skips]
# Save values
dataframe = pd.DataFrame.from_records(
np.concatenate([n_positive, n_negative, var_positive, var_negative, pos_ratio]).reshape([len(new_header), 1]),
index=new_header)
dataframe_path = os.path.join(output_path, prediction_type + "_reliability_scores.csv")
dataframe.to_csv(dataframe_path, float_format='%.4f', na_rep='nan', header=False)
def save_detected_seg(detected_dice_scores, header, output_path, prediction_type):
n_skips = detection_n_skips(len(header))
if n_skips == -1:
print("ERROR! UNKNOWN NUMBER OF SKIPS, DETECTION SCORE NOT EVALUATED.")
return detected_dice_scores
new_header = header.copy()
new_header = new_header[n_skips:]
for i in range(len(new_header)):
new_header[i] = 'DICE_' + new_header[i]
# Save dice coefficients
zip_tuple = (_ for _ in itertools.zip_longest(*detected_dice_scores))
dice_dataframe = pd.DataFrame.from_records(zip_tuple, columns=new_header)
dataframe_path = os.path.join(output_path, prediction_type + "_dq_scores.csv")
dice_dataframe.to_csv(dataframe_path, float_format='%.4f', na_rep='nan')
# Save dice summary
summary_path = os.path.join(output_path, prediction_type + '_dq_summary.txt')
pd.options.display.float_format = '{:,.4f}'.format
with open(summary_path, 'w') as f:
with redirect_stdout(f):
# Print out median and max values of all classes.
print("Ncols: {}".format(dice_dataframe.shape[0]))
print("Max scores:")
max_score = dice_dataframe.max(axis=0).rename('max_score')
max_index = dice_dataframe.idxmax(axis=0).rename('max_index')
print(pd.concat([max_score, max_index], axis=1))
print()
print("Min scores:")
min_score = dice_dataframe.min(axis=0).rename('min_score')
min_index = dice_dataframe.idxmin(axis=0).rename('min_index')
print(pd.concat([min_score, min_index], axis=1))
print()
print("Median scores:")
median_score = dice_dataframe.median(axis=0).rename('median_score')
print(median_score)
print()
print("Average of individual scores:")
average_score = dice_dataframe.mean(axis=0).rename('average_score')
print(average_score)
print()
print("Count of non-NAN values:")
print(dice_dataframe.count())
print()
print("Count of non-zero values:")
print(dice_dataframe.fillna(0).astype(bool).sum(axis=0))
def evaluator(prediction_type, prediction_path, output_path, resize_shape, quantification, detection):
# Find all folders inside prediction folder
glob_list = glob.glob(os.path.join(prediction_path, '*'))
folders = list()
for path in glob_list:
if os.path.isdir(path):
folders.append(path)
folders.sort()
print("Evaluating scores for {} cases".format(len(folders)))
# Decide on run settings
labels, header, hnh, rcnn = eval_settings(prediction_type)
# Computation loop
# folders = folders[0:2] # debug
counter = 1
dice_scores = list()
quantification_scores = [0] * 4 # tp, fn, tn, fp
detected_dice_scores = list()
detection_scores = [0] * 3 # tp, fn, fp
detection_dict = dict()
subject_ids = list()
for case_folder in folders:
        # Add subject to output list
subject = os.path.basename(case_folder)
subject_ids.append(subject)
# Get truth data
        if hnh:  # healthy / non-healthy
truth_path = glob.glob(os.path.join(case_folder, '*_hnh.nii.gz'))[0]
else:
truth_path = glob.glob(os.path.join(case_folder, "*_truth.nii.gz"))[0]
truth_onehot, truth = get_onehot_labelmap(truth_path, labels, resize_shape)
# Get prediction data
        if rcnn:  # rcnn predictions
prediction_path = glob.glob(os.path.join(case_folder, '*_' + prediction_type + '.nii.gz'))[0]
else:
prediction_path = glob.glob(os.path.join(case_folder, '*_prediction.nii.gz'))[0]
prediction_onehot, _ = get_onehot_labelmap(prediction_path, labels, resize_shape)
# Evaluate
if quantification:
dice_scores.append(eval_dice(labels, truth, truth_onehot, prediction_onehot))
eval_quantification(quantification_scores, truth_onehot, prediction_onehot)
if detection:
dtp, dfn, dfp, detect_ds = eval_detection(truth_onehot, prediction_onehot)
detection_scores[0] += dtp
detection_scores[1] += dfn
detection_scores[2] += dfp
detection_dict[subject] = [dtp, dfn, dfp]
detected_dice_scores.append(detect_ds)
# Status
print("Index: {}, Subject: {}".format(counter, subject))
counter += 1
    # Post-evaluation steps that only apply when detection was run
    if detection:
        # Post evaluate reliability
        reliability_stats = eval_reliability(detection_dict, subject_ids)
        # Format detected_dice_scores
        clean_detected_dice_scores = list()
        for i in range(len(detected_dice_scores[0])):
            clean_detected_dice_scores.append(np.concatenate(np.array(detected_dice_scores).T[i].flatten()))
# Save files
if quantification:
save_dice(dice_scores, header, subject_ids, output_path, prediction_type)
save_quantification(quantification_scores, header, output_path, prediction_type)
if detection:
save_detection(detection_scores, header, output_path, prediction_type)
save_reliability(reliability_stats, header, output_path, prediction_type)
save_detected_seg(clean_detected_dice_scores, header, output_path, prediction_type)
if __name__ == "__main__":
# Define paths
# prediction_path = r'C:\school\RnD\trash'
# prediction_path = r'C:\school\RnD\server_data\210502_1026_exp_04_HER\config_2_predictions'
prediction_path = r'C:\school\RnD\server_data\210511_1345_rcnn_B3_C15\combined_predictions'
output_path = prediction_path
# output_path = r'C:\school\RnD'
prediction_type = 'tuber' # Prediction type
resize_shape = (128, 128, 128) # Resize to this shape
detection = True # whether or not to include detection
quantification = True # whether or not to include quantification and dice
print("prediction_path:", prediction_path)
print("output_path:", output_path)
print("prediction_type:", prediction_type)
print("data_shape:", resize_shape)
print("quantification:", quantification)
print("detection:", detection)
# Run stats script
evaluator(prediction_type, prediction_path, output_path, resize_shape, quantification, detection)
|
# Reference to https://github.com/AntonKueltz/fastecdsa
from binascii import hexlify
from os import urandom
from typing import Callable, Tuple
from cipher.curve import Curve, Point
def gen_keypair(curve: Curve, randfunc: Callable = None) -> Tuple[int, Point]:
randfunc = randfunc or urandom
private_key = gen_private_key(curve, randfunc)
public_key = get_public_key(private_key, curve)
return private_key, public_key
def gen_private_key(curve: Curve, randfunc: Callable = None) -> int:
order_bits = 0
order = curve.n
while order > 0:
order >>= 1
order_bits += 1
order_bytes = (order_bits + 7) // 8
extra_bits = order_bytes * 8 - order_bits
rand = int(hexlify(randfunc(order_bytes)), 16)
rand >>= extra_bits
while rand >= curve.n:
rand = int(hexlify(randfunc(order_bytes)), 16)
rand >>= extra_bits
return rand
def get_public_key(d: int, curve: Curve) -> Point:
return d * curve.G
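# Minimal usage sketch (assumption: `curve` is a Curve instance supplied by the
# cipher.curve package; its constructor arguments are not shown in this module):
#
#     d, Q = gen_keypair(curve)
#     assert d < curve.n
#     assert Q == d * curve.G   # the public key is the private scalar times the base point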
|
import unittest
from django.test import TestCase
from book.models import Book
from model_mommy import mommy
from .random_string_generator import random_string_generator
from .unique_slug_field_generator import unique_slug_generator
class TestStringMethods(TestCase):
def test_random_string_generator_with_default_length(self):
        random_string = random_string_generator()
        actual = len(random_string)
        expected = 10
        self.assertEqual(actual, expected)
def test_random_string_generator_with_custom_length(self):
        random_string = random_string_generator(size=20)
        actual = len(random_string)
        expected = 20
        self.assertEqual(actual, expected)
def test_unique_slug_generator(self):
model = mommy.make(Book)
old_slug = model.slug
new_slug = unique_slug_generator(model)
self.assertNotEqual(old_slug, new_slug)
|
"""Endpoint for submitting new tests and viewing results."""
import base64
import json
import logging
import os
import urllib
from ..shared import http, decorators, mongo, users, core, common
from . import validators
import azure.functions as func
from bson import ObjectId
import cerberus
from jose import jws
@decorators.login_required
@decorators.validate_parameters(route_settings=validators.ROUTE_SETTINGS)
def get_test(req, user: users.User, test_id, queue=None):
user_param = None
if not user.is_admin:
user_param = user.email
result = mongo.MongoTests.get_test(test_id, user=user_param)
if not result:
logging.info("Not found result")
return http.response_not_found()
return http.response_ok(result)
@decorators.login_required
@decorators.validate_parameters(query_settings=validators.QUERY_SETTINGS)
def list_tests(
req: func.HttpRequest,
user: users.User,
email=None,
submission_id=None,
case_id=None,
task_id=None,
):
"""List all tests, that were run."""
if not user.is_admin and (not email or email != user.email):
return http.response_forbidden()
# If user is really filtering only his submissions, we don't need to
# validate other parameters (i.e. submission id)
result = mongo.MongoTests().list_tests(
email=email, submission_id=submission_id, case_id=case_id, task_id=task_id
)
return http.response_ok(list(result))
def get_dispatch(req: func.HttpRequest, queue=None):
"""Dispatch to concrete implementation, based on route."""
has_id = req.route_params.get("id", None)
if has_id:
return get_test(req) # pylint:disable=no-value-for-parameter
return list_tests(req) # pylint:disable=no-value-for-parameter
@decorators.login_required
def post_test(req: func.HttpRequest, user: users.User, queue=None):
body = http.get_json(req, validators.POST_SCHEMA)
roles = None
if not user.is_admin:
roles = user.roles or []
if body is None:
return http.response_client_error()
if not (self_url := http.get_host_url(req)):
logging.error("Cannot build host url in post test.")
return http.response_server_error()
# First let's check if test case exists
test_case_id = ObjectId(body[validators.SCHEMA_CASE_ID])
test_case = mongo.MongoTestCases.get_case(case_id=test_case_id, roles=roles)
if test_case is None:
return http.response_not_found()
    # Test case was found, so the user has the right to execute it. We are not going
    # to check whether this test case belongs to this concrete submission; we don't care.
if test_case.does_count and not user.is_admin:
count = mongo.MongoTests.count_tests(case_id=test_case_id, user=user.email)
if count >= test_case.runs_allowed:
return http.response_payment()
    # This run is allowed: either it does not count, or we counted the number of
    # runs and increased the count on the submission. This is basically a race on
    # parallel requests, but even if successfully exploited, the gain is roughly
    # one extra test run, so who cares.
submission_id = ObjectId(body[validators.SCHEMA_SUBMISSION_ID])
user_param = None
if not user.is_admin:
user_param = user.email
submission = mongo.MongoSubmissions.get_submission(
submission_id=submission_id, user=user_param
)
if not submission:
return http.response_not_found()
if user.email == submission.user: # Let's mark submission as test run
mongo.MongoSubmissions.increment_test(submission_id)
result = mongo.MongoTests.create_test(
user=user.email,
submission_id=submission_id,
case_id=test_case_id,
task_name=submission.task_name,
case_name=test_case.name,
task_id=submission.task_id,
)
if not result:
return http.response_server_error()
this_test_url = urllib.parse.urljoin(self_url, f'/api/tests/{str(result._id)}')
notification = common.encode_message(this_test_url, test_case_id, submission_id)
queue.set(notification)
return http.response_ok(result, code=201)
@decorators.validate_parameters(route_settings=validators.ROUTE_SETTINGS)
def patch_handler(request: func.HttpRequest, queue=None, test_id=None) -> func.HttpResponse:
"""Handle update to test result.
This function will be called by background test machines. Authentication
will be done via JOSE, as these machines don't have social network accounts.
"""
...
try:
if test_id is None:
return http.response_client_error()
body = request.get_body()
secret_key64 = os.environ[common.ENV_QUEUE_SECRET]
decoded_key = base64.decodebytes(secret_key64.encode('utf-8'))
query = json.loads(
jws.verify(
body.decode('utf-8'), decoded_key.decode('utf-8'), 'HS256'
)
)
validator = cerberus.Validator(
validators.PATCH_SCHEMA, allow_unknown=False, require_all=True
)
if not validator.validate(query):
logging.error("Bad json in test update %s", validator.errors)
return http.response_client_error()
document = validator.document
update_result = mongo.MongoTests.update_test(
test_id,
document[validators.SCHEMA_TEST_DESCRIPTION] or "<empty>",
)
if not update_result:
logging.error("On patch test, update db fail (wrong id?)")
return http.response_client_error()
return http.response_ok(None, code=204)
except Exception as error:
logging.error("Unknown error in test patch %s", error)
return http.response_client_error()
def main(req: func.HttpRequest, queue: func.Out[str]) -> func.HttpResponse: # type: ignore
"""Entry point. Dispatch request based on method."""
dispatch = http.dispatcher(get=get_dispatch, post=post_test, patch=patch_handler)
return dispatch(req, queue=queue)
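# Hedged sketch of how a test worker might produce the signed PATCH body that
# patch_handler() verifies above. common.ENV_QUEUE_SECRET and
# validators.SCHEMA_TEST_DESCRIPTION come from this codebase; the description
# value and the target URL are illustrative:
#
#     secret64 = os.environ[common.ENV_QUEUE_SECRET]
#     secret = base64.decodebytes(secret64.encode('utf-8')).decode('utf-8')
#     payload = {validators.SCHEMA_TEST_DESCRIPTION: "all cases passed"}
#     signed_body = jws.sign(payload, secret, algorithm='HS256')
#     # PATCH /api/tests/<test_id> with signed_body as the raw request body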
|
import numpy as np  # handles the multi-dimensional arrays that make up the image
import cv2
img = cv2.imread('scene_jpg.jpg', 1)  # read the image in colour (flag 1; 0 would read it as grayscale)
cv2.imshow('Original', img)  # display the image in a window
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# ******************************************************************************
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
# ******************************************************************************
import argparse
import os
def default_dataset_dir(dataset_dirs):
    for candidate in dataset_dirs:
        if os.path.exists(candidate):
            return candidate
return None
def check_path(label, dir, default_dirs):
if not dir:
print("{} dir not defined and neither default location was found:\n{}".format(label, default_dirs))
return False
elif not os.path.exists(dir):
print("{} path does not exist:\n{}".format(label, dir))
return False
else:
return True
def common_args(custom_args_fun = None, custom_bf16_fun = None, bf16_val = '1', dataset_dirs = None):
parser = argparse.ArgumentParser()
parser.add_argument("--data_type", "-dt", choices=['fp32', 'bf16'], default='bf16')
parser.add_argument("--batch_size", "-bs", type=int)
parser.add_argument("--num_epochs", "-ep", type=int)
parser.add_argument("--num_steps", "-st", type=int)
parser.add_argument("--log_every_n_steps", "-ls", type=int)
parser.add_argument("--cp_every_n_steps", "-cs", type=int)
parser.add_argument("--dataset_dir", "-dd", type=str,
default=default_dataset_dir(dataset_dirs if dataset_dirs else []))
if custom_args_fun:
custom_args_fun(parser)
args = parser.parse_args()
print("args = {}".format(args), flush=True)
if not check_path("Dataset", args.dataset_dir, dataset_dirs):
exit(1)
if args.data_type == 'bf16':
if custom_bf16_fun:
custom_bf16_fun()
else:
os.environ["TF_ENABLE_BF16_CONVERSION"] = str(bf16_val)
return args
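# Hedged usage sketch: a training script could extend the shared parser with its
# own flag and fall back to one of several candidate dataset locations (the flag
# and the paths below are illustrative, not part of this module):
#
#     def my_args(parser):
#         parser.add_argument("--model_dir", "-md", type=str)
#
#     args = common_args(custom_args_fun=my_args,
#                        dataset_dirs=["/data/imagenet", "/software/data/imagenet"])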
|
__author__ = 'Shaban Hassan [shaban00]'
from typing import Callable, Dict, List
from flask import Flask
from flask_restful import Api, Resource
from flask_socketio import SocketIO
def add_api_resource(resource: Resource, urls: tuple, endpoint: str, api: Api) -> None:
urls = tuple([f"/api/v1/{route}" for route in urls])
api.add_resource(resource, *urls, endpoint=endpoint)
def register_api_urls(api_urls: List[Dict], api: Api) -> None:
for api_url in api_urls:
add_api_resource(api_url.get("resource"), api_url.get("urls"), api_url.get("endpoint"), api)
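# Hedged sketch of the list structure register_api_urls() expects; HealthResource
# and the URL below are illustrative placeholders, not part of this module:
#
#     api_urls = [
#         {"resource": HealthResource, "urls": ("health",), "endpoint": "health"},
#     ]
#     register_api_urls(api_urls, api)   # every route is mounted under /api/v1/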
def add_app_url(func: Callable, url: str, app: Flask, endpoint: str, method: List) -> None:
app.add_url_rule(f"/api/v1/{url}", endpoint=endpoint, view_func=func, methods=method)
def register_app_urls(app_urls: List, app: Flask) -> None:
for app_url in app_urls:
add_app_url(app_url.get("func"), app_url.get("url"), app, app_url.get("endpoint"), method=app_url.get("methods"))
def add_views(func: Callable, url: str, app: Flask) -> None:
app.add_url_rule(f"/api/v1/{url}", view_func=func)
def register_views(views: List, app: Flask) -> None:
for view in views:
add_views(view.get("func"), view.get("url"), app)
def register_events(events: List, socketio: SocketIO) -> None:
for event in events:
socketio.on_event(event.get("event"), event.get("func"), event.get("namespace")) |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TranslatedEmailTemplate'
db.create_table(u'post_office_translatedemailtemplate', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('language', self.gf('django.db.models.fields.CharField')(default=u'de', max_length=12)),
('default_template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['post_office.EmailTemplate'])),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('content', self.gf('django.db.models.fields.TextField')(blank=True)),
('html_content', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'post_office', ['TranslatedEmailTemplate'])
# Adding unique constraint on 'TranslatedEmailTemplate', fields ['language', 'default_template']
db.create_unique(u'post_office_translatedemailtemplate', ['language', 'default_template_id'])
def backwards(self, orm):
# Removing unique constraint on 'TranslatedEmailTemplate', fields ['language', 'default_template']
db.delete_unique(u'post_office_translatedemailtemplate', ['language', 'default_template_id'])
# Deleting model 'TranslatedEmailTemplate'
db.delete_table(u'post_office_translatedemailtemplate')
models = {
u'post_office.attachment': {
'Meta': {'object_name': 'Attachment'},
'emails': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'attachments'", 'symmetrical': 'False', 'to': u"orm['post_office.Email']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'post_office.email': {
'Meta': {'object_name': 'Email'},
'bcc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'context': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'from_email': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'headers': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'priority': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['post_office.EmailTemplate']", 'null': 'True', 'blank': 'True'}),
'to': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'post_office.emailtemplate': {
'Meta': {'object_name': 'EmailTemplate'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'html_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'post_office.log': {
'Meta': {'object_name': 'Log'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'logs'", 'to': u"orm['post_office.Email']"}),
'exception_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
u'post_office.translatedemailtemplate': {
'Meta': {'unique_together': "((u'language', u'default_template'),)", 'object_name': 'TranslatedEmailTemplate'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'default_template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['post_office.EmailTemplate']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'html_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "u'de'", 'max_length': '12'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['post_office'] |
#
# PySNMP MIB module NTWS-SYSTEM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NTWS-SYSTEM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:25:49 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ntwsMibs, = mibBuilder.importSymbols("NTWS-ROOT-MIB", "ntwsMibs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, Unsigned32, Integer32, NotificationType, TimeTicks, Counter64, Counter32, IpAddress, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, ModuleIdentity, MibIdentifier, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Unsigned32", "Integer32", "NotificationType", "TimeTicks", "Counter64", "Counter32", "IpAddress", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "ModuleIdentity", "MibIdentifier", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ntwsSystemMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8))
ntwsSystemMib.setRevisions(('2007-08-31 00:13', '2007-08-14 00:12', '2007-05-04 00:10', '2007-03-14 00:07', '2006-11-09 00:04', '2006-06-06 00:03',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ntwsSystemMib.setRevisionsDescriptions(('v3.0.2, MRT v2.2: Made changes in order to make MIB comply with corporate MIB conventions.', 'v3.0.1: Added new objects to support Power Supply status.', 'v2.1.0: Obsoleted two previously deprecated objects', "v2.0.0: Added new objects to support CPU load and memory (RAM) usage details: for last few seconds (''instant''), minute, 5 minutes, hour, day, 3 days.", 'v1.0.3: Removed unused imports', 'v1.0.2: Initial version',))
if mibBuilder.loadTexts: ntwsSystemMib.setLastUpdated('200708310013Z')
if mibBuilder.loadTexts: ntwsSystemMib.setOrganization('Nortel Networks')
if mibBuilder.loadTexts: ntwsSystemMib.setContactInfo('www.nortelnetworks.com')
if mibBuilder.loadTexts: ntwsSystemMib.setDescription("System objects for Nortel Networks wireless switches. Copyright 2007 Nortel Networks. All rights reserved. This Nortel Networks SNMP Management Information Base Specification (Specification) embodies Nortel Networks' confidential and proprietary intellectual property. This Specification is supplied 'AS IS' and Nortel Networks makes no warranty, either express or implied, as to the use, operation, condition, or performance of the Specification.")
class NtwsSysCpuLoad(TextualConvention, Unsigned32):
description = 'CPU load in percents'
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 100)
class NtwsSysMemoryAmount(TextualConvention, Unsigned32):
description = 'Memory amount in KBytes (1024 octets)'
status = 'current'
class NtwsSysPowerSupplyStatus(TextualConvention, Integer32):
description = 'The status of a Power Supply.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("other", 1), ("unknown", 2), ("ac-failed", 3), ("dc-failed", 4), ("ac-ok-dc-ok", 5))
ntwsSysObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1))
ntwsSysDataObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1))
ntwsSysCpuMemoryUsedBytes = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemoryUsedBytes.setStatus('obsolete')
if mibBuilder.loadTexts: ntwsSysCpuMemoryUsedBytes.setDescription('CPU memory used in bytes. Obsoleted by ntwsSysCpuMemoryInstantUsage.')
ntwsSysCpuMemoryTotalBytes = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemoryTotalBytes.setStatus('obsolete')
if mibBuilder.loadTexts: ntwsSysCpuMemoryTotalBytes.setDescription('CPU total physical memory in bytes. Obsoleted by ntwsSysCpuMemorySize.')
ntwsSysFlashMemoryUsedBytes = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysFlashMemoryUsedBytes.setStatus('current')
if mibBuilder.loadTexts: ntwsSysFlashMemoryUsedBytes.setDescription('Flash memory used in bytes.')
ntwsSysFlashMemoryTotalBytes = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysFlashMemoryTotalBytes.setStatus('current')
if mibBuilder.loadTexts: ntwsSysFlashMemoryTotalBytes.setDescription('Flash memory available in bytes.')
ntwsSysCpuAverageLoad = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 5), NtwsSysCpuLoad()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuAverageLoad.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuAverageLoad.setDescription('CPU load average since system startup.')
ntwsSysCpuMemorySize = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 6), NtwsSysMemoryAmount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemorySize.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuMemorySize.setDescription('Maximum available CPU Memory (RAM) in KBytes. This is the memory available to the Wireless Switch software. May be less than physical RAM size.')
ntwsSysCpuLoadDetail = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 11))
ntwsSysCpuMemoryUsageDetail = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 12))
ntwsSysChassisComponents = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 13))
ntwsSysCpuInstantLoad = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 11, 1), NtwsSysCpuLoad()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuInstantLoad.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuInstantLoad.setDescription('CPU instant load (for last few seconds).')
ntwsSysCpuLastMinuteLoad = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 11, 2), NtwsSysCpuLoad()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuLastMinuteLoad.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuLastMinuteLoad.setDescription('CPU load for last minute.')
ntwsSysCpuLast5MinutesLoad = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 11, 3), NtwsSysCpuLoad()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuLast5MinutesLoad.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuLast5MinutesLoad.setDescription('CPU load for last 5 minutes.')
ntwsSysCpuLastHourLoad = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 11, 4), NtwsSysCpuLoad()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuLastHourLoad.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuLastHourLoad.setDescription('CPU load for last hour.')
ntwsSysCpuLastDayLoad = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 11, 5), NtwsSysCpuLoad()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuLastDayLoad.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuLastDayLoad.setDescription('CPU load for last day.')
ntwsSysCpuLast3DaysLoad = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 11, 6), NtwsSysCpuLoad()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuLast3DaysLoad.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuLast3DaysLoad.setDescription('CPU load for last 3 days.')
ntwsSysCpuMemoryInstantUsage = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 12, 1), NtwsSysMemoryAmount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemoryInstantUsage.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuMemoryInstantUsage.setDescription('Instant memory usage (RAM) in KBytes (for last few seconds). Ranges between 0 and ntwsSysCpuMemorySize.')
ntwsSysCpuMemoryLastMinuteUsage = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 12, 2), NtwsSysMemoryAmount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemoryLastMinuteUsage.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuMemoryLastMinuteUsage.setDescription('Memory usage (RAM) for last minute in KBytes. Ranges between 0 and ntwsSysCpuMemorySize.')
ntwsSysCpuMemoryLast5MinutesUsage = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 12, 3), NtwsSysMemoryAmount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemoryLast5MinutesUsage.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuMemoryLast5MinutesUsage.setDescription('Memory usage (RAM) for last 5 minutes in KBytes. Ranges between 0 and ntwsSysCpuMemorySize.')
ntwsSysCpuMemoryLastHourUsage = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 12, 4), NtwsSysMemoryAmount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemoryLastHourUsage.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuMemoryLastHourUsage.setDescription('Memory usage (RAM) for last hour in KBytes. Ranges between 0 and ntwsSysCpuMemorySize.')
ntwsSysCpuMemoryLastDayUsage = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 12, 5), NtwsSysMemoryAmount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemoryLastDayUsage.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuMemoryLastDayUsage.setDescription('Memory usage (RAM) for last day in KBytes. Ranges between 0 and ntwsSysCpuMemorySize.')
ntwsSysCpuMemoryLast3DaysUsage = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 12, 6), NtwsSysMemoryAmount()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysCpuMemoryLast3DaysUsage.setStatus('current')
if mibBuilder.loadTexts: ntwsSysCpuMemoryLast3DaysUsage.setDescription('Memory usage (RAM) for last 3 days in KBytes. Ranges between 0 and ntwsSysCpuMemorySize.')
ntwsSysChasCompPowerSupplies = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 13, 1))
ntwsSysNumPowerSuppliesSupported = MibScalar((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 13, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysNumPowerSuppliesSupported.setStatus('current')
if mibBuilder.loadTexts: ntwsSysNumPowerSuppliesSupported.setDescription('The number of power supplies supported by the Wireless Switch. This is the upper limit of the number of entries in the power supply table, ntwsSysPowerSupplyTable.')
ntwsSysPowerSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 13, 1, 2), )
if mibBuilder.loadTexts: ntwsSysPowerSupplyTable.setStatus('current')
if mibBuilder.loadTexts: ntwsSysPowerSupplyTable.setDescription('Table of power supplies actually installed on the Wireless Switch.')
ntwsSysPowerSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 13, 1, 2, 1), ).setIndexNames((0, "NTWS-SYSTEM-MIB", "ntwsSysPowerSupplyDeviceOID"))
if mibBuilder.loadTexts: ntwsSysPowerSupplyEntry.setStatus('current')
if mibBuilder.loadTexts: ntwsSysPowerSupplyEntry.setDescription('An entry in the ntwsSysPowerSupplyTable table.')
ntwsSysPowerSupplyDeviceOID = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 13, 1, 2, 1, 1), ObjectIdentifier())
if mibBuilder.loadTexts: ntwsSysPowerSupplyDeviceOID.setStatus('current')
if mibBuilder.loadTexts: ntwsSysPowerSupplyDeviceOID.setDescription('OID value used to identify this chassis component as indicated in Registration MIB.')
ntwsSysPowerSupplyStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 13, 1, 2, 1, 2), NtwsSysPowerSupplyStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysPowerSupplyStatus.setStatus('current')
if mibBuilder.loadTexts: ntwsSysPowerSupplyStatus.setDescription('Status of the power supply.')
ntwsSysPowerSupplyDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 8, 1, 1, 13, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsSysPowerSupplyDescr.setStatus('current')
if mibBuilder.loadTexts: ntwsSysPowerSupplyDescr.setDescription("A human interpretable description of this power supply, for example 'Left Power Supply'.")
mibBuilder.exportSymbols("NTWS-SYSTEM-MIB", ntwsSysChassisComponents=ntwsSysChassisComponents, ntwsSysPowerSupplyTable=ntwsSysPowerSupplyTable, ntwsSysCpuLast5MinutesLoad=ntwsSysCpuLast5MinutesLoad, ntwsSysChasCompPowerSupplies=ntwsSysChasCompPowerSupplies, ntwsSysPowerSupplyDeviceOID=ntwsSysPowerSupplyDeviceOID, NtwsSysMemoryAmount=NtwsSysMemoryAmount, ntwsSysObjects=ntwsSysObjects, ntwsSysCpuMemoryLastMinuteUsage=ntwsSysCpuMemoryLastMinuteUsage, ntwsSysCpuLastDayLoad=ntwsSysCpuLastDayLoad, ntwsSysCpuLast3DaysLoad=ntwsSysCpuLast3DaysLoad, ntwsSysCpuMemoryUsageDetail=ntwsSysCpuMemoryUsageDetail, ntwsSysPowerSupplyDescr=ntwsSysPowerSupplyDescr, ntwsSysCpuMemoryLast5MinutesUsage=ntwsSysCpuMemoryLast5MinutesUsage, ntwsSysFlashMemoryTotalBytes=ntwsSysFlashMemoryTotalBytes, NtwsSysPowerSupplyStatus=NtwsSysPowerSupplyStatus, ntwsSysCpuLastHourLoad=ntwsSysCpuLastHourLoad, ntwsSysCpuMemoryLastHourUsage=ntwsSysCpuMemoryLastHourUsage, ntwsSysCpuMemoryLast3DaysUsage=ntwsSysCpuMemoryLast3DaysUsage, ntwsSysDataObjects=ntwsSysDataObjects, ntwsSysCpuMemorySize=ntwsSysCpuMemorySize, ntwsSysCpuMemoryTotalBytes=ntwsSysCpuMemoryTotalBytes, ntwsSysPowerSupplyStatus=ntwsSysPowerSupplyStatus, ntwsSysFlashMemoryUsedBytes=ntwsSysFlashMemoryUsedBytes, ntwsSystemMib=ntwsSystemMib, ntwsSysCpuMemoryUsedBytes=ntwsSysCpuMemoryUsedBytes, ntwsSysCpuMemoryLastDayUsage=ntwsSysCpuMemoryLastDayUsage, ntwsSysCpuLastMinuteLoad=ntwsSysCpuLastMinuteLoad, ntwsSysNumPowerSuppliesSupported=ntwsSysNumPowerSuppliesSupported, ntwsSysPowerSupplyEntry=ntwsSysPowerSupplyEntry, ntwsSysCpuAverageLoad=ntwsSysCpuAverageLoad, ntwsSysCpuLoadDetail=ntwsSysCpuLoadDetail, ntwsSysCpuInstantLoad=ntwsSysCpuInstantLoad, NtwsSysCpuLoad=NtwsSysCpuLoad, ntwsSysCpuMemoryInstantUsage=ntwsSysCpuMemoryInstantUsage, PYSNMP_MODULE_ID=ntwsSystemMib)
|
# Generated by Django 4.0.3 on 2022-03-21 06:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.IntegerField()),
('street', models.CharField(max_length=500)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.address')),
],
),
]
|
from pprint import pprint
from optparse import OptionParser
from settings import load_env_variables
from MixcloudSpider import LeftoSpider
from TracklistParser import prepare_spotify_search
from SpotifyPlaylistManager import search_track
import spotipy.util as util
import spotipy
parser = OptionParser()
parser.add_option("-f", "--fresh", action="store_true", dest="fresh", default=False)
parser.add_option("-a", "--archive", action="store_true", dest="archive", default=False)
(options, args) = parser.parse_args()
scope = 'playlist-modify-public'
username = 'idberend'
load_env_variables()
token = util.prompt_for_user_token(username, scope)
if token:
    sp = spotipy.Spotify(auth=token)  # use the acquired token, not the literal string 'token'
if __name__ == "__main__":
spider = LeftoSpider()
if options.fresh:
track_list = spider.run('fresh')
track_list = prepare_spotify_search(track_list)
if spider.TRACKLIST:
spotify_list = []
for track in track_list:
track_id = search_track(track)
if track_id:
spotify_list.append(track_id)
print("\t\t" + track_id)
else:
print("Found no tracks")
elif options.archive:
mixes = spider.run('archive')
for mix in mixes:
track_list = prepare_spotify_search(mix)
for track in track_list:
search_track(track)
else:
print("Please supply a -f or -a argument.")
quit()
sp.trace = False
sp.user_playlist_add_tracks(username, '6mzdLe0e8Oew4kfFN6845f', spotify_list) |
import discord
import time
client = discord.Client()
prefixes = ["!qt",";;","!","!b"]
@client.event
async def on_ready():
print("The bot is ready!")
def test_prefixes(message_content):
if len(message_content) <= 2:
return False
if len(message_content) >= 3:
if message_content[:1] == "!":
return True
if len(message_content) >= 4:
if message_content[:2] == ";;" or message_content[:2] == "!b" or message_content[:2] == "y! ":
return True
if len(message_content) >= 5:
if message_content[:3] == "!qt" or message_content[:3] == "pls":
return True
@client.event
async def on_message(message):
if(message.author != client.user):
#Function that answers Hello to the author
#if(message.content == 'Hello'):
#await client.send_message(message.channel, 'Hello '+message.author.mention)
#Tests the message head for a PoppyQTPi command
if(len(message.content) > 4):
if(message.content[:3] == "!qt"):
print(len(message.content))
print(message.content[4:8])
#Tests the length of a message otherwise array might be accessing private memory
if(len(message.content) >= 8):
if(message.content[4:8] == "help"):
await client.send_message(message.author,"After using the prefix *!qt* insert one of these commands:\n\t_-help_\n\t_-ping_")
if(message.content[4:8] == "ping"):
await client.send_message(message.channel,"pong")
#Function that eliminates unecessary commands from server
if(test_prefixes(message.content)):
time.sleep(1)
await client.delete_message(message)
def read_token(path):
    with open(path, "r") as token_file:
        return token_file.read()
client.run(read_token("Token.txt"))
|
from .Account import Account
from .User import User
|
from __future__ import print_function, absolute_import
import netCDF4
import numpy
import xarray as xr
import pytest
from hypothesis import given
from hypothesis.strategies import text
from tests.conftest import tmpnetcdf_filename as get_tmpnetcdf_filename
import string
from datacube.model import Variable
from datacube.storage.netcdf_writer import create_netcdf, create_coordinate, create_variable, netcdfy_data, \
create_grid_mapping_variable, flag_mask_meanings
from datacube.storage.storage import write_dataset_to_netcdf
from datacube.utils import geometry, DatacubeException, read_strings_from_netcdf
GEO_PROJ = geometry.CRS("""GEOGCS["WGS 84",
DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]]""")
ALBERS_PROJ = geometry.CRS("""PROJCS["GDA94 / Australian Albers",
GEOGCS["GDA94",
DATUM["Geocentric_Datum_of_Australia_1994",
SPHEROID["GRS 1980",6378137,298.257222101,
AUTHORITY["EPSG","7019"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6283"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4283"]],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
PROJECTION["Albers_Conic_Equal_Area"],
PARAMETER["standard_parallel_1",-18],
PARAMETER["standard_parallel_2",-36],
PARAMETER["latitude_of_center",0],
PARAMETER["longitude_of_center",132],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
AUTHORITY["EPSG","3577"],
AXIS["Easting",EAST],
AXIS["Northing",NORTH]]""")
SINIS_PROJ = geometry.CRS("""PROJCS["Sinusoidal",
GEOGCS["GCS_Undefined",
DATUM["Undefined",
SPHEROID["User_Defined_Spheroid",6371007.181,0.0]],
PRIMEM["Greenwich",0.0],
UNIT["Degree",0.0174532925199433]],
PROJECTION["Sinusoidal"],
PARAMETER["False_Easting",0.0],
PARAMETER["False_Northing",0.0],
PARAMETER["Central_Meridian",0.0],
UNIT["Meter",1.0]]""")
LCC2_PROJ = geometry.CRS("""PROJCS["unnamed",
GEOGCS["WGS 84",
DATUM["unknown",
SPHEROID["WGS84",6378137,6556752.3141]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433]],
PROJECTION["Lambert_Conformal_Conic_2SP"],
PARAMETER["standard_parallel_1",17.5],
PARAMETER["standard_parallel_2",29.5],
PARAMETER["latitude_of_origin",12],
PARAMETER["central_meridian",-102],
PARAMETER["false_easting",2500000],
PARAMETER["false_northing",0]]""")
GLOBAL_ATTRS = {'test_attribute': 'test_value'}
DATA_VARIABLES = ('B1', 'B2')
LAT_LON_COORDINATES = ('latitude', 'longitude')
PROJECTED_COORDINATES = ('x', 'y')
COMMON_VARIABLES = ('crs', 'time')
DATA_WIDTH = 400
DATA_HEIGHT = 200
def _ensure_spheroid(var):
assert 'semi_major_axis' in var.ncattrs()
assert 'semi_minor_axis' in var.ncattrs()
assert 'inverse_flattening' in var.ncattrs()
def _ensure_gdal(var):
assert 'GeoTransform' in var.ncattrs()
assert 'spatial_ref' in var.ncattrs()
def _ensure_geospatial(nco):
assert 'geospatial_bounds' in nco.ncattrs()
assert 'geospatial_bounds_crs' in nco.ncattrs()
assert nco.getncattr('geospatial_bounds_crs') == "EPSG:4326"
assert 'geospatial_lat_min' in nco.ncattrs()
assert 'geospatial_lat_max' in nco.ncattrs()
assert 'geospatial_lat_units' in nco.ncattrs()
assert nco.getncattr('geospatial_lat_units') == "degrees_north"
assert 'geospatial_lon_min' in nco.ncattrs()
assert 'geospatial_lon_max' in nco.ncattrs()
assert 'geospatial_lon_units' in nco.ncattrs()
assert nco.getncattr('geospatial_lon_units') == "degrees_east"
def test_create_albers_projection_netcdf(tmpnetcdf_filename):
nco = create_netcdf(tmpnetcdf_filename)
create_coordinate(nco, 'x', numpy.array([1., 2., 3.]), 'm')
create_coordinate(nco, 'y', numpy.array([1., 2., 3.]), 'm')
create_grid_mapping_variable(nco, ALBERS_PROJ)
nco.close()
with netCDF4.Dataset(tmpnetcdf_filename) as nco:
assert 'crs' in nco.variables
assert nco['crs'].grid_mapping_name == 'albers_conical_equal_area'
assert 'standard_parallel' in nco['crs'].ncattrs()
assert 'longitude_of_central_meridian' in nco['crs'].ncattrs()
assert 'latitude_of_projection_origin' in nco['crs'].ncattrs()
_ensure_spheroid(nco['crs'])
_ensure_gdal(nco['crs'])
_ensure_geospatial(nco)
def test_create_lambert_conformal_conic_2sp_projection_netcdf(tmpnetcdf_filename):
nco = create_netcdf(tmpnetcdf_filename)
create_coordinate(nco, 'x', numpy.array([1., 2., 3.]), 'm')
create_coordinate(nco, 'y', numpy.array([1., 2., 3.]), 'm')
create_grid_mapping_variable(nco, LCC2_PROJ)
nco.close()
with netCDF4.Dataset(tmpnetcdf_filename) as nco:
assert 'crs' in nco.variables
assert nco['crs'].grid_mapping_name == 'lambert_conformal_conic'
assert 'standard_parallel' in nco['crs'].ncattrs()
assert 'longitude_of_central_meridian' in nco['crs'].ncattrs()
assert 'latitude_of_projection_origin' in nco['crs'].ncattrs()
assert 'false_easting' in nco['crs'].ncattrs()
assert 'false_northing' in nco['crs'].ncattrs()
_ensure_spheroid(nco['crs'])
_ensure_gdal(nco['crs'])
_ensure_geospatial(nco)
def test_create_epsg4326_netcdf(tmpnetcdf_filename):
nco = create_netcdf(tmpnetcdf_filename)
create_coordinate(nco, 'latitude', numpy.array([1., 2., 3.]), 'm')
create_coordinate(nco, 'longitude', numpy.array([1., 2., 3.]), 'm')
create_grid_mapping_variable(nco, GEO_PROJ)
nco.close()
with netCDF4.Dataset(tmpnetcdf_filename) as nco:
assert 'crs' in nco.variables
assert nco['crs'].grid_mapping_name == 'latitude_longitude'
_ensure_spheroid(nco['crs'])
_ensure_geospatial(nco)
def test_create_sinus_netcdf(tmpnetcdf_filename):
nco = create_netcdf(tmpnetcdf_filename)
create_coordinate(nco, 'x', numpy.array([1., 2., 3.]), 'm')
create_coordinate(nco, 'y', numpy.array([1., 2., 3.]), 'm')
create_grid_mapping_variable(nco, SINIS_PROJ)
nco.close()
with netCDF4.Dataset(tmpnetcdf_filename) as nco:
assert 'crs' in nco.variables
assert nco['crs'].grid_mapping_name == 'sinusoidal'
assert 'longitude_of_central_meridian' in nco['crs'].ncattrs()
_ensure_spheroid(nco['crs'])
_ensure_geospatial(nco)
# Work around outstanding bug with hypothesis/pytest, where function level fixtures are only run once.
# Generate a new netcdf filename for each run, so that old files don't cause permission errors on windows
# due to antivirus software filesystem lag.
# See https://github.com/HypothesisWorks/hypothesis-python/issues/377
@given(s1=text(alphabet=string.printable, max_size=100),
s2=text(alphabet=string.printable, max_size=100),
s3=text(alphabet=string.printable, max_size=100))
def test_create_string_variable(tmpdir, s1, s2, s3):
tmpnetcdf_filename = get_tmpnetcdf_filename(tmpdir)
str_var = 'str_var'
nco = create_netcdf(tmpnetcdf_filename)
coord = create_coordinate(nco, 'greg', numpy.array([1.0, 3.0, 9.0]), 'cubic gregs')
assert coord is not None
dtype = numpy.dtype('S100')
data = numpy.array([s1, s2, s3], dtype=dtype)
var = create_variable(nco, str_var, Variable(dtype, None, ('greg',), None))
var[:] = netcdfy_data(data)
nco.close()
with netCDF4.Dataset(tmpnetcdf_filename) as nco:
assert str_var in nco.variables
for returned, expected in zip(read_strings_from_netcdf(tmpnetcdf_filename, variable=str_var), (s1, s2, s3)):
assert returned == expected
def test_chunksizes(tmpnetcdf_filename):
nco = create_netcdf(tmpnetcdf_filename)
x = numpy.arange(3, dtype='float32')
y = numpy.arange(5, dtype='float32')
coord1 = create_coordinate(nco, 'x', x, 'm')
coord2 = create_coordinate(nco, 'y', y, 'm')
assert coord1 is not None and coord2 is not None
no_chunks = create_variable(nco, 'no_chunks',
Variable(numpy.dtype('int16'), None, ('x', 'y'), None))
min_max_chunks = create_variable(nco, 'min_max_chunks',
Variable(numpy.dtype('int16'), None, ('x', 'y'), None),
chunksizes=(2, 50))
assert no_chunks is not None
assert min_max_chunks is not None
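    # The requested chunk size of 50 along 'y' exceeds the dimension length (5);
    # the writer/netCDF layer apparently clamps it, hence the [2, 5] assertion below.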
strings = numpy.array(["AAa", 'bbb', 'CcC'], dtype='S')
strings = xr.DataArray(strings, dims=['x'], coords={'x': x})
create_variable(nco, 'strings_unchunked', strings)
create_variable(nco, 'strings_chunked', strings, chunksizes=(1,))
nco.close()
with netCDF4.Dataset(tmpnetcdf_filename) as nco:
assert nco['no_chunks'].chunking() == 'contiguous'
assert nco['min_max_chunks'].chunking() == [2, 5]
assert nco['strings_unchunked'].chunking() == 'contiguous'
assert nco['strings_chunked'].chunking() == [1, 3]
EXAMPLE_FLAGS_DEF = {
'band_1_saturated': {
'bits': 0,
'values': {
0: True,
1: False
},
'description': 'Band 1 is saturated'},
'band_2_saturated': {
'bits': 1,
'values': {
0: True,
1: False
},
'description': 'Band 2 is saturated'},
'band_3_saturated': {
'bits': 2,
'values': {
0: True,
1: False
},
'description': 'Band 3 is saturated'},
'land_sea': {
'bits': 9,
'values': {
0: 'sea',
1: 'land'
},
'description': 'Land/Sea observation'},
}
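# Expected values for the test below: masks come from the bit positions
# (1 << 0, 1 << 1, 1 << 2, 1 << 9) = (1, 2, 4, 512), and the valid range spans
# all bits up to the highest one used: [0, 2**10 - 1] = [0, 1023].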
def test_measurements_model_netcdfflags():
masks, valid_range, meanings = flag_mask_meanings(EXAMPLE_FLAGS_DEF)
assert ([0, 1023] == valid_range).all()
assert ([1, 2, 4, 512] == masks).all()
assert 'no_band_1_saturated no_band_2_saturated no_band_3_saturated land' == meanings
def test_useful_error_on_write_empty_dataset(tmpnetcdf_filename):
with pytest.raises(DatacubeException) as excinfo:
ds = xr.Dataset()
write_dataset_to_netcdf(ds, tmpnetcdf_filename)
assert 'empty' in str(excinfo.value)
with pytest.raises(DatacubeException) as excinfo:
ds = xr.Dataset(data_vars={'blue': (('time',), numpy.array([0, 1, 2]))})
write_dataset_to_netcdf(ds, tmpnetcdf_filename)
assert 'CRS' in str(excinfo.value)
|
#!/usr/bin/env python
# Retrieved from http://ecdsa.org/ecdsa.py on 2011-10-17.
# Thanks to ThomasV.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
import sys
import os
import warnings
import optparse
import re
from cgi import escape
import posixpath
import wsgiref.util
import time
import binascii
import daemon
import Abe.DataStore
import Abe.readconf
import operator
# bitcointools -- modified deserialize.py to return raw transaction
import Abe.deserialize
import Abe.util # Added functions.
import Abe.base58
from Abe.abe import *
AML_APPNAME = "Bitcoin ecdsa.org"
AML_TEMPLATE = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<link rel="stylesheet" type="text/css" href="http://s3.ecdsa.org/style.css" />
<link rel="shortcut icon" href="http://s3.ecdsa.org/favicon.ico" />
<title>%(title)s</title>
</head>
<body>
<div id="logo">
<a href="%(dotdot)s/">
<img src="http://s3.ecdsa.org/bc_logo.png" alt="Bitcoin logo" border="none" />
</a>
</div>
<div id="navigation">
<ul>
<li><a href="%(dotdot)shome">Home</a> </li>
<li><a href="%(dotdot)ssearch">Search</a> </li>
<li><a href="%(dotdot)sannotate">Annotations</a> </li>
<li><a href="%(dotdot)swidgets">Widgets</a></li>
<li><a href="%(dotdot)sthresholdRelease">Threshold release</a></li>
<li><a href="%(dotdot)sstats.html">Statistics</a></li>
</ul>
</div>
<div id=\"content\">
<h1>%(h1)s</h1>
%(body)s
</div>
</body>
</html>
"""
class Aml(Abe):
def __init__(abe, store, args):
abe.store = store
abe.args = args
abe.htdocs = args.document_root or find_htdocs()
abe.static_path = '' if args.static_path is None else args.static_path
abe.template_vars = args.template_vars.copy()
abe.template_vars['STATIC_PATH'] = (
abe.template_vars.get('STATIC_PATH', abe.static_path))
abe.template = flatten(args.template)
abe.debug = args.debug
import logging
abe.log = logging
abe.log.info('Abe initialized.')
abe.home = "home"
if not args.auto_agpl:
abe.template_vars['download'] = (
abe.template_vars.get('download', ''))
abe.base_url = args.base_url
abe.reports = abe.get_reports()
def handle_home(abe, page):
page['title'] = 'Bitcoin Web Services'
body = page['body']
body += [ """
        <p>This website allows you to:
<ul>
<li>Annotate transactions in the blockchain (signature requested)</li>
<li>Use fundraiser widgets (counters, progress bars, javascript)</li>
<li>Release data when donations to an address reach a given threshold.</li>
</ul>
<br/><br/>
<p style="font-size: smaller">
This site is powered by <span style="font-style: italic"> <a href="https://github.com/bitcoin-abe/bitcoin-abe">bitcoin-ABE</a></span>
source:<a href="ecdsa.py">[1]</a> <a href="abe.diff">[2]</a>
</p>"""
]
return
def get_sender_comment(abe, tx_id):
r = abe.store.selectrow("SELECT c_text, c_pubkey, c_sig FROM comments WHERE c_tx = ?""", (tx_id,))
if r:
return r[0]
else:
return ""
def get_address_comment(abe, address):
#rename this column in sql
r = abe.store.selectrow("SELECT text FROM addr_comments WHERE address = '%s'"""%(address))
if r:
return r[0]
else:
return ""
def get_tx(abe, tx_hash ):
row = abe.store.selectrow("""
SELECT tx_id, tx_version, tx_lockTime, tx_size
FROM tx
WHERE tx_hash = ?
""", (abe.store.hashin_hex(tx_hash),))
if row is None: return None, None, None, None
tx_id, tx_version, tx_lockTime, tx_size = (int(row[0]), int(row[1]), int(row[2]), int(row[3]))
return tx_id, tx_version, tx_lockTime, tx_size
def get_tx_inputs(abe, tx_id):
return abe.store.selectall("""
SELECT
txin.txin_pos,
txin.txin_scriptSig,
txout.txout_value,
COALESCE(prevtx.tx_hash, u.txout_tx_hash),
prevtx.tx_id,
COALESCE(txout.txout_pos, u.txout_pos),
pubkey.pubkey_hash
FROM txin
LEFT JOIN txout ON (txout.txout_id = txin.txout_id)
LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
LEFT JOIN tx prevtx ON (txout.tx_id = prevtx.tx_id)
LEFT JOIN unlinked_txin u ON (u.txin_id = txin.txin_id)
WHERE txin.tx_id = ?
ORDER BY txin.txin_pos
""", (tx_id,))
def get_tx_outputs(abe, tx_id):
return abe.store.selectall("""
SELECT
txout.txout_pos,
txout.txout_scriptPubKey,
txout.txout_value,
nexttx.tx_hash,
nexttx.tx_id,
txin.txin_pos,
pubkey.pubkey_hash
FROM txout
LEFT JOIN txin ON (txin.txout_id = txout.txout_id)
LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
LEFT JOIN tx nexttx ON (txin.tx_id = nexttx.tx_id)
WHERE txout.tx_id = ?
ORDER BY txout.txout_pos
""", (tx_id,))
def handle_tx(abe, page):
tx_hash = wsgiref.util.shift_path_info(page['env'])
if tx_hash in (None, '') or page['env']['PATH_INFO'] != '':
raise PageNotFound()
page['title'] = ['Transaction ', tx_hash[:10], '...', tx_hash[-4:]]
body = page['body']
if not HASH_PREFIX_RE.match(tx_hash):
body += ['<p class="error">Not a valid transaction hash.</p>']
return
tx_id, tx_version, tx_lockTime, tx_size = abe.get_tx( tx_hash )
if tx_id is None:
body += ['<p class="error">Transaction not found.</p>']
return
block_rows = abe.store.selectall("""
SELECT c.chain_name, cc.in_longest,
b.block_nTime, b.block_height, b.block_hash,
block_tx.tx_pos
FROM chain c
JOIN chain_candidate cc ON (cc.chain_id = c.chain_id)
JOIN block b ON (b.block_id = cc.block_id)
JOIN block_tx ON (block_tx.block_id = b.block_id)
WHERE block_tx.tx_id = ?
ORDER BY c.chain_id, cc.in_longest DESC, b.block_hash
""", (tx_id,))
def parse_row(row):
pos, script, value, o_hash, o_id, o_pos, binaddr = row
chain = abe.get_default_chain()
hash = abe.store.binout(binaddr)
address = hash_to_address(chain['address_version'], hash)
return {
"pos": int(pos),
"script": abe.store.binout(script),
"value": None if value is None else int(value),
"o_hash": abe.store.hashout_hex(o_hash),
"o_id": o_id,
"o_pos": None if o_pos is None else int(o_pos),
"binaddr": abe.store.binout(binaddr),
}
def row_to_html(row, this_ch, other_ch, no_link_text):
body = []
body += [
'<tr>\n',
'<td><a name="', this_ch, row['pos'], '">', row['pos'],
'</a></td>\n<td>']
if row['o_hash'] is None:
body += [no_link_text]
else:
body += [
'<a href="', row['o_hash'], '#', other_ch, row['o_pos'],
'">', row['o_hash'][:10], '...:', row['o_pos'], '</a>']
body += [
'</td>\n',
'<td>', format_satoshis(row['value'], chain), '</td>\n',
]
if row['binaddr'] is None:
body += ['Unknown', '</td><td></td>']
else:
link = hash_to_address_link(chain['address_version'], row['binaddr'], '../')
addr = hash_to_address(chain['address_version'], row['binaddr'])
comment = abe.get_address_comment(addr)
comment += " <a title=\"add comment\" href=\"http://ecdsa.org/annotate?address="+addr+"\">[+]</a>"
body += [ '<td>', link, '</td><td>', comment, '</td>']
body += ['</tr>\n']
return body
in_rows = map(parse_row, abe.get_tx_inputs(tx_id))
out_rows = map(parse_row, abe.get_tx_outputs(tx_id))
def sum_values(rows):
ret = 0
for row in rows:
if row['value'] is None:
return None
ret += row['value']
return ret
value_in = sum_values(in_rows)
value_out = sum_values(out_rows)
is_coinbase = None
body += abe.short_link(page, 't/' + hexb58(tx_hash[:14]))
body += ['<p>Hash: ', tx_hash, '<br />\n']
chain = None
for row in block_rows:
(name, in_longest, nTime, height, blk_hash, tx_pos) = (
row[0], int(row[1]), int(row[2]), int(row[3]),
abe.store.hashout_hex(row[4]), int(row[5]))
if chain is None:
chain = abe.chain_lookup_by_name(name)
is_coinbase = (tx_pos == 0)
            elif name != chain['name']:
abe.log.warn('Transaction ' + tx_hash + ' in multiple chains: '
+ name + ', ' + chain['name'])
body += [
'Appeared in <a href="../block/', blk_hash, '">',
escape(name), ' ',
height if in_longest else [blk_hash[:10], '...', blk_hash[-4:]],
'</a> (', format_time(nTime), ')<br />\n']
if chain is None:
abe.log.warn('Assuming default chain for Transaction ' + tx_hash)
chain = abe.get_default_chain()
sender_comment = abe.get_sender_comment(tx_id)
sender_comment += " <a href=\"http://ecdsa.org/annotate?tx="+tx_hash+"\">[+]</a>"
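        # Coinbase transactions have no real inputs, so the fee is reported as 0;
        # otherwise the fee is inputs minus outputs (None if either amount is unknown).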
fee = format_satoshis(0 if is_coinbase else (value_in and value_out and value_in - value_out), chain)
body += [
len(in_rows),' inputs, ', len(out_rows),' outputs.<br/>\n'
'Amounts: ', format_satoshis(value_in, chain), ' --> ', format_satoshis(value_out, chain), ' + ',fee,' fee.<br/>\n',
'Size: ', tx_size, ' bytes<br /><br/>\n',
'<b>Comment from sender:</b><br/>', sender_comment, '<br/>\n',
]
body += ['</p>\n',
'<a name="inputs"><h3>Inputs</h3></a>\n<table>\n',
'<tr><th>Index</th><th>Previous output</th><th>Amount</th>',
'<th>From address</th><th>Comment</th></tr>\n']
for row in in_rows:
page['body'] += row_to_html(row, 'i', 'o', 'Generation' if is_coinbase else 'Unknown')
body += ['</table>\n',
'<a name="outputs"><h3>Outputs</h3></a>\n<table>\n',
'<tr><th>Index</th><th>Redeemed at</th><th>Amount</th>',
'<th>To address</th><th>Comment</th></tr>\n']
for row in out_rows:
page['body'] += row_to_html(row, 'o', 'i', 'Not yet redeemed')
body += ['</table>\n']
def trackrow_to_html(row, report_name):
line = [ '<tr>\n<td>' ]
if row['o_hash'] is None:
line += ['Generation' if is_coinbase else 'Unknown']
else:
line += [
'<a href="', row['o_hash'], '">', row['o_hash'][:10], '...:', row['o_pos'], '</a>']
line += [
'</td>\n',
'<td>', format_satoshis(row['value'], chain), '</td>\n',
'<td>']
if row['binaddr'] is None:
line += ['Unknown']
else:
line += hash_to_address_link(chain['address_version'], row['binaddr'], '../')
line += [
'</td>\n',
'<td>', row['dist'].get(report_name),'</td>\n',
'<td>', row['comment'],'</td>\n',
'</tr>\n']
return line
def get_address_out_rows(abe, dbhash):
return abe.store.selectall("""
SELECT
b.block_nTime,
cc.chain_id,
b.block_height,
1,
b.block_hash,
tx.tx_hash,
tx.tx_id,
txin.txin_pos,
-prevout.txout_value
FROM chain_candidate cc
JOIN block b ON (b.block_id = cc.block_id)
JOIN block_tx ON (block_tx.block_id = b.block_id)
JOIN tx ON (tx.tx_id = block_tx.tx_id)
JOIN txin ON (txin.tx_id = tx.tx_id)
JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
WHERE pubkey.pubkey_hash = ?
AND cc.in_longest = 1""",
(dbhash,))
def get_address_in_rows(abe, dbhash):
return abe.store.selectall("""
SELECT
b.block_nTime,
cc.chain_id,
b.block_height,
0,
b.block_hash,
tx.tx_hash,
tx.tx_id,
txout.txout_pos,
txout.txout_value
FROM chain_candidate cc
JOIN block b ON (b.block_id = cc.block_id)
JOIN block_tx ON (block_tx.block_id = b.block_id)
JOIN tx ON (tx.tx_id = block_tx.tx_id)
JOIN txout ON (txout.tx_id = tx.tx_id)
JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
WHERE pubkey.pubkey_hash = ?
AND cc.in_longest = 1""",
(dbhash,))
def handle_qr(abe,page):
address = wsgiref.util.shift_path_info(page['env'])
if address in (None, '') or page['env']['PATH_INFO'] != '':
raise PageNotFound()
body = page['body']
page['title'] = 'Address ' + escape(address)
version, binaddr = decode_check_address(address)
if binaddr is None:
body += ['<p>Not a valid address.</p>']
return
ret = """<html><body>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.5.2/jquery.min.js"></script>
<script type="text/javascript" src="http://ecdsa.org/jquery.qrcode.min.js"></script>
<div id="qrcode"></div>
<script>jQuery('#qrcode').qrcode("bitcoin:%s");</script>
</body></html>"""%address
abe.do_raw(page, ret)
page['content_type']='text/html'
def handle_address(abe, page):
#action = abe.get_param( page, 'action', '')
address = wsgiref.util.shift_path_info(page['env'])
if address in (None, '') or page['env']['PATH_INFO'] != '':
raise PageNotFound()
body = page['body']
page['title'] = 'Address ' + escape(address)
version, binaddr = decode_check_address(address)
if binaddr is None:
body += ['<p>Not a valid address.</p>']
return
txpoints = []
chains = {}
balance = {}
received = {}
sent = {}
count = [0, 0]
chain_ids = []
def adj_balance(txpoint):
chain_id = txpoint['chain_id']
value = txpoint['value']
if chain_id not in balance:
chain_ids.append(chain_id)
chains[chain_id] = abe.chain_lookup_by_id(chain_id)
balance[chain_id] = 0
received[chain_id] = 0
sent[chain_id] = 0
balance[chain_id] += value
if value > 0:
received[chain_id] += value
else:
sent[chain_id] -= value
count[txpoint['is_in']] += 1
dbhash = abe.store.binin(binaddr)
rows = []
rows += abe.get_address_out_rows( dbhash )
rows += abe.get_address_in_rows( dbhash )
#rows.sort()
for row in rows:
nTime, chain_id, height, is_in, blk_hash, tx_hash, tx_id, pos, value = row
txpoint = {
"nTime": int(nTime),
"chain_id": int(chain_id),
"height": int(height),
"is_in": int(is_in),
"blk_hash": abe.store.hashout_hex(blk_hash),
"tx_hash": abe.store.hashout_hex(tx_hash),
"tx_id": int(tx_id),
"pos": int(pos),
"value": int(value),
}
adj_balance(txpoint)
txpoints.append(txpoint)
#txpoints.sort( lambda a,b: a['tx_id']<b['tx_id'])
txpoints = sorted(txpoints, key=operator.itemgetter("tx_id"))
if (not chain_ids):
body += ['<p>Address not seen on the network.</p>']
return
def format_amounts(amounts, link):
ret = []
for chain_id in chain_ids:
chain = chains[chain_id]
if chain_id != chain_ids[0]:
ret += [', ']
ret += [format_satoshis(amounts[chain_id], chain),
' ', escape(chain['code3'])]
if link:
other = hash_to_address(chain['address_version'], binaddr)
if other != address:
ret[-1] = ['<a href="', page['dotdot'],
'address/', other,
'">', ret[-1], '</a>']
return ret
comment = abe.get_address_comment(address)
comment += " <a title=\"add comment\" href=\"http://ecdsa.org/annotate?address="+address+"\">[+]</a>"
body += [ '<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.5.2/jquery.min.js"></script>',
'<script type="text/javascript" src="http://ecdsa.org/jquery.qrcode.min.js"></script>',
'<div style="float:right;" id="qrcode"></div>',
"<script>jQuery('#qrcode').qrcode(\"bitcoin:"+address+"\");</script>" ]
body += abe.short_link(page, 'a/' + address[:10])
body += ['<p>Balance: '] + format_amounts(balance, True)
for chain_id in chain_ids:
balance[chain_id] = 0 # Reset for history traversal.
body += ['<br />\n',
'Transactions in: ', count[0], '<br />\n',
'Received: ', format_amounts(received, False), '<br />\n',
'Transactions out: ', count[1], '<br />\n',
'Sent: ', format_amounts(sent, False), '<br/>'
'Comment: ', comment, '<br/>'
]
body += ['</p>\n'
'<h3>Transactions</h3>\n'
'<table>\n<tr><th>Transaction</th><th>Block</th>'
'<th>Approx. Time</th><th>Amount</th><th>Balance</th>'
'<th>Comment</th>'
'</tr>\n']
for elt in txpoints:
chain = chains[elt['chain_id']]
balance[elt['chain_id']] += elt['value']
body += ['<tr><td><a href="../tx/', elt['tx_hash'],
'#', 'i' if elt['is_in'] else 'o', elt['pos'],
'">', elt['tx_hash'][:10], '...</a>',
'</td><td><a href="../block/', elt['blk_hash'],
'">', elt['height'], '</a></td><td>',
format_time(elt['nTime']), '</td><td>']
if elt['value'] < 0:
body += ['<span style="color:red;">-', format_satoshis(-elt['value'], chain), "</span>" ]
else:
body += ['+', format_satoshis(elt['value'], chain)]
# get sender comment
comment = abe.get_sender_comment(elt['tx_id'])
comment += " <a href=\"http://ecdsa.org/annotate?tx="+elt['tx_hash']+"\">[+]</a>"
body += ['</td><td>',
format_satoshis(balance[elt['chain_id']], chain),
'</td><td>', comment,
'</td></tr>\n']
body += ['</table>\n']
def search_form(abe, page):
q = (page['params'].get('q') or [''])[0]
return [
'<p>Search by address, block number, block or transaction hash,'
' or chain name:</p>\n'
'<form action="', page['dotdot'], 'search"><p>\n'
'<input name="q" size="64" value="', escape(q), '" />'
'<button type="submit">Search</button>\n'
'<br />Address or hash search requires at least the first six'
' characters.</p></form>\n']
def get_reports(abe):
rows = abe.store.selectall("select reports.report_id, tx.tx_id, tx.tx_hash, name from reports left join tx on tx.tx_id=reports.tx_id" )
return map(lambda x: { 'report_id':int(x[0]), 'tx_id':int(x[1]), 'tx_hash':x[2], 'name':x[3] }, rows)
def handle_reports(abe, page):
page['title'] = 'Fraud reports'
page['body'] += [ 'List of transactions that have been reported as fraudulent.', '<br/><br/>']
page['body'] += [ '<table><tr><th>name</th><th>transaction</th></tr>']
for item in abe.reports:
link = '<a href="tx/' + item['tx_hash'] + '">'+ item['tx_hash'] + '</a>'
page['body'] += ['<tr><td>'+item['name']+'</td><td>'+link+'</td></tr>']
page['body'] += [ '</table>']
def handle_annotate(abe, page):
tx_hash = (page['params'].get('tx') or [''])[0]
address = (page['params'].get('address') or [''])[0]
message = (page['params'].get('comment') or [''])[0]
signature = (page['params'].get('signature') or [''])[0]
if not tx_hash and not address:
page['title'] = 'Annotations'
page['body'] += [ 'This website allows you to annotate the Bitcoin blockchain.<br/><br/>',
'You will need a version of bitcoind that has the "signmessage" command.<br/>'
'In order to annotate an address or transaction, first <a href="search">find</a> the corresponding page, then follow the "[+]" link. <a href="http://ecdsa.org/annotate?tx=e357fece18a4191be8236570c7dc309ec6ac04473317320b5e8b9ab7cd023549">(example here)</a><br/><br/>']
page['body'] += [ '<h3>Annotated addresses.</h3>']
rows = abe.store.selectall("""select text, address from addr_comments limit 100""" )
page['body'] += [ '<table>']
page['body'] += [ '<tr><th>Address</th><th>Comment</th></tr>']
for row in rows:
link = '<a href="address/' + row[1]+ '">'+ row[1] + '</a>'
page['body'] += ['<tr><td>'+link+'</td><td>'+row[0]+'</td></tr>']
page['body'] += [ '</table>']
page['body'] += [ '<h3>Annotated transactions.</h3>']
rows = abe.store.selectall("""select tx.tx_id, tx.tx_hash, comments.c_text
from comments left join tx on tx.tx_id = comments.c_tx where c_sig != '' limit 100""" )
page['body'] += [ '<table>']
page['body'] += [ '<tr><th>Transaction</th><th>Comment</th></tr>']
for row in rows:
link = '<a href="tx/' + row[1]+ '">'+ row[1] + '</a>'
page['body'] += ['<tr><td>'+link+'</td><td>'+row[2]+'</td></tr>']
page['body'] += [ '</table>']
return
if tx_hash:
page['title'] = 'Annotate transaction'
tx_id, b, c, d = abe.get_tx( tx_hash )
chain = abe.get_default_chain()
in_addresses = []
for row in abe.get_tx_inputs( tx_id ):
addr = abe.store.binout(row[6])
addr = hash_to_address_link(chain['address_version'], addr, '../')
in_addresses.append( addr[3] )
if not address:
address = in_addresses[0]
out_addresses = []
for row in abe.get_tx_outputs( tx_id ):
addr = abe.store.binout(row[6])
addr = hash_to_address_link(chain['address_version'], addr, '../')
out_addresses.append( addr[3] )
if message or signature:
# check address
#if address not in in_addresses and address not in out_addresses:
if address not in in_addresses:
page['title'] = 'Error'
page['body'] = ['<p>wrong address for this transaction.</p>\n']
print address, in_addresses
return
# check signature
import bitcoinrpc
conn = bitcoinrpc.connect_to_local()
message = message.replace("\r\n","\\n").replace("!","\\!").replace("$","\\$")
print "verifymessage:", address, signature, repr(message)
try:
v = conn.verifymessage(address,signature, tx_hash+":"+message)
except:
v = False
if not v:
page['title'] = 'Error'
page['body'] = ['<p>Invalid signature.</p>']
return
# little bobby tables
message = message.replace('"', '\\"').replace("'", "\\'")
# escape html
message = escape( message )
message = message[:1024]
row = abe.store.selectrow("select c_tx from comments where c_tx=%d "%(tx_id ) )
if not row:
abe.store.sql("insert into comments (c_tx, c_text, c_pubkey, c_sig) VALUES (%d, '%s', '%s', '%s')"%( tx_id, message, address, signature) )
abe.store.commit()
page['body'] = ['<p>Your comment was added successfully.</p>\n']
else:
if not message:
abe.store.sql("delete from comments where c_tx=%d "%( tx_id ) )
abe.store.commit()
page['body'] = ['<p>Your comment was deleted.</p>\n']
else:
abe.store.sql("update comments set c_text='%s', c_sig='%s', c_pubkey='%s' where c_tx=%d "%( message, signature, address, tx_id ) )
abe.store.commit()
page['body'] = ['<p>Your comment was updated.</p>\n']
return
else:
select = "<select id=\"address\" onkeyup=\"change_address(this.value);\" onchange=\"change_address(this.value);\" name='address'>" \
+ "\n".join( map( lambda addr: "<option value=\""+addr+"\">"+addr+"</option>", in_addresses ) ) \
+"</select>"
select = select.replace("<option value=\""+address+"\">","<option value=\""+address+"\" selected>")
tx_link = '<a href="tx/' + tx_hash + '">'+ tx_hash + '</a>'
javascript = """
<script>
function change_address(x){
document.getElementById("saddress").innerHTML=x;
}
function change_text(x){
x = x.replace(/!/g,"\\\\!");
x = x.replace(/\\n/g,"\\\\n");
x = x.replace(/\\$/g,"\\\\$");
document.getElementById("stext").innerHTML = x;
}
function onload(){
change_text(document.getElementById("text").value);
//change_address(document.getElementById("address").value);
}
</script>
"""
page['title'] = 'Annotate transaction'
page['body'] = [
javascript,
'<form id="form" action="', page['dotdot'], 'annotate">\n'
'Transaction: ',tx_link,'<br/>'
'Address:', select,'<br/><br/>\n'
'Message:<br/><textarea id="text" onkeyup="change_text(this.value);" name="comment" cols="80" value=""></textarea><br/><br/>\n'
                'You must sign your message with one of the input addresses involved in the transaction.<br/>\n'
'The signature will be returned by the following command line:<br/>\n'
'<pre>bitcoind signmessage <span id="saddress">'+in_addresses[0]+'</span> "'+tx_hash+':<span id="stext">your text</span>"</pre>\n'
'Signature:<br/><input name="signature" value="" style="width:500px;"/><br/>'
'<input name="tx" type="hidden" value="'+tx_hash+'" />'
'<button type="submit">Submit</button>\n'
'</form>\n']
return
if address:
page['title'] = 'Annotate address'
if message or signature:
# check signature
import bitcoinrpc
conn = bitcoinrpc.connect_to_local()
message = message.replace("\n","\\n").replace("!","\\!").replace("$","\\$")
print "verifymessage:", address, signature, message
try:
v = conn.verifymessage(address,signature, message)
except:
v = False
if not v:
page['title'] = 'Error'
page['body'] = ['<p>Invalid signature.</p>']
return
# little bobby tables
message = message.replace('"', '\\"').replace("'", "\\'")
# escape html
message = escape( message )
message = message[:1024]
row = abe.store.selectrow("select address from addr_comments where address='%s' "%(address ) )
if not row:
abe.store.sql("insert into addr_comments (address, text) VALUES ('%s', '%s')"%( address, message) )
abe.store.commit()
page['body'] = ['<p>Your comment was added successfully.</p>\n']
else:
if not message:
abe.store.sql("delete from addr_comments where address='%s' "%( message ) )
abe.store.commit()
page['body'] = ['<p>Your comment was deleted.</p>\n']
else:
abe.store.sql("update addr_comments set text='%s' where address='%s' "%( message, address ) )
abe.store.commit()
page['body'] = ['<p>Your comment was updated.</p>\n']
return
else:
javascript = """
<script>
function change_text(x){
x = x.replace(/!/g,"\\\\!");
x = x.replace(/\\n/g,"\\\\n");
x = x.replace(/\\$/g,"\\\\$");
document.getElementById("stext").innerHTML=x;
}
function onload(){
change_text(document.getElementById("text").value);
}
</script>
"""
page['title'] = 'Annotate address'
page['body'] = [
javascript,
'<form id="form" action="', page['dotdot'], 'annotate">\n'
'Address:', address,'<br/><br/>\n'
'Message:<br/><textarea id="text" onkeyup="change_text(this.value);" name="comment" cols="80" value=""></textarea><br/><br/>\n'
                'You must sign your message with the address.<br/>\n'
'The signature will be returned by the following command line:<br/>\n'
'<pre>bitcoind signmessage <span id="saddress">'+address+'</span> "<span id="stext">your text</span>"</pre>\n'
'Signature:<br/><input name="signature" value="" style="width:500px;"/><br/>'
'<input name="address" type="hidden" value="'+address+'" />'
'<button type="submit">Submit</button>\n'
'</form>\n']
def handle_thresholdRelease(abe, page):
page['title'] = 'Threshold Release'
chain = abe.get_default_chain()
target = (page['params'].get('target') or [''])[0]
address = (page['params'].get('address') or [''])[0]
secret = (page['params'].get('secret') or [''])[0]
signature = (page['params'].get('signature') or [''])[0]
if address:
# check if address is valid
version, binaddr = decode_check_address(address)
if binaddr is None:
page['body'] = ['<p>Not a valid address.</p>']
return
# check amount
try:
target = float(target)
except:
page['body'] = ['<p>Not a valid amount.</p>']
return
# check signature
import bitcoinrpc
conn = bitcoinrpc.connect_to_local()
print address, signature
try:
v = conn.verifymessage(address,signature, "fundraiser")
except:
v = False
if not v:
page['body'] = ['<p>Invalid signature.</p>']
return
# little bobby tables
secret = secret.replace('"', '\\"').replace("'", "\\'")
# escape html
#message = escape( message )
#
secret = secret[:1024]
row = abe.store.selectrow("select address from fundraisers where address='%s'"%(address ) )
if not row:
abe.store.sql("insert into fundraisers (address, target, secret) VALUES ('%s', %d, '%s')"%( address, target, secret) )
abe.store.commit()
page['body'] = ['<p>Your fundraiser was added successfully.</p>\n']
else:
if not secret:
abe.store.sql("delete from fundraisers where address='%s'"%( address ) )
abe.store.commit()
page['body'] = ['<p>Fundraiser entry was deleted.</p>\n']
else:
abe.store.sql("update fundraisers set target=%d, secret='%s' where address='%s'"%( target, secret, address ) )
abe.store.commit()
page['body'] = ['<p>Your fundraiser data was updated.</p>\n']
msg = "<object data=\"http://ecdsa.org/fundraiser/"+address+"?width=400\" height=\"60\" width=\"400\">Donate to "+address+"</object/>"
page['body'] += "Sample code:<br/><pre>"+escape(msg)+"</pre><br/><br/>"+msg
return
else:
javascript = """
<script>
function change_address(x){
//check validity here
document.getElementById("saddress").innerHTML=x;
}
function onload(){
change_address(document.getElementById("address").value);
}
</script>
"""
msg= """
This service allows you to release digital content when a requested amount of Bitcoin donations has been reached.<br/>
<br/>
For example, you may want to publish a low quality version of a music file, and release a high quality version only if donations reach the price you want.<br/>
<br/>
There are various ways to use this service:
<ul>
<li>You may upload your content at a private URL; we will disclose the URL once the amount is reached.</li>
<li>You may encrypt your content and upload it to a public server; we will publish the encryption password only when the target amount is reached.</li>
</ul>
Once the threshold is reached, the content is displayed in place of the donation progress bar.<br/>
<br/>
"""
page['title'] = 'Threshold Release'
page['body'] = [
javascript, msg,
'<form id="form" action="', page['dotdot'], 'thresholdRelease">\n'
'Address:<br/><input name="address" value="" style="width:500px;" onkeyup="change_address(this.value);"/><br/><br/>'
'Target amount:<br/><input name="target" value="" style="width:500px;"/><br/><br/>'
'Secret (will be displayed in place of the widget when the donation target is reached. Html, max. 1024 bytes):<br/>'
'<textarea name="secret" value="" style="width:500px;"></textarea><br/><br/>'
'You must provide a signature in order to demonstrate that you own the bitcoin address of the fundraiser.<br/>'
'The signature will be returned by the following command line:<br/>\n'
'<pre>bitcoind signmessage <span id="saddress"></span> <span id="stext">fundraiser</span></pre>\n'
'Signature:<br/><input name="signature" value="" style="width:500px;"/><br/>'
'<button type="submit">Submit</button>\n'
'</form>\n'
]
# check and display html as it is typed
def get_fundraiser(abe,page):
address = page['env'].get('PATH_INFO')[1:]
if not address: return None,None,None,None
chain = abe.get_default_chain()
# get donations
donations = abe.q_getreceivedbyaddress(page,chain)
try:
donations = float(donations)
except:
donations = 0
# check if address is in the database
row = abe.store.selectrow("select target, secret from fundraisers where address='%s'"%address )
secret = None
target = None
if row:
target, secret = row
if donations < target: secret = None
target = float(target)
        # A 'target' query parameter, if present, overrides the stored target.
try:
target = float( page['params'].get('target')[0] )
except:
pass
return address, donations, target, secret
def handle_fundraiser_js(abe,page):
""" return a scriptlet"""
address,donations,target,secret = abe.get_fundraiser(page)
if secret:
secret = escape( secret )
ret = "var fundraiser_address = \"%s\";\nvar fundraiser_secret='%s';\nvar fundraiser_received = %f;\nfundraiser_callback();\n"%(address,secret,donations)
abe.do_raw(page, ret)
page['content_type']='text/javascript'
def handle_fundraiser_img(abe,page):
return abe.handle_counter(page)
def handle_counter(abe,page):
""" return a png with percentage"""
address, donations, target, secret = abe.get_fundraiser(page)
if target:
progress = int(100 * donations/target)
progress = max(0, min( progress, 100 ))
return abe.serve_static("percent/%dpercent.png"%progress, page['start_response'])
else:
donations = "%.2f"%donations
path = "/img/" + donations + ".png"
cpath = abe.htdocs + path
if not os.path.exists(cpath):
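                # Render the amount text to a PNG with ImageMagick's `convert` and
                # cache it under htdocs/img/ so repeated requests reuse the file.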
s = donations+ " BTC"
length = 13*len(s)
cmd = "echo \"%s\" | convert -page %dx20+0+0 -font Helvetica -style Normal -background none -undercolor none -fill black -pointsize 22 text:- +repage -background none -flatten %s"%(s, length, cpath)
print cmd
os.system(cmd)
return abe.serve_static(path, page['start_response'])
def get_param(abe,page,name,default):
try:
return page['params'].get(name)[0]
except:
return default
def handle_fundraiser(abe, page):
abe.handle_widgets(page)
def handle_widgets(abe, page):
""" return embedded html"""
address, donations, target, secret = abe.get_fundraiser(page)
if not address:
f = open(abe.htdocs + '/widgets.html', "rb")
s = f.read()
f.close()
page['body'] = s
page['title'] = "Bitcoin Widgets"
return
if secret:
abe.do_raw(page, secret)
page['content_type']='text/html'
return
try:
width = int(page['params'].get('width')[0])
except:
width = 400
try:
bg = page['params'].get('bg')[0]
except:
bg = "#000000"
try:
lc = page['params'].get('leftcolor')[0]
except:
lc = "#dddddd"
try:
rc = page['params'].get('rightcolor')[0]
except:
rc = "#ffaa44"
try:
padding = page['params'].get('padding')[0]
except:
padding = "3"
try:
radius = page['params'].get('radius')[0]
except:
radius = "1em"
try:
textcolor = page['params'].get('textcolor')[0]
except:
textcolor = "#000000"
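        # Reserve roughly 120px for the right-hand "Donate" cell (100px wide plus
        # padding); the rest of the requested width holds the progress bar.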
leftwidth = width - 120
if target:
progress = min( width, max( 1, int( leftwidth * donations/target ) ))
percent = min( 100, max( 0, int( 100 * donations/target ) ))
title = "%d"%percent + " percent of %.2f BTC"%target
else:
title = ""
progress = leftwidth
outer_style = "border-radius:%s; -moz-border-radius:%s; padding:%s; color:%s; background-color: %s;"%(radius,radius,padding,textcolor,bg)
left_style = "border-radius:%s; -moz-border-radius:%s; padding:%s; background-color: %s;"%(radius,radius,padding,lc)
right_style = "border-radius:%s; -moz-border-radius:%s; padding:%s; background-color: %s; width:80px; text-align:center;"%(radius,radius,padding,rc)
count = "%.2f BTC"%donations
link_count = "<a style=\"text-decoration:none;color:"+textcolor + "\" title=\""+ title + "\" href=\"http://ecdsa.org/address/"+address+"\" target=\"_blank\">"+count+"</a>"
text = "Donate"
link_text = "<a style=\"text-decoration:none;color:"+textcolor+"\" href=\"javascript:alert('Donate to this Bitcoin address:\\n"+address+"');\">"+text+"</a>"
ret = """<table style="border-width:0px;"><tr><td>
<table style="%s width:%dpx;">
<tr><td style="%s width:%dpx; text-align:center;">%s</td><td></td></tr>
</table>
</td>
<td>
<table style="%s width:100px;">
<tr><td style="%s">%s</td></tr>
</table>
</td></tr></table>"""%(outer_style,leftwidth,left_style,progress,link_count,outer_style,right_style,link_text)
abe.do_raw(page, ret)
page['content_type']='text/html'
def serve(store):
args = store.args
abe = Aml(store, args)
if args.host or args.port:
# HTTP server.
if args.host is None:
args.host = "localhost"
from wsgiref.simple_server import make_server
port = int(args.port or 80)
httpd = make_server(args.host, port, abe )
print "Listening on http://" + args.host + ":" + str(port)
try:
httpd.serve_forever()
except:
httpd.shutdown()
raise
from daemon import Daemon
class MyDaemon(Daemon):
def __init__(self,args):
self.args = args
Daemon.__init__(self, self.args.pidfile, stderr=self.args.error_log, stdout=self.args.access_log )
def run(self):
store = make_store(self.args)
serve(store)
if __name__ == '__main__':
cmd = sys.argv[1]
if cmd not in ['start','stop','restart','run']:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
argv = sys.argv[2:]
conf = {
"port": 80,
"host": '',
"no_serve": None,
"debug": None,
"static_path": None,
"auto_agpl": None,
"download_name":None,
"watch_pid": None,
"base_url": None,
"no_update": None,
"pidfile": '',
"access_log": '',
"error_log": '',
"document_root":'',
"template": AML_TEMPLATE,
"template_vars": {
"APPNAME": AML_APPNAME,
"CONTENT_TYPE": 'text/html',
},
}
conf.update(DataStore.CONFIG_DEFAULTS)
argv.append('--config=/etc/abe.conf')
args, argv = readconf.parse_argv(argv, conf)
if argv:
sys.stderr.write("Error: unknown option `%s'\n" % (argv[0],))
sys.exit(1)
daemon = MyDaemon(args)
if cmd == 'start' :
daemon.start()
elif cmd == 'stop' :
daemon.stop()
elif cmd == 'restart' :
daemon.restart()
elif cmd=='run':
daemon.stop()
daemon.run()
sys.exit(0)
|
import pytest
from datetime import timedelta
from django.utils.timezone import now
from discount.models import Discount
@pytest.mark.django_db
def test_discount_model():
discount = Discount(code="DIS20", value=5, description="Some discount",
created=now(), ended=now() + timedelta(days=2))
discount.save()
assert discount.code == "DIS20"
assert discount.created < discount.ended
assert discount.value == 5
|
import sys
from PySide6.QtWidgets import QApplication
from todo.main_window import MainWindow
def main():
app = QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
    sys.exit(app.exec())
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
"""
"""
import vtk
def main():
colors = vtk.vtkNamedColors()
fileName = get_program_parameters()
# Read the image.
readerFactory = vtk.vtkImageReader2Factory()
reader = readerFactory.CreateImageReader2(fileName)
reader.SetFileName(fileName)
reader.Update()
cast = vtk.vtkImageCast()
cast.SetInputConnection(reader.GetOutputPort())
cast.SetOutputScalarTypeToDouble()
# Get rid of the discrete scalars.
smooth = vtk.vtkImageGaussianSmooth()
smooth.SetInputConnection(cast.GetOutputPort())
smooth.SetStandardDeviations(0.8, 0.8, 0)
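    # Build a synthetic correction profile: sampling an implicit sphere of radius 0
    # gives the squared distance from its centre, which (scaled down) is multiplied
    # into the smoothed image below to compensate for the sensor attenuation.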
m1 = vtk.vtkSphere()
m1.SetCenter(310, 130, 0)
m1.SetRadius(0)
m2 = vtk.vtkSampleFunction()
m2.SetImplicitFunction(m1)
m2.SetModelBounds(0, 264, 0, 264, 0, 1)
m2.SetSampleDimensions(264, 264, 1)
m3 = vtk.vtkImageShiftScale()
m3.SetInputConnection(m2.GetOutputPort())
m3.SetScale(0.000095)
div = vtk.vtkImageMathematics()
div.SetInputConnection(0, smooth.GetOutputPort())
div.SetInputConnection(1, m3.GetOutputPort())
div.SetOperationToMultiply()
# Create the actors.
colorWindow = 256.0
colorLevel = 127.5
originalActor = vtk.vtkImageActor()
originalActor.GetMapper().SetInputConnection(cast.GetOutputPort())
originalActor.GetProperty().SetColorWindow(colorWindow)
originalActor.GetProperty().SetColorLevel(colorLevel)
filteredActor = vtk.vtkImageActor()
filteredActor.GetMapper().SetInputConnection(div.GetOutputPort())
# Define the viewport ranges.
# (xmin, ymin, xmax, ymax)
originalViewport = [0.0, 0.0, 0.5, 1.0]
filteredViewport = [0.5, 0.0, 1.0, 1.0]
# Setup the renderers.
originalRenderer = vtk.vtkRenderer()
originalRenderer.SetViewport(originalViewport)
originalRenderer.AddActor(originalActor)
originalRenderer.ResetCamera()
originalRenderer.SetBackground(colors.GetColor3d("SlateGray"))
filteredRenderer = vtk.vtkRenderer()
filteredRenderer.SetViewport(filteredViewport)
filteredRenderer.AddActor(filteredActor)
filteredRenderer.ResetCamera()
filteredRenderer.SetBackground(colors.GetColor3d("LightSlateGray"))
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(600, 300)
renderWindow.AddRenderer(originalRenderer)
renderWindow.AddRenderer(filteredRenderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
style = vtk.vtkInteractorStyleImage()
renderWindowInteractor.SetInteractorStyle(style)
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.Initialize()
renderWindowInteractor.Start()
def get_program_parameters():
import argparse
description = 'This MRI image illustrates attenuation that can occur due to sensor position.'
epilogue = '''
The artifact is removed by dividing by the attenuation profile determined manually.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='AttenuationArtifact.pgm.')
args = parser.parse_args()
return args.filename
if __name__ == '__main__':
main()
|
"""Test state getters for retrieving geometry views of state."""
import pytest
from decoy import Decoy
from typing import cast
from opentrons.calibration_storage.helpers import uri_from_details
from opentrons_shared_data.deck.dev_types import DeckDefinitionV2
from opentrons.protocols.models import LabwareDefinition
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.types import Point, DeckSlotName
from opentrons.protocol_engine import errors
from opentrons.protocol_engine.types import DeckSlotLocation, WellLocation, WellOrigin
from opentrons.protocol_engine.state.labware import LabwareView, LabwareData
from opentrons.protocol_engine.state.geometry import GeometryView
@pytest.fixture
def subject(labware_view: LabwareView) -> GeometryView:
"""Get a GeometryView with its store dependencies mocked out."""
return GeometryView(labware_view=labware_view)
def test_get_labware_parent_position(
decoy: Decoy,
standard_deck_def: DeckDefinitionV2,
well_plate_def: LabwareDefinition,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should return a deck slot position for labware in a deck slot."""
labware_data = LabwareData(
uri=uri_from_details(namespace="a", load_name="b", version=1),
location=DeckSlotLocation(slot=DeckSlotName.SLOT_3),
calibration=(1, -2, 3),
)
decoy.when(labware_view.get_labware_data_by_id("labware-id")).then_return(
labware_data
)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
Point(1, 2, 3)
)
result = subject.get_labware_parent_position("labware-id")
assert result == Point(1, 2, 3)
def test_get_labware_origin_position(
decoy: Decoy,
standard_deck_def: DeckDefinitionV2,
well_plate_def: LabwareDefinition,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should return a deck slot position with the labware's offset as its origin."""
uri = uri_from_details(
namespace=well_plate_def.namespace,
load_name=well_plate_def.parameters.loadName,
version=well_plate_def.version,
)
labware_data = LabwareData(
uri=uri,
location=DeckSlotLocation(slot=DeckSlotName.SLOT_3),
calibration=(1, -2, 3),
)
decoy.when(labware_view.get_labware_data_by_id("labware-id")).then_return(
labware_data
)
decoy.when(labware_view.get_definition_by_uri(uri)).then_return(well_plate_def)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
Point(1, 2, 3)
)
expected_parent = Point(1, 2, 3)
expected_offset = Point(
x=well_plate_def.cornerOffsetFromSlot.x,
y=well_plate_def.cornerOffsetFromSlot.y,
z=well_plate_def.cornerOffsetFromSlot.z,
)
expected_point = expected_parent + expected_offset
result = subject.get_labware_origin_position("labware-id")
assert result == expected_point
def test_get_labware_highest_z(
decoy: Decoy,
standard_deck_def: DeckDefinitionV2,
well_plate_def: LabwareDefinition,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should get the absolute location of a labware's highest Z point."""
uri = uri_from_details(
namespace=well_plate_def.namespace,
load_name=well_plate_def.parameters.loadName,
version=well_plate_def.version,
)
labware_data = LabwareData(
uri=uri,
location=DeckSlotLocation(slot=DeckSlotName.SLOT_3),
calibration=(1, -2, 3),
)
decoy.when(labware_view.get_labware_data_by_id("labware-id")).then_return(
labware_data
)
decoy.when(labware_view.get_definition_by_uri(uri)).then_return(well_plate_def)
slot_pos = Point(1, 2, 3)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
slot_pos
)
highest_z = subject.get_labware_highest_z("labware-id")
assert highest_z == (well_plate_def.dimensions.zDimension + slot_pos[2] + 3)
def test_get_all_labware_highest_z(
decoy: Decoy,
standard_deck_def: DeckDefinitionV2,
well_plate_def: LabwareDefinition,
reservoir_def: LabwareDefinition,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should get the highest Z amongst all labware."""
plate_data = LabwareData(
uri=uri_from_details(
namespace=well_plate_def.namespace,
load_name=well_plate_def.parameters.loadName,
version=well_plate_def.version,
),
location=DeckSlotLocation(slot=DeckSlotName.SLOT_3),
calibration=(1, -2, 3),
)
reservoir_data = LabwareData(
uri=uri_from_details(
namespace=reservoir_def.namespace,
load_name=reservoir_def.parameters.loadName,
version=reservoir_def.version,
),
location=DeckSlotLocation(slot=DeckSlotName.SLOT_4),
calibration=(1, -2, 3),
)
decoy.when(labware_view.get_labware_data_by_id("plate-id")).then_return(plate_data)
decoy.when(labware_view.get_labware_data_by_id("reservoir-id")).then_return(
reservoir_data
)
decoy.when(labware_view.get_definition_by_uri(plate_data.uri)).then_return(
well_plate_def
)
decoy.when(labware_view.get_definition_by_uri(reservoir_data.uri)).then_return(
reservoir_def
)
decoy.when(labware_view.get_all_labware()).then_return(
[
("plate-id", plate_data),
("reservoir-id", reservoir_data),
]
)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
Point(1, 2, 3)
)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
Point(4, 5, 6)
)
plate_z = subject.get_labware_highest_z("plate-id")
reservoir_z = subject.get_labware_highest_z("reservoir-id")
all_z = subject.get_all_labware_highest_z()
assert all_z == max(plate_z, reservoir_z)
def test_get_labware_position(
decoy: Decoy,
well_plate_def: LabwareDefinition,
standard_deck_def: DeckDefinitionV2,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should return the slot position plus calibrated offset."""
labware_data = LabwareData(
uri=uri_from_details(
namespace=well_plate_def.namespace,
load_name=well_plate_def.parameters.loadName,
version=well_plate_def.version,
),
location=DeckSlotLocation(slot=DeckSlotName.SLOT_4),
calibration=(1, -2, 3),
)
slot_pos = Point(4, 5, 6)
decoy.when(labware_view.get_labware_data_by_id("abc")).then_return(labware_data)
decoy.when(labware_view.get_definition_by_uri(labware_data.uri)).then_return(
well_plate_def
)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_4)).then_return(
slot_pos
)
position = subject.get_labware_position(labware_id="abc")
assert position == Point(
x=slot_pos[0] + well_plate_def.cornerOffsetFromSlot.x + 1,
y=slot_pos[1] + well_plate_def.cornerOffsetFromSlot.y - 2,
z=slot_pos[2] + well_plate_def.cornerOffsetFromSlot.z + 3,
)
def test_get_well_position(
decoy: Decoy,
well_plate_def: LabwareDefinition,
standard_deck_def: DeckDefinitionV2,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well top in a labware."""
labware_data = LabwareData(
uri=uri_from_details(
namespace=well_plate_def.namespace,
load_name=well_plate_def.parameters.loadName,
version=well_plate_def.version,
),
location=DeckSlotLocation(slot=DeckSlotName.SLOT_3),
calibration=(1, -2, 3),
)
well_def = well_plate_def.wells["B2"]
slot_pos = Point(4, 5, 6)
decoy.when(labware_view.get_definition_by_uri(labware_data.uri)).then_return(
well_plate_def
)
decoy.when(labware_view.get_labware_data_by_id("plate-id")).then_return(
labware_data
)
decoy.when(labware_view.get_well_definition("plate-id", "B2")).then_return(well_def)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
slot_pos
)
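    # Expected well position = slot position + labware calibration offset
    # + the well's x/y/z offsets, with z raised by the well depth (well top).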
point = subject.get_well_position("plate-id", "B2")
assert point == Point(
x=slot_pos[0] + 1 + well_def.x,
y=slot_pos[1] - 2 + well_def.y,
z=slot_pos[2] + 3 + well_def.z + well_def.depth,
)
def test_get_well_position_with_top_offset(
decoy: Decoy,
well_plate_def: LabwareDefinition,
standard_deck_def: DeckDefinitionV2,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well top in a labware."""
labware_data = LabwareData(
uri=uri_from_details(
namespace=well_plate_def.namespace,
load_name=well_plate_def.parameters.loadName,
version=well_plate_def.version,
),
location=DeckSlotLocation(slot=DeckSlotName.SLOT_3),
calibration=(1, -2, 3),
)
well_def = well_plate_def.wells["B2"]
slot_pos = Point(4, 5, 6)
decoy.when(labware_view.get_definition_by_uri(labware_data.uri)).then_return(
well_plate_def
)
decoy.when(labware_view.get_labware_data_by_id("plate-id")).then_return(
labware_data
)
decoy.when(labware_view.get_well_definition("plate-id", "B2")).then_return(well_def)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
slot_pos
)
point = subject.get_well_position(
"plate-id", "B2", WellLocation(origin=WellOrigin.TOP, offset=(1, 2, 3))
)
assert point == Point(
x=slot_pos[0] + 1 + well_def.x + 1,
y=slot_pos[1] - 2 + well_def.y + 2,
z=slot_pos[2] + 3 + well_def.z + well_def.depth + 3,
)
def test_get_well_position_with_bottom_offset(
decoy: Decoy,
well_plate_def: LabwareDefinition,
standard_deck_def: DeckDefinitionV2,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well top in a labware."""
labware_data = LabwareData(
uri=uri_from_details(
namespace=well_plate_def.namespace,
load_name=well_plate_def.parameters.loadName,
version=well_plate_def.version,
),
location=DeckSlotLocation(slot=DeckSlotName.SLOT_3),
calibration=(1, -2, 3),
)
well_def = well_plate_def.wells["B2"]
slot_pos = Point(4, 5, 6)
decoy.when(labware_view.get_definition_by_uri(labware_data.uri)).then_return(
well_plate_def
)
decoy.when(labware_view.get_labware_data_by_id("plate-id")).then_return(
labware_data
)
decoy.when(labware_view.get_well_definition("plate-id", "B2")).then_return(well_def)
decoy.when(labware_view.get_slot_position(DeckSlotName.SLOT_3)).then_return(
slot_pos
)
point = subject.get_well_position(
"plate-id", "B2", WellLocation(origin=WellOrigin.BOTTOM, offset=(3, 2, 1))
)
assert point == Point(
x=slot_pos[0] + 1 + well_def.x + 3,
y=slot_pos[1] - 2 + well_def.y + 2,
z=slot_pos[2] + 3 + well_def.z + 1,
)
def test_get_effective_tip_length(
decoy: Decoy,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should get the effective tip length from a labware ID and pipette config."""
pipette_config: PipetteDict = cast(
PipetteDict,
{
"tip_overlap": {
"default": 10,
"opentrons/opentrons_96_tiprack_300ul/1": 20,
}
},
)
decoy.when(labware_view.get_tip_length("tip-rack-id")).then_return(50)
decoy.when(labware_view.get_definition_uri("tip-rack-id")).then_return(
"opentrons/opentrons_96_tiprack_300ul/1"
)
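    # The rack URI matches a specific tip_overlap entry (20), so the effective
    # length is 50 - 20 = 30; the later case falls back to the default overlap
    # of 10, giving 50 - 10 = 40.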
length_eff = subject.get_effective_tip_length(
labware_id="tip-rack-id",
pipette_config=pipette_config,
)
assert length_eff == 30
decoy.when(labware_view.get_definition_uri("tip-rack-id")).then_return(
"opentrons/something_else/1"
)
default_length_eff = subject.get_effective_tip_length(
labware_id="tip-rack-id",
pipette_config=pipette_config,
)
assert default_length_eff == 40
def test_get_tip_geometry(
decoy: Decoy,
tip_rack_def: LabwareDefinition,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should get a "well's" tip geometry."""
pipette_config: PipetteDict = cast(PipetteDict, {"tip_overlap": {"default": 10}})
well_def = tip_rack_def.wells["B2"]
decoy.when(labware_view.get_tip_length("tip-rack-id")).then_return(50)
decoy.when(labware_view.get_definition_uri("tip-rack-id")).then_return("")
decoy.when(labware_view.get_well_definition("tip-rack-id", "B2")).then_return(
well_def
)
tip_geometry = subject.get_tip_geometry(
labware_id="tip-rack-id",
well_name="B2",
pipette_config=pipette_config,
)
assert tip_geometry.effective_length == 40
assert tip_geometry.diameter == well_def.diameter # type: ignore[misc]
assert tip_geometry.volume == well_def.totalLiquidVolume
def test_get_tip_geometry_raises(
decoy: Decoy,
tip_rack_def: LabwareDefinition,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should raise LabwareIsNotTipRackError if well is not circular."""
pipette_config: PipetteDict = cast(PipetteDict, {"tip_overlap": {"default": 10}})
well_def = tip_rack_def.wells["B2"]
well_def.shape = "rectangular"
with pytest.raises(errors.LabwareIsNotTipRackError):
decoy.when(labware_view.get_tip_length("tip-rack-id")).then_return(0)
decoy.when(labware_view.get_well_definition("tip-rack-id", "B2")).then_return(
well_def
)
subject.get_tip_geometry(
labware_id="tip-rack-id", well_name="B2", pipette_config=pipette_config
)
def test_get_tip_drop_location(
decoy: Decoy,
tip_rack_def: LabwareDefinition,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should get relative drop tip location for a pipette/labware combo."""
pipette_config: PipetteDict = cast(PipetteDict, {"return_tip_height": 0.7})
decoy.when(labware_view.get_tip_length("tip-rack-id")).then_return(50)
location = subject.get_tip_drop_location(
labware_id="tip-rack-id", pipette_config=pipette_config
)
assert location == WellLocation(
origin=WellOrigin.TOP,
offset=(0, 0, -0.7 * 50),
)
def test_get_tip_drop_location_with_trash(
decoy: Decoy,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should get relative drop tip location for a the fixed trash."""
pipette_config: PipetteDict = cast(PipetteDict, {"return_tip_height": 0.7})
decoy.when(
labware_view.get_labware_has_quirk(labware_id="labware-id", quirk="fixedTrash")
).then_return(True)
location = subject.get_tip_drop_location(
labware_id="labware-id",
pipette_config=pipette_config,
)
assert location == WellLocation(origin=WellOrigin.TOP, offset=(0, 0, 0))
|
# Copyright (c) 2018 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
from __future__ import absolute_import
import unittest
import os
import io
import warnings
import logging
import uuid
import copy
import random
import json
from contextlib import contextmanager
import signac.contrib
import signac.common.config
from signac.common import six
from signac.errors import DestinationExistsError
from signac.errors import JobsCorruptedError
from signac.errors import InvalidKeyError
if six.PY2:
from tempdir import TemporaryDirectory
else:
from tempfile import TemporaryDirectory
try:
import h5py # noqa
H5PY = True
except ImportError:
H5PY = False
# Make sure the jobs created for this test are unique.
test_token = {'test_token': str(uuid.uuid4())}
warnings.simplefilter('default')
warnings.filterwarnings('error', category=DeprecationWarning, module='signac')
warnings.filterwarnings(
'ignore', category=PendingDeprecationWarning, message=r'.*Cache API.*')
BUILTINS = [
({'e': [1.0, '1.0', 1, True]}, '4d8058a305b940005be419b30e99bb53'),
({'d': True}, '33cf9999de25a715a56339c6c1b28b41'),
({'f': (1.0, '1.0', 1, True)}, 'e998db9b595e170bdff936f88ccdbf75'),
({'a': 1}, '42b7b4f2921788ea14dac5566e6f06d0'),
({'c': '1.0'}, '80fa45716dd3b83fa970877489beb42e'),
({'b': 1.0}, '0ba6c5a46111313f11c41a6642520451'),
]
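# builtins_dict() merges the BUILTINS state points in shuffled order, so the
# BUILTINS_HASH assertions below also exercise order-independence of the job id.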
def builtins_dict():
random.shuffle(BUILTINS)
d = dict()
for b in BUILTINS:
d.update(b[0])
return d
BUILTINS_HASH = '7a80b58db53bbc544fc27fcaaba2ce44'
NESTED_HASH = 'bd6f5828f4410b665bffcec46abeb8f3'
def config_from_cfg(cfg):
cfile = io.StringIO('\n'.join(cfg))
return signac.common.config.get_config(cfile)
def testdata():
return str(uuid.uuid4())
class BaseJobTest(unittest.TestCase):
project_class = signac.Project
def setUp(self):
self._tmp_dir = TemporaryDirectory(prefix='signac_')
self.addCleanup(self._tmp_dir.cleanup)
self._tmp_pr = os.path.join(self._tmp_dir.name, 'pr')
self._tmp_wd = os.path.join(self._tmp_dir.name, 'wd')
os.mkdir(self._tmp_pr)
self.config = signac.common.config.load_config()
self.project = self.project_class.init_project(
name='testing_test_project',
root=self._tmp_pr,
workspace=self._tmp_wd)
self.project.config['default_host'] = 'testing'
def tearDown(self):
pass
def open_job(self, *args, **kwargs):
project = self.project
return project.open_job(*args, **kwargs)
@classmethod
    def nested_dict(cls):
d = dict(builtins_dict())
d['g'] = builtins_dict()
return d
class JobIDTest(BaseJobTest):
def test_builtins(self):
for p, h in BUILTINS:
self.assertEqual(str(self.project.open_job(p)), h)
self.assertEqual(
str(self.project.open_job(builtins_dict())), BUILTINS_HASH)
def test_shuffle(self):
for i in range(10):
self.assertEqual(
str(self.project.open_job(builtins_dict())), BUILTINS_HASH)
def test_nested(self):
for i in range(10):
self.assertEqual(
str(self.project.open_job(self.nested_dict())), NESTED_HASH)
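    # Lists and tuples in a state point are treated as equivalent sequences, so both
    # jobs below share the same id and state point.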
def test_sequences_identity(self):
job1 = self.project.open_job({'a': [1.0, '1.0', 1, True]})
job2 = self.project.open_job({'a': (1.0, '1.0', 1, True)})
self.assertEqual(str(job1), str(job2))
self.assertEqual(job1.statepoint(), job2.statepoint())
class JobTest(BaseJobTest):
def test_repr(self):
job = self.project.open_job({'a': 0})
job2 = self.project.open_job({'a': 0})
self.assertEqual(repr(job), repr(job2))
self.assertEqual(job, job2)
def test_str(self):
job = self.project.open_job({'a': 0})
self.assertEqual(str(job), job.get_id())
def test_isfile(self):
job = self.project.open_job({'a': 0})
fn = 'test.txt'
fn_ = os.path.join(job.workspace(), fn)
self.assertFalse(job.isfile(fn))
job.init()
self.assertFalse(job.isfile(fn))
with open(fn_, 'w') as file:
file.write('hello')
self.assertTrue(job.isfile(fn))
class JobSPInterfaceTest(BaseJobTest):
def test_interface_read_only(self):
sp = self.nested_dict()
job = self.open_job(sp)
self.assertEqual(job.statepoint(), json.loads(json.dumps(sp)))
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(getattr(job.sp, x), sp[x])
self.assertEqual(job.sp[x], sp[x])
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(getattr(job.sp.g, x), sp['g'][x])
self.assertEqual(job.sp[x], sp[x])
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(job.sp.get(x), sp[x])
self.assertEqual(job.sp.get(x), sp[x])
self.assertEqual(job.sp.g.get(x), sp['g'][x])
self.assertIsNone(job.sp.get('not_in_sp'))
self.assertIsNone(job.sp.g.get('not_in_sp'))
self.assertIsNone(job.sp.get('not_in_sp', None))
self.assertIsNone(job.sp.g.get('not_in_sp', None))
self.assertEqual(job.sp.get('not_in_sp', 23), 23)
self.assertEqual(job.sp.g.get('not_in_sp', 23), 23)
def test_interface_contains(self):
sp = self.nested_dict()
job = self.open_job(sp)
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertIn(x, job.sp)
self.assertIn(x, job.sp.g)
def test_interface_read_write(self):
sp = self.nested_dict()
job = self.open_job(sp)
job.init()
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(getattr(job.sp, x), sp[x])
self.assertEqual(job.sp[x], sp[x])
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(getattr(job.sp.g, x), sp['g'][x])
self.assertEqual(job.sp[x], sp[x])
a = [1, 1.0, '1.0', True, None]
b = list(a) + [a] + [tuple(a)]
for v in b:
for x in ('a', 'b', 'c', 'd', 'e'):
setattr(job.sp, x, v)
self.assertEqual(getattr(job.sp, x), v)
setattr(job.sp.g, x, v)
self.assertEqual(getattr(job.sp.g, x), v)
def test_interface_job_identity_change(self):
job = self.open_job({'a': 0})
old_id = job.get_id()
job.sp.a = 1
self.assertNotEqual(old_id, job.get_id())
def test_interface_nested_kws(self):
with self.assertRaises(InvalidKeyError):
job = self.open_job({'a.b.c': 0})
job = self.open_job(dict(a=dict(b=dict(c=2))))
self.assertEqual(job.sp.a.b.c, 2)
self.assertEqual(job.sp['a']['b']['c'], 2)
def test_interface_lists(self):
job = self.open_job({'a': [1, 2, 3]})
self.assertEqual(job.sp.a, [1, 2, 3])
old_id = job.get_id()
job.sp.a.append(4)
self.assertEqual(job.sp.a, [1, 2, 3, 4])
self.assertNotEqual(old_id, job.get_id())
def test_interface_reserved_keywords(self):
job = self.open_job({'with': 0, 'pop': 1})
self.assertEqual(job.sp['with'], 0)
self.assertEqual(job.sp['pop'], 1)
self.assertEqual(job.sp.pop('with'), 0)
self.assertNotIn('with', job.sp)
def test_interface_illegal_type(self):
job = self.open_job(dict(a=0))
self.assertEqual(job.sp.a, 0)
class Foo(object):
pass
with self.assertRaises(TypeError):
job.sp.a = Foo()
def test_interface_rename(self):
job = self.open_job(dict(a=0))
job.init()
self.assertEqual(job.sp.a, 0)
job.sp.b = job.sp.pop('a')
self.assertNotIn('a', job.sp)
self.assertEqual(job.sp.b, 0)
def test_interface_add(self):
job = self.open_job(dict(a=0))
job.init()
with self.assertRaises(AttributeError):
job.sp.b
job.sp.b = 1
self.assertIn('b', job.sp)
self.assertEqual(job.sp.b, 1)
def test_interface_delete(self):
job = self.open_job(dict(a=0, b=0))
job.init()
self.assertIn('b', job.sp)
self.assertEqual(job.sp.b, 0)
del job.sp['b']
self.assertNotIn('b', job.sp)
with self.assertRaises(AttributeError):
job.sp.b
job.sp.b = 0
self.assertIn('b', job.sp)
self.assertEqual(job.sp.b, 0)
del job.sp.b
self.assertNotIn('b', job.sp)
with self.assertRaises(AttributeError):
job.sp.b
def test_interface_destination_conflict(self):
job_a = self.open_job(dict(a=0))
job_b = self.open_job(dict(b=0))
job_a.init()
id_a = job_a.get_id()
job_a.sp = dict(b=0)
self.assertEqual(job_a.statepoint(), dict(b=0))
self.assertEqual(job_a, job_b)
self.assertNotEqual(job_a.get_id(), id_a)
job_a = self.open_job(dict(a=0))
# Moving to existing job, no problem while empty:
self.assertNotEqual(job_a, job_b)
job_a.sp = dict(b=0)
job_a = self.open_job(dict(a=0))
job_b.init()
# Moving to an existing job with data leads
# to an error:
job_a.document['a'] = 0
job_b.document['a'] = 0
self.assertNotEqual(job_a, job_b)
with self.assertRaises(RuntimeError):
job_a.sp = dict(b=0)
with self.assertRaises(DestinationExistsError):
job_a.sp = dict(b=0)
def test_interface_multiple_changes(self):
for i in range(1, 4):
job = self.project.open_job(dict(a=i))
job.init()
for job in self.project:
self.assertTrue(job.sp.a > 0)
for job in self.project:
obj_id = id(job)
id0 = job.get_id()
sp0 = job.statepoint()
self.assertEqual(id(job), obj_id)
self.assertTrue(job.sp.a > 0)
self.assertEqual(job.get_id(), id0)
self.assertEqual(job.sp, sp0)
job.sp.a = - job.sp.a
self.assertEqual(id(job), obj_id)
self.assertTrue(job.sp.a < 0)
self.assertNotEqual(job.get_id(), id0)
self.assertNotEqual(job.sp, sp0)
job.sp.a = - job.sp.a
self.assertEqual(id(job), obj_id)
self.assertTrue(job.sp.a > 0)
self.assertEqual(job.get_id(), id0)
self.assertEqual(job.sp, sp0)
job2 = self.project.open_job(id=id0)
self.assertEqual(job.sp, job2.sp)
self.assertEqual(job.get_id(), job2.get_id())
class ConfigTest(BaseJobTest):
def test_set_get_delete(self):
        key, value = 'author_name', list(test_token.values())[0]
config = copy.deepcopy(self.project.config)
config[key] = value
self.assertEqual(config[key], value)
self.assertIn(key, config)
del config[key]
self.assertNotIn(key, config)
def test_update(self):
key, value = 'author_name', list(test_token.values())[0]
config = copy.deepcopy(self.project.config)
config.update({key: value})
self.assertEqual(config[key], value)
self.assertIn(key, config)
def test_set_and_retrieve_version(self):
fake_version = 0, 0, 0
self.project.config['signac_version'] = fake_version
self.assertEqual(self.project.config['signac_version'], fake_version)
def test_str(self):
str(self.project.config)
class JobOpenAndClosingTest(BaseJobTest):
def test_init(self):
job = self.open_job(test_token)
self.assertFalse(os.path.isdir(job.workspace()))
job.init()
self.assertEqual(job.workspace(), job.ws)
self.assertTrue(os.path.isdir(job.workspace()))
self.assertTrue(os.path.isdir(job.ws))
self.assertTrue(os.path.exists(os.path.join(job.workspace(), job.FN_MANIFEST)))
def test_chained_init(self):
job = self.open_job(test_token)
self.assertFalse(os.path.isdir(job.workspace()))
job = self.open_job(test_token).init()
self.assertEqual(job.workspace(), job.ws)
self.assertTrue(os.path.isdir(job.workspace()))
self.assertTrue(os.path.isdir(job.ws))
self.assertTrue(os.path.exists(os.path.join(job.workspace(), job.FN_MANIFEST)))
def test_construction(self):
job = self.open_job(test_token)
job2 = eval(repr(job))
self.assertEqual(job, job2)
def test_open_job_close(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with self.open_job(test_token) as job:
pass
job.remove()
def test_open_job_close_manual(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
job = self.open_job(test_token)
job.open()
job.close()
job.remove()
def test_open_job_close_with_error(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
job = self.open_job(test_token)
class TestError(Exception):
pass
with self.assertRaises(TestError):
with job:
raise TestError()
job.remove()
def test_reopen_job(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with self.open_job(test_token) as job:
job_id = job.get_id()
self.assertEqual(str(job_id), str(job))
with self.open_job(test_token) as job:
self.assertEqual(job.get_id(), job_id)
job.remove()
def test_close_nonopen_job(self):
job = self.open_job(test_token)
job.close()
with job:
pass
def test_close_job_while_open(self):
rp = os.path.realpath
cwd = rp(os.getcwd())
job = self.open_job(test_token)
with job:
job.close()
self.assertEqual(cwd, rp(os.getcwd()))
def test_open_job_recursive(self):
rp = os.path.realpath
cwd = rp(os.getcwd())
job = self.open_job(test_token)
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(cwd, rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
os.chdir(self.project.root_directory())
self.assertEqual(cwd, rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(cwd, rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
os.chdir(self.project.root_directory())
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(rp(os.getcwd()), rp(self.project.root_directory()))
self.assertEqual(cwd, rp(os.getcwd()))
with job:
job.close()
self.assertEqual(cwd, rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(cwd, rp(os.getcwd()))
self.assertEqual(cwd, rp(os.getcwd()))
def test_corrupt_workspace(self):
job = self.open_job(test_token)
job.init()
fn_manifest = os.path.join(job.workspace(), job.FN_MANIFEST)
with open(fn_manifest, 'w') as file:
file.write("corrupted")
job2 = self.open_job(test_token)
try:
logging.disable(logging.ERROR)
with self.assertRaises(JobsCorruptedError):
job2.init()
finally:
logging.disable(logging.NOTSET)
job2.init(force=True)
job2.init()
class JobDocumentTest(BaseJobTest):
def test_get_set(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
self.assertFalse(bool(job.document))
self.assertEqual(len(job.document), 0)
self.assertNotIn(key, job.document)
job.document[key] = d
self.assertTrue(bool(job.document))
self.assertEqual(len(job.document), 1)
self.assertIn(key, job.document)
self.assertEqual(job.document[key], d)
self.assertEqual(job.document.get(key), d)
self.assertEqual(job.document.get('non-existent-key', d), d)
def test_del(self):
key = 'del0'
key1 = 'del1'
d = testdata()
d1 = testdata()
job = self.open_job(test_token)
self.assertEqual(len(job.document), 0)
self.assertNotIn(key, job.document)
job.document[key] = d
self.assertEqual(len(job.document), 1)
self.assertIn(key, job.document)
job.document[key1] = d1
self.assertEqual(len(job.document), 2)
self.assertIn(key, job.document)
self.assertIn(key1, job.document)
self.assertEqual(job.document[key], d)
self.assertEqual(job.document[key1], d1)
del job.document[key]
self.assertEqual(len(job.document), 1)
self.assertIn(key1, job.document)
self.assertNotIn(key, job.document)
def test_get_set_doc(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
self.assertFalse(bool(job.doc))
self.assertEqual(len(job.doc), 0)
self.assertNotIn(key, job.doc)
job.doc[key] = d
self.assertTrue(bool(job.doc))
self.assertEqual(len(job.doc), 1)
self.assertIn(key, job.doc)
self.assertEqual(job.doc[key], d)
self.assertEqual(job.doc.get(key), d)
self.assertEqual(job.doc.get('non-existent-key', d), d)
def test_set_set_doc(self):
key0, key1 = 'set_set0', 'set_set1'
d0, d1 = testdata(), testdata()
job = self.open_job(test_token)
self.assertFalse(bool(job.doc))
self.assertEqual(len(job.doc), 0)
self.assertNotIn(key0, job.doc)
job.doc[key0] = d0
self.assertTrue(bool(job.doc))
self.assertEqual(len(job.doc), 1)
self.assertIn(key0, job.doc)
self.assertEqual(job.doc[key0], d0)
job = self.open_job(test_token)
self.assertTrue(bool(job.doc))
self.assertEqual(len(job.doc), 1)
self.assertIn(key0, job.doc)
self.assertEqual(job.doc[key0], d0)
job = self.open_job(test_token)
job.document[key1] = d1
self.assertTrue(bool(job.doc))
self.assertEqual(len(job.doc), 2)
self.assertIn(key0, job.doc)
self.assertIn(key1, job.doc)
self.assertEqual(job.doc[key0], d0)
self.assertEqual(job.doc[key1], d1)
def test_get_set_nested(self):
d0 = testdata()
d1 = testdata()
d2 = testdata()
assert d0 != d1 != d2
job = self.open_job(test_token)
self.assertEqual(len(job.document), 0)
self.assertNotIn('key0', job.document)
job.document['key0'] = d0
self.assertEqual(len(job.document), 1)
self.assertIn('key0', job.document)
self.assertEqual(job.document['key0'], d0)
with self.assertRaises(AttributeError):
job.document.key0.key1
job.document.key0 = {'key1': d0}
self.assertEqual(len(job.document), 1)
self.assertIn('key0', job.document)
self.assertEqual(job.document(), {'key0': {'key1': d0}})
self.assertEqual(job.document['key0'], {'key1': d0})
self.assertEqual(job.document['key0']['key1'], d0)
self.assertEqual(job.document.key0, {'key1': d0})
self.assertEqual(job.document.key0.key1, d0)
job.document.key0.key1 = d1
self.assertEqual(job.document, {'key0': {'key1': d1}})
self.assertEqual(job.document['key0'], {'key1': d1})
self.assertEqual(job.document['key0']['key1'], d1)
self.assertEqual(job.document.key0, {'key1': d1})
self.assertEqual(job.document.key0.key1, d1)
job.document['key0']['key1'] = d2
self.assertEqual(job.document, {'key0': {'key1': d2}})
self.assertEqual(job.document['key0'], {'key1': d2})
self.assertEqual(job.document['key0']['key1'], d2)
self.assertEqual(job.document.key0, {'key1': d2})
self.assertEqual(job.document.key0.key1, d2)
def test_get_set_nested_doc(self):
d0 = testdata()
d1 = testdata()
d2 = testdata()
assert d0 != d1 != d2
job = self.open_job(test_token)
self.assertEqual(len(job.doc), 0)
self.assertNotIn('key0', job.doc)
job.doc['key0'] = d0
self.assertEqual(len(job.doc), 1)
self.assertIn('key0', job.doc)
self.assertEqual(job.doc['key0'], d0)
with self.assertRaises(AttributeError):
job.doc.key0.key1
job.doc.key0 = {'key1': d0}
self.assertEqual(len(job.doc), 1)
self.assertIn('key0', job.doc)
self.assertEqual(job.doc(), {'key0': {'key1': d0}})
self.assertEqual(job.doc['key0'], {'key1': d0})
self.assertEqual(job.doc['key0']['key1'], d0)
self.assertEqual(job.doc.key0, {'key1': d0})
self.assertEqual(job.doc.key0.key1, d0)
job.doc.key0.key1 = d1
self.assertEqual(job.doc, {'key0': {'key1': d1}})
self.assertEqual(job.doc['key0'], {'key1': d1})
self.assertEqual(job.doc['key0']['key1'], d1)
self.assertEqual(job.doc.key0, {'key1': d1})
self.assertEqual(job.doc.key0.key1, d1)
job.doc['key0']['key1'] = d2
self.assertEqual(job.doc, {'key0': {'key1': d2}})
self.assertEqual(job.doc['key0'], {'key1': d2})
self.assertEqual(job.doc['key0']['key1'], d2)
self.assertEqual(job.doc.key0, {'key1': d2})
self.assertEqual(job.doc.key0.key1, d2)
def test_assign(self):
key = 'assign'
d0 = testdata()
d1 = testdata()
job = self.open_job(test_token)
self.assertEqual(len(job.document), 0)
job.document[key] = d0
self.assertEqual(len(job.document), 1)
self.assertEqual(job.document(), {key: d0})
with self.assertRaises(ValueError):
job.document = d1
job.document = {key: d1}
self.assertEqual(len(job.document), 1)
self.assertEqual(job.document(), {key: d1})
def test_assign_doc(self):
key = 'assign'
d0 = testdata()
d1 = testdata()
job = self.open_job(test_token)
self.assertEqual(len(job.doc), 0)
job.doc[key] = d0
self.assertEqual(len(job.doc), 1)
self.assertEqual(job.doc(), {key: d0})
with self.assertRaises(ValueError):
job.doc = d1
job.doc = {key: d1}
self.assertEqual(len(job.doc), 1)
self.assertEqual(job.doc(), {key: d1})
def test_copy_document(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
job.document[key] = d
self.assertTrue(bool(job.document))
self.assertEqual(len(job.document), 1)
self.assertIn(key, job.document)
self.assertEqual(job.document[key], d)
self.assertEqual(job.document.get(key), d)
self.assertEqual(job.document.get('non-existent-key', d), d)
copy = dict(job.document)
self.assertTrue(bool(copy))
self.assertEqual(len(copy), 1)
self.assertIn(key, copy)
self.assertEqual(copy[key], d)
self.assertEqual(copy.get(key), d)
self.assertEqual(copy.get('non-existent-key', d), d)
def test_update(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
job.document.update({key: d})
self.assertIn(key, job.document)
def test_clear_document(self):
key = 'clear'
d = testdata()
job = self.open_job(test_token)
job.document[key] = d
self.assertIn(key, job.document)
self.assertEqual(len(job.document), 1)
job.document.clear()
self.assertNotIn(key, job.document)
self.assertEqual(len(job.document), 0)
def test_reopen(self):
key = 'clear'
d = testdata()
job = self.open_job(test_token)
job.document[key] = d
self.assertIn(key, job.document)
self.assertEqual(len(job.document), 1)
job2 = self.open_job(test_token)
self.assertIn(key, job2.document)
self.assertEqual(len(job2.document), 1)
def test_concurrency(self):
key = 'concurrent'
d = testdata()
job = self.open_job(test_token)
job2 = self.open_job(test_token)
self.assertNotIn(key, job.document)
self.assertNotIn(key, job2.document)
job.document[key] = d
self.assertIn(key, job.document)
self.assertIn(key, job2.document)
def test_remove(self):
key = 'remove'
job = self.open_job(test_token)
job.remove()
d = testdata()
job.document[key] = d
self.assertIn(key, job.document)
self.assertEqual(len(job.document), 1)
fn_test = os.path.join(job.workspace(), 'test')
with open(fn_test, 'w') as file:
file.write('test')
self.assertTrue(os.path.isfile(fn_test))
job.remove()
self.assertNotIn(key, job.document)
self.assertFalse(os.path.isfile(fn_test))
def test_clear_job(self):
key = 'clear'
job = self.open_job(test_token)
self.assertNotIn(job, self.project)
job.clear()
self.assertNotIn(job, self.project)
job.clear()
self.assertNotIn(job, self.project)
job.init()
self.assertIn(job, self.project)
job.clear()
self.assertIn(job, self.project)
job.clear()
job.clear()
self.assertIn(job, self.project)
d = testdata()
job.document[key] = d
self.assertIn(job, self.project)
self.assertIn(key, job.document)
self.assertEqual(len(job.document), 1)
job.clear()
self.assertEqual(len(job.document), 0)
with open(job.fn('test'), 'w') as file:
file.write('test')
self.assertTrue(job.isfile('test'))
self.assertIn(job, self.project)
job.clear()
self.assertFalse(job.isfile('test'))
self.assertEqual(len(job.document), 0)
def test_reset(self):
key = 'reset'
job = self.open_job(test_token)
self.assertNotIn(job, self.project)
job.reset()
self.assertIn(job, self.project)
self.assertEqual(len(job.document), 0)
job.document[key] = testdata()
self.assertEqual(len(job.document), 1)
job.reset()
self.assertIn(job, self.project)
self.assertEqual(len(job.document), 0)
def test_doc(self):
key = 'test_doc'
job = self.open_job(test_token)
def check_content(key, d):
self.assertEqual(job.doc[key], d)
self.assertEqual(getattr(job.doc, key), d)
self.assertEqual(job.doc()[key], d)
self.assertEqual(job.document[key], d)
self.assertEqual(getattr(job.document, key), d)
self.assertEqual(job.document()[key], d)
d = testdata()
job.doc[key] = d
check_content(key, d)
d2 = testdata()
job.doc[key] = d2
check_content(key, d2)
d3 = testdata()
job.document[key] = d3
check_content(key, d3)
d4 = testdata()
setattr(job.doc, key, d4)
check_content(key, d4)
def test_sp_formatting(self):
job = self.open_job({'a': 0})
self.assertEqual('{job.statepoint.a}'.format(job=job), str(job.sp.a))
self.assertEqual('{job.sp.a}'.format(job=job), str(job.sp.a))
self.assertEqual('{job.statepoint[a]}'.format(job=job), str(job.sp.a))
self.assertEqual('{job.sp[a]}'.format(job=job), str(job.sp.a))
job.sp.a = dict(b=0)
self.assertEqual('{job.statepoint.a.b}'.format(job=job), str(job.sp.a.b))
self.assertEqual('{job.sp.a.b}'.format(job=job), str(job.sp.a.b))
self.assertEqual('{job.statepoint[a][b]}'.format(job=job), str(job.sp.a.b))
self.assertEqual('{job.sp[a][b]}'.format(job=job), str(job.sp.a.b))
def test_doc_formatting(self):
job = self.open_job(test_token)
job.doc.a = 0
self.assertEqual('{job.doc.a}'.format(job=job), str(job.doc.a))
self.assertEqual('{job.doc[a]}'.format(job=job), str(job.doc.a))
self.assertEqual('{job.document.a}'.format(job=job), str(job.doc.a))
self.assertEqual('{job.document[a]}'.format(job=job), str(job.doc.a))
job.doc.a = dict(b=0)
self.assertEqual('{job.doc.a.b}'.format(job=job), str(job.doc.a.b))
        self.assertEqual('{job.doc[a][b]}'.format(job=job), str(job.doc.a.b))
self.assertEqual('{job.document.a.b}'.format(job=job), str(job.doc.a.b))
self.assertEqual('{job.document[a][b]}'.format(job=job), str(job.doc.a.b))
@unittest.skipIf(not H5PY, 'test requires the h5py package')
def test_reset_statepoint_job(self):
key = 'move_job'
d = testdata()
src = test_token
dst = dict(test_token)
dst['dst'] = True
src_job = self.open_job(src)
src_job.document[key] = d
self.assertIn(key, src_job.document)
self.assertEqual(len(src_job.document), 1)
src_job.data[key] = d
self.assertIn(key, src_job.data)
self.assertEqual(len(src_job.data), 1)
src_job.reset_statepoint(dst)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
self.assertIn(key, dst_job.document)
self.assertEqual(len(dst_job.document), 1)
self.assertNotIn(key, src_job.document)
self.assertIn(key, dst_job.data)
self.assertEqual(len(dst_job.data), 1)
self.assertNotIn(key, src_job.data)
with self.assertRaises(RuntimeError):
src_job.reset_statepoint(dst)
with self.assertRaises(DestinationExistsError):
src_job.reset_statepoint(dst)
@unittest.skipIf(not H5PY, 'test requires the h5py package')
def test_reset_statepoint_project(self):
key = 'move_job'
d = testdata()
src = test_token
dst = dict(test_token)
dst['dst'] = True
src_job = self.open_job(src)
src_job.document[key] = d
self.assertIn(key, src_job.document)
self.assertEqual(len(src_job.document), 1)
src_job.data[key] = d
self.assertIn(key, src_job.data)
self.assertEqual(len(src_job.data), 1)
self.project.reset_statepoint(src_job, dst)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
self.assertIn(key, dst_job.document)
self.assertEqual(len(dst_job.document), 1)
self.assertNotIn(key, src_job.document)
self.assertIn(key, dst_job.data)
self.assertEqual(len(dst_job.data), 1)
self.assertNotIn(key, src_job.data)
with self.assertRaises(RuntimeError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(DestinationExistsError):
self.project.reset_statepoint(src_job, dst)
@unittest.skipIf(not H5PY, 'test requires the h5py package')
def test_update_statepoint(self):
key = 'move_job'
d = testdata()
src = test_token
extension = {'dst': True}
dst = dict(src)
dst.update(extension)
extension2 = {'dst': False}
dst2 = dict(src)
dst2.update(extension2)
src_job = self.open_job(src)
src_job.document[key] = d
self.assertIn(key, src_job.document)
self.assertEqual(len(src_job.document), 1)
src_job.data[key] = d
self.assertIn(key, src_job.data)
self.assertEqual(len(src_job.data), 1)
self.project.update_statepoint(src_job, extension)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
self.assertEqual(dst_job.statepoint(), dst)
self.assertIn(key, dst_job.document)
self.assertEqual(len(dst_job.document), 1)
self.assertNotIn(key, src_job.document)
self.assertIn(key, dst_job.data)
self.assertEqual(len(dst_job.data), 1)
self.assertNotIn(key, src_job.data)
with self.assertRaises(RuntimeError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(DestinationExistsError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(KeyError):
self.project.update_statepoint(dst_job, extension2)
self.project.update_statepoint(dst_job, extension2, overwrite=True)
dst2_job = self.open_job(dst2)
self.assertEqual(dst2_job.statepoint(), dst2)
self.assertIn(key, dst2_job.document)
self.assertEqual(len(dst2_job.document), 1)
self.assertIn(key, dst2_job.data)
self.assertEqual(len(dst2_job.data), 1)
@unittest.skipIf(not H5PY, 'test requires the h5py package')
class JobOpenDataTest(BaseJobTest):
@staticmethod
@contextmanager
def open_data(job):
with job.data:
yield
def test_get_set(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertFalse(bool(job.data))
self.assertEqual(len(job.data), 0)
self.assertNotIn(key, job.data)
job.data[key] = d
self.assertTrue(bool(job.data))
self.assertEqual(len(job.data), 1)
self.assertIn(key, job.data)
self.assertEqual(job.data[key], d)
self.assertEqual(job.data.get(key), d)
self.assertEqual(job.data.get('non-existent-key', d), d)
def test_del(self):
key = 'del0'
key1 = 'del1'
d = testdata()
d1 = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.data), 0)
self.assertNotIn(key, job.data)
job.data[key] = d
self.assertEqual(len(job.data), 1)
self.assertIn(key, job.data)
job.data[key1] = d1
self.assertEqual(len(job.data), 2)
self.assertIn(key, job.data)
self.assertIn(key1, job.data)
self.assertEqual(job.data[key], d)
self.assertEqual(job.data[key1], d1)
del job.data[key]
self.assertEqual(len(job.data), 1)
self.assertIn(key1, job.data)
self.assertNotIn(key, job.data)
def test_get_set_data(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertFalse(bool(job.data))
self.assertEqual(len(job.data), 0)
self.assertNotIn(key, job.data)
job.data[key] = d
self.assertTrue(bool(job.data))
self.assertEqual(len(job.data), 1)
self.assertIn(key, job.data)
self.assertEqual(job.data[key], d)
self.assertEqual(job.data.get(key), d)
self.assertEqual(job.data.get('non-existent-key', d), d)
def test_set_set_data(self):
key0, key1 = 'set_set0', 'set_set1'
d0, d1 = testdata(), testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertFalse(bool(job.data))
self.assertEqual(len(job.data), 0)
self.assertNotIn(key0, job.data)
job.data[key0] = d0
self.assertTrue(bool(job.data))
self.assertEqual(len(job.data), 1)
self.assertIn(key0, job.data)
self.assertEqual(job.data[key0], d0)
job = self.open_job(test_token)
with self.open_data(job):
self.assertTrue(bool(job.data))
self.assertEqual(len(job.data), 1)
self.assertIn(key0, job.data)
self.assertEqual(job.data[key0], d0)
job = self.open_job(test_token)
with self.open_data(job):
job.data[key1] = d1
self.assertTrue(bool(job.data))
self.assertEqual(len(job.data), 2)
self.assertIn(key0, job.data)
self.assertIn(key1, job.data)
self.assertEqual(job.data[key0], d0)
self.assertEqual(job.data[key1], d1)
def test_get_set_nested(self):
d0 = testdata()
d1 = testdata()
d2 = testdata()
assert d0 != d1 != d2
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.data), 0)
self.assertNotIn('key0', job.data)
job.data['key0'] = d0
self.assertEqual(len(job.data), 1)
self.assertIn('key0', job.data)
self.assertEqual(job.data['key0'], d0)
with self.assertRaises(AttributeError):
job.data.key0.key1
job.data.key0 = {'key1': d0}
self.assertEqual(len(job.data), 1)
self.assertIn('key0', job.data)
self.assertEqual(dict(job.data), {'key0': {'key1': d0}})
self.assertEqual(job.data['key0'], {'key1': d0})
self.assertEqual(job.data['key0']['key1'], d0)
self.assertEqual(job.data.key0, {'key1': d0})
self.assertEqual(job.data.key0.key1, d0)
job.data.key0.key1 = d1
self.assertEqual(job.data, {'key0': {'key1': d1}})
self.assertEqual(job.data['key0'], {'key1': d1})
self.assertEqual(job.data['key0']['key1'], d1)
self.assertEqual(job.data.key0, {'key1': d1})
self.assertEqual(job.data.key0.key1, d1)
job.data['key0']['key1'] = d2
self.assertEqual(job.data, {'key0': {'key1': d2}})
self.assertEqual(job.data['key0'], {'key1': d2})
self.assertEqual(job.data['key0']['key1'], d2)
self.assertEqual(job.data.key0, {'key1': d2})
self.assertEqual(job.data.key0.key1, d2)
def test_get_set_nested_data(self):
d0 = testdata()
d1 = testdata()
d2 = testdata()
assert d0 != d1 != d2
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.data), 0)
self.assertNotIn('key0', job.data)
job.data['key0'] = d0
self.assertEqual(len(job.data), 1)
self.assertIn('key0', job.data)
self.assertEqual(job.data['key0'], d0)
with self.assertRaises(AttributeError):
job.data.key0.key1
job.data.key0 = {'key1': d0}
self.assertEqual(len(job.data), 1)
self.assertIn('key0', job.data)
self.assertEqual(dict(job.data), {'key0': {'key1': d0}})
self.assertEqual(job.data['key0'], {'key1': d0})
self.assertEqual(job.data['key0']['key1'], d0)
self.assertEqual(job.data.key0, {'key1': d0})
self.assertEqual(job.data.key0.key1, d0)
job.data.key0.key1 = d1
self.assertEqual(job.data, {'key0': {'key1': d1}})
self.assertEqual(job.data['key0'], {'key1': d1})
self.assertEqual(job.data['key0']['key1'], d1)
self.assertEqual(job.data.key0, {'key1': d1})
self.assertEqual(job.data.key0.key1, d1)
job.data['key0']['key1'] = d2
self.assertEqual(job.data, {'key0': {'key1': d2}})
self.assertEqual(job.data['key0'], {'key1': d2})
self.assertEqual(job.data['key0']['key1'], d2)
self.assertEqual(job.data.key0, {'key1': d2})
self.assertEqual(job.data.key0.key1, d2)
def test_assign(self):
key = 'assign'
d0 = testdata()
d1 = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.data), 0)
job.data[key] = d0
self.assertEqual(len(job.data), 1)
self.assertEqual(dict(job.data), {key: d0})
with self.assertRaises(ValueError):
job.data = d1
job.data = {key: d1}
self.assertEqual(len(job.data), 1)
self.assertEqual(dict(job.data), {key: d1})
def test_assign_data(self):
key = 'assign'
d0 = testdata()
d1 = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.data), 0)
job.data[key] = d0
self.assertEqual(len(job.data), 1)
self.assertEqual(dict(job.data), {key: d0})
with self.assertRaises(ValueError):
job.data = d1
job.data = {key: d1}
self.assertEqual(len(job.data), 1)
self.assertEqual(dict(job.data), {key: d1})
def test_copy_data(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
job.data[key] = d
self.assertTrue(bool(job.data))
self.assertEqual(len(job.data), 1)
self.assertIn(key, job.data)
self.assertEqual(job.data[key], d)
self.assertEqual(job.data.get(key), d)
self.assertEqual(job.data.get('non-existent-key', d), d)
copy = dict(job.data)
self.assertTrue(bool(copy))
self.assertEqual(len(copy), 1)
self.assertIn(key, copy)
self.assertEqual(copy[key], d)
self.assertEqual(copy.get(key), d)
self.assertEqual(copy.get('non-existent-key', d), d)
def test_update(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
job.data.update({key: d})
self.assertIn(key, job.data)
def test_clear_data(self):
key = 'clear'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
job.data[key] = d
self.assertIn(key, job.data)
self.assertEqual(len(job.data), 1)
job.data.clear()
self.assertNotIn(key, job.data)
self.assertEqual(len(job.data), 0)
def test_reopen(self):
key = 'clear'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
job.data[key] = d
self.assertIn(key, job.data)
self.assertEqual(len(job.data), 1)
job2 = self.open_job(test_token)
with self.open_data(job2):
self.assertIn(key, job2.data)
self.assertEqual(len(job2.data), 1)
def test_concurrency(self):
key = 'concurrent'
d = testdata()
job = self.open_job(test_token)
job2 = self.open_job(test_token)
with self.open_data(job):
with self.open_data(job2):
self.assertNotIn(key, job.data)
self.assertNotIn(key, job2.data)
job.data[key] = d
self.assertIn(key, job.data)
self.assertIn(key, job2.data)
def test_move_not_initialized(self):
job = self.open_job(test_token)
with self.assertRaises(RuntimeError):
job.move(job._project)
def test_move_intra_project(self):
job = self.open_job(test_token).init()
job.move(self.project) # no-op
def test_move_inter_project(self):
job = self.open_job(test_token).init()
project_a = self.project
project_b = self.project_class.init_project(
name='project_b',
root=os.path.join(self._tmp_pr, 'project_b'))
job.move(project_b)
job.move(project_a)
project_b.clone(job)
with self.assertRaises(DestinationExistsError):
job.move(project_b)
def test_remove(self):
key = 'remove'
job = self.open_job(test_token)
job.remove()
d = testdata()
with self.open_data(job):
job.data[key] = d
self.assertIn(key, job.data)
self.assertEqual(len(job.data), 1)
fn_test = os.path.join(job.workspace(), 'test')
with open(fn_test, 'w') as file:
file.write('test')
self.assertTrue(os.path.isfile(fn_test))
job.remove()
with self.open_data(job):
self.assertNotIn(key, job.data)
self.assertFalse(os.path.isfile(fn_test))
def test_clear_job(self):
key = 'clear'
job = self.open_job(test_token)
self.assertNotIn(job, self.project)
job.clear()
self.assertNotIn(job, self.project)
job.clear()
self.assertNotIn(job, self.project)
job.init()
self.assertIn(job, self.project)
job.clear()
self.assertIn(job, self.project)
job.clear()
job.clear()
self.assertIn(job, self.project)
d = testdata()
with self.open_data(job):
job.data[key] = d
self.assertIn(job, self.project)
self.assertIn(key, job.data)
self.assertEqual(len(job.data), 1)
job.clear()
with self.open_data(job):
self.assertEqual(len(job.data), 0)
with open(job.fn('test'), 'w') as file:
file.write('test')
self.assertTrue(job.isfile('test'))
self.assertIn(job, self.project)
job.clear()
self.assertFalse(job.isfile('test'))
with self.open_data(job):
self.assertEqual(len(job.data), 0)
def test_reset(self):
key = 'reset'
job = self.open_job(test_token)
self.assertNotIn(job, self.project)
job.reset()
self.assertIn(job, self.project)
with self.open_data(job):
self.assertEqual(len(job.data), 0)
job.data[key] = testdata()
self.assertEqual(len(job.data), 1)
job.reset()
self.assertIn(job, self.project)
with self.open_data(job):
self.assertEqual(len(job.data), 0)
def test_data(self):
key = 'test_data'
job = self.open_job(test_token)
def check_content(key, d):
self.assertEqual(job.data[key], d)
self.assertEqual(getattr(job.data, key), d)
self.assertEqual(dict(job.data)[key], d)
self.assertEqual(job.data[key], d)
self.assertEqual(getattr(job.data, key), d)
self.assertEqual(dict(job.data)[key], d)
with self.open_data(job):
d = testdata()
job.data[key] = d
check_content(key, d)
d2 = testdata()
job.data[key] = d2
check_content(key, d2)
d3 = testdata()
job.data[key] = d3
check_content(key, d3)
d4 = testdata()
setattr(job.data, key, d4)
check_content(key, d4)
def test_reset_statepoint_job(self):
key = 'move_job'
d = testdata()
src = test_token
dst = dict(test_token)
dst['dst'] = True
src_job = self.open_job(src)
with self.open_data(src_job):
src_job.data[key] = d
self.assertIn(key, src_job.data)
self.assertEqual(len(src_job.data), 1)
src_job.reset_statepoint(dst)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
with self.open_data(dst_job):
self.assertIn(key, dst_job.data)
self.assertEqual(len(dst_job.data), 1)
with self.open_data(src_job):
self.assertNotIn(key, src_job.data)
with self.assertRaises(RuntimeError):
src_job.reset_statepoint(dst)
with self.assertRaises(DestinationExistsError):
src_job.reset_statepoint(dst)
def test_reset_statepoint_project(self):
key = 'move_job'
d = testdata()
src = test_token
dst = dict(test_token)
dst['dst'] = True
src_job = self.open_job(src)
with self.open_data(src_job):
src_job.data[key] = d
self.assertIn(key, src_job.data)
self.assertEqual(len(src_job.data), 1)
self.project.reset_statepoint(src_job, dst)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
with self.open_data(dst_job):
self.assertIn(key, dst_job.data)
self.assertEqual(len(dst_job.data), 1)
with self.open_data(src_job):
self.assertNotIn(key, src_job.data)
with self.assertRaises(RuntimeError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(DestinationExistsError):
self.project.reset_statepoint(src_job, dst)
def test_update_statepoint(self):
key = 'move_job'
d = testdata()
src = test_token
extension = {'dst': True}
dst = dict(src)
dst.update(extension)
extension2 = {'dst': False}
dst2 = dict(src)
dst2.update(extension2)
src_job = self.open_job(src)
with self.open_data(src_job):
src_job.data[key] = d
self.assertIn(key, src_job.data)
self.assertEqual(len(src_job.data), 1)
self.project.update_statepoint(src_job, extension)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
self.assertEqual(dst_job.statepoint(), dst)
with self.open_data(dst_job):
self.assertIn(key, dst_job.data)
self.assertEqual(len(dst_job.data), 1)
with self.open_data(src_job):
self.assertNotIn(key, src_job.data)
with self.assertRaises(RuntimeError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(DestinationExistsError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(KeyError):
self.project.update_statepoint(dst_job, extension2)
self.project.update_statepoint(dst_job, extension2, overwrite=True)
dst2_job = self.open_job(dst2)
self.assertEqual(dst2_job.statepoint(), dst2)
with self.open_data(dst2_job):
self.assertIn(key, dst2_job.data)
self.assertEqual(len(dst2_job.data), 1)
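# JobClosedDataTest reruns the JobOpenDataTest suite with a no-op open_data(), i.e.
# job.data is accessed without explicitly opening the underlying store first.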
@unittest.skipIf(not H5PY, 'test requires the h5py package')
class JobClosedDataTest(JobOpenDataTest):
@staticmethod
@contextmanager
def open_data(job):
yield
def test_implicit_initialization(self):
job = self.open_job(test_token)
self.assertNotIn('test', job.stores)
self.assertNotIn('foo', job.stores.test)
self.assertEqual(list(job.stores.keys()), [])
self.assertEqual(list(job.stores), [])
self.assertNotIn('test', job.stores)
job.stores.test.foo = True
self.assertIn('test', job.stores)
self.assertIn('foo', job.stores.test)
self.assertEqual(list(job.stores.keys()), ['test'])
self.assertEqual(list(job.stores), ['test'])
@unittest.skipIf(not H5PY, 'test requires the h5py package')
class JobOpenCustomDataTest(BaseJobTest):
@staticmethod
@contextmanager
def open_data(job):
with job.stores.test:
yield
def test_get_set(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertFalse(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 0)
self.assertNotIn(key, job.stores.test)
job.stores.test[key] = d
self.assertTrue(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 1)
self.assertIn(key, job.stores.test)
self.assertEqual(job.stores.test[key], d)
self.assertEqual(job.stores.test.get(key), d)
self.assertEqual(job.stores.test.get('non-existent-key', d), d)
def test_del(self):
key = 'del0'
key1 = 'del1'
d = testdata()
d1 = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
self.assertNotIn(key, job.stores.test)
job.stores.test[key] = d
self.assertEqual(len(job.stores.test), 1)
self.assertIn(key, job.stores.test)
job.stores.test[key1] = d1
self.assertEqual(len(job.stores.test), 2)
self.assertIn(key, job.stores.test)
self.assertIn(key1, job.stores.test)
self.assertEqual(job.stores.test[key], d)
self.assertEqual(job.stores.test[key1], d1)
del job.stores.test[key]
self.assertEqual(len(job.stores.test), 1)
self.assertIn(key1, job.stores.test)
self.assertNotIn(key, job.stores.test)
def test_get_set_data(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertFalse(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 0)
self.assertNotIn(key, job.stores.test)
job.stores.test[key] = d
self.assertTrue(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 1)
self.assertIn(key, job.stores.test)
self.assertEqual(job.stores.test[key], d)
self.assertEqual(job.stores.test.get(key), d)
self.assertEqual(job.stores.test.get('non-existent-key', d), d)
def test_set_set_data(self):
key0, key1 = 'set_set0', 'set_set1'
d0, d1 = testdata(), testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertFalse(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 0)
self.assertNotIn(key0, job.stores.test)
job.stores.test[key0] = d0
self.assertTrue(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 1)
self.assertIn(key0, job.stores.test)
self.assertEqual(job.stores.test[key0], d0)
job = self.open_job(test_token)
with self.open_data(job):
self.assertTrue(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 1)
self.assertIn(key0, job.stores.test)
self.assertEqual(job.stores.test[key0], d0)
job = self.open_job(test_token)
with self.open_data(job):
job.stores.test[key1] = d1
self.assertTrue(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 2)
self.assertIn(key0, job.stores.test)
self.assertIn(key1, job.stores.test)
self.assertEqual(job.stores.test[key0], d0)
self.assertEqual(job.stores.test[key1], d1)
def test_get_set_nested(self):
d0 = testdata()
d1 = testdata()
d2 = testdata()
assert d0 != d1 != d2
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
self.assertNotIn('key0', job.stores.test)
job.stores.test['key0'] = d0
self.assertEqual(len(job.stores.test), 1)
self.assertIn('key0', job.stores.test)
self.assertEqual(job.stores.test['key0'], d0)
with self.assertRaises(AttributeError):
job.stores.test.key0.key1
job.stores.test.key0 = {'key1': d0}
self.assertEqual(len(job.stores.test), 1)
self.assertIn('key0', job.stores.test)
self.assertEqual(dict(job.stores.test), {'key0': {'key1': d0}})
self.assertEqual(job.stores.test['key0'], {'key1': d0})
self.assertEqual(job.stores.test['key0']['key1'], d0)
self.assertEqual(job.stores.test.key0, {'key1': d0})
self.assertEqual(job.stores.test.key0.key1, d0)
job.stores.test.key0.key1 = d1
self.assertEqual(job.stores.test, {'key0': {'key1': d1}})
self.assertEqual(job.stores.test['key0'], {'key1': d1})
self.assertEqual(job.stores.test['key0']['key1'], d1)
self.assertEqual(job.stores.test.key0, {'key1': d1})
self.assertEqual(job.stores.test.key0.key1, d1)
job.stores.test['key0']['key1'] = d2
self.assertEqual(job.stores.test, {'key0': {'key1': d2}})
self.assertEqual(job.stores.test['key0'], {'key1': d2})
self.assertEqual(job.stores.test['key0']['key1'], d2)
self.assertEqual(job.stores.test.key0, {'key1': d2})
self.assertEqual(job.stores.test.key0.key1, d2)
def test_get_set_nested_data(self):
d0 = testdata()
d1 = testdata()
d2 = testdata()
assert d0 != d1 != d2
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
self.assertNotIn('key0', job.stores.test)
job.stores.test['key0'] = d0
self.assertEqual(len(job.stores.test), 1)
self.assertIn('key0', job.stores.test)
self.assertEqual(job.stores.test['key0'], d0)
with self.assertRaises(AttributeError):
job.stores.test.key0.key1
job.stores.test.key0 = {'key1': d0}
self.assertEqual(len(job.stores.test), 1)
self.assertIn('key0', job.stores.test)
self.assertEqual(dict(job.stores.test), {'key0': {'key1': d0}})
self.assertEqual(job.stores.test['key0'], {'key1': d0})
self.assertEqual(job.stores.test['key0']['key1'], d0)
self.assertEqual(job.stores.test.key0, {'key1': d0})
self.assertEqual(job.stores.test.key0.key1, d0)
job.stores.test.key0.key1 = d1
self.assertEqual(job.stores.test, {'key0': {'key1': d1}})
self.assertEqual(job.stores.test['key0'], {'key1': d1})
self.assertEqual(job.stores.test['key0']['key1'], d1)
self.assertEqual(job.stores.test.key0, {'key1': d1})
self.assertEqual(job.stores.test.key0.key1, d1)
job.stores.test['key0']['key1'] = d2
self.assertEqual(job.stores.test, {'key0': {'key1': d2}})
self.assertEqual(job.stores.test['key0'], {'key1': d2})
self.assertEqual(job.stores.test['key0']['key1'], d2)
self.assertEqual(job.stores.test.key0, {'key1': d2})
self.assertEqual(job.stores.test.key0.key1, d2)
def test_assign(self):
key = 'assign'
d0 = testdata()
d1 = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
job.stores.test[key] = d0
self.assertEqual(len(job.stores.test), 1)
self.assertEqual(dict(job.stores.test), {key: d0})
with self.assertRaises(ValueError):
job.stores.test = d1
job.stores.test = {key: d1}
self.assertEqual(len(job.stores.test), 1)
self.assertEqual(dict(job.stores.test), {key: d1})
def test_assign_data(self):
key = 'assign'
d0 = testdata()
d1 = testdata()
job = self.open_job(test_token)
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
job.stores.test[key] = d0
self.assertEqual(len(job.stores.test), 1)
self.assertEqual(dict(job.stores.test), {key: d0})
with self.assertRaises(ValueError):
job.stores.test = d1
job.stores.test = {key: d1}
self.assertEqual(len(job.stores.test), 1)
self.assertEqual(dict(job.stores.test), {key: d1})
def test_copy_data(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
job.stores.test[key] = d
self.assertTrue(bool(job.stores.test))
self.assertEqual(len(job.stores.test), 1)
self.assertIn(key, job.stores.test)
self.assertEqual(job.stores.test[key], d)
self.assertEqual(job.stores.test.get(key), d)
self.assertEqual(job.stores.test.get('non-existent-key', d), d)
copy = dict(job.stores.test)
self.assertTrue(bool(copy))
self.assertEqual(len(copy), 1)
self.assertIn(key, copy)
self.assertEqual(copy[key], d)
self.assertEqual(copy.get(key), d)
self.assertEqual(copy.get('non-existent-key', d), d)
def test_update(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
job.stores.test.update({key: d})
self.assertIn(key, job.stores.test)
def test_clear_data(self):
key = 'clear'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
job.stores.test[key] = d
self.assertIn(key, job.stores.test)
self.assertEqual(len(job.stores.test), 1)
job.stores.test.clear()
self.assertNotIn(key, job.stores.test)
self.assertEqual(len(job.stores.test), 0)
def test_reopen(self):
key = 'reopen'
d = testdata()
job = self.open_job(test_token)
with self.open_data(job):
job.stores.test[key] = d
self.assertIn(key, job.stores.test)
self.assertEqual(len(job.stores.test), 1)
job2 = self.open_job(test_token)
with self.open_data(job2):
self.assertIn(key, job2.stores.test)
self.assertEqual(len(job2.stores.test), 1)
def test_concurrency(self):
key = 'concurrent'
d = testdata()
job = self.open_job(test_token)
job2 = self.open_job(test_token)
with self.open_data(job):
with self.open_data(job2):
self.assertNotIn(key, job.stores.test)
self.assertNotIn(key, job2.stores.test)
job.stores.test[key] = d
self.assertIn(key, job.stores.test)
self.assertIn(key, job2.stores.test)
def test_remove(self):
key = 'remove'
job = self.open_job(test_token)
job.remove()
d = testdata()
with self.open_data(job):
job.stores.test[key] = d
self.assertIn(key, job.stores.test)
self.assertEqual(len(job.stores.test), 1)
fn_test = os.path.join(job.workspace(), 'test')
with open(fn_test, 'w') as file:
file.write('test')
self.assertTrue(os.path.isfile(fn_test))
job.remove()
with self.open_data(job):
self.assertNotIn(key, job.stores.test)
self.assertFalse(os.path.isfile(fn_test))
def test_clear_job(self):
key = 'clear'
job = self.open_job(test_token)
self.assertNotIn(job, self.project)
job.clear()
self.assertNotIn(job, self.project)
job.clear()
self.assertNotIn(job, self.project)
job.init()
self.assertIn(job, self.project)
job.clear()
self.assertIn(job, self.project)
job.clear()
job.clear()
self.assertIn(job, self.project)
d = testdata()
with self.open_data(job):
job.stores.test[key] = d
self.assertIn(job, self.project)
self.assertIn(key, job.stores.test)
self.assertEqual(len(job.stores.test), 1)
job.clear()
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
with open(job.fn('test'), 'w') as file:
file.write('test')
self.assertTrue(job.isfile('test'))
self.assertIn(job, self.project)
job.clear()
self.assertFalse(job.isfile('test'))
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
def test_reset(self):
key = 'reset'
job = self.open_job(test_token)
self.assertNotIn(job, self.project)
job.reset()
self.assertIn(job, self.project)
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
job.stores.test[key] = testdata()
self.assertEqual(len(job.stores.test), 1)
job.reset()
self.assertIn(job, self.project)
with self.open_data(job):
self.assertEqual(len(job.stores.test), 0)
def test_data(self):
key = 'test_data'
job = self.open_job(test_token)
def check_content(key, d):
self.assertEqual(job.stores.test[key], d)
self.assertEqual(getattr(job.stores.test, key), d)
self.assertEqual(dict(job.stores.test)[key], d)
self.assertEqual(job.stores.test[key], d)
self.assertEqual(getattr(job.stores.test, key), d)
self.assertEqual(dict(job.stores.test)[key], d)
with self.open_data(job):
d = testdata()
job.stores.test[key] = d
check_content(key, d)
d2 = testdata()
job.stores.test[key] = d2
check_content(key, d2)
d3 = testdata()
job.stores.test[key] = d3
check_content(key, d3)
d4 = testdata()
setattr(job.stores.test, key, d4)
check_content(key, d4)
class JobClosedCustomDataTest(JobOpenCustomDataTest):
@staticmethod
@contextmanager
def open_data(job):
yield
if __name__ == '__main__':
unittest.main()
|
#!/bin/python3
import sys
arr = [float(arr_i) for arr_i in input().strip().split(' ')]
mean1 = arr[0]
mean2 = arr[1]
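# Assuming the two inputs are Poisson means: for X ~ Poisson(lambda), E[X^2] = lambda + lambda^2,
# which is the (mean + mean**2) term used in each expected-cost formula below.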
print(round(160+40*(mean1+mean1**2), 3))
print(round(128+40*(mean2+mean2**2), 3))
|
import numpy as np
"""
Attributes:
n_units: int
Number of units in the layer
W: n_units_in_prev_layer * n_units_in_this_layer numpy array
Weights of this layer
b: n_units_in_current_layer * 1 numpy array
biases of this layer
activation: activation object
activation_name: string
Name of activation function
A: (temporarily used during backprop): n_units * m dimensional array
activation of Z
active_neurons: n_units*1 numpy array
Value of each element is binary. Used for dropout
"""
class Dense:
"""
Parameters:
activation: string
"relu", "sigmoid", "linear"
regularizer(optional): tuple(string name_of_regularizer, int labmda)
keep_prob: 0< float <1
For dropouts, probabilty of keeping a neuron active
"""
def __init__(self, n_units, keep_prob=1, activation="linear", regularizer="no_regularizer", input_size=None):
self.n_units = n_units
        self.keep_prob = keep_prob
        self.input_size = input_size
if activation == "relu":
from keras.activations.relu import relu
self.activation_name = "relu"
self.activation = relu()
elif activation == "softmax":
from keras.activations.softmax import softmax
self.activation_name = "softmax"
self.activation = softmax()
elif activation == "sigmoid":
from keras.activations.sigmoid import sigmoid
self.activation_name = "sigmoid"
self.activation = sigmoid()
elif activation == "linear":
from keras.activations.linear import linear
self.activation_name = "linear"
self.activation = linear()
if regularizer[0] == "l2":
from keras.regularizers.l2 import l2
self.regularizer = l2(regularizer[1])
else:
from keras.regularizers.no_regularizer import no_regularizer
self.regularizer = no_regularizer()
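    # initialise_weights applies He-style scaling (sqrt(2/n_in)) for relu and
    # Xavier-style scaling (sqrt(1/n_in)) for sigmoid; other activations keep
    # unscaled standard-normal weights, and biases start at zero.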
def initialise_weights(self, input_size):
activation_name = self.activation_name
# Check in case someone applies activation on input layer
if activation_name == "relu":
self.W = np.random.randn(self.n_units, input_size[0]) * np.sqrt(2/input_size[0])
elif activation_name == "sigmoid":
self.W = np.random.randn(self.n_units, input_size[0]) * np.sqrt(1/input_size[0])
else:
self.W = np.random.randn(self.n_units, input_size[0])
self.b = np.zeros((self.n_units, 1))
return [self.n_units]
"""
Returns output of this layer given an input
Also stores the activation as cache to be used during back prop
Attributes:
activation_prev_layer: input to this layer
"""
    def forward_propagation(self, activation_prev_layer):
        keep_prob = self.keep_prob
        # Dropout mask: each unit is kept with probability keep_prob
        self.active_neurons = np.random.choice([1, 0], size=(self.n_units, 1), p=[keep_prob, 1 - keep_prob])
        z = np.dot(self.W, activation_prev_layer) + self.b
        a = self.activation.activate(z)
        self.A = a  # cached for backprop
        a = np.multiply(a, self.active_neurons)
        return a
def backward_propagation(self, dz, activation_prev_layer, activation_derivative_prev_layer, learning_rate):
dW = np.dot(dz, activation_prev_layer.T) + self.regularizer.calculateDerivative(self.W)
db = dz.sum(axis=1, keepdims=True)
self.W -= learning_rate * dW
self.b -= learning_rate * db
dz = np.multiply(np.dot(self.W.T, dz), \
activation_derivative_prev_layer)
return dz
def predict(self, X):
z = np.dot(self.W, X) + self.b
a = self.activation.activate(z)
        return a
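# A minimal, self-contained sketch (not part of the class above) illustrating
# the fan-in-scaled initialisation used in initialise_weights: He scaling for
# ReLU layers and a Xavier-style scaling for sigmoid layers. The layer sizes
# below are arbitrary example values.
if __name__ == "__main__":
    n_in, n_out = 784, 128
    W_relu = np.random.randn(n_out, n_in) * np.sqrt(2 / n_in)     # He initialisation
    W_sigmoid = np.random.randn(n_out, n_in) * np.sqrt(1 / n_in)  # Xavier-style initialisation
    # The scaling keeps the variance of the pre-activations roughly constant:
    print(W_relu.var())     # ~ 2 / 784
    print(W_sigmoid.var())  # ~ 1 / 784
|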
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: pt. sie 29 14:02:38 2014
# by: The Resource Compiler for PyQt (Qt v4.8.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = b"\
\x00\x00\x07\xb3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x07\x55\x49\x44\x41\x54\x78\xda\xc4\
\x97\x79\x6c\x54\xd7\x15\xc6\xbf\xb7\xcf\xe2\x59\xbc\x0c\x86\x78\
\x1f\x6c\x1c\x37\x44\x98\x38\xd8\x38\xf2\x82\x49\xd4\x86\x50\x9c\
\x28\x4b\xdb\x28\x4d\x10\x2a\x6a\x90\x1a\x55\x55\x68\x28\x2d\xc2\
\x41\x29\x88\x56\x6a\x2b\xa4\x4a\x09\xad\x50\x23\x50\xf2\x47\xd2\
\xcd\x81\xd8\x21\x35\x82\xda\xb8\x2c\x01\xb5\x40\x70\xa5\x90\xd4\
\x63\x20\xe0\x32\xe3\x61\x36\xcf\xcc\x9b\xe5\xbd\xd7\x73\x9f\xc7\
\x96\x41\x80\x27\x0a\x51\x9f\x74\x67\x79\x73\xcf\xb9\xbf\xf3\xdd\
\x73\xcf\x79\xc3\x19\x86\x81\xff\xe7\xc5\xdd\xe9\xb7\xad\x3d\xdb\
\x5a\x64\xc5\xf2\x0a\xc7\x61\x15\x81\x5a\x8d\x59\x46\x1c\xc7\x25\
\xe9\xb5\x3f\x1e\x8f\x6d\xda\xb9\x63\xbb\x8f\x6e\x19\x77\x0d\xe0\
\x85\xb5\xeb\xdc\x75\x8b\xea\x8f\xba\x5c\x8e\xc5\xed\x6d\x6d\xa8\
\xf5\xd6\xa0\xc0\xe1\xb8\x61\xce\x64\x2c\x86\xcf\x46\x7d\x38\x3a\
\x3c\x8c\x48\x24\x76\xfe\xd3\x0b\x9f\xb4\xef\xdb\xfb\x66\xf8\xcb\
\x02\x70\x5b\xb6\xf6\xac\x96\x25\xe5\x40\xf7\x9a\xd5\x68\x6c\x5c\
\x92\x97\x93\x33\x67\xce\x62\xff\x81\x3e\xa4\x33\xa9\x35\x3b\x7e\
\xfe\x5a\xdf\x17\x51\x63\x36\x00\xb7\xf9\xa7\x5b\xba\xdd\xee\xc2\
\xde\x0d\x2f\xae\x87\xcb\xe5\x32\x6f\x1e\x3e\x37\x86\xde\x93\x9f\
\xe1\xe4\xa7\xe3\xe0\xb9\xa9\xe9\x3a\xe5\x4d\x4b\xdd\x02\x3c\xd1\
\x5c\x8b\x95\x4b\xaa\xcd\x7b\x91\x48\x04\xbb\x7f\xb7\x07\xe1\x70\
\xe8\x89\x5f\xec\xdc\xb1\x3f\x5f\x88\x19\x80\xc7\x56\x7f\xb3\xa4\
\xa3\x73\x45\xe0\xfb\xeb\xd7\xa1\xb0\xb0\x08\xd1\xc9\x24\x5e\xda\
\x73\x18\x97\xa2\x1a\x0a\x4b\xca\xe0\x74\x95\x40\x94\x38\x08\x02\
\x47\x20\x3c\xe2\x91\x09\x5c\x1b\xbf\x8c\x05\x36\x0e\xbf\x5d\xbf\
\x12\xce\x02\x2b\x42\xa1\xeb\xf8\xfd\x9e\x37\x31\x34\xf8\x77\x4f\
\x7f\xdf\xfb\x13\xf9\x00\x88\xb9\x77\xa9\xe9\xc1\x65\x83\x5d\x2b\
\x3a\xcc\xc5\xc3\xb1\x38\x9e\xf9\xcd\x07\xb0\xb8\x2b\xb0\xa2\xb9\
\x12\x5d\x75\x4e\x78\x8b\x15\x44\xd3\x1a\x62\x69\x1d\x93\x29\x0d\
\xbe\x90\x0d\x17\xab\xe6\xe3\xe2\xd8\x65\x3c\xfd\xab\x3e\xbc\xbb\
\x71\x95\x69\xcb\x7c\xc4\xe3\xf1\x41\x02\x68\x24\xbf\x99\xb9\x00\
\x04\xa6\xc2\xb3\xcf\x3d\xdf\xe0\xf5\x7a\x5f\x7d\xfa\xa9\x27\x99\
\xc0\xf8\xc1\x1f\x86\x91\x96\x4b\xf0\xc3\x47\xef\xc3\xe3\xf7\xbb\
\x21\xd1\xac\x30\x2d\x9c\xd4\x78\x24\x34\x20\x96\xd1\x4d\x85\x39\
\x9e\x87\x54\xe0\x04\x27\xca\x18\xf8\xe8\x02\xba\x9b\xab\x51\x56\
\x56\x8e\x7f\x9d\x39\xeb\x29\x2e\xf1\xbc\x7b\xfe\xe3\x73\x81\xb9\
\x00\x78\xa6\x42\x65\x55\xd5\xce\xd6\xe5\x2d\xb4\xb6\x86\x43\x67\
\x2f\xe1\x6a\x9c\xc3\x03\x75\x95\x58\x5a\x6e\xc3\x44\x3c\x43\x8b\
\x6b\x48\xd3\xd4\xef\x6e\xd9\x8d\x97\xb6\xef\x85\xce\x0b\x50\xc9\
\x50\xa5\x5c\x90\x65\x0e\x55\xde\x72\x5c\x37\x24\x0c\x9c\xb9\x64\
\xfa\x60\xbe\x98\xcf\x59\x0a\xdf\x11\xc0\x22\x0a\xc2\x23\x55\x15\
\x15\x64\x9c\xc5\xc0\xbf\x03\x70\xb8\x3c\xe8\x5a\xe4\x44\x28\x99\
\x45\x2c\xab\x83\x14\x47\x96\xd2\xc5\x6e\xb7\xc3\x62\xb7\x42\x13\
\x05\x64\x28\x7a\x43\xe4\x21\x2b\x02\x64\x89\x47\xb5\xb7\x02\x03\
\x23\x14\xb0\x96\x05\xf3\xc5\x7c\x32\xdf\xf9\x00\x28\x94\xd5\x96\
\x79\xa5\xa5\x04\xaf\x61\xc4\xaf\xc2\x62\xb5\xa3\x86\xf6\x7c\x32\
\x6b\x50\xb4\x14\x84\x24\x41\x17\x25\x48\x8a\x42\x11\x2b\xd0\x15\
\x89\x5c\xcb\x70\x3a\x14\x58\x15\x11\x92\x2c\xa0\x74\x7e\x11\xd9\
\x26\xc9\x47\x16\xa6\x2f\xf2\xc9\x7c\xe7\x93\x84\xbc\xa1\xeb\xd0\
\xc8\x90\x5e\xa0\x28\x16\x33\xd3\x63\x4c\x76\x7d\x4a\x76\x16\x39\
\x5b\x5c\xb1\x90\x4f\x51\xc4\xae\x37\xde\x87\x46\xec\x59\x9d\xc3\
\x86\xef\xad\x04\xaf\x66\x90\xc9\x72\x04\x67\x81\x46\x5b\xc0\x20\
\x98\xcf\x5c\x80\x73\x9f\x02\xd6\x0f\x74\x66\x40\xc6\x92\x24\x93\
\x7c\x04\x90\xd2\x91\x30\x58\xf0\x0a\x78\x52\x40\x20\x05\xd8\xe2\
\xbc\x20\xd3\x22\x9c\xe9\xdb\xa0\x1c\x56\x28\x7a\x4d\xd3\x61\x70\
\x3a\xcd\x95\x4c\x15\x75\xfa\x9c\x6f\x8f\x99\x49\x12\x66\xa0\x91\
\xb1\x40\x51\x50\x9d\x27\x00\x0d\x71\x82\xda\xbe\xe9\x05\x73\xcf\
\x99\xec\xbb\x5e\xa7\xc8\xe9\xb7\x4d\xaf\xac\x62\xac\x4c\x30\x24\
\x29\x7a\x91\x72\x81\xad\xc7\x9b\x0a\xe8\xc0\x17\x68\x70\xa6\x44\
\x34\x3d\x75\xf5\xea\x38\x19\x1b\x68\x98\x67\x05\x2b\x78\xbe\x50\
\x9a\xce\x7b\x96\x12\x31\x85\x60\x5c\x45\x28\x91\xa2\xc5\x79\x53\
\x7a\xb6\xb8\x91\xb3\x16\x68\x32\x4f\x8a\x85\x23\x29\x7c\xad\xd4\
\x6a\xfa\x60\xbe\x98\xcf\x7c\x01\xf4\xc4\xe4\xe4\xb0\xef\xe2\x18\
\x34\xb2\x5a\x5e\x2e\x81\x13\x44\x02\x50\xe1\x9f\xcc\x60\x82\x0e\
\x7e\x58\x65\xfb\xaa\x9b\x7b\x9e\xd1\x99\xe4\x86\x39\x97\x6d\x9b\
\x31\xd5\x1a\xe1\x0f\xa6\xd1\x5a\xc1\xb6\x07\x18\x1d\x1b\x43\x7c\
\x32\xf6\x0f\xb3\xa8\xe4\x51\x88\x44\x97\xdb\x9d\x10\x04\xe9\xc9\
\xe6\xe6\x66\xdc\x63\xc9\x60\x24\x42\x55\x8f\x6a\x58\x86\xc9\x49\
\xd1\xb1\x9c\xe0\x79\x0e\xad\x2d\xb5\x68\x6b\xa9\x41\x96\xed\x39\
\x9d\x10\xf6\xae\x13\xc9\xf8\x35\x15\x42\x52\xc3\x53\x5e\xba\x2f\
\x48\xf8\xe3\x9f\xfe\x8a\x0f\xfa\xfb\x36\x04\xfc\xfe\x31\xf2\x9f\
\x9e\x4b\x01\x75\xe0\x6f\x1f\x1e\x0b\x06\x82\xa3\x27\x3f\x3a\x85\
\xac\xc1\xe3\xf9\xba\x14\x9d\x06\x3b\x9d\x7d\xc9\x4c\x30\xb6\x48\
\x9a\x4e\x45\x2a\x99\x46\x3c\x91\x46\x4a\xcd\x42\xcd\x64\xa9\xfb\
\x69\x18\xf7\x67\x11\xa4\xaa\xbf\xb6\x3e\x43\xf3\x79\x1c\x3b\x7e\
\x82\x72\x42\xd0\x47\xce\x7f\x7c\x81\xf9\xce\x47\x01\x53\xa6\x51\
\xdf\x7f\x8e\x97\x78\x4a\xd7\xd5\x78\x17\xa2\xb0\xc0\x82\xc5\x0e\
\x15\xbe\x94\x83\x6a\x81\x85\xf6\x53\x27\x05\xb4\xa9\x7d\xe7\x0c\
\x33\xc7\xa2\x09\x0e\xff\xf5\xf3\x10\x09\x6c\x6d\x75\x18\x4e\x85\
\xc3\xf8\x44\x10\x7f\xfe\x4b\x2f\xba\xbb\xd7\x70\xf7\x2d\x6e\xfc\
\xd1\x87\x07\xfb\xb7\x93\x85\x36\x17\x00\xbb\xb4\x44\x3c\x9e\xb1\
\xdb\xec\xe3\x81\x40\xf0\xeb\xde\x85\x0b\xe1\x26\x88\xa5\x8e\x28\
\x1c\xd4\x01\x13\xba\x0d\xfe\x94\x13\x89\xb4\x15\xb1\xa4\x05\xa1\
\xb8\x8c\x42\x23\x8b\x66\x67\x14\xdf\x98\x17\x82\x22\x89\x94\x03\
\xd7\xb1\x6f\xdf\xdb\x68\x5a\xfa\x00\x1e\x7e\x78\x25\xac\x36\x07\
\x6a\x1b\x9a\xb6\x1e\x3a\xb8\x7f\xe7\x9d\x20\xa6\x8f\x21\xeb\x5a\
\xc1\x83\x07\xfb\x7b\xa9\x90\xf0\xaa\x9a\xfa\x65\x57\x57\x27\x96\
\x35\x2d\x45\x43\x41\x02\x0d\xb6\x28\xa5\xbd\x3e\xab\xc5\x73\x94\
\x7c\x3c\x35\x23\x91\xca\xb1\x82\x53\xa7\xff\x89\x23\x47\x06\x71\
\xef\xbd\xf5\x54\xc6\x1d\xb4\x4d\x49\xd4\xd6\xd5\xc3\x83\x53\x68\
\xff\x75\x6b\xea\xa1\x8d\xc7\x95\xdb\xe5\x02\x77\xd3\x67\x3b\x8d\
\xf9\x45\x45\xc5\xb5\xdf\x79\xf6\xb9\x37\x8a\x8b\x8b\xaa\x1f\x6c\
\x6a\x42\x79\x45\x19\x3c\x1e\xcf\x4c\x71\x61\x75\x22\x10\x08\xe0\
\xf3\xcb\x57\x70\xfa\xf4\x69\xa8\xa9\xb4\x5e\x5e\x51\xce\xb3\x87\
\x18\x89\x2a\x29\x2f\xda\xf0\x68\xe7\x12\xb8\xe5\xcf\x81\x4f\x36\
\x63\xf8\x28\xd0\xfe\xf2\x91\x5b\x42\x70\xb7\xf8\xce\x6a\x78\x31\
\x8d\x05\x8b\x16\xd5\xd7\xb7\xb5\x77\xbe\xe8\x70\x38\x96\xb1\xba\
\x3e\x1b\x80\xae\x54\x2c\x16\x3b\x75\xe4\xf0\xa1\xdd\x3e\xdf\xe8\
\xf5\x9f\x6c\xde\xd2\x5f\x59\x53\x03\x51\x71\xe1\x7e\xe5\x1c\x5a\
\xe5\xb7\x80\xce\x77\x40\xdd\x8a\x20\x5e\x26\x08\xfd\x96\x10\xb7\
\x7b\x2a\x96\x72\x6a\xb0\xe7\xb2\x22\x1a\x8e\xdc\x76\x4d\xcf\x67\
\x24\xd4\x3c\x10\xa3\x11\xca\xe5\xd2\x3d\x3f\xde\xbc\x6d\xa8\xad\
\x32\x88\x6e\x5b\x2f\x38\x6a\x68\xa0\x0a\x89\xce\x7d\x04\x21\xdd\
\x16\x82\x9b\xe3\x71\x4d\xcc\x29\xa2\xdc\xa2\xb1\xb0\xa4\x48\xe5\
\x8e\x1a\x9b\x3b\x8f\x46\xcd\x7b\xaf\x3d\x32\x54\xef\xbc\x82\x2a\
\xb7\x4e\x5d\x55\x9c\xf2\xd0\xb9\x37\x07\xb1\xd1\x84\x88\xa9\xda\
\x43\x8f\xfd\x6c\xf0\x04\x0b\xe4\x4e\xdd\xca\xc8\x25\x27\x8b\x92\
\x3d\xdf\xf9\x6f\x1a\x13\xb9\xdf\x32\xb9\x88\xd8\x3d\xdf\xe3\x3d\
\x87\x3a\xce\x45\x6b\xe1\x8f\xaa\x48\x26\xb5\x29\x9d\x06\xd7\xd2\
\x0c\xfa\xd0\xb0\x0b\x6d\x2b\x14\x14\x3b\xe5\x63\xd3\xad\x9a\xc7\
\xdd\xbb\x66\x20\xbe\xd5\x73\xa0\xe3\x44\xb8\x11\x81\x68\xf2\x46\
\x88\x0c\xc5\x44\x7d\x45\x9b\x3a\x94\xb6\xbb\x0d\x70\x03\xc4\xb7\
\x7b\xde\x23\x88\x25\xf0\x85\x45\xa8\xaa\x36\x55\x09\x86\x08\xc2\
\xb0\xb3\xb2\x36\x53\x02\xee\x36\xc0\x4d\x10\xfb\x3b\x46\xa2\x5e\
\x04\x22\x09\x64\x54\xba\x9d\xa5\x91\xb8\x46\xa7\x88\xcf\xeb\xbf\
\xe1\x97\xbd\xe4\xe9\xc4\x7c\x67\xdb\x9a\x21\xd6\x35\xa9\x7c\xb1\
\x66\x8e\x67\x5e\xed\x5f\x4e\xf7\x59\xaf\x08\x7d\x95\x00\xb3\x21\
\xca\x68\x14\x4c\xff\xad\xa4\x71\x25\xa7\x52\xfa\xab\x06\x98\x86\
\xb0\xe7\x6a\xcb\x74\xd9\x8f\x4f\xd7\x82\xff\x09\x30\x00\xbc\x9f\
\x33\x3f\x06\x8f\xbe\x80\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x07\x9f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x07\x41\x49\x44\x41\x54\x78\xda\xc4\
\x57\x7b\x6c\x53\xe7\x15\xff\xdd\xb7\xed\xc4\xce\xd3\x04\x96\xb7\
\x49\x48\xb3\x52\x11\x1a\x9e\x55\x1e\x84\x56\xeb\x83\x91\x56\x5b\
\xbb\xad\xea\x5a\x84\x86\xd6\x4a\x9b\xaa\xaa\x6c\x1d\x1b\x22\x45\
\x1d\x88\x56\xea\xa6\x4a\xfd\xa3\x74\x42\x43\x45\xeb\x1f\xad\xb4\
\x29\x85\x91\xd1\x05\x91\x26\x04\x08\x0f\x6d\x40\x61\x5a\x68\x17\
\x07\x28\xa4\xc4\x71\x1c\x3f\xae\x7d\x6d\xdf\xc7\xce\x77\x63\x67\
\xa1\xa2\xc4\x55\xa9\x76\xa5\xe3\x6b\xdf\xfb\x9d\x73\x7e\xe7\xf1\
\xfd\xce\x67\xce\xb2\x2c\xfc\x3f\x2f\xee\x76\xef\xb6\x75\x6d\x5f\
\x29\x2b\x8e\x5f\x72\x1c\x1e\x26\xa0\x4e\x6b\x96\x12\xc7\x71\x09\
\xfa\xec\x51\xd5\xe8\x4b\xbb\x76\xee\xf0\xd3\x23\xeb\x8e\x01\x78\
\x66\xc3\xc6\xc2\xfa\x45\x0d\x47\x0b\x0a\xdc\x8b\x5b\x5b\x5a\x50\
\xe7\xab\x45\xbe\xdb\x7d\xd3\x9a\x58\x34\x8a\x4f\x47\xfc\x38\x3a\
\x38\x88\x70\x38\x7a\xe1\x93\x4b\xc3\xad\xfb\xde\xd9\x3b\xf5\x75\
\x01\x70\x5b\xb7\x75\xad\x93\x25\xe5\x40\xe7\xfa\x75\x68\x6a\x5a\
\x92\x93\x91\xb3\x67\xcf\x61\xff\x81\x83\x48\xa5\x93\xeb\x77\xfe\
\xf6\x95\x83\x5f\x25\x1b\xb3\x01\x70\x5b\x7e\xbd\xb5\xb3\xb0\xb0\
\xa8\xfb\xb9\x67\x37\xa1\xa0\xa0\xc0\x7e\x78\xe4\xfc\x28\xba\x4f\
\x7e\x8a\x93\x9f\x8c\x81\xe7\xa6\x97\x9b\xd4\x37\x2b\xeb\x17\xe0\
\xb1\x15\x75\x58\xbb\xa4\xc6\x7e\x16\x0e\x87\xb1\xfb\xed\x3d\x98\
\x9a\x0a\x3d\xf6\xea\xae\x9d\xfb\x73\x05\x31\x03\xe0\x91\x75\xdf\
\x2d\x6d\x6b\x5f\x13\xf8\xe9\xa6\x8d\x28\x2a\x2a\x46\x24\x96\xc0\
\xcf\xf7\x1c\xc1\x95\x88\x81\xa2\xd2\x72\x78\x0a\x4a\x21\x4a\x1c\
\x04\x81\x23\x20\x3c\xd4\xf0\x04\x6e\x8c\x5d\xc5\x02\x17\x87\x37\
\x37\xad\x85\x27\xdf\x89\x50\x68\x12\x7f\xd8\xb3\x17\x03\xfd\x1f\
\x79\x7b\x0e\xfe\x75\x22\x17\x00\x62\xe6\x2e\x35\x2f\x5b\xde\xdf\
\xb1\xa6\xcd\x76\x3e\x15\x55\xf1\xc4\xef\xff\x06\x47\x61\x25\xd6\
\xac\xa8\x42\x47\xbd\x1b\xb5\x25\x0a\xc2\x49\x03\xb1\x94\x69\xcb\
\x68\xc8\x85\xcb\xd5\xf3\x71\x79\xf4\x2a\x1e\x7f\xfd\x20\xde\xdf\
\xfc\xb0\xad\xcb\x6c\xa8\xaa\xda\x4f\x00\x9a\xc8\x6e\x7a\x2e\x00\
\x02\xcb\xc2\x93\x4f\x3d\xdd\xe8\xf3\xf9\x5e\x7e\xfc\xfb\xdf\x63\
\x09\xc6\xcf\xfe\x38\x88\x94\x5c\x8a\xe7\x1f\xba\x1b\x8f\xde\x53\
\x08\x89\xa2\x9e\x4c\xe8\x88\x68\x06\xa2\x29\x02\x91\x36\x01\x9e\
\x14\x79\x1e\x52\xbe\x07\x9c\x28\xa3\xf7\xd4\x25\x74\xae\xa8\x41\
\x79\x79\x05\xfe\x79\xf6\x9c\xb7\xa4\xd4\xfb\xfe\x85\x8f\xcf\x07\
\xe6\x02\xc0\xb3\x2c\x54\x55\x57\xef\x5a\xbd\x6a\x25\xf9\x36\x70\
\xf8\xdc\x15\x5c\x57\x39\xdc\x5b\x5f\x85\xa5\x15\x2e\x4c\xa8\x69\
\x4c\x6a\xba\xed\x34\x6e\x58\x48\x90\x6f\xcd\x16\x0b\x1a\xf5\x82\
\x2c\x73\xa8\xf6\x55\x60\xd2\x92\xd0\x7b\xf6\x8a\x6d\x83\xd9\x62\
\x36\x67\x65\xf8\xb6\x00\x1c\xa2\x20\x3c\x50\x5d\x59\x49\xca\x3a\
\x7a\xff\x15\x80\xbb\xc0\x8b\x8e\x45\x1e\x84\x28\xea\xa8\x6e\x82\
\x32\x8e\x14\xb5\x54\x9a\x9a\x50\xa7\xb6\xd1\x05\x1e\x29\x8a\xde\
\x12\x79\xc8\x8a\x00\x59\xe2\x51\xe3\xab\x44\xef\x45\x0a\xd8\xd0\
\xc1\x6c\x31\x9b\xcc\x76\x2e\x00\x14\xea\x6a\xc7\xbc\xb2\x32\x02\
\x6f\xe0\xe2\xb8\x06\x87\x33\xcf\xae\x79\x4c\xb7\x60\xf2\x14\x04\
\xa5\x98\x93\x15\x12\x19\xbc\x93\xee\x0e\xfa\x4d\x77\x8f\x5b\x81\
\x53\x11\x21\xc9\x02\xca\xe6\x17\x93\x6e\x82\x6c\xe8\xb0\x6d\x91\
\x4d\x66\x3b\x97\x26\xe4\x2d\xd3\x84\x41\x8a\xf4\x01\x45\x71\xd8\
\x9d\xce\x6a\x9d\x32\x79\xfc\x78\xeb\x6e\xb8\x5c\x79\xe0\x25\x09\
\xbc\x28\x82\x13\xd8\x5d\x22\x2d\x11\xba\xc5\xe3\xb9\x9f\xac\x05\
\xaf\xa5\x91\xd6\x39\x2a\x87\x03\x06\x95\x80\x81\x60\x36\x33\x01\
\xce\xbd\x0b\xd8\x3c\x30\x99\x02\x29\x4b\x92\x4c\xe9\x23\x00\x49\
\xaa\x39\xa5\x5d\x92\x14\x08\xe4\x5c\x60\x4e\x05\x06\x40\xb6\x9d\
\x5b\xf4\xdd\x32\x05\x28\x14\xbd\x61\x98\xb0\x38\x93\xd6\x4a\x76\
\x16\x4d\xfa\x9e\xeb\x8c\x99\x69\x12\xa6\x60\x90\xb2\x40\x51\x10\
\xcf\x13\x00\x03\x2a\x81\xda\xf1\xd2\x33\x54\x73\x01\x49\x62\x8c\
\x14\xc5\x23\x93\x43\x56\x77\x6a\x01\x02\x00\x24\x28\x7a\x91\x7a\
\x81\xf9\xe3\xed\x0c\x98\xc0\x57\x18\x70\x76\x8a\x68\x79\xf2\xfa\
\xf5\x31\x52\xb6\xd0\x38\xcf\x09\x46\x78\xfe\x50\x0a\xb1\xa4\x4e\
\x8d\x98\xc4\x04\x91\x52\x50\x4d\x12\xd5\xea\x94\x6a\x12\xba\x67\
\xb5\x05\x5a\xcc\x53\xc6\xa6\xc2\x49\x7c\xbb\xcc\x69\xdb\x60\xb6\
\x98\xcd\x5c\x01\x98\xf1\x58\x6c\xd0\x7f\x79\x14\xb4\xcb\xb0\xaa\
\x42\xa2\x34\x8b\x04\x40\xc3\x78\x2c\x8d\x89\xb8\x41\x04\x34\x1d\
\x95\x65\xb2\x52\x59\xd3\x25\xcb\x94\xcd\x9a\x1e\x8d\x18\x0f\xa6\
\xb0\xba\x52\x26\x00\xc0\xc8\xe8\x28\xd4\x58\xf4\x98\x4d\x2a\x39\
\x00\x48\x0d\x0f\xff\x7b\xef\xd0\xd0\x29\x32\x26\xa0\xd9\x6b\x61\
\xbe\x47\x81\xaa\x0b\x08\xa8\x3a\x34\xda\x86\x1c\xa6\x1d\xdb\x42\
\x28\xd3\xc4\x09\x29\xe2\x86\x14\x95\xc9\xa0\xf7\x9f\xdf\xd0\xe0\
\x26\x10\xcb\xbc\xe4\x91\x68\xfa\xd8\xb1\x13\xf8\xa8\xaf\xef\x35\
\xe4\x90\x05\x06\x40\xeb\xfd\xfb\x87\xc7\x83\x81\xe0\xc8\xc9\x53\
\xa7\xed\xce\x7e\xba\x3e\x49\xbb\x21\x8f\xf6\xbc\x64\x37\x98\x69\
\xb0\x5d\x62\xd9\xce\x98\xf3\x34\x11\x43\x32\x4d\xbb\x84\x64\x6c\
\x5c\x47\x90\x58\x7f\x43\x43\x9a\xd6\xf3\x38\x7e\x62\x88\x7a\x42\
\x30\x2f\x5e\xf8\xf8\x12\xb3\x9d\x0b\x15\xdb\x69\x1a\xf1\xff\xe7\
\x44\xa9\xb7\x6c\x63\xad\x6f\x21\x8a\xf2\x1d\x58\xec\xd6\xe0\x4f\
\xba\x89\x0b\x1c\x14\xbf\x45\x4d\x67\x64\xc6\x97\x65\x37\x4d\x24\
\xce\xe1\xf3\x71\x1e\x22\x6d\xd7\x0d\x35\x53\xf0\x28\x1c\xc6\x26\
\x82\xf8\xf3\x5f\xba\xd1\xd9\xb9\x9e\xbb\x7b\x71\xd3\x0b\x1f\x1e\
\xea\xd9\x41\x1a\xc6\x5c\x00\xd8\x65\xc4\x55\x35\x9d\xe7\xca\x1b\
\x0b\x04\x82\xdf\xf1\x2d\x5c\x88\x42\x02\xb1\xd4\x1d\x81\x9b\x26\
\x60\xdc\x74\x61\x3c\xe9\x41\x3c\xe5\x44\x34\xe1\x40\x48\x95\x51\
\x64\xe9\x58\xe1\x89\xe0\xc1\x79\x21\x28\x92\x48\x3d\x30\x89\x7d\
\xfb\xde\x45\xf3\xd2\x7b\x71\xff\xfd\x6b\xe1\x74\xb9\x51\xd7\xd8\
\xbc\xed\xf0\xa1\xfd\xbb\x6e\x07\x22\xbb\x0d\xd9\xd4\x0a\x1e\x3a\
\xd4\xd3\x4d\x44\xc2\x6b\x5a\xf2\xb5\x8e\x8e\x76\x2c\x6f\x5e\x8a\
\xc6\xfc\x38\x1a\x5d\x91\xe9\x3d\x87\xff\x1d\xca\x2c\xaa\x35\xc7\
\xf8\x40\x54\x70\xfa\xcc\x3f\xd0\xd7\xd7\x8f\xbb\xee\x6a\x20\x1a\
\x77\x43\x8d\x27\x50\x57\xdf\x00\x2f\x4e\xa3\xf5\x77\xab\x93\xf7\
\x6d\x3e\xc1\x18\x31\x35\xe7\x81\x84\x24\x8f\x64\x7e\x71\x71\x49\
\xdd\x8f\x9e\x7c\xea\xad\x92\x92\xe2\x9a\x65\xcd\xcd\xa8\xa8\x2c\
\x87\xd7\xeb\x9d\x21\x17\xc6\x13\x81\x40\x00\x9f\x5d\xbd\x86\x33\
\x67\xce\x40\x4b\xa6\xcc\x8a\xca\x0a\x9e\x1d\x62\x24\x62\x52\x5e\
\x74\xe1\xa1\xf6\x25\x28\x94\x3f\x03\x86\xb7\x60\xf0\x28\xd0\xfa\
\x62\xdf\x2d\x41\x70\xb7\xf8\xcd\x38\xbc\x84\x64\xc1\xa2\x45\x0d\
\x0d\x2d\xad\xed\xcf\xba\xdd\xee\xe5\x8c\xd7\x67\x03\x60\x1d\x1e\
\x8d\x46\x4f\xf7\x1d\x39\xbc\xdb\xef\x1f\x99\xfc\xd5\x96\xad\x3d\
\x55\xb5\xb5\x10\x95\x02\xdc\xa3\x9c\xc7\x6a\xf9\x4f\x40\xfb\x7b\
\xc4\x5c\xe4\x77\xf8\x45\x02\x61\xde\x12\xc4\x97\x9d\x8a\xa5\x4c\
\x36\xd8\xb9\xac\x98\xc4\x9d\x29\x57\x76\x3d\x43\xc2\xd8\x28\x4a\
\x12\xca\xf4\xd2\xb7\x7e\xb1\x65\xfb\x40\x4b\x55\x10\x9d\xae\x6e\
\x1a\x56\xa4\x4e\x0c\x89\xf6\x7d\x04\x42\xfa\x52\x10\xdc\x1c\xc7\
\x35\x31\x93\x11\xe5\x16\x83\xc5\xcc\xec\x73\x2d\xb3\x76\x1e\x49\
\xed\x07\xaf\x3c\x30\xd0\xe0\xb9\x86\xea\x42\x93\xa6\xaa\x38\x6d\
\xa1\xfd\x9d\x0c\x88\xcd\x36\x88\xa8\x66\xdc\xf7\xc8\x6f\xfa\x87\
\x58\x20\xb7\x9b\x56\x56\xa6\x39\x59\x94\xec\x7c\x37\xfe\x05\x99\
\xc8\xbc\x4b\x67\x22\x62\xcf\xfc\x8f\x76\x1d\x6e\x3b\x1f\xa9\xc3\
\x78\x44\x43\x22\x61\x4c\xe7\xa9\x7f\x03\xad\xa0\x2f\x8d\x6f\xa0\
\x65\x8d\x82\x12\x8f\x7c\x3c\x3b\xaa\x79\xdc\xb9\x6b\x06\xc4\x0f\
\xba\x0e\xb4\x0d\x4d\x35\x21\x10\x49\xdc\x0c\x22\x4d\x31\xa9\x1a\
\x9b\xfa\xec\x72\xdd\x69\x00\x37\x81\xf8\x61\xd7\x07\x04\x62\x09\
\xfc\x53\x22\x34\x3a\x4b\xda\x4c\x30\x40\x20\xac\x3c\xaa\x1d\x3f\
\x43\x01\x77\x1a\xc0\x17\x40\xec\x6f\xbb\x18\xf1\x21\x10\x8e\x23\
\xad\xd1\x63\x9d\x24\x7e\x83\x76\x11\x9f\xd3\x7f\xc3\xaf\x7b\xc9\
\xd9\xc6\x7c\x6f\xfb\xfa\x01\x36\x35\xd9\x50\xe3\x29\xfe\x27\x5e\
\xee\x59\x45\xcf\xd9\xac\x08\x7d\x93\x00\x66\x83\x28\x27\xc9\xcf\
\xfe\xad\x24\xb9\x96\xc9\x52\xea\x9b\x06\x90\x05\x91\x97\xe1\x96\
\x2c\xed\xab\x59\x2e\xf8\xaf\x00\x03\x00\x13\x97\x41\x1d\x31\x94\
\xc0\x9a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x04\
\x00\x08\x16\x5d\
\x00\x7a\
\x00\x6f\x00\x6f\x00\x6d\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x15\
\x0c\x5f\x27\xc7\
\x00\x6d\
\x00\x61\x00\x67\x00\x6e\x00\x69\x00\x66\x00\x69\x00\x65\x00\x72\x00\x5f\x00\x7a\x00\x6f\x00\x6f\x00\x6d\x00\x5f\x00\x69\x00\x6e\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x0b\x20\x5e\xa7\
\x00\x6d\
\x00\x61\x00\x67\x00\x6e\x00\x69\x00\x66\x00\x69\x00\x65\x00\x72\x00\x5f\x00\x7a\x00\x6f\x00\x6f\x00\x6d\x00\x5f\x00\x6f\x00\x75\
\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x0e\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x50\x00\x00\x00\x00\x00\x01\x00\x00\x07\xb7\
\x00\x00\x00\x20\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
from utilities import MODULE_CONTEXT, UserUtils, normalize_bson_to_json, EnumVals
from db import get_db
from .response import post_error
from datetime import datetime, timedelta
import time
import config
from config import USR_KEY_MONGO_COLLECTION, USR_MONGO_COLLECTION, USR_TEMP_TOKEN_MONGO_COLLECTION
import logging
log = logging.getLogger('file')
admin_role_key = config.ADMIN_ROLE_KEY
verify_mail_expiry = config.USER_VERIFY_LINK_EXPIRY
apikey_expiry = config.USER_API_KEY_EXPIRY
class UserAuthenticationModel(object):
def user_login(self,user_email, password=None):
"""User Login."""
try:
user_keys = UserUtils.get_data_from_keybase(user_email,keys=True)
if not user_keys:
user_keys = UserUtils.generate_api_keys(user_email)
if "errorID" in user_keys:
return user_keys
collections = get_db()[USR_MONGO_COLLECTION]
#fetching user data
user_details = collections.find({"email": user_email, "isActive": True,"isVerified":True},{"_id":0,"password":0})
if user_details.count() == 0:
return post_error("Data not valid","Error on fetching user details")
for user in user_details:
return {"userKeys":user_keys,"userDetails": normalize_bson_to_json(user)}
except Exception as e:
log.exception("Database connection exception | {} ".format(str(e)))
return post_error("Database exception", "An error occurred while processing on the database:{}".format(str(e)), None)
def user_logout(self,user_name):
"""User Logout
updating active status to False on user token collection.
"""
try:
#connecting to mongo instance/collection
collections = get_db()[USR_MONGO_COLLECTION]
#fetching user data
record = collections.find({"user": user_name, "active": True})
if record.count() == 0:
return False
if record.count() != 0:
for user in record:
#updating status = False for user token collection
                    # store the logout time as a 13-digit epoch-milliseconds integer
                    collections.update(user, {"$set": {"active": False, "end_time": int(time.time() * 1000)}})
                    log.info("Updated database record on user log out for {}".format(user_name))
return True
except Exception as e:
log.exception("Database connection exception ", MODULE_CONTEXT, e)
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)
def key_search(self,key):
"""Token search for user details"""
try:
log.info("searching for the user, using api-key")
result = UserUtils.get_data_from_keybase(key,email=True)
if "errorID" in result:
return result
email = result["email"]
collections = get_db()[USR_MONGO_COLLECTION]
user = collections.find({"email":email,"isVerified":True,"isActive":True},{"password":0,"_id":0})
if user.count() == 0:
log.info("No user records found in db matching email: {}".format(email))
return post_error("Invalid data", "Data received on request is not valid", None)
for record in user:
record["privateKey"] = result["privateKey"]
return normalize_bson_to_json(record)
except Exception as e:
log.exception("Database connection exception | {}".format(str(e)))
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)
def forgot_password(self,user_email):
"""Generaing forgot password notification"""
#connecting to mongo instance/collection
user_collection = get_db()[USR_MONGO_COLLECTION]
key_collection = get_db()[USR_KEY_MONGO_COLLECTION]
token_collection = get_db()[USR_TEMP_TOKEN_MONGO_COLLECTION]
user_record = user_collection.find({"email":user_email})
name = user_record[0]["firstName"]
record = token_collection.find({"email":user_email})
#removing previous records if any
if record.count() != 0:
key_collection.remove({"email":user_email})
token_collection.remove({"email":user_email})
user_keys = UserUtils.get_data_from_keybase(user_email,keys=True)
if not user_keys:
user_keys = UserUtils.generate_api_keys(user_email)
if "errorID" in user_keys:
return user_keys
user_keys["createdOn"] = datetime.utcnow()
#inserting new id generated onto temporary token collection
token_collection.insert(user_keys)
#generating email notification
result = UserUtils.generate_email_notification([{"email":user_email,"pubKey":user_keys["publicKey"],"pvtKey":user_keys["privateKey"],"name":name}],EnumVals.ForgotPwdTaskId.value)
if result is not None:
return result
return True
def reset_password(self,user_id,user_email,password):
"""Resetting password
an active user can reset their own password,
admin can reset password of any active users.
"""
#generating password hash
hashed = UserUtils.hash_password(password).decode("utf-8")
try:
#connecting to mongo instance/collection
collections = get_db()[USR_MONGO_COLLECTION]
key_collection = get_db()[USR_KEY_MONGO_COLLECTION]
temp_collection = get_db()[USR_TEMP_TOKEN_MONGO_COLLECTION]
#searching for valid record matching given user_id
record = collections.find({"userID": user_id})
if record.count() != 0:
log.info("Record found matching the userID {}".format(user_id), MODULE_CONTEXT)
for user in record:
#fetching the user roles
roles=user["roles"]
#fetching user name
email=user["email"]
#verifying the requested person, both admin and user can reset password
if (admin_role_key in roles) or (email == user_email):
log.info("Reset password request is checked against role permission and username")
results = collections.update({"email":user_email,"isActive":True}, {"$set": {"password": hashed}})
if 'writeError' in list(results.keys()):
return post_error("Database error", "writeError while updating record", None)
# removing temp API keys from user record
temp_collection.remove({"email":user_email})
# removing API keys from user record
key_collection.remove({"email":user_email})
return True
else:
log.info("No record found matching the userID {}".format(user_id), MODULE_CONTEXT)
return post_error("Data Not valid","Invalid user details",None)
except Exception as e:
log.exception("Database exception ", MODULE_CONTEXT, e)
return post_error("Database exception", "Exception:{}".format(str(e)), None)
def verify_user(self,user_email,user_id):
"""User verification and activation."""
try:
name = None
#connecting to mongo instance/collection
collections = get_db()[USR_MONGO_COLLECTION]
#checking for pre-verified records on the same username
primary_record= collections.find({"email": user_email,"isVerified": True})
if primary_record.count()!=0:
log.info("{} is already a verified user".format(user_email))
return post_error("Not allowed","Your email has already been verified",None)
#fetching user record matching userName and userID
record = collections.find({"email": user_email,"userID":user_id})
if record.count() ==1:
for user in record:
register_time = user["registeredTime"]
name = user["firstName"]
                    #checking whether the verification link has expired
if (datetime.utcnow() - register_time) > timedelta(hours=verify_mail_expiry):
log.exception("Verification link expired for {}".format(user_email))
#Removing previous record from db
collections.delete_many({"email": user_email,"userID":user_id})
return post_error("Data Not valid","Verification link expired. Please sign up again.",None)
results = collections.update(user, {"$set": {"isVerified": True,"isActive": True,"activatedTime": datetime.utcnow()}})
if 'writeError' in list(results.keys()):
return post_error("Database error", "writeError whie updating record", None)
log.info("Record updated for {}, activation & verification statuses are set to True".format(user_email))
else:
log.exception("No proper database records found for activation of {}".format(user_email))
return post_error("Data Not valid","No records matching the given parameters ",None)
#Generating API Keys
new_keys = UserUtils.generate_api_keys(user_email)
#email notification for registered users
user_notified=UserUtils.generate_email_notification([{"email":user_email,"name":name}],EnumVals.ConfirmationTaskId.value)
if user_notified is not None:
return user_notified
return new_keys
except Exception as e:
log.exception("Database exception ")
return post_error("Database exception", "Exception:{}".format(str(e)), None)
def activate_deactivate_user(self,user_email,status,from_id):
""""Resetting activation status of verified users"""
try:
#connecting to mongo instance/collection
collections = get_db()[USR_MONGO_COLLECTION]
#searching for a verified account for given username
record = collections.find({"email": user_email,"isVerified":True})
if record.count()==0:
log.info("{} is not a verified user".format(user_email), MODULE_CONTEXT)
return post_error("Data Not valid","Not a verified user",None)
if record.count() ==1:
for user in record:
#validating the org where user belongs to
# validity=OrgUtils.validate_org(user["orgID"])
# if validity is not None:
# log.info("{} belongs to an inactive org {}, hence operation failed".format(user_email,user["orgID"]), MODULE_CONTEXT)
# return validity
if user["userID"] == from_id:
log.info("Self activation/deactivation not allowed")
return post_error("Invalid Request", "You are not allowed to change your status", None)
#updating active status on database
results = collections.update(user, {"$set": {"isActive": status}})
if 'writeError' in list(results.keys()):
log.info("Status updation on database failed due to writeError")
return post_error("DB error", "Something went wrong, please try again", None)
log.info("Status updation on database successful")
else:
return post_error("Data Not valid","Something went wrong, please try again",None)
except Exception as e:
log.exception(f"Database exception : {e}")
return post_error("Database exception", "Something went wrong, please try again", None)
def token_search(self,token):
"""Token search for user details"""
try:
log.info("searching for the keys")
collections = get_db()[USR_TEMP_TOKEN_MONGO_COLLECTION]
user = collections.find({"publicKey":token})
if user.count() == 0:
log.info("Token has expired")
return {"active": False}
return {"active": True}
except Exception as e:
log.exception("Database connection exception | {}".format(str(e)))
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None) |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-27 00:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0026_merge_20161127_0003'),
]
operations = [
migrations.RemoveField(
model_name='recipe',
name='image',
),
migrations.AlterField(
model_name='step',
name='description',
field=models.CharField(max_length=1000),
),
]
|
"""Read hosts from LSF."""
import re
from subprocess import Popen, PIPE, check_output
def parseval(val):
"""Parse a value that could be int, float, % or contain a memory unit."""
if val == "-":
return None
if re.match("\d+$", val):
return int(val)
if re.match("\d+(.\d+)?([eE][+-]\d+)?$", val):
return float(val)
if re.match("\d+(.\d+)?%$", val):
return 100 * float(val[:-1])
if re.match("\d+(.\d+)?[KMGT]$", val):
e = {"K": 1, "M": 2, "G": 3, "T": 4}[val[-1]]
return int(float(val[:-1]) * 1024 ** e)
return val
def readhosts(args, fast=False):
"""Read hosts from LSF."""
# read bhosts for dynamic information
p = Popen(["bhosts", "-l"] + args, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if err:
return []
lines = out.splitlines()
lines.reverse()
hostorder = []
hosts = {}
host = None
stage = None
while lines:
tokens = lines.pop().split()
if not tokens: # ignore empty lines
continue
if tokens[0] == "HOST":
if host:
hostorder.append(host["host_name"])
hosts[host["host_name"]] = host
host = {
"host_name": tokens[1],
"load": {},
"threshold": {},
"comment": None,
"affinity": None,
}
stage = None
elif tokens[0] == "STATUS":
keys = [token.lower() for token in tokens]
try:
vals = lines.pop().split()
for key, val in zip(keys, vals):
host[key] = parseval(val)
            except Exception:
                # malformed STATUS block; skip it
                pass
elif tokens[0] == "CURRENT":
stage = "load"
elif tokens[0] == "LOAD":
stage = "threshold"
elif tokens[0] == "ADMIN":
host["comment"] = " ".join(tokens[3:])[1:-1]
elif tokens[0] == "CONFIGURED":
host["affinity"] = " ".join(tokens[4:])
elif stage in ("load", "threshold"):
keys = tokens
try:
total = map(parseval, lines.pop().split()[1:])
used = map(parseval, lines.pop().split()[1:])
new = {k: v for k, v in zip(keys, zip(total, used))}
host[stage].update(new)
            except Exception:
                # malformed load/threshold block; skip it
                pass
hostorder.append(host["host_name"])
hosts[host["host_name"]] = host
if fast:
return [hosts[hn] for hn in hostorder]
# read lshosts for static information
out = check_output(["lshosts", "-w"] + hostorder)
lines = out.splitlines()
keys = lines[0].lower().split()
for line in lines[1:]:
vals = line.split()
host = hosts[vals[0]]
for key, val in zip(keys[1:], vals[1:]):
host[key] = parseval(val)
            if key == "server":
host[key] = val == "Yes"
resources = vals[len(keys) - 1:]
resources[0] = resources[0][1:] # get rid of ()
resources[-1] = resources[-1][:-1]
host[keys[-1]] = resources
return [hosts[hn] for hn in hostorder]
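# A minimal usage sketch for parseval on the kinds of fields bhosts/lshosts
# emit; the sample values below are made up for illustration.
if __name__ == "__main__":
    for sample in ["-", "8", "2.5", "85%", "64G"]:
        print(sample, "->", parseval(sample))
    # "-"   -> None
    # "8"   -> 8
    # "2.5" -> 2.5
    # "85%" -> 8500.0  (percentages are scaled by 100)
    # "64G" -> 68719476736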
|
import factory
from .models import DelStavbe
from .models import Stavba
from .models import Skupina
from .models import Podskupina
from .models import ProjektnoMesto
class StavbaFactory(factory.DjangoModelFactory):
class Meta:
model = Stavba
oznaka = factory.Sequence(lambda n: '{0}'.format(n))
naziv = factory.Sequence(lambda n: 'Stavba {0}'.format(n))
class SkupinaFactory(factory.DjangoModelFactory):
class Meta:
model = Skupina
oznaka = factory.Sequence(lambda n: '{0}'.format(n))
naziv = factory.Sequence(lambda n: 'Skupina {0}'.format(n))
class PodskupinaFactory(factory.DjangoModelFactory):
class Meta:
model = Podskupina
oznaka = factory.Sequence(lambda n: '{0}'.format(n))
naziv = factory.Sequence(lambda n: 'Podskupina {0}'.format(n))
skupina = factory.SubFactory(SkupinaFactory)
class DelStavbeFactory(factory.DjangoModelFactory):
class Meta:
model = DelStavbe
oznaka = factory.Sequence(lambda n: '{0}'.format(n))
naziv = factory.Sequence(lambda n: 'Podskupina {0}'.format(n))
podskupina = factory.SubFactory(PodskupinaFactory)
class ProjektnoMestoFactory(factory.DjangoModelFactory):
class Meta:
model = ProjektnoMesto
oznaka = factory.Sequence(lambda n: '{0}'.format(n))
naziv = factory.Sequence(lambda n: 'Podskupina {0}'.format(n))
del_stavbe = factory.SubFactory(DelStavbeFactory)
|
from sst.actions import *
go_to('http://seleniumhq.org/')
assert_title('Selenium - Web Browser Automation')
|
from basicobject import BasicObject, BasicObjectEncoder
from odinconfigparser import OdinConfigParser
from asthelpers import AsteriskHelper
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Common module for the interaction with OSI OCS (OSIsoft Cloud Services) """
import requests
import json
__author__ = "Stefano Simonelli"
__copyright__ = "Copyright (c) 2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
OCS_URL = "https://qi-data.osisoft.com"
def retrieve_authentication_token(tenant, client_id, client_secret):
    """ Retrieves from OCS the authentication token for the requested tenant/client """
url = 'https://login.windows.net/{0}/oauth2/token'.format(tenant)
authorization = requests.post(
url,
data={
'grant_type': 'client_credentials',
'client_id': client_id,
'client_secret': client_secret,
'resource': 'https://qihomeprod.onmicrosoft.com/ocsapi'
}
)
header = {
'Authorization': 'bearer %s' % authorization.json()['access_token'],
'Content-type': 'application/json',
'Accept': 'text/plain'
}
return header
def delete_object(headers, tenant, namespace, _object):
"""" Deletes an OCS object, used against Types and Streams """
tenant_url = "/api/Tenants/{}".format(tenant)
api_url = "/Namespaces/{0}/{1}".format(namespace, _object)
url = OCS_URL + tenant_url + api_url
response = requests.delete(url, headers=headers)
print('--- Deleted {} -----------------------------------------'.format(api_url))
print('\nExit code: |{0}| \n\nText: |{1}| \n\nUrl: |{2}| '.format(
response.status_code,
response.text,
response.url,
))
return response.text
def delete_object_type(headers, tenant, namespace, _type):
"""" Deletes all the items of a type, used for deleting Streams or/and Types """
# Retrieves the list of objects to delete
objects_list = call_api(headers, tenant, namespace, _type)
    if objects_list != "[]":
        # json.loads handles true/false natively and avoids eval on API output
        object_list_dict = json.loads(objects_list)
print("\n Number of elements : namespace |{0}| - type |{1}| - N |{2}|".format(namespace,
_type,
len(object_list_dict)))
for item in object_list_dict:
type_to_del = item['Id']
print("to delete |{}|".format(type_to_del))
api = "{0}/{1}".format(_type, type_to_del)
delete_object(headers, tenant, namespace, api)
def delete_types_streams(headers, tenant, namespace):
"""" Deletes all the types and streams in the provided namespace
WARNING: it deletes all the information in the namespace
"""
delete_object_type(headers, tenant, namespace, "Streams")
delete_object_type(headers, tenant, namespace, "Types")
def call_api(headers, tenant, name_space, api):
"""" Calls (read operation) an OCS api and returns a string representing the JSON response """
tenant_url = "/api/Tenants/{}".format(tenant)
api_url = "/Namespaces/{0}/{1}".format(name_space, api)
url = OCS_URL + tenant_url + api_url
response = requests.get(url, headers=headers)
api_output = response.json()
api_output_str = json.dumps(api_output)
return api_output_str
def get_values_stream(headers, tenant, namespace, ocs_stream, start_timestamp, values_count):
"""" Retrieves N values for a specific asset code """
api_url = "Streams/{0}/Data/GetRangeValues?" \
"startIndex={1}"\
"&count={2}"\
.format(ocs_stream,
start_timestamp,
values_count)
api_output_str = call_api(headers, tenant, namespace, api_url)
return api_output_str
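# A minimal usage sketch; the tenant, namespace, client and stream identifiers
# below are placeholders, real values come from the OCS portal.
if __name__ == "__main__":
    headers = retrieve_authentication_token("my-tenant", "my-client-id", "my-client-secret")
    values = get_values_stream(headers, "my-tenant", "my-namespace",
                               "my-stream", "2018-01-01T00:00:00Z", 10)
    print(values)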
|
# -*- coding: utf-8 -*-
class EscapeAlEscape(object):
def solve(self, input_log):
all_ones = all([bool(int(i)) for i in input_log])
all_zeroes = all([not bool(int(i)) for i in input_log])
if all_ones or all_zeroes:
sep_long = 1
else:
sep_long = ((len(input_log) // 2) // 2) + 1
return sep_long
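# A quick illustrative call (inputs are hypothetical): for a mixed log such as
# "0011" the result is ((4 // 2) // 2) + 1 = 2, while a uniform log gives 1.
if __name__ == "__main__":
    print(EscapeAlEscape().solve("0011"))  # 2
    print(EscapeAlEscape().solve("1111"))  # 1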
|
# Generated by Django 2.1.2 on 2018-11-04 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('game_catalog', '0010_auto_20181104_1036'),
]
operations = [
migrations.RemoveField(
model_name='publishedgame',
name='edition',
),
migrations.RemoveField(
model_name='publishedgame',
name='game_system',
),
migrations.RemoveField(
model_name='publishedgame',
name='isbn',
),
migrations.RemoveField(
model_name='publishedgame',
name='publisher',
),
]
|
'''
import random
## random choice will choose 1 option from the list
randnum = random.choice(['True', 'False'])
print(randnum)
'''
class Enemy:
    def __init__(self, health, attack):
        # note: these attributes shadow the health()/attack() stub methods below
        self.health = health
        self.attack = attack
def health(self):
pass
def attack(self):
pass
class Player(Enemy):
def health(self):
pass
def attack(self):
pass
class Action():
def __init__(self):
pass
## compare enemy and player attack stats
def fight(self):
pass
## make the w key call a class
fight = input ("w to fight ")
if fight == "w":
print(fight)
#Action.fight()
else:
print("else")
#else action
|