id | content
---|---
458325
|
from vedo import Latex, show
from vedo.pyplot import histogram
import numpy as np
N = 2000
x = np.random.randn(N) * 1.0
y = np.random.randn(N) * 1.5
# hexagonal binned histogram:
histo = histogram(x, y,
                  bins=10,
                  mode='hexbin',
                  xtitle=r"\sigma_x =1.0",
                  ytitle=r"\sigma_y =1.5",
                  ztitle="counts",
                  fill=True,
                  cmap='terrain',
                  )
# add a formula:
f = r'f(x, y)=A \exp \left(-\left(\frac{\left(x-x_{o}\right)^{2}}'
f+= r'{2 \sigma_{x}^{2}}+\frac{\left(y-y_{o}\right)^{2}}'
f+= r'{2 \sigma_{y}^{2}}\right)\right)'
formula = Latex(f, c='k', s=1.5).rotateX(90).rotateZ(90).pos(1.5,-2,1)
show(histo, formula, axes=1, viewup='z')
|
458333
|
from conda_forge_tick.path_lengths import get_levels, nx
def test_get_levels():
g = nx.DiGraph(
[
("a", "d"),
("b", "d"),
("b", "e"),
("c", "e"),
("c", "h"),
("d", "f"),
("d", "g"),
("d", "h"),
("e", "g"),
],
)
levels = {0: {"a"}, 1: {"d"}, 2: {"f", "g", "h"}}
assert get_levels(g, "a") == levels
g.add_edges_from([("a", "b"), ("e", "a")])
levels = {0: {"a"}, 1: {"b"}, 2: {"d", "e"}, 3: {"f", "g", "h"}}
assert get_levels(g, "a") == levels
g.add_edge("d", "c")
levels = {0: {"a"}, 1: {"b"}, 2: {"d"}, 3: {"c", "f"}, 4: {"e", "h"}, 5: {"g"}}
assert get_levels(g, "a") == levels
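# Judging by these assertions, get_levels(g, "a") groups every node reachable from "a"
# by the length of the *longest* path from "a" to it (not the shortest), which is why
# "d" moves from level 1 to level 2 once the a -> b -> d route exists.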
|
458337
|
from numpy import ones
#$ header class Point(public)
#$ header method __init__(Point, [double])
#$ header method __del__(Point)
#$ header method translate(Point, [double])
#$ header class Points(public)
#$ header method __init__(Points, Point)
#$ header method __del__(Points)
class Point(object):
def __init__(self, x):
self.x = x
def __del__(self):
pass
def translate(self, a):
self.x = self.x + a
class Points(object):
def __init__(self, x):
self.x = x
def __del__(self):
pass
x = [1., 1., 1.]
P1 = Point(x)
P2 = Points(P1)
P3 = P2.x
P4 = P2
P5 = P2.x.x
print(x, P5)
|
458338
|
from django.db.models.utils import create_namedtuple_class
from django.test import SimpleTestCase
class NamedTupleClassTests(SimpleTestCase):
def test_immutability(self):
row_class = create_namedtuple_class('field1', 'field2')
row = row_class('value1', 'value2')
with self.assertRaises(AttributeError):
row.field3 = 'value3'
|
458341
|
import argparse
import json
import logging
import os
from os import listdir
from os.path import isfile, join
from collections import Counter
from nlp.data import load_text_file
from nlp.preprocessing import prepareText, frequencies
from echr.utils.folders import make_build_folder
from echr.utils.logger import getlogger
from echr.utils.cli import TAB
from echr.utils.config import config
from rich.markdown import Markdown
from rich.console import Console
from rich.progress import (
Progress,
BarColumn,
TimeRemainingColumn,
)
log = getlogger()
__console = Console(record=True)
def normalized_step(tokens, path='./', force=False, lemmatization=True):
"""
Normalize the tokens
:param tokens: list of strings
:type tokens: [str]
:param path: path to write the output in
:type path: str
:return: normalized tokens
:rtype: [str]
"""
normalized_tokens = prepareText(tokens, lemmatization)
normalized_tokens = [t[0] for t in normalized_tokens]
# print('normalized_tokens', normalized_tokens)
return normalized_tokens
def ngram_step(original_tokens, freq=None, path='./', force=False):
"""
Calculate the ngrams
:param original_tokens: list of tokens
:type original_tokens: [[str]]
:param freq: rules to extract and filter ngrams
:type freq: dict
:param path: path to write the output in
:type path: str
:return: dictionary of ngrams indexed by n
:rtype: dict
"""
if freq is None:
        logging.info('No configuration specified, using the default one')
freq = {1: 1, 2: 1, 3: 1, 4: 1}
for k in freq:
output_file = 'tokens_{}grams.txt'.format(k)
p = os.path.join(path, output_file)
if not force:
if os.path.isfile(p):
raise Exception("The file {} already exists!".format(p))
allgrams = frequencies(original_tokens, n=len(freq), minlimits=freq)
return allgrams
def run(console, build, title, force=False, update=False):
__console = console
global print
print = __console.print
print(Markdown("- **Step configuration**"))
input_folder = os.path.join(build, 'raw', 'preprocessed_documents')
output_folder = os.path.join(build, 'raw', 'normalized_documents')
ngrams_config = {}
try:
ngrams_config = config()['steps']['normalize']['ngrams']
except Exception as e:
print('Cannot retrieve n-grams configuration. Details: {}'.format(e))
exit(5)
print(TAB + '> Step folder: {}'.format(output_folder))
make_build_folder(console, output_folder, force, strict=False)
files = sorted([os.path.join(input_folder, f) for f in listdir(input_folder) if isfile(join(input_folder, f)) if
'_text_without_conclusion.txt' in f])
raw_corpus = []
corpus_id = []
print(Markdown('- **Load documents**'))
with Progress(
TAB + "> Loading in memory... [IN PROGRESS]",
BarColumn(30),
TimeRemainingColumn(),
"| Document [blue]{task.fields[doc]} [white]({task.completed}/{task.total})"
"{task.fields[error]}",
transient=True,
console=console
) as progress:
task = progress.add_task("Loading...", total=len(files), error="",
doc=files[0].split('/')[-1].split('_text_without_conclusion.txt')[0])
for i, p in enumerate(files):
error = ""
doc_id = p.split('/')[-1].split('_text_without_conclusion.txt')[0]
try:
raw_corpus.append(load_text_file(p))
corpus_id.append(doc_id)
except Exception as e:
error = '\n| {}'.format('Could not load the document')
log.debug(p, e)
progress.update(task, advance=1, error=error, doc=doc_id)
print(TAB + "> Loading in memory... [green][DONE]")
normalized_tokens = []
print(Markdown('- **Generate language model**'))
try:
with Progress(
TAB + "> Normalize... [IN PROGRESS]\n",
BarColumn(30),
TimeRemainingColumn(),
"| Document [blue]{task.fields[doc]} [white]({task.completed}/{task.total})"
"{task.fields[error]}",
transient=True,
console=console
) as progress:
task = progress.add_task("Compute tokens...", total=len(raw_corpus), error="", doc=corpus_id[0])
for i, doc in enumerate(raw_corpus):
filename = os.path.join(output_folder, '{}_normalized.txt'.format(corpus_id[i]))
if not update or not os.path.isfile(filename):
normalized_tokens.append(normalized_step(doc, force=force, lemmatization=True))
else:
with open(filename, 'r') as f:
normalized_tokens.extend(f.read().split())
f.close()
progress.update(task, advance=1, error=error, doc=corpus_id[i])
except Exception as e:
        print(TAB + '[bold red]:double_exclamation_mark: Could not normalize the tokens. Details: {}'.format(e))
exit(40)
print(TAB + "> Normalize... [green][DONE]")
all_grams = []
doc_grammed = []
try:
with Progress(
TAB + "> Compute ngrams... [IN PROGRESS]\n",
BarColumn(30),
TimeRemainingColumn(),
"| Document [blue]{task.fields[doc]} [white]({task.completed}/{task.total})"
"{task.fields[error]}",
transient=True,
console=console
) as progress:
task = progress.add_task("Compute tokens...", total=len(corpus_id), error="", doc=corpus_id[0])
for i, doc in enumerate(normalized_tokens):
error = ""
filename = os.path.join(output_folder, '{}_normalized.txt'.format(corpus_id[i]))
if not update or not os.path.isfile(filename):
grams = ngram_step(doc, ngrams_config, force=force)
merged = []
for g in grams.values():
merged.extend(g)
doc_grammed.append(merged)
all_grams.extend(merged)
else:
error = "\n| Load document as already normalized."
with open(filename, 'r') as f:
all_grams.extend(f.read().split())
doc_grammed.append(None)
f.close()
progress.update(task, advance=1, error=error, doc=corpus_id[i])
except Exception:
console.print_exception()
print(TAB + "> Compute ngrams... [green][DONE]")
f = Counter(all_grams)
with open(os.path.join(output_folder, 'full_dictionary.txt'), 'w') as outfile:
json.dump(f, outfile, indent=4, sort_keys=True)
print(TAB + '> Save the full dictionary [green][DONE]')
with Progress(
TAB + "> Save normalized documents... [IN PROGRESS]\n",
BarColumn(30),
TimeRemainingColumn(),
"| Document [blue]{task.fields[doc]} [white]({task.completed}/{task.total})"
"{task.fields[error]}",
transient=True,
console=console
) as progress:
task = progress.add_task("Compute tokens...", total=len(doc_grammed), error="", doc=corpus_id[0])
for i, doc in enumerate(doc_grammed):
if doc is not None:
with open(os.path.join(output_folder, '{}_normalized.txt'.format(corpus_id[i])), 'a') as file:
file.write(' '.join(doc))
            progress.update(task, advance=1, error="", doc=corpus_id[i])
print(TAB + '> Save normalized documents... [green][DONE]')
def main(args):
console = Console(record=True)
run(console, args.build, args.title, args.force, args.u)
def parse_args(parser):
args = parser.parse_args()
# Check path
return args
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Turn a collection of documents into a BoW and TF-IDF representation.')
parser.add_argument('--build', type=str, default="./build/echr_database/")
parser.add_argument('--title', type=str)
parser.add_argument('-f', action='store_true')
parser.add_argument('-u', action='store_true')
args = parse_args(parser)
main(args)
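# Illustrative invocation (the script filename and paths below are assumptions, not
# taken from the project):
#   python normalize_documents.py --build ./build/echr_database/ --title "ECHR corpus" -f -u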
|
458365
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def GetDimensionReferences(item):
if hasattr(item, "References"):
return [item.Document.GetElement(x.ElementId) for x in item.References]
else: return None
dimensions = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetDimensionReferences(x) for x in dimensions]
else: OUT = GetDimensionReferences(dimensions)
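# Note: IN, OUT and UnwrapElement are globals injected by Dynamo's Python Script node,
# so this snippet runs inside Dynamo rather than as a standalone script. IN[0] is a
# dimension (or a list of dimensions); OUT holds the referenced Revit elements, or None
# for dimensions that expose no References.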
|
458372
|
from torchnlp.nn.attention import Attention
from torchnlp.nn.lock_dropout import LockedDropout
from torchnlp.nn.weight_drop import WeightDropGRU
from torchnlp.nn.weight_drop import WeightDropLSTM
from torchnlp.nn.weight_drop import WeightDropLinear
from torchnlp.nn.weight_drop import WeightDrop
from torchnlp.nn.cnn_encoder import CNNEncoder
__all__ = [
'LockedDropout',
'Attention',
'CNNEncoder',
'WeightDrop',
'WeightDropGRU',
'WeightDropLSTM',
'WeightDropLinear',
]
|
458412
|
class Solution(object):
def removeInterval(self, intervals, toBeRemoved):
"""
:type intervals: List[List[int]]
:type toBeRemoved: List[int]
:rtype: List[List[int]]
"""
removeStart, removeEnd = toBeRemoved
output = []
for start, end in intervals:
if end <= removeStart or start >= removeEnd:
output.append([start, end])
elif start < removeStart and end > removeEnd:
output.append([start, removeStart])
output.append([removeEnd, end])
elif start < removeStart and end <= removeEnd:
output.append([start, removeStart])
elif start >= removeStart and end > removeEnd:
output.append([removeEnd, end])
return output
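# Quick illustrative check: removing [1, 6] from [[0, 2], [3, 4], [5, 7]] keeps the left
# piece of the first interval and the right piece of the last one.
#   Solution().removeInterval([[0, 2], [3, 4], [5, 7]], [1, 6])  # -> [[0, 1], [6, 7]]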
|
458448
|
import argparse
import csv
import os
import pandas as pd
from pprint import pprint
ERROR_STRINGS = {
'uniqueness_error': '"{0} must be unique.".format(fieldname)',
'required_error': '"Required field {0} is missing.".format(fieldname)',
'type_error': '"{0} must be of type {1}.".format(fieldname, data_type)',
'len_error': ('"{0} must be no more than {1} characters.".format'
'(fieldname, length)')
}
TAS_KEY_IDENTIFIERS = [
'AllocationTransferRecipientAgencyId',
'AgencyIdentifier',
'BeginningPeriodOfAvailability',
'EndingPeriodOfAvailability',
'AvailabilityTypeCode',
'MainAccountCode'
]
KEY_IDENTIFIERS = {
'appropriation': [],
'object_class_program_activity': [
'ProgramActivityCode',
'ObjectClass'
],
'award': [
'FainAwardNumber'
],
'award_financial': [
'ObjectClass',
'FainAwardNumber'
]
}
class Validator(object):
'''A validator which will read in 4 csv files and
perform various validations'''
def __init__(self,
appropriation_file,
object_class_file,
award_financial_file,
award_file,
rules_dir):
self.appropriations = self.load_data(appropriation_file)
self.object_class = self.load_data(object_class_file)
self.award_financial = self.load_data(award_financial_file)
self.award = self.load_data(award_file)
self.appropriations_rules = self.load_simple_rules(rules_dir +
'appropriation_rules.csv')
self.object_class_rules = self.load_simple_rules(rules_dir +
'object_class_program_activity_rules.csv')
self.award_financial_rules = self.load_simple_rules(rules_dir +
'award_financial_rules.csv')
self.award_rules = self.load_simple_rules(rules_dir + 'award_rules.csv')
self.results = self.validate_submission()
def load_data(self, dataframe):
'''Loads data from submitted agency CSV
Expects:
dataframe: dataframe of the agency-submitted CSV
Outputs:
A list of dicts'''
if dataframe is None:
return ''
return dataframe.to_dict('records')
def load_simple_rules(self, rules_file):
base = os.path.dirname(__file__)
        filepath = os.path.join(base, rules_file)
        rules_dict = {}
        rules = csv.DictReader(open(filepath, 'r'))
for row in rules:
rules_dict[row['fieldname']] = row
return rules_dict
def check_required(self, required, value):
if not eval(required) or value:
return True
elif value == 0:
return True
def check_data_type(self, data_type, value):
try:
eval(data_type)(value)
return True
except ValueError:
return False
def check_length(self, length, value):
if len(str(value)) <= int(length):
return True
def check_unique(self, value):
''' TODO: micahsaul'''
pass
def generate_error(self,
error_type,
fieldname,
submitted_value,
hard_fail=True,
data_type='',
length=''):
result = {}
result['error_type'] = error_type
result['hard_fail'] = hard_fail
result['fieldname'] = fieldname
result['value'] = submitted_value
result['error_string'] = eval(ERROR_STRINGS[error_type])
return result
def build_tas(self, data):
#key = ''
#for field in TAS_KEY_IDENTIFIERS:
# if data[field]:
# key += str(data[field]) + '-'
#return key[:-1]
return "" # just return blank string since TAS fields changed
def build_key(self, data, fields):
keys = {}
for field in fields:
is_key = data.get(field,None)
if is_key:
keys[field] = data[field]
return keys
def validate_row(self, row, rules):
'''Runs a set of simple validation rules against submitted data.
Expects:
data: A single dict from the list produced by load_data()
rules: A dict of simple validation rules from load_simple_rules
Outputs:
A list of validation errors and metadata'''
results = []
for field in row:
if field in rules:
rule = rules[field]
if not self.check_required(rule['required'], row[field]):
error = self.generate_error('required_error', field, 'n/a')
results.append(error)
continue
if row[field]:
if not self.check_data_type(rule['data_type'], row[field]):
error = self.generate_error('type_error',
field,
row[field],
data_type=rule['data_type'])
results.append(error)
if not self.check_length(rule['field_length'], row[field]):
error = self.generate_error('len_error',
field,
row[field],
length=rule['field_length'])
results.append(error)
else:
#for the prototype, ignore extra columns on submitted csvs
pass
return results
def validate_file(self, filename, data, rules):
'''Runs all validations on a single file.
Expects:
filename: The type of file being validated.
data: A list of dicts produced by load_data()
rules: A dict of simple validation rules from load_simple_rules()
Outputs:
A dict, keyed by a docname + row number, containing errors and
original data.
'''
results = {}
row_count = 0
for row in data:
row_count += 1
errors = self.validate_row(row, rules)
if errors:
row_id = filename + '_row' + str(row_count)
result = {}
result['errors'] = errors
result['data'] = row
if filename != 'award':
result['tas_identifier'] = self.build_tas(row)
result['identifiers'] = self.build_key(
row, KEY_IDENTIFIERS[filename])
results[row_id] = result
return results
def validate_submission(self):
results = []
results.append(self.validate_file('appropriations',
self.appropriations,
self.appropriations_rules))
results.append(self.validate_file('object_class',
self.object_class,
self.object_class_rules))
results.append(self.validate_file('award_financial',
self.award_financial,
self.award_financial_rules))
results.append(self.validate_file('award',
self.award,
self.award_rules))
return results
class ValidatorSingle(Validator):
def __init__(self,
file_dataframe,
file_template,
rules_dir):
self.file_data = self.load_data(file_dataframe)
self.file_template = file_template
self.rules = self.load_simple_rules(rules_dir +
file_template[:-4] + '_rules' + file_template[-4:])
self.results = []
self.results.append(self.validate_file(file_template[:-4],
self.file_data,
self.rules))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=(
'Validates the four CSV files in the DATA Act Agency submission'))
parser.add_argument('appropriation', help="Appropriation CSV")
parser.add_argument('object_class', help="Object Class CSV")
parser.add_argument('award_financial', help="Award Financial CSV")
parser.add_argument('award', help="Award CSV")
parser.add_argument('rules_dir', help="Path to rules directory")
args = parser.parse_args()
    # load each agency CSV into a dataframe, as load_data() expects
    validator = Validator(pd.read_csv(args.appropriation),
                          pd.read_csv(args.object_class),
                          pd.read_csv(args.award_financial),
                          pd.read_csv(args.award),
                          args.rules_dir
                          )
pprint(validator.results)
|
458466
|
import argparse
from collections import defaultdict
import cv2
from PIL import Image, ImageDraw
import numpy as np
import torch
from models import load_model_from_path
from utils import coerce_to_path_and_create_dir, coerce_to_path_and_check_exist, get_files_from_dir
from utils.constant import (CONTEXT_BACKGROUND_COLOR, ILLUSTRATION_LABEL, TEXT_LABEL, LABEL_TO_COLOR_MAPPING,
MODEL_FILE, SEG_GROUND_TRUTH_FMT)
from utils.image import resize, LabeledArray2Image
from utils.logger import get_logger, print_info, print_error, print_warning
from utils.metrics import RunningMetrics
from utils.path import MODELS_PATH
VALID_EXTENSIONS = ['jpeg', 'JPEG', 'jpg', 'JPG', 'pdf', 'tiff']
GT_COLOR = (0, 255, 0)
LABEL_TO_DOCUMENT_CC_AREA_RATIO_THRESHOLD = {
ILLUSTRATION_LABEL: 0.005,
TEXT_LABEL: 0.0001,
}
class Evaluator:
"""Pipeline to evaluate a given trained segmentation NN model on a given input_dir"""
def __init__(self, input_dir, output_dir, tag="default", seg_fmt=SEG_GROUND_TRUTH_FMT, labels_to_eval=None,
save_annotations=True, labels_to_annot=None, predict_bbox=False, verbose=True):
self.input_dir = coerce_to_path_and_check_exist(input_dir).absolute()
self.files = get_files_from_dir(self.input_dir, valid_extensions=VALID_EXTENSIONS, recursive=True, sort=True)
self.output_dir = coerce_to_path_and_create_dir(output_dir).absolute()
self.seg_fmt = seg_fmt
self.logger = get_logger(self.output_dir, name='evaluator')
model_path = coerce_to_path_and_check_exist(MODELS_PATH / tag / MODEL_FILE)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model, (self.img_size, restricted_labels, self.normalize) = load_model_from_path(
model_path, device=self.device, attributes_to_return=['train_resolution', 'restricted_labels', 'normalize'])
self.model.eval()
self.restricted_labels = sorted(restricted_labels)
self.labels_to_eval = [ILLUSTRATION_LABEL] if labels_to_eval is None else sorted(labels_to_eval)
self.labels_to_rm = set(self.restricted_labels).difference(self.labels_to_eval)
assert len(set(self.labels_to_eval).intersection(self.restricted_labels)) == len(self.labels_to_eval)
self.restricted_colors = [LABEL_TO_COLOR_MAPPING[l] for l in self.restricted_labels]
self.label_idx_color_mapping = {self.restricted_labels.index(l) + 1: c
for l, c in zip(self.restricted_labels, self.restricted_colors)}
self.color_label_idx_mapping = {c: l for l, c in self.label_idx_color_mapping.items()}
self.metrics = defaultdict(lambda : RunningMetrics(self.restricted_labels, self.labels_to_eval))
self.save_annotations = save_annotations
self.labels_to_annot = labels_to_annot or self.labels_to_eval
self.predict_bbox = predict_bbox
self.verbose = verbose
self.print_and_log_info('Output dir: {}'.format(self.output_dir.absolute()))
self.print_and_log_info('Evaluator initialised with kwargs {}'.format(
{'labels_to_eval': self.labels_to_eval, 'save_annotations': save_annotations}))
self.print_and_log_info('Model tag: {}'.format(model_path.parent.name))
self.print_and_log_info('Model characteristics: train_resolution={}, restricted_labels={}'
.format(self.img_size, self.restricted_labels))
self.print_and_log_info('Found {} input files to process'.format(len(self.files)))
def print_and_log_info(self, string):
self.logger.info(string)
if self.verbose:
print_info(string)
def print_and_log_error(self, string):
self.logger.error(string)
if self.verbose:
print_error(string)
def print_and_log_warning(self, string):
self.logger.warning(string)
if self.verbose:
print_warning(string)
def run(self):
for filename in self.files:
self.print_and_log_info('Processing {}'.format(filename.relative_to(self.input_dir)))
label_file = filename.parent / self.seg_fmt.format(filename.stem, 'png')
dir_path = filename.parent.relative_to(self.input_dir)
img = Image.open(filename).convert('RGB')
pred = self.predict(img)
if not label_file.exists():
self.print_and_log_warning('Ground truth not found')
gt = None
else:
if Image.open(label_file).size == img.size:
gt = self.encode_segmap(Image.open(label_file))
self.metrics[str(dir_path)].update(gt, pred)
else:
self.print_and_log_error(filename.relative_to(self.input_dir))
if self.save_annotations:
output_path = self.output_dir / dir_path
output_path.mkdir(exist_ok=True)
pred_img = LabeledArray2Image.convert(pred, self.label_idx_color_mapping)
mask = Image.fromarray((np.array(pred_img) == (0, 0, 0)).all(axis=-1).astype(np.uint8) * 127 + 128)
blend_img = Image.composite(img, pred_img, mask)
empty_pred, empty_gt = np.all(pred == 0), True
lw = int(min([0.01 * img.size[0], 0.01 * img.size[1]]))
if gt is not None:
for label in self.labels_to_eval:
if label in gt:
mask_gt = (gt == label).astype(np.uint8)
contours = cv2.findContours(mask_gt, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
for cnt in contours:
empty_gt = False
draw = ImageDraw.Draw(blend_img)
draw.line(list(map(tuple, cnt.reshape(-1, 2).tolist())) + cnt[0][0].tolist(),
fill=GT_COLOR, width=lw)
if not empty_pred or not empty_gt:
blend_img = resize(blend_img.convert('RGB'), (1000, 1000))
blend_img.save(output_path / '{}.jpg'.format(filename.stem))
self.save_metrics()
self.print_and_log_info('Evaluator run is over')
def encode_segmap(self, img):
arr_segmap = np.array(img)
unique_colors = set([color for size, color in img.getcolors()]).difference({CONTEXT_BACKGROUND_COLOR})
label = np.zeros(arr_segmap.shape[:2], dtype=np.uint8)
for color in unique_colors:
if color in self.restricted_colors:
mask = (arr_segmap == color).all(axis=-1)
label[mask] = self.color_label_idx_mapping[color]
return label
@torch.no_grad()
def predict(self, image):
red_img = resize(image, size=self.img_size, keep_aspect_ratio=True)
inp = np.array(red_img, dtype=np.float32) / 255
if self.normalize:
inp = ((inp - inp.mean(axis=(0, 1))) / (inp.std(axis=(0, 1)) + 10**-7))
inp = torch.from_numpy(inp.transpose(2, 0, 1)).float().to(self.device) # HWC -> CHW tensor
pred = self.model(inp.reshape(1, *inp.shape))[0].max(0)[1].cpu().numpy()
res = np.zeros(pred.shape, dtype=np.uint8)
for label in self.labels_to_annot:
mask_pred = (pred == self.restricted_labels.index(label) + 1).astype(np.uint8)
_, contours, _ = cv2.findContours(mask_pred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
cnt_area = cv2.contourArea(cnt)
if cnt_area / (pred.shape[0] * pred.shape[1]) >= LABEL_TO_DOCUMENT_CC_AREA_RATIO_THRESHOLD[label]:
if self.predict_bbox:
x, y, width, height = cv2.boundingRect(cnt)
bbox = np.asarray([[x, y], [x+width, y], [x+width, y+height], [x, y+height]])
cv2.fillPoly(res, [bbox], color=self.restricted_labels.index(label) + 1)
else:
cv2.fillPoly(res, [cnt], color=self.restricted_labels.index(label) + 1)
res = cv2.resize(res, image.size, interpolation=cv2.INTER_NEAREST)
return res
def save_metrics(self):
metric_names = next(iter(self.metrics.values())).names
all_values = [[] for _ in range(len(metric_names))]
with open(self.output_dir / 'metrics.tsv', mode='w') as f:
f.write('dir_name\t{}\n'.format('\t'.join(metric_names)))
for name, metrics in self.metrics.items():
values = list(metrics.get().values())
f.write('{}\t{}\n'.format(name, '\t'.join(map('{:.4f}'.format, values))))
[all_values[k].append(v) for k, v in enumerate(values)]
if len(self.metrics) > 1:
mean_values = list(map(np.mean, all_values))
f.write('{}\t{}\n'.format('average', '\t'.join(map('{:.4f}'.format, mean_values))))
print_info('Metrics saved')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Pipeline to evaluate a NN model on a given input_dir')
parser.add_argument('-i', '--input_dir', nargs='?', type=str, required=True, help='Input directory')
parser.add_argument('-o', '--output_dir', nargs='?', type=str, required=True, help='Output directory')
parser.add_argument('-t', '--tag', nargs='?', type=str, default='default', help='Model tag to evaluate')
parser.add_argument('-l', '--labels', nargs='+', type=int, default=[1], help='Labels to eval')
parser.add_argument('-s', '--save_annot', action='store_true', help='Whether to save annotations')
parser.add_argument('-lta', '--labels_to_annot', nargs='+', type=int, default=None, help='Labels to annotate')
parser.add_argument('-b', '--pred_bbox', action='store_true', help='Whether to predict bounding boxes')
args = parser.parse_args()
input_dir = coerce_to_path_and_check_exist(args.input_dir)
evaluator = Evaluator(input_dir, args.output_dir, tag=args.tag, labels_to_eval=args.labels,
save_annotations=args.save_annot if args.labels_to_annot is None else True,
labels_to_annot=args.labels_to_annot, predict_bbox=args.pred_bbox)
evaluator.run()
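# Example command line (the script name and directories are placeholders):
#   python evaluate.py -i ./datasets/pages -o ./eval_results -t default -l 1 -s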
|
458524
|
import gc
import numpy
from sppy.linalg.GeneralLinearOperator import GeneralLinearOperator
from sppy.lib.Parameter import Parameter
def biCGSTAB(A, b, maxIter=1000, tol=10**-6):
"""
Solve the linear system of equations given by A x = b where A is a csarray
and b is a numpy array of the same dtype. Uses the Iterative stabilized
bi-conjugate gradient method.
:param A: A csarray object of size n x n
:param b: A numpy array of length n of the same dtype as A.
    :param maxIter: The maximum number of iterations of the method
    :param tol: The error tolerance
    :return x: A numpy array corresponding to the solution vector.
    :return i: The output code: 0 = success, 1 = numerical issue, 2 = no convergence, 3 = invalid input
"""
return A._array.biCGSTAB(b, maxIter, tol)
def norm(A, ord=None):
"""
This function returns the Frobenius norm of the input A, which is defined as
sqrt(sum A_ij^2).
:param A: A csarray object.
:param ord: The type of norm required, currently ignored.
:return: The Frobenius norm of A.
"""
return A._array.norm()
def rsvd(A, k, p=10, q=2, omega=None):
"""
Compute the randomised SVD using the algorithm on page 9 of Halko et al.,
Finding Structure with randomness: stochastic algorithms for constructing
approximate matrix decompositions, 2009.
Finds the partial SVD of a sparse or dense matrix A, resolving the largest k
singular vectors/values, using exponent q and k+p projections. Returns the
left and right singular vectors, and the singular values. The resulting matrix
can be approximated using A ~ U s V.T. To improve the approximation quality
for a fixed k, increase p or q.
:param A: A sparse or dense matrix or GeneralLinearOperator
:param k: The number of singular values and random projections
:param p: The oversampling parameter
:param q: The exponent for the projections.
:param omega: An initial matrix to perform random projections onto with at least k columns
:return U: The left singular vectors
:return s: The singular values
:return V: The right singular vectors
"""
Parameter.checkInt(k, 1, float("inf"))
Parameter.checkInt(p, 0, float("inf"))
Parameter.checkInt(q, 0, float("inf"))
if isinstance(A, GeneralLinearOperator):
L = A
else:
L = GeneralLinearOperator.asLinearOperator(A)
n = L.shape[1]
    if omega is None:
omega = numpy.random.randn(n, k+p)
else:
omega = numpy.c_[omega, numpy.random.randn(n, p+k - omega.shape[1])]
Y = L.matmat(omega)
del omega
for i in range(q):
Y = L.rmatmat(Y)
gc.collect()
Y = L.matmat(Y)
gc.collect()
Q, R = numpy.linalg.qr(Y)
del Y
del R
gc.collect()
B = L.rmatmat(Q).T
U, s, V = numpy.linalg.svd(B, full_matrices=False)
del B
V = V.T
U = Q.dot(U)
U = U[:, 0:k]
s = s[0:k]
V = V[:, 0:k]
return U, s, V
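# Minimal usage sketch (assumes a dense numpy array is acceptable input, which the
# docstring suggests via GeneralLinearOperator.asLinearOperator; untested here):
#   X = numpy.random.randn(500, 200)
#   U, s, V = rsvd(X, k=10)
#   X_approx = (U * s).dot(V.T)   # rank-10 approximation of X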
|
458555
|
from collections import defaultdict
class TrackRelatedChanges(object):
def __init__(self):
self.create_models = defaultdict(list)
self.update_models = defaultdict(list)
self.delete_models = defaultdict(list)
def has_tracked_models(self, model_class=None):
return any((
self.has_tracked_models_to_create(model_class),
self.has_tracked_models_to_update(model_class),
self.has_tracked_models_to_delete(model_class)
))
def has_tracked_models_to_delete(self, model_class=None):
return self._has_tracked_models(self.delete_models, model_class)
def has_tracked_models_to_update(self, model_class=None):
return self._has_tracked_models(self.update_models, model_class)
def has_tracked_models_to_create(self, model_class=None):
return self._has_tracked_models(self.create_models, model_class)
def _has_tracked_models(self, storage, model_class=None):
if model_class:
return bool(storage[model_class])
return any(models for models in storage.values())
def clear_tracked_models(self, model_class=None):
if not model_class:
self.create_models.clear()
self.update_models.clear()
self.delete_models.clear()
else:
self.create_models[model_class] = []
self.update_models[model_class] = []
self.delete_models[model_class] = []
self.on_tracked_models_cleared(model_class)
def on_tracked_models_cleared(self, model_class=None):
"""
Override this to be notified when tracked models have been cleared.
:param model_class: May be None which indicates that all types have been cleared.
"""
pass
def track_create(self, model):
self.create_models[model.__class__].append(model)
def track_update(self, model):
self.update_models[model.__class__].append(model)
def track_delete(self, model):
self.delete_models[model.__class__].append(model)
def get_live_tracked_models(self, model_class):
"""Return tracked models that have not been deleted
"""
return self.update_models[model_class] + self.create_models[model_class]
def get_tracked_models_to_create(self, model_class):
return self.create_models[model_class]
def get_tracked_models_to_update(self, model_class):
return self.update_models[model_class]
def get_tracked_models_to_delete(self, model_class):
return self.delete_models[model_class]
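# Illustrative use: instantiate (or mix in) the tracker, record model changes as they
# happen, then inspect or clear them in bulk.
#   tracker = TrackRelatedChanges()
#   tracker.track_create(instance)                    # instance: any model object
#   tracker.has_tracked_models(type(instance))        # -> True
#   tracker.get_live_tracked_models(type(instance))   # -> [instance]
#   tracker.clear_tracked_models()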
|
458568
|
import matplotlib.cm as cm
import html
from IPython.display import display, HTML
import torch
import numpy as np
from transformers import pipeline
def value2rgba(x, cmap=cm.RdYlGn, alpha_mult=1.0):
"Convert a value `x` from 0 to 1 (inclusive) to an RGBA tuple according to `cmap` times transparency `alpha_mult`."
c = cmap(x)
rgb = (np.array(c[:-1]) * 255).astype(int)
a = c[-1] * alpha_mult
return tuple(rgb.tolist() + [a])
def piece_prob_html(pieces, prob, sep=' ', **kwargs):
html_code,spans = ['<span style="font-family: monospace;">'], []
for p, a in zip(pieces, prob):
p = html.escape(p)
c = str(value2rgba(a, alpha_mult=0.5, **kwargs))
spans.append(f'<span title="{a:.3f}" style="background-color: rgba{c};">{p}</span>')
html_code.append(sep.join(spans))
html_code.append('</span>')
return ''.join(html_code)
def show_piece_attn(*args, **kwargs):
from IPython.display import display, HTML
display(HTML(piece_prob_html(*args, **kwargs)))
def split_text(x, max_length):
length = len(x)
if length > max_length:
splits = length // max_length
        y = []
        for i in range(0, splits * max_length, max_length):
            y.append(torch.tensor([x[i : i + max_length]]))
        if length % max_length > 0:
            y.append(torch.tensor([x[splits * max_length : length]]))
    else:
        # keep the same shape convention as the chunks above: a list of (1, n) tensors
        y = [torch.tensor([x])]
return y
def nothing_ent(i, word):
return {
'entity': 'O',
'score': 0,
'index': i,
'word': word,
'start': 0,
'end': 0
}
def generate_highlighted_text(model, tokenizer, text):
ner_model = pipeline(
'token-classification',
model=model,
tokenizer=tokenizer,
ignore_labels=None,
device=0)
result = ner_model(text)
tokens = ner_model.tokenizer.tokenize(text)
    label_indices = [i['index'] - 1 for i in result]
    entities = list()
    for i, word in enumerate(tokens):
        if i in label_indices:
            entities.append(result[label_indices.index(i)])
else:
entities.append(nothing_ent(i, word))
entities = ner_model.group_entities(entities)
spans = [e['word'] for e in entities]
probs = [e['score'] for e in entities]
return piece_prob_html(spans, probs, sep=' ')
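# Hypothetical notebook usage (the checkpoint name is an assumption; any token
# classification model with its matching tokenizer should work, and a GPU is needed
# because the pipeline above is created with device=0):
#   from transformers import AutoModelForTokenClassification, AutoTokenizer
#   model = AutoModelForTokenClassification.from_pretrained('dslim/bert-base-NER')
#   tokenizer = AutoTokenizer.from_pretrained('dslim/bert-base-NER')
#   display(HTML(generate_highlighted_text(model, tokenizer, 'Alice lives in Paris.')))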
|
458622
|
import trio
import anyio
import mock
import attr
import copy
import time
from functools import partial
from contextlib import asynccontextmanager, AsyncExitStack
from asyncscope import main_scope
from distkv.default import CFG
from distkv.server import Server
from distkv.codec import unpacker
from distkv.util import attrdict, combine_dict, NotGiven, ValueEvent
from distkv.mock import S as _S
from asyncserf.stream import SerfEvent
from distmqtt.utils import create_queue
import logging
logger = logging.getLogger(__name__)
otm = time.time
@asynccontextmanager
async def stdtest(n=1, run=True, ssl=False, tocks=20, **kw):
C_OUT = CFG.get("_stdout", NotGiven)
if C_OUT is not NotGiven:
del CFG["_stdout"]
TESTCFG = copy.deepcopy(CFG)
TESTCFG.server.port = None
TESTCFG.server.backend = "serf"
TESTCFG.root = "test"
if C_OUT is not NotGiven:
CFG["_stdout"] = C_OUT
TESTCFG["_stdout"] = C_OUT
if ssl:
import ssl
import trustme
ca = trustme.CA()
cert = ca.issue_server_cert(u"127.0.0.1")
server_ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
client_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
ca.configure_trust(client_ctx)
cert.configure_cert(server_ctx)
else:
server_ctx = client_ctx = False
clock = trio.lowlevel.current_clock()
clock.autojump_threshold = 0.0
# clock.rate = 5
@attr.s
class S(_S):
splits = attr.ib(factory=set)
serfs = attr.ib(factory=set)
def split(self, s):
assert s not in self.splits
logger.debug("Split: add %d", s)
self.splits.add(s)
def join(self, s):
logger.debug("Split: join %d", s)
self.splits.remove(s)
async def mock_get_host_port(st, host):
i = int(host[host.rindex("_") + 1 :]) # noqa: E203
s = st.s[i]
await s.is_serving
for host, port, *_ in s.ports:
if host == "::" or host[0] != ":":
return host, port
def tm():
try:
return trio.current_time()
except RuntimeError:
return otm()
async def mock_set_tock(self, old):
assert self._tock < tocks, "Test didn't terminate. Limit:" + str(tocks)
await old()
async with main_scope("_distkv_test_serf") as scp:
tg = scp._tg
st = S(tg, client_ctx)
async with AsyncExitStack() as ex:
st.ex = ex # pylint: disable=attribute-defined-outside-init
ex.enter_context(mock.patch("time.time", new=tm))
ex.enter_context(mock.patch("time.monotonic", new=tm))
logging._startTime = tm()
ex.enter_context(
mock.patch("asyncserf.serf_client", new=partial(mock_serf_client, st))
)
for i in range(n):
name = "test_" + str(i)
args = kw.get(name, kw.get("args", attrdict()))
args["cfg"] = combine_dict(
args.get("cfg", {}),
{
"connect": {"ssl": client_ctx},
"server": {
"bind_default": {
"host": "127.0.0.1",
"port": i + 50120,
"ssl": server_ctx,
},
"serf": {"i": i},
},
},
TESTCFG,
)
s = Server(name, **args)
ex.enter_context(
mock.patch.object(s, "_set_tock", new=partial(mock_set_tock, s, s._set_tock))
)
ex.enter_context(
mock.patch.object(s, "_get_host_port", new=partial(mock_get_host_port, st))
)
st.s.append(s)
evts = []
for i in range(n):
if kw.get("run_" + str(i), run):
evt = anyio.Event()
tg.spawn(partial(st.s[i].serve, ready_evt=evt))
evts.append(evt)
for e in evts:
await e.wait()
try:
yield st
finally:
with anyio.fail_after(2, shield=True):
logger.info("Runtime: %s", clock.current_time())
tg.cancel_scope.cancel()
logger.info("End")
pass # unwinding ex:AsyncExitStack
@asynccontextmanager
async def mock_serf_client(master, **cfg):
async with anyio.create_task_group() as tg:
ms = MockServ(tg, master, **cfg)
master.serfs.add(ms)
try:
yield ms
finally:
master.serfs.remove(ms)
pass # terminating mock_serf_client nursery
class MockServ:
def __init__(self, tg, master, **cfg):
self.cfg = cfg
self._tg = tg
self.streams = {}
self._master = master
def __hash__(self):
return id(self)
async def spawn(self, fn, *args, **kw):
async def run(evt):
with anyio.CancelScope() as sc:
await evt.set(sc)
await fn(*args, **kw)
evt = ValueEvent()
self._tg.spawn(run, evt)
return await evt.get()
async def event(self, name, payload, coalesce=True):
try:
logger.debug("SERF:%s: %r", name, unpacker(payload))
except Exception:
logger.debug("SERF:%s: %r (raw)", name, payload)
assert not coalesce, "'coalesce' must be cleared!"
i_self = self.cfg.get("i", 0)
for s in list(self._master.serfs):
i_s = s.cfg.get("i", 0)
for x in self._master.splits:
if (i_s < x) != (i_self < x):
break
else:
n = tuple(name.split("."))
while n:
sl = s.streams.get(n, ())
for sn in sl:
await sn.q.put((name, payload))
n = n[:-1]
def stream(self, typ):
"""compat for supporting asyncactor"""
if not typ.startswith("user:"):
raise RuntimeError("not supported")
typ = typ[5:]
return self.serf_mon(typ)
def serf_mon(self, typ):
if "," in typ:
raise RuntimeError("not supported")
s = MockSerfStream(self, "user:" + typ)
return s
async def serf_send(self, typ, payload):
"""compat for supporting asyncactor"""
return await self.event(typ, payload)
class MockSerfStream:
q = None
def __init__(self, serf, typ):
self.serf = serf
assert typ.startswith("user:")
self.typ = tuple(typ[5:].split("."))
async def __aenter__(self):
self.q = create_queue(100)
self.serf.streams.setdefault(self.typ, []).append(self)
return self
async def __aexit__(self, *tb):
self.serf.streams[self.typ].remove(self)
del self.q
def __aiter__(self):
return self
async def __anext__(self):
res = await self.q.get()
evt = SerfEvent(self)
evt.topic, evt.payload = res
return evt
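# Illustrative test skeleton built on stdtest (assumes a trio-based test runner such as
# pytest-trio; the body is elided):
#   async def test_example():
#       async with stdtest(n=2, tocks=30) as st:
#           server_a, server_b = st.s
#           ...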
|
458640
|
from .api import *  # noqa: F401, F403
from .jni import *  # noqa: F401, F403
from .types import *  # noqa: F401, F403
__version__ = '0.2.5'
|
458651
|
import traceback
from itertools import chain
import shutil, os, re
from hashlib import sha1
import psutil
import ctypes
ASTERISKS = r'**********'
user32 = ctypes.WinDLL('user32')
SW_MAXIMISE = 3
window_titles = []
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
EnumWindows = ctypes.windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
JoinSep = '_'
def cat_lists(L):
lst = list(chain(*L))
return lst
def convert2float_s(str_t):
try:
dig_t = float(str_t)
except:
dig_t = float('nan')
return dig_t
def convert_s(obj):
ret = obj
if isinstance(obj,str):
if obj==ASTERISKS:
ret = float('nan')
else:
for func in [int,float]:
try:
ret = func(obj)
break
except:
pass
return ret
def copyfiles(path_src, path_dst, files):
if not os.path.isdir(path_dst):
os.makedirs(path_dst)
for file in files:
file_src = os.path.join(path_src, file)
file_dst = os.path.join(path_dst, file)
shutil.copy(file_src, file_dst)
def copyfiles_pattern(path_src, path_dst, pattern, files=None):
if files is None:
if os.path.isdir(path_src):
files = os.listdir(path_src)
else:
files = []
if not os.path.isdir(path_dst):
os.makedirs(path_dst)
for file in files:
        if re.match(pattern, file):
file_src = os.path.join(path_src, file)
file_dst = os.path.join(path_dst, file)
shutil.copy(file_src, file_dst)
def delete_files_pattern(path_t, pattern):
if isinstance(path_t, str) and isinstance(pattern, str):
if os.path.isdir(path_t):
files = os.listdir(path_t)
files_d = [x for x in files if re.match(pattern, x)]
for fd in files_d:
try:
pfd = os.path.join(path_t, fd)
os.remove(pfd)
except:
print('error in deleting %s' % fd)
def get_all_process_legacy():
attrs_as_dict = ['pid', 'name', 'username', 'exe', 'create_time']
pid_list = psutil.pids()
list_process = []
for pid in pid_list:
try:
dict_t = psutil.Process(pid).as_dict(attrs=attrs_as_dict)
list_process.append(dict_t)
except:
traceback.print_exc()
return list_process
def get_all_process(name=None):
attrs_as_dict = ['pid', 'name', 'username', 'exe', 'create_time']
list_process = []
for r in psutil.process_iter():
try:
if (name is None) or (isinstance(name, str) and r.name() == name):
dict_t = r.as_dict(attrs=attrs_as_dict)
list_process.append(dict_t)
except:
traceback.print_exc()
return list_process
def get_all_process_tasklist(name=None):
keys = ['name', 'exe', 'type', '']
r = os.popen('tasklist')
A = r.read()
B = [x.split() for x in A.split('\n')]
# TODO: how to split??
def generate_new_files_save_yield(path_save, prefix_save, postfix_save='', try_ori=False, flag_dir = False,
return_path = False, join_underline=True):
if try_ori:
count = -1
else:
count = 0
max_count = 100000
if isinstance(prefix_save, str) and isinstance(postfix_save, str):
if join_underline and prefix_save[-1]!=JoinSep:
prefix_save = prefix_save+JoinSep
while count <= max_count:
if count == -1:
file_name = prefix_save.rstrip(JoinSep) + postfix_save
else:
file_name = prefix_save + str(count) + postfix_save
file_path_t = os.path.join(path_save, file_name)
if flag_dir:
flag_t = os.path.isdir(file_path_t)
else:
flag_t = os.path.isfile(file_path_t)
if not flag_t:
if return_path:
rt = os.path.join(path_save,file_name)
else:
rt = file_name
yield rt
count += 1
def formulate_list_of_dicts(list_dicts):
list_keys = union_all_keys(list_dicts)
list_result = arrange_list_dict_by_keys(list_dicts, list_keys)
return list_keys, list_result
def union_all_keys(list_dicts):
list_keys_raw = []
for dict_t in list_dicts:
if isinstance(dict_t, dict):
list_keys_raw.extend(dict_t.keys())
list_keys = list(set(list_keys_raw))
return list_keys
def arrange_list_dict_by_keys(list_dicts, list_keys, dict_translate=None):
num_keys = len(list_keys)
list_result = []
if isinstance(dict_translate, dict):
dict_translate_reverse = {v: k for k, v in dict_translate.items()}
keys_o_r = dict_translate_reverse.keys()
keys_o = dict_translate.keys()
else:
dict_translate_reverse = {}
keys_o_r = []
keys_o = []
for dict_t in list_dicts:
if isinstance(dict_t, dict):
keys_dict_t = dict_t.keys()
result_t = []
for key_t in list_keys:
appended = False
if key_t in keys_dict_t:
result_t.append(dict_t[key_t])
appended = True
elif key_t in keys_o:
key_t_n = dict_translate[key_t]
if key_t_n in keys_dict_t:
result_t.append(dict_t[key_t_n])
appended = True
elif key_t in keys_o_r:
key_t_n = dict_translate_reverse[key_t]
if key_t_n in keys_dict_t:
result_t.append(dict_t[key_t_n])
appended = True
if not appended:
result_t.append(None)
else:
result_t = [None] * num_keys
list_result.append(result_t)
return list_result
def gen_token():
return sha1(os.urandom(24)).hexdigest()
def hide_window():
hWnd = user32.GetForegroundWindow()
if hWnd:
user32.ShowWindow(hWnd, 2)
ctypes.windll.kernel32.CloseHandle(hWnd)
def foreach_window(hwnd, lParam):
global window_titles
if IsWindowVisible(hwnd):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
window_titles.append(buff.value)
return True
def foreach_window_hide(hwnd,window_name, lParam):
if IsWindowVisible(hwnd):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
if isinstance(buff.value,str):
if str.find(buff.value,window_name)!=-1:
user32.ShowWindow(hwnd, 2)
return True
return False
def hide_window_by_name(window_name):
def foreach_window_t(x,y):
return foreach_window_hide(x,window_name,y)
EnumWindows(EnumWindowsProc(foreach_window_t), 0)
def get_window_titles():
global window_titles
window_titles = []
EnumWindows(EnumWindowsProc(foreach_window), 0)
return window_titles
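# Illustrative use of generate_new_files_save_yield (directory and prefix are made up):
#   gen = generate_new_files_save_yield('.', 'report', '.txt', return_path=True)
#   first_free = next(gen)   # e.g. './report_0.txt' if that file does not exist yet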
if __name__=='__main__':
for i in range(100):
a = gen_token()
print(a)
|
458656
|
import re
import dateparser
import itertools
from malaya.num2word import to_cardinal
from malaya.text.function import (
is_english,
is_malay,
multireplace,
case_of,
replace_laugh,
replace_mengeluh,
)
from malaya.text.regex import (
_past_date_string,
_now_date_string,
_future_date_string,
_yesterday_tomorrow_date_string,
_depan_date_string,
_expressions,
_left_datetime,
_right_datetime,
_today_time,
_left_datetodaytime,
_right_datetodaytime,
_left_yesterdaydatetime,
_right_yesterdaydatetime,
_left_yesterdaydatetodaytime,
_right_yesterdaydatetodaytime,
)
from malaya.text.tatabahasa import (
date_replace,
consonants,
sounds,
bulan,
)
from malaya.text.normalization import (
_remove_postfix,
_normalize_title,
_is_number_regex,
_string_to_num,
_replace_compound,
cardinal,
digit_unit,
rom_to_int,
ordinal,
fraction,
money,
ignore_words,
digit,
)
from malaya.text.rules import rules_normalizer
from malaya.cluster import cluster_words
from malaya.function import validator
from herpetologist import check_type
from typing import Callable
import logging
logger = logging.getLogger('malaya.normalize')
def normalized_entity(normalized):
money_ = re.findall(_expressions['money'], normalized)
money_ = [(s, money(s)[1]) for s in money_]
dates_ = re.findall(_expressions['date'], normalized)
past_date_string_ = re.findall(_past_date_string, normalized)
logger.debug(f'past_date_string_: {past_date_string_}')
now_date_string_ = re.findall(_now_date_string, normalized)
logger.debug(f'now_date_string_: {now_date_string_}')
future_date_string_ = re.findall(_future_date_string, normalized)
logger.debug(f'future_date_string_: {future_date_string_}')
yesterday_date_string_ = re.findall(
_yesterday_tomorrow_date_string, normalized
)
logger.debug(f'yesterday_date_string_: {yesterday_date_string_}')
depan_date_string_ = re.findall(_depan_date_string, normalized)
logger.debug(f'depan_date_string_: {depan_date_string_}')
today_time_ = re.findall(_today_time, normalized)
logger.debug(f'today_time_: {today_time_}')
time_ = re.findall(_expressions['time'], normalized)
logger.debug(f'time_: {time_}')
left_datetime_ = [
f'{i[0]} {i[1]}' for i in re.findall(_left_datetime, normalized)
]
logger.debug(f'left_datetime_: {left_datetime_}')
right_datetime_ = [
f'{i[0]} {i[1]}' for i in re.findall(_right_datetime, normalized)
]
    logger.debug(f'right_datetime_: {right_datetime_}')
today_left_datetime_ = [
f'{i[0]} {i[1]}' for i in re.findall(_left_datetodaytime, normalized)
]
logger.debug(f'today_left_datetime_: {today_left_datetime_}')
today_right_datetime_ = [
f'{i[0]} {i[1]}' for i in re.findall(_right_datetodaytime, normalized)
]
logger.debug(f'today_right_datetime_: {today_right_datetime_}')
left_yesterdaydatetime_ = [
f'{i[0]} {i[1]}'
for i in re.findall(_left_yesterdaydatetime, normalized)
]
logger.debug(f'left_yesterdaydatetime_: {left_yesterdaydatetime_}')
right_yesterdaydatetime_ = [
f'{i[0]} {i[1]}'
for i in re.findall(_right_yesterdaydatetime, normalized)
]
logger.debug(f'right_yesterdaydatetime_: {right_yesterdaydatetime_}')
left_yesterdaydatetodaytime_ = [
f'{i[0]} {i[1]}'
for i in re.findall(_left_yesterdaydatetodaytime, normalized)
]
logger.debug(f'left_yesterdaydatetodaytime_: {left_yesterdaydatetodaytime_}')
right_yesterdaydatetodaytime_ = [
f'{i[0]} {i[1]}'
for i in re.findall(_right_yesterdaydatetodaytime, normalized)
]
logger.debug(f'right_yesterdaydatetodaytime_: {right_yesterdaydatetodaytime_}')
dates_ = (
dates_
+ past_date_string_
+ now_date_string_
+ future_date_string_
+ yesterday_date_string_
+ depan_date_string_
+ time_
+ today_time_
+ left_datetime_
+ right_datetime_
+ today_left_datetime_
+ today_right_datetime_
+ left_yesterdaydatetime_
+ right_yesterdaydatetime_
+ left_yesterdaydatetodaytime_
+ right_yesterdaydatetodaytime_
)
dates_ = [d.replace('.', ':') for d in dates_ if not isinstance(d, tuple)]
dates_ = [multireplace(s, date_replace) for s in dates_]
dates_ = [re.sub(r'[ ]+', ' ', s).strip() for s in dates_]
dates_ = cluster_words(dates_)
dates_ = {s: dateparser.parse(s) for s in dates_}
money_ = {s[0]: s[1] for s in money_}
return dates_, money_
def check_repeat(word):
    if len(word) > 1 and word[-1].isdigit() and not word[-2].isdigit():
repeat = int(word[-1])
word = word[:-1]
else:
repeat = 1
if repeat < 1:
repeat = 1
return word, repeat
def groupby(string):
results = []
for word in string.split():
if not (
_is_number_regex(word)
or re.findall(_expressions['url'], word)
or re.findall(_expressions['money'], word.lower())
or re.findall(_expressions['number'], word)
):
word = ''.join([''.join(s)[:2] for _, s in itertools.groupby(word)])
results.append(word)
return ' '.join(results)
def put_spacing_num(string):
string = re.sub('[A-Za-z]+', lambda ele: ' ' + ele[0] + ' ', string).split()
for i in range(len(string)):
if _is_number_regex(string[i]):
string[i] = ' '.join([to_cardinal(int(n)) for n in string[i]])
string = ' '.join(string)
return re.sub(r'[ ]+', ' ', string).strip()
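# For example (illustrative): put_spacing_num('abc123') -> 'abc satu dua tiga',
# i.e. alphabetic runs are separated out and each digit is spelled as a Malay cardinal.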
class Normalizer:
def __init__(self, tokenizer, speller=None):
self._tokenizer = tokenizer
self._speller = speller
@check_type
def normalize(
self,
string: str,
normalize_text: bool = True,
normalize_entity: bool = True,
normalize_url: bool = False,
normalize_email: bool = False,
normalize_year: bool = True,
normalize_telephone: bool = True,
normalize_date: bool = True,
normalize_time: bool = True,
check_english_func=is_english,
check_malay_func=is_malay,
**kwargs,
):
"""
Normalize a string.
Parameters
----------
string : str
normalize_text: bool, (default=True)
if True, will try to replace shortforms with internal corpus.
normalize_entity: bool, (default=True)
normalize entities, only effect `date`, `datetime`, `time` and `money` patterns string only.
normalize_url: bool, (default=False)
if True, replace `://` with empty and `.` with `dot`.
`https://huseinhouse.com` -> `https huseinhouse dot com`.
normalize_email: bool, (default=False)
if True, replace `@` with `di`, `.` with `dot`.
`huse<EMAIL>` -> `husein dot zol kosong lima di gmail dot com`.
normalize_year: bool, (default=True)
if True, `tahun 1987` -> `tahun sembilan belas lapan puluh tujuh`.
if True, `1970-an` -> `sembilan belas tujuh puluh an`.
if False, `tahun 1987` -> `tahun seribu sembilan ratus lapan puluh tujuh`.
normalize_telephone: bool, (default=True)
if True, `no 012-1234567` -> `no kosong satu dua, satu dua tiga empat lima enam tujuh`
normalize_date: bool, (default=True)
if True, `01/12/2001` -> `satu disember dua ribu satu`.
if True, `Jun 2017` -> `satu Jun dua ribu tujuh belas`.
if True, `2017 Jun` -> `satu Jun dua ribu tujuh belas`.
if False, `2017 Jun` -> `01/06/2017`.
if False, `Jun 2017` -> `01/06/2017`.
normalize_time: bool, (default=True)
if True, `pukul 2.30` -> `pukul dua tiga puluh minit`.
if False, `pukul 2.30` -> `'02:00:00'`
check_english_func: Callable, (default=malaya.text.is_english)
function to check a word in english dictionary, default is malaya.text.is_english.
check_malay_func: Callable, (default=malaya.text.is_malay)
function to check a word in malay dictionary, default is malaya.text.is_malay.
Returns
-------
string: {'normalize', 'date', 'money'}
"""
tokenized = self._tokenizer(string)
s = f'tokenized: {tokenized}'
logger.debug(s)
string = ' '.join(tokenized)
string = groupby(string)
if normalize_text:
string = replace_laugh(string)
string = replace_mengeluh(string)
string = _replace_compound(string)
if hasattr(self._speller, 'normalize_elongated'):
string = [
self._speller.normalize_elongated(word)
if len(re.findall(r'(.)\1{1}', word))
and not word[0].isupper()
and not word.lower().startswith('ke-')
and not _is_number_regex(word)
else word
for word in string.split()
]
string = ' '.join(string)
result, normalized = [], []
tokenized = self._tokenizer(string)
index = 0
while index < len(tokenized):
word = tokenized[index]
word_lower = word.lower()
word_upper = word.upper()
first_c = word[0].isupper()
s = f'index: {index}, word: {word}, queue: {result}'
logger.debug(s)
if word in '~@#$%^&*()_+{}|[:"\'];<>,.?/-':
s = f'index: {index}, word: {word}, condition punct'
logger.debug(s)
result.append(word)
index += 1
continue
normalized.append(rules_normalizer.get(word_lower, word_lower))
if word_lower in ignore_words:
s = f'index: {index}, word: {word}, condition ignore words'
logger.debug(s)
result.append(word)
index += 1
continue
if (
first_c
and not len(re.findall(_expressions['money'], word_lower))
and not len(re.findall(_expressions['date'], word_lower))
):
s = f'index: {index}, word: {word}, condition not in money and date'
logger.debug(s)
if word_lower in rules_normalizer and normalize_text:
result.append(case_of(word)(rules_normalizer[word_lower]))
index += 1
continue
elif word_upper not in ['KE', 'PADA', 'RM', 'SEN', 'HINGGA']:
result.append(
_normalize_title(word) if normalize_text else word
)
index += 1
continue
if check_english_func is not None:
s = f'index: {index}, word: {word}, condition check english'
logger.debug(s)
if check_english_func(word_lower):
result.append(word)
index += 1
continue
if check_malay_func is not None:
s = f'index: {index}, word: {word}, condition check malay'
logger.debug(s)
if check_malay_func(word_lower) and word_lower not in ['pada', 'ke']:
result.append(word)
index += 1
continue
if len(word) > 2 and normalize_text:
s = f'index: {index}, word: {word}, condition len(word) > 2 and norm text'
logger.debug(s)
if word[-2] in consonants and word[-1] == 'e':
word = word[:-1] + 'a'
if word[0] == 'x' and len(word) > 1 and normalize_text:
s = f'index: {index}, word: {word}, condition word[0] == `x` and len(word) > 1 and norm text'
logger.debug(s)
result_string = 'tak '
word = word[1:]
else:
s = f'index: {index}, word: {word}, condition else for (word[0] == `x` and len(word) > 1 and norm text)'
logger.debug(s)
result_string = ''
if word_lower == 'ke' and index < (len(tokenized) - 2):
s = f'index: {index}, word: {word}, condition ke'
logger.debug(s)
if tokenized[index + 1] == '-' and _is_number_regex(
tokenized[index + 2]
):
result.append(
ordinal(
word + tokenized[index + 1] + tokenized[index + 2]
)
)
index += 3
continue
elif tokenized[index + 1] == '-' and re.match(
'.*(V|X|I|L|D)', tokenized[index + 2]
):
result.append(
ordinal(
word
+ tokenized[index + 1]
+ str(rom_to_int(tokenized[index + 2]))
)
)
index += 3
continue
else:
result.append('ke')
index += 1
continue
if _is_number_regex(word) and index < (len(tokenized) - 2):
s = f'index: {index}, word: {word}, condition hingga'
logger.debug(s)
if tokenized[index + 1] == '-' and _is_number_regex(
tokenized[index + 2]
):
result.append(
to_cardinal(_string_to_num(word))
+ ' hingga '
+ to_cardinal(_string_to_num(tokenized[index + 2]))
)
index += 3
continue
if word_lower == 'pada' and index < (len(tokenized) - 3):
s = f'index: {index}, word: {word}, condition pada hari bulan'
logger.debug(s)
if (
_is_number_regex(tokenized[index + 1])
and tokenized[index + 2] in '/-'
and _is_number_regex(tokenized[index + 3])
):
result.append(
'pada %s hari bulan %s'
% (
to_cardinal(_string_to_num(tokenized[index + 1])),
to_cardinal(_string_to_num(tokenized[index + 3])),
)
)
index += 4
continue
if (
word_lower in ['tahun', 'thun']
and index < (len(tokenized) - 1)
and normalize_year
):
s = f'index: {index}, word: {word}, condition tahun'
logger.debug(s)
if (
_is_number_regex(tokenized[index + 1])
and len(tokenized[index + 1]) == 4
):
t = tokenized[index + 1]
if t[1] != '0':
l = to_cardinal(int(t[:2]))
r = to_cardinal(int(t[2:]))
c = f'{l} {r}'
else:
c = to_cardinal(int(t))
if (
index < (len(tokenized) - 3)
and tokenized[index + 2] == '-'
and tokenized[index + 3].lower() == 'an'
):
end = 'an'
plus = 4
else:
end = ''
plus = 2
result.append(f'tahun {c}{end}')
index += plus
continue
if _is_number_regex(word) and index < (len(tokenized) - 2):
s = f'index: {index}, word: {word}, condition fraction'
logger.debug(s)
if tokenized[index + 1] == '/' and _is_number_regex(
tokenized[index + 2]
):
result.append(
fraction(
word + tokenized[index + 1] + tokenized[index + 2]
)
)
index += 3
continue
if (
tokenized[index + 1] == '-'
and tokenized[index + 2].lower() == 'an'
and normalize_year
and len(word) == 4
):
t = word
if t[1] != '0':
l = to_cardinal(int(t[:2]))
r = to_cardinal(int(t[2:]))
c = f'{l} {r}'
else:
c = to_cardinal(int(t))
result.append(f'{c}an')
index += 3
continue
if re.findall(_expressions['money'], word_lower):
s = f'index: {index}, word: {word}, condition money'
logger.debug(s)
money_, _ = money(word)
result.append(money_)
if index < (len(tokenized) - 1):
if tokenized[index + 1].lower() in ('sen', 'cent'):
index += 2
else:
index += 1
else:
index += 1
continue
if re.findall(_expressions['date'], word_lower):
s = f'index: {index}, word: {word}, condition date'
logger.debug(s)
word = word_lower
word = multireplace(word, date_replace)
word = re.sub(r'[ ]+', ' ', word).strip()
try:
s = f'index: {index}, word: {word}, parsing date'
logger.debug(s)
parsed = dateparser.parse(word)
if parsed:
word = parsed.strftime('%d/%m/%Y')
if normalize_date:
day, month, year = word.split('/')
day = cardinal(day)
month = bulan[int(month)].title()
year = cardinal(year)
word = f'{day} {month} {year}'
except Exception as e:
logger.warning(str(e))
result.append(word)
index += 1
continue
if (
re.findall(_expressions['time'], word_lower)
or re.findall(_expressions['time_pukul'], word_lower)
):
s = f'index: {index}, word: {word}, condition time'
logger.debug(s)
word = word_lower
word = multireplace(word, date_replace)
word = re.sub(r'[ ]+', ' ', word).strip()
try:
s = f'index: {index}, word: {word}, parsing time'
logger.debug(s)
parsed = dateparser.parse(word.replace('.', ':'))
if parsed:
word = parsed.strftime('%H:%M:%S')
if normalize_time:
hour, minute, second = word.split(':')
hour = cardinal(hour)
if int(minute) > 0:
minute = cardinal(minute)
minute = f'{minute} minit'
else:
minute = ''
if int(second) > 0:
second = cardinal(second)
second = f'{second} saat'
else:
second = ''
word = f'pukul {hour} {minute} {second}'
word = re.sub(r'[ ]+', ' ', word).strip()
except Exception as e:
logger.warning(str(e))
result.append(word)
index += 1
continue
if re.findall(_expressions['hashtag'], word_lower):
s = f'index: {index}, word: {word}, condition hashtag'
logger.debug(s)
result.append(word)
index += 1
continue
if re.findall(_expressions['url'], word_lower):
s = f'index: {index}, word: {word}, condition url'
logger.debug(s)
if normalize_url:
word = word.replace('://', ' ').replace('.', ' dot ')
word = put_spacing_num(word)
word = word.replace('https', 'HTTPS').replace('http', 'HTTP').replace('www', 'WWW')
result.append(word)
index += 1
continue
if re.findall(_expressions['email'], word_lower):
s = f'index: {index}, word: {word}, condition email'
logger.debug(s)
if normalize_email:
word = (
word.replace('://', ' ')
.replace('.', ' dot ')
.replace('@', ' di ')
)
word = put_spacing_num(word)
result.append(word)
index += 1
continue
if re.findall(_expressions['phone'], word_lower):
s = f'index: {index}, word: {word}, condition phone'
logger.debug(s)
if normalize_telephone:
splitted = word.split('-')
if len(splitted) == 2:
left = put_spacing_num(splitted[0])
right = put_spacing_num(splitted[1])
word = f'{left}, {right}'
result.append(word)
index += 1
continue
if re.findall(_expressions['user'], word_lower):
s = f'index: {index}, word: {word}, condition user'
logger.debug(s)
result.append(word)
index += 1
continue
if (
re.findall(_expressions['temperature'], word_lower)
or re.findall(_expressions['distance'], word_lower)
or re.findall(_expressions['volume'], word_lower)
or re.findall(_expressions['duration'], word_lower)
or re.findall(_expressions['weight'], word_lower)
):
s = f'index: {index}, word: {word}, condition units'
logger.debug(s)
word = word.replace(' ', '')
result.append(digit_unit(word))
index += 1
continue
if (
re.findall(_expressions['percent'], word_lower)
):
s = f'index: {index}, word: {word}, condition percent'
logger.debug(s)
word = word.replace('%', '')
result.append(cardinal(word) + ' peratus')
index += 1
continue
if re.findall(_expressions['ic'], word_lower):
s = f'index: {index}, word: {word}, condition IC'
logger.debug(s)
result.append(digit(word))
index += 1
continue
if (
re.findall(_expressions['number'], word_lower)
and word_lower[0] == '0'
and '.' not in word_lower
):
s = f'index: {index}, word: {word}, condition digit and word[0] == `0`'
logger.debug(s)
result.append(digit(word))
index += 1
continue
cardinal_ = cardinal(word)
if cardinal_ != word:
s = f'index: {index}, word: {word}, condition cardinal'
logger.debug(s)
result.append(cardinal_)
index += 1
continue
normalized_ke = ordinal(word)
if normalized_ke != word:
s = f'index: {index}, word: {word}, condition normalized ke'
logger.debug(s)
result.append(normalized_ke)
index += 1
continue
word, end_result_string = _remove_postfix(word)
if normalize_text:
word, repeat = check_repeat(word)
else:
repeat = 1
if normalize_text:
s = f'index: {index}, word: {word}, condition normalize text'
logger.debug(s)
if word in sounds:
selected = sounds[word]
elif word in rules_normalizer:
selected = rules_normalizer[word]
elif self._speller:
selected = self._speller.correct(
word, string=' '.join(tokenized), index=index
)
else:
selected = word
else:
selected = word
selected = '-'.join([selected] * repeat)
result.append(result_string + selected + end_result_string)
index += 1
result = ' '.join(result)
normalized = ' '.join(normalized)
if normalize_entity:
dates_, money_ = normalized_entity(normalized)
else:
dates_, money_ = {}, {}
return {'normalize': result, 'date': dates_, 'money': money_}
def normalizer(speller=None, **kwargs):
"""
Load a Normalizer using any spelling correction model.
Parameters
----------
speller : spelling correction object, optional (default = None)
Returns
-------
result: malaya.normalize.Normalizer class
"""
validator.validate_object_methods(
speller, ['correct', 'normalize_elongated'], 'speller'
)
from malaya.preprocessing import Tokenizer
tokenizer = Tokenizer(**kwargs).tokenize
return Normalizer(tokenizer, speller)
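# --- Editor's usage sketch (not part of the original module) ---
# A hedged illustration of the factory above.  It assumes this file is exposed as
# `malaya.normalize` and that the method whose body appears earlier in the file is
# `Normalizer.normalize(string)`; both names are inferred from the docstring and
# the returned dict, so treat this as a sketch rather than documented API.
#
#     normalizer_ = normalizer()  # no speller, so spelling correction is skipped
#     out = normalizer_.normalize('pada 10/4 pukul 8.30 tahun 1989')
#     out['normalize']  # the normalized text
#     out['date']       # extracted dates (when normalize_entity is enabled)
#     out['money']      # extracted money values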
|
458675
|
import FWCore.ParameterSet.Config as cms
generator = cms.EDFilter("Herwig7GeneratorFilter",
hwpp_cmsDefaults = cms.vstring('+hwpp_basicSetup',
'+hwpp_setParticlesStableForDetector'),
run = cms.string('InterfaceMatchboxTest'),
dumpConfig = cms.untracked.string('HerwigConfig.in'),
repository = cms.string('${HERWIGPATH}/HerwigDefaults.rpo'),
dataLocation = cms.string('${HERWIGPATH:-6}'),
hwpp_setParticlesStableForDetector = cms.vstring('set /Herwig/Particles/mu-:Stable Stable',
'set /Herwig/Particles/mu+:Stable Stable',
'set /Herwig/Particles/Sigma-:Stable Stable',
'set /Herwig/Particles/Sigmabar+:Stable Stable',
'set /Herwig/Particles/Lambda0:Stable Stable',
'set /Herwig/Particles/Lambdabar0:Stable Stable',
'set /Herwig/Particles/Sigma+:Stable Stable',
'set /Herwig/Particles/Sigmabar-:Stable Stable',
'set /Herwig/Particles/Xi-:Stable Stable',
'set /Herwig/Particles/Xibar+:Stable Stable',
'set /Herwig/Particles/Xi0:Stable Stable',
'set /Herwig/Particles/Xibar0:Stable Stable',
'set /Herwig/Particles/Omega-:Stable Stable',
'set /Herwig/Particles/Omegabar+:Stable Stable',
'set /Herwig/Particles/pi+:Stable Stable',
'set /Herwig/Particles/pi-:Stable Stable',
'set /Herwig/Particles/K+:Stable Stable',
'set /Herwig/Particles/K-:Stable Stable',
'set /Herwig/Particles/K_S0:Stable Stable',
'set /Herwig/Particles/K_L0:Stable Stable'),
generatorModule = cms.string('/Herwig/Generators/EventGenerator'),
eventHandlers = cms.string('/Herwig/EventHandlers'),
hwpp_basicSetup = cms.vstring('#read Matchbox/GenericCollider.in',
'#create ThePEG::RandomEngineGlue /Herwig/RandomGlue',
'#set /Herwig/Generators/EventGenerator:RandomNumberGenerator /Herwig/RandomGlue',
'set /Herwig/Generators/EventGenerator:DebugLevel 2',
'set /Herwig/Generators/EventGenerator:PrintEvent 1',
'set /Herwig/Generators/EventGenerator:MaxErrors 10000'),
configFiles = cms.vstring(),
crossSection = cms.untracked.double(-1),
parameterSets = cms.vstring(
'Matchbox',
'hwpp_cmsDefaults'),
filterEfficiency = cms.untracked.double(1.0),
Matchbox = cms.vstring( 'read snippets/Matchbox.in',
'read snippets/PPCollider.in',
'read Matchbox/DefaultPPJets.in',
'cd /Herwig/EventHandlers',
'set EventHandler:LuminosityFunction:Energy 13000*GeV',
'## Model assumptions',
'read Matchbox/StandardModelLike.in',
'read Matchbox/DiagonalCKM.in',
'## Set the order of the couplings',
'cd /Herwig/MatrixElements/Matchbox',
'set Factory:OrderInAlphaS 2',
'set Factory:OrderInAlphaEW 0',
'## Select the process',
'do Factory:Process p p -> j j',
'#read Matchbox/MadGraph-GoSam.in',
'read Matchbox/MadGraph-MadGraph.in',
'#read Matchbox/MadGraph-OpenLoops.in',
'cd /Herwig/MatrixElements/Matchbox',
'set /Herwig/Cuts/FirstJet:PtMin 30.*GeV',
'#set /Herwig/MatrixElements/Matchbox/ScalesHTScale:JetPtCut 30.*GeV',
'set Factory:ScaleChoice Scales/SHatScale',
'#set Factory:ScaleChoice Scales/HTScale',
'read Matchbox/MCatNLO-DefaultShower.in',
'# read Matchbox/NLO-NoShower.in',
'# read Matchbox/LO-NoShower.in',
'read Matchbox/LO.in',
'read Matchbox/FiveFlavourScheme.in',
'read Matchbox/MMHT2014.in',
'do /Herwig/MatrixElements/Matchbox/Factory:ProductionMode',
)
)
ProductionFilterSequence = cms.Sequence(generator)
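# --- Editor's note (assumption, not part of the original fragment) ---
# Fragments like this are normally not run standalone: an external driver such as
# cmsDriver.py imports the fragment, picks up `generator`, and wires
# `ProductionFilterSequence` (the sequence name production workflows conventionally
# look for) into the full cmsRun configuration it builds.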
|
458692
|
import logging
import time
from modules.base_module import Module
from client import Client
import common
ACTIONS = ["danceBoy1", "danceBoy2", "danceBoy3", "danceBoy4",
"danceBoyKoreanStyle", "danceBoyIreland", "danceBoyDisco",
"danceBoyGrooveJam", "danceGirl1", "danceGirl2", "danceGirl3",
"danceGirl4", "danceGirlKoreanStyle", "danceGirlIreland",
"danceGirlDisco", "danceGirlGrooveJam", "idle1", "idle2", "idle3",
"idle4", "idle5", "Yes", "Rage", "Proud", "Gratitude", "Disgust",
"Discontent", "Dab", "Crying", "Applaud", "Amazement",
"EmotionYes", "EmotionRage", "Laugh1", "Laugh2", "Laugh3",
"lgh:Laugh1", "lgh:Laugh2", "lgh:Laugh3",
"couturierPodiumStairsItem9", "animation=PhotoPose2",
"animation=PhotoPose3", "animation=PhotoPose4",
"animation=PhotoPose5", "animation=PhotoPose6",
"animation=PhotoPose7", "animation=PhotoPose8",
"animation=PhotoPose9", "animation=PhotoPose10",
"animation=SceneDrums", "animation=SceneGuitar",
"animation=ScenePiano", "animation=SceneVocal",
"animation=FlashMobDance", "SitInBath@avatarAnimation",
"animation=SceneCursed2|disableLoop=1", "animation=SceneCursed1|disableLoop=1"]
def check_action(prefix, action):
if action in ACTIONS:
return True
elif action.startswith("sitItem"):
return True
elif action.startswith("car"):
return True
if prefix == "h":
try:
int(action.split(":")[0])
return True
except ValueError:
return False
return False
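# --- Editor's illustration (not part of the original module) ---
# check_action() whitelists client-supplied animation tags, for example:
#     check_action("l", "danceBoy1")    -> True   (known action)
#     check_action("l", "sitItem12")    -> True   ("sitItem"/"car" prefixes allowed)
#     check_action("h", "12:whatever")  -> True   (house prefix accepts "<int>:...")
#     check_action("l", "evilPayload")  -> False  (everything else is rejected)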
class Location(Module):
def __init__(self, server):
self.server = server
self.commands = {"r": self.room}
self.refresh_cooldown = {}
self.actions = {"ks": "kiss", "hg": "hug", "gf": "giveFive",
"k": "kickAss", "sl": "slap", "lks": "longKiss",
"hs": "handShake", "aks": "airKiss"}
async def room(self, msg, client):
subcommand = msg[1].split(".")[2]
if subcommand in ["u", "m", "k", "sa", "sl", "bd", "lks", "hs",
"ks", "hg", "gf", "aks"]:
msg.pop(0)
if msg[1]["uid"] != client.uid:
return
if "at" in msg[1]:
prefix = msg[0].split(".")[0]
if not check_action(prefix, msg[1]["at"]):
msg[1]["at"] = ""
if subcommand == "u":
client.position = (msg[1]["x"], msg[1]["y"])
client.direction = msg[1]["d"]
if "at" in msg[1]:
client.action_tag = msg[1]["at"]
else:
client.action_tag = ""
client.state = msg[1]["st"]
elif subcommand in self.actions:
action = self.actions[subcommand]
uid = msg[1]["tmid"]
rl = self.server.modules["rl"]
link = await rl.get_link(client.uid, uid)
if link:
await rl.add_progress(action, link)
online = self.server.online
try:
room = self.server.rooms[client.room].copy()
except KeyError:
return
for uid in room:
try:
tmp = online[uid]
except KeyError:
continue
await tmp.send(msg)
elif subcommand == "ra":
if client.uid in self.refresh_cooldown:
if time.time() - self.refresh_cooldown[client.uid] < 3:
return
self.refresh_cooldown[client.uid] = time.time()
await refresh_avatar(client, self.server)
else:
logging.warning(f"Command {msg[1]} not found")
async def join_room(self, client, room):
if room in self.server.rooms:
self.server.rooms[room].append(client.uid)
else:
self.server.rooms[room] = [client.uid]
client.room = room
client.position = (-1.0, -1.0)
client.action_tag = ""
client.state = 0
client.dimension = 4
plr = await gen_plr(client, self.server)
prefix = common.get_prefix(client.room)
online = self.server.online
new_room = self.server.rooms[room].copy()
location_name = room.split("_")[0]
if location_name == "canyon":
client.canyon_lid = "l1"
cc = await get_cc(room, self.server)
else:
cc = None
for uid in new_room:
if uid not in online:
continue
tmp = online[uid]
await tmp.send([f"{prefix}.r.jn", {"plr": plr, "cc": cc}])
await tmp.send([client.room, client.uid], type_=16)
async def leave_room(self, client):
if client.uid not in self.server.rooms[client.room]:
return
self.server.rooms[client.room].remove(client.uid)
old_room = self.server.rooms[client.room].copy()
if old_room:
prefix = common.get_prefix(client.room)
online = self.server.online
location_name = client.room.split("_")[0]
if location_name == "canyon":
cc = await get_cc(client.room, self.server)
else:
cc = None
for uid in old_room:
try:
tmp = online[uid]
except KeyError:
continue
await tmp.send([f"{prefix}.r.lv", {"uid": client.uid,
"cc": cc}])
await tmp.send([client.room, client.uid], type_=17)
else:
del self.server.rooms[client.room]
room = client.room.split("_")
if room[0] == "house" and room[1] == client.uid:
await self.server.modules["h"].owner_at_house(client.uid, False)
client.room = None
async def gen_plr(client, server):
if isinstance(client, Client):
uid = client.uid
else:
uid = client
apprnc = await server.get_appearance(uid)
if not apprnc:
return None
user_data = await server.get_user_data(uid)
clths = await server.get_clothes(uid, type_=2)
mobile_skin = await server.redis.get(f"uid:{uid}:mobile_skin")
mobile_accessory = await server.redis.get(f"uid:{uid}:mobile_ac")
mobile_ringtone = await server.redis.get(f"uid:{uid}:mobile_rt")
if not mobile_skin:
mobile_skin = "blackMobileSkin"
plr = {"uid": uid, "apprnc": apprnc, "clths": clths,
"mbm": {"ac": mobile_accessory, "sk": mobile_skin,
"rt": mobile_ringtone},
"usrinf": {"rl": user_data["role"], "sid": uid}}
bubble = await server.redis.get(f"uid:{uid}:bubble")
text_color = await server.redis.get(f"uid:{uid}:tcl")
plr["chtdcm"] = {"bdc": bubble, "tcl": text_color,
"spks": ["bushStickerPack", "froggyStickerPack",
"doveStickerPack", "jackStickerPack",
"catStickerPack", "sharkStickerPack"]}
if isinstance(client, Client):
if await server.redis.get(f"uid:{uid}:loc_disabled"):
shlc = False
else:
shlc = True
plr["locinfo"] = {"st": client.state, "s": "127.0.0.1",
"at": client.action_tag, "d": client.dimension,
"x": client.position[0], "y": client.position[1],
"shlc": shlc, "pl": "", "l": client.room}
cid = await server.redis.get(f"uid:{uid}:clan")
if cid:
clan = server.modules["cln"]
info = await clan.get_clan(cid)
plr["clif"] = {"tg": info["tag"], "icn": info["icon"],
"ctl": info["name"], "clv": info["lvl"], "cid": cid,
"crl": info["members"][uid]["role"]}
else:
plr["clif"] = None
plr["ci"] = await get_city_info(uid, server)
plr["pf"] = {"pf": {"jntr": {"tp": "jntr", "l": 20, "pgs": 0},
"phtghr": {"tp": "phtghr", "l": 20, "pgs": 0},
"grdnr": {"tp": "grdnr", "l": 20, "pgs": 0},
"vsgst": {"tp": "vsgst", "l": 20, "pgs": 0}}}
return plr
async def get_city_info(uid, server):
user_data = await server.get_user_data(uid)
rl = server.modules["rl"]
relations = await server.redis.smembers(f"rl:{uid}")
cmid = 0
ceid = 0
for link in relations:
relation = await rl._get_relation(uid, link)
if not relation:
continue
if relation["rlt"]["s"] // 10 == 7:
cmid = relation["uid"]
if relation["rlt"]["s"] // 10 == 6:
ceid = relation["uid"]
if ceid and cmid:
break
if await server.redis.get(f"uid:{uid}:hide_crown"):
show_crown = False
else:
show_crown = True
psrtdcr = await server.redis.get(f"uid:{uid}:psrtdcr")
if psrtdcr:
psrtdcr = int(psrtdcr)
else:
psrtdcr = 1
snowscore = await server.redis.get(f"uid:{uid}:snowscore")
if snowscore:
snowscore = int(snowscore)
else:
snowscore = 0
plcmt = {"pc": {"snowboardRating": {"uid": 0, "ct": 2, "cid": 812,
"cr": snowscore}}}
ci = {"exp": user_data["exp"], "crt": user_data["crt"],
"hrt": user_data["hrt"], "fexp": 0, "gdc": 0, "lgt": 0,
"vip": user_data["premium"], "vexp": user_data["prem_time"],
"vsexp": user_data["prem_time"], "vsact": True, "vret": 0,
"vfgc": 0, "ceid": ceid, "cmid": cmid, "dr": True, "spp": 0,
"tts": None, "eml": None, "ys": 0, "ysct": 0, "fak": None,
"shcr": show_crown, "gtrfrd": 0, "strfrd": 0, "rtrtm": 0,
"kyktid": None, "actrt": user_data["act"], "compid": 0,
"actrp": 0, "actrd": 1899999999, "shousd": False, "rpt": 0,
"as": None, "lvt": user_data["lvt"], "lrnt": 0, "lwts": 0,
"skid": "iceRinkSkate1", "skrt": int(time.time()+10000000),
"bcld": 0, "trid": user_data["trid"], "trcd": 0, "sbid": None,
"sbrt": 0, "plcmt": plcmt, "pamns": {"amn": []}, "crst": 0,
"psrtdcr": psrtdcr, "dl": True}
if user_data["premium"]:
ci["actrp"] = 1
ci["rpt"] = 600
return ci
async def refresh_avatar(client, server):
if not client.room:
return
plr = await gen_plr(client, server)
prefix = common.get_prefix(client.room)
online = server.online
room = server.rooms[client.room].copy()
for uid in room:
try:
tmp = online[uid]
except KeyError:
continue
await tmp.send([f"{prefix}.r.ra", {"plr": plr}])
async def get_cc(room, server):
room = server.rooms[room].copy()
online = server.online
cc = {"cc": []}
i = 1
for uid in room:
if uid not in online:
continue
car = await get_car(uid, server.redis)
cc["cc"].append({'pss': [uid], 'uid': uid,
'ctid': car, 'gtp': '', 'sid': str(i)})
i += 1
return cc
async def get_car(uid, r):
for room in await r.smembers(f"rooms:{uid}"):
for item in await r.smembers(f"rooms:{uid}:{room}:items"):
if "car" in item.lower() or "bike" in item.lower() or \
"tank" in item.lower():
return item.split("_")[0]
return "carPtcChrry"
|
458716
|
import discord, random
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.utils.manage_commands import create_option, create_choice
class Slash(commands.Cog):
def __init__(self, bot):
self.bot = bot
@cog_ext.cog_slash(name="rps", description="Jouer au rock scroll scissors !", options=[
create_option(
name="joueur",
description="Pierre, scroll ou scissors ?",
option_type=3,
required=True,
choices=[
create_choice(
name="pierre",
value="rock"
),
create_choice(
name="papier",
value="scroll"
),
create_choice(
name="ciseaux",
value="scissors"
)])])
async def _rps(self, ctx, joueur: str):
bot_ = ["rock", "scroll", "scissors"]
bot_ = random.choice(bot_)
bot_emoji = f":{bot_}:"
joueur_emoji = f":{joueur}:"
if joueur == bot_: msg = ":crossed_swords: Égalité !"
elif joueur == "rock" and bot_ == "scroll": msg = "Tu as perdu..."
elif joueur == "scroll" and bot_ == "scissors": msg = "Tu as perdu..."
elif joueur == "scissors" and bot_ == "rock": msg = "Tu as perdu..."
elif joueur == "rock" and bot_ == "scissors": msg = "Tu as gagné !"
elif joueur == "scroll" and bot_ == "rock": msg = "Tu as gagné !"
elif joueur == "scissors" and bot_ == "scroll": msg = "Tu as gagné !"
bot_ = bot_.replace("rock", "pierre").replace("scroll", "papier").replace("scissors", "ciseaux")
joueur = joueur.replace("rock", "pierre").replace("scroll", "papier").replace("scissors", "ciseaux")
embed = discord.Embed(title="<NAME>")
embed.add_field(name="** **", value=f"{joueur_emoji} {ctx.author.mention} : {joueur}\n{bot_emoji} <@760171813866700850> : {bot_}", inline=False)
embed.add_field(name="** **", value=msg, inline=False)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Slash(bot))
def teardown(bot):
    bot.remove_cog("Slash")  # the cog registers under its class name ("Slash"), not "rps"
|
458749
|
from recipe_scrapers.tastesoflizzyt import TastesOfLizzyT
from tests import ScraperTest
class TestTastesOfLizzyTScraper(ScraperTest):
scraper_class = TastesOfLizzyT
def test_host(self):
self.assertEqual("tastesoflizzyt.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.tastesoflizzyt.com/soft-baked-gingerbread-cookies/",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Soft Gingerbread Cookies")
def test_total_time(self):
self.assertEqual(27, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("60 serving(s)", self.harvester_class.yields())
def test_ingredients(self):
self.assertCountEqual(
[
"1 cup shortening",
"1 cup brown sugar packed",
"1 cup molasses",
"1 cup buttermilk",
"5 1/2 cups flour",
"4 teaspoons baking soda",
"1 teaspoon ginger",
"3/4 teaspoon cinnamon",
"1/4 teaspoon nutmeg",
"1/4 teaspoon cloves",
"1 teaspoons salt",
"1/2 cup extra sugar for rolling dough",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
self.assertEqual(
"In a large bowl, cream together the shortening, brown sugar, molasses and buttermilk.\nIn a separate bowl, sift together the flour, baking soda, ginger, cinnamon, nutmeg, cloves and salt.\nAdd the dry ingredients to the creamed sugar mixture and mix well.\nRoll the dough into balls and then roll the balls in sugar.\nPlace the cookie dough balls on an ungreased cookie sheet and bake at 350 degrees for 11-12 minutes.\nAllow the cookies to cool on a wire rack and then store in an airtight container.",
self.harvester_class.instructions(),
)
|
458777
|
import abc
import asyncio
from typing import AnyStr, ClassVar, List, Optional, TypeVar, Dict, Mapping
T = TypeVar("T")
class Response(abc.ABC):
status_code: int
status_codes_success: ClassVar[List[int]]
content_type: str
async def read(self) -> AnyStr:
"""
Should return json-serializable bytes or str object
"""
raise NotImplementedError
async def close(self):
"""
Will be called after reading response [Guaranteed in core.request.Requests::make_return]
"""
raise NotImplementedError
class ConnectorException(Exception):
...
class Connector(abc.ABC):
def __init__(
self,
timeout: float,
default_headers: Mapping[str, str],
loop: Optional[asyncio.AbstractEventLoop] = None,
):
"""
        :param timeout: request timeout; must be convertible to float
:param default_headers: flat [str,str] mapping
        :param loop: an asyncio-compatible event loop (optional)
"""
self.timeout = timeout
self.default_headers = default_headers
self.loop = loop
async def request(
self,
method: str,
url: str,
*,
params: Optional[Dict[str, str]] = None,
data: Optional[AnyStr] = None,
headers: Optional[Dict[str, str]] = None,
) -> Response:
"""
        Should implement an HTTP request for the given method
:param method: HTTP method
:param url: URL string
        :param params: Dictionary holding query parameters (appended as url + "?" + key=val + sep)
:param data: Request body
:param headers: Dictionary holding headers
            (default headers have lower priority; entries passed here override them)
:return: Implemented `Response` object
"""
raise NotImplementedError
async def close(self):
"""
        Should "close" the connector, e.g. shut down any open streams or sessions.
        May be called several times, so a `closed` flag may be required.
"""
raise NotImplementedError
@classmethod
def from_connector(cls, connector: 'Connector') -> 'Connector':
"""
        Build an instance of the current connector class from an existing
        connector (implemented here, not abstract).
        :param connector: old connector to be replaced
"""
return cls(
timeout=connector.timeout,
default_headers=connector.default_headers,
loop=connector.loop
)
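# --- Editor's sketch (not part of the original module) ---
# A minimal concrete implementation of the two abstract classes above, written
# against aiohttp.  aiohttp is not a dependency declared by this module and the
# class names `AiohttpResponse` / `AiohttpConnector` are hypothetical, so the
# import is kept optional and the sketch illustrates the contract only.
try:
    import aiohttp
except ImportError:  # keep this module importable without aiohttp installed
    aiohttp = None


class AiohttpResponse(Response):
    status_codes_success = [200]

    def __init__(self, raw):
        self._raw = raw
        self.status_code = raw.status
        self.content_type = raw.content_type

    async def read(self) -> bytes:
        # json-serializable bytes, as required by the abstract contract
        return await self._raw.read()

    async def close(self):
        self._raw.close()


class AiohttpConnector(Connector):
    def __init__(self, timeout, default_headers, loop=None):
        super().__init__(timeout, default_headers, loop)
        # The loop argument is accepted for interface compatibility but not
        # forwarded; recent aiohttp versions manage the event loop themselves.
        self._session = aiohttp.ClientSession(
            timeout=aiohttp.ClientTimeout(total=float(timeout)),
            headers=dict(default_headers),
        )

    async def request(self, method, url, *, params=None, data=None, headers=None):
        # per-request headers override the defaults, matching the docstring above
        merged = {**dict(self.default_headers), **(headers or {})}
        raw = await self._session.request(
            method, url, params=params, data=data, headers=merged
        )
        return AiohttpResponse(raw)

    async def close(self):
        if not self._session.closed:
            await self._session.close()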
|
458784
|
from django.contrib.auth import get_user_model
from graphene import Node, Field, GlobalID, String
from graphene_django.types import DjangoObjectType, ObjectType
from polls.schema import PollQueries
from users.jwt_schema import TokensInterface
class UserNode(DjangoObjectType):
class Meta:
model = get_user_model()
only_fields = (
'id',
'last_login',
'is_superuser',
'username',
'first_name',
'last_name',
'email',
'is_staff',
'is_active',
'date_joined',
)
interfaces = (Node, TokensInterface)
class Viewer(ObjectType, PollQueries):
id = GlobalID()
user = Field(UserNode, jwt_token=String())
class Meta:
interfaces = (TokensInterface,)
|
458795
|
import random
import torch
import torch.nn as nn
from ..util import box_ops
from ..util.misc import NestedTensor
from .deformable_detr import DeformableDETR
from .detr import DETR
class DETRTrackingBase(nn.Module):
def __init__(self,
track_query_false_positive_prob=0.0,
track_query_false_negative_prob=0.0,
track_query_noise=0.0,
matcher=None):
self._matcher = matcher
self._track_query_false_positive_prob = track_query_false_positive_prob
self._track_query_false_negative_prob = track_query_false_negative_prob
self._track_query_noise = track_query_noise
self._tracking = False
    def train(self, mode: bool = True):
        """Sets the module in train mode and switches tracking mode off."""
self._tracking = False
return super().train(mode)
def tracking(self):
"""Sets the module in tracking mode."""
self.eval()
self._tracking = True
def forward(self, samples: NestedTensor, targets: list = None):
if targets is not None and not self._tracking:
prev_out, *_ = super().forward([targets[0]['prev_image']])
prev_outputs_without_aux = {
k: v for k, v in prev_out.items() if 'aux_outputs' not in k}
prev_targets = [
{k.replace('prev_', ''): v for k, v in target.items() if "prev" in k}
for target in targets]
prev_indices = self._matcher(prev_outputs_without_aux, prev_targets)
for i, (target, prev_ind) in enumerate(zip(targets, prev_indices)):
prev_out_ind, prev_target_ind = prev_ind
# random subset
if self._track_query_false_negative_prob:
random_subset_mask = torch.empty(len(prev_target_ind)).uniform_()
random_subset_mask = random_subset_mask.ge(
self._track_query_false_negative_prob)
prev_out_ind = prev_out_ind[random_subset_mask]
prev_target_ind = prev_target_ind[random_subset_mask]
# detected prev frame tracks
prev_track_ids = target['prev_track_ids'][prev_target_ind]
# match track ids between frames
target_ind_match_matrix = prev_track_ids.unsqueeze(dim=1).eq(target['track_ids'])
target_ind_matching = target_ind_match_matrix.any(dim=1)
target_ind_matched_idx = target_ind_match_matrix.nonzero()[:, 1]
# current frame track ids detected in the prev frame
# track_ids = target['track_ids'][target_ind_matched_idx]
# index of prev frame detection in current frame box list
target['track_query_match_ids'] = target_ind_matched_idx
# random false positives
prev_boxes_matched = prev_out['pred_boxes'][i, prev_out_ind[target_ind_matching]]
not_prev_out_ind = torch.arange(prev_out['pred_boxes'].shape[1])
not_prev_out_ind = [
ind.item()
for ind in not_prev_out_ind
if ind not in prev_out_ind]
random_false_out_ind = []
for prev_box_matched in prev_boxes_matched:
if random.uniform(0, 1) < self._track_query_false_positive_prob:
prev_boxes_unmatched = prev_out['pred_boxes'][i, not_prev_out_ind]
# only cxcy
# box_dists = prev_box_matched[:2].sub(prev_boxes_unmatched[:, :2]).abs()
# box_dists = box_dists.pow(2).sum(dim=-1).sqrt()
# box_weights = 1.0 / box_dists.add(1e-8)
prev_box_ious, _ = box_ops.box_iou(
box_ops.box_cxcywh_to_xyxy(prev_box_matched.unsqueeze(dim=0)),
box_ops.box_cxcywh_to_xyxy(prev_boxes_unmatched))
box_weights = prev_box_ious[0]
if box_weights.gt(0.0).any():
random_false_out_idx = not_prev_out_ind.pop(
torch.multinomial(box_weights.cpu(), 1).item())
random_false_out_ind.append(random_false_out_idx)
prev_out_ind = torch.tensor(prev_out_ind.tolist() + random_false_out_ind).long()
target_ind_matching = torch.tensor(
target_ind_matching.tolist() + [False, ] * len(random_false_out_ind)).bool()
# matches indices with 1.0 and not matched -1.0
track_queries_match_mask = torch.ones_like(target_ind_matching).float()
track_queries_match_mask[~target_ind_matching] = -1.0
# set prev frame info
hs_embeds = prev_out['hs_embed'][i, prev_out_ind]
if self._track_query_noise and not torch.isnan(hs_embeds.std()).any():
track_query_noise = torch.randn_like(hs_embeds) \
* hs_embeds.std(dim=1, keepdim=True)
hs_embeds = hs_embeds + track_query_noise * self._track_query_noise
# hs_embeds = track_query_noise * self._track_query_noise \
# + hs_embeds * (1 - self._track_query_noise)
target['track_query_hs_embeds'] = hs_embeds
target['track_query_boxes'] = prev_out['pred_boxes'][i, prev_out_ind].detach()
# add zeros for detection object queries
device = track_queries_match_mask.device
track_queries_match_mask = torch.tensor(
track_queries_match_mask.tolist() + [0, ] * self.num_queries)
target['track_queries_match_mask'] = track_queries_match_mask.to(device)
out, targets, features, memory, hs = super().forward(samples, targets)
return out, targets, features, memory, hs
# TODO: with meta classes
class DETRTracking(DETRTrackingBase, DETR):
def __init__(self, tracking_kwargs, detr_kwargs):
DETR.__init__(self, **detr_kwargs)
DETRTrackingBase.__init__(self, **tracking_kwargs)
class DeformableDETRTracking(DETRTrackingBase, DeformableDETR):
def __init__(self, tracking_kwargs, detr_kwargs):
DeformableDETR.__init__(self, **detr_kwargs)
DETRTrackingBase.__init__(self, **tracking_kwargs)
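# --- Editor's note (illustration only, not part of the original model code) ---
# The track-query augmentation in DETRTrackingBase.forward() applies two random
# perturbations to the previous-frame matches:
#
#   * false negatives: each matched previous query is dropped with probability
#     track_query_false_negative_prob, i.e.
#         keep = torch.empty(n).uniform_().ge(p_fn)
#         prev_out_ind, prev_target_ind = prev_out_ind[keep], prev_target_ind[keep]
#
#   * false positives: for each kept match, with probability
#     track_query_false_positive_prob an unmatched previous prediction is sampled
#     (IoU-weighted via torch.multinomial) and appended as a track query without a
#     ground-truth target; its entry in track_queries_match_mask is set to -1.0.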
|
458797
|
from __future__ import print_function
from CherwellAPI import CherwellClient
#########################################################################################
# This example demonstrates how the CherwellAPI Connection object can be used to
# retrieve the business object template for a Cherwell Business Object
###########################################################################################
#############################################
# Change the following to suit your instance
#############################################
base_uri = "http://<Your Cherwell Host here>"
username = "<Your UserName Here>"
password = "<<PASSWORD>>"
api_key = "<Your Cherwell REST API Client Key here>"
# Create a new CherwellClient connection
cherwell_client = CherwellClient.Connection(base_uri, api_key, username, password)
# Show the template for the incident object
print("Cherwell Template for Incident is:\n{}".format(
cherwell_client.get_business_object_template("Incident")
))
|
458818
|
from .attrdict import AttrDict
class Configuration(AttrDict):
"""Represents the running configuration.
This class never raises IndexError, instead it will return None if a
section or option does not yet exist.
"""
def __getitem__(self, key):
"""Returns a config section, creating it if it doesn't exist yet.
"""
if key not in self._data:
self._data[key] = ConfigurationSection(self)
return self._data[key]
class ConfigurationSection(Configuration):
def __init__(self, parent, *args, **kwargs):
super(ConfigurationSection, self).__init__(*args, **kwargs)
self._parent = parent
def __getitem__(self, key):
"""Returns a config value, pulling from the `user` section as a fallback.
        This is called when the value is accessed either via the get method or through [ ] indexing.
"""
if key in self._data and self._data.get(key) is not None:
return self._data[key]
elif key in self._parent.user:
return self._parent.user[key]
return None
def __getattr__(self, key):
"""Returns the config value from the `user` section.
This is called when the attribute is accessed via dot notation but does not exist.
"""
if key[0] != '_' and key in self._parent['user']:
return self._parent['user'][key]
return None
def __setattr__(self, key, value):
"""Sets dictionary value when an attribute is set.
"""
super().__setattr__(key, value)
if key[0] != '_':
self._data[key] = value
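# --- Editor's usage sketch (illustration only, not part of the original module) ---
# Shows the fallback behaviour documented above, assuming AttrDict provides the
# usual dict-style item assignment and attribute access:
#
#     config = Configuration()
#     config['user']['color'] = True      # the shared `user` section
#     section = config['compile']         # created on first access, never raises
#     section['color']                    # -> True, falls back to config['user']
#     section['missing']                  # -> None instead of raising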
class SubparserWrapper(object):
"""Wrap subparsers so we can track what options the user passed.
"""
def __init__(self, cli, submodule, subparser):
self.cli = cli
self.submodule = submodule
self.subparser = subparser
for attr in dir(subparser):
if not hasattr(self, attr):
setattr(self, attr, getattr(subparser, attr))
    def completer(self, completer):
        """Add an argcomplete completer to this subcommand.
"""
self.subparser.completer = completer
def add_argument(self, *args, **kwargs):
"""Add an argument for this subcommand.
This also stores the default for the argument in `self.cli.default_arguments`.
"""
if kwargs.get('action') == 'store_boolean':
# Store boolean will call us again with the enable/disable flag arguments
return handle_store_boolean(self, *args, **kwargs)
completer = None
if kwargs.get('completer'):
completer = kwargs['completer']
del kwargs['completer']
self.cli.acquire_lock()
argument_name = get_argument_name(self.cli._arg_parser, *args, **kwargs)
if completer:
self.subparser.add_argument(*args, **kwargs).completer = completer
else:
self.subparser.add_argument(*args, **kwargs)
if kwargs.get('action') == 'store_false':
self.cli._config_store_false.append(argument_name)
if kwargs.get('action') == 'store_true':
self.cli._config_store_true.append(argument_name)
if self.submodule not in self.cli.default_arguments:
self.cli.default_arguments[self.submodule] = {}
self.cli.default_arguments[self.submodule][argument_name] = kwargs.get('default')
self.cli.release_lock()
def get_argument_strings(arg_parser, *args, **kwargs):
"""Takes argparse arguments and returns a list of argument strings or positional names.
"""
try:
return arg_parser._get_optional_kwargs(*args, **kwargs)['option_strings']
except ValueError:
return [arg_parser._get_positional_kwargs(*args, **kwargs)['dest']]
def get_argument_name(arg_parser, *args, **kwargs):
"""Takes argparse arguments and returns the dest name.
"""
try:
return arg_parser._get_optional_kwargs(*args, **kwargs)['dest']
except ValueError:
return arg_parser._get_positional_kwargs(*args, **kwargs)['dest']
def handle_store_boolean(self, *args, **kwargs):
"""Does the add_argument for action='store_boolean'.
"""
disabled_args = None
disabled_kwargs = kwargs.copy()
disabled_kwargs['action'] = 'store_false'
disabled_kwargs['dest'] = get_argument_name(getattr(self, 'cli', self)._arg_parser, *args, **kwargs)
disabled_kwargs['help'] = 'Disable ' + kwargs['help']
kwargs['action'] = 'store_true'
kwargs['help'] = 'Enable ' + kwargs['help']
for flag in args:
if flag[:2] == '--':
disabled_args = ('--no-' + flag[2:],)
break
self.add_argument(*args, **kwargs)
self.add_argument(*disabled_args, **disabled_kwargs)
return (args, kwargs, disabled_args, disabled_kwargs)
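# --- Editor's illustration (not part of the original module) ---
# With action='store_boolean', a single declaration on a SubparserWrapper such as
#
#     wrapper.add_argument('--color', action='store_boolean', default=True,
#                          help='colored output')
#
# is expanded by handle_store_boolean() into two real argparse flags sharing one
# destination:
#     --color     action='store_true',  help='Enable colored output'
#     --no-color  action='store_false', help='Disable colored output', dest='color'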
|
458855
|
ABORTED = 'Aborted'
FAILED = 'Failed'
NOT_PROCESSED = 'Not Processed'
COMPLETED = 'Completed'
ERROR_STATES = (
ABORTED,
FAILED,
NOT_PROCESSED,
)
|
458858
|
import torch
import numpy as np
from torch import nn
from util import denormalize_image
from skimage.metrics import structural_similarity
class HybridLoss(nn.Module):
def __init__(self, device, l1_weight=None, l2_weight=None):
super().__init__()
self.l1_loss = nn.L1Loss(reduction="none").to(device)
self.l2_loss = nn.MSELoss(reduction="none").to(device)
if l1_weight is None:
l1_weight = 1.
if l2_weight is None:
l2_weight = 1.
self.l1_weight = l1_weight
self.l2_weight = l2_weight
def _compute_psnr(self, gt_pixels, rendered_pixels):
l2 = self.l2_loss(gt_pixels.float(), rendered_pixels.float()).mean()
PSNR = 20 * np.log10(255.)
if l2.item() != 0:
PSNR -= 10 * np.log10(l2.item())
return float(PSNR)
@staticmethod
def _compute_ssim(img1, img2, mask):
i1 = img1.permute(0, 2, 3, 1).cpu().numpy()
i2 = img2.permute(0, 2, 3, 1).cpu().numpy()
ssim_img = []
for i in range(img1.shape[0]):
mssim, S = structural_similarity(i1[i], i2[i], data_range=2, multichannel=True, full=True)
ssim_img.append(torch.from_numpy(S).permute(2, 0, 1))
ssim_img = torch.stack(ssim_img).to(device=mask.device)
ssim_img = ssim_img.permute(0, 2, 3, 1)[mask]
ssim = ssim_img.mean()
return ssim.item()
# mask = mask.permute(0, 2, 3, 1).cpu().numpy()
# ssim = compare_ssim(i1, i2, multichannel=True, full=True)
# (_, channel, _, _) = img1.size()
# # compute window
# gauss = [math.exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]
# gauss = torch.Tensor(gauss)
# gauss = gauss / gauss.sum()
#
# _1D_window = gauss.unsqueeze(1)
# _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
# window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
# window = window.to(device=img1.device, dtype=img1.dtype)
#
# # compute ssim map
# mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
# mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
#
# mu1_sq, mu2_sq, mu1_mu2 = mu1.pow(2), mu2.pow(2), mu1 * mu2
#
# sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
# sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
# sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
#
# # BCHW
# ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
# ssim_map = ssim_map.permute(0, 2, 3, 1)
# ssim_map = ssim_map.masked_select(mask).view(-1, 3)
# ssim_score = ssim_map.mean()
# return ssim_score
def __call__(self, gt_image, rendered_image, mask, omit_metrics=False):
assert gt_image.shape == rendered_image.shape
B, C, H, W = gt_image.shape
mask = mask.view(B, H, W)
gt_pixels = gt_image.permute(0, 2, 3, 1)[mask].view(-1, C)
rendered_pixels = rendered_image.permute(0, 2, 3, 1)[mask].view(-1, C)
l1_loss = self.l1_loss(gt_pixels, rendered_pixels).mean(dim=1).mean()
l2_loss = self.l2_loss(gt_pixels, rendered_pixels).mean(dim=1).mean()
loss_obj = self.l1_weight * l1_loss + self.l2_weight * l2_loss
loss_logs = {
'l1_loss': l1_loss.detach().item(),
'l2_loss': l2_loss.detach().item(),
'l_loss': loss_obj.detach().item(),
}
if not omit_metrics:
loss_logs['SSIM'] = self._compute_ssim(gt_image, rendered_image, mask)
gt_pixels = denormalize_image(gt_pixels)
rendered_pixels = denormalize_image(rendered_pixels)
loss_logs['PSNR'] = self._compute_psnr(gt_pixels, rendered_pixels)
return loss_obj, loss_logs
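if __name__ == "__main__":
    # Editor's smoke test (illustration, not part of the original module; it assumes
    # the module's own imports, e.g. `util`, resolve when run from the project root).
    # omit_metrics=True keeps the run limited to the masked L1/L2 terms.
    device = torch.device("cpu")
    loss_fn = HybridLoss(device, l1_weight=1.0, l2_weight=0.5)
    gt = torch.rand(2, 3, 8, 8)
    rendered = torch.rand(2, 3, 8, 8)
    mask = torch.ones(2, 1, 8, 8, dtype=torch.bool)
    loss, logs = loss_fn(gt, rendered, mask, omit_metrics=True)
    print(loss.item(), logs)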
|
458904
|
import solana_rpc as rpc
from common import ValidatorConfig
from common import print_json
from common import measurement_from_fields
from monitoring_config import config
def calculate_output_data(config: ValidatorConfig):
data = rpc.load_solana_validators(config)
measurements = []
for info in data:
measurement = measurement_from_fields("validators", info, config)
measurements.append(measurement)
return measurements
print_json(calculate_output_data(config))
|
458907
|
from typing import Iterable, TypedDict
class OptionDict(TypedDict):
contractSymbol: str
strike: float
currency: str
lastPrice: float
change: float
percentChange: float
ask: float
contractSize: str
expiration: int
lastTradeDate: int
impliedVolatility: float
inTheMoney: bool
volume: int
bid: float
openInterest: int
class OptionQuoteDictBase(TypedDict):
language: str
region: str
quoteType: str
quoteSourceName: str
triggerable: bool
currency: str
sharesOutstanding: int
bookValue: float
fiftyDayAverage: float
fiftyDayAverageChange: float
fiftyDayAverageChangePercent: float
twoHundredDayAverage: float
twoHundredDayAverageChange: float
twoHundredDayAverageChangePercent: float
marketCap: int
forwardPE: float
priceToBook: float
sourceInterval: int
exchangeDataDelayedBy: int
tradeable: bool
ask: float
bidSize: int
askSize: int
fullExchangeName: str
financialCurrency: str
regularMarketOpen: float
averageDailyVolume3Month: int
averageDailyVolume10Day: int
fiftyTwoWeekLowChange: float
fiftyTwoWeekLowChangePercent: float
fiftyTwoWeekRange: str
fiftyTwoWeekHighChange: float
fiftyTwoWeekHighChangePercent: float
fiftyTwoWeekLow: float
fiftyTwoWeekHigh: float
dividendDate: int
earningsTimestamp: int
earningsTimestampStart: int
earningsTimestampEnd: int
trailingAnnualDividendRate: float
trailingPE: float
trailingAnnualDividendYield: float
epsTrailingTwelveMonths: float
epsForward: float
epsCurrentYear: float
priceEpsCurrentYear: float
exchange: str
shortName: str
longName: str
messageBoardId: str
exchangeTimezoneName: str
exchangeTimezoneShortName: str
gmtOffSetMilliseconds: int
market: str
esgPopulated: bool
marketState: str
firstTradeDateMilliseconds: int
priceHint: int
regularMarketChange: float
regularMarketChangePercent: float
regularMarketTime: float
regularMarketPrice: float
regularMarketDayHigh: float
regularMarketDayRange: str
regularMarketDayLow: float
regularMarketVolume: int
regularMarketPreviousClose: float
bid: float
displayName: str
symbol: str
class OptionQuoteDict(OptionQuoteDictBase, total=False):
preMarketChange: float
preMarketChangePercent: float
preMarketTime: float
preMarketPrice: float
class OptionsDict(TypedDict):
expirationDate: int
hasMiniOptions: bool
calls: Iterable[OptionDict]
puts: Iterable[OptionDict]
class OptionsDataRecord(OptionDict):
underlyingSymbol: str
type: str
|
458930
|
import json
from datetime import timedelta
from unittest import mock
import pytest
from boto3.exceptions import RetriesExceededError, S3UploadFailedError
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
from django.db.models import FileField
from django.test import override_settings
from wagtail.core.rich_text import RichText
from core import cms_slugs, wagtail_hooks
from core.models import DetailPage
from core.wagtail_hooks import (
FileTransferError,
S3FileFieldAdapter,
S3WagtailTransferFile,
editor_css,
register_s3_media_file_adapter,
)
from tests.helpers import make_test_video
from tests.unit.core import factories
from tests.unit.learn.factories import LessonPageFactory
LOREM_IPSUM = (
'Lorem ipsum dolor sit amet, consectetur adipiscing elit. '
'Verum hoc loco sumo verbis his eandem certe vim voluptatis '
'Epicurum nosse quam ceteros. Consequentia exquirere, quoad sit '
'id, quod volumus, effectum. Et quidem saepe quaerimus verbum '
'Latinum par Graeco et quod idem valeat; Quam illa ardentis '
'amores excitaret sui! Cur tandem? Nihil est enim, de quo aliter '
'tu sentias atque ego, modo commutatis verbis ipsas res conferamus. '
)
@pytest.mark.django_db
def test_anonymous_user_required_handles_anonymous_users(rf, domestic_homepage):
request = rf.get('/')
request.user = AnonymousUser()
response = wagtail_hooks.anonymous_user_required(
page=domestic_homepage,
request=request,
serve_args=[],
serve_kwargs={},
)
assert response is None
@pytest.mark.django_db
def test_anonymous_user_required_handles_authenticated_users(rf, domestic_homepage, user):
request = rf.get('/')
request.user = user
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
response = wagtail_hooks.anonymous_user_required(
page=domestic_homepage,
request=request,
serve_args=[],
serve_kwargs={},
)
assert response.status_code == 302
assert response.url == domestic_homepage.anonymous_user_required_redirect_url
@pytest.mark.django_db
def test_login_required_signup_wizard_ignores_irrelevant_pages(rf, domestic_homepage):
request = rf.get('/')
request.user = AnonymousUser()
response = wagtail_hooks.login_required_signup_wizard(
page=domestic_homepage,
request=request,
serve_args=[],
serve_kwargs={},
)
assert response is None
@pytest.mark.django_db
def test_login_required_signup_wizard_handles_anonymous_users(rf, domestic_homepage):
page = LessonPageFactory(parent=domestic_homepage)
request = rf.get('/foo/bar/')
request.user = AnonymousUser()
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
response = wagtail_hooks.login_required_signup_wizard(
page=page,
request=request,
serve_args=[],
serve_kwargs={},
)
assert response.status_code == 302
assert response.url == '/signup/tailored-content/start/?next=/foo/bar/'
@pytest.mark.django_db
def test_login_required_signup_wizard_handles_anonymous_users_opting_out(rf, domestic_homepage, user):
page = LessonPageFactory(parent=domestic_homepage)
first_request = rf.get('/foo/bar/', {'show-generic-content': True})
first_request.user = AnonymousUser()
middleware = SessionMiddleware()
middleware.process_request(first_request)
first_request.session.save()
response = wagtail_hooks.login_required_signup_wizard(
page=page,
request=first_request,
serve_args=[],
serve_kwargs={},
)
assert response is None
second_request = rf.get('/foo/bar/')
second_request.user = user
second_request.session = first_request.session
response = wagtail_hooks.login_required_signup_wizard(
page=page,
request=second_request,
serve_args=[],
serve_kwargs={},
)
assert response is None
@pytest.mark.django_db
def test_login_required_signup_wizard_handles_authenticated_users(rf, user, domestic_homepage):
page = LessonPageFactory(parent=domestic_homepage)
request = rf.get('/')
request.user = user
response = wagtail_hooks.login_required_signup_wizard(
page=page,
request=request,
serve_args=[],
serve_kwargs={},
)
assert response is None
@pytest.mark.django_db
def test_estimated_read_time_calculation(rf, domestic_homepage):
# IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
# your changeset has slightly increased the size of the HTML page, which
# may have slightly pushed up the default/empty-page readtime (either in
# real terms or just in terms of elements that affect the calculation). If
# so, pushing up the expected time variables in the test is OK to do.
request = rf.get('/')
request.user = AnonymousUser()
reading_content = f'<p>{ LOREM_IPSUM * 10}</p>'
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
hero=[],
body=[],
objective=[('paragraph', RichText(reading_content))],
)
# Every real-world page will have a revision, so the test needs one, too
revision = detail_page.save_revision()
revision.publish()
expected_duration = timedelta(seconds=152)
detail_page.refresh_from_db()
assert detail_page.estimated_read_duration != expected_duration
wagtail_hooks._set_read_time(page=detail_page, request=request)
detail_page.refresh_from_db()
assert detail_page.estimated_read_duration == expected_duration
@pytest.mark.django_db
def test_estimated_read_time_calculation__checks_text_and_video(rf, domestic_homepage):
# IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
# your changeset has slightly increased the size of the HTML page, which
# may have slightly pushed up the default/empty-page readtime (either in
# real terms or just in terms of elements that affect the calculation). If
# so, pushing up the expected time variables in the test is OK to do.
request = rf.get('/')
request.user = AnonymousUser()
video_for_hero = make_test_video(duration=123)
video_for_hero.save()
reading_content = f'<p>{ LOREM_IPSUM * 10}</p>'
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
hero=[
('Video', factories.SimpleVideoBlockFactory(video=video_for_hero)),
],
objective=[('paragraph', RichText(reading_content))],
body=[], # if needed StreamField rich-text and video content can be added
)
# Every real-world page will have a revision, so the test needs one, too
revision = detail_page.save_revision()
revision.publish()
expected_duration = timedelta(seconds=153 + 123) # reading + watching
detail_page.refresh_from_db()
assert detail_page.estimated_read_duration != expected_duration
wagtail_hooks._set_read_time(page=detail_page, request=request)
detail_page.refresh_from_db()
assert detail_page.estimated_read_duration == expected_duration
@pytest.mark.django_db
def test_estimated_read_time_calculation__checks_video(rf, domestic_homepage):
# IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
# your changeset has slightly increased the size of the HTML page, which
# may have slightly pushed up the default/empty-page readtime (either in
# real terms or just in terms of elements that affect the calculation). If
# so, pushing up the expected time variables in the test is OK to do.
request = rf.get('/')
request.user = AnonymousUser()
video_for_hero = make_test_video(duration=123)
video_for_hero.save()
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
hero=[
('Video', factories.SimpleVideoBlockFactory(video=video_for_hero)),
],
objective=[],
body=[], # if needed StreamField rich-text and video content can be added
)
# Every real-world page will have a revision, so the test needs one, too
revision = detail_page.save_revision()
revision.publish()
expected_duration = timedelta(seconds=4 + 123) # reading + watching
detail_page.refresh_from_db()
assert detail_page.estimated_read_duration != expected_duration
wagtail_hooks._set_read_time(page=detail_page, request=request)
detail_page.refresh_from_db()
assert detail_page.estimated_read_duration == expected_duration
@pytest.mark.django_db
def test_estimated_read_time_calculation__updates_only_draft_if_appropriate(rf, domestic_homepage):
# IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
# your changeset has slightly increased the size of the HTML page, which
# may have slightly pushed up the default/empty-page readtime (either in
# real terms or just in terms of elements that affect the calculation). If
# so, pushing up the expected time variables in the test is OK to do.
request = rf.get('/')
request.user = AnonymousUser()
video_for_hero = make_test_video(duration=124)
video_for_hero.save()
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
body=[],
)
assert detail_page.live is True
original_live_read_duration = detail_page.estimated_read_duration
assert original_live_read_duration is None
# Note: for test simplicity here, we're not adding streamfield content to our
# revision - it is enough to just notice how the readtimes for Draft vs Live
    # are appropriately updated at the expected times, based on the minimal default
# content of a DetailPage.
revision = detail_page.save_revision()
assert json.loads(revision.content_json)['estimated_read_duration'] == original_live_read_duration
detail_page.refresh_from_db()
wagtail_hooks._set_read_time(page=detail_page, request=request)
detail_page.refresh_from_db()
expected_duration = timedelta(seconds=2) # NB just the read time of a skeleton DetailPage
# show the live version is not updated yet
assert detail_page.has_unpublished_changes is True
assert detail_page.estimated_read_duration != expected_duration
assert detail_page.estimated_read_duration == original_live_read_duration
# but the draft is
latest_rev = detail_page.get_latest_revision()
assert revision == latest_rev
assert json.loads(latest_rev.content_json)['estimated_read_duration'] == str(expected_duration)
# Now publish the draft and show it updates the live, too
latest_rev.publish()
detail_page.refresh_from_db()
wagtail_hooks._set_read_time(page=detail_page, request=request)
detail_page.refresh_from_db()
assert detail_page.estimated_read_duration != original_live_read_duration
# NOTE: for a reason unrelated to the point of _this_ test, the readtime
# of the published page CAN BE calculated as slightly longer than the draft.
# This may be in part due to the page having a very small amount of content.
assert detail_page.estimated_read_duration == timedelta(seconds=3)
@pytest.mark.django_db
def test_estimated_read_time_calculation__forced_update_of_live(rf, domestic_homepage):
# This test is a variant of test_estimated_read_time_calculation__updates_only_draft_if_appropriate
# IF THIS TEST FAILS BASED ON OFF-BY-ONE-SECOND DURATIONS... check whether
# your changeset has slightly increased the size of the HTML page, which
# may have slightly pushed up the default/empty-page readtime (either in
# real terms or just in terms of elements that affect the calculation). If
# so, pushing up the expected time variables in the test is OK to do.
request = rf.get('/')
request.user = AnonymousUser()
video_for_hero = make_test_video(duration=124)
video_for_hero.save()
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
body=[],
)
assert detail_page.live is True
original_live_read_duration = detail_page.estimated_read_duration
assert original_live_read_duration is None
# Make a revision, so we have both draft and live in existence
revision = detail_page.save_revision()
assert json.loads(revision.content_json)['estimated_read_duration'] == original_live_read_duration
detail_page.refresh_from_db()
wagtail_hooks._set_read_time(
page=detail_page,
request=request,
is_post_creation=True, # THIS will mean the live page is updated at the same time as the draft
)
detail_page.refresh_from_db()
expected_duration = timedelta(seconds=2) # NB just the read time of a skeleton DetailPage
    # show the live version is updated this time
assert detail_page.estimated_read_duration == expected_duration
assert detail_page.has_unpublished_changes is True
# and the draft is updated too
latest_rev = detail_page.get_latest_revision()
assert revision == latest_rev
assert json.loads(latest_rev.content_json)['estimated_read_duration'] == str(expected_duration)
@pytest.mark.parametrize('is_post_creation_val', (True, False))
@pytest.mark.django_db
def test__set_read_time__passes_through_is_post_creation(
rf,
domestic_homepage,
is_post_creation_val,
):
request = rf.get('/')
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
body=[],
)
with mock.patch(
'core.wagtail_hooks._update_data_for_appropriate_version'
) as mocked_update_data_for_appropriate_version:
wagtail_hooks._set_read_time(request, detail_page, is_post_creation=is_post_creation_val)
expected_seconds = 2
mocked_update_data_for_appropriate_version.assert_called_once_with(
page=detail_page,
force_page_update=is_post_creation_val,
data_to_update={'estimated_read_duration': timedelta(seconds=expected_seconds)},
)
@pytest.mark.django_db
@pytest.mark.parametrize('force_update', (False, True))
def test__update_data_for_appropriate_version(domestic_homepage, rf, force_update):
request = rf.get('/')
request.user = AnonymousUser()
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
body=[],
)
assert detail_page.live is True
# Make a revision, so we have both draft and live in existence
revision = detail_page.save_revision()
assert detail_page.get_latest_revision() == revision
assert detail_page.title != 'Dummy Title'
assert json.loads(revision.content_json)['title'] == detail_page.title
wagtail_hooks._update_data_for_appropriate_version(
page=detail_page, force_page_update=force_update, data_to_update={'title': 'Dummy Title'}
)
revision.refresh_from_db()
assert json.loads(revision.content_json)['title'] == 'Dummy Title'
detail_page.refresh_from_db()
if force_update:
assert detail_page.title == 'Dummy Title'
else:
assert detail_page.title != 'Dummy Title'
@pytest.mark.django_db
def test_set_read_time__after_create_page(domestic_homepage, rf):
request = rf.get('/')
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
body=[],
)
with mock.patch('core.wagtail_hooks._set_read_time') as mock__set_read_time:
wagtail_hooks.set_read_time__after_create_page(request, detail_page)
mock__set_read_time.assert_called_once_with(request, detail_page, is_post_creation=True)
@pytest.mark.django_db
def test_set_read_time__after_edit_page(domestic_homepage, rf):
request = rf.get('/')
detail_page = factories.DetailPageFactory(
parent=domestic_homepage,
template='learn/detail_page.html',
body=[],
)
with mock.patch('core.wagtail_hooks._set_read_time') as mock__set_read_time:
wagtail_hooks.set_read_time__after_edit_page(request, detail_page)
mock__set_read_time.assert_called_once_with(request, detail_page)
def test_wagtail_transfer_custom_adapter_methods___get_relevant_s3_meta():
mock_field = mock.Mock(name='mock_field')
adapter = S3FileFieldAdapter(mock_field)
mock_field_value = mock.Mock(name='mock_field_value')
mock_field_value.storage.bucket.name = 'test-bucket-name'
mock_field_value.name = 'test-bucket-key'
# There are other attributes on the real object, eg 'url'
mock_objectsummary_instance = mock.Mock(name='mock_objectsummary_instance')
mock_objectsummary_instance.size = 1234567
mock_objectsummary_instance.e_tag.replace.return_value = '<KEY>'
# The double quoting is correct - ETags are meant to be double-quoted.
# See https://tools.ietf.org/html/rfc2616#section-14.19
mock_objectsummary_class = mock.Mock(name='mock ObjectSummary')
mock_objectsummary_class.return_value = mock_objectsummary_instance
with mock.patch('core.wagtail_hooks.s3.ObjectSummary', mock_objectsummary_class):
meta = adapter._get_relevant_s3_meta(mock_field_value)
mock_objectsummary_class.assert_called_once_with('test-bucket-name', 'test-bucket-key')
assert meta == {'size': 1234567, 'hash': 'aabbccddeeff112233445566'}
@pytest.mark.parametrize(
'etag_val,expected',
(
('"<KEY>"', '<KEY>'),
('aabbccddeeff112233445566', '<KEY>'),
("<KEY>", '<KEY>'), # noqa Q000 - this was deliberate
),
)
def test_wagtail_transfer_custom_adapter_methods___get_file_hash(etag_val, expected):
mock_field = mock.Mock(name='mock_field')
adapter = S3FileFieldAdapter(mock_field)
mock_objectsummary_instance = mock.Mock(name='mock_objectsummary_instance')
mock_objectsummary_instance.size = 1234567
mock_objectsummary_instance.e_tag = etag_val
hash_ = adapter._get_file_hash(mock_objectsummary_instance)
assert hash_ == expected
@pytest.mark.parametrize(
'file_url,expected',
(
# See constants.AWS_S3_MAIN_HOSTNAME_OPTIONS
(
'https://w-t-test-bucket.s3.amazonaws.com/media/path/to/file.mp4',
('w-t-test-bucket', 'media/path/to/file.mp4'),
),
(
'http://w-t-test-bucket.s3.amazonaws.com/media/path/to/file.mp4',
('w-t-test-bucket', 'media/path/to/file.mp4'),
),
(
'https://w-t-test-bucket.s3.eu-west-2.amazonaws.com/media/path/to/file.mp4',
('w-t-test-bucket', 'media/path/to/file.mp4'),
),
(
'https://w-t-test-bucket.s3.dualstack.eu-west-2.amazonaws.com/media/path/to/file.mp4',
('w-t-test-bucket', 'media/path/to/file.mp4'),
),
(
'https://w-t-test-bucket.s3-accesspoint.eu-west-2.amazonaws.com/media/path/to/file.mp4',
('w-t-test-bucket', 'media/path/to/file.mp4'),
),
(
'https://w-t-test-bucket.s3-accesspoint.dualstack.eu-west-2.amazonaws.com/media/path/to/file.mp4',
('w-t-test-bucket', 'media/path/to/file.mp4'),
),
),
)
def test_wagtail_transfer_custom_adapter_methods___get_imported_file_bucket_and_key(file_url, expected):
mock_field = mock.Mock(name='mock_field')
adapter = S3FileFieldAdapter(mock_field)
assert adapter._get_imported_file_bucket_and_key(file_url) == expected
@override_settings(MEDIA_URL='https://magna-fake-example.s3.amazonaws.com')
@pytest.mark.parametrize(
'url,expected',
(
(
'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
{
'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
'size': 123321,
'hash': 'aabbccddeeff665544332211',
},
),
(None, None),
),
)
def test_wagtail_transfer_custom_adapter_methods__serialize(url, expected):
file_field = FileField()
if url:
mock_field_value = mock.Mock()
mock_field_value.url = url
# There are other attributes on the real object, but we're only using url here
else:
mock_field_value = None
file_field.value_from_object = mock.Mock(return_value=mock_field_value)
adapter = S3FileFieldAdapter(file_field)
instance = mock.Mock()
mock_get_relevant_s3_meta = mock.Mock(
return_value={'download_url': url, 'size': 123321, 'hash': 'aabbccddeeff665544332211'}
)
with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
output = adapter.serialize(instance)
assert output == expected
file_field.value_from_object.assert_called_once_with(instance)
if url:
mock_get_relevant_s3_meta.assert_called_once_with(field_value=mock_field_value)
####################################################################################################
# Cases for S3FileFieldAdapter.populate_field
# The following tests are repetitive, but using parametrize() to DRY them up just
# made them really complex
# 1. File not already imported, source's hash matches the existing file's hash, so no import needed
# 2. File not already imported, source's hash doesn't match existing file, so we do a fresh import
# 3. As above, but an exception is raised during file.transfer()
# 4. File was already imported - no need to re-import
# 5. Null `value` param, we abandon early
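# A rough, hypothetical sketch of the adapter logic these five cases exercise
# (illustrative pseudocode only, not the real core.wagtail_hooks implementation):
#
#   populate_field(instance, value, context):
#       if not value:                                             # case 5: nothing to do
#           return
#       if value['download_url'] in context.imported_files_by_source_url:
#           reuse the cached ImportedFile                         # case 4
#       elif the local file exists and its S3 hash matches value['hash']:
#           keep the existing file, no transfer                   # case 1
#       else:
#           S3WagtailTransferFile(...).transfer()                 # cases 2 and 3 (3 = transfer raises)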
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_1():
    # 1. File not already imported, source's hash matches the existing file's hash, so no import needed
file_field = FileField()
file_field.get_attname = mock.Mock(return_value='some-filefield')
mock_field_value = mock.Mock(name='mock_field_value')
mock_field_value.storage.bucket.name = 'test-bucket-name'
mock_field_value.name = 'test-bucket-key'
file_field.value_from_object = mock.Mock(return_value=mock_field_value)
adapter = S3FileFieldAdapter(file_field)
fake_value = {
'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
'size': 123321,
'hash': 'aabbccddeeff665544332211',
}
adapter._get_imported_file_bucket_and_key = mock.Mock(
return_value=('magna-fake-example.s3.amazonaws.com', 'path/to/file.jpg')
)
mock_context = mock.Mock()
mock_context.imported_files_by_source_url = {}
mock_imported_file = mock.Mock(name='mock_imported_file')
mock_get_relevant_s3_meta = mock.Mock(
return_value={
'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
'size': 123321,
'hash': 'aabbccddeeff665544332211', # same as existing file, so no import will happen
}
)
mock_s3_file = mock.Mock(name='mock_s3_file')
mock_s3_file.source_url = 'MOCK_SOURCE_URL_VALUE'
mock_s3_file.transfer.return_value = mock_imported_file
mock_S3WagtailTransferFile = mock.Mock(return_value=mock_s3_file) # noqa N806
mock_instance = mock.Mock()
with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
adapter.populate_field(
instance=mock_instance,
value=fake_value,
context=mock_context,
)
assert adapter._get_imported_file_bucket_and_key.call_count == 0
mock_get_relevant_s3_meta.assert_called_once_with(field_value=mock_field_value)
assert mock_S3WagtailTransferFile.call_count == 0
assert mock_s3_file.transfer.call_count == 0
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_2():
# 2. File not already imported, source's hash DOES NOT match existing file, so we do a fresh import
file_field = FileField()
file_field.get_attname = mock.Mock(return_value='some-filefield')
mock_field_value = mock.Mock(name='mock_field_value')
mock_field_value.storage.bucket.name = 'test-bucket-name'
mock_field_value.name = 'test-bucket-key'
file_field.value_from_object = mock.Mock(return_value=mock_field_value)
mock_instance = mock.Mock()
adapter = S3FileFieldAdapter(file_field)
fake_value = {
'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
'size': 123321,
'hash': 'aabbccddeeff665544332211',
}
adapter._get_imported_file_bucket_and_key = mock.Mock(
return_value=('magna-fake-example.s3.amazonaws.com', 'path/to/file.jpg')
)
mock_context = mock.Mock()
mock_context.imported_files_by_source_url = {}
mock_imported_file = mock.Mock(name='mock_imported_file')
mock_get_relevant_s3_meta = mock.Mock(
return_value={
'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
'size': 123321,
'hash': 'bbccddeeff', # ie, does NOT match
}
)
mock_s3_file = mock.Mock(name='mock_s3_file')
mock_s3_file.source_url = 'MOCK_SOURCE_URL_VALUE'
mock_s3_file.transfer.return_value = mock_imported_file
mock_S3WagtailTransferFile = mock.Mock(return_value=mock_s3_file) # noqa N806
with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
adapter.populate_field(
instance=mock_instance,
value=fake_value,
context=mock_context,
)
# the importer was called
mock_get_relevant_s3_meta.assert_called_once_with(field_value=mock_field_value)
adapter._get_imported_file_bucket_and_key.assert_called_once_with(fake_value['download_url'])
mock_S3WagtailTransferFile.assert_called_once_with(
local_filename='path/to/file.jpg', # not changed by DefaultStorage in this test
size=123321,
hash_='aabbccddeeff665544332211',
source_url='https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
source_bucket='magna-fake-example.s3.amazonaws.com',
source_key='path/to/file.jpg',
)
mock_s3_file.transfer.assert_called_once_with() # Deliberately no args
# show the imported file is now in the cache so it won't be re-imported
assert mock_context.imported_files_by_source_url['MOCK_SOURCE_URL_VALUE'] == mock_imported_file
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_3():
# 3. As above, but an exception is raised during file.transfer()
file_field = FileField()
file_field.get_attname = mock.Mock(return_value='some-filefield')
mock_field_value = mock.Mock(name='mock_field_value')
mock_field_value.storage.bucket.name = 'test-bucket-name'
mock_field_value.name = 'test-bucket-key'
file_field.value_from_object = mock.Mock(return_value=mock_field_value)
mock_instance = mock.Mock()
adapter = S3FileFieldAdapter(file_field)
fake_value = {
'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
'size': 123321,
'hash': 'aabbccddeeff665544332211',
}
adapter._get_imported_file_bucket_and_key = mock.Mock(
return_value=('magna-fake-example.s3.amazonaws.com', 'path/to/file.jpg')
)
mock_context = mock.Mock()
mock_context.imported_files_by_source_url = {}
mock_get_relevant_s3_meta = mock.Mock(
return_value={
'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
'size': 123321,
'hash': 'bbccddeeff', # ie, does NOT match
}
)
mock_s3_file = mock.Mock(name='mock_s3_file')
mock_s3_file.source_url = 'MOCK_SOURCE_URL_VALUE'
mock_s3_file.transfer.side_effect = FileTransferError('Faked')
mock_S3WagtailTransferFile = mock.Mock(return_value=mock_s3_file) # noqa N806
with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
adapter.populate_field(
instance=mock_instance,
value=fake_value,
context=mock_context,
)
    # the importer was called, but didn't succeed
mock_get_relevant_s3_meta.assert_called_once_with(field_value=mock_field_value)
adapter._get_imported_file_bucket_and_key.assert_called_once_with(fake_value['download_url'])
mock_S3WagtailTransferFile.assert_called_once_with(
local_filename='path/to/file.jpg', # not changed by DefaultStorage in this test
size=123321,
hash_='aabbccddeeff665544332211',
source_url='https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
source_bucket='magna-fake-example.s3.amazonaws.com',
source_key='path/to/file.jpg',
)
mock_s3_file.transfer.assert_called_once_with() # Deliberately no args
    # show the imported file is NOT in the cache because we failed
assert 'MOCK_SOURCE_URL_VALUE' not in mock_context.imported_files_by_source_url
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_4():
# 4. File was already imported - no need to re-import
file_field = FileField()
file_field.get_attname = mock.Mock(return_value='some-filefield')
mock_field_value = mock.Mock(name='mock_field_value')
mock_field_value.storage.bucket.name = 'test-bucket-name'
mock_field_value.name = 'test-bucket-key'
file_field.value_from_object = mock.Mock(return_value=mock_field_value)
mock_instance = mock.Mock()
adapter = S3FileFieldAdapter(file_field)
fake_value = {
'download_url': 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
'size': 123321,
'hash': 'aabbccddeeff665544332211',
}
adapter._get_imported_file_bucket_and_key = mock.Mock()
mock_imported_file = mock.Mock(name='mock_imported_file')
mock_context = mock.Mock()
mock_context.imported_files_by_source_url = {
'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg': mock_imported_file
}
mock_get_relevant_s3_meta = mock.Mock()
mock_S3WagtailTransferFile = mock.Mock() # noqa N806
with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
adapter.populate_field(
instance=mock_instance,
value=fake_value,
context=mock_context,
)
assert adapter._get_imported_file_bucket_and_key.call_count == 0
assert mock_get_relevant_s3_meta.call_count == 0
assert mock_S3WagtailTransferFile.call_count == 0
def test_wagtail_transfer_custom_adapter_methods__populate_field__case_5():
# 5. Null `value` param, we abandon early
file_field = FileField()
file_field.get_attname = mock.Mock(return_value='some-filefield')
file_field.value_from_object = mock.Mock()
mock_instance = mock.Mock()
adapter = S3FileFieldAdapter(file_field)
fake_value = {}
adapter._get_imported_file_bucket_and_key = mock.Mock()
mock_context = mock.Mock()
mock_get_relevant_s3_meta = mock.Mock()
mock_S3WagtailTransferFile = mock.Mock() # noqa N806
with mock.patch('core.wagtail_hooks.S3FileFieldAdapter._get_relevant_s3_meta', mock_get_relevant_s3_meta):
with mock.patch('core.wagtail_hooks.S3WagtailTransferFile', mock_S3WagtailTransferFile):
adapter.populate_field(
instance=mock_instance,
value=fake_value,
context=mock_context,
)
assert file_field.value_from_object.call_count == 0
assert adapter._get_imported_file_bucket_and_key.call_count == 0
assert mock_get_relevant_s3_meta.call_count == 0
assert mock_S3WagtailTransferFile.call_count == 0
####################################################################################################
@override_settings(AWS_STORAGE_BUCKET_NAME='magna-fake-bucket-2')
@mock.patch('core.wagtail_hooks.s3.meta.client.copy')
@mock.patch('core.wagtail_hooks.ImportedFile.objects.create')
def test_s3wagtailtransferfile__transfer(
mock_importedfile_objects_create,
mock_s3_client_copy,
):
file = S3WagtailTransferFile(
local_filename='path/to/file.jpg', # not changed by DefaultStorage in this test
size=123321,
hash_='aabbccddeeff665544332211',
source_url='https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
source_bucket='magna-fake-bucket-1.s3.amazonaws.com',
source_key='path/to/file.jpg',
)
assert file.local_filename == 'path/to/file.jpg'
assert file.size == 123321
assert file.hash == 'aabbccddeeff665544332211'
assert file.source_url == 'https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg'
assert file.source_bucket == 'magna-fake-bucket-1.s3.amazonaws.com'
assert file.source_key == 'path/to/file.jpg'
assert not mock_s3_client_copy.called
file.transfer()
mock_s3_client_copy.assert_called_once_with(
{'Bucket': file.source_bucket, 'Key': file.source_key},
'magna-fake-bucket-2',
file.local_filename,
)
mock_importedfile_objects_create.assert_called_once_with(
file=file.local_filename,
source_url=file.source_url,
hash=file.hash,
size=file.size,
)
@pytest.mark.parametrize(
'exception_class',
(
RetriesExceededError,
S3UploadFailedError,
ValueError,
),
)
@mock.patch('core.wagtail_hooks.s3.meta.client.copy')
def test_s3wagtailtransferfile__transfer__covered_exceptions(mock_s3_client_copy, exception_class):
file = S3WagtailTransferFile(
local_filename='path/to/file.jpg', # not changed by DefaultStorage in this test
size=123321,
hash_='aabbccddeeff665544332211',
source_url='https://magna-fake-example.s3.amazonaws.com/path/to/file.jpg',
source_bucket='magna-fake-bucket-1.s3.amazonaws.com',
source_key='path/to/file.jpg',
)
mock_s3_client_copy.side_effect = exception_class('Faked')
with pytest.raises(FileTransferError):
file.transfer()
@pytest.mark.parametrize(
'user_media_on_s3,expected',
(
(True, {FileField: S3FileFieldAdapter}),
(False, {}),
),
)
def test_register_s3_media_file_adapter(user_media_on_s3, expected):
with override_settings(USER_MEDIA_ON_S3=user_media_on_s3):
assert register_s3_media_file_adapter() == expected
def _fake_static(value):
return '/path/to/static/' + value
@mock.patch('core.wagtail_hooks.static')
def test_case_study_editor_css(mock_static):
mock_static.side_effect = _fake_static
assert editor_css() == '<link rel="stylesheet" href="/path/to/static/cms-admin/css/case-study.css">'
@pytest.mark.django_db
@pytest.mark.parametrize(
'request_path',
(
'/test/path/',
'/test/path/?token=test',
),
)
def test_authenticated_user_required__sets_next_param(rf, request_path):
instance = DetailPage()
assert instance.authenticated_user_required_redirect_url == cms_slugs.SIGNUP_URL
request = rf.get(request_path)
request.user = AnonymousUser()
output = wagtail_hooks.authenticated_user_required(instance, request, [], {})
assert output.status_code == 302
assert output._headers['location'] == ('Location', f'{cms_slugs.SIGNUP_URL}?next={request_path}')
|
458932
|
import os
from collections import defaultdict
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy
numpy.random.seed(1)
import tensorflow as tf
import logging
import math
from tensorflow import logging as log
from tensorflow.python import debug as tf_debug
from collections import OrderedDict
from data_iterator_elmo import TextIterator
from tensorflow.contrib import rnn
import tensorflow.contrib.layers as layers
import warnings
import pickle as pkl
import sys
import pprint
import pdb
import os
import copy
import time
import pickle
import h5py
import numpy as np
logger = logging.getLogger(__name__)
def get_elmo(batch_data, max_seq_length, max_news, embedding_file, day_flag=False):
# first prepare for padding
zero_word = []
zero_news = []
one_batch = []
'''
zero_word = [0.0]*1024 #new way to generate all zero list
zero_news = [zero_word for _ in range(max_seq_length)]
'''
for w in range(1024):
zero_word.append(float(0))
for n in range(max_seq_length):
zero_news.append(zero_word)
# deal with batch without days
if day_flag is False:
''' same implementation but might be faster
for samples,i in enumerate(batch_data):
one_sample = []
for news,j in enumerate(i):
if int(j) == -1:
'''
for samples in range(len(batch_data)):
one_sample = []
for news in range(len(batch_data[samples])):
if int(batch_data[samples][news]) == -1:
elmo_news = zero_news
else:
with h5py.File(embedding_file, 'r') as fin:
elmo_news = np.average(fin[str(batch_data[samples][news])], axis=0).tolist()
while len(elmo_news) < max_seq_length:
elmo_news.append(zero_word)
for d0 in range(len(elmo_news)):
elmo_news[d0] = np.array(elmo_news[d0])
one_sample.append(np.array(elmo_news))
one_batch.append(np.array(one_sample))
return np.array(one_batch)
# deal with batch with days
else:
''' same implementation but might be faster
for samples,i in enumerate(batch_data):
one_sample = []
for days,j in enumerate(i):
one_day = []
for news,z in enumerate(j):
if int(z) == -1:
'''
for samples in range(len(batch_data)):
one_sample = []
for days in range(len(batch_data[samples])):
one_day = []
for news in range(len(batch_data[samples][days])):
if int(batch_data[samples][days][news]) == -1:
elmo_news = zero_news
else:
with h5py.File(embedding_file, 'r') as fin:
elmo_news = np.average(fin[str(batch_data[samples][days][news])], axis=0).tolist()
while len(elmo_news) < max_seq_length:
elmo_news.append(zero_word)
for d in range(len(elmo_news)):
elmo_news[d] = np.array(elmo_news[d])
one_day.append(np.array(elmo_news))
one_sample.append(np.array(one_day))
one_batch.append(np.array(one_sample))
return np.array(one_batch)
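# Illustrative usage sketch (not part of the original script). Assuming `ids` holds h5py
# keys with -1 used as padding and 'elmo.hdf5' is a hypothetical embedding file, a
# day-level batch of 1 sample x 2 days x 3 news slots comes back as a 5-D array:
#
#   ids = [[[0, 3, -1], [5, -1, -1]]]
#   emb = get_elmo(ids, max_seq_length=13, max_news=3,
#                  embedding_file='elmo.hdf5', day_flag=True)
#   # emb.shape -> (1, 2, 3, 13, 1024): batch, day, news, words, ELMo dimension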
def _s(pp, name):  # add prefix
return '{}_{}'.format(pp, name)
def load_params(path, params):
pp = numpy.load(path)
    for kk, vv in params.items():
if kk not in pp:
warnings.warn('{} is not in the archive'.format(kk))
continue
params[kk] = pp[kk]
return params
def xavier_init(fan_in, fan_out, constant=1):
low = -constant * numpy.sqrt(6.0 / (fan_in + fan_out))
high = constant * numpy.sqrt(6.0 / (fan_in + fan_out))
W = numpy.random.uniform(low=low, high=high, size=(fan_in, fan_out))
return W.astype('float32')
def ortho_weight(ndim): # used by norm_weight below
"""
Random orthogonal weights
    Used by norm_weight (below), in which case we
    are ensuring that the rows are orthogonal
    (i.e. W = U \Sigma V, U has the same
    # of rows, V has the same # of cols)
"""
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
"""
Random weights drawn from a Gaussian
"""
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
# W = numpy.random.uniform(-0.5,0.5,size=(nin,nout))
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
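# Illustrative check (not in the original script): ortho_weight returns U from the SVD of
# a random square matrix, so the result is orthonormal; norm_weight falls back to it when
# nout == nin and ortho=True, otherwise it draws scaled Gaussian weights.
#
#   W = ortho_weight(4)
#   numpy.allclose(W.dot(W.T), numpy.eye(4), atol=1e-5)   # expected: True
#   E = norm_weight(100, 50, scale=0.01)                  # Gaussian weights, shape (100, 50)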
def prepare_data(sequence, sequence_d1, sequence_d2, labels, options, maxlen=None, max_word=100):
# length = [len(s) for s in sequence]
length, length_d1, length_d2 = [], [], []
for i, d1, d2 in zip(sequence, sequence_d1, sequence_d2):
dd1, dd2 = list(), list()
length.append(len(i))
for day in d1:
dd1.append(len(day))
length_d1.append(dd1)
for day in d2:
dd2.append(len(day))
length_d2.append(dd2)
if maxlen is not None: # max length is the news level
new_sequence = []
new_lengths = []
new_sequence_d1 = []
new_lengths_d1 = []
new_sequence_d2 = []
new_lengths_d2 = []
for l, s, ld1, sd1, ld2, sd2 in zip(length, sequence, length_d1, sequence_d1, length_d2, sequence_d2):
dd1, lld1, dd2, lld2 = list(), list(), list(), list()
if l < maxlen:
new_sequence.append(s)
new_lengths.append(l)
for i, j in zip(ld1, sd1):
if i < maxlen:
dd1.append(j)
lld1.append(i)
new_sequence_d1.append(dd1)
new_lengths_d1.append(lld1)
for i, j in zip(ld2, sd2):
if i < maxlen:
dd2.append(j)
lld2.append(i)
new_sequence_d2.append(dd2)
new_lengths_d2.append(lld2)
length = new_lengths # This step is to filter the sentence which length is bigger
sequence = new_sequence # than the max length. length means number of news. sequence means
# length of each sentence
length_d1 = new_lengths_d1
sequence_d1 = new_sequence_d1
length_d2 = new_lengths_d2
sequence_d2 = new_sequence_d2
    ##TODO need to be careful: set the max length bigger to avoid a bug
if len(length) < 1:
return None, None, None, None, None, None, None, None
# day1 = len(sequence_d1[0])
# day2 = len(sequence_d2[0])
day1 = options['delay1'] - 1
day2 = options['delay2'] - options['delay1']
maxlen_x = numpy.max(length) # max time step
try:
maxlen_xd1 = numpy.max([numpy.max(i) for i in length_d1])
maxlen_xd2 = numpy.max([numpy.max(i) for i in length_d2])
except ValueError as e:
print(str(e))
maxlen_xd1 = 100
maxlen_xd2 = 100
n_samples = len(sequence) # number of samples== batch
max_sequence = max(len(j) for i in sequence for j in i) # find the sequence max length
max_sequence_d1 = max(len(j) for i in sequence_d1 for z in i for j in z)
max_sequence_d2 = max(len(j) for i in sequence_d2 for z in i for j in z)
max_sequence = max_word if max_sequence > max_word else max_sequence # shrink the data size
max_sequence_d1 = max_word if max_sequence_d1 > max_word else max_sequence_d1 # shrink the data size
max_sequence_d2 = max_word if max_sequence_d2 > max_word else max_sequence_d2 # shrink the data size
##TODO for x
x = numpy.zeros((n_samples, maxlen_x, max_sequence)).astype('int64')
x_mask = numpy.zeros((n_samples, maxlen_x)).astype('float32')
##TODO for x_d1
x_d1 = numpy.zeros((n_samples, day1, maxlen_xd1, max_sequence_d1)).astype('int64')
x_d1_mask = numpy.zeros((n_samples, day1, maxlen_xd1)).astype('float32')
##TODO for x_d2
x_d2 = numpy.zeros((n_samples, day2, maxlen_xd2, max_sequence_d2)).astype('int64')
x_d2_mask = numpy.zeros((n_samples, day2, maxlen_xd2)).astype('float32')
final_mask = numpy.ones((n_samples, 1 + day1 + day2)).astype('float32')
# l = numpy.array(labels).astype('int64')
##TODO for label
l = numpy.zeros((n_samples,)).astype('int64')
for index, (i, j, k, ll) in enumerate(zip(sequence, sequence_d1, sequence_d2, labels)): # batch size
l[index] = ll
for idx, ss in enumerate(i): # time step
# x[idx, index, :sequence_length[idx]] = ss
if len(ss) < max_sequence:
x[index, idx, :len(ss)] = ss
else:
x[index, idx, :max_sequence] = ss[:max_sequence]
x_mask[index, idx] = 1.
for jj, day in enumerate(j):
for idx, ss in enumerate(day):
if len(ss) < max_sequence_d1:
x_d1[index, jj, idx, :len(ss)] = ss
else:
x_d1[index, jj, idx, :max_sequence_d1] = ss[:max_sequence_d1]
x_d1_mask[index, jj, idx] = 1.
for jj, day in enumerate(k):
for idx, ss in enumerate(day):
if len(ss) < max_sequence_d2:
x_d2[index, jj, idx, :len(ss)] = ss
else:
x_d2[index, jj, idx, :max_sequence_d2] = ss[:max_sequence_d2]
x_d2_mask[index, jj, idx] = 1.
'''
haha = numpy.absolute(numpy.sign(x))
hehe = numpy.absolute(numpy.sign(x_d1))
jiji = numpy.absolute(numpy.sign(x_d2))
'''
return x, x_mask, x_d1, x_d1_mask, x_d2, x_d2_mask, l, final_mask, max_sequence, max_sequence_d1, max_sequence_d2
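# Illustrative shape note (an assumption derived from the code above, not original text):
# with options {'delay1': 3, 'delay2': 7} the returned arrays look like
#   x      (n_samples, max_news,    max_words)            news of day 0
#   x_d1   (n_samples, 2, max_news_d1, max_words_d1)      days covered by delay1 - 1
#   x_d2   (n_samples, 4, max_news_d2, max_words_d2)      days covered by delay2 - delay1
# together with float32 masks of the matching leading dimensions, the int64 labels l,
# final_mask of shape (n_samples, 1 + 2 + 4), and the three max word lengths.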
def days(emb, sequence_mask, news_mask, keep_prob, is_training, options, elmo):
# emb batch,day,news, sequence,embedding, 32*3*40*13*100
# sequence_mask batch, day, news,sequence 32*3*40*13
# news_mask batch, day, news, 32*3*40
batch = tf.shape(emb)[0]
day = tf.shape(emb)[1]
new_s = tf.shape(emb)[2]
word = tf.shape(emb)[3]
word_level_inputs = tf.reshape(emb, [batch * day * new_s, word, options['dim_word']])
elmo = tf.reshape(elmo, [batch * day * new_s, word, 1024])
word_level_mask = tf.reshape(sequence_mask, [batch * day * new_s, word])
news_level_mask = tf.reshape(news_mask, [batch * day, new_s])
##TODO word level LSTM
word_encoder_out = bilstm_filter(word_level_inputs, word_level_mask, keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training) # output shape: batch*day*news,sequence,2*lstm_units(32*3*40)*12*600
#word_encoder_out = tf.concat(word_encoder_out, 2) * tf.expand_dims(word_level_mask, -1) # mask the output
word_encoder_out = tf.concat([tf.concat(word_encoder_out, 2), elmo], 2) * tf.expand_dims(word_level_mask, -1) # mask the output
#concat elmo
##TODO word level attention
word_level_output = attention_v2(word_encoder_out, word_level_mask, name='word_attention', keep=keep_prob, r=10,
is_training=is_training)
# word_level_output shape is (32*3*40)*600
'''
word_level_output = tf.reduce_sum(word_encoder_out * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)
'''
##TODO average word
# word_level_output = tf.reduce_sum(word_level_inputs * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
# tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)# word_level_output shape is (32*3*40)*100
if options['use_dropout']:
word_level_output = layers.dropout(word_level_output, keep_prob=keep_prob, is_training=is_training, seed=None)
news_level_input = tf.reshape(word_level_output, [batch * day, new_s, 2 * options['dim'] + 1024]) # (32*3)*40*600
news_level_input = news_level_input * tf.expand_dims(news_level_mask, -1) # mask before attention
##TODO news level attention
news_level_output = attention_v2(news_level_input, news_level_mask, name='news_attention', keep=keep_prob, r=10,
is_training=is_training) # shape is (32*3)*600
##TODO average news
# news_level_output = tf.reduce_sum(news_level_input * tf.expand_dims(news_level_mask, -1), 1) / tf.expand_dims(
# tf.reduce_sum(news_level_mask, 1) + 1e-8, 1)
# shape is (32*3)*600
day_level_output = tf.reshape(news_level_output, [batch, day, 2 * options['dim'] + 1024]) # (32*3)*600
return day_level_output
def news(emb, sequence_mask, news_mask, keep_prob, is_training, options, elmo):
# emb batch,news, sequence,embedding, 32*40*13*100
# sequence_mask batch, news,sequence 32*40*13
# news_mask batch, news, 32*40
batch = tf.shape(emb)[0]
new_s = tf.shape(emb)[1]
word = tf.shape(emb)[2]
word_level_inputs = tf.reshape(emb, [batch * new_s, word, options['dim_word']])
elmo = tf.reshape(elmo, [batch * new_s, word, 1024])
word_level_mask = tf.reshape(sequence_mask, [batch * new_s, word])
##TODO word level LSTM
word_encoder_out = bilstm_filter(word_level_inputs, word_level_mask, keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training) # output shape: batch*news,sequence,2*lstm_units(32*40)*12*600
#word_encoder_out = tf.concat(word_encoder_out, 2) * tf.expand_dims(word_level_mask, -1) # mask the output
word_encoder_out = tf.concat([tf.concat(word_encoder_out, 2), elmo], 2) * tf.expand_dims(word_level_mask, -1)
#concat two lstm layers, also with the elmo embedding
word_level_output = attention_v2(word_encoder_out, word_level_mask, name='word_attention', keep=keep_prob, r=10,
is_training=is_training)
'''
word_level_output = tf.reduce_sum(word_encoder_out * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)
'''
# word_level_output shape is (32*40)*600
##TODO average word
# word_level_output = tf.reduce_sum(word_level_inputs * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
# tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)# word_level_output shape is (32*40)*100
if options['use_dropout']:
word_level_output = layers.dropout(word_level_output, keep_prob=keep_prob, is_training=is_training, seed=None)
news_level_input = tf.reshape(word_level_output, [batch, new_s, 2 * options['dim'] + 1024]) # 32*40*600
news_level_input = news_level_input * tf.expand_dims(news_mask, -1) # mask before attention
##TODO news level attention
news_level_output = attention_v2(news_level_input, news_mask, name='news_attention', keep=keep_prob, r=10,
is_training=is_training) # shape is 32*600
##TODO average news
# news_level_output = tf.reduce_sum(news_level_input * tf.expand_dims(news_mask, -1), 1) / tf.expand_dims(
# tf.reduce_sum(news_mask, 1) + 1e-8, 1)
# shape is 32*600
return news_level_output
def attention_v1(input, masks, name='attention', nin=600, keep=1.0, is_training=True):
# input is batch,time_step,hidden_state (32*40)*13*600 mask (32*40)*13
# hidden layer is:batch,hidden_shape,attention_hidden_size (32*40)*13*1200 or (32*40)*13*600
# attention shape after squeeze is (32*40)*13, # batch,time_step,attention_size (32*40)*13*1
with tf.variable_scope(name_or_scope=name, reuse=tf.AUTO_REUSE):
hidden = tf.layers.dense(input, nin / 2, activation=tf.nn.tanh, use_bias=True,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='hidden', reuse=tf.AUTO_REUSE)
# hidden = layers.dropout(hidden, keep_prob=keep, is_training=is_training)
# hidden = tf.layers.batch_normalization(hidden, training=is_training)
# hidden=tf.nn.tanh(hidden)
attention = tf.layers.dense(hidden, 1, activation=None, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='out',
reuse=tf.AUTO_REUSE)
padding = tf.fill(tf.shape(attention), float('-1e8')) # float('-inf')
attention = tf.where(tf.equal(tf.expand_dims(masks, -1), 0.), padding,
attention) # fill 0 with a small number for softmax
        attention = tf.nn.softmax(attention, 1, name='softmax') * tf.expand_dims(masks, -1)
        # masking the attention here is not strictly necessary
results = tf.reduce_sum(input * attention, axis=1) # 32*600
# outputs = tf.squeeze(tf.matmul(tf.transpose(attention, [0, 2, 1]), input)) # transpose to batch,hidden,time_step
return results
def attention_v2(input, mask, name='attention', nin=600, keep=1.0, r=10, is_training=True):
# input is batch,time_step,hidden_state (32*40)*13*600 mask (32*40)*13
# hidden layer is:batch,hidden_shape,attention_hidden_size (32*40)*13*1200 or (32*40)*13*600
# attention shape after squeeze is (32*40)*13, # batch,time_step,attention_size (32*40)*13*1
with tf.variable_scope(name_or_scope=name, reuse=tf.AUTO_REUSE):
masks = tf.stack([mask] * r, -1) # copy r time for filling (32*40)*13*r
iden = tf.eye(r, batch_shape=[tf.shape(input)[0]]) # an identity matrix (32*40)*13*13
hidden = tf.layers.dense(input, nin / 2, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='hidden', reuse=tf.AUTO_REUSE)
# hidden = layers.dropout(hidden, keep_prob=keep, is_training=is_training)
# hidden = tf.layers.batch_normalization(hidden, training=is_training)
# hidden=tf.nn.tanh(hidden)
attention = tf.layers.dense(hidden, r, activation=None, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='out',
reuse=tf.AUTO_REUSE) # attention shape is 32*40*r
padding = tf.fill(tf.shape(attention), float('-1e8')) # float('-inf')
attention = tf.where(tf.equal(masks, 0.), padding, attention) # fill 0 with a small number for softmax
        attention = tf.nn.softmax(attention, 1, name='softmax') * masks  # (32*40)*13*r
        # masking the attention here is not strictly necessary
penalty = tf.norm((tf.matmul(tf.transpose(attention, [0, 2, 1]), attention) - iden), ord='fro',
axis=(-2, -1)) # the Frobenius norm penalty 32 dimension
# attention = attention + beta * tf.expand_dims(tf.expand_dims(penalty, -1), -1) # expand twice
# outputs = tf.reduce_sum(input * attention, axis=1)#(32*40)*600
outputs = tf.matmul(tf.transpose(attention, [0, 2, 1]), input) # transpose to batch,hidden,time_step
##TODO average sentence attention
# results = tf.reduce_mean(outputs, 1) # average sentence attention
##TODO attention over attention
over_hidden = tf.layers.dense(outputs, nin, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='over_attention_hidden', reuse=tf.AUTO_REUSE)
over_attention = tf.layers.dense(over_hidden, 1, activation=None, use_bias=False,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='over_attention_out',
reuse=tf.AUTO_REUSE)
over_attention = tf.nn.softmax(over_attention, 1, name='over_attention_softmax')
results = tf.reduce_sum(outputs * over_attention, axis=1) # 32*600
'''
outputs = tf.reshape(outputs, [tf.shape(outputs)[0], -1])
    ##TODO be careful: changed something
if name == 'sentence_attention':
outputs.set_shape([None, nin * (r ** 2)])
else:
outputs.set_shape([None, nin * r])
'''
return results # result shape is batch, hidden_unit (32*40)*600
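# Illustrative only: attention_v2 is a structured self-attention in the spirit of
# Lin et al. (2017), with r attention hops, a Frobenius-norm penalty on A*A^T - I
# (computed here but not added to the loss), and an attention-over-attention pooling.
# Rough usage sketch, assuming `encoded` is (batch, steps, d) and `mask` is (batch, steps):
#
#   pooled = attention_v2(encoded, mask, name='word_attention',
#                         keep=0.8, r=10, is_training=True)
#   # pooled has shape (batch, d), i.e. the last dimension of `encoded`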
def lstm_filter(input, mask, keep_prob, prefix='lstm', dim=300, is_training=True):
with tf.variable_scope(name_or_scope=prefix, reuse=tf.AUTO_REUSE):
sequence = tf.cast(tf.reduce_sum(mask, 1), tf.int32)
lstm_fw_cell = rnn.LSTMCell(dim, forget_bias=0.0, initializer=tf.orthogonal_initializer(), state_is_tuple=True)
keep_rate = tf.cond(is_training is not False and keep_prob < 1, lambda: 0.8, lambda: 1.0)
cell_dp_fw = rnn.DropoutWrapper(cell=lstm_fw_cell, output_keep_prob=keep_rate)
outputs, _ = tf.nn.dynamic_rnn(cell_dp_fw, input, sequence_length=sequence, swap_memory=False,
dtype=tf.float32)
return outputs
def bilstm_filter(input, mask, keep_prob, prefix='lstm', dim=300, is_training=True):
with tf.variable_scope(name_or_scope=prefix, reuse=tf.AUTO_REUSE):
sequence = tf.cast(tf.reduce_sum(mask, 1), tf.int32)
lstm_fw_cell = rnn.LSTMBlockCell(dim,
forget_bias=1.0) # initializer=tf.orthogonal_initializer(), state_is_tuple=True
# back directions
lstm_bw_cell = rnn.LSTMBlockCell(dim, forget_bias=1.0)
keep_rate = tf.cond(is_training is not False and keep_prob < 1, lambda: 0.8, lambda: 1.0)
cell_dp_fw = rnn.DropoutWrapper(cell=lstm_fw_cell, output_keep_prob=keep_rate)
cell_dp_bw = rnn.DropoutWrapper(cell=lstm_bw_cell, output_keep_prob=keep_rate)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_dp_fw, cell_dp_bw, input, sequence_length=sequence,
swap_memory=False,
dtype=tf.float32) # batch major
return outputs
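# Illustrative only (not in the original): bilstm_filter returns the (forward, backward)
# output pair of a bidirectional LSTM run over the masked sequence lengths; callers
# concatenate the two directions, e.g.
#
#   fw_bw = bilstm_filter(word_level_inputs, word_level_mask, keep_prob,
#                         prefix='sequence_encode', dim=options['dim'], is_training=is_training)
#   encoded = tf.concat(fw_bw, 2)   # (batch, steps, 2 * dim)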
def init_params(options, worddicts):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
# read embedding from GloVe
if options['embedding']:
with open(options['embedding'], 'r') as f:
for line in f:
tmp = line.split()
word = tmp[0]
vector = tmp[1:]
if word in worddicts and worddicts[word] < options['n_words']:
try:
params['Wemb'][worddicts[word], :] = vector
# encoder: bidirectional RNN
except ValueError as e:
print(str(e))
return params
def word_embedding(options, params):
embeddings = tf.get_variable("embeddings", shape=[options['n_words'], options['dim_word']],
initializer=tf.constant_initializer(numpy.array(
params['Wemb']))) # tf.constant_initializer(numpy.array(params['Wemb']))
return embeddings
def build_model(embedding, options):
""" Builds the entire computational graph used for training
"""
# description string: #words x #samples
with tf.device('/gpu:0'):
with tf.variable_scope('input'):
x = tf.placeholder(tf.int64, shape=[None, None, None],
name='x') # 3D vector batch,news and sequence(before embedding)40*32*13
x_mask = tf.placeholder(tf.float32, shape=[None, None], name='x_mask') # mask batch,news
x_elmo_d0 = tf.placeholder(tf.float32, shape=[None, None, None, None], name='x_elmo_d0')
y = tf.placeholder(tf.int64, shape=[None], name='y')
x_d1 = tf.placeholder(tf.int64, shape=[None, None, None, None], name='x_d1')
x_d1_mask = tf.placeholder(tf.float32, shape=[None, None, None], name='x_d1_mask')
x_elmo_d1 = tf.placeholder(tf.float32, shape=[None, None, None, None, None], name='x_elmo_d1')
x_d2 = tf.placeholder(tf.int64, shape=[None, None, None, None], name='x_d2')
x_d2_mask = tf.placeholder(tf.float32, shape=[None, None, None], name='x_d2_mask')
x_elmo_d2 = tf.placeholder(tf.float32, shape=[None, None, None, None, None], name='x_elmo_d2')
final_mask = tf.placeholder(tf.float32, shape=[None, None], name='final_mask')
tech = tf.placeholder(tf.float32, shape=[None, None, 7], name='technical') # shape is batch time unit
# final_mask shape is day*n_samples
##TODO important
keep_prob = tf.placeholder(tf.float32, [], name='keep_prob')
is_training = tf.placeholder(tf.bool, name='is_training')
##TODO important
sequence_mask = tf.cast(tf.abs(tf.sign(x)), tf.float32) # 3D
sequence_d1_mask = tf.cast(tf.abs(tf.sign(x_d1)), tf.float32) # 4D
sequence_d2_mask = tf.cast(tf.abs(tf.sign(x_d2)), tf.float32) # 4D
n_timesteps = tf.shape(x)[0] # time steps
n_samples = tf.shape(x)[1] # n samples
# # word embedding
##TODO word embedding
emb = tf.nn.embedding_lookup(embedding, x)
emb_d1 = tf.nn.embedding_lookup(embedding, x_d1)
emb_d2 = tf.nn.embedding_lookup(embedding, x_d2)
'''if options['use_dropout']:
emb = layers.dropout(emb, keep_prob=keep_prob, is_training=is_training)
'''
with tf.device('/gpu:0'):
# fed into the input of BILSTM from the official document
##TODO word level LSTM
with tf.name_scope('news'):
att = news(emb, sequence_mask, x_mask, keep_prob, is_training, options, x_elmo_d0)
##TODO att shape 32*200 att_day1 32*2*200 att_day2 32*4*200
with tf.name_scope('day1'):
att_day1 = days(emb_d1, sequence_d1_mask, x_d1_mask, keep_prob, is_training, options, x_elmo_d1)
# TODO bilstm layers
# Change the time step and batch
with tf.device('/gpu:0'):
with tf.name_scope('day2'):
att_day2 = days(emb_d2, sequence_d2_mask, x_d2_mask, keep_prob, is_training, options, x_elmo_d2)
with tf.name_scope('final'):
final = tf.concat([att_day2, att_day1, tf.expand_dims(att, 1)], 1)
'''if options['use_dropout']:
final = layers.dropout(final, keep_prob=keep_prob, is_training=is_training)
'''
# final shape is 8*32*600
if options['last_layer'] == 'LSTM':
final = bilstm_filter(final, final_mask, keep_prob, prefix='day_lstm', dim=100,
is_training=is_training) # output shape: batch,time_step,2*lstm_unit(concate) 32*7*600
# tech_ind = lstm_filter(tech, tf.ones(shape=[tf.shape(tech)[0],tf.shape(tech)[1]]), keep_prob, prefix='tech_lstm', dim=50,
# is_training=is_training)
##TODO day level attention
att_final = attention_v2(tf.concat(final, 2), final_mask, name='day_attention', keep=keep_prob, r=4,
is_training=is_training) # already masked after attention
##TODO take day lstm average
# att_final = tf.reduce_mean(tf.concat(final,2),1)
# tech_att = tf.reduce_mean(tf.concat(tech_ind,2),1)
##TODO take the lasts
# tech_att=tech_ind[:,-1,:]
# att_final = tf.concat([att_final,tech_att],axis=1)
logit = tf.layers.dense(att_final, 100, activation=tf.nn.tanh, use_bias=True,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='ff', reuse=tf.AUTO_REUSE)
# logit = tf.layers.batch_normalization(logit, training=is_training)
# logit=tf.nn.tanh(logit)
'''
# logit1 = tf.reduce_sum(tf.concat(final,2) * tf.expand_dims(final_mask,-1),0) / tf.expand_dims(tf.reduce_sum(final_mask,0),1)
# logit2 = tf.reduce_max(ctx3 * tf.expand_dims(x1_mask,2),0)
'''
if options['last_layer'] == 'CNN':
att_ctx = tf.concat([att_day1, tf.expand_dims(att, 1)], 1)
xavier = layers.xavier_initializer(uniform=True, seed=None, dtype=tf.float32)
conv1 = tf.layers.conv1d(att_ctx, filters=options['CNN_filter'],
kernel_size=options['CNN_kernel'], padding='same', strides=1,
activation=tf.nn.relu, kernel_initializer=xavier, name='conv1')
conv2 = tf.layers.conv1d(final, filters=options['CNN_filter'],
kernel_size=options['CNN_kernel'], padding='same',
strides=1, activation=tf.nn.relu,
kernel_initializer=xavier,
name='conv2')
pool1 = tf.layers.max_pooling1d(conv1, pool_size=2, strides=2, padding='same',
data_format='channels_last', name='pool1')
pool2 = tf.layers.max_pooling1d(conv2, pool_size=2, strides=2, padding='same',
data_format='channels_last', name='pool2')
d1size = math.ceil(options['delay1'] / 2) * options['CNN_filter']
d2size = math.ceil(options['delay2'] / 2) * options['CNN_filter']
pool1_flat = tf.reshape(pool1, [-1, d1size])
pool2_flat = tf.reshape(pool2, [-1, d2size])
cnn_final = tf.concat([att, pool1_flat, pool2_flat], -1)
logit = tf.layers.dense(cnn_final, 300, activation=tf.nn.tanh, use_bias=True,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='ff', reuse=tf.AUTO_REUSE)
# logit = tf.layers.batch_normalization(logit, training=is_training)
# logit=tf.nn.tanh(logit)
if options['use_dropout']:
logit = layers.dropout(logit, keep_prob=keep_prob, is_training=is_training, seed=None)
pred = tf.layers.dense(logit, 2, activation=None, use_bias=True,
kernel_initializer=layers.xavier_initializer(uniform=True, seed=None,
dtype=tf.float32),
name='fout', reuse=tf.AUTO_REUSE)
logger.info('Building f_cost...')
# todo not same
labels = tf.one_hot(y, depth=2, axis=1)
# labels = y
preds = tf.nn.softmax(pred, 1, name='softmax')
# preds = tf.nn.sigmoid(pred)
# pred=tf.reshape(pred,[-1])
cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels)
# cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,logits=pred),1)
# cost = -tf.reduce_sum((tf.cast(labels, tf.float32) * tf.log(preds + 1e-8)),axis=1)
# cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=y)
logger.info('Done')
'''
logit1 = tf.reduce_sum(ctx1 * tf.expand_dims(x_mask, 2), 0) / tf.expand_dims(tf.reduce_sum(x_mask, 0), 1)
logit2 = tf.reduce_max(ctx1 * tf.expand_dims(x_mask, 2), 0)
logit = tf.concat([logit1, logit2], 1)
'''
with tf.variable_scope('logging'):
tf.summary.scalar('current_cost', tf.reduce_mean(cost))
tf.summary.histogram('predicted_value', preds)
summary = tf.summary.merge_all()
return is_training, cost, x, x_mask, y, n_timesteps, preds, summary
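# Illustrative note (an assumption about intended use, not original text): the placeholders
# are created under the 'input/' scope, so training and evaluation feed them by name, e.g.
#
#   is_training, cost, x, x_mask, y, n_timesteps, preds, summary = build_model(embedding, options)
#   probs = sess.run(preds, feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
#                                      'input/keep_prob:0': 1.0, 'input/is_training:0': False, ...})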
def predict_pro_acc(sess, cost, prepare_data, model_options, iterator, maxlen, correct_pred, pred, summary, eidx,
is_training, train_op, plot=None, writer=None, validate=False):
# fo = open(_s(prefix,'pre.txt'), "w")
num = 0
valid_acc = 0
total_cost = 0
loss = 0
result = 0
final_result = []
# sess.add_tensor_filter("val_test_spot")
for x_sent, x_d1_sent, x_d2_sent, y_sent, y_tech, elmo_d0, elmo_d1, elmo_d2 in iterator:
num += len(x_sent)
data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y, final_mask, max_sequence_d0, \
max_sequence_d1, max_sequence_d2 = prepare_data(
x_sent,
x_d1_sent,
x_d2_sent,
y_sent,
model_options,
maxlen=maxlen)
if validate is True:
elmo_d0_embedding = get_elmo(elmo_d0, max_sequence_d0, model_options['cut_news'],
model_options['validate_elmo'])
elmo_d1_embedding = get_elmo(elmo_d1, max_sequence_d1, model_options['cut_news'],
model_options['validate_elmo'], day_flag=True)
elmo_d2_embedding = get_elmo(elmo_d2, max_sequence_d2, model_options['cut_news'],
model_options['validate_elmo'], day_flag=True)
else:
elmo_d0_embedding = get_elmo(elmo_d0, max_sequence_d0, model_options['cut_news'],
model_options['test_elmo'])
elmo_d1_embedding = get_elmo(elmo_d1, max_sequence_d1, model_options['cut_news'],
model_options['test_elmo'], day_flag=True)
elmo_d2_embedding = get_elmo(elmo_d2, max_sequence_d2, model_options['cut_news'],
model_options['test_elmo'], day_flag=True)
loss, result, preds = sess.run([cost, correct_pred, pred],
feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
'input/x_elmo_d0:0': elmo_d0_embedding,
'input/y:0': data_y, 'input/x_d1:0': data_x_d1,
'input/x_d1_mask:0': data_x_d1_mask,
'input/x_elmo_d1:0': elmo_d1_embedding,
'input/x_d2:0': data_x_d2, 'input/x_d2_mask:0': data_x_d2_mask,
'input/x_elmo_d2:0': elmo_d2_embedding,
'input/final_mask:0': final_mask,
'input/technical:0': y_tech,
'input/keep_prob:0': 1.0,
'input/is_training:0': is_training})
valid_acc += result.sum()
total_cost += loss.sum()
if plot is not None:
if validate is True:
plot['validate'].append(loss.sum() / len(x_sent))
else:
plot['testing'].append(loss.sum() / len(x_sent))
final_result.extend(result.tolist())
final_acc = 1.0 * valid_acc / num
final_loss = 1.0 * total_cost / num
# if writer is not None:
# writer.add_summary(test_summary, eidx)
# print result,preds,loss,result_
print(preds, result, num)
return final_acc, final_loss, final_result
def train(
dim_word=100, # word vector dimensionality
dim=100, # the number of GRU units
encoder='lstm', # encoder model
decoder='lstm', # decoder model
patience=10, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
decay_c=0., # L2 regularization penalty
clip_c=-1., # gradient clipping threshold
lrate=0.0004, # learning rate
n_words=100000, # vocabulary size
n_words_lemma=100000,
maxlen=100, # maximum length of the description
optimizer='adam',
batch_size=32,
valid_batch_size=32,
save_model='../../models/',
saveto='model.npz',
dispFreq=100,
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
use_dropout=False,
reload_=False,
verbose=False, # print verbose information for debug but slow speed
delay1=3,
delay2=7,
delay_tech=5,
types='title',
cut_word=False,
cut_news=False,
last_layer="LSTM",
CNN_filter=64,
CNN_kernel=3,
keep_prob=0.8,
datasets=[],
valid_datasets=[],
test_datasets=[],
tech_data=[],
dictionary=[],
kb_dicts=[],
embedding='', # pretrain embedding file, such as word2vec, GLOVE
dim_kb=5,
RUN_NAME="histogram_visualization",
wait_N=10,
train_elmo='',
validate_elmo='',
test_elmo=''
):
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s: %(name)s: %(levelname)s: %(message)s",
filename='./log_result.txt')
# Model options
model_options = locals().copy()
# tf.reset_default_graph()
# tf.set_random_seed(2345)
with open(dictionary, 'rb') as f:
worddicts = pkl.load(f)
logger.info("Loading knowledge base ...")
# reload options
if reload_ and os.path.exists(saveto):
logger.info("Reload options")
with open('%s.pkl' % saveto, 'rb') as f:
model_options = pkl.load(f)
logger.debug(pprint.pformat(model_options))
logger.info("Loading data")
train = TextIterator(datasets[0], datasets[1], tech_data,
dict=dictionary,
delay1=delay1,
delay2=delay2,
delay_tech=delay_tech,
types=types,
n_words=n_words,
batch_size=batch_size,
cut_word=cut_word,
cut_news=cut_news,
shuffle=True, shuffle_sentence=False)
train_valid = TextIterator(datasets[0], datasets[1], tech_data,
dict=dictionary,
delay1=delay1,
delay2=delay2,
delay_tech=delay_tech,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_news=cut_news,
shuffle=False, shuffle_sentence=False)
valid = TextIterator(valid_datasets[0], valid_datasets[1], tech_data,
dict=dictionary,
delay1=delay1,
delay2=delay2,
delay_tech=delay_tech,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_news=cut_news,
shuffle=False, shuffle_sentence=False)
test = TextIterator(test_datasets[0], test_datasets[1], tech_data,
dict=dictionary,
delay1=delay1,
delay2=delay2,
delay_tech=delay_tech,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_news=cut_news,
shuffle=False, shuffle_sentence=False)
# Initialize (or reload) the parameters using 'model_options'
# then build the tensorflow graph
logger.info("init_word_embedding")
params = init_params(model_options, worddicts)
embedding = word_embedding(model_options, params)
is_training, cost, x, x_mask, y, n_timesteps, pred, summary = build_model(embedding, model_options)
with tf.variable_scope('train'):
lr = tf.Variable(0.0, trainable=False)
def assign_lr(session, lr_value):
session.run(tf.assign(lr, lr_value))
logger.info('Building optimizers...')
# optimizer = tf.train.AdamOptimizer(learning_rate=lr)
optimizer = tf.train.AdadeltaOptimizer(learning_rate=lr, rho=0.95)
logger.info('Done')
# print all variables
tvars = tf.trainable_variables()
for var in tvars:
print(var.name, var.shape)
lossL = tf.add_n([tf.nn.l2_loss(v) for v in tvars if ('embeddings' not in v.name and 'bias' not in v.name)]) #
lossL2 = lossL * 0.0005
print("don't do L2 variables:")
print([v.name for v in tvars if ('embeddings' in v.name or 'bias' in v.name)])
print("\n do L2 variables:")
print([v.name for v in tvars if ('embeddings' not in v.name and 'bias' not in v.name)])
cost = cost + lossL2
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), model_options['clip_c'])
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_op = optimizer.apply_gradients(zip(grads, tvars))
# train_op = optimizer.minimize(cost)
op_loss = tf.reduce_mean(cost)
op_L2 = tf.reduce_mean(lossL)
logger.info("correct_pred")
correct_pred = tf.equal(tf.argmax(input=pred, axis=1), y) # make prediction
logger.info("Done")
temp_accuracy = tf.cast(correct_pred, tf.float32) # change to float32
logger.info("init variables")
init = tf.global_variables_initializer()
logger.info("Done")
# saver
saver = tf.train.Saver(max_to_keep=15)
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.4
config.gpu_options.allow_growth = True
# gpu_options = tf.GPUOptions(allow_growth=True)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
training_writer = tf.summary.FileWriter("./logs/{}/training".format(RUN_NAME), sess.graph)
validate_writer = tf.summary.FileWriter("./logs/{}/validate".format(RUN_NAME), sess.graph)
testing_writer = tf.summary.FileWriter("./logs/{}/testing".format(RUN_NAME), sess.graph)
sess.run(init)
history_errs = []
history_valid_result = []
history_test_result = []
# reload history
if reload_ and os.path.exists(saveto):
logger.info("Reload history error")
history_errs = list(numpy.load(saveto)['history_errs'])
bad_counter = 0
if validFreq == -1:
validFreq = len(train[0]) / batch_size
if saveFreq == -1:
saveFreq = len(train[0]) / batch_size
loss_plot = defaultdict(list)
uidx = 0
estop = False
valid_acc_record = []
test_acc_record = []
best_num = -1
best_epoch_num = 0
lr_change_list = []
fine_tune_flag = 0
wait_counter = 0
wait_N = model_options['wait_N']
learning_rate = model_options['lrate']
assign_lr(sess, learning_rate)
for eidx in range(max_epochs):
n_samples = 0
training_cost = 0
training_acc = 0
for x, x_d1, x_d2, y, y_tech, elmo_d0, elmo_d1, elmo_d2 in train:
n_samples += len(x)
uidx += 1
keep_prob = model_options['keep_prob']
is_training = True
data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y, final_mask,\
max_sequence_d0, max_sequence_d1, max_sequence_d2 = prepare_data(
x,
x_d1,
x_d2,
y,
model_options,
maxlen=maxlen)
elmo_d0_embedding = get_elmo(elmo_d0, max_sequence_d0, model_options['cut_news'],
model_options['train_elmo'])
elmo_d1_embedding = get_elmo(elmo_d1, max_sequence_d1, model_options['cut_news'],
model_options['train_elmo'], day_flag=True)
elmo_d2_embedding = get_elmo(elmo_d2, max_sequence_d2, model_options['cut_news'],
model_options['train_elmo'], day_flag=True)
print(data_x.shape, data_x_mask.shape, data_x_d1.shape, data_x_d1_mask.shape, data_x_d2.shape,
data_x_d2_mask.shape, final_mask.shape, data_y.shape)
assert data_y.shape[0] == data_x.shape[0], 'Size does not match'
if x is None:
logger.debug('Minibatch with zero sample under length {0}'.format(maxlen))
uidx -= 1
continue
ud_start = time.time()
_, loss, loss_no_mean, temp_acc, l2_check = sess.run([train_op, op_loss, cost, temp_accuracy, op_L2],
feed_dict={'input/x:0': data_x,
'input/x_mask:0': data_x_mask,
'input/x_elmo_d0:0': elmo_d0_embedding,
'input/y:0': data_y,
'input/x_d1:0': data_x_d1,
'input/x_d1_mask:0': data_x_d1_mask,
'input/x_elmo_d1:0': elmo_d1_embedding,
'input/x_d2:0': data_x_d2,
'input/x_d2_mask:0': data_x_d2_mask,
'input/x_elmo_d2:0': elmo_d2_embedding,
'input/final_mask:0': final_mask,
'input/technical:0': y_tech,
'input/keep_prob:0': keep_prob,
'input/is_training:0': is_training})
ud = time.time() - ud_start
training_cost += loss_no_mean.sum()
training_acc += temp_acc.sum()
loss_plot['training'].append(loss)
'''train_summary = sess.run(summary, feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
'input/y:0': data_y,'input/keep_prob:0':keep_prob,'input/is_training:0':is_training})
training_writer.add_summary(train_summary, eidx)'''
if numpy.mod(uidx, dispFreq) == 0:
logger.debug('Epoch {0} Update {1} Cost {2} L2 {3} TIME {4}'.format(eidx, uidx, loss, l2_check, ud))
# validate model on validation set and early stop if necessary
if numpy.mod(uidx, validFreq) == 0:
is_training = False
valid_acc, valid_loss, valid_final_result = predict_pro_acc(sess, cost, prepare_data, model_options,
valid, maxlen,
correct_pred, pred, summary, eidx,
is_training, train_op, loss_plot,
validate_writer, validate=True)
test_acc, test_loss, test_final_result = predict_pro_acc(sess, cost, prepare_data, model_options,
test, maxlen,
correct_pred, pred, summary, eidx,
is_training, train_op, loss_plot,
testing_writer)
# valid_err = 1.0 - valid_acc
valid_err = valid_loss
history_errs.append(valid_err)
history_valid_result.append(valid_final_result)
history_test_result.append(test_final_result)
loss_plot['validate_ep'].append(valid_loss)
loss_plot['testing_ep'].append(test_loss)
logger.debug('Epoch {0}'.format(eidx))
logger.debug('Valid cost {0}'.format(valid_loss))
logger.debug('Valid accuracy {0}'.format(valid_acc))
logger.debug('Test cost {0}'.format(test_loss))
logger.debug('Test accuracy {0}'.format(test_acc))
logger.debug('learning_rate: {0}'.format(learning_rate))
valid_acc_record.append(valid_acc)
test_acc_record.append(test_acc)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_num = best_num + 1
best_epoch_num = eidx
wait_counter = 0
logger.info("Saving...")
saver.save(sess, _s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
logger.info(_s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('{}.pkl'.format(saveto), 'wb'))
logger.info("Done")
if valid_err > numpy.array(history_errs).min():
wait_counter += 1
# wait_counter +=1 if valid_err>numpy.array(history_errs).min() else 0
if wait_counter >= wait_N:
logger.info("wait_counter max, need to half the lr")
# print 'wait_counter max, need to half the lr'
bad_counter += 1
wait_counter = 0
logger.debug('bad_counter: {0}'.format(bad_counter))
                        # TODO change the learning rate
# learning_rate = learning_rate * 0.9
# learning_rate = learning_rate
# assign_lr(sess, learning_rate)
lr_change_list.append(eidx)
logger.debug('lrate change to: {0}'.format(learning_rate))
# print 'lrate change to: ' + str(lrate)
if bad_counter > patience and fine_tune_flag == 0:
logger.debug('ATTENTION! INTO FINE TUNING STAGE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
optimizer = tf.train.MomentumOptimizer(learning_rate=0.000001, momentum=0.6)
fine_tune_flag = 1
bad_counter = 0
if bad_counter > patience and fine_tune_flag == 1:
logger.info("Early Stop!")
estop = True
break
if numpy.isnan(valid_err):
pdb.set_trace()
# finish after this many updates
if uidx >= finish_after:
logger.debug('Finishing after iterations! {0}'.format(uidx))
# print 'Finishing after %d iterations!' % uidx
estop = True
break
logger.debug('Seen samples: {0}'.format(n_samples))
logger.debug('Training accuracy: {0}'.format(1.0 * training_acc / n_samples))
loss_plot['training_ep'].append(training_cost / n_samples)
# print 'Seen %d samples' % n_samples
logger.debug('Saved loss_plot pickle')
with open("important_plot.pickle", 'wb') as handle:
pkl.dump(loss_plot, handle, protocol=pkl.HIGHEST_PROTOCOL)
if estop:
break
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
# Restore variables from disk.
saver.restore(sess, _s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
keep_prob = 1
is_training = False
logger.info('=' * 80)
logger.info('Final Result')
logger.info('=' * 80)
logger.debug('best epoch {0}'.format(best_epoch_num))
        valid_acc, valid_cost, valid_final_result = predict_pro_acc(sess, cost, prepare_data, model_options, valid,
                                                                     maxlen, correct_pred, pred, summary, eidx,
                                                                     is_training, train_op, None)
logger.debug('Valid cost {0}'.format(valid_cost))
logger.debug('Valid accuracy {0}'.format(valid_acc))
# print 'Valid cost', valid_cost
# print 'Valid accuracy', valid_acc
        test_acc, test_cost, test_final_result = predict_pro_acc(sess, cost, prepare_data, model_options, test,
                                                                  maxlen, correct_pred, pred, summary, eidx,
                                                                  is_training, train_op, None)
logger.debug('Test cost {0}'.format(test_cost))
logger.debug('Test accuracy {0}'.format(test_acc))
# print 'best epoch ', best_epoch_num
        train_acc, train_cost, _ = predict_pro_acc(sess, cost, prepare_data, model_options, train_valid,
                                                   maxlen, correct_pred, pred, summary, eidx, is_training,
                                                   train_op, None)
logger.debug('Train cost {0}'.format(train_cost))
logger.debug('Train accuracy {0}'.format(train_acc))
valid_m = numpy.array(history_valid_result)
test_m = numpy.array(history_test_result)
valid_final_result = (numpy.array([valid_final_result]) == False)
test_final_result = (numpy.array([test_final_result]) == False)
# print(numpy.all(valid_m, axis = 0))
# print(numpy.all(test_m, axis=0))
print('validation: all prediction through every epoch that are the same:',
numpy.where(numpy.all(valid_m, axis=0)))
# Not right
print('testing: all prediction through every epoch that are the same:', numpy.where(numpy.all(test_m, axis=0)))
print('validation: final prediction that is False:', numpy.where(valid_final_result))
print('testing: final prediction that is False:', numpy.where(test_final_result))
if os.path.exists('history_predict.npz'):
logger.info("Load and save to history_predict.npz")
valid_history = numpy.load('history_predict.npz')['valid_final_result']
test_history = numpy.load('history_predict.npz')['test_final_result']
vv = numpy.concatenate((valid_history, valid_final_result), axis=0)
        tt = numpy.concatenate((test_history, test_final_result), axis=0)
print('Concate shape valid:', vv.shape)
print('Print all validate history outputs that return False', numpy.where(numpy.all(vv, axis=0)))
print('Concate shape test:', tt.shape)
print('Print all test history outputs that return False', numpy.where(numpy.all(tt, axis=0)))
numpy.savez('history_predict.npz', valid_final_result=vv, test_final_result=tt, **params)
else:
numpy.savez('history_predict.npz', valid_final_result=valid_final_result,
test_final_result=test_final_result, **params)
# print 'Train cost', train_cost
# print 'Train accuracy', train_acc
# print 'Test cost ', test_cost
# print 'Test accuracy ', test_acc
return None
if __name__ == '__main__':
pass
|
458937
|
import random
from adsimulator.templates.domains import get_functional_level_list, TLD_LIST
from adsimulator.templates.default_values import get_complementary_value
from adsimulator.utils.boolean import generate_boolean_value
from adsimulator.utils.parameters import get_dict_param_value, get_perc_param_value, print_domain_generation_parameters
from adsimulator.utils.domains import generate_trust_sid
def generate_domain(session, domain_name, domain_sid, domain_dn, parameters):
prob = get_dict_param_value("Domain", "functionalLevelProbability", parameters)
functional_level = random.choice(get_functional_level_list(prob))
print_domain_generation_parameters(prob)
session.run(
"""
MERGE (n:Base {name:$domain}) SET n:Domain, n.highvalue=true, n.objectid=$objectid,
n.distinguishedname=$dn, n.functionallevel=$fl
""",
domain=domain_name,
objectid=domain_sid,
dn=domain_dn,
fl=functional_level
)
return functional_level
def generate_trusts(session, domain_name, domain_names, parameters):
available_names = domain_names
current_domain_name = str(domain_name).split(".")[0]
if current_domain_name in available_names:
available_names.remove(current_domain_name)
sid_filtering_perc = get_perc_param_value("Domain", "Trusts", parameters)["SIDFilteringProbability"]
n_trust = get_dict_param_value("Domain", "Trusts", parameters)
if len(available_names) < n_trust["Inbound"]:
n_trust["Inbound"] = len(available_names)
print("Generating", str(n_trust["Inbound"]), "inbound domain trusts (sidfiltering probability:", sid_filtering_perc, "%)")
for i in range(0, n_trust["Inbound"]):
available_names = generate_inbound_trust(session, domain_name, available_names, sid_filtering_perc)
if len(available_names) < n_trust["Outbound"]:
n_trust["Outbound"] = len(available_names)
print("Generating", str(n_trust["Outbound"]), "outbound domain trusts (sidfiltering probability:", sid_filtering_perc, "%)")
for i in range(0, n_trust["Outbound"]):
available_names = generate_outbound_trust(session, domain_name, available_names, sid_filtering_perc)
if len(available_names) < n_trust["Bidirectional"]:
n_trust["Bidirectional"] = len(available_names)
print("Generating", str(n_trust["Bidirectional"]), "bidirectional domain trusts (sidfiltering probability:", sid_filtering_perc, "%)")
for i in range(0, n_trust["Bidirectional"]):
available_names = generate_bidirectional_trust(session, domain_name, available_names, sid_filtering_perc)
def generate_inbound_trust(session, domain_name, available_names, sid_filtering_perc):
sid_filtering = generate_boolean_value(sid_filtering_perc, get_complementary_value(sid_filtering_perc))
name = random.choice(available_names)
available_names.remove(name)
trust_name = name + random.choice(TLD_LIST)
trust_sid = generate_trust_sid()
session.run(
"""
MERGE (n:Domain {name:$domainname})
WITH n
MERGE (m: Domain {name:$trustname, objectid:$trustsid})
WITH n,m
MERGE (n)-[:TrustedBy {isacl:false, sidfiltering: $sidfiltering, transitive: 'false', trusttype: 'Forest'}]->(m)
""",
trustname=trust_name,
trustsid=trust_sid,
domainname=domain_name,
sidfiltering=sid_filtering
)
return available_names
def generate_outbound_trust(session, domain_name, available_names, sid_filtering_perc):
sid_filtering = generate_boolean_value(sid_filtering_perc, get_complementary_value(sid_filtering_perc))
name = random.choice(available_names)
available_names.remove(name)
trust_name = name + random.choice(TLD_LIST)
trust_sid = generate_trust_sid()
session.run(
"""
MERGE (n:Domain {name:$domainname})
WITH n
MERGE (m: Domain {name:$trustname, objectid:$trustsid})
WITH n,m
MERGE (m)-[:TrustedBy {isacl:false, sidfiltering: $sidfiltering, transitive: 'false', trusttype: 'Forest'}]->(n)
""",
trustname=trust_name,
trustsid=trust_sid,
domainname=domain_name,
sidfiltering=sid_filtering
)
return available_names
def generate_bidirectional_trust(session, domain_name, available_names, sid_filtering_perc):
sid_filtering = generate_boolean_value(sid_filtering_perc, get_complementary_value(sid_filtering_perc))
name = random.choice(available_names)
available_names.remove(name)
trust_name = name + "." + domain_name
trust_sid = generate_trust_sid()
session.run(
"""
MERGE (n:Domain {name:$domainname})
WITH n
MERGE (m: Domain {name:$trustname, objectid:$trustsid})
WITH n,m
MERGE (n)-[:TrustedBy {isacl:false, sidfiltering: $sidfiltering, transitive: 'true', trusttype: 'ParentChild'}]->(m)
MERGE (m)-[:TrustedBy {isacl:false, sidfiltering: $sidfiltering, transitive: 'true', trusttype: 'ParentChild'}]->(n)
""",
trustname=trust_name,
trustsid=trust_sid,
domainname=domain_name,
sidfiltering=sid_filtering
)
return available_names
|
458944
|
import numpy as np
import pytest
import torch
from obp.policy.offline_continuous import ContinuousNNPolicyLearner
# dim_context, pg_method, bandwidth, output_space, hidden_layer_size, activation, solver, alpha,
# batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum,
# early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, q_func_estimator_hyperparams, description
invalid_input_of_nn_policy_learner_init = [
(
0, #
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"`dim_context`= 0, must be >= 1",
),
(
10,
"None", #
2,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"pg_method must be one of 'dgp', 'ipw', or 'dr'",
),
(
10,
"ipw",
-0.1, #
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"`bandwidth`= -0.1, must be > 0",
),
(
10,
"ipw",
0.1,
("", ""), #
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"output_space must be tuple of integers or floats",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, ""), #
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"hidden_layer_size must be tuple of positive integers",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"None", #
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"activation must be one of 'identity', 'logistic', 'tanh', 'relu', or 'elu'",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"None", #
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"solver must be one of 'adam', 'adagrad', or 'sgd'",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
-1.0, #
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"`alpha`= -1.0, must be >= 0.0",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
0, #
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"batch_size must be a positive integer or 'auto'",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0, #
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"`learning_rate_init`= 0.0, must be > 0.0",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
0, #
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"`max_iter`= 0, must be >= 1",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
None, #
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"shuffle must be a bool",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
"", #
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"'' cannot be used to seed",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
-1.0, #
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"`tol`= -1.0, must be > 0.0",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
2.0, #
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"`momentum`= 2.0, must be <= 1.0",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
"", #
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"nesterovs_momentum must be a bool",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
None, #
0.1,
0.9,
0.999,
1e-8,
10,
None,
"early_stopping must be a bool",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"lbfgs", #
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True, #
0.1,
0.9,
0.999,
1e-8,
10,
None,
"solver must be one of 'adam', 'adagrad', or 'sgd',",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
2.0, #
0.9,
0.999,
1e-8,
10,
None,
"`validation_fraction`= 2.0, must be <= 1.0",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
2.0, #
0.999,
1e-8,
10,
None,
"`beta_1`= 2.0, must be <= 1.0",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
2.0, #
1e-8,
10,
None,
"`beta_2`= 2.0, must be <= 1.0",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
-1.0, #
10,
None,
"`epsilon`= -1.0, must be >= 0.0",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
0, #
None,
"`n_iter_no_change`= 0, must be >= 1",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
"", #
"q_func_estimator_hyperparams must be a dict,",
),
]
valid_input_of_nn_policy_learner_init = [
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"valid input",
),
(
10,
"dpg",
None,
(-10, 10),
(100, 50, 100),
"relu",
"adam",
0.001,
"auto",
0.0001,
200,
True,
123,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
None,
"valid input",
),
(
10,
"ipw",
0.1,
(-10, 10),
(100, 50, 100),
"logistic",
"sgd",
0.001,
50,
0.0001,
200,
True,
None,
1e-4,
0.9,
True,
True,
0.1,
0.9,
0.999,
1e-8,
10,
{},
"valid input",
),
]
@pytest.mark.parametrize(
"dim_context, pg_method, bandwidth, output_space, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, q_func_estimator_hyperparams, description",
invalid_input_of_nn_policy_learner_init,
)
def test_nn_policy_learner_init_using_invalid_inputs(
dim_context,
pg_method,
bandwidth,
output_space,
hidden_layer_size,
activation,
solver,
alpha,
batch_size,
learning_rate_init,
max_iter,
shuffle,
random_state,
tol,
momentum,
nesterovs_momentum,
early_stopping,
validation_fraction,
beta_1,
beta_2,
epsilon,
n_iter_no_change,
q_func_estimator_hyperparams,
description,
):
with pytest.raises(ValueError, match=f"{description}*"):
_ = ContinuousNNPolicyLearner(
dim_context=dim_context,
pg_method=pg_method,
bandwidth=bandwidth,
output_space=output_space,
hidden_layer_size=hidden_layer_size,
activation=activation,
solver=solver,
alpha=alpha,
batch_size=batch_size,
learning_rate_init=learning_rate_init,
max_iter=max_iter,
shuffle=shuffle,
random_state=random_state,
tol=tol,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
n_iter_no_change=n_iter_no_change,
q_func_estimator_hyperparams=q_func_estimator_hyperparams,
)
@pytest.mark.parametrize(
"dim_context, pg_method, bandwidth, output_space, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, q_func_estimator_hyperparams, description",
valid_input_of_nn_policy_learner_init,
)
def test_nn_policy_learner_init_using_valid_inputs(
dim_context,
pg_method,
bandwidth,
output_space,
hidden_layer_size,
activation,
solver,
alpha,
batch_size,
learning_rate_init,
max_iter,
shuffle,
random_state,
tol,
momentum,
nesterovs_momentum,
early_stopping,
validation_fraction,
beta_1,
beta_2,
epsilon,
n_iter_no_change,
q_func_estimator_hyperparams,
description,
):
nn_policy_learner = ContinuousNNPolicyLearner(
dim_context=dim_context,
pg_method=pg_method,
bandwidth=bandwidth,
output_space=output_space,
hidden_layer_size=hidden_layer_size,
activation=activation,
solver=solver,
alpha=alpha,
batch_size=batch_size,
learning_rate_init=learning_rate_init,
max_iter=max_iter,
shuffle=shuffle,
random_state=random_state,
tol=tol,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
n_iter_no_change=n_iter_no_change,
q_func_estimator_hyperparams=q_func_estimator_hyperparams,
)
assert isinstance(nn_policy_learner, ContinuousNNPolicyLearner)
def test_nn_policy_learner_create_train_data_for_opl():
context = np.ones((100, 2), dtype=np.int32)
action = np.zeros(100, dtype=np.int32)
reward = np.ones((100,), dtype=np.float32)
pscore = np.array([0.5] * 100, dtype=np.float32)
learner1 = ContinuousNNPolicyLearner(dim_context=2, pg_method="dpg")
training_loader, validation_loader = learner1._create_train_data_for_opl(
context=context,
action=action,
reward=reward,
pscore=pscore,
)
assert isinstance(training_loader, torch.utils.data.DataLoader)
assert validation_loader is None
learner2 = ContinuousNNPolicyLearner(
dim_context=2,
pg_method="dpg",
early_stopping=True,
)
training_loader, validation_loader = learner2._create_train_data_for_opl(
context=context,
action=action,
reward=reward,
pscore=pscore,
)
assert isinstance(training_loader, torch.utils.data.DataLoader)
assert isinstance(validation_loader, torch.utils.data.DataLoader)
# context, action, reward, pscore, description
invalid_input_of_nn_policy_learner_fit = [
(
5, #
np.ones(5),
np.ones(5),
np.ones(5) * 0.5,
"context must be 2D array",
),
(
np.ones(5), #
np.ones(5),
np.ones(5),
np.ones(5) * 0.5,
"context must be 2D array",
),
(
np.ones((5, 2)),
5, #
np.ones(5),
np.ones(5) * 0.5,
"action_by_behavior_policy must be 1D array",
),
(
np.ones((5, 2)),
np.ones((5, 2)), #
np.ones(5),
np.ones(5) * 0.5,
"action_by_behavior_policy must be 1D array",
),
(
np.ones((5, 2)),
np.ones(5),
5, #
np.ones(5) * 0.5,
"reward must be 1D array",
),
(
np.ones((5, 2)),
np.ones(5),
np.ones((5, 2)), #
np.ones(5) * 0.5,
"reward must be 1D array",
),
(
np.ones((5, 2)),
np.ones(5),
np.ones(5),
0.5, #
"pscore must be 1D array",
),
(
np.ones((5, 2)),
np.ones(5),
np.ones(5),
np.ones((5, 2)) * 0.5, #
"pscore must be 1D array",
),
(
np.ones((4, 2)), #
np.ones(5),
np.ones(5),
np.ones(5) * 0.5,
"Expected `context.shape[0]",
),
(
np.ones((5, 2)),
np.ones(4), #
np.ones(5),
np.ones(5) * 0.5,
"Expected `context.shape[0]",
),
(
np.ones((5, 2)),
np.ones(5),
np.ones(4), #
np.ones(5) * 0.5,
"Expected `context.shape[0]",
),
(
np.ones((5, 2)),
np.ones(5),
np.ones(5),
np.arange(5) * 0.1, #
"pscore must be positive",
),
(
np.ones((5, 3)), #
np.ones(5),
np.ones(5),
np.ones(5) * 0.5,
"Expected `context.shape[1]",
),
]
valid_input_of_nn_policy_learner_fit = [
(
np.ones((5, 2)),
np.ones(5),
np.ones(5),
np.ones(5) * 0.5,
"valid input (pscore is given)",
),
(
np.ones((5, 2)),
np.ones(5),
np.ones(5),
None,
"valid input (pscore is not given)",
),
]
@pytest.mark.parametrize(
"context, action, reward, pscore, description",
invalid_input_of_nn_policy_learner_fit,
)
def test_nn_policy_learner_fit_using_invalid_inputs(
context,
action,
reward,
pscore,
description,
):
with pytest.raises(ValueError, match=f"{description}*"):
# set parameters
dim_context = 2
pg_method = "dpg"
learner = ContinuousNNPolicyLearner(
dim_context=dim_context, pg_method=pg_method
)
learner.fit(
context=context,
action=action,
reward=reward,
pscore=pscore,
)
@pytest.mark.parametrize(
"context, action, reward, pscore, description",
valid_input_of_nn_policy_learner_fit,
)
def test_nn_policy_learner_fit_using_valid_inputs(
context,
action,
reward,
pscore,
description,
):
# set parameters
dim_context = 2
pg_method = "dpg"
learner = ContinuousNNPolicyLearner(dim_context=dim_context, pg_method=pg_method)
learner.fit(
context=context,
action=action,
reward=reward,
pscore=pscore,
)
def test_nn_policy_learner_predict():
# synthetic data
context = np.ones((5, 2))
action = np.ones(5)
reward = np.ones(5)
# set parameters
dim_context = 2
pg_method = "dpg"
output_space = (-10, 10)
learner = ContinuousNNPolicyLearner(
dim_context=dim_context, pg_method=pg_method, output_space=output_space
)
learner.fit(
context=context,
action=action,
reward=reward,
)
# shape error
with pytest.raises(ValueError, match="context must be 2D array"):
learner.predict(context=np.ones(5))
with pytest.raises(ValueError, match="context must be 2D array"):
learner.predict(context="np.ones(5)")
# inconsistency between dim_context and context
with pytest.raises(ValueError, match="Expected `context.shape[1]*"):
learner.predict(context=np.ones((5, 3)))
# check output shape
predicted_actions = learner.predict(context=context)
assert predicted_actions.shape[0] == context.shape[0]
assert predicted_actions.ndim == 1
    assert np.all(output_space[0] <= predicted_actions) and np.all(
        predicted_actions <= output_space[1]
    )
|
458954
|
from collie.utils import merge_docstrings
# START: test class definitions
# NOTE: we include classes here since pytest does not accept classes as fixtures
class BaseClass:
"""
This is the short description.
This is a longer description. It contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Parameters
----------
arg1: str
The first argument
arg2: int
This argument's description is longer.
See how it is on a new line:
* Even with a bullet list now!
arg3: np.array
**kwargs
References
----------
arg8: list
arg9: int
No description above, and that is okay!
"""
def __init__(self, arg1, arg2, arg3, **kwargs):
pass
class ChildClass(BaseClass):
"""
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg4: int
An important argument!
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
"""
def __init__(self, arg1, arg2, arg3, arg4):
pass
class ChildClassWithArgs(BaseClass):
"""
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg4: int
An important argument!
*args: arguments
A description for these args here.
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
"""
def __init__(self, arg1, arg2, arg3, arg4, *args):
pass
class ChildClassWithKwargs(BaseClass):
"""
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg4: int
An important argument!
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
"""
def __init__(self, arg1, arg2, arg3, arg4, **kwargs):
pass
class ChildClassWithArgsAndKwargs(BaseClass):
"""
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg4: int
An important argument!
*args: arguments
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
"""
def __init__(self, arg1, arg2, arg3, arg4, *args, **kwargs):
pass
class ChildClassNoParamaters(BaseClass):
"""
No ``Parameters`` section at all here!
References
----------
arg8
"""
def __init__(self):
pass
class ChildClassParamatersOnly(BaseClass):
"""
Note that nothing is after the ``Parameters`` section here.
Parameters
----------
arg1: str
The first argument
arg4: int
An important argument!
*args: arguments
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
"""
def __init__(self, arg1, arg2, arg3, arg4, *args, **kwargs):
pass
class ChildClassExtraParamatersNoDoc(BaseClass):
"""
Note that nothing is after the ``Parameters`` section here.
Parameters
----------
arg1: str
The first argument
arg4: int
An important argument!
*args: arguments
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
"""
def __init__(self, arg1, arg2, arg3, arg4, extra, *args, **kwargs):
pass
class ChildClassWithTwoExtraSections(BaseClass):
"""
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg4: int
An important argument!
*args: arguments
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
Notes
-----
This is a note. The above ``References`` section used to say ``Returns``, but classes do not
return anything and I did not feel inclined to change the description.
"""
def __init__(self, arg1, arg2, arg3, arg4, *args, **kwargs):
pass
# START: tests
def test_merge_docstrings():
expected = """
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg2: int
This argument's description is longer.
See how it is on a new line:
* Even with a bullet list now!
arg3: np.array
arg4: int
An important argument!
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
"""
actual = merge_docstrings(BaseClass, ChildClass.__doc__, ChildClass.__init__)
print(expected)
print(actual)
assert actual == expected
def test_merge_docstrings_with_args():
expected = """
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg2: int
This argument's description is longer.
See how it is on a new line:
* Even with a bullet list now!
arg3: np.array
arg4: int
An important argument!
*args: arguments
A description for these args here.
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
"""
actual = merge_docstrings(BaseClass, ChildClassWithArgs.__doc__, ChildClassWithArgs.__init__)
assert actual == expected
def test_merge_docstrings_with_kwargs():
expected = """
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg2: int
This argument's description is longer.
See how it is on a new line:
* Even with a bullet list now!
arg3: np.array
arg4: int
An important argument!
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
"""
actual = merge_docstrings(BaseClass,
ChildClassWithKwargs.__doc__,
ChildClassWithKwargs.__init__)
assert actual == expected
def test_merge_docstrings_with_args_and_kwargs():
expected = """
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg2: int
This argument's description is longer.
See how it is on a new line:
* Even with a bullet list now!
arg3: np.array
arg4: int
An important argument!
*args: arguments
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
"""
actual = merge_docstrings(BaseClass,
ChildClassWithArgsAndKwargs.__doc__,
ChildClassWithArgsAndKwargs.__init__)
assert actual == expected
def test_merge_docstrings_no_paramaters_section():
expected = """
No ``Parameters`` section at all here!
References
----------
arg8
"""
actual = merge_docstrings(BaseClass,
ChildClassNoParamaters.__doc__,
ChildClassNoParamaters.__init__)
assert actual == expected
def test_merge_docstrings_parameters_section_nothing_after():
expected = """
Note that nothing is after the ``Parameters`` section here.
Parameters
----------
arg1: str
The first argument
arg2: int
This argument's description is longer.
See how it is on a new line:
* Even with a bullet list now!
arg3: np.array
arg4: int
An important argument!
*args: arguments
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
"""
actual = merge_docstrings(BaseClass,
ChildClassParamatersOnly.__doc__,
ChildClassParamatersOnly.__init__)
assert actual == expected
def test_merge_docstrings_extra_parameter_included_with_no_documentation():
expected = """
Note that nothing is after the ``Parameters`` section here.
Parameters
----------
arg1: str
The first argument
arg2: int
This argument's description is longer.
See how it is on a new line:
* Even with a bullet list now!
arg3: np.array
arg4: int
An important argument!
*args: arguments
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
"""
actual = merge_docstrings(BaseClass,
ChildClassExtraParamatersNoDoc.__doc__,
ChildClassExtraParamatersNoDoc.__init__)
assert actual == expected
def test_merge_docstrings_with_two_extra_sections():
expected = """
This is the short description for the child.
This is a longer description for the child. It also contains many lines.
With line breaks, like this.
You can also have new paragraphs!
NOTE: This is an important note!
Look, a new line of documentation after the note!
Parameters
----------
arg1: str
The first argument
arg2: int
This argument's description is longer.
See how it is on a new line:
* Even with a bullet list now!
arg3: np.array
arg4: int
An important argument!
*args: arguments
**kwargs: keyword argument
Additional keyword arguments to pass into ``BaseClass``
References
----------
arg8: list
arg9: int
No description above, and that is okay!
arg10: str
This one is new.
Notes
-----
This is a note. The above ``References`` section used to say ``Returns``, but classes do not
return anything and I did not feel inclined to change the description.
"""
actual = merge_docstrings(BaseClass,
ChildClassWithTwoExtraSections.__doc__,
ChildClassWithTwoExtraSections.__init__)
assert actual == expected
|
459009
|
from flask import Flask, request
from flask_restful import Api, Resource
import json
import os
app = Flask(__name__)
api = Api(app)
class UserList(Resource):
filename = 'users.json'
def get_users(self):
users = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
users = json.loads(fp.read())
return users
def get(self):
if not os.path.exists(self.filename):
            return 'users.json does not exist'
r = self.get_users()
s = ''
for d in r:
email = d['email']
password = d['password']
s += '[email: {}, pw: {}]'.format(email, password)
return s
def post(self):
r_json = request.get_json()
email = r_json['email']
password = r_json['password']
r = self.get_users()
for d in r:
if email == d['email']:
                return '{} already exists'.format(email)
_id = 0
for d in r:
_id = max(_id, d['id'])
_id = _id + 1
r_json['id'] = _id
r.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
return 'email: {}, pw: {}'.format(email, password)
def put(self):
r_json = request.get_json()
_id = r_json['id']
password = r_json['password']
users = self.get_users()
found = False
for idx, _ in enumerate(users):
if users[idx]['id'] == _id:
found = True
users[idx]['password'] = password
if not found:
            return '{} does not exist'.format(_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(users))
return 'update password successfully'
def delete(self):
r_json = request.get_json()
_id = r_json['id']
users = self.get_users()
found = False
for idx, _ in enumerate(users):
if users[idx]['id'] == _id:
found = True
del users[idx]
if not found:
            return '{} does not exist'.format(_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(users))
return '{} deleted successfully'.format(_id)
class ArticleList(Resource):
filename = 'articles.json'
def get_articles(self):
articles = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
articles = json.loads(fp.read())
return articles
def get(self):
if not os.path.exists(self.filename):
            return 'articles.json does not exist'
r = self.get_articles()
s = ''
for d in r:
article_id = d['id']
user_id = d['user_id']
title = d['title']
content = d['content']
s += '[id: {}, user_id: {}, title: {}, content: {}]'.format(article_id, user_id, title, content)
return s
def post(self):
r_json = request.get_json()
user_id = r_json['user_id']
title = r_json['title']
content = r_json['content']
r = self.get_articles()
a_id = 0
for d in r:
a_id = max(a_id, d['id'])
a_id = a_id + 1
r_json['id'] = a_id
r.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
        return 'The content of user {} is posted successfully'.format(user_id)
def put(self):
r_json = request.get_json()
article_id = r_json['id']
title = r_json['title']
content = r_json['content']
r = self.get_articles()
found = False
for idx, _ in enumerate(r):
if r[idx]['id'] == article_id:
found = True
r[idx]['title'] = title
r[idx]['content'] = content
if not found:
return 'article {} does not exist'.format(article_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
return 'update title and content of {} successfully'.format(article_id)
def delete(self):
r_json = request.get_json()
article_id = r_json['id']
r = self.get_articles()
found = False
for idx, _ in enumerate(r):
if r[idx]['id'] == article_id:
found = True
del r[idx]
if not found:
            return 'article {} does not exist'.format(article_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
return '{} deleted successfully'.format(article_id)
class CommentList(Resource):
filename = 'comments.json'
def get_comments(self):
comments = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
comments = json.loads(fp.read())
return comments
def get(self):
if not os.path.exists(self.filename):
            return 'comments.json does not exist'
r = self.get_comments()
s = ''
for d in r:
comment_id = d['id']
user_id = d['user_id']
article_id = d['a_id']
content = d['content']
s += '[id: {}, user_id: {}, article_id: {}, content: {}]'.format(comment_id, user_id, article_id, content)
return s
def post(self):
r_json = request.get_json()
user_id = r_json['user_id']
article_id = r_json['a_id']
content = r_json['content']
r = self.get_comments()
c_id = 0
for d in r:
c_id = max(c_id, d['id'])
c_id = c_id + 1
r_json['id'] = c_id
r.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
return 'The comment of user {} is posted successfully on article {}'.format(user_id, article_id)
def put(self):
r_json = request.get_json()
comment_id = r_json['id']
content = r_json['content']
r = self.get_comments()
found = False
for idx, _ in enumerate(r):
if r[idx]['id'] == comment_id:
found = True
r[idx]['content'] = content
if not found:
return 'comment {} does not exist'.format(comment_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
return 'comment {} is updated successfully'.format(comment_id)
def delete(self):
r_json = request.get_json()
comment_id = r_json['id']
r = self.get_comments()
found = False
for idx, _ in enumerate(r):
if r[idx]['id'] == comment_id:
found = True
del r[idx]
if not found:
return 'comment {} does not exist'.format(comment_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
return 'comment {} is deleted successfully'.format(comment_id)
class LikeList(Resource):
def get(self):
return ""
def post(self):
return ""
def put(self):
return ""
def delete(self):
return ""
api.add_resource(UserList, '/api/users')
api.add_resource(ArticleList, '/api/articles')
api.add_resource(CommentList, '/api/comments')
api.add_resource(LikeList, '/api/likes')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5002, debug=True)
|
459017
|
import demistomock as demisto # noqa: F401
from typing import Dict, Tuple, List
from datetime import timezone
from CommonServerPython import *
"""Cyberpion Integration for Cortex XSOAR (aka Demisto)
"""
''' IMPORTS '''
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f %Z'
DEFAULT_MAX_INCIDENTS_TO_FETCH = 200
CONNECTION_TIMEOUT = 30.0
READ_TIMEOUT = 30.0
VALID_STATUS_CODES = (200,)
NUM_OF_RETRIES = 3
BACKOFF_FACTOR = 1.0 # see documentation in CommonServerPython._http_request
ACTION_ITEM_TYPE_NAME = 'cyberpion_action_item'
''' CLIENT CLASS '''
class Client(BaseClient):
"""Client class to interact with the service API
This Client implements API calls, and does not contain any Demisto logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
For this implementation, no special attributes defined
"""
def get_domain_state(self, domain: str):
params = {
'verbosity': 'details',
'domain': domain
}
demisto.debug(f'getting domain state for domain- {domain}')
http_response = self._http_request(
method='GET',
url_suffix='/domainstate/',
params=params,
resp_type='json',
ok_codes=VALID_STATUS_CODES,
timeout=(CONNECTION_TIMEOUT, READ_TIMEOUT),
retries=NUM_OF_RETRIES,
backoff_factor=BACKOFF_FACTOR,
raise_on_status=True
)
if 'results' not in http_response:
raise Exception(f'bad response from server!. response: {json.dumps(http_response, indent=2)}')
http_response = http_response['results']
if len(http_response) == 0:
demisto.error(f'no response from server for domain: {domain}')
return {}
http_response = http_response[0]
demisto.debug(f'after getting domain state for domain- {domain}')
reverse_ips = http_response.get('ips')
if reverse_ips is None:
raise Exception(f'in server\'s response: ips is none. response: {json.dumps(http_response, indent=2)}')
if type(reverse_ips) is dict:
formatted_reverse_ips = '\n'.join(
[f'{k}: {v}' for k, v in reverse_ips.items()])
else:
formatted_reverse_ips = reverse_ips
http_response['ips'] = formatted_reverse_ips
domain_types = http_response.get('domain_types')
if domain_types is None:
raise Exception(
f'in server\'s response: domain_types is none. response: {json.dumps(http_response, indent=2)}')
domain_info = ''
for idx, domain_type in enumerate(domain_types, 1):
domain_info += f'{idx}.\n'
domain_info += '\n'.join(
[f'{k}: {v}' for k, v in domain_type.items()])
http_response['domain_types'] = domain_info
return http_response
def get_action_items(self,
min_severity: int,
alert_types: list = None,
show_only_active=True,
max_fetch: int = None,
last_fetched_creation_time: str = None,
domain: str = None
) -> List[dict]:
params = {
'verbosity': 'details',
'urgency__gte': min_severity,
'ordering': 'creation_time',
'is_open': 'true' if show_only_active else 'false'
}
if alert_types:
params['category'] = ','.join(alert_types)
if max_fetch:
params['page_size'] = max_fetch
if last_fetched_creation_time:
params['creation_time__gt'] = last_fetched_creation_time
if domain:
params['domain'] = domain
http_responses = []
# call API
params['page'] = str(1)
demisto.debug(f'getting action items, domain={domain}')
http_response = self._http_request(
method='GET',
url_suffix='/actionitems/',
params=params,
resp_type='json',
ok_codes=VALID_STATUS_CODES,
timeout=(CONNECTION_TIMEOUT, READ_TIMEOUT),
retries=NUM_OF_RETRIES,
backoff_factor=BACKOFF_FACTOR,
raise_on_status=True
)
demisto.debug(f'after getting action items, domain={domain}')
if 'results' not in http_response:
raise Exception('failed to read action items.\nError: got response without \'results\' key')
results = http_response['results']
for idx, action_item in enumerate(results):
technical_det = action_item.get('technical_details', {})
if technical_det is None:
raise Exception(f'technical details is none. {json.dumps(action_item, indent=2)}')
if type(technical_det) is dict:
formatted_technical_details = '\n'.join(
[f'{k}: {v}' for k, v in technical_det.items()])
else:
formatted_technical_details = technical_det
results[idx]['technical_details'] = formatted_technical_details
results[idx]['alert_type'] = ACTION_ITEM_TYPE_NAME
http_responses.append(results)
demisto.debug(f'finished getting action items, number of pages: {len(http_responses)}, domain={domain}')
final_results = []
for response in http_responses:
final_results += response
return final_results
def get_domain_action_items(self, domain: str,
min_severity: int,
alert_types: list = None,
show_only_active=True
) -> Dict[str, Any]:
# call API
return {
"Domain": domain,
"Vulnerabilities": self.get_action_items(domain=domain,
min_severity=min_severity,
alert_types=alert_types,
show_only_active=show_only_active,
max_fetch=None)
}
''' HELPER FUNCTIONS '''
def convert_to_demisto_severity(severity: float) -> int:
"""Maps Cyberpion severity to Cortex XSOAR severity
Converts the Cyberpion alert severity level (1 to 10, float) to Cortex XSOAR incident severity (1 to 4)
for mapping.
:type severity: ``float``
:param severity: severity as returned from the Cyberpion API (float)
:return: Cortex XSOAR Severity (1 to 4)
:rtype: ``int``
"""
# In this case the mapping is straightforward, but more complex mappings
# might be required in your integration, so a dedicated function is
# recommended. This mapping should also be documented.
    if 0 <= severity <= 2.5:
        return 1
    elif severity <= 5:
        return 2
    elif severity <= 7.5:
        return 3
    elif severity <= 10:
        return 4
raise Exception('value of severity is not between 0-10. invalid value of severity: {}'.format(severity))
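# Illustrative mapping examples (not exhaustive): convert_to_demisto_severity(2.0) -> 1,
# convert_to_demisto_severity(4.0) -> 2, convert_to_demisto_severity(9.5) -> 4.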
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.get_domain_action_items(domain='company1.com', min_severity=2)
client.get_action_items(max_fetch=2, min_severity=1, alert_types=['PKI'])
client.get_domain_state('company1.com')
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok'
def fetch_incidents(client: Client,
max_fetch: int,
min_severity: int,
alert_types: list,
show_only_active: bool,
first_fetch: str = None
) -> Tuple[Dict[str, str], List[dict]]:
"""This function retrieves new alerts every interval (default is 1 minute).
:type client: ``Client``
:param Client: Cyberpion integration client to use
:type max_fetch: ``int``
:param max_fetch: Maximum numbers of incidents per fetch
:type min_severity: `int`
:param min_severity:
minimum severity of the alert to search for.
Options are 1 to 10
:type alert_types: ``List[str]``
    :param alert_types:
type of alerts to search for. There is no list of predefined types
:type first_fetch: `str`
:param first_fetch:
first date to fetch from. if null, all incidents will be fetched
:return:
A tuple containing two elements:
next_run (``Dict[str, str]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]``
"""
last_run_dict = demisto.getLastRun()
if 'last_fetch' in last_run_dict:
last_fetch = last_run_dict['last_fetch']
demisto.debug('last fetch: {}'.format(str(last_fetch)))
else:
demisto.debug('no previous data... this means this is the first time we are fetching incidents')
last_fetch = first_fetch
demisto.debug("Cyberpion fetch incidents last run time\\first fetch: {}".format(
str(last_fetch) if last_fetch else 'fetching all incidents, without time filter'))
action_items = client.get_action_items(
max_fetch=max_fetch,
min_severity=min_severity,
alert_types=alert_types,
show_only_active=show_only_active,
last_fetched_creation_time=last_fetch
)
incidents = []
for action_item in action_items:
creation_date = action_item['creation_time'] # must be string of a DATE_FORMAT
iso_format_data = datetime.strptime(creation_date, DATE_FORMAT).replace(
tzinfo=timezone.utc).isoformat()
incident = {
'name': '{} - {}'.format(action_item['title'], action_item['domain']),
# name is required field, must be set
'occurred': iso_format_data,
'rawJSON': json.dumps(action_item),
'severity': convert_to_demisto_severity(action_item['urgency']),
}
# put in last_incident_date the last action_items creation date. assuming it's ordered by creation date
# last_incident_date = creation_date
incidents.append(incident)
# last incident's time added to new_last_run_dict, so we can next time ask for incidents with creation_time__gt this time
if len(action_items) > 0:
last_incident_date = action_items[-1]['creation_time']
else:
# if no action items from last_incident_date to now, keep asking next time for (new incidents) from
# last_incident_date and on
last_incident_date = last_fetch
new_last_run_dict = {'last_fetch': last_incident_date}
return new_last_run_dict, incidents
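# Note (illustrative assumption): demisto.getLastRun() is expected to look like
# {'last_fetch': '2021-01-01 00:00:00.000000 UTC'}, i.e. a timestamp matching DATE_FORMAT above.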
def get_domain_state_command(client: Client, args: Dict[str, Any]) -> CommandResults:
domain = args.get('domain')
if not domain:
raise ValueError('no domain specified')
demisto.debug(f'getting domain state {domain}')
domain_state = client.get_domain_state(domain)
demisto.debug(f'creating domain state table for domain {domain}')
markdown = '### Cyberpion\n'
markdown += tableToMarkdown('Domain State', domain_state, headers=[
"id",
"domain",
"ips",
"risk_rank",
"vuln_count",
"cname_chain",
"domain_types",
"discovery_date",
])
demisto.debug(f'finished creating domain state table for domain {domain}')
return CommandResults(
readable_output=markdown,
outputs_prefix='Cyberpion',
outputs_key_field='id',
outputs={"DomainState": domain_state}
)
def get_domain_action_items_command(client: Client, args: Dict[str, Any], min_severity: int, alert_types: list = None,
show_only_active: bool = True) -> CommandResults:
domain = args.get('domain')
if not domain:
raise ValueError('no domain specified')
demisto.debug(f'getting action items for domain {domain}')
domain_data = client.get_domain_action_items(domain=domain,
min_severity=min_severity,
show_only_active=show_only_active,
alert_types=alert_types,
)
demisto.debug(f'creating action items table data for domain {domain}')
markdown = '### Cyberpion\n'
markdown += tableToMarkdown('Action Items', domain_data['Vulnerabilities'], headers=[
"domain",
"category",
"urgency",
"is_open",
"creation_time",
"link",
"title",
"impact",
"summary",
"solution",
"description",
"technical_details"
])
demisto.debug(f'finished creating table data for domain {domain}. returning command result')
return CommandResults(
readable_output=markdown,
outputs_prefix='Cyberpion.DomainData',
outputs_key_field='id',
outputs=domain_data
)
''' MAIN FUNCTION '''
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
# get the service API url
base_url = demisto.params()['url']
api_key = demisto.params()['apikey']
min_severity = demisto.params()['minSeverity'] # mandatory
alert_types = demisto.params()['categories'] # mandatory
show_only_active = demisto.params()['ShowOnlyOpen'] # mandatory
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': 'Token {}'.format(api_key)
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif demisto.command() == 'cyberpion-get-domain-state':
return_results(get_domain_state_command(client, demisto.args()))
elif demisto.command() == 'cyberpion-get-domain-action-items':
return_results(get_domain_action_items_command(client,
demisto.args(),
min_severity=min_severity,
alert_types=alert_types,
show_only_active=show_only_active))
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
max_fetch = demisto.params().get('maxFetch')
first_fetch: str = demisto.params().get('first_fetch')
if first_fetch:
months_back = datetime.now() - timedelta(days=30 * int(first_fetch))
first_fetch = datetime.strftime(months_back, DATE_FORMAT)
if not max_fetch:
max_fetch = DEFAULT_MAX_INCIDENTS_TO_FETCH
try:
max_fetch = int(max_fetch)
if max_fetch > 500 or max_fetch < 1:
raise ValueError()
except ValueError:
                raise ValueError('max_fetch must be an integer between 1 and 500')
if max_fetch > DEFAULT_MAX_INCIDENTS_TO_FETCH:
max_fetch = DEFAULT_MAX_INCIDENTS_TO_FETCH
new_last_run_dict, incidents = fetch_incidents(
client=client,
max_fetch=max_fetch,
min_severity=min_severity,
show_only_active=show_only_active,
alert_types=alert_types,
first_fetch=first_fetch
)
# create incidents
demisto.incidents(incidents)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(new_last_run_dict)
else:
raise NotImplementedError(f'no such command: {demisto.command()}')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Cyberpion integration: Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
459019
|
from pytest import mark
from graphene.types import ID, Field, ObjectType, Schema
from graphene.types.scalars import String
from graphene.relay.mutation import ClientIDMutation
class SharedFields(object):
shared = String()
class MyNode(ObjectType):
# class Meta:
# interfaces = (Node, )
id = ID()
name = String()
class SaySomethingAsync(ClientIDMutation):
class Input:
what = String()
phrase = String()
@staticmethod
async def mutate_and_get_payload(self, info, what, client_mutation_id=None):
return SaySomethingAsync(phrase=str(what))
# MyEdge = MyNode.Connection.Edge
class MyEdge(ObjectType):
node = Field(MyNode)
cursor = String()
class OtherMutation(ClientIDMutation):
class Input(SharedFields):
additional_field = String()
name = String()
my_node_edge = Field(MyEdge)
@staticmethod
def mutate_and_get_payload(
self, info, shared="", additional_field="", client_mutation_id=None
):
edge_type = MyEdge
return OtherMutation(
name=shared + additional_field,
my_node_edge=edge_type(cursor="1", node=MyNode(name="name")),
)
class RootQuery(ObjectType):
something = String()
class Mutation(ObjectType):
say_promise = SaySomethingAsync.Field()
other = OtherMutation.Field()
schema = Schema(query=RootQuery, mutation=Mutation)
@mark.asyncio
async def test_node_query_promise():
executed = await schema.execute_async(
'mutation a { sayPromise(input: {what:"hello", clientMutationId:"1"}) { phrase } }'
)
assert not executed.errors
assert executed.data == {"sayPromise": {"phrase": "hello"}}
@mark.asyncio
async def test_edge_query():
executed = await schema.execute_async(
'mutation a { other(input: {clientMutationId:"1"}) { clientMutationId, myNodeEdge { cursor node { name }} } }'
)
assert not executed.errors
assert dict(executed.data) == {
"other": {
"clientMutationId": "1",
"myNodeEdge": {"cursor": "1", "node": {"name": "name"}},
}
}
|
459037
|
from __future__ import absolute_import, unicode_literals
from django.contrib.gis.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from wagtail.core import blocks
from wagtail.core.models import Orderable, Page
from wagtail.admin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
ObjectList,
TabbedInterface,
)
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import StreamFieldPanel
from modelcluster.fields import ParentalKey
from wagtailgeowidget.blocks import GeoBlock, GeoAddressBlock
from wagtailgeowidget.edit_handlers import GeoPanel
class GeoLocation(models.Model):
title = models.CharField(max_length=255)
address = models.CharField(max_length=250, blank=True, null=True)
location = models.PointField(srid=4326, null=True, blank=True)
panels = [
FieldPanel('title'),
MultiFieldPanel([
FieldPanel('address'),
GeoPanel('location', address_field='address')
], _('Geo details')),
]
class GeoPageRelatedLocations(Orderable, GeoLocation):
page = ParentalKey(
'geopage.GeoPage',
related_name='related_locations',
on_delete=models.CASCADE
)
class GeoPage(Page):
address = models.CharField(max_length=250, blank=True, null=True)
location = models.PointField(srid=4326, null=True, blank=True)
content_panels = Page.content_panels + [
InlinePanel('related_locations', label="Related locations"),
]
location_panels = [
MultiFieldPanel([
FieldPanel('address'),
GeoPanel('location', address_field='address'),
], heading='Location')
]
edit_handler = TabbedInterface([
ObjectList(content_panels, heading='Content'),
ObjectList(location_panels, heading='Location'),
ObjectList(Page.settings_panels, heading='Settings',
classname="settings"),
])
class GeoStreamPage(Page):
body = StreamField([
('map', GeoBlock()),
('map_struct', blocks.StructBlock([
('address', GeoAddressBlock(required=True)),
('map', GeoBlock(address_field='address')),
], icon='user'))
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
def get_context(self, request):
data = super(GeoStreamPage, self).get_context(request)
return data
class ClassicGeoPage(Page):
address = models.CharField(max_length=250, blank=True, null=True)
location = models.CharField(max_length=250, blank=True, null=True)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('address'),
GeoPanel('location', address_field='address', hide_latlng=True),
], _('Geo details')),
]
def get_context(self, request):
data = super(ClassicGeoPage, self).get_context(request)
return data
@cached_property
def point(self):
from wagtailgeowidget.helpers import geosgeometry_str_to_struct
return geosgeometry_str_to_struct(self.location)
@property
def lat(self):
return self.point['y']
@property
def lng(self):
return self.point['x']
|
459044
|
import collections
import contextlib
import os
import wave
AudioFormat = collections.namedtuple('AudioFormat', 'rate channels width')
DEFAULT_RATE = 16000
DEFAULT_CHANNELS = 1
DEFAULT_WIDTH = 2
DEFAULT_FORMAT = AudioFormat(DEFAULT_RATE, DEFAULT_CHANNELS, DEFAULT_WIDTH)
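# DEFAULT_FORMAT corresponds to 16 kHz, mono, 16-bit (2-byte) PCM samples.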
class AudioFile:
def __init__(self, audio_path, as_path=False, audio_format=DEFAULT_FORMAT):
self.audio_path = audio_path
self.audio_format = audio_format
self.as_path = as_path
self.open_file = None
self.tmp_file_path = None
def __enter__(self):
if self.audio_path.endswith('.wav'):
self.open_file = wave.open(self.audio_path, 'r')
if read_audio_format_from_wav_file(self.open_file) == self.audio_format:
if self.as_path:
self.open_file.close()
return self.audio_path
return self.open_file
self.open_file.close()
    def __exit__(self, *args):
        if not self.as_path and self.open_file is not None:
            self.open_file.close()
if self.tmp_file_path is not None:
os.remove(self.tmp_file_path)
def read_audio_format_from_wav_file(wav_file):
return AudioFormat(wav_file.getframerate(), wav_file.getnchannels(), wav_file.getsampwidth())
def get_num_samples(pcm_buffer_size, audio_format=DEFAULT_FORMAT):
return pcm_buffer_size // (audio_format.channels * audio_format.width)
def get_pcm_duration(pcm_buffer_size, audio_format=DEFAULT_FORMAT):
"""Calculates duration in seconds of a binary PCM buffer (typically read from a WAV file)"""
return get_num_samples(pcm_buffer_size, audio_format) / audio_format.rate
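# Example (illustrative): with DEFAULT_FORMAT (16 kHz, mono, 16-bit),
# get_pcm_duration(32000) == 1.0, i.e. 32000 bytes of PCM is one second of audio.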
def read_frames(wav_file, frame_duration_ms=30, yield_remainder=False):
audio_format = read_audio_format_from_wav_file(wav_file)
frame_size = int(audio_format.rate * (frame_duration_ms / 1000.0))
while True:
try:
data = wav_file.readframes(frame_size)
if not yield_remainder and get_pcm_duration(len(data), audio_format) * 1000 < frame_duration_ms:
break
yield data
except EOFError:
break
def read_frames_from_file(audio_path, audio_format=DEFAULT_FORMAT, frame_duration_ms=30, yield_remainder=False):
with AudioFile(audio_path, audio_format=audio_format) as wav_file:
for frame in read_frames(wav_file, frame_duration_ms=frame_duration_ms, yield_remainder=yield_remainder):
yield frame
def split(audio_frames,
audio_format=DEFAULT_FORMAT,
num_padding_frames=10,
threshold=0.5,
aggressiveness=3):
from webrtcvad import Vad # pylint: disable=import-outside-toplevel
if audio_format.channels != 1:
raise ValueError('VAD-splitting requires mono samples')
if audio_format.width != 2:
raise ValueError('VAD-splitting requires 16 bit samples')
if audio_format.rate not in [8000, 16000, 32000, 48000]:
raise ValueError(
'VAD-splitting only supported for sample rates 8000, 16000, 32000, or 48000')
if aggressiveness not in [0, 1, 2, 3]:
raise ValueError(
'VAD-splitting aggressiveness mode has to be one of 0, 1, 2, or 3')
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
vad = Vad(int(aggressiveness))
voiced_frames = []
frame_duration_ms = 0
frame_index = 0
for frame_index, frame in enumerate(audio_frames):
frame_duration_ms = get_pcm_duration(len(frame), audio_format) * 1000
if int(frame_duration_ms) not in [10, 20, 30]:
raise ValueError(
'VAD-splitting only supported for frame durations 10, 20, or 30 ms')
is_speech = vad.is_speech(frame, audio_format.rate)
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
if num_voiced > threshold * ring_buffer.maxlen:
triggered = True
for f, s in ring_buffer:
voiced_frames.append(f)
ring_buffer.clear()
else:
voiced_frames.append(frame)
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
if num_unvoiced > threshold * ring_buffer.maxlen:
triggered = False
yield b''.join(voiced_frames), \
frame_duration_ms * max(0, frame_index - len(voiced_frames)), \
frame_duration_ms * frame_index
ring_buffer.clear()
voiced_frames = []
if len(voiced_frames) > 0:
yield b''.join(voiced_frames), \
frame_duration_ms * (frame_index - len(voiced_frames)), \
frame_duration_ms * (frame_index + 1)
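# Minimal usage sketch (not part of the original module): stream voiced segments
# from a WAV file and print their boundaries. 'sample.wav' is a hypothetical path;
# the file must already match DEFAULT_FORMAT, since AudioFile performs no conversion.
if __name__ == '__main__':
    for segment, start_ms, end_ms in split(read_frames_from_file('sample.wav')):
        seconds = get_pcm_duration(len(segment))
        print('voiced segment: %.0f ms -> %.0f ms (%.2f s of audio)' % (start_ms, end_ms, seconds))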
|
459047
|
import os
import sys
import ast
import pytest
import warnings
warnings.filterwarnings("ignore")
from python_minifier import minify, unparse
def gather_files():
print('Interpreter version: ', sys.version_info)
print('sys.path: ', sys.path)
for sys_path in sys.path:
for subdir, dirs, files in os.walk(sys_path):
for file in filter(lambda f: f.endswith('.py'), [os.path.join(subdir, file) for file in files]):
yield file
@pytest.mark.parametrize('path', gather_files())
def test_file(path):
try:
with open(path, 'rb') as f:
source = f.read()
except IOError:
pytest.skip('IOError opening file')
try:
original_ast = ast.parse(source, path)
except SyntaxError:
pytest.skip('Invalid syntax in file')
# Test unparsing
unparse(original_ast)
# Test transforms
minify(source, filename=path)
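# Quick illustration (not part of the test suite): minify() returns a smaller,
# semantically equivalent source string, and unparse() turns an AST back into
# code -- the same calls exercised by test_file() above.
def _demo_minify():
    source = "def add(a, b):\n    return a + b\n"
    print(minify(source, filename='<demo>'))
    print(unparse(ast.parse(source)))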
|
459049
|
import json
import os
import streamlit as st
import plotly as py
import plotly.figure_factory as ff
from random import sample
from msmarco import load_msmarco_queries, load_msmarco_qrels, extract_querie_relevance
from embedding import create_document_embedding
from pandas import DataFrame
import tensorflow_hub as hub
from sentence_transformers import SentenceTransformer
from experiments import evaluate, create_vespa_body_request, vespa_search
os.environ["TFHUB_CACHE_DIR"] = "data/models"
QUERIES_FILE_PATH = "data/msmarco/train_test_set/msmarco-doctest-queries.tsv.gz"
RELEVANCE_FILE_PATH = "data/msmarco/train_test_set/msmarco-doctest-qrels.tsv.gz"
RANK_PROFILE_OPTIONS = (
"BM25",
"Native Rank",
"embedding(title) + embedding(body)",
"BM25 + embedding(title) + embedding(body)",
)
# todo: think how I am going to encode and present Scaled ranking functions
# "Scaled (AND) BM25 + title and body gse": "listwise_linear_bm25_gse_title_body_and",
# "Scaled (OR) BM25 + title and body gse": "listwise_linear_bm25_gse_title_body_or",
RANK_PROFILE_MAP = {
"BM25": "bm25",
"Native Rank": "default",
"embedding(title) + embedding(body)": {
"word2vec": "word2vec_title_body_all",
"gse": "gse_title_body_all",
"bert": "bert_title_body_all",
},
"BM25 + embedding(title) + embedding(body)": {
"word2vec": "bm25_word2vec_title_body_all",
"gse": "bm25_gse_title_body_all",
"bert": "bm25_bert_title_body_all",
},
}
# todo: I think I dont need RANK_PROFILE_EMBEDDING
# RANK_PROFILE_EMBEDDING = {
# "bm25": None,
# "default": None,
# "word2vec_title_body_all": "word2vec",
# "bm25_word2vec_title_body_all": "word2vec",
# "gse_title_body_all": "gse",
# "bm25_gse_title_body_all": "gse",
# "listwise_linear_bm25_gse_title_body_and": "gse",
# "listwise_linear_bm25_gse_title_body_or": "gse",
# "bert_title_body_all": "bert",
# "bm25_bert_title_body_all": "bert",
# }
GRAMMAR_OPTIONS = ["None", "AND", "OR", "weakAND"]
GRAMMAR_OPERATOR_MAP = {"AND": False, "OR": True}
EMBEDDING_OPTIONS = ["word2vec", "gse", "bert"]
ANN_OPTIONS = ["None", "title", "body", "title_body"]
LIMIT_HITS_GRAPH = 10
def get_rank_profile(rank_profile, embedding):
if "embedding" in rank_profile:
return RANK_PROFILE_MAP[rank_profile][embedding]
else:
return RANK_PROFILE_MAP[rank_profile]
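# Examples of how the lookup above resolves (taken from RANK_PROFILE_MAP):
#   get_rank_profile("BM25", None)                                        -> "bm25"
#   get_rank_profile("embedding(title) + embedding(body)", "gse")         -> "gse_title_body_all"
#   get_rank_profile("BM25 + embedding(title) + embedding(body)", "bert") -> "bm25_bert_title_body_all"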
@st.cache(ignore_hash=True)
def retrieve_model(model_type):
if model_type == "word2vec":
return {
"model": hub.load(
"https://tfhub.dev/google/Wiki-words-500-with-normalization/2"
),
"model_source": "tf_hub",
}
elif model_type == "gse":
return {
"model": hub.load("https://tfhub.dev/google/universal-sentence-encoder/4"),
"model_source": "tf_hub",
}
elif model_type == "bert":
return {
"model": SentenceTransformer("distilbert-base-nli-stsb-mean-tokens"),
"model_source": "bert",
}
def create_experiment_file_name(rank_profile, grammar_operator, ann, embedding, hits):
file_name = "grammar_{}_ann_{}_rank_{}_embedding_{}_hits_{}".format(
grammar_operator,
ann,
get_rank_profile(rank_profile, embedding),
embedding,
hits,
)
return file_name
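# Example file name produced by the format string above:
#   create_experiment_file_name("BM25", "OR", None, "gse", 100)
#   -> "grammar_OR_ann_None_rank_bm25_embedding_gse_hits_100"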
def compute_all_options(
vespa_url,
vespa_port,
output_dir,
rank_profiles,
grammar_operators,
ann_operators,
embeddings,
hits,
):
query_relevance = sample_query_relevance_data(number_queries=None)
for rank_profile in rank_profiles:
for grammar_operator in grammar_operators:
            grammar_operator = None if grammar_operator == "None" else grammar_operator
for ann in ann_operators:
                ann = None if ann == "None" else ann
for embedding in embeddings:
file_name = create_experiment_file_name(
rank_profile, grammar_operator, ann, embedding, hits
)
file_path = os.path.join(output_dir, file_name)
if not os.path.exists(file_path):
model1 = retrieve_model(embedding)
try:
records, aggregate_metrics, position_freq = evaluate(
query_relevance=query_relevance,
parsed_rank_profile=get_rank_profile(
rank_profile, embedding
),
grammar_operator=grammar_operator,
ann_operator=ann,
embedding_type=embedding,
vespa_url=vespa_url,
vespa_port=vespa_port,
hits=int(hits),
model=model1,
)
except ValueError as e:
print(str(e))
continue
with open(file_path, "w") as f:
f.write(
json.dumps(
{
"aggregate_metrics": aggregate_metrics,
"position_freq": position_freq,
}
)
)
def load_all_options(
output_dir, rank_profiles, grammar_operators, ann_operators, embeddings, hits
):
results = []
for rank_profile in rank_profiles:
for grammar_operator in grammar_operators:
for ann in ann_operators:
for embedding in embeddings:
file_name = create_experiment_file_name(
rank_profile, grammar_operator, ann, embedding, hits
)
file_path = os.path.join(output_dir, file_name)
try:
result = json.load(open(file_path, "r"))
except FileNotFoundError:
continue
result.update(
{
"rank_profile": rank_profile,
"grammar_operator": grammar_operator,
"ann_operator": ann,
"embedding_type": embedding,
}
)
results.append(result)
return results
def main():
vespa_url = st.sidebar.text_input("Vespa url", "http://localhost")
vespa_port = st.sidebar.text_input("Vespa port", 8080)
page = st.sidebar.selectbox(
"Choose a page",
["Simple query", "Ranking function comparison", "Results summary", "Report"],
)
if page == "Simple query":
page_simple_query_page(vespa_url=vespa_url, vespa_port=vespa_port)
# elif page == "Ranking function comparison":
# page_ranking_function_comparison(vespa_url=vespa_url, vespa_port=vespa_port)
elif page == "Results summary":
page_results_summary(vespa_url=vespa_url, vespa_port=vespa_port)
def page_results_summary(vespa_url, vespa_port):
grammar_operators = st.multiselect("Choose grammar operators", GRAMMAR_OPTIONS)
ann_operators = st.multiselect("ANN operator", ANN_OPTIONS)
rank_profiles = st.multiselect("Choose rank profiles", RANK_PROFILE_OPTIONS)
embeddings = st.multiselect("Embedding type", EMBEDDING_OPTIONS)
output_dir = "data/msmarco/experiments"
if st.button("Evaluate"):
hits = 100
compute_all_options(
vespa_url,
vespa_port,
output_dir,
rank_profiles,
grammar_operators,
ann_operators,
embeddings,
hits,
)
results = load_all_options(
output_dir,
rank_profiles,
grammar_operators,
ann_operators,
embeddings,
hits,
)
position_freqs = []
ranking_names = []
results_summary = []
for result in results:
position_freqs.append(result["position_freq"])
ranking_names.append(result["aggregate_metrics"]["rank_name"])
results_summary.append(
{
"rank_name": result["aggregate_metrics"]["rank_name"],
"rank_profile": result["rank_profile"],
"grammar_operator": result["grammar_operator"],
"ann_operator": result["ann_operator"],
"embedding_type": result["embedding_type"],
"number_queries": result["aggregate_metrics"]["number_queries"],
"qps": result["aggregate_metrics"]["qps"],
"mrr": result["aggregate_metrics"]["mrr"],
"recall": result["aggregate_metrics"]["recall"],
"average_matched": result["aggregate_metrics"]["average_matched"],
}
)
display_results(position_freqs, ranking_names, results_summary, hits)
def display_results(
position_freqs, ranking_names, results_summary, hits, display_graph=True
):
if display_graph:
hits = min(hits, LIMIT_HITS_GRAPH)
z = [list(x) for x in zip(*position_freqs)]
z_text = z
x = ranking_names
y = [str(x + 1) for x in range(int(hits))]
fig = ff.create_annotated_heatmap(
z, x=x, y=y, annotation_text=z_text, colorscale=py.colors.diverging.RdYlGn
)
fig.update_layout(
xaxis_title_text="Rank profile", # xaxis label
yaxis_title_text="Position", # yaxis label
)
fig.update_yaxes(autorange="reversed")
st.plotly_chart(fig)
st.write(
DataFrame.from_records(results_summary).sort_values(by="mrr", ascending=False)
)
# def page_ranking_function_comparison(vespa_url, vespa_port):
# rank_profile_1 = st.sidebar.selectbox(
# "Ranking 1: rank profile", RANK_PROFILE_OPTIONS
# )
# grammar_operator_1 = st.sidebar.selectbox("Ranking 1: Grammar", ("AND", "OR"))
# ann_operator_1 = None
# if RANK_PROFILE_EMBEDDING[RANK_PROFILE_MAP[rank_profile_1]] in EMBEDDING_OPTIONS:
# ann_operator_1 = st.sidebar.selectbox(
# "Ranking 1: ANN operator", (None, "title", "body", "title_body")
# )
# rank_profile_2 = st.sidebar.selectbox(
# "Ranking 2: rank profile", RANK_PROFILE_OPTIONS
# )
# grammar_operator_2 = st.sidebar.selectbox("Ranking 2: Grammar", ("AND", "OR"))
# ann_operator_2 = None
# if RANK_PROFILE_EMBEDDING[RANK_PROFILE_MAP[rank_profile_2]] in EMBEDDING_OPTIONS:
# ann_operator_2 = st.sidebar.selectbox(
# "Ranking 2: ANN operator", (None, "title", "body", "title_body")
# )
# number_queries = int(st.text_input("Number of queries to send", "20"))
#
# hits = int(st.text_input("Number of hits to evaluate per query", "10"))
#
# if st.button("Evaluate"):
# query_relevance = sample_query_relevance_data(number_queries=number_queries)
#
# model1 = retrieve_model(
# RANK_PROFILE_EMBEDDING[RANK_PROFILE_MAP[rank_profile_1]]
# )
# records_1, aggregate_metrics_1, position_freq_1 = evaluate(
# query_relevance=query_relevance,
# parsed_rank_profile=RANK_PROFILE_MAP[rank_profile_1],
# grammar_operator=GRAMMAR_OPERATOR_MAP[grammar_operator_1],
# vespa_url=vespa_url,
# vespa_port=vespa_port,
# hits=int(hits),
# model=model1,
# ann=ann_operator_1,
# )
#
# model2 = retrieve_model(
# RANK_PROFILE_EMBEDDING[RANK_PROFILE_MAP[rank_profile_2]]
# )
# records_2, aggregate_metrics_2, position_freq_2 = evaluate(
# query_relevance=query_relevance,
# parsed_rank_profile=RANK_PROFILE_MAP[rank_profile_2],
# grammar_operator=GRAMMAR_OPERATOR_MAP[grammar_operator_2],
# vespa_url=vespa_url,
# vespa_port=vespa_port,
# hits=int(hits),
# model=model2,
# ann=ann_operator_2,
# )
# position_freqs = [position_freq_1, position_freq_2]
# ranking_names = [
# aggregate_metrics_1["rank_name"],
# aggregate_metrics_2["rank_name"],
# ]
# results_summary = [aggregate_metrics_1, aggregate_metrics_2]
#
# display_results(position_freqs, ranking_names, results_summary, hits)
def page_simple_query_page(vespa_url, vespa_port):
predefined_queries = st.checkbox("Use pre-defined queries")
if predefined_queries:
query_relevance = sample_query_relevance_data(number_queries=5)
query_relevance = {
query: relevant_id for _, (query, relevant_id) in query_relevance.items()
}
query = st.selectbox("Choose a query", list(query_relevance.keys()))
else:
query = st.text_input("Query", "")
st.markdown("---")
grammar_operator = st.selectbox("Choose grammar operators", GRAMMAR_OPTIONS)
    grammar_operator = None if grammar_operator == "None" else grammar_operator
if grammar_operator is None:
        available_ann_options = [x for x in ANN_OPTIONS if x != "None"]
else:
available_ann_options = ANN_OPTIONS
ann_operator = st.selectbox("ANN operator", available_ann_options)
    ann_operator = None if ann_operator == "None" else ann_operator
rank_profile = st.selectbox("Choose rank profiles", RANK_PROFILE_OPTIONS)
if "embedding" in rank_profile or ann_operator is not None:
embedding = st.selectbox("Embedding type", EMBEDDING_OPTIONS)
else:
embedding = None
embedding_vector = None
if embedding in EMBEDDING_OPTIONS:
model = retrieve_model(embedding)
embedding_vector = create_document_embedding(
text=query,
model=model["model"],
model_source=model["model_source"],
normalize=True,
)
st.markdown("---")
if query != "":
print_request_body = st.checkbox("Print request body?")
debug = st.checkbox("Debug?")
output_format = st.radio(
"Select output format", ("parsed vespa results", "raw vespa results")
)
tracelevel = None
trace = st.checkbox("Specify tracelevel?")
if trace:
tracelevel = st.selectbox("Tracelevel", [3, 9], 0)
request_body = create_vespa_body_request(
query=query,
parsed_rank_profile=get_rank_profile(
rank_profile=rank_profile, embedding=embedding
),
grammar_operator=grammar_operator,
ann_operator=ann_operator,
embedding_type=embedding,
hits=10,
embedding_vector=embedding_vector,
tracelevel=tracelevel,
)
search_results = vespa_search(
vespa_url=vespa_url, vespa_port=vespa_port, body=request_body
)
#
# Debug
#
if debug:
if "children" in search_results["root"]:
debug_data = []
for hit in search_results["root"]["children"]:
debug_data.append(
{
"complete_id": hit["id"],
"id": hit["fields"]["id"],
"title_dot_product": hit["fields"]["rankfeatures"].get(
"rankingExpression(dot_product_title)"
),
"body_dot_product": hit["fields"]["rankfeatures"].get(
"rankingExpression(dot_product_body)"
),
}
)
st.write(DataFrame.from_records(debug_data))
st.markdown("---")
if print_request_body:
st.write(request_body)
if output_format == "raw vespa results":
st.markdown("## Showing raw results")
st.write(search_results)
elif output_format == "parsed vespa results":
st.markdown("## Showing parsed results")
st.markdown("### Click to see more")
results_title = {}
if "children" in search_results["root"]:
for hit in search_results["root"]["children"]:
if (
predefined_queries
and hit["fields"]["id"] == query_relevance[query]
):
results_title["*** " + hit["fields"]["title"] + " ***"] = {
"url": hit["fields"]["url"],
"body": hit["fields"]["body"],
"relevance": hit["relevance"],
"id": hit["id"],
}
else:
results_title[hit["fields"]["title"]] = {
"url": hit["fields"]["url"],
"body": hit["fields"]["body"],
"relevance": hit["relevance"],
"id": hit["id"],
}
for title in results_title:
if st.checkbox(title):
st.markdown(
"* relevance: {}".format(results_title[title]["relevance"])
)
st.markdown("* docid: {}".format(results_title[title]["id"]))
st.markdown("* url: {}".format(results_title[title]["url"]))
st.markdown("* text:")
st.write(results_title[title]["body"])
else:
st.markdown("## No hits available")
@st.cache()
def sample_query_relevance_data(number_queries):
queries = load_msmarco_queries(queries_file_path=QUERIES_FILE_PATH)
qrels = load_msmarco_qrels(relevance_file_path=RELEVANCE_FILE_PATH)
if number_queries is not None:
qrels = {k: qrels[k] for k in sample(list(qrels), number_queries)}
query_relevance = extract_querie_relevance(qrels, queries)
return query_relevance
if __name__ == "__main__":
main()
|
459066
|
import datetime
import json
import lzma
import os
import os.path
import platform
import sys # for DataVisitor import ast, inspect
from collections import OrderedDict
from importlib import util
from lxml import etree
from pycsp3.dashboard import options
from pycsp3.problems.data import parsing
from pycsp3.tools.aggregator import build_similar_constraints
from pycsp3.tools.compactor import build_compact_forms
from pycsp3.tools.curser import OpOverrider, convert_to_namedtuples, is_namedtuple
from pycsp3.tools.slider import handle_slides
from pycsp3.tools.utilities import Stopwatch, GREEN, WHITE, Error
from pycsp3.tools.xcsp import build_document
None_Values = ['None', '', 'null'] # adding 'none'?
class Compilation:
string_model = None
string_data = None
model = None
data = None
solve = None
stopwatch = None
stopwatch2 = None
done = False
user_filename = None
@staticmethod
def load(console=False):
_load(console=console)
@staticmethod
def set_filename(_user_filename):
Compilation.user_filename = _user_filename
@staticmethod
def compile(disabling_opoverrider=True):
return _compile(disabling_opoverrider)
def _load_options():
options.set_values("data", "dataparser", "dataexport", "dataformat", "variant", "checker", "solver", "output")
options.set_flags("dataexport", "compress", "ev", "display", "time", "noComments", "recognizeSlides", "keepSmartConditions", "restrictTablesWrtDomains",
"safe", "solve", "dontcompactValues", "usemeta", "debug", "verbose")
if options.checker is None:
options.checker = "fast"
assert options.checker in {"complete", "fast", "none"}
options.parse(sys.argv[1:])
def _load_model():
try:
name = sys.argv[0]
        assert name.strip().endswith(".py"), "The first argument has to be a python file: " + str(name)
model_string = name[name.rfind(os.sep) + 1:name.rfind(".")]
specification = util.spec_from_file_location("", name)
model = util.module_from_spec(specification)
return model, model_string
except Exception:
usage("It was not possible to read the file: " + sys.argv[0])
raise
def _load_data():
def _load_data_sequence(raw_data):
od = [None if v in None_Values else int(v) if v and v.isdigit() else v for v in raw_data]
return OrderedDict([("f" + str(i), od[i]) for i, v in enumerate(raw_data)]), od
# return DataVisitor(raw_data).visit(ast.parse(inspect.getsource(Compilation.model)))
def _arg_value(s):
return None if s in None_Values else int(s) if s.isdigit() else s
def _load_multiple_data_pieces(): # formatting instructions not possible in that case
s = ""
for arg in args:
if "=" in arg:
t = arg.split('=')
value = _arg_value(t[1])
compilation_data[t[0]] = value
s += "-" + str(value)
else:
assert arg.endswith("json")
assert os.path.exists(arg), "The file " + arg + " does not exist (in the specified directory)." + str(os.path)
with open(arg) as f:
compilation_data.update(json.loads(f.read(), object_pairs_hook=OrderedDict))
s += "-" + arg.split(os.sep)[-1:][0].split(".")[:1][0]
return compilation_data, s
data = options.data
if data is None:
return OrderedDict(), ""
if data.endswith(".json"): # a single json file
assert os.path.exists(data), "The file " + data + " does not exist (in the specified directory)."
with open(data) as f:
return json.loads(f.read(), object_pairs_hook=OrderedDict), "-" + data.split(os.sep)[-1:][0].split(".")[:1][0]
compilation_data = OrderedDict() # the object used for recording the data, available in the model
# if '{' in data and '}' in data:
# compilation_data = json.loads(data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()), object_pairs_hook=OrderedDict)
# for k, v in compilation_data.items(): setattr(compilation_data, k, v) ordered_data = list(compilation_data.values())
if (data[0], data[-1]) in [('[', ']'), ('(', ')')]: # NB: these characters may be needed to be escaped as in \[2,3\]
args = data[1:-1].split(",")
if "json" in data:
return _load_multiple_data_pieces()
if '=' in data:
assert data.count('=') == data.count(',') + 1, "badly formed string of data " + data
ordered_data = []
for arg in args:
t = arg.split('=')
value = _arg_value(t[1])
compilation_data[t[0]] = value
ordered_data.append(value)
else:
compilation_data, ordered_data = _load_data_sequence(args)
else:
compilation_data, ordered_data = _load_data_sequence([data])
df = options.dataformat
if df:
if df[0] == '[':
assert df[-1] == ']'
df = df[1:-1]
df = df.split(',')
assert len(df) == len(ordered_data)
ss = "-".join(df).format(*ordered_data)
else:
ss = "-".join(str(v) for v in ordered_data)
string_data = "-" + ss
return compilation_data, string_data
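# Shapes of options.data handled above (values are illustrative):
#   "problems/data/json/Bibd.json"   a single JSON file
#   "[v=9,b=12,r=4,k=3,l=1]"         named elementary values
#   "[9,12,4,3,1]"                   positional values, recorded as fields f0, f1, ...
#   "9"                              a single elementary value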
def _load_dataparser(parser_file, data_value):
try:
compilation_data = parsing.register_fields(data_value) # the object used for recording data is returned, available in the model
specification = util.spec_from_file_location("", parser_file)
specification.loader.exec_module(util.module_from_spec(specification))
string_data = "-" + options.data.split(os.sep)[-1:][0].split(".")[:1][0] if options.data else None
if string_data is None:
string_data = Compilation.string_data if Compilation.string_data else "" # in case data are recorded through the dataparser (after asking the user)
return compilation_data, string_data
except Exception:
usage("It was not possible to correctly read the file: " + parser_file)
raise
def _load(*, console=False):
Compilation.stopwatch = Stopwatch()
_load_options()
if console is False:
Compilation.model, Compilation.string_model = _load_model()
if options.dataparser:
Compilation.data, Compilation.string_data = _load_dataparser(options.dataparser, options.data)
else:
Compilation.data, Compilation.string_data = _load_data()
Compilation.data = convert_to_namedtuples(Compilation.data)
Compilation.string_data = Compilation.string_data.replace("/", "-")
if len(Compilation.data) == 0:
Compilation.data = None
elif len(Compilation.data) == 1:
Compilation.data = Compilation.data[0] # the value instead of a tuple of size 1
else:
Compilation.string_model = "Console"
Compilation.string_data = ""
OpOverrider.enable()
options.time and print("\tWCK for loading model and data:", Compilation.stopwatch.elapsed_time(), "seconds")
def default_data(filename):
if filename[0] == '.':
fn = os.path.abspath('.') + filename[1:]
else:
fn = os.path.dirname(os.path.realpath(__file__)) + os.sep + "problems" + os.sep + "data" + os.sep + "json" + os.sep + filename
assert fn.endswith(".json")
assert os.path.exists(fn), "The file " + fn + " does not exist (in the specified directory)."
with open(fn) as f:
Compilation.data = convert_to_namedtuples(json.loads(f.read(), object_pairs_hook=OrderedDict))
Compilation.string_data = "-" + filename.split(os.sep)[-1:][0].split(".")[:1][0]
if len(Compilation.data) == 1:
Compilation.data = Compilation.data[0] # the value instead of a tuple of size 1
return Compilation.data
def _compile(disabling_opoverrider=False):
# used to save data in jSON
def prepare_for_json(obj):
if is_namedtuple(obj):
r = obj._asdict()
for k in r:
r[k] = prepare_for_json(r[k])
return r
if isinstance(obj, list):
for i in range(len(obj)):
obj[i] = prepare_for_json(obj[i])
return obj
return str(obj) if isinstance(obj, datetime.time) else obj
if Error.errorOccurrence:
return None
if disabling_opoverrider:
OpOverrider.disable()
if Compilation.user_filename is None and options.output is not None:
Compilation.set_filename(options.output)
if Compilation.user_filename is not None:
if options.output is None and options.verbose:
print(" * User-defined XML file name:", Compilation.user_filename)
filename = Compilation.user_filename
if filename.endswith(".xml"):
filename_prefix = filename[:-4] # can be useful if data are exported
else:
filename_prefix = Compilation.string_model + ("-" + options.variant if options.variant else "") + Compilation.string_data
filename = filename_prefix + ".xml"
stopwatch = Stopwatch()
if options.verbose:
print(" PyCSP3 (Python:" + platform.python_version() + ", Path:" + os.path.abspath(__file__) + ")\n")
build_similar_constraints()
options.time and print("\tWCK for generating groups:", stopwatch.elapsed_time(reset=True), "seconds")
handle_slides()
options.time and print("\tWCK for handling slides:", stopwatch.elapsed_time(reset=True), "seconds")
build_compact_forms()
options.time and print("\tWCK for compacting forms:", stopwatch.elapsed_time(reset=True), "seconds")
root = build_document()
if root is not None:
pretty_text = etree.tostring(root, pretty_print=True, xml_declaration=False, encoding='UTF-8').decode("UTF-8")
if options.display:
print("\n", pretty_text)
else:
with open(filename, "w") as f:
f.write(pretty_text)
print(" * Generating the file " + filename + " completed in " + GREEN + Compilation.stopwatch.elapsed_time() + WHITE + " seconds.")
if options.compress:
with lzma.open(filename + ".lzma", "w") as f:
f.write(bytes(pretty_text, 'utf-8'))
print("\tGeneration of the file " + filename + ".lzma completed.\n")
options.time and print("\tWCK for generating files:", stopwatch.elapsed_time(reset=True), "seconds")
if options.dataexport:
if isinstance(options.dataexport, bool):
json_prefix = options.data.split("/")[-1:][0].split(".")[:1][0] if options.dataparser else filename_prefix
# TODO if data are given with name as e.g., in [k=3,l=9,b=0,r=0,v=9] for Bibd, maybe we should sort them
else:
json_prefix = str(options.dataexport)
with open(json_prefix + '.json', 'w') as f:
json.dump(prepare_for_json(Compilation.data), f)
print(" Generation for data saving of the file " + json_prefix + '.json' + " completed.")
# print(" Total wall clock time:", Compilation.stopwatch.elapsed_time(), "seconds")
Compilation.done = True
cop = root is not None and root.attrib and root.attrib["type"] == "COP"
return filename, cop
def usage(message):
print(message)
print("\nThe PyCSP3 Compiler allows us to generate XCSP3 files.")
print("\n\nUsage: python3.5 <model> <data>")
print(" - <model> is the name of a Python file containing a PyCSP3 model (i.e., a Python file with code posting variables/constraints/objectives)")
print(" - <data> is either a fixed list of elementary data or the name of a JSON file")
# solver = s if s[0] not in {'[', '('} else s[1:re.search("[,)\]]", s).start()]
|
459068
|
from Core.App import App
from Core.Ui import *
from Services.Messages import Messages
from Services.Utils.Image.Config import Config as ImageLoaderConfig
from os import path
class FormInfo(QtWidgets.QDialog, UiFile.formInfo):
def __init__(self, windowTitle, videoData, formData, enableLabelTranslation=False, enableLabelSelection=False, enableFieldTranslation=False, enableFieldSelection=False):
super().__init__(parent=App.getActiveWindow(), useWindowGeometry=False)
self.setWindowTitle(windowTitle)
self.videoData = videoData
self.formData = formData
self.enableLabelTranslation = enableLabelTranslation
self.enableLabelSelection = enableLabelSelection
self.enableFieldTranslation = enableFieldTranslation
self.enableFieldSelection = enableFieldSelection
        if self.videoData is None:
self.tabWidget.setTabVisible(1, False)
self.videoWidgetPlaceholder.hide()
self.line.hide()
else:
self.setWindowFlag(QtCore.Qt.WindowMaximizeButtonHint)
Utils.setPlaceholder(self.videoWidgetPlaceholder, Ui.VideoWidget(self.videoData, resizable=True, showMore=False))
self.setPreviewTab()
self.setFormData()
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
def setFormData(self):
for key, value in self.formData.items():
if not isinstance(key, QtCore.QObject):
key = QtWidgets.QLabel(T(str(key)) if self.enableLabelTranslation else str(key))
                if self.videoData is not None:
key.setText("{}:".format(key.text()))
if not isinstance(value, QtCore.QObject):
value = QtWidgets.QLabel(T(str(value)) if self.enableFieldTranslation else str(value))
                if self.videoData is not None:
value.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Preferred)
if self.enableLabelSelection and type(key) == QtWidgets.QLabel:
key.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse | QtCore.Qt.TextSelectableByKeyboard)
if self.enableFieldSelection and type(value) == QtWidgets.QLabel:
value.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse | QtCore.Qt.TextSelectableByKeyboard)
self.formInfoArea.layout().addRow(key, value)
def setPreviewTab(self):
self.previewWidget = Ui.VideoWidget(self.videoData, resizable=True, previewOnly=True)
Utils.setPlaceholder(self.previewVideoWidgetPlaceholder, self.previewWidget)
self.saveImageButton.clicked.connect(self.saveImage)
self.embedUrl = self.previewWidget.metaData["thumbnailImage"][0].format(width=1920, height=1080)
if self.embedUrl == "":
self.urlArea.setEnabled(False)
else:
self.urlData.setText(self.embedUrl)
self.urlData.setCursorPosition(0)
self.copyUrlButton.clicked.connect(self.copyUrl)
self.openUrlButton.clicked.connect(self.openUrl)
def saveImage(self):
DB.temp.updateDefaultDirectory()
directory = DB.temp.getDefaultDirectory()
filters = self.getAvailableFormats()
initialFilter = DB.temp.getDefaultFormat(ImageLoaderConfig.IMAGE_DATA_TYPE)
fileName = Utils.askSaveDirectory(Utils.joinPath(directory, self.createFileName()), filters, initialFilter)
        if fileName is not None:
try:
self.previewWidget.thumbnail_image.pixmap().save(fileName)
except:
Utils.info(*Messages.INFO.FILE_SYSTEM_ERROR)
else:
self.saveOptions(fileName)
if Utils.ask(
"save-complete",
"{}\n\n{}".format(T("#Save completed."), fileName),
okText="open",
cancelText="ok"
):
try:
Utils.openFile(fileName)
except:
Utils.info(*Messages.INFO.FILE_NOT_FOUND)
def createFileName(self):
return Utils.getValidFileName(
"[{type} {preview}] [{channel}] {id}".format(
type=self.formData["file-type"],
preview=T("preview"),
channel=self.formData["channel"],
id=self.videoData.id
)
)
def getAvailableFormats(self):
return ["jpg", "png"]
def saveOptions(self, absoluteFileName):
directory = path.dirname(absoluteFileName)
fileFormat = path.basename(absoluteFileName).rsplit(".", 1)[1]
DB.temp.setDefaultDirectory(directory)
DB.temp.setDefaultFormat(ImageLoaderConfig.IMAGE_DATA_TYPE, fileFormat)
def copyUrl(self):
Utils.copyToClipboard(self.embedUrl)
Utils.info("notification", "#Copied to clipboard.")
def openUrl(self):
Utils.openUrl(self.embedUrl)
|
459085
|
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String
from sqlalchemy.orm import backref, relationship
from okcupyd.db import Base, OKCBase
class User(OKCBase):
__tablename__ = 'user'
@classmethod
def from_profile(cls, profile):
return cls(okc_id=profile.id, handle=profile.username, age=profile.age,
location=profile.location)
handle = Column(String, nullable=False)
age = Column(String, nullable=False)
location = Column(String, nullable=False)
class OKCupydUser(Base):
__tablename__ = 'okcupyd_user'
inbox_last_updated = Column(DateTime, nullable=True)
outbox_last_updated = Column(DateTime, nullable=True)
user_id = Column(Integer, ForeignKey("user.id"), unique=True)
user = relationship("User", foreign_keys=[user_id],
backref=backref('okcupyd_user', uselist=False))
|
459087
|
load("@build_bazel_rules_swift//swift:repositories.bzl", "swift_rules_dependencies")
load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies")
load("@build_bazel_apple_support//lib:repositories.bzl", "apple_support_dependencies")
load("@rules_jvm_external//:defs.bzl", "maven_install")
load("@rules_detekt//detekt:dependencies.bzl", "rules_detekt_dependencies")
load("@io_bazel_rules_kotlin//kotlin:repositories.bzl", "kotlin_repositories")
load("@io_grpc_grpc_java//:repositories.bzl", "grpc_java_repositories")
load("@rules_proto_grpc//protobuf:repositories.bzl", "protobuf_repos")
load("@rules_proto_grpc//java:repositories.bzl", rules_proto_grpc_java_repos = "java_repos")
load("@rules_python//python:pip.bzl", "pip_install")
load("@robolectric//bazel:robolectric.bzl", "robolectric_repositories")
load("@rules_java//java:repositories.bzl", "rules_java_dependencies")
def _default_extra_swift_sources_impl(ctx):
ctx.file("WORKSPACE", "")
ctx.file("empty.swift", "")
ctx.file("BUILD.bazel", """
filegroup(
name = "extra_swift_srcs",
srcs = ["empty.swift"],
visibility = ["//visibility:public"],
)
objc_library(
name = "extra_private_dep",
module_name = "FakeDep",
visibility = ["//visibility:public"],
)""")
_default_extra_swift_sources = repository_rule(
implementation = _default_extra_swift_sources_impl,
)
def _default_extra_jni_deps_impl(ctx):
ctx.file("WORKSPACE", "")
ctx.file("BUILD.bazel", """
cc_library(
name = "extra_jni_dep",
visibility = ["//visibility:public"],
)""")
_default_extra_jni_deps = repository_rule(
implementation = _default_extra_jni_deps_impl,
)
def envoy_mobile_dependencies(extra_maven_dependencies = []):
if not native.existing_rule("envoy_mobile_extra_swift_sources"):
_default_extra_swift_sources(name = "envoy_mobile_extra_swift_sources")
if not native.existing_rule("envoy_mobile_extra_jni_deps"):
_default_extra_jni_deps(name = "envoy_mobile_extra_jni_deps")
swift_dependencies()
kotlin_dependencies(extra_maven_dependencies)
python_dependencies()
def swift_dependencies():
apple_support_dependencies()
apple_rules_dependencies(ignore_version_differences = True)
swift_rules_dependencies()
def kotlin_dependencies(extra_maven_dependencies = []):
rules_java_dependencies()
maven_install(
artifacts = [
"com.google.code.findbugs:jsr305:3.0.2",
"com.google.flatbuffers:flatbuffers-java:2.0.3",
# Kotlin
"org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.6.21",
"org.jetbrains.kotlin:kotlin-stdlib-common:1.6.21",
"org.jetbrains.kotlin:kotlin-stdlib:1.6.21",
"androidx.recyclerview:recyclerview:1.1.0",
"androidx.core:core:1.3.2",
# Dokka
"org.jetbrains.dokka:dokka-cli:1.5.31",
"org.jetbrains.dokka:javadoc-plugin:1.5.31",
# Test artifacts
"org.assertj:assertj-core:3.12.0",
"junit:junit:4.12",
"org.mockito:mockito-inline:2.28.2",
"org.mockito:mockito-core:2.28.2",
"com.squareup.okhttp3:okhttp:4.9.1",
"com.squareup.okhttp3:mockwebserver:4.9.1",
"io.github.classgraph:classgraph:4.8.121",
"io.netty:netty-all:4.1.74.Final",
# Android test artifacts
"androidx.test:core:1.3.0",
"androidx.test:rules:1.3.0",
"androidx.test:runner:1.3.0",
"androidx.test:monitor:1.3.0",
"androidx.test.ext:junit:1.1.2",
"org.robolectric:robolectric:4.4",
"org.hamcrest:hamcrest:2.2",
"com.google.truth:truth:1.1",
] + extra_maven_dependencies,
version_conflict_policy = "pinned",
repositories = [
"https://repo1.maven.org/maven2",
"https://maven.google.com",
],
)
kotlin_repositories()
rules_detekt_dependencies()
robolectric_repositories()
grpc_java_repositories(
omit_bazel_skylib = True,
omit_com_google_protobuf = True,
omit_com_google_protobuf_javalite = True,
omit_net_zlib = True,
)
protobuf_repos()
rules_proto_grpc_java_repos()
def python_dependencies():
# TODO: bifurcate dev deps vs. prod deps
# pip_install(
# requirements = ":dev_requirements.txt",
# )
pip_install(
requirements = "//third_party/python:requirements.txt",
timeout = 1000,
)
|
459113
|
from django import template
from django.template.defaultfilters import stringfilter
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static
import os
import json
register = template.Library()
@register.simple_tag
def webpack(asset_name: str) -> str:
if settings.ENV == 'production':
# Read version-stamped urls from manifest.json once and cache for process lifetime
if not hasattr(webpack, 'manifest'):
webpack.manifest = json.loads(open(os.path.join(settings.STATIC_DIR, "build/manifest.json")).read())
return settings.STATIC_URL + "build/" + webpack.manifest[asset_name]
else:
return settings.WEBPACK_DEV_URL + "/" + asset_name
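# Template usage sketch (assumes this module is registered as a template tag
# library, e.g. loadable as "webpack"; 'main.js' is a hypothetical asset name):
#
#   {% load webpack %}
#   <script src="{% webpack 'main.js' %}"></script>
#
# In production the tag resolves the asset through build/manifest.json; in
# development it points at WEBPACK_DEV_URL/<asset_name>.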
|
459129
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# from matplotlib.ticker import FormatStrFormatter
import librosa
import librosa.display
# original_sample = 'Real Test/full_train_test_true.wav'
# ds_sample = 'Real Test/full_train_test_ds.wav'
# reco_sample = 'Real Test/full_train_test_reco.wav'
# output_file_name = 'Real Test/real_full_train_test_spec_comp.pdf'
# original_sample = 'full_train_test_true.wav'
# ds_sample = 'full_train_test_ds.wav'
# reco_sample = 'full_train_test_reco.wav'
# output_file_name = 'full_train_test_spec_comp.pdf'
# original_sample = 'full_train_validation_true.wav'
# ds_sample = 'full_train_validation_ds.wav'
# reco_sample = 'full_train_validation_reco.wav'
# output_file_name = 'full_train_validation_spec_comp.pdf'
original_sample = 'overtrain_true.wav'
ds_sample = 'overtrain_ds.wav'
reco_sample = 'overtrain_reco.wav'
output_file_name = 'overtrain_spec_comp.pdf'
# original_sample = 'loss_function_comparison/gm_overtrain_true.wav'
# ds_sample = 'loss_function_comparison/gm_overtrain_ds.wav'
# reco_sample = 'loss_function_comparison/gm_overtrain_reco.wav'
# output_file_name = 'loss_function_comparison/gm_overtrain_spec_comp.pdf'
n_fft = 4*512
# n_fft = 256
y_true, sr_true = librosa.load(original_sample, sr=None)
y_ds, sr_ds = librosa.load(ds_sample, sr=None)
y_reco, sr_reco = librosa.load(reco_sample, sr=None)
def compute_signal_to_noise(truth, reco):
return 10.*np.log10(np.sqrt(np.sum(truth**2))/np.sqrt(
np.sum((truth - reco)**2)))
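# Note: the value above is 10*log10 of an amplitude (root-sum-of-squares) ratio,
# i.e. half of the conventional power-based SNR in dB, which would use
# 20*log10 of the same amplitude ratio.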
def plot_all(true_spectrogram, ds_spectrogram, reco_spectrogram,
true_waveform, ds_waveform, reco_waveform,
true_sr, ds_sr, reco_sr, ofile, n_fft):
# max_frame = 200
max_frame = 100
plt.figure(figsize=(8, 6))
if not (true_sr == ds_sr == reco_sr):
print('Warning: time axis on waveform plots will be meaningless')
# compute dB-scale magnitudes
true_dB = librosa.amplitude_to_db(true_spectrogram, ref=np.max)
ds_dB = librosa.amplitude_to_db(ds_spectrogram, ref=np.max)
reco_dB = librosa.amplitude_to_db(reco_spectrogram, ref=np.max)
# compute LSD
true_X = np.log10(np.abs(true_spectrogram)**2)
ds_X = np.log10(np.abs(ds_spectrogram)**2)
reco_X = np.log10(np.abs(reco_spectrogram)**2)
ds_X_diff_squared = (true_X - ds_X)**2
reco_X_diff_squared = (true_X - reco_X)**2
ds_lsd = np.mean(np.sqrt(np.mean(ds_X_diff_squared, axis=0)))
reco_lsd = np.mean(np.sqrt(np.mean(reco_X_diff_squared, axis=0)))
# spectrogram plots
# cmap = 'nipy_spectral'
# cmap = 'rainbow_r'
# cmap = 'gist_rainbow'
cmap = 'viridis'
# cmap = 'inferno_r'
# cmap = 'magma_r'
# cmap = 'plasma_r'
ax = plt.subplot(3, 2, 1)
plt.title('True Spectrum (dB)')
fig = librosa.display.specshow(true_dB,
sr=true_sr, y_axis='hz', x_axis='time',
                                   hop_length=n_fft//4, cmap=cmap,
edgecolors='face')
fig.axes.set_xticklabels([])
plt.xlabel('')
plt.ylabel('frequency (Hz)')
ax = plt.subplot(3, 2, 3)
plt.title('Downsampled Spectrum (dB)')
fig = librosa.display.specshow(ds_dB,
sr=ds_sr, y_axis='hz', x_axis='time',
                                   hop_length=n_fft//4, cmap=cmap,
edgecolors='face')
fig.axes.set_xticklabels([])
plt.xlabel('')
plt.ylabel('frequency (Hz)')
ax.text(0.65, 0.25, r'LSD={:.2}'.format(ds_lsd),
color='blue', fontsize=13, transform=ax.transAxes,
backgroundcolor='white')
ax = plt.subplot(3, 2, 5)
plt.title('Reconstructed Spectrum (dB)')
fig = librosa.display.specshow(reco_dB,
sr=reco_sr, y_axis='hz', x_axis='time',
                                   hop_length=n_fft//4, cmap=cmap,
edgecolors='face')
plt.xlabel('time (s)')
plt.ylabel('frequency (Hz)')
ax.text(0.65, 0.25, r'LSD={:.2}'.format(reco_lsd),
color='blue', fontsize=13, transform=ax.transAxes,
backgroundcolor='white')
# compute SNR for waveform plots
ds_snr = compute_signal_to_noise(true_waveform, ds_waveform)
reco_snr = compute_signal_to_noise(true_waveform, reco_waveform)
# waveform plots
ax = plt.subplot(3, 2, 2)
ax.set_xticklabels([])
    true_time = np.arange(max_frame, dtype=float)/float(true_sr)
plt.title('True Waveform (16 kbps)')
fig = plt.plot(true_time, true_waveform[:max_frame])
plt.ylabel('amplitude')
ax = plt.subplot(3, 2, 4)
ax.set_xticklabels([])
    ds_time = np.arange(max_frame, dtype=float)/float(ds_sr)
plt.title('Downsampled Waveform (4 kbps)')
fig = plt.plot(ds_time, ds_waveform[:max_frame])
plt.ylabel('amplitude')
ax.text(0.05, 0.1, r'SNR={:.1f}'.format(ds_snr),
color='blue', fontsize=13, transform=ax.transAxes,
backgroundcolor='white')
ax = plt.subplot(3, 2, 6)
    reco_time = np.arange(max_frame, dtype=float)/float(reco_sr)
plt.title('Reconstructed Waveform (16 kbps)')
fig = plt.plot(reco_time, reco_waveform[:max_frame])
plt.ylabel('amplitude')
plt.xlabel('time (s)')
ax.text(0.05, 0.1, r'SNR={:.1f}'.format(reco_snr),
color='blue', fontsize=13, transform=ax.transAxes,
backgroundcolor='white')
plt.tight_layout()
plt.savefig(ofile)
# Reads wav file and produces spectrum
def read_audio_spectrum(x, **kwd_args):
return librosa.core.stft(x, **kwd_args)
true_spectrogram = read_audio_spectrum(y_true, n_fft=n_fft)
ds_spectrogram = read_audio_spectrum(y_ds, n_fft=n_fft)
reco_spectrogram = read_audio_spectrum(y_reco, n_fft=n_fft)
plot_all(true_spectrogram, ds_spectrogram, reco_spectrogram,
y_true, y_ds, y_reco,
sr_true, sr_ds, sr_reco,
output_file_name, n_fft)
|
459210
|
from django.conf.urls import include, url
from rest_framework import routers
from . import viewsets
router = routers.DefaultRouter()
router.register(r'organizations', viewsets.OrganizationViewSet)
router.register(r'applications', viewsets.ApplicationViewSet)
router.register(r'tags', viewsets.TagViewSet)
router.register(r'people', viewsets.PersonViewSet)
router.register(r'engagements', viewsets.EngagementViewSet)
router.register(r'activities', viewsets.ActivityViewSet)
router.register(r'activities_types', viewsets.ActivityTypeViewSet)
router.register(r'users', viewsets.UserViewSet)
urlpatterns = [
url(r'^v0/', include(router.urls), name='v0'),
#url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
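# Routes generated by the DefaultRouter above (illustrative, standard DRF behaviour):
#   /v0/organizations/          list / create
#   /v0/organizations/<pk>/     retrieve / update / destroy
#   ...and likewise for applications, tags, people, engagements, activities,
#   activities_types and users.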
|
459224
|
import unittest
import os
import shutil
import yaml
import xnmt
from xnmt import events, param_collections, persistence, utils
class TestPath(unittest.TestCase):
def setUp(self):
pass
def test_init(self):
self.assertTrue(type(persistence.Path(""))==persistence.Path)
self.assertTrue(type(persistence.Path(".."))==persistence.Path)
self.assertTrue(type(persistence.Path(".2"))==persistence.Path)
self.assertTrue(type(persistence.Path("one.2"))==persistence.Path)
with self.assertRaises(ValueError):
persistence.Path(".one.")
persistence.Path("one..2")
def test_str(self):
self.assertEqual(str(persistence.Path("one.2")), "one.2")
self.assertEqual(str(persistence.Path("")), "")
def test_set(self):
s = {persistence.Path("one.2"), persistence.Path("one.1.3"), persistence.Path("one.1.3")}
self.assertIn(persistence.Path("one.2"), s)
self.assertEqual(len(s), 2)
def test_append(self):
self.assertEqual(str(persistence.Path("one").append("2")), "one.2")
self.assertEqual(str(persistence.Path("").append("2")), "2")
self.assertEqual(str(persistence.Path(".").append("2")), ".2")
self.assertEqual(str(persistence.Path(".1.2").append("2")), ".1.2.2")
with self.assertRaises(ValueError):
persistence.Path("one").append("")
with self.assertRaises(ValueError):
persistence.Path("one").append(".")
with self.assertRaises(ValueError):
persistence.Path("one").append("two.3")
def test_add_path(self):
self.assertEqual(str(persistence.Path("one").add_path(persistence.Path("2"))), "one.2")
self.assertEqual(str(persistence.Path("one").add_path(persistence.Path("2.3"))), "one.2.3")
self.assertEqual(str(persistence.Path("").add_path(persistence.Path("2.3"))), "2.3")
self.assertEqual(str(persistence.Path("one.2").add_path(persistence.Path(""))), "one.2")
self.assertEqual(str(persistence.Path("").add_path(persistence.Path(""))), "")
self.assertEqual(str(persistence.Path(".").add_path(persistence.Path(""))), ".")
self.assertEqual(str(persistence.Path(".").add_path(persistence.Path("one.two"))), ".one.two")
self.assertEqual(str(persistence.Path(".xy").add_path(persistence.Path("one.two"))), ".xy.one.two")
with self.assertRaises(NotImplementedError):
persistence.Path("one").add_path(persistence.Path(".2.3"))
def test_get_absolute(self):
self.assertEqual(persistence.Path(".").get_absolute(persistence.Path("1.2")), persistence.Path("1.2"))
self.assertEqual(persistence.Path(".x.y").get_absolute(persistence.Path("1.2")), persistence.Path("1.2.x.y"))
self.assertEqual(persistence.Path("..x.y").get_absolute(persistence.Path("1.2")), persistence.Path("1.x.y"))
self.assertEqual(persistence.Path("...x.y").get_absolute(persistence.Path("1.2")), persistence.Path("x.y"))
with self.assertRaises(ValueError):
persistence.Path("....x.y").get_absolute(persistence.Path("1.2"))
def test_descend_one(self):
self.assertEqual(str(persistence.Path("one.2.3").descend_one()), "2.3")
self.assertEqual(str(persistence.Path("3").descend_one()), "")
with self.assertRaises(ValueError):
persistence.Path("").descend_one()
with self.assertRaises(ValueError):
persistence.Path(".one.2").descend_one()
def test_len(self):
self.assertEqual(len(persistence.Path("")), 0)
self.assertEqual(len(persistence.Path("one")), 1)
self.assertEqual(len(persistence.Path("one.2.3")), 3)
with self.assertRaises(ValueError):
len(persistence.Path(".one"))
len(persistence.Path("."))
def test_get_item(self):
self.assertEqual(persistence.Path("one")[0], "one")
self.assertEqual(persistence.Path("one.2.3")[0], "one")
self.assertEqual(persistence.Path("one.2.3")[2], "3")
self.assertEqual(persistence.Path("one.2.3")[-1], "3")
with self.assertRaises(ValueError):
persistence.Path(".one.2.3")[-1]
def test_get_item_slice(self):
self.assertEqual(str(persistence.Path("one")[0:1]), "one")
self.assertEqual(str(persistence.Path("one.2.3")[1:3]), "2.3")
self.assertEqual(str(persistence.Path("one.2.3")[0:-1]), "one.2")
self.assertEqual(str(persistence.Path("one.2.3")[-1:]), "3")
with self.assertRaises(ValueError):
persistence.Path(".one.2.3")[0:1:-1]
def test_parent(self):
self.assertEqual(persistence.Path("one").parent(), persistence.Path(""))
self.assertEqual(persistence.Path("one.two.three").parent(), persistence.Path("one.two"))
self.assertEqual(persistence.Path(".one").parent(), persistence.Path("."))
with self.assertRaises(ValueError):
persistence.Path(".").parent()
with self.assertRaises(ValueError):
persistence.Path("").parent()
def test_eq(self):
self.assertEqual(persistence.Path(""), persistence.Path(""))
self.assertEqual(persistence.Path(".."), persistence.Path(".."))
self.assertEqual(persistence.Path("one.2"), persistence.Path("one.2"))
self.assertEqual(persistence.Path("one.2"), persistence.Path("one.2.3").parent())
self.assertNotEqual(persistence.Path("one.2"), persistence.Path("one.2.3"))
self.assertNotEqual(persistence.Path(""), persistence.Path("."))
def test_ancestors(self):
self.assertEqual(persistence.Path("").ancestors(), {persistence.Path("")})
self.assertEqual(persistence.Path("a").ancestors(), {persistence.Path(""), persistence.Path("a")})
self.assertEqual(persistence.Path("one.two.three").ancestors(), {persistence.Path(""), persistence.Path("one"), persistence.Path("one.two"), persistence.Path("one.two.three")})
class DummyClass(persistence.Serializable):
yaml_tag = "!DummyClass"
@persistence.serializable_init
def __init__(self, arg1, arg2="{V2}", arg3="{V3}"):
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
class DummyClass2(persistence.Serializable):
yaml_tag = "!DummyClass2"
@persistence.serializable_init
def __init__(self, arg1=persistence.bare(DummyClass)):
self.arg1 = arg1
class DummyClass3(persistence.Serializable):
yaml_tag = "!DummyClass3"
@persistence.serializable_init
def __init__(self, arg1=persistence.bare(DummyClass2)):
self.arg1 = arg1
class DummyClassForgotBare(persistence.Serializable):
yaml_tag = "!DummyClassForgotBare"
@persistence.serializable_init
def __init__(self, arg1=DummyClass("")):
self.arg1 = arg1
class TestPreloader(unittest.TestCase):
def setUp(self):
yaml.add_representer(DummyClass, xnmt.init_representer)
self.out_dir = "test/tmp"
utils.make_parent_dir(f"{self.out_dir}/asdf")
def test_experiment_names_from_file(self):
with open(f"{self.out_dir}/tmp.yaml", "w") as f_out:
yaml.dump({
"exp1": DummyClass(""),
"exp2": DummyClass(""),
"exp10": DummyClass("")
},
f_out)
self.assertListEqual(persistence.YamlPreloader.experiment_names_from_file(f"{self.out_dir}/tmp.yaml"),
["exp1", "exp10", "exp2"])
def test_inconsistent_loadserialized(self):
with open(f"{self.out_dir}/tmp1.yaml", "w") as f_out:
yaml.dump(DummyClass(arg1="v1"), f_out)
test_obj = yaml.load(f"""
a: !LoadSerialized
filename: {self.out_dir}/tmp1.yaml
bad_arg: 1
""")
with self.assertRaises(ValueError):
persistence.YamlPreloader.preload_obj(test_obj, "SOME_EXP_NAME", "SOME_EXP_DIR")
def test_inconsistent_loadserialized2(self):
with open(f"{self.out_dir}/tmp1.yaml", "w") as f_out:
yaml.dump(DummyClass(arg1="v1"), f_out)
test_obj = yaml.load(f"""
a: !LoadSerialized
filename: {self.out_dir}/tmp1.yaml
overwrite:
- path: a
- val: b
""")
with self.assertRaises(ValueError):
persistence.YamlPreloader.preload_obj(test_obj, "SOME_EXP_NAME", "SOME_EXP_DIR")
def test_placeholder_loadserialized(self):
with open(f"{self.out_dir}/tmp1.yaml", "w") as f_out:
yaml.dump(DummyClass(arg1="v1"), f_out)
test_obj = yaml.load(f"""
a: !LoadSerialized
filename: '{{EXP_DIR}}/{{EXP}}.yaml'
""")
persistence.YamlPreloader.preload_obj(test_obj, exp_name = "tmp1", exp_dir=self.out_dir)
def test_load_referenced_serialized_top(self):
with open(f"{self.out_dir}/tmp1.yaml", "w") as f_out:
yaml.dump(DummyClass(arg1="v1"), f_out)
test_obj = yaml.load(f"!LoadSerialized {{ filename: {self.out_dir}/tmp1.yaml }}")
loaded_obj = persistence.YamlPreloader._load_serialized(test_obj)
self.assertIsInstance(loaded_obj, DummyClass)
self.assertEqual(loaded_obj.arg1, "v1")
def test_load_referenced_serialized_nested(self):
with open(f"{self.out_dir}/tmp1.yaml", "w") as f_out:
yaml.dump(DummyClass(arg1="v1"), f_out)
test_obj = yaml.load(f"""
a: 1
b: !LoadSerialized
filename: {self.out_dir}/tmp1.yaml
overwrite:
- path: arg1
val: !LoadSerialized
filename: {self.out_dir}/tmp1.yaml
""")
loaded_obj = persistence.YamlPreloader._load_serialized(test_obj)
self.assertIsInstance(loaded_obj["b"], DummyClass)
self.assertIsInstance(loaded_obj["b"].arg1, DummyClass)
def test_resolve_kwargs(self):
test_obj = yaml.load("""
!DummyClass
kwargs:
arg1: 1
other_arg: 2
""")
persistence.YamlPreloader._resolve_kwargs(test_obj)
self.assertFalse(hasattr(test_obj, "kwargs"))
self.assertFalse(hasattr(test_obj, "arg2"))
self.assertEqual(getattr(test_obj, "arg1", None), 1)
self.assertEqual(getattr(test_obj, "other_arg", None), 2)
def test_resolve_bare_default_args(self):
test_obj = yaml.load("""
a: !DummyClass
arg1: !DummyClass2 {}
b: !DummyClass3 {}
""")
persistence.YamlPreloader._resolve_bare_default_args(test_obj)
self.assertIsInstance(test_obj["a"].arg1.arg1, DummyClass)
self.assertIsInstance(test_obj["b"].arg1, DummyClass2)
self.assertIsInstance(test_obj["b"].arg1.arg1, DummyClass)
def test_resolve_bare_default_args_illegal(self):
test_obj = yaml.load("""
a: !DummyClassForgotBare {}
""")
with self.assertRaises(ValueError):
persistence.YamlPreloader._resolve_bare_default_args(test_obj)
def test_format_strings(self):
test_obj = yaml.load("""
a: !DummyClass
arg1: '{V1}'
other_arg: 2
b: !DummyClass
arg1: 1
other_arg: '{V2}'
c: '{V1}/bla'
d: ['bla', 'bla.{V2}']
""")
persistence.YamlPreloader._format_strings(test_obj, {"V1":"val1", "V2":"val2"})
self.assertEqual(test_obj["a"].arg1, "val1")
self.assertEqual(test_obj["a"].other_arg, 2)
self.assertEqual(test_obj["a"].arg2, "val2")
self.assertFalse(hasattr(test_obj["a"], "arg3"))
self.assertEqual(test_obj["b"].arg1, 1)
self.assertEqual(test_obj["b"].other_arg, '{V2}')
self.assertEqual(test_obj["b"].arg2, "val2")
self.assertFalse(hasattr(test_obj["b"], "arg3"))
self.assertEqual(test_obj["c"], "val1/bla")
self.assertListEqual(test_obj["d"], ["bla", "bla.val2"])
class DummyArgClass(persistence.Serializable):
yaml_tag = "!DummyArgClass"
@persistence.serializable_init
def __init__(self, arg1, arg2):
pass # arg1 and arg2 are purposefully not kept
class DummyArgClass2(persistence.Serializable):
yaml_tag = "!DummyArgClass2"
@persistence.serializable_init
def __init__(self, v):
self.v = v
class TestSaving(unittest.TestCase):
def setUp(self):
events.clear()
xnmt.resolved_serialize_params = {}
yaml.add_representer(DummyArgClass, xnmt.init_representer)
yaml.add_representer(DummyArgClass2, xnmt.init_representer)
self.out_dir = os.path.join("test", "tmp")
utils.make_parent_dir(os.path.join(self.out_dir, "asdf"))
self.model_file = os.path.join(self.out_dir, "saved.mod")
param_collections.ParamManager.init_param_col()
param_collections.ParamManager.param_col.model_file = self.model_file
def test_shallow(self):
test_obj = yaml.load("""
a: !DummyArgClass
arg1: !DummyArgClass2
_xnmt_id: id1
v: some_val
arg2: !Ref { name: id1 }
""")
preloaded = persistence.YamlPreloader.preload_obj(root=test_obj,exp_name="exp1",exp_dir=self.out_dir)
initalized = persistence.initialize_if_needed(preloaded)
persistence.save_to_file(self.model_file, initalized)
def test_mid(self):
test_obj = yaml.load("""
a: !DummyArgClass
arg1: !DummyArgClass2
v: !DummyArgClass2
_xnmt_id: id1
v: some_val
arg2: !DummyArgClass2
v: !Ref { name: id1 }
""")
preloaded = persistence.YamlPreloader.preload_obj(root=test_obj,exp_name="exp1",exp_dir=self.out_dir)
initalized = persistence.initialize_if_needed(preloaded)
persistence.save_to_file(self.model_file, initalized)
def test_deep(self):
test_obj = yaml.load("""
a: !DummyArgClass
arg1: !DummyArgClass2
v: !DummyArgClass2
v: !DummyArgClass2
_xnmt_id: id1
v: some_val
arg2: !DummyArgClass2
v: !DummyArgClass2
v: !Ref { name: id1 }
""")
preloaded = persistence.YamlPreloader.preload_obj(root=test_obj,exp_name="exp1",exp_dir=self.out_dir)
initalized = persistence.initialize_if_needed(preloaded)
persistence.save_to_file(self.model_file, initalized)
def test_double_ref(self):
test_obj = yaml.load("""
a: !DummyArgClass
arg1: !DummyArgClass2
_xnmt_id: id1
v: some_val
arg2:
- !Ref { name: id1 }
- !Ref { name: id1 }
""")
preloaded = persistence.YamlPreloader.preload_obj(root=test_obj,exp_name="exp1",exp_dir=self.out_dir)
initalized = persistence.initialize_if_needed(preloaded)
persistence.save_to_file(self.model_file, initalized)
def tearDown(self):
try:
if os.path.isdir(os.path.join("test","tmp")):
shutil.rmtree(os.path.join("test","tmp"))
except:
pass
class TestReferences(unittest.TestCase):
def setUp(self):
events.clear()
xnmt.resolved_serialize_params = {}
yaml.add_representer(DummyArgClass, xnmt.init_representer)
yaml.add_representer(DummyArgClass2, xnmt.init_representer)
self.out_dir = os.path.join("test", "tmp")
utils.make_parent_dir(os.path.join(self.out_dir, "asdf"))
self.model_file = os.path.join(self.out_dir, "saved.mod")
param_collections.ParamManager.init_param_col()
param_collections.ParamManager.param_col.model_file = self.model_file
def test_simple_reference(self):
test_obj = yaml.load("""
!DummyArgClass
arg1: !DummyArgClass
arg1: !DummyArgClass2 { v: some_val }
arg2: !DummyArgClass2 { v: some_other_val }
arg2: !Ref { path: arg1 }
""")
preloaded = persistence.YamlPreloader.preload_obj(root=test_obj,exp_name="exp1",exp_dir=self.out_dir)
initialized = persistence.initialize_if_needed(preloaded)
dump = persistence._dump(initialized)
reloaded = yaml.load(dump)
if isinstance(reloaded.arg1, persistence.Ref):
reloaded.arg1, reloaded.arg2 = reloaded.arg2, reloaded.arg1
self.assertIsInstance(reloaded.arg1, DummyArgClass)
self.assertIsInstance(reloaded.arg2, persistence.Ref)
self.assertIsInstance(reloaded.arg1.arg1, DummyArgClass2)
self.assertIsInstance(reloaded.arg1.arg2, DummyArgClass2)
if __name__ == '__main__':
unittest.main()
|
459236
|
import random
import pykka
from mopidy.core import listener
from mopidy_youtube import logger, youtube
from mopidy_youtube.data import extract_video_id, format_video_uri
autoplay_enabled = False
strict_autoplay = False
max_autoplay_length = None
autoplayed = []
max_degrees_of_separation = 3
class YouTubeAutoplayer(pykka.ThreadingActor, listener.CoreListener):
def __init__(self, config, core):
super().__init__()
self.config = config
self.core = core
self.autoplay_enabled = config["youtube"]["autoplay_enabled"]
self.strict_autoplay = config["youtube"]["strict_autoplay"]
self.max_degrees_of_separation = config["youtube"]["max_degrees_of_separation"]
self.max_autoplay_length = config["youtube"]["max_autoplay_length"]
self.base_track_id = ""
self.degrees_of_separation = 0
# Called by mopidy on start of playback of a URI
    # This function emulates the youtube autoplay functionality by retrieving the
    # most related video to a video just played via a youtube API call, adding this new
# video URI to the tracklist
#
# With the option "strict_autoplay" enabled, the next played URI will be the newly
# added video. Without the option "strict_autoplay" enabled [default], the autoplay
# functionality will only be executed if the end of the current tracklist is reached
#
# The autoplay functionality will not work correctly in combination with the repeat
# option and is therefore disabled if repeat is enabled
def track_playback_started(self, tl_track):
if not self.autoplay_enabled:
return None
[tlTrackId, track] = tl_track
if not track.uri.startswith("youtube:") and not track.uri.startswith("yt:"):
return None
try:
tl = self.core.tracklist
if tl.get_repeat().get() is True:
                logger.warning("Autoplayer: will not add tracks when repeat is enabled.")
return None
if tl.get_random().get() is True:
                logger.warning(
"Autoplayer: shuffle will not work when autoplay is enabled."
)
if self.strict_autoplay is False:
tlTracks = tl.get_tl_tracks().get()
if len(tlTracks) != 0:
                    if tlTrackId != tlTracks[-1].tlid:
logger.debug("Autoplayer: called not at end of track list.")
return None
elif tl.get_consume().get() is True:
logger.warning(
"Autoplayer: when having consume track enabled, "
'try with "strict_autoplay" option enabled for '
"better results"
)
return None
current_track_id = extract_video_id(track.uri)
if self.max_degrees_of_separation:
if self.degrees_of_separation < self.max_degrees_of_separation:
self.degrees_of_separation += 1
logger.debug("incrementing autoplay degrees of separation")
else:
current_track_id = self.base_track_id
self.degrees_of_separation = 0
logger.debug("resetting to autoplay base track id")
if current_track_id not in autoplayed:
self.base_track_id = current_track_id
autoplayed.append(current_track_id) # avoid replaying track
self.degrees_of_separation = 0
logger.debug("setting new autoplay base id")
current_track = youtube.Video.get(current_track_id)
current_track.related_videos
related_videos = current_track.related_videos.get()
logger.debug(
f"autoplayer is adding a track related to {current_track.title.get()}"
)
# remove already autoplayed
related_videos[:] = [
related_video
for related_video in related_videos
if related_video.id not in autoplayed
]
# remove if track_length is 0 (probably a live video) or None
related_videos[:] = [
related_video
for related_video in related_videos
if related_video.length.get()
]
# remove if too long
if self.max_autoplay_length:
related_videos[:] = [
related_video
for related_video in related_videos
if related_video.length.get() < self.max_autoplay_length
]
if len(related_videos) == 0:
logger.warn(
f"could not get videos related to"
f"{current_track.title.get()}: ending autoplay"
)
return None
else:
next_video = random.choice(related_videos)
autoplayed.append(next_video.id)
uri = [format_video_uri(next_video.id)]
tl.add(uris=uri).get()
return None
except Exception as e:
logger.error('Autoplayer error "%s"', e)
return None
|
459314
|
import os
import platform
import tempfile
import unittest
import urllib
from cupy import testing
from cupyx.tools import install_library
import pytest
@testing.parameterize(
{'library': 'cudnn'},
{'library': 'cutensor'},
{'library': 'nccl'},
)
class TestInstallLibrary(unittest.TestCase):
@testing.slow
def test_install(self):
system = platform.system()
if system == 'Windows' and self.library == 'nccl':
pytest.skip('NCCL is only available for Linux')
# Try installing library for all supported CUDA versions
for rec in install_library.library_records[self.library]:
cuda = rec['cuda']
version = rec[self.library]
filenames = rec['assets'][system]['filenames']
with tempfile.TemporaryDirectory() as d:
install_library.install_lib(cuda, d, self.library)
self._check_installed(
d, cuda, self.library, version, filenames)
def _check_installed(self, prefix, cuda, lib, version, filenames):
install_root = os.path.join(prefix, cuda, lib, version)
assert os.path.isdir(install_root)
for _x, _y, files in os.walk(install_root):
for filename in filenames:
if filename in files:
return
        pytest.fail('expected file could not be found')
def test_urls(self):
assets = [r['assets']
for r in install_library.library_records[self.library]]
for asset in assets:
for system in asset.keys():
url = asset[system]['url']
with urllib.request.urlopen(
urllib.request.Request(url, method='HEAD')) as resp:
assert resp.getcode() == 200
def test_main(self):
install_library.main(
['--library', self.library, '--action', 'dump', '--cuda', 'null'])
|
459338
|
from django.core.management.base import NoArgsCommand, BaseCommand
from django.core.mail import send_mail, EmailMessage
from optparse import make_option
from texas.models import *
from datetime import datetime
import random
import string
import time
class Command(BaseCommand):
args = "<occurrence_id>"
help = "Send email blast to purchasers"
def handle(self, *args, **options):
occurrence_id = args[0]
purchases = Purchase.objects.filter(occurrence__id=occurrence_id,
status='P').order_by('user__email');
#purchases = purchases.filter(user__email='<EMAIL>')
last_email = ''
for purchase in purchases:
if purchase.user.email == last_email:
print 'skip'
else:
print purchase.user.email
event = purchase.occurrence.event
site = Site.objects.get(id=settings.SITE_ID)
subject = "New Ticketing Hours for %s - Please Read" % event.label
from_address = "<EMAIL>"
to_address = [purchase.user.email]
body = """Just 1 day away from the Fall Burn and I wanted to get some important information out to you. There will be new Ticketing Hours for this burn
Thursday 5pm - Friday 3am (CLOSED 3am - 9am)
Friday 9am - Saturday 1:30am (CLOSED 1:30am - 9am)
Saturday 9am - Saturday 8pm (CLOSED After 8pm till the end of the event)
Sunday - CLOSED
Monday - CLOSED
If you show up during closed hours, you will be asked to leave the site until 9am the following morning. There will be no hanging out in the parking lot during closed hours.
As the ticket purchaser it is your responsibility to forward this information on to those you purchased tickets for. Please pass it along to everyone you know who has a ticket.
Thank You!"""
if False and event.survival_guide:
body += """\n\nYour survival guide for the event is attached, please review it at your convenince."""
email = EmailMessage(subject, body, from_address, to_address)
if False and event.survival_guide:
try:
email.attach_file("/srv/django/tickets/static/%s" % (event.survival_guide))
except:
pass
email.send()
time.sleep(1)
last_email = purchase.user.email
|
459346
|
import matplotlib.pyplot as plt
def gantt_chart_plot(JOBS, SCHEDULE, Machine_available, Title):
bw = 0.3
plt.figure(figsize=(12, 0.7*(len(JOBS.keys()))))
idx = 0
for j in sorted(JOBS.keys()):
x = 0
y = JOBS[j]['release']
plt.fill_between([x,y],[idx-bw,idx-bw],[idx+bw,idx+bw], color='cyan', alpha=0.6, label="release constraint")
x = SCHEDULE[j]['start']
y = SCHEDULE[j]['finish']
plt.fill_between([x,y],[idx-bw,idx-bw],[idx+bw,idx+bw], color='red', alpha=0.5, label="process interval")
plt.plot([x,y,y,x,x], [idx-bw,idx-bw,idx+bw,idx+bw,idx-bw],color='k')
plt.text((SCHEDULE[j]['start'] + SCHEDULE[j]['finish'])/2.0,idx,
'Job ' + str(j), color='white', weight='bold',
horizontalalignment='center', verticalalignment='center')
idx += 1
plt.ylim(-0.5, idx-0.5)
plt.title('Job Schedule '+ Title)
plt.xlabel('Time')
plt.ylabel('Jobs')
plt.yticks(range(len(JOBS)), JOBS.keys())
plt.grid()
xlim = plt.xlim()
# order machine for plotting nicely
MACHINES = sorted(set([SCHEDULE[j]['machine'] for j in JOBS.keys()]))
plt.figure(figsize=(12, 0.7*len(MACHINES)))
for j in sorted(JOBS.keys()):
idx = MACHINES.index(SCHEDULE[j]['machine'])
x = 0
y = Machine_available[idx]
plt.fill_between([x,y],[idx-bw,idx-bw],[idx+bw,idx+bw], color='green', alpha=0.5)
x = SCHEDULE[j]['start']
y = SCHEDULE[j]['finish']
plt.fill_between([x,y],[idx-bw,idx-bw],[idx+bw,idx+bw], color='red', alpha=0.5)
plt.plot([x,y,y,x,x], [idx-bw,idx-bw,idx+bw,idx+bw,idx-bw],color='k')
plt.text((SCHEDULE[j]['start'] + SCHEDULE[j]['finish'])/2.0,idx,
'Job ' + str(j), color='white', weight='bold',
horizontalalignment='center', verticalalignment='center')
plt.xlim(xlim)
plt.ylim(-0.5, len(MACHINES)-0.5)
plt.title('Machine Schedule '+ Title)
plt.yticks(range(len(MACHINES)), MACHINES)
plt.ylabel('Machines')
plt.grid()
def formulate_jobs_dict(job_ids, release_times, process_intervals, wait_times):
job_dict = {}
for idx, j_id in enumerate(job_ids):
job_dict[j_id] = {}
job_dict[j_id]['release'] = release_times[idx]
job_dict[j_id]['duration'] = process_intervals[idx]
job_dict[j_id]['waiting'] = wait_times[idx]
return job_dict
def formulate_schedule_dict(job_ids, release_times, process_intervals, wait_times, machine_dispatches):
schedule_dict = {}
for idx, j_id in enumerate(job_ids):
schedule_dict[j_id] = {}
schedule_dict[j_id]['start'] = release_times[idx]+wait_times[idx]
schedule_dict[j_id]['finish'] = release_times[idx]+process_intervals[idx]+wait_times[idx]
schedule_dict[j_id]['machine'] = machine_dispatches[idx]
return schedule_dict
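# Minimal usage sketch (not part of the original module; the job data below is purely
# illustrative) showing how the two formatters feed gantt_chart_plot:
if __name__ == '__main__':
    job_ids = ['A', 'B', 'C']
    release_times = [0, 1, 2]
    process_intervals = [3, 2, 4]
    wait_times = [0, 2, 1]
    machines = ['M1', 'M2', 'M1']
    jobs = formulate_jobs_dict(job_ids, release_times, process_intervals, wait_times)
    schedule = formulate_schedule_dict(job_ids, release_times, process_intervals,
                                       wait_times, machines)
    # Machine_available gives the release time of each machine, ordered as in
    # sorted(set(machines)) -> ['M1', 'M2']
    gantt_chart_plot(jobs, schedule, Machine_available=[0, 0], Title='(example)')
    plt.show()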
|
459349
|
import numpy as np
import json, os
import preprocessors.fft as fft
from utils.loader import load_grouped_train_data, load_train_data
from utils.config_name_creator import *
from sklearn.linear_model import LogisticRegression
import cPickle
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import log_loss, roc_curve, auc
from commons import reshape_data
def predict(model, x_test, n_test_examples, n_timesteps):
pred_1m = model.predict_proba(x_test)[:, 1]
pred_10m = np.reshape(pred_1m, (n_test_examples, n_timesteps))
pred_10m = np.mean(pred_10m, axis=1)
return pred_10m
def cross_validate(subject, data_path, reg_C, random_cv=False):
if random_cv:
d = load_train_data(data_path,subject)
x, y = d['x'], d['y']
skf = StratifiedKFold(y, n_folds=10)
else:
filenames_grouped_by_hour = cPickle.load(open('filenames.pickle'))
data_grouped_by_hour = load_grouped_train_data(data_path, subject, filenames_grouped_by_hour)
n_preictal, n_interictal = len(data_grouped_by_hour['preictal']), len(data_grouped_by_hour['interictal'])
hours_data = data_grouped_by_hour['preictal'] + data_grouped_by_hour['interictal']
hours_labels = np.concatenate((np.ones(n_preictal), np.zeros(n_interictal)))
n_folds = n_preictal
skf = StratifiedKFold(hours_labels, n_folds=n_folds)
preictal_probs, labels = [], []
for train_indexes, valid_indexes in skf:
x_train, x_valid = [], []
y_train, y_valid = [], []
for i in train_indexes:
x_train.extend(hours_data[i])
y_train.extend(hours_labels[i] * np.ones(len(hours_data[i])))
for i in valid_indexes:
x_valid.extend(hours_data[i])
y_valid.extend(hours_labels[i] * np.ones(len(hours_data[i])))
x_train = [x[..., np.newaxis] for x in x_train]
x_train = np.concatenate(x_train, axis=3)
x_train = np.rollaxis(x_train, axis=3)
y_train = np.array(y_train)
x_valid = [x[..., np.newaxis] for x in x_valid]
x_valid = np.concatenate(x_valid, axis=3)
x_valid = np.rollaxis(x_valid, axis=3)
y_valid = np.array(y_valid)
n_valid_examples = x_valid.shape[0]
n_timesteps = x_valid.shape[-1]
x_train, y_train = reshape_data(x_train, y_train)
data_scaler = StandardScaler()
x_train = data_scaler.fit_transform(x_train)
logreg = LogisticRegression(C=reg_C)
logreg.fit(x_train, y_train)
x_valid = reshape_data(x_valid)
x_valid = data_scaler.transform(x_valid)
p_valid = predict(logreg, x_valid, n_valid_examples, n_timesteps)
preictal_probs.extend(p_valid)
labels.extend(y_valid)
return preictal_probs, labels
def run_trainer():
with open('SETTINGS.json') as f:
settings_dict = json.load(f)
# path
data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
print data_path
if not os.path.exists(data_path):
fft.run_fft_preprocessor()
subjects = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']
for reg_C in [10000000, 100, 10, 1.0, 0.1, 0.01]:
print reg_C
all_valid_probs = []
all_valid_y = []
for subject in subjects:
p, y = cross_validate(subject, data_path, reg_C=reg_C)
all_valid_probs.extend(p)
all_valid_y.extend(y)
fpr, tpr, _ = roc_curve(all_valid_y, all_valid_probs, pos_label=1)
print auc(fpr, tpr)
print log_loss(all_valid_y, all_valid_probs)
if __name__ == '__main__':
run_trainer()
|
459362
|
import threading
import queue
import time
class track_cmd(object):
"""docstring for track"""
def __init__(self, _id):
super(track_cmd, self).__init__()
self.q = queue.Queue()
self.stop = True
self._msg = {"id": _id, "stat": None}
def start(self):
self.stop = False
# socket thread
self.track_thread = threading.Thread(target=self.track_loop)
self.track_thread.start()
def track_loop(self):
while not self.stop:
time.sleep(0)
try:
if not self.q.empty():
msg = self.q.get()
if msg is None:
self.stop = True
else:
self._msg = msg
except:
pass
def msg(self):
return dict(self._msg)
def stat(self):
return self._msg["stat"]
def complete(self, time_out=0):
if time_out > 0:
start = time.time()
while time.time() <= start + time_out:
time.sleep(0)
try:
stat = self.stat()
if any([stat > 1, stat < 0]):
break
except:
pass
else:
while True:
time.sleep(0)
try:
stat = self.stat()
if any([stat > 1, stat < 0]):
break
except:
pass
return self.stat()
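# Usage sketch (illustrative; not part of the original module). Status dicts are fed
# through the queue and `None` stops the loop; complete() polls until the reported
# stat leaves the 0..1 "in progress" range or the timeout expires:
#
#   t = track_cmd('job-1')
#   t.start()
#   t.q.put({'id': 'job-1', 'stat': 2})
#   print(t.complete(time_out=1))   # -> 2
#   t.q.put(None)                   # terminates track_loop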
|
459365
|
from typing import Dict, Union
import numpy as np
from keras import Input, Model
from keras.layers import Flatten, Dense, Dropout
from numpy import ndarray
from pandas import DataFrame
from sklearn.base import ClassifierMixin
from src.encoding.encoding_parser import EncodingParser
from src.predictive_model.models import PredictiveModels
class NNClassifier(ClassifierMixin):
"""
Neural Network classifier, implements the same methods as the sklearn models to make it simple to add
"""
# noinspection PyTypeChecker
def __init__(self, **kwargs: Dict[str, Union[int, str, float]]):
"""initializes the Neural Network classifier
:param kwargs: configuration containing the predictive_model parameters, encoding and training parameters
"""
self._n_hidden_layers = int(kwargs['n_hidden_layers'])
self._n_hidden_units = int(kwargs['n_hidden_units'])
self._activation = str(kwargs['activation'])
self._n_epochs = int(kwargs['n_epochs'])
self._encoding = str(kwargs['encoding'])
self._dropout_rate = float(kwargs['dropout_rate'])
self._is_binary_classifier = bool(kwargs['is_binary_classifier'])
self._encoding_parser = EncodingParser(self._encoding, self._is_binary_classifier,
task=PredictiveModels.CLASSIFICATION.value)
self._model = None
self.classes_ = None # this is set for compatibility reasons
def fit(self, train_data: DataFrame, targets: ndarray) -> None:
"""creates and fits the predictive_model
first the encoded data is parsed, then the predictive_model created and then trained
:param train_data: encoded training dataset
:param targets: encoded target dataset
"""
targets = DataFrame(targets, columns=['label'])
train_data = self._encoding_parser.parse_training_dataset(train_data)
targets = self._encoding_parser.parse_targets(targets)
self.classes_ = targets.T
model_inputs = Input(train_data.shape[1:])
predicted = model_inputs
if self._encoding in ['simpleIndex', 'complex', 'lastPayload']:
predicted = Flatten()(predicted)
for _ in range(self._n_hidden_layers):
predicted = Dense(self._n_hidden_units, activation=self._activation)(predicted)
predicted = Dropout(self._dropout_rate)(predicted)
if self._is_binary_classifier:
predicted = Dense(1, activation='sigmoid')(predicted)
else:
predicted = Dense(targets.shape[1], activation='softmax')(predicted)
self._model = Model(model_inputs, predicted)
if self._is_binary_classifier:
self._model.compile(loss='binary_crossentropy', optimizer='adam')
else:
self._model.compile(loss='categorical_crossentropy', optimizer='adam')
self._model.fit(train_data, targets, epochs=self._n_epochs)
def predict(self, test_data: DataFrame) -> ndarray:
"""returns predictive_model predictions
parses the encoded test dataset, then returns the predictive_model predictions
:param test_data: encoded test dataset
:return: predictive_model predictions
"""
test_data = self._encoding_parser.parse_testing_dataset(test_data)
predictions = self._model.predict(test_data)
if self._is_binary_classifier:
predictions = predictions.astype(bool)
else:
predictions = np.argmax(predictions, -1)
return predictions
def predict_proba(self, test_data: DataFrame) -> ndarray:
"""returns the classification probability
parses the test dataset and returns the raw prediction probabilities of the predictive_model
:param test_data: encoded test dataset
:return: predictive_model prediction probabilities
"""
test_data = self._encoding_parser.parse_testing_dataset(test_data)
predictions = self._model.predict(test_data)
if self._is_binary_classifier:
predictions = np.max(predictions, -1)
predictions = np.vstack((1 - predictions, predictions)).T
return predictions
def reset(self) -> None:
"""
placeholder to allow use with other sklearn algorithms
"""
|
459375
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from commcare_cloud.commands.command_base import CommandBase
class TestEnvironments(CommandBase):
command = 'test-environments'
help = "Run test environments"
def run(self, args, unknown_args):
import nose
nose.runmodule('commcare_cloud.manage_commcare_cloud',
argv=['manage-commcare-cloud', '-v'])
|
459385
|
from __future__ import absolute_import
from django import forms
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from sentry import features, roles
from sentry.models import (
AuditLogEntry, AuditLogEntryEvent, Organization, OrganizationMember,
OrganizationMemberTeam
)
from sentry.web.frontend.base import BaseView
class NewOrganizationForm(forms.ModelForm):
name = forms.CharField(label=_('Organization Name'), max_length=200,
widget=forms.TextInput(attrs={'placeholder': _('My Company')}))
class Meta:
fields = ('name',)
model = Organization
class CreateOrganizationView(BaseView):
def get_form(self, request):
return NewOrganizationForm(request.POST or None)
def has_permission(self, request):
return features.has('organizations:create', actor=request.user)
def handle(self, request):
form = self.get_form(request)
if form.is_valid():
org = form.save()
om = OrganizationMember.objects.create(
organization=org,
user=request.user,
role=roles.get_top_dog().id,
)
team = org.team_set.create(
name=org.name,
)
OrganizationMemberTeam.objects.create(
team=team,
organizationmember=om,
is_active=True
)
AuditLogEntry.objects.create(
organization=org,
actor=request.user,
ip_address=request.META['REMOTE_ADDR'],
target_object=org.id,
event=AuditLogEntryEvent.ORG_ADD,
data=org.get_audit_log_data(),
)
url = reverse('sentry-create-project', args=[org.slug])
return HttpResponseRedirect('{}?team={}'.format(url, team.slug))
context = {
'form': form,
}
return self.respond('sentry/create-organization.html', context)
|
459392
|
from sqlalchemy.inspection import inspect
from .config import ALLOWED_METHODS
class Model(object):
__pks__ = None
__url__ = None
__cols__ = None
__rels__ = None
__table__ = None
__hidden__ = []
__version__ = '1'
__description__ = None
__methods__ = ALLOWED_METHODS
collection_suffix = 'List'
def __str__(self):
"""
:return: (str) primary key value
"""
return str(getattr(self, self.primary_key_field()))
@classmethod
def _load_columns(cls):
"""
"""
cls.__pks__ = []
cls.__cols__ = {}
for i in cls.__dict__:
if not (i.startswith('_') or i in cls.__hidden__):
col = getattr(cls, i)
try:
if col.primary_key:
cls.__pks__.append(i)
cls.__cols__[i] = col
except AttributeError:
pass
@classmethod
def _load_related(cls, **kwargs):
"""
"""
cls.__rels__ = {}
for r in inspect(cls).relationships:
try:
rel = r.argument.class_
except AttributeError:
rel = r.argument
key = rel.__name__
columns = rel().columns()
instance = getattr(cls, r.key)
cls.__rels__.update({key: dict(instance=instance, columns=columns)})
@classmethod
def columns(cls):
"""
:return:
"""
if cls.__cols__ is None:
cls._load_columns()
return cls.__cols__
@classmethod
def required(cls):
"""
:return:
"""
columns = []
for col, c in cls.columns().items():
if not (c.nullable or c.primary_key) or (c.primary_key and not c.autoincrement):
columns.append(col)
return columns
@classmethod
def searchable(cls):
"""
:return:
"""
columns = []
for col, c in cls.columns().items():
if c.type.python_type is str:
columns.append(col)
return columns
@classmethod
def optional(cls):
"""
:return:
"""
columns = []
for col, c in cls.columns().items():
if c.nullable:
columns.append(col)
return columns
@classmethod
def primary_key_field(cls):
"""
:return:
"""
if cls.__pks__ is None:
cls._load_columns()
return cls.__pks__[0]
@classmethod
def related(cls, name=None, **kwargs):
"""
:param name:
:return:
"""
if not cls.__rels__:
cls._load_related(**kwargs)
if name is None:
return cls.__rels__
for k in cls.__rels__.keys():
if k == name:
return cls.__rels__.get(k).get('instance'), \
cls.__rels__.get(k).get('columns')
return None, None
@classmethod
def submodel_from_url(cls, url):
"""
:param url:
:return:
"""
for r in inspect(cls).relationships:
try:
rel = r.argument.class_
except AttributeError:
rel = r.argument
if rel.__url__ == url:
return rel
@classmethod
def validate(cls, data):
"""
:param data:
"""
fields = cls.required() + cls.optional()
unknown = [k for k in data if k not in fields]
missing = list(set(cls.required()) - set(data.keys()))
return missing if len(missing) else None, unknown if len(unknown) else None
@classmethod
def description(cls):
"""
:return:
"""
related = {}
fields = []
for r in inspect(cls).relationships:
try:
rel = r.argument.class_
except AttributeError:
rel = r.argument
related.update({rel.__name__: rel.__url__})
for col, c in cls.columns().items():
fields.append(dict(
name=col,
type=c.type.python_type.__name__,
key=c.primary_key,
nullable=c.nullable,
unique=c.unique,
description=c.comment
))
return dict(
url=cls.__url__,
name=cls.__name__,
methods=list(cls.__methods__),
description=cls.__description__ or cls.__table__.comment,
related=related,
fields=fields
)
def to_dict(self, links=False):
"""
:param links:
:return:
"""
resp = {}
data = self if isinstance(self, dict) else self.__dict__
for k, v in data.items():
if k.startswith('_') or k in self.__hidden__:
continue
if isinstance(v, Model):
resp.update({v.__class__.__name__: v.to_dict(links)})
elif isinstance(v, list):
if len(v) > 0:
name = v[0].__class__.__name__ + self.collection_suffix
resp.update({name: [i.to_dict(links) for i in v]})
else:
resp.update({k: v})
if links:
resp['_links'] = self.links()
return resp
def links(self):
"""
:return:
"""
link_dict = dict(self=self.resource_uri())
for r in inspect(self.__class__).relationships:
try:
key = r.argument.class_.__name__
url = r.argument.class_.__url__
except AttributeError:
key = r.argument.__name__
url = r.argument.__url__
link_dict[key] = "{}{}".format(self.resource_uri(), url)
return link_dict
def resource_uri(self):
"""
:return:
"""
pk = getattr(self, self.primary_key_field())
return "{}/{}".format(self.__url__, pk) if pk else None
def update(self, attributes):
"""
:param attributes:
:return:
"""
for attr, val in attributes.items():
if attr in self.columns().keys():
setattr(self, attr, val)
return self
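# Usage sketch (hypothetical model; assumes a SQLAlchemy declarative Base that also
# mixes in this Model class, as the inspect()-based helpers above expect):
#
#   class User(Model, Base):
#       __tablename__ = 'users'
#       __url__ = '/users'
#       id = Column(Integer, primary_key=True)
#       name = Column(String(80), nullable=False)
#
#   User.required()           # -> ['name']
#   User.description()        # -> dict with url, fields, methods, related
#   user.to_dict(links=True)  # serialized row including '_links'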
|
459421
|
import sys
import os
import glob
import re
import gzip
import click
import numpy as np
import csv
from collections import defaultdict
import logging
from typing import *
import velocyto as vcy
from ._run import _run
logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.DEBUG)
@click.command(short_help="Runs the velocity analysis for a Chromium Sample")
@click.argument("samplefolder",
type=click.Path(exists=True,
file_okay=False,
dir_okay=True,
readable=True,
writable=True,
resolve_path=True))
@click.argument("gtffile",
type=click.Path(exists=True,
file_okay=True,
dir_okay=False,
readable=True,
resolve_path=True))
@click.option("--metadatatable", "-s",
help="Table containing metadata of the various samples (csv fortmated rows are samples and cols are entries)",
default=None,
type=click.Path(resolve_path=True,
file_okay=True,
dir_okay=False,
readable=True))
@click.option("--mask", "-m",
help=".gtf file containing intervals to mask",
default=None,
type=click.Path(resolve_path=True,
file_okay=True,
dir_okay=False,
readable=True))
@click.option("--logic", "-l",
help="The logic to use for the filtering (default: Default)",
default="Default")
@click.option("--multimap", "-M",
help="""Consider not unique mappings (not reccomended)""",
default=False,
is_flag=True)
@click.option("--samtools-threads", "-@",
help="The number of threads to use to sort the bam by cellID file using samtools",
default=16)
@click.option("--samtools-memory",
help="The number of MB used for every thread by samtools to sort the bam file",
default=2048)
@click.option("--dtype", "-t",
help="The dtype of the loom file layers - if more than 6000 molecules/reads per gene per cell are expected set uint32 to avoid truncation (default run_10x: uint16)",
default="uint16")
@click.option("--dump", "-d",
help="For debugging purposes only: it will dump a molecular mapping report to hdf5. --dump N, saves a cell every N cells. If p is prepended a more complete (but huge) pickle report is printed (default: 0)",
default="0")
@click.option('--verbose', '-v',
help="Set the vebosity level: -v (only warinings) -vv (warinings and info) -vvv (warinings, info and debug)",
count=True, default=1)
def run10x(samplefolder: str, gtffile: str,
metadatatable: str, mask: str, logic: str, multimap: bool,
samtools_threads: int, samtools_memory: int, dtype: str, dump: str, verbose: str) -> None:
"""Runs the velocity analysis for a Chromium 10X Sample
10XSAMPLEFOLDER specifies the cellranger sample folder
GTFFILE genome annotation file
"""
# Check that the 10X analysis was run successfully
if not os.path.isfile(os.path.join(samplefolder, "_log")):
logging.error("This is an older version of cellranger, cannot check if the output are ready, make sure of this yourself")
elif "Pipestance completed successfully!" not in open(os.path.join(samplefolder, "_log")).read():
logging.error("The outputs are not ready")
bamfile = os.path.join(samplefolder, "outs", "possorted_genome_bam.bam")
bcmatches = glob.glob(os.path.join(samplefolder, os.path.normcase("outs/filtered_gene_bc_matrices/*/barcodes.tsv")))
if len(bcmatches) == 0:
bcmatches = glob.glob(os.path.join(samplefolder, os.path.normcase("outs/filtered_feature_bc_matrix/barcodes.tsv.gz")))
if len(bcmatches) == 0:
logging.error("Can not locate the barcodes.tsv file!")
bcfile = bcmatches[0]
outputfolder = os.path.join(samplefolder, "velocyto")
sampleid = os.path.basename(samplefolder.rstrip("/").rstrip("\\"))
    assert not os.path.exists(os.path.join(outputfolder, f"{sampleid}.loom")), "The output already exists. Aborted!"
additional_ca = {}
try:
tsne_file = os.path.join(samplefolder, "outs", "analysis", "tsne", "2_components", "projection.csv")
if os.path.exists(tsne_file):
tsne = np.loadtxt(tsne_file, usecols=(1, 2), delimiter=',', skiprows=1)
additional_ca["_X"] = tsne[:, 0].astype('float32')
additional_ca["_Y"] = tsne[:, 1].astype('float32')
clusters_file = os.path.join(samplefolder, "outs", "analysis", "clustering", "graphclust", "clusters.csv")
if os.path.exists(clusters_file):
labels = np.loadtxt(clusters_file, usecols=(1, ), delimiter=',', skiprows=1)
additional_ca["Clusters"] = labels.astype('int') - 1
except Exception:
logging.error("Some IO problem in loading cellranger tsne/pca/kmeans files occurred!")
return _run(bamfile=(bamfile, ), gtffile=gtffile, bcfile=bcfile, outputfolder=outputfolder,
sampleid=sampleid, metadatatable=metadatatable, repmask=mask, onefilepercell=False,
logic=logic, without_umi=False, umi_extension="no", multimap=multimap, test=False, samtools_threads=samtools_threads,
samtools_memory=samtools_memory, dump=dump, loom_numeric_dtype=dtype, verbose=verbose, additional_ca=additional_ca)
|
459436
|
from torch.nn.modules.loss import _Loss
import torch
import math
import numpy as np
from scipy.optimize import linear_sum_assignment
import torch.nn.functional as F
def loss_calculation( pred_cent, pred_ref,pred_foot_ref, pred_rot, pred_num, pred_mode,
target_s,points, w, target_mode):
bs = 1
num_p = 1000
points = points.contiguous().view(bs * num_p, 1, 3) # 1000*1*3 input point cloud
pred_num = pred_num.contiguous().view(bs * num_p, 3)
pred_mode = pred_mode.contiguous().view(bs * num_p, 3)
pred_cent = pred_cent.contiguous().view(bs * num_p, 1, 3)
pred_ref = pred_ref.contiguous().view(bs * num_p, -1, 3)
pred_foot_ref = pred_foot_ref.contiguous().view(bs * num_p, -1, 3)
pred_rot = pred_rot.contiguous().view(bs * num_p, -1, 3)
pred_rot_foot = pred_rot.view(bs * num_p, -1, 3)
target_mode = target_mode.view(-1)
target_mode_ = target_mode
target_mode = target_mode.view(bs, 1, 1).repeat(1, num_p, 1).view(bs * num_p)
target_s = target_s.view(bs, 1, -1, 3).repeat(1, num_p, 1, 1).view(bs * num_p, -1, 3)
target_cent = target_s[:, 0, :].view(bs * num_p, -1, 3)
target_sym = target_s[:, 1:, :].view(bs * num_p, -1, 3)
target_sym_vec = torch.add(target_sym, -target_cent) # 1000,-1 ,3
# target_sym_vec = torch.norm(target_sym_vec,dim=2)
target_sym_vec = F.normalize(target_sym_vec,p=2,dim=2)
cent_pred = torch.add(points, pred_cent) # 1000,1,3
ref_pred = torch.add(points, pred_ref)
ref_foot_pred = torch.add(points, pred_foot_ref)
cross_entropy = torch.nn.CrossEntropyLoss()
mode_loss = cross_entropy(pred_mode, target_mode)
center_loss = torch.mean(torch.norm((cent_pred - target_cent), dim=2), dim=1) # (1000)
######### cost matrix
######### cosine angle of pred norm and target norm
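    ######### each entry is acos(|a.b| / (|a||b|)): the angle between mean predicted
    ######### reflection vector i and mean target symmetry direction j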
mean_pred_ref = torch.mean(pred_ref, dim=0)
mean_ref_pred = torch.mean(ref_pred, dim=0)
mean_target_vec = torch.mean(target_sym_vec, dim=0)
cost_matrix = torch.zeros(mean_pred_ref.shape[0], target_sym_vec.shape[1])
for i in range(mean_pred_ref.shape[0]):
for j in range(mean_target_vec.shape[0]):
a = mean_pred_ref[i, :].view(1, 3)
b = mean_target_vec[j, :].view(3, 1)
product = torch.mm(a, b)
norm_a = torch.norm(a, dim=1)
norm_b = torch.norm(b,dim=0)
cost = torch.abs(product / (torch.add(norm_a, 0.00001)*torch.add(norm_b, 0.00001)))
cost_matrix[i, j] = torch.acos(cost.reshape(-1))
    ###### optimal assignment
    ###### min cost for each point is the point-wise solution
row_id_, col_id_ = linear_sum_assignment(cost_matrix.detach().numpy())
if mean_target_vec.shape[0] >1:
corr = np.array([row_id_,col_id_]).T
ordered_id = corr[corr[:,1].argsort()]
row_id = ordered_id[:,0]
col_id = ordered_id[:,1]
else :
row_id = row_id_
col_id = col_id_
ref_out = ref_pred[:, row_id, :]
ref_out_vec = pred_ref[:, row_id, :]
ref_out_foot = ref_foot_pred[:, row_id, :]
ref_out_vec_foot = pred_foot_ref[:, row_id, :]
target_id = label_trans(torch.tensor(row_id)).cuda().float()
target_ref = ref_pt(points, target_cent, target_sym_vec)[:, col_id, :].cuda()
target_foot_ref = points + 0.5*(target_ref-points)
target_sym_vec_orderd = target_sym_vec[:,col_id,:]
id_loss = torch.nn.BCELoss()
mean_pred_num = torch.mean(pred_num, dim=0)
num_loss = id_loss(mean_pred_num, target_id) # (1)
ref_loss = 0
ref_foot_loss = 0
ref_co_loss = 0
rot_foot_loss = 0
rot_co_loss = 0
if target_mode_ != 0:
rot_foot_pred = torch.add(points, pred_rot_foot)#1000,1,3
point_to_cent = torch.add(-points, target_cent)#1000,1,3
product = torch.bmm(target_sym_vec.view(1000,1,3), point_to_cent.view(1000,3,1)).view(1000)
cos = product / (
torch.norm(point_to_cent.view(1000, 3), dim=1) * torch.norm(target_sym_vec.view(1000,3), dim=1)+0.00001).view(1000)
point_to_cent_nom = torch.norm(point_to_cent.view(1000,3), dim=1)
cent_to_foot = (-point_to_cent_nom * cos).view(1000,1).repeat(1,3)*(target_sym_vec.view(1000,3))
target_rot_foot = target_cent + cent_to_foot.view(1000,1,3)
rot_foot_loss = torch.mean(torch.norm(target_rot_foot - rot_foot_pred, dim=2), dim=1).cuda() #0.1
pt_to_foot = rot_foot_pred - points
rot_co_loss = torch.mean(torch.bmm(pt_to_foot.view(1000,1,3), cent_to_foot.view(1000,3,1)).view(-1)).cuda()**(2)#0.001
if target_mode_ != 1:
ref_out_len = torch.norm(ref_out_vec, dim=2)
ref_distance = torch.norm((ref_out - target_ref), dim=2)
ref_loss = torch.mean(torch.div(ref_distance, ref_out_len+0.00001), dim=1).cuda()
ref_foot_loss = torch.mean(torch.norm(ref_out_foot - target_foot_ref, dim=2), dim=1).cuda()#0.1
ref_co_loss = torch.mean(torch.mean(torch.norm(ref_out_vec_foot * 2 - pred_ref[:, row_id, :], dim=2), dim=1)).cuda()**(2)#0.1
    ####### calculate angle error
if target_mode_ == 1:
pred_axis = cent_pred.view(1000,3) - rot_foot_pred.view(1000,3)
best_norm = F.normalize(pred_axis,p=2,dim=1).view(1000,1,3)
target_norm = target_sym_vec_orderd[0, :].view(1, 3, 1).repeat(1000,1,1)
products = torch.abs(torch.bmm(best_norm, target_norm))
else:
best_ref = torch.mean(ref_out_vec, dim=0)
products = torch.zeros(best_ref.shape[0])
for i in range(best_ref.shape[0]):
best_norm = best_ref[i, :].view(1, 3).cuda()
target_norm = target_sym_vec_orderd[0, i, :].view(3, 1)
product = torch.abs(torch.mm(best_norm, target_norm) / (
torch.norm(best_norm, dim=1) * torch.norm(target_norm.contiguous().transpose(1, 0), dim=1))+0.00001)
products[i] = product
dis = torch.mean(w * center_loss + ref_loss + ref_foot_loss + rot_foot_loss, dim=0)
loss = dis + 2 * num_loss + mode_loss + w * 0.5*ref_co_loss + 0.5* w * rot_co_loss
center_dis = torch.mean(center_loss.view(bs, num_p), dim=1)
ref_dis = dis
angle_error = torch.mean(torch.acos(products) / math.pi * 180)
error_num = torch.mean(num_loss)
error_mode = torch.mean(mode_loss)
return loss, loss, center_dis.data.cpu(), ref_dis, angle_error, error_num.data.cpu(), error_mode.cpu()
def ref_pt(pt, cent, sym_vect):
pt_pred = torch.zeros(sym_vect.shape)
for i in range(sym_vect.shape[1]):
center = cent.view(1000,3,1)
norm = sym_vect[:, i, :].view(1000,1,3)
d = -torch.bmm(norm,center)
pt_ = pt-2*(torch.bmm(norm, pt.view(1000,3,1)) + d)*norm
pt_pred[:, i, :] = pt_.view(1000,3)
return pt_pred
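# ref_pt reflects each point p across the plane through `cent` with normal n = sym_vect:
# p' = p - 2 * (n . p + d) * n, where d = -n . cent. This is a true mirror reflection
# because loss_calculation normalizes target_sym_vec before passing it in.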
def label_trans(input):
if input.shape[0] == 3:
label = torch.tensor([1, 1, 1])
elif input.shape[0] == 2:
if input.equal(torch.tensor([0, 1])) or input.equal(torch.tensor([1, 0])):
label = torch.tensor([1, 1, 0])
if input.equal(torch.tensor([0, 2])) or input.equal(torch.tensor([2, 0])):
label = torch.tensor([1, 0, 1])
if input.equal(torch.tensor([1, 2])) or input.equal(torch.tensor([2, 1])):
label = torch.tensor([0, 1, 1])
else: #input.shape[0] == 1:
if input.equal(torch.tensor([0])):
label = torch.tensor([1, 0, 0])
if input.equal(torch.tensor([1])):
label = torch.tensor([0, 1, 0])
if input.equal(torch.tensor([2])):
label = torch.tensor([0, 0, 1])
else:
try:
return label
except Exception as e:
print(e.args)
print(input)
return label
class Loss(_Loss):
def __init__(self, num_points_mesh):
super(Loss, self).__init__(True)
self.num_pt_mesh = num_points_mesh
def forward(self, pred_cent, pred_ref, pred_foot_ref, pred_rot,pred_num, pred_mode,
target_s, points, w, target_mode):
return loss_calculation(pred_cent, pred_ref, pred_foot_ref, pred_rot,pred_num, pred_mode,
target_s, points, w, target_mode)
|
459472
|
from configuration.utils import get_connection_helper_from_env
class Config:
connection_helper = get_connection_helper_from_env()
|
459481
|
import os
import pytest
from plio.io.io_controlnetwork import to_isis
from plio.io.io_controlnetwork import write_filelist
from autocnet.examples import get_path
from autocnet.matcher.suppression_funcs import error
from autocnet.graph.network import CandidateGraph
import pandas as pd
import numpy as np
@pytest.mark.xfail
class TestTwoImageMatching():
"""
Feature: As a user
I wish to automatically match two images to
Generate an ISIS control network
Scenario: Match two images
Given a manually specified adjacency structure named two_image_adjacency.json
When read create an adjacency graph
Then extract image data and attribute nodes
And find features and descriptors
And apply a FLANN matcher
Then create a C object from the graph matches
Then output a control network
"""
def setUp(self):
self.serial_numbers = {'AS15-M-0295_SML.png': '1971-07-31T01:24:11.754',
'AS15-M-0296_SML.png': '1971-07-31T01:24:36.970',
'AS15-M-0297_SML.png': '1971-07-31T01:25:02.243',
'AS15-M-0298_SML.png': '1971-07-31T01:25:27.457',
'AS15-M-0299_SML.png': '1971-07-31T01:25:52.669',
'AS15-M-0300_SML.png': '1971-07-31T01:26:17.923'}
for k, v in self.serial_numbers.items():
self.serial_numbers[k] = 'APOLLO15/METRIC/{}'.format(v)
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
def test_two_image(self):
# Step: Create an adjacency graph
adjacency = get_path('two_image_adjacency.json')
basepath = get_path('Apollo15')
cg = CandidateGraph.from_adjacency(adjacency, basepath=basepath)
assert 2 == cg.number_of_nodes()
assert 1 == cg.number_of_edges()
# Step: Extract image data and attribute nodes
cg.extract_features(extractor_method='vlfeat')
for i, node in cg.nodes.data('data'):
assert node.nkeypoints in range(4000, 6000)
# Step: Compute the coverage ratios
for i, node in cg.nodes.data('data'):
ratio = node.coverage()
assert 0.98 < round(ratio, 8) < 0.99
cg.decompose_and_match(k=2, maxiteration=2)
assert isinstance(cg.edges[0,1]['data'].smembership, np.ndarray)
# Create fundamental matrix
cg.compute_fundamental_matrices()
for s, d, e in cg.edges.data('data'):
assert isinstance(e.fundamental_matrix, np.ndarray)
e.compute_fundamental_error(clean_keys=['fundamental'])
assert 'fundamental_equality' in e.costs.columns
matches, _ = e.clean(clean_keys=['fundamental'])
# Apply AMNS
cg.suppress(k=30, xkey='x', ykey='y', suppression_func=error)
# Step: Compute subpixel offsets for candidate points
cg.subpixel_register(clean_keys=['suppression'])
def tearDown(self):
try:
os.remove('TestTwoImageMatching.net')
os.remove('fromlist.lis')
except: pass
|
459486
|
import struct, sys
import dfu, device_platform
class ExecConfig:
def __init__(self, info, aes_crypto_cmd):
self.info = info
self.aes_crypto_cmd = aes_crypto_cmd
def match(self, info):
return info == self.info[0].ljust(0x40, '\0') + self.info[1].ljust(0x40, '\0') + self.info[2].ljust(0x80, '\0')
configs = [
ExecConfig(('SecureROM for s5l8947xsi, Copyright 2011, Apple Inc.', 'RELEASE', 'iBoot-1458.2'), aes_crypto_cmd=0x7060+1),
ExecConfig(('SecureROM for s5l8950xsi, Copyright 2011, Apple Inc.', 'RELEASE', 'iBoot-1145.3'), aes_crypto_cmd=0x7300+1),
ExecConfig(('SecureROM for s5l8955xsi, Copyright 2011, Apple Inc.', 'RELEASE', 'iBoot-1145.3.3'), aes_crypto_cmd=0x7340+1),
ExecConfig(('SecureROM for t8002si, Copyright 2007-2014, Apple Inc.', 'ROMRELEASE', 'iBoot-2651.0.0.1.31'), aes_crypto_cmd=0x86DC+1),
ExecConfig(('SecureROM for t8004si, Copyright 2007-2014, Apple Inc.', 'ROMRELEASE', 'iBoot-2651.0.0.3.3'), aes_crypto_cmd=0x786C+1),
ExecConfig(('SecureROM for s5l8960xsi, Copyright 2012, Apple Inc.', 'RELEASE', 'iBoot-1704.10'), aes_crypto_cmd=0x10000B9A8),
ExecConfig(('SecureROM for t8010si, Copyright 2007-2015, Apple Inc.', 'ROMRELEASE', 'iBoot-2696.0.0.1.33'), aes_crypto_cmd=0x10000C8F4),
ExecConfig(('SecureROM for t8011si, Copyright 2007-2015, Apple Inc.', 'ROMRELEASE', 'iBoot-3135.0.0.2.3'), aes_crypto_cmd=0x10000C994),
ExecConfig(('SecureROM for t8015si, Copyright 2007-2016, Apple Inc.', 'ROMRELEASE', 'iBoot-3332.0.0.1.23'), aes_crypto_cmd=0x100009E9C),
]
EXEC_MAGIC = 'execexec'[::-1]
DONE_MAGIC = 'donedone'[::-1]
MEMC_MAGIC = 'memcmemc'[::-1]
MEMS_MAGIC = 'memsmems'[::-1]
USB_READ_LIMIT = 0x8000
CMD_TIMEOUT = 5000
AES_BLOCK_SIZE = 16
AES_ENCRYPT = 16
AES_DECRYPT = 17
AES_GID_KEY = 0x20000200
AES_UID_KEY = 0x20000201
class PwnedUSBDevice():
def memset(self, address, c, length): self.command(self.cmd_memset(address, c, length), 0)
def memcpy(self, dest, src, length): self.command(self.cmd_memcpy(dest, src, length), 0)
def read_memory_ptr(self, address): return struct.unpack('<%s' % self.cmd_arg_type(), self.read_memory(address, self.cmd_arg_size()))[0]
def read_memory_uint8(self, address): return struct.unpack('<B', self.read_memory(address, 1))[0]
def read_memory_uint16(self, address): return struct.unpack('<H', self.read_memory(address, 2))[0]
def read_memory_uint32(self, address): return struct.unpack('<I', self.read_memory(address, 4))[0]
def read_memory_uint64(self, address): return struct.unpack('<Q', self.read_memory(address, 8))[0]
def write_memory(self, address, data): self.command(self.cmd_memcpy(address, self.cmd_data_address(3), len(data)) + data, 0)
def write_memory_ptr(self, address, value): self.write_memory(address, struct.pack('<%s' % self.cmd_arg_type(), value))
def write_memory_uint8(self, address, value): self.write_memory(address, struct.pack('<B', value))
def write_memory_uint16(self, address, value): self.write_memory(address, struct.pack('<H', value))
def write_memory_uint32(self, address, value): self.write_memory(address, struct.pack('<I', value))
def write_memory_uint64(self, address, value): self.write_memory(address, struct.pack('<Q', value))
def cmd_arg_type(self): return 'Q' if self.platform.arch == 'arm64' else 'I'
def cmd_arg_size(self): return 8 if self.platform.arch == 'arm64' else 4
def cmd_data_offset(self, index): return 16 + index * self.cmd_arg_size()
def cmd_data_address(self, index): return self.load_base() + self.cmd_data_offset(index)
def cmd_memcpy(self, dest, src, length): return struct.pack('<8s8x3%s' % self.cmd_arg_type(), MEMC_MAGIC, dest, src, length)
def cmd_memset(self, address, c, length): return struct.pack('<8s8x3%s' % self.cmd_arg_type(), MEMS_MAGIC, address, c, length)
def load_base(self):
if 'SRTG:' in self.serial_number:
return self.platform.dfu_image_base
else:
return self.platform.dfu_load_base
def image_base(self):
if 'SRTG:' in self.serial_number:
return self.platform.rom_base
else:
return self.platform.dfu_image_base
def usb_serial_number(self, key):
for pair in self.serial_number.split(' '):
if pair.startswith(key + ':'):
k,v = pair.split(':')
if v[0] == '[' and v[-1] == ']':
return v[1:-1]
else:
return int(v, 16)
return None
def aes(self, data, action, key):
assert len(data) % AES_BLOCK_SIZE == 0
(retval, received) = self.execute(len(data), self.config.aes_crypto_cmd, action, self.cmd_data_address(7), self.cmd_data_address(0), len(data), key, 0, 0, data)
assert retval & 0xFFFFFFFF == 0
return received[:len(data)]
def read_memory(self, address, length):
data = str()
while len(data) < length:
part_length = min(length - len(data), USB_READ_LIMIT - self.cmd_data_offset(0))
response = self.command(self.cmd_memcpy(self.cmd_data_address(0), address + len(data), part_length), self.cmd_data_offset(0) + part_length)
assert response[:8] == DONE_MAGIC
data += response[self.cmd_data_offset(0):]
return data
def command(self, request_data, response_length):
assert 0 <= response_length <= USB_READ_LIMIT
device = dfu.acquire_device()
assert self.serial_number == device.serial_number
dfu.send_data(device, '\0' * 16)
device.ctrl_transfer(0x21, 1, 0, 0, 0, 100)
device.ctrl_transfer(0xA1, 3, 0, 0, 6, 100)
device.ctrl_transfer(0xA1, 3, 0, 0, 6, 100)
dfu.send_data(device, request_data)
# HACK
if response_length == 0:
response = device.ctrl_transfer(0xA1, 2, 0xFFFF, 0, response_length + 1, CMD_TIMEOUT).tostring()[1:]
else:
response = device.ctrl_transfer(0xA1, 2, 0xFFFF, 0, response_length, CMD_TIMEOUT).tostring()
dfu.release_device(device)
assert len(response) == response_length
return response
def execute(self, response_length, *args):
cmd = str()
for i in range(len(args)):
if isinstance(args[i], (int, long)):
cmd += struct.pack('<%s' % self.cmd_arg_type(), args[i])
elif isinstance(args[i], basestring) and i == len(args) - 1:
cmd += args[i]
else:
print 'ERROR: usbexec.execute: invalid argument at position %s' % i
sys.exit(1)
if i == 0 and self.platform.arch != 'arm64':
cmd += '\0' * 4
response = self.command(EXEC_MAGIC + cmd, self.cmd_data_offset(0) + response_length)
done, retval = struct.unpack('<8sQ', response[:self.cmd_data_offset(0)])
assert done == DONE_MAGIC
return retval, response[self.cmd_data_offset(0):]
def __init__(self):
self.config = None
self.platform = None
device = dfu.acquire_device()
self.serial_number = device.serial_number
dfu.release_device(device)
for dp in device_platform.all_platforms:
if self.serial_number.startswith('CPID:%04x CPRV:%02x ' % (dp.cpid, dp.cprv)):
self.platform = dp
break
if self.platform is None:
print self.serial_number
print 'ERROR: No matching usbexec.platform found for this device.'
sys.exit(1)
info = self.read_memory(self.image_base() + 0x200, 0x100)
for config in configs:
if config.match(info):
self.config = config
break
if self.config is None:
print info
print 'ERROR: No matching usbexec.config found for this image.'
sys.exit(1)
|
459488
|
import json
import logging
import random
import sys
import deterministic_network
import member_replicated
import client
from statemachine import sequence_generator
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class TimeLogger(logging.Logger):
def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func=None, extra=None):
extra = extra or {}
extra['simtime'] = self.network.now
return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func=func, extra=extra)
def status_json(request, network):
rv = {}
for node_name, node in network.nodes.iteritems():
nrv = rv[node_name] = {}
for n in dir(node):
if not (n.startswith('leader_') or n.startswith('acceptor_') or n.startswith('replica_')):
continue
v = getattr(node, n)
if callable(v):
continue
if n == 'leader_scout':
if v:
v = {'scout_id': v.scout_id, 'scout_ballot_num': v.scout_ballot_num}
if n == 'acceptor_accepted':
v = sorted(v.iteritems())
if n == 'leader_commanders':
continue
nrv[n] = v
request.send_response(200)
request.send_header('Content-Type', 'text/json')
request.end_headers()
request.wfile.write(json.dumps(rv, sort_keys=True, indent=4, separators=(',', ': ')))
import string
def run_monitor_web(network):
class HandlerClass(SimpleHTTPRequestHandler):
protocol_version = 'HTTP/1.0'
def log_request(self, *args, **kwargs):
pass
def do_GET(self):
if self.path == '/status':
return status_json(self, network)
elif self.path == '/':
self.path = '/index.html'
return SimpleHTTPRequestHandler.do_GET(self)
httpd = BaseHTTPServer.HTTPServer(('', 8010), HandlerClass)
import threading
thd = threading.Thread(target=httpd.serve_forever)
thd.setDaemon(1)
thd.start()
if __name__ == "__main__":
if len(sys.argv) == 1:
rndseed = random.randint(0, sys.maxint)
pause = False
else:
rndseed = int(sys.argv[1])
pause = True
print "RANDOM SEED:", rndseed
network = deterministic_network.Network(rndseed, pause)
logging.basicConfig(
format="%(simtime)4.4f - %(name)s - %(message)s", level=logging.DEBUG)
logging.setLoggerClass(TimeLogger)
TimeLogger.network = network
# add the seed
seed = member_replicated.ClusterSeed(network.new_node(), initial_state=0)
# set up the members
members = [member_replicated.ClusterMember(network.new_node(), sequence_generator, peers=[seed.address])
for _ in range(3)]
# set up the client
client_node = network.new_node()
client = client.Client(client_node)
# kill and create nodes often
def modify():
if not network.rnd.randint(0, 2):
# KILL
if len(network.nodes) > 5:
victim = network.rnd.choice(network.nodes.keys())
if victim != client_node.address:
network.nodes[victim].kill()
else:
# create
if len(network.nodes) < 10:
node = member_replicated.ClusterMember(network.new_node(), sequence_generator, peers=network.nodes.keys())
node.start()
client_node.set_timer(network.rnd.uniform(2.0, 3.0), modify)
client_node.set_timer(1.0, modify)
# run_monitor_web(network)
for member in members:
member.start()
client.start()
network.run()
|
459494
|
import sys
import gaptrain as gt
import autode as ade
from autode.wrappers.keywords import GradientKeywords
gt.GTConfig.n_cores = 10
idx = int(sys.argv[2])
mol = ade.Molecule(smiles=sys.argv[1])
mol.optimise(method=ade.methods.XTB())
mol.print_xyz_file()
system = gt.System(box_size=[50, 50, 50])
system.add_molecules(gt.Molecule(f'{mol.name}.xyz'))
for _ in range(3):
data, gap = gt.active.train(system,
method_name='xtb',
max_active_iters=100,
temp=500)
tau = gt.Tau(configs=[system.configuration() for _ in range(3)],
e_lower=0.04336,
temp=300)
tau.calculate(gap=gap, method_name='xtb')
with open(f'{idx}_tau.txt', 'a') as tau_file:
print(sum(c.n_evals for c in data), len(data), str(tau), file=tau_file)
|
459558
|
from typing import Union, Any, List, Optional
import rdflib
from . import *
class TextPropertyMixin:
def from_user(self, value: Any) -> Union[None, rdflib.Literal]:
if value is None:
return None
if not isinstance(value, str):
raise TypeError(f'Expecting string, got {type(value)}')
return rdflib.Literal(value)
def to_user(self, value: Any) -> str:
return str(value)
class TextSingletonProperty(TextPropertyMixin, SingletonProperty):
def __init__(self, property_owner: Any, property_uri: str,
lower_bound: int, upper_bound: int,
validation_rules: Optional[List] = None,
initial_value: Optional[str] = None):
super().__init__(property_owner, property_uri,
lower_bound, upper_bound, validation_rules)
# See https://github.com/SynBioDex/pySBOL3/issues/208
# The empty string is boolean False, so explicitly check for None
# so that we don't discard an empty string as an initial value
if initial_value is not None:
self.set(initial_value)
class TextListProperty(TextPropertyMixin, ListProperty):
def __init__(self, property_owner: Any, property_uri: str,
lower_bound: int, upper_bound: int,
validation_rules: Optional[List] = None,
initial_value: Optional[str] = None):
super().__init__(property_owner, property_uri,
lower_bound, upper_bound, validation_rules)
if initial_value is not None:
self.set(initial_value)
def TextProperty(property_owner: Any, property_uri: str,
lower_bound: int, upper_bound: Union[int, float],
validation_rules: Optional[List] = None,
initial_value: Optional[Union[str, List[str]]] = None) -> Property:
if upper_bound == 1:
return TextSingletonProperty(property_owner, property_uri,
lower_bound, upper_bound,
validation_rules, initial_value)
return TextListProperty(property_owner, property_uri,
lower_bound, upper_bound,
validation_rules, initial_value)
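# Usage sketch (hypothetical owner object and property URIs; mirrors the upper_bound
# dispatch above):
#
#   name = TextProperty(obj, 'http://example.org/ns#name', 0, 1, initial_value='demo')
#   tags = TextProperty(obj, 'http://example.org/ns#tag', 0, math.inf,
#                       initial_value=['a', 'b'])
#
# upper_bound == 1 yields a TextSingletonProperty; any larger bound yields a
# TextListProperty.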
|
459563
|
import pyeapi
from getpass import getpass
connection = pyeapi.client.connect(
transport="https",
host="arista8.lasthop.io",
username="pyclass",
password=getpass(),
port="443",
)
enable = getpass("Enable: ")
device = pyeapi.client.Node(connection, enablepwd=enable)
vlan_cfg = device.api("vlans")
print(vlan_cfg)
"""
>>> vlan_cfg
<pyeapi.api.vlans.Vlans object at 0x7f3f39eaac88>
>>> vlan_cfg.getall()
{'1': {'vlan_id': '1', 'name': 'default', 'state': 'active', 'trunk_groups': []}}
>>> vlan_cfg.get(1)
{'vlan_id': 1, 'name': 'default', 'state': 'active', 'trunk_groups': []}
>>> help(vlan_cfg.create)
>>> vlan_cfg.create(225)
True
>>> vlan_cfg.create(226)
True
>>> vlan_cfg.create(226)
True
>>> vlan_cfg.create(227)
True
>>> vlan_cfg.set_name(225, "blue")
True
>>> vlan_cfg.set_name(226, "red")
True
>>> vlan_cfg.set_name(227, "orange")
True
>>> vlan_cfg.getall()
{'1': {'vlan_id': '1', 'name': 'default', 'state': 'active', 'trunk_groups': []}, '225':
{'vlan_id': '225', 'name': 'blue', 'state': 'active', 'trunk_groups': []}, '226':
{'vlan_id': '226', 'name': 'red', 'state': 'active', 'trunk_groups': []}, '227':
{'vlan_id': '227', 'name': 'orange', 'state': 'active', 'trunk_groups': []}}
"""
|
459566
|
import numpy
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
# Define some test data which is close to Gaussian
data = numpy.random.normal(size=10000)
hist, bin_edges = numpy.histogram(data, density=True)
bin_centres = (bin_edges[:-1] + bin_edges[1:])/2
# Define model function to be used to fit to the data above:
def gauss(x, *p):
A, mu, sigma = p
return A*numpy.exp(-(x-mu)**2/(2.*sigma**2))
# p0 is the initial guess for the fitting coefficients (A, mu and sigma above)
p0 = [1., 0., 1.]
coeff, var_matrix = curve_fit(gauss, bin_centres, hist, p0=p0)
# Get the fitted curve
hist_fit = gauss(bin_centres, *coeff)
plt.plot(bin_centres, hist, label='Test data')
plt.plot(bin_centres, hist_fit, label='Fitted data')
# Finally, lets get the fitting parameters, i.e. the mean and standard deviation:
print 'Fitted mean = ', coeff[1]
print 'Fitted standard deviation = ', coeff[2]
plt.show()
|
459624
|
from SWDErrors import *
import time
import Adafruit_BBIO.GPIO as GPIO
# --- BeagleBoneSWD
# Use of BeagleBone GPIO to drive the SWD signals
# (c) <NAME> - www.disk91.com
#
# Usage :
# GPIO48 is connected to SWD_CLK signal
# GPIO60 is connected to SWD_IO signal
#
# Add in the header of the existing files
# from BeagleBoneSWD import *
# Modify existing files to use BeagleBoneSWD :
# busPirate = PirateSWD("/dev/ttyUSB0")
# busPirate = BeagleBoneSWD("")
#
class BeagleBoneNewSWD:
def __init__ (self,notused, vreg):
self.SWDIO = "P9_12"
self.SWDCK = "P9_15"
self.debug = False
self.debugFull = False
# GPIO 60 - P9_12 sur SWD_IO
GPIO.setup(self.SWDIO, GPIO.OUT)
GPIO.output(self.SWDIO, GPIO.LOW)
#GPIO 48 - P9_15 sur SWD_CLK
GPIO.setup(self.SWDCK, GPIO.OUT)
GPIO.output(self.SWDCK, GPIO.HIGH)
self.sendBytes([0xFF] * 8)
self.sendBytes([0x00] * 8)
self.sendBytes([0xFF] * 8)
self.sendBytes([0x79, 0xE7]) # activate SWD interface
self.resyncSWD()
def resetBP (self):
print "DEBUG : resetBP"
def tristatePins (self):
print "DEBUG : tristatePins"
# this is the fastest port-clearing scheme I could devise
def clear (self, more = 0):
print "DEBUG : clear"
def short_sleep(self):
#time.sleep(0.0001)
i=0
def readBits (self, count):
GPIO.setup(self.SWDIO, GPIO.IN)
ret = []
for x in xrange(0, count):
GPIO.output(self.SWDCK,GPIO.HIGH)
self.short_sleep()
GPIO.output(self.SWDCK,GPIO.LOW)
if GPIO.input(self.SWDIO):
ret.append(1)
else:
ret.append(0)
self.short_sleep()
GPIO.setup(self.SWDIO, GPIO.OUT)
GPIO.output(self.SWDIO, GPIO.LOW)
if self.debug:
print "DEBUG - readBits(%d)" % count + "values - %s" %ret
return ret
def sendBits ( self, bits ):
for b in bits:
if b == 0 :
GPIO.output(self.SWDIO, GPIO.LOW)
if self.debugFull:
print "DEBUG - writeBits 0"
else:
GPIO.output(self.SWDIO, GPIO.HIGH)
if self.debugFull:
print "DEBUG - writeBits 1"
GPIO.output(self.SWDCK,GPIO.HIGH)
self.short_sleep()
GPIO.output(self.SWDCK,GPIO.LOW)
self.short_sleep()
def skipBits (self, count):
if self.debug:
print "DEBUG - skipBits(%d)" % count
self.readBits (count)
def readBytes (self, count):
ret = []
for x in xrange(0, count):
v = self.readBits(8)
k = 0
for i in v:
k = 2*k + i
ret.append(k);
if self.debug:
print "DEBUG - readBytes : %s " % ret
return ret
def sendBytes (self, data):
if self.debug:
print "DEBUG - sendBytes %s" % data
for v in data:
db = [int(( v >> y) & 1) for y in range(7,-1, -1)]
self.sendBits(db)
#self.sendBits(db[::-1])
def resyncSWD (self):
self.sendBytes([0xFF] * 8)
self.sendBytes([0x00] * 8)
def readSWD (self, ap, register):
if self.debug:
print "DEBUG - readSWD %s " % [calcOpcode(ap, register, True)]
# transmit the opcode
self.sendBytes([calcOpcode(ap, register, True)])
# check the response
ack = self.readBits(3)
if ack[0:3] != [1,0,0]:
if ack[0:3] == [0,1,0]:
raise SWDWaitError(ack[0:3])
elif ack[0:3] == [0,0,1]:
raise SWDFaultError(ack[0:3])
else:
raise SWDProtocolError(ack[0:3])
# read the next 4 bytes
data = [reverseBits(b) for b in self.readBytes(4)]
data.reverse()
# read the parity bit and turnaround period
extra = self.readBits(3)
# check the parity
if sum([bitCount(x) for x in data[0:4]]) % 2 != extra[0]:
raise SWDParityError()
# idle clocking to allow transactions to complete
self.sendBytes([0x00])
self.sendBytes([0x00])
# return the data
return (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]
def writeSWD (self, ap, register, data, ignoreACK = False):
if self.debug:
print "DEBUG - writeSWD %s " % [calcOpcode(ap, register, False)]
# transmit the opcode
self.sendBytes([calcOpcode(ap, register, False)])
# check the response if required
if ignoreACK:
self.skipBits(5)
else:
ack = self.readBits(5)
#print ack
if ack[0:3] != [1,0,0]:
if ack[0:3] == [0,1,0]:
raise SWDWaitError(ack[0:3])
elif ack[0:3] == [0,0,1]:
raise SWDFaultError(ack[0:3])
else:
raise SWDProtocolError(ack[0:3])
# mangle the data endianness
payload = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
payload[0] = reverseBits((data >> 0) & 0xFF)
payload[1] = reverseBits((data >> 8) & 0xFF)
payload[2] = reverseBits((data >> 16) & 0xFF)
payload[3] = reverseBits((data >> 24) & 0xFF)
# add the parity bit
if sum([bitCount(x) for x in payload[0:4]]) % 2:
payload[4] = 0x80
# output the data, idle clocking is on the end of the payload
self.sendBytes(payload)
def bitCount(int_type):
count = 0
while(int_type):
int_type &= int_type - 1
count += 1
return(count)
def reverseBits (x):
a = ((x & 0xAA) >> 1) | ((x & 0x55) << 1)
b = ((a & 0xCC) >> 2) | ((a & 0x33) << 2)
c = ((b & 0xF0) >> 4) | ((b & 0x0F) << 4)
return c
def calcOpcode (ap, register, read):
opcode = 0x00
opcode = opcode | (0x20 if read else 0x00)
opcode = opcode | (0x40 if ap else 0x00)
opcode = opcode | ((register & 0x01) << 4) | ((register & 0x02) << 2)
opcode = opcode | ((bitCount(opcode) & 1) << 2)
opcode = opcode | 0x81
return opcode
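# Worked example (values follow the code path above): calcOpcode(ap=False, register=0, read=True)
#   0x00 | 0x20 (read) -> 0x20; ap and register bits stay 0; bitCount(0x20) is odd,
#   so the parity bit 0x04 is set -> 0x24; OR with the fixed bits 0x81 -> 0xA5.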
|
459628
|
import os
from winthingies.process import Process
from winthingies.win32.kernel32 import kernel32
current_process = kernel32.GetCurrentProcess()
CURRENT_PROCESS = Process(
os.getpid(),
current_process
)
|
459691
|
import flask
from tests.utils import assert_span_http_status_code
from . import BaseFlaskTestCase
class FlaskErrorhandlerTestCase(BaseFlaskTestCase):
def test_default_404_handler(self):
"""
When making a 404 request
And no user defined error handler is defined
We create the expected spans
"""
# Make our 404 request
res = self.client.get("/unknown")
self.assertEqual(res.status_code, 404)
spans = self.get_spans()
req_span = self.find_span_by_name(spans, "flask.request")
dispatch_span = self.find_span_by_name(spans, "flask.dispatch_request")
user_ex_span = self.find_span_by_name(spans, "flask.handle_user_exception")
http_ex_span = self.find_span_by_name(spans, "flask.handle_http_exception")
# flask.request span
self.assertEqual(req_span.error, 0)
assert_span_http_status_code(req_span, 404)
self.assertIsNone(req_span.get_tag("flask.endpoint"))
self.assertIsNone(req_span.get_tag("flask.url_rule"))
# flask.dispatch_request span
self.assertEqual(dispatch_span.error, 0)
self.assertIsNone(dispatch_span.get_tag("error.msg"))
self.assertIsNone(dispatch_span.get_tag("error.stack"))
self.assertIsNone(dispatch_span.get_tag("error.type"))
# flask.handle_user_exception span
self.assertEqual(user_ex_span.meta, dict())
self.assertEqual(user_ex_span.error, 0)
# flask.handle_http_exception span
self.assertEqual(http_ex_span.meta, dict())
self.assertEqual(http_ex_span.error, 0)
def test_abort_500(self):
"""
When making a 500 request
And no user defined error handler is defined
We create the expected spans
"""
@self.app.route("/500")
def endpoint_500():
flask.abort(500)
# Make our 500 request
res = self.client.get("/500")
self.assertEqual(res.status_code, 500)
spans = self.get_spans()
req_span = self.find_span_by_name(spans, "flask.request")
dispatch_span = self.find_span_by_name(spans, "flask.dispatch_request")
endpoint_span = self.find_span_by_name(spans, "tests.contrib.flask.test_errorhandler.endpoint_500")
user_ex_span = self.find_span_by_name(spans, "flask.handle_user_exception")
http_ex_span = self.find_span_by_name(spans, "flask.handle_http_exception")
# flask.request span
self.assertEqual(req_span.error, 1)
assert_span_http_status_code(req_span, 500)
self.assertEqual(req_span.get_tag("flask.endpoint"), "endpoint_500")
self.assertEqual(req_span.get_tag("flask.url_rule"), "/500")
# flask.dispatch_request span
self.assertEqual(dispatch_span.error, 1)
error_msg = dispatch_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("500 Internal Server Error"))
error_stack = dispatch_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = dispatch_span.get_tag("error.type")
self.assertEqual(error_type, "werkzeug.exceptions.InternalServerError")
# tests.contrib.flask.test_errorhandler.endpoint_500 span
self.assertEqual(endpoint_span.error, 1)
error_msg = endpoint_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("500 Internal Server Error"))
error_stack = endpoint_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = endpoint_span.get_tag("error.type")
self.assertEqual(error_type, "werkzeug.exceptions.InternalServerError")
# flask.handle_user_exception span
self.assertEqual(user_ex_span.meta, dict())
self.assertEqual(user_ex_span.error, 0)
# flask.handle_http_exception span
self.assertEqual(http_ex_span.meta, dict())
self.assertEqual(http_ex_span.error, 0)
def test_abort_500_custom_handler(self):
"""
When making a 500 request
And a user defined error handler is defined
We create the expected spans
"""
@self.app.errorhandler(500)
def handle_500(e):
return "whoops", 200
@self.app.route("/500")
def endpoint_500():
flask.abort(500)
# Make our 500 request
res = self.client.get("/500")
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b"whoops")
spans = self.get_spans()
req_span = self.find_span_by_name(spans, "flask.request")
dispatch_span = self.find_span_by_name(spans, "flask.dispatch_request")
endpoint_span = self.find_span_by_name(spans, "tests.contrib.flask.test_errorhandler.endpoint_500")
handler_span = self.find_span_by_name(spans, "tests.contrib.flask.test_errorhandler.handle_500")
user_ex_span = self.find_span_by_name(spans, "flask.handle_user_exception")
http_ex_span = self.find_span_by_name(spans, "flask.handle_http_exception")
# flask.request span
self.assertEqual(req_span.error, 0)
assert_span_http_status_code(req_span, 200)
self.assertEqual(req_span.get_tag("flask.endpoint"), "endpoint_500")
self.assertEqual(req_span.get_tag("flask.url_rule"), "/500")
# flask.dispatch_request span
self.assertEqual(dispatch_span.error, 1)
error_msg = dispatch_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("500 Internal Server Error"))
error_stack = dispatch_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = dispatch_span.get_tag("error.type")
self.assertEqual(error_type, "werkzeug.exceptions.InternalServerError")
# tests.contrib.flask.test_errorhandler.endpoint_500 span
self.assertEqual(endpoint_span.error, 1)
error_msg = endpoint_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("500 Internal Server Error"))
error_stack = endpoint_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = endpoint_span.get_tag("error.type")
self.assertEqual(error_type, "werkzeug.exceptions.InternalServerError")
# tests.contrib.flask.test_errorhandler.handle_500 span
self.assertEqual(handler_span.error, 0)
self.assertIsNone(handler_span.get_tag("error.msg"))
self.assertIsNone(handler_span.get_tag("error.stack"))
self.assertIsNone(handler_span.get_tag("error.type"))
# flask.handle_user_exception span
self.assertEqual(user_ex_span.meta, dict())
self.assertEqual(user_ex_span.error, 0)
# flask.handle_http_exception span
self.assertEqual(http_ex_span.meta, dict())
self.assertEqual(http_ex_span.error, 0)
def test_raise_user_exception(self):
"""
When raising a custom user exception
And no user defined error handler is defined
We create the expected spans
"""
class FlaskTestException(Exception):
pass
@self.app.route("/error")
def endpoint_error():
raise FlaskTestException("custom error message")
# Make our 500 request
res = self.client.get("/error")
self.assertEqual(res.status_code, 500)
spans = self.get_spans()
req_span = self.find_span_by_name(spans, "flask.request")
dispatch_span = self.find_span_by_name(spans, "flask.dispatch_request")
endpoint_span = self.find_span_by_name(spans, "tests.contrib.flask.test_errorhandler.endpoint_error")
user_ex_span = self.find_span_by_name(spans, "flask.handle_user_exception")
http_ex_span = self.find_span_by_name(spans, "flask.handle_http_exception", required=False)
# flask.request span
self.assertEqual(req_span.error, 1)
assert_span_http_status_code(req_span, 500)
self.assertEqual(req_span.get_tag("flask.endpoint"), "endpoint_error")
self.assertEqual(req_span.get_tag("flask.url_rule"), "/error")
# flask.dispatch_request span
self.assertEqual(dispatch_span.error, 1)
error_msg = dispatch_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("custom error message"))
error_stack = dispatch_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = dispatch_span.get_tag("error.type")
self.assertEqual(error_type, "tests.contrib.flask.test_errorhandler.FlaskTestException")
# tests.contrib.flask.test_errorhandler.endpoint_500 span
self.assertEqual(endpoint_span.error, 1)
error_msg = endpoint_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("custom error message"))
error_stack = endpoint_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = endpoint_span.get_tag("error.type")
self.assertEqual(error_type, "tests.contrib.flask.test_errorhandler.FlaskTestException")
# flask.handle_user_exception span
self.assertEqual(user_ex_span.error, 1)
error_msg = user_ex_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("custom error message"))
error_stack = user_ex_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = user_ex_span.get_tag("error.type")
self.assertEqual(error_type, "tests.contrib.flask.test_errorhandler.FlaskTestException")
# flask.handle_http_exception span
self.assertIsNone(http_ex_span)
def test_raise_user_exception_handler(self):
"""
When raising a custom user exception
And a user defined error handler is defined
We create the expected spans
"""
class FlaskTestException(Exception):
pass
@self.app.errorhandler(FlaskTestException)
def handle_error(e):
return "whoops", 200
@self.app.route("/error")
def endpoint_error():
raise FlaskTestException("custom error message")
# Make our 500 request
res = self.client.get("/error")
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b"whoops")
spans = self.get_spans()
req_span = self.find_span_by_name(spans, "flask.request")
dispatch_span = self.find_span_by_name(spans, "flask.dispatch_request")
endpoint_span = self.find_span_by_name(spans, "tests.contrib.flask.test_errorhandler.endpoint_error")
handler_span = self.find_span_by_name(spans, "tests.contrib.flask.test_errorhandler.handle_error")
user_ex_span = self.find_span_by_name(spans, "flask.handle_user_exception")
http_ex_span = self.find_span_by_name(spans, "flask.handle_http_exception", required=False)
# flask.request span
self.assertEqual(req_span.error, 0)
assert_span_http_status_code(req_span, 200)
self.assertEqual(req_span.get_tag("flask.endpoint"), "endpoint_error")
self.assertEqual(req_span.get_tag("flask.url_rule"), "/error")
# flask.dispatch_request span
self.assertEqual(dispatch_span.error, 1)
error_msg = dispatch_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("custom error message"))
error_stack = dispatch_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = dispatch_span.get_tag("error.type")
self.assertEqual(error_type, "tests.contrib.flask.test_errorhandler.FlaskTestException")
# tests.contrib.flask.test_errorhandler.endpoint_500 span
self.assertEqual(endpoint_span.error, 1)
error_msg = endpoint_span.get_tag("error.msg")
self.assertTrue(error_msg.startswith("custom error message"))
error_stack = endpoint_span.get_tag("error.stack")
self.assertTrue(error_stack.startswith("Traceback (most recent call last):"))
error_type = endpoint_span.get_tag("error.type")
self.assertEqual(error_type, "tests.contrib.flask.test_errorhandler.FlaskTestException")
# tests.contrib.flask.test_errorhandler.handle_error span
self.assertEqual(handler_span.error, 0)
# flask.handle_user_exception span
self.assertEqual(user_ex_span.error, 0)
self.assertEqual(user_ex_span.meta, dict())
# flask.handle_http_exception span
self.assertIsNone(http_ex_span)
|
459714
|
import sys
import argparse
import importlib
import pathlib
import os
from . import tester
from . import j1939
from . import logreader
from .errors import Error
# Remove once less users are using the old package structure.
from . import database as db
from .version import __version__
__author__ = '<NAME>'
class _ErrorSubparser:
def __init__(self, subparser_name, error_message):
self.subparser_name = subparser_name
self.error_message = error_message
def add_subparser(self, subparser_list):
err_parser = \
subparser_list.add_parser(self.subparser_name,
description = self.error_message)
err_parser.add_argument("args", nargs="*")
err_parser.set_defaults(func=self._print_error)
def _print_error(self, args):
raise ImportError(self.error_message)
def _load_subparser(subparser_name, subparsers):
"""Load a subparser for a CLI command in a safe manner.
i.e., if the subparser cannot be loaded due to an import error or
similar, no exception is raised if another command was invoked on
the CLI."""
try:
result = importlib.import_module(f'.subparsers.{subparser_name}',
package='cantools')
result.add_subparser(subparsers)
except ImportError as e:
result = _ErrorSubparser(subparser_name,
f'Command "{subparser_name}" is unavailable: "{e}"')
result.add_subparser(subparsers)
def _main():
parser = argparse.ArgumentParser(
description='Various CAN utilities.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('--version',
action='version',
version=__version__,
help='Print version information and exit.')
# Workaround to make the subparser required in Python 3.
subparsers = parser.add_subparsers(title='subcommands',
dest='subcommand')
subparsers.required = True
# load all subparses which have a source file in the cantools
# module's 'subparsers' sub-directory
subparsers_dir = pathlib.Path(__file__).parent / 'subparsers'
for cur_file_name in os.listdir(subparsers_dir):
if cur_file_name.startswith('__'):
continue
if cur_file_name.endswith('.py'):
subparser_name = cur_file_name[:-3]
_load_subparser(subparser_name, subparsers)
elif (subparsers_dir / cur_file_name / "__init__.py").is_file():
subparser_name = cur_file_name
_load_subparser(subparser_name, subparsers)
args = parser.parse_args()
if args.debug:
args.func(args)
else:
try:
args.func(args)
except BaseException as e:
sys.exit('error: ' + str(e))
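# With this layout, any module or package under cantools/subparsers/ that provides an
# add_subparser(subparsers) function becomes a CLI command automatically. Assuming the
# stock subparsers ship with the package, typical invocations look like:
#
#   python -m cantools decode vehicle.dbc
#   python -m cantools dump vehicle.dbc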
|
459750
|
import sys
import re
import yaml
from collections import defaultdict
from os import path
from glob import glob
# Generate markdown link definitions for all documents in each of the
# collections defined in Jekyll configuration.
CONFIG_FILE = '_config.yml'
DOCUMENT_EXTENSION = 'md'
INCLUDE_DIRECTORY = '_includes'
def load_config(fn):
with open(fn) as f:
        return yaml.safe_load(f)
def collections():
'''Return list of collections defined in CONFIG_FILE.'''
conf = load_config(CONFIG_FILE)
if 'collections' in conf:
return conf['collections']
else:
return []
def collection_path(collection):
'''Return path for given collection.'''
return '_' + collection
def documents(collection):
'''Return set of documents in given collection.'''
cpath = collection_path(collection)
qpath = path.join(cpath, '*.' + DOCUMENT_EXTENSION)
docs = glob(qpath)
return set([path.basename(d) for d in docs])
def slugify(name):
# modified from http://stackoverflow.com/q/5574042
#import unicodedata
slug = name
#slug = unicodedata.normalize('NFKD', slug)
slug = slug.encode('ascii', 'ignore')
# Note: allow slashes in slugs
slug = re.sub(r'[^a-zA-Z0-9/]+', '-', slug).strip('-')
slug = re.sub(r'[-]+', '-', slug)
# Note: Markdown link definitions are case-insensitive, so lower()
# is required to get the correct matches (see
# http://daringfireball.net/projects/markdown/syntax#link)
slug = slug.lower()
return slug
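# A couple of hand-checked slugify examples (illustration only, not used by the script):
#   slugify('Entity Mentions')     -> 'entity-mentions'
#   slugify('ann/Entity Mentions') -> 'ann/entity-mentions'   (slashes kept, per the note above)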
def linkdef_names(collection, document):
'''Return list of link definition names for collection, document
pair.'''
root = path.splitext(document)[0]
return [slugify(root), slugify(path.join(collection, root))]
def select_linkdefs(collection, linked_docs):
'''Return list of link definitions for given collection.
linked_docs is a map from candidate link definition names to
    (collection, document) pairs.'''
# helper for cmp_linked
def alpha_prefix(s):
'''Return non-empty initial alphabetic prefix of given string,
if any, and full string otherwise.'''
m = re.match(r'^([a-z]+)', s)
if m:
return m.group(1)
else:
return s
# sort function for candidates
def cmp_linked(a, b):
# prioritize exact collection matches, next matches by initial
# alphabetic prefix on collection, and finally just sort to
# default order.
if a[0] == collection:
return -1
if b[0] == collection:
return 1
if alpha_prefix(a[0]) == alpha_prefix(collection):
return -1
if alpha_prefix(b[0]) == alpha_prefix(collection):
return 1
return cmp(a, b)
selected = {}
for l in sorted(linked_docs):
candidates = linked_docs[l][:]
candidates.sort(cmp_linked)
selected[l] = candidates[0]
return selected
def generate_links(collection, links):
generated = []
for name, target in links.items():
dest_coll, dest_doc = target
s = '[%s]: ' % name
dest_doc = re.sub(r'\.md$', '.html', dest_doc)
if collection == dest_doc:
# local link
s += dest_doc
elif collection == '':
# non-local to subdir
s += path.join(dest_coll, dest_doc)
else:
# non-local to sister directory
s+= path.join('..', dest_coll, dest_doc)
generated.append(s)
return generated
def output_links(collection, links):
outfn = path.join(INCLUDE_DIRECTORY, collection + '-links.md')
def link_sort(a, b):
# sort by number of slashes first, text next
return cmp((a.count('/'), a), (b.count('/'), b))
with open(outfn, 'wt') as outf:
for l in sorted(links, link_sort):
print >> outf, l
def process_collections():
collection_documents = {}
for c in collections():
collection_documents[c] = documents(c)
# map potential link definition names to collection, document
# pairs that they may apply to.
linked_docs = defaultdict(list)
for c in collection_documents:
for d in collection_documents[c]:
for l in linkdef_names(c, d):
linked_docs[l].append((c, d))
linked_docs = dict(linked_docs)
# select link definitions for each collection.
selected_links = {}
for c in collection_documents:
selected_links[c] = select_linkdefs(c, linked_docs)
# generate link definition strings for each collection
generated_links = {}
for c in collection_documents:
generated_links[c] = generate_links(c, selected_links[c])
# finally, write into include directory
for c in collection_documents:
output_links(c, generated_links[c])
def main(argv=None):
if argv is None:
argv = sys.argv
process_collections()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
459828
|
import argparse
import os
import sys
import shutil
import subprocess
import shlex
import signal
import athanor
from athanor.utils import partial_match
class AthanorLauncher:
name = "Athanor"
root = os.path.abspath(os.path.dirname(athanor.__file__))
startup = os.path.join(
os.path.abspath(os.path.dirname(athanor.__file__)), "startup.py"
)
game_template = os.path.abspath(
os.path.join(
os.path.abspath(os.path.dirname(athanor.__file__)), "game_template"
)
)
def __init__(self):
self.parser = self.create_parser()
self.applications = []
self.choices = ["start", "stop", "noop"]
self.operations = {
"_noop": self.operation_noop,
"start": self.operation_start,
"stop": self.operation_stop,
"_passthru": self.operation_passthru,
}
self.profile_path = None
def create_parser(self):
"""
Creates an ArgumentParser for this launcher.
"""
parser = argparse.ArgumentParser(
description="BOO", formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
"--init", nargs=1, action="store", dest="init", metavar="<folder>"
)
parser.add_argument(
"--app", nargs=1, action="store", dest="app", metavar="<folder>"
)
parser.add_argument(
"operation",
nargs="?",
action="store",
metavar="<operation>",
default="_noop",
)
return parser
def ensure_running(self, app):
"""
Checks whether a named app is running.
Args:
            app (str): The name of the application being checked.
Raises:
ValueError (str): If the app is not running.
"""
pidfile = os.path.join(os.getcwd(), f"{app}.pid")
if not os.path.exists(pidfile):
raise ValueError(f"Process {app} is not running!")
with open(pidfile, "r") as p:
if not (pid := int(p.read())):
raise ValueError(f"Process pid for {app} corrupted.")
try:
# This doesn't actually do anything except verify that the process exists.
os.kill(pid, 0)
except OSError:
print(f"Process ID for {app} seems stale. Removing stale pidfile.")
os.remove(pidfile)
return False
return True
def ensure_stopped(self, app):
"""
Checks whether a named app is not running.
Args:
            app (str): The name of the application being checked.
Raises:
ValueError (str): If the app is running.
"""
pidfile = os.path.join(os.getcwd(), f"{app}.pid")
if not os.path.exists(pidfile):
return True
with open(pidfile, "r") as p:
if not (pid := int(p.read())):
raise ValueError(f"Process pid for {app} corrupted.")
try:
os.kill(pid, 0)
except OSError:
return True
return False
def set_profile_path(self, args):
cur_dir = os.getcwd()
if not os.path.exists(os.path.join(cur_dir, "appdata")):
raise ValueError(f"Current directory is not a valid {self.name} profile!")
self.profile_path = cur_dir
def operation_start(self, op, args, unknown):
for app in self.applications:
if not self.ensure_stopped(app):
raise ValueError(f"Process {app} is already running!")
for app in self.applications:
env = os.environ.copy()
env["ATHANOR_PROFILE"] = self.profile_path
env["ATHANOR_APPNAME"] = app
cmd = f"{sys.executable} {self.startup}"
subprocess.Popen(shlex.split(cmd), env=env)
def operation_noop(self, op, args, unknown):
pass
def operation_stop(self, op, args, unknown):
for app in self.applications:
if not self.ensure_running(app):
raise ValueError(f"Process {app} is not running.")
for app in self.applications:
pidfile = os.path.join(os.getcwd(), f"{app}.pid")
with open(pidfile, "r") as p:
if not (pid := int(p.read())):
raise ValueError(f"Process pid for {app} corrupted.")
os.kill(pid, signal.SIGTERM)
os.remove(pidfile)
print(f"Stopped process {pid} - {app}")
def operation_passthru(self, op, args, unknown):
"""
God only knows what people typed here. Let their program figure it out! Overload this to
process the operation.
"""
raise Exception(f"Unsupported command {op}")
def option_init(self, name, un_args):
prof_path = os.path.join(os.getcwd(), name)
if not os.path.exists(prof_path):
shutil.copytree(self.game_template, prof_path)
os.rename(
os.path.join(prof_path, "gitignore"),
os.path.join(prof_path, ".gitignore"),
)
print(f"Game Profile created at {prof_path}")
else:
print(f"Game Profile at {prof_path} already exists!")
def run(self):
args, unknown_args = self.parser.parse_known_args()
option = args.operation.lower()
operation = option
if option not in self.choices:
option = "_passthru"
try:
if args.init:
self.option_init(args.init[0], unknown_args)
option = "_noop"
operation = "_noop"
if option in ["start", "stop", "_passthru"]:
# first, ensure we are running this program from the proper directory.
self.set_profile_path(args)
os.chdir(self.profile_path)
# next, insert the new cwd into path.
import sys
sys.path.insert(0, os.getcwd())
# now we should be able to import appdata!
from appdata.config import Launcher
l_config = Launcher()
l_config.setup()
# choose either all apps or a specific app to focus on.
if args.app:
if not (found := partial_match(args.app[0], l_config.applications)):
raise ValueError(
f"No registered Athanor application: {args.app[0]}"
)
self.applications = [found]
else:
self.applications = l_config.applications
# Find and execute the operation.
if not (op_func := self.operations.get(option, None)):
raise ValueError(f"No operation: {option}")
op_func(operation, args, unknown_args)
except Exception as e:
import sys
import traceback
traceback.print_exc(file=sys.stdout)
print(f"Something done goofed: {e}")
|
459830
|
from functools import partial
from .util import ViewTestCase
from Tutkain.src import test
class TestTest(ViewTestCase):
def assertAlwaysYields(self, content, expected, producer):
self.set_view_content(content)
for n in range(len(content)):
self.assertEquals(expected, producer(n), n)
def test_current(self):
current = partial(test.current, self.view)
self.assertAlwaysYields("(deftest foo)", "foo", current)
self.assertAlwaysYields("(deftest foo ())", "foo", current)
self.assertAlwaysYields("(deftest ^:foo bar)", "bar", current)
self.set_view_content("(doseq (deftest ^:foo bar))")
self.assertEquals(None, test.current(self.view, 6))
self.assertEquals("bar", test.current(self.view, 7))
self.assertEquals("bar", test.current(self.view, 26))
self.assertEquals(None, test.current(self.view, 27))
|
459838
|
from textwrap import dedent
from pyexcel.internal.common import get_sheet_headers
from pyexcel.internal.generators import SheetStream
from pyexcel.plugins.sources.output_to_memory import WriteSheetToMemory
from nose.tools import eq_
from pyexcel_io import manager as manager
def test_save_to():
file_type = "csv"
io = manager.get_io(file_type)
g = (i for i in [[1, 2], [3, 4]])
ss = WriteSheetToMemory(
file_type=file_type, file_stream=io, lineterminator="\n"
)
sheet_stream = SheetStream("test", g)
ss.write_data(sheet_stream)
content = io.getvalue()
expected = dedent(
"""\
1,2
3,4
"""
)
assert content == expected
def test_get_sheet_headers():
data = iter([["a", "b", "c"], [1, 2, 3]])
sheet_stream = SheetStream("test", data)
colnames_array = get_sheet_headers(sheet_stream)
eq_(colnames_array, ["a", "b", "c"])
|
459843
|
import matplotlib as mpl
mpl.use('Agg') # noqa
import argparse
import os
import json
import pickle
import yaml
import numpy
import hashlib
from jinja2 import Environment, FileSystemLoader
from ann_benchmarks import results
from ann_benchmarks.datasets import get_dataset
from ann_benchmarks.plotting.plot_variants import (all_plot_variants
as plot_variants)
from ann_benchmarks.plotting.metrics import all_metrics as metrics
from ann_benchmarks.plotting.utils import (get_plot_label, compute_metrics,
compute_all_metrics,
create_pointset,
create_linestyles)
import plot
colors = [
"rgba(166,206,227,1)",
"rgba(31,120,180,1)",
"rgba(178,223,138,1)",
"rgba(51,160,44,1)",
"rgba(251,154,153,1)",
"rgba(227,26,28,1)",
"rgba(253,191,111,1)",
"rgba(255,127,0,1)",
"rgba(202,178,214,1)"
]
point_styles = {
"o": "circle",
"<": "triangle",
"*": "star",
"x": "cross",
"+": "rect",
}
def convert_color(color):
r, g, b, a = color
return "rgba(%(r)d, %(g)d, %(b)d, %(a)d)" % {
"r": r * 255, "g": g * 255, "b": b * 255, "a": a}
def convert_linestyle(ls):
new_ls = {}
for algo in ls.keys():
algostyle = ls[algo]
new_ls[algo] = (convert_color(algostyle[0]),
convert_color(algostyle[1]),
algostyle[2], point_styles[algostyle[3]])
return new_ls
def get_run_desc(properties):
return "%(dataset)s_%(count)d_%(distance)s" % properties
def get_dataset_from_desc(desc):
return desc.split("_")[0]
def get_count_from_desc(desc):
return desc.split("_")[1]
def get_distance_from_desc(desc):
return desc.split("_")[2]
def get_dataset_label(desc):
return "{} (k = {})".format(get_dataset_from_desc(desc),
get_count_from_desc(desc))
def directory_path(s):
if not os.path.isdir(s):
raise argparse.ArgumentTypeError("'%s' is not a directory" % s)
return s + "/"
def prepare_data(data, xn, yn):
"""Change format from (algo, instance, dict) to (algo, instance, x, y)."""
res = []
for algo, algo_name, result in data:
res.append((algo, algo_name, result[xn], result[yn]))
return res
parser = argparse.ArgumentParser()
parser.add_argument(
'--plottype',
help='Generate only the plots specified',
nargs='*',
choices=plot_variants.keys(),
default=plot_variants.keys())
parser.add_argument(
'--outputdir',
help='Select output directory',
default='.',
type=directory_path,
action='store')
parser.add_argument(
'--latex',
help='generates latex code for each plot',
action='store_true')
parser.add_argument(
'--scatter',
help='create scatterplot for data',
action='store_true')
parser.add_argument(
'--recompute',
help='Clears the cache and recomputes the metrics',
action='store_true')
args = parser.parse_args()
def get_lines(all_data, xn, yn, render_all_points):
""" For each algorithm run on a dataset, obtain its performance
curve coords."""
plot_data = []
for algo in sorted(all_data.keys(), key=lambda x: x.lower()):
xs, ys, ls, axs, ays, als = \
create_pointset(prepare_data(all_data[algo], xn, yn), xn, yn)
if render_all_points:
xs, ys, ls = axs, ays, als
plot_data.append({"name": algo, "coords": zip(xs, ys), "labels": ls,
"scatter": render_all_points})
return plot_data
def create_plot(all_data, xn, yn, linestyle, j2_env, additional_label="",
plottype="line"):
xm, ym = (metrics[xn], metrics[yn])
render_all_points = plottype == "bubble"
plot_data = get_lines(all_data, xn, yn, render_all_points)
latex_code = j2_env.get_template("latex.template").\
render(plot_data=plot_data, caption=get_plot_label(xm, ym),
xlabel=xm["description"], ylabel=ym["description"])
plot_data = get_lines(all_data, xn, yn, render_all_points)
button_label = hashlib.sha224((get_plot_label(xm, ym) + additional_label)
.encode("utf-8")).hexdigest()
return j2_env.get_template("chartjs.template").\
render(args=args, latex_code=latex_code, button_label=button_label,
data_points=plot_data,
xlabel=xm["description"], ylabel=ym["description"],
plottype=plottype, plot_label=get_plot_label(xm, ym),
label=additional_label, linestyle=linestyle,
render_all_points=render_all_points)
def build_detail_site(data, label_func, j2_env, linestyles, batch=False):
for (name, runs) in data.items():
print("Building '%s'" % name)
all_runs = runs.keys()
label = label_func(name)
data = {"normal": [], "scatter": []}
for plottype in args.plottype:
xn, yn = plot_variants[plottype]
data["normal"].append(create_plot(
runs, xn, yn, convert_linestyle(linestyles), j2_env))
if args.scatter:
data["scatter"].append(
create_plot(runs, xn, yn, convert_linestyle(linestyles),
j2_env, "Scatterplot ", "bubble"))
# create png plot for summary page
data_for_plot = {}
for k in runs.keys():
data_for_plot[k] = prepare_data(runs[k], 'k-nn', 'qps')
plot.create_plot(
data_for_plot, False,
'linear', 'log', 'k-nn', 'qps',
args.outputdir + name + '.png',
linestyles, batch)
output_path = \
args.outputdir + name + '.html'
with open(output_path, "w") as text_file:
text_file.write(j2_env.get_template("detail_page.html").
render(title=label, plot_data=data,
args=args, batch=batch))
def build_index_site(datasets, algorithms, j2_env, file_name):
dataset_data = {'batch': [], 'non-batch': []}
for mode in ['batch', 'non-batch']:
distance_measures = sorted(
set([get_distance_from_desc(e) for e in datasets[mode].keys()]))
sorted_datasets = sorted(
set([get_dataset_from_desc(e) for e in datasets[mode].keys()]))
for dm in distance_measures:
d = {"name": dm.capitalize(), "entries": []}
for ds in sorted_datasets:
matching_datasets = [e for e in datasets[mode].keys()
if get_dataset_from_desc(e) == ds and # noqa
get_distance_from_desc(e) == dm]
sorted_matches = sorted(
matching_datasets,
key=lambda e: int(get_count_from_desc(e)))
for idd in sorted_matches:
d["entries"].append(
{"name": idd, "desc": get_dataset_label(idd)})
dataset_data[mode].append(d)
with open(args.outputdir + "index.html", "w") as text_file:
text_file.write(j2_env.get_template("summary.html").
render(title="ANN-Benchmarks",
dataset_with_distances=dataset_data,
algorithms=algorithms))
def load_all_results():
"""Read all result files and compute all metrics"""
all_runs_by_dataset = {'batch': {}, 'non-batch': {}}
all_runs_by_algorithm = {'batch': {}, 'non-batch': {}}
cached_true_dist = []
old_sdn = None
for mode in ["non-batch", "batch"]:
for properties, f in results.load_all_results(batch_mode=(mode == "batch")):
sdn = get_run_desc(properties)
if sdn != old_sdn:
dataset, _ = get_dataset(properties["dataset"])
cached_true_dist = list(dataset["distances"])
old_sdn = sdn
algo_ds = get_dataset_label(sdn)
desc_suffix = ("-batch" if mode == "batch" else "")
algo = properties["algo"] + desc_suffix
sdn += desc_suffix
ms = compute_all_metrics(
cached_true_dist, f, properties, args.recompute)
all_runs_by_algorithm[mode].setdefault(
algo, {}).setdefault(algo_ds, []).append(ms)
all_runs_by_dataset[mode].setdefault(
sdn, {}).setdefault(algo, []).append(ms)
return (all_runs_by_dataset, all_runs_by_algorithm)
j2_env = Environment(loader=FileSystemLoader("./templates/"), trim_blocks=True)
j2_env.globals.update(zip=zip, len=len)
runs_by_ds, runs_by_algo = load_all_results()
dataset_names = [get_dataset_label(x) for x in list(
runs_by_ds['batch'].keys()) + list(runs_by_ds['non-batch'].keys())]
algorithm_names = list(runs_by_algo['batch'].keys(
)) + list(runs_by_algo['non-batch'].keys())
linestyles = {**create_linestyles(dataset_names),
**create_linestyles(algorithm_names)}
build_detail_site(
runs_by_ds['non-batch'],
lambda label: get_dataset_label(label), j2_env, linestyles, False)
build_detail_site(
runs_by_ds['batch'],
lambda label: get_dataset_label(label), j2_env, linestyles, True)
build_detail_site(
runs_by_algo['non-batch'],
lambda x: x, j2_env, linestyles, False)
build_detail_site(
runs_by_algo['batch'], lambda x: x, j2_env, linestyles, True)
build_index_site(runs_by_ds, runs_by_algo, j2_env, "index.html")
|
459865
|
class Solution:
def getHappyString(self, n: int, k: int) -> str:
base = 1 << (n - 1)
if k > 3 * base:
return ''
table = {'a': 'bc', 'b': 'ac', 'c': 'ab'}
k -= 1
index, k = divmod(k, base)
result = chr(ord('a') + index)
base >>= 1
while base > 0:
index, k = divmod(k, base)
result += table[result[-1]][index]
base >>= 1
return result
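# Worked example (hand-checked): getHappyString(3, 9)
#   base = 4, k -> 8;  divmod(8, 4) = (2, 0) -> 'c'
#   base = 2;          divmod(0, 2) = (0, 0) -> table['c'][0] = 'a' -> 'ca'
#   base = 1;          divmod(0, 1) = (0, 0) -> table['a'][0] = 'b' -> 'cab'
# i.e. the 9th happy string of length 3 in lexicographic order is 'cab'.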
|
459867
|
import torch
from sklearn.metrics import normalized_mutual_info_score
eps = 1e-10
class MixedMultiLabelLoss(torch.nn.modules.loss._Loss):
def __init__(self, exclusive_classes_mask: torch.tensor, excl_loss=torch.nn.CrossEntropyLoss(),
non_excl_loss=torch.nn.BCEWithLogitsLoss()):
super(MixedMultiLabelLoss, self).__init__()
assert exclusive_classes_mask.dtype == torch.bool, "Only boolean mask are allowed"
self.exclusive_classes = exclusive_classes_mask
self.excl_loss = excl_loss
self.non_excl_loss = non_excl_loss
def __call__(self, output, target, *args, **kwargs) -> torch.tensor:
assert output.shape[1] == self.exclusive_classes.squeeze().shape[0], \
f"boolean mask shape {self.exclusive_classes.squeeze().shape}, " \
f"different from output number of classes {output.shape[1]}"
excl_output = output[:, self.exclusive_classes]
excl_target = target[:, self.exclusive_classes]
excl_target = excl_target.argmax(dim=1)
non_excl_output = output[:, ~self.exclusive_classes]
non_excl_target = target[:, ~self.exclusive_classes]
excl_loss = self.excl_loss(excl_output, excl_target)
non_excl_loss = self.non_excl_loss(non_excl_output, non_excl_target)
return excl_loss + non_excl_loss
class MutualInformationLoss(torch.nn.modules.loss._Loss):
def __init__(self, penalize_inactive=False, inactive_weight=1e-2):
self.penalize_inactive = penalize_inactive
self.inactive_weight = inactive_weight
super(MutualInformationLoss, self).__init__()
def __call__(self, output, *args, **kwargs) -> torch.tensor:
output_probability = torch.nn.Sigmoid()(output)
mi_loss = 1 - mutual_information(output_probability, normalized=True)
if self.penalize_inactive:
mean_inactivation = (1 / torch.sum(output_probability, dim=0)) * output_probability.shape[0]
inactivation_loss = torch.sum(mean_inactivation) * self.inactive_weight
return mi_loss + inactivation_loss
return mi_loss
def _conditional_probabilities(x):
    # Normalized probability, over all the outputs of each sample, that each output holds true
z = 0.99
d = torch.as_tensor(x.shape[1])
beta = torch.log(z * (d - 1) / (1 - z))
sum_outputs_over_sample = torch.reshape(torch.sum(torch.exp(beta * x), 1), (x.shape[0], 1))
cond_probabilities = torch.div(torch.exp(beta * x), sum_outputs_over_sample + eps) + eps
return torch.squeeze(cond_probabilities)
def _conditional_entropy(output, sample_probability):
    # Compute the conditional entropy by summing, over all outputs and all samples,
    # the product of the conditional probabilities and their log, weighted by the sample probability
cond_probabilities = _conditional_probabilities(output)
log_cond_prob = torch.log(cond_probabilities)
c_entropy_on_sample = torch.sum(torch.multiply(cond_probabilities, log_cond_prob), 1)
cond_entropy = - torch.sum(c_entropy_on_sample * torch.squeeze(sample_probability), 0)
cond_entropy = torch.squeeze(cond_entropy)
return cond_entropy
def _entropy(output, sample_probability):
# Compute the marginal_probabilities of each output by summing on all the samples the cond_probabilities
cond_probabilities = _conditional_probabilities(output) * sample_probability
marginal_probabilities = torch.sum(cond_probabilities, 0)
marginal_probabilities = torch.reshape(marginal_probabilities, (1, output.shape[1]))
# Compute the entropy by summing on all the outputs the product among the marginal_probabilities and their log
entropy = - torch.sum(torch.multiply(marginal_probabilities, torch.log(marginal_probabilities)), 1)
return torch.squeeze(entropy)
def mutual_information(output: torch.Tensor, sample_probability=None, normalized=False) -> torch.tensor:
    # Sample probability: if not given, it is assumed to be uniform, i.e. 1 / n_samples.
    # If given explicitly, it is normalized so that sum(p(xi)) = 1.
if sample_probability is None:
n_samples = torch.as_tensor(output.shape[0])
sample_probability = 1 / n_samples
else:
# assert sample_probability.shape.ndims == 1, "Wrong sample_probability. Should be an array (n_sample, 1), " \
# "received an array with shape " + sample_probability.shape
sample_probability = sample_probability / (torch.sum(sample_probability) + eps) + eps
sample_probability = torch.reshape(sample_probability, shape=(sample_probability.shape[0], 1))
entropy_t = _entropy(output, sample_probability)
cond_entropy_t = _conditional_entropy(output, sample_probability)
mutual_info_t = entropy_t - cond_entropy_t
if normalized:
normalized_mutual_info_t = mutual_info_t / entropy_t
return normalized_mutual_info_t
return mutual_info_t
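if __name__ == "__main__":
    # Minimal smoke test added for illustration (not part of the original module):
    # it only checks that the loss runs end-to-end on random logits and returns a scalar.
    torch.manual_seed(0)
    logits = torch.randn(32, 10)
    mi_loss = MutualInformationLoss()(logits)
    print("1 - NMI loss:", float(mi_loss))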
|
459910
|
import sys
import argparse
import logging
import numpy as np
import statistics
import tensorflow as tf
from scipy.spatial.distance import cosine
from linguistic_style_transfer_model.config import global_config
from linguistic_style_transfer_model.utils import log_initializer, lexicon_helper
logger = logging.getLogger(global_config.logger_name)
def load_glove_model(glove_file):
logger.debug("Loading Glove Model")
model = dict()
with open(glove_file) as f:
for line in f:
split_line = line.split()
word = split_line[0]
embedding = np.array([float(val) for val in split_line[1:]])
model[word] = embedding
logger.debug("Done. {} words loaded!".format(len(model)))
return model
def get_sentence_embedding(tokens, model):
embeddings = np.asarray([model[token] for token in tokens if token in model])
min_embedding = np.min(embeddings, axis=0)
max_embedding = np.max(embeddings, axis=0)
mean_embedding = np.mean(embeddings, axis=0)
sentence_embedding = np.concatenate([min_embedding, max_embedding, mean_embedding], axis=0)
return sentence_embedding
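# Note on get_sentence_embedding: with d-dimensional GloVe vectors (e.g. d = 100), each
# sentence is represented by a 3*d-dimensional vector made of the element-wise minimum,
# maximum and mean of its word embeddings, concatenated in that order.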
def get_content_preservation_score(actual_word_lists, generated_word_lists, embedding_model):
sentiment_words = lexicon_helper.get_sentiment_words()
cosine_distances = list()
skip_count = 0
for word_list_1, word_list_2 in zip(actual_word_lists, generated_word_lists):
cosine_similarity = 0
words_1 = set(word_list_1)
words_2 = set(word_list_2)
words_1 -= sentiment_words
words_2 -= sentiment_words
try:
cosine_similarity = 1 - cosine(
get_sentence_embedding(words_1, embedding_model),
get_sentence_embedding(words_2, embedding_model))
cosine_distances.append(cosine_similarity)
except ValueError:
skip_count += 1
logger.debug("Skipped lines: {} :-: {}".format(word_list_1, word_list_2))
logger.debug("{} lines skipped due to errors".format(skip_count))
mean_cosine_distance = statistics.mean(cosine_distances) if cosine_distances else 0
del sentiment_words
return mean_cosine_distance
def get_word_overlap_score(actual_word_lists, generated_word_lists):
english_stopwords = lexicon_helper.get_stopwords()
sentiment_words = lexicon_helper.get_sentiment_words()
scores = list()
for word_list_1, word_list_2 in zip(actual_word_lists, generated_word_lists):
score = 0
words_1 = set(word_list_1)
words_2 = set(word_list_2)
words_1 -= sentiment_words
words_1 -= english_stopwords
words_2 -= sentiment_words
words_2 -= english_stopwords
word_intersection = words_1 & words_2
word_union = words_1 | words_2
if word_union:
score = len(word_intersection) / len(word_union)
scores.append(score)
word_overlap_score = statistics.mean(scores) if scores else 0
del english_stopwords
del sentiment_words
return word_overlap_score
def run_content_preservation_evaluator(source_file_path, target_file_path, embeddings_file):
glove_model = load_glove_model(embeddings_file)
actual_word_lists, generated_word_lists = list(), list()
with open(source_file_path) as source_file, open(target_file_path) as target_file:
for line_1, line_2 in zip(source_file, target_file):
actual_word_lists.append(tf.keras.preprocessing.text.text_to_word_sequence(line_1))
generated_word_lists.append(tf.keras.preprocessing.text.text_to_word_sequence(line_2))
content_preservation_score = get_content_preservation_score(
actual_word_lists, generated_word_lists, glove_model)
word_overlap_score = get_word_overlap_score(
actual_word_lists, generated_word_lists)
return [content_preservation_score, word_overlap_score]
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--embeddings-file-path", type=str, required=True)
parser.add_argument("--source-file-path", type=str, required=True)
parser.add_argument("--target-file-path", type=str, required=True)
global logger
logger = log_initializer.setup_custom_logger(global_config.logger_name, "DEBUG")
options = vars(parser.parse_args(args=argv))
[content_preservation_score, word_overlap_score] = run_content_preservation_evaluator(
options["source_file_path"], options["target_file_path"], options["embeddings_file_path"])
logger.info("Aggregate content preservation: {}".format(content_preservation_score))
logger.info("Aggregate word overlap: {}".format(word_overlap_score))
if __name__ == "__main__":
main(sys.argv[1:])
|
459925
|
import time
from django.conf import settings
from jwcrypto import jwk, jwt
SIGNING_ALG = "RS256"
EXPIRY_TIME = 15 * 60 # 15 minutes
ID_PRIVATE_KEY = jwk.JWK.from_pem(settings.IDENTITY_RSA_PRIVATE_KEY.encode("utf-8"))
def mint_access_jwt(key: jwk.JWK, urn: str) -> jwt.JWT:
"""
Mint a JWT with the following claims:
- use -> access - this says that this JWT is strictly an access JWT
- iat -> now - this says that this JWT isn't active until the current time.
this protects us from attacks from clock skew
- exp -> expiry_time - this makes sure our JWT is only valid for EXPIRY_TIME
"""
now = time.time()
expiry_time = now + EXPIRY_TIME
token = jwt.JWT(
header={"alg": SIGNING_ALG},
claims={"sub": urn, "use": "access", "iat": now, "exp": expiry_time},
)
token.make_signed_token(key)
return token
def mint_refresh_jwt(key: jwk.JWK, urn: str) -> jwt.JWT:
"""
Mint a JWT with the following claims:
- use -> refresh - this says that this JWT is strictly a refresh JWT
- iat -> now - this says that this JWT isn't active until the current time.
this protects us from attacks from clock skew
- no exp claim because refresh JWTs do not expire
"""
now = time.time()
token = jwt.JWT(
header={"alg": SIGNING_ALG}, claims={"sub": urn, "use": "refresh", "iat": now}
)
token.make_signed_token(key)
return token
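# Minimal usage sketch (illustration only; the URN format is application-specific and
# "urn:example:user:42" is just a placeholder):
#
#   access = mint_access_jwt(ID_PRIVATE_KEY, "urn:example:user:42")
#   refresh = mint_refresh_jwt(ID_PRIVATE_KEY, "urn:example:user:42")
#   compact_jws = access.serialize()  # signed compact form handed to the client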
|
459955
|
import openmc
import openmc.examples
import pytest
@pytest.fixture(scope='module')
def myplot():
plot = openmc.Plot(name='myplot')
plot.width = (100., 100.)
plot.origin = (2., 3., -10.)
plot.pixels = (500, 500)
plot.filename = 'myplot'
plot.type = 'slice'
plot.basis = 'yz'
plot.background = 'black'
plot.background = (0, 0, 0)
plot.color_by = 'material'
m1, m2 = openmc.Material(), openmc.Material()
plot.colors = {m1: (0, 255, 0), m2: (0, 0, 255)}
plot.colors = {m1: 'green', m2: 'blue'}
plot.mask_components = [openmc.Material()]
plot.mask_background = 'white'
plot.mask_background = (255, 255, 255)
plot.overlap_color = (255, 211, 0)
plot.overlap_color = 'yellow'
plot.show_overlaps = True
plot.level = 1
plot.meshlines = {
'type': 'tally',
'id': 1,
'linewidth': 2,
'color': (40, 30, 20)
}
return plot
def test_attributes(myplot):
assert myplot.name == 'myplot'
def test_repr(myplot):
r = repr(myplot)
assert isinstance(r, str)
def test_from_geometry():
width = 25.
s = openmc.Sphere(r=width/2, boundary_type='vacuum')
c = openmc.Cell(region=-s)
univ = openmc.Universe(cells=[c])
geom = openmc.Geometry(univ)
for basis in ('xy', 'yz', 'xz'):
plot = openmc.Plot.from_geometry(geom, basis)
assert plot.origin == pytest.approx((0., 0., 0.))
assert plot.width == pytest.approx((width, width))
assert plot.basis == basis
def test_highlight_domains():
plot = openmc.Plot()
plot.color_by = 'material'
plots = openmc.Plots([plot])
model = openmc.examples.pwr_pin_cell()
mats = {m for m in model.materials if 'UO2' in m.name}
plots.highlight_domains(model.geometry, mats)
def test_xml_element(myplot):
elem = myplot.to_xml_element()
assert 'id' in elem.attrib
assert 'color_by' in elem.attrib
assert 'type' in elem.attrib
assert elem.find('origin') is not None
assert elem.find('width') is not None
assert elem.find('pixels') is not None
assert elem.find('background').text == '0 0 0'
newplot = openmc.Plot.from_xml_element(elem)
attributes = ('id', 'color_by', 'filename', 'type', 'basis', 'level',
'meshlines', 'show_overlaps', 'origin', 'width', 'pixels',
'background', 'mask_background')
for attr in attributes:
assert getattr(newplot, attr) == getattr(myplot, attr), attr
def test_plots(run_in_tmpdir):
p1 = openmc.Plot(name='plot1')
p1.origin = (5., 5., 5.)
p2 = openmc.Plot(name='plot2')
p2.origin = (-3., -3., -3.)
plots = openmc.Plots([p1, p2])
assert len(plots) == 2
p3 = openmc.Plot(name='plot3')
plots.append(p3)
assert len(plots) == 3
plots.export_to_xml()
# from_xml
new_plots = openmc.Plots.from_xml()
    assert len(new_plots) == len(plots)
    assert new_plots[0].origin == pytest.approx(p1.origin)
    assert new_plots[1].origin == pytest.approx(p2.origin)
|
459976
|
from PATHS import *
import pandas as pd
import json
def get_imdb_ids():
return json.load(open(imdb_ids_path))
def get_ratings():
df = pd.read_csv('ratings.tsv', sep='\t', header=0)
return df
def map_ratings():
ids = get_imdb_ids()
df = get_ratings()
df_ids = df['tconst'].tolist()
cntr = 0
for imdb_id in ids:
if imdb_id in df_ids:
df_val = df[df['tconst'] == imdb_id]
with open("./bollywood_ratings.csv", 'a') as f:
print("FETCHING: " + str(imdb_id) + " DONE: " + str(cntr))
f.write(str(imdb_id) + "," + str(df_val.values[0][1]) + "," + str(df_val.values[0][2]) + "\n")
# else:
# print(imdb_id)
cntr = cntr + 1
if __name__ == '__main__':
map_ratings()
|
460049
|
from codecs import open
from tempfile import NamedTemporaryFile
from multiprocessing import Manager
from collections import OrderedDict
from genmod.utils import VariantPrinter
from genmod.vcf_tools import (get_variant_dict, get_info_dict,
get_variant_id, HeaderParser)
def setup_vcf_file():
"""
Print some variants to a vcf file and return the filename
"""
vcf_lines = [
'##fileformat=VCFv4.1\n',
'##INFO=<ID=MQ,Number=1,Type=Float,Description="RMS Mapping Quality">\n',
'##contig=<ID=1,length=249250621,assembly=b37>\n',
'##reference=file:///humgen/gsa-hpprojects/GATK/bundle'\
'/current/b37/human_g1k_v37.fasta\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tfather\tmother\tproband\n',
'1\t11900\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/1:60\t0/1:60\t1/1:60\n',
'1\t879585\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/1:60\t0/0:60\t0/1:60\n',
'1\t879586\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/1:60\t0/1:60\n',
'1\t947378\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/0:60\t0/1:60\n',
'1\t973348\t.\tG\tA\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/0:60\t0/1:60\n',
'3\t879585\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/1:60\t0/0:60\t0/1:60\n',
'3\t879586\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/1:60\t0/1:60\n',
'3\t947378\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/0:60\t0/1:60\n',
'3\t973348\t.\tG\tA\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/0:60\t0/1:60\n'
]
vcf_file = NamedTemporaryFile(mode='w+t', delete=False, suffix='.vcf')
vcf_file.writelines(vcf_lines)
vcf_file.seek(0)
vcf_file.close()
return vcf_file.name
def test_variant_printer():
"""Test the variant printer"""
vcf_file = setup_vcf_file()
variant_queue = Manager().Queue()
head = HeaderParser()
outfile = NamedTemporaryFile(mode='w+t', delete=False, suffix='.vcf')
outfile.close()
variant_printer = VariantPrinter(
task_queue=variant_queue,
head=head,
mode='chromosome',
outfile = outfile.name
)
variant_printer.start()
batch = OrderedDict()
for line in open(vcf_file):
line = line.rstrip()
if line.startswith('#'):
if line.startswith('##'):
head.parse_meta_data(line)
else:
head.parse_header_line(line)
else:
variant_dict = get_variant_dict(line, head.header)
variant_id = get_variant_id(variant_dict)
variant_dict['variant_id'] = variant_id
variant_dict['info_dict'] = get_info_dict(variant_dict['INFO'])
variant_queue.put(variant_dict)
variant_queue.put(None)
variant_printer.join()
variants = []
with open(outfile.name, 'r', 'utf-8-sig') as f:
for line in f:
variants.append(line.rstrip().split('\t'))
assert variants[0][0] == '1'
assert variants[0][2] == '11900'
|
460077
|
import unittest
import os
from modules.DatabaseModule.DBManager import DBManager
from opentera.db.models.TeraUser import TeraUser
from opentera.config.ConfigManager import ConfigManager
from tests.opentera.db.models.BaseModelsTest import BaseModelsTest
class DBManagerTeraUserAccessTest(BaseModelsTest):
filename = os.path.join(os.path.dirname(__file__), 'DBManagerTeraUserAccessTest.db')
SQLITE = {
'filename': filename
}
def setUp(self):
super().setUp()
self.admin_user = TeraUser.get_user_by_username('admin')
self.test_user = TeraUser.get_user_by_username('user')
def test_instance(self):
self.assertNotEqual(self.admin_user, None)
self.assertNotEqual(self.test_user, None)
def test_admin_get_accessible_users_ids(self):
users = DBManager.userAccess(self.admin_user).get_accessible_users()
self.assertEqual(len(users), 6)
def test_admin_accessible_sites(self):
sites = DBManager.userAccess(self.admin_user).get_accessible_sites()
self.assertEqual(len(sites), 2)
    def test_user_accessible_sites(self):
sites = DBManager.userAccess(self.test_user).get_accessible_sites()
self.assertEqual(len(sites), 1)
|
460153
|
import cv2
import torch
import math
import numpy as np
def describe_opencv(model, img, kpts, N, mag_factor, use_gpu = True):
"""
Rectifies patches around openCV keypoints, and returns patches tensor
"""
patches = []
for kp in kpts:
x,y = kp.pt
s = kp.size
a = kp.angle
s = mag_factor * s / N
cos = math.cos(a * math.pi / 180.0)
sin = math.sin(a * math.pi / 180.0)
M = np.matrix([
[+s * cos, -s * sin, (-s * cos + s * sin) * N / 2.0 + x],
[+s * sin, +s * cos, (-s * sin - s * cos) * N / 2.0 + y]])
patch = cv2.warpAffine(img, M, (N, N),
flags=cv2.WARP_INVERSE_MAP + \
cv2.INTER_CUBIC + cv2.WARP_FILL_OUTLIERS)
patches.append(patch)
patches = torch.from_numpy(np.asarray(patches)).float()
patches = torch.unsqueeze(patches,1)
if use_gpu:
patches = patches.cuda()
descrs = model(patches)
return descrs.detach().cpu().numpy()
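# Hypothetical usage sketch (the detector, patch size and magnification factor are
# placeholders, not part of this module; `model` is assumed to be a trained
# patch-descriptor network that accepts a [B, 1, N, N] float tensor):
#
#   kpts = cv2.SIFT_create().detect(gray, None)
#   descs = describe_opencv(model, gray, kpts, N=32, mag_factor=3, use_gpu=False)
#   # descs has shape (len(kpts), descriptor_dim)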
|
460156
|
import asyncio
import logging
from hearthstone.asyncio import asyncio_utils
from hearthstone.battlebots.random_bot import RandomBot
from hearthstone.simulator.core.randomizer import DefaultRandomizer
from hearthstone.simulator.host.async_host import AsyncHost
logger = logging.getLogger(__name__)
async def main():
hosts = [AsyncHost({f'RandomBot{i}': RandomBot(i + j) for i in range(8)})
for j in range(25)
]
for j, host in enumerate(hosts):
host.tavern.randomizer = DefaultRandomizer(j)
tasks = [asyncio_utils.create_task(host.async_play_game(), logger=logger) for host in hosts]
await asyncio.gather(*tasks)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
460166
|
r"""
============================
Single Gramian angular field
============================
A Gramian angular field is an image obtained from a time series, representing
some kind of temporal correlation between each pair of values from the time
series. Two methods are available: Gramian angular summation field and Gramian
angular difference field.
It is implemented as :class:`pyts.image.GramianAngularField`.
In this example, the considered time series is the sequence of the sine
function values for 1000 equally-spaced points in the interval
:math:`[0, 4\pi]`.
Both the corresponding Gramian angular summation and difference fields are
plotted.
Since the API is designed for machine learning, the
:meth:`~pyts.image.GramianAngularField.transform` method of the
:class:`pyts.image.GramianAngularField` class expects a data set of time series
as input, so the time series is transformed into a data set with a single time
series (``X = np.array([x])``) and the first element of the data set of
Gramian angular fields is retrieved (``ax_gasf.imshow(X_gasf[0], ...``).
"""
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
from pyts.image import GramianAngularField
# Create a toy time series using the sine function
time_points = np.linspace(0, 4 * np.pi, 1000)
x = np.sin(time_points)
X = np.array([x])
# Compute Gramian angular fields
gasf = GramianAngularField(method='summation')
X_gasf = gasf.fit_transform(X)
gadf = GramianAngularField(method='difference')
X_gadf = gadf.fit_transform(X)
# Plot the time series and its Gramian angular fields
width_ratios = (2, 7, 7, 0.4)
height_ratios = (2, 7)
width = 10
height = width * sum(height_ratios) / sum(width_ratios)
fig = plt.figure(figsize=(width, height))
gs = fig.add_gridspec(2, 4, width_ratios=width_ratios,
height_ratios=height_ratios,
left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.1, hspace=0.1)
# Define the ticks and their labels for both axes
time_ticks = np.linspace(0, 4 * np.pi, 9)
time_ticklabels = [r'$0$', r'$\frac{\pi}{2}$', r'$\pi$',
r'$\frac{3\pi}{2}$', r'$2\pi$', r'$\frac{5\pi}{2}$',
r'$3\pi$', r'$\frac{7\pi}{2}$', r'$4\pi$']
value_ticks = [-1, 0, 1]
reversed_value_ticks = value_ticks[::-1]
# Plot the time series on the left with inverted axes
ax_left = fig.add_subplot(gs[1, 0])
ax_left.plot(x, time_points)
ax_left.set_xticks(reversed_value_ticks)
ax_left.set_xticklabels(reversed_value_ticks, rotation=90)
ax_left.set_yticks(time_ticks)
ax_left.set_yticklabels(time_ticklabels, rotation=90)
ax_left.set_ylim((0, 4 * np.pi))
ax_left.invert_xaxis()
# Plot the time series on the top
ax_top1 = fig.add_subplot(gs[0, 1])
ax_top2 = fig.add_subplot(gs[0, 2])
for ax in (ax_top1, ax_top2):
ax.plot(time_points, x)
ax.set_xticks(time_ticks)
ax.set_xticklabels(time_ticklabels)
ax.set_yticks(value_ticks)
ax.xaxis.tick_top()
ax.set_xlim((0, 4 * np.pi))
ax_top1.set_yticklabels(value_ticks)
ax_top2.set_yticklabels([])
# Plot the Gramian angular fields on the bottom right
ax_gasf = fig.add_subplot(gs[1, 1])
ax_gasf.imshow(X_gasf[0], cmap='rainbow', origin='lower',
extent=[0, 4 * np.pi, 0, 4 * np.pi])
ax_gasf.set_xticks([])
ax_gasf.set_yticks([])
ax_gasf.set_title('Gramian Angular Summation Field', y=-0.09)
ax_gadf = fig.add_subplot(gs[1, 2])
im = ax_gadf.imshow(X_gadf[0], cmap='rainbow', origin='lower',
extent=[0, 4 * np.pi, 0, 4 * np.pi])
ax_gadf.set_xticks([])
ax_gadf.set_yticks([])
ax_gadf.set_title('Gramian Angular Difference Field', y=-0.09)
# Add colorbar
ax_cbar = fig.add_subplot(gs[1, 3])
fig.colorbar(im, cax=ax_cbar)
plt.show()
|
460178
|
from data.data_generation import generate_gaussian
from manifold_alignment.ssma import ManifoldAlignment
import numpy as np
from utils.classification_list import lda_pred, accuracy_stats
import matplotlib.pyplot as plt
# my test function to see if it works. Should be in the 80s range of
# accuracy
def test_ma_gaussian(ma_method='wang', n_components=2, plot=False):
# define some dictionaries with empty labeled lists
X ={}; Y={};
X['label'] = []; X['unlabel'] = []; X['test'] = []
Y['label'] = []; Y['unlabel'] = []; Y['test'] = []
# assign labels from gaussian dataset
X1, X2, XT1, XT2, \
Y1, Y2, YT1, YT2, \
U1, U2 = generate_gaussian(plot_data=plot)
# create appropriate data structures based off of
# the manifold alignment class criteria
X['label'] = [X1, X2]
X['unlabel'] = [U1, U2]
X['test'] = [XT1, XT2]
Y['label'] = [Y1 , Y2]
Y['test'] = [YT1, YT2]
print(np.shape(X['label'][0]), np.shape(Y['label'][0]))
print(np.shape(X['unlabel'][0]))
print(np.shape(X['test'][0]), np.shape(Y['test'][0]))
print(np.shape(X['label'][1]), np.shape(Y['label'][1]))
print(np.shape(X['unlabel'][1]))
print(np.shape(X['test'][1]), np.shape(Y['test'][1]))
ma_method = ManifoldAlignment(ma_method=ma_method,
lap_method='personal')
ma_method.fit(X,Y)
Xproj = ma_method.transform(X, n_components=2)
Y['pred'] = lda_pred(Xproj['train'],
Xproj['test'],
Y['label'],
Y['test'])
Acc_stats = accuracy_stats(Y['pred'], Y['test'])
Lg = ma_method.L_g
Vs = ma_method.V_s
Vd = ma_method.V_d
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=3, ncols=1,
figsize=(10,10))
ax[0].spy(Lg, precision=1E-5, markersize=.2)
ax[0].set_title('Geometric Laplacian')
ax[1].spy(Vs, precision=1E-5, markersize=.2)
ax[1].set_title('Similarity Potential')
ax[2].spy(Vd, precision=1E-5, markersize=.2)
ax[2].set_title('Dissimilarity Potential')
plt.show()
print('AA - Domain 1: {s}'.format(s=Acc_stats['AA'][0]))
print('AA - Domain 2: {s}'.format(s=Acc_stats['AA'][1]))
if __name__ == "__main__":
test_ma_gaussian(ma_method='ssma', n_components=3, plot=True)
|
460186
|
from typing import Dict, Optional
import attr
from terra_sdk.core import AccAddress, Coins, ValAddress
from ..params import APIParams
from ._base import BaseAsyncAPI, sync_bind
__all__ = ["AsyncDistributionAPI", "DistributionAPI", "Rewards"]
@attr.s
class Rewards:
rewards: Dict[ValAddress, Coins] = attr.ib()
"""Delegator rewards, indexed by validator operator address."""
total: Coins = attr.ib()
"""Total sum of rewards."""
class AsyncDistributionAPI(BaseAsyncAPI):
async def rewards(self, delegator: AccAddress, params: Optional[APIParams] = None) -> Rewards:
"""Fetches the staking reward data for a delegator.
Args:
delegator (AccAddress): delegator account address
params (APIParams): optional parameters
Returns:
Rewards: delegator rewards
"""
res = await self._c._get(
f"/cosmos/distribution/v1beta1/delegators/{delegator}/rewards",
params
)
return Rewards(
rewards={
item["validator_address"]: Coins.from_data(item["reward"] or [])
for item in res["rewards"]
},
total=Coins.from_data(res["total"]),
)
async def validator_commission(self, validator: ValAddress, params: Optional[APIParams] = None) -> Coins:
"""Fetches the commission reward data for a validator.
Args:
validator (ValAddress): validator operator address
params (APIParams): optional parameters
Returns:
ValidatorCommission: validator rewards
"""
res = await self._c._get(
f"/cosmos/distribution/v1beta1/validators/{validator}/commission",
params
)
commission = res["commission"]
return Coins.from_data(commission["commission"])
async def withdraw_address(self, delegator: AccAddress, params: Optional[APIParams] = None) -> AccAddress:
"""Fetches the withdraw address associated with a delegator.
Args:
delegator (AccAddress): delegator account address
params (APIParams): optional parameters
Returns:
AccAddress: withdraw address
"""
res = await self._c._get(
f"/cosmos/distribution/v1beta1/delegators/{delegator}/withdraw_address",
params
)
return res.get("withdraw_address")
async def community_pool(self, params: Optional[APIParams] = None) -> Coins:
"""Fetches the community pool.
Args:
params (APIParams): optional parameters
Returns:
Coins: community pool
"""
        res = await self._c._get("/cosmos/distribution/v1beta1/community_pool", params)
return Coins.from_data(res.get("pool"))
async def parameters(self, params: Optional[APIParams] = None) -> dict:
"""Fetches the Distribution module parameters.
Args:
params (APIParams): optional parameters
Returns:
dict: Distribution module parameters
"""
res = await self._c._get("/cosmos/distribution/v1beta1/params", params)
return res.get("params")
class DistributionAPI(AsyncDistributionAPI):
@sync_bind(AsyncDistributionAPI.rewards)
def rewards(self, delegator: AccAddress, params: Optional[APIParams] = None) -> Rewards:
pass
rewards.__doc__ = AsyncDistributionAPI.rewards.__doc__
@sync_bind(AsyncDistributionAPI.validator_commission)
def validator_commission(self, validator: ValAddress, params: Optional[APIParams] = None) -> Coins:
pass
validator_commission.__doc__ = AsyncDistributionAPI.validator_commission.__doc__
@sync_bind(AsyncDistributionAPI.withdraw_address)
def withdraw_address(self, delegator: AccAddress, params: Optional[APIParams] = None) -> AccAddress:
pass
withdraw_address.__doc__ = AsyncDistributionAPI.withdraw_address.__doc__
@sync_bind(AsyncDistributionAPI.community_pool)
def community_pool(self, params: Optional[APIParams] = None) -> Coins:
pass
community_pool.__doc__ = AsyncDistributionAPI.community_pool.__doc__
@sync_bind(AsyncDistributionAPI.parameters)
def parameters(self, params: Optional[APIParams] = None) -> dict:
pass
parameters.__doc__ = AsyncDistributionAPI.parameters.__doc__
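# --- Usage sketch (illustrative only) ---
# A minimal example of building the Rewards container above by hand, e.g. in
# tests. The validator address and coin amounts below are made-up placeholders;
# the only assumption is that Coins.from_data (already used in this module)
# accepts the standard {"denom", "amount"} list format returned by the LCD
# endpoints.
if __name__ == "__main__":
    sample = [{"denom": "uluna", "amount": "1000000"}]
    fake = Rewards(
        rewards={"terravaloper1...": Coins.from_data(sample)},
        total=Coins.from_data(sample),
    )
    print(fake.total)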
460201
import json
from rgd.views import _SpatialDetailView
from . import models
class FMVMetaDetailView(_SpatialDetailView):
model = models.FMVMeta
def _get_extent(self, object):
extent = super()._get_extent(object)
if object.ground_union is not None:
# All or none of these will be set, only check one
extent['collect'] = object.ground_union.json
extent['ground_frames'] = object.ground_frames.json
extent['frame_numbers'] = object._blob_to_array(object.frame_numbers)
return extent
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['frame_rate'] = json.dumps(self.object.fmv_file.frame_rate)
return context
460203
import os
import random
import decimal
import string
import hashlib
import sha3
import pyqrcode
from deprecated import deprecated
# Number of PIP in 1 BIP
PIP = 1000000000000000000
# Prefixes
PREFIX_ADDR = 'Mx'
PREFIX_PUBKEY = 'Mp'
PREFIX_CHECK = 'Mc'
PREFIX_TX = 'Mt'
@deprecated("Use 'to_bip', 'to_pip' shortcuts or MinterHelper methods")
class MinterConvertor:
"""
Class contains different converters
"""
# PIP in BIP
DEFAULT = 1000000000000000000
@classmethod
def convert_value(cls, value, to, prec=33):
"""
Convert values from/to pip/bip.
Args:
value (string|int|Decimal|float): value to convert
to (string): coin to convert value to
prec (int): decimal context precision (decimal number length)
Returns:
int|Decimal
"""
# Get default decimal context
context = decimal.getcontext()
# Set temporary decimal context for calculation
decimal.setcontext(
decimal.Context(prec=prec, rounding=decimal.ROUND_DOWN)
)
# PIP in BIP in Decimal
default = decimal.Decimal(str(cls.DEFAULT))
# Value in Decimal
value = decimal.Decimal(str(value))
# Make conversion
if to == 'pip':
value = int(value * default)
elif to == 'bip':
value /= default
# Reset decimal context to default
decimal.setcontext(context)
return value
@classmethod
def encode_coin_name(cls, symbol):
"""
Add nulls to coin name
Args:
symbol (string): coin symbol
Returns:
string
"""
return symbol + chr(0) * (10 - len(symbol))
@classmethod
def decode_coin_name(cls, symbol):
"""
Args:
symbol (bytes|str)
Returns:
string
"""
if hasattr(symbol, 'decode'):
symbol = symbol.decode()
return symbol.replace(chr(0), '')
class MinterHelper:
"""
Class which contains different helpers
"""
@staticmethod
def keccak_hash(data, digest_bits=256):
"""
Create Keccak hash.
Args:
data (bytes)
digest_bits (int)
Returns:
hex (string)
"""
if digest_bits == 256:
khash = sha3.keccak_256()
else:
raise NotImplementedError
khash.update(data)
return khash.hexdigest()
@staticmethod
@deprecated('Unnecessary method')
def hex2bin(string):
return bytes.fromhex(string)
@classmethod
def hex2bin_recursive(cls, _dict):
"""
Recursively convert hexdigit dict values to bytes.
Args:
_dict (dict)
Returns:
dict
"""
def ctype_xdigit(s):
"""
Checks if all of the characters in "s" are hexadecimal 'digits'.
Args:
s (string): string to check
"""
return all(c in string.hexdigits for c in s)
for k, v in _dict.items():
if type(v) == dict:
cls.hex2bin_recursive(v)
elif type(v) == str and ctype_xdigit(v):
try:
_dict[k] = bytes.fromhex(v)
except ValueError:
pass
return _dict
@staticmethod
@deprecated('Unnecessary method')
def bin2hex(bts):
return bts.hex()
@staticmethod
@deprecated('Unnecessary method')
def bin2int(number):
return int.from_bytes(number, 'big')
@staticmethod
def get_validator_address(pub_key, upper=True):
"""
        Get validator address from its pub key (Mp...).
Validator address is used in signing blocks.
Args:
pub_key (string): candidate public key (Mp....)
upper (bool)
Returns:
string, validator address
"""
pub_key = bytes.fromhex(MinterHelper.prefix_remove(pub_key))
vaddress = hashlib.sha256(pub_key).hexdigest()[:40]
return vaddress.upper() if upper else vaddress
@staticmethod
def generate_qr(text, fn=None, path='', error='H', version=None, mode=None,
output='svg', module_color='black', background='white',
quiet_zone=4):
"""
Generate QR code from text and save to file.
Detailed documentation for `pyqrcode` package can be found
here: https://pythonhosted.org/PyQRCode/index.html
Args:
text (str): Text, that should be encoded to QR
fn (str): Filename for generated QR.
If not provided random filename is generated.
path (str): Path to save generate QR
error (str|int): The error parameter sets the error correction
level of the code.
Each level has an associated name given by a
letter: L, M, Q, or H;
each level can correct up to 7, 15, 25, or 30
percent of the data respectively.
version (int): The version parameter specifies the size and data
capacity of the code.
Versions are any integer between 1 and 40
mode (str): The mode param sets how the contents will be encoded.
Three of the four possible encodings are available.
By default, the object uses the most efficient
encoding for the contents. You can override this
behavior by setting this parameter.
output (str): Render modes. Available: text|terminal|svg.
In `text`|`terminal` modes QR code is printed,
`svg` mode saves QR code to file `fn` to path `path`.
module_color (str): String color of QR code data.
Is used only for `terminal` and `svg` modes.
background (str): String color of QR code background.
Is used only for `terminal` and `svg` modes.
quiet_zone (int): QR code quiet zone.
Returns:
fnpath (str): Path to generated QR
"""
# Generate QR code object
qrcode = pyqrcode.create(content=text, error=error, version=version,
mode=mode)
# Render QR code depending on `output` param
if output == 'text':
print(qrcode.text(quiet_zone=quiet_zone))
elif output == 'terminal':
print(
qrcode.terminal(
module_color=module_color, background=background,
quiet_zone=quiet_zone
)
)
elif output == 'svg':
# Generate filename, if not provided
if not fn:
fn = text + str(random.randint(10000, 99999))
fn = hashlib.sha256(fn.encode()).hexdigest()[:10]
fnpath = os.path.join(path, fn + '.svg')
# Save QR code to file
qrcode.svg(file=fnpath, module_color=module_color,
background=background, quiet_zone=quiet_zone)
return fnpath
else:
raise Exception('Wrong QR code render mode')
@staticmethod
def bytes_len(value, encoding='utf-8'):
"""
Count bytes length
Args:
value (str|bytes)
encoding (str)
"""
if type(value) is str:
value = bytes(value, encoding=encoding)
return len(value)
@staticmethod
def encode_coin_name(symbol):
"""
Add nulls to coin name
Args:
symbol (string): coin symbol
Returns:
string
"""
return symbol + chr(0) * (10 - len(symbol))
@staticmethod
def decode_coin_name(symbol):
"""
Args:
symbol (bytes|str)
Returns:
string
"""
if hasattr(symbol, 'decode'):
symbol = symbol.decode()
return symbol.replace(chr(0), '')
@staticmethod
def to_pip(value):
"""
Convert BIPs to PIPs.
Always cast value to str, due to float behaviour:
Decimal(0.1) = Decimal('0.10000000000004524352345234')
Decimal('0.1') = Decimal('0.1')
Args:
value (str|float|int|Decimal): value in BIP
Returns:
int
"""
return int(decimal.Decimal(str(value)) * decimal.Decimal(PIP))
@staticmethod
def to_bip(value):
"""
Convert PIPs to BIPs.
Use dynamic Decimal precision, depending on value length.
Args:
value (int|str|Decimal): value in PIP
Returns:
Decimal
"""
# Check if value is correct PIP value
value = str(value)
if not value.isdigit():
raise ValueError(f'{value} is not correct PIP value')
# Get default decimal context
context = decimal.getcontext()
# Set temporary decimal context for calculation
decimal.setcontext(
decimal.Context(prec=len(value), rounding=decimal.ROUND_DOWN)
)
# Convert value
value = decimal.Decimal(value) / decimal.Decimal(PIP)
# Reset decimal context to default
decimal.setcontext(context)
return value
@staticmethod
def prefix_add(value, prefix):
if prefix not in [PREFIX_ADDR, PREFIX_PUBKEY, PREFIX_CHECK, PREFIX_TX]:
raise ValueError(f"Unknown prefix '{prefix}'")
return prefix + value
@staticmethod
def prefix_remove(value):
value = value.replace(PREFIX_ADDR, '')
value = value.replace(PREFIX_PUBKEY, '')
value = value.replace(PREFIX_CHECK, '')
value = value.replace(PREFIX_TX, '')
return value
@deprecated("Deprecated. Use 'MinterHelper' class instead")
class MinterPrefix:
"""
Class with minter prefixes and operations with them.
"""
# Minter wallet address prefix
ADDRESS = 'Mx'
# Minter public key prefix
PUBLIC_KEY = 'Mp'
# Minter redeem check prefix
CHECK = 'Mc'
# Minter transaction prefix
TRANSACTION = 'Mt'
@staticmethod
def remove_prefix(string, prefix):
return string.replace(prefix, '')
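# A small self-contained demo of the helpers above; it relies only on the
# functions and constants defined in this module, so it can be run directly.
if __name__ == '__main__':
    # BIP <-> PIP conversion (1 BIP == 10**18 PIP)
    pip_value = MinterHelper.to_pip('1.5')
    print(pip_value)                           # 1500000000000000000
    print(MinterHelper.to_bip(pip_value))      # 1.5
    # Coin symbols are padded with NUL bytes to a fixed length of 10
    padded = MinterHelper.encode_coin_name('BIP')
    print(len(padded), MinterHelper.decode_coin_name(padded))  # 10 BIP
    # Prefix helpers simply add/strip the 'Mx'/'Mp'/'Mc'/'Mt' markers
    addr = MinterHelper.prefix_add('a' * 40, PREFIX_ADDR)
    print(addr, MinterHelper.prefix_remove(addr) == 'a' * 40)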
460214
import eel
eel.init('client')
@eel.expose
def PYejecutarScript():
    # The script content is received and stored in the variable x to be executed later
texto="ejecutar query"
print('Ejecutando query ')
return texto
@eel.expose
def PYAbrirArchivo(x):
    # The file path is received in the variable x
f = open (x,'r')
mensaje = f.read()
contenidoQuery=mensaje
f.close()
print('Query abierto')
return contenidoQuery
@eel.expose
def PYguardarArchivo(x,y):
    # The file path is received in the variable x
    # and the query content in y,
    # then the file is overwritten
file = open(x, "w")
file.write(y)
file.close()
print('Guardado')
return y
@eel.expose
def PYcrearBD():
contenidoBD="DB"
print('Se ha creado la base de datos')
return contenidoBD
@eel.expose
def PYcrearTabla():
    # The table name is stored in the variable x
contenidoTabla="tabla"
print('Se ha creado la tabla')
return contenidoTabla
eel.start('main.html', size=(1024, 768))
#let input = document.querySelector("input[name='abrir']");
#
# let textarea = document.querySelector("textarea[name='query1']");
460224
import os
import unittest
from unittest import TestCase
import pandas as pd
from tests.test_utils import create_train_test_prod_split, split
from uq360.utils.utils import UseTransformer
from uq360.algorithms.blackbox_metamodel.short_text_classification import ShortTextClassificationWrapper
import numpy as np
import logging
import tensorflow as tf
tf.get_logger().setLevel(logging.ERROR)
@unittest.skip("too long")
class TestShortTextClassification(TestCase):
def _generate_mock_data(self, n_samples, n_classes, n_features):
from sklearn.datasets import make_classification
return make_classification(n_samples=n_samples, n_features=n_features, n_classes=n_classes,
n_informative=n_features, n_redundant=0, random_state=42, class_sep=10)
# Note, this test is expected to take some time (about 2 mins)
def test_short_text_predictor(self):
x, y = self.get_text_data()
x_train, y_train, x_test, y_test, x_prod, y_prod = create_train_test_prod_split(x, y)
obj = UseTransformer()
x_train_encoded = obj.transform(X=x_train)
x_test_encoded = obj.transform(X=x_test)
x_prod_encoded = obj.transform(X=x_prod)
x = np.concatenate((x_train_encoded, x_test_encoded, x_prod_encoded), axis=0)
y = np.concatenate((y_train, y_test, y_prod), axis=0)
# use the base model and grab the top confidence for every data point that we have
model = self.train_model(x_train_encoded, y_train)
x_proba = model.predict_proba(x)
confs_sorted = np.sort(x_proba)
top_confs = confs_sorted[:, -1]
# find the median
median = np.median(top_confs)
# create two buckets
less_than_median = np.where(top_confs < median)
greater_than_median = np.where(top_confs >= median)
x_train_new, y_train_new, x_test_new, y_test_new, prod_test_data, prod_test_label = split(x, y,
less_than_median,
greater_than_median,
0.3)
# train a new model using the training data created in the previous step
model_trained_on_conf_based_split = self.train_model(x_train_new, y_train_new)
# acc on test data
acc = model_trained_on_conf_based_split.score(x_test_new, y_test_new)
# acc on prod data
acc = model_trained_on_conf_based_split.score(prod_test_data, prod_test_label)
print("acc on prod", acc)
p1 = ShortTextClassificationWrapper(base_model=model_trained_on_conf_based_split)
p1.fit(x_train_new, y_train_new, x_test_new, y_test_new)
y_mean, y_pred, y_score = p1.predict(prod_test_data)
delta = abs(y_mean - acc * 100)
self.assertTrue(delta <= 5)
def train_model(self, x, y):
"""
returns model object
"""
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
model.fit(x, y)
return model
def get_text_data(self):
li_data = []
li_labels = []
li_len = []
local_file = os.path.abspath(
os.path.join(os.getcwd(), "..", "data", "text", "atis", "atis.train.w-intent.iob.csv"))
df = pd.read_csv(local_file, index_col=None, header=0)
li_data.append(df['example'])
li_labels.append(df['intent'])
frame = pd.concat(li_data, axis=0, ignore_index=True)
npdata = frame.to_numpy()
frame_labels = pd.concat(li_labels, axis=0, ignore_index=True)
npdata_labels = frame_labels.to_numpy()
return npdata, npdata_labels
if __name__ == '__main__':
unittest.main()
460230
import unittest
import json
from httmock import urlmatch, HTTMock, response
from six.moves.urllib.parse import unquote
from xunleipy.remote import XunLeiRemote
@urlmatch(netloc=r'(.*\.)?xunlei\.com')
def xunlei_mock(url, request):
headers = {}
content = {}
if url.path == '/check':
headers = {
'Set-Cookie': 'check_result=0:!tst;'
}
elif url.path == '/sec2login/':
headers = {
'Set-Cookie': 'userid=test1234;'
}
elif url.path == '/createTask':
body = request.body
body = unquote(body)[5:]
task_list = json.loads(body)['tasks']
tasks = []
for task in task_list:
tasks.append({
'name': task['name'],
'url': task['url'],
'taskid': 1,
'result': 0,
'msg': '',
'id': 1
})
content = {
'rtn': 0,
'tasks': tasks
}
elif url.path == '/del':
content = {
'rtn': 0,
'tasks': [
{'msg': '', 'id': '50', 'result': 0},
{'msg': '', 'id': '51', 'result': 0}
]
}
return response(200, content, headers, None, 5, request)
class LoginTest(unittest.TestCase):
def test_create_task_success(self):
with HTTMock(xunlei_mock):
u = 'testname'
p = '<PASSWORD>pass'
xlr = XunLeiRemote(u, p)
res = xlr.add_tasks_to_remote(
"8498352EB4F5208X0001",
task_list=[{
'url': 'ed2k://|file|=cu6wpmujve4rbsv4xdqd2r5ogkmgksgo|/',
'gcid': '',
'cid': '',
'name': '.WEB-HR.AAC.1024x576.x264.mkv',
'filesize': 514276262
}]
)
data = res
self.assertEqual(data['rtn'], 0)
self.assertTrue('taskid' in data['tasks'][0])
def test_create_tasks_by_urls_success(self):
with HTTMock(xunlei_mock):
u = 'testname'
p = '<PASSWORD>'
xlr = XunLeiRemote(u, p)
url_list = [
'ed2k://|file|=cu6wpmujve4rbsv4xdqd2r5ogkmgksgo|1234|as/',
'movietrailers.apple.com/movies/wb/prisoners/prisoners-tlr1_h720p.mov' # NOQA
]
res = xlr.add_urls_to_remote(
"8498352EB4F5208X0001",
url_list=url_list
)
data = res
self.assertEqual(data['rtn'], 0)
self.assertTrue('taskid' in data['tasks'][0])
for task in data['tasks']:
self.assertTrue(task['url'] in url_list)
class DeleteTestCase(unittest.TestCase):
def test_delete_tasks_by_task_infos(self):
with HTTMock(xunlei_mock):
u = 'testname'
p = '<PASSWORD>'
xlr = XunLeiRemote(u, p)
res = xlr.delete_tasks_by_task_infos(xlr.pid, [])
for data in res:
self.assertTrue('result' in data)
self.assertEqual(data['result'], 0)
def test_delete_all_tasks_in_recycle(self):
with HTTMock(xunlei_mock):
u = 'testname'
p = '<PASSWORD>'
xlr = XunLeiRemote(u, p)
res = xlr.delete_all_tasks_in_recycle(xlr.pid)
self.assertEqual(res['rtn'], 0)
self.assertTrue('tasks' in res)
for data in res['tasks']:
self.assertTrue('result' in data)
self.assertEqual(data['result'], 0)
if __name__ == '__main__':
unittest.main()
460250
import pytest
import clavier
@pytest.mark.parametrize(
"w1,w2,expected", [("kitten", "sitting", 3), ("saturday", "sunday", 3)]
)
def test_levenshtein(w1, w2, expected):
"""Tests word distance by assuming the distance between characters is always 1."""
keyboard = clavier.load_qwerty()
def mock_distance(c1, c2):
return 1 if c1 != c2 else 0
keyboard.char_distance = mock_distance
assert keyboard.word_distance(w1, w2) == expected
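# Illustrative follow-up (not a test): without the mocked char_distance, the
# qwerty layout makes substituting a neighbouring key cheaper than substituting
# a distant one, so these two distances are expected to differ. Values are
# printed rather than asserted because the exact weights depend on the clavier
# implementation.
if __name__ == "__main__":
    kb = clavier.load_qwerty()
    print(kb.word_distance("kitten", "kjtten"))  # 'i' -> 'j': neighbouring keys
    print(kb.word_distance("kitten", "kptten"))  # 'i' -> 'p': same row, further apart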
460271
import ipaddress
import logging
from django.db import models
from django.utils.safestring import mark_safe
from netfields import InetAddressField, NetManager
from peering.enums import BGPState, IPFamily
from peering.fields import TTLField
from utils.models import ChangeLoggedModel, TaggableModel
from .mixins import PolicyMixin
class AbstractGroup(ChangeLoggedModel, TaggableModel, PolicyMixin):
name = models.CharField(max_length=128)
slug = models.SlugField(unique=True, max_length=255)
comments = models.TextField(blank=True)
import_routing_policies = models.ManyToManyField(
"RoutingPolicy", blank=True, related_name="%(class)s_import_routing_policies"
)
export_routing_policies = models.ManyToManyField(
"RoutingPolicy", blank=True, related_name="%(class)s_export_routing_policies"
)
communities = models.ManyToManyField("Community", blank=True)
check_bgp_session_states = models.BooleanField(default=False)
bgp_session_states_update = models.DateTimeField(blank=True, null=True)
class Meta:
abstract = True
ordering = ["name", "slug"]
def export_policies(self):
return self.export_routing_policies.all()
def import_policies(self):
return self.import_routing_policies.all()
def get_peering_sessions_list_url(self):
raise NotImplementedError()
def get_peering_sessions(self):
raise NotImplementedError()
def poll_peering_sessions(self):
raise NotImplementedError()
class BGPSession(ChangeLoggedModel, TaggableModel, PolicyMixin):
"""
    Abstract class used to define common characteristics of BGP sessions.
A BGP session is always defined with the following fields:
* a unique service reference, blank or user defined
* an autonomous system, it can also be called a peer
* an IP address used to establish the session
* a plain text password
* an encrypted version of the password if the user asked for encryption
    * a TTL for multihop sessions
* an enabled or disabled status telling if the session should be
administratively up or down
* import routing policies to apply to prefixes sent by the remote device
    * export routing policies to apply to prefixes sent to the remote device
    * a BGP state giving the current operational state of the session (it will
      remain unknown if the session is disabled)
* a received prefix count (it will stay none if polling is disabled)
    * an advertised prefix count (it will stay none if polling is disabled)
* a date and time record of the last established state of the session
* comments that consist of plain text that can use the markdown format
"""
service_reference = models.CharField(
max_length=255,
unique=True,
blank=True,
null=True,
help_text="Optional internal service reference",
)
autonomous_system = models.ForeignKey("AutonomousSystem", on_delete=models.CASCADE)
ip_address = InetAddressField(store_prefix_length=False, verbose_name="IP address")
    password = models.CharField(max_length=255, blank=True, null=True)
encrypted_password = models.CharField(max_length=255, blank=True, null=True)
multihop_ttl = TTLField(
blank=True,
default=1,
verbose_name="Multihop TTL",
help_text="Use a value greater than 1 for BGP multihop sessions",
)
enabled = models.BooleanField(default=True)
import_routing_policies = models.ManyToManyField(
"RoutingPolicy", blank=True, related_name="%(class)s_import_routing_policies"
)
export_routing_policies = models.ManyToManyField(
"RoutingPolicy", blank=True, related_name="%(class)s_export_routing_policies"
)
bgp_state = models.CharField(
max_length=50, choices=BGPState.choices, blank=True, null=True
)
received_prefix_count = models.PositiveIntegerField(blank=True, default=0)
advertised_prefix_count = models.PositiveIntegerField(blank=True, default=0)
last_established_state = models.DateTimeField(blank=True, null=True)
comments = models.TextField(blank=True)
objects = NetManager()
logger = logging.getLogger("peering.manager.peering")
class Meta:
abstract = True
ordering = ["service_reference", "autonomous_system", "ip_address"]
def __str__(self):
return (
self.service_reference
or f"AS{self.autonomous_system.asn} - {self.ip_address}"
)
@property
def ip_address_version(self):
return ipaddress.ip_address(self.ip_address).version
def _merge_policies(self, merged_policies, new_policies):
for policy in new_policies:
# Only merge universal policies or policies of same IP family
if policy in merged_policies or policy.address_family not in (
IPFamily.ALL,
self.ip_address_version,
):
continue
merged_policies.append(policy)
return merged_policies
def export_policies(self):
return self.export_routing_policies.all()
def merged_export_policies(self, reverse=False):
merged = [p for p in self.export_policies()]
# Merge policies from nested objects (first AS, then BGP group)
self._merge_policies(merged, self.autonomous_system.export_policies())
group = None
if hasattr(self, "ixp_connection"):
group = self.ixp_connection.internet_exchange_point
else:
group = self.bgp_group
if group:
self._merge_policies(merged, group.export_policies())
return list(reversed(merged)) if reverse else merged
def import_policies(self):
return self.import_routing_policies.all()
def merged_import_policies(self, reverse=False):
# Get own policies
merged = [p for p in self.import_policies()]
# Merge policies from nested objects (first AS, then BGP group)
self._merge_policies(merged, self.autonomous_system.import_policies())
group = None
if hasattr(self, "ixp_connection"):
group = self.ixp_connection.internet_exchange_point
else:
group = self.bgp_group
if group:
self._merge_policies(merged, group.import_policies())
return list(reversed(merged)) if reverse else merged
def merged_communities(self):
merged = [c for c in self.autonomous_system.communities.all()]
group = None
if hasattr(self, "ixp_connection"):
group = self.ixp_connection.internet_exchange_point
else:
group = self.bgp_group
for c in group.communities.all():
if c not in merged:
merged.append(c)
return merged
def poll(self):
raise NotImplementedError
def get_bgp_state_html(self):
"""
Return an HTML element based on the BGP state.
"""
if self.bgp_state == BGPState.IDLE:
badge = "danger"
elif self.bgp_state in (BGPState.CONNECT, BGPState.ACTIVE):
badge = "warning"
elif self.bgp_state in (BGPState.OPENSENT, BGPState.OPENCONFIRM):
badge = "info"
elif self.bgp_state == BGPState.ESTABLISHED:
badge = "success"
else:
badge = "secondary"
return mark_safe(
f'<span class="badge badge-{badge}">{self.get_bgp_state_display() or "Unknown"}</span>'
)
def encrypt_password(self, commit=True):
"""
Sets the `encrypted_password` field if a crypto module is found for the given
platform. The field will be set to `None` otherwise.
Returns `True` if the encrypted password has been changed, `False` otherwise.
"""
try:
router = getattr(self, "router")
except AttributeError:
router = getattr(self.ixp_connection, "router", None)
if not router or not router.platform or not router.encrypt_passwords:
return False
if not self.password and self.encrypted_password:
self.encrypted_password = ""
if commit:
self.save()
return True
if not self.encrypted_password:
# If the password is not encrypted yet, do it
self.encrypted_password = router.platform.encrypt_password(self.password)
else:
# Try to re-encrypt the encrypted password, if the resulting string is the
# same it means the password matches the router platform algorithm
is_up_to_date = self.encrypted_password == router.platform.encrypt_password(
self.encrypted_password
)
if not is_up_to_date:
self.encrypted_password = router.platform.encrypt_password(
self.password
)
# Check if the encrypted password matches the clear one
        # Force re-encryption if there is a difference
if self.password != router.platform.decrypt_password(self.encrypted_password):
self.encrypted_password = router.platform.encrypt_password(self.password)
if commit:
self.save()
return True
class Template(ChangeLoggedModel, TaggableModel):
name = models.CharField(max_length=128)
template = models.TextField()
comments = models.TextField(blank=True)
class Meta:
abstract = True
ordering = ["name"]
def render(self, variables):
raise NotImplementedError()
def __str__(self):
return self.name
460300
import pybullet_data
import glob
import pybullet
import pybullet_utils.bullet_client as bc
import time
import numpy as np
from gym.utils import seeding
import gym
import os
import inspect
from myGym.envs.camera import Camera
import pkg_resources
currentdir = pkg_resources.resource_filename("myGym", "envs")
repodir = pkg_resources.resource_filename("myGym", "./")
class BaseEnv(gym.Env):
"""
The base class for environments without rendering
Parameters:
:param gui_on: (bool) Whether or not to use PyBullet built-in GUI
:param objects_dir_path: (str) Path to directory with URDF files for objects
:param max_steps: (int) The maximum number of actions per episode
:param show_bounding_boxes_gui: (bool) Whether or not to show bounding boxes in GUI
:param changing_light_gui: (bool) Whether or not to change light in GUI
:param shadows_on_gui: (bool) Whether or not to show shadows in GUI
"""
metadata = {'render.modes': [
'human', 'rgb_array'], 'video.frames_per_second': 50}
def __init__(self,
gui_on=True,
objects_dir_path=pkg_resources.resource_filename("myGym", "envs/"),
max_steps=1024,
show_bounding_boxes_gui=False,
changing_light_gui=False,
shadows_on_gui=True
):
self.gui_on = gui_on
self.max_steps = max_steps
self.show_bounding_boxes_gui = show_bounding_boxes_gui
self.changing_light_gui = changing_light_gui
self.shadows_on_gui = shadows_on_gui
# Set episode information
self.episode_start_time = None
self.episode_over = False
self.episode_failed = False
self.episode_reward = 0.0
self.episode_final_reward = []
self.episode_final_distance = []
self.episode_number = 0
self.episode_steps = 0
self.episode_max_time = 300
self.episode_info = ""
# Set general params
self.time_step = 1. / 240.
self.urdf_root = pybullet_data.getDataPath()
self.observation = {}
# Set objects information
self.objects_dir_path = objects_dir_path
self.env_objects = []
self.scene_objects_uids = {}
self.all_objects_filenames = self._get_all_urdf_filenames(self.objects_dir_path)
# Set GUI
self._connect_to_physics_server()
# Set env params and load models
self._set_physics()
self._setup_scene()
self._set_observation_space()
self._set_action_space()
def _connect_to_physics_server(self):
"""
Connect to the PyBullet physics server in SHARED_MEMORY, GUI or DIRECT mode
"""
if self.gui_on:
self.p = bc.BulletClient(connection_mode=pybullet.GUI)
# if (self.p < 0):
# self.p = bc.BulletClient(connection_mode=p.GUI)
self._set_gui_mode()
else:
self.p = bc.BulletClient(connection_mode=pybullet.DIRECT)
self.p.setPhysicsEngineParameter(enableFileCaching=0)
def _set_gui_mode(self):
"""
Set GUI parameters: camera, shadows, extra elements
"""
self.p.resetDebugVisualizerCamera(3.3, 0, -41, [0.0, 0.0, 0.33])
self.p.configureDebugVisualizer(self.p.COV_ENABLE_SHADOWS, self.shadows_on_gui)
self.p.configureDebugVisualizer(self.p.COV_ENABLE_GUI, 0)
def _set_physics(self):
"""
Set physics engine parameters
"""
self.p.setGravity(0, 0, -9.81)
self.p.setPhysicsEngineParameter(solverResidualThreshold=0.001, numSolverIterations=150, numSubSteps=20, useSplitImpulse=1, collisionFilterMode=1, constraintSolverType=self.p.CONSTRAINT_SOLVER_LCP_DANTZIG, globalCFM=0.000001, contactBreakingThreshold=0.001)
self.p.setTimeStep(self.time_step)
self.p.setRealTimeSimulation(0)
self.p.setPhysicsEngineParameter(enableConeFriction=1)
print(self.p.getPhysicsEngineParameters())
def _setup_scene(self):
"""
Set up scene elements (furniture, objects, robots)
"""
raise NotImplementedError
def _set_observation_space(self):
"""
Set limits of observations
"""
raise NotImplementedError
def _set_action_space(self):
"""
Set limits of actions
"""
raise NotImplementedError
def _get_observation(self):
"""
Get info about the state of the environment
Returns:
:return observation: (object) Observation of the environment
"""
raise NotImplementedError
def step(self, action):
"""
Apply action on the environment
Parameters:
:param action: (object) An action provided by the agent
Returns:
:return observation: (object)
:return reward: (float)
:return done: (bool):
:return info: (dict):
"""
raise NotImplementedError
def _add_scene_object_uid(self, scene_object_uid, name):
"""
        Call this method in order to enable texturization of an object
        Parameters:
        :param scene_object_uid: (int) UID of the scene object
        :param name: (str) Name to associate with the object
"""
self.scene_objects_uids[scene_object_uid] = name
def get_scene_object_uid_by_name(self, name):
for uid, object_name in self.scene_objects_uids.items():
if name == object_name:
return uid
return None
def seed(self, seed=None):
"""
Set the seed for this env's random number generator(s)
"""
self.np_random, seed = seeding.np_random(seed)
return [seed]
def hard_reset(self):
"""
Full reset of the simulation. Delete and load again all objects and reset physics.
"""
self.p.resetSimulation()
self.p.disconnect()
self._connect_to_physics_server()
self.scene_objects_uids = {}
#self.episode_number = 0
self._set_physics()
self._setup_scene()
def _restart_episode(self):
"""
Reset episode information and delete all objects
"""
self.p.removeAllUserDebugItems()
self.episode_start_time = time.time()
self.episode_over = False
self.episode_failed = False
self.episode_reward = 0.0
self.episode_steps = 0
def reset(self, hard=False):
"""
Reset the state of the environment
"""
if hard:
self.hard_reset()
else:
self._remove_all_objects()
self._restart_episode()
def _draw_bounding_boxes(self):
"""
        Show bounding boxes in the PyBullet GUI
"""
for object in self.env_objects:
object.draw_bounding_box()
def _compute_reward(self):
"""
Compute reward for the agent
"""
        raise NotImplementedError
def _print_episode_summary(self, info_dict={}):
"""
Show an extra information about the episode
Parameters:
:param info_dict: (dict) Extra info
"""
if self.episode_failed:
episode_status = "FAILURE"
else:
episode_status = "SUCCESS"
print("#---------Episode-Summary---------#")
print("Episode number: " + str(self.episode_number))
print("Episode's number of steps: " + str(self.episode_steps))
print("Episode status: " + episode_status)
print("Episode info: " + self.episode_info)
print("Episode reward: " + str(self.episode_reward))
print("Last step reward: " + str(self.reward.rewards_history[-1]))
print("#---------------------------------#")
for key, value in info_dict.items():
print(key + ": " + str(value))
def _get_random_urdf_filenames(self, n, used_objects=None):
"""
Sample random URDF files from directory with objects URDFs
Parameters:
:param n: (int) Number of URDF's
:param used_objects: (list) Specified subset of objects
Returns:
:return selected_objects_filenames: (list)
"""
if used_objects or (self.all_objects_filenames is None):
all_objects_filenames = []
for object_name in used_objects:
if "virtual" in object_name:
all_objects_filenames.append(object_name)
for file in self.all_objects_filenames:
if '/'+object_name+'.' in file:
all_objects_filenames.append(file)
        else:
            # Fall back to the full list of URDFs found in the objects directory
            all_objects_filenames = self.all_objects_filenames
assert all_objects_filenames is not None
selected_objects_filenames = []
total_num_objects = len(all_objects_filenames)
if (n <= total_num_objects):
selected_objects = np.random.choice(
np.arange(total_num_objects), n, replace=True)
else:
selected_objects = list(np.arange(total_num_objects))
remain = n - total_num_objects
selected_objects += list(np.random.choice(
np.arange(total_num_objects), remain))
for object_id in selected_objects:
selected_objects_filenames.append(all_objects_filenames[object_id])
return selected_objects_filenames
def _get_all_urdf_filenames(self, dir):
"""
Get all URDF filenames from directory
Parameters:
        :param dir: (str) Directory to search for URDF files
Returns:
:return filenames: (list)
"""
list_all = []
for (dirpath, dirnames, filenames) in os.walk(self.objects_dir_path):
if '_old' not in dirpath and 'urdf' in dirpath:
list_all += [os.path.join(dirpath, file) for file in filenames]
return list_all
def _remove_object(self, object):
"""
Totally remove object from the simulation
Parameters:
:param object: (EnvObject) Object to remove
"""
self.env_objects.remove(object)
self.p.removeBody(object.uid)
def _remove_all_objects(self):
"""
Remove all objects from simulation (not scene objects or robots)
"""
env_objects_copy = self.env_objects[:]
for env_object in env_objects_copy:
self._remove_object(env_object)
def get_texturizable_objects_uids(self):
"""
Get all objects in the environment, on which textures can be applied
Returns:
:return texturizable_objects_uids: (list)
"""
return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())
def get_colorizable_objects_uids(self):
"""
        Get all objects in the environment whose color can be changed
Returns:
:return colorizable_objects_uids: (list)
"""
return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())
def __del__(self):
"""
Disconnect from the physics server
"""
self.p.disconnect()
class CameraEnv(BaseEnv):
"""
The class for environments with rendering
Parameters:
:param camera_resolution: (list) The number of pixels in image (WxH)
:param shadows_on: (bool) Whether or not to use shadows while rendering, only applies to ER_TINY_RENDERER
:param render_on: (bool) Turn on rendering
:param renderer: (int) self.p.ER_TINY_RENDERER (CPU) or self.p.ER_BULLET_HARDWARE_OPENGL (GPU)
:param active_cameras: (list) Set 1 at a position(=camera number) to save images from this camera
"""
def __init__(self, camera_resolution=[640, 480], shadows_on=True,
render_on=True, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
active_cameras=None, **kwargs):
super(CameraEnv, self).__init__(**kwargs)
self.camera_resolution = camera_resolution
self.shadows_on = shadows_on
self.render_on = render_on
self.renderer = renderer
self.active_cameras = active_cameras
self.cameras = []
self.set_light()
self._set_cameras()
def set_light(self, light_direction=[1, 1, 1], light_color=[0.1, 0.1, 0.1],
light_distance=1., light_ambient=1., light_diffuse=1.,
light_specular=1.):
"""
        Set light parameters for rendering, doesn't affect PyBullet GUI. Apart from light_direction, all parameters only apply to ER_TINY_RENDERER.
Parameters:
:param light_direction: (list) Specifies the world position of the light source
:param light_color: (list) Directional light color in RGB in range 0..1
:param light_distance: (float) Distance of the light along the normalized light_direction
:param light_ambient: (float) Light ambient coefficient in range 0..1
:param light_diffuse: (float) Light diffuse coefficient in range 0..1
:param light_specular: (float) Light specular coefficient in range 0..1
"""
self.light_direction = light_direction
self.light_color = light_color
self.light_distance = light_distance
self.light_ambient = light_ambient
self.light_diffuse = light_diffuse
self.light_specular = light_specular
def get_render_parameters(self):
"""
        Return environment parameters for rendering; primarily intended to be
        used by cameras
Returns:
:return render_parameters: (dict) Render parameters
"""
return {
"width": self.camera_resolution[0],
"height": self.camera_resolution[1],
"lightDirection": self.light_direction,
"lightColor": self.light_color,
"lightDistance": self.light_distance,
"shadow": 1 if self.shadows_on else 0,
"lightAmbientCoeff": self.light_ambient,
"lightDiffuseCoeff": self.light_diffuse,
"lightSpecularCoeff": self.light_specular,
"renderer": self.renderer
}
def _set_cameras(self):
"""
Set cameras available to use for rendering
"""
raise NotImplementedError
def get_cameras(self):
return self.cameras
def add_camera(self, **kwargs):
"""
Add new camera to the environment
Parameters:
:param position: (list) Eye position in Cartesian world coordinates
        :param target_position: (list) Position of the target point
:param up_vector: (list) Up vector of the camera
:param up_axis_index: (int) Either 1 for Y or 2 for Z axis up
:param yaw: (float) Yaw angle in degrees left/right around up-axis
:param pitch: (float) Pitch in degrees up/down
:param roll: (float) Roll in degrees around forward vector
:param distance: (float) Distance from eye to focus point
:param field_of_view: (float) Field of view
:param near_plane_distance: (float) Near plane distance
:param far_plane_distance: (float) Far plane distance
"""
self.cameras.append(Camera(env=self, **kwargs))
def set_active_cameras(self, active_cameras):
if (len(active_cameras) == len(self.cameras)):
self.active_cameras = active_cameras
def change_current_camera(self, camera_num):
print("Change camera to " + str(self.current_camera))
self.current_camera = camera_num
def render(self, mode="rgb_array", camera_id=None):
"""
Get image (image, depth, segmentation_mask) from camera or active cameras
Parameters:
:param mode: (str) rgb_array to return RGB image
:param camera_id: (int) Get image from specified camera
Returns:
:return camera_data: (dict) Key: camera_id, Value: info from camera
"""
if mode != "rgb_array":
return np.array([])
camera_data = {}
if self.render_on:
if camera_id is not None:
camera_data[camera_id] = self.cameras[camera_id].render()
else:
for camera_num in range(len(self.active_cameras)):
if self.active_cameras[camera_num]:
camera_data[camera_num] = self.cameras[camera_num].render()
return camera_data
def project_point_to_camera_image(self, point, camera_id):
"""
Project 3D point in Cartesian world coordinates to 2D point in pixel space
Parameters:
:param point: (list) 3D point in Cartesian world coordinates
:param camera_id: (int) Index of camera to project on
Returns:
        :return 2d_point: (list) 2D coordinates of the point on the image
"""
return self.cameras[camera_id].project_point_to_image(point)
def get_camera_opencv_matrix_values(self, camera_id):
"""
Compute values of OpenCV matrix
Parameters:
:param camera_id: (int) Index of camera to get matrix from
Returns:
:return values: (dict) fx, fy, cx, cy values
"""
return self.cameras[camera_id].get_opencv_camera_matrix_values()
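# A minimal, illustrative subclass sketch showing which abstract hooks a
# concrete environment has to fill in. The plane URDF and the box
# observation/action bounds are placeholders, not part of myGym itself.
class _MinimalEnv(BaseEnv):
    def _setup_scene(self):
        # Load a ground plane shipped with pybullet_data as the whole "scene"
        uid = self.p.loadURDF(os.path.join(self.urdf_root, "plane.urdf"))
        self._add_scene_object_uid(uid, "floor")

    def _set_observation_space(self):
        self.observation_space = gym.spaces.Box(low=-10, high=10, shape=(3,))

    def _set_action_space(self):
        self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,))

    def _get_observation(self):
        return np.zeros(3)

    def step(self, action):
        self.episode_steps += 1
        done = self.episode_steps >= self.max_steps
        return self._get_observation(), 0.0, done, {}

# _MinimalEnv(gui_on=False) would connect through the DIRECT client and run headless.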