26472
|
import numpy as np
from ..local_interpolation import ThirdOrderHermitePolynomialInterpolation
from .runge_kutta import AbstractESDIRK, ButcherTableau
γ = 0.26
a21 = γ
a31 = 0.13
a32 = 0.84033320996790809
a41 = 0.22371961478320505
a42 = 0.47675532319799699
a43 = -0.06470895363112615
a51 = 0.16648564323248321
a52 = 0.10450018841591720
a53 = 0.03631482272098715
a54 = -0.13090704451073998
a61 = 0.13855640231268224
a62 = 0
a63 = -0.04245337201752043
a64 = 0.02446657898003141
a65 = 0.61943039072480676
a71 = 0.13659751177640291
a72 = 0
a73 = -0.05496908796538376
a74 = -0.04118626728321046
a75 = 0.62993304899016403
a76 = 0.06962479448202728
# Predictors taken from
# https://github.com/SciML/OrdinaryDiffEq.jl/blob/54fb35870fa402fc95d665cd5f9502e2759ea436/src/tableaus/sdirk_tableaus.jl#L1444 # noqa: E501
# https://github.com/SciML/OrdinaryDiffEq.jl/blob/54fb35870fa402fc95d665cd5f9502e2759ea436/src/perform_step/kencarp_kvaerno_perform_step.jl#L1123 # noqa: E501
# This is with the exception of α21, which is mistakenly set to zero.
#
# See also /devdocs/predictor_dirk.md
α21 = 1.0
α31 = -1.366025403784441
α32 = 2.3660254037844357
α41 = -0.19650552613122207
α42 = 0.8113579546496623
α43 = 0.38514757148155954
α51 = 0.10375304369958693
α52 = 0.937994698066431
α53 = -0.04174774176601781
α61 = -0.17281112873898072
α62 = 0.6235784481025847
α63 = 0.5492326806363959
α71 = a61
α72 = a62
α73 = a63
α74 = a64
α75 = a65
α76 = γ
_kvaerno5_tableau = ButcherTableau(
a_lower=(
np.array([a21]),
np.array([a31, a32]),
np.array([a41, a42, a43]),
np.array([a51, a52, a53, a54]),
np.array([a61, a62, a63, a64, a65]),
np.array([a71, a72, a73, a74, a75, a76]),
),
a_diagonal=np.array([0, γ, γ, γ, γ, γ, γ]),
a_predictor=(
np.array([α21]),
np.array([α31, α32]),
np.array([α41, α42, α43]),
np.array([α51, α52, α53, 0]),
np.array([α61, α62, α63, 0, 0]),
np.array([α71, α72, α73, α74, α75, α76]),
),
b_sol=np.array([a71, a72, a73, a74, a75, a76, γ]),
b_error=np.array(
[a71 - a61, a72 - a62, a73 - a63, a74 - a64, a75 - a65, a76 - γ, γ]
),
c=np.array(
[0.52, 1.230333209967908, 0.8957659843500759, 0.43639360985864756, 1.0, 1.0]
),
)
class Kvaerno5(AbstractESDIRK):
r"""Kvaerno's 5/4 method.
A-L stable stiffly accurate 5th order ESDIRK method. Has an embedded 4th order
method for adaptive step sizing. Uses 7 stages.
When solving an ODE over the interval $[t_0, t_1]$, note that this method will make
some evaluations slightly past $t_1$.
??? cite "Reference"
```bibtex
@article{kvaerno2004singly,
title={Singly diagonally implicit Runge--Kutta methods with an explicit first
stage},
author={Kv{\ae}rn{\o}, Anne},
journal={BIT Numerical Mathematics},
volume={44},
number={3},
pages={489--502},
year={2004},
publisher={Springer}
}
```
"""
tableau = _kvaerno5_tableau
interpolation_cls = ThirdOrderHermitePolynomialInterpolation.from_k
def order(self, terms):
return 5
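# Usage sketch (an assumption, not part of the original module): inside diffrax this
# solver is driven by `diffeqsolve` like any other, e.g.
#
#     import diffrax
#     term = diffrax.ODETerm(lambda t, y, args: -y)
#     sol = diffrax.diffeqsolve(
#         term, diffrax.Kvaerno5(), t0=0.0, t1=1.0, dt0=0.1, y0=1.0,
#         stepsize_controller=diffrax.PIDController(rtol=1e-5, atol=1e-5),
#     )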
|
26481
|
from django.utils.translation import ugettext_lazy as _
SHIPPING_STANDARD = 'Standard'
SHIPPING_EXPEDITED = 'Expedited'
SHIPPING_PRIORITY = 'Priority'
SHIPPING_SPEED_CATEGORIES = (
(SHIPPING_STANDARD, _("Standard")),
(SHIPPING_EXPEDITED, _("Expedited")),
(SHIPPING_PRIORITY, _("Priority")),
)
METHOD_CONSUMER = 'Consumer'
METHOD_REMOVAL = 'Removal'
FULFILLMENT_METHODS = (
(METHOD_CONSUMER, _("Consumer")),
(METHOD_REMOVAL, _("Removal")),
)
FILL_OR_KILL = 'FillOrKill'
FILL_ALL = 'FillAll'
FILL_ALL_AVAILABLE = 'FillAllAvailable'
class MwsFulfillmentError(Exception):
pass
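# Usage sketch (assumption): these tuples are intended as `choices` for Django model
# fields, e.g.
#
#     shipping_speed = models.CharField(
#         max_length=32,
#         choices=SHIPPING_SPEED_CATEGORIES,
#         default=SHIPPING_STANDARD,
#     )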
|
26499
|
import unittest
import torch
import numpy as np
from spectralgp.samplers import MeanEllipticalSlice
class TestMeanEllipticalSlice(unittest.TestCase):
def test_m_ess(self, nsamples=10000):
pmean = torch.zeros(2)
pmean[0] = -2.
prior_dist = torch.distributions.MultivariateNormal(pmean, covariance_matrix=torch.eye(2))
lmean = torch.zeros(2)
lmean[0] = 2.
likelihood = torch.distributions.MultivariateNormal(lmean, covariance_matrix=torch.eye(2))
prior_inv = torch.inverse(prior_dist.covariance_matrix)
lik_inv = torch.inverse(likelihood.covariance_matrix)
true_postsigma = torch.inverse(prior_inv + lik_inv)
true_postmu = true_postsigma.matmul(prior_inv.matmul(pmean) + lik_inv.matmul(lmean))
def lfn(x):
lmean = torch.zeros(2)
lmean[0] = 2.
likelihood = torch.distributions.MultivariateNormal(lmean, covariance_matrix=torch.eye(2))
return likelihood.log_prob(x)
#lfn = lambda x: likelihood.log_prob(x)
init = torch.zeros(2)
m_ess_runner = MeanEllipticalSlice(init, prior_dist, lfn, nsamples)
samples, _ = m_ess_runner.run()
samples = samples.numpy()
samples = samples[:, int(nsamples/2):]
est_mean = np.mean(samples,1)
print(est_mean)
est_cov = np.cov(samples)
print(np.linalg.norm(est_mean - true_postmu.numpy()))
print(np.linalg.norm(est_cov - true_postsigma.numpy()))
# import matplotlib.pyplot as plt
# N = 60
# X = np.linspace(-3, 3, N)
# Y = np.linspace(-3, 4, N)
# X, Y = np.meshgrid(X, Y)
# # Pack X and Y into a single 3-dimensional array
# pos = np.empty(X.shape + (2,))
# pos[:, :, 0] = X
# pos[:, :, 1] = Y
# pos = torch.tensor(pos).float()
# posterior_dist = torch.distributions.MultivariateNormal(true_postmu, true_postsigma)
# Z = posterior_dist.log_prob(pos).numpy()
# plt.contourf(X, Y, Z)
# plt.scatter(samples[0,:], samples[1,:], color='black', alpha = 0.3)
# plt.show()
if __name__ == "__main__":
unittest.main()
|
26532
|
from mongoengine import StringField, EmailField, BooleanField
from flask_login import UserMixin
import requests
import json
from mongoengine import Document
from social.apps.flask_app.me.models import FlaskStorage
class User(Document, UserMixin):
username = StringField(max_length=200)
password = StringField(max_length=200, default='')
name = StringField(max_length=100)
fullname = StringField(max_length=100)
first_name = StringField(max_length=100)
last_name = StringField(max_length=100)
email = EmailField()
active = BooleanField(default=True)
def facebook_api(self, url, fields=None):
params = {
'access_token': self.get_social_auth("facebook").extra_data['access_token']
}
if fields:
params["fields"] = ",".join(fields)
res = requests.get(url, params=params)
if res.status_code != 200:
raise Exception("Status was %s" % res.status_code)
return json.loads(res.content)
def get_facebook_albums(self):
return self.facebook_api("https://graph.facebook.com/v2.2/me/albums", fields=["id", "name"])["data"]
def get_facebook_photos(self, album_id):
photos = []
url = "https://graph.facebook.com/v2.2/%s/photos" % album_id
while url:
ret = self.facebook_api(url, fields=[
"id", "created_time", "from", "height", "width", "name", "source"
])
photos += ret["data"]
url = ret.get("paging", {}).get("next")
return photos
def get_social_auth(self, provider):
return FlaskStorage.user.get_social_auth_for_user(self, provider=provider).get()
def is_active(self):
return self.active
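# Usage sketch (assumption): for a user whose Facebook account is linked via
# python-social-auth, the Graph API helpers compose like
#
#     user = User.objects.first()
#     albums = user.get_facebook_albums()
#     photos = user.get_facebook_photos(albums[0]["id"])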
|
26534
|
from twisted.internet import reactor, task
class CounterManager(object):
counters = []
@classmethod
def add_counter(cls, counter):
cls.counters.append(counter)
    @classmethod
    def has_active_counters(cls):
        # Returns True only once every registered counter has finished counting down.
        return all([not c.is_active for c in cls.counters])
class Counter(object):
def __init__(self, name, between_time, counter=5):
self.name = name
self.between_time = between_time
self.counter = counter
self.is_active = True
CounterManager.add_counter(self)
def start(self):
self.loop_handler = task.LoopingCall(self.count)
self.loop_handler.start(self.between_time)
def count(self):
if self.counter == 0:
self.is_active = False
self.loop_handler.stop()
if CounterManager.has_active_counters():
                print('No counters active. Stopping!')
reactor.stop()
else:
            print(self.name + ':', self.counter)
self.counter -= 1
print('Start')
Counter('1', 0.5).start()
Counter('2', 1).start()
Counter('3', 0.1).start()
reactor.run()
|
26573
|
import struct
import os
current_pe = None
class PE:
"""Basic PE parsing.
Ref:
- https://hshrzd.wordpress.com/pe-bear/
- https://blog.kowalczyk.info/articles/pefileformat.html
"""
X86_64 = 0x8664
X86_32 = 0x14c
ARM = 0x1c0
ARM64 = 0xaa64
ARMNT = 0x1c4
AM33 = 0x1d3
IA64 = 0x200
EFI = 0xebc
MIPS = 0x166
MIPS16 = 0x266
MIPSFPU = 0x366
MIPSFPU16 = 0x466
WCEMIPSV2 = 0x169
POWERPC = 0x1f0
POWERPCFP = 0x1f1
SH3 = 0x1a2
SH3DSP = 0x1a3
SH4 = 0x1a6
SH5 = 0x1a8
THUMP = 0x1c2
RISCV32 = 0x5032
RISCV64 = 0x5064
RISCV128 = 0x5128
M32R = 0x9041
dos_magic = b'MZ'
ptr_to_pe_header = None
pe_magic = b'PE'
machine = X86_32
num_of_sections = None
size_of_opt_header = None
dll_charac = None
opt_magic = b'\x02\x0b'
entry_point = None
base_of_code = None
image_base = None
def __init__(self, pe=""):
if not os.access(pe, os.R_OK):
err("'{0}' not found/readable".format(pe))
err("Failed to get file debug information, most of gef features will not work")
return
with open(pe, "rb") as fd:
# off 0x0
self.dos_magic = fd.read(2)
if self.dos_magic != PE.dos_magic:
self.machine = None
return
# off 0x3c
fd.seek(0x3c)
self.ptr_to_pe_header, = struct.unpack("<I", fd.read(4))
# off_pe + 0x0
fd.seek(self.ptr_to_pe_header)
self.pe_magic = fd.read(2)
# off_pe + 0x4
fd.seek(self.ptr_to_pe_header + 0x4)
self.machine, self.num_of_sections = struct.unpack("<HH", fd.read(4))
# off_pe + 0x14
fd.seek(self.ptr_to_pe_header + 0x14)
self.size_of_opt_header, self.dll_charac = struct.unpack("<HH", fd.read(4))
# off_pe + 0x18
self.opt_magic = fd.read(2)
# off_pe + 0x28
fd.seek(self.ptr_to_pe_header + 0x28)
self.entry_point, self.base_of_code = struct.unpack("<II", fd.read(8))
# off_pe + 0x30
self.image_base, = struct.unpack("<I", fd.read(4))
return
def is_valid(self):
        return self.dos_magic == PE.dos_magic and self.pe_magic == PE.pe_magic
def get_machine_name(self):
return {
0x14c: "X86",
0x166: "MIPS",
0x169: "WCEMIPSV2",
0x1a2: "SH3",
0x1a3: "SH3DSP",
0x1a6: "SH4",
0x1a8: "SH5",
0x1c0: "ARM",
0x1c2: "THUMP",
0x1c4: "ARMNT",
0x1d3: "AM33",
0x1f0: "PowerPC",
0x1f1: "PowerPCFP",
0x200: "IA64",
0x266: "MIPS16",
0x366: "MIPSFPU",
0x466: "MIPSFPU16",
0xebc: "EFI",
0x5032: "RISCV32",
0x5064: "RISCV64",
0x5128: "RISCV128",
0x8664: "X86_64",
0x9041: "M32R",
0xaa64: "ARM64",
None: None
}[self.machine]
@lru_cache()
def get_pe_headers(filename=None):
"""Return an PE object with info from `filename`. If not provided, will return
the currently debugged file."""
if filename is None:
filename = get_filepath()
if filename.startswith("target:"):
warn("Your file is remote, you should try using `gef-remote` instead")
return
return PE(filename)
@lru_cache()
def is_pe64(filename=None):
"""Checks if `filename` is an PE64."""
pe = current_pe or get_pe_headers(filename)
return pe.machine == PE.X86_64
@lru_cache()
def is_pe32(filename=None):
"""Checks if `filename` is an PE32."""
pe = current_pe or get_pe_headers(filename)
return pe.machine == PE.X86_32
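# Usage sketch (assumption): outside of GEF the parser can be exercised directly on a
# PE file on disk, e.g.
#
#     pe = PE("C:/Windows/System32/notepad.exe")
#     if pe.is_valid():
#         print(pe.get_machine_name(), hex(pe.entry_point))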
|
26591
|
import hashlib
import logging
import os
import shutil
import traceback
from contextlib import closing
from pywb.utils.loaders import BlockLoader
from webrecorder.rec.storage.base import BaseStorage
from webrecorder.rec.storage.storagepaths import add_local_store_prefix, strip_prefix
logger = logging.getLogger('wr.io')
# ============================================================================
class DirectLocalFileStorage(BaseStorage):
"""Webrecorder storage (local files)."""
def __init__(self):
"""Initialize Webrecorder storage."""
super(DirectLocalFileStorage, self).__init__(os.environ['STORAGE_ROOT'])
def delete_collection_dir(self, dir_path):
"""Delete collection directory.
:param str dir_path: directory path
:returns: whether successful or not
:rtype: bool
"""
local_dir = os.path.join(self.storage_root, dir_path)
try:
logger.debug('Local Store: Deleting Directory: ' + local_dir)
parent_dir = os.path.dirname(local_dir)
shutil.rmtree(local_dir)
os.removedirs(parent_dir)
return True
except Exception as e:
if e.errno != 2:
logger.error(str(e))
return False
def do_upload(self, target_url, full_filename):
"""Upload file into local file storage.
:param str target_url: target URL
:param str full_filename: path
:returns: whether successful or not
:rtype: bool
"""
os.makedirs(os.path.dirname(target_url), exist_ok=True)
try:
if full_filename != target_url:
shutil.copyfile(full_filename, target_url)
else:
logger.debug('Local Store: Same File, No Upload')
return True
except Exception as e:
logger.error(str(e))
return False
def is_valid_url(self, target_url):
"""Return whether given target URL is an existing file.
:param str target_url: target URL
:returns: whether given target URL is an existing file
:rtype: bool
"""
return os.path.isfile(target_url)
def get_client_url(self, target_url):
"""Get client URL.
:param str target_url: target URL
:returns: client URL
:rtype: str
"""
return add_local_store_prefix(target_url.replace(os.path.sep, '/'))
def client_url_to_target_url(self, client_url):
"""Get target URL (from client URL).
:param str client URL: client URL
:returns: target URL
:rtype: str
"""
return strip_prefix(client_url)
def do_delete(self, target_url, client_url):
"""Delete file from storage.
:param str target_url: target URL
:returns: whether successful or not
:rtype: bool
"""
try:
logger.debug('Local Store: Deleting: ' + target_url)
os.remove(target_url)
# if target_url.startswith(self.storage_root):
# os.removedirs(os.path.dirname(target_url))
return True
except Exception as e:
if e.errno != 2:
logger.error(str(e))
return False
# ============================================================================
class LocalFileStorage(DirectLocalFileStorage):
"""Webrecorder storage w/ Redis interface (local files).
:ivar StrictRedis redis: Redis interface
"""
def __init__(self, redis):
"""Initialize Webrecorder storage w/ Redis interface.
:param StrictRedis redis: Redis interface
"""
self.redis = redis
super(LocalFileStorage, self).__init__()
### BEGIN PERMA CUSTOMIZATIONS
### First pass at https://github.com/harvard-lil/perma/issues/2614
def delete_collection(self, collection):
"""Delete collection.
:param collection: collection
:type: n.s.
:returns: whether successful or not
:rtype: bool
"""
path = collection.get_dir_path()
if path:
try:
dirpath = os.path.join(self.storage_root, path)
return (self.redis.publish('handle_delete_dir', dirpath) > 0)
except Exception:
logger.error("Failed attempt to delete collection {}".format(collection), exc_info=True)
return False
return False
### END PERMA CUSTOMIZATIONS
def do_delete(self, target_url, client_url):
"""Delete file.
:param str target_url: target URL
:param str client_url: client URL (unused argument)
:returns: whether successful or not
:rtype: bool
"""
return self.redis.publish('handle_delete_file', target_url) > 0
def get_checksum_and_size(self, filepath_or_url):
"""Returns the checksum of the supplied URL or filepath and the size of the resource
:param str filepath_or_url: The URL or filepath to the resource that the checksum and size is desired for
:return: A three tuple containing the kind of checksum, the checksum itself, and size
:rtype: tuple[str|None, str|None, int|None]
"""
m = hashlib.md5()
amount = 1024 * 1024
total_size = 0
with closing(BlockLoader().load(filepath_or_url)) as f:
while True:
chunk = f.read(amount)
chunk_size = len(chunk)
if chunk_size == 0:
break
total_size += chunk_size
m.update(chunk)
return 'md5', m.hexdigest(), total_size
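# Usage sketch (assumption): computing a checksum through the direct storage backend
# (requires the STORAGE_ROOT environment variable to be set), e.g.
#
#     storage = DirectLocalFileStorage()
#     kind, digest, size = storage.get_checksum_and_size('/tmp/example.warc.gz')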
|
26625
|
from .dictionary import BertDictionary
from .text_planning_dataset import TextPlanningDataset
__all__ = [
'BertDictionary',
'TextPlanningDataset',
]
|
26634
|
import numpy as np
import pandas as pd
import seaborn as sns
from nninst.backend.tensorflow.model import AlexNet
from nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity import (
alexnet_imagenet_inter_class_similarity_frequency,
)
from nninst.op import Conv2dOp, DenseOp
np.random.seed(0)
sns.set()
threshold = 0.5
frequency = int(500 * 0.1)
label = "import"
variant = None
base_name = f"alexnet_imagenet_inter_class_similarity_frequency_{frequency}"
cmap = "Greens"
same_class_similarity = []
diff_class_similarity = []
layer_names = []
layers = AlexNet.graph().load().ops_in_layers(Conv2dOp, DenseOp)
for layer_name in [
None,
*layers,
]:
similarity = alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer_name
).load()
same_class_similarity.append(
np.mean(similarity[np.eye(similarity.shape[0], dtype=bool)])
)
diff_class_similarity.append(
np.mean(
similarity[
np.tri(similarity.shape[0], similarity.shape[1], k=-1, dtype=bool)
]
)
)
if layer_name is None:
file_name = base_name
layer_names.append("All")
else:
file_name = base_name + "_" + layer_name[: layer_name.index("/")]
layer_names.append(layer_name[: layer_name.index("/")])
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
for layer_name, similarity in zip(
["avg", "first_half", "second_half"],
[
np.mean(
[
alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer
).load()
for layer in layers
],
axis=0,
),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[:len(layers) // 2]], axis=0),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[len(layers) // 2:]], axis=0),
],
):
file_name = base_name + "_" + layer_name
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
summary_df = pd.DataFrame(
{
"Same Class": same_class_similarity,
"Diff Class": diff_class_similarity,
"Layer": layer_names,
}
)
summary_df.to_csv(f"{base_name}_summary.csv", index=False)
|
26658
|
from django import template
import mistune
register = template.Library()
@register.filter
def markdown(value):
markdown = mistune.Markdown()
return markdown(value)
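# Usage sketch (assumption): in a template, after loading this tag library (the load
# name depends on this module's filename),
#
#     {% load markdown %}
#     {{ post.body|markdown }}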
|
26694
|
from parsel import Selector
from utils import (
download,
remove_big_whitespaces_selector,
find_id_in_url,
catch_errors,
get_last_part_url,
)
from data import VideoContent, GalleryContent, ImageContent, Meme, Author, Page
import re
ROOT = "https://m.demotywatory.pl"
def scrap(url):
html = download(url)
return parse(html)
def parse(html):
document = Selector(text=html)
memes = [
catch_errors(parse_meme, element) for element in document.css(".demotivator")
]
memes = [meme for meme in memes if meme is not None]
title = document.css("title::text").get()
next_page_url = "/demotywatory/page/" + get_last_part_url(
document.css("a.next-page::attr(href)").get()
)
return Page(title, memes, next_page_url)
def parse_gallery(html):
title = html.css("a::text").get()
url = html.css("a::attr(href)").get()
slides = []
gallery_html = download(ROOT + url)
gallery_page_document = Selector(text=gallery_html)
for slide_element in gallery_page_document.css(".rsSlideContent"):
slide = slide_element.css("img::attr(src)").get()
slides = slides + [slide]
next_gallery_page_url = gallery_page_document.css(
".gall_next_page > a::attr(href)"
).get()
while next_gallery_page_url is not None:
gallery_html = download(ROOT + url + next_gallery_page_url)
gallery_page_document = Selector(text=gallery_html)
for slide_element in gallery_page_document.css(".rsSlideContent"):
slide = slide_element.css("img::attr(src)").get()
slides = slides + [slide]
next_gallery_page_url = gallery_page_document.css(
".gall_next_page > a::attr(href)"
).get()
slides = [slide for slide in slides if slide is not None]
return (title, url, GalleryContent(slides), None)
def parse_content(html):
clazz = html.attrib["class"]
if "image_gallery" in clazz:
return parse_gallery(html)
elif "image" in clazz or "image_gif" in clazz:
image = html.css("img.demot_pic")
title = image.attrib["alt"]
src = image.attrib["src"].replace("//upl", "/upl")
url = html.css("a::attr(href)").get()
return (title, url, ImageContent(src), None)
elif "video_mp4" in clazz:
src = html.css("source::attr(src)").get().replace("//upl", "/upl")
title = html.css(".demot_title::text").get()
description = html.css(".demot_description::text").get()
url = html.css("a::attr(href)").get()
return (title, url, VideoContent(src), description)
return (None, None, None, None)
def parse_meme(m):
title, url, content, description = parse_content(m)
if url is None:
return
points = None
points_text = m.css(".up_votes::text").get()
    try:
        points = int(points_text)
    except (TypeError, ValueError):
        pass
comment_count = None
comments_count_text = m.css(".demot-comments a::text").get()
    try:
        comment_count = int(comments_count_text)
    except (TypeError, ValueError):
        pass
return Meme(
title,
ROOT + url,
"/demotywatory/{}".format(find_id_in_url(url)),
content,
None,
None,
points,
comment_count,
)
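# Usage sketch (assumption): a listing page is scraped into a Page of Meme objects, e.g.
#
#     page = scrap(ROOT)   # parses the front page of m.demotywatory.pl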
|
26706
|
import sys
import os
import argparse
import numpy as np
parser = argparse.ArgumentParser(
description="""Command-line bin abundance estimator.
Print the median RPKM abundance for each bin in each sample to STDOUT.
Will read the RPKM file into memory - beware.""",
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
parser.add_argument('rpkmpath', help='Path to RPKM file')
parser.add_argument('clusterspath', help='Path to clusters.tsv')
parser.add_argument('headerpath', help='Path to list of headers')
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
# Check files
for infile in (args.rpkmpath, args.clusterspath, args.headerpath):
if not os.path.isfile(infile):
raise FileNotFoundError(infile)
# Load Vamb
sys.path.append('../vamb')
import vamb
# Load in files
with open(args.headerpath) as file:
indexof = {line.strip():i for i,line in enumerate(file)}
with open(args.clusterspath) as file:
clusters = vamb.vambtools.read_clusters(file)
# Check that all clusters names are in headers:
for cluster in clusters.values():
for header in cluster:
if header not in indexof:
raise KeyError("Header not found in headerlist: {}".format(header))
# Load RPKM and check it
rpkm = vamb.vambtools.read_npz(args.rpkmpath)
nsamples = rpkm.shape[1]
if len(indexof) != len(rpkm):
raise ValueError("Not the same number of headers as rows in RPKM file")
# Now estimate abundances
for clustername, cluster in clusters.items():
depths = np.empty((len(cluster), nsamples), dtype=np.float32)
for row, header in enumerate(cluster):
index = indexof[header]
depths[row] = rpkm[index]
median_depths = np.median(depths, axis=0)
print(clustername, end='\t')
print('\t'.join([str(i) for i in median_depths]))
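# Usage sketch (assumption; the script name is hypothetical):
#
#     python estimate_bin_abundance.py rpkm.npz clusters.tsv headers.txt > abundances.tsv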
|
26707
|
from pupa.scrape import Scraper
from pupa.scrape import Event
import lxml.html
from datetime import datetime
import pytz
DUPLICATE_EVENT_URLS = ('http://miamidade.gov/wps/Events/EventDetail.jsp?eventID=445731',
'http://miamidade.gov/wps/Events/EventDetail.jsp?eventID=452515',
'http://miamidade.gov/wps/Events/EventDetail.jsp?eventID=452513')
class MiamidadeEventScraper(Scraper):
def lxmlize(self, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
return doc
def scrape(self):
local_timezone = pytz.timezone("US/Eastern")
base_calendar_url = "http://www.miamidade.gov/cob/county-commission-calendar.asp"
#things get messy more than a few months out
#so we're just pulling 3 months. If we want three
#more, they are called "nxx", "nxy" and "nxz"
months = ["cur","nex","nxw"]
for m in months:
doc = self.lxmlize(base_calendar_url + "?next={}".format(m))
events = doc.xpath("//table[contains(@style,'dotted #ccc')]")
for event in events:
rows = event.xpath(".//tr")
for row in rows:
heading, data = row.xpath(".//td")
h = heading.text_content().lower().replace(":","").strip()
if h == "event":
title = data.text_content()
link = data.xpath(".//a")[0].attrib["href"]
elif h == "event date":
                        when = datetime.strptime(data.text, '%m/%d/%y %I:%M%p')
when = local_timezone.localize(when)
elif h == "location":
where = data.text
elif h == "description":
description = data.text
if link in DUPLICATE_EVENT_URLS:
continue
if title == "Mayor's FY 2016-17 Proposed Budget Public Meeting":
continue
if not description:
description = ""
status = "confirmed"
if "cancelled" in title.lower():
status = "cancelled"
e = Event(name=title,
start_time=when,
timezone="US/Eastern",
location_name=where,
description=description,
status=status)
e.add_source(link)
yield e
e = Event(name="Mayor's FY 2016-17 Proposed Budget Public Meeting",
                  start_time=local_timezone.localize(datetime.strptime('08/08/16 06:00PM', '%m/%d/%y %I:%M%p')),
timezone="US/Eastern",
location_name='111 NW 1st Street',
description='Pursuant to Section 2-1800A of the County Code, a Public Meeting has been scheduled by the Honorable <NAME>, Mayor, Miami-Dade County, to discuss the FY 2016-17 budget, tax rates, and fee changes.',
status='confirmed')
e.add_source('http://miamidade.gov/wps/Events/EventDetail.jsp?eventID=447192')
yield e
|
26714
|
from datetime import datetime, date
from marqeta.response_models.address_response_model import AddressResponseModel
from marqeta.response_models.identification_response_model import IdentificationResponseModel
from marqeta.response_models import datetime_object
import json
import re
class BusinessProprietorResponseModel(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def first_name(self):
return self.json_response.get('first_name', None)
@property
def middle_name(self):
return self.json_response.get('middle_name', None)
@property
def last_name(self):
return self.json_response.get('last_name', None)
@property
def alternative_names(self):
return self.json_response.get('alternative_names', None)
@property
def title(self):
return self.json_response.get('title', None)
@property
def home(self):
if 'home' in self.json_response:
return AddressResponseModel(self.json_response['home'])
@property
def ssn(self):
return self.json_response.get('ssn', None)
@property
def dob(self):
if 'dob' in self.json_response:
return datetime_object('dob', self.json_response)
@property
def phone(self):
return self.json_response.get('phone', None)
@property
def email(self):
return self.json_response.get('email', None)
@property
def identifications(self):
if 'identifications' in self.json_response:
return [IdentificationResponseModel(val) for val in self.json_response['identifications']]
def __repr__(self):
return '<Marqeta.response_models.business_proprietor_response_model.BusinessProprietorResponseModel>' + self.__str__()
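# Usage sketch (assumption): the model simply wraps a decoded API payload, e.g.
#
#     proprietor = BusinessProprietorResponseModel({'first_name': 'Jane', 'last_name': 'Doe'})
#     proprietor.first_name   # -> 'Jane'
#     proprietor.home         # -> None (key absent)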
|
26757
|
import logging
import sys
from datetime import datetime
import requests
from tqdm import tqdm
import log
from datamodel.advisory import AdvisoryRecord
from datamodel.commit import Commit
from filtering.filter import filter_commits
from git.git import GIT_CACHE, Git
from git.version_to_tag import get_tag_for_version
from log.util import init_local_logger
# from processing.commit.feature_extractor import extract_features
from processing.commit.preprocessor import preprocess_commit
from ranking.rank import rank
from ranking.rules import apply_rules
# from util.profile import profile
from stats.execution import Counter, ExecutionTimer, execution_statistics
_logger = init_local_logger()
SECS_PER_DAY = 86400
TIME_LIMIT_BEFORE = 3 * 365 * SECS_PER_DAY
TIME_LIMIT_AFTER = 180 * SECS_PER_DAY
MAX_CANDIDATES = 1000
core_statistics = execution_statistics.sub_collection("core")
# @profile
def prospector( # noqa: C901
vulnerability_id: str,
repository_url: str,
publication_date: str = "",
vuln_descr: str = "",
tag_interval: str = "",
version_interval: str = "",
modified_files: "list[str]" = [],
code_tokens: "list[str]" = [],
time_limit_before: int = TIME_LIMIT_BEFORE,
time_limit_after: int = TIME_LIMIT_AFTER,
use_nvd: bool = False,
nvd_rest_endpoint: str = "",
backend_address: str = "",
git_cache: str = GIT_CACHE,
limit_candidates: int = MAX_CANDIDATES,
active_rules: "list[str]" = ["ALL"],
model_name: str = "",
) -> "list[Commit]":
_logger.info("begin main commit and CVE processing")
# -------------------------------------------------------------------------
# advisory record extraction
# -------------------------------------------------------------------------
advisory_record = AdvisoryRecord(
vulnerability_id=vulnerability_id,
repository_url=repository_url,
description=vuln_descr,
from_nvd=use_nvd,
nvd_rest_endpoint=nvd_rest_endpoint,
)
_logger.pretty_log(advisory_record)
advisory_record.analyze(use_nvd=use_nvd)
_logger.info(f"{advisory_record.code_tokens=}")
if publication_date != "":
advisory_record.published_timestamp = int(
datetime.strptime(publication_date, r"%Y-%m-%dT%H:%M%z").timestamp()
)
if len(code_tokens) > 0:
advisory_record.code_tokens += tuple(code_tokens)
# drop duplicates
advisory_record.code_tokens = list(set(advisory_record.code_tokens))
# FIXME this should be handled better (or '' should not end up in the modified_files in
# the first place)
if modified_files != [""]:
advisory_record.paths += modified_files
_logger.info(f"{advisory_record.code_tokens=}")
# print(advisory_record.paths)
# -------------------------------------------------------------------------
# retrieval of commit candidates
# -------------------------------------------------------------------------
with ExecutionTimer(
core_statistics.sub_collection(name="retrieval of commit candidates")
):
_logger.info(
"Downloading repository {} in {}..".format(repository_url, git_cache)
)
repository = Git(repository_url, git_cache)
repository.clone()
tags = repository.get_tags()
_logger.debug(f"Found tags: {tags}")
_logger.info("Done retrieving %s" % repository_url)
prev_tag = None
following_tag = None
if tag_interval != "":
prev_tag, following_tag = tag_interval.split(":")
elif version_interval != "":
vuln_version, fixed_version = version_interval.split(":")
prev_tag = get_tag_for_version(tags, vuln_version)[0]
following_tag = get_tag_for_version(tags, fixed_version)[0]
since = None
until = None
if advisory_record.published_timestamp:
since = advisory_record.published_timestamp - time_limit_before
until = advisory_record.published_timestamp + time_limit_after
candidates = repository.get_commits(
since=since,
until=until,
ancestors_of=following_tag,
exclude_ancestors_of=prev_tag,
filter_files="*.java",
)
_logger.info("Found %d candidates" % len(candidates))
# if some code_tokens were found in the advisory text, require
# that candidate commits touch some file whose path contains those tokens
# NOTE: this works quite well for Java, not sure how general this criterion is
# -------------------------------------------------------------------------
# commit filtering
#
# Here we apply additional criteria to discard commits from the initial
# set extracted from the repository
# # -------------------------------------------------------------------------
# if advisory_record.code_tokens != []:
# _logger.info(
# "Detected tokens in advisory text, searching for files whose path contains those tokens"
# )
# _logger.info(advisory_record.code_tokens)
# if modified_files == [""]:
# modified_files = advisory_record.code_tokens
# else:
# modified_files.extend(advisory_record.code_tokens)
# candidates = filter_by_changed_files(candidates, modified_files, repository)
with ExecutionTimer(core_statistics.sub_collection(name="commit filtering")):
candidates = filter_commits(candidates)
_logger.debug(f"Collected {len(candidates)} candidates")
if len(candidates) > limit_candidates:
_logger.error(
"Number of candidates exceeds %d, aborting." % limit_candidates
)
_logger.error(
"Possible cause: the backend might be unreachable or otherwise unable to provide details about the advisory."
)
sys.exit(-1)
# -------------------------------------------------------------------------
# commit preprocessing
# -------------------------------------------------------------------------
with ExecutionTimer(
core_statistics.sub_collection(name="commit preprocessing")
) as timer:
raw_commit_data = dict()
missing = []
try:
# Exploit the preprocessed commits already stored in the backend
# and only process those that are missing. Note: the endpoint
# does not exist (yet)
r = requests.get(
backend_address
+ "/commits/"
+ repository_url
+ "?commit_id="
+ ",".join(candidates)
)
_logger.info("The backend returned status '%d'" % r.status_code)
if r.status_code != 200:
_logger.error("This is weird...Continuing anyway.")
missing = candidates
else:
raw_commit_data = r.json()
_logger.info(
"Found {} preprocessed commits".format(len(raw_commit_data))
)
except requests.exceptions.ConnectionError:
_logger.error(
"Could not reach backend, is it running? The result of commit pre-processing will not be saved.",
exc_info=log.config.level < logging.WARNING,
)
missing = candidates
preprocessed_commits: "list[Commit]" = []
for idx, commit in enumerate(raw_commit_data):
if (
commit
): # None results are not in the DB, collect them to missing list, they need local preprocessing
preprocessed_commits.append(Commit.parse_obj(commit))
else:
missing.append(candidates[idx])
_logger.info("Preprocessing commits...")
first_missing = len(preprocessed_commits)
pbar = tqdm(missing)
with Counter(
timer.collection.sub_collection(name="commit preprocessing")
) as counter:
counter.initialize("preprocessed commits", unit="commit")
for commit_id in pbar:
counter.increment("preprocessed commits")
preprocessed_commits.append(
preprocess_commit(repository.get_commit(commit_id))
)
_logger.pretty_log(advisory_record)
_logger.debug(f"preprocessed {len(preprocessed_commits)} commits")
payload = [c.__dict__ for c in preprocessed_commits[first_missing:]]
# -------------------------------------------------------------------------
# save preprocessed commits to backend
# -------------------------------------------------------------------------
with ExecutionTimer(
core_statistics.sub_collection(name="save preprocessed commits to backend")
):
_logger.info("Sending preprocessing commits to backend...")
try:
r = requests.post(backend_address + "/commits/", json=payload)
_logger.info(
"Saving to backend completed (status code: %d)" % r.status_code
)
except requests.exceptions.ConnectionError:
_logger.error(
"Could not reach backend, is it running?"
"The result of commit pre-processing will not be saved."
"Continuing anyway.....",
exc_info=log.config.level < logging.WARNING,
)
# TODO compute actual rank
# This can be done by a POST request that creates a "search" job
# whose inputs are the AdvisoryRecord, and the repository URL
# The API returns immediately indicating a job id. From this
# id, a URL can be constructed to poll the results asynchronously.
# ranked_results = [repository.get_commit(c) for c in preprocessed_commits]
# -------------------------------------------------------------------------
# analyze candidates by applying rules and ML predictor
# -------------------------------------------------------------------------
with ExecutionTimer(
core_statistics.sub_collection(name="analyze candidates")
) as timer:
_logger.info("Extracting features from commits...")
# annotated_candidates = []
# with Counter(timer.collection.sub_collection("commit analysing")) as counter:
# counter.initialize("analyzed commits", unit="commit")
# # TODO remove "proactive" invocation of feature extraction
# for commit in tqdm(preprocessed_commits):
# counter.increment("analyzed commits")
# annotated_candidates.append(extract_features(commit, advisory_record))
annotated_candidates = apply_rules(
preprocessed_commits, advisory_record, active_rules=active_rules
)
annotated_candidates = rank(annotated_candidates, model_name=model_name)
return annotated_candidates, advisory_record
# def filter_by_changed_files(
# candidates: "list[str]", modified_files: "list[str]", git_repository: Git
# ) -> list:
# """
# Takes a list of commit ids in input and returns in output the list
# of ids of the commits that modify at least one path that contains one of the strings
# in "modified_files"
# """
# modified_files = [f.lower() for f in modified_files if f != ""]
# if len(modified_files) == 0:
# return candidates
# filtered_candidates = []
# if len(modified_files) != 0:
# for commit_id in candidates:
# commit_obj = git_repository.get_commit(commit_id)
# commit_changed_files = commit_obj.get_changed_files()
# for ccf in commit_changed_files:
# for f in modified_files:
# ccf = ccf.lower()
# if f in ccf:
# # if f in [e.lower() for e in ccf]:
# # print(f, commit_obj.get_id())
# filtered_candidates.append(commit_obj.get_id())
# return list(set(filtered_candidates))
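# Usage sketch (assumption):
#
#     results, record = prospector(
#         vulnerability_id="CVE-2020-1925",
#         repository_url="https://github.com/apache/olingo-odata4",
#         use_nvd=True,
#     )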
|
26765
|
import copy
from troposphere import (
Ref, FindInMap, Not, Equals, And, Condition, Join, ec2, autoscaling,
If, GetAtt, Output
)
from troposphere import elasticloadbalancing as elb
from troposphere.autoscaling import Tag as ASTag
from troposphere.route53 import RecordSetType
from stacker.blueprints.base import Blueprint
from stacker.blueprints.variables.types import TroposphereType
from stacker.blueprints.variables.types import (
CFNCommaDelimitedList,
CFNNumber,
CFNString,
EC2VPCId,
EC2KeyPairKeyName,
EC2SecurityGroupId,
EC2SubnetIdList,
)
CLUSTER_SG_NAME = "%sSG"
ELB_SG_NAME = "%sElbSG"
ELB_NAME = "%sLoadBalancer"
class AutoscalingGroup(Blueprint):
VARIABLES = {
'VpcId': {'type': EC2VPCId, 'description': 'Vpc Id'},
'DefaultSG': {'type': EC2SecurityGroupId,
'description': 'Top level security group.'},
'BaseDomain': {
'type': CFNString,
'default': '',
'description': 'Base domain for the stack.'},
'PrivateSubnets': {'type': EC2SubnetIdList,
'description': 'Subnets to deploy private '
'instances in.'},
'PublicSubnets': {'type': EC2SubnetIdList,
'description': 'Subnets to deploy public (elb) '
'instances in.'},
'AvailabilityZones': {'type': CFNCommaDelimitedList,
'description': 'Availability Zones to deploy '
'instances in.'},
'InstanceType': {'type': CFNString,
'description': 'EC2 Instance Type',
'default': 'm3.medium'},
'MinSize': {'type': CFNNumber,
'description': 'Minimum # of instances.',
'default': '1'},
'MaxSize': {'type': CFNNumber,
'description': 'Maximum # of instances.',
'default': '5'},
'SshKeyName': {'type': EC2KeyPairKeyName},
'ImageName': {
'type': CFNString,
'description': 'The image name to use from the AMIMap (usually '
'found in the config file.)'},
'ELBHostName': {
'type': CFNString,
'description': 'A hostname to give to the ELB. If not given '
'no ELB will be created.',
'default': ''},
'ELBCertName': {
'type': CFNString,
'description': 'The SSL certificate name to use on the ELB.',
'default': ''},
'ELBCertType': {
'type': CFNString,
'description': 'The SSL certificate type to use on the ELB.',
'default': ''},
}
def create_conditions(self):
self.template.add_condition(
"CreateELB",
Not(Equals(Ref("ELBHostName"), "")))
self.template.add_condition(
"SetupDNS",
Not(Equals(Ref("BaseDomain"), "")))
self.template.add_condition(
"UseSSL",
Not(Equals(Ref("ELBCertName"), "")))
self.template.add_condition(
"CreateSSLELB",
And(Condition("CreateELB"), Condition("UseSSL")))
self.template.add_condition(
"SetupELBDNS",
And(Condition("CreateELB"), Condition("SetupDNS")))
self.template.add_condition(
"UseIAMCert",
Not(Equals(Ref("ELBCertType"), "acm")))
def create_security_groups(self):
t = self.template
asg_sg = CLUSTER_SG_NAME % self.name
elb_sg = ELB_SG_NAME % self.name
t.add_resource(ec2.SecurityGroup(
asg_sg,
GroupDescription=asg_sg,
VpcId=Ref("VpcId")))
# ELB Security group, if ELB is used
t.add_resource(
ec2.SecurityGroup(
elb_sg,
GroupDescription=elb_sg,
VpcId=Ref("VpcId"),
Condition="CreateELB"))
# Add SG rules here
# Allow ELB to connect to ASG on port 80
t.add_resource(ec2.SecurityGroupIngress(
"%sElbToASGPort80" % self.name,
IpProtocol="tcp", FromPort="80", ToPort="80",
SourceSecurityGroupId=Ref(elb_sg),
GroupId=Ref(asg_sg),
Condition="CreateELB"))
# Allow Internet to connect to ELB on port 80
t.add_resource(ec2.SecurityGroupIngress(
"InternetTo%sElbPort80" % self.name,
IpProtocol="tcp", FromPort="80", ToPort="80",
CidrIp="0.0.0.0/0",
GroupId=Ref(elb_sg),
Condition="CreateELB"))
t.add_resource(ec2.SecurityGroupIngress(
"InternetTo%sElbPort443" % self.name,
IpProtocol="tcp", FromPort="443", ToPort="443",
CidrIp="0.0.0.0/0",
GroupId=Ref(elb_sg),
Condition="CreateSSLELB"))
def setup_listeners(self):
no_ssl = [elb.Listener(
LoadBalancerPort=80,
Protocol='HTTP',
InstancePort=80,
InstanceProtocol='HTTP'
)]
# Choose proper certificate source
acm_cert = Join("", [
"arn:aws:acm:", Ref("AWS::Region"), ":", Ref("AWS::AccountId"),
":certificate/", Ref("ELBCertName")])
iam_cert = Join("", [
"arn:aws:iam::", Ref("AWS::AccountId"), ":server-certificate/",
Ref("ELBCertName")])
cert_id = If("UseIAMCert", iam_cert, acm_cert)
with_ssl = copy.deepcopy(no_ssl)
with_ssl.append(elb.Listener(
LoadBalancerPort=443,
InstancePort=80,
Protocol='HTTPS',
InstanceProtocol="HTTP",
SSLCertificateId=cert_id))
listeners = If("UseSSL", with_ssl, no_ssl)
return listeners
def create_load_balancer(self):
t = self.template
elb_name = ELB_NAME % self.name
elb_sg = ELB_SG_NAME % self.name
t.add_resource(elb.LoadBalancer(
elb_name,
HealthCheck=elb.HealthCheck(
Target='HTTP:80/',
HealthyThreshold=3,
UnhealthyThreshold=3,
Interval=5,
Timeout=3),
Listeners=self.setup_listeners(),
SecurityGroups=[Ref(elb_sg), ],
Subnets=Ref("PublicSubnets"),
Condition="CreateELB"))
# Setup ELB DNS
t.add_resource(
RecordSetType(
'%sDnsRecord' % elb_name,
# Appends a '.' to the end of the domain
HostedZoneName=Join("", [Ref("BaseDomain"), "."]),
Comment='Router ELB DNS',
Name=Join('.', [Ref("ELBHostName"), Ref("BaseDomain")]),
Type='CNAME',
TTL='120',
ResourceRecords=[
GetAtt(elb_name, 'DNSName')],
Condition="SetupELBDNS"))
def get_launch_configuration_parameters(self):
return {
'ImageId': FindInMap('AmiMap', Ref("AWS::Region"),
Ref('ImageName')),
'InstanceType': Ref("InstanceType"),
'KeyName': Ref("SshKeyName"),
'SecurityGroups': self.get_launch_configuration_security_groups(),
}
def get_autoscaling_group_parameters(self, launch_config_name, elb_name):
return {
'AvailabilityZones': Ref("AvailabilityZones"),
'LaunchConfigurationName': Ref(launch_config_name),
'MinSize': Ref("MinSize"),
'MaxSize': Ref("MaxSize"),
'VPCZoneIdentifier': Ref("PrivateSubnets"),
'LoadBalancerNames': If("CreateELB", [Ref(elb_name), ], []),
'Tags': [ASTag('Name', self.name, True)],
}
def get_launch_configuration_security_groups(self):
sg_name = CLUSTER_SG_NAME % self.name
return [Ref("DefaultSG"), Ref(sg_name)]
def create_autoscaling_group(self):
name = "%sASG" % self.name
launch_config = "%sLaunchConfig" % name
elb_name = ELB_NAME % self.name
t = self.template
t.add_resource(autoscaling.LaunchConfiguration(
launch_config,
**self.get_launch_configuration_parameters()
))
t.add_resource(autoscaling.AutoScalingGroup(
name,
**self.get_autoscaling_group_parameters(launch_config, elb_name)
))
def create_template(self):
self.create_conditions()
self.create_security_groups()
self.create_load_balancer()
self.create_autoscaling_group()
class FlexibleAutoScalingGroup(Blueprint):
""" A more flexible AutoscalingGroup Blueprint.
Uses TroposphereTypes to make creating AutoscalingGroups and their
associated LaunchConfiguration more flexible. This comes at the price of
doing less for you.
"""
VARIABLES = {
"LaunchConfiguration": {
"type": TroposphereType(autoscaling.LaunchConfiguration),
"description": "The LaunchConfiguration for the autoscaling "
"group.",
},
"AutoScalingGroup": {
"type": TroposphereType(autoscaling.AutoScalingGroup),
"description": "The Autoscaling definition. Do not provide a "
"LaunchConfiguration parameter, that will be "
"automatically added from the LaunchConfiguration "
"Variable.",
},
}
def create_launch_configuration(self):
t = self.template
variables = self.get_variables()
self.launch_config = t.add_resource(variables["LaunchConfiguration"])
t.add_output(
Output("LaunchConfiguration", Value=self.launch_config.Ref())
)
def add_launch_config_variable(self, asg):
if getattr(asg, "LaunchConfigurationName", False):
raise ValueError("Do not provide a LaunchConfigurationName "
"variable for the AutoScalingGroup config.")
asg.LaunchConfigurationName = self.launch_config.Ref()
return asg
def create_autoscaling_group(self):
t = self.template
variables = self.get_variables()
asg = variables["AutoScalingGroup"]
asg = self.add_launch_config_variable(asg)
t.add_resource(asg)
t.add_output(Output("AutoScalingGroup", Value=asg.Ref()))
def create_template(self):
self.create_launch_configuration()
self.create_autoscaling_group()
|
26831
|
from typing import TypedDict
from backend.common.sitevars.sitevar import Sitevar
class ContentType(TypedDict):
secret_key: str
class FlaskSecrets(Sitevar[ContentType]):
DEFAULT_SECRET_KEY: str = "thebluealliance"
@staticmethod
def key() -> str:
return "flask.secrets"
@staticmethod
def description() -> str:
return "Secret key for Flask session"
@staticmethod
def default_value() -> ContentType:
return ContentType(secret_key=FlaskSecrets.DEFAULT_SECRET_KEY)
@classmethod
def secret_key(cls) -> str:
secret_key = cls.get().get("secret_key")
return secret_key if secret_key else FlaskSecrets.DEFAULT_SECRET_KEY
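# Usage sketch (assumption): typically consumed when configuring the Flask app, e.g.
#
#     app.config["SECRET_KEY"] = FlaskSecrets.secret_key()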
|
26846
|
from stronghold.views import StrongholdPublicMixin
import django
from django.views.generic import View
from django.views.generic.base import TemplateResponseMixin
if django.VERSION[:2] < (1, 9):
from django.utils import unittest
else:
import unittest
class StrongholdMixinsTests(unittest.TestCase):
def test_public_mixin_sets_attr(self):
class TestView(StrongholdPublicMixin, View):
pass
self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)
def test_public_mixin_sets_attr_with_multiple_mixins(self):
class TestView(StrongholdPublicMixin, TemplateResponseMixin, View):
template_name = 'dummy.html'
self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)
|
26859
|
import tensorflow as tf
def cosine_similarity(x, y, eps=1e-6):
    # Pairwise dot products between the row vectors of x and y for each batch element.
    # (tf.batch_matmul was removed in TF 1.0; tf.matmul handles the batch dimension.)
    z = tf.matmul(x, tf.transpose(y, perm=[0, 2, 1]))
    # Normalise by the product of the row norms; eps guards against division by zero.
    z /= tf.sqrt(tf.multiply(tf.expand_dims(tf.reduce_sum(tf.multiply(x, x), 2), 2), tf.expand_dims(tf.reduce_sum(tf.multiply(y, y), 2), 1)) + eps)
    return z
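# Usage sketch (assumption, TF 1.x graph mode): x holds batches of n row vectors and y
# batches of m row vectors with the same feature size; the result has shape [batch, n, m], e.g.
#
#     x = tf.placeholder(tf.float32, [None, 5, 8])
#     y = tf.placeholder(tf.float32, [None, 3, 8])
#     sim = cosine_similarity(x, y)   # shape [None, 5, 3]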
|
26868
|
import redis
import json
from . import config
redis_instance = None
def set_up(host, port, db):
global redis_instance
redis_instance = redis.StrictRedis(host=host, port=port, db=db)
class ModuleStorage():
def __init__(self, module_id):
self.key_prefix = "module:" + config.config.enabled_modules[module_id]["storage_prefix"]
@property
def redis(self):
return redis_instance
def prefixed_key(self, key):
return f"{self.key_prefix}:{key}"
def get(self, key):
data_json = redis_instance.get(self.prefixed_key(key))
if not data_json:
return None
data = json.loads(data_json)
return data.get("data")
def set(self, key, value):
data_json = json.dumps({"data": value})
return redis_instance.set(self.prefixed_key(key), data_json)
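# Usage sketch (assumption; the module id and its storage_prefix come from
# config.config.enabled_modules):
#
#     set_up("localhost", 6379, 0)
#     storage = ModuleStorage("weather")
#     storage.set("last_run", 1234)
#     storage.get("last_run")   # -> 1234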
|
26877
|
import gzip
import pickle
import os
def analyze(data_path):
"""
Run the comparison on the given data file
:param data_path:
:return:
"""
    if data_path.endswith(".gz"):
        with gzip.open(data_path, 'rb') as f:
            S, true_model = pickle.load(f)
    else:
        with open(data_path, 'rb') as f:
            S, true_model = pickle.load(f)
print("True model:")
print(true_model)
T = float(S.shape[0])
N = S.sum(axis=0)
print("lambda0: ", true_model.bias_model.lambda0.mean())
print("Average event count: ", N.mean(), " +- ", N.std())
print("Average event count: ", (N/T).mean(), " +- ", (N/T).std())
# seed = 2650533028
K = 50
C = 5
T = 100000
data_path = os.path.join("data", "synthetic", "synthetic_K%d_C%d_T%d.pkl.gz" % (K,C,T))
analyze(data_path)
|
26977
|
from . import Plugin
class VmstatPlugin(Plugin):
targets = [
{
            'match': r'^servers\.(?P<server>[^\.]+)\.vmstat\.(?P<type>.*)$',
'target_type': 'rate',
'tags': {'unit': 'Page'}
}
]
def sanitize(self, target):
target['tags']['type'] = target['tags']['type'].replace('pgpg', 'paging_')
target['tags']['type'] = target['tags']['type'].replace('pswp', 'swap_')
# vim: ts=4 et sw=4:
|
26978
|
import fileinput
from collections import Counter
BOXES = [line.strip() for line in fileinput.input()]
DOUBLES = 0
TRIPLES = 0
COMMON = None
for box_1 in BOXES:
doubles = 0
triples = 0
for char, count in Counter(box_1).items():
if count == 2:
doubles += 1
elif count == 3:
triples += 1
if doubles > 0:
DOUBLES += 1
if triples > 0:
TRIPLES += 1
for box_2 in BOXES:
if box_1 == box_2:
continue
diffs = 0
for i in range(len(box_1)):
if box_1[i] != box_2[i]:
diffs += 1
if diffs == 1:
COMMON = ''.join(a for a, b in zip(box_1, box_2) if a == b)
print "Checksum for list of box IDs:", DOUBLES * TRIPLES
print "Common letters for right IDs:", COMMON
|
26991
|
import FWCore.ParameterSet.Config as cms
simEcalDigis = cms.EDProducer("EcalSelectiveReadoutProducer",
# Label of input EB and EE digi collections
digiProducer = cms.string('simEcalUnsuppressedDigis'),
# Instance name of input EB digi collections
EBdigiCollection = cms.string(''),
    # Instance name of input EE digi collection
EEdigiCollection = cms.string(''),
# Instance name of output EB SR flags collection
EBSrFlagCollection = cms.string('ebSrFlags'),
# Instance name of output EE SR flags collection
EESrFlagCollection = cms.string('eeSrFlags'),
# Instance name of output EB digis collection
EBSRPdigiCollection = cms.string('ebDigis'),
# Instance name of output EE digis collection
EESRPdigiCollection = cms.string('eeDigis'),
# Label name of input ECAL trigger primitive collection
trigPrimProducer = cms.string('simEcalTriggerPrimitiveDigis'),
# Instance name of ECAL trigger primitive collection
trigPrimCollection = cms.string(''),
# Neighbour eta range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaEta = cms.int32(1),
    # Neighbouring phi range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaPhi = cms.int32(1),
    # Index of the time sample (starting from 1) to which the first DCC weight applies
ecalDccZs1stSample = cms.int32(3),
# ADC to GeV conversion factor used in ZS filter for EB
ebDccAdcToGeV = cms.double(0.035),
# ADC to GeV conversion factor used in ZS filter for EE
eeDccAdcToGeV = cms.double(0.06),
#DCC ZS FIR weights.
    # default value set of DCC firmware used in CRUZET and CRAFT
dccNormalizedWeights = cms.vdouble(-1.1865, 0.0195, 0.2900, 0.3477, 0.3008,
0.2266),
    # Switch to use a symmetric zero suppression (cut on absolute value). For
    # studies only; for the time being it is not supported by the hardware.
symetricZS = cms.bool(False),
# ZS energy threshold in GeV to apply to low interest channels of barrel
srpBarrelLowInterestChannelZS = cms.double(3*.035),
# ZS energy threshold in GeV to apply to low interest channels of endcap
srpEndcapLowInterestChannelZS = cms.double(3*0.06),
# ZS energy threshold in GeV to apply to high interest channels of barrel
srpBarrelHighInterestChannelZS = cms.double(-1.e9),
# ZS energy threshold in GeV to apply to high interest channels of endcap
srpEndcapHighInterestChannelZS = cms.double(-1.e9),
#switch to run w/o trigger primitive. For debug use only
trigPrimBypass = cms.bool(False),
#for debug mode only:
trigPrimBypassLTH = cms.double(1.0),
#for debug mode only:
trigPrimBypassHTH = cms.double(1.0),
#for debug mode only
trigPrimBypassWithPeakFinder = cms.bool(True),
# Mode selection for "Trig bypass" mode
# 0: TT thresholds applied on sum of crystal Et's
# 1: TT thresholds applies on compressed Et from Trigger primitive
    # @see trigPrimBypass_ switch
trigPrimBypassMode = cms.int32(0),
#number of events whose TT and SR flags must be dumped (for debug purpose):
dumpFlags = cms.untracked.int32(0),
#logical flag to write out SrFlags
writeSrFlags = cms.untracked.bool(True),
#switch to apply selective readout decision on the digis and produce
#the "suppressed" digis
produceDigis = cms.untracked.bool(True),
#Trigger Tower Flag to use when a flag is not found from the input
#Trigger Primitive collection. Must be one of the following values:
# 0: low interest, 1: mid interest, 3: high interest
# 4: forced low interest, 5: forced mid interest, 7: forced high interest
defaultTtf_ = cms.int32(4),
# SR->action flag map
actions = cms.vint32(1, 3, 3, 3, 5, 7, 7, 7)
)
|
27051
|
from django.http.response import Http404
from django.http import HttpResponse
from blogs.helpers import unmark, clean_text
from blogs.views.blog import resolve_address
from feedgen.feed import FeedGenerator
import mistune
def feed(request):
blog = resolve_address(request)
if not blog:
raise Http404("Blog does not exist")
all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')
fg = FeedGenerator()
fg.id(blog.useful_domain())
fg.author({'name': blog.subdomain, 'email': 'hidden'})
fg.title(blog.title)
fg.subtitle(blog.meta_description or clean_text(unmark(blog.content)[:160]) or blog.title)
fg.link(href=f"{blog.useful_domain()}/", rel='alternate')
for post in all_posts:
fe = fg.add_entry()
fe.id(f"{blog.useful_domain()}/{post.slug}/")
fe.title(post.title)
fe.author({'name': blog.subdomain, 'email': 'hidden'})
fe.link(href=f"{blog.useful_domain()}/{post.slug}/")
fe.content(clean_text(mistune.html(post.content)), type="html")
fe.published(post.published_date)
fe.updated(post.published_date)
if request.GET.get('type') == 'rss':
fg.link(href=f"{blog.useful_domain()}/feed/?type=rss", rel='self')
rssfeed = fg.rss_str(pretty=True)
return HttpResponse(rssfeed, content_type='application/rss+xml')
else:
fg.link(href=f"{blog.useful_domain()}/feed/", rel='self')
atomfeed = fg.atom_str(pretty=True)
return HttpResponse(atomfeed, content_type='application/atom+xml')
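# Usage sketch (assumption): wired into the blog URLconf roughly as
#
#     path('feed/', feed, name='feed')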
|
27088
|
import numpy as np
import random
import pandas as pd
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import select
from sqlalchemy import and_
from sqlalchemy import between
from sqlalchemy.sql import exists
from sqlalchemy import desc
from datetime import datetime, timezone, timedelta
from damadicsDBMapping import *
from sequenced_data_handler import SequenceDataHandler
# IP Address: 172.16.17.32
# User: dbAdmin
# Password: <PASSWORD>
# Database: damadics
class ValveDataHandler(SequenceDataHandler):
'''
TODO: column information here
'''
#Method definition
def __init__(self, start_time, end_time, selected_features, sequence_length = 1, sequence_stride = 1, data_scaler = None):
#Public properties
self._start_time = start_time
self._end_time = end_time
self._selected_features = selected_features
self._rectify_labels = False
self._data_scaler = data_scaler
# Database connection
# self._db_connection = mysql.connector.connect(user = 'root', password = '<PASSWORD>#', database = 'damadics')
self._load_from_db = True
self._column_names = {0: 'timestamp', 1: 'externalControllerOutput', 2: 'undisturbedMediumFlow', 3: 'pressureValveInlet', 4:'pressureValveOutlet',
5: 'mediumTemperature', 6: 'rodDisplacement', 7: 'disturbedMediumFlow', 8: 'selectedFault', 9: 'faultType', 10: 'faultIntensity'}
# Entire Dataset
self._df = None
self._X = None
self._y = None
# Splitting. This is what is used to train
self._df_train = None
self._df_test = None
#create one time session
self._sqlsession = None
print("init")
#super init
super().__init__(sequence_length, sequence_stride, len(selected_features), data_scaler)
def connect_to_db(self,username,pasw,host,dbname):
# self.username = username
# self.pasw = pasw
# self.host = host
self.dbname = dbname
databaseString = "mysql+mysqldb://"+username+":"+pasw+"@"+host+"/"+dbname
self._sqlsession = None
try:
sqlengine = sqlalchemy.create_engine(databaseString)
SQLSession = sessionmaker(bind=sqlengine)
self._sqlsession = SQLSession()
print("Connection to " + databaseString + " successfull")
except Exception as e:
print("e:", e)
print("Error in connection to the database")
def extract_data_from_db(self):
startTime = datetime.now()
self._df = self._sqlsession.query(ValveReading).filter(ValveReading.timestamp.between (self._start_time,self._end_time) )
self._df = pd.read_sql(self._df.statement, self._df.session.bind)
#dataPoints = self._sqlsession.query(exists().where(ValveReading.timestamp == '2018-07-27 15:56:22')).scalar()
#dataPoints = self._sqlsession.query(ValveReading).order_by(ValveReading.timestamp)
# TODO: need to check whether dataPoints is of type DataFrame. Needs to be in type DataFrame
# TODO: check whether column names are extracted out
# All the data with selected features is saved in this variable
# TODO: check if self._selected_features is an array of indexes or strings
# self._df = df.iloc[:, self._selected_features].values
# Assumption that the output is only one column and is located at the last column out of all the selected features
# Below if self._selected_features is an array of indexes
column_names = ['externalControllerOutput', 'pressureValveInlet',
'pressureValveOutlet', 'mediumTemperature','rodDisplacement', 'disturbedMediumFlow', 'selectedFault']
self._X = self._df.loc[:, column_names[:-1]].values
self._y = self._df.loc[:, column_names[len(column_names) - 1]].values
# Below if self._selected_features is an array of strings
# inputs = df.loc[:, column_names[:-1]].values
# outputs = df.loc[:, column_names[len(column_names) - 1]].values
# for data in self._df:
# print(self._df)
print("Extracting data from database runtime:", datetime.now() - startTime)
def one_hot_encode(self, num_readings):
startTime = datetime.now()
fault_column = list()
one_hot_matrix = np.zeros((num_readings, 20))
fault_column = self._y
for i in range(num_readings):
one_hot_matrix[i, int(fault_column[i] - 1)] = 1
print("One-hot-encoding:", datetime.now() - startTime)
return one_hot_matrix
# Private
def find_samples(self, data_samples):
'''
Assumptions made when using this function
1.) The valve always starts off as NOT BROKEN. The first faultType value is 20.
2.) The function is applied to the entire dataset, not in chunks.
'''
# TODO: handle cases where the first readings start off with a broken valve
# TODO: ask David if he wants a minimum amount of samples in the dataset
startTime = datetime.now()
small_list, big_list = list(), list()
normal_status = 20.0
isBroken = False
counter = 0
for i in range(len(self._y)):
# If True, then the current status of the valve is that it is broken
if (isBroken):
# The valve has been fixed and is back to its normal status
if (self._y[i] == normal_status):
isBroken = False
counter += 1
# Save everything from the small_list into the big_list
small_list = np.vstack(small_list)
big_list.append(small_list)
small_list = list()
small_list.append(data_samples[i, :])
# The current status of the valve is that it is not broken
else:
if (self._y[i] != normal_status):
isBroken = True
# small_list = np.append(data_samples[i, :], small_list)
small_list.append(data_samples[i, :])
print("Splitting into samples:",datetime.now() - startTime)
print("counter:", counter)
return big_list, counter
#
#
#
#
#
#
#
# # Private
# def find_samples(self, data_samples):
#
# '''
# Assumptions made when using this function
# 1.) The valve always starts of as NOT BROKEN. First faultType value is 20.
# 2.) Function is used to entire dataset and not in chunks
# '''
#
# # TODO: handle cases when the first readings starts of as a broken valve
# # TODO: ask David if he wants a minimum amount of samples in the dataset
#
# small_list, big_list = list(), list()
# normal_status = 20.0
# isBroken = False
# # Counter for the number of samples there are in the dataset
# counter = 0
#
# for i in range(len(self._y)):
# # If True, then the current status of the valve is that it is broken
# if (isBroken):
# # The valve has been fixed and is back to its normal status
# if (self._y[i] == normal_status):
# isBroken = False
# counter += 1
# # Save everything from the small_list into the big_list
# small_list = np.vstack(small_list)
# big_list.append(small_list)
# # Clear the small_list (reinitialize)
# small_list = list()
# small_list.append(data_samples[i, :])
# # The current status of the valve is that it is not broken
# else:
# # Broken valve discovered
# if (self._y[i] != normal_status):
# isBroken = True
# small_list.append(data_samples[i, :])
#
# # SPECIAL CASE: the simulation does not end with a fixed valve. Therefore we take whatever is inside the small_list and treat it as a complete sample
# if (self._y[i] != 20):
# counter += 1
# small_list = np.vstack(small_list)
# big_list.append(small_list)
#
# return big_list, counter
# Public
def load_data(self, verbose = 0, cross_validation_ratio = 0, test_ratio = 0, unroll = True):
"""Load the data using the specified parameters"""
'''
TODO: extracting data from MySQL database using SQLALCHEMY
Functions called here: generate_df_with_rul(self, df), generate_train_arrays(self, cross_validation_ratio = 0), generate_test_arrays(self),
create_sequenced_train_data(self), create_sequenced_test_data(self)
X: df[timestamp, ..., selectedFault]
y: df['faultType']
'''
# dataPoints = self._sqlsession.query(ValveReading)
if verbose == 1:
print("Loading data for dataset {} with window_size of {}, stride of {}. Cros-Validation ratio {}".format(self._dataset_number,
self._sequence_length, self._sequence_stride, cross_validation_ratio))
if cross_validation_ratio < 0 or cross_validation_ratio > 1:
print("Error, cross validation must be between 0 and 1")
return
if test_ratio < 0 or test_ratio > 1:
print("Error, test ratio must be between 0 and 1")
return
if cross_validation_ratio + test_ratio > 1:
print("Sum of cross validation and test ratios is greater than 1. Need to pick smaller ratios.")
return
if self._load_from_db == True:
print("Loading data from database")
# These variables are where the entire data is saved at
self.extract_data_from_db()
# One hot encoding
output_one_hot_matrix = self.one_hot_encode(self._df.shape[0])
# Finds samples within the inputs
self._X, num_samples = self.find_samples(self._X)
self._y, _ = self.find_samples(output_one_hot_matrix)
# self._df_train = self.load_db_into_df(self._file_train_data)
# self._df_test = self.load_db_into_df(self._file_test_data)
# self._df_train, num_units, trimmed_rul_train = self.generate_df_with_rul(self._df_train)
else:
print("Loading data from memory")
#Reset arrays
"""
self._X_train_list = list()
self._X_crossVal_list = list()
self._X_test_list = list()
self._y_train_list = list()
self._y_crossVal_list = list()
self._y_test_list = list()
"""
# Split up the data into its different samples
#Modify properties in the parent class, and let the parent class finish the data processing
self.train_cv_test_split(cross_validation_ratio, test_ratio, num_samples)
self.print_sequence_shapes()
# Unroll = True for ANN
# Unroll = False for RNN
self.generate_train_data(unroll)
self.generate_crossValidation_data(unroll)
self.generate_test_data(unroll)
#
self._load_from_db = False # As long as the dataframe doesn't change, there is no need to reload from the database
# Private
def train_cv_test_split(self, cross_validation_ratio, test_ratio, num_samples):
''' From the dataframes generate the feature arrays and their labels'''
print("split_samples num_samples:", num_samples)
print("cross_validation_ratio:", cross_validation_ratio)
print("test_ratio:", test_ratio)
startTime = datetime.now()
X_train_list, y_train_list = list(), list()
X_crossVal_list, y_crossVal_list = list(), list()
X_test_list, y_test_list = list(), list()
if cross_validation_ratio < 0 or cross_validation_ratio > 1:
print("Error, cross validation must be between 0 and 1")
return
if test_ratio < 0 or test_ratio > 1:
print("Error, test ratio must be between 0 and 1")
return
if cross_validation_ratio != 0 or test_ratio != 0:
self._X_train_list, self._y_train_list, self._X_crossVal_list, self._y_crossVal_list, self._X_test_list, self._y_test_list = self.split_samples(cross_validation_ratio, test_ratio, num_samples)
print("Train, cv, and test splitting:",datetime.now() - startTime)
print()
# Private
def split_samples(self, cross_validation_ratio, test_ratio, num_samples):
'''Split the samples according to their respective ratios'''
shuffled_samples = list(range(0, num_samples))
random.shuffle(shuffled_samples)
num_crossVal = int(cross_validation_ratio * num_samples)
#print("num_crossVal:", num_crossVal)
num_test = int(test_ratio * num_samples)
#print("num_test:", num_test)
num_train = num_samples - num_crossVal - num_test
#print("num_train:", num_train)
X_train_list, y_train_list = list(), list()
X_crossVal_list, y_crossVal_list = list(), list()
X_test_list, y_test_list = list(), list()
print(self._y[0])
for i in range(num_train):
#print("i:", i)
X_train_list.append(self._X[shuffled_samples[i]])
y_train_list.append(self._y[shuffled_samples[i]])
# y_train_list.append(self._y[shuffled_samples[i]][-1].reshape(1, 20))
# x = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][x][19] != 1):
# y_train_list.append(self._y[shuffled_samples[i]])
# x += 1
# for x in range(self._y[shuffled_samples[i]].shape[0]):
# if (self._y[shuffled_samples[i]][x][19] != 1 and len(y_train_list) == 0):
# y_train_list.append(self._y[shuffled_samples[i]])
# print(len(y_train_list))
for j in range(num_train, num_train + num_crossVal):
#print("j:", j)
X_crossVal_list.append(self._X[shuffled_samples[j]])
y_crossVal_list.append(self._y[shuffled_samples[j]][-1].reshape(1, 20))
# y = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][y][19] != 1):
# y_crossVal_list.append(self._y[shuffled_samples[i]])
# y += 1
# for y in range(self._y[shuffled_samples[j]].shape[0]):
# if (self._y[shuffled_samples[j]][y][19] != 1 and len(y_crossVal_list) == 0):
# y_crossVal_list.append(self._y[shuffled_samples[j]])
for k in range(num_train + num_crossVal, num_samples):
#print("k:", k)
X_test_list.append(self._X[shuffled_samples[k]])
y_test_list.append(self._y[shuffled_samples[k]][-1].reshape(1, 20))
# z = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][x][19] != 1):
# y_test_list.append(self._y[shuffled_samples[i]])
# z += 1
# for z in range(self._y[shuffled_samples[k]].shape[0]):
# if (self._y[shuffled_samples[k]][z][19] != 1 and len(y_test_list) == 0):
# y_test_list.append(self._y[shuffled_samples[k]])
#print("X_test_list shape:", len(X_test_list[0].shape))
return X_train_list, y_train_list, X_crossVal_list, y_crossVal_list, X_test_list, y_test_list
# def train_cv_test_split(self, cross_validation_ratio = 0, test_ratio = 0, num_samples):
# """From the dataframes generate the feature arrays and their labels"""
#
# '''
# Functions called here: split_samples(self, df, splitting_ratio), generate_cross_validation_from_df(self, df, sequence_length)
# '''
#
# X_train_list, y_train_list = list(), list()
# X_crossVal_list, y_crossVal_list = list(), list()
# X_test_list, y_test_list = list()
#
# if cross_validation_ratio < 0 or cross_validation_ratio > 1 :
# print("Error, cross validation must be between 0 and 1")
# return
#
# if test_ratio < 0 or test_ratio > 1 :
# print("Error, test ratio must be between 0 and 1")
# return
#
# if cross_validation_ratio != 0 or test_ratio != 0:
# X_train_list, X_test_list, X_crossVal_list, y_crossVal_list, y_train_list, y_test_list = self.split_samples(cross_validation_ratio, test_ratio, num_samples)
#
# return X_train_list, y_train_list, X_crossVal_list, y_crossVal_list, X_test_list, y_test_list
# Private
# def split_samples(self, cross_validation_ratio, test_ratio, num_samples):
# """Split the samples according to their respective ratios"""
#
# shuffled_samples = list(range(0, num_samples))
# random.shuffle(shuffled_samples)
#
# num_crossVal = int(cross_validation_ratio * num_samples)
# num_test = int(test_ratio * num_samples)
# num_train = num_samples - num_crossVal - num_test
#
# X_train_list, y_train_list = list(), list()
# X_crossVal, y_crossVal_list = list(), list()
# X_test_list, y_test_list = list(), list()
#
# for i in range(num_train):
# X_train_list.append(self._X[shuffled_samples[i]])
# y_train_list.append(self._y[shuffled_samples[i]])
#
# for j in range(num_train, num_train + num_crossVal):
# X_crossVal.append(self._X[shuffled_samples[j]])
# y_crossVal_list.append(self._y[shuffled_samples[j]])
#
# for k in range(num_train + num_crossVal, num_samples):
# X_test.append(self._X[shuffled_samples[k]])
# y_test_list.append(self._y[shuffled_samples[k]])
#
# return X_train_list, X_test, X_crossVal, y_crossVal_list, y_train_list, y_test
#Property definition
@property
def df(self):
return self._df
@df.setter
def df(self, df):
self._df = df
@property
def X(self):
    return self._X
@X.setter
def X(self, X):
    self._X = X
@property
def y(self):
return self._y
@y.setter
def y(self, y):
self._y = y
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self,start_time):
self._start_time = start_time
@property
def sqlsession(self):
return self._sqlsession
@sqlsession.setter
def sqlsession(self,sqlsession):
self._sqlsession = sqlsession
def __str__(self):
return "<ValveReading(timestamp='%s',externalControllerOutput='%s',undisturbedMediumFlow='%s',pressureValveInlet='%s',pressureValveOutlet='%s',mediumTemperature='%s',\
rodDisplacement='%s',disturbedMediumFlow='%s',selectedFault='%s',faultType='%s',faultIntensity='%s')>"\
%(str(self._timestamp),self._externalControllerOutput,self._undisturbedMediumFlow,self.pressureValveInlet,\
self.pressureValveOutlet,self.mediumTemperature,self.rodDisplacement,self.disturbedMediumFlow,self.selectedFault,\
self.faultType,self.faultIntensity)
# def selectedFeatures(self):
# return self._selectedFeatures
#
# @selectedFeatures.setter
# def selectedFeatures(self, selectedFeatures):
# self._selectedFeatures = selectedFeatures
#
# @property
# def max_rul(self):
# return self._max_rul
#
# @max_rul.setter
# def max_rul(self, max_rul):
# self._max_rul = max_rul
#
# @property
# def rectify_labels(self):
# return self._rectify_labels
#
# @rectify_labels.setter
# def rectify_labels(self, rectify_labels):
# self._rectify_labels = rectify_labels
#
# #ReadOnly Properties
#
# @property
# def dataset_number(self):
# return self._dataset_number
#
# @property
# def data_folder(self):
# return self._data_folder
#
# @property
# def file_train_data(self):
# return self._file_train_data
#
# @property
# def file_test_data(self):
# return self._file_test_data
#
# @property
# def file_rul(self):
# return self._file_rul
#
# @property
# def load_from_file(self):
# return self._load_from_db
#
# @property
# def column_names(self):
# return self._column_names
#
# @property
# def df_train(self):
# return self._df_train
#
# @property
# def df_test(self):
# return self._df_test
#
#
#
# #Auxiliary functions
#
# def compute_training_rul(df_row, *args):
# """Compute the RUL at each entry of the DF"""
#
# max_rul = args[1]
# rul_vector = args[0]
# rul_vector_index = int(df_row['Unit Number']) - 1
#
#
# if max_rul > 0 and rul_vector[rul_vector_index] - df_row['Cycle'] > max_rul:
# return max_rul
# else:
# return rul_vector[rul_vector_index] - df_row['Cycle']
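# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the episode-splitting idea implemented by
# ValveDataHandler.find_samples above, run on a synthetic fault-label sequence
# so no database connection is needed. The helper name `split_into_episodes`
# and the toy data are illustrative only; they are not part of the class.
def split_into_episodes(features, labels, normal_status=20.0):
    """Group consecutive readings into one episode per fault/repair cycle.

    An episode is closed whenever the label returns to `normal_status` after
    having been in a faulty state, mirroring the logic of find_samples.
    """
    episodes, current, is_broken = [], [], False
    for row, label in zip(features, labels):
        if is_broken and label == normal_status:
            is_broken = False
            episodes.append(np.vstack(current))
            current = []
        elif not is_broken and label != normal_status:
            is_broken = True
        current.append(row)
    return episodes

if __name__ == '__main__':
    toy_features = np.arange(16, dtype=float).reshape(8, 2)
    toy_labels = np.array([20, 20, 3, 3, 20, 20, 7, 20], dtype=float)
    for i, episode in enumerate(split_into_episodes(toy_features, toy_labels)):
        print("episode", i, "has", episode.shape[0], "readings")  # 4 and 3 readings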
|
27094
|
import json
import pygments.formatters
import pygments.lexers
def pretty_view(mapping: dict, /) -> str:
"""
Args:
mapping:
Returns:
"""
dumped_mapping = json.dumps(mapping, ensure_ascii=False, indent=4)
pretty_mapping = pygments.highlight(
dumped_mapping,
pygments.lexers.JsonLexer(), # noqa
pygments.formatters.TerminalFormatter(bg="light"), # noqa
)
return pretty_mapping
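# A minimal usage sketch: pretty_view only needs a JSON-serialisable mapping,
# so running this module directly prints a small colourised example. The
# sample dictionary below is illustrative only.
if __name__ == "__main__":
    sample = {"name": "demo", "count": 3, "nested": {"enabled": True}}
    print(pretty_view(sample))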
|
27138
|
import threading
import time
from dredis.db import NUMBER_OF_REDIS_DATABASES, DB_MANAGER, KEY_CODEC
DEFAULT_GC_INTERVAL = 500 # milliseconds
DEFAULT_GC_BATCH_SIZE = 10000 # number of storage keys to delete in a batch
class KeyGarbageCollector(threading.Thread):
def __init__(self, gc_interval=DEFAULT_GC_INTERVAL, batch_size=DEFAULT_GC_BATCH_SIZE):
threading.Thread.__init__(self, name="Key Garbage Collector")
self._gc_interval_in_secs = gc_interval / 1000.0 # convert to seconds
self._batch_size = batch_size
def run(self):
while True:
self.collect()
time.sleep(self._gc_interval_in_secs)
def collect(self):
for db_id in range(NUMBER_OF_REDIS_DATABASES):
with DB_MANAGER.thread_lock:
self._collect(DB_MANAGER.get_db(db_id))
def _collect(self, db):
deleted = 0
with db.write_batch() as batch:
for deleted_db_key, _ in db.iterator(prefix=KEY_CODEC.MIN_DELETED_VALUE):
_, _, deleted_key_value = KEY_CODEC.decode_key(deleted_db_key)
for db_key, _ in db.iterator(prefix=deleted_key_value):
deleted += 1
batch.delete(db_key)
if deleted == self._batch_size:
return
batch.delete(deleted_db_key)
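# A minimal usage sketch, assuming dredis's DB_MANAGER has already opened its
# databases (as the server process does at startup): the collector is normally
# started as a daemon thread so it sweeps tombstoned keys in the background.
# The batch size below is an illustrative value, not a recommendation.
if __name__ == "__main__":
    collector = KeyGarbageCollector(batch_size=1000)
    collector.daemon = True  # let the interpreter exit without waiting on the sweep loop
    collector.start()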
|
27172
|
from sysu_dataset import SYSU
import numpy as np
import scipy.ndimage  # the transforms below use scipy.ndimage.interpolation
import itertools
import cv2
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from config import *
vox_size=54
all_tups = np.array(list(itertools.product(range(vox_size), repeat=2)))
rot_array = np.arange(vox_size*vox_size).reshape([vox_size,vox_size])
K = 5
T = 10
class SYSUdataset(Dataset):
def __init__(self, test=False, full_train=False):
# Underlying dataset and features
self.dataset = SYSU()
# What to return
self.images = DATA_IMAGES
self.images_3D = DATA_IMAGES_3D
self.op_flow = DATA_OP_FLOW
self.op_flow_2D = DATA_OP_FLOW_2D
self.single_feature = DATA_SINGLE_FEAT
self.augmentation = DATA_AUGMENTATION
# Train, validation, test split
self.train = full_train
if test:
self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[1]
else:
self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[0]
def __len__(self):
return len(self.vid_ids)
def image_transforms(self, numpy_imgs):
''' Transformations on a list of images
Returns
-------
images : Torch Tensor
Stacked tensor of all images with the transformations applied
'''
# Get random parameters to apply same transformation to all images in list
color_jitter = transforms.ColorJitter.get_params(.25,.25,.25,.25)
rotation_param = transforms.RandomRotation.get_params((-15,15))
crop_params = None
# Apply transformations
images = []
for numpy_img in numpy_imgs:
i = transforms.functional.to_pil_image(numpy_img)
i = transforms.functional.resize(i, (224,224))
if self.train:
i = color_jitter(i)
i = transforms.functional.rotate(i, rotation_param)
i = transforms.functional.to_tensor(i)
i = transforms.functional.normalize(i, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
images.append(i)
return torch.stack(images)
def op_flow_transforms(self, op_flow):
''' Transformations on a tensor of optical flow voxel grids
Parameters
----------
op_flow : ndarray
Returns
-------
op_flow : Torch Tensor
A torch tensor of an optical flow voxel grid with the
transformations (rotation, scale, translation) applied to it
'''
def translate(op_flow):
# op_flow[:,0::3,:,:,:] ---> x axis vectors
# op_flow = scipy.ndimage.interpolation.shift(op_flow, [0,0,x_move,y_move,z_move], cval=0, order=0) # Slower alternative
# Get amount to shift
max_shift = int(op_flow.shape[2] * 0.10)
x_move, y_move, z_move = np.random.randint(-max_shift, max_shift, 3)
# Translate values
if x_move > 0:
op_flow[:,:,x_move:,:,:] = op_flow[:,:,:-x_move,:,:]
op_flow[:,:,:x_move,:,:] = 0
elif x_move < 0:
op_flow[:,:,:x_move,:,:] = op_flow[:,:,-x_move:,:,:]
op_flow[:,:,x_move:,:,:] = 0
if y_move > 0:
op_flow[:,:,:,y_move:,:] = op_flow[:,:,:,:-y_move,:]
op_flow[:,:,:,:y_move,:] = 0
elif y_move < 0:
op_flow[:,:,:,:y_move,:] = op_flow[:,:,:,-y_move:,:]
op_flow[:,:,:,y_move:,:] = 0
if z_move > 0:
op_flow[:,:,:,:,z_move:] = op_flow[:,:,:,:,:-z_move]
op_flow[:,:,:,:,:z_move] = 0
elif z_move < 0:
op_flow[:,:,:,:,:z_move] = op_flow[:,:,:,:,-z_move:]
op_flow[:,:,:,:,z_move:] = 0
return op_flow
def rotate(op_flow):
''' Rotate an optical flow tensor a random amount about the y axis '''
# Get angle
angle = np.random.randint(-45, 45)
# Rotate positions
rot_mat = scipy.ndimage.interpolation.rotate(rot_array, angle, (0,1), reshape=False, order=0)
op_flow_new = np.zeros(op_flow.shape, dtype=np.float32)
tup = all_tups[rot_mat]
op_flow_new = op_flow[:,:,tup[:, :, 0],:,tup[:, :, 1]].transpose(2,3,0,4,1)
# Rotate flow vectors
cos = np.cos(np.radians(-angle))
sin = np.sin(np.radians(-angle))
x_copy = op_flow_new[:,0].copy()
z_copy = op_flow_new[:,2].copy()
op_flow_new[:,0] = x_copy * cos + z_copy * sin
op_flow_new[:,2] = x_copy * -sin + z_copy * cos
return op_flow_new
def scale(op_flow):
return op_flow
# import datetime as dt
if self.train:
op_flow = translate(op_flow)
op_flow = rotate(op_flow)
return torch.from_numpy(op_flow)
def get_3D_op_flow(self, vid_id):
# Load the data
feat_values = np.load("{}/{:05}.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
feat_nonzero = np.load("{}/{:05}.nonzeros.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
feat_shape = np.load("{}/{:05}.shape.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
# Rebuild the feature from the saved data
feature = np.zeros(feat_shape, np.float32)
feature[tuple(feat_nonzero)] = feat_values
return feature
def __getitem__(self, idx):
vid_id = self.vid_ids[idx]
to_return = []
# Images
if self.images:
images = np.load('{}/{:05}.npy'.format(CACHE_2D_IMAGES_SYSU, vid_id))
images = self.image_transforms(images)
to_return.append(images)
# Optical flow 3D
if self.op_flow:
op_flow = self.get_3D_op_flow(vid_id)
op_flow = self.op_flow_transforms(op_flow)
to_return.append(op_flow)
# Labels
to_return.append(self.dataset.get_label(vid_id))
return to_return
def get_train_loader():
dataset = SYSUdataset(full_train=True)
return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,
shuffle=True, num_workers=NUM_WORKERS,
pin_memory=True)
def get_test_loader():
dataset = SYSUdataset(test=True)
return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS,
pin_memory=True)
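# A small, self-contained sketch of the flow-vector rotation used inside
# op_flow_transforms.rotate above: the x/z components of every flow vector are
# rotated about the y axis with a standard 2-D rotation while the y component
# is untouched. Only the formula is shown here (the exact sign convention in
# rotate() also depends on the -angle it passes to cos/sin); the toy vector is
# illustrative only.
def rotate_flow_xz(x, z, angle_deg):
    cos = np.cos(np.radians(angle_deg))
    sin = np.sin(np.radians(angle_deg))
    return x * cos + z * sin, -x * sin + z * cos

if __name__ == "__main__":
    x_new, z_new = rotate_flow_xz(1.0, 0.0, 90)
    print(np.round(x_new, 6), np.round(z_new, 6))  # ~0.0 -1.0: the unit x-vector lands on the -z axis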
|
27207
|
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xray
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = atm.homedir() + 'datastore/merra/daily/'
year = 2014
subset = '_40E-120E_90S-90N'
def get_var(datadir, varnm, subset, year):
filenm = '%smerra_%s%s_%d.nc' % (datadir, varnm, subset, year)
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
return var
uq_int = get_var(datadir, 'UFLXQV', subset, year)
vq_int = get_var(datadir, 'VFLXQV', subset, year)
mfc = atm.moisture_flux_conv(uq_int, vq_int, already_int=True)
mfcbar = mfc.mean(dim='YDim').mean(dim='XDim')
# Test atm.gradient
a = atm.constants.radius_earth.values
latdim, londim = 1, 2
lat = atm.get_coord(uq_int, 'lat')
latrad = np.radians(lat)
latrad[abs(lat) > 89] = np.nan
coslat = xray.DataArray(np.cos(latrad), coords={'YDim' : lat})
lon = atm.get_coord(uq_int, 'lon')
lonrad = np.radians(lon)
mfc_x = atm.gradient(uq_int, lonrad, londim) / (a*coslat)
mfc_y = atm.gradient(vq_int * coslat, latrad, latdim) / (a*coslat)
mfc_test = mfc_x + mfc_y
mfc_test = - atm.precip_convert(mfc_test, 'kg/m2/s', 'mm/day')
mfc_test_bar = mfc_test.mean(dim='YDim').mean(dim='XDim')
diff = mfc_test - mfc
print(diff.max())
print(diff.min())
plt.plot(mfcbar)
plt.plot(mfc_test_bar)
print(mfc_test_bar - mfcbar)
# ----------------------------------------------------------------------
# Vertical gradient du/dp
lon1, lon2 = 40, 120
pmin, pmax = 100, 300
subset_dict = {'XDim' : (lon1, lon2), 'Height' : (pmin, pmax)}
urls = merra.merra_urls([year])
month, day = 7, 15
url = urls['%d%02d%02d' % (year, month, day)]
with xray.open_dataset(url) as ds:
u = atm.subset(ds['U'], subset_dict, copy=False)
u = u.mean(dim='TIME')
pres = u['Height']
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dp = np.gradient(pres)
# Calc 1
dims = u.shape
dudp = np.nan * u
for i in range(dims[1]):
for j in range(dims[2]):
dudp.values[:, i, j] = np.gradient(u[:, i, j], dp)
# Test atm.gradient
dudp_test = atm.gradient(u, pres, axis=0)
diff = dudp_test - dudp
print(diff.max())
print(diff.min())
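# ----------------------------------------------------------------------
# A self-contained sanity check in the same spirit as the atm.gradient tests
# above, using only numpy: the centred-difference gradient of sin(x) should
# closely approximate cos(x). This block is illustrative and independent of
# the MERRA data.
x_check = np.linspace(0, 2 * np.pi, 200)
dsin_dx = np.gradient(np.sin(x_check), x_check)
print('max |d(sin)/dx - cos| =', np.max(np.abs(dsin_dx - np.cos(x_check))))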
|
27229
|
import sys
from csvcols import get_column
categories = get_column(sys.argv[1], col=1)
descriptions = get_column(sys.argv[1], col=2)
for c, n in categories.most_common(len(categories)):
print("%6d %s" % (n, c))
for d, n in descriptions.most_common(len(descriptions)):
print("%6d %s" % (n, d))
|
27251
|
from __future__ import annotations
from .configs import *
from . import shared as td
import hashlib
# if TYPE_CHECKING:
# from ..opentele import *
class AuthKeyType(IntEnum):
"""
Type of `AuthKey`
### Attributes:
Generated (`IntEnum`):
Generated key
Temporary (`IntEnum`):
Temporary key
ReadFromFile (`IntEnum`):
Key read from file
Local (`IntEnum`):
Local key
"""
Generated = 0
Temporary = 1
ReadFromFile = 2
Local = 3
class AuthKey(BaseObject):
"""
Authorization key used for [MTProto](https://core.telegram.org/mtproto)
It's also used to encrypt and decrypt local tdata
### Attributes:
DcId (DcId):
Data Center ID (from 1 to 5).
type (AuthKeyType):
Type of the key.
key (bytes):
The actual key, 256 `bytes` in length.
"""
kSize = 256
def __init__(self, key: bytes = bytes(), type: AuthKeyType = AuthKeyType.Generated, dcId: DcId = DcId.Invalid) -> None: # type: ignore
self.__type = type
self.__dcId = dcId
self.__key = key
# if (type == self.Type.Generated) or (type == self.Type.Temporary):
# self.__creationtime = ...
self.__countKeyId()
@property
def dcId(self) -> DcId:
return self.__dcId
@property
def type(self) -> AuthKeyType:
return self.__type
@property
def key(self) -> bytes:
return self.__key
def write(self, to: QDataStream) -> None:
to.writeRawData(self.key)
def __countKeyId(self) -> None:
hash = hashlib.sha1(self.__key).digest()
self.__keyId = int.from_bytes(hash[12 : 12 + 8], "little")
def prepareAES_oldmtp(
self, msgKey: bytes, send: bool
) -> typing.Tuple[bytes, bytes]:
x = 0 if send else 8
sha1_a = hashlib.sha1(msgKey[:16] + self.__key[x : x + 32]).digest()
sha1_b = hashlib.sha1(
self.__key[x + 32 : x + 32 + 16]
+ msgKey[:16]
+ self.__key[x + 48 : x + 48 + 16]
).digest()
sha1_c = hashlib.sha1(self.__key[x + 64 : x + 64 + 32] + msgKey[:16]).digest()
sha1_d = hashlib.sha1(msgKey[:16] + self.__key[x + 96 : x + 96 + 32]).digest()
aesKey = sha1_a[:8] + sha1_b[8 : 8 + 12] + sha1_c[4 : 4 + 12]
aesIv = sha1_a[8 : 8 + 12] + sha1_b[:8] + sha1_c[16 : 16 + 4] + sha1_d[:8]
return aesKey, aesIv
@staticmethod
def FromStream(
stream: QDataStream,
type: AuthKeyType = AuthKeyType.ReadFromFile,
dcId: DcId = DcId(0),
) -> AuthKey:
keyData = stream.readRawData(AuthKey.kSize)
return AuthKey(keyData, type, dcId)
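# A minimal usage sketch, assuming DcId from .configs behaves like the usual
# opentele IntEnum (so DcId(2) is an ordinary data-center id). It builds a
# random 256-byte authorization key and derives a per-message AES key/IV pair
# the old-MTProto way; both outputs are 32 bytes long. Because this module
# uses relative imports, the block is meant to be exercised from inside the
# package (e.g. via `python -m`), not as a standalone script.
if __name__ == "__main__":
    import os

    auth_key = AuthKey(os.urandom(AuthKey.kSize), AuthKeyType.Generated, DcId(2))
    msg_key = os.urandom(16)
    aes_key, aes_iv = auth_key.prepareAES_oldmtp(msg_key, send=True)
    print(len(aes_key), len(aes_iv))  # 32 32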
|
27334
|
import sys
map_file = sys.argv[1]
raw_test_file = sys.argv[2]
output_file = sys.argv[3]
date_set = ('year_0_number', 'year_1_number', 'year_2_number', 'year_3_number', 'month_0_number', 'month_0_name', 'month_1_name', 'day_0_number', 'day_1_number')
def replace_date(tok):
if tok == 'year_0_number':
tok = 'year_0'
elif tok == 'year_1_number':
tok = 'year_1'
elif tok == 'year_2_number':
tok = 'year_2'
elif tok == 'year_3_number':
tok = 'year_3'
elif tok == 'month_0_number':
tok = 'month_0'
elif tok == 'month_0_name':
tok = 'month_0'
elif tok == 'month_1_name':
tok = 'month_1'
elif tok == 'day_0_number':
tok = 'day_0'
elif tok == 'day_1_number':
tok = 'day_1'
return tok
mapping_list = list()
with open(map_file) as f:
map_list = f.readlines()
print(len(map_list))
for line in map_list:
line = line.strip()
if line != '{}':
line = line[1:-1]
entity_dict = dict()
if ',' in line:
entity_list = line.split('",')
for entity in entity_list:
entity = entity.split(':')
anon = entity[0].strip()[1:-1]
if entity[1].strip()[-1] == '"':
deanon = entity[1].strip()[1:-1].lower()
else:
deanon = entity[1].strip()[1:].lower()
entity_dict[anon] = deanon
else:
entity = line.split(':')
anon = entity[0].strip()[1:-1]
deanon = entity[1].strip()[1:-1].lower()
entity_dict[anon] = deanon
# print(entity_dict)
mapping_list.append(entity_dict)
else:
mapping_list.append([])
print(len(mapping_list))
with open(raw_test_file) as f:
output_list = f.readlines()
all_sent_list = list()
for index, line in enumerate(output_list):
entities = mapping_list[index]
if not len(entities):
all_sent_list.append(line)
continue
sent_list = line.strip().split(' ')
# print(entities)
new_sent = ''
for tok in sent_list:
if tok in date_set:
tok = replace_date(tok)
print(tok)
if tok in entities.keys():
deanon = entities[tok]
new_sent += deanon + ' '
else:
new_sent += tok + ' '
new_sent += '\n'
# print(new_sent)
all_sent_list.append(new_sent)
with open(output_file, 'w') as out:
for sent in all_sent_list:
out.write(sent)
# print(all_sent_list)
|
27361
|
import argparse
import os, socket
from datetime import datetime
import shutil
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from model import UNet
from warp import WarpingLayerBWFlow
from torch.utils.tensorboard import SummaryWriter
from dataloader import llenDataset
from torch.utils.data import DataLoader
import cv2
import kornia
import random
def save_checkpoint(state, epoch, output_directory):
checkpoint_filename = os.path.join(output_directory, 'checkpoint-' + str(epoch) + '.pth')
torch.save(state, checkpoint_filename)
# Parse arguments
parser = argparse.ArgumentParser(description='Low light enhancement')
parser.add_argument('--data-path', default='./data', type=str, help='path to the dataset')
parser.add_argument('--epochs', default=50, type=int, help='n of epochs (default: 50)')
parser.add_argument('--bs', default=1, type=int, help='[train] batch size(default: 1)')
parser.add_argument('--bs-test', default=1, type=int, help='[test] batch size (default: 1)')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate (default: 1e-4)')
parser.add_argument('--gpu', default='0', type=str, help='GPU id to use (default: 0)')
parser.add_argument('--checkpoint', default=None, type=str, help='path to checkpoint')
parser.add_argument('--log', default=None, type=str, help='folder to log')
parser.add_argument('--weight', default=20, type=float, help='weight of consistency loss')
args = parser.parse_args()
print(args)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
train_set = llenDataset(args.data_path, type='train')
train_loader = DataLoader(train_set, batch_size=args.bs, shuffle=True, num_workers=8, pin_memory=True)
torch.manual_seed(ord('c')+137)
random.seed(ord('c')+137)
np.random.seed(ord('c')+137)
start_epoch = 0
model = UNet(n_channels=3, bilinear=True).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
criterion = nn.L1Loss()
warp = WarpingLayerBWFlow().cuda()
# Create logger
if args.log==None:
log_dir = os.path.join(os.path.abspath(os.getcwd()), 'logs', datetime.now().strftime('%b%d_%H-%M-%S_') + socket.gethostname())
else:
log_dir = os.path.join(os.path.abspath(os.getcwd()), 'logs', args.log)
os.makedirs(log_dir)
logger = SummaryWriter(log_dir)
# Log arguments
with open(os.path.join(log_dir, "config.txt"), "a") as f:
print(args, file=f)
iters = 0
for epoch in range(start_epoch, args.epochs):
# log learning rate
for i, param_group in enumerate(optimizer.param_groups):
logger.add_scalar('Lr/lr_' + str(i), float(param_group['lr']), epoch)
# Training stage
print('Epoch', epoch, 'train in progress...')
model.train()
for i, (input, target, flow) in enumerate(train_loader):
input, target, flow= input.cuda(), target.cuda(), flow.cuda()
# the 1st pass
pred = model(input)
loss = criterion(pred, target)
# the 2nd pass
input_t = warp(input, flow)
input_t_pred = model(input_t)
pred_t = warp(pred, flow)
loss_t = criterion(input_t_pred, pred_t)
total_loss = loss + loss_t * args.weight
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
logger.add_scalar('Train/Loss', loss.item(), iters)
logger.add_scalar('Train/Loss_t', loss_t.item(), iters)
iters += 1
if (i + 1) % 10 == 0:
print('Train Epoch: {0} [{1}/{2}]\t'
'l1Loss={Loss1:.8f} '
'conLoss={Loss2:.8f} '.format(
epoch, i + 1, len(train_loader), Loss1=loss.item(), Loss2=loss_t.item()))
save_checkpoint(model.state_dict(), epoch, log_dir)
print()
logger.close()
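# ----------------------------------------------------------------------------
# A minimal sketch of the two-pass consistency objective used in the training
# loop above, written as a standalone helper so the composition of the two L1
# terms is explicit. `model_fn` and `warp_fn` are placeholders for the UNet and
# WarpingLayerBWFlow used above; nothing here is executed.
def consistency_objective(model_fn, warp_fn, x, target, flow, weight):
    pred = model_fn(x)                               # 1st pass: supervised L1 term
    loss = nn.functional.l1_loss(pred, target)
    pred_of_warped = model_fn(warp_fn(x, flow))      # 2nd pass: predict on the warped input
    warped_pred = warp_fn(pred, flow)                # ...and warp the original prediction
    loss_t = nn.functional.l1_loss(pred_of_warped, warped_pred)
    return loss + weight * loss_t
    # e.g. total_loss = consistency_objective(model, warp, input, target, flow, args.weight)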
|
27370
|
import os
import json
import shutil
import numpy as np
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Tuple
from typing import Union
from typing import Callable
from typing import Optional
from typing import NamedTuple
from tqdm.autonotebook import tqdm
from cfdata.tabular import TabularData
from cftool.ml import ModelPattern
from cftool.ml import EnsemblePattern
from cftool.dist import Parallel
from cftool.misc import update_dict
from cftool.misc import shallow_copy_dict
from cftool.ml.utils import patterns_type
from cftool.ml.utils import Comparer
from cftool.ml.utils import Estimator
from .pipeline import SimplePipeline
from .pipeline import CarefreePipeline
from ...data import MLData
from ...data import MLInferenceData
from ...trainer import get_sorted_checkpoints
from ...constants import SCORES_FILE
from ...constants import WARNING_PREFIX
from ...constants import CHECKPOINTS_FOLDER
from ...constants import ML_PIPELINE_SAVE_NAME
from ...dist.ml import Experiment
from ...dist.ml import ExperimentResults
from ...misc.toolkit import to_2d
from ...misc.toolkit import get_latest_workplace
from ...models.ml.protocol import MLCoreProtocol
def register_core(name: str) -> Callable[[Type], Type]:
return MLCoreProtocol.register(name)
pipelines_type = Dict[str, List[SimplePipeline]]
various_pipelines_type = Union[
SimplePipeline,
List[SimplePipeline],
Dict[str, SimplePipeline],
pipelines_type,
]
def _to_pipelines(pipelines: various_pipelines_type) -> pipelines_type:
if isinstance(pipelines, dict):
pipeline_dict = {}
for key, value in pipelines.items():
if isinstance(value, list):
pipeline_dict[key] = value
else:
pipeline_dict[key] = [value]
else:
if not isinstance(pipelines, list):
pipelines = [pipelines]
pipeline_dict = {}
for pipeline in pipelines:
assert pipeline.model is not None
key = pipeline.model.__identifier__
pipeline_dict.setdefault(key, []).append(pipeline)
return pipeline_dict
def evaluate(
data: Union[MLData, MLInferenceData],
*,
metrics: Union[str, List[str]],
metric_configs: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
contains_labels: bool = True,
pipelines: Optional[various_pipelines_type] = None,
predict_config: Optional[Dict[str, Any]] = None,
other_patterns: Optional[Dict[str, patterns_type]] = None,
comparer_verbose_level: Optional[int] = 1,
) -> Comparer:
if not contains_labels:
err_msg = "`cflearn.evaluate` must be called with `contains_labels = True`"
raise ValueError(err_msg)
if metric_configs is None:
metric_configs = [{} for _ in range(len(metrics))]
patterns = {}
x, y = data.x_train, data.y_train
if pipelines is None:
msg = None
if y is None:
msg = "either `pipelines` or `y` should be provided"
if other_patterns is None:
msg = "either `pipelines` or `other_patterns` should be provided"
if msg is not None:
raise ValueError(msg)
else:
pipelines = _to_pipelines(pipelines)
# get data
# TODO : different pipelines may have different labels
if y is not None:
y = to_2d(y)
else:
if not isinstance(x, str):
raise ValueError("`x` should be str when `y` is not provided")
data_pipeline = list(pipelines.values())[0][0]
if not isinstance(data_pipeline, CarefreePipeline):
raise ValueError("only `CarefreePipeline` can handle file inputs")
cf_data = data_pipeline.cf_data
assert cf_data is not None
x, y = cf_data.read_file(x, contains_labels=contains_labels)
y = cf_data.transform(x, y).y
# get metrics
if predict_config is None:
predict_config = {}
predict_config.setdefault("contains_labels", contains_labels)
for name, pipeline_list in pipelines.items():
patterns[name] = [
pipeline.to_pattern(**predict_config) for pipeline in pipeline_list
]
if other_patterns is not None:
for other_name in other_patterns.keys():
if other_name in patterns:
print(
f"{WARNING_PREFIX}'{other_name}' is found in "
"`other_patterns`, it will be overwritten"
)
update_dict(other_patterns, patterns)
if isinstance(metrics, list):
metrics_list = metrics
else:
assert isinstance(metrics, str)
metrics_list = [metrics]
if isinstance(metric_configs, list):
metric_configs_list = metric_configs
else:
assert isinstance(metric_configs, dict)
metric_configs_list = [metric_configs]
estimators = [
Estimator(metric, metric_config=metric_config)
for metric, metric_config in zip(metrics_list, metric_configs_list)
]
comparer = Comparer(patterns, estimators)
comparer.compare(data, y, verbose_level=comparer_verbose_level)
return comparer
def task_loader(
workplace: str,
pipeline_base: Type[SimplePipeline] = CarefreePipeline,
compress: bool = True,
) -> SimplePipeline:
export_folder = os.path.join(workplace, ML_PIPELINE_SAVE_NAME)
m = pipeline_base.load(export_folder=export_folder, compress=compress)
assert isinstance(m, SimplePipeline)
return m
def load_experiment_results(
results: ExperimentResults,
pipeline_base: Type[SimplePipeline],
) -> pipelines_type:
pipelines_dict: Dict[str, Dict[int, SimplePipeline]] = {}
iterator = list(zip(results.workplaces, results.workplace_keys))
for workplace, workplace_key in tqdm(iterator, desc="load"):
pipeline = task_loader(workplace, pipeline_base)
model, str_i = workplace_key
pipelines_dict.setdefault(model, {})[int(str_i)] = pipeline
return {k: [v[i] for i in sorted(v)] for k, v in pipelines_dict.items()}
class RepeatResult(NamedTuple):
data: Optional[TabularData]
experiment: Optional[Experiment]
pipelines: Optional[Dict[str, List[SimplePipeline]]]
patterns: Optional[Dict[str, List[ModelPattern]]]
def repeat_with(
data: MLData,
*,
pipeline_base: Type[SimplePipeline] = CarefreePipeline,
workplace: str = "_repeat",
models: Union[str, List[str]] = "fcnn",
model_configs: Optional[Dict[str, Dict[str, Any]]] = None,
predict_config: Optional[Dict[str, Any]] = None,
sequential: Optional[bool] = None,
num_jobs: int = 1,
num_repeat: int = 5,
return_patterns: bool = True,
compress: bool = True,
use_tqdm: bool = True,
available_cuda_list: Optional[List[int]] = None,
resource_config: Optional[Dict[str, Any]] = None,
task_meta_kwargs: Optional[Dict[str, Any]] = None,
is_fix: bool = False,
**kwargs: Any,
) -> RepeatResult:
if os.path.isdir(workplace) and not is_fix:
print(f"{WARNING_PREFIX}'{workplace}' already exists, it will be erased")
shutil.rmtree(workplace)
kwargs = shallow_copy_dict(kwargs)
if isinstance(models, str):
models = [models]
if sequential is None:
sequential = num_jobs <= 1
if model_configs is None:
model_configs = {}
def is_buggy(i_: int, model_: str) -> bool:
i_workplace = os.path.join(workplace, model_, str(i_))
i_latest_workplace = get_latest_workplace(i_workplace)
if i_latest_workplace is None:
return True
checkpoint_folder = os.path.join(i_latest_workplace, CHECKPOINTS_FOLDER)
if not os.path.isfile(os.path.join(checkpoint_folder, SCORES_FILE)):
return True
if not get_sorted_checkpoints(checkpoint_folder):
return True
return False
def fetch_config(core_name: str) -> Dict[str, Any]:
local_kwargs = shallow_copy_dict(kwargs)
assert model_configs is not None
local_core_config = model_configs.setdefault(core_name, {})
local_kwargs["core_name"] = core_name
local_kwargs["core_config"] = shallow_copy_dict(local_core_config)
return shallow_copy_dict(local_kwargs)
pipelines_dict: Optional[Dict[str, List[SimplePipeline]]] = None
if sequential:
cuda = kwargs.pop("cuda", None)
experiment = None
tqdm_settings = kwargs.setdefault("tqdm_settings", {})
tqdm_settings["tqdm_position"] = 2
if not return_patterns:
print(
f"{WARNING_PREFIX}`return_patterns` should be "
"True when `sequential` is True, because patterns "
"will always be generated"
)
return_patterns = True
pipelines_dict = {}
if not use_tqdm:
iterator = models
else:
iterator = tqdm(models, total=len(models), position=0)
for model in iterator:
local_pipelines = []
sub_iterator = range(num_repeat)
if use_tqdm:
sub_iterator = tqdm(
sub_iterator,
total=num_repeat,
position=1,
leave=False,
)
for i in sub_iterator:
if is_fix and not is_buggy(i, model):
continue
local_config = fetch_config(model)
local_workplace = os.path.join(workplace, model, str(i))
local_config.setdefault("workplace", local_workplace)
m = pipeline_base(**local_config)
m.fit(data, cuda=cuda)
local_pipelines.append(m)
pipelines_dict[model] = local_pipelines
else:
if num_jobs <= 1:
print(
f"{WARNING_PREFIX}we suggest setting `sequential` "
f"to True when `num_jobs` is {num_jobs}"
)
# data
data_folder = Experiment.dump_data_bundle(
data.x_train,
data.y_train,
data.x_valid,
data.y_valid,
workplace=workplace,
)
# experiment
experiment = Experiment(
num_jobs=num_jobs,
available_cuda_list=available_cuda_list,
resource_config=resource_config,
)
for model in models:
for i in range(num_repeat):
if is_fix and not is_buggy(i, model):
continue
local_config = fetch_config(model)
experiment.add_task(
model=model,
compress=compress,
root_workplace=workplace,
workplace_key=(model, str(i)),
config=local_config,
data_folder=data_folder,
**(task_meta_kwargs or {}),
)
# finalize
results = experiment.run_tasks(use_tqdm=use_tqdm)
if return_patterns:
pipelines_dict = load_experiment_results(results, pipeline_base)
patterns = None
if return_patterns:
assert pipelines_dict is not None
if predict_config is None:
predict_config = {}
patterns = {
model: [m.to_pattern(**predict_config) for m in pipelines]
for model, pipelines in pipelines_dict.items()
}
cf_data = None
if patterns is not None:
m = patterns[models[0]][0].model
if isinstance(m, CarefreePipeline):
cf_data = m.cf_data
return RepeatResult(cf_data, experiment, pipelines_dict, patterns)
def pack_repeat(
workplace: str,
pipeline_base: Type[SimplePipeline],
*,
num_jobs: int = 1,
) -> List[str]:
sub_workplaces = []
for stuff in sorted(os.listdir(workplace)):
stuff_path = os.path.join(workplace, stuff)
if not os.path.isdir(stuff_path):
continue
sub_workplaces.append(get_latest_workplace(stuff_path))
rs = Parallel(num_jobs).grouped(pipeline_base.pack, sub_workplaces).ordered_results
return sum(rs, [])
def pick_from_repeat_and_pack(
workplace: str,
pipeline_base: Type[SimplePipeline],
*,
num_pick: int,
num_jobs: int = 1,
) -> List[str]:
score_workplace_pairs = []
for stuff in sorted(os.listdir(workplace)):
stuff_path = os.path.join(workplace, stuff)
if not os.path.isdir(stuff_path):
continue
sub_workplace = get_latest_workplace(stuff_path)
assert sub_workplace is not None, "internal error occurred"
score_path = os.path.join(sub_workplace, CHECKPOINTS_FOLDER, SCORES_FILE)
with open(score_path, "r") as f:
score = float(max(json.load(f).values()))
score_workplace_pairs.append((score, sub_workplace))
score_workplace_pairs = sorted(score_workplace_pairs)[::-1]
sub_workplaces = [pair[1] for pair in score_workplace_pairs[:num_pick]]
rs = Parallel(num_jobs).grouped(pipeline_base.pack, sub_workplaces).ordered_results
return sum(rs, [])
def make_toy_model(
model: str = "fcnn",
config: Optional[Dict[str, Any]] = None,
*,
pipeline_type: str = "ml.carefree",
is_classification: bool = False,
cf_data_config: Optional[Dict[str, Any]] = None,
data_tuple: Optional[Tuple[np.ndarray, np.ndarray]] = None,
cuda: Optional[str] = None,
) -> SimplePipeline:
if config is None:
config = {}
if data_tuple is not None:
x_np, y_np = data_tuple
else:
if not is_classification:
x, y = [[0]], [[1.0]]
else:
x, y = [[0], [1]], [[1], [0]]
x_np, y_np = map(np.array, [x, y])
model_config = {}
if model in ("fcnn", "tree_dnn"):
model_config = {
"hidden_units": [100],
"batch_norm": False,
"dropout": 0.0,
}
base_config = {
"core_name": model,
"core_config": model_config,
"output_dim": 1 + int(is_classification),
"num_epoch": 2,
"max_epoch": 4,
}
updated = update_dict(config, base_config)
m = SimplePipeline.make(pipeline_type, updated)
assert isinstance(m, SimplePipeline)
if cf_data_config is None:
cf_data_config = {}
cf_data_config = update_dict(
cf_data_config,
dict(
valid_columns=list(range(x_np.shape[1])),
label_process_method="identical",
),
)
data = MLData.with_cf_data(
x_np,
y_np,
is_classification=is_classification,
cf_data_config=cf_data_config,
valid_split=0.0,
)
m.fit(data, cuda=cuda)
return m
__all__ = [
"register_core",
"evaluate",
"task_loader",
"load_experiment_results",
"repeat_with",
"pack_repeat",
"pick_from_repeat_and_pack",
"make_toy_model",
"ModelPattern",
"EnsemblePattern",
]
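# A minimal usage sketch relying only on helpers defined in this module:
# `make_toy_model` builds and fits a tiny regression pipeline on a two-point
# dataset, which is handy for smoke-testing an installation. Because of the
# relative imports above this is meant to run from inside the installed
# package (e.g. via `python -m`); anything beyond the defaults (real data,
# GPUs) is out of scope for this sketch.
if __name__ == "__main__":
    toy = make_toy_model("fcnn")
    print(type(toy).__name__)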
|
27387
|
import os
import torch
import torch.nn as nn
import numpy as np
import pickle
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
def set_input(self, input):
self.input = input
def forward(self):
pass
# used in test time, no backprop
def test(self):
pass
def get_image_paths(self):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
# helper saving function that can be used by subclasses
def save_network(self, network, network_label, epoch_label, gpu_ids, epoch, total_steps):
save_filename = '%s_%s.pth' % (epoch_label, network_label)
save_infoname = '%s.pkl' % (epoch_label)
save_path = os.path.join(self.save_dir, save_filename)
save_infoname = os.path.join(self.save_dir, save_infoname)
torch.save(network.cpu().state_dict(), save_path)
network.cuda()
info = {'epoch':epoch, 'total_steps':total_steps}
filehandler = open(save_infoname, "wb")
pickle.dump(info, filehandler)
filehandler.close()
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label):
save_filename = '%s_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(self.save_dir, save_filename)
if os.path.exists(save_path):
network.load_state_dict(torch.load(save_path))
print("Found checkpoints. Network loaded.")
else:
print("Not found checkpoints. Network from scratch.")
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
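# A minimal sketch of how a concrete model is expected to plug into BaseModel,
# using a throwaway options namespace. The attribute names on `opt` mirror the
# ones `initialize` reads above; everything else here is illustrative only and
# runs on CPU (gpu_ids is left empty).
if __name__ == "__main__":
    from argparse import Namespace

    class TinyModel(BaseModel):
        def initialize(self, opt):
            BaseModel.initialize(self, opt)
            self.net = nn.Linear(4, 2)

        def forward(self):
            return self.net(self.input)

    opt = Namespace(gpu_ids=[], isTrain=False, checkpoints_dir="./checkpoints", name="tiny")
    model = TinyModel()
    model.initialize(opt)
    model.set_input(torch.randn(1, 4))
    print(model.forward().shape)                    # torch.Size([1, 2])
    model.load_network(model.net, "net", "latest")  # prints "Not found checkpoints..." on a fresh run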
|
27404
|
import numpy as np
from gym import spaces
from agents import SimpleAgentClass
# Create agents for the CMA-ES, NEAT and WANN agents
# defined in the weight-agnostic paper repo:
# https://github.com/google/brain-tokyo-workshop/tree/master/WANNRelease/
# -------------------------------------------------------------------
# Here begins copy/paste from WANNRelease code linked above
def weightedRandom(weights):
"""Returns random index, with each choices chance weighted
Args:
weights - (np_array) - weighting of each choice
[N X 1]
Returns:
i - (int) - chosen index
"""
minVal = np.min(weights)
weights = weights - minVal # handle negative vals
cumVal = np.cumsum(weights)
pick = np.random.uniform(0, cumVal[-1])
for i in range(len(weights)):
if cumVal[i] >= pick:
return i
def selectAct(action, actSelect):
"""Selects action based on vector of actions
Single Action:
- Hard: a single action is chosen based on the highest index
- Prob: a single action is chosen probabilistically with higher values
more likely to be chosen
We aren't selecting a single action:
- Softmax: a softmax normalized distribution of values is returned
- Default: all actions are returned
Args:
action - (np_array) - vector weighting each possible action
[N X 1]
Returns:
i - (int) or (np_array) - chosen index
[N X 1]
"""
if actSelect == 'softmax':
action = softmax(action)
elif actSelect == 'prob':
action = weightedRandom(np.sum(action,axis=0))
else:
action = action.flatten()
return action
def act(weights, aVec, nInput, nOutput, inPattern):
"""Returns FFANN output given a single input pattern
If the variable weights is a vector it is turned into a square weight matrix.
Allows the network to return the result of several samples at once if given a matrix instead of a vector of inputs:
Dim 0 : individual samples
Dim 1 : dimensionality of pattern (# of inputs)
Args:
weights - (np_array) - ordered weight matrix or vector
[N X N] or [N**2]
aVec - (np_array) - activation function of each node
[N X 1] - stored as ints (see applyAct in ann.py)
nInput - (int) - number of input nodes
nOutput - (int) - number of output nodes
inPattern - (np_array) - input activation
[1 X nInput] or [nSamples X nInput]
Returns:
output - (np_array) - output activation
[1 X nOutput] or [nSamples X nOutput]
"""
# Turn weight vector into weight matrix
if np.ndim(weights) < 2:
nNodes = int(np.sqrt(np.shape(weights)[0]))
wMat = np.reshape(weights, (nNodes, nNodes))
else:
nNodes = np.shape(weights)[0]
wMat = weights
wMat[np.isnan(wMat)]=0
# Vectorize input
if np.ndim(inPattern) > 1:
nSamples = np.shape(inPattern)[0]
else:
nSamples = 1
# Run input pattern through ANN
nodeAct = np.zeros((nSamples,nNodes))
nodeAct[:,0] = 1 # Bias activation
nodeAct[:,1:nInput+1] = inPattern
# Propagate signal through hidden to output nodes
iNode = nInput+1
for iNode in range(nInput+1,nNodes):
rawAct = np.dot(nodeAct, wMat[:,iNode]).squeeze()
nodeAct[:,iNode] = applyAct(aVec[iNode], rawAct)
#print(nodeAct)
output = nodeAct[:,-nOutput:]
return output
def applyAct(actId, x):
"""Returns value after an activation function is applied
Lookup table to allow activations to be stored in numpy arrays
case 1 -- Linear
case 2 -- Unsigned Step Function
case 3 -- Sin
case 4 -- Gaussian with mean 0 and sigma 1
case 5 -- Hyperbolic Tangent [tanh] (signed)
case 6 -- Sigmoid unsigned [1 / (1 + exp(-x))]
case 7 -- Inverse
case 8 -- Absolute Value
case 9 -- Relu
case 10 -- Cosine
case 11 -- Squared
Args:
actId - (int) - key to look up table
x - (???) - value to be input into activation
[? X ?] - any type or dimensionality
Returns:
output - (float) - value after activation is applied
[? X ?] - same dimensionality as input
"""
if actId == 1: # Linear
value = x
if actId == 2: # Unsigned Step Function
value = 1.0*(x>0.0)
#value = (np.tanh(50*x/2.0) + 1.0)/2.0
elif actId == 3: # Sin
value = np.sin(np.pi*x)
elif actId == 4: # Gaussian with mean 0 and sigma 1
value = np.exp(-np.multiply(x, x) / 2.0)
elif actId == 5: # Hyperbolic Tangent (signed)
value = np.tanh(x)
elif actId == 6: # Sigmoid (unsigned)
value = (np.tanh(x/2.0) + 1.0)/2.0
elif actId == 7: # Inverse
value = -x
elif actId == 8: # Absolute Value
value = abs(x)
elif actId == 9: # Relu
value = np.maximum(0, x)
elif actId == 10: # Cosine
value = np.cos(np.pi*x)
elif actId == 11: # Squared
value = x**2
else:
value = x
return value
# End of copypaste
# -------------------------------------------------------------------
# This action is original to this repository
def create_wann_agent(agent_path, agent_type, env):
"""
Load and return a WANN agent.
The agent has a function `get_action` that takes in
an observation and returns an appropriate action.
"""
np_data = np.load(agent_path)
wMat = np_data["wMat"]
aVec = np_data["aVec"]
# TODO support for other input spaces?
nInput = env.observation_space.shape[0]
nOutput = 0
action_type = "all"
if isinstance(env.action_space, spaces.Box):
nOutput = env.action_space.shape[0]
elif isinstance(env.action_space, spaces.Discrete):
nOutput = env.action_space.n
action_type = "prob"
else:
raise ValueError("Unsupported action space")
def get_action(obs):
# Includes batch-size
output = act(wMat, aVec, nInput, nOutput, obs)
action = selectAct(output, action_type)
return action
agent = SimpleAgentClass(lambda obs: get_action(obs))
return agent
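# A tiny self-contained check of the FFANN forward pass above: a 3-node net
# (bias, one input, one tanh output) whose flattened weight vector sets the
# bias->output weight to 0.5 and the input->output weight to 1.0, so the
# output for input 0.3 should equal tanh(0.8). The numbers are illustrative only.
if __name__ == "__main__":
    toy_weights = np.zeros(9)
    toy_weights[2] = 0.5           # wMat[0, 2]: bias -> output
    toy_weights[5] = 1.0           # wMat[1, 2]: input -> output
    toy_aVec = np.array([1, 1, 5]) # activation id 5 == tanh for the output node
    out = act(toy_weights, toy_aVec, nInput=1, nOutput=1, inPattern=np.array([[0.3]]))
    print(out, np.tanh(0.8))       # both ~0.664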
|
27442
|
import json
from pathlib import Path
import numpy as np
from matplotlib import path
current_dir = Path(__file__).parent
__all__ = list(p.stem for p in current_dir.glob("*.json"))
def __getattr__(name: str) -> path.Path:
file_path = current_dir / (name + ".json")
if file_path.exists():
data = json.loads(file_path.read_text())
return path.Path(
vertices=data["vertices"], codes=np.array(data["codes"], np.uint8)
)
raise AttributeError(
f"No {name}.json file found in {current_dir.absolute()}."
)
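# A minimal sketch of the JSON layout this loader expects, built in memory with
# matplotlib's standard path codes (1 = MOVETO, 2 = LINETO, 79 = CLOSEPOLY).
# The triangle data below is illustrative only; real shapes live as *.json
# files next to this module and are resolved lazily by __getattr__ above.
if __name__ == "__main__":
    sample = {
        "vertices": [[0.0, 0.0], [1.0, 0.0], [0.5, 1.0], [0.0, 0.0]],
        "codes": [1, 2, 2, 79],
    }
    demo = path.Path(vertices=sample["vertices"], codes=np.array(sample["codes"], np.uint8))
    print(demo.contains_point((0.5, 0.3)))  # True: the point lies inside the triangle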
|
27479
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import UserProfile
class UserRegistrationForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super(UserRegistrationForm, self).__init__(*args, **kwargs)
self.fields['username'].help_text = ''
self.fields['password1'].help_text = ''
self.fields['password2'].help_text = ''
class Meta:
model = User
fields = (
'username',
'email',
'password1',
'password2'
)
def save(self):
user = User.objects.create_user(username=self.cleaned_data['username'], password=self.cleaned_data['<PASSWORD>'])
user.email = self.cleaned_data['email']
user.save()
return user
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('profile_pic', 'bio')
class ProfileEditForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['profile_pic', 'bio']
|
27593
|
import logging, operator, functools, itertools, array, ptypes
from ptypes import *
from .headers import *
from . import portable
class Signature(pint.enum, uint16):
# We'll just store all signature types here
_values_ = [
('IMAGE_DOS_SIGNATURE', 0x5a4d),
('IMAGE_OS2_SIGNATURE', 0x454e),
('IMAGE_OS2_SIGNATURE_LE', 0x454c),
('IMAGE_NT_SIGNATURE', 0x4550),
]
class IMAGE_DOS_HEADER(pstruct.type):
class e_magic(Signature): pass
class Relocation(pstruct.type):
_fields_ = [
( uint16, 'offset' ),
( uint16, 'segment' ),
]
def linear(self):
return self['segment'].int()*0x10 + self['offset'].int()
def decode(self, **attrs):
p = self.getparent(ptype.boundary)
attrs.setdefault('offset', p['Stub'].getoffset()+self.linear())
return self.new(ptype.undefined, **attrs)
def summary(self):
seg, offset = self['segment'], self['offset']
return "(segment:offset) {:04x}:{:04x} (linear) {:05x}".format(seg.int(), offset.int(), (seg.int() * 0x10 + offset.int()) & 0xfffff)
def repr(self):
return self.summary()
class Oem(pstruct.type):
_fields_ = [
( dyn.array(uint16, 4), 'e_reserved' ),
( uint16, 'e_oemid' ),
( uint16, 'e_oeminfo' ),
( dyn.array(uint16, 10), 'e_reserved2' ),
]
# FIXME: this implementation should be properly tested, as there's a chance it could be tampered with
def __e_oem(self):
res = self['e_lfarlc'].li
fields = ['e_magic', 'e_cblp', 'e_cp', 'e_crlc', 'e_cparhdr', 'e_minalloc', 'e_maxalloc', 'e_ss', 'e_sp', 'e_csum', 'e_ip', 'e_cs', 'e_lfarlc', 'e_ovno']
# if our calculated size for the field directly matches the Oem
# structure, then this for sure is going to be a PECOFF executable.
t = IMAGE_DOS_HEADER.Oem
if res.int() == sum(self[fld].li.size() for fld in fields) + t().a.size() + 4:
return t
# otherwise we need to pad it with whatever the input claims it should be
return dyn.block(max(0, res.int() - sum(self[fld].li.size() for fld in fields)))
def __e_lfanew(self):
paragraphs, relocations = self['e_cparhdr'].li, self['e_lfarlc'].li
fields = ['e_magic', 'e_cblp', 'e_cp', 'e_crlc', 'e_cparhdr', 'e_minalloc', 'e_maxalloc', 'e_ss', 'e_sp', 'e_csum', 'e_ip', 'e_cs', 'e_lfarlc', 'e_ovno', 'e_oem']
# if everything matches, then there's a pointer here for PECOFF executables
if 0x10 * paragraphs.int() == relocations.int() == sum(self[fld].li.size() for fld in fields) + 4:
return dyn.rpointer(Next, self, pint.uint32_t)
# otherwise, there isn't anything here.
return pint.uint_t
def __e_rlc(self):
res = self['e_crlc'].li
return dyn.array(IMAGE_DOS_HEADER.Relocation, res.int())
def __e_parhdr(self):
res = 0x10 * self['e_cparhdr'].li.int()
fields = ['e_magic', 'e_cblp', 'e_cp', 'e_crlc', 'e_cparhdr', 'e_minalloc', 'e_maxalloc', 'e_ss', 'e_sp', 'e_csum', 'e_ip', 'e_cs', 'e_lfarlc', 'e_ovno']
fields+= ['e_oem', 'e_rlc', 'e_lfanew']
return dyn.block(res - sum(self[fld].li.size() for fld in fields))
def filesize(self):
res = self['e_cp'].li.int()
if res > 0:
cp = res - 1
return cp * 0x200 + self['e_cblp'].li.int()
return 0
def headersize(self):
res = self['e_cparhdr'].li
return res.int() * 0x10
def datasize(self):
res = self.headersize()
return (self.filesize() - res) if res > 0 else 0
def __e_lfarlc(self):
res = self['e_crlc'].li
t = dyn.array(IMAGE_DOS_HEADER.Relocation, res.int())
return dyn.rpointer(t, self, uint16)
#e_cparhdr << 4
#e_cp << 9
_fields_ = [
( e_magic, 'e_magic' ),
( uint16, 'e_cblp' ), # bytes in last page / len mod 512 / UsedBytesInLastPage
( uint16, 'e_cp' ), # pages / 512b pagees / FileSizeInPages
( uint16, 'e_crlc' ), # relocation count / reloc entries count / NumberOfRelocationItems
( uint16, 'e_cparhdr' ), # header size in paragraphs (paragraph=0x10) / number of paragraphs before image / HeaderSizeInParagraphs
( uint16, 'e_minalloc' ), # required paragraphs / minimum number of bss paragraphs / MinimumExtraParagraphs
( uint16, 'e_maxalloc' ), # requested paragraphs / maximum number of bss paragraphs / MaximumExtraParagraphs
( uint16, 'e_ss' ), # ss / stack of image / InitialRelativeSS
( uint16, 'e_sp' ), # sp / sp of image / InitialSP
( uint16, 'e_csum' ), # checksum / checksum (ignored) / Checksum
( uint16, 'e_ip' ), # ip / ip of entry / InitialIP
( uint16, 'e_cs' ), # cs / cs of entry / InitialRelativeCS
( __e_lfarlc, 'e_lfarlc' ), # relocation table
( uint16, 'e_ovno'), # overlay number
#( uint32, 'EXE_SYM_TAB'), # from inc/exe.inc
# all the data below here changes based on the linker:
# Borland, ARJ, LZEXE, PKLITE, LHARC, LHA, CRUNCH, BSA, LARC, etc..
( __e_oem, 'e_oem'), # oem and reserved data
( __e_lfanew, 'e_lfanew'),
( __e_rlc, 'e_rlc' ), # relocations?
( __e_parhdr, 'e_parhdr'),
]
### What file format the next header is
class NextHeader(ptype.definition):
cache = {}
### What file format the data is
class NextData(ptype.definition):
cache = {}
class Next(pstruct.type):
def __Header(self):
t = self['Signature'].li.serialize()
return NextHeader.withdefault(t, type=t)
def __Data(self):
t = self['Signature'].li.serialize()
return NextData.withdefault(t, type=t)
_fields_ = [
(Signature, 'Signature'),
(__Header, 'Header'),
(__Data, 'Data'),
]
def Header(self):
return self['Header']
def Data(self):
return self['Data']
## Portable Executable (PE)
@NextHeader.define
class IMAGE_NT_HEADERS(pstruct.type, Header):
type = b'PE'
def __Padding(self):
'''Figure out the PE header size and pad according to SizeOfHeaders'''
p = self.getparent(File)
sz = p['Header']['e_lfanew'].li.int()
opt = self['OptionalHeader'].li
f = functools.partial(operator.getitem, self)
res = map(f, ('SignaturePadding', 'FileHeader', 'OptionalHeader', 'DataDirectory', 'Sections'))
res = sum(map(operator.methodcaller('blocksize'), res))
res += 2
return dyn.block(opt['SizeOfHeaders'].int() - res - sz)
def __DataDirectory(self):
cls = self.__class__
length = self['OptionalHeader'].li['NumberOfRvaAndSizes'].int()
if length > 0x10: # XXX
logging.warning("{:s} : OptionalHeader.NumberOfRvaAndSizes specified >0x10 entries ({:#x}) for the DataDirectory. Assuming the maximum of 0x10.".format('.'.join((cls.__module__, cls.__name__)), length))
length = 0x10
return dyn.clone(portable.DataDirectory, length=length)
def __Sections(self):
header = self['FileHeader'].li
length = header['NumberOfSections'].int()
return dyn.clone(portable.SectionTableArray, length=length)
_fields_ = [
(uint16, 'SignaturePadding'),
(portable.IMAGE_FILE_HEADER, 'FileHeader'),
(portable.IMAGE_OPTIONAL_HEADER, 'OptionalHeader'),
(__DataDirectory, 'DataDirectory'),
(__Sections, 'Sections'),
(__Padding, 'Padding'),
]
def FileHeader(self):
'''Return the FileHeader which contains a number of sizes used by the file.'''
return self['FileHeader']
def getaddressbyoffset(self, offset):
section = self['Sections'].getsectionbyoffset(offset)
return section.getaddressbyoffset(offset)
def getoffsetbyaddress(self, address):
section = self['Sections'].getsectionbyaddress(address)
return section.getoffsetbyaddress(address)
def loadconfig(self):
return self['DataDirectory'][10]['Address'].d.li
def tls(self):
return self['DataDirectory'][9]['Address'].d.li
def relocateable(self):
characteristics = self['OptionalHeader']['DllCharacteristics']
return 'DYNAMIC_BASE' in characteristics
def has_seh(self):
characteristics = self['OptionalHeader']['DllCharacteristics']
return 'NO_SEH' not in characteristics
def has_nx(self):
characteristics = self['OptionalHeader']['DllCharacteristics']
return 'NX_COMPAT' in characteristics
def has_integrity(self):
characteristics = self['OptionalHeader']['DllCharacteristics']
return 'FORCE_INTEGRITY' in characteristics
def is64(self):
return self['OptionalHeader'].li.is64()
def checksum(self):
p = self.getparent(File)
res = self['OptionalHeader']['Checksum']
# Make a copy of our checksum initialized to 0
field = res.copy(offset=res.offset - p.offset).set(0)
# Make a copy of our File header, and overwrite the original
# checksum with 0 so that we can calculate what the checksum
# is supposed to be.
data = bytearray(p.serialize())
data[field.offset : field.offset + field.size()] = field.serialize()
# Pad the data so that it's a multiple of a dword
res = 4 - len(data) % 4
padding = b'\0' * (res % 4)
# Calculate the 16-bit checksum, picking whichever array typecode is 2 bytes wide on this platform
res = sum(array.array('I' if len(array.array('I', 4 * b'\0')) > 1 else 'H', bytes(data) + padding))
checksum = len(data)
checksum += res & 0xffff
checksum += res // 0x10000
checksum += checksum // 0x10000
checksum &= 0xffff
# Clamp the result to 32-bits
return checksum & 0xffffffff
def Machine(self):
return self['FileHeader']['Machine']
Portable = IMAGE_NT_HEADERS64 = IMAGE_NT_HEADERS
class SegmentEntry(pstruct.type):
'''
Base class for a section entry that both memory-backed and file-backed
entries inherit from.
'''
def properties(self):
res = super(SegmentEntry, self).properties()
if hasattr(self, 'Section'):
res['SectionName'] = self.Section['Name'].str()
return res
class MemorySegmentEntry(SegmentEntry):
'''
This SegmentEntry represents the structure of a segment that has already
been mapped into memory. This honors the SectionAlignment field from
the OptionalHeader when padding the segment's data.
'''
noncontiguous = True
def __Padding(self):
p = self.getparent(Next)
header = p.Header()
optionalheader = header['OptionalHeader'].li
return dyn.align(optionalheader['SectionAlignment'].int(), undefined=True)
_fields_ = [
(__Padding, 'Padding'),
(lambda self: dyn.block(self.Section.getloadedsize()), 'Data'),
]
class FileSegmentEntry(SegmentEntry):
'''
This SegmentEntry represents the structure of a segment that is on the
disk and hasn't been mapped into memory. This honors the FileAlignment
field from the OptionalHeader when padding the segment's data.
'''
def __Padding(self):
p = self.getparent(Next)
header = p.Header()
optionalheader = header['OptionalHeader'].li
return dyn.align(optionalheader['FileAlignment'].int(), undefined=True)
_fields_ = [
(__Padding, 'Padding'),
(lambda self: dyn.block(self.Section.getreadsize()), 'Data'),
]
class SegmentTableArray(parray.type):
'''
This is a simple array of segment entries where each entry is individually
tied directly to the SectionTableEntry that it is associated with. Each
entry is aligned depending on whether it is being loaded from disk or has
been already loaded into memory.
'''
def _object_(self):
p = self.getparent(Next)
header = p.Header()
sections = header['Sections']
entry = MemorySegmentEntry if isinstance(self.source, ptypes.provider.memorybase) else FileSegmentEntry
return dyn.clone(entry, Section=sections[len(self.value)])
@NextData.define
class IMAGE_NT_DATA(pstruct.type, Header):
type = b'PE'
def __Segments(self):
header = self.p.Header()
fileheader = header['FileHeader'].li
# Warn the user if we're unable to determine whether the source is a
# file-backed or memory-backed provider.
if all(not isinstance(self.source, item) for item in {ptypes.provider.memorybase, ptypes.provider.fileobj}):
cls = self.__class__
logging.warning("{:s} : Unknown ptype source.. treating as a fileobj : {!r}".format('.'.join((cls.__module__, cls.__name__)), self.source))
return dyn.clone(SegmentTableArray, length=fileheader['NumberOfSections'].int())
def __CertificatePadding(self):
header = self.p.Header()
if len(header['DataDirectory']) < 4:
return ptype.undefined
res = header['DataDirectory'][4]
offset, size = res['Address'].int(), res['Size'].int()
if offset == 0 or isinstance(self.source, ptypes.provider.memorybase):
return ptype.undefined
if isinstance(self.source, ptypes.provider.bounded) and offset < self.source.size():
res = self['Segments'].li.getoffset() + self['Segments'].blocksize()
return dyn.block(offset - res)
return ptype.undefined
def __Certificate(self):
header = self.p.Header()
if len(header['DataDirectory']) < 4:
return ptype.undefined
res = header['DataDirectory'][4]
offset, size = res['Address'].int(), res['Size'].int()
if offset == 0 or isinstance(self.source, ptypes.provider.memorybase):
return ptype.undefined
if isinstance(self.source, ptypes.provider.bounded) and offset < self.source.size():
return dyn.clone(parray.block, _object_=portable.headers.Certificate, blocksize=lambda self, size=size: size)
return ptype.undefined
_fields_ = [
(__Segments, 'Segments'),
(__CertificatePadding, 'CertificatePadding'),
(__Certificate, 'Certificate'),
]
@NextHeader.define
class DosExtender(pstruct.type, Header):
type = b'DX'
_fields_ = [
(word, 'MinRModeParams'),
(word, 'MaxRModeParams'),
(word, 'MinIBuffSize'),
(word, 'MaxIBuffSize'),
(word, 'NIStacks'),
(word, 'IStackSize'),
(dword, 'EndRModeOffset'),
(word, 'CallBuffSize'),
(word, 'Flags'),
(word, 'UnprivFlags'),
(dyn.block(104), 'Reserv'),
]
@NextHeader.define
class PharLap(pstruct.type, Header):
type = b'MP'
_fields_ = [
(word, 'SizeRemaind'),
(word, 'ImageSize'),
(word, 'NRelocs'),
(word, 'HeadSize'),
(word, 'MinExtraPages'),
(dword, 'ESP'),
(word, 'CheckSum'),
(dword, 'EIP'),
(word, 'FirstReloc'),
(word, 'NOverlay'),
(word, 'Reserved'),
]
class SegInfo(pstruct.type):
_fields_ = [
(word, 'Selector'),
(word, 'Flags'),
(dword, 'BaseOff'),
(dword, 'MinAlloc'),
]
class RunTimeParams(DosExtender): pass
class RepeatBlock(pstruct.type):
_fields_ = [
(word, 'Count'),
(lambda s: dyn.block(s['Count'].li.int()), 'String'),
]
@NextHeader.define
class PharLap3(PharLap, Header):
type = b'P3'
class OffsetSize(pstruct.type):
def __Offset(self):
t = getattr(self, '_object_', ptype.block)
return dyn.rpointer(lambda _: dyn.clone(t, blocksize=lambda _:self['Size'].li.int()), self.getparent(PharLap3), dword)
_fields_ = [
(__Offset, 'Offset'),
(dword, 'Size'),
]
def summary(self):
return '{:#x}:{:+#x}'.format(self['Offset'].int(), self['Size'].int())
_fields_ = [
(word, 'Level'),
(word, 'HeaderSize'),
(dword, 'FileSize'),
(word, 'CheckSum'),
(dyn.clone(OffsetSize, _object_=PharLap.RunTimeParams), 'RunTimeParams'),
(OffsetSize, 'Reloc'),
(dyn.clone(OffsetSize, _object_=dyn.clone(parray.block, _object_=PharLap.SegInfo)), 'SegInfo'),
(word, 'SegEntrySize'),
(OffsetSize, 'Image'),
(OffsetSize, 'SymTab'),
(OffsetSize, 'GDTLoc'),
(OffsetSize, 'LDTLoc'),
(OffsetSize, 'IDTLoc'),
(OffsetSize, 'TSSLoc'),
(dword, 'MinExtraPages'),
(dword, 'MaxExtraPages'),
(dword, 'Base'),
(dword, 'ESP'),
(word, 'SS'),
(dword, 'EIP'),
(word, 'CS'),
(word, 'LDT'),
(word, 'TSS'),
(word, 'Flags'),
(dword, 'MemReq'),
(dword, 'Checksum32'),
(dword, 'StackSize'),
(dyn.block(0x100), 'Reserv'),
]
@NextHeader.define
class NeHeader(pstruct.type):
type = b'NE'
class NE_Pointer(pstruct.type):
_fields_ = [
( uint16, 'Index' ),
( uint16, 'Offset' )
]
class NE_Version(pstruct.type):
_fields_ = [
( uint8, 'Minor' ),
( uint8, 'Major' )
]
_fields_ = [
( uint8, 'LinkVersion' ),
( uint8, 'LinkRevision' ),
( uint16, 'EntryOffset' ),
( uint16, 'EntryLength' ),
( uint32, 'CRC' ),
( uint8, 'ProgramFlags' ),
( uint8, 'ApplicationFlags' ),
( uint8, 'AutoDataSegmentIndex' ),
( uint16, 'HeapSize' ),
( uint16, 'StackSize' ),
( NE_Pointer, 'EntryPointer' ),
( NE_Pointer, 'StackPointer' ),
( uint16, 'SegmentCount' ),
( uint16, 'ModuleCount' ),
( uint16, 'NRNamesSize' ),
( uint16, 'SegmentOffset' ),
( uint16, 'ResourceOffset' ),
( uint16, 'RNamesOffset' ),
( uint16, 'ModuleOffset' ),
( uint16, 'ImportOffset' ),
( uint32, 'NRNamesOffset' ),
( uint16, 'MoveableEntryPointcount' ),
( uint16, 'AlignmentSize' ),
( uint16, 'ResourceCount' ),
( uint8, 'TargetOS' ),
( uint8, 'OS2_Flags' ),
( uint16, 'ReturnThunksOffset' ),
( uint16, 'SegmentThunksOffset' ),
( uint16, 'SwapMinimumSize' ),
( NE_Version, 'ExpectedVersion' )
]
### FileBase
class File(pstruct.type, ptype.boundary):
def __Padding(self):
dos = self['Header'].li
ofs = dos['e_lfarlc'].int()
return dyn.block(ofs - self.blocksize()) if ofs > 0 else dyn.block(0)
def __Relocations(self):
dos = self['Header'].li
ofs = dos['e_lfarlc'].int()
return dyn.array(Dos.Relocation, dos['e_crlc'].li.int() if ofs == self.blocksize() else 0)
def __Extra(self):
res = self['Header'].li.headersize()
if res > 0:
return dyn.block(res - self.blocksize())
return ptype.undefined
def __Stub(self):
# everything up to e_lfanew
dos = self['Header'].li
res = dos['e_lfanew'].int()
if res > 0:
return dyn.block(res - self.blocksize())
return ptype.undefined
def __Next(self):
dos = self['Header'].li
if dos['e_lfanew'].int() == self.blocksize():
return Next
return dyn.block(dos.filesize() - self.blocksize())
def __NotLoaded(self):
sz = self['Header'].blocksize()
sz+= self['Extra'].blocksize()
sz+= self['Stub'].blocksize()
sz+= self['Next'].blocksize()
if isinstance(self.source, ptypes.provider.bounded):
return dyn.block(self.source.size() - sz)
return ptype.undefined
_fields_ = [
(IMAGE_DOS_HEADER, 'Header'),
(__Extra, 'Extra'),
(__Stub, 'Stub'),
(__Next, 'Next'),
#(__NotLoaded, 'NotLoaded'),
]
if __name__ == '__main__':
import sys
import ptypes, pecoff.Executable
if len(sys.argv) == 2:
filename = sys.argv[1]
ptypes.setsource(ptypes.prov.file(filename, 'rb'))
z = pecoff.Executable.File()
z=z.l
else:
filename = 'obj/kernel32.dll'
ptypes.setsource(ptypes.prov.file(filename, 'rb'))
for x in range(10):
print(filename)
try:
z = pecoff.Executable.File()
z=z.l
break
except IOError:
pass
filename = '../'+filename
v=z['next']['header']
sections = v['Sections']
exports = v['DataDirectory'][0]
while exports['Address'].int() != 0:
exports = exports['Address'].d.l
print(exports.l)
break
imports = v['DataDirectory'][1]
while imports['Address'].int() != 0:
imports = imports['Address'].d.l
print(imports.l)
break
relo = v['DataDirectory'][5]['Address'].d.l
baseaddress = v['OptionalHeader']['ImageBase']
section = sections[0]
data = section.data().serialize()
for item in relo.filter(section):
for type, offset in item.getrelocations(section):
print(type, offset)
continue
|
27603
|
Candies = [int(x) for x in input("Enter the numbers with space: ").split()]
extraCandies = int(input("Enter the number of extra candies: "))
# A kid can end up with the most candies if its count plus the extra candies
# reaches the current maximum; compute the maximum once instead of per iteration.
most = max(Candies)
Output = ["True" if candy + extraCandies >= most else "False" for candy in Candies]
print(Output)
|
27649
|
import _recurrence_map
import numpy as np
def poincare_map(ts, ts2=None, threshold=0.1):
rec_dist = poincare_recurrence_dist(ts, ts2)
return (rec_dist < threshold).astype(int)
def poincare_recurrence_dist(ts, ts2=None):
if ts2 is None:
return _recurrence_map.recurrence_map(ts, ts)
else:
return _recurrence_map.recurrence_map(ts, ts2)
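# Hedged usage sketch (assumes the compiled _recurrence_map extension is importable):
#   ts = np.sin(np.linspace(0, 10 * np.pi, 500))
#   rp = poincare_map(ts, threshold=0.1)   # binary recurrence plot with entries in {0, 1}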
|
27659
|
import os
from test import test_support
# Skip this test if _tkinter does not exist.
test_support.import_module('_tkinter')
this_dir = os.path.dirname(os.path.abspath(__file__))
lib_tk_test = os.path.abspath(os.path.join(this_dir, '..', 'lib-tk', 'test'))
with test_support.DirsOnSysPath(lib_tk_test):
import runtktests
def test_main():
with test_support.DirsOnSysPath(lib_tk_test):
test_support.run_unittest(
*runtktests.get_tests(gui=False, packages=['test_ttk']))
if __name__ == '__main__':
test_main()
|
27666
|
from django.db import models
from lbworkflow.models import BaseWFObj
class Purchase(BaseWFObj):
title = models.CharField("Title", max_length=255)
reason = models.CharField("Reason", max_length=255)
def __str__(self):
return self.reason
class Item(models.Model):
purchase = models.ForeignKey(
Purchase,
on_delete=models.CASCADE,
)
name = models.CharField("Name", max_length=255)
qty = models.IntegerField("Qty")
note = models.CharField("Note", max_length=255)
class Meta:
verbose_name = "Purchase Item"
def __str__(self):
return self.name
|
27668
|
from sanic import Request, Sanic
from sanic.response import text
from sanic_ext import openapi
from sanic_ext.extensions.openapi.definitions import ExternalDocumentation
from utils import get_spec
def test_external_docs(app: Sanic):
@app.route("/test0")
@openapi.document("http://example.com/more", "Find more info here")
async def handler0(request: Request):
return text("ok")
@app.route("/test1")
@openapi.definition(
document=ExternalDocumentation(
"http://example.com/more", "Find more info here"
)
)
async def handler1(request: Request):
return text("ok")
@app.route("/test2")
@openapi.definition(document="http://example.com/more")
async def handler2(request: Request):
return text("ok")
@app.route("/test3")
async def handler3(request: Request):
"""
openapi:
---
summary: This is a summary.
externalDocs:
description: Find more info here
url: http://example.com/more
"""
return text("ok")
@app.route("/test4")
@openapi.document(
ExternalDocumentation("http://example.com/more", "Find more info here")
)
async def handler4(request: Request):
return text("ok")
spec = get_spec(app)
paths = spec["paths"]
assert len(paths) == 5
for i in range(5):
doc_obj = paths[f"/test{i}"]["get"]["externalDocs"]
assert doc_obj["url"] == "http://example.com/more"
if i != 2:
assert doc_obj["description"] == "Find more info here"
|
27738
|
from typing import Dict, List, Tuple, Union
from collections import OrderedDict
from functools import lru_cache
import warnings
from torch.utils.data import BatchSampler, DataLoader
from catalyst.core.callback import (
Callback,
CallbackWrapper,
IBackwardCallback,
ICriterionCallback,
IOptimizerCallback,
ISchedulerCallback,
)
from catalyst.typing import RunnerCriterion, RunnerOptimizer, RunnerScheduler
def get_original_callback(callback: Callback) -> Callback:
"""Docs."""
while isinstance(callback, CallbackWrapper):
callback = callback.callback
return callback
def callback_isinstance(callback: Callback, class_or_tuple) -> bool:
"""Check if callback is the same type as required ``class_or_tuple``
Args:
callback: callback to check
class_or_tuple: class_or_tuple to compare with
Returns:
bool: true if first object has the required type
"""
callback = get_original_callback(callback)
return isinstance(callback, class_or_tuple)
def sort_callbacks_by_order(
callbacks: Union[List, Dict, OrderedDict]
) -> "OrderedDict[str, Callback]":
"""Creates an sequence of callbacks and sort them.
Args:
callbacks: either list of callbacks or ordered dict
Returns:
sequence of callbacks sorted by ``callback order``
Raises:
TypeError: if `callbacks` is not one of `None`, `dict`, `OrderedDict`, `list`
"""
if callbacks is None:
output = OrderedDict()
elif isinstance(callbacks, (dict, OrderedDict)):
output = [(k, v) for k, v in callbacks.items()]
output = sorted(output, key=lambda x: x[1].order)
output = OrderedDict(output)
elif isinstance(callbacks, list):
output = sorted(callbacks, key=lambda x: x.order)
output = OrderedDict([(i, value) for i, value in enumerate(output)])
else:
raise TypeError(
f"Callbacks must be either Dict/OrderedDict or list, "
f"got {type(callbacks)}"
)
return output
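# Hedged usage sketch (constructor arguments elided; callback classes from catalyst.callbacks):
#   callbacks = {"criterion": CriterionCallback(...), "optimizer": OptimizerCallback(...)}
#   ordered = sort_callbacks_by_order(callbacks)  # OrderedDict with the same keys, sorted by ``.order``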
@lru_cache(maxsize=42)
def is_str_intersections(origin_string: str, strings: Tuple):
"""Docs."""
return any(x in origin_string for x in strings)
def get_loader_batch_size(loader: DataLoader):
"""Docs."""
batch_size = loader.batch_size
if batch_size is not None:
return batch_size
batch_size = loader.batch_sampler.batch_size
if batch_size is not None:
return batch_size
raise NotImplementedError(
"No `batch_size` found,"
"please specify it with `loader.batch_size`,"
"or `loader.batch_sampler.batch_size`"
)
def get_loader_num_samples(loader: DataLoader):
"""Docs."""
batch_size = get_loader_batch_size(loader)
if isinstance(loader.batch_sampler, BatchSampler):
# pytorch default item-based samplers
if loader.drop_last:
return (len(loader.dataset) // batch_size) * batch_size
else:
return len(loader.dataset)
else:
# pytorch batch-based samplers
return len(loader) * batch_size
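# Hedged usage sketch:
#   import torch
#   from torch.utils.data import TensorDataset
#   loader = DataLoader(TensorDataset(torch.arange(10.0)), batch_size=3, drop_last=True)
#   get_loader_batch_size(loader)    # -> 3
#   get_loader_num_samples(loader)   # -> 9, the ragged last batch is dropped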
def check_callbacks(
callbacks: OrderedDict,
criterion: RunnerCriterion = None,
optimizer: RunnerOptimizer = None,
scheduler: RunnerScheduler = None,
):
"""Docs."""
callback_exists = lambda callback_fn: any(
callback_isinstance(x, callback_fn) for x in callbacks.values()
)
if criterion is not None and not callback_exists(ICriterionCallback):
warnings.warn(
"No ``ICriterionCallback/CriterionCallback`` were found "
"while runner.criterion is not None."
"Do you compute the loss during ``runner.handle_batch``?"
)
if (criterion is not None or optimizer is not None) and not callback_exists(
IBackwardCallback
):
warnings.warn(
"No ``IBackwardCallback/BackwardCallback`` were found "
"while runner.criterion/optimizer is not None."
"Do you backward the loss during ``runner.handle_batch``?"
)
if optimizer is not None and not callback_exists(IOptimizerCallback):
warnings.warn(
"No ``IOptimizerCallback/OptimizerCallback`` were found "
"while runner.optimizer is not None."
"Do run optimisation step pass during ``runner.handle_batch``?"
)
if scheduler is not None and not callback_exists(ISchedulerCallback):
warnings.warn(
"No ``ISchedulerCallback/SchedulerCallback`` were found "
"while runner.scheduler is not None."
"Do you make scheduler step during ``runner.handle_batch``?"
)
__all__ = [
"get_original_callback",
"callback_isinstance",
"check_callbacks",
"is_str_intersections",
"get_loader_batch_size",
"get_loader_num_samples",
"sort_callbacks_by_order",
]
|
27744
|
from ..utils.util import ObnizUtil
class ObnizMeasure:
def __init__(self, obniz):
self.obniz = obniz
self._reset()
def _reset(self):
self.observers = []
def echo(self, params):
err = ObnizUtil._required_keys(
params, ["io_pulse", "pulse", "pulse_width", "io_echo", "measure_edges"]
)
if err:
raise Exception(
"Measure start param '" + err + "' required, but not found "
)
self.params = ObnizUtil._key_filter(
params,
[
"io_pulse",
"pulse",
"pulse_width",
"io_echo",
"measure_edges",
"timeout",
"callback",
],
)
echo = {}
echo["io_pulse"] = self.params["io_pulse"]
echo["pulse"] = self.params["pulse"]
echo["pulse_width"] = self.params["pulse_width"]
echo["io_echo"] = self.params["io_echo"]
echo["measure_edges"] = self.params["measure_edges"]
if type(self.params.get("timeout")) is int:
echo["timeout"] = self.params["timeout"]
self.obniz.send({"measure": {"echo": echo}})
if "callback" in self.params:
self.observers.append(self.params["callback"])
def notified(self, obj):
if len(self.observers):
callback = self.observers.pop(0)
callback(obj["echo"])
|
27872
|
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from cropwatch.apps.ioTank.models import ioTank, SensorReading
from cropwatch.apps.metrics.tasks import *
class Command(BaseCommand):
help = 'Performs uptime validation every 5'
def handle(self, *args, **options):
accounts = AccountSettings.objects.filter(notify_iotank_emergency=True)
email_subject = "ioTank offline."
for account in accounts:
bots = ioTank.objects.filter(owner=account.user)
for bot in bots:
try:
reading = SensorReading.objects.filter(bot=bot).order_by('-timestamp').first()
if reading.timestamp < timezone.now() - relativedelta(minutes=15):
msg = "ioTank:" + str(bot.name) + " has not communicated with the server in over 15 minutes"
print(msg)
if account.notify_email is True and account.email_daily > 0:
send_email.apply_async((email_subject, msg, account.user.email, account.user.id))
except Exception:  # e.g. no SensorReading exists yet for this bot
print(bot)
print(SensorReading.objects.filter(bot=bot))
|
27878
|
import os
import numpy as np
import tensorflow as tf
from PIL import Image
def modcrop(im, modulo):
if len(im.shape) == 3:
size = np.array(im.shape)
size = size - (size % modulo)
im = im[0 : size[0], 0 : size[1], :]
elif len(im.shape) == 2:
size = np.array(im.shape)
size = size - (size % modulo)
im = im[0 : size[0], 0 : size[1]]
else: raise AttributeError
return im
def shave(im, border):
if len(im.shape) == 3:
return im[border[0] : -border[0],
border[1] : -border[1], :]
elif len(im.shape) == 2:
return im[border[0] : -border[0],
border[1] : -border[1]]
else: raise AttributeError
def compute_psnr(im1, im2):
if im1.shape != im2.shape:
raise Exception('the shapes of two images are not equal')
rmse = np.sqrt(((np.asfarray(im1) - np.asfarray(im2)) ** 2).mean())
psnr = 20 * np.log10(255.0 / rmse)
return psnr
def main():
# folder path
folder = '../datas/Set60/ISO6400'
# generate the file list
filepath = os.listdir(folder)
filepath.sort()
im_input = tf.placeholder('float', [1, None, None, 3], name='im_input')
# create a session for running operations in the graph
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
with tf.device('/gpu:0'):
with open('./graph.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
output = tf.import_graph_def(graph_def, input_map={'im_input:0': im_input}, return_elements=['output:0'])
record_psnr = []
for i in np.arange(1, 20+1, 1):
for p in np.arange(1, 3+1, 1):
psnrs = []
im = np.array(Image.open(os.path.join(folder, '%03d/%03dMP%d.PNG' % (i, i, p))))
#Image.fromarray(im).show()
for g in np.arange(1, 10+1, 1):
im_n = np.array(Image.open(os.path.join(folder, '%03d/%03dN%02dP%d.PNG' % (i, i, g, p))))
#Image.fromarray(im_n).show()
im_n = im_n.astype(np.float32) / 255.0
im_n = np.expand_dims(im_n, axis=0)
im_dn = sess.run(output, feed_dict={im_input: im_n})
im_dn = np.squeeze(im_dn) * 255.0
im_dn = np.maximum(im_dn, 0)
im_dn = np.minimum(im_dn, 255)
#Image.fromarray(np.asarray(im_dn, dtype=np.uint8)).show()
psnr = compute_psnr(im, np.asarray(im_dn, dtype=np.uint8))
print('i%03d p%d g%02d: %.2f dB' % (i, p, g, psnr))
psnrs.append(psnr)
record_psnr.append(psnrs)
print('%.2f+-%.3f dB' % (np.mean(record_psnr), np.mean(np.std(record_psnr, 1))))
if __name__ == '__main__':
main()
|
27891
|
n = int(input())
c = [0]*n
for i in range(n):
l = int(input())
S = input()
for j in range(l):
if (S[j]=='0'):
continue
for k in range(j,l):
if (S[k]=='1'):
c[i] = c[i]+1
for i in range(n):
print(c[i])
|
27893
|
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from .. import BaseModel, register_model
from .knowledge_base import KGEModel
@register_model("rotate")
class RotatE(KGEModel):
r"""
Implementation of RotatE model from the paper `"RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space"
<https://openreview.net/forum?id=HkgEQnRqYQ>`.
borrowed from `KnowledgeGraphEmbedding<https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding>`
"""
def __init__(
self, nentity, nrelation, hidden_dim, gamma, double_entity_embedding=False, double_relation_embedding=False
):
# RotatE represents entities in the complex plane, so the entity embedding is always
# doubled (real + imaginary parts); relations remain single phase vectors.
super(RotatE, self).__init__(nentity, nrelation, hidden_dim, gamma, True, double_relation_embedding)
def score(self, head, relation, tail, mode):
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
# Make phases of relations uniformly distributed in [-pi, pi]
phase_relation = relation / (self.embedding_range.item() / pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
if mode == "head-batch":
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim=0)
score = score.norm(dim=0)
score = self.gamma.item() - score.sum(dim=2)
return score
|
27949
|
from __future__ import annotations
from typing import TYPE_CHECKING
from dependency_injector.wiring import Provide, inject
from server.container import AppContainer
from ._types import BotInventoryContainers, LootGenerationConfig
if TYPE_CHECKING:
# pylint: disable=cyclic-import
from tarkov.bots.bots import BotInventory
from tarkov.bots.generator.preset import BotGeneratorPreset
from tarkov.inventory.repositories import ItemTemplatesRepository
class BaseLootGenerator:
@inject
def __init__( # pylint: disable=too-many-arguments
self,
inventory_containers: BotInventoryContainers,
bot_inventory: BotInventory,
config: LootGenerationConfig,
preset: BotGeneratorPreset,
templates_repository: ItemTemplatesRepository = Provide[
AppContainer.repos.templates
],
):
self.inventory_containers = inventory_containers
self.bot_inventory = bot_inventory
self.config = config
self.preset = preset
self.templates_repository = templates_repository
def generate(self) -> None:
raise NotImplementedError
|
27956
|
from django.test import TestCase
from dojo.models import Test
from dojo.tools.cloudsploit.parser import CloudsploitParser
class TestCloudsploitParser(TestCase):
def test_cloudsploit_parser_with_no_vuln_has_no_findings(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_zero_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_cloudsploit_parser_with_one_critical_vuln_has_one_finding(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_one_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
def test_cloudsploit_parser_with_many_vuln_has_many_findings(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_many_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(6, len(findings))
|
28020
|
class Node():
def __init__(self, value):
self.value = value
self.adjacentlist = []
self.visited = False
class Graph():
def DFS(self, node, traversal):
node.visited = True
traversal.append(node.value)
for element in node.adjacentlist:
if element.visited is False:
self.DFS(element, traversal)
return traversal
node1 = Node("A")
node2 = Node("B")
node3 = Node("C")
node4 = Node("D")
node5 = Node("E")
node6 = Node("F")
node7 = Node("G")
node8 = Node("H")
node1.adjacentlist.append(node2)
node1.adjacentlist.append(node3)
node1.adjacentlist.append(node4)
node2.adjacentlist.append(node5)
node2.adjacentlist.append(node6)
node4.adjacentlist.append(node7)
node6.adjacentlist.append(node8)
graph = Graph()
print(graph.DFS(node1, []))
|
28036
|
from .EncoderRNN import EncoderRNN
from .DecoderRNN import DecoderRNN
from .TopKDecoder import TopKDecoder
from .seq2seq import Seq2seq
|
28067
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import *
PRIMITIVES = [
'MBI_k3_e3',
'MBI_k3_e6',
'MBI_k5_e3',
'MBI_k5_e6',
'MBI_k3_e3_se',
'MBI_k3_e6_se',
'MBI_k5_e3_se',
'MBI_k5_e6_se',
# 'skip',
]
OPS = {
'MBI_k3_e3' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0, oc, 3, s, affine=aff, act_func=act),
'MBI_k3_e6' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0, oc, 3, s, affine=aff, act_func=act),
'MBI_k5_e3' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0, oc, 5, s, affine=aff, act_func=act),
'MBI_k5_e6' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0, oc, 5, s, affine=aff, act_func=act),
'MBI_k3_e3_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic , oc, 3, s, affine=aff, act_func=act),
'MBI_k3_e6_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic*2, oc, 3, s, affine=aff, act_func=act),
'MBI_k5_e3_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic , oc, 5, s, affine=aff, act_func=act),
'MBI_k5_e6_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic*2, oc, 5, s, affine=aff, act_func=act),
# 'skip' : lambda ic, mc, oc, s, aff, act: IdentityLayer(ic, oc),
}
class MixedOP(nn.Module):
def __init__(self, in_channels, out_channels, stride, affine, act_func, num_ops, mc_num_dict, lat_lookup):
super(MixedOP, self).__init__()
self.num_ops = num_ops
self.lat_lookup = lat_lookup
self.mc_num_dict = mc_num_dict
self.m_ops = nn.ModuleList()
for i in range(num_ops):
primitive = PRIMITIVES[i]
mid_channels = self.mc_num_dict[i]
op = OPS[primitive](in_channels, mid_channels, out_channels, stride, affine, act_func)
self.m_ops.append(op)
self._initialize_log_alphas()
self.reset_switches()
def fink_ori_idx(self, idx):
count = 0
for ori_idx in range(len(self.switches)):
if self.switches[ori_idx]:
count += 1
if count == (idx + 1):
break
return ori_idx
def forward(self, x, sampling, mode):
if sampling:
weights = self.log_alphas[self.switches]
if mode == 'gumbel':
weights = F.gumbel_softmax(F.log_softmax(weights, dim=-1), self.T, hard=False)
idx = torch.argmax(weights).item()
self.switches[idx] = False
elif mode == 'gumbel_2':
weights = F.gumbel_softmax(F.log_softmax(weights, dim=-1), self.T, hard=False)
idx = torch.argmax(weights).item()
idx = self.fink_ori_idx(idx)
self.reset_switches()
elif mode == 'min_alphas':
idx = torch.argmin(weights).item()
idx = self.fink_ori_idx(idx)
self.reset_switches()
elif mode == 'max_alphas':
idx = torch.argmax(weights).item()
idx = self.fink_ori_idx(idx)
self.reset_switches()
elif mode == 'random':
idx = random.choice(range(len(weights)))
idx = self.fink_ori_idx(idx)
self.reset_switches()
else:
raise ValueError('invalid sampling mode...')
op = self.m_ops[idx]
return op(x), 0
else:
weights = F.gumbel_softmax(self.log_alphas, self.T, hard=False)
lats = self.get_lookup_latency(x.size(-1))
out = sum(w*op(x) for w, op in zip(weights, self.m_ops))
out_lat = sum(w*lat for w, lat in zip(weights, lats))
return out, out_lat
def get_lookup_latency(self, size):
lats = []
for idx, op in enumerate(self.m_ops):
if isinstance(op, IdentityLayer):
lats.append(0)
else:
key = '{}_{}_{}_{}_{}_k{}_s{}_{}'.format(
op.name,
size,
op.in_channels,
op.se_channels,
op.out_channels,
op.kernel_size,
op.stride,
op.act_func)
mid_channels = op.mid_channels
lats.append(self.lat_lookup[key][mid_channels])
return lats
def _initialize_log_alphas(self):
alphas = torch.zeros((self.num_ops,))
log_alphas = F.log_softmax(alphas, dim=-1)
self.register_parameter('log_alphas', nn.Parameter(log_alphas))
def reset_switches(self):
self.switches = [True] * self.num_ops
def set_temperature(self, T):
self.T = T
class MixedStage(nn.Module):
def __init__(self, ics, ocs, ss, affs, acts, mc_num_ddict, lat_lookup, stage_type):
super(MixedStage, self).__init__()
self.lat_lookup = lat_lookup
self.mc_num_ddict = mc_num_ddict
self.stage_type = stage_type # 0 for stage6 || 1 for stage1 || 2 for stage2 || 3 for stage3/4/5
self.start_res = 0 if ((ics[0] == ocs[0]) and (ss[0] == 1)) else 1
self.num_res = len(ics) - self.start_res + 1
# stage6
if stage_type == 0:
self.block1 = MixedOP(ics[0], ocs[0], ss[0], affs[0], acts[0], len(PRIMITIVES), mc_num_ddict['block1'], lat_lookup)
# stage1
elif stage_type == 1:
self.block1 = MixedOP(ics[0], ocs[0], ss[0], affs[0], acts[0], len(PRIMITIVES), mc_num_ddict['block1'], lat_lookup)
self.block2 = MixedOP(ics[1], ocs[1], ss[1], affs[1], acts[1], len(PRIMITIVES), mc_num_ddict['block2'], lat_lookup)
# stage2
elif stage_type == 2:
self.block1 = MixedOP(ics[0], ocs[0], ss[0], affs[0], acts[0], len(PRIMITIVES), mc_num_ddict['block1'], lat_lookup)
self.block2 = MixedOP(ics[1], ocs[1], ss[1], affs[1], acts[1], len(PRIMITIVES), mc_num_ddict['block2'], lat_lookup)
self.block3 = MixedOP(ics[2], ocs[2], ss[2], affs[2], acts[2], len(PRIMITIVES), mc_num_ddict['block3'], lat_lookup)
# stage3, stage4, stage5
elif stage_type == 3:
self.block1 = MixedOP(ics[0], ocs[0], ss[0], affs[0], acts[0], len(PRIMITIVES), mc_num_ddict['block1'], lat_lookup)
self.block2 = MixedOP(ics[1], ocs[1], ss[1], affs[1], acts[1], len(PRIMITIVES), mc_num_ddict['block2'], lat_lookup)
self.block3 = MixedOP(ics[2], ocs[2], ss[2], affs[2], acts[2], len(PRIMITIVES), mc_num_ddict['block3'], lat_lookup)
self.block4 = MixedOP(ics[3], ocs[3], ss[3], affs[3], acts[3], len(PRIMITIVES), mc_num_ddict['block4'], lat_lookup)
else:
raise ValueError('invalid stage_type...')
self._initialize_betas()
def forward(self, x, sampling, mode):
res_list = [x,]
lat_list = [0.,]
# stage6
if self.stage_type == 0:
out1, lat1 = self.block1(x, sampling, mode)
res_list.append(out1)
lat_list.append(lat1)
# stage1
elif self.stage_type == 1:
out1, lat1 = self.block1(x, sampling, mode)
res_list.append(out1)
lat_list.append(lat1)
out2, lat2 = self.block2(out1, sampling, mode)
res_list.append(out2)
lat_list.append(lat1+lat2)
# stage2
elif self.stage_type == 2:
out1, lat1 = self.block1(x, sampling, mode)
res_list.append(out1)
lat_list.append(lat1)
out2, lat2 = self.block2(out1, sampling, mode)
res_list.append(out2)
lat_list.append(lat1+lat2)
out3, lat3 = self.block3(out2, sampling, mode)
res_list.append(out3)
lat_list.append(lat1+lat2+lat3)
# stage3, stage4, stage5
elif self.stage_type == 3:
out1, lat1 = self.block1(x, sampling, mode)
res_list.append(out1)
lat_list.append(lat1)
out2, lat2 = self.block2(out1, sampling, mode)
res_list.append(out2)
lat_list.append(lat1+lat2)
out3, lat3 = self.block3(out2, sampling, mode)
res_list.append(out3)
lat_list.append(lat1+lat2+lat3)
out4, lat4 = self.block4(out3, sampling, mode)
res_list.append(out4)
lat_list.append(lat1+lat2+lat3+lat4)
else:
raise ValueError
weights = F.softmax(self.betas, dim=-1)
out = sum(w*res for w, res in zip(weights, res_list[self.start_res:]))
out_lat = sum(w*lat for w, lat in zip(weights, lat_list[self.start_res:]))
return out, out_lat
def _initialize_betas(self):
betas = torch.zeros((self.num_res))
self.register_parameter('betas', nn.Parameter(betas))
class Network(nn.Module):
def __init__(self, num_classes, mc_num_dddict, lat_lookup):
super(Network, self).__init__()
self.lat_lookup = lat_lookup
self.mc_num_dddict = mc_num_dddict
self.first_stem = ConvLayer(3, 32, kernel_size=3, stride=2, affine=False, act_func='relu')
self.second_stem = MBInvertedResBlock(32, 32, 8, 16, kernel_size=3, stride=1, affine=False, act_func='relu')
self.stage1 = MixedStage(
ics = [16,24],
ocs = [24,24],
ss = [2,1],
affs = [False, False],
acts = ['relu', 'relu'],
mc_num_ddict = mc_num_dddict['stage1'],
lat_lookup = lat_lookup,
stage_type = 1,)
self.stage2 = MixedStage(
ics = [24,40,40],
ocs = [40,40,40],
ss = [2,1,1],
affs = [False, False, False],
acts = ['swish', 'swish', 'swish'],
mc_num_ddict = mc_num_dddict['stage2'],
lat_lookup = lat_lookup,
stage_type = 2,)
self.stage3 = MixedStage(
ics = [40,80,80,80],
ocs = [80,80,80,80],
ss = [2,1,1,1],
affs = [False, False, False, False],
acts = ['swish', 'swish', 'swish', 'swish'],
mc_num_ddict = mc_num_dddict['stage3'],
lat_lookup = lat_lookup,
stage_type = 3,)
self.stage4 = MixedStage(
ics = [80,112,112,112],
ocs = [112,112,112,112],
ss = [1,1,1,1],
affs = [False, False, False, False],
acts = ['swish', 'swish', 'swish', 'swish'],
mc_num_ddict = mc_num_dddict['stage4'],
lat_lookup = lat_lookup,
stage_type = 3,)
self.stage5 = MixedStage(
ics = [112,192,192,192],
ocs = [192,192,192,192],
ss = [2,1,1,1],
affs = [False, False, False, False],
acts = ['swish', 'swish', 'swish', 'swish'],
mc_num_ddict = mc_num_dddict['stage5'],
lat_lookup = lat_lookup,
stage_type = 3,)
self.stage6 = MixedStage(
ics = [192,],
ocs = [320,],
ss = [1,],
affs = [False,],
acts = ['swish',],
mc_num_ddict = mc_num_dddict['stage6'],
lat_lookup = lat_lookup,
stage_type = 0,)
self.feature_mix_layer = ConvLayer(320, 1280, kernel_size=1, stride=1, affine=False, act_func='swish')
self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = LinearLayer(1280, num_classes)
self._initialization()
def forward(self, x, sampling, mode='max'):
out_lat = self.lat_lookup['base'] if not sampling else 0.0
x = self.first_stem(x)
x = self.second_stem(x)
x, lat = self.stage1(x, sampling, mode)
out_lat += lat
x, lat = self.stage2(x, sampling, mode)
out_lat += lat
x, lat = self.stage3(x, sampling, mode)
out_lat += lat
x, lat = self.stage4(x, sampling, mode)
out_lat += lat
x, lat = self.stage5(x, sampling, mode)
out_lat += lat
x, lat = self.stage6(x, sampling, mode)
out_lat += lat
x = self.feature_mix_layer(x)
x = self.global_avg_pooling(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x, out_lat
def set_temperature(self, T):
for m in self.modules():
if isinstance(m, MixedOP):
m.set_temperature(T)
def weight_parameters(self):
_weight_parameters = []
for k, v in self.named_parameters():
if not (k.endswith('log_alphas') or k.endswith('betas')):
_weight_parameters.append(v)
return _weight_parameters
def arch_parameters(self):
_arch_parameters = []
for k, v in self.named_parameters():
if k.endswith('log_alphas') or k.endswith('betas'):
_arch_parameters.append(v)
return _arch_parameters
def log_alphas_parameters(self):
_log_alphas_parameters = []
for k, v in self.named_parameters():
if k.endswith('log_alphas'):
_log_alphas_parameters.append(v)
return _log_alphas_parameters
def betas_parameters(self):
_betas_parameters = []
for k, v in self.named_parameters():
if k.endswith('betas'):
_betas_parameters.append(v)
return _betas_parameters
def reset_switches(self):
for m in self.modules():
if isinstance(m, MixedOP):
m.reset_switches()
def _initialization(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
|
28144
|
import djclick as click
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from .forms import AddOrganizerForm
from .slack_client import slack
# "Get organizers info" functions used in 'new_event' and 'copy_event' management commands.
def get_main_organizer():
"""
Ask the user for the name and e-mail address of the main organizer, and
return them as a list containing a single dictionary.
"""
team = []
click.echo(_("Let's talk about the team. First the main organizer:"))
main_name = click.prompt(click.style(
"First and last name", bold=True, fg='yellow'
))
main_email = click.prompt(click.style(
"E-mail address", bold=True, fg='yellow'
))
team.append({'name': main_name, 'email': main_email})
click.echo("All right, the main organizer is {0} ({1})".format(main_name, main_email))
return team
def get_team(team):
"""
Ask the user for the names and e-mail addresses of the rest of the team,
and append them to the list we got from get_main_organizer.
"""
add_team = click.confirm(click.style(
"Do you want to add additional team members?", bold=True, fg='yellow'
), default=False)
i = 1
while add_team:
i += 1
name = click.prompt(click.style(
f"First and last name of #{i} member", bold=True, fg='yellow'
))
email = click.prompt(click.style(
f"E-mail address of #{i} member", bold=True, fg='yellow'
))
if len(name) > 0:
team.append({'name': name, 'email': email})
click.echo(
f"All right, the #{i} team member of Django Girls is {name} ({email})"
)
add_team = click.confirm(click.style(
"Do you want to add additional team members?", bold=True, fg='yellow'
), default=False)
return team
def create_users(team, event):
"""
Create or get User objects based on team list
"""
members = []
for member in team:
member['event'] = event.pk
form = AddOrganizerForm(member)
user = form.save()
members.append(user)
return members
def brag_on_slack_bang(city, country, team):
"""
Post a message about the new Django Girls event to the #general channel on Slack!
"""
if settings.ENABLE_SLACK_NOTIFICATIONS:
text = f":django_pony: :zap: Woohoo! :tada: New Django Girls alert! " \
f"Welcome Django Girls {city}, {country}. " \
f"Congrats {', '.join(['{} {}'.format(x.first_name, x.last_name) for x in team])}!"
slack.chat.post_message(
channel='#general',
text=text,
username='Django Girls',
icon_emoji=':django_heart:'
)
|
28167
|
import sys
from typing import Any
from django.conf import settings
if sys.version_info >= (3, 8):
from typing import Literal
ModeType = Literal["once", "none", "all"]
else:
ModeType = str
class Settings:
defaults = {"HIDE_COLUMNS": True, "MODE": "once"}
def get_setting(self, key: str) -> Any:
try:
return settings.PERF_REC[key]
except (AttributeError, KeyError):
return self.defaults.get(key, None)
@property
def HIDE_COLUMNS(self) -> bool:
return self.get_setting("HIDE_COLUMNS")
@property
def MODE(self) -> ModeType:
return self.get_setting("MODE")
perf_rec_settings = Settings()
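# Hedged usage sketch: the corresponding Django settings entry could look like
#   PERF_REC = {"MODE": "all", "HIDE_COLUMNS": False}
# after which perf_rec_settings.MODE returns "all"; unset keys fall back to ``defaults`` above.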
|
28180
|
import functools
from django.contrib import messages
from django.urls import reverse
from django.shortcuts import redirect
def full_profile_required(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
if (request.user
and request.user.id # FIXME test mocks mess with the above object so we have to check the id
and (not request.user.attendeeprofile or not request.user.attendeeprofile.gender)):
messages.warning(
request,
"Please update your profile to continue using the EuroPython website."
)
return redirect(reverse('user_panel:profile_settings'))
return func(request, *args, **kwargs)
return wrapper
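# Hedged usage sketch (the view name is hypothetical):
#   @full_profile_required
#   def talk_voting(request):
#       ...  # only reached once the attendee profile (including gender) is filled in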
|
28183
|
import numpy as np
from mldftdat.pyscf_utils import *
from mldftdat.workflow_utils import safe_mem_cap_mb
from pyscf.dft.numint import eval_ao, make_mask
from mldftdat.density import LDA_FACTOR,\
contract21_deriv, contract21, GG_AMIN, GG_SMUL, GG_AMUL  # GG_SMUL/GG_AMUL are used below; assumed defined alongside GG_AMIN
def dtauw(rho_data):
return - get_gradient_magnitude(rho_data)**2 / (8 * rho_data[0,:]**2 + 1e-16),\
1 / (8 * rho_data[0,:] + 1e-16)
def dsdp(s):
return 1 / (2 * s)
def dasinhsdp(s):
return arcsinh_deriv(s) / (2 * s + 1e-10)
def ds2(rho_data):
# s = |nabla n| / (b * n^(4/3)), the reduced density gradient
rho = rho_data[0,:]
b = 2 * (3 * np.pi * np.pi)**(1.0/3)
s = get_gradient_magnitude(rho_data) / (b * rho**(4.0/3) + 1e-16)
s2 = s**2
return -8.0 * s2 / (3 * rho + 1e-16),\
1 / (b * rho**(4.0/3) + 1e-16)**2
def dalpha(rho_data):
rho = rho_data[0,:]
tau = rho_data[5,:]
tau0 = get_uniform_tau(rho) + 1e-16
mag_grad = get_gradient_magnitude(rho_data)
tauw = get_single_orbital_tau(rho, mag_grad)
dwdn, dwds = dtauw(rho_data)
return 5.0 * (tauw - tau) / (3 * tau0 * rho + 1e-16) - dwdn / tau0,\
- dwds / tau0,\
1 / tau0
LDA_FACTOR = - 3.0 / 4.0 * (3.0 / np.pi)**(1.0/3)
def v_semilocal(rho_data, F, dfdp, dfdalpha):
# 0 - n, 1 - p, 2 - nabla^2, 3 - alpha
v = np.zeros((4, rho_data.shape[1]))
rho = rho_data[0,:]
elda = LDA_FACTOR * rho**(4.0/3)
# dE/dn line 1
v[0] = 4.0 / 3 * LDA_FACTOR * rho**(1.0/3) * F
# dE/dp line 1
v[1] = elda * dfdp
# dE/dalpha line 1
v[3] = elda * dfdalpha
return v
def v_basis_transform(rho_data, v_npalpha):
"""
Transforms the basis of the exchange potential from
density, reduced gradient, and alpha to
density, contracted gradient, and kinetic energy.
v_npalpha is a 4xN array:
0 - Functional derivative of the exchange energy
explicitly with respect to the density, i.e.
not accounting for derivatives of the XEF features
wrt density
1 - Functional derivative wrt the square of the reduced
gradient p
2 - ZERO (Functional derivative wrt normalized laplacian)
3 - Functional derivative wrt the isoorbital indicator
alpha
Returns a 4xN array:
0 - Full functional derivative of the exchange energy
wrt the density, accounting for dp/dn and dalpha/dn
1 - Derivative wrt sigma, the contracted gradient |nabla n|^2
2 - ZERO (Derivative wrt the laplacian of the density)
3 - Derivative wrt tau, the kinetic energy density
"""
v_nst = np.zeros(v_npalpha.shape)
# dE/dn lines 1-3
v_nst[0] = v_npalpha[0]
dpdn, dpdsigma = ds2(rho_data)
# dE/dn line 4 term 1
v_nst[0] += v_npalpha[1] * dpdn
# dE/dsigma term 1
v_nst[1] += v_npalpha[1] * dpdsigma
dadn, dadsigma, dadtau = dalpha(rho_data)
# dE/dn line 4 term 2
v_nst[0] += v_npalpha[3] * dadn
# dE/dsigma term 2
v_nst[1] += v_npalpha[3] * dadsigma
# dE/dtau
v_nst[3] = v_npalpha[3] * dadtau
return v_nst
def v_nonlocal_general(rho_data, grid, dedg, density, auxmol,
g, gr2, ovlp, l = 0, mul = 1.0):
# g should have shape (2l+1, N)
N = grid.weights.shape[0]
lc = get_dft_input2(rho_data)[:3]
if l == 0:
dedb = dedg.reshape(1, -1)
elif l == 1:
#dedb = 2 * elda * g * dfdg
dedb = 2 * dedg * g #/ (np.linalg.norm(g, axis=0) + 1e-10)
elif l == 2:
dedb = 2 * dedg * g / np.sqrt(5)
elif l == -2:
dedb = dedg
l = 2
elif l == -1:
dedb = dedg
l = 1
else:
raise ValueError('angular momentum code l=%d unknown' % l)
rho, s, alpha = lc
a = np.pi * (mul * rho / 2 + 1e-16)**(2.0 / 3)
scale = 1
fac = (6 * np.pi**2)**(2.0/3) / (16 * np.pi)
scale += GG_SMUL * fac * s**2
scale += GG_AMUL * 0.6 * fac * (alpha - 1)
a = a * scale
cond = a < GG_AMIN
da = np.exp(a[cond] / GG_AMIN - 1)
a[cond] = GG_AMIN * np.exp(a[cond] / GG_AMIN - 1)
# (ngrid * (2l+1), naux)
dedb[:,rho<1e-8] = 0
dedaux = np.dot((dedb * grid.weights).T.flatten(), ovlp)
dgda = l / (2 * a) * g - gr2
#print(dgda.shape, gr2.shape)
dgda[:,rho<1e-8] = 0
dadn = mul * a / (3 * (mul * rho / 2 + 1e-16))
dadp = GG_SMUL * np.pi * fac * (mul * rho / 2 + 1e-16)**(2.0/3)
dadalpha = GG_AMUL * 0.6 * np.pi * fac * (mul * rho / 2 + 1e-16)**(2.0/3)
dadn[cond] *= da
dadp[cond] *= da
dadalpha[cond] *= da
# add in line 3 of dE/dn, line 2 of dE/dp and dE/dalpha
v_npa = np.zeros((4, N))
deda = np.einsum('mi,mi->i', dedb, dgda)
v_npa[0] = deda * dadn
v_npa[1] = deda * dadp
v_npa[3] = deda * dadalpha
return v_npa, dedaux
def v_nonlocal(rho_data, grid, dedg, density, auxmol,
g, gr2, ovlp, l=0, a0=8.0, fac_mul=0.25,
amin=GG_AMIN, l_add=0, **kwargs):
#print(l, l_add, a0, fac_mul, amin)
# g should have shape (2l+1, N)
N = grid.weights.shape[0]
lc = get_dft_input2(rho_data)[:3]
if l == 0:
dedb = dedg.reshape(1, -1)
elif l == 1:
dedb = 2 * dedg * g
elif l == 2:
dedb = 2 * dedg * g / np.sqrt(5)
elif l == -2:
dedb = dedg
l = 2
elif l == -1:
dedb = dedg
l = 1
else:
raise ValueError('angular momentum code l=%d unknown' % l)
rho, s, alpha = lc
ratio = alpha + 5./3 * s**2
fac = fac_mul * 1.2 * (6 * np.pi**2)**(2.0/3) / np.pi
a = np.pi * (rho / 2 + 1e-16)**(2.0 / 3)
scale = a0 + (ratio-1) * fac
a = a * scale
cond = a < amin
da = np.exp(a[cond] / amin - 1)
a[cond] = amin * np.exp(a[cond] / amin - 1)
# (ngrid * (2l+1), naux)
dedb[:,rho<1e-8] = 0
dedaux = np.dot((dedb * grid.weights).T.flatten(), ovlp)
dgda = (l + l_add) / (2 * a) * g - gr2
dgda[:,rho<1e-8] = 0
dadn = 2 * a / (3 * rho + 1e-16)
dadalpha = np.pi * fac * (rho / 2 + 1e-16)**(2.0/3)
dadp = 5./3 * dadalpha
dadn[cond] *= da
dadp[cond] *= da
dadalpha[cond] *= da
# add in line 3 of dE/dn, line 2 of dE/dp and dE/dalpha
v_npa = np.zeros((4, N))
deda = np.einsum('mi,mi->i', dedb, dgda)
v_npa[0] = deda * dadn
v_npa[1] = deda * dadp
v_npa[3] = deda * dadalpha
return v_npa, dedaux
def functional_derivative_loop(mol, mlfunc, dEddesc,
raw_desc, raw_desc_r2,
rho_data, density, ovlps, grid):
"""
Core functional derivative loop for the CIDER features,
called by NLNumInt.
Args:
mol (pyscf.gto.Mole): molecule object
mlfunc (MLFunctional): Exchange functional
dEddesc (np.ndarray): ngrid x ndesc array of energy derivatives
with respect to the descriptors.
raw_desc (np.ndarray): raw CIDER descriptor vectors
raw_desc_r2 (np.ndarray): raw CIDER descriptor vectors <r^2>
for use in functional derivative with respect to the Gaussian
exponents
rho_data (np.ndarray): 6 x ngrid
density (np.ndarray): density in DF basis space
ovlps (np.ndarray): Overlaps of the CIDER descriptor functions with
the DF basis
grid: contains coords and weights of the real-space grid
"""
gg_dict = {
'a0': mlfunc.a0,
'amin': mlfunc.amin,
'fac_mul': mlfunc.fac_mul
}
N = grid.weights.shape[0]
naux = mol.auxmol.nao_nr()
sprefac = 2 * (3 * np.pi * np.pi)**(1.0/3)
n43 = rho_data[0]**(4.0/3)
svec = rho_data[1:4] / (sprefac * n43 + 1e-20)
v_npa = np.zeros((4, N))
v_aniso = np.zeros((3, N))
v_aux = np.zeros(naux)
for i, d in enumerate(mlfunc.desc_order):
if d == 0:
v_npa[0] += dEddesc[:,i]
elif d == 1:
v_npa[1] += dEddesc[:,i]
elif d == 2:
v_npa[3] += dEddesc[:,i]
else:
gg_kwargs = gg_dict
l_add = 0
if d in [3, 10, 11]:
if d == 3:
g = raw_desc[6]
ovlp = ovlps[0]
gr2 = raw_desc_r2[6:7]
elif d == 10:
g = raw_desc[15]
ovlp = ovlps[3]
gr2 = raw_desc_r2[15:16]
if mlfunc.desc_version == 'c':
l_add = 2
mul = 1.0
else:
mul = 0.25**(2./3)
gg_kwargs = {
'a0': mlfunc.a0 * mul,
'fac_mul': mlfunc.fac_mul * mul,
'amin': mlfunc.amin * mul
}
else:
g = raw_desc[16]
ovlp = ovlps[4]
gr2 = raw_desc_r2[16:17]
if mlfunc.desc_version == 'c':
mul = 2.0
else:
mul = 4**(2./3)
gg_kwargs = {
'a0': mlfunc.a0 * mul,
'fac_mul': mlfunc.fac_mul * mul,
'amin': mlfunc.amin * mul
}
l = 0
elif d == 4:
g = raw_desc[7:10]
gr2 = raw_desc_r2[7:10]
ovlp = ovlps[1]
l = 1
elif d == 6:
g = raw_desc[10:15]
gr2 = raw_desc_r2[10:15]
ovlp = ovlps[2]
l = 2
elif d == 5:
g = raw_desc[7:10]
gr2 = raw_desc_r2[7:10]
ovlp = ovlps[1]
dfmul = svec
v_aniso += dEddesc[:,i] * g
l = -1
elif d == 7:
l = -2
g = raw_desc[10:15]
gr2 = raw_desc_r2[10:15]
ovlp = ovlps[2]
dfmul = contract21_deriv(svec)
ddesc_dsvec = contract21(g, svec)
v_aniso += dEddesc[:,i] * 2 * ddesc_dsvec
elif d == 8:
g2 = raw_desc[10:15]
g2r2 = raw_desc_r2[10:15]
ovlp2 = ovlps[2]
g1 = raw_desc[7:10]
g1r2 = raw_desc_r2[7:10]
ovlp1 = ovlps[1]
dfmul = contract21_deriv(svec, g1)
ddesc_dsvec = contract21(g2, g1)
ddesc_dg1 = contract21(g2, svec)
v_aniso += dEddesc[:,i] * ddesc_dsvec
vtmp1, dedaux1 = v_nonlocal(rho_data, grid,
dEddesc[:,i] * ddesc_dg1,
density, mol.auxmol, g1,
g1r2, ovlp1, l=-1, **gg_kwargs)
vtmp2, dedaux2 = v_nonlocal(rho_data, grid,
dEddesc[:,i] * dfmul,
density, mol.auxmol, g2,
g2r2, ovlp2, l=-2, **gg_kwargs)
vtmp = vtmp1 + vtmp2
dedaux = dedaux1 + dedaux2
elif d == 9:
g2 = raw_desc[10:15]
g2r2 = raw_desc_r2[10:15]
ovlp2 = ovlps[2]
g1 = raw_desc[7:10]
g1r2 = raw_desc_r2[7:10]
ovlp1 = ovlps[1]
dfmul = contract21_deriv(g1)
ddesc_dg1 = 2 * contract21(g2, g1)
vtmp1, dedaux1 = v_nonlocal(rho_data, grid,
dEddesc[:,i] * ddesc_dg1,
density, mol.auxmol, g1,
g1r2, ovlp1, l=-1, **gg_kwargs)
vtmp2, dedaux2 = v_nonlocal(rho_data, grid,
dEddesc[:,i] * dfmul,
density, mol.auxmol, g2,
g2r2, ovlp2, l=-2, **gg_kwargs)
vtmp = vtmp1 + vtmp2
dedaux = dedaux1 + dedaux2
else:
raise NotImplementedError('Cannot take derivative for code %d' % d)
if d in [5, 7]:
vtmp, dedaux = v_nonlocal(rho_data, grid,
dEddesc[:,i] * dfmul,
density, mol.auxmol, g,
gr2, ovlp, l=l, **gg_kwargs)
elif d in [8, 9]:
pass
else:
vtmp, dedaux = v_nonlocal(rho_data, grid,
dEddesc[:,i],
density, mol.auxmol, g,
gr2, ovlp, l=l, l_add=l_add,
**gg_kwargs)
v_npa += vtmp
v_aux += dedaux
vtmp = None
dedaux = None
vmol = np.einsum('a,aij->ij', v_aux, mol.ao_to_aux)
v_nst = v_basis_transform(rho_data, v_npa)
v_nst[0] += np.einsum('ap,ap->p', -4.0 * svec / (3 * rho_data[0] + 1e-20), v_aniso)
v_grad = v_aniso / (sprefac * n43 + 1e-20)
return v_nst, v_grad, vmol
def get_density_in_basis(ao_to_aux, rdm1):
return np.einsum('npq,pq->n', ao_to_aux, rdm1)
def arcsinh_deriv(x):
return 1 / np.sqrt(x * x + 1)
def get_chi(alpha):
return 1 / (1 + alpha**2)
def chi_deriv(alpha):
return -2 * alpha / (1 + alpha**2)**2
|
28194
|
import os
from geofeather import to_geofeather, from_geofeather
from pandas.testing import assert_frame_equal
import pytest
def test_points_geofeather(tmpdir, points_wgs84):
"""Confirm that we can round-trip points to / from feather file"""
filename = tmpdir / "points_wgs84.feather"
to_geofeather(points_wgs84, filename)
assert os.path.exists(filename)
df = from_geofeather(filename)
assert_frame_equal(df, points_wgs84)
assert df.crs == points_wgs84.crs
def test_points_geofeather_proj4(tmpdir, points_albers_conus_proj4):
"""Confirm that we can round-trip points to / from feather file with a proj4 defined CRS"""
filename = tmpdir / "points_albers_conus.feather"
to_geofeather(points_albers_conus_proj4, filename)
df = from_geofeather(filename)
assert_frame_equal(df, points_albers_conus_proj4)
# equality comparison fails for a CRS object constructed from proj4, even though they are still the same
if hasattr(df.crs, "to_proj4"):
assert df.crs.to_proj4() == points_albers_conus_proj4.crs.to_proj4()
else:
assert df.crs == points_albers_conus_proj4.crs
def test_points_geofeather_wkt(tmpdir, points_albers_conus_wkt):
"""Confirm that we can round-trip points to / from feather file with a wkt defined CRS"""
filename = tmpdir / "points_albers_conus.feather"
to_geofeather(points_albers_conus_wkt, filename)
df = from_geofeather(filename)
assert_frame_equal(df, points_albers_conus_wkt)
assert df.crs == points_albers_conus_wkt.crs
def test_missing_crs_warning(tmpdir, points_wgs84):
"""Confirm that a warning is raised if the crs file is missing"""
filename = tmpdir / "points_wgs84.feather"
to_geofeather(points_wgs84, filename)
os.remove("{}.crs".format(filename))
with pytest.warns(UserWarning) as warning:
df = from_geofeather(filename)
assert (
"coordinate reference system file is missing" in warning[0].message.args[0]
)
assert df.crs is None
def test_lines_geofeather(tmpdir, lines_wgs84):
"""Confirm that we can round-trip lines to / from feather file"""
filename = tmpdir / "lines_wgs84.feather"
to_geofeather(lines_wgs84, filename)
assert os.path.exists(filename)
df = from_geofeather(filename)
assert_frame_equal(df, lines_wgs84)
assert df.crs == lines_wgs84.crs
def test_polygons_geofeather(tmpdir, polygons_wgs84):
"""Confirm that we can round-trip polygons to / from feather file"""
filename = tmpdir / "polygons_wgs84.feather"
to_geofeather(polygons_wgs84, filename)
assert os.path.exists(filename)
df = from_geofeather(filename)
assert_frame_equal(df, polygons_wgs84)
assert df.crs == polygons_wgs84.crs
|
28285
|
import urwid
from console.app import app
from console.widgets.help import HelpDialog
class Pane(urwid.WidgetPlaceholder):
"""
A widget which allows for easy display of dialogs.
"""
def __init__(self, widget=urwid.SolidFill(' ')):
urwid.WidgetPlaceholder.__init__(self, widget)
self.widget = widget
self.dialog = None
def show_dialog(self, dialog):
if not self.dialog:
self.dialog = dialog
self.original_widget = urwid.Overlay(
urwid.LineBox(dialog),
self.original_widget,
align=getattr(dialog, 'align', 'center'),
width=getattr(dialog, 'width', ('relative', 99)),
valign=getattr(dialog, 'valign', 'middle'),
height=getattr(dialog, 'height', 'pack'),
)
app.draw_screen()
def close_dialog(self):
if self.dialog:
self.original_widget = self.widget
self.dialog = None
app.draw_screen()
def keypress(self, size, event):
if not self.handle_event(event):
return self.original_widget.keypress(size, event)
return super(Pane, self).keypress(size, event)
def handle_event(self, event):
if event == 'close-dialog':
self.close_dialog()
else:
return event
def get_help_dialog(self):
return HelpDialog()
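# Hedged usage sketch (my_widget stands in for any urwid widget):
#   pane = Pane(my_widget)
#   pane.show_dialog(pane.get_help_dialog())   # overlay the help dialog on top of the widget
#   pane.close_dialog()                        # restore the original widget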
|
28323
|
import base64
import hashlib
import json
import logging
from dataclasses import dataclass
import boto3
log = logging.getLogger()
region = "us-east-1"
def handle(event: dict, context):
request = event["Records"][0]["cf"]["request"]
try:
authenticate(request["headers"])
except Exception as e:
log.error(repr(e))
return unauthorized
return request
def authenticate(headers: dict):
domain = headers["host"][0]["value"]
auth = headers["authorization"][0]["value"]
auth_type, creds = auth.split(" ")
if auth_type != "Basic":
raise ValueError("Invalid auth type: " + auth_type)
username, password = base64.b64decode(creds).decode().split(":")
user = get_user(domain, username)
if hash_password(password, user.password_salt) != user.password_hash:
raise ValueError("Invalid password for " + username)
@dataclass
class User:
username: str
password_hash: str
password_salt: str
def get_user(domain: str, username: str) -> User:
data = boto3.client("ssm", region_name=region).get_parameter(
Name=f"/s3pypi/{domain}/users/{username}",
WithDecryption=True,
)["Parameter"]["Value"]
return User(username, **json.loads(data))
def hash_password(password: str, salt: str) -> str:
return hashlib.sha1((password + salt).encode()).hexdigest()
unauthorized = dict(
status="401",
statusDescription="Unauthorized",
headers={
"www-authenticate": [
{"key": "WWW-Authenticate", "value": 'Basic realm="Login"'}
]
},
)
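# --- Usage sketch (not part of the original module) ---------------------------
# Shows the CloudFront viewer-request event shape that `handle` expects, built
# locally for testing. Assumptions: the host value below is a placeholder, and
# the SSM parameter read by `get_user` holds JSON with "password_hash" and
# "password_salt" keys (as the `User` dataclass implies).
def _example_event(username: str, password: str, host: str = "pypi.example.com") -> dict:
    creds = base64.b64encode(f"{username}:{password}".encode()).decode()
    return {
        "Records": [
            {
                "cf": {
                    "request": {
                        "uri": "/simple/",
                        "headers": {
                            "host": [{"key": "Host", "value": host}],
                            "authorization": [
                                {"key": "Authorization", "value": "Basic " + creds}
                            ],
                        },
                    }
                }
            }
        ]
    }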
|
28353
|
import os
from fnmatch import fnmatch
from typing import Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
from typing_extensions import Literal
from . import lib
from .otf import TemporaryOTF
from .util import PathOrArray, _kwargs_for, imread
def rl_cleanup():
"""Release GPU buffer and cleanup after deconvolution
    Call this before the program quits to release the global GPUBuffer d_interpOTF.
- Resets any bleach corrections
- Removes OTF from GPU buffer
- Destroys cuFFT plan
- Releases GPU buffers
"""
return lib.RL_cleanup()
def rl_init(
rawdata_shape: Tuple[int, int, int],
otfpath: str,
dzdata: float = 0.5,
dxdata: float = 0.1,
dzpsf: float = 0.1,
dxpsf: float = 0.1,
deskew: float = 0,
rotate: float = 0,
width: int = 0,
):
"""Initialize GPU for deconvolution.
Prepares cuFFT plan for deconvolution with a given data shape and OTF.
Must be used prior to :func:`pycudadecon.rl_decon`
Parameters
----------
rawdata_shape : Tuple[int, int, int]
3-tuple of data shape
otfpath : str
Path to OTF TIF
dzdata : float, optional
Z-step size of data, by default 0.5
dxdata : float, optional
XY pixel size of data, by default 0.1
dzpsf : float, optional
Z-step size of the OTF, by default 0.1
dxpsf : float, optional
XY pixel size of the OTF, by default 0.1
deskew : float, optional
Deskew angle. If not 0.0 then deskewing will be performed before
deconvolution, by default 0
rotate : float, optional
Rotation angle; if not 0.0 then rotation will be performed around Y
axis after deconvolution, by default 0
width : int, optional
If deskewed, the output image's width, by default 0 (do not crop)
Examples
--------
>>> rl_init(im.shape, otfpath)
>>> decon_result = rl_decon(im)
>>> rl_cleanup()
"""
nz, ny, nx = rawdata_shape
lib.RL_interface_init(
nx,
ny,
nz,
dxdata,
dzdata,
dxpsf,
dzpsf,
deskew,
rotate,
width,
otfpath.encode(),
)
def rl_decon(
im: np.ndarray,
background: Union[int, Literal["auto"]] = 80,
n_iters: int = 10,
shift: int = 0,
save_deskewed: bool = False,
output_shape: Optional[Tuple[int, int, int]] = None,
napodize: int = 15,
nz_blend: int = 0,
pad_val: float = 0.0,
dup_rev_z: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Perform Richardson Lucy Deconvolution.
Performs actual deconvolution. GPU must first be initialized with
:func:`pycudadecon.rl_init`
Parameters
----------
im : np.ndarray
3D image volume to deconvolve
background : int or 'auto'
User-supplied background to subtract. If 'auto', the median value of the
last Z plane will be used as background. by default 80
n_iters : int, optional
Number of iterations, by default 10
shift : int, optional
If deskewed, the output image's extra shift in X (positive->left),
by default 0
save_deskewed : bool, optional
Save deskewed raw data as well as deconvolution result, by default False
output_shape : tuple of int, optional
Specify the output shape after deskewing. Usually this is unnecessary and
can be autodetected. Mostly intended for use within a
:class:`pycudadecon.RLContext` context, by default None
napodize : int, optional
Number of pixels to soften edge with, by default 15
nz_blend : int, optional
Number of top and bottom sections to blend in to reduce axial ringing,
by default 0
pad_val : float, optional
Value with which to pad image when deskewing, by default 0.0
dup_rev_z : bool, optional
Duplicate reversed stack prior to decon to reduce axial ringing,
by default False
Returns
-------
np.ndarray or 2-tuple of np.ndarray
The deconvolved result. If `save_deskewed` is `True`, returns
`(decon_result, deskew_result)`
Raises
------
ValueError
If im.ndim is not 3, or `output_shape` is provided but not length 3
"""
if im.ndim != 3:
raise ValueError("Only 3D arrays supported")
nz, ny, nx = im.shape
if output_shape is None:
output_shape = (lib.get_output_nz(), lib.get_output_ny(), lib.get_output_nx())
elif len(output_shape) != 3:
raise ValueError("Decon output shape must have length==3")
decon_result = np.empty(tuple(output_shape), dtype=np.float32)
if save_deskewed:
deskew_result = np.empty_like(decon_result)
else:
deskew_result = np.empty(1, dtype=np.float32)
# must be 16 bit going in
if not np.issubdtype(im.dtype, np.uint16):
im = im.astype(np.uint16)
if isinstance(background, str) and background == "auto":
background = np.median(im[-1])
rescale = False # not sure if this works yet...
if not im.flags["C_CONTIGUOUS"]:
im = np.ascontiguousarray(im)
lib.RL_interface(
im,
nx,
ny,
nz,
decon_result,
deskew_result,
background,
rescale,
save_deskewed,
n_iters,
shift,
napodize,
nz_blend,
pad_val,
dup_rev_z,
)
if save_deskewed:
return decon_result, deskew_result
else:
return decon_result
def quickDecon(image: np.ndarray, otfpath: str, **kwargs):
"""Perform deconvolution of `image` with otf at `otfpath`.
Not currently used...
"""
rl_init(image.shape, otfpath, **_kwargs_for(rl_init, kwargs))
result = rl_decon(image, **_kwargs_for(rl_decon, kwargs))
lib.RL_cleanup()
return result
class RLContext:
"""Context manager to setup the GPU for RL decon
    Takes care of handing the OTF to the GPU, preparing a cuFFT plan,
and cleaning up after decon. Internally, this calls :func:`rl_init`,
stores the shape of the expected output volume after any deskew/decon,
then calls :func:`rl_cleanup` when exiting the context.
For parameters, see :func:`rl_init`.
Examples
--------
>>> with RLContext(data.shape, otfpath, dz) as ctx:
... result = rl_decon(data, ctx.out_shape)
"""
def __init__(
self,
rawdata_shape: Tuple[int, int, int],
otfpath: str,
dzdata: float = 0.5,
dxdata: float = 0.1,
dzpsf: float = 0.1,
dxpsf: float = 0.1,
deskew: float = 0,
rotate: float = 0,
width: int = 0,
):
self.kwargs = locals()
self.kwargs.pop("self")
self.out_shape: Optional[Tuple[int, int, int]] = None
def __enter__(self):
"""Setup the context and return the ZYX shape of the output image"""
rl_init(**self.kwargs)
self.out_shape = (lib.get_output_nz(), lib.get_output_ny(), lib.get_output_nx())
return self
def __exit__(self, typ, val, traceback):
# exit receives a tuple with any exceptions raised during processing
        # if __exit__ returns True, exceptions will be suppressed
lib.RL_cleanup()
# alias
rl_context = RLContext
def _yield_arrays(
images: Union[PathOrArray, Sequence[PathOrArray]], fpattern="*.tif"
) -> Iterator[np.ndarray]:
"""Yield arrays from an array, path, or sequence of either.
Parameters
----------
images : Union[PathOrArray, Sequence[PathOrArray]]
an array, path, or sequence of either
fpattern : str, optional
used to filter files in a directory, by default "*.tif"
Yields
-------
Iterator[np.ndarray]
Arrays (read from paths if necessary)
Raises
------
OSError
If a directory is provided and no files match fpattern.
"""
if isinstance(images, np.ndarray):
yield images
elif isinstance(images, str):
if os.path.isfile(images):
yield imread(images)
elif os.path.isdir(images):
imfiles = [f for f in os.listdir(images) if fnmatch(f, fpattern)]
if not len(imfiles):
raise OSError(
'No files matching pattern "{}" found in directory: {}'.format(
fpattern, images
)
)
for fpath in imfiles:
yield imread(os.path.join(images, fpath))
else:
for item in images:
yield from _yield_arrays(item)
def decon(
images: Union[PathOrArray, Sequence[PathOrArray]],
psf: PathOrArray,
fpattern: str = "*.tif",
**kwargs
) -> Union[np.ndarray, List[np.ndarray]]:
"""Deconvolve an image or images with a PSF or OTF file.
If `images` is a directory, use the `fpattern` argument to select files
by filename pattern.
Parameters
----------
images : str, np.ndarray, or sequence of either
The array, filepath, directory, or list/tuple thereof to deconvolve
psf : str or np.ndarray
a filepath of a PSF or OTF file, or a 3D numpy PSF array. Function will
auto-detect whether the file is a 3D PSF or a filepath representing a 2D
complex OTF.
fpattern : str, optional
Filepattern to use when a directory is provided in the `images` argument,
by default `*.tif`
    **kwargs
All other kwargs must be valid for either :func:`rl_init` or :func:`rl_decon`.
Returns
-------
np.ndarray or list of array
The deconvolved image(s)
Raises
------
ValueError
If save_deskewed is True and deskew is unset or 0
IOError
If a directory is provided as input and ``fpattern`` yields no files
NotImplementedError
If ``psf`` is provided as a complex, 2D numpy array (OTFs can only be
provided as filenames created with :func:`pycudadecon.make_otf`)
Examples
--------
deconvolve a 3D TIF volume with a 3D PSF volume (e.g. a single bead stack)
>>> result = decon('/path/to/image.tif', '/path/to/psf.tif')
deconvolve all TIF files in a specific directory that match a certain
`filename pattern <https://docs.python.org/3.6/library/fnmatch.html>`_,
(in this example, all TIFs with the string '560nm' in their name)
>>> result = decon(
... '/directory/with/images', '/path/to/psf.tif', fpattern='*560nm*.tif'
... )
deconvolve a list of images, provided either as np.ndarrays, filepaths,
or directories
>>> imarray = tifffile.imread('some_other_image.tif')
>>> inputs = ['/directory/with/images', '/path/to/image.tif', imarray]
>>> result = decon(inputs, '/path/to/psf.tif', fpattern='*560nm*.tif')
"""
if kwargs.get("save_deskewed"):
if kwargs.get("deskew", 1) == 0:
raise ValueError("Cannot use save_deskewed=True with deskew=0")
if not kwargs.get("deskew"):
raise ValueError("Must set deskew != 0 when using save_deskewed=True")
init_kwargs = _kwargs_for(rl_init, kwargs)
decon_kwargs = _kwargs_for(rl_decon, kwargs)
out = []
with TemporaryOTF(psf, **kwargs) as otf:
arraygen = _yield_arrays(images, fpattern)
# first, assume that all of the images are the same shape...
# in which case we can prevent a lot of GPU IO
# grab and store the shape of the first item in the generator
next_im = next(arraygen)
shp = next_im.shape
with RLContext(shp, otf.path, **init_kwargs) as ctx:
while True:
out.append(
rl_decon(next_im, output_shape=ctx.out_shape, **decon_kwargs)
)
try:
next_im = next(arraygen)
# here we check to make sure that the images are still the same
# shape... if not, we'll continue below
if next_im.shape != shp:
break
except StopIteration:
next_im = None
break
# if we had a shape mismatch, there will still be images left to process
# process them the slow way here...
if next_im is not None:
for imarray in [next_im, *arraygen]:
with RLContext(imarray.shape, otf.path, **init_kwargs) as ctx:
out.append(
rl_decon(imarray, output_shape=ctx.out_shape, **decon_kwargs)
)
if isinstance(images, (list, tuple)) and len(images) > 1:
return out
else:
return out[0]
|
28464
|
import numpy as np
# import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
# def plothist(x):
# vmin = x.min()-1
# vmax = x.max()+1
# bins = np.arange(vmin, vmax, (vmax - vmin)/50)
# plt.hist(x, bins=bins)
# plt.show()
# def scatterpred(pred):
# plt.scatter(pred[:,0], pred[:,1])
# plt.show()
# def scatter_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 8)
# plt.scatter(c[:,0], c[:,1], color='r')
# plt.show()
def most_assigned(x, c):
nb_c = len(c)
assign = np.zeros(nb_c)
for i in range(len(x)):
y = x[i].reshape((1,2))
d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1))
assign[d.argmin()] += 1
return assign.argmax()
def mean_on_most_assigned(x, c):
nb_c = len(c)
assign = np.zeros(nb_c)
mean = np.zeros(c.shape)
for i in range(len(x)):
y = x[i].reshape((1,2))
d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1))
idx = d.argmin()
assign[idx] += 1
mean[idx,:] += x[i]
idx = assign.argmax()
return mean[idx,:] / assign[idx]
# def best_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 3)
# plt.scatter(c[:,0], c[:,1], color='g')
# n = most_assigned(pred, c)
# plt.scatter(c[n,0], c[n,1], color='r')
# plt.show()
def clustering_joints(y_pred, k=3):
_,nb_spl,nb_joints,dim = y_pred.shape
y = np.zeros((nb_spl, nb_joints, dim))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,j]
c,v = kmeans(d, k)
n = most_assigned(d, c)
y[s,j,:] = c[n]
return y
def clustering_grid(y_pred, size=10):
_, nb_spl, nb_joints, dim = y_pred.shape
assert dim == 2
yp = np.zeros((nb_spl, nb_joints, dim))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,j,:]
xmin = d[:,0].min()
ymin = d[:,1].min()
xmax = d[:,0].max()
ymax = d[:,1].max()
xstep = (xmax - xmin) / size
ystep = (ymax - ymin) / size
c = np.zeros((size * size, dim))
for x in range(size):
for y in range(size):
c[x + size*y, 0] = xmin + (x + 0.5) * xstep
c[x + size*y, 1] = ymin + (y + 0.5) * ystep
yp[s,j,:] = mean_on_most_assigned(d, c)
return yp
def mean_joints(y_pred):
_, nb_spl, dim, nb_joints = y_pred.shape
assert dim == 2
yp = np.zeros((nb_spl, dim, nb_joints))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,:,j]
yp[s, 0, j] = d[:,0].mean()
yp[s, 1, j] = d[:,1].mean()
return yp
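# --- Usage sketch (not part of the original module) ---------------------------
# Demonstrates the input layout expected by clustering_joints / clustering_grid:
# (nb_predictions, nb_samples, nb_joints, 2). The random data below is purely
# illustrative.
if __name__ == '__main__':
    y_pred = np.random.rand(5, 2, 4, 2)            # 5 stochastic predictions, 2 samples, 4 joints
    print(clustering_joints(y_pred, k=3).shape)    # (2, 4, 2)
    print(clustering_grid(y_pred, size=10).shape)  # (2, 4, 2)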
|
28490
|
def find_metric_transformation_by_name(metric_transformations, metric_name):
for metric in metric_transformations:
if metric["metricName"] == metric_name:
return metric
def find_metric_transformation_by_namespace(metric_transformations, metric_namespace):
for metric in metric_transformations:
if metric["metricNamespace"] == metric_namespace:
return metric
class MetricFilters:
def __init__(self):
self.metric_filters = []
def add_filter(
self, filter_name, filter_pattern, log_group_name, metric_transformations
):
self.metric_filters.append(
{
"filterName": filter_name,
"filterPattern": filter_pattern,
"logGroupName": log_group_name,
"metricTransformations": metric_transformations,
}
)
def get_matching_filters(
self, prefix=None, log_group_name=None, metric_name=None, metric_namespace=None
):
result = []
for f in self.metric_filters:
prefix_matches = prefix is None or f["filterName"].startswith(prefix)
log_group_matches = (
log_group_name is None or f["logGroupName"] == log_group_name
)
metric_name_matches = (
metric_name is None
or find_metric_transformation_by_name(
f["metricTransformations"], metric_name
)
)
namespace_matches = (
metric_namespace is None
or find_metric_transformation_by_namespace(
f["metricTransformations"], metric_namespace
)
)
if (
prefix_matches
and log_group_matches
and metric_name_matches
and namespace_matches
):
result.append(f)
return result
def delete_filter(self, filter_name=None, log_group_name=None):
for f in self.metric_filters:
if f["filterName"] == filter_name and f["logGroupName"] == log_group_name:
self.metric_filters.remove(f)
return self.metric_filters
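# --- Usage sketch (not part of the original module) ---------------------------
# Illustrates the filter / metric-transformation dict shapes the class above
# works with; all names and values below are placeholders.
if __name__ == "__main__":
    filters = MetricFilters()
    filters.add_filter(
        filter_name="errors",
        filter_pattern="ERROR",
        log_group_name="my-log-group",
        metric_transformations=[
            {"metricName": "ErrorCount", "metricNamespace": "MyApp", "metricValue": "1"}
        ],
    )
    assert filters.get_matching_filters(prefix="err")                  # matches on filterName prefix
    assert not filters.get_matching_filters(metric_namespace="Other")  # no transformation in that namespace
    filters.delete_filter(filter_name="errors", log_group_name="my-log-group")
    assert filters.get_matching_filters() == []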
|
28493
|
from makememe.generator.prompts.prompt import Prompt
import datetime
from PIL import Image
from makememe.generator.design.image_manager import Image_Manager
class Waiting(Prompt):
name = "Waiting"
description = "waiting"
def __init__(self):
self.instruction = """
###
Message:I've been waiting for SpaceX to launch the starship for ever
Meme:{"subject": "SpaceX Startship"}
###
Message:I can't wait for makememe.ai to launch, but it's taking a little while
Meme:{"subject": "makememe.ai"}
###
Message:Drakes new album is going to be fire. Why do I have to wait
Meme:{"subject": "Drakes new album"}
###
Message:I want to create an NFT, but opensea.com is taking a while to load
Meme:{"subject": "opensea.com"}
###
"""
def create(self, meme_text):
with Image.open(f"makememe/static/meme_pics/{self.name.lower()}.jpg").convert(
"RGBA"
) as base:
overlay_image = Image_Manager.add_text(
base=base,
text=meme_text["subject"],
position=(600, 950),
font_size=40,
wrapped_width=20,
)
watermark = Image_Manager.add_text(
base=base, text="makememe.ai", position=(30, 1100), font_size=20
)
base = Image.alpha_composite(base, watermark)
out = Image.alpha_composite(base, overlay_image)
if out.mode in ("RGBA", "P"):
out = out.convert("RGB")
# User.objects.filter()
date = datetime.datetime.now()
image_name = f"{date}.jpg"
file_location = f"makememe/static/creations/{image_name}"
out.save(file_location)
return image_name
|
28511
|
import unittest
import torch
from torchvision.models.resnet import BasicBlock, Bottleneck
from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone
class TestBackBones(unittest.TestCase):
def count_layers(self, model):
if isinstance(model[4][0], BasicBlock):
n_convs = 2
elif isinstance(model[4][0], Bottleneck):
n_convs = 3
else:
raise ValueError("Backbone layer block not supported!")
return sum([len(model[i]) for i in range(4, 8)]) * n_convs + 2
def test_resnet(self):
rn_18 = ResNetBackbone('resnet18')
rn_34 = ResNetBackbone('resnet34')
rn_50 = ResNetBackbone('resnet50')
rn_101 = ResNetBackbone('resnet101')
rn_152 = ResNetBackbone('resnet152')
tensor = torch.ones((1, 3, 100, 100))
self.assertEqual(rn_18(tensor).shape[1], 512)
self.assertEqual(rn_34(tensor).shape[1], 512)
self.assertEqual(rn_50(tensor).shape[1], 2048)
self.assertEqual(rn_101(tensor).shape[1], 2048)
        self.assertEqual(rn_152(tensor).shape[1], 2048)
self.assertEqual(self.count_layers(list(rn_18.backbone.children())), 18)
self.assertEqual(self.count_layers(list(rn_34.backbone.children())), 34)
self.assertEqual(self.count_layers(list(rn_50.backbone.children())), 50)
self.assertEqual(self.count_layers(list(rn_101.backbone.children())), 101)
self.assertEqual(self.count_layers(list(rn_152.backbone.children())), 152)
with self.assertRaises(ValueError):
ResNetBackbone('resnet51')
def test_mobilenet(self):
mobilenet = MobileNetBackbone('mobilenet_v2')
tensor = torch.ones((1, 3, 100, 100))
self.assertEqual(mobilenet(tensor).shape[1], 1280)
|
28528
|
import argparse
from differential.plugins.base import Base
class Gazelle(Base):
@classmethod
def get_aliases(cls):
return "gz",
@classmethod
def get_help(cls):
return "Gazelle插件,适用于未经过大规模结构改动的Gazelle站点"
@classmethod
def add_parser(cls, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
super().add_parser(parser)
return parser
|
28531
|
import numpy as np
import tensorflow as tf
import unittest
from xcenternet.model.evaluation.overlap import compute_overlap
from xcenternet.model.evaluation.mean_average_precision import MAP
class TestMeanAveragePrecision(unittest.TestCase):
def setUp(self):
self.map_bboxes = np.array(
[
[[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]],
[[0, 0, 10, 10], [20, 20, 40, 90], [80, 20, 100, 50]],
],
dtype=np.float64,
)
self.map_labels = np.array([[0, 0, 1], [0, 0, 0]])
self.map_predictions = np.array(
[
[
[10, 40, 40, 90, 0.1, 0], # overlap 1.00 with bbox #2, low prob
[60, 10, 90, 60, 0.5, 0], # overlap 0.29 with bbox #1
[10, 30, 50, 90, 0.7, 0], # overlap 0.625 with bbox #2
[0, 0, 100, 90, 0.7, 1], # overlap 0.9 with bbox #3
[0, 0, 100, 80, 0.7, 1], # overlap 0.8 with bbox #3
],
[
[20, 20, 30, 50, 0.6, 0], # 0.21 overlap with #2
[2, 0, 10, 11, 0.8, 0], # overlap with #1
[0, 2, 14, 10, 0.9, 0], # overlap with #1
[0, 0, 10, 10, 0.7, 1], # no ground truth for 1
[80, 20, 100, 50, 0.1, 1], # no ground truth for 1
],
],
dtype=np.float32,
)
self.map_masks = np.array([[1, 1, 1], [1, 1, 1]], dtype=np.float32)
self.result_1 = {"overall": 3 / 4, "weighted": 2 / 3, "per_class": {0: (0.5, 2), 1: (1.0, 1)}}
self.result_both = {"overall": 2 / 3, "weighted": 4 / 9, "per_class": {0: (1 / 3, 5), 1: (1.0, 1)}}
def test_compute_overlap(self):
boxes1 = np.array([[10, 10, 30, 50], [10, 10, 30, 30]], dtype=np.float64)
boxes2 = np.array([[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]], dtype=np.float64)
overlap = compute_overlap(boxes1, boxes2)
self.assertAlmostEqual(1.0, overlap[0][0])
self.assertAlmostEqual(6 / 11, overlap[0][1])
self.assertAlmostEqual(0.0, overlap[0][2])
self.assertAlmostEqual(0.5, overlap[1][0])
self.assertAlmostEqual(4 / 9, overlap[1][1])
self.assertAlmostEqual(0.0, overlap[1][2])
def test_map_update_one(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
result = mean_average_precision.result()
self._assert_map(result, self.result_1)
def test_map_update_both(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
mean_average_precision.update_state(self.map_predictions[1], self.map_bboxes[1], self.map_labels[1])
result = mean_average_precision.result()
self._assert_map(result, self.result_both)
def test_map_update_batch_one(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state_batch(
tf.constant([self.map_predictions[0]]),
tf.constant([self.map_bboxes[0]]),
tf.constant([self.map_labels[0]]),
tf.constant([self.map_masks[0]]),
)
result = mean_average_precision.result()
self._assert_map(result, self.result_1)
def test_map_update_batch_both(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state_batch(
tf.constant(self.map_predictions),
tf.constant(self.map_bboxes),
tf.constant(self.map_labels),
tf.constant(self.map_masks),
)
result = mean_average_precision.result()
self._assert_map(result, self.result_both)
def _assert_map(self, first, second):
self.assertAlmostEqual(first["overall"], second["overall"])
self.assertAlmostEqual(first["weighted"], second["weighted"])
self.assertAlmostEqual(first["per_class"][0][0], second["per_class"][0][0]) # mAP
self.assertAlmostEqual(first["per_class"][0][1], second["per_class"][0][1]) # num objects
self.assertAlmostEqual(first["per_class"][1][0], second["per_class"][1][0]) # mAP
self.assertAlmostEqual(first["per_class"][1][1], second["per_class"][1][1]) # num objects
if __name__ == "__main__":
unittest.main()
|
28552
|
import os
import subprocess
import tempfile
try:
from PyQt5.QtCore import QBuffer, QIODevice, Qt
from PyQt5.QtGui import QImage
except ImportError:
from PySide2.QtCore import QBuffer, QIODevice, Qt
from PySide2.QtGui import QImage
from .texture_format import TextureFormat
def imageToBytes(image):
buffer = QBuffer()
buffer.open(QIODevice.ReadWrite)
image.save(buffer, 'png')
data = buffer.data()
buffer.close()
return data
def loadImage(path):
tex_format = TextureFormat(path)
if tex_format in {'png', 'bmp', 'tga', 'tif', 'tiff', 'jpg', 'jpeg'}:
image = QImage(path)
if not image.isNull():
return image
else:
return
temp_path = os.path.join(tempfile.gettempdir(), str(os.getpid()) + 'hammer_temp_image.png')
temp_path = temp_path.replace('\\', '/')
subprocess.call('iconvert -g off "{0}" "{1}"'.format(path, temp_path))
if os.path.exists(temp_path):
image = QImage(temp_path)
os.remove(temp_path)
return image
|
28580
|
import time
from signal import pause
import logging
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
map_edge_parse = {'falling':GPIO.FALLING, 'rising':GPIO.RISING, 'both':GPIO.BOTH}
map_pull_parse = {'pull_up':GPIO.PUD_UP, 'pull_down':GPIO.PUD_DOWN, 'pull_off':GPIO.PUD_OFF}
map_edge_print = {GPIO.FALLING: 'falling', GPIO.RISING: 'rising', GPIO.BOTH: 'both'}
map_pull_print = {GPIO.PUD_UP:'pull_up', GPIO.PUD_DOWN: 'pull_down', GPIO.PUD_OFF: 'pull_off'}
def parse_edge_key(edge):
if edge in [GPIO.FALLING, GPIO.RISING, GPIO.BOTH]:
return edge
try:
result = map_edge_parse[edge.lower()]
except KeyError:
result = edge
raise KeyError('Unknown Edge type {edge}'.format(edge=edge))
return result
def parse_pull_up_down(pull_up_down):
if pull_up_down in [GPIO.PUD_UP, GPIO.PUD_DOWN, GPIO.PUD_OFF]:
return pull_up_down
try:
result = map_pull_parse[pull_up_down]
except KeyError:
result = pull_up_down
raise KeyError('Unknown Pull Up/Down type {pull_up_down}'.format(pull_up_down=pull_up_down))
return result
def print_edge_key(edge):
try:
result = map_edge_print[edge]
except KeyError:
result = edge
return result
def print_pull_up_down(pull_up_down):
try:
result = map_pull_print[pull_up_down]
except KeyError:
result = pull_up_down
return result
# This function takes a holding time (fractional seconds), a GPIO channel and a GPIO holding state.
# It checks whether the GPIO stays in that state from the moment the function is called. If the state
# changes before the holding time elapses, it returns False; once the holding time is over, it returns True.
def checkGpioStaysInState(holdingTime, gpioChannel, gpioHoldingState):
# Get a reference start time (https://docs.python.org/3/library/time.html#time.perf_counter)
startTime = time.perf_counter()
# Continously check if time is not over
while True:
time.sleep(0.1)
currentState = GPIO.input(gpioChannel)
if holdingTime < (time.perf_counter() - startTime):
break
# Return if state does not match holding state
if (gpioHoldingState != currentState):
return False
# Else: Wait
if (gpioHoldingState != currentState):
return False
return True
class SimpleButton:
def __init__(self, pin, action=lambda *args: None, action2=lambda *args: None, name=None,
bouncetime=500, antibouncehack=False, edge='falling', hold_time=.3, hold_mode=None, pull_up_down='pull_up'):
self.edge = parse_edge_key(edge)
self.hold_time = hold_time
self.hold_mode = hold_mode
self.pull_up = True
self.pull_up_down = parse_pull_up_down(pull_up_down)
self.pin = pin
self.name = name
self.bouncetime = bouncetime
self.antibouncehack = antibouncehack
GPIO.setup(self.pin, GPIO.IN, pull_up_down=self.pull_up_down)
self._action = action
self._action2 = action2
GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler,
bouncetime=self.bouncetime)
self.callback_with_pin_argument = False
def callbackFunctionHandler(self, *args):
if len(args) > 0 and args[0] == self.pin and not self.callback_with_pin_argument:
logger.debug('Remove pin argument by callbackFunctionHandler - args before: {}'.format(args))
args = args[1:]
logger.debug('args after: {}'.format(args))
if self.antibouncehack:
time.sleep(0.1)
inval = GPIO.input(self.pin)
if inval != GPIO.LOW:
return None
if self.hold_mode in ('Repeat', 'Postpone', 'SecondFunc', 'SecondFuncRepeat'):
return self.longPressHandler(*args)
else:
logger.info('{}: execute callback'.format(self.name))
return self.when_pressed(*args)
@property
def when_pressed(self):
logger.info('{}: action'.format(self.name))
return self._action
@property
def when_held(self):
logger.info('{}: action2'.format(self.name))
return self._action2
@when_pressed.setter
def when_pressed(self, func):
        logger.info('{}: set when_pressed'.format(self.name))
self._action = func
GPIO.remove_event_detect(self.pin)
logger.info('add new action')
GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler, bouncetime=self.bouncetime)
def set_callbackFunction(self, callbackFunction):
self.when_pressed = callbackFunction
def longPressHandler(self, *args):
logger.info('{}: longPressHandler, mode: {}'.format(self.name, self.hold_mode))
# instant action (except Postpone mode)
if self.hold_mode != "Postpone":
self.when_pressed(*args)
# action(s) after hold_time
if self.hold_mode == "Repeat":
# Repeated call of main action (multiple times if button is held long enough)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_pressed(*args)
elif self.hold_mode == "Postpone":
# Postponed call of main action (once)
if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_pressed(*args)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
pass
elif self.hold_mode == "SecondFunc":
# Call of secondary action (once)
if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_held(*args)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
pass
elif self.hold_mode == "SecondFuncRepeat":
# Repeated call of secondary action (multiple times if button is held long enough)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_held(*args)
def __del__(self):
logger.debug('remove event detection')
GPIO.remove_event_detect(self.pin)
@property
def is_pressed(self):
if self.pull_up:
return not GPIO.input(self.pin)
return GPIO.input(self.pin)
def __repr__(self):
return '<SimpleButton-{}(pin={},edge={},hold_mode={},hold_time={},bouncetime={},antibouncehack={},pull_up_down={})>'.format(
self.name, self.pin, print_edge_key(self.edge), self.hold_mode, self.hold_time, self.bouncetime,self.antibouncehack,print_pull_up_down(self.pull_up_down)
)
if __name__ == "__main__":
print('please enter pin no to test')
pin = int(input())
func = lambda *args: print('FunctionCall with {}'.format(args))
btn = SimpleButton(pin=pin, action=func, hold_mode='Repeat')
pause()
|
28584
|
import pytest
import time
from .utils import (
init_app, init_db, clean_db,
add_flow, add_run, add_step, add_task, add_artifact,
_test_list_resources, _test_single_resource, add_metadata, get_heartbeat_ts
)
pytestmark = [pytest.mark.integration_tests]
# Fixtures begin
@pytest.fixture
def cli(loop, aiohttp_client):
return init_app(loop, aiohttp_client)
@pytest.fixture
async def db(cli):
async_db = await init_db(cli)
yield async_db
await clean_db(async_db)
# Fixtures end
async def test_list_tasks(cli, db):
_flow = (await add_flow(db, flow_id="HelloFlow")).body
_run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
_step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step", run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, [])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, [])
_task = await create_task(db, step=_step)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, [_task])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, [_task])
async def test_list_tasks_non_numerical(cli, db):
_flow = (await add_flow(db, flow_id="HelloFlow")).body
_run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
_step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step", run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, [])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, [])
_task = await create_task(db, step=_step, task_name="bar")
_, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, None)
_, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, None)
assert len(data) == 1
assert data[0]['task_name'] == 'bar'
assert data[0]['task_id'] != 'bar'
async def test_single_task(cli, db):
await _test_single_resource(cli, db, "/flows/HelloFlow/runs/404/steps/none/tasks/5", 404, {})
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_single_task_non_numerical(cli, db):
_task = await create_task(db, task_name="bar")
_, data = await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/bar".format(**_task), 200, None)
assert data['task_name'] == 'bar'
assert data['task_id'] != 'bar'
async def test_list_old_metadata_task_attempts(cli, db):
# Test tasks with old (missing attempt) metadata
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_artifact_first = await create_ok_artifact_for_task(db, _task)
_artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
_task['status'] = 'unknown'
_task['task_ok'] = 'location'
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['finished_at'] = _artifact_first['ts_epoch']
_task_first_attempt['duration'] = _artifact_first['ts_epoch'] - \
_task_first_attempt['ts_epoch']
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
_task_second_attempt['duration'] = _artifact_second['ts_epoch'] - \
_task_second_attempt['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_old_metadata_task_with_multiple_attempts(cli, db):
# Test tasks with old (missing attempt) metadata
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_artifact_first = await create_ok_artifact_for_task(db, _task)
_artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
_task['status'] = 'unknown'
_task['task_ok'] = 'location'
_task['attempt_id'] = 1
_task['finished_at'] = _artifact_second['ts_epoch']
_task['duration'] = _artifact_second['ts_epoch'] - \
_task['ts_epoch']
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_with_attempt_metadata(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_task['started_at'] = _attempt_first['ts_epoch']
_task['finished_at'] = _artifact_first['ts_epoch']
_task['duration'] = _task['finished_at'] - _task['started_at']
_task['status'] = 'unknown'
_task['task_ok'] = 'location'
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
_task['status'] = 'unknown'
_task['finished_at'] = _attempt_done_first['ts_epoch']
_task['duration'] = _attempt_done_first['ts_epoch'] - _task['started_at']
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
_attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, True) # status 'completed'
_task['status'] = 'completed'
_task['finished_at'] = _attempt_ok_first['ts_epoch']
_task['duration'] = _attempt_ok_first['ts_epoch'] - _task['started_at']
_task['task_ok'] = None # intended behavior, status refinement location field should remain empty when metadata exists.
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_failed_status_with_heartbeat(cli, db):
_task = await create_task(db, last_heartbeat_ts=1, status="failed")
_task['finished_at'] = 1000 # should be last heartbeat in this case, due to every other timestamp missing.
_task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
async def test_task_running_status_with_heartbeat(cli, db):
hb_freeze = get_heartbeat_ts()
_task = await create_task(db, last_heartbeat_ts=hb_freeze)
_task['finished_at'] = None # should not have a finished at for running tasks.
_task['duration'] = hb_freeze * 1000 - _task['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
async def test_list_task_attempts(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
_artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['status'] = 'unknown'
_task_first_attempt['task_ok'] = 'location'
_task_first_attempt['started_at'] = _attempt_first['ts_epoch']
_task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
# Second attempt counts as completed as well due to the _task_ok existing.
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['status'] = 'unknown'
_task_second_attempt['task_ok'] = 'location'
_task_second_attempt['started_at'] = _attempt_second['ts_epoch']
_task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
_task_second_attempt['duration'] = _task_second_attempt['finished_at'] \
- _task_second_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_task_with_attempt_ok_completed(cli, db):
_task = await create_task(db)
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True) # status = 'completed'
_task['started_at'] = _attempt_first['ts_epoch']
_task['finished_at'] = _attempt_ok['ts_epoch']
_task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at']
_task['status'] = 'completed'
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_with_attempt_ok_failed(cli, db):
_task = await create_task(db)
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_task['started_at'] = _attempt_first['ts_epoch']
_task['finished_at'] = _artifact_first['ts_epoch']
_task['duration'] = _task['finished_at'] - _task['started_at']
_task['status'] = 'failed'
_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed'
_task['finished_at'] = _attempt_ok['ts_epoch']
_task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at']
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_list_task_multiple_attempts_failure(cli, db):
_task = await create_task(db)
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
_artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
# Mark first attempt as 'failure' and second as 'completed'
_attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed'
_attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True) # status = 'completed'
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['status'] = 'failed'
_task_first_attempt['started_at'] = _attempt_first['ts_epoch']
_task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
_task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch']
_task_first_attempt['duration'] = _attempt_ok_first['ts_epoch'] - _task_first_attempt['started_at']
# Second attempt counts as completed as well due to the _task_ok existing.
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['status'] = 'completed'
_task_second_attempt['started_at'] = _attempt_second['ts_epoch']
_task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
_task_second_attempt['duration'] = _task_second_attempt['finished_at'] \
- _task_second_attempt['started_at']
_task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch']
_task_second_attempt['duration'] = _attempt_ok_second['ts_epoch'] - _task_second_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_task_attempts_with_attempt_metadata(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
# attempt metadata is written but no artifacts exist yet.
# Queries should return a second attempt at this point already!
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['task_ok'] = 'location' # should have location for status artifact
_task_first_attempt['status'] = 'unknown' # 'unknown' because we cannot determine correct status from DB as attempt_ok is missing
_task_first_attempt['started_at'] = _attempt_first['ts_epoch']
_task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['status'] = 'running'
_task_second_attempt['started_at'] = _attempt_second['ts_epoch']
_task_second_attempt['duration'] = int(round(time.time() * 1000)) - _task_second_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
# Write attempt_ok data for first attempt to check for status changes.
_first_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False)
# NOTE: in current implementation, attempt_ok overrides attempt-done as a more accurate timestamp for finished_at.
_task_first_attempt['finished_at'] = _first_attempt_ok['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
_task_first_attempt['task_ok'] = None # should have no task_ok location, as status can be determined from db.
_task_first_attempt['status'] = 'failed' # 'failed' because now we have attempt_ok false in db.
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
async def test_task_attempt_statuses_with_attempt_ok_failed(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
_attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed'
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
_attempt_done_second = await create_task_attempt_done_metadata(db, _task, attempt=1)
_attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True) # status = 'completed'
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
# NOTE: In the current implementation attempt_ok overrides attempt-done ts_epoch as the finished_at
# as a more accurate timestamp for when a task finished.
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['status'] = 'failed'
_task_first_attempt['started_at'] = _attempt_first['ts_epoch']
_task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['status'] = 'completed'
_task_second_attempt['started_at'] = _attempt_second['ts_epoch']
_task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch']
_task_second_attempt['duration'] = _task_second_attempt['finished_at'] \
- _task_second_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
# Test cases from the google docs table.
# status 'completed' tests
#
# STATUS: attempt_ok in task metadata for the attempt is set to True
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT: created_at property for attempt_ok attribute for the attempt in task metadata
# NOTE: for a more accurate finished_at timestamp, use the greatest timestamp out of task_ok / attempt_ok / attempt-done
# as this is the latest write_timestamp for the task
async def test_task_attempt_status_completed(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt = await create_task_attempt_metadata(db, _task, 0)
_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True)
_attempt_done = await create_task_attempt_done_metadata(db, _task, 0)
_task['status'] = 'completed'
_task['started_at'] = _attempt['ts_epoch']
_task['finished_at'] = _attempt_done['ts_epoch']
_task['duration'] = _task['finished_at'] - _task['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
# status 'running' tests
#
# STATUS 'running':
# Has all of
# Has a start time (NOTE: this requires 'attempt' metadata to be present)
# attempt_ok does not exist in the task metadata
# Has logged a heartbeat in the last x minutes (NOTE: we actually rely on heartbeat for running status.)
# No subsequent attempt exists
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT: does not apply (NULL)
async def test_task_attempt_status_running(cli, db):
_task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts()) # default status: 'running'
_task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt = await create_task_attempt_metadata(db, _task, 0)
_task['started_at'] = _attempt['ts_epoch']
_task['finished_at'] = None
_task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
# status 'failed' tests
#
# STATUS:
# Either of
# attempt_ok in task metadata for the attempt is set to False
# No heartbeat has been logged for the task in the last x minutes and no new attempt has started
# A newer attempt exists
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT:
# Either of (in priority)
# created_at property for attempt_ok attribute for the attempt in task metadata
# The timestamp in the heartbeat column for the task if no subsequent attempt is detected
# If a subsequent attempt exists, use the start time of the subsequent attempt
async def test_task_attempt_status_failed_with_existing_subsequent_attempt(cli, db):
_task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts())
_task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_first_attempt = dict(_task)
_second_attempt = dict(_task)
# we explicitly leave out attempt completion metadata for attempt 0 to test that it fails correctly
# when attempt 1 exists.
# ATTEMPT-0
_first_attempt_meta = await create_task_attempt_metadata(db, _task, 0)
_first_attempt['started_at'] = _first_attempt_meta['ts_epoch']
_first_attempt['duration'] = _first_attempt['last_heartbeat_ts'] * 1000 - _first_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_first_attempt])
# ATTEMPT-1
_second_attempt_meta = await create_task_attempt_metadata(db, _task, 1)
_second_attempt['attempt_id'] = 1
_second_attempt['started_at'] = _second_attempt_meta['ts_epoch']
_second_attempt['duration'] = _second_attempt['last_heartbeat_ts'] * 1000 - _second_attempt['started_at']
# first attempt should be failed due to second attempt existing.
# finished_at timestamp should be the started_at of the second attempt due to it existing.
_first_attempt['status'] = 'failed'
_first_attempt['finished_at'] = _second_attempt['started_at']
_first_attempt['duration'] = _first_attempt['finished_at'] - _first_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_second_attempt, _first_attempt])
# Resource Helpers / factories
async def create_ok_artifact_for_task(db, task, attempt=0):
"Creates and returns a _task_ok artifact for a task"
_task = (await add_artifact(
db,
flow_id=task.get("flow_id"),
run_number=task.get("run_number"),
run_id=task.get("run_id"),
step_name=task.get("step_name"),
task_id=task.get("task_id"),
task_name=task.get("task_name"),
artifact={
"name": "_task_ok",
"location": "location",
"ds_type": "ds_type",
"sha": "sha",
"type": "type",
"content_type": "content_type",
"attempt_id": attempt
})
).body
return _task
async def create_task(db, step=None, status="running", task_id=None, task_name=None, last_heartbeat_ts=None):
"Creates and returns a task with specific status. Optionally creates the task for a specific step if provided."
if not step:
_flow = (await add_flow(db, flow_id="HelloFlow")).body
_run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
step = (await add_step(
db,
flow_id=_run.get("flow_id"),
run_number=_run.get("run_number"),
step_name="step")
).body
_task = (await add_task(
db,
flow_id=step.get("flow_id"),
run_number=step.get("run_number"),
step_name=step.get("step_name"),
task_id=task_id,
task_name=task_name,
last_heartbeat_ts=last_heartbeat_ts)
).body
_task['status'] = status
return _task
async def create_metadata_for_task(db, task, metadata={}, tags=None):
"Creates a metadata record for a task"
_meta = (await add_metadata(db,
flow_id=task.get("flow_id"),
run_number=task.get("run_number"),
run_id=task.get("run_id"),
step_name=task.get("step_name"),
task_id=task.get("task_id"),
task_name=task.get("task_name"),
tags=tags,
metadata=metadata)
).body
return _meta
async def create_task_attempt_metadata(db, task, attempt=0):
"Create 'attempt' metadata for a task"
return await create_metadata_for_task(
db,
task,
metadata={
"type": "attempt",
"field_name": "attempt",
"value": str(attempt)
}
)
async def create_task_attempt_done_metadata(db, task, attempt: int = 0):
"Create 'attempt-done' metadata for a task"
return await create_metadata_for_task(
db,
task,
metadata={
"type": "attempt-done",
"field_name": "attempt-done",
"value": str(attempt)
}
)
async def create_task_attempt_ok_metadata(db, task, attempt_id: int, attempt_ok: bool = False):
"Create 'attempt_ok' metadata for a task"
return await create_metadata_for_task(
db,
task,
tags=["attempt_id:{attempt_id}".format(attempt_id=attempt_id)],
metadata={
"type": "internal_attempt_status",
"field_name": "attempt_ok",
"value": str(attempt_ok)
}
)
|
28604
|
import os
from pathlib import Path
import pandas as pd
from lime.lime_tabular import LimeTabularExplainer
from ml_editor.data_processing import get_split_by_author
FEATURE_DISPLAY_NAMES = {
    "num_questions": "frequency of question marks",
    "num_periods": "frequency of periods",
    "num_commas": "frequency of commas",
    "num_exclam": "frequency of exclamation marks",
    "num_quotes": "frequency of quotation marks",
    "num_colon": "frequency of colons",
    "num_semicolon": "frequency of semicolons",
    "num_stops": "frequency of stop words",
    "num_words": "word count",
    "num_chars": "character count",
    "num_diff_words": "vocabulary diversity",
    "avg_word_len": "average word length",
    "polarity": "positive sentiment",
    "ADJ": "frequency of adjectives",
    "ADP": "frequency of adpositions",
    "ADV": "frequency of adverbs",
    "AUX": "frequency of auxiliary verbs",
    "CONJ": "frequency of conjunctions",
    "DET": "frequency of determiners",
    "INTJ": "frequency of interjections",
    "NOUN": "frequency of nouns",
    "NUM": "frequency of numerals",
    "PART": "frequency of particles",
    "PRON": "frequency of pronouns",
    "PROPN": "frequency of proper nouns",
    "PUNCT": "frequency of punctuation",
    "SCONJ": "frequency of subordinating conjunctions",
    "SYM": "frequency of symbols",
    "VERB": "frequency of verbs",
    "X": "frequency of other words",
}
POS_NAMES = {
"ADJ": "adjective",
"ADP": "adposition",
"ADV": "adverb",
"AUX": "auxiliary verb",
"CONJ": "coordinating conjunction",
"DET": "determiner",
"INTJ": "interjection",
"NOUN": "noun",
"NUM": "numeral",
"PART": "particle",
"PRON": "pronoun",
"PROPN": "proper noun",
"PUNCT": "punctuation",
"SCONJ": "subordinating conjunction",
"SYM": "symbol",
"VERB": "verb",
"X": "other",
}
FEATURE_ARR = [
"num_questions",
"num_periods",
"num_commas",
"num_exclam",
"num_quotes",
"num_colon",
"num_stops",
"num_semicolon",
"num_words",
"num_chars",
"num_diff_words",
"avg_word_len",
"polarity",
]
FEATURE_ARR.extend(POS_NAMES.keys())
def get_explainer():
"""
    Prepares a LIME explainer using the training data.
    Fast enough that it does not need to be serialized.
    :return: LIME explainer object
"""
curr_path = Path(os.path.dirname(__file__))
data_path = Path("../data/writers_with_features.csv")
df = pd.read_csv(curr_path / data_path)
train_df, test_df = get_split_by_author(df, test_size=0.2, random_state=40)
explainer = LimeTabularExplainer(
train_df[FEATURE_ARR].values,
feature_names=FEATURE_ARR,
class_names=["low", "high"],
)
return explainer
EXPLAINER = get_explainer()
def simplify_order_sign(order_sign):
"""
    Simplifies the comparison sign for clearer output to the user.
    :param order_sign: input comparison operator
    :return: simplified operator
"""
if order_sign in ["<=", "<"]:
return "<"
if order_sign in [">=", ">"]:
return ">"
return order_sign
def get_recommended_modification(simple_order, impact):
"""
    Generates a recommendation sentence based on the operator and the type of impact.
    :param simple_order: simplified operator
    :param impact: whether the change has a positive or negative impact
    :return: recommendation string
"""
bigger_than_threshold = simple_order == ">"
has_positive_impact = impact > 0
if bigger_than_threshold and has_positive_impact:
return "높일 필요가 없습니다"
if not bigger_than_threshold and not has_positive_impact:
return "높이세요"
if bigger_than_threshold and not has_positive_impact:
return "낮추세요"
if not bigger_than_threshold and has_positive_impact:
return "낮출 필요가 없습니다"
def parse_explanations(exp_list):
"""
    Parses the explanations returned by LIME into a user-readable form.
    :param exp_list: explanations returned by the LIME explainer
    :return: array of dicts containing the strings to show to the user
"""
parsed_exps = []
for feat_bound, impact in exp_list:
conditions = feat_bound.split(" ")
        # Ignore double-bounded conditions such as 1 <= a < 3
        # because they are hard to express as a recommendation
if len(conditions) == 3:
feat_name, order, threshold = conditions
simple_order = simplify_order_sign(order)
recommended_mod = get_recommended_modification(simple_order, impact)
parsed_exps.append(
{
"feature": feat_name,
"feature_display_name": FEATURE_DISPLAY_NAMES[feat_name],
"order": simple_order,
"threshold": threshold,
"impact": impact,
"recommendation": recommended_mod,
}
)
return parsed_exps
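# Illustrative output of parse_explanations (a sketch; the feature bound and impact
# values are made up):
#
#     parse_explanations([("num_words <= 25.00", -0.02)])
#     # -> [{"feature": "num_words", "feature_display_name": "단어 개수",
#     #      "order": "<", "threshold": "25.00", "impact": -0.02,
#     #      "recommendation": "높이세요"}]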
def get_recommendation_string_from_parsed_exps(exp_list):
"""
    Generates recommendation text that can be displayed by the Flask app.
    :param exp_list: array of dicts containing the explanations
    :return: HTML recommendation text
"""
recommendations = []
for i, feature_exp in enumerate(exp_list):
recommendation = "%s %s" % (
feature_exp["recommendation"],
feature_exp["feature_display_name"],
)
font_color = "green"
        if feature_exp["recommendation"] in ["높이세요", "낮추세요"]:
font_color = "red"
rec_str = """<font color="%s">%s) %s</font>""" % (
font_color,
i + 1,
recommendation,
)
recommendations.append(rec_str)
rec_string = "<br/>".join(recommendations)
return rec_string
|
28671
|
import argparse
import os
import json
from torch.utils.tensorboard import SummaryWriter
import random
import numpy as np
import zipfile
import torch
from transformers import AdamW, get_linear_schedule_with_warmup
from LAUG.nlu.jointBERT_new.dataloader import Dataloader
from LAUG.nlu.jointBERT_new.jointBERT import JointBERT
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
parser = argparse.ArgumentParser(description="Train a model.")
parser.add_argument('--config_path',
help='path to config file')
if __name__ == '__main__':
args = parser.parse_args()
config = json.load(open(args.config_path))
data_dir = config['data_dir']
output_dir = config['output_dir']
log_dir = config['log_dir']
DEVICE = config['DEVICE']
set_seed(config['seed'])
if 'multiwoz' in data_dir:
print('-'*20 + 'dataset:multiwoz' + '-'*20)
from LAUG.nlu.jointBERT_new.multiwoz.postprocess import is_slot_da, calculateF1, recover_intent
elif 'camrest' in data_dir:
print('-' * 20 + 'dataset:camrest' + '-' * 20)
from LAUG.nlu.jointBERT_new.camrest.postprocess import is_slot_da, calculateF1, recover_intent
elif 'crosswoz' in data_dir:
print('-' * 20 + 'dataset:crosswoz' + '-' * 20)
from LAUG.nlu.jointBERT_new.crosswoz.postprocess import is_slot_da, calculateF1, recover_intent
elif 'frames' in data_dir:
print('-' * 20 + 'dataset:frames' + '-' * 20)
from LAUG.nlu.jointBERT_new.frames.postprocess import is_slot_da, calculateF1, recover_intent
intent_vocab = json.load(open(os.path.join(data_dir, 'intent_vocab.json')))
tag_vocab = json.load(open(os.path.join(data_dir, 'tag_vocab.json')))
req_vocab = json.load(open(os.path.join(data_dir, 'req_vocab.json')))
req_slot_vocab = json.load(open(os.path.join(data_dir, 'req_slot_vocab.json')))
slot_intent_vocab = json.load(open(os.path.join(data_dir,'slot_intent_vocab.json')))
print('intent_vocab = ',intent_vocab)
print('tag_vocab = ', tag_vocab)
print('req_vocab = ', req_vocab)
print('req_slot_vocab = ', req_slot_vocab)
print('='*100)
dataloader = Dataloader(intent_vocab=intent_vocab, tag_vocab=tag_vocab, req_vocab=req_vocab, req_slot_vocab=req_slot_vocab, slot_intent_vocab=slot_intent_vocab,
pretrained_weights=config['model']['pretrained_weights'])
print('intent num:', len(intent_vocab))
print('tag num:', len(tag_vocab))
print('req num:', len(req_vocab))
for data_key in ['train', 'val', 'test']:
dataloader.load_data(json.load(open(os.path.join(data_dir, '{}_data.json'.format(data_key)))), data_key,
cut_sen_len=config['cut_sen_len'], use_bert_tokenizer=config['use_bert_tokenizer'])
print('{} set size: {}'.format(data_key, len(dataloader.data[data_key])))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
writer = SummaryWriter(log_dir)
model = JointBERT(config['model'], DEVICE, dataloader.tag_dim, dataloader.intent_dim, dataloader.req_dim, dataloader, dataloader.intent_weight, dataloader.req_weight)
model.to(DEVICE)
if config['model']['finetune']:
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if
not any(nd in n for nd in no_decay) and p.requires_grad],
'weight_decay': config['model']['weight_decay']},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad],
'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=config['model']['learning_rate'],
eps=config['model']['adam_epsilon'])
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config['model']['warmup_steps'],
num_training_steps=config['model']['max_step'])
else:
for n, p in model.named_parameters():
if 'bert' in n:
p.requires_grad = False
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=config['model']['learning_rate'])
for name, param in model.named_parameters():
print(name, param.shape, param.device, param.requires_grad)
max_step = config['model']['max_step']
check_step = config['model']['check_step']
batch_size = config['model']['batch_size']
print('check_step = {}, batch_size = {}'.format(check_step, batch_size))
model.zero_grad()
train_slot_loss, train_intent_loss, train_req_loss = 0, 0, 0
best_val_f1 = 0.
writer.add_text('config', json.dumps(config))
for step in range(1, max_step + 1):
model.train()
batched_data = dataloader.get_train_batch(batch_size)
batched_data = tuple(t.to(DEVICE) for t in batched_data)
word_seq_tensor, tag_seq_tensor, intent_tensor, req_tensor, req_mask_tensor, word_mask_tensor, tag_mask_tensor,base_tag_mask_tensor, context_seq_tensor, context_mask_tensor = batched_data
if not config['model']['context']:
context_seq_tensor, context_mask_tensor = None, None
_, _, _, slot_loss, intent_loss, req_loss = model.forward(word_seq_tensor, word_mask_tensor, tag_seq_tensor, tag_mask_tensor,
intent_tensor, req_tensor, req_mask_tensor, context_seq_tensor, context_mask_tensor)
train_slot_loss += slot_loss.item()
train_intent_loss += intent_loss.item()
train_req_loss += req_loss.item()
loss = slot_loss + intent_loss + req_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
if config['model']['finetune']:
scheduler.step() # Update learning rate schedule
model.zero_grad()
if step % check_step == 0:
train_slot_loss = train_slot_loss / check_step
train_intent_loss = train_intent_loss / check_step
train_req_loss = train_req_loss / check_step
print('[%d|%d] step' % (step, max_step))
print('\t slot loss:', train_slot_loss)
print('\t intent loss:', train_intent_loss)
print('\t request loss:', train_req_loss)
            predict_golden = {'intent': [], 'slot': [], 'req': [], 'overall': []}
            val_slot_loss, val_intent_loss, val_req_loss = 0, 0, 0
model.eval()
for pad_batch, ori_batch, real_batch_size in dataloader.yield_batches(batch_size, data_key='val'):
pad_batch = tuple(t.to(DEVICE) for t in pad_batch)
word_seq_tensor, tag_seq_tensor, intent_tensor, req_tensor, req_mask_tensor, word_mask_tensor, tag_mask_tensor, base_tag_mask_tensor, context_seq_tensor, context_mask_tensor = pad_batch
if not config['model']['context']:
context_seq_tensor, context_mask_tensor = None, None
with torch.no_grad():
slot_logits, intent_logits, req_logits,slot_loss, intent_loss,req_loss = model.forward(word_seq_tensor,
word_mask_tensor,
tag_seq_tensor,
tag_mask_tensor,
intent_tensor,
req_tensor,
req_mask_tensor,
context_seq_tensor,
context_mask_tensor)
val_slot_loss += slot_loss.item() * real_batch_size
val_intent_loss += intent_loss.item() * real_batch_size
val_req_loss += req_loss.item() * real_batch_size
for j in range(real_batch_size):
predict_intent, predict_req, predict_slot, predict_overall = recover_intent(dataloader, intent_logits[j], req_logits[j*dataloader.req_dim: (j+1)*dataloader.req_dim], slot_logits[j*dataloader.slot_intent_dim:(j+1)*dataloader.slot_intent_dim], base_tag_mask_tensor[j*dataloader.slot_intent_dim:(j+1)*dataloader.slot_intent_dim],
ori_batch[j][0], ori_batch[j][-4])
#assert(ori_batch[j][3] != [])
predict_golden['overall'].append({
'predict': predict_overall,
'golden': ori_batch[j][3]
})
predict_golden['req'].append({
'predict':predict_req,
'golden':ori_batch[j][5] #req
})
'''
predict_golden['slot'].append({
'predict': predict_slot,#[x for x in predicts if is_slot_da(x)],
'golden': ori_batch[j][1]#tag
})
'''
predict_golden['intent'].append({
'predict': predict_intent,
'golden': ori_batch[j][2]#intent
})
for j in range(10):
writer.add_text('val_sample_{}'.format(j),
json.dumps(predict_golden['overall'][j], indent=2, ensure_ascii=False),
global_step=step)
total = len(dataloader.data['val'])
val_slot_loss /= total
val_intent_loss /= total
val_req_loss /= total
print('%d samples val' % total)
print('\t slot loss:', val_slot_loss)
print('\t intent loss:', val_intent_loss)
print('\t req loss:', val_req_loss)
writer.add_scalar('intent_loss/train', train_intent_loss, global_step=step)
writer.add_scalar('intent_loss/val', val_intent_loss, global_step=step)
writer.add_scalar('req_loss/train', train_req_loss, global_step=step)
writer.add_scalar('req_loss/val', val_req_loss, global_step=step)
writer.add_scalar('slot_loss/train', train_slot_loss, global_step=step)
writer.add_scalar('slot_loss/val', val_slot_loss, global_step=step)
for x in ['intent','req','overall']:
#for x in ['intent', 'slot', 'req','overall']:# pass slot
precision, recall, F1 = calculateF1(predict_golden[x], x=='overall')
print('-' * 20 + x + '-' * 20)
print('\t Precision: %.2f' % (100 * precision))
print('\t Recall: %.2f' % (100 * recall))
print('\t F1: %.2f' % (100 * F1))
writer.add_scalar('val_{}/precision'.format(x), precision, global_step=step)
writer.add_scalar('val_{}/recall'.format(x), recall, global_step=step)
writer.add_scalar('val_{}/F1'.format(x), F1, global_step=step)
if F1 > best_val_f1:
best_val_f1 = F1
torch.save(model.state_dict(), os.path.join(output_dir, 'pytorch_model.bin'))
print('best val F1 %.4f' % best_val_f1)
print('save on', output_dir)
            train_slot_loss, train_intent_loss, train_req_loss = 0, 0, 0
writer.add_text('val overall F1', '%.2f' % (100 * best_val_f1))
writer.close()
model_path = os.path.join(output_dir, 'pytorch_model.bin')
zip_path = config['zipped_model_path']
print('zip model to', zip_path)
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
zf.write(model_path)
|
28688
|
from pylayers.antprop.aarray import *
import matplotlib.pyplot as plt
import pdb
print('--------------')
print('antprop/test_subarray.py')
print('--------------')
fcGHz = 60
lamda = 0.3/fcGHz
N1 = [ 4,4,1]
N2 = [ 2,2,1]
dm1 = [lamda/2.,lamda/2.,0]
dm2 = [3*lamda,3*lamda,0]
A1 = AntArray(fGHz=np.array([fcGHz]),N=N1,dm=dm1,typant='Omni')
A2 = AntArray(fGHz=np.array([fcGHz]),N=N2,dm=dm2,array=A1)
#A1.eval()
|
28689
|
import json
from requests import Request
from pydantic import BaseModel
from pydoc import locate
from typing import List, Optional
import dataclasses
from dataclasses import dataclass
from datetime import datetime
SCHEMAS = {}
class ResourceBaseSchema(BaseModel):
id: Optional[str]
private: Optional[bool]
canRead: Optional[List[str]]
canWrite: Optional[List[str]]
owner: Optional[str]
anonymousComments: Optional[bool]
comments: Optional[List[str]]
createdAt: Optional[str]
updatedAt: Optional[str]
class Config:
fields = {'id': '_id'}
class ResourceInfo(BaseModel):
resourceType: str
resourceId: str
class Comment(BaseModel):
id: Optional[str]
owner: Optional[str]
comments: Optional[List[str]]
text: Optional[str]
flagged: Optional[bool]
resource: Optional[ResourceInfo]
otherResources: Optional[List[ResourceInfo]]
closed: Optional[bool]
assignedTo: Optional[List[str]]
labels: Optional[List[str]]
priority: Optional[str]
status: Optional[str]
view: Optional[dict]
screenshot: Optional[str]
class Config:
fields = {'id': '_id'}
def clean_empty(d):
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (clean_empty(v) for v in d) if v is not None]
return {k: v for k, v in ((k, clean_empty(v)) for k, v in d.items()) if v is not None}
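# Illustrative behaviour of clean_empty (a sketch):
#
#     clean_empty({"id": "abc", "owner": None, "comments": [None, "ok"]})
#     # -> {"id": "abc", "comments": ["ok"]}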
class ResourceBase(object):
def __init__(self, session, basepath, me, name, methods):
self.s = session
self._path = basepath + '/' + name
self.me = me
self._comment_path = basepath + '/comments/' + name
self.name = name
self.methods = methods
self.method_dict = {
'list': {
'method': 'GET',
},
'create': {
'method': 'POST',
},
'get': {
'method': 'GET',
},
'update': {
'method': 'PUT',
},
'delete': {
'method': 'DELETE',
},
'comment_get': {
'method': 'GET',
},
'comment_create': {
'method': 'POST'}
}
for method in self.methods:
if method == 'retrieve':
self.__setattr__('retrieve', 'x')
self.schema = None
self.comment_schema = Comment
def _prep_request(self, method, path, comment, data, params):
assert method in self.methods, 'method {} not supported for {} calls'.format(method, self.name)
if comment:
url = self._comment_path + path
if data:
dataclass_instance = self.comment_schema.parse_obj(data)
data = clean_empty(dataclass_instance.dict(by_alias=True))
else:
url = self._path + path
if data:
if isinstance(data, list):
data_list = []
if self.schema:
for d in data:
if isinstance(d, dict):
dataclass_instance = self.schema.parse_obj(d)
data_list.append(clean_empty(dataclass_instance.dict(by_alias=True)))
elif isinstance(d, str):
data_list.append(d)
data = data_list
elif self.schema:
if isinstance(data, dict):
dataclass_instance = self.schema.parse_obj(data)
else:
dataclass_instance = data
data = clean_empty(dataclass_instance.dict(by_alias=True))
return self.s.prepare_request(Request(self.method_dict[method]['method'], url, json=data, params=params))
def _parse_response(self, response, comment=False, schema=None):
"""Parse the request response
Arguments:
response {Response} -- A response from the server
comment {bool} -- Whether or not the response is a comment
schema {Schema} -- Optional schema to parse the response with
Returns:
Schema / dict -- An object derived from SpeckleObject if possible, otherwise
a dict of the response resource
"""
if schema:
# If a schema is defined, then try to parse it with that
return schema.parse_obj(response)
elif comment:
return self.comment_schema.parse_obj(response)
elif 'type' in response:
# Otherwise, check if the incoming type is within the dict of loaded schemas
types = response['type'].split('/')
for t in reversed(types):
if t in SCHEMAS:
return SCHEMAS[t].parse_obj(response)
if self.schema:
return self.schema.parse_obj(response)
return response
def make_request(self, method, path, data=None, comment=False, schema=None, params=None):
r = self._prep_request(method, path, comment, data, params)
resp = self.s.send(r)
resp.raise_for_status()
response_payload = resp.json()
assert response_payload['success'] == True, json.dumps(response_payload)
if 'resources' in response_payload:
return [self._parse_response(resource, comment, schema) for resource in response_payload['resources']]
elif 'resource' in response_payload:
return self._parse_response(response_payload['resource'], comment, schema)
else:
return response_payload # Not sure what to do in this scenario or when it might occur
|
28721
|
from __future__ import print_function, division, absolute_import, unicode_literals
from numbers import Number
import numpy as np
from voluptuous import Schema, Required, Any, Range
from mitxgraders.comparers.baseclasses import CorrelatedComparer
from mitxgraders.helpers.calc.mathfuncs import is_nearly_zero
from mitxgraders.helpers.validatorfuncs import text_string
from mitxgraders.exceptions import ConfigError
def get_linear_fit_error(x, y):
"""
Get total error in a linear regression y = ax + b between samples x and y.
If x is constant, returns the result of get_offset_fit_error(x, y).
Arguments:
x, y: flat numpy array
Usage
=====
Zero error in a linear relationship:
>>> x = np.array([2, 5, 8])
>>> result = get_linear_fit_error(x, 2*x + 1)
>>> round(result, 6)
0.0
If x is constant and y is constant, they are considered linearly related
>>> x = np.array([1, 1, 1])
>>> result = get_linear_fit_error(x, 2*x + 1)
>>> round(result, 6)
0.0
If x is constant but y is not, the error associated with the best fit of a constant is computed
>>> x = np.array([1, 1, 1])
>>> y = np.array([0, 1, 2])
>>> result = get_linear_fit_error(x, y)
>>> result == np.sqrt(2)
True
"""
A = np.vstack([x, np.ones(len(x))]).T
coeffs, residuals, rank, singular_vals = np.linalg.lstsq(A, y, rcond=-1)
if rank == 1:
# The input values x are constant. Return the linear offset error.
return get_offset_fit_error(x, y)
return np.sqrt(residuals.item())
def get_proportional_fit_error(x, y):
"""
Get total error in a linear regression y = ax between samples x and y, with
zero constant term.
Arguments:
x, y: flat numpy array
Usage
=====
Reveals error if relationship is not proportional:
>>> x = np.array([2, 5, 8])
>>> result = get_proportional_fit_error(x, 2*x + 1)
>>> result # doctest: +ELLIPSIS
0.76200...
Zero error in a proportional relationship:
>>> result = get_proportional_fit_error(x, 2*x)
>>> round(result, 6)
0.0
If x is constant and y is constant, they are considered proportional
>>> x = np.array([1, 1, 1])
>>> result = get_proportional_fit_error(x, 2*x)
>>> round(result, 6)
0.0
If x is constant but y is not, the error associated with the best fit of a constant is computed
>>> x = np.array([1, 1, 1])
>>> y = np.array([0, 1, 2])
>>> result = get_proportional_fit_error(x, y)
>>> result == np.sqrt(2)
True
"""
A = np.vstack(x)
coeffs, residuals, rank, singular_vals = np.linalg.lstsq(A, y, rcond=-1)
return np.sqrt(residuals.item())
def get_offset_fit_error(x, y):
"""
Get total error in a linear regression y = x + b between samples x and y,
with slope term equal to 1.
Arguments:
x, y: flat numpy array
Usage
=====
Reveals error if relationship is not constant-offset:
>>> x = np.array([2, 5, 8])
>>> result = get_offset_fit_error(x, 2*x + 1)
>>> result # doctest: +ELLIPSIS
4.242640...
Zero error in a constant-offset relationship:
>>> result = get_offset_fit_error(x, x + 5)
>>> round(result, 6)
0.0
"""
mean = np.mean(y - x)
return np.sqrt(sum(np.square(x + mean - y)))
def get_equals_fit_error(x, y):
"""
Get total error in the difference between two samples.
Arguments:
x, y: compatible numpy arrays
"""
return np.sqrt(sum(np.square(x - y)))
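# Illustrative check of get_equals_fit_error (a sketch):
#
#     get_equals_fit_error(np.array([1, 2, 3]), np.array([1, 2, 5]))  # -> 2.0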
class LinearComparer(CorrelatedComparer):
"""
    Used to check that there is a linear relationship between the student's input
and the expected answer.
The general linear relationship is expected = a * student + b. The comparer
can check for four subtypes:
equals: (a, b) = (1, 0)
proportional: b = 0
offset: a = 1
linear: neither a nor b fixed
Configuration
=============
The first four configuration keys determine the amount of partial credit
given for a specific type of linear relationship. If set to None, the
relationship is not checked.
equals (None | number): defaults to 1.0
proportional (None | number): defaults to 0.5
offset (None | number): defaults to None
linear (None | number): defaults to None
The remaining configuration keys specify a feedback message to be given
in each case:
equals_msg (str): defaults to ''
proportional_msg (str): defaults to 'The submitted answer differs from
an expected answer by a constant factor.'
offset_msg (str): defaults to ''
linear_msg (str): defaults to ''
NOTE:
LinearComparer can be used with MatrixGrader, but the linear
relationship must be the same for all entries. Essentially, this means
we test for
            expected_array = scalar_a * student_array + scalar_b * ONES
where ONES is a matrix of all ones.
The ONES offset works as expected for vectors, but is probably not what
you want for matrices.
"""
schema_config = Schema({
Required('equals', default=1.0): Any(None, Range(0, 1)),
Required('proportional', default=0.5): Any(None, Range(0, 1)),
Required('offset', default=None): Any(None, Range(0, 1)),
Required('linear', default=None): Any(None, Range(0, 1)),
Required('equals_msg', default=''): text_string,
Required('proportional_msg', default=(
'The submitted answer differs from an expected answer by a '
'constant factor.'
)): text_string,
Required('offset_msg', default=''): text_string,
Required('linear_msg', default=''): text_string,
})
all_modes = ('equals', 'proportional', 'offset', 'linear')
zero_compatible_modes = ('equals', 'offset')
def __init__(self, config=None, **kwargs):
super(LinearComparer, self).__init__(config, **kwargs)
self.modes = tuple(mode for mode in self.all_modes if self.config[mode] is not None)
error_calculators = {
'equals': get_equals_fit_error,
'proportional': get_proportional_fit_error,
'offset': get_offset_fit_error,
'linear': get_linear_fit_error,
}
@staticmethod
def check_comparing_zero(comparer_params_evals, student_evals, tolerance):
"""
Check whether student input is nearly zero, or author input is exactly zero
"""
student_zero = all([
is_nearly_zero(x, tolerance, reference=y)
for x, y in zip(student_evals, comparer_params_evals)
])
expected_zero = all(np.all(x == 0.0) for [x] in comparer_params_evals)
return student_zero or expected_zero
def get_valid_modes(self, is_comparing_zero):
"""
Returns a copy of self.modes, first removing 'proportional' and 'linear'
when is_comparing_zero is truthy.
"""
if is_comparing_zero:
return tuple(mode for mode in self.modes
if mode in self.zero_compatible_modes)
return self.modes
def __call__(self, comparer_params_evals, student_evals, utils):
student_evals_norm = np.linalg.norm(student_evals)
# Validate student input shape...only needed for MatrixGrader
if hasattr(utils, 'validate_shape'):
# in numpy, scalars have empty tuples as their shapes
expected_0 = comparer_params_evals[0][0]
scalar_expected = isinstance(expected_0, Number)
shape = tuple() if scalar_expected else expected_0.shape
utils.validate_shape(student_evals[0], shape)
        # Raise an error if there are fewer than 3 samples
if len(student_evals) < 3:
msg = 'Cannot perform linear comparison with less than 3 samples'
raise ConfigError(msg)
is_comparing_zero = self.check_comparing_zero(comparer_params_evals,
student_evals, utils.tolerance)
filtered_modes = self.get_valid_modes(is_comparing_zero)
# Get the result for each mode
# flatten in case individual evals are arrays (as in MatrixGrader)
student = np.array(student_evals).flatten()
expected = np.array(comparer_params_evals).flatten()
errors = [self.error_calculators[mode](student, expected) for mode in filtered_modes]
results = [
{'grade_decimal': self.config[mode], 'msg': self.config[mode+'_msg']}
if is_nearly_zero(error, utils.tolerance, reference=student_evals_norm)
else
{'grade_decimal': 0, 'msg': ''}
for mode, error in zip(filtered_modes, errors)
]
# Get the best result using max.
# For a list of pairs, max compares by 1st index and uses 2nd to break ties
key = lambda result: (result['grade_decimal'], result['msg'])
return max(results, key=key)
|
28728
|
import argparse
import os
import glob
import pandas as pd
from libraryTools import imageRegionOfInterest
#filename,width,height,class,xmin,ymin,xmax,ymax
#20170730_132530-(F00000).jpeg,576,1024,sinaleira,221,396,246,437
valid_images = [".jpg",".gif",".png",".tga",".jpeg"]
def run(image_path, classNameList = ["someclass"], searchSubdir = False):
global classes_qtd
global images_total_qtd
global images_without_classes_qtd
global xml_list
classes_qtd = []
images_total_qtd = 0
images_without_classes_qtd = 0
xml_list = []
searchFolder(image_path, classNameList, searchSubdir)
print()
print('Total Images: ', images_total_qtd)
print('Images without classes: ', images_without_classes_qtd)
print('Classes: ')
for q in classes_qtd:
        print(q)
def searchFolder(image_path, classNameList, searchSubdir):
global valid_images
global classes_qtd
global images_total_qtd
global images_without_classes_qtd
global xml_list
print("Folder", image_path)
obj = imageRegionOfInterest(image_path)
for filename in os.listdir(image_path):
if searchSubdir and os.path.isdir(os.path.join(image_path, filename)):
searchFolder(os.path.join(image_path, filename), classNameList, searchSubdir)
name, ext = os.path.splitext(filename)
if ext.lower() not in valid_images:
continue
print(filename)
images_total_qtd = images_total_qtd + 1
obj.setFileImage(filename)
points = obj.loadBoxFromTxt()
if len(points)>0:
for point in points:
iclass = int(point[4])
while len(classes_qtd) < iclass+1:
classes_qtd.append(0)
classes_qtd[iclass] = classes_qtd[iclass] + 1
else:
images_without_classes_qtd = images_without_classes_qtd + 1
return
#=============================================================================
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True, help="images path")
ap.add_argument('-className', nargs='*', help='class name list (0..9 positions, max 10), e.g. -className dog cat')
ap.add_argument('-s', '--subdir', action='store_true', help="Search sub folders")
args = vars(ap.parse_args())
run(args["path"], args["className"], args["subdir"])
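# Hypothetical invocation (the script's actual filename is not known from this file):
#
#     python label_stats.py -p ./images -className dog cat -s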
|
28776
|
import pytest
import friendly_traceback
from friendly_traceback.console_helpers import _get_info
from ..syntax_errors_formatting_cases import descriptions
friendly_traceback.set_lang("en")
where = "parsing_error_source"
cause = "cause"
@pytest.mark.parametrize("filename", descriptions.keys())
def test_syntax_errors(filename):
expected = descriptions[filename]
try:
exec("from . import %s" % filename)
except SyntaxError:
friendly_traceback.explain_traceback(redirect="capture")
info = _get_info()
assert expected[where] == info[where] # noqa
assert expected[cause] in info[cause] # noqa
|
28790
|
import os
import re
import fnmatch
from logfetch_base import log, is_in_date_range
from termcolor import colored
def find_cached_logs(args):
matching_logs = []
log_fn_match = get_matcher(args)
for filename in os.listdir(args.dest):
if fnmatch.fnmatch(filename, log_fn_match) and in_date_range(args, filename):
log(colored('Including log {0}\n'.format(filename), 'blue'), args, True)
matching_logs.append('{0}/{1}'.format(args.dest, filename))
else:
log(colored('Excluding log {0}, not in date range\n'.format(filename), 'magenta'), args, True)
return matching_logs
def in_date_range(args, filename):
timestamps = re.findall(r"-\d{13}-", filename)
if timestamps:
return is_in_date_range(args, int(str(timestamps[-1]).replace("-", "")[0:-3]))
else:
return True
def get_matcher(args):
if args.taskId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.taskId, args.logtype)
else:
return '{0}*'.format(args.taskId)
elif args.deployId and args.requestId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}-{1}*{2}*'.format(args.requestId, args.deployId, args.logtype)
else:
return '{0}-{1}*'.format(args.requestId, args.deployId)
else:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.requestId, args.logtype)
else:
return '{0}*'.format(args.requestId)
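# Illustrative patterns produced by get_matcher (a sketch; argument values are made up
# and args.file_pattern is assumed to be the default filename pattern string):
#
#     args.taskId='mytask-1', args.logtype=None                      -> 'mytask-1*'
#     args.requestId='myreq', args.deployId='d1', args.logtype=None  -> 'myreq-d1*'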
|
28796
|
from Main import main, __version__ as ESVersion
from argparse import Namespace
import random
from tkinter import Checkbutton, OptionMenu, Tk, LEFT, RIGHT, BOTTOM, TOP, StringVar, IntVar, Frame, Label, W, E, Entry, Spinbox, Button, filedialog, messagebox
def guiMain(args=None):
mainWindow = Tk()
mainWindow.wm_title("Entrance Shuffle %s" % ESVersion)
topFrame = Frame(mainWindow)
rightHalfFrame = Frame(topFrame)
checkBoxFrame = Frame(rightHalfFrame)
createSpoilerVar = IntVar()
createSpoilerCheckbutton = Checkbutton(checkBoxFrame, text="Create Spoiler Log", variable=createSpoilerVar)
suppressRomVar = IntVar()
suppressRomCheckbutton = Checkbutton(checkBoxFrame, text="Do not create patched Rom", variable=suppressRomVar)
quickSwapVar = IntVar()
    quickSwapCheckbutton = Checkbutton(checkBoxFrame, text="Enable L/R Item quickswapping", variable=quickSwapVar)
dungeonItemsVar = IntVar()
dungeonItemsCheckbutton = Checkbutton(checkBoxFrame, text="Place Dungeon Items (Compasses/Maps)", onvalue=0, offvalue=1, variable=dungeonItemsVar)
beatableOnlyVar = IntVar()
beatableOnlyCheckbutton = Checkbutton(checkBoxFrame, text="Only ensure seed is beatable, not all items must be reachable", variable=beatableOnlyVar)
shuffleGanonVar = IntVar()
shuffleGanonCheckbutton = Checkbutton(checkBoxFrame, text="Include Ganon's Tower and Pyramid Hole in shuffle pool", variable=shuffleGanonVar)
createSpoilerCheckbutton.pack(expand=True, anchor=W)
suppressRomCheckbutton.pack(expand=True, anchor=W)
quickSwapCheckbutton.pack(expand=True, anchor=W)
dungeonItemsCheckbutton.pack(expand=True, anchor=W)
beatableOnlyCheckbutton.pack(expand=True, anchor=W)
shuffleGanonCheckbutton.pack(expand=True, anchor=W)
fileDialogFrame = Frame(rightHalfFrame)
romDialogFrame = Frame(fileDialogFrame)
baseRomLabel = Label(romDialogFrame, text='Base Rom')
romVar = StringVar()
romEntry = Entry(romDialogFrame, textvariable=romVar)
def RomSelect():
rom = filedialog.askopenfilename()
romVar.set(rom)
romSelectButton = Button(romDialogFrame, text='Select Rom', command=RomSelect)
baseRomLabel.pack(side=LEFT)
romEntry.pack(side=LEFT)
romSelectButton.pack(side=LEFT)
spriteDialogFrame = Frame(fileDialogFrame)
baseSpriteLabel = Label(spriteDialogFrame, text='Link Sprite')
spriteVar = StringVar()
spriteEntry = Entry(spriteDialogFrame, textvariable=spriteVar)
def SpriteSelect():
sprite = filedialog.askopenfilename()
spriteVar.set(sprite)
spriteSelectButton = Button(spriteDialogFrame, text='Select Sprite', command=SpriteSelect)
baseSpriteLabel.pack(side=LEFT)
spriteEntry.pack(side=LEFT)
spriteSelectButton.pack(side=LEFT)
romDialogFrame.pack()
spriteDialogFrame.pack()
checkBoxFrame.pack()
fileDialogFrame.pack()
drowDownFrame = Frame(topFrame)
modeFrame = Frame(drowDownFrame)
modeVar = StringVar()
modeVar.set('open')
modeOptionMenu = OptionMenu(modeFrame, modeVar, 'standard', 'open', 'swordless')
modeOptionMenu.pack(side=RIGHT)
modeLabel = Label(modeFrame, text='Game Mode')
modeLabel.pack(side=LEFT)
logicFrame = Frame(drowDownFrame)
logicVar = StringVar()
logicVar.set('noglitches')
logicOptionMenu = OptionMenu(logicFrame, logicVar, 'noglitches', 'minorglitches')
logicOptionMenu.pack(side=RIGHT)
logicLabel = Label(logicFrame, text='Game logic')
logicLabel.pack(side=LEFT)
goalFrame = Frame(drowDownFrame)
goalVar = StringVar()
goalVar.set('ganon')
goalOptionMenu = OptionMenu(goalFrame, goalVar, 'ganon', 'pedestal', 'dungeons', 'triforcehunt', 'crystals')
goalOptionMenu.pack(side=RIGHT)
goalLabel = Label(goalFrame, text='Game goal')
goalLabel.pack(side=LEFT)
difficultyFrame = Frame(drowDownFrame)
difficultyVar = StringVar()
difficultyVar.set('normal')
difficultyOptionMenu = OptionMenu(difficultyFrame, difficultyVar, 'normal', 'timed', 'timed-ohko', 'timed-countdown')
difficultyOptionMenu.pack(side=RIGHT)
difficultyLabel = Label(difficultyFrame, text='Game difficulty')
difficultyLabel.pack(side=LEFT)
algorithmFrame = Frame(drowDownFrame)
algorithmVar = StringVar()
algorithmVar.set('vt25')
algorithmOptionMenu = OptionMenu(algorithmFrame, algorithmVar, 'freshness', 'flood', 'vt21', 'vt22', 'vt25')
algorithmOptionMenu.pack(side=RIGHT)
algorithmLabel = Label(algorithmFrame, text='Item distribution algorithm')
algorithmLabel.pack(side=LEFT)
shuffleFrame = Frame(drowDownFrame)
shuffleVar = StringVar()
shuffleVar.set('full')
shuffleOptionMenu = OptionMenu(shuffleFrame, shuffleVar, 'vanilla', 'simple', 'restricted', 'full', 'madness', 'insanity', 'dungeonsfull', 'dungeonssimple')
shuffleOptionMenu.pack(side=RIGHT)
shuffleLabel = Label(shuffleFrame, text='Entrance shuffle algorithm')
shuffleLabel.pack(side=LEFT)
heartbeepFrame = Frame(drowDownFrame)
heartbeepVar = StringVar()
heartbeepVar.set('normal')
heartbeepOptionMenu = OptionMenu(heartbeepFrame, heartbeepVar, 'normal', 'half', 'quarter', 'off')
heartbeepOptionMenu.pack(side=RIGHT)
heartbeepLabel = Label(heartbeepFrame, text='Heartbeep sound rate')
heartbeepLabel.pack(side=LEFT)
modeFrame.pack(expand=True, anchor=E)
logicFrame.pack(expand=True, anchor=E)
goalFrame.pack(expand=True, anchor=E)
difficultyFrame.pack(expand=True, anchor=E)
algorithmFrame.pack(expand=True, anchor=E)
shuffleFrame.pack(expand=True, anchor=E)
heartbeepFrame.pack(expand=True, anchor=E)
bottomFrame = Frame(mainWindow)
seedLabel = Label(bottomFrame, text='Seed #')
seedVar = StringVar()
seedEntry = Entry(bottomFrame, textvariable=seedVar)
countLabel = Label(bottomFrame, text='Count')
countVar = StringVar()
countSpinbox = Spinbox(bottomFrame, from_=1, to=100, textvariable=countVar)
def generateRom():
        guiargs = Namespace()
guiargs.seed = int(seedVar.get()) if seedVar.get() else None
guiargs.count = int(countVar.get()) if countVar.get() != '1' else None
guiargs.mode = modeVar.get()
guiargs.logic = logicVar.get()
guiargs.goal = goalVar.get()
guiargs.difficulty = difficultyVar.get()
guiargs.algorithm = algorithmVar.get()
guiargs.shuffle = shuffleVar.get()
guiargs.heartbeep = heartbeepVar.get()
guiargs.create_spoiler = bool(createSpoilerVar.get())
guiargs.suppress_rom = bool(suppressRomVar.get())
guiargs.nodungeonitems = bool(dungeonItemsVar.get())
guiargs.beatableonly = bool(beatableOnlyVar.get())
guiargs.quickswap = bool(quickSwapVar.get())
guiargs.shuffleganon = bool(shuffleGanonVar.get())
guiargs.rom = romVar.get()
guiargs.jsonout = None
guiargs.sprite = spriteVar.get() if spriteVar.get() else None
try:
if guiargs.count is not None:
seed = guiargs.seed
for i in range(guiargs.count):
main(seed=seed, args=guiargs)
seed = random.randint(0, 999999999)
else:
main(seed=guiargs.seed, args=guiargs)
except Exception as e:
messagebox.showerror(title="Error while creating seed", message=str(e))
else:
messagebox.showinfo(title="Success", message="Rom patched successfully")
generateButton = Button(bottomFrame, text='Generate Patched Rom', command=generateRom)
seedLabel.pack(side=LEFT)
seedEntry.pack(side=LEFT)
countLabel.pack(side=LEFT)
countSpinbox.pack(side=LEFT)
generateButton.pack(side=LEFT)
drowDownFrame.pack(side=LEFT)
rightHalfFrame.pack(side=RIGHT)
topFrame.pack(side=TOP)
bottomFrame.pack(side=BOTTOM)
if args is not None:
# load values from commandline args
createSpoilerVar.set(int(args.create_spoiler))
suppressRomVar.set(int(args.suppress_rom))
if args.nodungeonitems:
dungeonItemsVar.set(int(not args.nodungeonitems))
beatableOnlyVar.set(int(args.beatableonly))
quickSwapVar.set(int(args.quickswap))
if args.count:
countVar.set(str(args.count))
if args.seed:
seedVar.set(str(args.seed))
modeVar.set(args.mode)
difficultyVar.set(args.difficulty)
goalVar.set(args.goal)
algorithmVar.set(args.algorithm)
shuffleVar.set(args.shuffle)
heartbeepVar.set(args.heartbeep)
logicVar.set(args.logic)
romVar.set(args.rom)
shuffleGanonVar.set(args.shuffleganon)
if args.sprite is not None:
spriteVar.set(args.sprite)
mainWindow.mainloop()
if __name__ == '__main__':
guiMain()
|
28825
|
def test_socfaker_application_status(socfaker_fixture):
assert socfaker_fixture.application.status in ['Active', 'Inactive', 'Legacy']
def test_socfaker_application_account_status(socfaker_fixture):
assert socfaker_fixture.application.account_status in ['Enabled', 'Disabled']
def test_socfaker_name(socfaker_fixture):
assert socfaker_fixture.application.name
def test_socfaker_application_logon_timestamp(socfaker_fixture):
assert socfaker_fixture.application.logon_timestamp
|
28850
|
import numpy as np
#Simulator Setting
#------------------------------
MINUTES=60000000000
TIMESTEP = np.timedelta64(10*MINUTES)
PICKUPTIMEWINDOW = np.timedelta64(10*MINUTES)
#Enables the neighbor-vehicle search system, which determines the search range from the configured search distance and the grid size.
#It uses DFS to find the nearest idle vehicles in the area.
NeighborCanServer = False
#You can adjust the size of the experimental area by entering latitude and longitude bounds.
#The orders, road network and grid division will adapt to fit the selected area.
FocusOnLocalRegion = False
LocalRegionBound = (104.035,104.105,30.625,30.695)
if FocusOnLocalRegion == False:
LocalRegionBound = (104.011, 104.125, 30.618, 30.703)
#Input parameters
VehiclesNumber = 6000
SideLengthMeter = 800
VehiclesServiceMeter = 800
DispatchMode = "Simulation"
DemandPredictionMode = "None"
#["TransportationClustering","KmeansClustering","SpectralClustering"]
ClusterMode = "Grid"
|
28898
|
from django.urls import reverse
from ..links.document_file_links import (
link_document_file_delete, link_document_file_download_quick
)
from ..links.favorite_links import (
link_document_favorites_add, link_document_favorites_remove
)
from ..links.trashed_document_links import link_document_restore
from ..models import TrashedDocument
from ..permissions import (
permission_document_file_delete, permission_document_file_download,
permission_document_view, permission_trashed_document_restore
)
from .base import GenericDocumentViewTestCase
from .mixins.favorite_document_mixins import FavoriteDocumentTestMixin
class FavoriteDocumentLinkTestCase(
FavoriteDocumentTestMixin, GenericDocumentViewTestCase
):
def test_favorite_document_add_link_no_permission(self):
self._create_test_document_stub()
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_add.resolve(context=context)
self.assertEqual(resolved_link, None)
def test_favorite_document_add_link_with_access(self):
self._create_test_document_stub()
self.grant_access(
obj=self.test_document_stub, permission=permission_document_view
)
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_add.resolve(context=context)
self.assertNotEqual(resolved_link, None)
def test_favorite_document_add_link_external_user_with_access(self):
self._create_test_user()
self._create_test_document_stub()
self.grant_access(
obj=self.test_document_stub, permission=permission_document_view
)
self._test_document_favorite_add(user=self.test_user)
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_add.resolve(context=context)
self.assertNotEqual(resolved_link, None)
def test_favorite_document_remove_link_no_permission(self):
self._create_test_document_stub()
self._test_document_favorite_add()
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_remove.resolve(context=context)
self.assertEqual(resolved_link, None)
def test_favorite_document_remove_link_with_access(self):
self._create_test_document_stub()
self.grant_access(
obj=self.test_document_stub, permission=permission_document_view
)
self._test_document_favorite_add()
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_remove.resolve(context=context)
self.assertNotEqual(resolved_link, None)
def test_favorite_document_remove_link_external_user_with_access(self):
self._create_test_user()
self._create_test_document_stub()
self.grant_access(
obj=self.test_document_stub, permission=permission_document_view
)
self._test_document_favorite_add(user=self.test_user)
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_remove.resolve(context=context)
self.assertEqual(resolved_link, None)
class DocumentsLinksTestCase(GenericDocumentViewTestCase):
def test_document_file_delete_link_no_permission(self):
self._upload_test_document_file()
        self.assertEqual(self.test_document.files.count(), 2)
self.add_test_view(test_object=self.test_document.files.first())
context = self.get_test_view()
resolved_link = link_document_file_delete.resolve(context=context)
self.assertEqual(resolved_link, None)
def test_document_file_delete_link_with_permission(self):
self._upload_test_document_file()
        self.assertEqual(self.test_document.files.count(), 2)
self.grant_access(
obj=self.test_document,
permission=permission_document_file_delete
)
self.add_test_view(test_object=self.test_document.files.first())
context = self.get_test_view()
resolved_link = link_document_file_delete.resolve(context=context)
self.assertNotEqual(resolved_link, None)
self.assertEqual(
resolved_link.url,
reverse(
viewname=link_document_file_delete.view,
args=(self.test_document.files.first().pk,)
)
)
def test_document_file_download_link_no_permission(self):
self.add_test_view(test_object=self.test_document.file_latest)
context = self.get_test_view()
resolved_link = link_document_file_download_quick.resolve(context=context)
self.assertEqual(resolved_link, None)
def test_document_file_download_link_with_permission(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_file_download
)
self.add_test_view(test_object=self.test_document.file_latest)
context = self.get_test_view()
resolved_link = link_document_file_download_quick.resolve(context=context)
self.assertNotEqual(resolved_link, None)
self.assertEqual(
resolved_link.url,
reverse(
viewname=link_document_file_download_quick.view,
args=(self.test_document.file_latest.pk,)
)
)
class TrashedDocumentsLinksTestCase(GenericDocumentViewTestCase):
def setUp(self):
super().setUp()
self.test_document.delete()
self.test_trashed_document = TrashedDocument.objects.get(
pk=self.test_document.pk
)
self.add_test_view(test_object=self.test_trashed_document)
self.context = self.get_test_view()
def test_trashed_document_restore_link_no_permission(self):
resolved_link = link_document_restore.resolve(context=self.context)
self.assertEqual(resolved_link, None)
def test_trashed_document_restore_link_with_permission(self):
self.grant_access(
obj=self.test_document, permission=permission_trashed_document_restore
)
resolved_link = link_document_restore.resolve(context=self.context)
self.assertNotEqual(resolved_link, None)
self.assertEqual(
resolved_link.url,
reverse(
viewname=link_document_restore.view,
args=(self.test_trashed_document.pk,)
)
)
|
28907
|
from antlr4 import *
from antlr4.error.ErrorListener import ErrorListener
from antlr.SBHasmLexer import SBHasmLexer
from antlr.SBHasmListener import SBHasmListener
from antlr.SBHasmParser import SBHasmParser
class MyErrorListener(ErrorListener):
def __init__(self):
super(MyErrorListener, self).__init__()
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
raise Exception("SyntaxError in {},{} msg={}".format(line, column, msg))
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
raise Exception("reportAmbiguity")
def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
raise Exception("reportAttemptingFullContext")
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
raise Exception("reportContextSensitivity")
class SBHCodeSizeListener(SBHasmListener):
def __init__(self):
self.cmd_cnt = 0
def enterCmd(self, ctx):
self.cmd_cnt += 1
def enterSonst(self, ctx):
self.cmd_cnt += 1
class Pickup:
def __init__(self, item):
self.item = item
def __str__(self):
return "Pickup"
class Mem:
def __init__(self, slot):
self.slot = slot
def __str__(self):
return self.slot
class Dir:
def __init__(self, direction):
self.dir = direction
class SevenBillionHumansParser:
def __init__(self, filepath=None, source=None):
if source:
self.parse(InputStream(source))
elif filepath:
self.parse(FileStream(filepath))
def parse(self, source_stream):
lexer = SBHasmLexer(source_stream)
stream = CommonTokenStream(lexer)
parser = SBHasmParser(stream)
# parser._listeners = [ MyErrorListener() ]
tree = parser.asm()
printer = SBHCodeSizeListener()
walker = ParseTreeWalker()
walker.walk(printer, tree)
self.cmd_size = printer.cmd_cnt
if __name__ == '__main__':
s = SevenBillionHumansParser("../solutions/55 - Data Flowers/size-10_speed-23.asm")
|
28920
|
from django.forms import ModelForm, inlineformset_factory, BaseInlineFormSet
from . import models
class AuthorContainerForm(ModelForm):
class Meta:
model = models.AuthorContainer
exclude = ('id',)
class AuthorForm(ModelForm):
class Meta:
model = models.Author
fields = ('first_name', 'last_name')
class BookForm(ModelForm):
class Meta:
model = models.Book
fields = ('title', 'isbn',)
BookFormset = inlineformset_factory(models.Author, models.Book, form=BookForm, can_delete=True, extra=0)
class BaseAuthorFormset(BaseInlineFormSet):
def add_fields(self, form, index):
super(BaseAuthorFormset, self).add_fields(form, index)
form.nested_book = BookFormset(
instance=form.instance,
data=form.data if form.is_bound else None,
files=form.files if form.is_bound else None,
prefix='nested_book-%s-%s' % (
form.prefix,
BookFormset.get_default_prefix()))
def is_valid(self):
result = super(BaseAuthorFormset, self).is_valid()
if self.is_bound:
for form in self.forms:
if hasattr(form, 'nested_book'):
result = result and form.nested_book.is_valid()
return result
def save(self, commit=True):
result = super(BaseAuthorFormset, self).save(commit=commit)
for form in self.forms:
if hasattr(form, 'nested_book'):
if not self._should_delete_form(form):
form.nested_book.save(commit=commit)
return result
AuthorsFormset = inlineformset_factory(models.AuthorContainer, models.Author, formset=BaseAuthorFormset,
form=AuthorForm, extra=0)
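# Illustrative view-side usage of the nested formsets (a sketch; `container` is assumed
# to be an existing AuthorContainer instance and `request` a Django request object):
#
#     formset = AuthorsFormset(request.POST or None, instance=container)
#     if formset.is_valid():
#         formset.save()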
|
28927
|
import os.path
from pi3d import *
from pi3d.Buffer import Buffer
from pi3d.Shape import Shape
from pi3d.Texture import Texture
CUBE_PARTS = ['front', 'right', 'top', 'bottom', 'left', 'back']
BOTTOM_INDEX = 3
def loadECfiles(path, fname, suffix='jpg', nobottom=False):
"""Helper for loading environment cube faces.
TODO nobottom will redraw the top on the bottom of cube. It really should
substitute a blank (black) texture instead!
Arguments:
*path*
to the image files relative to the top directory.
*fname*
The stem of the file name without the _top, _bottom, _right etc.
Keyword arguments:
*suffix*
String to add after the '_top','_bottom' has been added to the stem.
*nobottom*
      If True then only load five parts into the array; the bottom will be
      drawn with the previous image, i.e. the top.
"""
if nobottom:
parts = [p for p in CUBE_PARTS if p != 'bottom']
else:
parts = CUBE_PARTS
files = (os.path.join(path, '%s_%s.%s' % (fname, p, suffix)) for p in parts)
return [Texture(f) for f in files]
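# Illustrative call (a sketch; the path and file name stem are made up):
#
#     textures = loadECfiles('textures/ecubes', 'skybox_grass', suffix='jpg', nobottom=True)
#     # -> list of 5 Texture objects (front, right, top, left, back)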
class EnvironmentCube(Shape):
""" 3d model inherits from Shape"""
def __init__(self, camera=None, light=None, size=500.0, maptype="HALFCROSS", name="", x=0.0, y=0.0, z=0.0,
rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0, nobottom=False):
"""uses standard constructor for Shape extra Keyword arguments:
*size*
Dimensions of the cube
*maptype*
      HALFCROSS (default) or CROSS; any other value defaults to the CUBE type,
      which requires 6 (or 5 with nobottom) image files to render it
"""
super(EnvironmentCube,self).__init__(camera, light, name, x, y, z, rx, ry, rz,
1.0, 1.0, 1.0, cx, cy, cz)
self.width = size
self.height = size
self.depth = size
self.ssize = 36
self.ttype = GL_TRIANGLES
self.nobottom = nobottom
ww = size / 2.0
hh = size / 2.0
dd = size / 2.0
#cuboid data - faces are separated out for texturing..
self.vertices = ((-ww, hh, dd), (ww, hh, dd), (ww,-hh, dd), (-ww, -hh, dd),
(ww, hh, dd), (ww, hh, -dd), (ww, -hh, -dd), (ww, -hh, dd),
(-ww, hh, dd), (-ww, hh, -dd), (ww, hh, -dd), (ww, hh, dd),
(ww, -hh, dd), (ww, -hh, -dd), (-ww, -hh, -dd),(-ww, -hh, dd),
(-ww, -hh, dd),(-ww, -hh, -dd),(-ww, hh, -dd), (-ww, hh, dd),
(-ww, hh, -dd),(ww, hh, -dd), (ww, -hh, -dd), (-ww,-hh,-dd))
self.normals = ((0.0, 0.0, 1), (0.0, 0.0, 1), (0.0, 0.0, 1), (0.0, 0.0, 1),
(1, 0.0, 0), (1, 0.0, 0), (1, 0.0, 0), (1, 0.0, 0),
(0.0, 1, 0), (0.0, 1, 0), (0.0, 1, 0), (0.0, 1, 0),
(0.0, -1, 0), (0,- 1, 0), (0.0, -1, 0), (0.0, -1, 0),
(-1, 0.0, 0), (-1, 0.0, 0), (-1, 0.0, 0), (-1, 0.0, 0),
(0.0, 0.0, -1),(0.0, 0.0, -1),(0.0, 0.0, -1), (0.0, 0.0, -1))
self.indices = ((3, 0, 1), (2, 3, 1), (7, 4, 5), (6, 7, 5),
(11, 8, 9), (10, 11, 9), (15, 12, 13), (14, 15, 13),
(17, 18, 19),(16, 17, 19),(22, 21, 20), (23, 22, 20))
if maptype == "CROSS":
self.tex_coords = ((1.0, 0.34), (0.75, 0.34), (0.75, 0.661), (1.0, 0.661), #front
(0.75, 0.34), (0.5, 0.34), (0.5, 0.661), (0.75, 0.661), #right
(0.251, 0.0), (0.251, 0.34), (0.498, 0.34), (0.498, 0.0), #top
(0.498, 0.998), (0.498, 0.66), (0.251, 0.66), (0.251, 0.998), #bottom
(0.0, 0.661), (0.25, 0.661), (0.25, 0.34), (0.0, 0.34), #left
(0.25, 0.34), (0.5, 0.34), (0.5, 0.661), (0.25, 0.661)) #back
self.buf = []
self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))
elif maptype == "HALFCROSS":
self.tex_coords = ((0.25,0.25), (0.25,0.75), (-0.25,0.75), (-0.25,0.25), #front
(0.25,0.75), (0.75,0.75), (0.75,1.25), (0.25,1.25), #right
(0.25,0.25), (0.75,0.25), (0.75,0.75), (0.25,0.75), #top
(0,0), (1,0), (1,1), (0,1), #bottom
(0.25,-0.25), (0.75,-0.25), (0.75,0.25), (0.25,0.25), #left
(0.75,0.25), (0.75,0.75), (1.25,0.75), (1.25,0.25)) #back
self.buf = []
self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))
else:
self.tex_coords = ((0.002,0.002), (0.998,0.002), (0.998,0.998),(0.002,0.998),
(0.002,0.002), (0.998,0.002), (0.998,0.998), (0.002,0.998),
(0.002,0.998), (0.002,0.002), (0.998,0.002), (0.998,0.998),
(0.998,0.002), (0.998,0.998), (0.002,0.998), (0.002,0.002),
(0.998,0.998), (0.002,0.998), (0.002,0.002), (0.998,0.002),
(0.998,0.002), (0.002,0.002), (0.002,0.998), (0.998,0.998))
self.buf = []
self.buf.append(Buffer(self, self.vertices[0:4], self.tex_coords[0:4], ((3,0,1), (2,3,1)), self.normals[0:4])) #front
self.buf.append(Buffer(self, self.vertices[4:8], self.tex_coords[4:8], ((3,0,1), (2,3,1)), self.normals[4:8])) #right
self.buf.append(Buffer(self, self.vertices[8:12], self.tex_coords[8:12], ((3,0,1), (2,3,1)), self.normals[8:12])) #top
self.buf.append(Buffer(self, self.vertices[12:16], self.tex_coords[12:16], ((3,0,1), (2,3,1)), self.normals[12:16])) #bottom
self.buf.append(Buffer(self, self.vertices[16:20], self.tex_coords[16:20], ((3,0,1), (2,3,1)), self.normals[16:20])) #left
self.buf.append(Buffer(self, self.vertices[20:24], self.tex_coords[20:24], ((3,1,0), (2,1,3)), self.normals[20:24])) #back
def set_draw_details(self, shader, textures, ntiles=0.0, shiny=0.0, umult=1.0, vmult=1.0):
"""overrides this method in Shape to cope with nobottom option"""
if not (type(textures) is list):
textures = [textures]
elif len(textures) == 5:
# this should be the only circumstance. Saves setting it in the constructor
self.nobottom = True
for i, b in enumerate(self.buf):
j = i - 1 if (self.nobottom and i >= BOTTOM_INDEX) else i
b.set_draw_details(shader, [textures[j]], ntiles, shiny, umult, vmult)
|
29016
|
import os
import json
import time
import torch
# Called when the deployed service starts
def init():
global model
global device
# Get the path where the deployed model can be found.
model_filename = 'obj_segmentation.pkl'
model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = torch.load(model_path, map_location=device)
# Handle requests to the service
def run(data):
try:
start_at = time.time()
inputs = json.loads(data)
img_data_list = inputs["instances"]
img_tensor_list = [torch.tensor(item) for item in img_data_list]
model.eval()
with torch.no_grad():
predictions = model([item.to(device) for item in img_tensor_list])
pred_data_list = [{
"masks": prediction['masks'][0, 0].mul(255).byte().cpu().numpy().tolist(),
"boxes": prediction['boxes'].numpy().tolist(),
"labels": prediction['labels'].numpy().tolist(),
"scores": prediction['scores'].numpy().tolist(),
} for prediction in predictions]
return {"predictions": pred_data_list,
"elapsed_time": time.time() - start_at}
except Exception as e:
error = str(e)
return error
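# Illustrative request payload for run() (a sketch; the exact image shape is an assumption):
#
#     data = json.dumps({"instances": [image_chw]})  # image_chw: 3 x H x W nested lists of floats
#     run(data)  # -> {"predictions": [...], "elapsed_time": ...}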
|
29021
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module contains (and isolates) logic used to find entities based on entity type,
list selection criteria and search terms.
"""
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException
from pyparsing import alphas, alphanums
from utils.py3porting import is_string, to_unicode
from annalist import layout
from annalist.util import valid_id, extract_entity_id
from annalist.models.recordtype import RecordType
from annalist.models.recordtypedata import RecordTypeData
from annalist.models.entitytypeinfo import EntityTypeInfo
# -------------------------------------------------------------------
# Auxiliary functions
# -------------------------------------------------------------------
def order_entity_key(entity):
"""
Function returns sort key for ordering entities by type and entity id
Use with `sorted`, thus:
        sorted(entities, key=order_entity_key)
"""
type_id = entity.get_type_id()
entity_id = entity.get_id()
key = ( 0 if type_id.startswith('_') else 1, type_id,
0 if entity_id.startswith('_') else 1, entity_id
)
return key
# -------------------------------------------------------------------
# EntityFinder
# -------------------------------------------------------------------
class EntityFinder(object):
"""
Logic for enumerating entities matching a supplied type, selector and/or search string.
"""
def __init__(self, coll, selector=None):
"""
Initialize entity finder for collection and selector.
"""
super(EntityFinder, self).__init__()
self._coll = coll
self._site = coll.get_site()
self._selector = EntitySelector(selector, FieldComparison(coll))
# self._subtypes = None
return
def get_collection_type_ids(self, altscope):
"""
Returns iterator over possible type ids in current collection.
Each type is returned as a candidate type identifier string
"""
return self._coll.cache_get_all_type_ids(altscope=altscope)
def get_collection_subtype_ids(self, supertype_id, altscope):
"""
        Returns an iterator of type ids for all subtypes of the supplied type
accessible in the indicated scope from the current collection, including
the identified type itself.
"""
if not valid_id(supertype_id):
log.warning("EntityFinder.get_collection_subtype_ids: invalid type_id %s"%(supertype_id,))
return
supertype_info = EntityTypeInfo(self._coll, supertype_id)
supertype_uri = supertype_info.get_type_uri()
if supertype_uri is not None:
for try_subtype_id in self.get_collection_type_ids(altscope):
try_subtype = self._coll.cache_get_type(try_subtype_id)
if try_subtype:
try_subtype_uri = try_subtype.get_uri()
if ( ( supertype_uri == try_subtype_uri ) or
( supertype_uri in self._coll.cache_get_supertype_uris(try_subtype_uri) ) ):
yield try_subtype_id
else:
log.warning("EntityFinder.get_collection_subtype_ids: no type_uri for %s"%(supertype_id,))
def get_type_entities(self, type_id, user_permissions, altscope):
"""
Iterate over entities from collection matching the supplied type.
'altscope' is used to determine the extent of data to be included in the listing:
        a value of 'all' means that site-wide entities are included in the listing.
Otherwise only collection entities are included.
"""
#@@
# log.info("get_type_entities: type_id %s, user_permissions %r"%(type_id,user_permissions))
#@@
entitytypeinfo = EntityTypeInfo(self._coll, type_id)
for e in entitytypeinfo.enum_entities_with_implied_values(
user_permissions, altscope=altscope
):
if e.get_id() != layout.INITIAL_VALUES_ID:
#@@
# log.info(" yield: %s"%(e.get_id(),))
#@@
yield e
return
def get_subtype_entities(self, type_id, user_permissions, altscope):
"""
Iterate over entities from collection that are of the indicated type
or any of its subtypes.
'altscope' is used to determine the extent of data to be included in the listing:
a value of 'all' means that site-wide entities are included in the listing.
Otherwise only collection entities are included.
"""
for subtype_id in self.get_collection_subtype_ids(type_id, "all"):
subtype_info = EntityTypeInfo(self._coll, subtype_id)
es = subtype_info.enum_entities_with_implied_values(
user_permissions, altscope=altscope
)
#@@
# es = list(es) #@@ Force strict eval
# log.info("get_subtype_entities: %r"%([e.get_id() for e in es],))
#@@
for e in es:
if e.get_id() != layout.INITIAL_VALUES_ID:
yield e
return
def get_all_types_entities(self, types, user_permissions, altscope):
"""
Iterate over all entities of all types from a supplied type iterator
"""
#@@
# log.info("@@@@ get_all_types_entities")
#@@
for t in types:
for e in self.get_type_entities(t, user_permissions, altscope):
#@@
# log.info("get_all_types_entities: type %s/%s"%(t,e.get_id()))
#@@
yield e
return
def get_base_entities(self, type_id=None, user_permissions=None, altscope=None):
"""
Iterate over base entities from collection, matching the supplied type id if supplied.
If a type_id is supplied, site data values are included.
"""
entities = None
if type_id:
entities = self.get_subtype_entities(type_id, user_permissions, altscope)
# return self.get_type_entities(type_id, user_permissions, scope)
else:
entities = self.get_all_types_entities(
self.get_collection_type_ids(altscope="all"), user_permissions, altscope
)
#@@
# entities = list(entities) #@@ Force strict eval
# log.info("get_base_entities: %r"%([(e.get_type_id(), e.get_id()) for e in entities],))
#@@
return entities
def search_entities(self, entities, search):
"""
Iterate over entities from supplied iterator containing supplied search term.
"""
for e in entities:
if self.entity_contains(e, search):
yield e
return
def get_entities(self,
user_permissions=None, type_id=None, altscope=None, context=None, search=None
):
"""
Iterates over entities of the specified type, matching search term and visible to
supplied user permissions.
"""
entities = self._selector.filter(
self.get_base_entities(type_id, user_permissions, altscope), context=context
)
if search:
entities = self.search_entities(entities, search)
return entities
def get_entities_sorted(self,
user_permissions=None, type_id=None, altscope=None, context={}, search=None
):
"""
Get sorted list of entities of the specified type, matching search term and
visible to supplied user permissions.
"""
entities = self.get_entities(
user_permissions, type_id=type_id, altscope=altscope,
context=context, search=search
)
#@@
# entities = list(entities) #@@ Force strict eval
# log.info("get_entities_sorted: %r"%([e.get_id() for e in entities],))
#@@
return sorted(entities, key=order_entity_key)
@classmethod
def entity_contains(cls, e, search):
"""
Returns True if entity contains/matches search term, else False.
Search term None (or blank) matches all entities.
>>> e1 = { 'p:a': '1', 'p:b': '2', 'p:c': '3', 'annal:property_uri': 'annal:member' }
>>> EntityFinder.entity_contains(e1, "1")
True
>>> EntityFinder.entity_contains(e1, "3")
True
>>> EntityFinder.entity_contains(e1, "nothere")
False
>>> EntityFinder.entity_contains(e1, "annal:member")
True
>>> e2 = { 'list': ['l1', 'l2', 'l3'] \
, 'dict': {'p:a': 'd1', 'p:b': 'd2', 'p:c': 'd3'} \
}
>>> EntityFinder.entity_contains(e2, "l1")
True
>>> EntityFinder.entity_contains(e2, "d3")
True
>>> EntityFinder.entity_contains(e2, "nothere")
False
"""
if search:
            # Scan the entity's fields for the search term
for key in e:
val = e[key]
if cls.value_contains(val, search):
return True
return False
return True
@classmethod
def value_contains(cls, val, search):
"""
Helper function tests for search term in dictionary, list or string values.
Other values are not searched.
"""
if isinstance(val, dict):
for k in val:
if cls.value_contains(val[k], search):
return True
elif isinstance(val, list):
for e in val:
if cls.value_contains(e, search):
return True
elif is_string(val):
return search in val
return False
# -------------------------------------------------------------------
# EntitySelector
# -------------------------------------------------------------------
class EntitySelector(object):
"""
This class implements a selector filter. It is initialized with a selector
expression, and may be invoked as a filter applied to an entity generator,
or as a predicate applied to a single entity.
>>> e = { 'p:a': '1', 'p:b': '2', 'p:c': '3', '@type': ["http://example.com/type", "foo:bar"] }
>>> c = { 'view': { 'v:a': '1', 'v:b': ['2', '3'] } }
>>> f1 = "'1' == [p:a]"
>>> f2 = "[p:a]=='2'"
>>> f3 = ""
>>> f4 = "'http://example.com/type' in [@type]"
>>> f5 = "'foo:bar' in [@type]"
>>> f6 = "'bar:foo' in [@type]"
>>> f7 = "[p:a] in view[v:a]"
>>> f8 = "[p:b] in view[v:b]"
>>> f9 = "[p:a] in view[v:b]"
>>> f10 = "[annal:field_entity_type] in view[annal:view_entity_type]"
>>> f11 = "foo:bar in [@type]"
>>> f12 = "bar:foo in [@type]"
>>> EntitySelector(f1).select_entity(e, c)
True
>>> EntitySelector(f2).select_entity(e, c)
False
>>> EntitySelector(f3).select_entity(e, c)
True
>>> EntitySelector(f4).select_entity(e, c)
True
>>> EntitySelector(f5).select_entity(e, c)
True
>>> EntitySelector(f6).select_entity(e, c)
False
>>> EntitySelector(f7).select_entity(e, c)
True
>>> EntitySelector(f8).select_entity(e, c)
True
>>> EntitySelector(f9).select_entity(e, c)
False
>>> EntitySelector(f10).select_entity(e, c)
True
>>> EntitySelector(f11).select_entity(e, c)
True
>>> EntitySelector(f12).select_entity(e, c)
False
"""
def __init__(self, selector, fieldcomp=None):
self._fieldcomp = fieldcomp
        # Returns None if no filter is applied, otherwise a predicate function
self._selector = self.compile_selector_filter(selector)
return
def filter(self, entities, context=None):
"""
Iterate over selection of entities from supplied iterator, using the
selection specification supplied to the constructor of the current object.
entities is an iterator over entities from which selection is made
context is a dictionary of context values that may be referenced by
the selector in choosing entities to be returned.
If no filtering is applied, the supplied iterator is returned as-is.
"""
if self._selector:
entities = self._filter(entities, context)
return entities
def _filter(self, entities, context):
"""
Internal helper applies selector to entity iterator, returning a new iterator.
"""
for e in entities:
if self._selector(e, context):
yield e
return
def select_entity(self, entity, context={}):
"""
        Apply the selector to an entity, returning True if the entity is selected.
"""
if self._selector:
return self._selector(entity, context)
return True
@classmethod #@@ @staticmethod, no cls?
def parse_selector(cls, selector):
"""
Parse a selector and return list of tokens
Selector formats:
ALL (or blank) match any entity
<val1> == <val2> values are same
<val1> in <val2> second value is list containing 1st value,
or values are same, or val1 is None.
<val1> <name> <val2> invoke comparison method from supplied
FieldComparison object
<val1> and <val2> may be:
[<field-id>] refers to field in entity under test
<name>[<field-id>] refers to field of context value, or None if the
indicated context value or field is not defined.
"<string>" literal string value. Quotes within are escaped.
<field_id> values are URIs or CURIEs, using characters defined by RFC3986,
except "[" and "]"
RFC3986:
unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
reserved = gen-delims / sub-delims
gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
/ "*" / "+" / "," / ";" / "="
Parser uses pyparsing combinators (cf. http://pyparsing.wikispaces.com).
"""
def get_value(val_list):
if len(val_list) == 1:
return { 'type': 'literal', 'name': None, 'field_id': None, 'value': val_list[0] }
elif val_list[0] == '[':
return { 'type': 'entity', 'name': None, 'field_id': val_list[1], 'value': None }
elif val_list[1] == '[':
return { 'type': 'context', 'name': val_list[0], 'field_id': val_list[2], 'value': None }
else:
return { 'type': 'unknown', 'name': None, 'field_id': None, 'value': None }
p_name = Word(alphas+"_", alphanums+"_")
p_id = Word(alphas+"_@", alphanums+"_-.~:/?#@!$&'()*+,;=)")
p_val = ( Group( Literal("[") + p_id + Literal("]") )
| Group( p_name + Literal("[") + p_id + Literal("]") )
| Group( QuotedString('"', "\\") )
| Group( QuotedString("'", "\\") )
| Group( p_id )
)
p_comp = ( Literal("==") | Literal("in") | p_name )
p_selector = ( p_val + p_comp + p_val + StringEnd() )
try:
resultlist = p_selector.parseString(selector).asList()
except ParseException:
return None
resultdict = {}
if resultlist:
resultdict['val1'] = get_value(resultlist[0])
resultdict['comp'] = resultlist[1]
resultdict['val2'] = get_value(resultlist[2])
return resultdict
def compile_selector_filter(self, selector):
"""
        Return a filter for testing entities matching a supplied selector.
Returns None if no selection is performed; i.e. all possible entities are selected.
Selector formats: see `parse_selector` above.
This function returns a filter function compiled from the supplied selector.
"""
def get_entity(field_id):
"Get field from entity tested by filter"
def get_entity_f(e, c):
return e.get(field_id, None)
return get_entity_f
#
def get_context(name, field_id):
"Get field from named value in current display context"
def get_context_f(e, c):
if name in c and c[name]:
return c[name].get(field_id, None)
return None
return get_context_f
#
def get_literal(value):
"Get literal value specified directly in selector string"
def get_literal_f(e, c):
return value
return get_literal_f
#
def get_val_f(selval):
if selval['type'] == "entity":
return get_entity(selval['field_id'])
elif selval['type'] == "context":
return get_context(selval['name'], selval['field_id'])
elif selval['type'] == "literal":
return get_literal(selval['value'])
else:
msg = "Unrecognized value type from selector (%s)"%selval['type']
raise ValueError(msg)
assert False, "Unrecognized value type from selector"
#
def match_eq(v1f, v2f):
def match_eq_f(e, c):
return v1f(e, c) == v2f(e, c)
return match_eq_f
#
def match_in(v1f, v2f):
def match_in_f(e, c):
v1 = v1f(e, c)
if not v1: return True
v2 = v2f(e, c)
if isinstance(v2, list):
return v1 in v2
return v1 == v2
return match_in_f
#
def match_subtype(v1f, v2f):
def match_subtype_f(e, c):
return self._fieldcomp.subtype(v1f(e, c), v2f(e, c))
return match_subtype_f
#
if selector in {None, "", "ALL"}:
return None
sel = self.parse_selector(selector)
if not sel:
msg = "Unrecognized selector syntax (%s)"%selector
raise ValueError(msg)
v1f = get_val_f(sel['val1'])
v2f = get_val_f(sel['val2'])
if sel['comp'] == "==":
return match_eq(v1f, v2f)
if sel['comp'] == "in":
return match_in(v1f, v2f)
if sel['comp'] == "subtype":
return match_subtype(v1f, v2f)
# Drop through: raise error
msg = "Unrecognized entity selector (%s)"%selector
raise ValueError(msg)
# -------------------------------------------------------------------
# FieldComparison
# -------------------------------------------------------------------
class FieldComparison(object):
"""
Logic for comparing fields using additional context information not available
directly to 'EntitySelector'
"""
def __init__(self, coll):
super(FieldComparison, self).__init__()
self._coll = coll
self._site = coll.get_site()
return
def get_uri_type_info(self, type_uri):
"""
Return typeinfo corresponding to the supplied type URI
"""
t = self._coll.get_uri_type(type_uri)
return t and EntityTypeInfo(self._coll, t.get_id())
def subtype(self, type1_uri, type2_uri):
"""
Returns True if the first type is a subtype of the second type, where both
types are supplied as type URIs. Returns True if both URIs are the same.
If type1_uri is not specified, assume no restriction.
If type2_uri is not specified, assume it does not satisfy the restriction.
"""
# log.info("FieldComparison.subtype(%s, %s)"%(type1_uri, type2_uri))
if not type2_uri or (type1_uri == type2_uri):
return True
if not type1_uri:
return False
type1_info = self.get_uri_type_info(type1_uri)
type1_supertype_uris = (type1_info and type1_info.get_all_type_uris()) or []
# log.info("FieldComparison.subtype: type1_uris (supertypes) %r"%(type1_uris,))
return type2_uri in type1_supertype_uris
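    # Illustrative example (assumed type URIs, not taken from any real collection):
    # if "ex:Employee" lists "ex:Person" among its supertype URIs, then
    # subtype("ex:Employee", "ex:Person") is True, while
    # subtype("ex:Person", "ex:Employee") is False.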
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
29024
import argparse
import h5py
import sys
import os
from savu.version import __version__
class NXcitation(object):
def __init__(self, description, doi, endnote, bibtex):
self.description = description.decode('UTF-8')
self.doi = doi.decode('UTF-8')
self.endnote = endnote.decode('UTF-8')
self.bibtex = bibtex.decode('UTF-8')
def get_bibtex_ref(self):
return self.bibtex.split(',')[0].split('{')[1] \
if self.bibtex else ""
def get_first_author(self):
parts = self.endnote.split('\n')
for part in parts:
if part.startswith("%A"):
return part.replace("%A", "").strip()
def get_date(self):
parts = self.endnote.split('\n')
for part in parts:
if part.startswith("%D"):
return part.replace("%D", "").strip()
def get_description_with_author(self):
return "%s \\ref{%s}(%s, %s)" % (self.description,
self.get_bibtex_ref(),
self.get_first_author(),
self.get_date())
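    # Illustrative example (assumed endnote content, not from a real NXcite group):
    # for an endnote string "%A Smith, J.\n%D 2015\n%T Some title",
    # get_first_author() returns "Smith, J." and get_date() returns "2015", so
    # get_description_with_author() renders as
    # "<description> \ref{<bibtex key>}(Smith, J., 2015)".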
class NXcitation_manager(object):
def __init__(self):
self.NXcite_list = []
def add_citation(self, citation):
self.NXcite_list.append(citation)
def get_full_endnote(self):
return "\n\n".join([cite.endnote for cite in self.NXcite_list])
def get_full_bibtex(self):
return "\n".join([cite.bibtex for cite in self.NXcite_list])
def get_description_with_citations(self):
return ". ".join([cite.get_description_with_author() for cite in
self.NXcite_list])
def __str__(self):
return "\nDESCRIPTION\n%s\n\nBIBTEX\n%s\n\nENDNOTE\n%s" % \
(self.get_description_with_citations(), self.get_full_bibtex(),
self.get_full_endnote())
class NXciteVisitor(object):
def __init__(self):
self.citation_manager = NXcitation_manager()
def _visit_NXcite(self, name, obj):
if "NX_class" in list(obj.attrs.keys()):
if obj.attrs["NX_class"] in ["NXcite"]:
citation = NXcitation(obj['description'][0],
obj['doi'][0],
obj['endnote'][0],
obj['bibtex'][0])
self.citation_manager.add_citation(citation)
def get_citation_manager(self, nx_file, entry):
nx_file[entry].visititems(self._visit_NXcite)
return self.citation_manager
def __check_input_params(args):
""" Check for required input arguments.
"""
if len(args) != 2:
print("Input and output filename need to be specified")
print("Exiting with error code 1 - incorrect number of inputs")
sys.exit(1)
if not os.path.exists(args[0]):
print(("Input file '%s' does not exist" % args[0]))
print("Exiting with error code 2 - Input file missing")
sys.exit(2)
def __option_parser(doc=True):
""" Option parser for command line arguments.
"""
version = "%(prog)s " + __version__
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='Input data file.')
parser.add_argument('out_file', help='Output file to extract citation \
information to.')
parser.add_argument('--version', action='version', version=version)
return parser if doc==True else parser.parse_args()
def main(in_file=None, quiet=False):
# when calling directly from tomo_recon.py
if in_file:
log_folder = os.path.join(os.path.dirname(in_file),"run_log")
out_file = os.path.join(log_folder, "citations.txt")
else:
args = __option_parser(doc=False)
in_file = args.in_file
out_file = args.out_file
infile = h5py.File(in_file, 'r')
citation_manager = NXciteVisitor().get_citation_manager(infile, "/")
if citation_manager is not None:
with open(out_file, 'w') as outfile:
outfile.write(citation_manager.__str__())
if not quiet:
print("Extraction complete")
if __name__ == '__main__':
main()
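# Assumed command-line usage (file names are hypothetical):
#   python <this_script>.py run_output.nxs citations.txt
# which writes the DESCRIPTION/BIBTEX/ENDNOTE summary for every NXcite group
# found in the input NeXus file.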
29027
import sys, json
# simple JSON echo script
for line in sys.stdin:
print json.dumps(json.loads(line))
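# Assumed usage (Python 2, since it uses the print statement; script name is hypothetical):
#   echo '{"a": 1}' | python <this_script>.py
# reads one JSON document per line from stdin and echoes it re-serialised.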
29050
from directory_forms_api_client.actions import PardotAction
from directory_forms_api_client.helpers import Sender
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView
from formtools.wizard.views import NamedUrlSessionWizardView
from contact.views import BaseNotifyFormView
from core import mixins
from core.datastructures import NotifySettings
from domestic.forms import (
CompanyDetailsForm,
HelpForm,
PersonalDetailsForm,
UKEFContactForm,
)
class UKEFHomeView(TemplateView):
template_name = 'domestic/ukef/home_page.html'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['trade_finance_bullets'] = [
'working capital support',
'bond support',
'credit insurance',
]
context['project_finance_bullets'] = [
'UKEF buyer credit guarantees',
'direct lending',
'credit and bond insurance',
]
return context
class ContactView(BaseNotifyFormView):
template_name = 'domestic/ukef/contact_form.html'
form_class = UKEFContactForm
success_url = reverse_lazy('domestic:uk-export-contact-success')
notify_settings = NotifySettings(
agent_template=settings.UKEF_CONTACT_AGENT_NOTIFY_TEMPLATE_ID,
agent_email=settings.UKEF_CONTACT_AGENT_EMAIL_ADDRESS,
user_template=settings.UKEF_CONTACT_USER_NOTIFY_TEMPLATE_ID,
)
def form_valid(self, form):
user_email = form.cleaned_data['email']
self.request.session['user_email'] = user_email
return super().form_valid(form)
class SuccessPageView(TemplateView):
template_name = 'domestic/ukef/contact_form_success.html'
def get(self, *args, **kwargs):
if not self.request.session.get('user_email'):
return HttpResponseRedirect(reverse_lazy('domestic:uk-export-contact'))
return super().get(*args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['user_email'] = self.request.session.get('user_email')
return super().get_context_data(**kwargs)
@method_decorator(never_cache, name='dispatch')
class GetFinanceLeadGenerationFormView(
mixins.PrepopulateFormMixin,
mixins.PreventCaptchaRevalidationMixin,
NamedUrlSessionWizardView,
):
success_url = reverse_lazy(
'domestic:uk-export-finance-lead-generation-form-success',
)
PERSONAL_DETAILS = 'your-details'
COMPANY_DETAILS = 'company-details'
HELP = 'help'
form_list = (
(PERSONAL_DETAILS, PersonalDetailsForm),
(COMPANY_DETAILS, CompanyDetailsForm),
(HELP, HelpForm),
)
templates = {
PERSONAL_DETAILS: 'domestic/finance/lead_generation_form/step-personal.html',
COMPANY_DETAILS: 'domestic/finance/lead_generation_form/step-company.html',
HELP: 'domestic/finance/lead_generation_form/step-help.html',
}
def get_form_kwargs(self, *args, **kwargs):
# skipping `PrepopulateFormMixin.get_form_kwargs`
return super(mixins.PrepopulateFormMixin, self).get_form_kwargs(*args, **kwargs)
def get_form_initial(self, step):
initial = super().get_form_initial(step)
if self.request.user.is_authenticated:
if step == self.PERSONAL_DETAILS and self.request.user.company:
initial.update(
{
'email': self.request.user.email,
'phone': getattr(self.request.user.company, 'mobile_number', ''),
'firstname': self.guess_given_name,
'lastname': self.guess_family_name,
}
)
elif step == self.COMPANY_DETAILS and self.request.user.company:
company = self.request.user.company
_sectors = getattr(company, 'sectors', [])
_industry = _sectors[0] if _sectors else None
initial.update(
{
'not_companies_house': False,
'company_number': getattr(company, 'number', ''),
'trading_name': getattr(company, 'name', ''),
'address_line_one': getattr(company, 'address_line_1', ''),
'address_line_two': getattr(company, 'address_line_2', ''),
'address_town_city': getattr(company, 'locality', ''),
'address_post_code': getattr(company, 'postal_code', ''),
'industry': _industry,
}
)
return initial
def get_template_names(self):
return [self.templates[self.steps.current]]
def done(self, form_list, **kwargs):
form_data = self.serialize_form_list(form_list)
sender = Sender(email_address=form_data['email'], country_code=None)
action = PardotAction(
pardot_url=settings.UKEF_FORM_SUBMIT_TRACKER_URL,
form_url=reverse('domestic:uk-export-finance-lead-generation-form', kwargs={'step': self.PERSONAL_DETAILS}),
sender=sender,
)
response = action.save(form_data)
response.raise_for_status()
return redirect(self.success_url)
@staticmethod
def serialize_form_list(form_list):
data = {}
for form in form_list:
data.update(form.cleaned_data)
return data
29087
from django.core.serializers.json import DjangoJSONEncoder
class CallableJSONEncoder(DjangoJSONEncoder):
def default(self, obj):
if callable(obj):
return obj()
return super().default(obj)
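# Minimal usage sketch (assumed call site, not part of this module):
#   json.dumps({'timestamp': timezone.now}, cls=CallableJSONEncoder)
# invokes the callable and lets DjangoJSONEncoder handle the resulting datetime.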
29090
import sys
import os
from multiprocessing import Process, Queue, Manager
from threading import Timer
from wadi_harness import Harness
from wadi_debug_win import Debugger
import time
import hashlib
def test(msg):
while True:
print 'Process 2:' + msg
#print msg
def test2():
print 'Process 1'
time.sleep(2)
while True:
print 'Process 1'
def run_harness(t):
harness = Harness(sys.argv[1],sys.argv[2],t)
harness.run()
def run_debugger(q):
debugger = Debugger(q)
debugger.run_Browser('IE')
def timeout_debug(dp):
print '[*] Terminating Debugger Process PID: %d' % dp.pid
dp.terminate()
class wadi():
def __init__(self, args=None):
if args:
self.args = args
else:
pass
def writeTestCases(self,tcases,msg):
self.msg = msg[0]
self.code = msg[1]
self.add = msg[2]
self.testcases = tcases
self.hash = hashlib.md5()
self.b = self.code+self.add
self.hash.update(self.b)
self.dgst = self.hash.hexdigest()
self.path = "./"+self.dgst
if os.path.exists(self.path):
print "[*] Duplicate Crash: %s" % self.dgst
else:
os.makedirs(self.path)
f = open(self.path + "/" +self.dgst+".crash","w+b")
f.write(self.msg)
f.close()
print "[*] Written Crash file to: %s" % self.dgst+".crash"
for i in range(10):
self.tcase = self.testcases.pop()
f2 = open(self.path+"/"+self.dgst+"_"+str(i)+".html","w+b")
f2.write(self.tcase)
f2.close()
print "[*] Written testcases to %s" % self.path+"/"+self.dgst+str(i)+".html"
print "[*] Last TestCase Folder '%s'" % self.dgst
def close(self):
sys.exit()
def run(self):
self.queue = Manager().list()
self.tcases = Manager().list()
self.server_pid = None
self.debugger_pid = None
self.init = 0
while True:
if not self.server_pid:
self.server_process = Process(target=run_harness, args=(self.tcases,))
self.server_process.start()
self.server_pid = self.server_process.pid
print '[*] Running Server Process %s ' % (self.server_pid,)
#self.server_pid =
if not self.debugger_pid:
self.debugger_process = Process(target=run_debugger,args=(self.queue,))
self.debugger_process.start()
self.debugger_pid = self.debugger_process.pid
timer = Timer(120.0,timeout_debug,(self.debugger_process,))
timer.daemon = True
timer.start()
if not self.debugger_process.is_alive():
print "[*] Debugger Process %s exited" % self.debugger_pid
timer.cancel()
self.lenq = len(self.queue)
self.lentc = len(self.tcases)
if self.lenq:
self.msg = self.queue.pop()
#self.msg = self.queue.get()
print "[*] Wooops Crash !!!!"
print "[*] %s" % self.msg[0]
else:
print "[*] No Crashes"
#if not self.tcases.empty():
if self.lentc and self.lenq:
#self.tc = self.tcases.get()
self.writeTestCases(self.tcases, self.msg)
else:
print "[*] No TestCases"
self.debugger_pid = None
else:
pass
if __name__ == '__main__':
#try:
w = wadi()
w.run()
#except:
# w.close()
29104
from apps.flow.settings import config
if config.SERVER_ENV != 'dev':
from gevent import monkey
monkey.patch_all()
else:
pass
from apps.flow.views.deploy import deploy
from apps.flow.views.flow import flow
from library.api.tFlask import tflask
def create_app():
app = tflask(config)
register_blueprints(app)
return app
def register_blueprints(app):
app.register_blueprint(flow, url_prefix="/v1/flow")
app.register_blueprint(deploy, url_prefix="/v1/deploy")
if __name__ == '__main__':
create_app().run(port=config.PORT)
29131
from twisted.trial import unittest
from ..eventsource import EventSourceParser
class FakeTransport:
disconnecting = False
def parse_events(s):
fields = []
p = EventSourceParser(lambda name, data: fields.append((name,data)))
p.makeConnection(FakeTransport())
p.dataReceived(s)
return fields
class EventSource(unittest.TestCase):
def test_parse(self):
fields = []
p = EventSourceParser(lambda name, data: fields.append((name,data)))
p.makeConnection(FakeTransport())
self.failUnlessEqual(fields, [])
p.dataReceived(": comment")
self.failUnlessEqual(fields, [])
p.dataReceived("\n")
self.failUnlessEqual(fields, [])
p.dataReceived("\n")
self.failUnlessEqual(fields, [("", "comment")])
p.dataReceived("data: one line\n\n")
self.failUnlessEqual(fields, [("", "comment"),
("data", "one line")])
p.dataReceived("data: two\n")
self.failUnlessEqual(fields, [("", "comment"),
("data", "one line")])
p.dataReceived("lines\n")
self.failUnlessEqual(fields, [("", "comment"),
("data", "one line")])
p.dataReceived("\n")
self.failUnlessEqual(fields, [("", "comment"),
("data", "one line"),
("data", "two\nlines"),
])
29157
import torch
import torch.nn.functional as F
from torch import nn
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher_crowd
import numpy as np
import time
# the network framework of the regression branch
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchor_points=4, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchor_points * 2, kernel_size=3, padding=1)
# sub-branch forward
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.output(out)
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 2)
# the network framework of the classification branch
class ClassificationModel(nn.Module):
def __init__(self, num_features_in, num_anchor_points=4, num_classes=80, prior=0.01, feature_size=256):
super(ClassificationModel, self).__init__()
self.num_classes = num_classes
self.num_anchor_points = num_anchor_points
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchor_points * num_classes, kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
# sub-branch forward
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.output(out)
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, _ = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchor_points, self.num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
# generate the reference points in grid layout
def generate_anchor_points(stride=16, row=3, line=3):
row_step = stride / row
line_step = stride / line
shift_x = (np.arange(1, line + 1) - 0.5) * line_step - stride / 2
shift_y = (np.arange(1, row + 1) - 0.5) * row_step - stride / 2
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
anchor_points = np.vstack((
shift_x.ravel(), shift_y.ravel()
)).transpose()
return anchor_points
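# Worked example (follows directly from the arithmetic above): with stride=16,
# row=2, line=2, both steps are 8, so the offsets are +/-4 in each direction,
# i.e. the four points (-4, -4), (4, -4), (-4, 4), (4, 4) centred within the cell.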
# shift the meta-anchor to get the anchor points
def shift(shape, stride, anchor_points):
shift_x = (np.arange(0, shape[1]) + 0.5) * stride
shift_y = (np.arange(0, shape[0]) + 0.5) * stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((
shift_x.ravel(), shift_y.ravel()
)).transpose()
A = anchor_points.shape[0]
K = shifts.shape[0]
all_anchor_points = (anchor_points.reshape((1, A, 2)) + shifts.reshape((1, K, 2)).transpose((1, 0, 2)))
all_anchor_points = all_anchor_points.reshape((K * A, 2))
return all_anchor_points
# this class generate all reference points on all pyramid levels
class AnchorPoints(nn.Module):
def __init__(self, pyramid_levels=None, strides=None, row=3, line=3):
super(AnchorPoints, self).__init__()
if pyramid_levels is None:
self.pyramid_levels = [3, 4, 5, 6, 7]
else:
self.pyramid_levels = pyramid_levels
        if strides is None:
            self.strides = [2 ** x for x in self.pyramid_levels]
        else:
            self.strides = strides
self.row = row
self.line = line
def forward(self, image):
image_shape = image.shape[2:]
image_shape = np.array(image_shape)
image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]
all_anchor_points = np.zeros((0, 2)).astype(np.float32)
# get reference points for each level
for idx, p in enumerate(self.pyramid_levels):
anchor_points = generate_anchor_points(2**p, row=self.row, line=self.line)
shifted_anchor_points = shift(image_shapes[idx], self.strides[idx], anchor_points)
all_anchor_points = np.append(all_anchor_points, shifted_anchor_points, axis=0)
all_anchor_points = np.expand_dims(all_anchor_points, axis=0)
# send reference points to device
if torch.cuda.is_available():
return torch.from_numpy(all_anchor_points.astype(np.float32)).cuda()
else:
return torch.from_numpy(all_anchor_points.astype(np.float32))
class Decoder(nn.Module):
def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
super(Decoder, self).__init__()
# upsample C5 to get P5 from the FPN paper
self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P5 elementwise to C4
self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P4 elementwise to C3
self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
def forward(self, inputs):
C3, C4, C5 = inputs
P5_x = self.P5_1(C5)
P5_upsampled_x = self.P5_upsampled(P5_x)
P5_x = self.P5_2(P5_x)
P4_x = self.P4_1(C4)
P4_x = P5_upsampled_x + P4_x
P4_upsampled_x = self.P4_upsampled(P4_x)
P4_x = self.P4_2(P4_x)
P3_x = self.P3_1(C3)
P3_x = P3_x + P4_upsampled_x
P3_x = self.P3_2(P3_x)
return [P3_x, P4_x, P5_x]
# the definition of the P2PNet model
class P2PNet(nn.Module):
def __init__(self, backbone, row=2, line=2):
super().__init__()
self.backbone = backbone
self.num_classes = 2
# the number of all anchor points
num_anchor_points = row * line
self.regression = RegressionModel(num_features_in=256, num_anchor_points=num_anchor_points)
self.classification = ClassificationModel(num_features_in=256, \
num_classes=self.num_classes, \
num_anchor_points=num_anchor_points)
self.anchor_points = AnchorPoints(pyramid_levels=[3,], row=row, line=line)
self.fpn = Decoder(256, 512, 512)
def forward(self, samples: NestedTensor):
# get the backbone features
features = self.backbone(samples)
# forward the feature pyramid
features_fpn = self.fpn([features[1], features[2], features[3]])
batch_size = features[0].shape[0]
# run the regression and classification branch
regression = self.regression(features_fpn[1]) * 100 # 8x
classification = self.classification(features_fpn[1])
anchor_points = self.anchor_points(samples).repeat(batch_size, 1, 1)
# decode the points as prediction
output_coord = regression + anchor_points
output_class = classification
out = {'pred_logits': output_class, 'pred_points': output_coord}
return out
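    # Shape sketch (assumed 512x512 input; row=line=2 gives 4 anchor points per cell,
    # and the "8x" note above implies features_fpn[1] is at stride 8, i.e. a 64x64 map):
    # 'pred_points' is then [batch, 64*64*4, 2] and 'pred_logits' is [batch, 64*64*4, 2].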
class SetCriterion_Crowd(nn.Module):
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[0] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_points):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], 0,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
return losses
def loss_points(self, outputs, targets, indices, num_points):
assert 'pred_points' in outputs
idx = self._get_src_permutation_idx(indices)
src_points = outputs['pred_points'][idx]
target_points = torch.cat([t['point'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.mse_loss(src_points, target_points, reduction='none')
losses = {}
losses['loss_point'] = loss_bbox.sum() / num_points
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_points, **kwargs):
loss_map = {
'labels': self.loss_labels,
'points': self.loss_points,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_points, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
output1 = {'pred_logits': outputs['pred_logits'], 'pred_points': outputs['pred_points']}
indices1 = self.matcher(output1, targets)
num_points = sum(len(t["labels"]) for t in targets)
num_points = torch.as_tensor([num_points], dtype=torch.float, device=next(iter(output1.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_points)
num_boxes = torch.clamp(num_points / get_world_size(), min=1).item()
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, output1, targets, indices1, num_boxes))
return losses
# create the P2PNet model
def build(args, training):
# treats persons as a single class
num_classes = 1
backbone = build_backbone(args)
model = P2PNet(backbone, args.row, args.line)
if not training:
return model
weight_dict = {'loss_ce': 1, 'loss_points': args.point_loss_coef}
losses = ['labels', 'points']
matcher = build_matcher_crowd(args)
criterion = SetCriterion_Crowd(num_classes, \
matcher=matcher, weight_dict=weight_dict, \
eos_coef=args.eos_coef, losses=losses)
return model, criterion
29238
from tool.runners.python import SubmissionPy
class JonSubmission(SubmissionPy):
def run(self, s):
l = [int(x) for x in s.strip().split()]
n = len(l)
for i in range(n):
for j in range(i):
if l[i] + l[j] > 2020:
continue
for k in range(j):
if l[i] + l[j] + l[k] == 2020:
return str(l[i] * l[j] * l[k])
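# Worked example (the Advent of Code 2020 day 1 sample input):
# for "1721 979 366 299 675 1456", the triple 979 + 366 + 675 == 2020,
# so run() returns str(979 * 366 * 675) == "241861950".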
29257
from prefixdate import parse_parts
from opensanctions import helpers as h
from opensanctions.util import remove_namespace
def parse_address(context, el):
country = el.get("countryDescription")
if country == "UNKNOWN":
country = None
# context.log.info("Addrr", el=el)
return h.make_address(
context,
street=el.get("street"),
po_box=el.get("poBox"),
city=el.get("city"),
place=el.get("place"),
postal_code=el.get("zipCode"),
region=el.get("region"),
country=country,
country_code=el.get("countryIso2Code"),
)
def parse_entry(context, entry):
subject_type = entry.find("./subjectType")
schema = context.lookup_value("subject_type", subject_type.get("code"))
if schema is None:
context.log.warning("Unknown subject type", type=subject_type)
return
entity = context.make(schema)
entity.id = context.make_slug(entry.get("euReferenceNumber"))
entity.add("notes", entry.findtext("./remark"))
entity.add("topics", "sanction")
sanction = h.make_sanction(context, entity)
regulation = entry.find("./regulation")
source_url = regulation.findtext("./publicationUrl", "")
sanction.set("sourceUrl", source_url)
sanction.add("program", regulation.get("programme"))
sanction.add("reason", regulation.get("numberTitle"))
sanction.add("startDate", regulation.get("entryIntoForceDate"))
sanction.add("listingDate", regulation.get("publicationDate"))
for name in entry.findall("./nameAlias"):
if entry.get("strong") == "false":
entity.add("weakAlias", name.get("wholeName"))
else:
entity.add("name", name.get("wholeName"))
entity.add("title", name.get("title"), quiet=True)
entity.add("firstName", name.get("firstName"), quiet=True)
entity.add("middleName", name.get("middleName"), quiet=True)
entity.add("lastName", name.get("lastName"), quiet=True)
entity.add("position", name.get("function"), quiet=True)
gender = h.clean_gender(name.get("gender"))
entity.add("gender", gender, quiet=True)
for node in entry.findall("./identification"):
type = node.get("identificationTypeCode")
schema = "Passport" if type == "passport" else "Identification"
passport = context.make(schema)
passport.id = context.make_id("ID", entity.id, node.get("logicalId"))
passport.add("holder", entity)
passport.add("authority", node.get("issuedBy"))
passport.add("type", node.get("identificationTypeDescription"))
passport.add("number", node.get("number"))
passport.add("number", node.get("latinNumber"))
passport.add("startDate", node.get("issueDate"))
passport.add("startDate", node.get("issueDate"))
passport.add("country", node.get("countryIso2Code"))
passport.add("country", node.get("countryDescription"))
for remark in node.findall("./remark"):
passport.add("summary", remark.text)
context.emit(passport)
for node in entry.findall("./address"):
address = parse_address(context, node)
h.apply_address(context, entity, address)
for child in node.getchildren():
            if child.tag in ("regulationSummary",):
continue
elif child.tag == "remark":
entity.add("notes", child.text)
elif child.tag == "contactInfo":
prop = context.lookup_value("contact_info", child.get("key"))
if prop is None:
context.log.warning("Unknown contact info", node=child)
else:
entity.add(prop, child.get("value"))
else:
context.log.warning("Unknown address component", node=child)
for birth in entry.findall("./birthdate"):
partialBirth = parse_parts(
birth.get("year"), birth.get("month"), birth.get("day")
)
entity.add("birthDate", birth.get("birthdate"))
entity.add("birthDate", partialBirth)
address = parse_address(context, birth)
if address is not None:
entity.add("birthPlace", address.get("full"))
entity.add("country", address.get("country"))
for node in entry.findall("./citizenship"):
entity.add("nationality", node.get("countryIso2Code"), quiet=True)
entity.add("nationality", node.get("countryDescription"), quiet=True)
context.emit(entity, target=True, unique=True)
context.emit(sanction)
def crawl(context):
path = context.fetch_resource("source.xml", context.dataset.data.url)
context.export_resource(path, "text/xml", title=context.SOURCE_TITLE)
doc = context.parse_resource_xml(path)
doc = remove_namespace(doc)
for entry in doc.findall(".//sanctionEntity"):
parse_entry(context, entry)